from __future__ import absolute_import, unicode_literals
import pytest
import select
import ssl
import socket
import sys
import time
import uuid
from collections import OrderedDict
try:
from collections.abc import Callable
except ImportError:
from collections import Callable
from itertools import count
from case import Mock, call, patch, skip
from kombu.five import Empty, range, monotonic
from kombu.transport.qpid import (AuthenticationFailure, Channel, Connection,
ConnectionError, Message, NotFound, QoS,
Transport)
from kombu.transport.virtual import Base64
QPID_MODULE = 'kombu.transport.qpid'
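# The fixture below patches kombu.transport.qpid.dependency_is_none so that the
# transport's runtime-dependency check passes under test; test classes opt in
# either with @pytest.mark.usefixtures('disable_runtime_dependency_check') or by
# accepting the fixture as an argument.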
@pytest.fixture
def disable_runtime_dependency_check(patching):
mock_dependency_is_none = patching(QPID_MODULE + '.dependency_is_none')
mock_dependency_is_none.return_value = False
return mock_dependency_is_none
class QpidException(Exception):
"""
An object used to mock Exceptions provided by qpid.messaging.exceptions
"""
def __init__(self, code=None, text=None):
super(Exception, self).__init__(self)
self.code = code
self.text = text
class BreakOutException(Exception):
pass
@skip.if_python3()
@skip.if_pypy()
class test_QoS__init__(object):
def setup(self):
self.mock_session = Mock()
self.qos = QoS(self.mock_session)
def test__init__prefetch_default_set_correct_without_prefetch_value(self):
assert self.qos.prefetch_count == 1
def test__init__prefetch_is_hard_set_to_one(self):
qos_limit_two = QoS(self.mock_session)
assert qos_limit_two.prefetch_count == 1
def test__init___not_yet_acked_is_initialized(self):
assert isinstance(self.qos._not_yet_acked, OrderedDict)
@skip.if_python3()
@skip.if_pypy()
class test_QoS_can_consume(object):
def setup(self):
session = Mock()
self.qos = QoS(session)
def test_True_when_prefetch_limit_is_zero(self):
self.qos.prefetch_count = 0
self.qos._not_yet_acked = []
assert self.qos.can_consume()
def test_True_when_len_of__not_yet_acked_is_lt_prefetch_count(self):
self.qos.prefetch_count = 3
self.qos._not_yet_acked = ['a', 'b']
assert self.qos.can_consume()
def test_False_when_len_of__not_yet_acked_is_eq_prefetch_count(self):
self.qos.prefetch_count = 3
self.qos._not_yet_acked = ['a', 'b', 'c']
assert not self.qos.can_consume()
@skip.if_python3()
@skip.if_pypy()
class test_QoS_can_consume_max_estimate(object):
def setup(self):
self.mock_session = Mock()
self.qos = QoS(self.mock_session)
def test_return_one_when_prefetch_count_eq_zero(self):
self.qos.prefetch_count = 0
assert self.qos.can_consume_max_estimate() == 1
def test_return_prefetch_count_sub_len__not_yet_acked(self):
self.qos._not_yet_acked = ['a', 'b']
self.qos.prefetch_count = 4
assert self.qos.can_consume_max_estimate() == 2
@skip.if_python3()
@skip.if_pypy()
class test_QoS_ack(object):
def setup(self):
self.mock_session = Mock()
self.qos = QoS(self.mock_session)
def test_ack_pops__not_yet_acked(self):
message = Mock()
self.qos.append(message, 1)
assert 1 in self.qos._not_yet_acked
self.qos.ack(1)
assert 1 not in self.qos._not_yet_acked
def test_ack_calls_session_acknowledge_with_message(self):
message = Mock()
self.qos.append(message, 1)
self.qos.ack(1)
self.qos.session.acknowledge.assert_called_with(message=message)
@skip.if_python3()
@skip.if_pypy()
class test_QoS_reject(object):
@pytest.fixture(autouse=True)
def setup_qpid(self, patching):
self.mock_qpid = patching(QPID_MODULE + '.qpid')
self.mock_Disposition = self.mock_qpid.messaging.Disposition
self.mock_RELEASED = self.mock_qpid.messaging.RELEASED
self.mock_REJECTED = self.mock_qpid.messaging.REJECTED
def setup(self):
self.mock_session = Mock()
self.mock_message = Mock()
self.qos = QoS(self.mock_session)
def test_reject_pops__not_yet_acked(self):
self.qos.append(self.mock_message, 1)
assert 1 in self.qos._not_yet_acked
self.qos.reject(1)
assert 1 not in self.qos._not_yet_acked
def test_reject_requeue_true(self):
self.qos.append(self.mock_message, 1)
self.qos.reject(1, requeue=True)
self.mock_Disposition.assert_called_with(self.mock_RELEASED)
self.qos.session.acknowledge.assert_called_with(
message=self.mock_message,
disposition=self.mock_Disposition.return_value,
)
def test_reject_requeue_false(self):
message = Mock()
self.qos.append(message, 1)
self.qos.reject(1, requeue=False)
self.mock_Disposition.assert_called_with(self.mock_REJECTED)
self.qos.session.acknowledge.assert_called_with(
message=message, disposition=self.mock_Disposition.return_value,
)
@skip.if_python3()
@skip.if_pypy()
class test_QoS(object):
def mock_message_factory(self):
"""Create and return a mock message tag and delivery_tag."""
m_delivery_tag = self.delivery_tag_generator.next()
m = 'message %s' % (m_delivery_tag, )
return m, m_delivery_tag
def add_n_messages_to_qos(self, n, qos):
"""Add N mock messages into the passed in qos object"""
for i in range(n):
self.add_message_to_qos(qos)
def add_message_to_qos(self, qos):
"""Add a single mock message into the passed in qos object.
Uses the mock_message_factory() to create the message and
delivery_tag.
"""
m, m_delivery_tag = self.mock_message_factory()
qos.append(m, m_delivery_tag)
def setup(self):
self.mock_session = Mock()
self.qos_no_limit = QoS(self.mock_session)
self.qos_limit_2 = QoS(self.mock_session, prefetch_count=2)
self.delivery_tag_generator = count(1)
def test_append(self):
"""Append two messages and check inside the QoS object that they
were put into the internal data structures correctly
"""
qos = self.qos_no_limit
m1, m1_tag = self.mock_message_factory()
m2, m2_tag = self.mock_message_factory()
qos.append(m1, m1_tag)
length_not_yet_acked = len(qos._not_yet_acked)
assert length_not_yet_acked == 1
checked_message1 = qos._not_yet_acked[m1_tag]
assert m1 is checked_message1
qos.append(m2, m2_tag)
length_not_yet_acked = len(qos._not_yet_acked)
assert length_not_yet_acked == 2
checked_message2 = qos._not_yet_acked[m2_tag]
assert m2 is checked_message2
def test_get(self):
"""Append two messages, and use get to receive them"""
qos = self.qos_no_limit
m1, m1_tag = self.mock_message_factory()
m2, m2_tag = self.mock_message_factory()
qos.append(m1, m1_tag)
qos.append(m2, m2_tag)
message1 = qos.get(m1_tag)
message2 = qos.get(m2_tag)
assert m1 is message1
assert m2 is message2
@skip.if_python3()
@skip.if_pypy()
class ConnectionTestBase(object):
@patch(QPID_MODULE + '.qpid')
def setup(self, mock_qpid):
self.connection_options = {
'host': 'localhost',
'port': 5672,
'transport': 'tcp',
'timeout': 10,
'sasl_mechanisms': 'ANONYMOUS',
}
self.mock_qpid_connection = mock_qpid.messaging.Connection
self.conn = Connection(**self.connection_options)
@skip.if_python3()
@skip.if_pypy()
class test_Connection__init__(ConnectionTestBase):
def test_stores_connection_options(self):
# ensure that only one mech was passed into connection. The other
# options should all be passed through as-is
modified_conn_opts = self.connection_options
assert modified_conn_opts == self.conn.connection_options
def test_class_variables(self):
assert isinstance(self.conn.channels, list)
assert isinstance(self.conn._callbacks, dict)
def test_establishes_connection(self):
modified_conn_opts = self.connection_options
self.mock_qpid_connection.establish.assert_called_with(
**modified_conn_opts
)
def test_saves_established_connection(self):
created_conn = self.mock_qpid_connection.establish.return_value
assert self.conn._qpid_conn is created_conn
@patch(QPID_MODULE + '.ConnectionError', new=(QpidException, ))
@patch(QPID_MODULE + '.sys.exc_info')
@patch(QPID_MODULE + '.qpid')
def test_mutates_ConnError_by_message(self, mock_qpid, mock_exc_info):
text = 'connection-forced: Authentication failed(320)'
my_conn_error = QpidException(text=text)
mock_qpid.messaging.Connection.establish.side_effect = my_conn_error
mock_exc_info.return_value = 'a', 'b', None
try:
self.conn = Connection(**self.connection_options)
except AuthenticationFailure as error:
exc_info = sys.exc_info()
assert not isinstance(error, QpidException)
assert exc_info[1] == 'b'
assert exc_info[2] is None
else:
self.fail('ConnectionError type was not mutated correctly')
@patch(QPID_MODULE + '.ConnectionError', new=(QpidException, ))
@patch(QPID_MODULE + '.sys.exc_info')
@patch(QPID_MODULE + '.qpid')
def test_mutates_ConnError_by_code(self, mock_qpid, mock_exc_info):
my_conn_error = QpidException(code=320, text='someothertext')
mock_qpid.messaging.Connection.establish.side_effect = my_conn_error
mock_exc_info.return_value = 'a', 'b', None
try:
self.conn = Connection(**self.connection_options)
except AuthenticationFailure as error:
exc_info = sys.exc_info()
assert not isinstance(error, QpidException)
assert exc_info[1] == 'b'
assert exc_info[2] is None
else:
self.fail('ConnectionError type was not mutated correctly')
@patch(QPID_MODULE + '.ConnectionError', new=(QpidException, ))
@patch(QPID_MODULE + '.sys.exc_info')
@patch(QPID_MODULE + '.qpid')
def test_connection__init__mutates_ConnError_by_message2(self, mock_qpid,
mock_exc_info):
"""
Test for PLAIN connection via python-saslwrapper, sans cyrus-sasl-plain
This test is specific for what is returned when we attempt to connect
with PLAIN mech and python-saslwrapper is installed, but
cyrus-sasl-plain is not installed.
"""
my_conn_error = QpidException()
my_conn_error.text = 'Error in sasl_client_start (-4) SASL(-4): no '\
'mechanism available'
mock_qpid.messaging.Connection.establish.side_effect = my_conn_error
mock_exc_info.return_value = ('a', 'b', None)
try:
self.conn = Connection(**self.connection_options)
except AuthenticationFailure as error:
exc_info = sys.exc_info()
assert not isinstance(error, QpidException)
assert exc_info[1] == 'b'
assert exc_info[2] is None
else:
self.fail('ConnectionError type was not mutated correctly')
@patch(QPID_MODULE + '.ConnectionError', new=(QpidException, ))
@patch(QPID_MODULE + '.sys.exc_info')
@patch(QPID_MODULE + '.qpid')
def test_unknown_connection_error(self, mock_qpid, mock_exc_info):
# If we get a connection error that we don't understand,
# bubble it up as-is
my_conn_error = QpidException(code=999, text='someothertext')
mock_qpid.messaging.Connection.establish.side_effect = my_conn_error
mock_exc_info.return_value = 'a', 'b', None
try:
self.conn = Connection(**self.connection_options)
except Exception as error:
assert error.code == 999
else:
self.fail('Connection should have thrown an exception')
@patch.object(Transport, 'channel_errors', new=(QpidException, ))
@patch(QPID_MODULE + '.qpid')
@patch(QPID_MODULE + '.ConnectionError', new=IOError)
def test_non_qpid_error_raises(self, mock_qpid):
mock_Qpid_Connection = mock_qpid.messaging.Connection
my_conn_error = SyntaxError()
my_conn_error.text = 'some non auth related error message'
mock_Qpid_Connection.establish.side_effect = my_conn_error
with pytest.raises(SyntaxError):
Connection(**self.connection_options)
@patch(QPID_MODULE + '.qpid')
@patch(QPID_MODULE + '.ConnectionError', new=IOError)
def test_non_auth_conn_error_raises(self, mock_qpid):
mock_Qpid_Connection = mock_qpid.messaging.Connection
my_conn_error = IOError()
my_conn_error.text = 'some non auth related error message'
mock_Qpid_Connection.establish.side_effect = my_conn_error
with pytest.raises(IOError):
Connection(**self.connection_options)
@skip.if_python3()
@skip.if_pypy()
class test_Connection_class_attributes(ConnectionTestBase):
def test_connection_verify_class_attributes(self):
assert Channel == Connection.Channel
@skip.if_python3()
@skip.if_pypy()
class test_Connection_get_Qpid_connection(ConnectionTestBase):
def test_connection_get_qpid_connection(self):
self.conn._qpid_conn = Mock()
returned_connection = self.conn.get_qpid_connection()
assert self.conn._qpid_conn is returned_connection
@skip.if_python3()
@skip.if_pypy()
class test_Connection_close(ConnectionTestBase):
def test_connection_close(self):
self.conn._qpid_conn = Mock()
self.conn.close()
self.conn._qpid_conn.close.assert_called_once_with()
@skip.if_python3()
@skip.if_pypy()
class test_Connection_close_channel(ConnectionTestBase):
def setup(self):
super(test_Connection_close_channel, self).setup()
self.conn.channels = Mock()
def test_connection_close_channel_removes_channel_from_channel_list(self):
mock_channel = Mock()
self.conn.close_channel(mock_channel)
self.conn.channels.remove.assert_called_once_with(mock_channel)
def test_connection_close_channel_handles_ValueError_being_raised(self):
self.conn.channels.remove = Mock(side_effect=ValueError())
self.conn.close_channel(Mock())
def test_connection_close_channel_set_channel_connection_to_None(self):
mock_channel = Mock()
mock_channel.connection = False
self.conn.channels.remove = Mock(side_effect=ValueError())
self.conn.close_channel(mock_channel)
assert mock_channel.connection is None
@skip.if_python3()
@skip.if_pypy()
class ChannelTestBase(object):
@pytest.fixture(autouse=True)
def setup_channel(self, patching):
self.mock_qpidtoollibs = patching(QPID_MODULE + '.qpidtoollibs')
self.mock_broker_agent = self.mock_qpidtoollibs.BrokerAgent
self.conn = Mock()
self.transport = Mock()
self.channel = Channel(self.conn, self.transport)
@skip.if_python3()
@skip.if_pypy()
class test_Channel_purge(ChannelTestBase):
def setup(self):
self.mock_queue = Mock()
def test_gets_queue(self):
self.channel._purge(self.mock_queue)
getQueue = self.mock_broker_agent.return_value.getQueue
getQueue.assert_called_once_with(self.mock_queue)
def test_does_not_call_purge_if_message_count_is_zero(self):
values = {'msgDepth': 0}
queue_obj = self.mock_broker_agent.return_value.getQueue.return_value
queue_obj.values = values
self.channel._purge(self.mock_queue)
assert not queue_obj.purge.called
def test_purges_all_messages_from_queue(self):
values = {'msgDepth': 5}
queue_obj = self.mock_broker_agent.return_value.getQueue.return_value
queue_obj.values = values
self.channel._purge(self.mock_queue)
queue_obj.purge.assert_called_with(5)
def test_returns_message_count(self):
values = {'msgDepth': 5}
queue_obj = self.mock_broker_agent.return_value.getQueue.return_value
queue_obj.values = values
result = self.channel._purge(self.mock_queue)
assert result == 5
@patch(QPID_MODULE + '.NotFound', new=QpidException)
def test_raises_channel_error_if_queue_does_not_exist(self):
self.mock_broker_agent.return_value.getQueue.return_value = None
with pytest.raises(QpidException):
self.channel._purge(self.mock_queue)
@skip.if_python3()
@skip.if_pypy()
class test_Channel_put(ChannelTestBase):
@patch(QPID_MODULE + '.qpid')
def test_channel__put_onto_queue(self, mock_qpid):
routing_key = 'routingkey'
mock_message = Mock()
mock_Message_cls = mock_qpid.messaging.Message
self.channel._put(routing_key, mock_message)
address_str = '{0}; {{assert: always, node: {{type: queue}}}}'.format(
routing_key,
)
self.transport.session.sender.assert_called_with(address_str)
mock_Message_cls.assert_called_with(
content=mock_message, subject=None, durable=True
)
mock_sender = self.transport.session.sender.return_value
mock_sender.send.assert_called_with(
mock_Message_cls.return_value, sync=True,
)
mock_sender.close.assert_called_with()
@patch(QPID_MODULE + '.qpid')
def test_channel__put_onto_exchange(self, mock_qpid):
mock_routing_key = 'routingkey'
mock_exchange_name = 'myexchange'
mock_message = Mock()
mock_Message_cls = mock_qpid.messaging.Message
self.channel._put(mock_routing_key, mock_message, mock_exchange_name)
addrstr = '{0}/{1}; {{assert: always, node: {{type: topic}}}}'.format(
mock_exchange_name, mock_routing_key,
)
self.transport.session.sender.assert_called_with(addrstr)
mock_Message_cls.assert_called_with(
content=mock_message, subject=mock_routing_key, durable=True
)
mock_sender = self.transport.session.sender.return_value
mock_sender.send.assert_called_with(
mock_Message_cls.return_value, sync=True,
)
mock_sender.close.assert_called_with()
@skip.if_python3()
@skip.if_pypy()
class test_Channel_get(ChannelTestBase):
def test_channel__get(self):
mock_queue = Mock()
result = self.channel._get(mock_queue)
self.transport.session.receiver.assert_called_once_with(mock_queue)
mock_rx = self.transport.session.receiver.return_value
mock_rx.fetch.assert_called_once_with(timeout=0)
mock_rx.close.assert_called_once_with()
assert mock_rx.fetch.return_value is result
@skip.if_python3()
@skip.if_pypy()
class test_Channel_close(ChannelTestBase):
@pytest.fixture(autouse=True)
def setup_basic_cancel(self, patching, setup_channel):
self.mock_basic_cancel = patching.object(self.channel, 'basic_cancel')
self.channel.closed = False
@pytest.fixture(autouse=True)
def setup_receivers(self, setup_channel):
self.mock_receiver1 = Mock()
self.mock_receiver2 = Mock()
self.channel._receivers = {
1: self.mock_receiver1, 2: self.mock_receiver2,
}
def test_channel_close_sets_close_attribute(self):
self.channel.close()
assert self.channel.closed
def test_channel_close_calls_basic_cancel_on_all_receivers(self):
self.channel.close()
self.mock_basic_cancel.assert_has_calls([call(1), call(2)])
def test_channel_close_calls_close_channel_on_connection(self):
self.channel.close()
self.conn.close_channel.assert_called_once_with(self.channel)
def test_channel_close_calls_close_on_broker_agent(self):
self.channel.close()
self.channel._broker.close.assert_called_once_with()
def test_channel_close_does_nothing_if_already_closed(self):
self.channel.closed = True
self.channel.close()
self.mock_basic_cancel.assert_not_called()
def test_channel_close_does_not_call_close_channel_if_conn_is_None(self):
self.channel.connection = None
self.channel.close()
self.conn.close_channel.assert_not_called()
@skip.if_python3()
@skip.if_pypy()
class test_Channel_basic_qos(ChannelTestBase):
def test_channel_basic_qos_always_returns_one(self):
self.channel.basic_qos(2)
assert self.channel.qos.prefetch_count == 1
@skip.if_python3()
@skip.if_pypy()
class test_Channel_basic_get(ChannelTestBase):
@pytest.fixture(autouse=True)
def setup_channel_attributes(self, setup_channel):
self.channel.Message = Mock()
self.channel._get = Mock()
def test_channel_basic_get_calls__get_with_queue(self):
mock_queue = Mock()
self.channel.basic_get(mock_queue)
self.channel._get.assert_called_once_with(mock_queue)
def test_channel_basic_get_creates_Message_correctly(self):
mock_queue = Mock()
self.channel.basic_get(mock_queue)
mock_raw_message = self.channel._get.return_value.content
self.channel.Message.assert_called_once_with(
mock_raw_message, channel=self.channel,
)
def test_channel_basic_get_acknowledges_message_by_default(self):
mock_queue = Mock()
self.channel.basic_get(mock_queue)
mock_qpid_message = self.channel._get.return_value
acknowledge = self.transport.session.acknowledge
acknowledge.assert_called_once_with(message=mock_qpid_message)
def test_channel_basic_get_acknowledges_message_with_no_ack_False(self):
mock_queue = Mock()
self.channel.basic_get(mock_queue, no_ack=False)
mock_qpid_message = self.channel._get.return_value
acknowledge = self.transport.session.acknowledge
acknowledge.assert_called_once_with(message=mock_qpid_message)
def test_channel_basic_get_acknowledges_message_with_no_ack_True(self):
mock_queue = Mock()
self.channel.basic_get(mock_queue, no_ack=True)
mock_qpid_message = self.channel._get.return_value
acknowledge = self.transport.session.acknowledge
acknowledge.assert_called_once_with(message=mock_qpid_message)
def test_channel_basic_get_returns_correct_message(self):
mock_queue = Mock()
basic_get_result = self.channel.basic_get(mock_queue)
expected_message = self.channel.Message.return_value
assert expected_message is basic_get_result
def test_basic_get_returns_None_when_channel__get_raises_Empty(self):
mock_queue = Mock()
self.channel._get = Mock(side_effect=Empty)
basic_get_result = self.channel.basic_get(mock_queue)
assert self.channel.Message.call_count == 0
assert basic_get_result is None
@skip.if_python3()
@skip.if_pypy()
class test_Channel_basic_cancel(ChannelTestBase):
@pytest.fixture(autouse=True)
def setup_receivers(self, setup_channel):
self.channel._receivers = {1: Mock()}
def test_channel_basic_cancel_no_error_if_consumer_tag_not_found(self):
self.channel.basic_cancel(2)
def test_channel_basic_cancel_pops_receiver(self):
self.channel.basic_cancel(1)
assert 1 not in self.channel._receivers
def test_channel_basic_cancel_closes_receiver(self):
mock_receiver = self.channel._receivers[1]
self.channel.basic_cancel(1)
mock_receiver.close.assert_called_once_with()
def test_channel_basic_cancel_pops__tag_to_queue(self):
self.channel._tag_to_queue = Mock()
self.channel.basic_cancel(1)
self.channel._tag_to_queue.pop.assert_called_once_with(1, None)
def test_channel_basic_cancel_pops_connection__callbacks(self):
self.channel._tag_to_queue = Mock()
self.channel.basic_cancel(1)
mock_queue = self.channel._tag_to_queue.pop.return_value
self.conn._callbacks.pop.assert_called_once_with(mock_queue, None)
@skip.if_python3()
@skip.if_pypy()
class test_Channel__init__(ChannelTestBase):
def test_channel___init__sets_variables_as_expected(self):
assert self.conn is self.channel.connection
assert self.transport is self.channel.transport
assert not self.channel.closed
self.conn.get_qpid_connection.assert_called_once_with()
expected_broker_agent = self.mock_broker_agent.return_value
assert self.channel._broker is expected_broker_agent
assert self.channel._tag_to_queue == {}
assert self.channel._receivers == {}
assert self.channel._qos is None
@skip.if_python3()
@skip.if_pypy()
class test_Channel_basic_consume(ChannelTestBase):
@pytest.fixture(autouse=True)
def setup_callbacks(self, setup_channel):
self.conn._callbacks = {}
def test_channel_basic_consume_adds_queue_to__tag_to_queue(self):
mock_tag = Mock()
mock_queue = Mock()
self.channel.basic_consume(mock_queue, Mock(), Mock(), mock_tag)
expected_dict = {mock_tag: mock_queue}
assert expected_dict == self.channel._tag_to_queue
def test_channel_basic_consume_adds_entry_to_connection__callbacks(self):
mock_queue = Mock()
self.channel.basic_consume(mock_queue, Mock(), Mock(), Mock())
assert mock_queue in self.conn._callbacks
assert isinstance(self.conn._callbacks[mock_queue], Callable)
def test_channel_basic_consume_creates_new_receiver(self):
mock_queue = Mock()
self.channel.basic_consume(mock_queue, Mock(), Mock(), Mock())
self.transport.session.receiver.assert_called_once_with(mock_queue)
def test_channel_basic_consume_saves_new_receiver(self):
mock_tag = Mock()
self.channel.basic_consume(Mock(), Mock(), Mock(), mock_tag)
new_mock_receiver = self.transport.session.receiver.return_value
expected_dict = {mock_tag: new_mock_receiver}
assert expected_dict == self.channel._receivers
def test_channel_basic_consume_sets_capacity_on_new_receiver(self):
mock_prefetch_count = Mock()
self.channel.qos.prefetch_count = mock_prefetch_count
self.channel.basic_consume(Mock(), Mock(), Mock(), Mock())
new_receiver = self.transport.session.receiver.return_value
assert new_receiver.capacity is mock_prefetch_count
def get_callback(self, no_ack=Mock(), original_cb=Mock()):
self.channel.Message = Mock()
mock_queue = Mock()
self.channel.basic_consume(mock_queue, no_ack, original_cb, Mock())
return self.conn._callbacks[mock_queue]
def test_channel_basic_consume_callback_creates_Message_correctly(self):
callback = self.get_callback()
mock_qpid_message = Mock()
callback(mock_qpid_message)
mock_content = mock_qpid_message.content
self.channel.Message.assert_called_once_with(
mock_content, channel=self.channel,
)
def test_channel_basic_consume_callback_adds_message_to_QoS(self):
self.channel._qos = Mock()
callback = self.get_callback()
mock_qpid_message = Mock()
callback(mock_qpid_message)
mock_delivery_tag = self.channel.Message.return_value.delivery_tag
self.channel._qos.append.assert_called_once_with(
mock_qpid_message, mock_delivery_tag,
)
def test_channel_basic_consume_callback_gratuitously_acks(self):
self.channel.basic_ack = Mock()
callback = self.get_callback()
mock_qpid_message = Mock()
callback(mock_qpid_message)
mock_delivery_tag = self.channel.Message.return_value.delivery_tag
self.channel.basic_ack.assert_called_once_with(mock_delivery_tag)
def test_channel_basic_consume_callback_does_not_ack_when_needed(self):
self.channel.basic_ack = Mock()
callback = self.get_callback(no_ack=False)
mock_qpid_message = Mock()
callback(mock_qpid_message)
self.channel.basic_ack.assert_not_called()
def test_channel_basic_consume_callback_calls_real_callback(self):
self.channel.basic_ack = Mock()
mock_original_callback = Mock()
callback = self.get_callback(original_cb=mock_original_callback)
mock_qpid_message = Mock()
callback(mock_qpid_message)
expected_message = self.channel.Message.return_value
mock_original_callback.assert_called_once_with(expected_message)
@skip.if_python3()
@skip.if_pypy()
class test_Channel_queue_delete(ChannelTestBase):
@pytest.fixture(autouse=True)
def setup_channel_patches(self, patching, setup_channel):
self.mock__has_queue = patching.object(self.channel, '_has_queue')
self.mock__size = patching.object(self.channel, '_size')
self.mock__delete = patching.object(self.channel, '_delete')
self.mock_queue = Mock()
def test_checks_if_queue_exists(self):
self.channel.queue_delete(self.mock_queue)
self.mock__has_queue.assert_called_once_with(self.mock_queue)
def test_does_nothing_if_queue_does_not_exist(self):
self.mock__has_queue.return_value = False
self.channel.queue_delete(self.mock_queue)
self.mock__delete.assert_not_called()
def test_not_empty_and_if_empty_True_no_delete(self):
self.mock__size.return_value = 1
self.channel.queue_delete(self.mock_queue, if_empty=True)
mock_broker = self.mock_broker_agent.return_value
mock_broker.getQueue.assert_not_called()
def test_calls_get_queue(self):
self.channel.queue_delete(self.mock_queue)
getQueue = self.mock_broker_agent.return_value.getQueue
getQueue.assert_called_once_with(self.mock_queue)
def test_gets_queue_attribute(self):
self.channel.queue_delete(self.mock_queue)
queue_obj = self.mock_broker_agent.return_value.getQueue.return_value
queue_obj.getAttributes.assert_called_once_with()
def test_queue_in_use_and_if_unused_no_delete(self):
queue_obj = self.mock_broker_agent.return_value.getQueue.return_value
queue_obj.getAttributes.return_value = {'consumerCount': 1}
self.channel.queue_delete(self.mock_queue, if_unused=True)
self.mock__delete.assert_not_called()
def test_calls__delete_with_queue(self):
self.channel.queue_delete(self.mock_queue)
self.mock__delete.assert_called_once_with(self.mock_queue)
@skip.if_python3()
@skip.if_pypy()
class test_Channel(object):
@patch(QPID_MODULE + '.qpidtoollibs')
def setup(self, mock_qpidtoollibs):
self.mock_connection = Mock()
self.mock_qpid_connection = Mock()
self.mock_qpid_session = Mock()
self.mock_qpid_connection.session = Mock(
return_value=self.mock_qpid_session,
)
self.mock_connection.get_qpid_connection = Mock(
return_value=self.mock_qpid_connection,
)
self.mock_transport = Mock()
self.mock_broker = Mock()
self.mock_Message = Mock()
self.mock_BrokerAgent = mock_qpidtoollibs.BrokerAgent
self.mock_BrokerAgent.return_value = self.mock_broker
self.my_channel = Channel(
self.mock_connection, self.mock_transport,
)
self.my_channel.Message = self.mock_Message
def test_verify_QoS_class_attribute(self):
"""Verify that the class attribute QoS refers to the QoS object"""
assert QoS is Channel.QoS
def test_verify_Message_class_attribute(self):
"""Verify that the class attribute Message refers to the Message
object."""
assert Message is Channel.Message
def test_body_encoding_class_attribute(self):
"""Verify that the class attribute body_encoding is set to base64"""
assert Channel.body_encoding == 'base64'
def test_codecs_class_attribute(self):
"""Verify that the codecs class attribute has a correct key and
value."""
assert isinstance(Channel.codecs, dict)
assert 'base64' in Channel.codecs
assert isinstance(Channel.codecs['base64'], Base64)
def test_size(self):
"""Test getting the number of messages in a queue specified by
name and returning them."""
message_count = 5
mock_queue = Mock()
mock_queue_to_check = Mock()
mock_queue_to_check.values = {'msgDepth': message_count}
self.mock_broker.getQueue.return_value = mock_queue_to_check
result = self.my_channel._size(mock_queue)
self.mock_broker.getQueue.assert_called_with(mock_queue)
assert message_count == result
def test_delete(self):
"""Test deleting a queue calls purge and delQueue with queue name."""
mock_queue = Mock()
self.my_channel._purge = Mock()
result = self.my_channel._delete(mock_queue)
self.my_channel._purge.assert_called_with(mock_queue)
self.mock_broker.delQueue.assert_called_with(mock_queue)
assert result is None
def test_has_queue_true(self):
"""Test checking if a queue exists, and it does."""
mock_queue = Mock()
self.mock_broker.getQueue.return_value = True
result = self.my_channel._has_queue(mock_queue)
assert result
def test_has_queue_false(self):
"""Test checking if a queue exists, and it does not."""
mock_queue = Mock()
self.mock_broker.getQueue.return_value = False
result = self.my_channel._has_queue(mock_queue)
assert not result
@patch('amqp.protocol.queue_declare_ok_t')
def test_queue_declare_with_exception_raised(self,
mock_queue_declare_ok_t):
"""Test declare_queue, where an exception is raised and silenced."""
mock_queue = Mock()
mock_passive = Mock()
mock_durable = Mock()
mock_exclusive = Mock()
mock_auto_delete = Mock()
mock_nowait = Mock()
mock_arguments = Mock()
mock_msg_count = Mock()
mock_queue.startswith.return_value = False
mock_queue.endswith.return_value = False
options = {
'passive': mock_passive,
'durable': mock_durable,
'exclusive': mock_exclusive,
'auto-delete': mock_auto_delete,
'arguments': mock_arguments,
}
mock_consumer_count = Mock()
mock_return_value = Mock()
values_dict = {
'msgDepth': mock_msg_count,
'consumerCount': mock_consumer_count,
}
mock_queue_data = Mock()
mock_queue_data.values = values_dict
exception_to_raise = Exception('The foo object already exists.')
self.mock_broker.addQueue.side_effect = exception_to_raise
self.mock_broker.getQueue.return_value = mock_queue_data
mock_queue_declare_ok_t.return_value = mock_return_value
result = self.my_channel.queue_declare(
mock_queue,
passive=mock_passive,
durable=mock_durable,
exclusive=mock_exclusive,
auto_delete=mock_auto_delete,
nowait=mock_nowait,
arguments=mock_arguments,
)
self.mock_broker.addQueue.assert_called_with(
mock_queue, options=options,
)
mock_queue_declare_ok_t.assert_called_with(
mock_queue, mock_msg_count, mock_consumer_count,
)
assert mock_return_value is result
def test_queue_declare_set_ring_policy_for_celeryev(self):
"""Test declare_queue sets ring_policy for celeryev."""
mock_queue = Mock()
mock_queue.startswith.return_value = True
mock_queue.endswith.return_value = False
expected_default_options = {
'passive': False,
'durable': False,
'exclusive': False,
'auto-delete': True,
'arguments': None,
'qpid.policy_type': 'ring',
}
mock_msg_count = Mock()
mock_consumer_count = Mock()
values_dict = {
'msgDepth': mock_msg_count,
'consumerCount': mock_consumer_count,
}
mock_queue_data = Mock()
mock_queue_data.values = values_dict
self.mock_broker.addQueue.return_value = None
self.mock_broker.getQueue.return_value = mock_queue_data
self.my_channel.queue_declare(mock_queue)
mock_queue.startswith.assert_called_with('celeryev')
self.mock_broker.addQueue.assert_called_with(
mock_queue, options=expected_default_options,
)
def test_queue_declare_set_ring_policy_for_pidbox(self):
"""Test declare_queue sets ring_policy for pidbox."""
mock_queue = Mock()
mock_queue.startswith.return_value = False
mock_queue.endswith.return_value = True
expected_default_options = {
'passive': False,
'durable': False,
'exclusive': False,
'auto-delete': True,
'arguments': None,
'qpid.policy_type': 'ring',
}
mock_msg_count = Mock()
mock_consumer_count = Mock()
values_dict = {
'msgDepth': mock_msg_count,
'consumerCount': mock_consumer_count,
}
mock_queue_data = Mock()
mock_queue_data.values = values_dict
self.mock_broker.addQueue.return_value = None
self.mock_broker.getQueue.return_value = mock_queue_data
self.my_channel.queue_declare(mock_queue)
mock_queue.endswith.assert_called_with('pidbox')
self.mock_broker.addQueue.assert_called_with(
mock_queue, options=expected_default_options,
)
def test_queue_declare_ring_policy_not_set_as_expected(self):
"""Test declare_queue does not set ring_policy as expected."""
mock_queue = Mock()
mock_queue.startswith.return_value = False
mock_queue.endswith.return_value = False
expected_default_options = {
'passive': False,
'durable': False,
'exclusive': False,
'auto-delete': True,
'arguments': None,
}
mock_msg_count = Mock()
mock_consumer_count = Mock()
values_dict = {
'msgDepth': mock_msg_count,
'consumerCount': mock_consumer_count,
}
mock_queue_data = Mock()
mock_queue_data.values = values_dict
self.mock_broker.addQueue.return_value = None
self.mock_broker.getQueue.return_value = mock_queue_data
self.my_channel.queue_declare(mock_queue)
mock_queue.startswith.assert_called_with('celeryev')
mock_queue.endswith.assert_called_with('pidbox')
self.mock_broker.addQueue.assert_called_with(
mock_queue, options=expected_default_options,
)
def test_queue_declare_test_defaults(self):
"""Test declare_queue defaults."""
mock_queue = Mock()
mock_queue.startswith.return_value = False
mock_queue.endswith.return_value = False
expected_default_options = {
'passive': False,
'durable': False,
'exclusive': False,
'auto-delete': True,
'arguments': None,
}
mock_msg_count = Mock()
mock_consumer_count = Mock()
values_dict = {
'msgDepth': mock_msg_count,
'consumerCount': mock_consumer_count,
}
mock_queue_data = Mock()
mock_queue_data.values = values_dict
self.mock_broker.addQueue.return_value = None
self.mock_broker.getQueue.return_value = mock_queue_data
self.my_channel.queue_declare(mock_queue)
self.mock_broker.addQueue.assert_called_with(
mock_queue,
options=expected_default_options,
)
def test_queue_declare_raises_exception_not_silenced(self):
unique_exception = Exception('This exception should not be silenced')
mock_queue = Mock()
self.mock_broker.addQueue.side_effect = unique_exception
with pytest.raises(unique_exception.__class__):
self.my_channel.queue_declare(mock_queue)
self.mock_broker.addQueue.assert_called_once_with(
mock_queue,
options={
'exclusive': False,
'durable': False,
'qpid.policy_type': 'ring',
'passive': False,
'arguments': None,
'auto-delete': True
})
def test_exchange_declare_raises_exception_and_silenced(self):
"""Create exchange where an exception is raised and then silenced"""
self.mock_broker.addExchange.side_effect = Exception(
'The foo object already exists.',
)
self.my_channel.exchange_declare()
def test_exchange_declare_raises_exception_not_silenced(self):
"""Create Exchange where an exception is raised and not silenced."""
unique_exception = Exception('This exception should not be silenced')
self.mock_broker.addExchange.side_effect = unique_exception
with pytest.raises(unique_exception.__class__):
self.my_channel.exchange_declare()
def test_exchange_declare(self):
"""Create Exchange where an exception is NOT raised."""
mock_exchange = Mock()
mock_type = Mock()
mock_durable = Mock()
options = {'durable': mock_durable}
result = self.my_channel.exchange_declare(
mock_exchange, mock_type, mock_durable,
)
self.mock_broker.addExchange.assert_called_with(
mock_type, mock_exchange, options,
)
assert result is None
def test_exchange_delete(self):
"""Test the deletion of an exchange by name."""
mock_exchange = Mock()
result = self.my_channel.exchange_delete(mock_exchange)
self.mock_broker.delExchange.assert_called_with(mock_exchange)
assert result is None
def test_queue_bind(self):
"""Test binding a queue to an exchange using a routing key."""
mock_queue = Mock()
mock_exchange = Mock()
mock_routing_key = Mock()
self.my_channel.queue_bind(
mock_queue, mock_exchange, mock_routing_key,
)
self.mock_broker.bind.assert_called_with(
mock_exchange, mock_queue, mock_routing_key,
)
def test_queue_unbind(self):
"""Test unbinding a queue from an exchange using a routing key."""
mock_queue = Mock()
mock_exchange = Mock()
mock_routing_key = Mock()
self.my_channel.queue_unbind(
mock_queue, mock_exchange, mock_routing_key,
)
self.mock_broker.unbind.assert_called_with(
mock_exchange, mock_queue, mock_routing_key,
)
def test_queue_purge(self):
"""Test purging a queue by name."""
mock_queue = Mock()
purge_result = Mock()
self.my_channel._purge = Mock(return_value=purge_result)
result = self.my_channel.queue_purge(mock_queue)
self.my_channel._purge.assert_called_with(mock_queue)
assert purge_result is result
@patch(QPID_MODULE + '.Channel.qos')
def test_basic_ack(self, mock_qos):
"""Test that basic_ack calls the QoS object properly."""
mock_delivery_tag = Mock()
self.my_channel.basic_ack(mock_delivery_tag)
mock_qos.ack.assert_called_with(mock_delivery_tag)
@patch(QPID_MODULE + '.Channel.qos')
def test_basic_reject(self, mock_qos):
"""Test that basic_reject calls the QoS object properly."""
mock_delivery_tag = Mock()
mock_requeue_value = Mock()
self.my_channel.basic_reject(mock_delivery_tag, mock_requeue_value)
mock_qos.reject.assert_called_with(
mock_delivery_tag, requeue=mock_requeue_value,
)
def test_qos_manager_is_none(self):
"""Test the qos property if the QoS object did not already exist."""
self.my_channel._qos = None
result = self.my_channel.qos
assert isinstance(result, QoS)
assert result == self.my_channel._qos
def test_qos_manager_already_exists(self):
"""Test the qos property if the QoS object already exists."""
mock_existing_qos = Mock()
self.my_channel._qos = mock_existing_qos
result = self.my_channel.qos
assert mock_existing_qos is result
def test_prepare_message(self):
"""Test that prepare_message() returns the correct result."""
mock_body = Mock()
mock_priority = Mock()
mock_content_encoding = Mock()
mock_content_type = Mock()
mock_header1 = Mock()
mock_header2 = Mock()
mock_properties1 = Mock()
mock_properties2 = Mock()
headers = {'header1': mock_header1, 'header2': mock_header2}
properties = {'properties1': mock_properties1,
'properties2': mock_properties2}
result = self.my_channel.prepare_message(
mock_body,
priority=mock_priority,
content_type=mock_content_type,
content_encoding=mock_content_encoding,
headers=headers,
properties=properties)
assert mock_body is result['body']
assert mock_content_encoding is result['content-encoding']
assert mock_content_type is result['content-type']
assert headers == result['headers']
assert properties == result['properties']
assert (mock_priority is
result['properties']['delivery_info']['priority'])
@patch('__builtin__.buffer')
@patch(QPID_MODULE + '.Channel.body_encoding')
@patch(QPID_MODULE + '.Channel.encode_body')
@patch(QPID_MODULE + '.Channel._put')
def test_basic_publish(self, mock_put,
mock_encode_body,
mock_body_encoding,
mock_buffer):
"""Test basic_publish()."""
mock_original_body = Mock()
mock_encoded_body = 'this is my encoded body'
mock_message = {'body': mock_original_body,
'properties': {'delivery_info': {}}}
mock_encode_body.return_value = (
mock_encoded_body, mock_body_encoding,
)
mock_exchange = Mock()
mock_routing_key = Mock()
mock_encoded_buffered_body = Mock()
mock_buffer.return_value = mock_encoded_buffered_body
self.my_channel.basic_publish(
mock_message, mock_exchange, mock_routing_key,
)
mock_encode_body.assert_called_once_with(
mock_original_body, mock_body_encoding,
)
mock_buffer.assert_called_once_with(mock_encoded_body)
assert mock_message['body'] is mock_encoded_buffered_body
assert (mock_message['properties']['body_encoding'] is
mock_body_encoding)
assert isinstance(
mock_message['properties']['delivery_tag'], uuid.UUID)
assert (mock_message['properties']['delivery_info']['exchange'] is
mock_exchange)
assert (mock_message['properties']['delivery_info']['routing_key'] is
mock_routing_key)
mock_put.assert_called_with(
mock_routing_key, mock_message, mock_exchange,
)
@patch(QPID_MODULE + '.Channel.codecs')
def test_encode_body_expected_encoding(self, mock_codecs):
"""Test if encode_body() works when encoding is set correctly"""
mock_body = Mock()
mock_encoder = Mock()
mock_encoded_result = Mock()
mock_codecs.get.return_value = mock_encoder
mock_encoder.encode.return_value = mock_encoded_result
result = self.my_channel.encode_body(mock_body, encoding='base64')
expected_result = (mock_encoded_result, 'base64')
assert expected_result == result
@patch(QPID_MODULE + '.Channel.codecs')
def test_encode_body_not_expected_encoding(self, mock_codecs):
"""Test if encode_body() works when encoding is not set correctly."""
mock_body = Mock()
result = self.my_channel.encode_body(mock_body, encoding=None)
expected_result = mock_body, None
assert expected_result == result
@patch(QPID_MODULE + '.Channel.codecs')
def test_decode_body_expected_encoding(self, mock_codecs):
"""Test if decode_body() works when encoding is set correctly."""
mock_body = Mock()
mock_decoder = Mock()
mock_decoded_result = Mock()
mock_codecs.get.return_value = mock_decoder
mock_decoder.decode.return_value = mock_decoded_result
result = self.my_channel.decode_body(mock_body, encoding='base64')
assert mock_decoded_result == result
@patch(QPID_MODULE + '.Channel.codecs')
def test_decode_body_not_expected_encoding(self, mock_codecs):
"""Test if decode_body() works when encoding is not set correctly."""
mock_body = Mock()
result = self.my_channel.decode_body(mock_body, encoding=None)
assert mock_body == result
def test_typeof_exchange_exists(self):
"""Test that typeof() finds an exchange that already exists."""
mock_exchange = Mock()
mock_qpid_exchange = Mock()
mock_attributes = {}
mock_type = Mock()
mock_attributes['type'] = mock_type
mock_qpid_exchange.getAttributes.return_value = mock_attributes
self.mock_broker.getExchange.return_value = mock_qpid_exchange
result = self.my_channel.typeof(mock_exchange)
assert mock_type is result
def test_typeof_exchange_does_not_exist(self):
"""Test that typeof() finds an exchange that does not exists."""
mock_exchange = Mock()
mock_default = Mock()
self.mock_broker.getExchange.return_value = None
result = self.my_channel.typeof(mock_exchange, default=mock_default)
assert mock_default is result
@skip.if_python3()
@skip.if_pypy()
@pytest.mark.usefixtures('disable_runtime_dependency_check')
class test_Transport__init__(object):
@pytest.fixture(autouse=True)
def mock_verify_runtime_environment(self, patching):
self.mock_verify_runtime_environment = patching.object(
Transport, 'verify_runtime_environment')
@pytest.fixture(autouse=True)
def mock_transport_init(self, patching):
self.mock_base_Transport__init__ = patching(
QPID_MODULE + '.base.Transport.__init__')
def test_Transport___init___calls_verify_runtime_environment(self):
Transport(Mock())
self.mock_verify_runtime_environment.assert_called_once_with()
def test_transport___init___calls_parent_class___init__(self):
m = Mock()
Transport(m)
self.mock_base_Transport__init__.assert_called_once_with(m)
def test_transport___init___sets_use_async_interface_False(self):
transport = Transport(Mock())
assert not transport.use_async_interface
@skip.if_python3()
@skip.if_pypy()
@pytest.mark.usefixtures('disable_runtime_dependency_check')
class test_Transport_drain_events(object):
@pytest.fixture(autouse=True)
def setup_self(self, disable_runtime_dependency_check):
# ^^ disable_runtime.. must be called before this fixture.
self.transport = Transport(Mock())
self.transport.session = Mock()
self.mock_queue = Mock()
self.mock_message = Mock()
self.mock_conn = Mock()
self.mock_callback = Mock()
self.mock_conn._callbacks = {self.mock_queue: self.mock_callback}
def mock_next_receiver(self, timeout):
time.sleep(0.3)
mock_receiver = Mock()
mock_receiver.source = self.mock_queue
mock_receiver.fetch.return_value = self.mock_message
return mock_receiver
def test_socket_timeout_raised_when_all_receivers_empty(self):
with patch(QPID_MODULE + '.QpidEmpty', new=QpidException):
self.transport.session.next_receiver.side_effect = QpidException()
with pytest.raises(socket.timeout):
self.transport.drain_events(Mock())
def test_socket_timeout_raised_when_by_timeout(self):
self.transport.session.next_receiver = self.mock_next_receiver
with pytest.raises(socket.timeout):
self.transport.drain_events(self.mock_conn, timeout=1)
def test_timeout_returns_no_earlier_than_asked_for(self):
self.transport.session.next_receiver = self.mock_next_receiver
start_time = monotonic()
try:
self.transport.drain_events(self.mock_conn, timeout=1)
except socket.timeout:
pass
elapsed_time_in_s = monotonic() - start_time
assert elapsed_time_in_s >= 1.0
def test_callback_is_called(self):
self.transport.session.next_receiver = self.mock_next_receiver
try:
self.transport.drain_events(self.mock_conn, timeout=1)
except socket.timeout:
pass
self.mock_callback.assert_called_with(self.mock_message)
@skip.if_python3()
@skip.if_pypy()
class test_Transport_create_channel(object):
@pytest.fixture(autouse=True)
def setup_self(self, disable_runtime_dependency_check):
# ^^ disable runtime MUST be called before this fixture
self.transport = Transport(Mock())
self.mock_conn = Mock()
self.mock_new_channel = Mock()
self.mock_conn.Channel.return_value = self.mock_new_channel
self.returned_channel = self.transport.create_channel(self.mock_conn)
def test_new_channel_created_from_connection(self):
assert self.mock_new_channel is self.returned_channel
self.mock_conn.Channel.assert_called_with(
self.mock_conn, self.transport,
)
def test_new_channel_added_to_connection_channel_list(self):
append_method = self.mock_conn.channels.append
append_method.assert_called_with(self.mock_new_channel)
@skip.if_python3()
@skip.if_pypy()
@pytest.mark.usefixtures('disable_runtime_dependency_check')
class test_Transport_establish_connection(object):
@pytest.fixture(autouse=True)
def setup_self(self, disable_runtime_dependency_check):
class MockClient(object):
pass
self.client = MockClient()
self.client.connect_timeout = 4
self.client.ssl = False
self.client.transport_options = {}
self.client.userid = None
self.client.password = None
self.client.login_method = None
self.transport = Transport(self.client)
self.mock_conn = Mock()
self.transport.Connection = self.mock_conn
def test_transport_establish_conn_new_option_overwrites_default(self):
self.client.userid = 'new-userid'
self.client.password = 'new-password'
self.transport.establish_connection()
self.mock_conn.assert_called_once_with(
username=self.client.userid,
password=self.client.password,
sasl_mechanisms='PLAIN',
host='localhost',
timeout=4,
port=5672,
transport='tcp',
)
def test_transport_establish_conn_empty_client_is_default(self):
self.transport.establish_connection()
self.mock_conn.assert_called_once_with(
sasl_mechanisms='ANONYMOUS',
host='localhost',
timeout=4,
port=5672,
transport='tcp',
)
def test_transport_establish_conn_additional_transport_option(self):
new_param_value = 'mynewparam'
self.client.transport_options['new_param'] = new_param_value
self.transport.establish_connection()
self.mock_conn.assert_called_once_with(
sasl_mechanisms='ANONYMOUS',
host='localhost',
timeout=4,
new_param=new_param_value,
port=5672,
transport='tcp',
)
def test_transport_establish_conn_transform_localhost_to_127_0_0_1(self):
self.client.hostname = 'localhost'
self.transport.establish_connection()
self.mock_conn.assert_called_once_with(
sasl_mechanisms='ANONYMOUS',
host='localhost',
timeout=4,
port=5672,
transport='tcp',
)
def test_transport_password_no_userid_raises_exception(self):
self.client.password = 'somepass'
with pytest.raises(Exception):
self.transport.establish_connection()
def test_transport_userid_no_password_raises_exception(self):
self.client.userid = 'someusername'
with pytest.raises(Exception):
self.transport.establish_connection()
def test_transport_overrides_sasl_mech_from_login_method(self):
self.client.login_method = 'EXTERNAL'
self.transport.establish_connection()
self.mock_conn.assert_called_once_with(
sasl_mechanisms='EXTERNAL',
host='localhost',
timeout=4,
port=5672,
transport='tcp',
)
def test_transport_overrides_sasl_mech_has_username(self):
self.client.userid = 'new-userid'
self.client.login_method = 'EXTERNAL'
self.transport.establish_connection()
self.mock_conn.assert_called_once_with(
username=self.client.userid,
sasl_mechanisms='EXTERNAL',
host='localhost',
timeout=4,
port=5672,
transport='tcp',
)
def test_transport_establish_conn_set_password(self):
self.client.userid = 'someuser'
self.client.password = 'somepass'
self.transport.establish_connection()
self.mock_conn.assert_called_once_with(
username='someuser',
password='somepass',
sasl_mechanisms='PLAIN',
host='localhost',
timeout=4,
port=5672,
transport='tcp',
)
def test_transport_establish_conn_no_ssl_sets_transport_tcp(self):
self.client.ssl = False
self.transport.establish_connection()
self.mock_conn.assert_called_once_with(
sasl_mechanisms='ANONYMOUS',
host='localhost',
timeout=4,
port=5672,
transport='tcp',
)
def test_transport_establish_conn_with_ssl_with_hostname_check(self):
self.client.ssl = {
'keyfile': 'my_keyfile',
'certfile': 'my_certfile',
'ca_certs': 'my_cacerts',
'cert_reqs': ssl.CERT_REQUIRED,
}
self.transport.establish_connection()
self.mock_conn.assert_called_once_with(
ssl_certfile='my_certfile',
ssl_trustfile='my_cacerts',
timeout=4,
ssl_skip_hostname_check=False,
sasl_mechanisms='ANONYMOUS',
host='localhost',
ssl_keyfile='my_keyfile',
port=5672, transport='ssl',
)
def test_transport_establish_conn_with_ssl_skip_hostname_check(self):
self.client.ssl = {
'keyfile': 'my_keyfile',
'certfile': 'my_certfile',
'ca_certs': 'my_cacerts',
'cert_reqs': ssl.CERT_OPTIONAL,
}
self.transport.establish_connection()
self.mock_conn.assert_called_once_with(
ssl_certfile='my_certfile',
ssl_trustfile='my_cacerts',
timeout=4,
ssl_skip_hostname_check=True,
sasl_mechanisms='ANONYMOUS',
host='localhost',
ssl_keyfile='my_keyfile',
port=5672, transport='ssl',
)
def test_transport_establish_conn_sets_client_on_connection_object(self):
self.transport.establish_connection()
assert self.mock_conn.return_value.client is self.client
def test_transport_establish_conn_creates_session_on_transport(self):
self.transport.establish_connection()
qpid_conn = self.mock_conn.return_value.get_qpid_connection
new_mock_session = qpid_conn.return_value.session.return_value
assert self.transport.session is new_mock_session
def test_transport_establish_conn_returns_new_connection_object(self):
new_conn = self.transport.establish_connection()
assert new_conn is self.mock_conn.return_value
def test_transport_establish_conn_uses_hostname_if_not_default(self):
self.client.hostname = 'some_other_hostname'
self.transport.establish_connection()
self.mock_conn.assert_called_once_with(
sasl_mechanisms='ANONYMOUS',
host='some_other_hostname',
timeout=4,
port=5672,
transport='tcp',
)
def test_transport_sets_qpid_message_ready_handler(self):
self.transport.establish_connection()
qpid_conn_call = self.mock_conn.return_value.get_qpid_connection
mock_session = qpid_conn_call.return_value.session.return_value
mock_set_callback = mock_session.set_message_received_notify_handler
expected_msg_callback = self.transport._qpid_message_ready_handler
mock_set_callback.assert_called_once_with(expected_msg_callback)
def test_transport_sets_session_exception_handler(self):
self.transport.establish_connection()
qpid_conn_call = self.mock_conn.return_value.get_qpid_connection
mock_session = qpid_conn_call.return_value.session.return_value
mock_set_callback = mock_session.set_async_exception_notify_handler
exc_callback = self.transport._qpid_async_exception_notify_handler
mock_set_callback.assert_called_once_with(exc_callback)
def test_transport_sets_connection_exception_handler(self):
self.transport.establish_connection()
qpid_conn_call = self.mock_conn.return_value.get_qpid_connection
qpid_conn = qpid_conn_call.return_value
mock_set_callback = qpid_conn.set_async_exception_notify_handler
exc_callback = self.transport._qpid_async_exception_notify_handler
mock_set_callback.assert_called_once_with(exc_callback)
@skip.if_python3()
@skip.if_pypy()
class test_Transport_class_attributes(object):
def test_verify_Connection_attribute(self):
assert Connection is Transport.Connection
def test_verify_polling_disabled(self):
assert Transport.polling_interval is None
def test_transport_verify_supports_asynchronous_events(self):
assert Transport.supports_ev
def test_verify_driver_type_and_name(self):
assert Transport.driver_type == 'qpid'
assert Transport.driver_name == 'qpid'
def test_transport_verify_recoverable_connection_errors(self):
connection_errors = Transport.recoverable_connection_errors
assert ConnectionError in connection_errors
assert select.error in connection_errors
def test_transport_verify_recoverable_channel_errors(self):
channel_errors = Transport.recoverable_channel_errors
assert NotFound in channel_errors
def test_transport_verify_pre_kombu_3_0_exception_labels(self):
assert (Transport.recoverable_channel_errors ==
Transport.channel_errors)
assert (Transport.recoverable_connection_errors ==
Transport.connection_errors)
@skip.if_python3()
@skip.if_pypy()
@pytest.mark.usefixtures('disable_runtime_dependency_check')
class test_Transport_register_with_event_loop(object):
def test_transport_register_with_event_loop_calls_add_reader(self):
transport = Transport(Mock())
mock_connection = Mock()
mock_loop = Mock()
transport.register_with_event_loop(mock_connection, mock_loop)
mock_loop.add_reader.assert_called_with(
transport.r, transport.on_readable, mock_connection, mock_loop,
)
@skip.if_python3()
@skip.if_pypy()
@pytest.mark.usefixtures('disable_runtime_dependency_check')
class test_Transport_Qpid_callback_handlers_async(object):
@pytest.fixture(autouse=True)
def setup_self(self, patching, disable_runtime_dependency_check):
self.mock_os_write = patching(QPID_MODULE + '.os.write')
self.transport = Transport(Mock())
self.transport.register_with_event_loop(Mock(), Mock())
def test__qpid_message_ready_handler_writes_symbol_to_fd(self):
self.transport._qpid_message_ready_handler(Mock())
self.mock_os_write.assert_called_once_with(self.transport._w, '0')
def test__qpid_async_exception_notify_handler_writes_symbol_to_fd(self):
self.transport._qpid_async_exception_notify_handler(Mock(), Mock())
self.mock_os_write.assert_called_once_with(self.transport._w, 'e')
@skip.if_python3()
@skip.if_pypy()
@pytest.mark.usefixtures('disable_runtime_dependency_check')
class test_Transport_Qpid_callback_handlers_sync(object):
@pytest.fixture(autouse=True)
def setup(self, patching, disable_runtime_dependency_check):
self.mock_os_write = patching(QPID_MODULE + '.os.write')
self.transport = Transport(Mock())
def test__qpid_message_ready_handler_does_not_write(self):
self.transport._qpid_message_ready_handler(Mock())
self.mock_os_write.assert_not_called()
def test__qpid_async_exception_notify_handler_does_not_write(self):
self.transport._qpid_async_exception_notify_handler(Mock(), Mock())
self.mock_os_write.assert_not_called()
@skip.if_python3()
@skip.if_pypy()
@pytest.mark.usefixtures('disable_runtime_dependency_check')
class test_Transport_on_readable(object):
@pytest.fixture(autouse=True)
def setup_self(self, patching, disable_runtime_dependency_check):
self.mock_os_read = patching(QPID_MODULE + '.os.read')
self.mock_drain_events = patching.object(Transport, 'drain_events')
self.transport = Transport(Mock())
self.transport.register_with_event_loop(Mock(), Mock())
def test_transport_on_readable_reads_symbol_from_fd(self):
self.transport.on_readable(Mock(), Mock())
self.mock_os_read.assert_called_once_with(self.transport.r, 1)
def test_transport_on_readable_calls_drain_events(self):
mock_connection = Mock()
self.transport.on_readable(mock_connection, Mock())
self.mock_drain_events.assert_called_with(mock_connection)
def test_transport_on_readable_catches_socket_timeout(self):
self.mock_drain_events.side_effect = socket.timeout()
self.transport.on_readable(Mock(), Mock())
def test_transport_on_readable_ignores_non_socket_timeout_exception(self):
self.mock_drain_events.side_effect = IOError()
with pytest.raises(IOError):
self.transport.on_readable(Mock(), Mock())
@skip.if_python3()
@skip.if_pypy()
@pytest.mark.usefixtures('disable_runtime_dependency_check')
class test_Transport_verify_runtime_environment(object):
@pytest.fixture(autouse=True)
def setup_self(self, patching):
self.verify_runtime_environment = Transport.verify_runtime_environment
patching.object(Transport, 'verify_runtime_environment')
self.transport = Transport(Mock())
@patch(QPID_MODULE + '.PY3', new=True)
def test_raises_exception_for_Python3(self):
with pytest.raises(RuntimeError):
self.verify_runtime_environment(self.transport)
@patch('__builtin__.getattr')
def test_raises_exc_for_PyPy(self, mock_getattr):
mock_getattr.return_value = True
with pytest.raises(RuntimeError):
self.verify_runtime_environment(self.transport)
@patch(QPID_MODULE + '.dependency_is_none')
def test_raises_exc_dep_missing(self, mock_dep_is_none):
mock_dep_is_none.return_value = True
with pytest.raises(RuntimeError):
self.verify_runtime_environment(self.transport)
@patch(QPID_MODULE + '.dependency_is_none')
def test_calls_dependency_is_none(self, mock_dep_is_none):
mock_dep_is_none.return_value = False
self.verify_runtime_environment(self.transport)
mock_dep_is_none.assert_called()
def test_raises_no_exception(self):
self.verify_runtime_environment(self.transport)
@skip.if_python3()
@skip.if_pypy()
@pytest.mark.usefixtures('disable_runtime_dependency_check')
class test_Transport(object):
def setup(self):
"""Creates a mock_client to be used in testing."""
self.mock_client = Mock()
def test_close_connection(self):
"""Test that close_connection calls close on the connection."""
my_transport = Transport(self.mock_client)
mock_connection = Mock()
my_transport.close_connection(mock_connection)
mock_connection.close.assert_called_once_with()
def test_default_connection_params(self):
"""Test that the default_connection_params are correct"""
correct_params = {
'hostname': 'localhost',
'port': 5672,
}
my_transport = Transport(self.mock_client)
result_params = my_transport.default_connection_params
assert correct_params == result_params
@patch(QPID_MODULE + '.os.close')
def test_del_sync(self, close):
my_transport = Transport(self.mock_client)
my_transport.__del__()
close.assert_not_called()
@patch(QPID_MODULE + '.os.close')
def test_del_async(self, close):
my_transport = Transport(self.mock_client)
my_transport.register_with_event_loop(Mock(), Mock())
my_transport.__del__()
close.assert_called()
@patch(QPID_MODULE + '.os.close')
def test_del_async_failed(self, close):
close.side_effect = OSError()
my_transport = Transport(self.mock_client)
my_transport.register_with_event_loop(Mock(), Mock())
my_transport.__del__()
close.assert_called()
|
|
#!/usr/bin/env python
#
# dewadl
#
# Turn WADL XML into Python API.
#
# Matt Kubilus 2015
#
# This is written to support the uDeploy WADL specifically. Your mileage may vary with other WADLs.
#
#
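#
# Example usage (a minimal sketch; the URL, credentials and generated method
# names below are hypothetical -- what actually gets generated depends entirely
# on the WADL that is processed):
#
#     wadl = wadl_processor(wadl_url="https://udeploy.example.com/rest/application.wadl",
#                           userid="admin", passwd="secret")
#     apps = wadl.getApplications()   # created from a <method id="getApplications"> element
#     pprint.pprint(apps)
#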
import os
import re
import sys
import time
import cmd
import json
import urlparse
import urllib2
from types import FunctionType
import xml.etree.ElementTree as ET
from functools import partial
from optparse import OptionParser
import ConfigParser
import pprint
import getpass
DEBUG=False
class wadl_processor(object):
ns={"ns":"http://wadl.dev.java.net/2009/02"}
base_url = ""
def __init__(self, wadl_file=None, wadl_url=None, userid=None, passwd=None):
if wadl_url:
# If we were supplied wadl_url, first we may need to authenticate in order to get the WADL file
self.__auth(wadl_url, userid, passwd)
wadl_string = self.__do_url(wadl_url)
self.__process_wadl(wadl_file=wadl_file, wadl_string=wadl_string)
else:
# If we have a supplied wadl_file, we will need to get the base_url from the file before auth
self.__process_wadl(wadl_file=wadl_file)
self.__auth(self.base_url, userid, passwd)
def __auth(self, url, userid=None, passwd=None):
if userid:
if not passwd:
passwd = getpass.getpass()
p = urlparse.urlparse(url)
auth_url = "%s://%s" % (p.scheme, p.netloc)
if DEBUG:
print "Authenticating to %s" % auth_url
connected = False
for i in range(5):
try:
p_mgr = urllib2.HTTPPasswordMgrWithDefaultRealm()
p_mgr.add_password(None, auth_url, userid, passwd)
auth_handler = urllib2.HTTPBasicAuthHandler(p_mgr)
opener = urllib2.build_opener(auth_handler)
urllib2.install_opener(opener)
except urllib2.URLError:
print "Error connecting"
time.sleep(i)
continue
connected = True
if DEBUG:
print "Successfully authenticated."
break
if not connected:
print "Could not connect to: %s" % url
sys.exit(1)
def __do_url(self, url, mtype="GET", data_dict=None):
myurl = "/".join(x.strip('/') for x in [self.base_url, url])
myurl = myurl.lstrip("/")
req = urllib2.Request(myurl, json.dumps(data_dict))
req.get_method = lambda: mtype
try:
response = urllib2.urlopen(req)
except urllib2.HTTPError, err:
print "Error %sting url: %s" % (mtype, myurl)
print err
return
con_type = response.info().getheader('Content-Type')
resp_data = response.read()
if resp_data and "application/json" in con_type :
#return json.loads(resp_data)
return json.loads(resp_data, object_hook=wadl_to_obj)
elif resp_data:
return resp_data
def __method_creator(cls, url, mtype, params):
if DEBUG:
print "Creating method: ", url, mtype, params
def method_template(*args, **kwds):
data_dict = kwds.get("data_dict")
if DEBUG:
print "PARAMS:", params
print "ARGS:", args
print "KWDS:", kwds
print "URL:", url
print "DATADICT:", data_dict
arglen = len(args)
m = re.findall("{(.*?)}", url)
if arglen != len(params):
print "Requires %s arguments(s) %s" % (len(params), params)
return
do_url = url
#for idx in xrange(arglen):
for idx in xrange(len(m)):
# First replace REST positional arguments
do_url = do_url.replace("{%s}" % m[idx], args[idx])
url_args = '&'.join([ "%s=%s" % (k,v) for k,v in zip(params[len(m):],args[len(m):])])
do_url = do_url.replace("//","/")
if url_args:
do_url = "%s?%s" % (do_url, url_args)
return cls.__do_url(do_url, mtype, data_dict=data_dict)
return method_template
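    # For example (hypothetical resource): given url="/applications/{application}/processes",
    # mtype="GET" and params=("application", "active"), calling the generated method as
    # method("app1", "true") substitutes the template argument to get
    # "/applications/app1/processes", turns the remaining params into a query string and
    # issues GET /applications/app1/processes?active=true via __do_url.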
def __handleResources(self, resources):
if DEBUG:
print resources
self.base_url = resources.get("base")
if DEBUG:
print "Setting base_url to: %s" % self.base_url
for resource in resources:
self.__handleResource(resource)
def __handleResource(self, resource, path=""):
if DEBUG:
print "resource", resource.tag, resource.get('path')
prev_path = path
path = '/'.join([path, resource.get('path')])
params = re.findall("{(.*?)}", path)
method=None
for node in resource:
# We have to assume params come before methods
if node.tag == "{%s}method" % self.ns.get('ns'):
mtype, method, method_params = self.__handleMethod(node, path)
if hasattr(self, method):
# We have clashed with an existing method name
# TODO: After we process the entire file, perhaps cleanup original clashed name
basename = os.path.basename(prev_path)
if DEBUG:
print "RESOURCE: ", prev_path
print "Method %s already exists. Adjusting name to %s" % (method, "%s_%s" % (basename, method))
old_method_t = getattr(self, method)
method = "%s_%s" % (basename, method)
old_method_name = "%s_%s" % (os.path.basename(old_method_t.__prev_path), old_method_t.__name__)
if DEBUG:
print "Also updating %s to %s" % (old_method_t.__name__, old_method_name)
setattr(self, old_method_name, old_method_t)
params.extend(method_params)
#print "Create method for %s" % path
tmethod = self.__method_creator(path, mtype, tuple(params))
tmethod.__doc__ = "%s accepts arguments: %s" % (method, params)
tmethod.__name__ = method
tmethod.__prev_path = prev_path
setattr(self, method, tmethod)
#params = []
if node.tag == "{%s}param" % self.ns.get('ns'):
param = self.__handleParam(node, path)
#params.append(param)
if node.tag == "{%s}resource" % self.ns.get('ns'):
self.__handleResource(node, path)
def __handleRequest(self, request):
if DEBUG:
print " ", request.tag
tparams = []
for node in request:
if node.tag == "{%s}param" % self.ns.get('ns'):
param = self.__handleParam(node, "")
if param:
tparams.append(param)
return tparams
def __handleResponse(self, response):
pass
def __handleMethod(self, method, path):
if DEBUG:
print " ", method.tag, method.get('id')
method_type = method.get('name')
method_name = method.get('id')
method_params = []
for node in method:
if node.tag == "{%s}request" % self.ns.get('ns'):
tparams = self.__handleRequest(node)
method_params.extend(tparams)
elif node.tag == "{%s}response" % self.ns.get('ns'):
self.__handleResponse(node)
return method_type, method_name, method_params
def __handleParam(self, param, path):
if DEBUG:
print " ", param.tag, param.get('name'), param.get('type'), param.get('style')
p = None
if param.get('style') == 'template':
p = param.get('name')
return p
def __process_wadl(self, wadl_file=None, wadl_string=None):
if wadl_file:
tree = ET.parse(wadl_file)
root = tree.getroot()
elif wadl_string:
root = ET.fromstring(wadl_string)
else:
print "Must provide either wadl_file or wadl_string"
return 1
#print root.tag
m = re.match("\{(.*)\}application", root.tag)
if m:
self.ns['ns'] = m.groups()[0]
#print "Setting namespace to: %s" % self.ns.get('ns')
for resources in root.findall('{%s}resources' % self.ns.get('ns')):
self.__handleResources(resources)
def call_method(obj, args):
if len(args) >= 1:
meth_name = args[0]
else:
meth_name = "help"
    params = args[1:]
meths = [method for method in dir(obj) if callable(getattr(obj, method)) and not method.startswith('__')]
if meth_name == "help":
print "------------------"
print "Available methods:"
print "------------------"
for meth in meths:
print meth,
do_method = getattr(obj, meth)
argc = do_method.func_code.co_argcount
print do_method.func_code.co_varnames[1:argc]
print " ", do_method.__doc__
print
return
if meth_name in meths:
do_method = getattr(obj, meth_name)
return do_method(*params)
else:
print "Could not find: %s", meth_name
def wadl_to_obj(d):
tmpobj = _wadl_obj(d)
return tmpobj
class _wadl_obj(dict):
def __init__(self, data):
for key, value in data.iteritems():
setattr(self, key, value)
self.__dict__[key] = value
def __setattr__(self, name, value):
if not hasattr(super(_wadl_obj, self), name):
super(_wadl_obj, self).__setitem__(name, value)
def get_config():
config = ConfigParser.ConfigParser()
config.read([".dewadl", "/etc/dewadl.cfg", os.path.expanduser("~/.dewadl")])
#print config._sections
all_defaults = config._sections
return all_defaults.get("dewadl", {})
if __name__ == "__main__":
cfg_defaults = get_config()
parser = OptionParser()
parser.add_option(
"-f",
"--wadlfile",
action="store",
dest="wadlfile",
default=None
)
parser.add_option(
"-w",
"--weburl",
action="store",
dest="weburl",
default=cfg_defaults.get("weburl")
)
parser.add_option(
"-u",
"--userid",
action="store",
dest="userid",
default=cfg_defaults.get("userid")
)
parser.add_option(
"-p",
"--password",
action="store",
dest="password",
default=cfg_defaults.get("password")
)
parser.add_option(
"-i",
"--interact",
action="store_true",
dest="interact",
default=False
)
opts, args = parser.parse_args()
if opts.wadlfile:
wadl = wadl_processor(wadl_file=opts.wadlfile, userid=opts.userid, passwd=opts.password)
elif opts.weburl:
wadl = wadl_processor(wadl_url=opts.weburl, userid=opts.userid, passwd=opts.password)
else:
parser.error("Please provider either --wadlfile or --weburl")
if opts.interact:
import rlcompleter
import readline
import code
import sys
readline.parse_and_bind('tab: complete')
sys.ps1 = "W) "
sys.ps2 = ". "
vars = globals().copy()
vars.update(locals())
shell = code.InteractiveConsole(vars)
        shell.interact(banner="\n\n-----------------------------\n\nWelcome to the DeWADL Python interface!\n'wadl' object has been created.\n")
sys.exit(0)
ret = call_method(wadl, args)
if ret:
pprint.pprint(ret)
|
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2013 Cloudbase Solutions Srl
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from nova import test
from nova.virt.hyperv import vmutilsv2
class VMUtilsV2TestCase(test.NoDBTestCase):
"""Unit tests for the Hyper-V VMUtilsV2 class."""
_FAKE_VM_NAME = 'fake_vm'
_FAKE_MEMORY_MB = 2
_FAKE_VCPUS_NUM = 4
_FAKE_JOB_PATH = 'fake_job_path'
_FAKE_RET_VAL = 0
_FAKE_CTRL_PATH = 'fake_ctrl_path'
_FAKE_CTRL_ADDR = 0
_FAKE_DRIVE_ADDR = 0
_FAKE_MOUNTED_DISK_PATH = 'fake_mounted_disk_path'
_FAKE_VM_PATH = "fake_vm_path"
_FAKE_ENABLED_STATE = 1
_FAKE_SNAPSHOT_PATH = "_FAKE_SNAPSHOT_PATH"
_FAKE_RES_DATA = "fake_res_data"
_FAKE_RES_PATH = "fake_res_path"
_FAKE_DYNAMIC_MEMORY_RATIO = 1.0
_FAKE_VHD_PATH = "fake_vhd_path"
_FAKE_VOLUME_DRIVE_PATH = "fake_volume_drive_path"
def setUp(self):
self._vmutils = vmutilsv2.VMUtilsV2()
self._vmutils._conn = mock.MagicMock()
super(VMUtilsV2TestCase, self).setUp()
def _lookup_vm(self):
mock_vm = mock.MagicMock()
self._vmutils._lookup_vm_check = mock.MagicMock(
return_value=mock_vm)
mock_vm.path_.return_value = self._FAKE_VM_PATH
return mock_vm
def test_create_vm(self):
mock_svc = self._vmutils._conn.Msvm_VirtualSystemManagementService()[0]
mock_svc.DefineSystem.return_value = (None, self._FAKE_JOB_PATH,
self._FAKE_RET_VAL)
self._vmutils._get_wmi_obj = mock.MagicMock()
mock_vm = self._vmutils._get_wmi_obj.return_value
mock_s = mock.MagicMock()
mock_s.VirtualSystemType = self._vmutils._VIRTUAL_SYSTEM_TYPE_REALIZED
mock_vm.associators.return_value = [mock_s]
self._vmutils._set_vm_memory = mock.MagicMock()
self._vmutils._set_vm_vcpus = mock.MagicMock()
self._vmutils.create_vm(self._FAKE_VM_NAME, self._FAKE_MEMORY_MB,
self._FAKE_VCPUS_NUM, False,
self._FAKE_DYNAMIC_MEMORY_RATIO)
self.assertTrue(mock_svc.DefineSystem.called)
self._vmutils._set_vm_memory.assert_called_with(
mock_vm, mock_s, self._FAKE_MEMORY_MB,
self._FAKE_DYNAMIC_MEMORY_RATIO)
self._vmutils._set_vm_vcpus.assert_called_with(mock_vm, mock_s,
self._FAKE_VCPUS_NUM,
False)
def test_attach_ide_drive(self):
self._lookup_vm()
self._vmutils._get_vm_ide_controller = mock.MagicMock()
self._vmutils._get_new_resource_setting_data = mock.MagicMock()
self._vmutils._add_virt_resource = mock.MagicMock()
self._vmutils.attach_ide_drive(self._FAKE_VM_NAME,
self._FAKE_CTRL_PATH,
self._FAKE_CTRL_ADDR,
self._FAKE_DRIVE_ADDR)
self.assertTrue(self._vmutils._get_vm_ide_controller.called)
self.assertTrue(self._vmutils._get_new_resource_setting_data.called)
self.assertTrue(self._vmutils._add_virt_resource.called)
def test_attach_volume_to_controller(self):
self._lookup_vm()
self._vmutils._add_virt_resource = mock.MagicMock()
self._vmutils.attach_volume_to_controller(self._FAKE_VM_NAME,
self._FAKE_CTRL_PATH,
self._FAKE_CTRL_ADDR,
self._FAKE_MOUNTED_DISK_PATH)
self.assertTrue(self._vmutils._add_virt_resource.called)
def test_create_scsi_controller(self):
self._lookup_vm()
self._vmutils._add_virt_resource = mock.MagicMock()
self._vmutils.create_scsi_controller(self._FAKE_VM_NAME)
self.assertTrue(self._vmutils._add_virt_resource.called)
def test_get_vm_storage_paths(self):
mock_vm = self._lookup_vm()
mock_vmsettings = [mock.MagicMock()]
mock_vm.associators.return_value = mock_vmsettings
mock_sasds = []
mock_sasd1 = mock.MagicMock()
mock_sasd1.ResourceSubType = self._vmutils._IDE_DISK_RES_SUB_TYPE
mock_sasd1.HostResource = [self._FAKE_VHD_PATH]
mock_sasd2 = mock.MagicMock()
mock_sasd2.ResourceSubType = self._vmutils._PHYS_DISK_RES_SUB_TYPE
mock_sasd2.HostResource = [self._FAKE_VOLUME_DRIVE_PATH]
mock_sasds.append(mock_sasd1)
mock_sasds.append(mock_sasd2)
mock_vmsettings[0].associators.return_value = mock_sasds
storage = self._vmutils.get_vm_storage_paths(self._FAKE_VM_NAME)
(disk_files, volume_drives) = storage
mock_vm.associators.assert_called_with(
wmi_result_class='Msvm_VirtualSystemSettingData')
mock_vmsettings[0].associators.assert_called_with(
wmi_result_class='Msvm_StorageAllocationSettingData')
self.assertEqual([self._FAKE_VHD_PATH], disk_files)
self.assertEqual([self._FAKE_VOLUME_DRIVE_PATH], volume_drives)
def test_destroy(self):
self._lookup_vm()
mock_svc = self._vmutils._conn.Msvm_VirtualSystemManagementService()[0]
mock_svc.DestroySystem.return_value = (self._FAKE_JOB_PATH,
self._FAKE_RET_VAL)
self._vmutils.destroy_vm(self._FAKE_VM_NAME)
mock_svc.DestroySystem.assert_called_with(self._FAKE_VM_PATH)
def test_get_vm_state(self):
self._vmutils.get_vm_summary_info = mock.MagicMock(
return_value={'EnabledState': self._FAKE_ENABLED_STATE})
enabled_state = self._vmutils.get_vm_state(self._FAKE_VM_NAME)
self.assertEqual(self._FAKE_ENABLED_STATE, enabled_state)
def test_take_vm_snapshot(self):
self._lookup_vm()
mock_svc = self._vmutils._conn.Msvm_VirtualSystemSnapshotService()[0]
mock_svc.CreateSnapshot.return_value = (self._FAKE_JOB_PATH,
mock.MagicMock(),
self._FAKE_RET_VAL)
vmutilsv2.wmi = mock.MagicMock()
self._vmutils.take_vm_snapshot(self._FAKE_VM_NAME)
mock_svc.CreateSnapshot.assert_called_with(
AffectedSystem=self._FAKE_VM_PATH,
SnapshotType=self._vmutils._SNAPSHOT_FULL)
def test_remove_vm_snapshot(self):
mock_svc = self._vmutils._conn.Msvm_VirtualSystemSnapshotService()[0]
mock_svc.DestroySnapshot.return_value = (self._FAKE_JOB_PATH,
self._FAKE_RET_VAL)
self._vmutils.remove_vm_snapshot(self._FAKE_SNAPSHOT_PATH)
mock_svc.DestroySnapshot.assert_called_with(self._FAKE_SNAPSHOT_PATH)
def test_set_nic_connection(self):
self._lookup_vm()
self._vmutils._get_nic_data_by_name = mock.MagicMock()
self._vmutils._add_virt_resource = mock.MagicMock()
fake_eth_port = mock.MagicMock()
self._vmutils._get_new_setting_data = mock.MagicMock(
return_value=fake_eth_port)
self._vmutils.set_nic_connection(self._FAKE_VM_NAME, None, None)
self._vmutils._add_virt_resource.assert_called_with(fake_eth_port,
self._FAKE_VM_PATH)
def test_add_virt_resource(self):
mock_svc = self._vmutils._conn.Msvm_VirtualSystemManagementService()[0]
mock_svc.AddResourceSettings.return_value = (self._FAKE_JOB_PATH,
mock.MagicMock(),
self._FAKE_RET_VAL)
mock_res_setting_data = mock.MagicMock()
mock_res_setting_data.GetText_.return_value = self._FAKE_RES_DATA
self._vmutils._add_virt_resource(mock_res_setting_data,
self._FAKE_VM_PATH)
mock_svc.AddResourceSettings.assert_called_with(self._FAKE_VM_PATH,
[self._FAKE_RES_DATA])
def test_modify_virt_resource(self):
mock_svc = self._vmutils._conn.Msvm_VirtualSystemManagementService()[0]
mock_svc.ModifyResourceSettings.return_value = (self._FAKE_JOB_PATH,
mock.MagicMock(),
self._FAKE_RET_VAL)
mock_res_setting_data = mock.MagicMock()
mock_res_setting_data.GetText_.return_value = self._FAKE_RES_DATA
self._vmutils._modify_virt_resource(mock_res_setting_data,
self._FAKE_VM_PATH)
mock_svc.ModifyResourceSettings.assert_called_with(
ResourceSettings=[self._FAKE_RES_DATA])
def test_remove_virt_resource(self):
mock_svc = self._vmutils._conn.Msvm_VirtualSystemManagementService()[0]
mock_svc.RemoveResourceSettings.return_value = (self._FAKE_JOB_PATH,
self._FAKE_RET_VAL)
mock_res_setting_data = mock.MagicMock()
mock_res_setting_data.path_.return_value = self._FAKE_RES_PATH
self._vmutils._remove_virt_resource(mock_res_setting_data,
self._FAKE_VM_PATH)
mock_svc.RemoveResourceSettings.assert_called_with(
[self._FAKE_RES_PATH])
def test_enable_vm_metrics_collection(self):
self._lookup_vm()
mock_svc = self._vmutils._conn.Msvm_MetricService()[0]
metric_def = mock.MagicMock()
fake_metric_def_paths = ["fake_0", "fake_1", "fake_2"]
metric_def.path_.side_effect = fake_metric_def_paths
self._vmutils._conn.CIM_BaseMetricDefinition.return_value = [
metric_def]
self._vmutils.enable_vm_metrics_collection(self._FAKE_VM_NAME)
calls = []
for fake_metric_def_path in fake_metric_def_paths:
calls.append(mock.call(
Subject=self._FAKE_VM_PATH,
Definition=fake_metric_def_path,
MetricCollectionEnabled=self._vmutils._METRIC_ENABLED))
mock_svc.ControlMetrics.assert_has_calls(calls, any_order=True)
|
|
from __future__ import absolute_import
from django.db.models import Q
from rest_framework import serializers, status
from rest_framework.response import Response
from sentry import roles
from sentry.api.bases.organization import OrganizationEndpoint, OrganizationPermission
from sentry.api.exceptions import ResourceDoesNotExist
from sentry.api.serializers import serialize
from sentry.api.serializers.models.team import TeamWithProjectsSerializer
from sentry.auth.superuser import is_active_superuser
from sentry.models import (
AuditLogEntryEvent,
OrganizationAccessRequest,
OrganizationMember,
OrganizationMemberTeam,
Team,
)
ERR_INSUFFICIENT_ROLE = "You do not have permission to edit that user's membership."
class OrganizationMemberTeamSerializer(serializers.Serializer):
isActive = serializers.BooleanField()
class RelaxedOrganizationPermission(OrganizationPermission):
_allowed_scopes = [
"org:read",
"org:write",
"org:admin",
"member:read",
"member:write",
"member:admin",
]
scope_map = {
"GET": _allowed_scopes,
"POST": _allowed_scopes,
"PUT": _allowed_scopes,
"DELETE": _allowed_scopes,
}
class OrganizationMemberTeamDetailsEndpoint(OrganizationEndpoint):
permission_classes = [RelaxedOrganizationPermission]
def _can_create_team_member(self, request, organization, team_slug):
"""
User can join or add a member to a team:
* If they are an active superuser
* If they are a team admin or have global write access
* If the open membership organization setting is enabled
"""
return (
is_active_superuser(request)
or self._can_admin_team(request, organization, team_slug)
or organization.flags.allow_joinleave
)
def _can_delete(self, request, member, organization, team_slug):
"""
User can remove a member from a team:
* If they are an active superuser
* If they are removing their own membership
* If they are a team admin or have global write access
"""
if is_active_superuser(request):
return True
if not request.user.is_authenticated():
return False
if request.user.id == member.user_id:
return True
if self._can_admin_team(request, organization, team_slug):
return True
return False
def _can_admin_team(self, request, organization, team_slug):
global_roles = [r.id for r in roles.with_scope("org:write") if r.is_global]
team_roles = [r.id for r in roles.with_scope("team:write")]
# must be a team admin or have global write access
return OrganizationMember.objects.filter(
Q(role__in=global_roles)
| Q(organizationmemberteam__team__slug=team_slug, role__in=team_roles),
organization=organization,
user__id=request.user.id,
user__is_active=True,
).exists()
def _get_member(self, request, organization, member_id):
if member_id == "me":
queryset = OrganizationMember.objects.filter(
organization=organization, user__id=request.user.id, user__is_active=True
)
else:
queryset = OrganizationMember.objects.filter(
Q(user__is_active=True) | Q(user__isnull=True),
organization=organization,
id=member_id,
)
return queryset.select_related("user").get()
def _create_access_request(self, request, team, member):
omt, created = OrganizationAccessRequest.objects.get_or_create(team=team, member=member)
if not created:
return
requester = request.user if request.user != member.user else None
if requester:
omt.update(requester=requester)
omt.send_request_email()
def post(self, request, organization, member_id, team_slug):
"""
        Join, request access to, or add a member to a team.
If the user needs permission to join the team, an access request will
be generated and the returned status code will be 202.
If the user is already a member of the team, this will simply return
a 204.
"""
try:
member = self._get_member(request, organization, member_id)
except OrganizationMember.DoesNotExist:
raise ResourceDoesNotExist
if not request.user.is_authenticated():
return Response(status=status.HTTP_401_UNAUTHORIZED)
try:
team = Team.objects.get(organization=organization, slug=team_slug)
except Team.DoesNotExist:
raise ResourceDoesNotExist
try:
omt = OrganizationMemberTeam.objects.get(team=team, organizationmember=member)
except OrganizationMemberTeam.DoesNotExist:
if self._can_create_team_member(request, organization, team_slug):
omt = OrganizationMemberTeam.objects.create(team=team, organizationmember=member)
else:
self._create_access_request(request, team, member)
return Response(status=202)
else:
return Response(status=204)
self.create_audit_entry(
request=request,
organization=organization,
target_object=omt.id,
target_user=member.user,
event=AuditLogEntryEvent.MEMBER_JOIN_TEAM,
data=omt.get_audit_log_data(),
)
return Response(serialize(team, request.user, TeamWithProjectsSerializer()), status=201)
def delete(self, request, organization, member_id, team_slug):
"""
Leave or remove a member from a team
"""
try:
member = self._get_member(request, organization, member_id)
except OrganizationMember.DoesNotExist:
raise ResourceDoesNotExist
if not self._can_delete(request, member, organization, team_slug):
return Response({"detail": ERR_INSUFFICIENT_ROLE}, status=400)
try:
team = Team.objects.get(organization=organization, slug=team_slug)
except Team.DoesNotExist:
raise ResourceDoesNotExist
try:
omt = OrganizationMemberTeam.objects.get(team=team, organizationmember=member)
except OrganizationMemberTeam.DoesNotExist:
pass
else:
self.create_audit_entry(
request=request,
organization=organization,
target_object=omt.id,
target_user=member.user,
event=AuditLogEntryEvent.MEMBER_LEAVE_TEAM,
data=omt.get_audit_log_data(),
)
omt.delete()
return Response(serialize(team, request.user, TeamWithProjectsSerializer()), status=200)
|
|
# -------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
# --------------------------------------------------------------------------
"""
Helper module for computations when modelling sessions.
In particular, this module is for when each session is a list
of the Cmd datatype with the params attribute set to a set
of accompanying params.
"""
from collections import defaultdict
from typing import Tuple, List, Union, DefaultDict
import copy
import numpy as np
from ..utils.data_structures import StateMatrix, Cmd
from ..utils.laplace_smooth import (
laplace_smooth_cmd_counts,
laplace_smooth_param_counts,
)
from ....common.exceptions import MsticpyException
# pylint: disable=too-many-locals, too-many-branches
def compute_counts( # nosec
sessions: List[List[Cmd]], start_token: str, end_token: str
) -> Tuple[
DefaultDict[str, int],
DefaultDict[str, DefaultDict[str, int]],
DefaultDict[str, int],
DefaultDict[str, DefaultDict[str, int]],
]:
"""
Compute the training counts for the sessions.
In particular, computes counts of individual commands and of sequences
of two commands. It also computes the counts of individual params as
well as counts of params conditional on the command.
Parameters
----------
sessions: List[List[Cmd]]
each session is a list of the Cmd datatype. Where the Cmd datatype
has a name attribute (command name) and a params attribute
(set containing params associated with the command)
an example session:
[Cmd(name='Set-User', params={'Identity', 'Force'}),
Cmd(name='Set-Mailbox', params={'Identity', 'AuditEnabled'})]
start_token: str
dummy command to signify the start of a session (e.g. "##START##")
end_token: str
dummy command to signify the end of a session (e.g. "##END##")
Returns
-------
tuple of counts:
individual command counts,
sequence command (length 2) counts,
individual param counts,
param conditional on command counts
"""
seq1_counts: DefaultDict[str, int] = defaultdict(lambda: 0)
seq2_counts: DefaultDict[str, DefaultDict[str, int]] = defaultdict(
lambda: defaultdict(lambda: 0)
)
param_counts: DefaultDict[str, int] = defaultdict(lambda: 0)
cmd_param_counts: DefaultDict[str, DefaultDict[str, int]] = defaultdict(
lambda: defaultdict(lambda: 0)
)
for session in sessions:
prev = start_token
seq1_counts[prev] += 1
for cmd in session:
seq1_counts[cmd.name] += 1
seq2_counts[prev][cmd.name] += 1
prev = cmd.name
for par in cmd.params:
param_counts[par] += 1
cmd_param_counts[cmd.name][par] += 1
seq2_counts[prev][end_token] += 1
seq1_counts[end_token] += 1
return seq1_counts, seq2_counts, param_counts, cmd_param_counts
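# A minimal worked example of compute_counts (hypothetical session data; the
# start/end token strings are arbitrary placeholders):
#
#     >>> sessions = [[Cmd(name="Set-User", params={"Identity", "Force"}),
#     ...              Cmd(name="Set-Mailbox", params={"Identity", "AuditEnabled"})]]
#     >>> seq1, seq2, par, cmd_par = compute_counts(sessions, "##START##", "##END##")
#     >>> seq1["Set-User"], seq2["##START##"]["Set-User"]
#     (1, 1)
#     >>> par["Identity"], cmd_par["Set-Mailbox"]["AuditEnabled"]
#     (2, 1)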
def laplace_smooth_counts(
seq1_counts: DefaultDict[str, int],
seq2_counts: DefaultDict[str, DefaultDict[str, int]],
param_counts: DefaultDict[str, int],
cmd_param_counts: DefaultDict[str, DefaultDict[str, int]],
start_token: str,
end_token: str,
unk_token: str,
):
"""
    Apply Laplace smoothing to the counts.
    We do this by adding 1 to each of the counts. This shifts some of the
    probability mass from the very probable commands/params to the unseen
    and very unlikely commands/params. The `unk_token` lets us handle unseen
    commands, sequences of commands and params.
Parameters
----------
seq1_counts: DefaultDict[str, int]
individual command counts
seq2_counts: DefaultDict[str, DefaultDict[str, int]]
sequence command (length 2) counts
param_counts: DefaultDict[str, int]
individual param counts
cmd_param_counts: DefaultDict[str, DefaultDict[str, int]]
param conditional on command counts
start_token: str
dummy command to signify the start of a session (e.g. "##START##")
end_token: str
dummy command to signify the end of a session (e.g. "##END##")
unk_token: str
dummy command to signify an unseen command (e.g. "##UNK##")
Returns
-------
tuple of StateMatrix counts:
individual command counts,
sequence command (length 2) counts,
individual param counts,
param conditional on command counts
"""
cmds: List[str] = list(seq1_counts.keys()) + [unk_token]
# apply laplace smoothing for cmds
seq1_counts_ls, seq2_counts_ls = laplace_smooth_cmd_counts(
seq1_counts=copy.deepcopy(seq1_counts),
seq2_counts=copy.deepcopy(seq2_counts),
start_token=start_token,
end_token=end_token,
unk_token=unk_token,
)
# apply laplace smoothing for params
param_counts_ls, cmd_param_counts_ls = laplace_smooth_param_counts(
cmds=cmds,
param_counts=copy.deepcopy(param_counts),
cmd_param_counts=copy.deepcopy(cmd_param_counts),
unk_token=unk_token,
)
seq1_counts_sm = StateMatrix(states=seq1_counts_ls, unk_token=unk_token)
seq2_counts_sm = StateMatrix(states=seq2_counts_ls, unk_token=unk_token)
param_counts_sm = StateMatrix(states=param_counts_ls, unk_token=unk_token)
cmd_param_counts_sm = StateMatrix(states=cmd_param_counts_ls, unk_token=unk_token)
return seq1_counts_sm, seq2_counts_sm, param_counts_sm, cmd_param_counts_sm
def compute_prob_setofparams_given_cmd(
cmd: str,
params: Union[set, dict],
param_cond_cmd_probs: Union[StateMatrix, dict],
use_geo_mean: bool = True,
) -> float:
"""
Compute probability of a set of params given the cmd.
Parameters
----------
cmd: str
name of command
(e.g. for Exchange powershell commands: "Set-Mailbox")
params: Union[set, dict]
set of accompanying params for the cmd
(e.g for Exchange powershell commands: {'Identity', 'ForwardingEmailAddress'}).
If params is set to be a dictionary of accompanying params and values,
then only the keys of the dict will be used.
param_cond_cmd_probs: Union[StateMatrix, dict]
computed probabilities of params conditional on the command
use_geo_mean: bool
if True, then the likelihood will be raised to the power of (1/K)
where K is the number of distinct params which appeared for the
given `cmd` across our training set. See Notes.
Returns
-------
float
computed likelihood
Notes
-----
    `use_geo_mean` - Some commands may have more params set in general compared with
                     other commands. Using the geometric mean makes it possible to
                     compare this probability across commands with differing
                     numbers of params.
"""
pars = params.copy()
if isinstance(pars, dict):
pars = set(pars.keys())
if len(pars) == 0:
return 1.0
ref = param_cond_cmd_probs[cmd]
lik: float = 1
for param, prob in ref.items():
if param in pars:
lik *= prob
else:
lik *= 1 - prob
if use_geo_mean:
k = len(ref)
lik = lik ** (1 / k)
return lik
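# A small worked example (hypothetical probabilities): with
# param_cond_cmd_probs = {"Set-Mailbox": {"Identity": 0.9, "AuditEnabled": 0.5,
# "ForwardingEmailAddress": 0.1}} and params = {"Identity", "AuditEnabled"},
# the raw likelihood is 0.9 * 0.5 * (1 - 0.1) = 0.405. Since K = 3 distinct
# params were seen for "Set-Mailbox" in training, use_geo_mean=True returns
# 0.405 ** (1 / 3), roughly 0.74.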
# pylint: disable=too-many-locals, too-many-arguments, too-many-branches
def compute_likelihood_window(
window: List[Cmd],
prior_probs: Union[StateMatrix, dict],
trans_probs: Union[StateMatrix, dict],
param_cond_cmd_probs: Union[StateMatrix, dict],
use_start_token: bool,
use_end_token: bool,
start_token: str = None,
end_token: str = None,
) -> float:
"""
Compute the likelihood of the input `window`.
Parameters
----------
window: List[Cmd]
part or all of a session, where a session is a list of the Cmd datatype
an example session:
[Cmd(name='Set-User', params={'Identity', 'Force'}), Cmd(name='Set-Mailbox',
params={'Identity', 'AuditEnabled'})]
prior_probs: Union[StateMatrix, dict]
computed probabilities of individual commands
trans_probs: Union[StateMatrix, dict]
computed probabilities of sequences of commands (length 2)
param_cond_cmd_probs: Union[StateMatrix, dict]
computed probabilities of the params conditional on the commands
use_start_token: bool
if set to True, the start_token will be prepended to the window
before the likelihood calculation is done
use_end_token: bool
if set to True, the end_token will be appended to the window
before the likelihood calculation is done
start_token: str
dummy command to signify the start of the session (e.g. "##START##")
end_token: str
dummy command to signify the end of the session (e.g. "##END##")
Returns
-------
likelihood of the window
"""
if use_end_token:
if end_token is None:
raise MsticpyException(
"end_token should not be None, when use_end_token is True"
)
if use_start_token:
if start_token is None:
raise MsticpyException(
"start_token should not be None, when use_start_token is True"
)
w_len = len(window)
if w_len == 0:
return np.nan
prob: float = 1
cur_cmd = window[0].name
params = window[0].params
param_cond_prob = compute_prob_setofparams_given_cmd(
cmd=cur_cmd,
params=params,
param_cond_cmd_probs=param_cond_cmd_probs,
use_geo_mean=True,
)
if use_start_token:
prob *= trans_probs[start_token][cur_cmd] * param_cond_prob
else:
prob *= prior_probs[cur_cmd] * param_cond_prob
for i in range(1, w_len):
prev, cur = window[i - 1], window[i]
prev_cmd, cur_cmd = prev.name, cur.name
cur_par = cur.params
prob *= trans_probs[prev_cmd][cur_cmd]
param_cond_prob = compute_prob_setofparams_given_cmd(
cmd=cur_cmd,
params=cur_par,
param_cond_cmd_probs=param_cond_cmd_probs,
use_geo_mean=True,
)
prob *= param_cond_prob
if use_end_token:
prob *= trans_probs[cur_cmd][end_token]
return prob
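# In other words, for a window [c1, ..., cn] with accompanying param sets
# [s1, ..., sn] the value computed above factorizes as
#     P(c1) * P(s1 | c1) * prod_{i=2..n} P(c_i | c_{i-1}) * P(s_i | c_i)
# where P(c1) is replaced by P(c1 | start_token) when use_start_token is True
# and an extra P(end_token | cn) factor is included when use_end_token is True.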
# pylint: disable=too-many-locals, too-many-arguments, too-many-branches
def compute_likelihood_windows_in_session(
session: List[Cmd],
prior_probs: Union[StateMatrix, dict],
trans_probs: Union[StateMatrix, dict],
param_cond_cmd_probs: Union[StateMatrix, dict],
window_len: int,
use_start_end_tokens: bool,
start_token: str = None,
end_token: str = None,
use_geo_mean: bool = False,
) -> List[float]:
"""
Compute the likelihoods of a sliding window in the session.
Parameters
----------
session: List[Cmd]
list of Cmd datatype
an example session:
[Cmd(name='Set-User', params={'Identity', 'Force'}),
Cmd(name='Set-Mailbox', params={'Identity', 'AuditEnabled'})]
prior_probs: Union[StateMatrix, dict]
computed probabilities of individual commands
trans_probs: Union[StateMatrix, dict]
computed probabilities of sequences of commands (length 2)
param_cond_cmd_probs: Union[StateMatrix, dict]
computed probabilities of the params conditional on the command
window_len: int
length of sliding window for likelihood calculations
use_start_end_tokens: bool
if True, then `start_token` and `end_token` will be prepended
and appended to the session respectively before the calculations
are done
start_token: str
dummy command to signify the start of the session (e.g. "##START##")
end_token: str
dummy command to signify the end of the session (e.g. "##END##")
use_geo_mean: bool
if True, then each of the likelihoods of the sliding windows will
be raised to the power of (1/`window_len`)
Returns
-------
List[float]
list of likelihoods
"""
if use_start_end_tokens:
if start_token is None or end_token is None:
raise MsticpyException(
"start_token and end_token should not be set to None when "
"use_start_end_tokens is set to True"
)
likelihoods = []
sess = session.copy()
if use_start_end_tokens and end_token:
sess += [Cmd(name=str(end_token), params={})]
end = len(sess) - window_len
for i in range(end + 1):
window = sess[i : i + window_len] # noqa E203
if i == 0:
use_start = use_start_end_tokens
else:
use_start = False
lik = compute_likelihood_window(
window=window,
prior_probs=prior_probs,
trans_probs=trans_probs,
param_cond_cmd_probs=param_cond_cmd_probs,
use_start_token=use_start,
use_end_token=False,
start_token=start_token,
end_token=end_token,
)
if use_geo_mean:
k = window_len
lik = lik ** (1 / k)
likelihoods.append(lik)
return likelihoods
# pylint: disable=too-many-arguments
def rarest_window_session(
session: List[Cmd],
prior_probs: StateMatrix,
trans_probs: StateMatrix,
param_cond_cmd_probs: StateMatrix,
window_len: int,
use_start_end_tokens: bool,
start_token: str,
end_token: str,
use_geo_mean=False,
) -> Tuple[List[Cmd], float]:
"""
Find and compute the likelihood of the rarest window of `window_len` in the session.
Parameters
----------
session: List[Cmd]
list of Cmd datatype
an example session:
[Cmd(name='Set-User', params={'Identity', 'Force'}), Cmd(name='Set-Mailbox',
params={'Identity', 'AuditEnabled'})]
prior_probs: Union[StateMatrix, dict]
computed probabilities of individual commands
trans_probs: Union[StateMatrix, dict]
computed probabilities of sequences of commands (length 2)
param_cond_cmd_probs: Union[StateMatrix, dict]
computed probabilities of the params conditional on the command
window_len: int
length of sliding window for likelihood calculations
use_start_end_tokens: bool
if True, then `start_token` and `end_token` will be prepended and appended to the
session respectively before the calculations are done
start_token: str
dummy command to signify the start of the session (e.g. "##START##")
end_token: str
dummy command to signify the end of the session (e.g. "##END##")
use_geo_mean: bool
if True, then each of the likelihoods of the sliding windows will be
raised to the power of (1/`window_len`)
Returns
-------
Tuple:
rarest window part of the session,
likelihood of the rarest window
"""
likelihoods = compute_likelihood_windows_in_session(
session=session,
prior_probs=prior_probs,
trans_probs=trans_probs,
param_cond_cmd_probs=param_cond_cmd_probs,
window_len=window_len,
use_start_end_tokens=use_start_end_tokens,
start_token=start_token,
end_token=end_token,
use_geo_mean=use_geo_mean,
)
if len(likelihoods) == 0:
return [], np.nan
min_lik = min(likelihoods)
ind = likelihoods.index(min_lik)
return session[ind : ind + window_len], min_lik # noqa E203
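# A minimal usage sketch (hand-built, hypothetical probability dicts; in practice
# these come from the smoothed counts above). With window_len=1 and no start/end
# tokens each window likelihood is just prior * P(params | cmd), so the rarer
# command "A" is returned:
#
#     >>> prior = {"A": 0.2, "B": 0.8}
#     >>> trans = {"A": {"B": 1.0}, "B": {"A": 1.0}}
#     >>> par_cond = {"A": {"p": 0.5}, "B": {}}
#     >>> sess = [Cmd(name="A", params={"p"}), Cmd(name="B", params=set())]
#     >>> window, lik = rarest_window_session(
#     ...     sess, prior, trans, par_cond, window_len=1,
#     ...     use_start_end_tokens=False, start_token="##START##", end_token="##END##")
#     >>> [c.name for c in window], round(lik, 2)
#     (['A'], 0.1)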
|
|
from django.utils.translation import ugettext_lazy as _
# ISO 3166-1 country names and codes adapted from http://opencountrycodes.appspot.com/python/
CONTINENTS = [
(u'africa', _(u'Africa')),
(u'antarctica', _(u'Antarctica')),
(u'asia', _(u'Asia')),
(u'europe', _(u'Europe')),
(u'north-america', _(u'North America')),
(u'oceania', _(u'Oceania')),
(u'south-america', _(u'South America')),
]
COUNTRIES = (
(u'GB', _(u'United Kingdom')),
(u'AF', _(u'Afghanistan')),
(u'AX', _(u'Aland Islands')),
(u'AL', _(u'Albania')),
(u'DZ', _(u'Algeria')),
(u'AS', _(u'American Samoa')),
(u'AD', _(u'Andorra')),
(u'AO', _(u'Angola')),
(u'AI', _(u'Anguilla')),
(u'AQ', _(u'Antarctica')),
(u'AG', _(u'Antigua and Barbuda')),
(u'AR', _(u'Argentina')),
(u'AM', _(u'Armenia')),
(u'AW', _(u'Aruba')),
(u'AU', _(u'Australia')),
(u'AT', _(u'Austria')),
(u'AZ', _(u'Azerbaijan')),
(u'BS', _(u'Bahamas')),
(u'BH', _(u'Bahrain')),
(u'BD', _(u'Bangladesh')),
(u'BB', _(u'Barbados')),
(u'BY', _(u'Belarus')),
(u'BE', _(u'Belgium')),
(u'BZ', _(u'Belize')),
(u'BJ', _(u'Benin')),
(u'BM', _(u'Bermuda')),
(u'BT', _(u'Bhutan')),
(u'BO', _(u'Bolivia')),
(u'BA', _(u'Bosnia and Herzegovina')),
(u'BW', _(u'Botswana')),
(u'BV', _(u'Bouvet Island')),
(u'BR', _(u'Brazil')),
(u'IO', _(u'British Indian Ocean Territory')),
(u'BN', _(u'Brunei Darussalam')),
(u'BG', _(u'Bulgaria')),
(u'BF', _(u'Burkina Faso')),
(u'BI', _(u'Burundi')),
(u'KH', _(u'Cambodia')),
(u'CM', _(u'Cameroon')),
(u'CA', _(u'Canada')),
(u'CV', _(u'Cape Verde')),
(u'KY', _(u'Cayman Islands')),
(u'CF', _(u'Central African Republic')),
(u'TD', _(u'Chad')),
(u'CL', _(u'Chile')),
(u'CN', _(u'China')),
(u'CX', _(u'Christmas Island')),
(u'CC', _(u'Cocos (Keeling) Islands')),
(u'CO', _(u'Colombia')),
(u'KM', _(u'Comoros')),
(u'CG', _(u'Congo')),
(u'CD', _(u'Congo, The Democratic Republic of the')),
(u'CK', _(u'Cook Islands')),
(u'CR', _(u'Costa Rica')),
(u'CI', _(u'Cote d\'Ivoire')),
(u'HR', _(u'Croatia')),
(u'CU', _(u'Cuba')),
(u'CY', _(u'Cyprus')),
(u'CZ', _(u'Czech Republic')),
(u'DK', _(u'Denmark')),
(u'DJ', _(u'Djibouti')),
(u'DM', _(u'Dominica')),
(u'DO', _(u'Dominican Republic')),
(u'EC', _(u'Ecuador')),
(u'EG', _(u'Egypt')),
(u'SV', _(u'El Salvador')),
(u'GQ', _(u'Equatorial Guinea')),
(u'ER', _(u'Eritrea')),
(u'EE', _(u'Estonia')),
(u'ET', _(u'Ethiopia')),
(u'FK', _(u'Falkland Islands (Malvinas)')),
(u'FO', _(u'Faroe Islands')),
(u'FJ', _(u'Fiji')),
(u'FI', _(u'Finland')),
(u'FR', _(u'France')),
(u'GF', _(u'French Guiana')),
(u'PF', _(u'French Polynesia')),
(u'TF', _(u'French Southern Territories')),
(u'GA', _(u'Gabon')),
(u'GM', _(u'Gambia')),
(u'GE', _(u'Georgia')),
(u'DE', _(u'Germany')),
(u'GH', _(u'Ghana')),
(u'GI', _(u'Gibraltar')),
(u'GR', _(u'Greece')),
(u'GL', _(u'Greenland')),
(u'GD', _(u'Grenada')),
(u'GP', _(u'Guadeloupe')),
(u'GU', _(u'Guam')),
(u'GT', _(u'Guatemala')),
(u'GG', _(u'Guernsey')),
(u'GN', _(u'Guinea')),
(u'GW', _(u'Guinea-Bissau')),
(u'GY', _(u'Guyana')),
(u'HT', _(u'Haiti')),
(u'HM', _(u'Heard Island and McDonald Islands')),
(u'VA', _(u'Holy See (Vatican City State)')),
(u'HN', _(u'Honduras')),
(u'HK', _(u'Hong Kong')),
(u'HU', _(u'Hungary')),
(u'IS', _(u'Iceland')),
(u'IN', _(u'India')),
(u'ID', _(u'Indonesia')),
(u'IR', _(u'Iran, Islamic Republic of')),
(u'IQ', _(u'Iraq')),
(u'IE', _(u'Ireland')),
(u'IM', _(u'Isle of Man')),
(u'IL', _(u'Israel')),
(u'IT', _(u'Italy')),
(u'JM', _(u'Jamaica')),
(u'JP', _(u'Japan')),
(u'JE', _(u'Jersey')),
(u'JO', _(u'Jordan')),
(u'KZ', _(u'Kazakhstan')),
(u'KE', _(u'Kenya')),
(u'KI', _(u'Kiribati')),
(u'KP', _(u'Korea, Democratic People\'s Republic of')),
(u'KR', _(u'Korea, Republic of')),
(u'KW', _(u'Kuwait')),
(u'KG', _(u'Kyrgyzstan')),
(u'LA', _(u'Lao People\'s Democratic Republic')),
(u'LV', _(u'Latvia')),
(u'LB', _(u'Lebanon')),
(u'LS', _(u'Lesotho')),
(u'LR', _(u'Liberia')),
(u'LY', _(u'Libyan Arab Jamahiriya')),
(u'LI', _(u'Liechtenstein')),
(u'LT', _(u'Lithuania')),
(u'LU', _(u'Luxembourg')),
(u'MO', _(u'Macao')),
(u'MK', _(u'Macedonia, The Former Yugoslav Republic of')),
(u'MG', _(u'Madagascar')),
(u'MW', _(u'Malawi')),
(u'MY', _(u'Malaysia')),
(u'MV', _(u'Maldives')),
(u'ML', _(u'Mali')),
(u'MT', _(u'Malta')),
(u'MH', _(u'Marshall Islands')),
(u'MQ', _(u'Martinique')),
(u'MR', _(u'Mauritania')),
(u'MU', _(u'Mauritius')),
(u'YT', _(u'Mayotte')),
(u'MX', _(u'Mexico')),
(u'FM', _(u'Micronesia, Federated States of')),
(u'MD', _(u'Moldova')),
(u'MC', _(u'Monaco')),
(u'MN', _(u'Mongolia')),
(u'ME', _(u'Montenegro')),
(u'MS', _(u'Montserrat')),
(u'MA', _(u'Morocco')),
(u'MZ', _(u'Mozambique')),
(u'MM', _(u'Myanmar')),
(u'NA', _(u'Namibia')),
(u'NR', _(u'Nauru')),
(u'NP', _(u'Nepal')),
(u'NL', _(u'Netherlands')),
(u'AN', _(u'Netherlands Antilles')),
(u'NC', _(u'New Caledonia')),
(u'NZ', _(u'New Zealand')),
(u'NI', _(u'Nicaragua')),
(u'NE', _(u'Niger')),
(u'NG', _(u'Nigeria')),
(u'NU', _(u'Niue')),
(u'NF', _(u'Norfolk Island')),
(u'MP', _(u'Northern Mariana Islands')),
(u'NO', _(u'Norway')),
(u'OM', _(u'Oman')),
(u'PK', _(u'Pakistan')),
(u'PW', _(u'Palau')),
(u'PS', _(u'Palestinian Territory, Occupied')),
(u'PA', _(u'Panama')),
(u'PG', _(u'Papua New Guinea')),
(u'PY', _(u'Paraguay')),
(u'PE', _(u'Peru')),
(u'PH', _(u'Philippines')),
(u'PN', _(u'Pitcairn')),
(u'PL', _(u'Poland')),
(u'PT', _(u'Portugal')),
(u'PR', _(u'Puerto Rico')),
(u'QA', _(u'Qatar')),
(u'RE', _(u'Reunion')),
(u'RO', _(u'Romania')),
(u'RU', _(u'Russian Federation')),
(u'RW', _(u'Rwanda')),
(u'BL', _(u'Saint Barthelemy')),
(u'SH', _(u'Saint Helena')),
(u'KN', _(u'Saint Kitts and Nevis')),
(u'LC', _(u'Saint Lucia')),
(u'MF', _(u'Saint Martin')),
(u'PM', _(u'Saint Pierre and Miquelon')),
(u'VC', _(u'Saint Vincent and the Grenadines')),
(u'WS', _(u'Samoa')),
(u'SM', _(u'San Marino')),
(u'ST', _(u'Sao Tome and Principe')),
(u'SA', _(u'Saudi Arabia')),
(u'SN', _(u'Senegal')),
(u'RS', _(u'Serbia')),
(u'SC', _(u'Seychelles')),
(u'SL', _(u'Sierra Leone')),
(u'SG', _(u'Singapore')),
(u'SK', _(u'Slovakia')),
(u'SI', _(u'Slovenia')),
(u'SB', _(u'Solomon Islands')),
(u'SO', _(u'Somalia')),
(u'ZA', _(u'South Africa')),
(u'GS', _(u'South Georgia and the South Sandwich Islands')),
(u'ES', _(u'Spain')),
(u'LK', _(u'Sri Lanka')),
(u'SD', _(u'Sudan')),
(u'SR', _(u'Suriname')),
(u'SJ', _(u'Svalbard and Jan Mayen')),
(u'SZ', _(u'Swaziland')),
(u'SE', _(u'Sweden')),
(u'CH', _(u'Switzerland')),
(u'SY', _(u'Syrian Arab Republic')),
(u'TW', _(u'Taiwan, Province of China')),
(u'TJ', _(u'Tajikistan')),
(u'TZ', _(u'Tanzania, United Republic of')),
(u'TH', _(u'Thailand')),
(u'TL', _(u'Timor-Leste')),
(u'TG', _(u'Togo')),
(u'TK', _(u'Tokelau')),
(u'TO', _(u'Tonga')),
(u'TT', _(u'Trinidad and Tobago')),
(u'TN', _(u'Tunisia')),
(u'TR', _(u'Turkey')),
(u'TM', _(u'Turkmenistan')),
(u'TC', _(u'Turks and Caicos Islands')),
(u'TV', _(u'Tuvalu')),
(u'UG', _(u'Uganda')),
(u'UA', _(u'Ukraine')),
(u'AE', _(u'United Arab Emirates')),
(u'US', _(u'United States')),
(u'UM', _(u'United States Minor Outlying Islands')),
(u'UY', _(u'Uruguay')),
(u'UZ', _(u'Uzbekistan')),
(u'VU', _(u'Vanuatu')),
(u'VE', _(u'Venezuela')),
(u'VN', _(u'Viet Nam')),
(u'VG', _(u'Virgin Islands, British')),
(u'VI', _(u'Virgin Islands, U.S.')),
(u'WF', _(u'Wallis and Futuna')),
(u'EH', _(u'Western Sahara')),
(u'YE', _(u'Yemen')),
(u'ZM', _(u'Zambia')),
(u'ZW', _(u'Zimbabwe')),
)
CONTINENT_COUNTRIES = (
(_(u'Africa'), (
(u'DZ', _(u'Algeria')),
(u'AO', _(u'Angola')),
(u'BJ', _(u'Benin')),
(u'BW', _(u'Botswana')),
(u'BF', _(u'Burkina Faso')),
(u'BI', _(u'Burundi')),
(u'CM', _(u'Cameroon')),
(u'CV', _(u'Cape Verde')),
(u'CF', _(u'Central African Republic')),
(u'TD', _(u'Chad')),
(u'KM', _(u'Comoros')),
(u'CG', _(u'Congo')),
(u'CD', _(u'Congo, The Democratic Republic of the')),
(u'CI', _(u'Cote d\'Ivoire')),
(u'DJ', _(u'Djibouti')),
(u'EG', _(u'Egypt')),
(u'GQ', _(u'Equatorial Guinea')),
(u'ER', _(u'Eritrea')),
(u'ET', _(u'Ethiopia')),
(u'GA', _(u'Gabon')),
(u'GM', _(u'Gambia')),
(u'GH', _(u'Ghana')),
(u'GN', _(u'Guinea')),
(u'GW', _(u'Guinea-Bissau')),
(u'KE', _(u'Kenya')),
(u'LS', _(u'Lesotho')),
(u'LR', _(u'Liberia')),
(u'LY', _(u'Libyan Arab Jamahiriya')),
(u'MG', _(u'Madagascar')),
(u'YT', _(u'Mayotte')),
(u'MW', _(u'Malawi')),
(u'ML', _(u'Mali')),
(u'MR', _(u'Mauritania')),
(u'MU', _(u'Mauritius')),
(u'MA', _(u'Morocco')),
(u'MZ', _(u'Mozambique')),
(u'NA', _(u'Namibia')),
(u'NE', _(u'Niger')),
(u'NG', _(u'Nigeria')),
(u'RE', _(u'Reunion')),
(u'RW', _(u'Rwanda')),
(u'SH', _(u'Saint Helena')),
(u'ST', _(u'Sao Tome and Principe')),
(u'SN', _(u'Senegal')),
(u'SC', _(u'Seychelles')),
(u'SL', _(u'Sierra Leone')),
(u'SO', _(u'Somalia')),
(u'ZA', _(u'South Africa')),
(u'SD', _(u'Sudan')),
(u'SZ', _(u'Swaziland')),
(u'TZ', _(u'Tanzania, United Republic of')),
(u'TG', _(u'Togo')),
(u'TN', _(u'Tunisia')),
(u'UG', _(u'Uganda')),
(u'EH', _(u'Western Sahara')),
(u'ZM', _(u'Zambia')),
(u'ZW', _(u'Zimbabwe')),
)),
(_(u'Antarctica'), (
(u'AQ', _(u'Antarctica')),
(u'BV', _(u'Bouvet Island')),
(u'TF', _(u'French Southern Territories')),
(u'HM', _(u'Heard Island and McDonald Islands')),
)),
(_(u'Asia'), (
(u'AF', _(u'Afghanistan')),
(u'BH', _(u'Bahrain')),
(u'BD', _(u'Bangladesh')),
(u'BT', _(u'Bhutan')),
(u'IO', _(u'British Indian Ocean Territory')),
(u'BN', _(u'Brunei Darussalam')),
(u'KH', _(u'Cambodia')),
(u'CN', _(u'China')),
(u'HK', _(u'Hong Kong')),
(u'IR', _(u'Iran, Islamic Republic of')),
(u'IN', _(u'India')),
(u'ID', _(u'Indonesia')),
(u'IQ', _(u'Iraq')),
(u'IL', _(u'Israel')),
(u'JP', _(u'Japan')),
(u'JO', _(u'Jordan')),
(u'KZ', _(u'Kazakhstan')),
(u'KW', _(u'Kuwait')),
(u'KP', _(u'Korea, Democratic People\'s Republic of')),
(u'KR', _(u'Korea, Republic of')),
(u'LA', _(u'Lao People\'s Democratic Republic')),
(u'KG', _(u'Kyrgyzstan')),
(u'LB', _(u'Lebanon')),
(u'MO', _(u'Macao')),
(u'MY', _(u'Malaysia')),
(u'MV', _(u'Maldives')),
(u'MM', _(u'Myanmar')),
(u'MN', _(u'Mongolia')),
(u'NP', _(u'Nepal')),
(u'OM', _(u'Oman')),
(u'PK', _(u'Pakistan')),
(u'PS', _(u'Palestinian Territory, Occupied')),
(u'PH', _(u'Philippines')),
(u'QA', _(u'Qatar')),
(u'RU', _(u'Russian Federation')),
(u'SA', _(u'Saudi Arabia')),
(u'SG', _(u'Singapore')),
(u'SY', _(u'Syrian Arab Republic')),
(u'LK', _(u'Sri Lanka')),
(u'TJ', _(u'Tajikistan')),
(u'TW', _(u'Taiwan, Province of China')),
(u'TH', _(u'Thailand')),
(u'TL', _(u'Timor-Leste')),
(u'TR', _(u'Turkey')),
(u'TM', _(u'Turkmenistan')),
(u'AE', _(u'United Arab Emirates')),
(u'UZ', _(u'Uzbekistan')),
(u'VN', _(u'Vietnam')),
(u'YE', _(u'Yemen')),
)),
(_(u'Europe'), (
(u'AX', _(u'Aland Islands')),
(u'AL', _(u'Albania')),
(u'AD', _(u'Andorra')),
(u'AM', _(u'Armenia')),
(u'AT', _(u'Austria')),
(u'AZ', _(u'Azerbaijan')),
(u'BY', _(u'Belarus')),
(u'BE', _(u'Belgium')),
(u'BA', _(u'Bosnia and Herzegovina')),
(u'BG', _(u'Bulgaria')),
(u'HR', _(u'Croatia')),
(u'CY', _(u'Cyprus')),
(u'CZ', _(u'Czech Republic')),
(u'DK', _(u'Denmark')),
(u'EE', _(u'Estonia')),
(u'FO', _(u'Faroe Islands')),
(u'FI', _(u'Finland')),
(u'FR', _(u'France')),
(u'GE', _(u'Georgia')),
(u'DE', _(u'Germany')),
(u'GI', _(u'Gibraltar')),
(u'GR', _(u'Greece')),
(u'GL', _(u'Greenland')),
(u'GG', _(u'Guernsey')),
(u'HU', _(u'Hungary')),
(u'IS', _(u'Iceland')),
(u'IE', _(u'Ireland')),
(u'IM', _(u'Isle of Man')),
(u'IT', _(u'Italy')),
(u'JE', _(u'Jersey')),
(u'LV', _(u'Latvia')),
(u'LI', _(u'Liechtenstein')),
(u'LT', _(u'Lithuania')),
(u'LU', _(u'Luxembourg')),
(u'MK', _(u'Macedonia, The Former Yugoslav Republic of')),
(u'MT', _(u'Malta')),
(u'MD', _(u'Moldova')),
(u'MC', _(u'Monaco')),
(u'ME', _(u'Montenegro')),
(u'NL', _(u'Netherlands')),
(u'NO', _(u'Norway')),
(u'PL', _(u'Poland')),
(u'PT', _(u'Portugal')),
(u'RO', _(u'Romania')),
(u'SM', _(u'San Marino')),
(u'RS', _(u'Serbia')),
(u'SK', _(u'Slovakia')),
(u'SI', _(u'Slovenia')),
(u'ES', _(u'Spain')),
(u'SJ', _(u'Svalbard and Jan Mayen')),
(u'SE', _(u'Sweden')),
(u'CH', _(u'Switzerland')),
(u'UA', _(u'Ukraine')),
(u'GB', _(u'United Kingdom')),
(u'VA', _(u'Holy See (Vatican City State)')),
)),
(_(u'North America'), (
(u'AS', _(u'American Samoa')),
(u'AI', _(u'Anguilla')),
(u'AG', _(u'Antigua and Barbuda')),
(u'AW', _(u'Aruba')),
(u'BS', _(u'Bahamas')),
(u'BB', _(u'Barbados')),
(u'BZ', _(u'Belize')),
(u'BM', _(u'Bermuda')),
(u'CA', _(u'Canada')),
(u'KY', _(u'Cayman Islands')),
(u'CR', _(u'Costa Rica')),
(u'CU', _(u'Cuba')),
(u'DM', _(u'Dominica')),
(u'DO', _(u'Dominican Republic')),
(u'SV', _(u'El Salvador')),
(u'GD', _(u'Grenada')),
(u'GP', _(u'Guadeloupe')),
(u'GT', _(u'Guatemala')),
(u'HT', _(u'Haiti')),
(u'HN', _(u'Honduras')),
(u'JM', _(u'Jamaica')),
(u'MX', _(u'Mexico')),
(u'MS', _(u'Montserrat')),
(u'AN', _(u'Netherlands Antilles')),
(u'NI', _(u'Nicaragua')),
(u'PA', _(u'Panama')),
(u'PR', _(u'Puerto Rico')),
(u'BL', _(u'Saint Barthelemy')),
(u'KN', _(u'Saint Kitts and Nevis')),
(u'LC', _(u'Saint Lucia')),
(u'MF', _(u'Saint Martin')),
(u'PM', _(u'Saint Pierre and Miquelon')),
(u'VC', _(u'Saint Vincent and the Grenadines')),
(u'TT', _(u'Trinidad and Tobago')),
(u'TC', _(u'Turks and Caicos Islands')),
(u'US', _(u'United States')),
(u'UM', _(u'United States Minor Outlying Islands')),
(u'VG', _(u'Virgin Islands, British')),
(u'VI', _(u'Virgin Islands, U.S.')),
)),
(_(u'Oceania'), (
(u'AU', _(u'Australia')),
(u'CX', _(u'Christmas Island')),
(u'CC', _(u'Cocos (Keeling) Islands')),
(u'CK', _(u'Cook Islands')),
(u'FJ', _(u'Fiji')),
(u'PF', _(u'French Polynesia')),
(u'GU', _(u'Guam')),
(u'KI', _(u'Kiribati')),
(u'MH', _(u'Marshall Islands')),
(u'FM', _(u'Micronesia, Federated States of')),
(u'NR', _(u'Nauru')),
(u'NC', _(u'New Caledonia')),
(u'NZ', _(u'New Zealand')),
(u'NU', _(u'Niue')),
(u'NF', _(u'Norfolk Island')),
(u'MP', _(u'Northern Mariana Islands')),
(u'PW', _(u'Palau')),
(u'PG', _(u'Papua New Guinea')),
(u'PN', _(u'Pitcairn')),
(u'WS', _(u'Samoa')),
(u'SB', _(u'Solomon Islands')),
(u'TK', _(u'Tokelau')),
(u'TO', _(u'Tonga')),
(u'TV', _(u'Tuvalu')),
(u'VU', _(u'Vanuatu')),
(u'WF', _(u'Wallis and Futuna')),
)),
(_(u'South America'), (
(u'AR', _(u'Argentina')),
(u'BO', _(u'Bolivia')),
(u'BR', _(u'Brazil')),
(u'CL', _(u'Chile')),
(u'CO', _(u'Colombia')),
(u'EC', _(u'Ecuador')),
(u'FK', _(u'Falkland Islands (Malvinas)')),
(u'GF', _(u'French Guiana')),
(u'GY', _(u'Guyana')),
(u'MQ', _(u'Martinique')),
(u'PY', _(u'Paraguay')),
(u'PE', _(u'Peru')),
(u'GS', _(u'South Georgia and the South Sandwich Islands')),
(u'SR', _(u'Suriname')),
(u'UY', _(u'Uruguay')),
(u'VE', _(u'Venezuela')),
)
),
)
|
|
# Copyright 2013 10gen Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
from bson.json_util import loads, dumps
import datetime
import mock
import webapps.server
from webapps.server.views import ratelimit
from webapps.lib.db import get_db
from webapps.lib.util import get_internal_coll_name, get_collection_names
from flask import session
from pymongo.errors import OperationFailure
from webapps.lib.MWSServerError import MWSServerError
from tests import MongoWSTestCase
from webapps.lib import CLIENTS_COLLECTION
class ViewsSetUpUnitTestCase(MongoWSTestCase):
def test_create_mws_resource(self):
url = '/mws/'
rv = self.app.post(url)
new_response_dict = loads(rv.data)
self.assertIn('res_id', new_response_dict)
res_id = new_response_dict['res_id']
is_new = new_response_dict['is_new']
self.assertIsNotNone(res_id)
self.assertTrue(is_new)
# check if res_id is unchanged
rv = self.app.post(url)
new_response_dict = loads(rv.data)
new_res_id = new_response_dict['res_id']
new_is_new = new_response_dict['is_new']
self.assertIsNotNone(new_res_id)
self.assertEqual(res_id, new_res_id)
self.assertFalse(new_is_new)
def test_create_mws_resource_new_session(self):
url = '/mws/'
rv = self.app.post(url)
response_dict = loads(rv.data)
self.assertIn('res_id', response_dict)
res_id = response_dict['res_id']
self.assertIsNotNone(res_id)
with self.app.session_transaction() as sess:
del sess['session_id']
# check if res_id is unique
rv = self.app.post(url)
new_res_id = loads(rv.data)['res_id']
self.assertIsNotNone(new_res_id)
self.assertNotEqual(res_id, new_res_id)
@mock.patch('webapps.server.views.datetime')
def test_keep_mws_alive(self, datetime_mock):
first = datetime.datetime(2012, 7, 4)
second = first + datetime.timedelta(days=1)
datetime_mock.now.return_value = first
db = get_db()
# get a session to keep alive
rv = self.app.post('/mws/')
res_id = loads(rv.data)['res_id']
with self.app.session_transaction() as sess:
session_id = sess['session_id']
res = db.clients.find({'res_id': res_id, 'session_id': session_id},
{'timestamp': 1})
_id = res[0]['_id']
old_ts = res[0]['timestamp']
self.assertEqual(old_ts, first)
datetime_mock.now.return_value = second
url = '/mws/' + res_id + '/keep-alive'
rv = self.app.post(url)
self.assertEqual(rv.status_code, 204)
newres = db.clients.find({'_id': _id}, {'timestamp': 1})
self.assertEqual(newres[0]['timestamp'], second)
def test_ratelimit(self):
rv = self.app.post('/mws/')
self.res_id = loads(rv.data)['res_id']
limit = self.real_app.config['RATELIMIT_QUOTA'] = 3
def dummy():
return ('', 204)
with self.app.session_transaction() as client_sess:
session_id = client_sess['session_id']
with self.real_app.test_request_context():
session['session_id'] = session_id
for i in range(limit):
self.assertEqual(ratelimit(dummy)(), ('', 204))
with self.assertRaises(MWSServerError) as cm:
ratelimit(dummy)()
self.assertEqual(cm.exception.error, 429)
def test_ratelimit_no_session(self):
def dummy():
return ('', 204)
with self.real_app.test_request_context():
with self.assertRaises(MWSServerError) as cm:
ratelimit(dummy)()
self.assertEqual(cm.exception.error, 401)
def test_nocache(self):
res = self.app.post('/mws/')
self.assertEqual(res.headers['cache-control'], 'no-cache')
self.assertEqual(res.headers['expires'], '0')
res_id = loads(res.data)['res_id']
res = self.app.get('/mws/%s/db/coll/find?{}' % res_id)
self.assertEqual(res.headers['cache-control'], 'no-cache')
self.assertEqual(res.headers['expires'], '0')
class DBTestCase(MongoWSTestCase):
def setUp(self):
super(DBTestCase, self).setUp()
# Todo: For stuff that isn't checking authentication,
# we probably don't want to rely on/use the authentication code
rv = self.app.post('/mws/')
response_dict = loads(rv.data)
self.assertIn('res_id', response_dict)
self.res_id = response_dict['res_id']
self.assertIsNotNone(self.res_id)
self.db = get_db()
self.make_request_url = '/mws/%s/db/%%s' % (self.res_id)
def _make_request(self, endpoint, data, method, expected_status):
url = self.make_request_url % (endpoint)
if data is not None:
if isinstance(data, dict):
data = dumps(
dict((k, v) for (k, v) in data.iteritems() if v is not None)
)
else:
data = dumps(data)
if method == self.app.get:
url = '%s?%s' % (url, data)
data = None
result = method(url, data=data, content_type='application/json')
actual_status = result.status_code
self.assertEqual(
actual_status, expected_status,
("Expected request status to be %s, got %s instead."
" Full result: %s") %
(expected_status, actual_status, result.data))
result_dict = loads(result.data) if result.data else {}
return result_dict
def make_get_collection_names_request(self, expected_status=200):
return self._make_request('getCollectionNames', None, self.app.get,
expected_status)
def make_db_drop_request(self, expected_status=204):
self.make_request_url = '/mws/%s/db%%s' % (self.res_id)
return self._make_request('', None, self.app.delete, expected_status)
class DBCollectionTestCase(DBTestCase):
def setUp(self):
super(DBCollectionTestCase, self).setUp()
self.coll_name = 'test_collection'
self.internal_coll_name = get_internal_coll_name(self.res_id,
self.coll_name)
self.db = get_db()
self.db_collection = self.db[self.internal_coll_name]
self.make_request_url = '/mws/%s/db/%s/%%s' % \
(self.res_id, self.coll_name)
def tearDown(self):
super(DBCollectionTestCase, self).tearDown()
self.db_collection.drop()
def make_find_request(self, query=None, projection=None, skip=None,
limit=None, expected_status=200):
data = {
'query': query,
'projection': projection,
'skip': skip,
'limit': limit,
}
return self._make_request('find', data, self.app.get,
expected_status)
def make_insert_request(self, document, expected_status=204):
data = {'document': document}
return self._make_request('insert', data, self.app.post,
expected_status)
def make_remove_request(self, constraint, just_one=False,
expected_status=204):
data = {'constraint': constraint, 'just_one': just_one}
return self._make_request('remove', data, self.app.delete,
expected_status)
def make_update_request(self, query, update, upsert=False, multi=False,
expected_status=204):
data = {
'query': query,
'update': update,
'upsert': upsert,
'multi': multi,
}
return self._make_request('update', data, self.app.put,
expected_status)
def make_aggregate_request(self, query=None, expected_status=200):
return self._make_request('aggregate', query, self.app.get,
expected_status)
def make_drop_request(self, expected_status=204):
return self._make_request('drop', None, self.app.delete,
expected_status)
def make_count_request(self, query=None, skip=None, limit=None,
expected_status=200):
data = {'query': query, 'skip': skip, 'limit': limit}
return self._make_request('count', data, self.app.get, expected_status)
def set_session_id(self, new_id):
with self.app.session_transaction() as sess:
sess['session_id'] = new_id
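# Illustrative note (not part of the original suite): the helpers above all
# funnel through _make_request, which drops None-valued keys, JSON-encodes the
# payload, and, for GET requests, appends it as the query string. For example,
# make_find_request({'name': 'mongo'}, limit=5) issues roughly:
#   GET /mws/<res_id>/db/test_collection/find?{"query": {"name": "mongo"}, "limit": 5}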
class FindUnitTestCase(DBCollectionTestCase):
def test_find(self):
query = {'name': 'mongo'}
self.db_collection.insert(query)
result = self.make_find_request(query)
self.assertEqual(len(result['result']), 1)
self.assertEqual(result['result'][0]['name'], 'mongo')
def test_skipping_results(self):
self.db_collection.insert([{'val': i} for i in xrange(10)])
response = self.make_find_request(query={}, skip=4)
result = response['result']
self.assertEqual(len(result), 6)
values = [r['val'] for r in result]
self.assertItemsEqual(values, range(4, 10))
def test_limiting_results(self):
self.db_collection.insert([{'val': i} for i in xrange(10)])
response = self.make_find_request(query={}, limit=4)
result = response['result']
self.assertEqual(len(result), 4)
values = [r['val'] for r in result]
self.assertItemsEqual(values, range(4))
def test_invalid_find_session(self):
self.set_session_id('invalid_id')
document = {'name': 'mongo'}
result = self.make_find_request(document, expected_status=403)
error = {
'error': 403,
'reason': 'Session error. User does not have access to res_id',
'detail': '',
}
self.assertEqual(result, error)
class InsertUnitTestCase(DBCollectionTestCase):
def test_simple_insert(self):
document = {'name': 'Mongo'}
self.make_insert_request(document)
result = self.db_collection.find()
self.assertEqual(result.count(), 1)
self.assertEqual(result[0]['name'], 'Mongo')
def test_multiple_document_insert(self):
document = [{'name': 'Mongo'}, {'name': '10gen'}]
self.make_insert_request(document)
result = self.db_collection.find()
self.assertEqual(result.count(), 2)
names = [r['name'] for r in result]
self.assertItemsEqual(names, ['Mongo', '10gen'])
def test_invalid_insert_session(self):
self.set_session_id('invalid_session')
document = {'name': 'mongo'}
self.make_insert_request(document, expected_status=403)
def test_insert_quota(self):
limit = self.real_app.config['QUOTA_COLLECTION_SIZE'] = 150
self.make_insert_request([
{'name': 'Mongo'}, {'name': 'Mongo'}, {'name': 'NotMongo'}
], expected_status=204)
result = self.make_insert_request([
{'name': 'Mongo'}, {'name': 'Mongo'}, {'name': 'NotMongo'}
], expected_status=403)
error = {
'error': 403,
'reason': 'Collection size exceeded',
'detail': ''
}
self.assertEqual(result, error)
class RemoveUnitTestCase(DBCollectionTestCase):
def test_remove(self):
self.db_collection.insert([
{'name': 'Mongo'}, {'name': 'Mongo'}, {'name': 'NotMongo'}
])
document = {'name': 'Mongo'}
self.make_remove_request(document)
result = self.db_collection.find()
self.assertEqual(result.count(), 1)
self.assertEqual(result[0]['name'], 'NotMongo')
def test_remove_one(self):
self.db_collection.insert([
{'name': 'Mongo'}, {'name': 'Mongo'}, {'name': 'NotMongo'}
])
document = {'name': 'Mongo'}
self.make_remove_request(document, just_one=True)
result = self.db_collection.find()
names = [r['name'] for r in result]
self.assertItemsEqual(names, ['Mongo', 'NotMongo'])
def test_remove_requires_valid_res_id(self):
self.set_session_id('invalid_session')
self.make_remove_request({}, expected_status=403)
class UpdateUnitTestCase(DBCollectionTestCase):
def test_upsert(self):
result = self.db_collection.find({'name': 'Mongo'})
self.assertEqual(result.count(), 0)
self.make_update_request({}, {'name': 'Mongo'}, True)
result = self.db_collection.find()
self.assertEqual(result.count(), 1)
self.assertEqual(result[0]['name'], 'Mongo')
def test_update_one(self):
self.db_collection.insert([
{'name': 'Mongo'}, {'name': 'Mongo'}, {'name': 'NotMongo'}
])
self.make_update_request({'name': 'Mongo'}, {'name': 'Mongo2'}, True)
result = self.db_collection.find()
names = [r['name'] for r in result]
self.assertItemsEqual(names, ['Mongo', 'Mongo2', 'NotMongo'])
def test_update_multi(self):
self.db_collection.insert([
{'name': 'Mongo'}, {'name': 'Mongo'}, {'name': 'NotMongo'}
])
self.make_update_request(
{'name': 'Mongo'},
{'$set': {'name': 'Mongo2'}},
False, True
)
result = self.db_collection.find()
names = [r['name'] for r in result]
self.assertItemsEqual(names, ['Mongo2', 'Mongo2', 'NotMongo'])
def test_multi_upsert(self):
# Does not exist - upsert
self.make_update_request({}, {'$set': {'name': 'Mongo'}}, True, True)
result = self.db_collection.find()
self.assertEqual(result.count(), 1)
self.assertEqual(result[0]['name'], 'Mongo')
# Exists - multi-update
self.db_collection.insert([{'name': 'Mongo'}, {'name': 'NotMongo'}])
self.make_update_request(
{'name': 'Mongo'},
{'$set': {'name': 'Mongo2'}},
True, True
)
result = self.db_collection.find()
names = [r['name'] for r in result]
self.assertItemsEqual(names, ['Mongo2', 'Mongo2', 'NotMongo'])
def test_update_quota(self):
limit = self.real_app.config['QUOTA_COLLECTION_SIZE'] = 500
self.db_collection.insert([
{'name': 'Mongo'}, {'name': 'Mongo'}, {'name': 'NotMongo'}
])
self.make_update_request({'name': 'Mongo'}, {'name': 'Mongo2'},
expected_status=204)
result = self.make_update_request({'name': 'Mongo'},
{'$set': {'a': list(range(50))}},
expected_status=403)
error = {
'error': 403,
'reason': 'Collection size exceeded',
'detail': ''
}
self.assertEqual(result, error)
def test_multi_update_quota(self):
limit = self.real_app.config['QUOTA_COLLECTION_SIZE'] = 500
self.db_collection.insert([
{'name': 'Mongo'}, {'name': 'Mongo'}, {'name': 'NotMongo'}
])
self.make_update_request({},
{'$set': {'a': list(range(12))}},
multi=False,
expected_status=204)
result = self.make_update_request({},
{'$set': {'a': list(range(12))}},
multi=True,
expected_status=403)
error = {
'error': 403,
'reason': 'Collection size exceeded',
'detail': ''
}
self.assertEqual(result, error)
class AggregateUnitTestCase(DBCollectionTestCase):
def test_aggregate(self):
for i in range(6):
self.db_collection.insert({'val': i})
query = [
{'$match': {'val': {'$lt': 5}}},
{'$sort': {'val': -1}},
{'$skip': 1},
{'$limit': 2}
]
self.db_collection.aggregate(query)
result = self.make_aggregate_request(query)
self.assertEqual(result['ok'], 1)
result = result['result']
self.assertEqual(len(result), 2)
self.assertEqual([x['val'] for x in result], [3, 2])
def test_invalid_query(self):
result = self.make_aggregate_request({}, expected_status=400)
self.assertEqual(result['error'], 400)
with self.assertRaises(OperationFailure) as cm:
self.db_collection.aggregate({})
self.assertEqual(cm.exception.message, result['reason'])
def test_invalid_find_session(self):
self.set_session_id('invalid_id')
query = [{'$match': {'val': {'$lt': 5}}}]
result = self.make_aggregate_request(query, expected_status=403)
error = {
'error': 403,
'reason': 'Session error. User does not have access to res_id',
'detail': '',
}
self.assertEqual(result, error)
class CountTestCase(DBCollectionTestCase):
def test_get_query_count(self):
self.db_collection.insert([{'n': i} for i in xrange(10)])
response = self.make_count_request({'n': {'$gt': 5}})
self.assertEqual(response['count'], 4)
self.db_collection.insert([{'n': i} for i in xrange(10)])
response = self.make_count_request({'n': {'$gt': 4}})
self.assertEqual(response['count'], 10)
def test_uses_skip_and_limit_info(self):
self.db_collection.insert([{'n': i} for i in xrange(10)])
response = self.make_count_request({}, skip=0, limit=1)
self.assertEqual(response['count'], 1)
response = self.make_count_request({}, skip=8, limit=0)
self.assertEqual(response['count'], 2)
class DropUnitTestCase(DBCollectionTestCase):
def test_drop(self):
self.db_collection.insert([
{'name': 'Mongo'}, {'name': 'Mongo'}, {'name': 'NotMongo'}
])
result = self.db_collection.find()
self.assertEqual(result.count(), 3)
self.make_drop_request()
result = self.db_collection.find()
self.assertEqual(result.count(), 0)
self.assertNotIn(self.internal_coll_name, self.db.collection_names())
class GetCollectionNamesUnitTestCase(DBTestCase):
def test_get_collection_names(self):
result = self.make_get_collection_names_request()['result']
self.assertEqual(result, [])
self.db[CLIENTS_COLLECTION].update({'res_id': self.res_id},
{'$push': {'collections': 'test'}})
result = self.make_get_collection_names_request()['result']
self.assertEqual(result, ['test'])
def test_invalid_session(self):
with self.app.session_transaction() as sess:
sess['session_id'] = 'invalid session'
result = self.make_get_collection_names_request(expected_status=403)
error = {
'error': 403,
'reason': 'Session error. User does not have access to res_id',
'detail': '',
}
self.assertEqual(result, error)
def test_resid_isolation(self):
self.db[CLIENTS_COLLECTION].update({'res_id': self.res_id},
{'$push': {'collections': 'test'}})
result = self.make_get_collection_names_request()['result']
self.assertEqual(result, ['test'])
with self.app.session_transaction() as sess:
del sess['session_id']
new_resid = loads(self.app.post('/mws/').data)['res_id']
self.assertNotEqual(self.res_id, new_resid)
self.db[CLIENTS_COLLECTION].update({'res_id': new_resid},
{'$push': {'collections': 'test2'}})
self.make_request_url = '/mws/%s/db/%%s' % (new_resid)
result = self.make_get_collection_names_request()['result']
self.assertEqual(result, ['test2'])
class DropDBUnitTestCase(DBTestCase):
def test_drop_db(self):
testdoc = {'name': 'Mongo'}
colls = ['a', 'b', 'c']
update = {'$addToSet': {'collections': {'$each': colls}}}
self.db[CLIENTS_COLLECTION].update({'res_id': self.res_id}, update)
colls = [get_internal_coll_name(self.res_id, c) for c in colls]
for c in colls:
self.db[c].insert(testdoc)
actual_colls = self.db.collection_names()
for c in colls:
self.assertIn(c, actual_colls)
self.make_db_drop_request()
actual_colls = self.db.collection_names()
for c in colls:
self.assertNotIn(c, actual_colls)
self.assertItemsEqual(get_collection_names(self.res_id), [])
class IntegrationTestCase(DBCollectionTestCase):
def test_insert_find(self):
document = {'name': 'mongo'}
self.make_insert_request(document)
result = self.make_find_request(document)
self.assertDictContainsSubset(document, result['result'][0])
|
|
import entDevType as edt
class entropyDeviationType:
"""
A class that provides an interface to Chi-Square distribution
testing, Shannon Entropy Analysis and Monte Carlo Pi Approximation
for the purpose of analyzing entropy in an unknown binary stream to
locate hidden or encrypted data, with a primary focus on
XOR-encrypted PE files hidden in Office, PDF, and similar documents
"""
def __init__(self, bs = 8192):
"""
Takes a parameter 'bs' that specifies the block size,
in bytes, to be used. The default value is
8192 (8 KB).
"""
self.block_size = bs
self.edt = edt.entDevType(long(bs))
def openFile(self, name, whole = False):
"""
Opens and reads the file specified by 'name'
performs whole file analysis if 'whole' is True
"""
self.fileName = name
self.fileHand = open(self.fileName, 'rb')
self.data = self.fileHand.read()
self.fileHand.close()
self.edt.setData(self.data, long(self.block_size), whole)
self.edt.calculate()
def getBlockCount(self):
"""
Returns the number of blocks in a file
"""
return self.edt.count()
def isValidBlockNumber(self, idx, base = 16):
"""
Checks that a given index, 'idx' is within the range of
valid blocks; defaults to base 16 format, modified by the
'base' parameter. Returns True if the index is valid.
"""
if type(idx) == str:
idx = long(idx, base)
if self.edt.maxIndex() < idx:
return False
else:
return True
def isValidBlockRange(self, low, high, base = 16):
"""
Checks that a given range of indices demarked by 'low' and
'high' are valid; defaults to base 16 format and modified by
the 'base' parameter. Returns True if the range is valid
"""
if self.isValidBlockNumber(low, base) and self.isValidBlockNumber(high, base):
return True
return False
def getScore(self, idx, base = 16):
"""
Gets the Chi, Shannon and Pi approximation score
for a given block indicated by 'idx', which by
default is specified in base 16 format but can be
changed via the 'base' parameter. Returns a
native object with the properties 'chi_square',
'shannon', 'estimate' and 'error' for the Chi,
Shannon Entropy and Monte Carlo Pi approximation
respectively. Throws a ValueError() if the index is
invalid
"""
if not self.isValidBlockNumber(idx, base):
raise ValueError("Invalid block number")
return self.edt.getScore(idx)
def getAllScores(self):
"""Retrieves the Chi, Shannon and Pi approximation scores
for all blocks in the file. Returns a list() of native
objects with 'chi_square', 'shannon', 'estimate' and
'error' properties for the Chi, Shannon and Pi approximation
values respectively"""
return self.edt.getAllScores()
def getWholeFileScore(self):
"""
Retrieves the Chi, Shannon and Pi approximation scores
for the entire file. Returns a native object with
'chi_square', 'shannon', 'estimate' and 'error' properties
for the Chi, Shannon and Pi approximation values respectively
"""
return self.edt.getWholeScore()
def getXYDeviation(self, x, y, base = 16):
"""
Retrieves the Chi, Shannon and Pi approximation
deviation scores between two blocks indicated by
the parameters 'x' and 'y', which by default are
specified in base 16 format but is changeable via
the 'base' parameter. Returns a native object
with 'chi_square', 'shannon', 'estimate' and
'error' properties that indicate the deviation
between the two blocks. Throws a ValueError()
if the specified range is invalid.
"""
if type(x) == str:
x = long(x, base)
if type(y) == str:
y = long(y, base)
if self.isValidBlockRange(x, y, base):
return self.edt.getDeviation(x, y)
raise ValueError("Invalid XY range")
def getBlockAllDeviation(self, x, base = 16):
"""
Retrieves the Chi, Shannon and Pi approximation
deviations between all blocks in a file against
the block specified by the parameter 'x', which
by default is specified in base 16 format, but is
changeable via the 'base' parameter.
Returns a list() of native objects with 'chi_square',
'shannon', 'estimate' and 'error' properties for the
Chi, Shannon and Pi approximation values respectively.
Throws a ValueError() if the specified index is invalid
"""
if self.isValidBlockNumber(x, base):
return self.edt.getAllDeviations(x)
raise ValueError("Invalid block number")
def getWholeFileDeviation(self, x, base = 16):
"""
Returns the Chi, Shannon and Pi approximation
deviations for a block indicated by the parameter
'x' relative to the entire file. The index is specified
in base 16 by default, however that is customizable via
'base' parameter. The returned native object has
properties named 'chi_square', 'shannon', 'estimate' and
'error' for the Chi, Shannon and Pi approximation values
respectively. If the index specified is invalid then a
ValueError() is thrown.
"""
if self.isValidBlockNumber(x, base):
return self.edt.getWholeFileDeviation(x)
raise ValueError("Invalid block number")
def getSequentialDeviation(self, x = 0, y = 0, base = 16):
"""
Calculates the deviation for sequential blocks, both
prior and following within a range of blocks that is
specified by the 'x' and 'y' parameters or every block
in the file by default. The x and y parameters by default
are specified in base 16 format however this is
customizable via the 'base' parameter.
Returns a list() of dict() objects with the keys
'prior', 'next', 'index' and 'dev' for the prior block
number, next block number, the block the deviations
are relative to, and the native object respectively.
Only one of the 'prior' and 'next' keys will be valid
in any given list element. The other will have a value
of None. The object stored at the key 'dev' will
contain the properties 'chi_square', 'shannon',
'estimate' and 'error' for the Chi, Shannon and Pi
approximation values respectively.
Throws a ValueError() if the index range specified is
invalid.
"""
ret = list()
if 0 == x and 0 == y:
x = 0
y = self.edt.maxIndex()
if x > y:
raise ValueError("Invalid block range specified (x > y)")
if not self.isValidBlockRange(x, y, base):
raise ValueError("Invalid block range specified")
for idx in range(x, y, 2):
if (0 != idx):
ret.append(dict({'prior': idx-1,
'next': None,
'index': idx,
'dev': self.edt.getDeviation(idx-1, idx)}))
if self.edt.maxIndex()-1 != idx:
ret.append(dict({'prior': None,
'next': idx+1,
'index': idx,
'dev': self.edt.getDeviation(idx, idx+1)}))
return ret
def findHighDeviation(self, c = 100, s = 20, e = 1):
"""
THIS METHOD IS AN ILLUSTRATIVE EXAMPLE ONLY.
Attempts to find blocks with high deviation values relative to
the blocks around it. What constitutes high deviation is
specified by the 'c', 's' and 'e' parameters that denote the
Chi Square, Shannon and Pi approximation Estimate respectively
Returns a list of native objects for any blocks that match, or
an empty list if none do. The returned native objects have the
properties 'chi_square', 'shannon', 'estimate' and 'error' for
the Chi, Shannon and Pi approximation deviation values
respectively.
"""
ret = list()
items = self.getSequentialDeviation(0, self.edt.maxIndex())
for item in items:
dev = item['dev']
if dev.chi_square > c and dev.shannon > s and dev.estimate > e:
ret.append(item)
return ret
def getBlocksAverage(self, ilist):
"""
Averages the Chi, Shannon and Pi approximation values in a list()
specified by the 'ilist' parameter.
Returns a dict() object with 'chi_square', 'shannon',
'estimate' and 'error' keys containing the averaged Chi,
Shannon and Pi approximation values. Throws a ValueError()
if passed an empty list() as a parameter
"""
ret = dict({'chi_square': 0.0, 'shannon': 0.0, 'estimate': 0.0, 'error': 0.0})
cnt = len(ilist)
chi = 0.0
sha = 0.0
est = 0.0
err = 0.0
if 0 == cnt:
raise ValueError("An invalid (empty) list was specified")
for item in ilist:
chi += item['dev'].chi_square
sha += item['dev'].shannon
est += item['dev'].estimate
err += item['dev'].error
ret['chi_square'] = chi/cnt
ret['shannon'] = sha/cnt
ret['estimate'] = est/cnt
ret['error'] = err/cnt
return ret
def isHighAverageChi(self, maxv, chi = 15):
"""
WARNING: THIS METHOD IS AN ILLUSTRATIVE EXAMPLE ONLY.
Identifies blocks with uniform or near uniform Chi
distributions for a range between the first block and
the block specified by 'maxv'. The blocks in that range
have their scores averaged; if the average exceeds
the percentage specified by the 'chi' parameter it returns
True, otherwise False. The 'maxv' parameter is
specified in base 16 format and methods called by this
method can throw a ValueError() when an invalid index is
specified.
"""
items = self.getSequentialDeviation(0, maxv)
avg = self.getBlocksAverage(items)
if avg['chi_square'] > chi:
return True
return False
def priorHighAndNextLowShannon(self, idx, high = 20.0, low = 2.5):
"""
WARNING: THIS METHOD IS AN ILLUSTRATIVE EXAMPLE ONLY.
Attempts to identify the beginning of a significant deviation
by attempting to determine if the block denoted by the
parameter 'idx' has a high percentage of deviation in its
Shannon score relative to the prior block and a low percentage
of deviation in its Shannon score in the block that follows it.
The high and low marks are denoted by the parameters 'high' and
'low' and default to 20% and 2.5% respectively. These values
were chosen based on deviations in a very small sample and will
result in high false negative and false positive results.
Returns True if the prior block's Shannon deviation exceeds
'high' and the following block's is less than 'low'; otherwise
it returns False. A ValueError() is thrown if the index,
index-1 or index+1 are invalid.
"""
prv = None
nxt = None
if (not self.isValidBlockNumber(idx) or
not self.isValidBlockNumber(idx-1) or
not self.isValidBlockNumber(idx+1)):
raise ValueError("An invalid index was specified")
prv = self.getXYDeviation(idx-1, idx)
nxt = self.getXYDeviation(idx, idx+1)
if prv.shannon > high and nxt.shannon < low:
return True
return False
def getSequentialLowShannon(self, idx, low = 1.7):
"""
WARNING: THIS METHOD IS AN ILLUSTRATIVE EXAMPLE ONLY.
Attempts to identify sequential blocks of deviant
data by looking for low Shannon score deviations
in sequential blocks. The block to start at is
specified by the 'idx' parameter, which is specified
in base 16 format. What exactly constitutes a low percentage
of deviation is specified by the parameter 'low', which
defaults to 1.7%. This value was chosen based on analysis of
a very small set of samples and is likely to result in high
amounts of false positive and false negatives as a result.
Returns the index of the highest block following 'idx' that
has a relative Shannon deviation less than 'low', or the index
specified by 'idx' if the following block does not match.
Throws a ValueError() if the index specified is invalid.
"""
ret = list()
if not self.isValidBlockNumber(idx):
raise ValueError("An invalid index was specified")
for idx in range(idx, self.edt.maxIndex()):
if not self.isValidBlockNumber(idx+1):
return idx
dev = self.getXYDeviation(idx, idx+1)
if dev.shannon < low:
continue
else:
return idx
return idx
def getSequentialCloseChi(self, lidx, hidx, dmax = 26.0):
"""
WARNING: THIS METHOD IS AN ILLUSTRATIVE EXAMPLE ONLY
Attempts to identify related deviant blocks between a range
specified by the indices 'lidx' and 'hidx' respectively.
Specifically this method attempts to identify blocks that have
Chi Square scores that are within 'dmax' percent of one
another, which defaults to 26%. This value was chosen based on
analysis of a very small sample set and is likely to result in
high false positive and false negative rates if used as is.
The theory is based on the observation that the distribution of
shorter XOR keys varies relatively little.
Returns the highest index of a block that follows 'lidx' that
deviates less than 'dmax' percent, or 'lidx' if the block
immediately following 'lidx' exceeds 'dmax'%.
Throws a ValueError() if the index range specified is invalid.
"""
ret = 0
if lidx > hidx or not self.isValidBlockRange(lidx, hidx):
raise ValueError("An invalid index range was specified")
ret = lidx
for idx in range(lidx, hidx):
dev = self.getXYDeviation(idx, idx+1)
if dev.chi_square < dmax:
ret = idx
else:
break
return ret
def coalesceSequential(self, lst, maxv = 2):
"""
Takes a list of tuples in the format of tuple((low, high))
indicating a start and stop range of blocks and checks to see
if sequential list elements have nearly overlapping ranges
with a distance less than or equal to 'maxv'.
The concept behind this method is that once a sequence of
suspicious blocks is identified, it is not uncommon for a
few outlier blocks to split what is really a single range of
suspect blocks into multiple ranges. As such, this method
checks to see if that is the case and coalesces the indices
into a single range of blocks.
Returns a list of tuples with high and low ranges.
"""
pb = None
ph = None
ret = list()
for itr in lst:
if None == pb and None == ph:
pb = itr[0]
ph = itr[1]
continue
elif itr[0] - ph <= maxv:
ph = itr[1]
continue
else:
ret.append(tuple((pb, ph)))
pb = itr[0]
ph = itr[1]
# append the pending range unless it duplicates the last one added
if pb is not None and (not ret or ret[-1] != (pb, ph)):
ret.append(tuple((pb, ph)))
return ret
def calculateDistribution(self, x = 0, y = 0, base = 16):
"""
Takes a range of block indices denoted by 'x' and 'y', which
are specified in base 16 format by default and calculates the
frequency each character occurs in the block range. The idea
is that shorter XOR keys across real data tend to encounter the
value zero a lot, which leaks the key in question. Thus by
analyzing the frequency of characters in a block range, we can
easily spot abnormal sequential frequencies and quickly
identify an XOR key as a result.
Returns a list sorted in descending order of the frequency of
each character in the block range, yielding a list with a
static length of 256. Throws a ValueError() if the range
specified is invalid.
"""
dist = list()
if 0 == y:
y = self.edt.maxIndex()
if not self.isValidBlockRange(x, y):
raise ValueError("Invalid index range specified")
self.edt.calculateDistribution(x, y)
dist = self.edt.getDistribution()
dist = sorted(dist, key=lambda d: d.count)
dist.reverse()
return dist
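# A minimal usage sketch (illustrative only): 'suspect.doc' is a hypothetical
# input file and the thresholds are simply the illustrative defaults of the
# example methods documented above.
if __name__ == '__main__':
    edev = entropyDeviationType(bs=8192)
    edev.openFile('suspect.doc', whole=True)
    print 'blocks:', edev.getBlockCount()
    # blocks whose Chi/Shannon/Pi-estimate deviations exceed the example thresholds
    for hit in edev.findHighDeviation():
        print 'block', hit['index'], hit['dev'].chi_square, hit['dev'].shannon
    # byte-frequency distribution over the whole file; abnormally frequent
    # values can leak a short XOR key (see calculateDistribution() above)
    dist = edev.calculateDistribution()
    print 'most frequent byte count:', dist[0].count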
|
|
import os
import os.path as op
import numpy as np
import nibabel as nib
from dipy.reconst import dki
from dipy.reconst import dki_micro
from dipy.core.ndindex import ndindex
import AFQ.utils.models as ut
__all__ = ["fit_dki", "predict"]
def _fit(gtab, data, mask=None):
dkimodel = dki.DiffusionKurtosisModel(gtab)
return dkimodel.fit(data, mask=mask)
def fit_dki(data_files, bval_files, bvec_files, mask=None,
min_kurtosis=-1, max_kurtosis=3, out_dir=None, b0_threshold=50):
"""
Fit the DKI model, save files with derived maps
Parameters
----------
data_files : str or list
Files containing DWI data. If this is a str, that's the full path to a
single file. If it's a list, each entry is a full path.
bval_files : str or list
Equivalent to `data_files`.
bvec_files : str or list
Equivalent to `data_files`.
mask : ndarray, optional
Binary mask, set to True or 1 in voxels to be processed.
Default: Process all voxels.
min_kurtosis : float, optional
The minimal plausible value of kurtosis. Default: -1.
max_kurtosis : float, optional
The maximal plausible value of kurtosis. Default: 3.
out_dir : str, optional
A full path to a directory to store the maps that get computed.
Default: maps get stored in the same directory as the last DWI file
in `data_files`.
b0_threshold : float, optional
The value below which a b-value is treated as b=0. Default: 50.
Returns
-------
file_paths : a dict with the derived maps that were computed and full-paths
to the files containing these maps.
Note
----
Maps that are calculated: FA, MD, AD, RD, MK, AK, RK
"""
img, data, gtab, mask = ut.prepare_data(data_files, bval_files,
bvec_files, mask=mask,
b0_threshold=b0_threshold)
dkimodel = dki.DiffusionKurtosisModel(gtab)
dkifit = dkimodel.fit(data, mask=mask)
FA = dkifit.fa
MD = dkifit.md
AD = dkifit.ad
RD = dkifit.rd
MK = dkifit.mk(min_kurtosis, max_kurtosis)
AK = dkifit.ak(min_kurtosis, max_kurtosis)
RK = dkifit.rk(min_kurtosis, max_kurtosis)
params = dkifit.model_params
maps = [FA, MD, AD, RD, MK, AK, RK, params]
names = ['FA', 'MD', 'AD', 'RD', 'MK', 'AK', 'RK', 'params']
if out_dir is None:
if isinstance(data_files, list):
out_dir = op.join(op.split(data_files[0])[0], 'dki')
else:
out_dir = op.join(op.split(data_files)[0], 'dki')
if not op.exists(out_dir):
os.makedirs(out_dir)
aff = img.affine
file_paths = {}
for m, n in zip(maps, names):
file_paths[n] = op.join(out_dir, 'dki_%s.nii.gz' % n)
nib.save(nib.Nifti1Image(m, aff), file_paths[n])
return file_paths
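# A minimal usage sketch (illustrative only; the file names are hypothetical
# placeholders for a single DWI acquisition):
#   paths = fit_dki('sub-01_dwi.nii.gz', 'sub-01_dwi.bval', 'sub-01_dwi.bvec')
#   # paths['FA'], paths['MD'], ... are full paths to the saved NIfTI maps,
#   # written to a 'dki' directory next to the input data by default.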
def avs_dki_df(gtab, data, mask=None, min_signal=1.0e-6):
r""" Computes mean diffusion kurtosis
Parameters
----------
gtab : a GradientTable class instance
The gradient table containing diffusion acquisition parameters.
data : ndarray ([X, Y, Z, ...], g)
Data or response variables holding the data. Note that the last
dimension should contain the data. It makes no copies of data.
mask : array, optional
A boolean array used to mark the coordinates in the data that should
be analyzed that has the shape data.shape[:-1]
min_signal : float
The minimum signal value. Needs to be a strictly positive
number. Default: 1.0e-6.
Returns
-------
params : ndarray ([X, Y, Z, ...], 3)
All parameters estimated from the direct fit of the mean signal DKI model.
Parameters are ordered as follows:
1) Direct Mean Diffusivity measure
2) Direct Mean Kurtosis measure
3) Direct S0 estimate
References
----------
Henriques, R.N., Correia, M.M., Interpreting age-related changes based
on the mean signal diffusion kurtosis. 25th Annual Meeting of ISMRM,
Honolulu, April 22-28, 2017
"""
params = np.zeros(data.shape[:-1] + (3,))
bmag = int(np.log10(gtab.bvals.max()))
b = gtab.bvals.copy() / (10 ** (bmag - 1)) # normalize b units
b = b.round() * (10 ** (bmag - 1))
uniqueb = np.unique(b)
nb = len(uniqueb)
B = np.zeros((nb, 3))
B[:, 0] = -uniqueb
B[:, 1] = 1.0 / 6.0 * uniqueb ** 2
B[:, 2] = np.ones(nb)
ng = np.zeros(nb)
for bi in range(nb):
ng[bi] = np.sum(b == uniqueb[bi])
ng = np.sqrt(ng)
# Prepare mask
if mask is None:
mask = np.ones(data.shape[:-1], dtype=bool)
else:
if mask.shape != data.shape[:-1]:
raise ValueError("Mask is not the same shape as data.")
mask = np.array(mask, dtype=bool, copy=False)
index = ndindex(mask.shape)
sig = np.zeros(nb)
for v in index:
if mask[v]:
for bi in range(nb):
sig[bi] = np.mean(data[v][b == uniqueb[bi]])
# Define weights as diag(sqrt(ng) * yn**2)
W = np.diag(ng * sig**2)
BTW = np.dot(B.T, W)
inv_BT_W_B = np.linalg.pinv(np.dot(BTW, B))
invBTWB_BTW = np.dot(inv_BT_W_B, BTW)
p = np.dot(invBTWB_BTW, np.log(sig))
p[1] = p[1] / (p[0]**2)
p[2] = np.exp(p[2])
params[v] = p
return params
def fit_mdki(data_files, bval_files, bvec_files, mask=None, out_dir=None,
b0_threshold=50):
"""
Fit the mean signal DKI (MSDKI) model, save files with derived maps
Parameters
----------
data_files : str or list
Files containing DWI data. If this is a str, that's the full path to a
single file. If it's a list, each entry is a full path.
bval_files : str or list
Equivalent to `data_files`.
bvec_files : str or list
Equivalent to `data_files`.
mask : ndarray, optional
Binary mask, set to True or 1 in voxels to be processed.
Default: Process all voxels.
out_dir : str, optional
A full path to a directory to store the maps that get computed.
Default: maps get stored in the same directory as the last DWI file
in `data_files`.
b0_threshold : float, optional
The value below which a b-value is treated as b=0. Default: 50.
Returns
-------
file_paths : a dict with the derived maps that were computed and full-paths
to the files containing these maps.
Note
----
Maps that are calculated: MD, MK, S0
"""
img, data, gtab, mask = ut.prepare_data(data_files, bval_files,
bvec_files, mask=mask,
b0_threshold=b0_threshold)
params = avs_dki_df(gtab, data, mask=mask)
MD = params[..., 0]
MK = params[..., 1]
S0 = params[..., 2]
maps = [MD, MK, S0]
names = ['MD', 'MK', 'S0']
if out_dir is None:
if isinstance(data_files, list):
out_dir = op.join(op.split(data_files[0])[0], 'dki')
else:
out_dir = op.join(op.split(data_files)[0], 'dki')
if not op.exists(out_dir):
os.makedirs(out_dir)
aff = img.affine
file_paths = {}
for m, n in zip(maps, names):
file_paths[n] = op.join(out_dir, 'mdki_%s.nii.gz' % n)
nib.save(nib.Nifti1Image(m, aff), file_paths[n])
return file_paths
def fit_dkimicro(data_files, bval_files, bvec_files, mask=None,
min_kurtosis=-1, max_kurtosis=3, out_dir=None,
b0_threshold=50):
"""
Fit the DKI microstructural model, save files with derived maps
Parameters
----------
data_files : str or list
Files containing DWI data. If this is a str, that's the full path to a
single file. If it's a list, each entry is a full path.
bval_files : str or list
Equivalent to `data_files`.
bvec_files : str or list
Equivalent to `data_files`.
mask : ndarray, optional
Binary mask, set to True or 1 in voxels to be processed.
Default: Process all voxels.
min_kurtosis : float, optional
The minimal plausible value of kurtosis. Default: -1.
max_kurtosis : float, optional
The maximal plausible value of kurtosis. Default: 3.
out_dir : str, optional
A full path to a directory to store the maps that get computed.
Default: maps get stored in the same directory as the last DWI file
in `data_files`.
b0_threshold : float, optional
The value below which a b-value is treated as b=0. Default: 50.
Returns
-------
file_paths : a dict with the derived maps that were computed and full-paths
to the files containing these maps.
Note
----
Maps that are calculated: AWF, T, hAD, hRD, hMD, Da
"""
img, data, gtab, mask = ut.prepare_data(data_files, bval_files,
bvec_files, mask=mask,
b0_threshold=b0_threshold)
dkimodel = dki_micro.KurtosisMicrostructureModel(gtab)
dkifit = dkimodel.fit(data, mask=mask)
AWF = dkifit.awf
T = dkifit.tortuosity
Da = dkifit.axonal_diffusivity
hRD = dkifit.hindered_rd
hAD = dkifit.hindered_ad
evals = dkifit.hindered_evals
hMD = (evals[..., 0] + evals[..., 1] + evals[..., 2]) / 3.0
params = dkifit.model_params
maps = [AWF, T, hAD, hRD, hMD, Da, params]
names = ['AWF', 'T', 'hAD', 'hRD', 'hMD', 'Da', 'params']
if out_dir is None:
if isinstance(data_files, list):
out_dir = op.join(op.split(data_files[0])[0], 'dki')
else:
out_dir = op.join(op.split(data_files)[0], 'dki')
if not op.exists(out_dir):
os.makedirs(out_dir)
aff = img.affine
file_paths = {}
for m, n in zip(maps, names):
file_paths[n] = op.join(out_dir, 'dkimicro_%s.nii.gz' % n)
nib.save(nib.Nifti1Image(m, aff), file_paths[n])
return file_paths
def predict(params_file, gtab, S0_file=None, out_dir=None):
"""
Create a signal prediction from DKI params
Parameters
----------
params_file : str
Full path to a file with parameters saved from a DKI fit
gtab : GradientTable object
The gradient table to predict for
S0_file : str, optional
Full path to a nifti file that contains S0 measurements to incorporate
into the prediction. If the file contains 4D data, the volumes that
contain the S0 data must be the same as the gtab.b0s_mask.
Default: use 100 in every voxel.
out_dir : str, optional
Full path to a directory in which to save the prediction. Default: the
directory containing `params_file`.
Returns
-------
fname : str
Full path to the nifti file containing the predicted signal.
"""
if out_dir is None:
out_dir = op.join(op.split(params_file)[0])
if S0_file is None:
S0 = 100
else:
S0 = nib.load(S0_file).get_fdata()
# If the S0 data is 4D, we assume it comes from an acquisition that had
# B0 measurements in the same volumes described in the gtab:
if len(S0.shape) == 4:
S0 = np.mean(S0[..., gtab.b0s_mask], -1)
# Otherwise, we assume that it's already a 3D volume, and do nothing
img = nib.load(params_file)
params = img.get_fdata()
pred = dki.dki_prediction(params, gtab, S0=S0)
fname = op.join(out_dir, 'dki_prediction.nii.gz')
nib.save(nib.Nifti1Image(pred, img.affine), fname)
return fname
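# A minimal usage sketch (illustrative only; the paths are hypothetical and the
# gradient table should match the acquisition used for the original fit):
#   from dipy.core.gradients import gradient_table
#   gtab = gradient_table('sub-01_dwi.bval', 'sub-01_dwi.bvec')
#   pred_file = predict('dki/dki_params.nii.gz', gtab, S0_file='sub-01_b0.nii.gz')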
|
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import TYPE_CHECKING
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpRequest, HttpResponse
from azure.core.polling import LROPoller, NoPolling, PollingMethod
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.arm_polling import ARMPolling
from .. import models as _models
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from typing import Any, Callable, Dict, Generic, Iterable, Optional, TypeVar, Union
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
class VpnConnectionsOperations(object):
"""VpnConnectionsOperations operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.network.v2020_03_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
def get(
self,
resource_group_name, # type: str
gateway_name, # type: str
connection_name, # type: str
**kwargs # type: Any
):
# type: (...) -> "_models.VpnConnection"
"""Retrieves the details of a vpn connection.
:param resource_group_name: The resource group name of the VpnGateway.
:type resource_group_name: str
:param gateway_name: The name of the gateway.
:type gateway_name: str
:param connection_name: The name of the vpn connection.
:type connection_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: VpnConnection, or the result of cls(response)
:rtype: ~azure.mgmt.network.v2020_03_01.models.VpnConnection
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.VpnConnection"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-03-01"
accept = "application/json"
# Construct URL
url = self.get.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'gatewayName': self._serialize.url("gateway_name", gateway_name, 'str'),
'connectionName': self._serialize.url("connection_name", connection_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('VpnConnection', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/vpnGateways/{gatewayName}/vpnConnections/{connectionName}'} # type: ignore
def _create_or_update_initial(
self,
resource_group_name, # type: str
gateway_name, # type: str
connection_name, # type: str
vpn_connection_parameters, # type: "_models.VpnConnection"
**kwargs # type: Any
):
# type: (...) -> "_models.VpnConnection"
cls = kwargs.pop('cls', None) # type: ClsType["_models.VpnConnection"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-03-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self._create_or_update_initial.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'gatewayName': self._serialize.url("gateway_name", gateway_name, 'str'),
'connectionName': self._serialize.url("connection_name", connection_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(vpn_connection_parameters, 'VpnConnection')
body_content_kwargs['content'] = body_content
request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 201]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize('VpnConnection', pipeline_response)
if response.status_code == 201:
deserialized = self._deserialize('VpnConnection', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_create_or_update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/vpnGateways/{gatewayName}/vpnConnections/{connectionName}'} # type: ignore
def begin_create_or_update(
self,
resource_group_name, # type: str
gateway_name, # type: str
connection_name, # type: str
vpn_connection_parameters, # type: "_models.VpnConnection"
**kwargs # type: Any
):
# type: (...) -> LROPoller["_models.VpnConnection"]
"""Creates a vpn connection to a scalable vpn gateway if it doesn't exist else updates the
existing connection.
:param resource_group_name: The resource group name of the VpnGateway.
:type resource_group_name: str
:param gateway_name: The name of the gateway.
:type gateway_name: str
:param connection_name: The name of the connection.
:type connection_name: str
:param vpn_connection_parameters: Parameters supplied to create or Update a VPN Connection.
:type vpn_connection_parameters: ~azure.mgmt.network.v2020_03_01.models.VpnConnection
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be ARMPolling.
Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of LROPoller that returns either VpnConnection or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[~azure.mgmt.network.v2020_03_01.models.VpnConnection]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.VpnConnection"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._create_or_update_initial(
resource_group_name=resource_group_name,
gateway_name=gateway_name,
connection_name=connection_name,
vpn_connection_parameters=vpn_connection_parameters,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize('VpnConnection', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'gatewayName': self._serialize.url("gateway_name", gateway_name, 'str'),
'connectionName': self._serialize.url("connection_name", connection_name, 'str'),
}
if polling is True: polling_method = ARMPolling(lro_delay, lro_options={'final-state-via': 'azure-async-operation'}, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/vpnGateways/{gatewayName}/vpnConnections/{connectionName}'} # type: ignore
def _delete_initial(
self,
resource_group_name, # type: str
gateway_name, # type: str
connection_name, # type: str
**kwargs # type: Any
):
# type: (...) -> None
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-03-01"
accept = "application/json"
# Construct URL
url = self._delete_initial.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'gatewayName': self._serialize.url("gateway_name", gateway_name, 'str'),
'connectionName': self._serialize.url("connection_name", connection_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.delete(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_delete_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/vpnGateways/{gatewayName}/vpnConnections/{connectionName}'} # type: ignore
def begin_delete(
self,
resource_group_name, # type: str
gateway_name, # type: str
connection_name, # type: str
**kwargs # type: Any
):
# type: (...) -> LROPoller[None]
"""Deletes a vpn connection.
:param resource_group_name: The resource group name of the VpnGateway.
:type resource_group_name: str
:param gateway_name: The name of the gateway.
:type gateway_name: str
:param connection_name: The name of the connection.
:type connection_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be ARMPolling.
Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of LROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[None]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType[None]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._delete_initial(
resource_group_name=resource_group_name,
gateway_name=gateway_name,
connection_name=connection_name,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
if cls:
return cls(pipeline_response, None, {})
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'gatewayName': self._serialize.url("gateway_name", gateway_name, 'str'),
'connectionName': self._serialize.url("connection_name", connection_name, 'str'),
}
if polling is True: polling_method = ARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/vpnGateways/{gatewayName}/vpnConnections/{connectionName}'} # type: ignore
def list_by_vpn_gateway(
self,
resource_group_name, # type: str
gateway_name, # type: str
**kwargs # type: Any
):
# type: (...) -> Iterable["_models.ListVpnConnectionsResult"]
"""Retrieves all vpn connections for a particular virtual wan vpn gateway.
:param resource_group_name: The resource group name of the VpnGateway.
:type resource_group_name: str
:param gateway_name: The name of the gateway.
:type gateway_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either ListVpnConnectionsResult or the result of cls(response)
:rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.network.v2020_03_01.models.ListVpnConnectionsResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.ListVpnConnectionsResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-03-01"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list_by_vpn_gateway.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'gatewayName': self._serialize.url("gateway_name", gateway_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
def extract_data(pipeline_response):
deserialized = self._deserialize('ListVpnConnectionsResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, iter(list_of_elem)
def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return ItemPaged(
get_next, extract_data
)
list_by_vpn_gateway.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/vpnGateways/{gatewayName}/vpnConnections'} # type: ignore
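# A minimal usage sketch (illustrative only): this operations class is normally
# reached through the generated management client rather than instantiated
# directly; the resource names below are hypothetical.
#   from azure.identity import DefaultAzureCredential
#   from azure.mgmt.network import NetworkManagementClient
#   client = NetworkManagementClient(DefaultAzureCredential(), "<subscription-id>")
#   conn = client.vpn_connections.get("my-rg", "my-vpn-gateway", "my-connection")
#   client.vpn_connections.begin_delete(
#       "my-rg", "my-vpn-gateway", "my-connection").result()  # wait for the LRO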
|
|
import cPickle as pickle
#from matplotlib.patches import Rectangle
#from matplotlib.pyplot import gca,tight_layout,figure,plot
from pylab import *
#####################################
# Function to calculate theoretical #
# profiles from continuity and #
# flux steady state assumptions #
#####################################
def theoreticalprofile(K1, K2, H1, H2, U, nlayers, unit_area = 1., m = 0.5, n = 1, hshift = 0):
if(abs(n-1)>1E-6):
ratio = (K1/K2)**(1/(1-n))
E2 = U*(H1 + ratio * H2)/(ratio*(H1+H2))
E1 = ratio * E2
Sx1 = (E1/K1)**(1/n) * unit_area**(n/m)
#E2 = U*((H2/H1) + (K2/K1)**(1/(1-n)))/(1 + H2/H1)
#E2 = U * H2 / (H1 + H2 - H1 * U / E1)
Sx2 = (E2/K2)**(1/n) * unit_area**(n/m)
output = empty([2*nlayers+1,2])
output[0,0]=hshift
output[0,1]=0
for i in range(nlayers):
output[2*i+1,0]=output[2*i,0] + H1/Sx1
output[2*i+2,0]=output[2*i+1,0] + H2/Sx2
output[2*i+1,1]=output[2*i,1] + H1
output[2*i+2,1]=output[2*i+1,1] + H2
return output
else:
Sx1 = U/K1 * unit_area**(n/m)
Sx2 = U/K2 * unit_area**(n/m)
#print Sx1, Sx2
output = empty([4*nlayers+2,2])
output[0,0]=hshift
output[0,1]=0
output[1,0]=hshift
output[1,1]=0
for i in range(nlayers):
output[4*i+2,0]=output[4*i+1,0] + H1/Sx1
output[4*i+2,1]=output[4*i+1,1] + H1
output[4*i+3,0]=output[4*i,0] + H1/Sx2
output[4*i+3,1]=output[4*i,1] + H1
output[4*i+4,0]=output[4*i+3,0] + H2/Sx2
output[4*i+4,1]=output[4*i+3,1] + H2
output[4*i+5,0]=output[4*i+2,0] + H2/Sx1
output[4*i+5,1]=output[4*i+2,1] + H2
for i in range(nlayers):
output[4*i+4,0]=output[4*i+5,0]
output[4*i+4,1]=output[4*i+3,1] + Sx2 * (output[4*i+4,0]-output[4*i+3,0])
return output
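# Parameter summary for theoreticalprofile() (interpretation inferred from the
# figure-generation calls below; treat it as illustrative): K1/K2 are the
# erodibilities of the two alternating layer types (larger K = weaker rock,
# cf. k_w/k_s in Figure 3), H1/H2 their thicknesses, U the uplift rate,
# nlayers the number of layer pairs, m/n the stream-power exponents, and
# hshift a horizontal offset for the profile origin. Example mirroring the
# n = 2/3 call used for Figure 4:
#   profile = theoreticalprofile(2e-4, 1e-4, 50, 50, 2.5e-3, 6,
#                                m=0.33, n=2./3., unit_area=1.)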
#############################################
#Figure 3 (slope ratios as a function of n)
#############################################
figure(figsize=(5,4))
n=linspace(0.5,2.0,50)
k_w=2.
k_s=1.
k_w2 = 4.
k_w3 = 1.25
s_ratio = (k_w/k_s)**(1./(1.-n))
s_ratio_trad = (k_s/k_w)**(1./n)
s_ratio2 = (k_w2/k_s)**(1./(1.-n))
s_ratio_trad2 = (k_s/k_w2)**(1./n)
s_ratio3 = (k_w3/k_s)**(1./(1.-n))
s_ratio_trad3 = (k_s/k_w3)**(1./n)
big = s_ratio>1
small = s_ratio<1
semilogy(n[big],s_ratio[big],'-k',lw=2)
semilogy(n,s_ratio_trad,'--k',lw=2)
semilogy(n[small],s_ratio[small],'-k',lw=2)
plot([0.5,2.0],[1,1],':k',lw=1)
plot([1,1],[1e-5,1e5],':k',lw=1)
ylim([10**-3,10**3])
xlim([.5,2.0])
legend(['Horizontal contacts','Vertical contacts'],loc='upper right',fontsize=10.)
xlabel('$n$ value in Erosion law',fontsize=12)
ylabel('$S_w/S_s$',fontsize=14)
tight_layout()
savefig('fig3.pdf')
#################################################
#Stream profile figures (high uplift, Figure 4) #
#################################################
#Panel a
#n=2/3
figure(figsize=(9,6))
res = pickle.load(open('high_uplift_0.66.pkl','rb'))
chi = res['chi']
elev = res['elev']
start_soft = res['start_soft']
end_soft = res['end_soft']
start_soft_chi = res['start_soft_chi']
end_soft_chi = res['end_soft_chi']
uplift = res['uplift']
x = res['x']/1000. #convert to km
subplot(2,3,1)
idx = 450 #final timestep before duration was reached
nstep = 15
plot(chi,elev[idx], color='black', lw=2)
plot(chi,elev[idx-nstep]-uplift[idx-nstep]+uplift[idx], 'k', lw=2)
plot(chi,elev[idx+nstep]-uplift[idx+nstep]+uplift[idx], 'k', lw=2)
ax = gca()
ax1 = ax #save for use in shared axis
for i, lly in enumerate(end_soft[idx]):
r = Rectangle((0,lly),200,start_soft[idx][i]-lly,color='grey',alpha=.5)
ax.add_patch(r)
xlabel('$\chi$ [m]',fontsize=16)
ylabel('Elevation [m]',fontsize=12)
xlim([0,7])
ylim([0,600])
text(4,210,'weak rocks', fontsize=12)
text(0.05,0.85,'A', fontsize=20, transform=gca().transAxes)
bbox_props = dict(boxstyle='round', fc="w",alpha=0.9, ec="k")
text(0.07,0.67,'n=2/3', fontsize=14, transform=gca().transAxes,bbox=bbox_props)
#plot theoretical profile
profile = theoreticalprofile(2e-4,1e-4,50,50,2.5e-3,6,m=.33,n=2./3., unit_area=1.)
plot(profile[:,0],profile[:,1], 'k--o',ms=4)
tight_layout()
#Panel D
subplot(2,3,4)
idx = 450 #final timestep before duration was reached
nstep = 15
plot(x,elev[idx], color='black', lw=2)
plot(x,elev[idx-nstep]-uplift[idx-nstep]+uplift[idx], 'k', lw=2)
plot(x,elev[idx+nstep]-uplift[idx+nstep]+uplift[idx], 'k', lw=2)
ax = gca()
ax2 = ax #save for shared axis
ax.invert_xaxis()
for i, lly in enumerate(end_soft[idx]):
r = Rectangle((0,lly),200,start_soft[idx][i]-lly,color='grey',alpha=.5)
ax.add_patch(r)
xlabel('Distance from divide [km]',fontsize=12)
ylabel('Elevation [m]',fontsize=12)
xlim([50,0])
ylim([0,600])
text(45,310,'weak rocks', fontsize=12)
text(0.05,0.85,'D', fontsize=20, transform=gca().transAxes)
bbox_props = dict(boxstyle='round', fc="w",alpha=0.9, ec="k")
text(0.07,0.67,'n=2/3', fontsize=14, transform=gca().transAxes,bbox=bbox_props)
tight_layout()
#Panel B
#n=3/2
res = pickle.load(open('high_uplift_1.5.pkl','rb'))
chi = res['chi']
elev = res['elev']
start_soft = res['start_soft']
end_soft = res['end_soft']
uplift = res['uplift']
x = res['x']/1000.
subplot(2,3,2,sharey=ax1)
idx = 445
nstep = 15
plot(chi,elev[idx], color='black', lw=2)
plot(chi,elev[idx-nstep-2]-uplift[idx-nstep-2]+uplift[idx], 'k', lw=2)
plot(chi,elev[idx+nstep]-uplift[idx+nstep]+uplift[idx], 'k', lw=2)
ax = gca()
for i, lly in enumerate(end_soft[idx]):
r = Rectangle((0,lly),200,start_soft[idx][i]-lly,color='grey',alpha=.5)
ax.add_patch(r)
xlabel('$\chi$ [m]',fontsize=16)
xlim([0,7])
ylim([0,600])
setp(ax.get_yticklabels(), visible=False)
bbox_props = dict(boxstyle='round', fc="w",alpha=0.9, ec="k")
text(0.07,0.67,'n=3/2', fontsize=14, transform=gca().transAxes,bbox=bbox_props)
text(4,210,'weak rocks', fontsize=12)
text(0.05,0.85,'B', fontsize=20, transform=gca().transAxes)
#make theoretical profile
profile = theoreticalprofile(3e-6,1.5e-6,50,50,2.5e-3,6,m=.75,n=3./2., unit_area=1.)
plot(profile[:,0],profile[:,1], 'k--o',ms=4)
tight_layout()
#Panel e
subplot(2,3,5, sharey=ax2)
plot(x,elev[idx], color='black', lw=2)
plot(x,elev[idx-nstep-2]-uplift[idx-nstep-2]+uplift[idx], 'k', lw=2)
plot(x,elev[idx+nstep]-uplift[idx+nstep]+uplift[idx], 'k', lw=2)
ax = gca()
for i, lly in enumerate(end_soft[idx]):
r = Rectangle((0,lly),200,start_soft[idx][i]-lly,color='grey',alpha=.5)
ax.add_patch(r)
ax.invert_xaxis()
setp(ax.get_yticklabels(), visible=False)
xlabel('Distance from divide [km]',fontsize=12)
xlim([50,0])
ylim([0,600])
bbox_props = dict(boxstyle='round', fc="w",alpha=0.9, ec="k")
text(0.07,0.67,'n=3/2', fontsize=14, transform=gca().transAxes,bbox=bbox_props)
text(45,310,'weak rocks', fontsize=12)
text(0.05,0.85,'E', fontsize=20, transform=gca().transAxes)
#Panel C
#n=1
res = pickle.load(open('high_uplift_1.pkl','rb'))
chi = res['chi']
elev = res['elev']
start_soft = res['start_soft']
end_soft = res['end_soft']
uplift = res['uplift']
x = res['x']/1000.
subplot(2,3,3, sharey=ax1)
idx = 445
nstep = 20
plot(chi,elev[idx], color='black', lw=2)
plot(chi,elev[idx-nstep-1]-uplift[idx-nstep-1]+uplift[idx], 'k', lw=2)
plot(chi,elev[idx+nstep]-uplift[idx+nstep]+uplift[idx], 'k', lw=2)
ax = gca()
for i, lly in enumerate(end_soft[idx]):
r = Rectangle((0,lly),200,start_soft[idx][i]-lly,color='grey',alpha=.5)
ax.add_patch(r)
xlabel('$\chi$ [m]',fontsize=16)
#ylabel('Elevation [m]',fontsize=12)
xlim([0,7])
ylim([0,600])
setp(ax.get_yticklabels(), visible=False)
bbox_props = dict(boxstyle='round', fc="w",alpha=0.9, ec="k")
text(0.07,0.67,'n=1', fontsize=14, transform=gca().transAxes,bbox=bbox_props)
text(4,210,'weak rocks', fontsize=12)
text(0.05,0.85,'C', fontsize=20, transform=gca().transAxes)
tight_layout()
#Panel f
subplot(2,3,6,sharey=ax2)
plot(x,elev[idx], color='black', lw=2)
plot(x,elev[idx-nstep-1]-uplift[idx-nstep-1]+uplift[idx], 'k', lw=2)
plot(x,elev[idx+nstep]-uplift[idx+nstep]+uplift[idx], 'k', lw=2)
ax = gca()
for i, lly in enumerate(end_soft[idx]):
r = Rectangle((0,lly),200,start_soft[idx][i]-lly,color='grey',alpha=.5)
ax.add_patch(r)
ax.invert_xaxis()
xlabel('Distance from divide [km]',fontsize=12)
#ylabel('Elevation [m]',fontsize=12)
xlim([50,0])
ylim([0,600])
bbox_props = dict(boxstyle='round', fc="w",alpha=0.9, ec="k")
text(0.07,0.67,'n=1', fontsize=14, transform=gca().transAxes,bbox=bbox_props)
text(45,310,'weak rocks', fontsize=12)
text(0.05,0.85,'F', fontsize=20, transform=gca().transAxes)
setp(ax.get_yticklabels(), visible=False)
tight_layout()
savefig('fig4.pdf')
#######################################
## Stream profiles (Low uplift, Fig 5)#
#######################################
#Panel A
#n=2/3
figure(figsize=(9,6))
res = pickle.load(open('low_uplift_0.66.pkl','rb'))
chi = res['chi']
elev = res['elev']
start_soft = res['start_soft']
end_soft = res['end_soft']
start_soft_chi = res['start_soft_chi']
end_soft_chi = res['end_soft_chi']
uplift = res['uplift']
x = res['x']/1000. #convert to km
subplot(2,3,1)
idx = 436 #final timestep before duration was reached
nstep = 15
plot(chi,elev[idx], color='black', lw=2)
plot(chi,elev[idx-nstep]-uplift[idx-nstep]+uplift[idx], 'k', lw=2)
plot(chi,elev[idx+nstep]-uplift[idx+nstep]+uplift[idx], 'k', lw=2)
ax = gca()
ax1 = ax #save for use in shared axis
for i, lly in enumerate(end_soft[idx]):
r = Rectangle((0,lly),200,start_soft[idx][i]-lly,color='grey',alpha=.5)
ax.add_patch(r)
xlabel('$\chi$ [m]',fontsize=16)
ylabel('Elevation [m]',fontsize=12)
xlim([0,9])
ylim([0,100])
text(4,210,'weak rocks', fontsize=12)
text(0.05,0.85,'A', fontsize=20, transform=gca().transAxes)
bbox_props = dict(boxstyle='round', fc="w",alpha=0.9, ec="k")
text(0.07,0.67,'n=2/3', fontsize=14, transform=gca().transAxes,bbox=bbox_props)
#plot theoretical profile
profile = theoreticalprofile(8e-5,4e-5,10,10,2.5e-4,5,m=.33,n=2./3., unit_area=1.)
plot(profile[:,0],profile[:,1], 'k--o',ms=4)
tight_layout()
#Panel D
subplot(2,3,4)
plot(x,elev[idx], color='black', lw=2)
plot(x,elev[idx-nstep]-uplift[idx-nstep]+uplift[idx], 'k', lw=2)
plot(x,elev[idx+nstep]-uplift[idx+nstep]+uplift[idx], 'k', lw=2)
ax = gca()
ax2 = ax #save for shared axis
ax.invert_xaxis()
for i, lly in enumerate(end_soft[idx]):
r = Rectangle((0,lly),200,start_soft[idx][i]-lly,color='grey',alpha=.5)
ax.add_patch(r)
xlabel('Distance from divide [km]',fontsize=12)
ylabel('Elevation [m]',fontsize=12)
xlim([200,0])
ylim([0,100])
text(45,310,'weak rocks', fontsize=12)
text(0.05,0.85,'D', fontsize=20, transform=gca().transAxes)
bbox_props = dict(boxstyle='round', fc="w",alpha=0.9, ec="k")
text(0.07,0.67,'n=2/3', fontsize=14, transform=gca().transAxes,bbox=bbox_props)
tight_layout()
#Panel B
#n=3/2
res = pickle.load(open('low_uplift_1.5.pkl','rb'))
chi = res['chi']
elev = res['elev']
start_soft = res['start_soft']
end_soft = res['end_soft']
uplift = res['uplift']
x = res['x']/1000.
subplot(2,3,2,sharey=ax1)
idx = 439
nstep = 15
plot(chi,elev[idx], color='black', lw=2)
plot(chi,elev[idx-nstep-2]-uplift[idx-nstep-2]+uplift[idx], 'k', lw=2)
plot(chi,elev[idx+nstep]-uplift[idx+nstep]+uplift[idx], 'k', lw=2)
ax = gca()
for i, lly in enumerate(end_soft[idx]):
r = Rectangle((0,lly),200,start_soft[idx][i]-lly,color='grey',alpha=.5)
ax.add_patch(r)
xlabel('$\chi$ [m]',fontsize=16)
xlim([0,9])
ylim([0,100])
setp(ax.get_yticklabels(), visible=False)
bbox_props = dict(boxstyle='round', fc="w",alpha=0.9, ec="k")
text(0.07,0.67,'n=3/2', fontsize=14, transform=gca().transAxes,bbox=bbox_props)
text(4,210,'weak rocks', fontsize=12)
text(0.05,0.85,'B', fontsize=20, transform=gca().transAxes)
#make theoretical profile
profile = theoreticalprofile(6e-6,3e-6,10,10,2.5e-4,5,m=.75,n=3./2., unit_area=1.)
plot(profile[:,0],profile[:,1], 'k--o',ms=4)
tight_layout()
#Panel e
subplot(2,3,5, sharey=ax2)
plot(x,elev[idx], color='black', lw=2)
plot(x,elev[idx-nstep-2]-uplift[idx-nstep-2]+uplift[idx], 'k', lw=2)
plot(x,elev[idx+nstep]-uplift[idx+nstep]+uplift[idx], 'k', lw=2)
ax = gca()
for i, lly in enumerate(end_soft[idx]):
r = Rectangle((0,lly),200,start_soft[idx][i]-lly,color='grey',alpha=.5)
ax.add_patch(r)
ax.invert_xaxis()
setp(ax.get_yticklabels(), visible=False)
xlabel('Distance from divide [km]',fontsize=12)
xlim([200,0])
ylim([0,100])
bbox_props = dict(boxstyle='round', fc="w",alpha=0.9, ec="k")
text(0.07,0.67,'n=3/2', fontsize=14, transform=gca().transAxes,bbox=bbox_props)
text(45,310,'weak rocks', fontsize=12)
text(0.05,0.85,'E', fontsize=20, transform=gca().transAxes)
#Panel C
#n=1
res = pickle.load(open('low_uplift_1.pkl','rb'))
chi = res['chi']
elev = res['elev']
start_soft = res['start_soft']
end_soft = res['end_soft']
uplift = res['uplift']
x = res['x']/1000.
subplot(2,3,3, sharey=ax1)
idx = 454
nstep = 20
plot(chi,elev[idx], color='black', lw=2)
plot(chi,elev[idx-nstep-1]-uplift[idx-nstep-1]+uplift[idx], 'k', lw=2)
plot(chi,elev[idx+nstep]-uplift[idx+nstep]+uplift[idx], 'k', lw=2)
ax = gca()
for i, lly in enumerate(end_soft[idx]):
r = Rectangle((0,lly),200,start_soft[idx][i]-lly,color='grey',alpha=.5)
ax.add_patch(r)
xlabel('$\chi$ [m]',fontsize=16)
#ylabel('Elevation [m]',fontsize=12)
xlim([0,9])
ylim([0,100])
setp(ax.get_yticklabels(), visible=False)
bbox_props = dict(boxstyle='round', fc="w",alpha=0.9, ec="k")
text(0.07,0.67,'n=1', fontsize=14, transform=gca().transAxes,bbox=bbox_props)
text(4,210,'weak rocks', fontsize=12)
text(0.05,0.85,'C', fontsize=20, transform=gca().transAxes)
tight_layout()
#Panel f
subplot(2,3,6,sharey=ax2)
plot(x,elev[idx], color='black', lw=2)
plot(x,elev[idx-nstep-1]-uplift[idx-nstep-1]+uplift[idx], 'k', lw=2)
plot(x,elev[idx+nstep]-uplift[idx+nstep]+uplift[idx], 'k', lw=2)
ax = gca()
for i, lly in enumerate(end_soft[idx]):
r = Rectangle((0,lly),200,start_soft[idx][i]-lly,color='grey',alpha=.5)
ax.add_patch(r)
ax.invert_xaxis()
xlabel('Distance from divide [km]',fontsize=12)
xlim([200,0])
ylim([0,100])
bbox_props = dict(boxstyle='round', fc="w",alpha=0.9, ec="k")
text(0.07,0.67,'n=1', fontsize=14, transform=gca().transAxes,bbox=bbox_props)
text(45,310,'weak rocks', fontsize=12)
text(0.05,0.85,'F', fontsize=20, transform=gca().transAxes)
setp(ax.get_yticklabels(), visible=False)
tight_layout()
savefig('fig5.pdf')
|
|
# coding=utf-8
# Copyright 2022 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Defines model architectures."""
import tensorflow as tf
layers = tf.keras.layers
# Classifier type.
TYPE_CLASSIFIER_CONVNET = 'CONVNET'
TYPE_CLASSIFIER_RESNET = 'RESNET'
TYPE_CLASSIFIER_RESTCN = 'RESTCN'
SUPPORTED_CLASSIFIER_TYPES = [
TYPE_CLASSIFIER_CONVNET, TYPE_CLASSIFIER_RESNET, TYPE_CLASSIFIER_RESTCN
]
def get_temporal_classifier(classifier_type, **kwargs):
"""Gets classifier.
Args:
classifier_type: A string for classifier type.
**kwargs: A dictionary for additional arguments.
Returns:
A classifier instance.
"""
if classifier_type == TYPE_CLASSIFIER_CONVNET:
return build_simple_temporal_model(**kwargs)
elif classifier_type == TYPE_CLASSIFIER_RESNET:
return build_residual_temporal_model(**kwargs)
elif classifier_type == TYPE_CLASSIFIER_RESTCN:
return build_residual_temporal_convolutional_model(**kwargs)
else:
raise ValueError('Unknown classifier: {}'.format(classifier_type))
def build_residual_block(input_layer,
feature_dim,
stride,
activation='relu',
dropout_rate=0.5,
**layer_kwargs):
"""Builds a residual block.
Args:
input_layer: A `tf.keras.layers.Layer` object for the input layer.
feature_dim: An integer for the feature dimension of all layers.
stride: An integer for the stride.
activation: A string for the activation function.
dropout_rate: A float between 0 and 1 for the dropout rate.
**layer_kwargs: A dictionary for additional layer arguments.
Returns:
The output tensor of the residual block.
"""
layer_kwargs.update(dict(padding='same'))
conv_x = layers.Conv1D(
filters=feature_dim,
kernel_size=7,
strides=stride,
**layer_kwargs)(input_layer)
conv_x = layers.BatchNormalization()(conv_x)
conv_x = layers.Activation(activation)(conv_x)
conv_x = layers.Dropout(dropout_rate)(conv_x)
conv_y = layers.Conv1D(
filters=feature_dim, kernel_size=5, **layer_kwargs)(conv_x)
conv_y = layers.BatchNormalization()(conv_y)
conv_y = layers.Activation(activation)(conv_y)
conv_y = layers.Dropout(dropout_rate)(conv_y)
conv_z = layers.Conv1D(
filters=feature_dim, kernel_size=3, **layer_kwargs)(conv_y)
conv_z = layers.BatchNormalization()(conv_z)
shortcut_y = layers.Conv1D(
filters=feature_dim,
kernel_size=1,
strides=stride,
**layer_kwargs)(input_layer)
shortcut_y = layers.BatchNormalization()(shortcut_y)
output_layer = layers.add([shortcut_y, conv_z])
output_layer = layers.Activation(activation)(output_layer)
return output_layer
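# Shape sketch (illustrative, not from the original comments): for an input of
# shape (batch, T, C) and stride 2, every branch above ends at
# (batch, ceil(T / 2), feature_dim), so the strided 1x1 shortcut keeps the
# residual addition shape-compatible.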
def build_residual_temporal_model(input_shape,
num_classes,
channel_depths=(64, 128, 256),
activation='relu',
temporal_stride=2,
dropout_rate=0.5):
"""Builds a residual temporal model for classifier.
Reference:
Fawaz et al. Deep learning for time series classification: a review.
https://arxiv.org/pdf/1809.04356.pdf.
Args:
input_shape: A tuple for the shape of inputs.
num_classes: An integer for the number of classes.
channel_depths: A tuple for the feature dimension of all layers.
activation: A string for the activation function.
temporal_stride: An integer for the stride of temporal dimension.
dropout_rate: A float between 0 and 1 for the dropout rate.
Returns:
A configured model.
"""
layer_kwargs = dict(
kernel_initializer='he_normal', bias_initializer='he_normal')
input_layer = tf.keras.Input(input_shape)
output_layer = layers.Conv1D(
filters=channel_depths[0], kernel_size=7, padding='same', **layer_kwargs)(
input_layer)
output_layer = layers.BatchNormalization()(output_layer)
output_layer = layers.Activation(activation)(output_layer)
stride_layout = [temporal_stride] * (len(channel_depths) - 1) + [1]
for feature_dim, stride in zip(channel_depths, stride_layout):
output_layer = build_residual_block(
output_layer,
feature_dim,
stride,
activation=activation,
dropout_rate=dropout_rate,
**layer_kwargs)
output_layer = layers.GlobalAveragePooling1D()(output_layer)
output_layer = layers.Dense(num_classes, **layer_kwargs)(output_layer)
return tf.keras.models.Model(inputs=input_layer, outputs=output_layer)
def build_simple_temporal_model(input_shape,
num_classes,
channel_depths=(64, 128, 256),
activation='relu',
temporal_stride=2,
kernel_size=7,
dropout_rate=0.5):
"""Builds a simple temporal model for classifier.
Args:
input_shape: A tuple for the shape of inputs.
num_classes: An integer for the number of classes.
channel_depths: A tuple for the feature dimension of all layers.
activation: A string for the activation function.
temporal_stride: An integer for the stride of temporal dimension.
kernel_size: An integer for the kernel size of the 1D Conv layers.
dropout_rate: A float between 0 and 1 for the dropout rate.
Returns:
A configured model.
"""
layer_kwargs = dict(
kernel_initializer='he_normal', bias_initializer='he_normal')
input_layer = tf.keras.Input(input_shape)
output_layer = input_layer
for feature_dim in channel_depths:
output_layer = layers.Conv1D(
filters=feature_dim,
kernel_size=kernel_size,
strides=temporal_stride,
padding='same',
**layer_kwargs)(output_layer)
output_layer = layers.BatchNormalization()(output_layer)
output_layer = layers.Activation(activation)(output_layer)
output_layer = layers.Dropout(dropout_rate)(output_layer)
output_layer = layers.GlobalAveragePooling1D()(output_layer)
output_layer = layers.Dense(num_classes, **layer_kwargs)(output_layer)
return tf.keras.models.Model(inputs=input_layer, outputs=output_layer)
def build_residual_temporal_convolutional_model(
input_shape,
num_classes,
pooling=True,
activation='relu',
dropout_rate=0.5,
kernel_regularizer=tf.keras.regularizers.l1(1.e-4)):
"""Builds a residual temporal convolutional model for classifier (Res-TCN).
Reference:
Kim et al. Interpretable 3D Human Action Analysis with Temporal
Convolutional Networks. https://arxiv.org/pdf/1704.04516.pdf.
Args:
input_shape: A tuple for the shape of inputs.
num_classes: An integer for the number of classes.
pooling: A boolean for whether to use average pooling.
activation: A string for the activation function.
dropout_rate: A float between 0 and 1 for the dropout rate.
kernel_regularizer: A `tf.keras.regularizers` instance for regularizer.
Returns:
A configured model.
"""
row_axis = 1
channel_axis = 2
# Each tuple in config represents (stride, kernel_size, feature_dim).
config = [[(1, 8, 64)], [(1, 8, 64)], [(1, 8, 64)], [(2, 8, 128)],
[(1, 8, 128)], [(1, 8, 128)], [(2, 8, 256)], [(1, 8, 256)],
[(1, 8, 256)]]
initial_stride = 1
initial_kernel_size = 8
initial_num = 64
model_input = tf.keras.Input(shape=input_shape)
model = layers.Conv1D(
initial_num,
kernel_size=initial_kernel_size,
strides=initial_stride,
padding='same',
kernel_initializer='he_normal',
kernel_regularizer=kernel_regularizer)(
model_input)
for depth in range(len(config)):
for stride, kernel_size, feature_dim in config[depth]:
bn = layers.BatchNormalization(axis=channel_axis)(model)
relu = layers.Activation(activation)(bn)
dr = layers.Dropout(dropout_rate)(relu)
res = layers.Conv1D(
feature_dim,
kernel_size=kernel_size,
strides=stride,
padding='same',
kernel_initializer='he_normal',
kernel_regularizer=kernel_regularizer)(
dr)
res_shape = tf.keras.backend.int_shape(res)
model_shape = tf.keras.backend.int_shape(model)
if res_shape[channel_axis] != model_shape[channel_axis]:
model = layers.Conv1D(
feature_dim,
kernel_size=1,
strides=stride,
padding='same',
kernel_initializer='he_normal',
kernel_regularizer=kernel_regularizer)(
model)
model = layers.add([model, res])
bn = layers.BatchNormalization(axis=channel_axis)(model)
model = layers.Activation(activation)(bn)
if pooling:
pool_window_shape = tf.keras.backend.int_shape(model)
gap = layers.AveragePooling1D(pool_window_shape[row_axis], strides=1)(model)
flatten = layers.Flatten()(gap)
else:
flatten = layers.Flatten()(model)
dense = layers.Dense(
units=num_classes, activation='softmax', kernel_initializer='he_normal')(
flatten)
return tf.keras.models.Model(inputs=model_input, outputs=dense)
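# Minimal usage sketch, guarded so it only runs when this file is executed
# directly; the input shape and class count are illustrative assumptions, not
# values defined in this codebase.
if __name__ == '__main__':
  example_model = get_temporal_classifier(
      TYPE_CLASSIFIER_CONVNET, input_shape=(128, 32), num_classes=10)
  example_model.summary()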
|
|
# Copyright 2013 IBM Corp.
# Copyright 2010 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from xml.dom import minidom
import mock
import webob
import webob.dec
import webob.exc
import nova.api.openstack
from nova.api.openstack import common
from nova.api.openstack import wsgi
from nova import exception
from nova.openstack.common import gettextutils
from nova.openstack.common import jsonutils
from nova import test
class TestFaultWrapper(test.NoDBTestCase):
"""Tests covering `nova.api.openstack:FaultWrapper` class."""
@mock.patch('nova.openstack.common.gettextutils.get_localized_message')
def test_safe_exception_translated(self, mock_get_localized):
msg = gettextutils.Message('Should be translated.', 'nova')
safe_exception = exception.NotFound()
safe_exception.msg_fmt = msg
safe_exception.safe = True
safe_exception.code = 404
req = webob.Request.blank('/')
def fake_translate(mesg, locale):
if str(mesg) == "Should be translated.":
return "I've been translated!"
return mesg
mock_get_localized.side_effect = fake_translate
def raiser(*args, **kwargs):
raise safe_exception
wrapper = nova.api.openstack.FaultWrapper(raiser)
response = req.get_response(wrapper)
self.assertIn("I've been translated!", unicode(response.body))
mock_get_localized.assert_any_call(
u'Should be translated.', None)
class TestFaults(test.NoDBTestCase):
"""Tests covering `nova.api.openstack.faults:Fault` class."""
def _prepare_xml(self, xml_string):
"""Remove characters from string which hinder XML equality testing."""
xml_string = xml_string.replace(" ", "")
xml_string = xml_string.replace("\n", "")
xml_string = xml_string.replace("\t", "")
return xml_string
def test_400_fault_json(self):
# Test fault serialized to JSON via file-extension and/or header.
requests = [
webob.Request.blank('/.json'),
webob.Request.blank('/', headers={"Accept": "application/json"}),
]
for request in requests:
fault = wsgi.Fault(webob.exc.HTTPBadRequest(explanation='scram'))
response = request.get_response(fault)
expected = {
"badRequest": {
"message": "scram",
"code": 400,
},
}
actual = jsonutils.loads(response.body)
self.assertEqual(response.content_type, "application/json")
self.assertEqual(expected, actual)
def test_413_fault_json(self):
# Test fault serialized to JSON via file-extension and/or header.
requests = [
webob.Request.blank('/.json'),
webob.Request.blank('/', headers={"Accept": "application/json"}),
]
for request in requests:
exc = webob.exc.HTTPRequestEntityTooLarge
# NOTE(aloga): we intentionally pass an integer for the
# 'Retry-After' header. It should then be converted to a str
fault = wsgi.Fault(exc(explanation='sorry',
headers={'Retry-After': 4}))
response = request.get_response(fault)
expected = {
"overLimit": {
"message": "sorry",
"code": 413,
"retryAfter": "4",
},
}
actual = jsonutils.loads(response.body)
self.assertEqual(response.content_type, "application/json")
self.assertEqual(expected, actual)
def test_429_fault_json(self):
# Test fault serialized to JSON via file-extension and/or header.
requests = [
webob.Request.blank('/.json'),
webob.Request.blank('/', headers={"Accept": "application/json"}),
]
for request in requests:
exc = webob.exc.HTTPTooManyRequests
# NOTE(aloga): we intentionally pass an integer for the
# 'Retry-After' header. It should then be converted to a str
fault = wsgi.Fault(exc(explanation='sorry',
headers={'Retry-After': 4}))
response = request.get_response(fault)
expected = {
"overLimit": {
"message": "sorry",
"code": 429,
"retryAfter": "4",
},
}
actual = jsonutils.loads(response.body)
self.assertEqual(response.content_type, "application/json")
self.assertEqual(expected, actual)
def test_raise(self):
# Ensure the ability to raise :class:`Fault` in WSGI-ified methods.
@webob.dec.wsgify
def raiser(req):
raise wsgi.Fault(webob.exc.HTTPNotFound(explanation='whut?'))
req = webob.Request.blank('/.xml')
resp = req.get_response(raiser)
self.assertEqual(resp.content_type, "application/xml")
self.assertEqual(resp.status_int, 404)
self.assertIn('whut?', resp.body)
def test_raise_403(self):
# Ensure the ability to raise :class:`Fault` in WSGI-ified methods.
@webob.dec.wsgify
def raiser(req):
raise wsgi.Fault(webob.exc.HTTPForbidden(explanation='whut?'))
req = webob.Request.blank('/.xml')
resp = req.get_response(raiser)
self.assertEqual(resp.content_type, "application/xml")
self.assertEqual(resp.status_int, 403)
self.assertNotIn('resizeNotAllowed', resp.body)
self.assertIn('forbidden', resp.body)
def test_raise_localize_explanation(self):
msgid = "String with params: %s"
params = ('blah', )
lazy_gettext = gettextutils._
expl = lazy_gettext(msgid) % params
@webob.dec.wsgify
def raiser(req):
raise wsgi.Fault(webob.exc.HTTPNotFound(explanation=expl))
req = webob.Request.blank('/.xml')
resp = req.get_response(raiser)
self.assertEqual(resp.content_type, "application/xml")
self.assertEqual(resp.status_int, 404)
self.assertIn((msgid % params), resp.body)
def test_fault_has_status_int(self):
# Ensure the status_int is set correctly on faults.
fault = wsgi.Fault(webob.exc.HTTPBadRequest(explanation='what?'))
self.assertEqual(fault.status_int, 400)
def test_xml_serializer(self):
# Ensure that a v1.1 request responds with a v1.1 xmlns.
request = webob.Request.blank('/v1.1',
headers={"Accept": "application/xml"})
fault = wsgi.Fault(webob.exc.HTTPBadRequest(explanation='scram'))
response = request.get_response(fault)
self.assertIn(common.XML_NS_V11, response.body)
self.assertEqual(response.content_type, "application/xml")
self.assertEqual(response.status_int, 400)
class FaultsXMLSerializationTestV11(test.NoDBTestCase):
"""Tests covering `nova.api.openstack.faults:Fault` class."""
def _prepare_xml(self, xml_string):
xml_string = xml_string.replace(" ", "")
xml_string = xml_string.replace("\n", "")
xml_string = xml_string.replace("\t", "")
return xml_string
def test_400_fault(self):
metadata = {'attributes': {"badRequest": 'code'}}
serializer = wsgi.XMLDictSerializer(metadata=metadata,
xmlns=common.XML_NS_V11)
fixture = {
"badRequest": {
"message": "scram",
"code": 400,
},
}
output = serializer.serialize(fixture)
actual = minidom.parseString(self._prepare_xml(output))
expected = minidom.parseString(self._prepare_xml("""
<badRequest code="400" xmlns="%s">
<message>scram</message>
</badRequest>
""") % common.XML_NS_V11)
self.assertEqual(expected.toxml(), actual.toxml())
def test_413_fault(self):
metadata = {'attributes': {"overLimit": 'code'}}
serializer = wsgi.XMLDictSerializer(metadata=metadata,
xmlns=common.XML_NS_V11)
fixture = {
"overLimit": {
"message": "sorry",
"code": 413,
"retryAfter": 4,
},
}
output = serializer.serialize(fixture)
actual = minidom.parseString(self._prepare_xml(output))
expected = minidom.parseString(self._prepare_xml("""
<overLimit code="413" xmlns="%s">
<message>sorry</message>
<retryAfter>4</retryAfter>
</overLimit>
""") % common.XML_NS_V11)
self.assertEqual(expected.toxml(), actual.toxml())
def test_429_fault(self):
metadata = {'attributes': {"overLimit": 'code'}}
serializer = wsgi.XMLDictSerializer(metadata=metadata,
xmlns=common.XML_NS_V11)
fixture = {
"overLimit": {
"message": "sorry",
"code": 429,
"retryAfter": 4,
},
}
output = serializer.serialize(fixture)
actual = minidom.parseString(self._prepare_xml(output))
expected = minidom.parseString(self._prepare_xml("""
<overLimit code="429" xmlns="%s">
<message>sorry</message>
<retryAfter>4</retryAfter>
</overLimit>
""") % common.XML_NS_V11)
self.assertEqual(expected.toxml(), actual.toxml())
def test_404_fault(self):
metadata = {'attributes': {"itemNotFound": 'code'}}
serializer = wsgi.XMLDictSerializer(metadata=metadata,
xmlns=common.XML_NS_V11)
fixture = {
"itemNotFound": {
"message": "sorry",
"code": 404,
},
}
output = serializer.serialize(fixture)
actual = minidom.parseString(self._prepare_xml(output))
expected = minidom.parseString(self._prepare_xml("""
<itemNotFound code="404" xmlns="%s">
<message>sorry</message>
</itemNotFound>
""") % common.XML_NS_V11)
self.assertEqual(expected.toxml(), actual.toxml())
|
|
#!/usr/bin/python
"""import23.py - Script to import the data into Gluu Server 2.3.x
"""
# Requires JSON Merge library
# wget https://github.com/avian2/jsonmerge/archive/master.zip
# unzip master.zip
# cd jsonmerge-master
# python setup.py install
# Also requires ldif.py in same folder
import os
import os.path
import shutil
import sys
import time
import traceback
from ldif import LDIFParser
from jsonmerge import merge
import base64
import json
import uuid
log = "./import23.log"
logError = "./import23.error"
password_file = "/root/.pw"
service = "/usr/sbin/service"
ldapmodify = "/opt/opendj/bin/ldapmodify"
ldapsearch = "/opt/opendj/bin/ldapsearch"
ldapdelete = "/opt/opendj/bin/ldapdelete"
ignore_files = ['101-ox.ldif',
'gluuImportPerson.properties',
'oxTrust.properties',
'oxauth-config.xml',
'oxauth-errors.json',
'oxauth.config.reload',
'oxauth-static-conf.json',
'oxtrust.config.reload'
]
ldap_creds = ['-h',
'localhost',
'-p',
'1389',
'-D',
'"cn=directory',
'manager"',
'-j',
password_file
]
class MyLDIF(LDIFParser):
def __init__(self, input, output):
LDIFParser.__init__(self, input)
self.targetDN = None
self.targetAttr = None
self.targetEntry = None
self.DNs = []
self.lastDN = None
self.lastEntry = None
def getResults(self):
return (self.targetDN, self.targetAttr)
def getDNs(self):
return self.DNs
def getLastEntry(self):
return self.lastEntry
def handle(self, dn, entry):
if self.targetDN == None:
self.targetDN = dn
self.lastDN = dn
self.DNs.append(dn)
self.lastEntry = entry
if dn.lower().strip() == self.targetDN.lower().strip():
self.targetEntry = entry
if entry.has_key(self.targetAttr):
self.targetAttr = entry[self.targetAttr]
def addEntry(dn, entry, ldifModFolder):
newLdif = """dn: %s
changetype: add
""" % dn
for attr in entry.keys():
for value in entry[attr]:
newLdif = newLdif + getMod(attr, value)
newLdif = newLdif + "\n"
f = open("%s/%s.ldif" % (ldifModFolder, str(uuid.uuid4())), 'w')
f.write(newLdif)
f.close()
def getNewConfig(fn):
# Backup the appliance config just in case!
args = [ldapsearch] + ldap_creds + \
['-b',
'o=gluu',
'objectclass=*']
output = getOutput(args)
f = open(fn, 'w')
f.write(output)
f.close()
logIt("Wrote new ldif to %s" % fn)
def copyFiles(backup23_folder):
os.path.walk ("%s/etc" % backup23_folder, walk_function, None)
os.path.walk ("%s/opt" % backup23_folder, walk_function, None)
os.path.walk ("%s/usr" % backup23_folder, walk_function, None)
def deleteEntries(dn_list):
for dn in dn_list:
cmd = [ldapdelete] + ldap_creds + [dn]
output = getOutput(cmd)
if output:
logIt(output)
else:
logIt("Error deleting %s" % dn)
def getAttributeValue(fn, targetAttr):
# Load oxAuth Config From LDIF
parser = MyLDIF(open(fn, 'rb'), sys.stdout)
parser.targetAttr = targetAttr
parser.parse()
value = parser.targetAttr
return value
def getOldEntryMap(folder):
files = os.listdir(folder)
dnMap = {}
for fn in files:
if (fn == "site.ldif") or (fn == "people.ldif"):
continue
dnList = getDns("%s/%s" % (folder,fn))
for dn in dnList:
dnMap[dn] = fn
return dnMap
def getEntry(fn, dn):
parser = MyLDIF(open(fn, 'rb'), sys.stdout)
parser.targetDN = dn
parser.parse()
return parser.targetEntry
def getDns(fn):
parser = MyLDIF(open(fn, 'rb'), sys.stdout)
parser.parse()
return parser.DNs
def getMod(attr, s):
val = str(s).strip()
if val.find('\n') > -1:
val = base64.b64encode(val)
return "%s\n" % tab_attr(attr, val, True)
elif len(val) > (78 - len(attr)):
return "%s\n" % tab_attr(attr, val)
else:
return "%s: %s\n" % (attr, val)
def getOutput(args):
try:
logIt("Running command : %s" % " ".join(args))
output = os.popen(" ".join(args)).read().strip()
return output
except:
logIt("Error running command : %s" % " ".join(args), True)
logIt(traceback.format_exc(), True)
sys.exit(1)
def logIt(msg, errorLog=False):
if errorLog:
f = open(logError, 'a')
f.write('%s %s\n' % (time.strftime('%X %x'), msg))
f.close()
f = open(log, 'a')
f.write('%s %s\n' % (time.strftime('%X %x'), msg))
f.close()
def restoreConfig(ldifFolder, newLdif, ldifModFolder):
ignoreList = ['objectClass', 'ou']
current_config_dns = getDns(newLdif)
oldDnMap = getOldEntryMap(ldifFolder)
for dn in oldDnMap.keys():
old_entry = getEntry("%s/%s" % (ldifFolder, oldDnMap[dn]), dn)
if dn not in current_config_dns:
addEntry(dn, old_entry, ldifModFolder)
continue
new_entry = getEntry(newLdif, dn)
for attr in old_entry.keys():
if attr in ignoreList:
continue
if not new_entry.has_key(attr):
writeMod(dn, attr, old_entry[attr], '%s/%s.ldif' % (ldifModFolder, str(uuid.uuid4())), True)
logIt("Adding attr %s to %s" % (attr, dn))
else:
mod_list = None
if old_entry[attr] != new_entry[attr]:
if len(old_entry[attr]) == 1:
try:
logIt("Merging json value for %s " % attr)
old_json = json.loads(old_entry[attr][0])
new_json = json.loads(new_entry[attr][0])
new_json = merge(new_json, old_json)
mod_list = [json.dumps(new_json)]
except:
mod_list = old_entry[attr]
else:
mod_list = old_entry[attr]
logIt("Keeping multiple old values for %s" % attr)
else:
continue
writeMod(dn, attr, mod_list, '%s/%s.ldif' % (ldifModFolder, str(uuid.uuid4())))
def startOpenDJ():
output = getOutput([service, 'opendj', 'start'])
if output.find("Directory Server has started successfully") > 0:
logIt("Directory Server has started successfully")
else:
logIt("OpenDJ did not start properly... exiting. Check /opt/opendj/logs/errors", True)
sys.exit(2)
def stopOpenDJ():
output = getOutput([service, 'opendj', 'stop'])
if output.find("Directory Server is now stopped") > 0:
logIt("Directory Server is now stopped")
else:
logIt("OpenDJ did not stop properly... exiting. Check /opt/opendj/logs/errors", True)
sys.exit(3)
def tab_attr(attr, value, encoded=False):
targetLength = 80
lines = ['%s: ' % attr]
if encoded:
lines = ['%s:: ' % attr]
for char in value:
current_line = lines[-1]
if len(current_line) < targetLength:
new_line = current_line + char
del lines[-1]
lines.append(new_line)
else:
lines.append(" " + char)
return "\n".join(lines)
def uploadLDIF(ldifFolder, outputLdifFolder):
files = os.listdir(outputLdifFolder)
for fn in files:
cmd = [ldapmodify] + ldap_creds + ['-a', '-f', "%s/%s" % (outputLdifFolder, fn)]
output = getOutput(cmd)
if output:
logIt(output)
else:
logIt("Error adding file %s" % fn, True)
# delete default admin user created in 2.4 install
dn_list = getDns("/opt/opendj/ldif/people.ldif")
deleteEntries(dn_list)
# Add People
cmd = [ldapmodify] + ldap_creds + ['-a', '-c', '-f', "%s/people.ldif" % ldifFolder]
output = getOutput(cmd)
if output:
logIt(output)
else:
logIt("Error adding people.ldif", True)
dn_list = getDns("%s/site.ldif" % ldifFolder)
if len(dn_list) > 2:
cmd = [ldapmodify] + ldap_creds + ['-a', '-c', '-f', "%s/site.ldif" % ldifFolder]
output = getOutput(cmd)
if output:
logIt(output)
else:
logIt("Error adding site.ldif", True)
def walk_function(a, dir, files):
for file in files:
if file in ignore_files:
continue
fn = "%s/%s" % (dir, file)
targetFn = fn[1:]
if os.path.isdir(fn):
if not os.path.exists(targetFn):
os.mkdir(targetFn)
else:
# It's a file...
try:
logIt("copying %s" % targetFn)
shutil.copyfile(fn, targetFn)
except:
logIt("Error copying %s" % targetFn, True)
def writeMod(dn, attr, value_list, fn, add=False):
operation = "replace"
if add:
operation = "add"
modLdif = """dn: %s
changetype: modify
%s: %s\n""" % (dn, operation, attr)
if value_list == None: return
for val in value_list:
modLdif = modLdif + getMod(attr, val)
modLdif = modLdif + "\n"
f = open(fn, 'w')
f.write(modLdif)
f.close()
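# For example (values illustrative), writeMod('o=gluu', 'description',
# ['new value'], '/tmp/mod.ldif') writes roughly:
#   dn: o=gluu
#   changetype: modify
#   replace: description
#   description: new value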
backup23_folder = None
error = False
try:
backup23_folder = sys.argv[1]
if (not os.path.exists("%s/ldif" % backup23_folder)):
error = True
if (not os.path.exists("%s/etc" % backup23_folder)):
error = True
if (not os.path.exists("%s/opt" % backup23_folder)):
error = True
except:
error = True
if error:
print "backup folders not found"
print "Usage: ./import.py <path_to_backup_folders>"
sys.exit(1)
ldif_folder = "%s/ldif" % backup23_folder
outputFolder = "./output_ldif"
outputLdifFolder = "%s/config" % outputFolder
if not os.path.exists(outputFolder):
os.mkdir(outputFolder)
if not os.path.exists(outputLdifFolder):
os.mkdir(outputLdifFolder)
newLdif = "%s/current_config.ldif" % outputFolder
stopOpenDJ()
copyFiles(backup23_folder)
startOpenDJ()
getNewConfig(newLdif)
restoreConfig(ldif_folder, newLdif, outputLdifFolder)
uploadLDIF(ldif_folder, outputLdifFolder)
|
|
import requests
from slackclient import SlackClient
from kalliope import Utils
from kalliope.core.NeuronModule import NeuronModule, MissingParameterException
Slack_Actions = (
"POST",
"READ",
"USERS"
)
class Slack(NeuronModule):
def __init__(self, **kwargs):
super(Slack, self).__init__(**kwargs)
# parameters
self.action = kwargs.get('action', None)
self.token = kwargs.get('slack_token', None)
self.channel = kwargs.get('channel', None)
self.message = kwargs.get('message', None)
self.nb_messages = int(kwargs.get('nb_messages', 10)) # Int
self.only_connected = bool(kwargs.get('only_connected', True))
# check parameters
if self._is_parameters_ok():
sc = SlackClient(self.token)
if self.action == Slack_Actions[0]: # POST
if self._is_post_parameters_ok():
sc.api_call(
"chat.postMessage",
channel=self.channel,
text=self.message,
as_user=True
)
message = {
"action": self.action,
"text": self.message,
"channel": self.channel
}
if self.action == Slack_Actions[1]: # READ
if self.is_read_parameters_ok():
# get the lists of users and channels
user_list = sc.api_call("users.list")
channel_list = sc.api_call("channels.list")
# Find the channel ID of the given channel name
channel_id = self._get_channel_id(channel_name=self.channel,
channel_list=channel_list)
# Get all messages of the channel
messages_list = self._get_list_messages(sc=sc,
channel_id=channel_id,
nb_messages=self.nb_messages)
# Order the messages
messages_list.reverse()
# Associate user ID of messages to the user name
user_messages = self._get_user_message_list(user_list=user_list,
messages=messages_list)
message = {
"action": self.action,
"messages": user_messages,
"channel": self.channel
}
if self.action == Slack_Actions[2]: # USERS
user_list = sc.api_call(
"users.list",
presence=1
)
members = user_list["members"]
if self.only_connected:
members = filter(lambda m: "presence" in m and m["presence"] == "active", members)
message = {
"action": self.action,
"members": members,
"only_connected": self.only_connected
}
self.say(message)
def _is_parameters_ok(self):
"""
Check if received parameters are ok to perform operations in the neuron.
:return: True if parameters are ok, raise an exception otherwise.
.. raises:: MissingParameterException
"""
if self.token is None:
raise MissingParameterException("Slack needs a slack_token")
if self.action is None:
raise MissingParameterException("Slack needs an action parameter")
return True
def _is_post_parameters_ok(self):
"""
Check if parameters required to POST a message are present.
:return: True, if parameters are OK, raise exception otherwise.
.. raises:: MissingParameterException
"""
if self.channel is None:
raise MissingParameterException("Slack needs a channel")
if self.message is None:
raise MissingParameterException("Slack needs a message")
return True
def is_read_parameters_ok(self):
"""
Check if parameters required to READ a message are present.
:return: True, if parameters are OK, raise exception otherwise.
.. raises:: MissingParameterException
"""
if self.channel is None:
raise MissingParameterException("Slack needs a channel")
return True
@staticmethod
def _get_list_messages(sc=None,
channel_id=None,
nb_messages=None):
"""
Using Slack API to access messages from a given channel id.
:param sc: the slack client
:param channel_id: the channel id
:param nb_messages: the number of messages
:return: the message list of the last nb_messages
"""
global_message_list = sc.api_call(
"channels.history",
channel=channel_id,
count=nb_messages
)
message_list = list()
if "messages" in global_message_list:
message_list = global_message_list["messages"]
else:
Utils.print_warning("No messages found !")
return message_list
@staticmethod
def _get_channel_id(channel_name=None,
channel_list=None):
"""
Return the id of the channel matching the given channel name.
:param channel_name: str of the name
:param channel_list: list of the channel
:return: the id from the channel list corresponding to the channel name.
"""
id = next((channel["id"] for channel in channel_list["channels"] if channel["name"] == channel_name), None)
if id is None:
Utils.print_warning("The channel name has not been found !")
return id
@staticmethod
def _get_user_message_list(user_list=None,
messages=None):
"""
Associate each message with its user name.
:param user_list: the full list of user
:param messages: the list of messages
:return: the list of dicts user:message
"""
current_user_message_dict = dict()
user_message_list = list()
for message in messages:
if "username" in message:
current_user_message_dict[message["username"]] = message["text"]
user_message_list.append(current_user_message_dict)
current_user_message_dict = dict()
continue
elif "user" in message:
for user in user_list["members"]:
if "id" in user:
if user["id"] == message["user"]:
current_user_message_dict[user["name"]] = message["text"]
user_message_list.append(current_user_message_dict)
current_user_message_dict = dict()
continue
return user_message_list
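# Rough brain-file sketch for this neuron (YAML shown as a comment; the token,
# channel and order text are placeholders, and the exact layout should be
# checked against the kalliope documentation):
#
#   - name: "slack-post"
#     signals:
#       - order: "post a message on slack"
#     neurons:
#       - slack:
#           slack_token: "xoxp-your-token"
#           action: "POST"
#           channel: "general"
#           message: "Hello from kalliope"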
|
|
""" manage PyTables query interface via Expressions """
import ast
from functools import partial
import numpy as np
from pandas._libs.tslibs import Timedelta, Timestamp
from pandas.compat.chainmap import DeepChainMap
from pandas.core.dtypes.common import is_list_like
import pandas as pd
from pandas.core.base import StringMixin
import pandas.core.common as com
from pandas.core.computation import expr, ops
from pandas.core.computation.common import _ensure_decoded
from pandas.core.computation.expr import BaseExprVisitor
from pandas.core.computation.ops import UndefinedVariableError, is_term
from pandas.io.formats.printing import pprint_thing, pprint_thing_encoded
class Scope(expr.Scope):
__slots__ = ("queryables",)
def __init__(self, level, global_dict=None, local_dict=None, queryables=None):
super().__init__(level + 1, global_dict=global_dict, local_dict=local_dict)
self.queryables = queryables or dict()
class Term(ops.Term):
def __new__(cls, name, env, side=None, encoding=None):
klass = Constant if not isinstance(name, str) else cls
supr_new = StringMixin.__new__
return supr_new(klass)
def __init__(self, name, env, side=None, encoding=None):
super().__init__(name, env, side=side, encoding=encoding)
def _resolve_name(self):
# must be a queryable
if self.side == "left":
if self.name not in self.env.queryables:
raise NameError("name {name!r} is not defined".format(name=self.name))
return self.name
# resolve the rhs (and allow it to be None)
try:
return self.env.resolve(self.name, is_local=False)
except UndefinedVariableError:
return self.name
# read-only property overwriting read/write property
@property # type: ignore
def value(self):
return self._value
class Constant(Term):
def __init__(self, value, env, side=None, encoding=None):
super().__init__(value, env, side=side, encoding=encoding)
def _resolve_name(self):
return self._name
class BinOp(ops.BinOp):
_max_selectors = 31
def __init__(self, op, lhs, rhs, queryables, encoding):
super().__init__(op, lhs, rhs)
self.queryables = queryables
self.encoding = encoding
self.filter = None
self.condition = None
def _disallow_scalar_only_bool_ops(self):
pass
def prune(self, klass):
def pr(left, right):
""" create and return a new specialized BinOp from myself """
if left is None:
return right
elif right is None:
return left
k = klass
if isinstance(left, ConditionBinOp):
if isinstance(right, ConditionBinOp):
k = JointConditionBinOp
elif isinstance(left, k):
return left
elif isinstance(right, k):
return right
elif isinstance(left, FilterBinOp):
if isinstance(right, FilterBinOp):
k = JointFilterBinOp
elif isinstance(left, k):
return left
elif isinstance(right, k):
return right
return k(
self.op, left, right, queryables=self.queryables, encoding=self.encoding
).evaluate()
left, right = self.lhs, self.rhs
if is_term(left) and is_term(right):
res = pr(left.value, right.value)
elif not is_term(left) and is_term(right):
res = pr(left.prune(klass), right.value)
elif is_term(left) and not is_term(right):
res = pr(left.value, right.prune(klass))
elif not (is_term(left) or is_term(right)):
res = pr(left.prune(klass), right.prune(klass))
return res
def conform(self, rhs):
""" inplace conform rhs """
if not is_list_like(rhs):
rhs = [rhs]
if isinstance(rhs, np.ndarray):
rhs = rhs.ravel()
return rhs
@property
def is_valid(self):
""" return True if this is a valid field """
return self.lhs in self.queryables
@property
def is_in_table(self):
""" return True if this is a valid column name for generation (e.g. an
actual column in the table) """
return self.queryables.get(self.lhs) is not None
@property
def kind(self):
""" the kind of my field """
return getattr(self.queryables.get(self.lhs), "kind", None)
@property
def meta(self):
""" the meta of my field """
return getattr(self.queryables.get(self.lhs), "meta", None)
@property
def metadata(self):
""" the metadata of my field """
return getattr(self.queryables.get(self.lhs), "metadata", None)
def generate(self, v):
""" create and return the op string for this TermValue """
val = v.tostring(self.encoding)
return "({lhs} {op} {val})".format(lhs=self.lhs, op=self.op, val=val)
def convert_value(self, v):
""" convert the expression that is in the term to something that is
accepted by pytables """
def stringify(value):
if self.encoding is not None:
encoder = partial(pprint_thing_encoded, encoding=self.encoding)
else:
encoder = pprint_thing
return encoder(value)
kind = _ensure_decoded(self.kind)
meta = _ensure_decoded(self.meta)
if kind == "datetime64" or kind == "datetime":
if isinstance(v, (int, float)):
v = stringify(v)
v = _ensure_decoded(v)
v = Timestamp(v)
if v.tz is not None:
v = v.tz_convert("UTC")
return TermValue(v, v.value, kind)
elif kind == "timedelta64" or kind == "timedelta":
v = Timedelta(v, unit="s").value
return TermValue(int(v), v, kind)
elif meta == "category":
metadata = com.values_from_object(self.metadata)
result = metadata.searchsorted(v, side="left")
# result returns 0 if v is first element or if v is not in metadata
# check that metadata contains v
if not result and v not in metadata:
result = -1
return TermValue(result, result, "integer")
elif kind == "integer":
v = int(float(v))
return TermValue(v, v, kind)
elif kind == "float":
v = float(v)
return TermValue(v, v, kind)
elif kind == "bool":
if isinstance(v, str):
v = not v.strip().lower() in [
"false",
"f",
"no",
"n",
"none",
"0",
"[]",
"{}",
"",
]
else:
v = bool(v)
return TermValue(v, v, kind)
elif isinstance(v, str):
# string quoting
return TermValue(v, stringify(v), "string")
else:
raise TypeError(
"Cannot compare {v} of type {typ} to {kind} column".format(
v=v, typ=type(v), kind=kind
)
)
def convert_values(self):
pass
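# For instance (illustrative): on a column whose kind is "integer",
# convert_value("5") yields TermValue(5, 5, "integer"), while on a
# "datetime64" column a string is parsed through Timestamp and the comparison
# uses its integer .value.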
class FilterBinOp(BinOp):
def __str__(self):
return pprint_thing(
"[Filter : [{lhs}] -> [{op}]".format(lhs=self.filter[0], op=self.filter[1])
)
def invert(self):
""" invert the filter """
if self.filter is not None:
f = list(self.filter)
f[1] = self.generate_filter_op(invert=True)
self.filter = tuple(f)
return self
def format(self):
""" return the actual filter format """
return [self.filter]
def evaluate(self):
if not self.is_valid:
raise ValueError("query term is not valid [{slf}]".format(slf=self))
rhs = self.conform(self.rhs)
values = [TermValue(v, v, self.kind).value for v in rhs]
if self.is_in_table:
# if too many values to create the expression, use a filter instead
if self.op in ["==", "!="] and len(values) > self._max_selectors:
filter_op = self.generate_filter_op()
self.filter = (self.lhs, filter_op, pd.Index(values))
return self
return None
# equality conditions
if self.op in ["==", "!="]:
filter_op = self.generate_filter_op()
self.filter = (self.lhs, filter_op, pd.Index(values))
else:
raise TypeError(
"passing a filterable condition to a non-table "
"indexer [{slf}]".format(slf=self)
)
return self
def generate_filter_op(self, invert=False):
if (self.op == "!=" and not invert) or (self.op == "==" and invert):
return lambda axis, vals: ~axis.isin(vals)
else:
return lambda axis, vals: axis.isin(vals)
class JointFilterBinOp(FilterBinOp):
def format(self):
raise NotImplementedError("unable to collapse Joint Filters")
def evaluate(self):
return self
class ConditionBinOp(BinOp):
def __str__(self):
return pprint_thing("[Condition : [{cond}]]".format(cond=self.condition))
def invert(self):
""" invert the condition """
# if self.condition is not None:
# self.condition = "~(%s)" % self.condition
# return self
raise NotImplementedError(
"cannot use an invert condition when " "passing to numexpr"
)
def format(self):
""" return the actual ne format """
return self.condition
def evaluate(self):
if not self.is_valid:
raise ValueError("query term is not valid [{slf}]".format(slf=self))
# convert values if we are in the table
if not self.is_in_table:
return None
rhs = self.conform(self.rhs)
values = [self.convert_value(v) for v in rhs]
# equality conditions
if self.op in ["==", "!="]:
# too many values to create the expression?
if len(values) <= self._max_selectors:
vs = [self.generate(v) for v in values]
self.condition = "({cond})".format(cond=" | ".join(vs))
# use a filter after reading
else:
return None
else:
self.condition = self.generate(values[0])
return self
class JointConditionBinOp(ConditionBinOp):
def evaluate(self):
self.condition = "({lhs} {op} {rhs})".format(
lhs=self.lhs.condition, op=self.op, rhs=self.rhs.condition
)
return self
class UnaryOp(ops.UnaryOp):
def prune(self, klass):
if self.op != "~":
raise NotImplementedError("UnaryOp only support invert type ops")
operand = self.operand
operand = operand.prune(klass)
if operand is not None:
if issubclass(klass, ConditionBinOp):
if operand.condition is not None:
return operand.invert()
elif issubclass(klass, FilterBinOp):
if operand.filter is not None:
return operand.invert()
return None
_op_classes = {"unary": UnaryOp}
class ExprVisitor(BaseExprVisitor):
const_type = Constant
term_type = Term
def __init__(self, env, engine, parser, **kwargs):
super().__init__(env, engine, parser)
for bin_op in self.binary_ops:
bin_node = self.binary_op_nodes_map[bin_op]
setattr(
self,
"visit_{node}".format(node=bin_node),
lambda node, bin_op=bin_op: partial(BinOp, bin_op, **kwargs),
)
def visit_UnaryOp(self, node, **kwargs):
if isinstance(node.op, (ast.Not, ast.Invert)):
return UnaryOp("~", self.visit(node.operand))
elif isinstance(node.op, ast.USub):
return self.const_type(-self.visit(node.operand).value, self.env)
elif isinstance(node.op, ast.UAdd):
raise NotImplementedError("Unary addition not supported")
def visit_Index(self, node, **kwargs):
return self.visit(node.value).value
def visit_Assign(self, node, **kwargs):
cmpr = ast.Compare(
ops=[ast.Eq()], left=node.targets[0], comparators=[node.value]
)
return self.visit(cmpr)
def visit_Subscript(self, node, **kwargs):
# only allow simple subscripts
value = self.visit(node.value)
slobj = self.visit(node.slice)
try:
value = value.value
except AttributeError:
pass
try:
return self.const_type(value[slobj], self.env)
except TypeError:
raise ValueError(
"cannot subscript {value!r} with "
"{slobj!r}".format(value=value, slobj=slobj)
)
def visit_Attribute(self, node, **kwargs):
attr = node.attr
value = node.value
ctx = node.ctx.__class__
if ctx == ast.Load:
# resolve the value
resolved = self.visit(value)
# try to get the value to see if we are another expression
try:
resolved = resolved.value
except (AttributeError):
pass
try:
return self.term_type(getattr(resolved, attr), self.env)
except AttributeError:
# something like datetime.datetime where scope is overridden
if isinstance(value, ast.Name) and value.id == attr:
return resolved
raise ValueError("Invalid Attribute context {name}".format(name=ctx.__name__))
def translate_In(self, op):
return ast.Eq() if isinstance(op, ast.In) else op
def _rewrite_membership_op(self, node, left, right):
return self.visit(node.op), node.op, left, right
def _validate_where(w):
"""
Validate that the where statement is of the right type.
The type may either be String, Expr, or list-like of Exprs.
Parameters
----------
w : String term expression, Expr, or list-like of Exprs.
Returns
-------
where : The original where clause if the check was successful.
Raises
------
TypeError : An invalid data type was passed in for w (e.g. dict).
"""
if not (isinstance(w, (Expr, str)) or is_list_like(w)):
raise TypeError(
"where must be passed as a string, Expr, " "or list-like of Exprs"
)
return w
class Expr(expr.Expr):
""" hold a pytables like expression, comprised of possibly multiple 'terms'
Parameters
----------
where : string term expression, Expr, or list-like of Exprs
queryables : a "kinds" map (dict of column name -> kind), or None if column
is non-indexable
encoding : an encoding that will encode the query terms
Returns
-------
an Expr object
Examples
--------
'index>=date'
"columns=['A', 'D']"
'columns=A'
'columns==A'
"~(columns=['A','B'])"
'index>df.index[3] & string="bar"'
'(index>df.index[3] & index<=df.index[6]) | string="bar"'
"ts>=Timestamp('2012-02-01')"
"major_axis>=20130101"
"""
def __init__(self, where, queryables=None, encoding=None, scope_level=0):
where = _validate_where(where)
self.encoding = encoding
self.condition = None
self.filter = None
self.terms = None
self._visitor = None
# capture the environment if needed
local_dict = DeepChainMap()
if isinstance(where, Expr):
local_dict = where.env.scope
where = where.expr
elif isinstance(where, (list, tuple)):
for idx, w in enumerate(where):
if isinstance(w, Expr):
local_dict = w.env.scope
else:
w = _validate_where(w)
where[idx] = w
where = " & ".join(map("({})".format, com.flatten(where))) # noqa
self.expr = where
self.env = Scope(scope_level + 1, local_dict=local_dict)
if queryables is not None and isinstance(self.expr, str):
self.env.queryables.update(queryables)
self._visitor = ExprVisitor(
self.env,
queryables=queryables,
parser="pytables",
engine="pytables",
encoding=encoding,
)
self.terms = self.parse()
def __str__(self):
if self.terms is not None:
return pprint_thing(self.terms)
return pprint_thing(self.expr)
def evaluate(self):
""" create and return the numexpr condition and filter """
try:
self.condition = self.terms.prune(ConditionBinOp)
except AttributeError:
raise ValueError(
"cannot process expression [{expr}], [{slf}] "
"is not a valid condition".format(expr=self.expr, slf=self)
)
try:
self.filter = self.terms.prune(FilterBinOp)
except AttributeError:
raise ValueError(
"cannot process expression [{expr}], [{slf}] "
"is not a valid filter".format(expr=self.expr, slf=self)
)
return self.condition, self.filter
class TermValue:
""" hold a term value the we use to construct a condition/filter """
def __init__(self, value, converted, kind):
self.value = value
self.converted = converted
self.kind = kind
def tostring(self, encoding):
""" quote the string if not encoded
else encode and return """
if self.kind == "string":
if encoding is not None:
return self.converted
return '"{converted}"'.format(converted=self.converted)
elif self.kind == "float":
# python 2 str(float) is not always
# round-trippable so use repr()
return repr(self.converted)
return self.converted
def maybe_expression(s):
""" loose checking if s is a pytables-acceptable expression """
if not isinstance(s, str):
return False
ops = ExprVisitor.binary_ops + ExprVisitor.unary_ops + ("=",)
# make sure we have an op at least
return any(op in s for op in ops)
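if __name__ == "__main__":
    # Tiny illustrative check of the loose detector above; not part of pandas
    # itself, just a sketch of the expected behaviour.
    assert maybe_expression("index >= '2012-01-01'")  # comparison operator present
    assert not maybe_expression("foo bar")  # no operator-like substring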
|
|
#!/usr/bin/env python
#==============================================================================
# Copyright 2012 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Amazon Software License (the "License"). You may not use
# this file except in compliance with the License. A copy of the License is
# located at
#
# http://aws.amazon.com/asl/
#
# or in the "license" file accompanying this file. This file is distributed on
# an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, express or
# implied. See the License for the specific language governing permissions
# and limitations under the License.
#==============================================================================
import logging as _logging
import re as _re
from lib.utility import misc
from lib.aws.exception import AwsErrorCode
from lib.aws.exception import AwsServiceException
from lib.aws.exception import MissingParameterException
from lib.aws.exception import InsufficientPrivilegesException
from lib.aws.exception import InvalidParameterValueException
from lib.aws.exception import OptInRequiredException
from lib.aws.webservice import AWSQueryClient
from lib.aws.webservice import AWSSignature
from lib.elasticbeanstalk.request import Request
from lib.elasticbeanstalk.request import Response
from lib.elasticbeanstalk import strings as Strings
from lib.elasticbeanstalk.model import ApplicationDescription
from lib.elasticbeanstalk.model import ApplicationVersionDescription
from lib.elasticbeanstalk.model import ConfigurationSettingsDescription
from lib.elasticbeanstalk.model import ConfigurationOptionDescription
from lib.elasticbeanstalk.model import EnvironmentDescription
from lib.elasticbeanstalk.model import EnvironmentResourceDescription
from lib.elasticbeanstalk.model import EventDescription
from lib.elasticbeanstalk.model import SolutionStackDescription
from lib.elasticbeanstalk.model import ValidationMessage
from lib.elasticbeanstalk.exception import EBErrorCode
from lib.elasticbeanstalk.exception import AlreadyExistException
from lib.elasticbeanstalk.exception import ApplicationHasRunningEnvException
from lib.elasticbeanstalk.exception import OperationInProgressException
from lib.elasticbeanstalk.exception import SourceBundleDeletionException
from lib.elasticbeanstalk.exception import S3LocationNotInServiceRegionException
log = _logging.getLogger('eb')
class ElasticBeanstalkClient(object):
'''
Web service client for Elastic Beanstalk
'''
_signature_version = AWSSignature.SigV4
_api_version = u'2010-12-01'
_service_name = u'elasticbeanstalk'
def __init__(self, accessKey, secretKey, endpoint, region, result_format = 'json'):
'''
Constructor
'''
self._accessKey = accessKey
self._secretKey = secretKey
self._endpoint = endpoint
self._format = result_format
self._region = region
self._client = AWSQueryClient(self._accessKey, self._secretKey,
self._endpoint, self._region,
self._service_name, self._format,
self._signature_version, self._api_version)
def call(self, request):
'''Make API call and translate AWSServiceException to more specific exception'''
try:
log.debug(request)
return_msg = self._client.call(request, self._format)
log.debug(u'Request ID: {0}'.format(return_msg.json.values()[0]\
[u'ResponseMetadata'][u'RequestId']))
#TODO: set more specific charset code
return return_msg.json
except AwsServiceException as ex:
log.debug(misc.to_unicode(ex))
# Translate general Elastic Beanstalk exception
if misc.string_equal_ignore_case(ex.code, AwsErrorCode.OptInRequired):
raise OptInRequiredException(ex)
if misc.string_equal_ignore_case(ex.code, AwsErrorCode.InsufficientPrivileges):
raise InsufficientPrivilegesException(ex)
if misc.string_equal_ignore_case(ex.code, AwsErrorCode.InvalidParameterValue):
raise InvalidParameterValueException(ex)
if misc.string_equal_ignore_case(ex.code, AwsErrorCode.MissingParameter):
raise MissingParameterException(ex)
raise
#---------------------------------------
# service calls
def create_application(self, name, description = None):
request = Request()
request.set_operation(u'CreateApplication')
request.set_app_name(name)
if description is not None:
request.set_description(description)
try:
response = self.call(request)
except AwsServiceException as ex:
if ex.code.lower() == AwsErrorCode.InvalidParameterValue.lower()\
and _re.search(Strings.APP_EXIST_RE, ex.message):
raise AlreadyExistException(ex)
raise
# TODO: take care of too many application exception?
result = response[u'CreateApplicationResponse']\
[u'CreateApplicationResult'][u'Application']
request_id = response[u'CreateApplicationResponse']\
[u'ResponseMetadata'][u'RequestId']
return Response(request_id, ApplicationDescription.from_json(result))
def delete_application(self, name, terminate_env = u'false'):
request = Request()
request.set_operation(u'DeleteApplication')
request.set_app_name(name)
request.set_terminate_env(terminate_env)
try:
response = self.call(request)
except AwsServiceException as ex:
if ex.code.lower() == AwsErrorCode.InvalidParameterValue.lower()\
and _re.search(Strings.APP_HAS_RUNNING_ENV, ex.message):
raise ApplicationHasRunningEnvException(ex)
if ex.code.lower() == EBErrorCode.OperationInProgress.lower():
raise OperationInProgressException(ex)
raise
request_id = response[u'DeleteApplicationResponse']\
[u'ResponseMetadata'][u'RequestId']
return Response(request_id)
def create_application_version(self, application, version_label,
s3bucket = None, s3key = None,
auto_create_app = u'false', description = None):
if (s3bucket is None and s3key is not None) \
or (s3bucket is not None and s3key is None):
raise AttributeError(u'Must specify both s3 bucket and key')
request = Request()
request.set_operation(u'CreateApplicationVersion')
request.set_app_name(application)
request.set_version_label(version_label)
if s3bucket is not None:
request.set_s3bucket(s3bucket)
request.set_s3key(s3key)
request.set_auto_create_app(auto_create_app)
if description is not None:
request.set_description(description)
try:
response = self.call(request)
except AwsServiceException as ex:
if ex.code.lower() == AwsErrorCode.InvalidParameterValue.lower()\
and _re.search(Strings.VER_EXIST_RE, ex.message):
raise AlreadyExistException(ex)
raise
# TODO: take care of too many application and/or version exception
result = response[u'CreateApplicationVersionResponse']\
[u'CreateApplicationVersionResult'][u'ApplicationVersion']
request_id = response[u'CreateApplicationVersionResponse']\
[u'ResponseMetadata'][u'RequestId']
return Response(request_id, ApplicationVersionDescription.from_json(result))
def delete_application_version(self, application,
version_label, delete_bundle = u'false'):
request = Request()
request.set_operation(u'DeleteApplicationVersion')
request.set_app_name(application)
request.set_version_label(version_label)
request.set_delete_source_bundle(delete_bundle)
try:
response = self.call(request)
except AwsServiceException as ex:
            if ex.code.lower() == EBErrorCode.SourceBundleDeletion.lower():
                raise SourceBundleDeletionException(ex)
            if ex.code.lower() == EBErrorCode.S3LocationNotInServiceRegion.lower():
                raise S3LocationNotInServiceRegionException(ex)
            if ex.code.lower() == EBErrorCode.OperationInProgress.lower():
                raise OperationInProgressException(ex)
raise
request_id = response[u'DeleteApplicationVersionResponse']\
[u'ResponseMetadata'][u'RequestId']
return Response(request_id)
def create_environment(self, application, environment, cname_prefix = None,
template = None, solution_stack = None, version_label = None,
option_settings = None, option_remove = None,
template_specification = None,
description = None):
request = Request()
request.set_operation(u'CreateEnvironment')
request.set_app_name(application)
request.set_env_name(environment)
if cname_prefix is not None:
request.set_cname(cname_prefix)
if template is not None:
request.set_template(template)
if solution_stack is not None:
request.set_solution_stack(solution_stack)
if version_label is not None:
request.set_version_label(version_label)
if option_settings is not None:
request.set_option_settings(option_settings)
if option_remove is not None:
request.set_options_to_remove(option_remove)
if template_specification is not None:
request.set_template_specification(template_specification)
if description is not None:
request.set_description(description)
try:
response = self.call(request)
except AwsServiceException as ex:
if ex.code.lower() == AwsErrorCode.InvalidParameterValue.lower()\
and _re.search(Strings.ENV_EXIST_RE, ex.message):
raise AlreadyExistException(ex)
raise
result = response[u'CreateEnvironmentResponse'][u'CreateEnvironmentResult']
request_id = response[u'CreateEnvironmentResponse']\
[u'ResponseMetadata'][u'RequestId']
return Response(request_id, EnvironmentDescription.from_json(result))
def update_environment(self, env_name = None, env_id = None,
template = None, version_label = None,
option_settings = None, option_remove = None,
template_specification = None,
description = None):
request = Request()
request.set_operation(u'UpdateEnvironment')
if env_name is not None:
request.set_env_name(env_name)
if env_id is not None:
request.set_env_id(env_id)
if template is not None:
request.set_template(template)
if version_label is not None:
request.set_version_label(version_label)
if option_settings is not None:
request.set_option_settings(option_settings)
if option_remove is not None:
request.set_options_to_remove(option_remove)
if template_specification is not None:
request.set_template_specification(template_specification)
if description is not None:
request.set_description(description)
try:
response = self.call(request)
except AwsServiceException as ex:
if ex.code.lower() == AwsErrorCode.InvalidParameterValue.lower():
raise InvalidParameterValueException(ex)
raise
result = response[u'UpdateEnvironmentResponse'][u'UpdateEnvironmentResult']
request_id = response[u'UpdateEnvironmentResponse']\
[u'ResponseMetadata'][u'RequestId']
return Response(request_id, EnvironmentDescription.from_json(result))
def terminate_environment(self, environment_name,
environment_id = None, delete_resource = u'true'):
request = Request()
request.set_operation(u'TerminateEnvironment')
if environment_name is not None:
request.set_env_name(environment_name)
if environment_id is not None:
request.set_env_id(environment_id)
request.set_terminate_resources(delete_resource)
try:
response = self.call(request)
except:
raise
result = response[u'TerminateEnvironmentResponse']\
[u'TerminateEnvironmentResult']
request_id = response[u'TerminateEnvironmentResponse']\
[u'ResponseMetadata'][u'RequestId']
return Response(request_id, EnvironmentDescription.from_json(result))
def describe_applications(self, names):
request = Request()
request.set_operation(u'DescribeApplications')
request.set_app_names(names)
try:
response = self.call(request)
except:
raise
results = response[u'DescribeApplicationsResponse']\
[u'DescribeApplicationsResult'][u'Applications']
request_id = response[u'DescribeApplicationsResponse']\
[u'ResponseMetadata'][u'RequestId']
applications = []
for result in results:
applications.append(ApplicationDescription.from_json(result))
return Response(request_id, applications)
def describe_configuration_options(self,
application_name = None,
environment_name = None,
template = None,
solution_stack = None,
options = None,
):
request = Request()
request.set_operation(u'DescribeConfigurationOptions')
if application_name is not None:
request.set_app_name(application_name)
if environment_name is not None:
request.set_env_name(environment_name)
if template is not None:
request.set_template(template)
if solution_stack is not None:
request.set_solution_stack(solution_stack)
if options is not None and len(options) > 0:
request.set_options(options)
try:
response = self.call(request)
except:
raise
options = response[u'DescribeConfigurationOptionsResponse']\
[u'DescribeConfigurationOptionsResult'][u'Options']
request_id = response[u'DescribeConfigurationOptionsResponse']\
[u'ResponseMetadata'][u'RequestId']
option_descriptions = set()
for option in options:
option_descriptions.add(ConfigurationOptionDescription.from_json(option))
return Response(request_id, option_descriptions)
def describe_configuration_settings(self, application_name,
environment_name = None, template = None,
options = None):
request = Request()
request.set_operation(u'DescribeConfigurationSettings')
request.set_app_name(application_name)
if environment_name is not None:
request.set_env_name(environment_name)
if template is not None:
request.set_template(template)
if options is not None:
request.set_options(options)
try:
response = self.call(request)
except:
raise
result = response[u'DescribeConfigurationSettingsResponse']\
[u'DescribeConfigurationSettingsResult'][u'ConfigurationSettings'][0]
request_id = response[u'DescribeConfigurationSettingsResponse']\
[u'ResponseMetadata'][u'RequestId']
configuration = ConfigurationSettingsDescription.from_json(result)
return Response(request_id, configuration)
def describe_environments (self, application = None,
environment_names = None, environment_ids = None,
include_deleted = None, included_deleted_backto = None,
version_label = None):
request = Request()
request.set_operation(u'DescribeEnvironments')
if application is not None:
request.set_app_name(application)
if environment_names is not None:
request.set_env_names(environment_names)
if environment_ids is not None:
request.set_env_ids(environment_ids)
if include_deleted is not None:
request.set_include_deleted(include_deleted)
if included_deleted_backto is not None:
request.set_included_deleted_backto(included_deleted_backto)
if version_label is not None:
request.set_version_label(version_label)
try:
response = self.call(request)
except:
raise
# parse message
results = response[u'DescribeEnvironmentsResponse']\
[u'DescribeEnvironmentsResult'][u'Environments']
request_id = response[u'DescribeEnvironmentsResponse']\
[u'ResponseMetadata'][u'RequestId']
environments = []
for result in results:
environments.append(EnvironmentDescription.from_json(result))
return Response(request_id, environments)
def describe_environment_resources (self, environment_name, environment_id = None):
request = Request()
request.set_operation(u'DescribeEnvironmentResources')
if environment_name is not None:
request.set_env_name(environment_name)
if environment_id is not None:
request.set_env_id(environment_id)
try:
response = self.call(request)
except:
raise
# parse message
result = response[u'DescribeEnvironmentResourcesResponse']\
[u'DescribeEnvironmentResourcesResult'][u'EnvironmentResources']
request_id = response[u'DescribeEnvironmentResourcesResponse']\
[u'ResponseMetadata'][u'RequestId']
resources = EnvironmentResourceDescription.from_json(result)
return Response(request_id, resources)
def describe_events (self, application = None,
environment_name = None, environment_id = None,
start_time = None, end_time = None,
severity = None, request_id = None,
template = None, version_label = None,
max_records = None, next_token = None
):
request = Request()
request.set_operation(u'DescribeEvents')
if application is not None:
request.set_app_name(application)
if environment_name is not None:
request.set_env_name(environment_name)
if environment_id is not None:
request.set_env_id(environment_id)
if start_time is not None:
request.set_start_time(start_time)
if end_time is not None:
request.set_end_time(end_time)
if severity is not None:
request.set_severity(severity)
if request_id is not None:
request.set_requst_id(request_id)
if template is not None:
request.set_template(template)
if version_label is not None:
request.set_version_label(version_label)
if max_records is not None:
request.set_max_records(max_records)
if next_token is not None:
request.set_next_token(next_token)
try:
response = self.call(request)
except:
raise
# parse message
combined_result = response[u'DescribeEventsResponse']\
[u'DescribeEventsResult']
results = combined_result[u'Events']
next_token = combined_result[u'NextToken'] \
if u'NextToken' in combined_result else None
request_id = response[u'DescribeEventsResponse']\
[u'ResponseMetadata'][u'RequestId']
events = []
for result in results:
events.append(EventDescription.from_json(result))
return Response(request_id, events, next_token)
def list_available_solutionstacks(self):
request = Request()
request.set_operation(u'ListAvailableSolutionStacks')
try:
response = self.call(request)
except:
raise
results = response[u'ListAvailableSolutionStacksResponse']\
[u'ListAvailableSolutionStacksResult'][u'SolutionStackDetails']
request_id = response[u'ListAvailableSolutionStacksResponse']\
[u'ResponseMetadata'][u'RequestId']
solutionstacks = []
for result in results:
solutionstacks.append(SolutionStackDescription.from_json(result))
return Response(request_id, solutionstacks)
def create_configuration_template(self, application, template,
environment_id = None, solution_stack = None,
source_configuration = None,
option_settings = None,
option_remove = None,
template_specification = None,
description = None):
request = Request()
request.set_operation(u'CreateConfigurationTemplate')
request.set_app_name(application)
request.set_template(template)
if environment_id is not None:
request.set_env_id(environment_id)
if solution_stack is not None:
request.set_solution_stack(solution_stack)
if source_configuration is not None:
request.set_source_configuration(source_configuration)
if option_settings is not None:
request.set_option_settings(option_settings)
if option_remove is not None:
request.set_options_to_remove(option_remove)
if template_specification is not None:
request.set_template_specification(template_specification)
if description is not None:
request.set_description(description)
response = self.call(request)
result = response[u'CreateConfigurationTemplateResponse'][u'CreateConfigurationTemplateResult']
request_id = response[u'CreateConfigurationTemplateResponse']\
[u'ResponseMetadata'][u'RequestId']
configuration = ConfigurationSettingsDescription.from_json(result)
return Response(request_id, configuration)
def update_configuration_template(self, application, template,
environment_id = None, solution_stack = None,
source_configuration = None,
option_settings = None,
option_remove = None,
template_specification = None,
description = None):
request = Request()
request.set_operation(u'UpdateConfigurationTemplate')
request.set_app_name(application)
request.set_template(template)
if environment_id is not None:
request.set_env_id(environment_id)
if solution_stack is not None:
request.set_solution_stack(solution_stack)
if source_configuration is not None:
request.set_source_configuration(source_configuration)
if option_settings is not None:
request.set_option_settings(option_settings)
if option_remove is not None:
request.set_options_to_remove(option_remove)
if template_specification is not None:
request.set_template_specification(template_specification)
if description is not None:
request.set_description(description)
response = self.call(request)
result = response[u'UpdateConfigurationTemplateResponse'][u'UpdateConfigurationTemplateResult']
request_id = response[u'UpdateConfigurationTemplateResponse']\
[u'ResponseMetadata'][u'RequestId']
configuration = ConfigurationSettingsDescription.from_json(result)
return Response(request_id, configuration)
def delete_configuration_template(self, application, template):
request = Request()
request.set_operation(u'DeleteConfigurationTemplate')
request.set_app_name(application)
request.set_template(template)
response = self.call(request)
request_id = response[u'DeleteConfigurationTemplateResponse']\
[u'ResponseMetadata'][u'RequestId']
return Response(request_id, None)
def validate_configuration_settings(self,
application_name = None,
option_settings = None,
environment_name = None,
template = None,
version_label = None,
option_remove = None,
template_specification = None):
request = Request()
request.set_operation(u'ValidateConfigurationSettings')
if application_name is not None:
request.set_app_name(application_name)
if option_settings is not None:
request.set_option_settings(option_settings)
if environment_name is not None:
request.set_env_name(environment_name)
if template is not None:
request.set_template(template)
if version_label is not None:
request.set_version_label(version_label)
if option_remove is not None:
request.set_options_to_remove(option_remove)
if template_specification is not None:
request.set_template_specification(template_specification)
try:
response = self.call(request)
except AwsServiceException as ex:
if ex.code.lower() == AwsErrorCode.InsufficientPrivileges.lower():
raise InsufficientPrivilegesException(ex)
if ex.code.lower() == AwsErrorCode.MissingParameter.lower():
raise MissingParameterException(ex)
if ex.code.lower() == AwsErrorCode.InvalidParameterValue.lower():
raise InvalidParameterValueException(ex)
raise
results = response[u'ValidateConfigurationSettingsResponse']\
[u'ValidateConfigurationSettingsResult'][u'Messages']
request_id = response[u'ValidateConfigurationSettingsResponse']\
[u'ResponseMetadata'][u'RequestId']
messages = []
if results is not None:
for result in results:
messages.append(ValidationMessage.from_json(result))
return Response(request_id, messages)
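# Illustrative usage sketch (not part of the original module). The credentials,
# endpoint, region and application name below are placeholders, not real values.
if __name__ == '__main__':
    client = ElasticBeanstalkClient(u'YOUR_ACCESS_KEY', u'YOUR_SECRET_KEY',
                                    u'https://elasticbeanstalk.us-east-1.amazonaws.com',
                                    u'us-east-1')
    # Each call returns a Response wrapping the AWS request id and the parsed
    # model objects (see the service calls above).
    client.describe_applications([u'my-sample-app'])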
|
|
"""Tests for Incremental PCA."""
import numpy as np
import pytest
from sklearn.utils._testing import assert_almost_equal
from sklearn.utils._testing import assert_array_almost_equal
from sklearn.utils._testing import assert_allclose_dense_sparse
from numpy.testing import assert_array_equal
from sklearn import datasets
from sklearn.decomposition import PCA, IncrementalPCA
from scipy import sparse
iris = datasets.load_iris()
def test_incremental_pca():
# Incremental PCA on dense arrays.
X = iris.data
batch_size = X.shape[0] // 3
ipca = IncrementalPCA(n_components=2, batch_size=batch_size)
pca = PCA(n_components=2)
pca.fit_transform(X)
X_transformed = ipca.fit_transform(X)
assert X_transformed.shape == (X.shape[0], 2)
np.testing.assert_allclose(
ipca.explained_variance_ratio_.sum(),
pca.explained_variance_ratio_.sum(),
rtol=1e-3,
)
for n_components in [1, 2, X.shape[1]]:
ipca = IncrementalPCA(n_components, batch_size=batch_size)
ipca.fit(X)
cov = ipca.get_covariance()
precision = ipca.get_precision()
np.testing.assert_allclose(
np.dot(cov, precision), np.eye(X.shape[1]), atol=1e-13
)
@pytest.mark.parametrize(
"matrix_class", [sparse.csc_matrix, sparse.csr_matrix, sparse.lil_matrix]
)
def test_incremental_pca_sparse(matrix_class):
# Incremental PCA on sparse arrays.
X = iris.data
pca = PCA(n_components=2)
pca.fit_transform(X)
X_sparse = matrix_class(X)
batch_size = X_sparse.shape[0] // 3
ipca = IncrementalPCA(n_components=2, batch_size=batch_size)
X_transformed = ipca.fit_transform(X_sparse)
assert X_transformed.shape == (X_sparse.shape[0], 2)
np.testing.assert_allclose(
ipca.explained_variance_ratio_.sum(),
pca.explained_variance_ratio_.sum(),
rtol=1e-3,
)
for n_components in [1, 2, X.shape[1]]:
ipca = IncrementalPCA(n_components, batch_size=batch_size)
ipca.fit(X_sparse)
cov = ipca.get_covariance()
precision = ipca.get_precision()
np.testing.assert_allclose(
np.dot(cov, precision), np.eye(X_sparse.shape[1]), atol=1e-13
)
with pytest.raises(
TypeError,
match=(
"IncrementalPCA.partial_fit does not support "
"sparse input. Either convert data to dense "
"or use IncrementalPCA.fit to do so in batches."
),
):
ipca.partial_fit(X_sparse)
def test_incremental_pca_check_projection():
# Test that the projection of data is correct.
rng = np.random.RandomState(1999)
n, p = 100, 3
X = rng.randn(n, p) * 0.1
X[:10] += np.array([3, 4, 5])
Xt = 0.1 * rng.randn(1, p) + np.array([3, 4, 5])
# Get the reconstruction of the generated data X
# Note that Xt has the same "components" as X, just separated
# This is what we want to ensure is recreated correctly
Yt = IncrementalPCA(n_components=2).fit(X).transform(Xt)
# Normalize
Yt /= np.sqrt((Yt ** 2).sum())
# Make sure that the first element of Yt is ~1, this means
# the reconstruction worked as expected
assert_almost_equal(np.abs(Yt[0][0]), 1.0, 1)
def test_incremental_pca_inverse():
# Test that the projection of data can be inverted.
rng = np.random.RandomState(1999)
n, p = 50, 3
X = rng.randn(n, p) # spherical data
X[:, 1] *= 0.00001 # make middle component relatively small
X += [5, 4, 3] # make a large mean
# same check that we can find the original data from the transformed
# signal (since the data is almost of rank n_components)
ipca = IncrementalPCA(n_components=2, batch_size=10).fit(X)
Y = ipca.transform(X)
Y_inverse = ipca.inverse_transform(Y)
assert_almost_equal(X, Y_inverse, decimal=3)
def test_incremental_pca_validation():
# Test that n_components is >=1 and <= n_features.
X = np.array([[0, 1, 0], [1, 0, 0]])
n_samples, n_features = X.shape
for n_components in [-1, 0, 0.99, 4]:
with pytest.raises(
ValueError,
match=(
"n_components={} invalid"
" for n_features={}, need more rows than"
" columns for IncrementalPCA"
" processing".format(n_components, n_features)
),
):
IncrementalPCA(n_components, batch_size=10).fit(X)
# Tests that n_components is also <= n_samples.
n_components = 3
with pytest.raises(
ValueError,
match=(
"n_components={} must be"
" less or equal to the batch number of"
" samples {}".format(n_components, n_samples)
),
):
IncrementalPCA(n_components=n_components).partial_fit(X)
def test_n_components_none():
# Ensures that n_components == None is handled correctly
rng = np.random.RandomState(1999)
for n_samples, n_features in [(50, 10), (10, 50)]:
X = rng.rand(n_samples, n_features)
ipca = IncrementalPCA(n_components=None)
# First partial_fit call, ipca.n_components_ is inferred from
# min(X.shape)
ipca.partial_fit(X)
assert ipca.n_components_ == min(X.shape)
# Second partial_fit call, ipca.n_components_ is inferred from
# ipca.components_ computed from the first partial_fit call
ipca.partial_fit(X)
assert ipca.n_components_ == ipca.components_.shape[0]
def test_incremental_pca_set_params():
    # Test that partial_fit raises if n_components is changed via set_params.
rng = np.random.RandomState(1999)
n_samples = 100
n_features = 20
X = rng.randn(n_samples, n_features)
X2 = rng.randn(n_samples, n_features)
X3 = rng.randn(n_samples, n_features)
ipca = IncrementalPCA(n_components=20)
ipca.fit(X)
# Decreasing number of components
ipca.set_params(n_components=10)
with pytest.raises(ValueError):
ipca.partial_fit(X2)
# Increasing number of components
ipca.set_params(n_components=15)
with pytest.raises(ValueError):
ipca.partial_fit(X3)
# Returning to original setting
ipca.set_params(n_components=20)
ipca.partial_fit(X)
def test_incremental_pca_num_features_change():
    # Test that changing the number of features raises an error on partial_fit.
rng = np.random.RandomState(1999)
n_samples = 100
X = rng.randn(n_samples, 20)
X2 = rng.randn(n_samples, 50)
ipca = IncrementalPCA(n_components=None)
ipca.fit(X)
with pytest.raises(ValueError):
ipca.partial_fit(X2)
def test_incremental_pca_batch_signs():
# Test that components_ sign is stable over batch sizes.
rng = np.random.RandomState(1999)
n_samples = 100
n_features = 3
X = rng.randn(n_samples, n_features)
all_components = []
batch_sizes = np.arange(10, 20)
for batch_size in batch_sizes:
ipca = IncrementalPCA(n_components=None, batch_size=batch_size).fit(X)
all_components.append(ipca.components_)
for i, j in zip(all_components[:-1], all_components[1:]):
assert_almost_equal(np.sign(i), np.sign(j), decimal=6)
def test_incremental_pca_batch_values():
# Test that components_ values are stable over batch sizes.
rng = np.random.RandomState(1999)
n_samples = 100
n_features = 3
X = rng.randn(n_samples, n_features)
all_components = []
batch_sizes = np.arange(20, 40, 3)
for batch_size in batch_sizes:
ipca = IncrementalPCA(n_components=None, batch_size=batch_size).fit(X)
all_components.append(ipca.components_)
for i, j in zip(all_components[:-1], all_components[1:]):
assert_almost_equal(i, j, decimal=1)
def test_incremental_pca_batch_rank():
    # Test that the sample size in each batch is always larger than or equal to n_components
rng = np.random.RandomState(1999)
n_samples = 100
n_features = 20
X = rng.randn(n_samples, n_features)
all_components = []
batch_sizes = np.arange(20, 90, 3)
for batch_size in batch_sizes:
ipca = IncrementalPCA(n_components=20, batch_size=batch_size).fit(X)
all_components.append(ipca.components_)
for components_i, components_j in zip(all_components[:-1], all_components[1:]):
assert_allclose_dense_sparse(components_i, components_j)
def test_incremental_pca_partial_fit():
# Test that fit and partial_fit get equivalent results.
rng = np.random.RandomState(1999)
n, p = 50, 3
X = rng.randn(n, p) # spherical data
X[:, 1] *= 0.00001 # make middle component relatively small
X += [5, 4, 3] # make a large mean
# same check that we can find the original data from the transformed
# signal (since the data is almost of rank n_components)
batch_size = 10
ipca = IncrementalPCA(n_components=2, batch_size=batch_size).fit(X)
pipca = IncrementalPCA(n_components=2, batch_size=batch_size)
# Add one to make sure endpoint is included
batch_itr = np.arange(0, n + 1, batch_size)
for i, j in zip(batch_itr[:-1], batch_itr[1:]):
pipca.partial_fit(X[i:j, :])
assert_almost_equal(ipca.components_, pipca.components_, decimal=3)
def test_incremental_pca_against_pca_iris():
# Test that IncrementalPCA and PCA are approximate (to a sign flip).
X = iris.data
Y_pca = PCA(n_components=2).fit_transform(X)
Y_ipca = IncrementalPCA(n_components=2, batch_size=25).fit_transform(X)
assert_almost_equal(np.abs(Y_pca), np.abs(Y_ipca), 1)
def test_incremental_pca_against_pca_random_data():
# Test that IncrementalPCA and PCA are approximate (to a sign flip).
rng = np.random.RandomState(1999)
n_samples = 100
n_features = 3
X = rng.randn(n_samples, n_features) + 5 * rng.rand(1, n_features)
Y_pca = PCA(n_components=3).fit_transform(X)
Y_ipca = IncrementalPCA(n_components=3, batch_size=25).fit_transform(X)
assert_almost_equal(np.abs(Y_pca), np.abs(Y_ipca), 1)
def test_explained_variances():
# Test that PCA and IncrementalPCA calculations match
X = datasets.make_low_rank_matrix(
1000, 100, tail_strength=0.0, effective_rank=10, random_state=1999
)
prec = 3
n_samples, n_features = X.shape
for nc in [None, 99]:
pca = PCA(n_components=nc).fit(X)
ipca = IncrementalPCA(n_components=nc, batch_size=100).fit(X)
assert_almost_equal(
pca.explained_variance_, ipca.explained_variance_, decimal=prec
)
assert_almost_equal(
pca.explained_variance_ratio_, ipca.explained_variance_ratio_, decimal=prec
)
assert_almost_equal(pca.noise_variance_, ipca.noise_variance_, decimal=prec)
def test_singular_values():
# Check that the IncrementalPCA output has the correct singular values
rng = np.random.RandomState(0)
n_samples = 1000
n_features = 100
X = datasets.make_low_rank_matrix(
n_samples, n_features, tail_strength=0.0, effective_rank=10, random_state=rng
)
pca = PCA(n_components=10, svd_solver="full", random_state=rng).fit(X)
ipca = IncrementalPCA(n_components=10, batch_size=100).fit(X)
assert_array_almost_equal(pca.singular_values_, ipca.singular_values_, 2)
# Compare to the Frobenius norm
X_pca = pca.transform(X)
X_ipca = ipca.transform(X)
assert_array_almost_equal(
np.sum(pca.singular_values_ ** 2.0), np.linalg.norm(X_pca, "fro") ** 2.0, 12
)
assert_array_almost_equal(
np.sum(ipca.singular_values_ ** 2.0), np.linalg.norm(X_ipca, "fro") ** 2.0, 2
)
# Compare to the 2-norms of the score vectors
assert_array_almost_equal(
pca.singular_values_, np.sqrt(np.sum(X_pca ** 2.0, axis=0)), 12
)
assert_array_almost_equal(
ipca.singular_values_, np.sqrt(np.sum(X_ipca ** 2.0, axis=0)), 2
)
# Set the singular values and see what we get back
rng = np.random.RandomState(0)
n_samples = 100
n_features = 110
X = datasets.make_low_rank_matrix(
n_samples, n_features, tail_strength=0.0, effective_rank=3, random_state=rng
)
pca = PCA(n_components=3, svd_solver="full", random_state=rng)
ipca = IncrementalPCA(n_components=3, batch_size=100)
X_pca = pca.fit_transform(X)
X_pca /= np.sqrt(np.sum(X_pca ** 2.0, axis=0))
X_pca[:, 0] *= 3.142
X_pca[:, 1] *= 2.718
X_hat = np.dot(X_pca, pca.components_)
pca.fit(X_hat)
ipca.fit(X_hat)
assert_array_almost_equal(pca.singular_values_, [3.142, 2.718, 1.0], 14)
assert_array_almost_equal(ipca.singular_values_, [3.142, 2.718, 1.0], 14)
def test_whitening():
# Test that PCA and IncrementalPCA transforms match to sign flip.
X = datasets.make_low_rank_matrix(
1000, 10, tail_strength=0.0, effective_rank=2, random_state=1999
)
prec = 3
n_samples, n_features = X.shape
for nc in [None, 9]:
pca = PCA(whiten=True, n_components=nc).fit(X)
ipca = IncrementalPCA(whiten=True, n_components=nc, batch_size=250).fit(X)
Xt_pca = pca.transform(X)
Xt_ipca = ipca.transform(X)
assert_almost_equal(np.abs(Xt_pca), np.abs(Xt_ipca), decimal=prec)
Xinv_ipca = ipca.inverse_transform(Xt_ipca)
Xinv_pca = pca.inverse_transform(Xt_pca)
assert_almost_equal(X, Xinv_ipca, decimal=prec)
assert_almost_equal(X, Xinv_pca, decimal=prec)
assert_almost_equal(Xinv_pca, Xinv_ipca, decimal=prec)
def test_incremental_pca_partial_fit_float_division():
# Test to ensure float division is used in all versions of Python
# (non-regression test for issue #9489)
rng = np.random.RandomState(0)
A = rng.randn(5, 3) + 2
B = rng.randn(7, 3) + 5
pca = IncrementalPCA(n_components=2)
pca.partial_fit(A)
# Set n_samples_seen_ to be a floating point number instead of an int
pca.n_samples_seen_ = float(pca.n_samples_seen_)
pca.partial_fit(B)
singular_vals_float_samples_seen = pca.singular_values_
pca2 = IncrementalPCA(n_components=2)
pca2.partial_fit(A)
pca2.partial_fit(B)
singular_vals_int_samples_seen = pca2.singular_values_
np.testing.assert_allclose(
singular_vals_float_samples_seen, singular_vals_int_samples_seen
)
def test_incremental_pca_fit_overflow_error():
# Test for overflow error on Windows OS
# (non-regression test for issue #17693)
rng = np.random.RandomState(0)
A = rng.rand(500000, 2)
ipca = IncrementalPCA(n_components=2, batch_size=10000)
ipca.fit(A)
pca = PCA(n_components=2)
pca.fit(A)
np.testing.assert_allclose(ipca.singular_values_, pca.singular_values_)
def test_incremental_pca_feature_names_out():
"""Check feature names out for IncrementalPCA."""
ipca = IncrementalPCA(n_components=2).fit(iris.data)
names = ipca.get_feature_names_out()
assert_array_equal([f"incrementalpca{i}" for i in range(2)], names)
|
|
# python client.py
import json
import hmac
import time
import base64
import hashlib
import requests
from datetime import datetime, timedelta
one_week_ago = datetime.today() - timedelta(days=7)
class BitfinexError(Exception):
pass
class Base(object):
# A base class for the API Client methods that handles interaction with the requests library.
api_url = 'https://api.bitfinex.com/'
exception_on_error = True
def __init__(self, proxydict=None, *args, **kwargs):
self.proxydict = proxydict
def _get(self, *args, **kwargs):
# Make a GET request.
return self._request(requests.get, *args, **kwargs)
def _post(self, *args, **kwargs):
# Make a POST request.
data = self._default_data()
data.update(kwargs.get('data') or {})
kwargs['data'] = data
return self._request(requests.post, *args, **kwargs)
def _default_data(self):
# Default data for a POST request.
return {}
def _request(self, func, url, *args, **kwargs):
        # Make a generic request, adding in any proxy defined by the instance.
        # Raises a 'requests.HTTPError' if the response status isn't 200.
        # Raises a 'BitfinexError' if the response contains a json encoded error message.
        return_json = kwargs.pop('return_json', False)
        url = self.api_url + url
        if 'proxies' not in kwargs:
            kwargs['proxies'] = self.proxydict
        response = func(url, *args, **kwargs)
        # Check for error, raising an exception if appropriate.
        response.raise_for_status()
try:
json_response = response.json()
except ValueError:
json_response = None
if isinstance(json_response, dict):
error = json_response.get('error')
if error:
raise BitfinexError(error)
if return_json:
if json_response is None:
raise BitfinexError(
"Could not decode json for: " + response.text)
return json_response
return response
class Public(Base):
def ticker(self, symbol="btcusd"):
# The ticker is a high level overview of the state of the market.
url = "v1/pubticker/" + symbol
return self._get(url, return_json=True)
def last_trade(self, symbol="btcusd"):
# Shortcut for last trade
return float(self.ticker(symbol)['last_price'])
def stats(self, symbol="btcusd"):
# Various statistics about the requested pair.
url = "v1/stats/" + symbol
return self._get(url, return_json=True)
def funding_book(self, currency="USD"):
# Get the full margin funding book
url = "v1/lendbook/" + currency
return self._get(url, return_json=True)
def order_book(self, symbol="btcusd"):
# Get the full order book.
url = "v1/book/" + symbol
return self._get(url, return_json=True)
def trades(self, symbol="btcusd"):
# Get a list of the most recent trades for the given symbol.
url = "v1/trades/" + symbol
return self._get(url, return_json=True)
def lends(self, currency="USD"):
# Get a list of the most recent funding data for the given currency: total amount provided and Flash Return Rate (in % by 365 days) over time.
url = "v1/lends/" + currency
return self._get(url, return_json=True)
def symbols(self):
# A list of symbol names.
return self._get("/v1/symbols", return_json=True)
def symbols_details(self):
# Get a list of valid symbol IDs and the pair details.
return self._get("/v1/symbols_details", return_json=True)
class Private(Public):
def __init__(self, key, secret, *args, **kwargs):
        # Stores the key and secret which are used when making POST requests to Bitfinex.
super(Private, self).__init__(
key=key, secret=secret, *args, **kwargs)
self.key = key
self.secret = secret
def _get_nonce(self):
# Get a unique nonce for the bitfinex API.
# This isn't a thread-safe function.
nonce = getattr(self, '_nonce', 0)
if nonce:
nonce += 1
self._nonce = max(int(time.time()), nonce)
return self._nonce
def _default_data(self, *args, **kwargs):
# Generate a one-time signature and other data required to send a secure POST request to the Bitfinex API.
data = {}
nonce = self._get_nonce()
data['nonce'] = str(nonce)
data['request'] = args[0]
return data
def _post(self, *args, **kwargs):
# Make a POST request.
data = kwargs.pop('data', {})
data.update(self._default_data(*args, **kwargs))
key = self.key
secret = self.secret
payload_json = json.dumps(data)
payload = base64.b64encode(payload_json)
sig = hmac.new(secret, payload, hashlib.sha384)
sig = sig.hexdigest()
headers = {
'X-BFX-APIKEY' : key,
'X-BFX-PAYLOAD' : payload,
'X-BFX-SIGNATURE' : sig
}
kwargs['headers'] = headers
return self._request(requests.post, *args, **kwargs)
def account_infos(self):
# Return information about your account
return self._post("/v1/account_infos", return_json=True)
def account_fees(self):
# See the fees applied to your withdrawals
return self._post("/v1/account_fees", return_json=True)
def summary(self):
# Returns a 30-day summary of your trading volume and return on margin funding
return self._post("/v1/summary", return_json=True)
def deposit(self, method, wallet_name, renew=0):
data = {'method': method,
'wallet_name': wallet_name,
'renew': renew
}
return self._post("/v1/deposit/new", data=data, return_json=True)
def key_info(self):
# Check the permissions of the key being used to generate this request.
return self._post("/v1/key_info",return_json=True)
def margin_infos(self):
# See your trading wallet information for margin trading.
return self._post("/v1/margin_infos",return_json=True)
def balances(self):
# See your balances
return self._post("/v1/balances",return_json=True)
def transfer(self, amount, currency, wallet_from, wallet_to):
# Allow you to move available balances between your wallets.
data = {'amount': amount,
'currency': currency,
'walletfrom': wallet_from,
'walletto': wallet_to
}
return self._post("/v1/transfer", data=data, return_json=True)
    def withdraw(self, withdraw_type, wallet_selected, amount, address,
                 payment_id="", account_name="", account_number="",
                 swift_code="", bank_name="", bank_address="", bank_city="",
                 bank_country="", detail_payment="", express_wire=0,
                 intermediary_bank_name="", intermediary_bank_address="",
                 intermediary_bank_city="", intermediary_bank_country="",
                 intermediary_bank_account="", intermediary_bank_swift=""):
        # Allow you to request a withdrawal from one of your wallets.
data = {'withdraw_type': withdraw_type,
'walletselected': wallet_selected,
'amount': amount,
'address': address,
'payment_id': payment_id,
'account_name': account_name,
'account_number': account_number,
'swift': swift_code,
'bank_name': bank_name,
'bank_address': bank_address,
'bank_city': bank_city,
'bank_country': bank_country,
'detail_payment': detail_payment,
'expressWire': express_wire,
'intermediary_bank_name': intermediary_bank_name,
'intermediary_bank_address': intermediary_bank_address,
'intermediary_bank_city': intermediary_bank_city,
'intermediary_bank_country': intermediary_bank_country,
'intermediary_bank_account': intermediary_bank_account,
'intermediary_bank_swift': intermediary_bank_swift
}
return self._post("/v1/withdraw", data=data, return_json=True)
####### Orders #######
def new_order(self, symbol, amount, price, side, order_type):
# Submit a new Order
data = {'symbol': symbol,
'amount': amount,
'price': price,
'exchange': 'bitfinex',
'side': side,
'type': order_type
}
return self._post("/v1/order/new", data=data, return_json=True)
def multiple_orders(self):
# Submit several new orders at once.
return
def cancel_order(self, order_id):
# Cancel an order.
data = {'order_id': order_id}
return self._post("/v1/order/cancel",data, return_json=True)
def cancel_multiple_orders(self, order_ids):
# Cancel multiples orders at once.
data = {'order_ids': order_ids}
req = self._post("/v1/order/cancel/multi",data, return_json=True)
if req.content == "Orders cancelled":
return True
else:
return False
def cancel_all_orders(self):
# Cancel all active orders at once.
req = self._post('/v1/order/cancel/all', return_json=False)
if req.content == "All orders cancelled":
return True
else:
return False
def replace_order(self, order_id, symbol, amount, price, side, order_type):
# Replace an order with a new one.
data = {'order_id': order_id,
'symbol': symbol,
'amount': amount,
'price': price,
'exchange': 'bitfinex',
'side': side,
'type': order_type
}
        return self._post('/v1/order/cancel/replace', data=data, return_json=True)
def order_status(self, order_id):
# Get the status of an order.
data = {'order_id': order_id}
        return self._post('/v1/order/status', data=data, return_json=True)
def active_orders(self):
# Returns an array of the results of `/order/status` for all your live orders.
return self._post("/v1/orders", return_json=True)
def order_history(self, limit=10):
# View your latest inactive orders
# Limited to last 3 days and 1 request per minute.
data = {'limit': limit}
return self._post("/v1/orders/hist", return_json=True)
####### Positions #######
def active_positions(self):
# View your active positions.
return self._post("/v1/positions", return_json=True)
def claim_position(self, position_id, amount):
        '''
        A position can be claimed if:

        It is a long position: the amount in the last unit of the position
        pair that you have in your trading wallet AND/OR the realized profit
        of the position is greater or equal to the purchase amount of the
        position (base price * position amount) and the funds which need to
        be returned. For example, for a long BTCUSD position, you can claim
        the position if the amount of USD you have in the trading wallet is
        greater than the base price * the position amount and the funds used.

        It is a short position: the amount in the first unit of the position
        pair that you have in your trading wallet is greater or equal to the
        amount of the position and the margin funding used.
        '''
data = {'position_id': position_id,
'amount': amount
}
return self._post("/v1/position/claim", return_json=True)
####### Historical Data #######
    def balance_history(self, currency, since=one_week_ago,
                        until=datetime.today(), limit=100, wallet=None):
        # View all of your balance ledger entries.
        data = {'currency': currency,
                'since': since,
                'until': until,
                'limit': limit,
                'wallet': wallet
                }
        return self._post("/v1/history", data=data, return_json=True)
def deposit_withdrawl_history(self, currency, method="bitcoin",since=one_week_ago, until=datetime.today(), limit=100):
# View your past deposits/withdrawals.
data = {'currency': currency,
'method': method,
'since': since,
'until': until,
'limit': limit
}
return self._post("/v1/history/movements", return_json=True)
def past_trades(self, symbol="BTCUSD", timestamp=one_week_ago, until=datetime.today(), limit_trades=50, reverse=0):
data = {'symbol': symbol,
'timestamp': timestamp,
'until': until,
'limit_trades': limit_trades,
'reverse': reverse
}
return self._post("/v1/mytrades", return_json=True)
####### Margin Funding #######
def new_offer(self):
# Submit a new Offer
return
def cancel_offer(self):
return
def offer_status(self):
return
def active_credits(self):
return
def offers(self):
return
def offers_history(self):
return
def past_funding_trades(self):
return
def taken_funds(self):
# Active Funding Used in a margin position
return
def unused_taken_funds(self):
# Active Funding Not Used in a margin position
# View your funding currently borrowed and not used (available for a new margin position).
return
def total_taken_funds(self):
# View the total of your active funding used in your position(s).
return
def close_margin_funding(self):
return
def basket_manage(self):
return
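# Illustrative usage sketch (not part of the original client). The API key and
# secret are placeholders; the public endpoints need no credentials.
if __name__ == '__main__':
    public = Public()
    print(public.last_trade('btcusd'))        # latest BTC/USD trade price
    private = Private('YOUR_API_KEY', 'YOUR_API_SECRET')
    print(private.balances())                 # wallet balances for the account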
|
|
from __future__ import absolute_import, division, print_function
import functools
import sys
import traceback
from tornado.concurrent import Future
from tornado import gen
from tornado.httpclient import HTTPError, HTTPRequest
from tornado.log import gen_log, app_log
from tornado.template import DictLoader
from tornado.testing import AsyncHTTPTestCase, gen_test, bind_unused_port, ExpectLog
from tornado.test.util import unittest, skipBefore35, exec_test
from tornado.web import Application, RequestHandler
try:
import tornado.websocket # noqa
from tornado.util import _websocket_mask_python
except ImportError:
# The unittest module presents misleading errors on ImportError
# (it acts as if websocket_test could not be found, hiding the underlying
# error). If we get an ImportError here (which could happen due to
# TORNADO_EXTENSION=1), print some extra information before failing.
traceback.print_exc()
raise
from tornado.websocket import WebSocketHandler, websocket_connect, WebSocketError
try:
from tornado import speedups
except ImportError:
speedups = None
class TestWebSocketHandler(WebSocketHandler):
"""Base class for testing handlers that exposes the on_close event.
This allows for deterministic cleanup of the associated socket.
"""
def initialize(self, close_future, compression_options=None):
self.close_future = close_future
self.compression_options = compression_options
def get_compression_options(self):
return self.compression_options
def on_close(self):
self.close_future.set_result((self.close_code, self.close_reason))
class EchoHandler(TestWebSocketHandler):
def on_message(self, message):
self.write_message(message, isinstance(message, bytes))
class ErrorInOnMessageHandler(TestWebSocketHandler):
def on_message(self, message):
1 / 0
class HeaderHandler(TestWebSocketHandler):
def open(self):
methods_to_test = [
functools.partial(self.write, 'This should not work'),
functools.partial(self.redirect, 'http://localhost/elsewhere'),
functools.partial(self.set_header, 'X-Test', ''),
functools.partial(self.set_cookie, 'Chocolate', 'Chip'),
functools.partial(self.set_status, 503),
self.flush,
self.finish,
]
for method in methods_to_test:
try:
# In a websocket context, many RequestHandler methods
# raise RuntimeErrors.
method()
raise Exception("did not get expected exception")
except RuntimeError:
pass
self.write_message(self.request.headers.get('X-Test', ''))
class HeaderEchoHandler(TestWebSocketHandler):
def set_default_headers(self):
self.set_header("X-Extra-Response-Header", "Extra-Response-Value")
def prepare(self):
for k, v in self.request.headers.get_all():
if k.lower().startswith('x-test'):
self.set_header(k, v)
class NonWebSocketHandler(RequestHandler):
def get(self):
self.write('ok')
class CloseReasonHandler(TestWebSocketHandler):
def open(self):
self.on_close_called = False
self.close(1001, "goodbye")
class AsyncPrepareHandler(TestWebSocketHandler):
@gen.coroutine
def prepare(self):
yield gen.moment
def on_message(self, message):
self.write_message(message)
class PathArgsHandler(TestWebSocketHandler):
def open(self, arg):
self.write_message(arg)
class CoroutineOnMessageHandler(TestWebSocketHandler):
def initialize(self, close_future, compression_options=None):
super(CoroutineOnMessageHandler, self).initialize(close_future,
compression_options)
self.sleeping = 0
@gen.coroutine
def on_message(self, message):
if self.sleeping > 0:
self.write_message('another coroutine is already sleeping')
self.sleeping += 1
yield gen.sleep(0.01)
self.sleeping -= 1
self.write_message(message)
class RenderMessageHandler(TestWebSocketHandler):
def on_message(self, message):
self.write_message(self.render_string('message.html', message=message))
class WebSocketBaseTestCase(AsyncHTTPTestCase):
@gen.coroutine
def ws_connect(self, path, **kwargs):
ws = yield websocket_connect(
'ws://127.0.0.1:%d%s' % (self.get_http_port(), path),
**kwargs)
raise gen.Return(ws)
@gen.coroutine
def close(self, ws):
"""Close a websocket connection and wait for the server side.
If we don't wait here, there are sometimes leak warnings in the
tests.
"""
ws.close()
yield self.close_future
class WebSocketTest(WebSocketBaseTestCase):
def get_app(self):
self.close_future = Future()
return Application([
('/echo', EchoHandler, dict(close_future=self.close_future)),
('/non_ws', NonWebSocketHandler),
('/header', HeaderHandler, dict(close_future=self.close_future)),
('/header_echo', HeaderEchoHandler,
dict(close_future=self.close_future)),
('/close_reason', CloseReasonHandler,
dict(close_future=self.close_future)),
('/error_in_on_message', ErrorInOnMessageHandler,
dict(close_future=self.close_future)),
('/async_prepare', AsyncPrepareHandler,
dict(close_future=self.close_future)),
('/path_args/(.*)', PathArgsHandler,
dict(close_future=self.close_future)),
('/coroutine', CoroutineOnMessageHandler,
dict(close_future=self.close_future)),
('/render', RenderMessageHandler,
dict(close_future=self.close_future)),
], template_loader=DictLoader({
'message.html': '<b>{{ message }}</b>',
}))
def tearDown(self):
super(WebSocketTest, self).tearDown()
RequestHandler._template_loaders.clear()
def test_http_request(self):
# WS server, HTTP client.
response = self.fetch('/echo')
self.assertEqual(response.code, 400)
def test_bad_websocket_version(self):
response = self.fetch('/echo',
headers={'Connection': 'Upgrade',
'Upgrade': 'WebSocket',
'Sec-WebSocket-Version': '12'})
self.assertEqual(response.code, 426)
@gen_test
def test_websocket_gen(self):
ws = yield self.ws_connect('/echo')
yield ws.write_message('hello')
response = yield ws.read_message()
self.assertEqual(response, 'hello')
yield self.close(ws)
def test_websocket_callbacks(self):
websocket_connect(
'ws://127.0.0.1:%d/echo' % self.get_http_port(),
io_loop=self.io_loop, callback=self.stop)
ws = self.wait().result()
ws.write_message('hello')
ws.read_message(self.stop)
response = self.wait().result()
self.assertEqual(response, 'hello')
self.close_future.add_done_callback(lambda f: self.stop())
ws.close()
self.wait()
@gen_test
def test_binary_message(self):
ws = yield self.ws_connect('/echo')
ws.write_message(b'hello \xe9', binary=True)
response = yield ws.read_message()
self.assertEqual(response, b'hello \xe9')
yield self.close(ws)
@gen_test
def test_unicode_message(self):
ws = yield self.ws_connect('/echo')
ws.write_message(u'hello \u00e9')
response = yield ws.read_message()
self.assertEqual(response, u'hello \u00e9')
yield self.close(ws)
@gen_test
def test_render_message(self):
ws = yield self.ws_connect('/render')
ws.write_message('hello')
response = yield ws.read_message()
self.assertEqual(response, '<b>hello</b>')
yield self.close(ws)
@gen_test
def test_error_in_on_message(self):
ws = yield self.ws_connect('/error_in_on_message')
ws.write_message('hello')
with ExpectLog(app_log, "Uncaught exception"):
response = yield ws.read_message()
self.assertIs(response, None)
yield self.close(ws)
@gen_test
def test_websocket_http_fail(self):
with self.assertRaises(HTTPError) as cm:
yield self.ws_connect('/notfound')
self.assertEqual(cm.exception.code, 404)
@gen_test
def test_websocket_http_success(self):
with self.assertRaises(WebSocketError):
yield self.ws_connect('/non_ws')
@gen_test
def test_websocket_network_fail(self):
sock, port = bind_unused_port()
sock.close()
with self.assertRaises(IOError):
with ExpectLog(gen_log, ".*"):
yield websocket_connect(
'ws://127.0.0.1:%d/' % port,
io_loop=self.io_loop,
connect_timeout=3600)
@gen_test
def test_websocket_close_buffered_data(self):
ws = yield websocket_connect(
'ws://127.0.0.1:%d/echo' % self.get_http_port())
ws.write_message('hello')
ws.write_message('world')
# Close the underlying stream.
ws.stream.close()
yield self.close_future
@gen_test
def test_websocket_headers(self):
# Ensure that arbitrary headers can be passed through websocket_connect.
ws = yield websocket_connect(
HTTPRequest('ws://127.0.0.1:%d/header' % self.get_http_port(),
headers={'X-Test': 'hello'}))
response = yield ws.read_message()
self.assertEqual(response, 'hello')
yield self.close(ws)
@gen_test
def test_websocket_header_echo(self):
# Ensure that headers can be returned in the response.
# Specifically, that arbitrary headers passed through websocket_connect
# can be returned.
ws = yield websocket_connect(
HTTPRequest('ws://127.0.0.1:%d/header_echo' % self.get_http_port(),
headers={'X-Test-Hello': 'hello'}))
self.assertEqual(ws.headers.get('X-Test-Hello'), 'hello')
self.assertEqual(ws.headers.get('X-Extra-Response-Header'), 'Extra-Response-Value')
yield self.close(ws)
@gen_test
def test_server_close_reason(self):
ws = yield self.ws_connect('/close_reason')
msg = yield ws.read_message()
# A message of None means the other side closed the connection.
self.assertIs(msg, None)
self.assertEqual(ws.close_code, 1001)
self.assertEqual(ws.close_reason, "goodbye")
# The on_close callback is called no matter which side closed.
code, reason = yield self.close_future
# The client echoed the close code it received to the server,
# so the server's close code (returned via close_future) is
# the same.
self.assertEqual(code, 1001)
@gen_test
def test_client_close_reason(self):
ws = yield self.ws_connect('/echo')
ws.close(1001, 'goodbye')
code, reason = yield self.close_future
self.assertEqual(code, 1001)
self.assertEqual(reason, 'goodbye')
@gen_test
def test_async_prepare(self):
# Previously, an async prepare method triggered a bug that would
# result in a timeout on test shutdown (and a memory leak).
ws = yield self.ws_connect('/async_prepare')
ws.write_message('hello')
res = yield ws.read_message()
self.assertEqual(res, 'hello')
@gen_test
def test_path_args(self):
ws = yield self.ws_connect('/path_args/hello')
res = yield ws.read_message()
self.assertEqual(res, 'hello')
@gen_test
def test_coroutine(self):
ws = yield self.ws_connect('/coroutine')
# Send both messages immediately, coroutine must process one at a time.
yield ws.write_message('hello1')
yield ws.write_message('hello2')
res = yield ws.read_message()
self.assertEqual(res, 'hello1')
res = yield ws.read_message()
self.assertEqual(res, 'hello2')
@gen_test
def test_check_origin_valid_no_path(self):
port = self.get_http_port()
url = 'ws://127.0.0.1:%d/echo' % port
headers = {'Origin': 'http://127.0.0.1:%d' % port}
ws = yield websocket_connect(HTTPRequest(url, headers=headers),
io_loop=self.io_loop)
ws.write_message('hello')
response = yield ws.read_message()
self.assertEqual(response, 'hello')
yield self.close(ws)
@gen_test
def test_check_origin_valid_with_path(self):
port = self.get_http_port()
url = 'ws://127.0.0.1:%d/echo' % port
headers = {'Origin': 'http://127.0.0.1:%d/something' % port}
ws = yield websocket_connect(HTTPRequest(url, headers=headers),
io_loop=self.io_loop)
ws.write_message('hello')
response = yield ws.read_message()
self.assertEqual(response, 'hello')
yield self.close(ws)
@gen_test
def test_check_origin_invalid_partial_url(self):
port = self.get_http_port()
url = 'ws://127.0.0.1:%d/echo' % port
headers = {'Origin': '127.0.0.1:%d' % port}
with self.assertRaises(HTTPError) as cm:
yield websocket_connect(HTTPRequest(url, headers=headers),
io_loop=self.io_loop)
self.assertEqual(cm.exception.code, 403)
@gen_test
def test_check_origin_invalid(self):
port = self.get_http_port()
url = 'ws://127.0.0.1:%d/echo' % port
# Host is 127.0.0.1, which should not be accessible from some other
# domain
headers = {'Origin': 'http://somewhereelse.com'}
with self.assertRaises(HTTPError) as cm:
yield websocket_connect(HTTPRequest(url, headers=headers),
io_loop=self.io_loop)
self.assertEqual(cm.exception.code, 403)
@gen_test
def test_check_origin_invalid_subdomains(self):
port = self.get_http_port()
url = 'ws://localhost:%d/echo' % port
# Subdomains should be disallowed by default. If we could pass a
# resolver to websocket_connect we could test sibling domains as well.
headers = {'Origin': 'http://subtenant.localhost'}
with self.assertRaises(HTTPError) as cm:
yield websocket_connect(HTTPRequest(url, headers=headers),
io_loop=self.io_loop)
self.assertEqual(cm.exception.code, 403)
if sys.version_info >= (3, 5):
NativeCoroutineOnMessageHandler = exec_test(globals(), locals(), """
class NativeCoroutineOnMessageHandler(TestWebSocketHandler):
def initialize(self, close_future, compression_options=None):
super().initialize(close_future, compression_options)
self.sleeping = 0
async def on_message(self, message):
if self.sleeping > 0:
self.write_message('another coroutine is already sleeping')
self.sleeping += 1
await gen.sleep(0.01)
self.sleeping -= 1
self.write_message(message)""")['NativeCoroutineOnMessageHandler']
class WebSocketNativeCoroutineTest(WebSocketBaseTestCase):
def get_app(self):
self.close_future = Future()
return Application([
('/native', NativeCoroutineOnMessageHandler,
dict(close_future=self.close_future))])
@skipBefore35
@gen_test
def test_native_coroutine(self):
ws = yield self.ws_connect('/native')
# Send both messages immediately, coroutine must process one at a time.
yield ws.write_message('hello1')
yield ws.write_message('hello2')
res = yield ws.read_message()
self.assertEqual(res, 'hello1')
res = yield ws.read_message()
self.assertEqual(res, 'hello2')
class CompressionTestMixin(object):
MESSAGE = 'Hello world. Testing 123 123'
def get_app(self):
self.close_future = Future()
return Application([
('/echo', EchoHandler, dict(
close_future=self.close_future,
compression_options=self.get_server_compression_options())),
])
def get_server_compression_options(self):
return None
def get_client_compression_options(self):
return None
@gen_test
def test_message_sizes(self):
ws = yield self.ws_connect(
'/echo',
compression_options=self.get_client_compression_options())
# Send the same message three times so we can measure the
# effect of the context_takeover options.
for i in range(3):
ws.write_message(self.MESSAGE)
response = yield ws.read_message()
self.assertEqual(response, self.MESSAGE)
self.assertEqual(ws.protocol._message_bytes_out, len(self.MESSAGE) * 3)
self.assertEqual(ws.protocol._message_bytes_in, len(self.MESSAGE) * 3)
self.verify_wire_bytes(ws.protocol._wire_bytes_in,
ws.protocol._wire_bytes_out)
yield self.close(ws)
class UncompressedTestMixin(CompressionTestMixin):
"""Specialization of CompressionTestMixin when we expect no compression."""
def verify_wire_bytes(self, bytes_in, bytes_out):
        # Each message carries a 2-byte frame header in both directions;
        # outgoing (client-to-server) frames also include the 4-byte mask key.
self.assertEqual(bytes_out, 3 * (len(self.MESSAGE) + 6))
self.assertEqual(bytes_in, 3 * (len(self.MESSAGE) + 2))
class NoCompressionTest(UncompressedTestMixin, WebSocketBaseTestCase):
pass
# If only one side tries to compress, the extension is not negotiated.
class ServerOnlyCompressionTest(UncompressedTestMixin, WebSocketBaseTestCase):
def get_server_compression_options(self):
return {}
class ClientOnlyCompressionTest(UncompressedTestMixin, WebSocketBaseTestCase):
def get_client_compression_options(self):
return {}
class DefaultCompressionTest(CompressionTestMixin, WebSocketBaseTestCase):
def get_server_compression_options(self):
return {}
def get_client_compression_options(self):
return {}
def verify_wire_bytes(self, bytes_in, bytes_out):
self.assertLess(bytes_out, 3 * (len(self.MESSAGE) + 6))
self.assertLess(bytes_in, 3 * (len(self.MESSAGE) + 2))
        # Bytes out include the 4-byte mask key per message (3 messages -> 12).
self.assertEqual(bytes_out, bytes_in + 12)
class MaskFunctionMixin(object):
# Subclasses should define self.mask(mask, data)
def test_mask(self):
self.assertEqual(self.mask(b'abcd', b''), b'')
self.assertEqual(self.mask(b'abcd', b'b'), b'\x03')
self.assertEqual(self.mask(b'abcd', b'54321'), b'TVPVP')
self.assertEqual(self.mask(b'ZXCV', b'98765432'), b'c`t`olpd')
# Include test cases with \x00 bytes (to ensure that the C
# extension isn't depending on null-terminated strings) and
# bytes with the high bit set (to smoke out signedness issues).
self.assertEqual(self.mask(b'\x00\x01\x02\x03',
b'\xff\xfb\xfd\xfc\xfe\xfa'),
b'\xff\xfa\xff\xff\xfe\xfb')
self.assertEqual(self.mask(b'\xff\xfb\xfd\xfc',
b'\x00\x01\x02\x03\x04\x05'),
b'\xff\xfa\xff\xff\xfb\xfe')
class PythonMaskFunctionTest(MaskFunctionMixin, unittest.TestCase):
def mask(self, mask, data):
return _websocket_mask_python(mask, data)
@unittest.skipIf(speedups is None, "tornado.speedups module not present")
class CythonMaskFunctionTest(MaskFunctionMixin, unittest.TestCase):
def mask(self, mask, data):
return speedups.websocket_mask(mask, data)
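# Added note (not part of the original test module): RFC 6455 masking XORs
# every payload byte with the 4-byte mask key repeated cyclically, which is
# what the expected values in MaskFunctionMixin encode. A rough pure-Python
# equivalent, compatible with both Python 2 and 3, would be:
#
#     def xor_mask(key, data):
#         key, data = bytearray(key), bytearray(data)
#         return bytes(bytearray(b ^ key[i % 4] for i, b in enumerate(data)))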
class ServerPeriodicPingTest(WebSocketBaseTestCase):
def get_app(self):
class PingHandler(TestWebSocketHandler):
def on_pong(self, data):
self.write_message("got pong")
self.close_future = Future()
return Application([
('/', PingHandler, dict(close_future=self.close_future)),
], websocket_ping_interval=0.01)
@gen_test
def test_server_ping(self):
ws = yield self.ws_connect('/')
for i in range(3):
response = yield ws.read_message()
self.assertEqual(response, "got pong")
yield self.close(ws)
# TODO: test that the connection gets closed if ping responses stop.
class ClientPeriodicPingTest(WebSocketBaseTestCase):
def get_app(self):
class PingHandler(TestWebSocketHandler):
def on_ping(self, data):
self.write_message("got ping")
self.close_future = Future()
return Application([
('/', PingHandler, dict(close_future=self.close_future)),
])
@gen_test
def test_client_ping(self):
ws = yield self.ws_connect('/', ping_interval=0.01)
for i in range(3):
response = yield ws.read_message()
self.assertEqual(response, "got ping")
yield self.close(ws)
# TODO: test that the connection gets closed if ping responses stop.
class MaxMessageSizeTest(WebSocketBaseTestCase):
def get_app(self):
self.close_future = Future()
return Application([
('/', EchoHandler, dict(close_future=self.close_future)),
], websocket_max_message_size=1024)
@gen_test
def test_large_message(self):
ws = yield self.ws_connect('/')
# Write a message that is allowed.
msg = 'a' * 1024
ws.write_message(msg)
resp = yield ws.read_message()
self.assertEqual(resp, msg)
# Write a message that is too large.
ws.write_message(msg + 'b')
resp = yield ws.read_message()
# A message of None means the other side closed the connection.
self.assertIs(resp, None)
self.assertEqual(ws.close_code, 1009)
self.assertEqual(ws.close_reason, "message too big")
# TODO: Needs tests of messages split over multiple
# continuation frames.
|
|
# -*- coding: utf-8 -*-
#
# Copyright (C)2006-2009 Edgewall Software
# Copyright (C) 2006 Christopher Lenz <[email protected]>
# All rights reserved.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution. The terms
# are also available at http://trac.edgewall.org/wiki/TracLicense.
#
# This software consists of voluntary contributions made by many
# individuals. For the exact contribution history, see the revision
# history and logs, available at http://trac.edgewall.org/log/.
"""Various utility functions and classes that support common presentation
tasks such as grouping or pagination.
"""
from math import ceil
import re
__all__ = ['captioned_button', 'classes', 'first_last', 'group', 'istext',
'prepared_paginate', 'paginate', 'Paginator']
__no_apidoc__ = 'prepared_paginate'
def captioned_button(req, symbol, text):
"""Return symbol and text or only symbol, according to user preferences."""
return symbol if req.session.get('ui.use_symbols') \
else u'%s %s' % (symbol, text)
def classes(*args, **kwargs):
"""Helper function for dynamically assembling a list of CSS class names
in templates.
Any positional arguments are added to the list of class names. All
positional arguments must be strings:
>>> classes('foo', 'bar')
u'foo bar'
In addition, the names of any supplied keyword arguments are added if they
have a truth value:
>>> classes('foo', bar=True)
u'foo bar'
>>> classes('foo', bar=False)
u'foo'
If none of the arguments are added to the list, this function returns
`None`:
>>> classes(bar=False)
"""
classes = list(filter(None, args)) + [k for k, v in kwargs.items() if v]
if not classes:
return None
return u' '.join(classes)
def first_last(idx, seq):
"""Generate ``first`` or ``last`` or both, according to the
position `idx` in sequence `seq`.
"""
return classes(first=idx == 0, last=idx == len(seq) - 1)
def group(iterable, num, predicate=None):
"""Combines the elements produced by the given iterable so that every `n`
items are returned as a tuple.
>>> items = [1, 2, 3, 4]
>>> for item in group(items, 2):
... print item
(1, 2)
(3, 4)
    The last tuple is padded with `None` values if its length is smaller than
`num`.
>>> items = [1, 2, 3, 4, 5]
>>> for item in group(items, 2):
... print item
(1, 2)
(3, 4)
(5, None)
The optional `predicate` parameter can be used to flag elements that should
not be packed together with other items. Only those elements where the
predicate function returns True are grouped with other elements, otherwise
they are returned as a tuple of length 1:
>>> items = [1, 2, 3, 4]
>>> for item in group(items, 2, lambda x: x != 3):
... print item
(1, 2)
(3,)
(4, None)
"""
buf = []
for item in iterable:
flush = predicate and not predicate(item)
if buf and flush:
buf += [None] * (num - len(buf))
yield tuple(buf)
del buf[:]
buf.append(item)
if flush or len(buf) == num:
yield tuple(buf)
del buf[:]
if buf:
buf += [None] * (num - len(buf))
yield tuple(buf)
def istext(text):
"""`True` for text (`unicode` and `str`), but `False` for `Markup`."""
from genshi.core import Markup
return isinstance(text, basestring) and not isinstance(text, Markup)
def prepared_paginate(items, num_items, max_per_page):
if max_per_page == 0:
num_pages = 1
else:
num_pages = int(ceil(float(num_items) / max_per_page))
return items, num_items, num_pages
def paginate(items, page=0, max_per_page=10):
"""Simple generic pagination.
Given an iterable, this function returns:
* the slice of objects on the requested page,
* the total number of items, and
* the total number of pages.
The `items` parameter can be a list, tuple, or iterator:
>>> items = range(12)
>>> items
[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11]
>>> paginate(items)
([0, 1, 2, 3, 4, 5, 6, 7, 8, 9], 12, 2)
>>> paginate(items, page=1)
([10, 11], 12, 2)
>>> paginate(iter(items))
([0, 1, 2, 3, 4, 5, 6, 7, 8, 9], 12, 2)
>>> paginate(iter(items), page=1)
([10, 11], 12, 2)
This function also works with generators:
>>> def generate():
... for idx in range(12):
... yield idx
>>> paginate(generate())
([0, 1, 2, 3, 4, 5, 6, 7, 8, 9], 12, 2)
>>> paginate(generate(), page=1)
([10, 11], 12, 2)
The `max_per_page` parameter can be used to set the number of items that
should be displayed per page:
>>> items = range(12)
>>> paginate(items, page=0, max_per_page=6)
([0, 1, 2, 3, 4, 5], 12, 2)
>>> paginate(items, page=1, max_per_page=6)
([6, 7, 8, 9, 10, 11], 12, 2)
"""
if not page:
page = 0
start = page * max_per_page
stop = start + max_per_page
count = None
if hasattr(items, '__len__'):
count = len(items)
if count:
assert start < count, 'Page %d out of range' % page
try: # Try slicing first for better performance
retval = items[start:stop]
except TypeError: # Slicing not supported, so iterate through the whole list
retval = []
idx = -1 # Needed if items = []
for idx, item in enumerate(items):
if start <= idx < stop:
retval.append(item)
# If we already obtained the total number of items via `len()`,
# we can break out of the loop as soon as we've got the last item
# for the requested page
if count is not None and idx >= stop:
break
if count is None:
count = idx + 1
return retval, count, int(ceil(float(count) / max_per_page))
class Paginator(object):
"""Pagination controller"""
def __init__(self, items, page=0, max_per_page=10, num_items=None):
if not page:
page = 0
if num_items is None:
items, num_items, num_pages = paginate(items, page, max_per_page)
else:
items, num_items, num_pages = prepared_paginate(items, num_items,
max_per_page)
offset = page * max_per_page
self.page = page
self.max_per_page = max_per_page
self.items = items
self.num_items = num_items
self.num_pages = num_pages
self.span = offset, offset + len(items)
self.show_index = True
def __iter__(self):
return iter(self.items)
def __len__(self):
return len(self.items)
def __nonzero__(self):
return len(self.items) > 0
def __setitem__(self, idx, value):
self.items[idx] = value
@property
def has_more_pages(self):
return self.num_pages > 1
@property
def has_next_page(self):
return self.page + 1 < self.num_pages
@property
def has_previous_page(self):
return self.page > 0
    def get_shown_pages(self, page_index_count=11):
        if not self.has_more_pages:
            return range(1, 2)
min_page = 1
max_page = int(ceil(float(self.num_items) / self.max_per_page))
current_page = self.page + 1
start_page = current_page - page_index_count / 2
end_page = current_page + page_index_count / 2 + \
(page_index_count % 2 - 1)
if start_page < min_page:
start_page = min_page
if end_page > max_page:
end_page = max_page
return range(start_page, end_page + 1)
def displayed_items(self):
from trac.util.translation import _
start, stop = self.span
total = self.num_items
if start + 1 == stop:
return _("%(last)d of %(total)d", last=stop, total=total)
else:
return _("%(start)d - %(stop)d of %(total)d",
start=self.span[0] + 1, stop=self.span[1], total=total)
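# Added usage sketch for Paginator (illustrative only; the numbers below are
# assumed, not taken from the original documentation):
#
#     p = Paginator(range(25), page=1, max_per_page=10)
#     (p.page, p.num_items, p.num_pages)   # -> (1, 25, 3)
#     p.span                               # -> (10, 20)
#     p.has_next_page                      # -> True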
def separated(items, sep=','):
"""Yield `(item, sep)` tuples, one for each element in `items`.
`sep` will be `None` for the last item.
>>> list(separated([1, 2]))
[(1, ','), (2, None)]
>>> list(separated([1]))
[(1, None)]
>>> list(separated("abc", ':'))
[('a', ':'), ('b', ':'), ('c', None)]
"""
items = iter(items)
last = items.next()
for i in items:
yield last, sep
last = i
yield last, None
try:
from json import dumps
_js_quote = dict((c, '\\u%04x' % ord(c)) for c in '&<>')
_js_quote_re = re.compile('[' + ''.join(_js_quote) + ']')
def to_json(value):
"""Encode `value` to JSON."""
def replace(match):
return _js_quote[match.group(0)]
text = dumps(value, sort_keys=True, separators=(',', ':'))
return _js_quote_re.sub(replace, text)
except ImportError:
from trac.util.text import to_js_string
def to_json(value):
"""Encode `value` to JSON."""
if isinstance(value, basestring):
return to_js_string(value)
elif value is None:
return 'null'
elif value is False:
return 'false'
elif value is True:
return 'true'
elif isinstance(value, (int, long)):
return str(value)
elif isinstance(value, float):
return repr(value)
elif isinstance(value, (list, tuple)):
return '[%s]' % ','.join(to_json(each) for each in value)
elif isinstance(value, dict):
return '{%s}' % ','.join('%s:%s' % (to_json(k), to_json(v))
for k, v in sorted(value.iteritems()))
else:
raise TypeError('Cannot encode type %s' % value.__class__.__name__)
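# Added note: both to_json variants above aim to produce JSON that is safe to
# embed in HTML/JavaScript. With the json-based implementation, '&', '<' and
# '>' are escaped to their \uXXXX forms, e.g. (illustrative value):
#
#     to_json({'x': '<b>&'})   # -> '{"x":"\u003cb\u003e\u0026"}'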
|
|
# Copyright 2014 DreamHost, LLC
#
# Author: DreamHost, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
import socket
import unittest2 as unittest
from akanda.rug import event
from akanda.rug import notifications
class TestGetTenantID(unittest.TestCase):
def test_rpc(self):
msg = {'_context_is_admin': False,
'_context_project_id': 'c25992581e574b6485dbfdf39a3df46c',
'_context_read_deleted': 'no',
'_context_roles': ['anotherrole', 'Member', 'admin'],
'_context_tenant_id': 'c25992581e574b6485dbfdf39a3df46c',
'_context_timestamp': '2013-07-25 13:51:50.791338',
'_context_user_id': '472511eedebd4322a26c5fb1f52711ee',
'_unique_id': 'c87303336c7c4bb0b097b3e97bebf7ea',
'args': {'router_id': 'f37f31e9-adc2-4712-a002-4ccf0be17a99'},
'method': 'router_deleted',
'version': '1.0'}
tenant_id = notifications._get_tenant_id_for_message(msg)
self.assertEqual("c25992581e574b6485dbfdf39a3df46c", tenant_id)
def test_notification_tenant_id(self):
msg = {u'_context_is_admin': False,
u'_context_read_deleted': u'no',
u'_context_roles': [u'anotherrole', u'Member'],
u'_context_tenant_id': u'c25992581e574b6485dbfdf39a3df46c',
u'_context_timestamp': u'2013-07-25 14:02:55.073049',
u'_context_user_id': u'472511eedebd4322a26c5fb1f52711ee',
u'_unique_id': u'8825f8a6ccec4285a7ecfdad7bd53815',
u'event_type': u'port.create.end',
u'message_id': u'bb9bcf1d-1547-4867-b41e-f5298fa10869',
u'payload': {
u'port': {
u'admin_state_up': True,
u'device_id': u'',
u'device_owner': u'',
u'fixed_ips': [{
u'ip_address': u'192.168.123.3',
u'subnet_id': u'53d8a76a-3e1a-43e0-975e-83a4b464d18c', # noqa
}],
u'id': u'bbd92f5a-5a1d-4ec5-9272-8e4dd5f0c084',
u'mac_address': u'fa:16:3e:f4:81:a9',
u'name': u'',
u'network_id': u'c3a30111-dd52-405c-84b2-4d62068e2d35', # noqa
u'security_groups': [u'5124be1c-b2d5-47e6-ac62-411a0ea028c8'], # noqa
u'status': u'DOWN',
u'tenant_id': u'c25992581e574b6485dbfdf39a3df46c',
},
},
u'priority': u'INFO',
u'publisher_id': u'network.akanda',
u'timestamp': u'2013-07-25 14:02:55.244126'}
tenant_id = notifications._get_tenant_id_for_message(msg)
self.assertEqual('c25992581e574b6485dbfdf39a3df46c', tenant_id)
def test_notification_project_id(self):
msg = {
u'_context_is_admin': False,
u'_context_project_id': u'c25992581e574b6485dbfdf39a3df46c',
u'_context_read_deleted': u'no',
u'_context_roles': [u'anotherrole', u'Member'],
u'_context_timestamp': u'2013-07-25 14:02:55.073049',
u'_context_user_id': u'472511eedebd4322a26c5fb1f52711ee',
u'_unique_id': u'8825f8a6ccec4285a7ecfdad7bd53815',
u'event_type': u'port.create.end',
u'message_id': u'bb9bcf1d-1547-4867-b41e-f5298fa10869',
u'payload': {
u'port': {
u'admin_state_up': True,
u'device_id': u'',
u'device_owner': u'',
u'fixed_ips': [{
u'ip_address': u'192.168.123.3',
u'subnet_id': u'53d8a76a-3e1a-43e0-975e-83a4b464d18c'}], # noqa
u'id': u'bbd92f5a-5a1d-4ec5-9272-8e4dd5f0c084',
u'mac_address': u'fa:16:3e:f4:81:a9',
u'name': u'',
u'network_id': u'c3a30111-dd52-405c-84b2-4d62068e2d35',
u'security_groups': [u'5124be1c-b2d5-47e6-ac62-411a0ea028c8'], # noqa
u'status': u'DOWN',
u'tenant_id': u'c25992581e574b6485dbfdf39a3df46c',
},
},
u'priority': u'INFO',
u'publisher_id': u'network.akanda',
u'timestamp': u'2013-07-25 14:02:55.244126'}
tenant_id = notifications._get_tenant_id_for_message(msg)
self.assertEqual('c25992581e574b6485dbfdf39a3df46c', tenant_id)
def test_notification_with_tenant_id_from_router_dict(self):
msg = {
u'_context_roles': [u'admin', u'_member_'],
u'priority': u'INFO',
u'_context_read_deleted': u'no',
u'event_type': u'router.update.end',
u'timestamp': u'2013-10-20 17:07:14.573678',
u'_context_tenant_id': u'618cb69189144d4bbabe0aaa678d48c6',
u'payload': {
u'router': {
u'status': u'ACTIVE',
u'external_gateway_info': {
u'network_id': u'8e163d8b-81a8-4f4f-b622-ecbf452de2e9'
},
u'name': u'r',
u'admin_state_up': True,
u'tenant_id': u'cddd1d8533cc4f828837d4c2dcb7ef96',
u'id': u'64009064-be1e-44e2-9e5d-1c706cef2e34',
u'routes': [],
}
},
u'_unique_id': u'1cc91b54bd2947fb9d513e67795ebb8d',
u'_context_is_admin': True,
u'_context_project_id': u'618cb69189144d4bbabe0aaa678d48c6',
u'_context_timestamp': u'2013-10-20 17:07:14.349210',
u'_context_user_id': u'a5b54cb8fb3a4cb49ba6118f3d2e35af',
u'publisher_id': u'network.orange.localdomain',
u'message_id': u'd8335b96-0fde-461a-af65-3f1aaa73c8af'
}
tenant_id = notifications._get_tenant_id_for_message(msg)
self.assertEqual('cddd1d8533cc4f828837d4c2dcb7ef96', tenant_id)
class TestGetCRUD(unittest.TestCase):
def test_rpc_router_deleted(self):
msg = {u'oslo.message': u'{"_context_roles": ["anotherrole", "Member", "admin"], "_context_read_deleted": "no", "args": {"router_id": "f37f31e9-adc2-4712-a002-4ccf0be17a99"}, "_unique_id": "c87303336c7c4bb0b097b3e97bebf7ea", "_context_timestamp": "2013-07-25 13:51:50.791338", "_context_is_admin": false, "version": "1.0", "_context_project_id": "c25992581e574b6485dbfdf39a3df46c", "_context_tenant_id": "c25992581e574b6485dbfdf39a3df46c", "_context_user_id": "472511eedebd4322a26c5fb1f52711ee", "method": "router_deleted"}', u'oslo.version': u'2.0'} # noqa
e = notifications._make_event_from_message(msg)
self.assertEqual(event.DELETE, e.crud)
self.assert_(e.router_id)
def _test_notification(self, event_type):
msg = {
u'_context_is_admin': False,
u'_context_project_id': u'c25992581e574b6485dbfdf39a3df46c',
u'_context_read_deleted': u'no',
u'_context_roles': [u'anotherrole', u'Member'],
u'_context_timestamp': u'2013-07-25 14:02:55.073049',
u'_context_user_id': u'472511eedebd4322a26c5fb1f52711ee',
u'_unique_id': u'8825f8a6ccec4285a7ecfdad7bd53815',
u'event_type': event_type,
u'message_id': u'bb9bcf1d-1547-4867-b41e-f5298fa10869',
u'payload': {
u'port': {
u'admin_state_up': True,
u'device_id': u'',
u'device_owner': u'',
u'fixed_ips': [{
u'ip_address': u'192.168.123.3',
u'subnet_id': u'53d8a76a-3e1a-43e0-975e-83a4b464d18c'}], # noqa
u'id': u'bbd92f5a-5a1d-4ec5-9272-8e4dd5f0c084',
u'mac_address': u'fa:16:3e:f4:81:a9',
u'name': u'',
u'network_id': u'c3a30111-dd52-405c-84b2-4d62068e2d35',
u'security_groups': [u'5124be1c-b2d5-47e6-ac62-411a0ea028c8'], # noqa
u'status': u'DOWN',
u'tenant_id': u'c25992581e574b6485dbfdf39a3df46c',
},
},
u'priority': u'INFO',
u'publisher_id': u'network.akanda',
u'timestamp': u'2013-07-25 14:02:55.244126'}
return notifications._make_event_from_message(msg)
def test_notification_port(self):
e = self._test_notification('port.create.start')
self.assertFalse(e)
e = self._test_notification('port.create.end')
self.assertEqual(event.UPDATE, e.crud)
e = self._test_notification('port.change.start')
self.assertFalse(e)
e = self._test_notification('port.change.end')
self.assertEqual(event.UPDATE, e.crud)
e = self._test_notification('port.delete.start')
self.assertFalse(e)
e = self._test_notification('port.delete.end')
self.assertEqual(event.UPDATE, e.crud)
def test_notification_subnet(self):
e = self._test_notification('subnet.create.start')
self.assertFalse(e)
e = self._test_notification('subnet.create.end')
self.assertEqual(event.UPDATE, e.crud)
e = self._test_notification('subnet.change.start')
self.assertFalse(e)
e = self._test_notification('subnet.change.end')
self.assertEqual(event.UPDATE, e.crud)
e = self._test_notification('subnet.delete.start')
self.assertFalse(e)
e = self._test_notification('subnet.delete.end')
self.assertEqual(event.UPDATE, e.crud)
def test_notification_router(self):
e = self._test_notification('router.create.start')
self.assertFalse(e)
e = self._test_notification('router.create.end')
self.assertEqual(event.CREATE, e.crud)
e = self._test_notification('router.change.start')
self.assertFalse(e)
e = self._test_notification('router.change.end')
self.assertEqual(event.UPDATE, e.crud)
e = self._test_notification('router.delete.start')
self.assertFalse(e)
e = self._test_notification('router.delete.end')
self.assertEqual(event.DELETE, e.crud)
def test_notification_router_id(self):
msg = {
u'_context_is_admin': False,
u'_context_project_id': u'c25992581e574b6485dbfdf39a3df46c',
u'_context_read_deleted': u'no',
u'_context_roles': [u'anotherrole', u'Member'],
u'_context_tenant_id': u'c25992581e574b6485dbfdf39a3df46c',
u'_context_timestamp': u'2013-08-01 20:17:11.569282',
u'_context_user_id': u'472511eedebd4322a26c5fb1f52711ee',
u'_unique_id': u'246f69b5dff44156ba56c4a2b7c3d47f',
u'event_type': u'router.create.end',
u'message_id': u'658c8901-6858-4dbc-be8a-242d94fc4b5d',
u'payload': {
u'router': {
u'admin_state_up': True,
u'external_gateway_info': None,
u'id': u'f95fb32d-0072-4675-b4bd-61d829a46aca',
u'name': u'r2',
u'ports': [],
u'status': u'ACTIVE',
u'tenant_id': u'c25992581e574b6485dbfdf39a3df46c',
},
},
u'priority': u'INFO',
u'publisher_id': u'network.akanda',
u'timestamp': u'2013-08-01 20:17:11.662425',
}
e = notifications._make_event_from_message(msg)
self.assertEqual(e.router_id, u'f95fb32d-0072-4675-b4bd-61d829a46aca')
def test_interface_create_and_delete(self):
for action in ('create', 'delete'):
msg = {
u'_context_roles': [u'_member_', u'admin'],
u'priority': u'INFO',
u'_context_read_deleted': u'no',
u'event_type': u'router.interface.%s' % action,
u'timestamp': u'2014-04-08 17:48:42.917596',
u'_context_tenant_id': u'4838397648d946279ad422886aabcf07',
u'payload': {
u'router.interface': {
u'subnet_id': u'0535072e-6ef4-4916-b1f5-05fab4da3d0c',
u'tenant_id': u'c2a1399efbed41e5be2115afa5b5ec25',
u'port_id': u'63363e5f-59b7-49ca-b619-96c16883b543',
u'id': u'58868681-4a58-4f69-8dc0-b20955e7923f'
}
},
u'_unique_id': u'bf7cbac3c964442b841f8a0dfda1b84f',
u'_context_is_admin': True,
u'_context_project_id': u'4838397648d946279ad422886aabcf07',
u'_context_timestamp': u'2014-04-08 17:48:42.838558',
u'_context_user_id': u'22a73cb6a5bf493da6a1b0b602b61ed6',
u'publisher_id': u'network.akanda',
u'message_id': u'c441df90-404a-4b53-a4d6-67f042339ef2'
}
e = notifications._make_event_from_message(msg)
self.assertEqual(event.UPDATE, e.crud)
self.assertEqual(
u'58868681-4a58-4f69-8dc0-b20955e7923f',
e.router_id
)
def test_notification_akanda(self):
e = self._test_notification('akanda.bandwidth.used')
self.assertIs(None, e)
class TestSend(unittest.TestCase):
@mock.patch('kombu.connection.BrokerConnection')
@mock.patch('kombu.entity.Exchange')
@mock.patch('kombu.Producer')
def setUp(self, producer_cls, exchange, broker):
super(TestSend, self).setUp()
self.messages = []
self.producer = mock.Mock()
self.producer.publish.side_effect = self.messages.append
producer_cls.return_value = self.producer
self.notifier = notifications.Publisher('url', 'quantum', 'topic')
self.notifier.start()
self.addCleanup(self.notifier.stop)
# def tearDown(self):
# if self.notifier:
# self.notifier.stop()
# super(TestSend, self).tearDown()
def test_payload(self):
self.notifier.publish({'payload': 'message here'})
self.notifier.stop() # flushes the queue
msg = self.messages[0]
self.assertEqual(msg['payload'], 'message here')
def test_context(self):
self.notifier.publish({'payload': 'message here'})
self.notifier.stop() # flushes the queue
msg = self.messages[0]
self.assertIn('_context_tenant', msg)
def test_unique_id(self):
self.notifier.publish({'payload': 'message here'})
self.notifier.publish({'payload': 'message here'})
self.notifier.stop() # flushes the queue
msg1, msg2 = self.messages
self.assertNotEqual(msg1['_unique_id'], msg2['_unique_id'])
class TestListen(unittest.TestCase):
@mock.patch('kombu.connection.BrokerConnection')
def test_ensure_connection_is_called(self, mock_broker):
broker = mock_broker.return_value
broker.ensure_connection = mock.Mock()
broker.drain_events = mock.Mock(side_effect=SystemExit())
notification_queue = mock.MagicMock()
notifications.listen('test-host', 'amqp://test.host',
'test-notifications', 'test-rpc',
notification_queue)
broker.ensure_connection.assert_called_once_with(
errback=mock.ANY,
max_retries=0,
interval_start=2,
interval_step=2,
interval_max=30)
@mock.patch('kombu.connection.BrokerConnection')
def test_establish_new_connection_after_socket_timeout(self, mock_broker):
broker = mock_broker.return_value
broker.ensure_connection = mock.Mock()
broker.drain_events = mock.Mock(
side_effect=(socket.timeout(), SystemExit()))
notification_queue = mock.MagicMock()
notifications.listen('test-host', 'amqp://test.host',
'test-notifications', 'test-rpc',
notification_queue)
self.assertEqual(broker.ensure_connection.call_args_list,
[mock.call(errback=mock.ANY,
max_retries=0,
interval_start=2,
interval_step=2,
interval_max=30),
mock.call(errback=mock.ANY,
max_retries=0,
interval_start=2,
interval_step=2,
interval_max=30)])
|
|
# Copyright 2013 Brocade Communications System, Inc.
# All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# (Some parts adapted from LinuxBridge Plugin)
# TODO(shiv) need support for security groups
"""Implementation of Brocade Neutron Plugin."""
from oslo_config import cfg
from oslo_log import log as logging
import oslo_messaging
from oslo_utils import importutils
from neutron.agent import securitygroups_rpc as sg_rpc
from neutron.api.rpc.agentnotifiers import dhcp_rpc_agent_api
from neutron.api.rpc.agentnotifiers import l3_rpc_agent_api
from neutron.api.rpc.handlers import dhcp_rpc
from neutron.api.rpc.handlers import l3_rpc
from neutron.api.rpc.handlers import metadata_rpc
from neutron.api.rpc.handlers import securitygroups_rpc
from neutron.common import constants as n_const
from neutron.common import rpc as n_rpc
from neutron.common import topics
from neutron.common import utils
from neutron import context as n_context
from neutron.db import agents_db
from neutron.db import agentschedulers_db
from neutron.db import api as db
from neutron.db import db_base_plugin_v2
from neutron.db import external_net_db
from neutron.db import extraroute_db
from neutron.db import l3_agentschedulers_db
from neutron.db import portbindings_base
from neutron.db import securitygroups_rpc_base as sg_db_rpc
from neutron.extensions import portbindings
from neutron.extensions import securitygroup as ext_sg
from neutron.i18n import _LE, _LI
from neutron.plugins.brocade.db import models as brocade_db
from neutron.plugins.brocade import vlanbm as vbm
from neutron.plugins.common import constants as svc_constants
LOG = logging.getLogger(__name__)
AGENT_OWNER_PREFIX = "network:"
NOS_DRIVER = 'neutron.plugins.brocade.nos.nosdriver.NOSdriver'
SWITCH_OPTS = [cfg.StrOpt('address', default='',
help=_('The address of the host to SSH to')),
cfg.StrOpt('username', default='',
help=_('The SSH username to use')),
cfg.StrOpt('password', default='', secret=True,
help=_('The SSH password to use')),
cfg.StrOpt('ostype', default='NOS',
help=_('Currently unused'))
]
PHYSICAL_INTERFACE_OPTS = [cfg.StrOpt('physical_interface', default='eth0',
help=_('The network interface to use when creating '
'a port'))
]
cfg.CONF.register_opts(SWITCH_OPTS, "SWITCH")
cfg.CONF.register_opts(PHYSICAL_INTERFACE_OPTS, "PHYSICAL_INTERFACE")
class BridgeRpcCallbacks(object):
"""Agent callback."""
target = oslo_messaging.Target(version='1.2')
# Device names start with "tap"
# history
# 1.1 Support Security Group RPC
# 1.2 Support get_devices_details_list
def get_device_details(self, rpc_context, **kwargs):
"""Agent requests device details."""
agent_id = kwargs.get('agent_id')
device = kwargs.get('device')
LOG.debug("Device %(device)s details requested from %(agent_id)s",
{'device': device, 'agent_id': agent_id})
port = brocade_db.get_port(rpc_context,
device[len(n_const.TAP_DEVICE_PREFIX):])
if port:
entry = {'device': device,
'vlan_id': port.vlan_id,
'network_id': port.network_id,
'port_id': port.port_id,
'physical_network': port.physical_interface,
'admin_state_up': port.admin_state_up
}
else:
entry = {'device': device}
LOG.debug("%s can not be found in database", device)
return entry
def get_devices_details_list(self, rpc_context, **kwargs):
return [
self.get_device_details(
rpc_context,
device=device,
**kwargs
)
for device in kwargs.pop('devices', [])
]
def update_device_down(self, rpc_context, **kwargs):
"""Device no longer exists on agent."""
device = kwargs.get('device')
port = self.get_port_from_device(device)
if port:
entry = {'device': device,
'exists': True}
# Set port status to DOWN
port_id = port['port_id']
brocade_db.update_port_state(rpc_context, port_id, False)
else:
entry = {'device': device,
'exists': False}
LOG.debug("%s can not be found in database", device)
return entry
class SecurityGroupServerRpcMixin(sg_db_rpc.SecurityGroupServerRpcMixin):
@classmethod
def get_port_from_device(cls, device):
"""Get port from the brocade specific db."""
# TODO(shh) context is not being passed as
# an argument to this function;
#
# need to be fixed in:
        # file: neutron/db/securitygroups_rpc_base.py
# function: securitygroup_rules_for_devices()
# which needs to pass context to us
# Doing what other plugins are doing
session = db.get_session()
port = brocade_db.get_port_from_device(
session, device[len(n_const.TAP_DEVICE_PREFIX):])
# TODO(shiv): need to extend the db model to include device owners
        # make it appear that the device owner is of type network
if port:
port['device'] = device
port['device_owner'] = AGENT_OWNER_PREFIX
port['binding:vif_type'] = 'bridge'
return port
class AgentNotifierApi(sg_rpc.SecurityGroupAgentRpcApiMixin):
"""Agent side of the linux bridge rpc API.
API version history:
1.0 - Initial version.
1.1 - Added get_active_networks_info, create_dhcp_port,
and update_dhcp_port methods.
"""
def __init__(self, topic):
self.topic = topic
target = oslo_messaging.Target(topic=topic, version='1.0')
self.client = n_rpc.get_client(target)
self.topic_network_delete = topics.get_topic_name(topic,
topics.NETWORK,
topics.DELETE)
self.topic_port_update = topics.get_topic_name(topic,
topics.PORT,
topics.UPDATE)
def network_delete(self, context, network_id):
cctxt = self.client.prepare(topic=self.topic_network_delete,
fanout=True)
cctxt.cast(context, 'network_delete', network_id=network_id)
def port_update(self, context, port, physical_network, vlan_id):
cctxt = self.client.prepare(topic=self.topic_port_update, fanout=True)
cctxt.cast(context, 'port_update', port=port,
physical_network=physical_network, vlan_id=vlan_id)
class BrocadePluginV2(db_base_plugin_v2.NeutronDbPluginV2,
external_net_db.External_net_db_mixin,
extraroute_db.ExtraRoute_db_mixin,
SecurityGroupServerRpcMixin,
l3_agentschedulers_db.L3AgentSchedulerDbMixin,
agentschedulers_db.DhcpAgentSchedulerDbMixin,
portbindings_base.PortBindingBaseMixin):
"""BrocadePluginV2 is a Neutron plugin.
Provides L2 Virtual Network functionality using VDX. Upper
layer driver class that interfaces to NETCONF layer below.
"""
def __init__(self):
"""Initialize Brocade Plugin.
Specify switch address and db configuration.
"""
super(BrocadePluginV2, self).__init__()
self.supported_extension_aliases = ["binding", "security-group",
"external-net", "router",
"extraroute", "agent",
"l3_agent_scheduler",
"dhcp_agent_scheduler"]
self.physical_interface = (cfg.CONF.PHYSICAL_INTERFACE.
physical_interface)
self.base_binding_dict = self._get_base_binding_dict()
portbindings_base.register_port_dict_function()
self.ctxt = n_context.get_admin_context()
self._vlan_bitmap = vbm.VlanBitmap(self.ctxt)
self._setup_rpc()
self.network_scheduler = importutils.import_object(
cfg.CONF.network_scheduler_driver
)
self.router_scheduler = importutils.import_object(
cfg.CONF.router_scheduler_driver
)
self.brocade_init()
self.start_periodic_dhcp_agent_status_check()
def brocade_init(self):
"""Brocade specific initialization."""
self._switch = {'address': cfg.CONF.SWITCH.address,
'username': cfg.CONF.SWITCH.username,
'password': cfg.CONF.SWITCH.password
}
self._driver = importutils.import_object(NOS_DRIVER)
def _setup_rpc(self):
# RPC support
self.service_topics = {svc_constants.CORE: topics.PLUGIN,
svc_constants.L3_ROUTER_NAT: topics.L3PLUGIN}
self.rpc_context = n_context.ContextBase('neutron', 'neutron',
is_admin=False)
self.conn = n_rpc.create_connection()
self.endpoints = [BridgeRpcCallbacks(),
securitygroups_rpc.SecurityGroupServerRpcCallback(),
dhcp_rpc.DhcpRpcCallback(),
l3_rpc.L3RpcCallback(),
agents_db.AgentExtRpcCallback(),
metadata_rpc.MetadataRpcCallback()]
for svc_topic in self.service_topics.values():
self.conn.create_consumer(svc_topic, self.endpoints, fanout=False)
# Consume from all consumers in threads
self.conn.consume_in_threads()
self.notifier = AgentNotifierApi(topics.AGENT)
self.agent_notifiers[n_const.AGENT_TYPE_DHCP] = (
dhcp_rpc_agent_api.DhcpAgentNotifyAPI()
)
self.agent_notifiers[n_const.AGENT_TYPE_L3] = (
l3_rpc_agent_api.L3AgentNotifyAPI()
)
def create_network(self, context, network):
"""Create network.
This call to create network translates to creation of port-profile on
the physical switch.
"""
with context.session.begin(subtransactions=True):
net = super(BrocadePluginV2, self).create_network(context, network)
net_uuid = net['id']
vlan_id = self._vlan_bitmap.get_next_vlan(None)
switch = self._switch
try:
self._driver.create_network(switch['address'],
switch['username'],
switch['password'],
vlan_id)
except Exception:
# Proper formatting
LOG.exception(_LE("Brocade NOS driver error"))
LOG.debug("Returning the allocated vlan (%d) to the pool",
vlan_id)
self._vlan_bitmap.release_vlan(int(vlan_id))
raise Exception(_("Brocade plugin raised exception, "
"check logs"))
brocade_db.create_network(context, net_uuid, vlan_id)
self._process_l3_create(context, net, network['network'])
LOG.info(_LI("Allocated vlan (%d) from the pool"), vlan_id)
return net
def delete_network(self, context, net_id):
"""Delete network.
This call to delete the network translates to removing the
port-profile on the physical switch.
"""
with context.session.begin(subtransactions=True):
self._process_l3_delete(context, net_id)
result = super(BrocadePluginV2, self).delete_network(context,
net_id)
# we must delete all ports in db first (foreign key constraint)
            # there is no need to delete the port in the driver (it's a no-op)
# (actually: note there is no such call to the driver)
bports = brocade_db.get_ports(context, net_id)
for bport in bports:
brocade_db.delete_port(context, bport['port_id'])
# find the vlan for this network
net = brocade_db.get_network(context, net_id)
vlan_id = net['vlan']
# Tell hw to do remove PP
switch = self._switch
try:
self._driver.delete_network(switch['address'],
switch['username'],
switch['password'],
vlan_id)
except Exception:
# Proper formatting
LOG.exception(_LE("Brocade NOS driver error"))
raise Exception(_("Brocade plugin raised exception, "
"check logs"))
# now ok to delete the network
brocade_db.delete_network(context, net_id)
# relinquish vlan in bitmap
self._vlan_bitmap.release_vlan(int(vlan_id))
return result
def update_network(self, context, id, network):
session = context.session
with session.begin(subtransactions=True):
net = super(BrocadePluginV2, self).update_network(context, id,
network)
self._process_l3_update(context, net, network['network'])
return net
def create_port(self, context, port):
"""Create logical port on the switch."""
tenant_id = port['port']['tenant_id']
network_id = port['port']['network_id']
admin_state_up = port['port']['admin_state_up']
physical_interface = self.physical_interface
with context.session.begin(subtransactions=True):
bnet = brocade_db.get_network(context, network_id)
vlan_id = bnet['vlan']
neutron_port = super(BrocadePluginV2, self).create_port(context,
port)
self._process_portbindings_create_and_update(context,
port['port'],
neutron_port)
interface_mac = neutron_port['mac_address']
port_id = neutron_port['id']
switch = self._switch
# convert mac format: xx:xx:xx:xx:xx:xx -> xxxx.xxxx.xxxx
mac = self.mac_reformat_62to34(interface_mac)
try:
self._driver.associate_mac_to_network(switch['address'],
switch['username'],
switch['password'],
vlan_id,
mac)
except Exception:
# Proper formatting
LOG.exception(_LE("Brocade NOS driver error"))
raise Exception(_("Brocade plugin raised exception, "
"check logs"))
# save to brocade persistent db
brocade_db.create_port(context, port_id, network_id,
physical_interface,
vlan_id, tenant_id, admin_state_up)
# apply any extensions
return neutron_port
def delete_port(self, context, port_id):
with context.session.begin(subtransactions=True):
neutron_port = self.get_port(context, port_id)
interface_mac = neutron_port['mac_address']
# convert mac format: xx:xx:xx:xx:xx:xx -> xxxx.xxxx.xxxx
mac = self.mac_reformat_62to34(interface_mac)
brocade_port = brocade_db.get_port(context, port_id)
vlan_id = brocade_port['vlan_id']
switch = self._switch
try:
self._driver.dissociate_mac_from_network(switch['address'],
switch['username'],
switch['password'],
vlan_id,
mac)
except Exception:
LOG.exception(_LE("Brocade NOS driver error"))
raise Exception(
_("Brocade plugin raised exception, check logs"))
super(BrocadePluginV2, self).delete_port(context, port_id)
brocade_db.delete_port(context, port_id)
def update_port(self, context, port_id, port):
original_port = self.get_port(context, port_id)
session = context.session
port_updated = False
with session.begin(subtransactions=True):
# delete the port binding and read it with the new rules
if ext_sg.SECURITYGROUPS in port['port']:
port['port'][ext_sg.SECURITYGROUPS] = (
self._get_security_groups_on_port(context, port))
self._delete_port_security_group_bindings(context, port_id)
# process_port_create_security_group also needs port id
port['port']['id'] = port_id
self._process_port_create_security_group(
context,
port['port'],
port['port'][ext_sg.SECURITYGROUPS])
port_updated = True
port_data = port['port']
port = super(BrocadePluginV2, self).update_port(
context, port_id, port)
self._process_portbindings_create_and_update(context,
port_data,
port)
if original_port['admin_state_up'] != port['admin_state_up']:
port_updated = True
if (original_port['fixed_ips'] != port['fixed_ips'] or
not utils.compare_elements(
original_port.get(ext_sg.SECURITYGROUPS),
port.get(ext_sg.SECURITYGROUPS))):
self.notifier.security_groups_member_updated(
context, port.get(ext_sg.SECURITYGROUPS))
if port_updated:
self._notify_port_updated(context, port)
return port
def _notify_port_updated(self, context, port):
port_id = port['id']
bport = brocade_db.get_port(context, port_id)
self.notifier.port_update(context, port,
bport.physical_interface,
bport.vlan_id)
def _get_base_binding_dict(self):
binding = {
portbindings.VIF_TYPE: portbindings.VIF_TYPE_BRIDGE,
portbindings.VIF_DETAILS: {
# TODO(rkukura): Replace with new VIF security details
portbindings.CAP_PORT_FILTER:
'security-group' in self.supported_extension_aliases}}
return binding
@staticmethod
def mac_reformat_62to34(interface_mac):
"""Transform MAC address format.
Transforms from 6 groups of 2 hexadecimal numbers delimited by ":"
to 3 groups of 4 hexadecimals numbers delimited by ".".
:param interface_mac: MAC address in the format xx:xx:xx:xx:xx:xx
:type interface_mac: string
:returns: MAC address in the format xxxx.xxxx.xxxx
:rtype: string
"""
mac = interface_mac.replace(":", "")
mac = mac[0:4] + "." + mac[4:8] + "." + mac[8:12]
return mac
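# Added illustration of the helper above (MAC value chosen arbitrarily):
#
#     >>> BrocadePluginV2.mac_reformat_62to34('fa:16:3e:f4:81:a9')
#     'fa16.3ef4.81a9'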
|
|
import abc
import logging
import math
from collections import defaultdict
from fn import _
from fn.iters import map, filter
from .manipulator import Parameter
from .metatechniques import RecyclingMetaTechnique
from .technique import SequentialSearchTechnique, register
log = logging.getLogger(__name__)
class SimplexTechnique(SequentialSearchTechnique):
"""
Base class with utility functions common
to simplex type methods
"""
def __init__(self, seed_cfg=None, *args, **kwargs):
super(SimplexTechnique, self).__init__(*args, **kwargs)
self.centroid = None
self.last_simplex_points = None
self.seed_cfg = seed_cfg
self.simplex_points = []
def calculate_centroid(self):
"""
average of all the PrimitiveParameters in self.simplex_points
ComplexParameters are copied from self.simplex_points[0]
"""
sums = defaultdict(float)
counts = defaultdict(int)
for config in self.simplex_points:
cfg = config.data
for param in self.manipulator.parameters(cfg):
if param.is_primitive():
sums[param.name] += param.get_unit_value(cfg)
counts[param.name] += 1
centroid = self.manipulator.copy(self.simplex_points[0].data)
for param in self.manipulator.parameters(centroid):
if param.is_primitive():
param.set_unit_value(centroid,
sums[param.name] / float(counts[param.name]))
return centroid
def cfg_to_str(self, cfg):
params = list(filter(Parameter.is_primitive,
self.manipulator.parameters(cfg)))
params.sort(key=_.name)
return str(tuple(map(lambda x: x.get_unit_value(cfg), params)))
def debug_log(self):
for i, config in enumerate(self.simplex_points):
log.debug("simplex_points[%d] = %s", i, self.cfg_to_str(config.data))
if self.centroid:
log.debug("centroid = %s", self.cfg_to_str(self.centroid))
def linear_point(self, p1, p2, scale):
"""
return a point on the line passing between p1 and p2 at position scale
such that p1 + scale*(p1 - p2)
"""
return self.manipulator.linear_config(1.0, p1, scale, p1, -scale, p2)
def convergence_criterea(self):
"""True will cause the simplex method to stop"""
if self.rounds_since_novel_request > 3 * len(self.simplex_points) + 1:
return True
if self.last_simplex_points == self.simplex_points:
return True
self.last_simplex_points = list(self.simplex_points)
return False
def initial_simplex_seed(self):
"""
return a point to base the initial simplex on
"""
if self.seed_cfg is not None:
return self.seed_cfg
return self.manipulator.random()
@abc.abstractmethod
def initial_simplex(self):
"""
    return an initial list of configurations
"""
return []
class RandomInitialMixin(object):
"""
start with random initial simplex
"""
def initial_simplex(self):
# we implicitly assume number of parameters is fixed here, however
# it will work if it isn't (simplex size is undefined)
cfg0 = self.initial_simplex_seed()
params = self.manipulator.parameters(cfg0)
return [cfg0] + [self.manipulator.random()
for p in params
if p.is_primitive()]
class RightInitialMixin(object):
"""
start with random initial right triangle like simplex
"""
def __init__(self, initial_unit_edge_length=0.1, *args, **kwargs):
assert initial_unit_edge_length <= 0.5
self.initial_unit_edge_length = initial_unit_edge_length
super(RightInitialMixin, self).__init__(*args, **kwargs)
def initial_simplex(self):
cfg0 = self.initial_simplex_seed()
simplex = [cfg0]
params = self.manipulator.parameters(cfg0)
params = filter(lambda x: x.is_primitive(), params)
for p in params:
simplex.append(self.manipulator.copy(cfg0))
v = p.get_unit_value(simplex[-1])
if v <= 0.5:
v += self.initial_unit_edge_length
else:
v -= self.initial_unit_edge_length
p.set_unit_value(simplex[-1], v)
return simplex
class RegularInitialMixin(object):
"""
start with random initial regular simplex (all edges equal length)
"""
def __init__(self, initial_unit_edge_length=0.1, *args, **kwargs):
assert initial_unit_edge_length <= 0.5
self.initial_unit_edge_length = initial_unit_edge_length
super(RegularInitialMixin, self).__init__(*args, **kwargs)
def initial_simplex(self):
cfg0 = self.initial_simplex_seed()
simplex = [cfg0]
params = self.manipulator.parameters(cfg0)
params = list(filter(lambda x: x.is_primitive(), params))
if len(params) == 0:
return simplex
q = (((math.sqrt(len(params) + 1.0) - 1.0) / (len(params) * math.sqrt(2.0)))
* self.initial_unit_edge_length)
p = q + ((1.0 / math.sqrt(2.0)) * self.initial_unit_edge_length)
base = [x.get_unit_value(cfg0) for x in params]
for j in xrange(len(base)):
if max(p, q) + base[j] > 1.0:
#flip this dimension as we would overflow our [0,1] bounds
base[j] *= -1.0
for i in xrange(len(params)):
simplex.append(self.manipulator.copy(cfg0))
params[i].set_unit_value(simplex[-1], abs(base[i] + p))
for j in xrange(i + 1, len(params)):
params[j].set_unit_value(simplex[-1], abs(base[i] + q))
return simplex
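# Added note: the p/q values computed above implement the standard
# regular-simplex construction (all edges of equal length), scaled so that
# each edge has length initial_unit_edge_length in unit space. For example,
# with two primitive parameters and initial_unit_edge_length = 1.0:
#
#     q = (sqrt(3) - 1) / (2 * sqrt(2))  # ~0.2588
#     p = q + 1 / sqrt(2)                # ~0.9659, and sqrt(p*p + q*q) == 1.0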
class NelderMead(SimplexTechnique):
"""
Nelder-Mead downhill simplex method.
Based on description of method on page 82 of
'Noisy Optimization With Evolution Strategies' by Dirk V. Arnold.
We set alpha=2.0 by default instead of the often recommended alpha=1.0 to
avoid a common degenerate case, where the volume of the simplex becomes zero.
This is easiest to see with a single parameter. Let the simplex points
be x0,x1. Let the centroid be c=(x0+x1)/2.0 and the reflection point be:
  reflection = c + alpha*(c-x1) = (x0+x1)*(1+alpha)/2 - alpha*x1
The problem is, if we set alpha = 1.0, then the x1's cancel out and the
reflection point becomes just reflection=x0, which also happens to be the
second best point, meaning we will use it. So in a single step of the
algorithm the simplex becomes singular.
"""
def __init__(self,
alpha=2.0,
gamma=2.0,
beta=0.5,
sigma=0.5,
*args, **kwargs):
self.alpha = alpha
self.gamma = gamma
self.beta = beta
self.sigma = sigma
super(NelderMead, self).__init__(*args, **kwargs)
@classmethod
def get_hyper_parameters(cls):
return ['alpha', 'gamma', 'beta', 'sigma']
def main_generator(self):
objective = self.objective
driver = self.driver
# test the entire initial simplex
self.simplex_points = list(map(driver.get_configuration,
self.initial_simplex()))
if len(self.simplex_points) <= 1:
log.warning("only 1 point in simplex, will not use %s", self.name)
return
log.debug("initial points")
for p in self.simplex_points:
self.yield_nonblocking(p)
yield None # wait until results are ready
while not self.convergence_criterea():
# next steps assume this ordering
self.simplex_points.sort(cmp=objective.compare)
# set limit from worst point
self.limit = objective.limit_from_config(self.simplex_points[-1])
self.centroid = self.calculate_centroid()
if log.isEnabledFor(logging.DEBUG):
self.debug_log()
reflection = self.reflection_point()
yield reflection
if objective.lt(reflection, self.simplex_points[0]):
#expansion case
expansion = self.expansion_point(reflection)
yield expansion
if objective.lt(expansion, reflection):
log.debug("using expansion point")
self.simplex_points[-1] = expansion
else:
log.debug("using reflection point (considered expansion)")
self.simplex_points[-1] = reflection
elif objective.lt(reflection, self.simplex_points[1]):
#reflection case
log.debug("using reflection point")
self.simplex_points[-1] = reflection
else:
# contraction case
if objective.lte(reflection, self.simplex_points[-1]):
# outside contraction
contract_base = reflection
else:
# inside contraction
contract_base = self.simplex_points[-1]
contraction = self.contraction_point(contract_base)
yield contraction
if objective.lte(contraction, contract_base):
log.debug("using contraction point")
self.simplex_points[-1] = contraction
else:
#reduction case
log.debug("performing shrink reduction")
self.perform_shrink_reduction()
for p in self.simplex_points:
self.yield_nonblocking(p)
yield None # wait until results are ready
def reflection_point(self):
"""
reflect worst point across centroid
"""
return self.driver.get_configuration(
self.linear_point(self.centroid,
self.simplex_points[-1].data,
self.alpha))
def expansion_point(self, reflection):
"""
reflect worst point across centroid more (by default 2x as much)
"""
return self.driver.get_configuration(
self.linear_point(self.centroid,
reflection.data,
-self.gamma))
def contraction_point(self, contract_base):
"""
reflect worst point across centroid less
"""
return self.driver.get_configuration(
self.linear_point(self.centroid,
contract_base.data,
-self.beta))
def perform_shrink_reduction(self):
"""
shrink the simplex in size by sigma=1/2 (default), moving it closer to the
best point
"""
for i in xrange(1, len(self.simplex_points)):
self.simplex_points[i] = self.driver.get_configuration(
self.linear_point(self.simplex_points[0].data,
self.simplex_points[i].data,
-self.sigma))
class Torczon(SimplexTechnique):
"""
Torczon multi-directional search algorithm.
Based on description of method on page 85 of
'Noisy Optimization With Evolution Strategies' by Dirk V. Arnold.
"""
def __init__(self,
alpha=1.0,
gamma=2.0,
beta=0.5,
*args, **kwargs):
self.alpha = alpha
self.gamma = gamma
self.beta = beta
super(Torczon, self).__init__(*args, **kwargs)
@classmethod
def get_hyper_parameters(cls):
return ['alpha', 'gamma', 'beta']
def main_generator(self):
objective = self.objective
driver = self.driver
# test the entire initial simplex
self.simplex_points = list(map(driver.get_configuration,
self.initial_simplex()))
if len(self.simplex_points) <= 1:
log.warning("only 1 point in simplex, will not use %s", self.name)
return
log.debug("initial points")
for p in self.simplex_points:
self.yield_nonblocking(p)
yield None # wait until results are ready
self.simplex_points.sort(cmp=objective.compare)
while not self.convergence_criterea():
# set limit from worst point
self.limit = objective.limit_from_config(self.simplex_points[-1])
if log.isEnabledFor(logging.DEBUG):
self.debug_log()
reflected = self.reflected_simplex()
yield None # wait until results are ready
reflected.sort(cmp=objective.compare)
# this next condition implies reflected[0] < simplex_points[0] since
# reflected is sorted and contains simplex_points[0] (saves a db query)
if reflected[0] is not self.simplex_points[0]:
expanded = self.expanded_simplex()
yield None # wait until results are ready
expanded.sort(cmp=objective.compare)
if objective.lt(expanded[0], reflected[0]):
log.debug("expansion performed")
self.simplex_points = expanded
else:
log.debug("reflection performed")
self.simplex_points = reflected
else:
contracted = self.contracted_simplex()
yield None # wait until results are ready
contracted.sort(cmp=objective.compare)
log.debug("contraction performed")
self.simplex_points = contracted
def scaled_simplex(self, scale):
"""
assumes self.simplex_points[0] is best point and returns a new simplex
reflected across self.simplex_points[0] by scale
"""
simplex = list(self.simplex_points) # shallow copy
for i in xrange(1, len(simplex)):
simplex[i] = self.driver.get_configuration(
self.linear_point(simplex[0].data, simplex[i].data, scale))
self.yield_nonblocking(simplex[i])
return simplex
def reflected_simplex(self):
return self.scaled_simplex(self.alpha)
def expanded_simplex(self):
return self.scaled_simplex(self.gamma)
def contracted_simplex(self):
return self.scaled_simplex(-self.beta)
class RandomNelderMead(RandomInitialMixin, NelderMead):
pass
class RightNelderMead(RightInitialMixin, NelderMead):
pass
class RegularNelderMead(RegularInitialMixin, NelderMead):
pass
class RandomTorczon(RandomInitialMixin, Torczon):
pass
class RightTorczon(RightInitialMixin, Torczon):
pass
class RegularTorczon(RegularInitialMixin, Torczon):
pass
class MultiNelderMead(RecyclingMetaTechnique):
def __init__(self):
super(MultiNelderMead, self).__init__([RightNelderMead, RandomNelderMead,
RegularNelderMead])
class MultiTorczon(RecyclingMetaTechnique):
def __init__(self):
super(MultiTorczon, self).__init__([RightTorczon, RandomTorczon,
RegularTorczon])
register(RandomNelderMead())
register(RegularNelderMead())
register(RightNelderMead())
register(MultiNelderMead())
register(RandomTorczon())
register(RegularTorczon())
register(RightTorczon())
register(MultiTorczon())
|
|
from __future__ import unicode_literals
import re
from .common import InfoExtractor
from ..compat import (
compat_urllib_parse,
compat_urllib_request,
)
from ..utils import (
ExtractorError,
)
class UdemyIE(InfoExtractor):
IE_NAME = 'udemy'
_VALID_URL = r'https?://www\.udemy\.com/(?:[^#]+#/lecture/|lecture/view/?\?lectureId=)(?P<id>\d+)'
_LOGIN_URL = 'https://www.udemy.com/join/login-popup/?displayType=ajax&showSkipButton=1'
_ORIGIN_URL = 'https://www.udemy.com'
_NETRC_MACHINE = 'udemy'
_TESTS = [{
'url': 'https://www.udemy.com/java-tutorial/#/lecture/172757',
'md5': '98eda5b657e752cf945d8445e261b5c5',
'info_dict': {
'id': '160614',
'ext': 'mp4',
'title': 'Introduction and Installation',
'description': 'md5:c0d51f6f21ef4ec65f091055a5eef876',
'duration': 579.29,
},
'skip': 'Requires udemy account credentials',
}]
def _handle_error(self, response):
if not isinstance(response, dict):
return
error = response.get('error')
if error:
error_str = 'Udemy returned error #%s: %s' % (error.get('code'), error.get('message'))
error_data = error.get('data')
if error_data:
error_str += ' - %s' % error_data.get('formErrors')
raise ExtractorError(error_str, expected=True)
def _download_json(self, url_or_request, video_id, note='Downloading JSON metadata'):
headers = {
'X-Udemy-Snail-Case': 'true',
'X-Requested-With': 'XMLHttpRequest',
}
for cookie in self._downloader.cookiejar:
if cookie.name == 'client_id':
headers['X-Udemy-Client-Id'] = cookie.value
elif cookie.name == 'access_token':
headers['X-Udemy-Bearer-Token'] = cookie.value
if isinstance(url_or_request, compat_urllib_request.Request):
for header, value in headers.items():
url_or_request.add_header(header, value)
else:
url_or_request = compat_urllib_request.Request(url_or_request, headers=headers)
response = super(UdemyIE, self)._download_json(url_or_request, video_id, note)
self._handle_error(response)
return response
def _real_initialize(self):
self._login()
def _login(self):
(username, password) = self._get_login_info()
if username is None:
self.raise_login_required('Udemy account is required')
login_popup = self._download_webpage(
self._LOGIN_URL, None, 'Downloading login popup')
def is_logged(webpage):
return any(p in webpage for p in ['href="https://www.udemy.com/user/logout/', '>Logout<'])
# already logged in
if is_logged(login_popup):
return
login_form = self._form_hidden_inputs('login-form', login_popup)
login_form.update({
'email': username.encode('utf-8'),
'password': password.encode('utf-8'),
})
request = compat_urllib_request.Request(
self._LOGIN_URL, compat_urllib_parse.urlencode(login_form).encode('utf-8'))
request.add_header('Referer', self._ORIGIN_URL)
request.add_header('Origin', self._ORIGIN_URL)
response = self._download_webpage(
request, None, 'Logging in as %s' % username)
if not is_logged(response):
error = self._html_search_regex(
r'(?s)<div[^>]+class="form-errors[^"]*">(.+?)</div>',
response, 'error message', default=None)
if error:
raise ExtractorError('Unable to login: %s' % error, expected=True)
raise ExtractorError('Unable to log in')
def _real_extract(self, url):
lecture_id = self._match_id(url)
lecture = self._download_json(
'https://www.udemy.com/api-1.1/lectures/%s' % lecture_id,
lecture_id, 'Downloading lecture JSON')
asset_type = lecture.get('assetType') or lecture.get('asset_type')
if asset_type != 'Video':
raise ExtractorError(
'Lecture %s is not a video' % lecture_id, expected=True)
asset = lecture['asset']
stream_url = asset.get('streamUrl') or asset.get('stream_url')
mobj = re.search(r'(https?://www\.youtube\.com/watch\?v=.*)', stream_url)
if mobj:
return self.url_result(mobj.group(1), 'Youtube')
video_id = asset['id']
thumbnail = asset.get('thumbnailUrl') or asset.get('thumbnail_url')
duration = asset['data']['duration']
download_url = asset.get('downloadUrl') or asset.get('download_url')
video = download_url.get('Video') or download_url.get('video')
video_480p = download_url.get('Video480p') or download_url.get('video_480p')
formats = [
{
'url': video_480p[0],
'format_id': '360p',
},
{
'url': video[0],
'format_id': '720p',
},
]
title = lecture['title']
description = lecture['description']
return {
'id': video_id,
'title': title,
'description': description,
'thumbnail': thumbnail,
'duration': duration,
'formats': formats
}
class UdemyCourseIE(UdemyIE):
IE_NAME = 'udemy:course'
_VALID_URL = r'https?://www\.udemy\.com/(?P<coursepath>[\da-z-]+)'
_SUCCESSFULLY_ENROLLED = '>You have enrolled in this course!<'
_ALREADY_ENROLLED = '>You are already taking this course.<'
_TESTS = []
@classmethod
def suitable(cls, url):
return False if UdemyIE.suitable(url) else super(UdemyCourseIE, cls).suitable(url)
def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url)
course_path = mobj.group('coursepath')
response = self._download_json(
'https://www.udemy.com/api-1.1/courses/%s' % course_path,
course_path, 'Downloading course JSON')
course_id = int(response['id'])
course_title = response['title']
webpage = self._download_webpage(
'https://www.udemy.com/course/subscribe/?courseId=%s' % course_id,
course_id, 'Enrolling in the course')
if self._SUCCESSFULLY_ENROLLED in webpage:
            self.to_screen('%s: Successfully enrolled in the course' % course_id)
elif self._ALREADY_ENROLLED in webpage:
            self.to_screen('%s: Already enrolled in the course' % course_id)
response = self._download_json(
'https://www.udemy.com/api-1.1/courses/%s/curriculum' % course_id,
course_id, 'Downloading course curriculum')
entries = [
self.url_result(
'https://www.udemy.com/%s/#/lecture/%s' % (course_path, asset['id']), 'Udemy')
            for asset in response if (asset.get('assetType') or asset.get('asset_type')) == 'Video'
]
return self.playlist_result(entries, course_id, course_title)
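# A short illustrative helper (not part of youtube-dl) showing why
# UdemyCourseIE.suitable() above defers to UdemyIE: a lecture URL matches both
# _VALID_URL patterns, so without the override the course extractor would also
# claim lecture pages. The example URL is taken from the test case above.
def _explain_suitable_precedence():
    lecture_url = 'https://www.udemy.com/java-tutorial/#/lecture/172757'
    matches_lecture = re.match(UdemyIE._VALID_URL, lecture_url) is not None       # True
    matches_course = re.match(UdemyCourseIE._VALID_URL, lecture_url) is not None  # True
    # suitable() resolves the overlap in favour of the more specific lecture extractor
    return matches_lecture, matches_course, UdemyCourseIE.suitable(lecture_url)   # (True, True, False)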
|
|
"""The Instrument class holds all events for a single instrument and contains
functions for extracting information from the events it contains.
"""
import numpy as np
try:
import fluidsynth
_HAS_FLUIDSYNTH = True
except ImportError:
_HAS_FLUIDSYNTH = False
import os
import pkg_resources
from .containers import PitchBend
from .utilities import pitch_bend_to_semitones, note_number_to_hz
DEFAULT_SF2 = 'TimGM6mb.sf2'
class Instrument(object):
"""Object to hold event information for a single instrument.
Parameters
----------
program : int
MIDI program number (instrument index), in ``[0, 127]``.
is_drum : bool
Is the instrument a drum instrument (channel 9)?
name : str
Name of the instrument.
Attributes
----------
program : int
The program number of this instrument.
is_drum : bool
Is the instrument a drum instrument (channel 9)?
name : str
Name of the instrument.
notes : list
List of :class:`pretty_midi.Note` objects.
pitch_bends : list
        List of :class:`pretty_midi.PitchBend` objects.
control_changes : list
List of :class:`pretty_midi.ControlChange` objects.
"""
def __init__(self, program, is_drum=False, name=''):
"""Create the Instrument.
"""
self.program = program
self.is_drum = is_drum
self.name = name
self.notes = []
self.pitch_bends = []
self.control_changes = []
def get_onsets(self):
"""Get all onsets of all notes played by this instrument.
May contain duplicates.
Returns
-------
onsets : np.ndarray
List of all note onsets.
"""
onsets = []
# Get the note-on time of each note played by this instrument
for note in self.notes:
onsets.append(note.start)
# Return them sorted (because why not?)
return np.sort(onsets)
def get_piano_roll(self, fs=100, times=None,
pedal_threshold=64):
"""Compute a piano roll matrix of this instrument.
Parameters
----------
fs : int
Sampling frequency of the columns, i.e. each column is spaced apart
by ``1./fs`` seconds.
times : np.ndarray
Times of the start of each column in the piano roll.
Default ``None`` which is ``np.arange(0, get_end_time(), 1./fs)``.
pedal_threshold : int
            Control change 64 (sustain pedal) messages with a value below this
            threshold are treated as pedal-off; values at or above it are
            treated as pedal-on. While the pedal is on, notes are elongated in
            the piano roll. If ``None``, CC64 messages are ignored.
            Default is 64.
Returns
-------
piano_roll : np.ndarray, shape=(128,times.shape[0])
Piano roll of this instrument.
"""
# If there are no notes, return an empty matrix
if self.notes == []:
return np.array([[]]*128)
# Get the end time of the last event
end_time = self.get_end_time()
# Extend end time if one was provided
if times is not None and times[-1] > end_time:
end_time = times[-1]
# Allocate a matrix of zeros - we will add in as we go
piano_roll = np.zeros((128, int(fs*end_time)))
# Drum tracks don't have pitch, so return a matrix of zeros
if self.is_drum:
if times is None:
return piano_roll
else:
return np.zeros((128, times.shape[0]))
# Add up piano roll matrix, note-by-note
for note in self.notes:
# Should interpolate
piano_roll[note.pitch,
int(note.start*fs):int(note.end*fs)] += note.velocity
# Process sustain pedals
if pedal_threshold is not None:
CC_SUSTAIN_PEDAL = 64
time_pedal_on = 0
is_pedal_on = False
for cc in [_e for _e in self.control_changes
if _e.number == CC_SUSTAIN_PEDAL]:
time_now = int(cc.time*fs)
is_current_pedal_on = (cc.value >= pedal_threshold)
if not is_pedal_on and is_current_pedal_on:
time_pedal_on = time_now
is_pedal_on = True
elif is_pedal_on and not is_current_pedal_on:
# For each pitch, a sustain pedal "retains"
# the maximum velocity up to now due to
# logarithmic nature of human loudness perception
subpr = piano_roll[:, time_pedal_on:time_now]
# Take the running maximum
pedaled = np.maximum.accumulate(subpr, axis=1)
piano_roll[:, time_pedal_on:time_now] = pedaled
is_pedal_on = False
# Process pitch changes
# Need to sort the pitch bend list for the following to work
ordered_bends = sorted(self.pitch_bends, key=lambda bend: bend.time)
# Add in a bend of 0 at the end of time
end_bend = PitchBend(0, end_time)
for start_bend, end_bend in zip(ordered_bends,
ordered_bends[1:] + [end_bend]):
# Piano roll is already generated with everything bend = 0
if np.abs(start_bend.pitch) < 1:
continue
# Get integer and decimal part of bend amount
start_pitch = pitch_bend_to_semitones(start_bend.pitch)
bend_int = int(np.sign(start_pitch)*np.floor(np.abs(start_pitch)))
bend_decimal = np.abs(start_pitch - bend_int)
            # Column indices affected by the bend
bend_range = np.r_[int(start_bend.time*fs):int(end_bend.time*fs)]
# Construct the bent part of the piano roll
bent_roll = np.zeros(piano_roll[:, bend_range].shape)
# Easiest to process differently depending on bend sign
if start_bend.pitch >= 0:
# First, pitch shift by the int amount
                if bend_int != 0:
bent_roll[bend_int:] = piano_roll[:-bend_int, bend_range]
else:
bent_roll = piano_roll[:, bend_range]
                # Now, linearly interpolate by the fractional part of the bend
bent_roll[1:] = ((1 - bend_decimal)*bent_roll[1:] +
bend_decimal*bent_roll[:-1])
else:
# Same procedure as for positive bends
                if bend_int != 0:
bent_roll[:bend_int] = piano_roll[-bend_int:, bend_range]
else:
bent_roll = piano_roll[:, bend_range]
bent_roll[:-1] = ((1 - bend_decimal)*bent_roll[:-1] +
bend_decimal*bent_roll[1:])
# Store bent portion back in piano roll
piano_roll[:, bend_range] = bent_roll
if times is None:
return piano_roll
piano_roll_integrated = np.zeros((128, times.shape[0]))
# Convert to column indices
        times = np.array(np.round(times*fs), dtype=int)
for n, (start, end) in enumerate(zip(times[:-1], times[1:])):
if start < piano_roll.shape[1]: # if start is >=, leave zeros
if start == end:
end = start + 1
# Each column is the mean of the columns in piano_roll
piano_roll_integrated[:, n] = np.mean(piano_roll[:, start:end],
axis=1)
return piano_roll_integrated
def get_chroma(self, fs=100, times=None, pedal_threshold=64):
"""Get a sequence of chroma vectors from this instrument.
Parameters
----------
fs : int
Sampling frequency of the columns, i.e. each column is spaced apart
by ``1./fs`` seconds.
times : np.ndarray
Times of the start of each column in the piano roll.
Default ``None`` which is ``np.arange(0, get_end_time(), 1./fs)``.
pedal_threshold : int
            Control change 64 (sustain pedal) messages with a value below this
            threshold are treated as pedal-off; values at or above it are
            treated as pedal-on. While the pedal is on, notes are elongated in
            the piano roll. If ``None``, CC64 messages are ignored.
            Default is 64.
Returns
-------
piano_roll : np.ndarray, shape=(12,times.shape[0])
Chromagram of this instrument.
"""
# First, get the piano roll
piano_roll = self.get_piano_roll(fs=fs, times=times,
pedal_threshold=pedal_threshold)
# Fold into one octave
chroma_matrix = np.zeros((12, piano_roll.shape[1]))
for note in range(12):
chroma_matrix[note, :] = np.sum(piano_roll[note::12], axis=0)
return chroma_matrix
def get_end_time(self):
"""Returns the time of the end of the events in this instrument.
Returns
-------
end_time : float
Time, in seconds, of the last event.
"""
# Cycle through all note ends and all pitch bends and find the largest
events = ([n.end for n in self.notes] +
[b.time for b in self.pitch_bends] +
[c.time for c in self.control_changes])
# If there are no events, just return 0
if len(events) == 0:
return 0.
else:
return max(events)
def get_pitch_class_histogram(self, use_duration=False, use_velocity=False,
normalize=False):
"""Computes the frequency of pitch classes of this instrument,
optionally weighted by their durations or velocities.
Parameters
----------
use_duration : bool
Weight frequency by note duration.
use_velocity : bool
Weight frequency by note velocity.
normalize : bool
Normalizes the histogram such that the sum of bin values is 1.
Returns
-------
histogram : np.ndarray, shape=(12,)
Histogram of pitch classes given current instrument, optionally
weighted by their durations or velocities.
"""
# Return all zeros if track is drum
if self.is_drum:
return np.zeros(12)
weights = np.ones(len(self.notes))
# Assumes that duration and velocity have equal weight
if use_duration:
weights *= [note.end - note.start for note in self.notes]
if use_velocity:
weights *= [note.velocity for note in self.notes]
histogram, _ = np.histogram([n.pitch % 12 for n in self.notes],
bins=np.arange(13),
weights=weights,
density=normalize)
return histogram
def get_pitch_class_transition_matrix(self, normalize=False,
time_thresh=0.05):
"""Computes the pitch class transition matrix of this instrument.
Transitions are added whenever the end of a note is within
        ``time_thresh`` from the start of any other note.
Parameters
----------
normalize : bool
            Normalize the transition matrix so that its entries sum to 1.
time_thresh : float
Maximum temporal threshold, in seconds, between the start of a note
and end time of any other note for a transition to be added.
Returns
-------
transition_matrix : np.ndarray, shape=(12,12)
Pitch class transition matrix.
"""
        # If the instrument is a drum track or has at most one note, return all zeros
if self.is_drum or len(self.notes) <= 1:
return np.zeros((12, 12))
# retrieve note starts, ends and pitch classes(nodes) from self.notes
starts, ends, nodes = np.array(
[[x.start, x.end, x.pitch % 12] for x in self.notes]).T
# compute distance matrix for all start and end time pairs
dist_mat = np.subtract.outer(ends, starts)
# find indices of pairs of notes where the end time of one note is
# within time_thresh of the start time of the other
sources, targets = np.where(abs(dist_mat) < time_thresh)
transition_matrix, _, _ = np.histogram2d(nodes[sources],
nodes[targets],
bins=np.arange(13),
normed=normalize)
return transition_matrix
def remove_invalid_notes(self):
"""Removes any notes whose end time is before or at their start time.
"""
        # Create a list of all invalid notes
notes_to_delete = []
for note in self.notes:
if note.end <= note.start:
notes_to_delete.append(note)
# Remove the notes found
for note in notes_to_delete:
self.notes.remove(note)
def synthesize(self, fs=44100, wave=np.sin):
"""Synthesize the instrument's notes using some waveshape.
For drum instruments, returns zeros.
Parameters
----------
fs : int
Sampling rate of the synthesized audio signal.
wave : function
Function which returns a periodic waveform,
e.g. ``np.sin``, ``scipy.signal.square``, etc.
Returns
-------
synthesized : np.ndarray
Waveform of the instrument's notes, synthesized at ``fs``.
"""
# Pre-allocate output waveform
synthesized = np.zeros(int(fs*(self.get_end_time() + 1)))
# If we're a percussion channel, just return the zeros
if self.is_drum:
return synthesized
        # wave must be a callable that generates a periodic waveform
if not hasattr(wave, '__call__'):
raise ValueError('wave should be a callable Python function')
# This is a simple way to make the end of the notes fade-out without
# clicks
fade_out = np.linspace(1, 0, int(.1*fs))
# Create a frequency multiplier array for pitch bend
bend_multiplier = np.ones(synthesized.shape)
# Need to sort the pitch bend list for the loop below to work
ordered_bends = sorted(self.pitch_bends, key=lambda bend: bend.time)
# Add in a bend of 0 at the end of time
end_bend = PitchBend(0, self.get_end_time())
for start_bend, end_bend in zip(ordered_bends,
ordered_bends[1:] + [end_bend]):
# Bend start and end time in samples
start = int(start_bend.time*fs)
end = int(end_bend.time*fs)
# The multiplier will be (twelfth root of 2)^(bend semitones)
bend_semitones = pitch_bend_to_semitones(start_bend.pitch)
bend_amount = (2**(1/12.))**bend_semitones
            # Sample indices affected by the bend
bend_multiplier[start:end] = bend_amount
# Add in waveform for each note
for note in self.notes:
# Indices in samples of this note
start = int(fs*note.start)
end = int(fs*note.end)
# Get frequency of note from MIDI note number
frequency = note_number_to_hz(note.pitch)
# When a pitch bend gets applied, there will be a sample
# discontinuity. So, we also need an array of offsets which get
# applied to compensate.
offsets = np.zeros(end - start)
for bend in ordered_bends:
bend_sample = int(bend.time*fs)
# Does this pitch bend fall within this note?
if bend_sample > start and bend_sample < end:
# Compute the average bend so far
bend_so_far = bend_multiplier[start:bend_sample].mean()
bend_amount = bend_multiplier[bend_sample]
# Compute the offset correction
offset = (bend_so_far - bend_amount)*(bend_sample - start)
                    # Store this offset for the affected samples
offsets[bend_sample - start:] = offset
# Compute the angular frequencies, bent, over this interval
frequencies = 2*np.pi*frequency*(bend_multiplier[start:end])/fs
# Synthesize using wave function at this frequency
note_waveform = wave(frequencies*np.arange(end - start) +
2*np.pi*frequency*offsets/fs)
# Apply an exponential envelope
envelope = np.exp(-np.arange(end - start)/(1.0*fs))
# Make the end of the envelope be a fadeout
if envelope.shape[0] > fade_out.shape[0]:
envelope[-fade_out.shape[0]:] *= fade_out
else:
envelope *= np.linspace(1, 0, envelope.shape[0])
# Multiply by velocity (don't think it's linearly scaled but
# whatever)
envelope *= note.velocity
# Add in envelope'd waveform to the synthesized signal
synthesized[start:end] += envelope*note_waveform
return synthesized
def fluidsynth(self, fs=44100, sf2_path=None):
"""Synthesize using fluidsynth.
Parameters
----------
fs : int
Sampling rate to synthesize.
sf2_path : str
Path to a .sf2 file.
Default ``None``, which uses the TimGM6mb.sf2 file included with
``pretty_midi``.
Returns
-------
synthesized : np.ndarray
Waveform of the MIDI data, synthesized at ``fs``.
"""
# If sf2_path is None, use the included TimGM6mb.sf2 path
if sf2_path is None:
sf2_path = pkg_resources.resource_filename(__name__, DEFAULT_SF2)
if not _HAS_FLUIDSYNTH:
raise ImportError("fluidsynth() was called but pyfluidsynth "
"is not installed.")
if not os.path.exists(sf2_path):
raise ValueError("No soundfont file found at the supplied path "
"{}".format(sf2_path))
# If the instrument has no notes, return an empty array
if len(self.notes) == 0:
return np.array([])
# Create fluidsynth instance
fl = fluidsynth.Synth(samplerate=fs)
# Load in the soundfont
sfid = fl.sfload(sf2_path)
# If this is a drum instrument, use channel 9 and bank 128
if self.is_drum:
channel = 9
# Try to use the supplied program number
res = fl.program_select(channel, sfid, 128, self.program)
# If the result is -1, there's no preset with this program number
if res == -1:
# So use preset 0
fl.program_select(channel, sfid, 128, 0)
# Otherwise just use channel 0
else:
channel = 0
fl.program_select(channel, sfid, 0, self.program)
# Collect all notes in one list
event_list = []
for note in self.notes:
event_list += [[note.start, 'note on', note.pitch, note.velocity]]
event_list += [[note.end, 'note off', note.pitch]]
for bend in self.pitch_bends:
event_list += [[bend.time, 'pitch bend', bend.pitch]]
for control_change in self.control_changes:
event_list += [[control_change.time, 'control change',
control_change.number, control_change.value]]
# Sort the event list by time, and secondarily by whether the event
# is a note off
event_list.sort(key=lambda x: (x[0], x[1] != 'note off'))
# Add some silence at the beginning according to the time of the first
# event
current_time = event_list[0][0]
# Convert absolute seconds to relative samples
next_event_times = [e[0] for e in event_list[1:]]
for event, end in zip(event_list[:-1], next_event_times):
event[0] = end - event[0]
# Include 1 second of silence at the end
event_list[-1][0] = 1.
# Pre-allocate output array
total_time = current_time + np.sum([e[0] for e in event_list])
synthesized = np.zeros(int(np.ceil(fs*total_time)))
# Iterate over all events
for event in event_list:
# Process events based on type
if event[1] == 'note on':
fl.noteon(channel, event[2], event[3])
elif event[1] == 'note off':
fl.noteoff(channel, event[2])
elif event[1] == 'pitch bend':
fl.pitch_bend(channel, event[2])
elif event[1] == 'control change':
fl.cc(channel, event[2], event[3])
# Add in these samples
current_sample = int(fs*current_time)
end = int(fs*(current_time + event[0]))
samples = fl.get_samples(end - current_sample)[::2]
synthesized[current_sample:end] += samples
# Increment the current sample
current_time += event[0]
# Close fluidsynth
fl.delete()
return synthesized
def __repr__(self):
return 'Instrument(program={}, is_drum={}, name="{}")'.format(
self.program, self.is_drum, self.name.replace('"', r'\"'))
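# A minimal usage sketch (not part of pretty_midi itself). get_onsets() and
# get_piano_roll() above only read the .pitch, .start, .end and .velocity
# attributes of each note, so a namedtuple stand-in for pretty_midi.Note is
# enough to illustrate them here.
def _example_piano_roll():
    from collections import namedtuple
    FakeNote = namedtuple('FakeNote', ['velocity', 'pitch', 'start', 'end'])
    inst = Instrument(program=0)
    inst.notes.append(FakeNote(velocity=100, pitch=60, start=0.0, end=0.5))  # middle C
    inst.notes.append(FakeNote(velocity=80, pitch=64, start=0.5, end=1.0))   # E above it
    roll = inst.get_piano_roll(fs=100)   # shape (128, 100): one column per 10 ms
    chroma = inst.get_chroma(fs=100)     # folded into 12 pitch classes
    return roll.shape, chroma.shape, inst.get_onsets()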
|
|
# coding=utf-8
# vim: tabstop=4 shiftwidth=4 softtabstop=4
#
# Copyright (c) 2012, Intel Performance Learning Solutions Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Test network resource backend.
"""
#pylint: disable=W0102,C0103,R0904,R0801
from nova.openstack.common import gettextutils
gettextutils.install('nova')
import mox
import unittest
from occi import core_model, exceptions
from occi.extensions import infrastructure
from occi_os_api import nova_glue
from occi_os_api.backends import storage
class TestStorageBackend(unittest.TestCase):
"""
Tests the storage backend!
"""
def setUp(self):
"""
Setup the tests.
"""
self.backend = storage.StorageBackend()
self.sec_obj = {'nova_ctx': None}
self.mox = mox.Mox()
def tearDown(self):
"""
Cleanup mocks.
"""
self.mox.UnsetStubs()
# Test for failure
def test_create_for_failure(self):
"""
        Test creation failure.
"""
        # missing size attribute
res = mox.MockObject(core_model.Resource)
res.attributes = {}
self.assertRaises(AttributeError, self.backend.create, res,
self.sec_obj)
# error in volume creation
res.attributes = {'occi.storage.size': '1'}
self.mox.StubOutWithMock(nova_glue.storage, 'create_storage')
nova_glue.storage.create_storage(mox.IsA(object),
mox.IsA(object)).\
AndReturn({'id': '1'})
self.mox.StubOutWithMock(nova_glue.storage, 'get_storage')
nova_glue.storage.get_storage(mox.IsA(object),
mox.IsA(object)).\
AndReturn({'status': 'error'})
self.mox.ReplayAll()
self.assertRaises(exceptions.HTTPError, self.backend.create, res,
self.sec_obj)
self.mox.VerifyAll()
def test_action_for_failure(self):
"""
Test actions
"""
res = mox.MockObject(core_model.Resource)
res.actions = []
# snapshot
self.assertRaises(AttributeError, self.backend.action, res,
infrastructure.SNAPSHOT, {}, self.sec_obj)
# Test for sanity
def test_create_for_sanity(self):
"""
Test creation.
"""
res = mox.MockObject(core_model.Resource)
res.attributes = {'occi.storage.size': '1'}
self.mox.StubOutWithMock(nova_glue.storage, 'create_storage')
nova_glue.storage.create_storage(mox.IsA(object),
mox.IsA(object)).\
AndReturn({'id': '1'})
self.mox.StubOutWithMock(nova_glue.storage, 'get_storage')
nova_glue.storage.get_storage(mox.IsA(object),
mox.IsA(object)).\
AndReturn({'status': 'available'})
self.mox.ReplayAll()
self.backend.create(res, self.sec_obj)
# verify all attrs.
self.assertEqual(res.attributes['occi.storage.state'], 'active')
self.assertListEqual([infrastructure.OFFLINE, infrastructure.BACKUP,
infrastructure.SNAPSHOT, infrastructure.RESIZE],
res.actions)
self.mox.VerifyAll()
def test_retrieve_for_sanity(self):
"""
Test retrieval.
"""
res = mox.MockObject(core_model.Resource)
res.attributes = {'occi.core.id': '1'}
self.mox.StubOutWithMock(nova_glue.storage, 'get_storage')
nova_glue.storage.get_storage(mox.IsA(object),
mox.IsA(object)).\
AndReturn({'status': 'available', 'size': '1'})
self.mox.ReplayAll()
self.backend.retrieve(res, self.sec_obj)
# verify all attrs.
self.assertEqual(res.attributes['occi.storage.state'], 'online')
self.assertListEqual([infrastructure.OFFLINE, infrastructure.BACKUP,
infrastructure.SNAPSHOT, infrastructure.RESIZE],
res.actions)
self.mox.VerifyAll()
self.mox.UnsetStubs()
self.mox.StubOutWithMock(nova_glue.storage, 'get_storage')
nova_glue.storage.get_storage(mox.IsA(object),
mox.IsA(object)).\
AndReturn({'status': 'bla', 'size': '1'})
self.mox.ReplayAll()
self.backend.retrieve(res, self.sec_obj)
# verify all attrs.
self.assertEqual(res.attributes['occi.storage.state'], 'offline')
self.assertTrue(len(res.actions) == 1)
self.mox.VerifyAll()
def test_update_for_sanity(self):
"""
Test updating.
"""
res1 = mox.MockObject(core_model.Resource)
res1.attributes = {}
res2 = mox.MockObject(core_model.Resource)
res2.attributes = {'occi.core.title': 'foo',
'occi.core.summary': 'bar'}
self.mox.ReplayAll()
self.backend.update(res1, res2, self.sec_obj)
# verify all attrs.
self.assertEqual(res1.attributes['occi.core.title'], 'foo')
self.assertEqual(res1.attributes['occi.core.summary'], 'bar')
self.mox.VerifyAll()
def test_remove_for_sanity(self):
"""
Test removal.
"""
res = mox.MockObject(core_model.Resource)
res.attributes = {'occi.core.id': '1'}
self.mox.StubOutWithMock(nova_glue.storage, 'delete_storage_instance')
nova_glue.storage.delete_storage_instance(mox.IsA(object),
mox.IsA(object))
self.mox.ReplayAll()
self.backend.delete(res, self.sec_obj)
self.mox.VerifyAll()
def test_action_for_sanity(self):
"""
Test actions
"""
res = mox.MockObject(core_model.Resource)
res.attributes = {'occi.core.id': '1',
'occi.core.summary': 'foo'}
res.actions = [infrastructure.SNAPSHOT, infrastructure.BACKUP]
# snapshot
self.mox.StubOutWithMock(nova_glue.storage,
'snapshot_storage_instance')
nova_glue.storage.snapshot_storage_instance(mox.IsA(object),
mox.IsA(object),
mox.IsA(object),
mox.IsA(object))
self.mox.ReplayAll()
self.backend.action(res, infrastructure.SNAPSHOT, {}, self.sec_obj)
self.mox.VerifyAll()
# some other action
self.mox.ReplayAll()
self.backend.action(res, infrastructure.BACKUP, {}, self.sec_obj)
self.mox.VerifyAll()
class TestStorageLinkBackend(unittest.TestCase):
"""
Tests storage linking.
"""
def setUp(self):
"""
Setup the tests.
"""
self.backend = storage.StorageLinkBackend()
self.sec_obj = {'nova_ctx': None}
self.mox = mox.Mox()
def tearDown(self):
"""
Cleanup mocks.
"""
self.mox.UnsetStubs()
# Test for sanity
def test_create_for_sanity(self):
"""
        Test attachment.
"""
source = mox.MockObject(core_model.Resource)
source.attributes = {'occi.core.id': 'foo'}
target = mox.MockObject(core_model.Resource)
target.attributes = {'occi.core.id': 'bar'}
link = core_model.Link('foo', None, [], source, target)
link.attributes = {'occi.storagelink.deviceid': '/dev/sda'}
self.mox.StubOutWithMock(nova_glue.vm, 'attach_volume')
nova_glue.vm.attach_volume(mox.IsA(object), mox.IsA(object),
mox.IsA(object), mox.IsA(object)).\
AndReturn({})
self.mox.ReplayAll()
self.backend.create(link, self.sec_obj)
# verify all attrs.
self.assertEqual(link.attributes['occi.storagelink.deviceid'],
'/dev/sda')
self.assertIn('occi.storagelink.mountpoint', link.attributes)
self.assertEqual(link.attributes['occi.storagelink.state'], 'active')
self.mox.VerifyAll()
def test_delete_for_sanity(self):
"""
        Test detachment.
"""
source = mox.MockObject(core_model.Resource)
target = mox.MockObject(core_model.Resource)
target.attributes = {'occi.core.id': 'bar'}
link = core_model.Link('foo', None, [], source, target)
self.mox.StubOutWithMock(nova_glue.vm, 'detach_volume')
nova_glue.vm.detach_volume(mox.IsA(object), mox.IsA(object))
self.mox.ReplayAll()
self.backend.delete(link, self.sec_obj)
self.mox.VerifyAll()
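# The tests above all follow the same mox life-cycle; a compact sketch of that
# pattern for reference (the arguments passed after ReplayAll() are hypothetical).
def _mox_lifecycle_sketch():
    m = mox.Mox()
    m.StubOutWithMock(nova_glue.storage, 'get_storage')               # 1. stub out the call
    nova_glue.storage.get_storage(mox.IsA(object), mox.IsA(object)).\
        AndReturn({'status': 'available'})                            # 2. record the expectation
    m.ReplayAll()                                                     # 3. switch to replay mode
    result = nova_glue.storage.get_storage('uid', 'ctx')              # 4. exercise the stub
    m.VerifyAll()                                                     # 5. verify expectations were met
    m.UnsetStubs()
    return result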
|
|
# Copyright 2015, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Provides distutils command classes for the gRPC Python setup process."""
from distutils import errors as _errors
import glob
import os
import os.path
import platform
import re
import shutil
import subprocess
import sys
import traceback
import setuptools
from setuptools.command import build_ext
from setuptools.command import build_py
from setuptools.command import easy_install
from setuptools.command import install
from setuptools.command import test
PYTHON_STEM = os.path.dirname(os.path.abspath(__file__))
GRPC_STEM = os.path.abspath(PYTHON_STEM + '../../../../')
GRPC_PROTO_STEM = os.path.join(GRPC_STEM, 'src', 'proto')
PROTO_STEM = os.path.join(PYTHON_STEM, 'src', 'proto')
PYTHON_PROTO_TOP_LEVEL = os.path.join(PYTHON_STEM, 'src')
class CommandError(Exception):
  """Simple exception used to signal failures in the custom commands below."""
class GatherProto(setuptools.Command):
description = 'gather proto dependencies'
user_options = []
def initialize_options(self):
pass
def finalize_options(self):
pass
def run(self):
# TODO(atash) ensure that we're running from the repository directory when
# this command is used
try:
shutil.rmtree(PROTO_STEM)
except Exception as error:
# We don't care if this command fails
pass
shutil.copytree(GRPC_PROTO_STEM, PROTO_STEM)
for root, _, _ in os.walk(PYTHON_PROTO_TOP_LEVEL):
path = os.path.join(root, '__init__.py')
open(path, 'a').close()
class BuildProtoModules(setuptools.Command):
"""Command to generate project *_pb2.py modules from proto files."""
description = 'build protobuf modules'
user_options = [
('include=', None, 'path patterns to include in protobuf generation'),
('exclude=', None, 'path patterns to exclude from protobuf generation')
]
def initialize_options(self):
self.exclude = None
self.include = r'.*\.proto$'
def finalize_options(self):
pass
def run(self):
import grpc_tools.protoc as protoc
include_regex = re.compile(self.include)
exclude_regex = re.compile(self.exclude) if self.exclude else None
paths = []
for walk_root, directories, filenames in os.walk(PROTO_STEM):
for filename in filenames:
path = os.path.join(walk_root, filename)
if include_regex.match(path) and not (
exclude_regex and exclude_regex.match(path)):
paths.append(path)
# TODO(kpayson): It would be nice to do this in a batch command,
# but we currently have name conflicts in src/proto
for path in paths:
command = [
'grpc_tools.protoc',
'-I {}'.format(PROTO_STEM),
'--python_out={}'.format(PROTO_STEM),
'--grpc_python_out={}'.format(PROTO_STEM),
] + [path]
if protoc.main(command) != 0:
sys.stderr.write(
'warning: Command:\n{}\nFailed'.format(command))
    # Generated proto directories don't include __init__.py, but
    # these are needed for python package resolution
for walk_root, _, _ in os.walk(PROTO_STEM):
path = os.path.join(walk_root, '__init__.py')
open(path, 'a').close()
class BuildPy(build_py.build_py):
"""Custom project build command."""
def run(self):
try:
self.run_command('build_package_protos')
except CommandError as error:
sys.stderr.write('warning: %s\n' % error.message)
build_py.build_py.run(self)
class TestLite(setuptools.Command):
"""Command to run tests without fetching or building anything."""
description = 'run tests without fetching or building anything.'
user_options = []
def initialize_options(self):
pass
def finalize_options(self):
# distutils requires this override.
pass
def run(self):
self._add_eggs_to_path()
import tests
loader = tests.Loader()
loader.loadTestsFromNames(['tests'])
runner = tests.Runner()
result = runner.run(loader.suite)
if not result.wasSuccessful():
sys.exit('Test failure')
def _add_eggs_to_path(self):
"""Fetch install and test requirements"""
self.distribution.fetch_build_eggs(self.distribution.install_requires)
self.distribution.fetch_build_eggs(self.distribution.tests_require)
class RunInterop(test.test):
description = 'run interop test client/server'
user_options = [('args=', 'a', 'pass-thru arguments for the client/server'),
('client', 'c', 'flag indicating to run the client'),
('server', 's', 'flag indicating to run the server')]
def initialize_options(self):
self.args = ''
self.client = False
self.server = False
def finalize_options(self):
if self.client and self.server:
raise _errors.DistutilsOptionError(
'you may only specify one of client or server')
def run(self):
if self.distribution.install_requires:
self.distribution.fetch_build_eggs(
self.distribution.install_requires)
if self.distribution.tests_require:
self.distribution.fetch_build_eggs(self.distribution.tests_require)
if self.client:
self.run_client()
elif self.server:
self.run_server()
def run_server(self):
# We import here to ensure that our setuptools parent has had a chance to
# edit the Python system path.
from tests.interop import server
sys.argv[1:] = self.args.split()
server.serve()
def run_client(self):
# We import here to ensure that our setuptools parent has had a chance to
# edit the Python system path.
from tests.interop import client
sys.argv[1:] = self.args.split()
client.test_interoperability()
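# A minimal sketch (not the project's actual setup.py) of how the command
# classes above are typically handed to setuptools; the cmdclass keys used here
# are illustrative assumptions, only the class names come from this module.
def _example_cmdclass():
  # e.g. setuptools.setup(..., cmdclass=_example_cmdclass())
  return {
      'gather_proto': GatherProto,
      'build_proto_modules': BuildProtoModules,
      'build_py': BuildPy,
      'test_lite': TestLite,
      'run_interop': RunInterop,
  }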
|
|
# -*- encoding: utf-8 -*-
import re
import os
import glob
import typing
import numpy as np
scores_dict = {
'train_single': ["single_best_train_score", "single_best_optimization_score"],
'test_single': ["single_best_test_score"],
'train_ensamble': ["ensemble_optimization_score"],
'test_ensamble': ["ensemble_test_score"]
}
def print_debug_information(automl):
    # In case it is called with an estimator,
    # get the automl object
if hasattr(automl, 'automl_'):
automl = automl.automl_
# Log file path
log_file = glob.glob(os.path.join(
automl._backend.temporary_directory, 'AutoML*.log'))[0]
include_messages = ['INFO', 'DEBUG', 'WARN',
'CRITICAL', 'ERROR', 'FATAL']
    # There is a lot of content in the log files. Only
    # parse the main messages and ignore the metalearning
    # messages
try:
with open(log_file) as logfile:
content = logfile.readlines()
# Get the messages to debug easier!
content = [line for line in content if any(
msg in line for msg in include_messages
) and 'metalearning' not in line]
except Exception as e:
return str(e)
# Also add the run history if any
if hasattr(automl, 'runhistory_') and hasattr(automl.runhistory_, 'data'):
for k, v in automl.runhistory_.data.items():
content += ["{}->{}".format(k, v)]
else:
content += ['No RunHistory']
# Also add the ensemble history if any
if len(automl.ensemble_performance_history) > 0:
content += [str(h) for h in automl.ensemble_performance_history]
else:
content += ['No Ensemble History']
return os.linesep.join(content)
def _includes(scores, all_scores):
return all(score in all_scores for score in scores) and len(scores) == len(all_scores)
def count_succeses(cv_results):
return np.sum(
[status in ['Success', 'Success (but do not advance to higher budget)']
for status in cv_results['status']]
)
def includes_all_scores(scores):
all_scores = scores_dict["train_single"] + scores_dict["test_single"] + \
scores_dict["train_ensamble"] + scores_dict["test_ensamble"] + ["Timestamp"]
return _includes(scores, all_scores)
def include_single_scores(scores):
all_scores = scores_dict["train_single"] + scores_dict["test_single"] + ["Timestamp"]
return _includes(scores, all_scores)
def includes_train_scores(scores):
all_scores = scores_dict["train_single"] + scores_dict["train_ensamble"] + ["Timestamp"]
return _includes(scores, all_scores)
def performance_over_time_is_plausible(poT):
if len(poT) < 1:
return False
if len(poT.drop(columns=["Timestamp"]).dropna()) < 1:
return False
if not poT["Timestamp"].is_monotonic:
return False
return True
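# A small self-check sketch (not from the original test suite) for the helpers
# above, using a hand-made cv_results-like dict and the score lists from scores_dict.
def _example_helper_checks():
    fake_cv_results = {'status': ['Success', 'Timeout',
                                  'Success (but do not advance to higher budget)']}
    assert count_succeses(fake_cv_results) == 2
    assert includes_train_scores(
        scores_dict['train_single'] + scores_dict['train_ensamble'] + ['Timestamp'])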
class AutoMLLogParser(object):
def __init__(self, logfile: str):
self.logfile = logfile
self.lines = self.parse_logfile()
def parse_logfile(self) -> typing.List[str]:
# We care about the [debug/info/...] messages
# At the time of writing, the message format was:
# [DEBUG] [2020-11-30 11:54:05,072:EnsembleBuilder] Restricting your
# function to 3072 mb memory.
#
# [DEBUG] [2020-11-30 11:53:55,062:pynisher] Redirecting
# output of the function to files.
assert os.path.exists(self.logfile), "{} not found".format(self.logfile)
with open(self.logfile) as fh:
content = [line.strip() for line in fh if re.search(r'[\w+]', line)]
return content
def count_ensembler_iterations(self) -> int:
iterations = []
        # Calling pynisher is one thing; actually executing the function is another
iterations_from_inside_ensemble_builder = []
for line in self.lines:
# Pynisher call
# we have to count the start msg from pynisher
# and the return msg
# We expect the start msg to be something like:
# [DEBUG] [2020-11-26 19:22:42,160:EnsembleBuilder] \
# Function called with argument: (61....
# [DEBUG] [2020-11-30 11:53:47,069:EnsembleBuilder] Function called with argument:
# (28.246965646743774, 1, False), {}
match = re.search(
r'EnsembleBuilder]\s+Function called with argument:\s+\(\d+\.\d+, (\d+), \w+',
line)
if match:
iterations.append(int(match.group(1)))
# Ensemble Builder actual call
# Here we expect the msg:
# [DEBUG] [2020-11-30 11:53:14,877:EnsembleBuilder] Starting iteration 0,
# time left: 61.266255
# [DEBUG] [2020-11-27 20:27:28,044:EnsembleBuilder] Starting iteration 2,
# time left: 10.603252
match = re.search(
r'EnsembleBuilder]\s+Starting iteration (\d+)',
line)
if match:
iterations_from_inside_ensemble_builder.append(int(match.group(1)))
# The ensemble builder might not be called if there is no time.
# Here we expect the msg:
# [DEBUG] [2020-11-27 20:27:28,044:EnsembleBuilder] Not starting iteration 2,
# as time left: 1.59324
match = re.search(
r'EnsembleBuilder]\s+Not starting iteration (\d+)',
line)
if match:
iterations_from_inside_ensemble_builder.append(int(match.group(1)))
        assert iterations == iterations_from_inside_ensemble_builder, "{} != {}".format(
iterations, iterations_from_inside_ensemble_builder
)
return iterations
def count_ensembler_success_pynisher_calls(self) -> int:
# We expect the return msg to be something like:
# [DEBUG] [2020-11-30 11:53:47,911:EnsembleBuilder] return value:
# (([{'Timestamp': Timestamp('2020-11-30 11:53:47.910727'),
# 'ensemble_optimization_score': 0.9787234042553191}], 50, None, None, None), 0)
# [DEBUG] [2020-11-30 11:54:05,984:EnsembleBuilder] return value:
# (([{'Timestamp': Timestamp('2020-11- 30 11:54:05.983837'),
# 'ensemble_optimization_score': 0.9787234042553191}], 50, None, None, None), 0)
return_msgs = len([line for line in self.lines if re.search(
r'EnsembleBuilder]\s+return value:.*Timestamp', line)])
return return_msgs
def count_tae_pynisher_calls(self) -> int:
# We expect the return msg to be something like:
# [DEBUG] [2020-12-16 11:57:08,987:Client-pynisher] Function called with argument: ()
# , {'queue': <multiprocessing.queues.Queue object at 0x7f9e3cfaae20>, 'config': 1
# [DEBUG] [2020-12-16 11:57:10,537:Client-pynisher] Function called with argument: ()
# , {'queue': <multiprocessing.queues.Queue object at 0x7f16f5d95c40>,
# 'config': Configuration:
        # Only the parentheses below need to be escaped; ] and { do not.
call_msgs = len([line for line in self.lines if re.search(
r'pynisher]\s+Function called with argument: \(\), {', line)])
return call_msgs
def count_tae_pynisher_returns(self) -> int:
# We expect the return msg to be something like:
# [DEBUG] [2020-11-30 11:53:11,264:pynisher] return value: (None, 0)
# [DEBUG] [2020-11-30 11:53:13,768:pynisher] return value: (None, 0)
return_msgs = len([line for line in self.lines if re.search(
r'pynisher]\s+return value:\s+', line)])
        # When the pynisher pipe is prematurely closed, we also expect either:
        # Your function call closed the pipe prematurely
        # -> Subprocess probably got an uncatchable signal
        # OR
        # Something else went wrong, sorry.
premature_msgs = len([line for line in self.lines if re.search(
r'pynisher]\s+Your function call closed the pipe prematurely', line)])
failure_msgs = len([line for line in self.lines if re.search(
r'pynisher]\s+Something else went wrong, sorry.', line)])
return return_msgs + premature_msgs + failure_msgs
def get_automl_setting_from_log(self, dataset_name: str, setting: str) -> str:
for line in self.lines:
# We expect messages of the form
# [DEBUG] [2020-11-30 11:53:10,457:AutoML(5):breast_cancer] ensemble_size: 50
# [DEBUG] [2020-11-30 11:53:10,457:AutoML(5):breast_cancer] ensemble_nbest: 50
match = re.search(
f"{dataset_name}]\\s*{setting}\\s*:\\s*(\\w+)",
line)
if match:
return match.group(1)
return None
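# A minimal usage sketch for the parser above; the log path and dataset name are
# hypothetical, in the real tests the file is discovered with glob as in
# print_debug_information().
def _example_parse_automl_log(logfile='/tmp/autosklearn_tmp/AutoML(1):0.log'):
    parser = AutoMLLogParser(logfile)
    return (parser.count_ensembler_iterations(),
            parser.count_tae_pynisher_calls(),
            parser.count_tae_pynisher_returns(),
            parser.get_automl_setting_from_log('breast_cancer', 'ensemble_size'))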
|
|
from flask import Flask, render_template
from mipframework.hichart_server.algorun import get_algorithm_result
from PCA import PCA
from PEARSON_CORRELATION import Pearson
from LOGISTIC_REGRESSION import LogisticRegression
from CALIBRATION_BELT import CalibrationBelt
from KAPLAN_MEIER import KaplanMeier
from ANOVA_ONEWAY import Anova
from NAIVE_BAYES import NaiveBayes
app = Flask(__name__)
charts_info = {
"pca_scree_eigen": {
"title": "PCA eigenvalue scree plot",
"url": "pca_scree_eigenvalues",
},
"pca_heatmap_eigen": {
"title": "PCA eigenvecots heatmap",
"url": "pca_heatmap_eigenvec",
},
"pearson_heatmap": {
"title": "Pearson Correlation Heatmap",
"url": "pearson_heatmap",
},
"logistic_confusion_matrix": {
"title": "Logistic Regression Confusion Matrix",
"url": "logistic_confmat",
},
"logistic_roc": {"title": "Logistic Regression ROC", "url": "logistic_roc"},
"calibration_belt": {"title": "Calibration Belt", "url": "calibration_belt"},
"kaplan_meier_survival": {
"title": "Kaplan-Meier Survival Curves",
"url": "kaplan_meier_survival",
},
"anova_errorbars": {"title": "Anova Mean Plot", "url": "anova_errorbars"},
"naive_bayes_confusion_matrix": {
"title": "NaiveBayes CM",
"url": "naive_bayes_confusion_matrix",
},
"naive_bayes_roc": {"title": "NaiveBayes ROC", "url": "naive_bayes_roc",},
}
@app.route("/")
@app.route("/home")
def home():
return render_template(
"home.html", title="Exareme Highcharts", charts_info=charts_info
)
@app.route("/anova_errorbars")
def anova_errorbars():
anova_args = [
"-y",
"lefthippocampus",
"-x",
"alzheimerbroadcategory",
"-pathology",
"dementia",
"-dataset",
"adni",
"-filter",
"",
]
result = get_algorithm_result(Anova, anova_args)
result = result["result"][3]["data"]
return render_template(
"highchart_layout.html", title="Anova Mean Plot", data=result
)
@app.route("/pca_scree_eigenvalues")
def pca_scree_eigenvalues():
pca_args = [
"-y",
"subjectage,rightventraldc,rightaccumbensarea, gender",
"-pathology",
"dementia, leftaccumbensarea",
"-dataset",
"adni",
"-filter",
"",
"-formula",
"",
"-coding",
"Treatment",
]
result = get_algorithm_result(PCA, pca_args)
result = result["result"][3]["data"]
return render_template("highchart_layout.html", title="PCA scree plot", data=result)
@app.route("/pca_heatmap_eigenvec")
def pca_heatmap_eigenvec():
pca_args = [
"-y",
"subjectage,rightventraldc,rightaccumbensarea, gender",
"-pathology",
"dementia, leftaccumbensarea",
"-dataset",
"adni",
"-filter",
"",
"-formula",
"",
"-coding",
"Treatment",
]
result = get_algorithm_result(PCA, pca_args)
result = result["result"][4]["data"]
return render_template(
"highchart_layout.html", title="PCA bubble plot", data=result
)
@app.route("/pearson_heatmap")
def pearson_heatmap():
args = [
"-x",
"",
"-y",
"leftputamen, righthippocampus, subjectage,rightventraldc,rightaccumbensarea, "
"rightioginferioroccipitalgyrus,rightmfcmedialfrontalcortex, lefthippocampus,"
"rightppplanumpolare",
"-pathology",
"dementia, leftaccumbensarea",
"-dataset",
"adni",
"-filter",
"",
"-formula",
"",
"-coding",
"",
]
result = get_algorithm_result(Pearson, args)
result = result["result"][2]["data"]
return render_template(
"highchart_layout.html", title="Pearson Correlation Heatmap", data=result
)
@app.route("/logistic_confmat")
def logistic_confmat():
args = [
"-x",
"lefthippocampus",
"-y",
"alzheimerbroadcategory",
"-pathology",
"dementia",
"-dataset",
"adni",
"-filter",
"""
{
"condition": "OR",
"rules": [
{
"id": "alzheimerbroadcategory",
"field": "alzheimerbroadcategory",
"type": "string",
"input": "text",
"operator": "equal",
"value": "AD"
},
{
"id": "alzheimerbroadcategory",
"field": "alzheimerbroadcategory",
"type": "string",
"input": "text",
"operator": "equal",
"value": "CN"
}
],
"valid": true
}
""",
"-formula",
"",
]
result = get_algorithm_result(LogisticRegression, args)
result = result["result"][3]["data"]
return render_template(
"highchart_layout.html",
title="Logistic Regression Confusion Matrix",
data=result,
)
@app.route("/logistic_roc")
def logistic_roc():
args = [
"-x",
"lefthippocampus",
"-y",
"alzheimerbroadcategory",
"-pathology",
"dementia",
"-dataset",
"adni",
"-filter",
"""
{
"condition": "OR",
"rules": [
{
"id": "alzheimerbroadcategory",
"field": "alzheimerbroadcategory",
"type": "string",
"input": "text",
"operator": "equal",
"value": "AD"
},
{
"id": "alzheimerbroadcategory",
"field": "alzheimerbroadcategory",
"type": "string",
"input": "text",
"operator": "equal",
"value": "CN"
}
],
"valid": true
}
""",
"-formula",
"",
]
result = get_algorithm_result(LogisticRegression, args)
result = result["result"][4]["data"]
return render_template(
"highchart_layout.html", title="Logistic Regression Confusion ROC", data=result,
)
@app.route("/calibration_belt")
def calibration_belt():
args = [
"-x",
"probGiViTI_2017_Complessiva",
"-y",
"hospOutcomeLatest_RIC10",
"-devel",
"internal",
"-max_deg",
"4",
"-confLevels",
"0.80, 0.95",
"-thres",
"0.95",
"-num_points",
"200",
"-pathology",
"dementia",
"-dataset",
"cb_data",
"-filter",
"",
"-formula",
"",
]
result = get_algorithm_result(CalibrationBelt, args)
result = result["result"][1]["data"]
return render_template(
"highchart_layout.html", title="Calibration Belt", data=result,
)
@app.route("/kaplan_meier_survival")
def kaplan_meier_survival():
args = [
"-x",
"apoe4",
"-y",
"alzheimerbroadcategory",
"-pathology",
"dementia",
"-dataset",
"alzheimer_fake_cohort",
"-filter",
"""
{
"condition":"OR",
"rules":[
{
"id":"alzheimerbroadcategory",
"field":"alzheimerbroadcategory",
"type":"string",
"input":"select",
"operator":"equal",
"value":"AD"
},
{
"id":"alzheimerbroadcategory",
"field":"alzheimerbroadcategory",
"type":"string",
"input":"select",
"operator":"equal",
"value":"MCI"
}
],
"valid":true
}
""",
"-outcome_pos",
"AD",
"-outcome_neg",
"MCI",
"-total_duration",
"1100",
]
result = get_algorithm_result(KaplanMeier, args)
result = result["result"][1]["data"]
return render_template("highchart_layout.html", title="Kaplan Meier", data=result,)
nb_args = [
"-x",
# "lefthippocampus,righthippocampus,leftaccumbensarea",
# "gender,alzheimerbroadcategory,agegroup",
"lefthippocampus,righthippocampus,leftaccumbensarea,apoe4,alzheimerbroadcategory",
"-y",
"agegroup",
"-alpha",
"1",
"-k",
"10",
"-pathology",
"dementia",
"-dataset",
"adni, ppmi",
"-filter",
"",
]
@app.route("/naive_bayes_confusion_matrix")
def naive_bayes_confusion_matrix():
result = get_algorithm_result(NaiveBayes, nb_args)
result = result["result"][4]["data"]
return render_template(
"highchart_layout.html", title="NaiveBayes Confusion Martix", data=result
)
@app.route("/naive_bayes_roc")
def naive_bayes_roc():
result = get_algorithm_result(NaiveBayes, nb_args)
result = result["result"][5]["data"]
return render_template("highchart_layout.html", title="NaiveBayes ROC", data=result)
if __name__ == "__main__":
app.run(debug=True)
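# A minimal sketch (not part of the original server) of exercising one of the
# routes above with Flask's built-in test client instead of app.run(); the
# "/home" URL comes from the routes defined above, everything else is standard Flask.
def _smoke_test_home_route():
    with app.test_client() as client:
        response = client.get("/home")
        assert response.status_code == 200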
|
|
# edoAutoHome.py - This Project for HomeAutomation
# URL: https://github.com/engdan77/edoAutoHomeMobile
# Author: Daniel Engvall ([email protected])
import kivy
kivy.require('1.9.0')
import re
from functools import partial
from collections import namedtuple
import webbrowser
from kivy.app import App
from kivy.lang import Builder
from kivy.logger import Logger
from kivy.factory import Factory
from kivy.animation import Animation
from kivy.uix.label import Label
from kivy.uix.button import Button
from kivy.uix.togglebutton import ToggleButton
from kivy.uix.boxlayout import BoxLayout
from kivy.uix.floatlayout import FloatLayout
from kivy.uix.listview import ListView
from kivy.uix.relativelayout import RelativeLayout
from kivy.uix.scatter import Scatter
from kivy.uix.widget import Widget
from kivy.core.image import Image
from kivy.core.window import Window
from kivy.animation import Animation
from kivy.properties import StringProperty, ObjectProperty, ListProperty, DictProperty, NumericProperty
from kivy.clock import Clock
from kivy.network.urlrequest import UrlRequest
from kivy.uix.screenmanager import ScreenManager, Screen, FadeTransition
from kivy.config import ConfigParser
from kivy.adapters.simplelistadapter import SimpleListAdapter
from kivy.uix.settings import (Settings, SettingsWithSidebar,
SettingsWithSpinner,
SettingsWithTabbedPanel)
from garden import Graph, MeshLinePlot
import json
from kivy.support import install_twisted_reactor
install_twisted_reactor()
from twisted.internet import reactor, protocol
__version__ = "$Revision: 20150730.1573 $"
# ---------- Generic functions ----------
def get_date(msg):
import time
if not msg:
return time.strftime("%Y-%m-%d %H:%M:%S")
else:
return '%s: %s' % (time.strftime("%Y-%m-%d %H:%M:%S"), msg)
def show_url_result(req, results):
''' Show result of url request '''
Logger.info("req: %s" % (str(req),))
Logger.info("results: %s" % (str(results),))
def updates_to_plots(last_records):
''' Convert last records to graph plots '''
from datetime import datetime
last_records.reverse()
last_records = sorted(last_records)
Logger.info('='*30)
Logger.info(str(last_records))
Logger.info('='*30)
result = []
d_fmt = '%Y-%m-%d %H:%M:%S'
now = datetime.now()
prev_value = None
for value_tuple in last_records:
d, v = value_tuple
d = datetime.strptime(d, d_fmt)
        # Divide the time difference in seconds by one day (86400 s) to express it in days
timediff = float(int(now.strftime('%s')) - int(d.strftime('%s'))) / 86400
timediff = 0 - float(format(timediff, '.4f'))
# Change value if required
next_zero = None
prev_zero = None
prev_one = None
if re.match(r'.*Motion.*', v, re.IGNORECASE):
v = 1
prev_zero = True
next_zero = True
elif re.match(r'.*Door Open.*', v, re.IGNORECASE):
if prev_value == 1:
prev_one = True
if prev_value == 0:
prev_zero = True
v = 1
elif re.match(r'.*Door Close.*', v, re.IGNORECASE):
if prev_value == 1:
prev_one = True
v = 0
else:
v = float(v)
# Add one where required
if prev_one is True:
result.append((timediff-0.0001, 1))
if prev_zero is True:
result.append((timediff-0.0001, 0))
# Adding value
result.append((timediff, v))
# Correct issue with Motion/Door
if next_zero is True:
result.append((timediff+0.0001, 0))
# Store previous value
prev_value = v
# Append last value to result
if re.match(r'.*Motion.*', value_tuple[1], re.IGNORECASE):
result.append((0-0.0001, 0))
result.append((0, v))
else:
result.append((0, v))
    # Determine min/max values
# result = sorted(result)
all_x_values, all_y_values = zip(*result)
min_x_value = float(min(*all_x_values))
min_y_value = float(min(*all_y_values))
max_y_value = float(max(*all_y_values))
# Return result as dict
return {'plots': result, 'min_y': min_y_value, 'max_y': max_y_value, 'min_x': min_x_value}
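# A small illustrative call (not part of the original app) showing the shape of the
# dict updates_to_plots() returns for a couple of door-sensor records; the
# timestamps are made up for the example.
def _example_updates_to_plots():
    records = [('2015-07-30 12:00:00', 'Door Open'),
               ('2015-07-30 11:00:00', 'Door Close')]
    plot_info = updates_to_plots(records)
    # keys: 'plots' (list of (days_ago, value) points), 'min_y', 'max_y', 'min_x'
    return plot_info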
# ---------- ScreenManager and Screens ----------
class MyScreenManager(ScreenManager):
connect_server_status = StringProperty('Initiating connection')
json_sensors = StringProperty('........')
settings_dict = DictProperty()
def __init__(self, **kwargs):
super(MyScreenManager, self).__init__(**kwargs)
self._app = App.get_running_app()
def change_screen(self, *args):
screen_name = args[0]
sm = self._app.root
# If changing back to Initial Screen
if screen_name == 'initial_screen':
Clock.unschedule(sm.update_views)
sm.current = screen_name
def refresh_variables(self):
Logger.info("Refreshing variables after connecting to MyScreenManager")
self._app.config.update_config('my.ini')
self.port = self._app.config.get('network', 'port')
self.server = self._app.config.get('network', 'ip')
self.connect_server_status = "Connecting to %s:%s" % (self.server, self.port)
self.json_sensors = '....'
def update_views(self, dt):
''' Method to be scheduled for updating from server '''
# Poll new JSON data
Logger.info(str(self._app.connect_to_server()))
# Clean records in log_screen if too many lines
if len(self._app.log_list) > 100:
self._app.log_list.append(get_date("Cleaning old records in log"))
while len(self._app.log_list) > 100:
self._app.log_list.pop(0)
def return_screen_object(screen_name):
# Iterate through all screens
found = None
for current_screen in self._app.sm.screens:
if current_screen.name == screen_name:
found = current_screen
return found
# For each device update Screen
if re.match(r'^\{.*\}$', self._app.sm.json_sensors):
try:
j = json.loads(self._app.sm.json_sensors)
except Exception as e:
self._app.sm.json_sensors = 'Error in JSON'
else:
for device in j.keys():
Logger.info("Updating screen for device %s" % (device,))
self._app.log_list.append(get_date("Updating screen for device %s" % (device,)))
# Create Device Screen with sensors
box_device = BoxLayout(orientation='vertical', spacing=10)
box_device.add_widget(Label(text=''))
box_device.add_widget(Label(size_hint_y=0.2, text='[color=ff3333]' + device + '[/color]', font_size=40, markup=True))
box_device.add_widget(Label(text=''))
# Create Sensor Screen and button on device screen
for sensor in j[device]:
sensor_name = sensor.keys()[0]
sensor_data = sensor[sensor_name]
sensor_values = sensor_data['last_records']
sensor_dict = updates_to_plots(sensor_values)
sensor_plots = sensor_dict['plots']
ymin = sensor_dict['min_y']
ymax = sensor_dict['max_y']
xmin = sensor_dict['min_x']
last_date, last_value = sensor_values[-1]
# Determine suffix
suffix = ' '
if re.match(r'.*temp.*', sensor_name, re.IGNORECASE):
suffix = u"\u00b0C"
if re.match(r'.*humid.*', sensor_name, re.IGNORECASE):
suffix = " %"
if re.match(r'.*smoke.*', sensor_name, re.IGNORECASE):
suffix = " %"
if re.match(r'.*stove.*', sensor_name, re.IGNORECASE):
suffix = " %"
sensor = device + '_' + sensor_name
Logger.info(str(sensor))
Logger.info("Last data %s %s" % (last_date, last_value))
# Create new history view
box_sensor_history = BoxLayout(orientation='vertical', spacing=10)
box_sensor_history.add_widget(Label(size_hint_y=0.1, text='[color=B6BAB9]' + sensor_name + ' (' + device + ')[/color]', font_size=30, markup=True))
# Create history text
text_history = []
for d, v in sensor_values:
text_history.append(str("%s %s" % (d, v)))
# Create left aligned list
adapter = SimpleListAdapter(data=text_history, cls=MyLeftAlignedLabel)
list_view = ListView(adapter=adapter)
# Fix bug with ListView to refresh if required
if(hasattr(list_view, '_reset_spopulate')):
Logger.info("Refresh list_view")
list_view._reset_spopulate()
# Add ListView to Sensor History
box_sensor_history.add_widget(list_view)
back_button = Button(size_hint_y=0.1, font_size=20, text='Back')
back_button.bind(on_press=partial(self.change_screen, device + "_" + sensor_name))
box_sensor_history.add_widget(back_button)
screen_sensor_history = return_screen_object(device + "_" + sensor_name + '_history')
screen_sensor_history.clear_widgets()
screen_sensor_history.add_widget(box_sensor_history)
screen_sensor = return_screen_object(device + "_" + sensor_name)
box_sensor = BoxLayout(orientation='vertical')
box_sensor.add_widget(Label(size_hint_y=0.1, text='[color=B6BAB9]' + sensor_name + ' (' + device + ')[/color]', font_size=30, markup=True))
# Add sensor value
box_sensor.add_widget(Label(text=last_value + suffix, font_size=60))
# Add sensor date
box_sensor.add_widget(Label(size_hint_y=0.1, markup=True, text='[b]Sensor last updated ' + last_date[:-3] + '[/b]\nPolled ' + get_date(None)[:-3], font_size=15))
# Add sensor graph
Logger.info("Create plot for %s" % (sensor_name,))
Logger.info(str(sensor_plots))
plot = MeshLinePlot(mode='line_strip', color=[1, 0, 0, 1])
plot.points = sensor_plots
sensor_graph = Graph(id='plots_' + sensor_name, precision='%0.0f', x_grid_label=True, y_grid_label=True, xmin=xmin, xmax=0, ymin=ymin, ymax=ymax, xlabel='days ago', ylabel=suffix, x_grid=True, y_grid=False, x_ticks_major=1, y_ticks_major=1)
sensor_graph.add_plot(plot)
box_sensor.add_widget(sensor_graph)
# Add buttonbar for sensor
box_buttons = BoxLayout(orientation='horizontal')
# Create button for history
history_button = Button(size_hint_y=0.2, font_size=20, text='History')
history_button.bind(on_press=partial(self.change_screen, device + "_" + sensor_name + "_history"))
# Create Back button
back_button = Button(size_hint_y=0.2, font_size=20, text='Back')
back_button.bind(on_press=partial(self.change_screen, device))
# Add buttons to row
box_buttons.add_widget(back_button)
box_buttons.add_widget(history_button)
# Add row to screen
box_sensor.add_widget(box_buttons)
# Add all of it to screen
screen_sensor.clear_widgets()
screen_sensor.add_widget(box_sensor)
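# A sketch of the JSON layout update_views() above (and create_button_view() below)
# expect from the server, reconstructed from how the parsed object is traversed;
# the device, sensor names and values are made up.
_EXAMPLE_JSON_SENSORS = json.dumps({
    'device1': [
        {'Temp Sensor': {'last_records': [['2015-07-30 12:00:00', '21.5'],
                                          ['2015-07-30 13:00:00', '22.0']]}},
        {'Door Sensor': {'last_records': [['2015-07-30 12:30:00', 'Door Open']]}},
    ],
})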
class InitialScreen(Screen):
version = StringProperty(__version__.replace('$', ''))
logo_image = ObjectProperty(None)
def __init__(self, **kwargs):
super(InitialScreen, self).__init__(**kwargs)
self.logo_image = Image('logo.png')
def move_logo(self, *args):
screen = self._app.sm.get_screen('initial_screen')
logo_object = screen.ids['logo']
window_x, window_y = Window.size
anim = Animation(y=window_y-(window_y/1.5), duration=6, t='in_bounce') + Animation(y=0, duration=6, t='out_bounce')
anim.repeat = True
anim.start(logo_object)
def stop_logo(self, *args):
screen = self._app.sm.get_screen('initial_screen')
logo_object = screen.ids['logo']
Animation.cancel_all(logo_object)
class ConnectingServerScreen(Screen):
slideshow_all_sensors_counter = NumericProperty(0)
slideshow_all_sensors_screens = ListProperty([])
slideshow_all_sensors_index = NumericProperty(0)
def change_screen(self, *args):
screen_name = args[0]
sm = self._app.root
# If changing back to Initial Screen
if screen_name == 'initial_screen':
Clock.unschedule(sm.update_views)
sm.current = screen_name
def control_slideshow_all_sensors(self, button, *args):
Logger.info('Slideshow for all sensors button is %s' % (button.state,))
self._app.log_list.append(get_date('Slideshow for all sensors button is %s' % (button.state,)))
if button.state == 'down':
# Create list of screens to switch between
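# Only screens named "<device>_<sensor>" are collected; the pattern below
# assumes device names contain a hyphen, so a hypothetical screen such as
# 'device-01_temp' would match while e.g. 'all_devices_buttons' would not.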
for screen in self._app.sm.screens:
if re.match(r'[^-]+-[^_]+_[^_]+$', screen.name):
self.slideshow_all_sensors_screens.append(screen)
self.timeout = int(self._app.sm.settings_dict['slideshow_refresh_time'])
self.slideshow_all_sensors_counter = self.timeout
device_screen = self._app.sm.get_screen('all_devices_buttons')
# Search for button object
for widget in device_screen.walk():
button = widget
if widget.id == 'slide_all_button':
button.text = 'Slideshow All Sensors (' + str(self.slideshow_all_sensors_counter) + ')'
Clock.schedule_interval(self.slideshow_all_sensors, 1)
if button.state == 'normal':
button.text = 'Slideshow All Sensors'
Clock.unschedule(self.slideshow_all_sensors)
def slideshow_all_sensors(self, dt):
self.slideshow_all_sensors_counter -= 1
device_screen = self._app.sm.get_screen('all_devices_buttons')
for widget in device_screen.walk():
button = widget
if widget.id == 'slide_all_button':
button.text = 'Slideshow All Sensors (' + str(self.slideshow_all_sensors_counter) + ')'
if self.slideshow_all_sensors_counter == 0:
self.slideshow_all_sensors_counter = self.timeout
if self.slideshow_all_sensors_index < len(self.slideshow_all_sensors_screens)-1:
self.slideshow_all_sensors_index += 1
else:
self.slideshow_all_sensors_index = 0
# Switch to next sensor screen
self.change_screen(self.slideshow_all_sensors_screens[self.slideshow_all_sensors_index].name)
def create_button_view(self, dt):
import json
import time
# Make reference to app root widget
sm = self._app.root
# Check that JSON has been received
if re.match(r'^\{.*\}$', self._app.sm.json_sensors):
try:
j = json.loads(self._app.sm.json_sensors)
except Exception as e:
self._app.sm.json_sensors = 'Error in JSON'
else:
all_devices_boxlayout = BoxLayout(orientation='vertical', spacing=10)
all_devices_boxlayout.add_widget(Label(text=''))
all_devices_boxlayout.add_widget(Label(size_hint_y=0.2, text='[color=ff3333]Devices[/color]', font_size=40, markup=True))
all_devices_boxlayout.add_widget(Label(text=''))
all_devices_screen = Screen(id='all_devices_buttons', name='all_devices_buttons')
all_devices_screen.add_widget(all_devices_boxlayout)
sm.add_widget(all_devices_screen)
# Building new screens for the list of devices and sensors based on the JSON
# For each device create its own Screen
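# The code below assumes a payload shaped roughly like this (illustrative
# values only, not actual server output):
# {"device1": [{"temp_indoor": {"last_records": [["2015-01-01 12:00:00", "21.5"], ...]}},
#              {"humidity_indoor": {"last_records": [...]}}],
#  "device2": [...]}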
for device in j.keys():
Logger.info("Creating screen for device %s" % (device,))
self._app.log_list.append(get_date("Creating screen for device %s" % (device,)))
screen_device = Screen(name=device)
# Add button for device on all_devices_boxlayout
b = Button(text=device)
# This will call the function with 'device' as argument to switch Screen
b.bind(on_press=partial(self.change_screen, device))
all_devices_boxlayout.add_widget(b)
# Create Device Screen with sensors
box_device = BoxLayout(orientation='vertical', spacing=10)
box_device.add_widget(Label(text=''))
box_device.add_widget(Label(size_hint_y=0.2, text='[color=ff3333]' + device + '[/color]', font_size=40, markup=True))
box_device.add_widget(Label(text=''))
# Create Sensor Screen and button on device screen
for sensor in j[device]:
sensor_name = sensor.keys()[0]
sensor_data = sensor[sensor_name]
sensor_values = sensor_data['last_records']
sensor_dict = updates_to_plots(sensor_values)
sensor_plots = sensor_dict['plots']
ymin = sensor_dict['min_y']
ymax = sensor_dict['max_y']
xmin = sensor_dict['min_x']
last_date, last_value = sensor_values[-1]
# Determine suffix
suffix = ' '
if re.match(r'.*temp.*', sensor_name, re.IGNORECASE):
suffix = u"\u00b0C"
if re.match(r'.*humid.*', sensor_name, re.IGNORECASE):
suffix = " %"
if re.match(r'.*smoke.*', sensor_name, re.IGNORECASE):
suffix = " %"
if re.match(r'.*stove.*', sensor_name, re.IGNORECASE):
suffix = " %"
sensor = device + '_' + sensor_name
Logger.info(str(sensor))
Logger.info("Last data %s %s" % (last_date, last_value))
# Create history view
screen_sensor_history = Screen(name=device + "_" + sensor_name + "_history")
box_sensor_history = BoxLayout(orientation='vertical', spacing=10)
box_sensor_history.add_widget(Label(size_hint_y=0.1, text='[color=B6BAB9]' + sensor_name + ' (' + device + ')[/color]', font_size=30, markup=True))
# Create history text
text_history = []
for d, v in sensor_values:
text_history.append(str("%s %s" % (d, v)))
# Create left aligned list
adapter = SimpleListAdapter(data=text_history, cls=MyLeftAlignedLabel)
list_view = ListView(adapter=adapter)
# Fix bug with ListView to refresh if required
if(hasattr(list_view, '_reset_spopulate')):
Logger.info("Refresh list_view")
list_view._reset_spopulate()
# Add ListView to Sensor History
box_sensor_history.add_widget(list_view)
back_button = Button(size_hint_y=0.1, font_size=20, text='Back')
back_button.bind(on_press=partial(self.change_screen, device + "_" + sensor_name))
box_sensor_history.add_widget(back_button)
screen_sensor_history.add_widget(box_sensor_history)
sm.add_widget(screen_sensor_history)
# Create sensor screen
screen_sensor = Screen(name=device + "_" + sensor_name)
box_sensor = BoxLayout(orientation='vertical')
box_sensor.add_widget(Label(size_hint_y=0.1, text='[color=B6BAB9]' + sensor_name + ' (' + device + ')[/color]', font_size=30, markup=True))
# Add sensor value
box_sensor.add_widget(Label(text=last_value + suffix, font_size=60))
# Add sensor date
box_sensor.add_widget(Label(size_hint_y=0.1, markup=True, text='[b]Sensor last updated ' + last_date[:-3] + '[/b]\nPolled ' + get_date(None)[:-3], font_size=15))
# Add sensor graph
Logger.info("Create plot for %s" % (sensor_name,))
Logger.info(str(sensor_plots))
plot = MeshLinePlot(mode='line_strip', color=[1, 0, 0, 1])
plot.points = sensor_plots
sensor_graph = Graph(id='plots_' + sensor_name, precision='%0.0f', x_grid_label=True, y_grid_label=True, xmin=xmin, xmax=0, ymin=ymin, ymax=ymax, xlabel='days ago', ylabel=suffix, x_grid=True, y_grid=False, x_ticks_major=1, y_ticks_major=1)
sensor_graph.add_plot(plot)
box_sensor.add_widget(sensor_graph)
# Add buttonbar for sensor
box_buttons = BoxLayout(orientation='horizontal')
# Create button for history
history_button = Button(size_hint_y=0.2, font_size=20, text='History')
history_button.bind(on_press=partial(self.change_screen, device + "_" + sensor_name + "_history"))
# Create Back button
back_button = Button(size_hint_y=0.2, font_size=20, text='Back')
back_button.bind(on_press=partial(self.change_screen, device))
# Add buttons to row
box_buttons.add_widget(back_button)
box_buttons.add_widget(history_button)
# Add row to screen
box_sensor.add_widget(box_buttons)
# Add all of it to screen
screen_sensor.add_widget(box_sensor)
sm.add_widget(screen_sensor)
# Create button on device screen
button_sensor = Button(text=sensor_name)
button_sensor.bind(on_press=partial(self.change_screen, sensor))
box_device.add_widget(button_sensor)
# Add Device Screen with all sensor buttons to ScreenManager
back_button = Button(font_size=20, text='[b]Back[/b]', markup=True)
back_button.bind(on_press=partial(self.change_screen, 'all_devices_buttons'))
box_device.add_widget(back_button)
screen_device.add_widget(box_device)
sm.add_widget(screen_device)
# Adding Back button to Devices screen
back_button = Button(font_size=20, text='[b]Back[/b]', markup=True)
back_button.bind(on_press=partial(self.change_screen, 'initial_screen'))
all_devices_buttonrow = BoxLayout(orientation='horizontal')
all_devices_buttonrow.add_widget(back_button)
slide_all_button = ToggleButton(id='slide_all_button', font_size=20, text='Slideshow All Sensors')
slide_all_button.bind(on_press=partial(self.control_slideshow_all_sensors, slide_all_button))
all_devices_buttonrow.add_widget(slide_all_button)
all_devices_boxlayout.add_widget(all_devices_buttonrow)
# Unschedule timer
Clock.unschedule(self.create_button_view)
# Return to buttons of all devices
sm.current = 'all_devices_buttons'
# Schedule updates from server
Clock.schedule_interval(sm.update_views, int(self._app.sm.settings_dict['refresh_time']))
# If the connection failed, pause briefly so the error is visible before returning
if re.match(r'.*fail.*', self._app.sm.connect_server_status) or re.match(r'.*error.*', self._app.sm.json_sensors):
Clock.unschedule(self.create_button_view)
time.sleep(2)
self.port = self._app.config.get('network', 'port')
self.server = self._app.config.get('network', 'ip')
self._app.sm.connect_server_status = "Connecting to %s:%s" % (self.server, self.port)
sm.current = 'initial_screen'
def call_connect_sensor_status(self, dt):
''' Function that connects and retrieves json '''
self._app.config.update_config('my.ini')
port = self._app.config.get('network', 'port')
server = self._app.config.get('network', 'ip')
refresh_time = self._app.config.get('network', 'refresh_time')
slideshow_refresh_time = self._app.config.get('other', 'slideshow_refresh_time')
self._app.sm.settings_dict['ip'] = server
self._app.sm.settings_dict['port'] = port
self._app.sm.settings_dict['refresh_time'] = refresh_time
self._app.sm.settings_dict['slideshow_refresh_time'] = slideshow_refresh_time
# Initiate connection
Logger.info("Connecting to %s:%s" % (server, port))
self._app.log_list.append(get_date("Connecting to %s:%s" % (server, port)))
Logger.info(str(self._app.connect_to_server()))
Clock.schedule_interval(self.create_button_view, 1)
class AboutScreen(Screen):
def move_text(self, *args):
screen = self._app.sm.get_screen('about_screen')
text_object = screen.ids['moving_text']
window_x, window_y = Window.size
center_x = window_x/2
center_x = 10
center_y = window_y/2
center_y = 10
dia = 200
dur = 3
t = 'in_out_circ'
anim = Animation(x=center_x, y=center_y-dia, duration=dur, t=t) + Animation(x=center_x+dia, y=center_y, duration=dur, t=t) + Animation(x=center_x, y=center_y+dia, duration=dur, t=t) + Animation(x=center_x-dia, y=center_y, duration=dur, t=t)
anim.repeat = True
anim.start(text_object)
def stop_text(self, *args):
screen = self._app.sm.get_screen('about_screen')
text_object = screen.ids['moving_text']
Animation.cancel_all(text_object)
def open_browser(self, *args):
url = 'https://github.com/engdan77'
webbrowser.open(url)
class MyLeftAlignedLabel(Label):
pass
class LogScreen(Screen):
left_label = ObjectProperty(MyLeftAlignedLabel)
class SettingScreen(Screen):
pass
Builder.load_string('''
#:import FadeTransition kivy.uix.screenmanager.FadeTransition
#:import Clock kivy.clock.Clock
#:import partial functools.partial
#:import sla kivy.adapters.simplelistadapter
#:import label kivy.uix.label
#:import window kivy.core.window
<InitialScreen>
name: 'initial_screen'
_app: app
on_enter: self.move_logo(self)
on_leave: self.stop_logo(self)
BoxLayout:
orientation: 'horizontal'
BoxLayout:
orientation: 'vertical'
Button:
text: 'View Sensors'
on_press: root._app.sm.refresh_variables(); app.root.current = 'connecting_server_screen'
Button:
text: 'View App Log'
on_press: app.root.current = 'log_screen'
Button:
text: 'Settings'
on_press: app.open_settings()
Button:
text: 'About This App'
on_press: app.root.current = 'about_screen'
Button:
text: '[b]Exit[/b]'
font_size: 20
on_press: app.stop()
markup: True
BoxLayout:
orientation: 'vertical'
id: right_pane
Image:
id: logo
source: 'logo.png'
allow_stretch: False
keep_ratio: True
pos_hint: {'center_x': .5, 'top': .9}
Label:
text: root.version
size_hint: None, None
pos_hint: {'center_x': .5, 'top': .8}
font_size: self.width / 7
valign: 'middle'
halign: 'center'
<ConnectingServerScreen>
name: 'connecting_server_screen'
id: 'connecting_server_screen'
on_enter: Clock.schedule_once(self.call_connect_sensor_status)
_app: app
BoxLayout:
orientation: 'vertical'
Label:
font_size: 30
text: app.sm.connect_server_status
Image:
source: 'RingGreen.zip'
allow_stretch: False
keep_ratio: True
anim_delay: 0.02
Label:
font_size: 20
text: app.sm.json_sensors[:40]
Button:
font_size: 20
text: 'Abort'
size_hint_y: 0.2
on_press: Clock.unschedule(root.create_button_view); app.sm.current = 'initial_screen'
<AboutScreen>
name: 'about_screen'
_app: app
on_enter: self.move_text(self)
on_leave: self.stop_text(self)
FloatLayout:
orientation: 'vertical'
Scatter:
auto_bring_to_front: False
Image:
center: self.parent.center
source: 'daniel_engvall.png'
size: root.width-400, root.height-400
Label:
text: "[color=ff3333][b]AutoHomeMobile[/b][/color]\\nDeveloped by [email protected]\\n[color=0000ff][ref=url]https://github.com/engdan77[/ref][/font]"
markup: True
on_ref_press: root.open_browser(self)
size_hint: None, None
font_size: self.width / 5
pos_hint: {'center_x': .5, 'top': .2}
Label:
id: moving_text
text: '[color=A31B00]Try to pinch/rotate me...[/color]'
markup: True
pos: 50, 50
Button:
text: 'Back'
on_press: app.root.current = 'initial_screen'
size_hint: None, None
<MyLeftAlignedLabel>
font_size: 15
halign: 'left'
size_hint_y: None
text_size: self.size
<LogScreen>:
name: 'log_screen'
BoxLayout:
orientation: 'vertical'
ListView:
adapter:
sla.SimpleListAdapter(data=app.log_list, cls=root.left_label)
Button:
size_hint_y: 0.1
text: 'Back'
on_press: app.root.current = 'initial_screen'
''')
# ---------- Generic Classes ----------
class ProtocolClass(protocol.Protocol):
def connectionMade(self):
self.factory.app.on_connection(self.transport)
def dataReceived(self, data):
self.factory.app.print_message(data)
class ConnectionClass(protocol.ClientFactory):
protocol = ProtocolClass
def __init__(self, app):
self.app = app
def clientConnectionLost(self, conn, reason):
self.app.print_message("Connection lost")
self.app.log_list.append(get_date("Connected and disconnecting from server"))
Logger.info('Connection lost')
def clientConnectionFailed(self, conn, reason):
self.app.print_message("Connection failed")
self.app.log_list.append(get_date("Connection failed"))
Logger.error('Connection failed')
# ---------- Main App ----------
class MyApp(App):
sm = ObjectProperty()
settings = DictProperty({'apa': 1})
connection = None
log_list = ListProperty([get_date(None) + ': Application Started'])
def __init__(self, **kwargs):
# Call the superclass __init__ (kept as a hook in case initialisation needs adjusting)
super(MyApp, self).__init__(**kwargs)
def build_config(self, config):
config.setdefaults('network', {
'ip': '127.0.0.1',
'port': '3000',
'refresh_time': '60'})
config.setdefaults('other', {'slideshow_refresh_time': '60'})
def on_config_change(self, config, section, key, value):
pass
def build_settings(self, settings):
import json
self.setting_json = '''[
{
"type": "title",
"title": "Server"
},
{
"type": "string",
"title": "IP Address",
"desc": "IP address for server",
"section": "network",
"key": "ip"
},
{
"type": "numeric",
"title": "Port",
"desc": "Port for server",
"section": "network",
"key": "port"
},
{
"type": "numeric",
"title": "Server Refresh Time",
"desc": "Number of seconds before refresh status from server",
"section": "network",
"key": "refresh_time"
},
{
"type": "numeric",
"title": "Slideshow Refresh Time",
"desc": "Number of seconds for each slide",
"section": "other",
"key": "slideshow_refresh_time"
}
]'''
settings.add_json_panel('EdoAutoHome', self.config, data=self.setting_json)
def on_stop(self):
Logger.info("Good Bye!!")
def connect_to_server(self):
server = str(self.config.get('network', 'ip'))
port = int(self.config.get('network', 'port'))
return reactor.connectTCP(server, port, ConnectionClass(self))
def on_connection(self, connection):
self.sm.connect_server_status = 'Connected successfully, retrieving JSON data!'
self.connection = connection
# Send actual command to server
self.send_message()
def send_message(self, *args):
msg = '"show_status_json"'
if msg and self.connection:
Logger.info("Sending %s" % (msg,))
self.connection.write(msg)
def print_message(self, msg):
import time
# Successfully received JSON
if str(msg).find('{') != -1 or str(msg).find('}') != -1:
if not str(msg)[0] == '{':
Logger.info("Appending JSON")
self.sm.json_sensors += str(msg)
else:
Logger.info("Found JSON")
self.sm.json_sensors = str(msg)
Logger.info("Printing Result of JSON Sensor")
Logger.info(str(self.sm.json_sensors))
self.sm.connect_server_status = 'Parsing JSON!'
# Save to local file as debug
# with open('debug.txt', 'w') as f:
# f.write(msg)
# Failed connection
if msg.find("failed") > 0:
self.sm.connect_server_status = 'Connection failed!'
def build(self):
import time
super(MyApp, self).build()
# Set icon and name
self.title = 'AutoHomeMobile'
self.icon = 'icon.png'
# Configuration settings
config = self.config
# self.settings_cls = SettingsWithSidebar
self.settings_cls = SettingsWithTabbedPanel
self.use_kivy_settings = False
# Clock handler
# Clock.schedule_interval(self.timer, 20)
self.sm = MyScreenManager(id='manager', transition=FadeTransition())
self.sm.add_widget(InitialScreen(name='initial_screen'))
self.sm.add_widget(LogScreen(name='log_screen'))
self.sm.add_widget(AboutScreen(name='about_screen'))
self.sm.add_widget(ConnectingServerScreen(name='connecting_server_screen'))
# Return ScreenManager
return self.sm
if __name__ == '__main__':
MyApp().run()
|
|
# -*- coding: utf-8 -*-
#
# K9 Forward Sensors Controller
#
# authored by Richard Hopkins December 2017 for AoT Autonomy Team
#
# Licensed under The Unlicense, so free for public domain use
#
# This program turns K9's radar crisps into rotating LIDAR
# sensors that can detect what is in front of the robot dog.
#
# This data is supplemented by a stationary mouth sensor that permanently
# detects what is front of the dog at ground level (thanks to the wonderful
# Paul Booth for that particular idea!)
#
# The information from all the sensors is stored in a Redis database
# in the Pi, so that the Python Motorcontroller can transmit the current state
# to the browser via Node-RED
#
import sys # allows for command line to be interpreted
import json # enables creation of JSON strings
import os # enables access to environment variables
import math # import maths operations
import random # import random numbers
import time # enable sleep function
import memory # imports K9's memory capability
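# The 'memory' module above wraps the Redis database described in the header
# comment (readings are stored on the Pi and forwarded to the browser via
# Node-RED). As a rough, illustrative sketch only -- the key layout, the
# redis-py dependency and the name below are assumptions, not the project's
# real API -- a storeSensorReading-style helper could look like this
# (it is never called in this program):
def _example_store_sensor_reading(name, distance, direction):
    """Illustrative only: persist one LIDAR reading as a Redis hash."""
    import redis  # assumed dependency; the real 'memory' module may differ
    r = redis.StrictRedis(host='localhost')
    r.hset('sensor:' + name, 'distance', distance)  # distance in mm
    r.hset('sensor:' + name, 'direction', direction)  # raw ADC direction value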
sim = False
# sim is a program wide flag to allow the program to run off the Raspberry Pi
# this can be enabled by appending the word "test" to the command line
if ( len(sys.argv) > 1 ) :
if ( sys.argv[1] == "test" ) :
sim = True
print "Executing ears in simulation mode" # let the user know they are in sim mode
# GPIO for left LIDAR shutdown pin
mouth_LIDAR_shutdown = 16
# GPIO for right LIDAR shutdown pin
right_LIDAR_shutdown = 20
left_LIDAR_shutdown = 21
focus = 0.5
# If running for real initialise servo driver, LIDARs and ADC
if not sim :
sys.path.append('/home/pi') # persistent import directory for K9 secrets
sys.path.append('/home/pi/Adafruit_Python_PCA9685/Adafruit_PCA9685') # persistent directory for Adafruit driver
print "Importing RPi GPIO and shutting down LIDAR..."
import RPi.GPIO as GPIO
GPIO.setwarnings(False)
# Setup GPIO for shutdown pins on each VL53L0X
GPIO.setmode(GPIO.BCM)
GPIO.setup(left_LIDAR_shutdown, GPIO.OUT)
GPIO.setup(right_LIDAR_shutdown, GPIO.OUT)
GPIO.setup(mouth_LIDAR_shutdown, GPIO.OUT)
# Set all shutdown pins low to turn off each VL53L0X
GPIO.output(left_LIDAR_shutdown, GPIO.LOW)
GPIO.output(right_LIDAR_shutdown, GPIO.LOW)
GPIO.output(mouth_LIDAR_shutdown, GPIO.LOW)
# Keep all low for 500 ms or so to make sure they reset
time.sleep(0.50)
print "Importing servo driver library..."
import Adafruit_PCA9685 # enable control of devices ear servos via Adafruit library
print "Importing LIDAR driver library..."
import VL53L0X # enable control of LIDAR sensors
print "Importing ADC driver library..."
import Adafruit_ADS1x15
# Create ADC object
adc = Adafruit_ADS1x15.ADS1115()
GAIN = 1
# Create and initialise servo driver
pwm = Adafruit_PCA9685.PCA9685()
pwm.set_pwm_freq(60)
pwm.set_pwm(4,0,125)
pwm.set_pwm(5,0,615)
class K9ForwardSensors :
def __init__(self) :
"""Creates two LIDAR instances, one for each ear and a sensor array.
"""
print "K9 object initialising..."
# Create LIDAR sensor instances with different channels
self.left_ear = LIDAR(name="ear_left",adc=0,gpio=left_LIDAR_shutdown,address=0x30)
self.right_ear = LIDAR(name="ear_right",adc=1,gpio=right_LIDAR_shutdown,address=0x31)
#self.mouth = LIDAR(name="mouth",adc=99,gpio=mouth_LIDAR_shutdown,address=0x32)
# Initialise the various measures that will control the ears
# the pwm settings control the target directions of the ears
self.min_pwm = 140
self.mid_pwm = 370
self.max_pwm = 600
self.max_l_pot = 16776
self.mid_r_pot = 10932
self.mid_l_pot = 12963
self.min_r_pot = 4906
self.left_pwm_channel = 4
self.right_pwm_channel = 5
def getForwardSpeed(self) :
# retrieve current actual robot speed from Redis
self.left_speed=float(memory.retrieveState("left:speed"))
self.right_speed=float(memory.retrieveState("right:speed"))
# forward speed will be the average of the two
# then convert into a fraction of the maximum speed (100 per track)
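# Worked example (speeds assumed to be on a 0-100 scale per track):
# left:speed = 60 and right:speed = 40 give (60 + 40) / 200 = 0.5,
# i.e. the dog is moving at half of its maximum forward speed.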
return ((self.left_speed + self.right_speed)/200)
def makeReading(self) :
"""Controls the movement of the ears based on robot speed
"""
# make reading from mouth sensor
#self.mouth.makeReading()
#self.mouth.recordReading()
#make reading from ear sensors
self.forward_speed = self.getForwardSpeed()
# if the robot is moving forward, then work out what
# the boundaries should be for the potentiometer and pwm
# these boundaries should narrow the movement as the robot gets
# faster
if (self.forward_speed > 0) :
self.percent = min(1,self.forward_speed)
else:
self.percent = 0
self.left_pot_tgt = self.mid_l_pot + (focus*self.percent*(self.max_l_pot - self.mid_l_pot))
self.right_pot_tgt = self.mid_r_pot - (focus*self.percent*(self.mid_r_pot-self.min_r_pot))
self.left_pwm_tgt = self.mid_pwm + (focus*self.percent*(self.max_pwm - self.mid_pwm))
self.right_pwm_tgt = self.mid_pwm - (focus*self.percent*(self.mid_pwm - self.min_pwm))
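# Worked example with the constants above and focus = 0.5: at rest
# (percent = 0) the targets stay at the mid values; at full speed
# (percent = 1) left_pot_tgt = 12963 + 0.5 * (16776 - 12963) = 14869.5,
# so the ear sweep narrows to roughly the outer half of its range.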
#print ("LT:"+str(self.left_pot_tgt)+" RT:"+str(self.right_pot_tgt)+" pLT:"+str(self.left_pwm_tgt)+" pRT:"+str(self.right_pwm_tgt))
# Make a reading with the left ear to determine distance and direction
self.left_ear.makeReading()
self.left_ear.recordReading()
# Make a reading with the right ear to determine distance and direction
self.right_ear.makeReading()
self.right_ear.recordReading()
#print ("LA:"+str(self.left_ear.direction)+" RA:"+str(self.right_ear.direction))
# If both ears are outside the boundaries over which they are meant to move
# then reverse their direction of travel
if ((self.left_ear.direction < self.left_pot_tgt) or (self.right_ear.direction > self.right_pot_tgt)) :
if not sim :
pwm.set_pwm(self.left_pwm_channel,0, self.max_pwm)
pwm.set_pwm(self.right_pwm_channel,0, self.min_pwm)
#print("Flip L in towards: " + str(self.max_pwm) + ", R in towards: " + str(self.min_pwm))
if ((self.left_ear.direction > self.max_l_pot) or (self.right_ear.direction < self.min_r_pot)) :
if not sim :
pwm.set_pwm(self.left_pwm_channel,0, int(self.left_pwm_tgt))
pwm.set_pwm(self.right_pwm_channel,0, int(self.right_pwm_tgt))
#print("Flip L out towards: " + str(self.left_pwm_tgt) + ", R out towards: " + str(self.right_pwm_tgt))
class LIDAR :
def __init__(self, name, adc, gpio, address) :
"""Initialise the VL530L0X that will be used by this LIDAR instance
Arguments:
name -- the name of the sensor e.g. left or right
adc -- this value will determine which ADC channel is used
gpio -- the GPIO pin that controls the LIDAR shutdown
address -- the i2c address of the LIDAR itself
when the sensor is queried (also translates to I2C address)
"""
self.adc = adc
self.name = name
self.gpio = gpio
self.address = address
# initialise sensor via I2C and GPIO shutdown pin
if not sim :
self.sensor = VL53L0X.VL53L0X(address=self.address)
GPIO.output(self.gpio, GPIO.HIGH)
time.sleep(0.50)
# start sensor ranging
self.sensor.start_ranging(VL53L0X.VL53L0X_LONG_RANGE_MODE)
print str(self.name) + " LIDAR instantiated at " + str(self.address) + " measured by ADC " + str(self.adc) + " and controlled by GPIO " + str(self.gpio)
def recordReading(self) :
memory.storeSensorReading(self.name,self.distance,self.direction)
print("dist_"+str(self.name)+": "+str(self.distance)+"mm at bearing: "+str(self.direction))
def makeReading(self) :
"""Make a distance reading and update the sensors internal state
Gets the latest distance reading from the LIDAR sensor
and the direction from the associated ADC channel
Arguments: none
"""
self.time = time.time()  # timestamp of this reading
# get distance from LIDAR sensor
if not sim :
self.distance = self.sensor.get_distance()
#self.distance = random.uniform(0,1200) # TEMPORARY FOR TESTING
if (self.adc == 99) :
self.direction = 99
else :
self.direction = adc.read_adc(self.adc,gain=GAIN)
else :
self.distance = random.uniform(0,1200)
self.direction = random.uniform(0,5)
# print str(self.name) + " reads: " + str(self.distance) + " at an angle of " + str(self.direction)
try :
k9sensors = K9ForwardSensors()
max_time = 0
while True :
k9sensors.makeReading()
except KeyboardInterrupt :
if not sim :
k9sensors.left_ear.sensor.stop_ranging()
k9sensors.right_ear.sensor.stop_ranging()
#k9sensors.mouth.sensor.stop_ranging()
GPIO.output(left_LIDAR_shutdown, GPIO.LOW)
GPIO.output(right_LIDAR_shutdown, GPIO.LOW)
GPIO.output(mouth_LIDAR_shutdown, GPIO.LOW)
|
|
#!/usr/bin/env python
# Inject TV show into iTunes
# system
import datetime
import os
import re
import time
import sys
import urllib
from optparse import OptionParser
from ConfigParser import SafeConfigParser
# Mac OS X
import appscript
import mactypes
# To get episode summary, some extra Python packages are required
#port install py25-lxml
#port install py25-libxslt
#port install py25-libxml2
#easy_install-2.5 pyxml
try:
import libxml2dom
#from xml.dom.ext.reader import HtmlLib
except ImportError:
# missing XML parser lib, do not try to retrieve summary
print >> sys.stderr, \
"Warning: libxml2dom missing, no summary can be retrieved"
class EasyConfigParser(SafeConfigParser):
"""ConfigParser extension to support default config values
"""
def get(self, section, option, default=None):
if not self.has_section(section):
return default
if not self.has_option(section, option):
return default
return SafeConfigParser.get(self, section, option)
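# Example (illustrative values): parser.get('config', 'topdir', '/tmp/videos')
# returns the [config] topdir entry when both section and option exist,
# otherwise the supplied default '/tmp/videos'.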
class Injector(object):
"""
"""
def __init__(self, configfile, topdir, refdir):
# generate regular expression to parse video files
today = datetime.date.today()
# build month abbreviations from the first day of each month so that
# date.replace() never produces an invalid date (e.g. Feb 30)
monthre = '|'.join([today.replace(day=1, month=m).strftime("%b") \
for m in range(1, 13)])
self.filecre = re.compile(r'^(?P<show>.*)\.[sS](?P<season>\d\d)[eE]' \
r'(?P<episode>\d\d)')
self.showbcre = re.compile(r'(?P<season>\d+)\-\s?(?P<episode>\d+).*' \
r'\s(?P<date>\d+\s(' + monthre + \
r')\s\d+).*' \
r'<a.*\shref="(?P<url>.*)"' + \
r'>(?P<title>.*)</a>')
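# The pattern above expects epguides-style episode listing lines roughly of
# the form (illustrative only, not a verbatim epguides line):
#   3- 5      12 Mar 09   <a target="_blank" href="http://...">Some Title</a>
# capturing season, episode, air date, episode URL and title.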
# application handlers
self.qtime = appscript.app('QuickTime Player')
self.itunes = appscript.app('iTunes')
self._load_config(configfile, topdir, refdir)
self._query_shows()
def _load_config(self, configfile, topdir, refdir):
"""Load configuration from the configuration file"""
parser = EasyConfigParser()
if not parser.read(configfile):
raise AssertionError('Unable to read config file %s' % configfile)
self.epguides = parser.get('config', 'epguides', 'http://epguides.com')
self.topdir = parser.get('config', 'topdir', topdir)
if not self.topdir:
raise AssertionError('Top directory not defined in %s' % \
configfile)
self.refdir = parser.get('config', 'refdir', refdir)
if not self.refdir:
raise AssertionError('Reference directory not defined in %s' % \
configfile)
if not os.path.isdir(self.refdir):
os.makedirs(self.refdir)
extensions = parser.get('config', 'extensions', 'avi')
self.exts = [e.strip() for e in extensions.split(',')]
self.playlistname = parser.get('config', 'playlist', 'ImportAVI')
self.map = dict()
for option in parser.options('map'):
self.map[option.lower()] = parser.get('map', option).strip()
self.ignore = []
for option in parser.options('ignore'):
if parser.get('ignore', option).strip():
self.ignore.append(option.lower())
def _query_shows(self):
"""Load existing TV shows from iTunes"""
self.tvshows = [t for t in self.itunes.tracks() \
if t.video_kind() == appscript.k.TV_show]
def _show_exists(self, show, season, episode):
"""Test whether the show already exists in iTunes"""
for track in self.tvshows:
if (show == track.show()) and \
(episode == int(track.episode_number() or 0)) and \
(season == int(track.season_number() or 0)):
print "Show %s Track %s.%s already in iTunes, skipping" % \
(show, season, episode)
return True
return False
def _make_quicktime_reffile(self, filename):
"""Creates a reference file so that iTunes can manage a foreign file"""
filesize = os.stat(filename)[6]
reffilename = os.path.join(self.refdir,
'%s.mov' % os.path.basename(filename))
try:
self.qtime.open(filename)
except appscript.reference.CommandError, e:
print >> sys.stderr, "Cannot open %s in QuickTime" % filename
return None
lastsize = 0
# increase nochange if quicktime is slow to import files
nochange = 2
while nochange:
size = self.qtime.documents[1]().data_size()
if size == lastsize and size:
nochange = max(nochange-1,0)
else:
nochange = 2
lastsize = size
time.sleep(1)
print "\rFile %s %02u%% %u" % \
(filename, (100*size)/filesize, nochange),
sys.stdout.flush()
self.qtime.save(self.qtime.documents[1],in_=reffilename)
self.qtime.close(self.qtime.documents[1])
print " saved"
return reffilename
def _inject(self, filename):
"""Inject the reference file into iTunes and define track properties"""
# Search for an existing playlist
pl = None
for p in self.itunes.playlists():
if p.name() == self.playlistname:
pl = p
break
if not pl:
pl = self.itunes.make(new = appscript.k.playlist,
with_properties = \
{appscript.k.name: self.playlistname})
macref = mactypes.File(filename)
track = self.itunes.add(macref,to = pl)
return track
def _retrieve_info(self, show, season, episode):
"""Retrieve the episode information"""
epurl = '%s/%s' % (self.epguides, show)
try:
epguide = urllib.urlopen(epurl)
except IOError:
print >> sys.stderr, "Cannot connect to %s" % self.epguides
return None
epdate = None
url = None
title = None
year = None
print "URL: %s" % epurl
for line in epguide.readlines():
mo = self.showbcre.search(line)
if mo:
if not (season == int(mo.group('season')) and
episode == int(mo.group('episode'))):
continue
epdate = datetime.datetime.strptime(mo.group('date'),
r'%d %b %y').date()
title = mo.group('title')
url = mo.group('url')
year = int(epdate.year)
print "Episode: %s %d.%d %s: %s" % \
(show, season, episode, epdate, title)
epguide.close()
if not title or not year:
return None
return (title, year, url)
def _retrieve_summary(self, url):
"""Retrieve the episode summary"""
# reader = HtmlLib.Reader()
if not url:
print >> sys.stderr, 'Warning: no URL for summary'
return ''
print "Retrieving %s" % url
show = urllib.urlopen(url)
showstr = show.read()
# Website sometimes contains invalid characters, which cause the
# DOM parser to fail. Discard any non-ASCII character
showstr = showstr.decode('ascii', 'ignore')
show.close()
doc = libxml2dom.parseString(showstr, html=1)
for node in doc.getElementsByTagName('p'):
if node.hasAttribute('class'):
if 'deck' in node.getAttribute('class'):
for child_node in node.childNodes:
if child_node.nodeType == child_node.TEXT_NODE:
summary = child_node.nodeValue.strip()
if summary:
return summary
return ''
def run(self):
"""Process video files, one after another"""
for root, dirs, files in os.walk(self.topdir):
for f in files:
(base, ext) = os.path.splitext(f)
if ext[1:].lower() in self.exts:
filename = os.path.join(root, f)
filemo = self.filecre.match(os.path.basename(filename))
if not filemo:
print >> sys.stderr, "Filename %s does not match " \
"any supported pattern" % filename
continue
show = filemo.group('show')
season = int(filemo.group('season'))
episode = int(filemo.group('episode'))
for ignore in self.ignore:
if -1 != show.lower().find(ignore):
print 'Show %s discarded, skipping' % show
show = None
break
if not show:
continue
showname = show
if show.lower() in self.map:
showname = self.map[show.lower()]
if self._show_exists(show, season, episode):
continue
info = self._retrieve_info(showname, season, episode)
if not info:
print "Unable to retrieve info for %s" % filename
continue
(title, year, url) = info
ref = self._make_quicktime_reffile(filename)
if not ref:
continue
summary = ''
if 'libxml2dom' in sys.modules:
summary = self._retrieve_summary(url)
track = self._inject(ref)
track.video_kind.set(appscript.k.TV_show)
track.show.set(show)
track.season_number.set(season)
track.episode_number.set(episode)
track.episode_ID.set(title)
track.year.set(year)
track.description.set(summary)
if __name__ == '__main__':
usage = 'Usage: %prog [options]\n'\
' Inject TV show into iTunes'
optparser = OptionParser(usage=usage)
optparser.add_option('-c', '--config', dest='config',
help='configuration file')
optparser.add_option('-d', '--dir', dest='topdir',
help='search path for video files')
optparser.add_option('-r', '--refdir', dest='refdir',
help='path to store reference files')
(options, args) = optparser.parse_args(sys.argv[1:])
if not options.config:
raise AssertionError('No configuration file')
injector = Injector(options.config, options.topdir, options.refdir)
injector.run()
|
|
#!/usr/bin/python
# -- Content-Encoding: UTF-8 --
"""
:author: Thomas Calmant
:copyright: Copyright 2014, isandlaTech
:license: Apache License 2.0
:version: 0.0.5
:status: Alpha
..
Copyright 2014 isandlaTech
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
# Module version
__version_info__ = (0, 0, 5)
__version__ = ".".join(str(x) for x in __version_info__)
# Documentation strings format
__docformat__ = "restructuredtext en"
# ------------------------------------------------------------------------------
# Herald XMPP
from . import FACTORY_TRANSPORT, SERVICE_XMPP_DIRECTORY, ACCESS_ID, \
PROP_XMPP_SERVER, PROP_XMPP_PORT, PROP_XMPP_JID, PROP_XMPP_PASSWORD, \
PROP_XMPP_KEEPALIVE_INTERVAL, PROP_XMPP_KEEPALIVE_DELAY
from .beans import XMPPAccess
from .bot import HeraldBot
import herald.transports.peer_contact as peer_contact
# Room creation utility
from .utils import RoomCreator, MarksCallback
# Herald Core
from herald.exceptions import InvalidPeerAccess
import herald
import herald.beans as beans
import herald.utils as utils
# XMPP
import sleekxmpp
# Pelix
from pelix.ipopo.decorators import ComponentFactory, Requires, Provides, \
Property, Validate, Invalidate, RequiresBest
from pelix.utilities import to_str, to_bytes
import pelix.misc.jabsorb as jabsorb
import pelix.threadpool as threadpool
# Standard library
import hashlib
import json
import logging
import threading
import time
import uuid
# ------------------------------------------------------------------------------
_logger = logging.getLogger(__name__)
FEATURE_MUC = 'http://jabber.org/protocol/muc'
# ------------------------------------------------------------------------------
@ComponentFactory(FACTORY_TRANSPORT)
@RequiresBest('_probe', herald.SERVICE_PROBE)
@Requires('_herald', herald.SERVICE_HERALD_INTERNAL)
@Requires('_directory', herald.SERVICE_DIRECTORY)
@Requires('_xmpp_directory', SERVICE_XMPP_DIRECTORY)
@Provides(herald.SERVICE_TRANSPORT, '_controller')
@Property('_access_id', herald.PROP_ACCESS_ID, ACCESS_ID)
@Property('_host', PROP_XMPP_SERVER, 'localhost')
@Property('_port', PROP_XMPP_PORT, 5222)
@Property('_username', PROP_XMPP_JID)
@Property('_password', PROP_XMPP_PASSWORD)
@Property('_xmpp_keepalive_interval', PROP_XMPP_KEEPALIVE_INTERVAL, 15)
@Property('_xmpp_keepalive_delay', PROP_XMPP_KEEPALIVE_DELAY, 5)
class XmppTransport(object):
"""
XMPP Messenger for Herald.
"""
def __init__(self):
"""
Sets up the transport
"""
# Probe service
self._probe = None
# Herald core service
self._herald = None
# Herald Core directory
self._directory = None
# Herald XMPP directory
self._xmpp_directory = None
# Service controller
self._controller = False
# Peer contact handling
self.__contact = None
# Properties
self._access_id = ACCESS_ID
self._host = "localhost"
self._port = 5222
self._username = None
self._password = None
# Herald XMPP keepalive interval
self._xmpp_keepalive_interval = 15
# Herald XMPP Keepalive delay
self._xmpp_keepalive_delay = 5
# XMPP bot
self._authenticated = False
self._bot = None
# MUC service name
self.__muc_service = None
# Pending count downs and joined rooms
self.__countdowns = set()
self.__countdowns_lock = threading.Lock()
# Message sending queue
self.__pool = threadpool.ThreadPool(max_threads=1,
logname="Herald-XMPP-SendThread")
# Bot possible states : creating, created, destroying, destroyed
self._bot_state = "destroyed"
# Bot state's lock
self._bot_lock = threading.RLock()
# Bot method recall timer
self._bot_recall_timer = None
@Validate
def _validate(self, _):
"""
Component validated
"""
self.__create_new_bot()
def __create_new_bot(self):
"""
(Re)Creates a new XMPP bot object
"""
# Cancel the current timer
if self._bot_recall_timer is not None:
self._bot_recall_timer.cancel()
self._bot_recall_timer = None
with self._bot_lock:
if self._bot_state == "destroyed":
threading.Thread(target=self._create_new_bot,
name="Herald-CreateBot").start()
self._bot_state = "creating"
_logger.info("changing XMPP bot state to : creating")
elif self._bot_state == "destroying":
# Wait before trying to create new bot
self._bot_recall_timer = \
threading.Timer(1, self.__create_new_bot)
self._bot_recall_timer.start()
elif self._bot_state == "creating":
pass
elif self._bot_state == "created":
pass
def _create_new_bot(self):
"""
Really (re)creates a new XMPP bot object
"""
# Clear & Start the thread pool
self.__pool.clear()
self.__pool.start()
# Prepare the peer contact handler
self.__contact = peer_contact.PeerContact(self._directory, None,
__name__ + ".contact")
if self._username:
# Generate the bot full JID: resource is set to be peer's UUID
bot_jid = sleekxmpp.JID(self._username)
bot_jid.resource = self._directory.local_uid
else:
# Anonymous login
bot_jid = None
# Create the bot
self._bot = HeraldBot(bot_jid, self._password,
self._directory.local_uid)
# Disable the bot's automatic reconnection: when a 'disconnected' event is
# captured, we destroy the old bot and create a new one instead.
self._bot.auto_reconnect = False
# This avoids waiting too long between initial reconnection attempts, e.g. if
# the first server addresses (DNS records) are not responding
self._bot.reconnect_max_delay = 5
# Register to session events
self._bot.add_event_handler("session_start", self._on_session_start)
self._bot.add_event_handler("failed_auth", self._on_failed_auth)
self._bot.add_event_handler("session_end", self._on_session_end)
self._bot.add_event_handler("disconnected", self._on_disconnected)
# Register the Multi-User Chat plug-in
self._bot.register_plugin('xep_0045')
# Register "XEP-0203: Delayed Delivery" plug-in
self._bot.register_plugin("xep_0203")
# Register to messages (loop back filtered by the bot)
self._bot.set_message_callback(self.__on_message)
# Connect to the server
# set reattempt to True to try to reconnect to the server in case of
# network problems or address errors.
if not self._bot.connect(self._host, self._port, reattempt=True):
_logger.error("Can't connect to the XMPP server at %s port %s",
self._host, self._port)
# the bot is fully created and activated when all rooms are created
# (see __on_ready)
def __destroy_bot(self):
"""
Destroys the current bot
"""
# Cancel the current timer
if self._bot_recall_timer is not None:
self._bot_recall_timer.cancel()
self._bot_recall_timer = None
with self._bot_lock:
if self._bot_state == "destroyed":
pass
elif self._bot_state == "destroying":
pass
elif self._bot_state == "creating":
# Wait before trying to destroy the existing bot
self._bot_recall_timer = threading.Timer(1, self.__destroy_bot)
self._bot_recall_timer.start()
elif self._bot_state == "created":
# Destroy the bot, in a new thread
threading.Thread(target=self._destroy_bot,
name="Herald-DestroyBot").start()
self._bot_state = "destroying"
_logger.info("changing XMPP bot state to : destroying")
def _destroy_bot(self):
"""
Destroys the current bot
"""
# Stop & Clear the pool
self.__pool.stop()
self.__pool.clear()
# Disconnect the bot and clear callbacks
if self._bot is not None:
# self._bot.plugin['xep_0199'].disable_keepalive()
self._bot.set_message_callback(None)
self._bot.del_event_handler("session_start", self._on_session_start)
self._bot.del_event_handler("failed_auth", self._on_failed_auth)
self._bot.del_event_handler("session_end", self._on_session_end)
self._bot.del_event_handler("disconnected", self._on_disconnected)
self._bot.disconnect(reconnect=False)
self._bot.set_stop()
self._bot = None
else:
_logger.warning("trying to destroy an already destroyed XMPP bot!")
# Clean up internal storage
if self.__contact is not None:
self.__contact.clear()
self.__contact = None
with self._bot_lock:
# unregister provided service
self._controller = False
# change state
self._bot_state = "destroyed"
_logger.info("changing XMPP bot state to : destroyed")
@Invalidate
def _invalidate(self, _):
"""
Component invalidated
"""
self.__destroy_bot()
def room_jid(self, room_name):
"""
Prepares a JID object for the given room in the current MUC domain
:param room_name: The short name of a room
:return: A JID object
"""
if self.__muc_service == "groupchat.google.com":
# Special case: Google Talk requires a specific room name format
# Make a MD5 hash of the full room name
app_id = self._directory.get_local_peer().app_id
full_name = "cohorte-{0}-{1}".format(app_id, room_name)
md5 = hashlib.md5(to_bytes(full_name)).hexdigest()
# Format the room name to be Google Talk-compatible
room_name = "private-chat-{0}".format(str(uuid.UUID(md5)))
return sleekxmpp.JID(local=room_name, domain=self.__muc_service)
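# Illustrative example: with app_id 'app' and room_name 'core' the full name
# is 'cohorte-app-core'; its MD5 digest is reinterpreted as a UUID, yielding a
# room such as 'private-chat-xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx' on the
# groupchat.google.com MUC service.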
def __create_rooms(self, rooms, nickname):
"""
Creates or joins the given rooms
:param rooms: A list of rooms to join / create
:param nickname: Nickname to use in MUC rooms
:raise ValueError: No Multi-User Chat service available
"""
# Look for the MUC service if necessary
if not self.__muc_service:
if self._bot.boundjid.domain == "gmail.com":
# Special case: Google Talk
self.__muc_service = "groupchat.google.com"
else:
try:
self.__muc_service = next(
self._bot.iter_services(FEATURE_MUC))
except StopIteration:
raise ValueError("No Multi-User Chat service on server")
# Generate rooms JIDs
rooms_jids = {room: self.room_jid(room) for room in rooms}
# Prepare a callback
self.__countdowns.add(
MarksCallback((str(room_jid) for room_jid in rooms_jids.values()),
self.__on_ready, __name__ + ".RoomCreator"))
# Prepare the room creator
creator = RoomCreator(self._bot, __name__ + ".RoomCreator")
# Prepare rooms configuration
rooms_config = {
# ... no max users limit
'muc#roomconfig_maxusers': '0',
# ... open to anyone
'muc#roomconfig_membersonly': '0',
# ... every participant can send invites
'muc#roomconfig_allowinvites': '1',
# ... room can disappear
'muc#roomconfig_persistentroom': '0',
# ... OpenFire: Forbid nick changes
'x-muc#roomconfig_canchangenick': '0'}
# Create rooms, with our computing JID
for room, room_jid in rooms_jids.items():
creator.create_room(room, self.__muc_service, nickname,
rooms_config, self.__room_created,
self.__room_error, room_jid)
def __room_created(self, room, _):
"""
A room has been correctly created, and we're its owner
:param room: Bare JID of the room
:param _: Our nick in the room
"""
with self.__countdowns_lock:
to_remove = set()
for countdown in self.__countdowns:
# Mark the room
countdown.set(room)
# Check for cleanup
if countdown.is_done():
to_remove.add(countdown)
# Cleanup
self.__countdowns.difference_update(to_remove)
def __room_error(self, room, nick, condition, text):
"""
Error creating a room
:param room: Bare JID of the room
:param nick: Our nick in the room
:param condition: Category of error
:param text: Description of the error
"""
if condition == 'not-owner':
_logger.debug("We are not the owner of %s", room)
self.__room_created(room, nick)
else:
with self.__countdowns_lock:
to_remove = set()
for countdown in self.__countdowns:
# Mark the room
countdown.set_error(room)
# Check for cleanup
if countdown.is_done():
to_remove.add(countdown)
# Cleanup
self.__countdowns.difference_update(to_remove)
_logger.error("Error creating room: %s (%s)", text, condition)
def _on_failed_auth(self, _):
"""
An authentication attempt failed
"""
self._authenticated = False
def _on_session_start(self, _):
"""
The bot session has started: create the main room
"""
self._authenticated = True
# Log our JID
_logger.info("Bot connected with JID: %s", self._bot.boundjid.bare)
# Get our local peer description
peer = self._directory.get_local_peer()
# Create/join rooms for each group
all_rooms = ["{0}--{1}".format(peer.app_id, group)
for group in peer.groups]
all_rooms.append(peer.app_id)
# Wait to have joined all rooms before activating the service
_logger.debug("Creating XMPP rooms...")
self.__create_rooms(all_rooms, peer.uid)
# activate keepalive (ping) plugin xep_0199
self._bot.plugin['xep_0199'].enable_keepalive(
self._xmpp_keepalive_interval, self._xmpp_keepalive_delay)
def __on_ready(self, joined, erroneous):
"""
Called when all MUC rooms have been created or joined
:param joined: List of joined rooms
:param erroneous: List of room that couldn't be joined
"""
_logger.debug("Client joined rooms: %s", ", ".join(joined))
if erroneous:
_logger.error("Error joining rooms: %s", ", ".join(erroneous))
# Register our local access
local_peer = self._directory.get_local_peer()
local_peer.set_access(self._access_id,
XMPPAccess(self._bot.boundjid.full))
# Listen to peers going away from the main room
self._bot.add_event_handler(
"muc::{0}::got_offline".format(self.room_jid(local_peer.app_id)),
self._on_room_out)
with self._bot_lock:
# We're on line, register our service
_logger.debug("XMPP transport service activating...")
self._controller = True
_logger.info("XMPP transport service activated")
self._bot_state = "created"
_logger.info("changing XMPP bot state to : created")
# Start the discovery handshake, with everybody
_logger.debug("Sending discovery step 1...")
message = beans.Message(peer_contact.SUBJECT_DISCOVERY_STEP_1,
local_peer.dump())
self.__send_message("groupchat", self.room_jid(local_peer.app_id),
message)
def _on_session_end(self, _):
"""
End of session
"""
if self._authenticated:
_logger.info("End of session")
else:
_logger.warning("End of session due to authentication error")
with self._bot_lock:
if self._bot_state in ("creating", "created"):
# disable keepalive plugin
self._bot.plugin['xep_0199'].disable_keepalive()
# try:
# Clean up our access
local_peer = self._directory.get_local_peer()
if local_peer is not None:
local_peer.unset_access(self._access_id)
# Shut down the service (MOD: see _destroy_bot)
# self._controller = False
# Stop listening to the main room
# Listen to peers going away from the main room
self._bot.del_event_handler(
"muc::{0}::got_offline".format(
self.room_jid(local_peer.app_id)),
self._on_room_out)
# except:
# pass
def _on_room_out(self, data):
"""
Someone exited the main room
:param data: MUC presence stanza
"""
uid = data['from'].resource
room_jid = data['from'].bare
local_peer = self._directory.get_local_peer()
app_room_jid = self.room_jid(local_peer.app_id)
if uid != self._directory.local_uid and room_jid == app_room_jid:
# Someone else is leaving the main room: clean up the directory
try:
peer = self._directory.get_peer(uid)
peer.unset_access(ACCESS_ID)
except KeyError:
pass
else:
_logger.debug("Peer %s disconnected from XMPP", peer)
def _on_disconnected(self, _):
"""
The bot has been disconnected, maybe due to a network problem
"""
# create a new bot (which implies the deletion of the old one)
_logger.warning("Bot disconnected: reconnect (state=%s)",
self._bot_state)
with self._bot_lock:
# Destroy the old bot, if any
if self._bot_state == "created":
self.__destroy_bot()
# Create a new bot
self.__create_new_bot()
def __on_message(self, msg):
"""
Received an XMPP message
:param msg: A message stanza
"""
if msg['delay']['stamp'] is not None:
# Delayed message: ignore
return
subject = msg['subject']
if not subject:
# No subject: not a Herald message, treat it differently
self.__handle_raw_message(msg)
return
# Check if the message is from Multi-User Chat or direct
muc_message = msg['type'] == 'groupchat' \
or msg['from'].domain == self.__muc_service
sender_jid = msg['from'].full
try:
if muc_message:
# Group message: resource is the isolate UID
sender_uid = msg['from'].resource
else:
sender_uid = self._xmpp_directory.from_jid(sender_jid).uid
except KeyError:
sender_uid = "<unknown>"
try:
received_msg = utils.from_json(msg['body'])
content = received_msg.content
except ValueError:
# Content can't be decoded, use its string representation as is
content = msg['body']
uid = msg['thread']
reply_to = msg['parent_thread']
# Extra parameters, for a reply
extra = {"parent_uid": uid,
"sender_jid": sender_jid}
received_msg.add_header(herald.MESSAGE_HEADER_UID, uid)
received_msg.add_header(herald.MESSAGE_HEADER_SENDER_UID, sender_uid)
received_msg.add_header(herald.MESSAGE_HEADER_REPLIES_TO, reply_to)
received_msg.set_content(content)
received_msg.set_access(self._access_id)
received_msg.set_extra(extra)
# Call back the core service
message = received_msg
# Log before giving message to Herald
self._probe.store(
herald.PROBE_CHANNEL_MSG_RECV,
{"uid": message.uid, "timestamp": time.time(),
"transport": ACCESS_ID, "subject": message.subject,
"source": sender_uid, "repliesTo": reply_to or "",
"transportSource": str(sender_jid)})
if subject.startswith(peer_contact.SUBJECT_DISCOVERY_PREFIX):
# Handle discovery message
self.__contact.herald_message(self._herald, message)
else:
# All other messages are given to Herald Core
self._herald.handle_message(message)
def __handle_raw_message(self, msg):
"""
Handles a message that is not from Herald
:param msg: An XMPP message
"""
# Generate a UUID for this message
uid = str(uuid.uuid4())
# No sender
sender_uid = "<unknown>"
# Give the raw body as content
content = msg['body']
# Extra parameters, for a reply
sender_jid = msg['from'].full
extra = {"sender_jid": sender_jid, "raw": True}
# Call back the core service
message = beans.MessageReceived(uid, herald.SUBJECT_RAW,
content, sender_uid,
None, self._access_id, extra=extra)
# Log before giving message to Herald
self._probe.store(
herald.PROBE_CHANNEL_MSG_RECV,
{"uid": message.uid, "timestamp": time.time(),
"transport": ACCESS_ID, "subject": message.subject,
"source": sender_uid, "repliesTo": "",
"transportSource": str(sender_jid)})
# All other messages are given to Herald Core
self._herald.handle_message(message)
def __get_jid(self, peer, extra):
"""
Retrieves the JID to use to communicate with a peer
:param peer: A Peer bean or None
:param extra: The extra information for a reply or None
:return: The JID to use to reply, or None
"""
# Get JID from reply information
jid = None
if extra is not None:
jid = extra.get('sender_jid')
# Try to read information from the peer
if not jid and peer is not None:
try:
# Get the target JID
jid = peer.get_access(self._access_id).jid
except (KeyError, AttributeError):
pass
return jid
def __send_message(self, msgtype, target, message, parent_uid=None, target_peer=None, target_group=None):
"""
Prepares and sends a message over XMPP
:param msgtype: Kind of message (chat or groupchat)
:param target: Target JID or MUC room
:param message: Herald message bean
:param parent_uid: UID of the message this one replies to (optional)
"""
# Convert content to JSON
if message.subject in herald.SUBJECTS_RAW:
content = to_str(message.content)
else:
# update headers
local_peer = self._directory.get_local_peer()
message.add_header(herald.MESSAGE_HEADER_SENDER_UID, local_peer.uid)
if target_peer is not None:
message.add_header(herald.MESSAGE_HEADER_TARGET_PEER, target_peer.uid)
if target_group is not None:
message.add_header(herald.MESSAGE_HEADER_TARGET_GROUP, target_group)
content = utils.to_json(message)
# Prepare an XMPP message, based on the Herald message
xmpp_msg = self._bot.make_message(mto=target,
mbody=content,
msubject=message.subject,
mtype=msgtype)
xmpp_msg['thread'] = message.uid
if parent_uid:
xmpp_msg['parent_thread'] = parent_uid
# Store message content
self._probe.store(
herald.PROBE_CHANNEL_MSG_CONTENT,
{"uid": message.uid, "content": content}
)
# Send it, using the 1-thread pool, and wait for its execution
future = self.__pool.enqueue(xmpp_msg.send)
return future.result()
def fire(self, peer, message, extra=None):
"""
Fires a message to a peer
:param peer: A Peer bean
:param message: Message to send
:param extra: Extra information used in case of a reply
"""
# Get the request message UID, if any
parent_uid = None
if extra is not None:
parent_uid = extra.get('parent_uid')
# Try to read extra information
jid = self.__get_jid(peer, extra)
if jid:
# Log before sending
self._probe.store(
herald.PROBE_CHANNEL_MSG_SEND,
{"uid": message.uid, "timestamp": time.time(),
"transport": ACCESS_ID, "subject": message.subject,
"target": peer.uid if peer else "<unknown>",
"transportTarget": str(jid), "repliesTo": parent_uid or ""})
# Send the XMPP message
self.__send_message("chat", jid, message, parent_uid, target_peer=peer)
else:
# No XMPP access description
raise InvalidPeerAccess(beans.Target(uid=peer.uid),
"No '{0}' access found"
.format(self._access_id))
def fire_group(self, group, peers, message):
"""
Fires a message to a group of peers
:param group: Name of a group
:param peers: Peers to communicate with
:param message: Message to send
:return: The list of reached peers
"""
# Get the local application ID
app_id = self._directory.get_local_peer().app_id
# Special case for the main room
if group in ('all', 'others'):
group_jid = self.room_jid(app_id)
else:
# Get the group JID
group_jid = self.room_jid("{0}--{1}".format(app_id, group))
# Log before sending
self._probe.store(
herald.PROBE_CHANNEL_MSG_SEND,
{"uid": message.uid, "timestamp": time.time(),
"transport": ACCESS_ID, "subject": message.subject,
"target": group, "transportTarget": str(group_jid),
"repliesTo": ""})
# Send the XMPP message
self.__send_message("groupchat", group_jid, message, target_group=group)
return peers
|
|
import unittest
import os.path
import sys
import mtree.tests.fixtures as fixtures
import mtree.tests.fixtures.generator as generator
from mtree import MTree
import mtree.functions as f
class Test(unittest.TestCase):
def setUp(self):
# Removing randomness
def not_random_promotion(data_objects, distance_function):
data_objects = sorted(data_objects)
return data_objects[0], data_objects[-1]
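# Promoting the smallest and largest objects (rather than a random pair)
# makes every node split deterministic, so test runs are reproducible.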
self.mtree = MTree(
min_node_capacity=2,
max_node_capacity=3,
split_function=f.make_split_function(not_random_promotion, f.balanced_partition)
)
def checked(unchecked_method):
def checked_method(*args, **kwargs):
try:
return unchecked_method(*args, **kwargs)
finally:
self.mtree._check()
return checked_method
self.mtree.add = checked(self.mtree.add)
self.mtree.remove = checked(self.mtree.remove)
self.all_data = set()
def testEmpty(self):
self._check_nearest_by_range((1, 2, 3), 4)
self._check_nearest_by_limit((1, 2, 3), 4)
def test01(self): self._test('f01')
def test02(self): self._test('f02')
def test03(self): self._test('f03')
def test04(self): self._test('f04')
def test05(self): self._test('f05')
def test06(self): self._test('f06')
def test07(self): self._test('f07')
def test08(self): self._test('f08')
def test09(self): self._test('f09')
def test10(self): self._test('f10')
def test11(self): self._test('f11')
def test12(self): self._test('f12')
def test13(self): self._test('f13')
def test14(self): self._test('f14')
def test15(self): self._test('f15')
def test16(self): self._test('f16')
def test17(self): self._test('f17')
def test18(self): self._test('f18')
def test19(self): self._test('f19')
def test20(self): self._test('f20')
def testLots(self): self._test('fLots')
def testRemoveNonExisting(self):
# Empty
self.assertRaises(KeyError, lambda: self.mtree.remove((99, 77)))
# With some items
self.mtree.add((4, 44))
self.assertRaises(KeyError, lambda: self.mtree.remove((99, 77)))
self.mtree.add((95, 43))
self.assertRaises(KeyError, lambda: self.mtree.remove((99, 77)))
self.mtree.add((76, 21))
self.assertRaises(KeyError, lambda: self.mtree.remove((99, 77)))
self.mtree.add((64, 53))
self.assertRaises(KeyError, lambda: self.mtree.remove((99, 77)))
self.mtree.add((47, 3))
self.assertRaises(KeyError, lambda: self.mtree.remove((99, 77)))
self.mtree.add((26, 11))
self.assertRaises(KeyError, lambda: self.mtree.remove((99, 77)))
def testGeneratedCase01(self): self._test('fG01')
def testGeneratedCase02(self): self._test('fG02')
def testRandom(self):
fixtures_path, _ = os.path.split(fixtures.__file__)
random_test_path = os.path.join(fixtures_path, 'fRandom.py')
if os.path.isfile(random_test_path):
print >>sys.stderr, "WARNING: Using previously generated random test (fRandom)."
generated = False
else:
# Random test doesn't exist. Generate it
options = generator.Options(actions=500, dimensions=3, remove_chance=0.2)
fixture = generator.generate_test_data(options)
f = file(random_test_path, 'w')
stdout_bkp = sys.stdout
sys.stdout = f
try:
print "# Test case generated by testRandom()."
generator.print_test_data(fixture, options)
finally:
sys.stdout = stdout_bkp
f.close()
generated = True
try:
self._test('fRandom')
except:
print >>sys.stderr, "WARNING: The random test (fRandom) failed."
print >>sys.stderr, "Investigate it, fix MTree and then convert"
print >>sys.stderr, "the random test to a permanent test case."
raise
else:
if generated:
os.remove(random_test_path)
for compiled_file in (random_test_path + 'c', random_test_path + 'o'):
if os.path.isfile(compiled_file):
os.remove(compiled_file)
else:
print >>sys.stderr, "ATTENTION: The previously existing random test"
print >>sys.stderr, "has passed. Do want to delete it or convert to"
print >>sys.stderr, "a permanent test case?"
def _test(self, fixture_name):
fixtures = __import__('fixtures.' + fixture_name)
fixture = getattr(fixtures, fixture_name)
self._test_fixture(fixture)
def _test_fixture(self, fixture):
def callback(action):
if isinstance(action, generator.ADD):
assert action.data not in self.all_data
self.all_data.add(action.data)
self.mtree.add(action.data)
elif isinstance(action, generator.REMOVE):
assert action.data in self.all_data
self.all_data.remove(action.data)
self.mtree.remove(action.data)
else:
assert False, action.__class__
self._check_nearest_by_range(action.query.data, action.query.radius)
self._check_nearest_by_limit(action.query.data, action.query.limit)
fixture.PERFORM(callback)
def _check_nearest_by_range(self, query_data, radius):
result = list(self.mtree.get_nearest(query_data, range=radius))
previous_distance = None
for item in result:
data, distance = item
# Check if increasing distance
if previous_distance is not None:
self.assertTrue(distance is not None)
self.assertLessEqual(previous_distance, distance)
previous_distance = distance
# Check that every item in the results came from the stored data
self.assertIn(data, self.all_data)
self.assertTrue(isinstance(item, MTree.ResultItem), item)
# Check if every item in the results is within the range
self.assertLessEqual(distance, radius)
self.assertEqual(self.mtree.distance_function(data, query_data), distance)
stripped_result = [item.data for item in result]
for data in self.all_data:
dist = self.mtree.distance_function(data, query_data)
if dist <= radius:
self.assertIn(data, stripped_result)
else:
self.assertNotIn(data, stripped_result)
def _check_nearest_by_limit(self, query_data, limit):
nearest_result = list(self.mtree.get_nearest(query_data, limit=limit))
if limit <= len(self.all_data):
self.assertEquals(limit, len(nearest_result))
else: # limit > len(self.all_data)
self.assertEquals(len(self.all_data), len(nearest_result))
farthest = 0.0
previous_distance = None
for item in nearest_result:
data, distance = item
# Check if increasing distance
if previous_distance is not None:
self.assertTrue(distance is not None)
self.assertLessEqual(previous_distance, distance)
previous_distance = distance
# Check that every item in the results came from the stored data
self.assertIn(data, self.all_data)
self.assertTrue(isinstance(item, MTree.ResultItem))
# Check if items are not repeated
self.assertEqual(1, nearest_result.count(item))
d = self.mtree.distance_function(data, query_data)
self.assertEqual(d, distance)
farthest = max(farthest, d)
stripped_nearest_result = [item.data for item in nearest_result]
for data in self.all_data:
d = self.mtree.distance_function(data, query_data)
if d < farthest:
self.assertIn(data, stripped_nearest_result)
elif d > farthest:
self.assertNotIn(data, stripped_nearest_result)
else: # d == farthest:
pass
if __name__ == "__main__":
#import sys;sys.argv = ['', 'Test.testName']
unittest.main()
|
|
#!/usr/bin/env python3
# Copyright (c) 2017 The PlanBcoin developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""An example functional test
The module-level docstring should include a high-level description of
what the test is doing. It's the first thing people see when they open
the file and should give the reader information about *what* the test
is testing and *how* it's being tested
"""
# Imports should be in PEP8 ordering (std library first, then third party
# libraries then local imports).
from collections import defaultdict
# Avoid wildcard * imports if possible
from test_framework.blocktools import (create_block, create_coinbase)
from test_framework.mininode import (
CInv,
NetworkThread,
NodeConn,
NodeConnCB,
mininode_lock,
msg_block,
msg_getdata,
wait_until,
)
from test_framework.test_framework import PlanbcoinTestFramework
from test_framework.util import (
assert_equal,
connect_nodes,
p2p_port,
)
# NodeConnCB is a class containing callbacks to be executed when a P2P
# message is received from the node-under-test. Subclass NodeConnCB and
# override the on_*() methods if you need custom behaviour.
class BaseNode(NodeConnCB):
def __init__(self):
"""Initialize the NodeConnCB
Used to initialize custom properties for the Node that aren't
included by default in the base class. Be aware that the NodeConnCB
base class already stores a counter for each P2P message type and the
last received message of each type, which should be sufficient for the
needs of most tests.
Call super().__init__() first for standard initialization and then
initialize custom properties."""
super().__init__()
# Stores a dictionary of all blocks received
self.block_receive_map = defaultdict(int)
def on_block(self, conn, message):
"""Override the standard on_block callback
Store the hash of a received block in the dictionary."""
message.block.calc_sha256()
self.block_receive_map[message.block.sha256] += 1
def custom_function():
"""Do some custom behaviour
If this function is more generally useful for other tests, consider
moving it to a module in test_framework."""
# self.log.info("running custom_function") # Oops! Can't run self.log outside the PlanbcoinTestFramework
pass
class ExampleTest(PlanbcoinTestFramework):
# Each functional test is a subclass of the PlanbcoinTestFramework class.
# Override the __init__(), add_options(), setup_chain(), setup_network()
# and setup_nodes() methods to customize the test setup as required.
def __init__(self):
"""Initialize the test
Call super().__init__() first, and then override any test parameters
for your individual test."""
super().__init__()
self.setup_clean_chain = True
self.num_nodes = 3
# Use self.extra_args to change command-line arguments for the nodes
self.extra_args = [[], ["-logips"], []]
# self.log.info("I've finished __init__") # Oops! Can't run self.log before run_test()
# Use add_options() to add specific command-line options for your test.
# In practice this is not used very much, since the tests are mostly written
# to be run in automated environments without command-line options.
# def add_options()
# pass
# Use setup_chain() to customize the node data directories. In practice
# this is not used very much since the default behaviour is almost always
# fine
# def setup_chain():
# pass
def setup_network(self):
"""Setup the test network topology
Often you won't need to override this, since the standard network topology
(linear: node0 <-> node1 <-> node2 <-> ...) is fine for most tests.
If you do override this method, remember to start the nodes, assign
them to self.nodes, connect them and then sync."""
self.setup_nodes()
# In this test, we're not connecting node2 to node0 or node1. Calls to
# sync_all() should not include node2, since we're not expecting it to
# sync.
connect_nodes(self.nodes[0], 1)
self.sync_all([self.nodes[0:1]])
# Use setup_nodes() to customize the node start behaviour (for example if
# you don't want to start all nodes at the start of the test).
# def setup_nodes():
# pass
def custom_method(self):
"""Do some custom behaviour for this test
Define it in a method here because you're going to use it repeatedly.
If you think it's useful in general, consider moving it to the base
PlanbcoinTestFramework class so other tests can use it."""
self.log.info("Running custom_method")
def run_test(self):
"""Main test logic"""
# Create a P2P connection to one of the nodes
node0 = BaseNode()
connections = []
connections.append(NodeConn('127.0.0.1', p2p_port(0), self.nodes[0], node0))
node0.add_connection(connections[0])
# Start up network handling in another thread. This needs to be called
# after the P2P connections have been created.
NetworkThread().start()
# wait_for_verack ensures that the P2P connection is fully up.
node0.wait_for_verack()
# Generating a block on one of the nodes will get us out of IBD
blocks = [int(self.nodes[0].generate(nblocks=1)[0], 16)]
self.sync_all([self.nodes[0:1]])
# Notice above how we called an RPC by calling a method with the same
# name on the node object. Notice also how we used a keyword argument
# to specify a named RPC argument. Neither of those are defined on the
# node object. Instead there's some __getattr__() magic going on under
# the covers to dispatch unrecognised attribute calls to the RPC
# interface.
# Logs are nice. Do plenty of them. They can be used in place of comments for
# breaking the test into sub-sections.
self.log.info("Starting test!")
self.log.info("Calling a custom function")
custom_function()
self.log.info("Calling a custom method")
self.custom_method()
self.log.info("Create some blocks")
self.tip = int(self.nodes[0].getbestblockhash(), 16)
self.block_time = self.nodes[0].getblock(self.nodes[0].getbestblockhash())['time'] + 1
height = 1
for i in range(10):
# Use the mininode and blocktools functionality to manually build a block
# Calling the generate() rpc is easier, but this allows us to exactly
# control the blocks and transactions.
block = create_block(self.tip, create_coinbase(height), self.block_time)
block.solve()
block_message = msg_block(block)
# Send message is used to send a P2P message to the node over our NodeConn connection
node0.send_message(block_message)
self.tip = block.sha256
blocks.append(self.tip)
self.block_time += 1
height += 1
self.log.info("Wait for node1 to reach current tip (height 11) using RPC")
self.nodes[1].waitforblockheight(11)
self.log.info("Connect node2 and node1")
connect_nodes(self.nodes[1], 2)
self.log.info("Add P2P connection to node2")
node2 = BaseNode()
connections.append(NodeConn('127.0.0.1', p2p_port(2), self.nodes[2], node2))
node2.add_connection(connections[1])
node2.wait_for_verack()
self.log.info("Wait for node2 reach current tip. Test that it has propogated all the blocks to us")
for block in blocks:
getdata_request = msg_getdata()
getdata_request.inv.append(CInv(2, block))
node2.send_message(getdata_request)
# wait_until() will loop until a predicate condition is met. Use it to test properties of the
# NodeConnCB objects.
assert wait_until(lambda: sorted(blocks) == sorted(list(node2.block_receive_map.keys())), timeout=5)
self.log.info("Check that each block was received only once")
# The network thread uses a global lock on data access to the NodeConn objects when sending and receiving
# messages. The test thread should acquire the global lock before accessing any NodeConn data to avoid locking
# and synchronization issues. Note wait_until() acquires this global lock when testing the predicate.
with mininode_lock:
for block in node2.block_receive_map.values():
assert_equal(block, 1)
if __name__ == '__main__':
ExampleTest().main()
|
|
"""
This module provides an optimizer class that is based on an evolution
strategy algorithm.
"""
import random, math
from time import time
from xml.dom import minidom
from extra.schedule import Schedule
from extra.printer import pprint, BLUE
class Optimizer(object):
"""
This class is the implementation of the evolution strategy to optimize
and evaluate schedules.
"""
def __init__(self, plant, orderList, simulator, evaluator):
"""
plant - the plant to run the simulation and evaluation on
orderList - the list of orders in the given schedule
simulator - Simulator instance to run a schedule
evaluator - Evaluator instance to evaluate a schedule
"""
assert plant != None
assert orderList != None
self.plant = plant
self.orderList = orderList
self.simulator = simulator
self.evaluator = evaluator
# used for benchmarking
self.simulatorTime = 0
# enable/disable console output
self.printing = True
# parameters for the evolution strategy algorithm
self.populationSize = 0
self.indivMutationRate = 0
self.selectionRate = 0
self.mutationRange = 0
self.iterations = 0
@staticmethod
def fromXml(xmlDoc, plant, orderList, simulator, evaluator):
"""
Loads the optimizer configuration and parameters from an XML tree.
"""
optimizer = Optimizer(plant, orderList, simulator, evaluator)
element = xmlDoc.getElementsByTagName("optimizer")
# there should only be 1 optimizer node in the XML tree!
assert len(element) == 1
element = element[0]
# load the different attributes
optimizer.populationSize = \
int(element.getAttribute("populationSize"))
optimizer.mutationRange = \
int(element.getAttribute("mutationRange"))
optimizer.iterations = \
int(element.getAttribute("iterations"))
optimizer.indivMutationRate = \
float(element.getAttribute("indivMutationRate"))
optimizer.selectionRate = \
float(element.getAttribute("selectionRate"))
return optimizer
@staticmethod
def fromXmlFile(filename, plant, orderList, simulator, evaluator):
"""
Loads the optimizer configuration and parameters from an XML tree.
"""
file = open(filename, "r")
doc = minidom.parse(file)
optimizer = Optimizer.fromXml(doc, plant, orderList, simulator, evaluator)
file.close()
return optimizer
def run(self, initialPopulation = None):
"""
Entry point of the evolution strategy algorithm.
"""
pprint("OPT calculating initial population...", BLUE, self.printing)
if initialPopulation == None:
# if we don't get an initial set of schedules as the initial population,
# then we need to generate one.
population = self.initialPopulation()
else:
# if we do get an initial population as input, then we just need to
# calculate the fitnesses of the schedules in it.
for p in initialPopulation:
self.calcIndividualFitness(p)
# if the population is too small or too large (less than or larger than
# self.populationSize) then this will fix that for us.
population = self.mutatePopulation(initialPopulation)
# go through the needed number of iterations and mutate the population
# every time; this keeps the best individuals and returns the
# best population achieved at the end.
i = 0
while i < self.iterations:
pprint("OPT iteration number %s" % (i + 1), BLUE, self.printing)
population = self.mutatePopulation(population)
i += 1
return population
def calcIndividualFitness(self, indiv):
"""
Calculates fitness of a schedule.
"""
assert indiv.fitness == None
t = time()
self.simulator.simulate(indiv)
self.evaluator.evaluate(indiv)
t = time() - t
self.simulatorTime += t
def sortPopulation(self, population):
"""
Sorts the population based on fitness, to have the better individuals
at the beginning of the population list.
"""
population.sort(lambda a, b: cmp(b.fitness, a.fitness))
def mutatePopulation(self, population):
"""
Mutates a population. Selects the best n individuals (based on the
selectionRate) to mutate (maybe they'll give us even better individuals!).
After mutating an individual, it checks whether a similar individual
already exists in the population; if so, it mutates again. Otherwise it
calculates the fitness of the mutated individual and appends it to the
list. The population is then sorted by fitness and the best
populationSize items are returned.
"""
limit = int(math.ceil(self.selectionRate * len(population)))
i = 0
while i < limit:
mutatedIndiv = self.mutateIndividual(population[i])
while self.isIndividualInPopulation(mutatedIndiv, population) == True:
mutatedIndiv = self.mutateIndividual(population[i])
self.calcIndividualFitness(mutatedIndiv)
population.append(mutatedIndiv)
i += 1
self.sortPopulation(population)
return population[:self.populationSize]
def isIndividualInPopulation(self, individual, population):
"""
Checks if an individual is in a population.
"""
for i in population:
if i == individual:
return True
return False
def initialPopulation(self):
"""
Generates an initial population.
"""
population = []
# generate an initial individual, calculate its fitness and add it to our
# new population
initIndiv = self.initialIndividual()
self.calcIndividualFitness(initIndiv)
population.append(initIndiv)
# until we have filled the population
i = 0
while i < self.populationSize:
# keep mutating the initial individual to get new ones
mutatedIndiv = self.mutateIndividual(initIndiv)
# if that new individual is in the population, don't add it, try
# getting a new one
while self.isIndividualInPopulation(mutatedIndiv, population) == True:
mutatedIndiv = self.mutateIndividual(initIndiv)
self.calcIndividualFitness(mutatedIndiv)
population.append(mutatedIndiv)
i += 1
self.sortPopulation(population)
return population
def mutateIndividual(self, originalIndiv):
"""
Gets an individual and returns a mutation of it.
"""
newIndiv = Schedule()
# we need to deepcopy the schedule startTimes list object
# newIndiv.startTimes = copy.deepcopy(originalIndiv.startTimes)
for st in originalIndiv.startTimes:
newIndiv.startTimes.append([st[0], st[1], st[2]])
indivLen = len(newIndiv.startTimes)
# the plant-entrance times in the schedule should be equal to the number
# of orders! otherwise, something is wrong!
assert indivLen == len(self.orderList.orders)
indexes = range(indivLen)
# for n times (based on the individual mutation rate), mutate a random
# order plant-entrance time that we didn't mutate before.
limit = int(self.indivMutationRate * indivLen)
i = 0
while i < limit:
index = int(random.uniform(0, len(indexes)))
if newIndiv.startTimes[indexes[index]][0].currentMachine == "":
newIndiv.startTimes[indexes[index]][2] = \
self.mutateGene(newIndiv.startTimes[indexes[index]][2])
del indexes[index]
i += 1
return newIndiv
def mutateGene(self, value):
"""
Gets a value and returns a mutation of it based on the mutation range.
"""
addent = int(random.uniform(0, self.mutationRange))
if (random.uniform(0, 1) < 0.5):
addent = -addent
return max(0, value + addent)
def initialIndividual(self):
"""
Generates an initial individual based on each order's deadline minus its
minimum processing time, taking into account whether an order has a
current machine and current overtime.
"""
indiv = Schedule()
for o in self.orderList.orders:
if o.currentMachine == "":
minProcTime = o.recipe.minProcTime(self.plant)
machineName = o.recipe.recipe[0][0]
enterTime = max(0, o.deadline - minProcTime)
else:
machineName = o.currentMachine
enterTime = 0
indiv.startTimes.append([o, str(machineName), enterTime])
return indiv
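# Illustrative usage sketch (not part of the original module). The plant,
# order_list, simulator and evaluator arguments are assumed to be pre-built
# domain objects matching the constructor documented above; the XML file is
# assumed to contain an <optimizer .../> element with the attributes read
# by fromXml().
def _example_run_from_xml(filename, plant, order_list, simulator, evaluator):
    """Load an optimizer from an XML file and return the best schedule found."""
    optimizer = Optimizer.fromXmlFile(filename, plant, order_list,
                                      simulator, evaluator)
    population = optimizer.run()
    # run() returns the final population sorted by fitness, best first
    return population[0]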
|
|
#!/usr/bin/python
#
# Copyright 2018-2021 Polyaxon, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import pandas as pd
import pytest
from pandas.testing import assert_series_equal
from random import shuffle
from unittest import TestCase
from traceml.processors import df_processors
from traceml.processors.units_processors import to_percentage
@pytest.mark.processors_mark
class DataFrameSummaryTest(TestCase):
def setUp(self):
self.size = 1000
missing = [np.nan] * (self.size // 10) + list(range(10)) * (
(self.size - self.size // 10) // 10
)
shuffle(missing)
self.types = [
df_processors.DF_TYPE_NUMERIC,
df_processors.DF_TYPE_BOOL,
df_processors.DF_TYPE_CATEGORICAL,
df_processors.DF_TYPE_CONSTANT,
df_processors.DF_TYPE_UNIQUE,
df_processors.DF_TYPE_DATE,
]
self.columns = [
"dbool1",
"dbool2",
"duniques",
"dcategoricals",
"dnumerics1",
"dnumerics2",
"dnumerics3",
"dmissing",
"dconstant",
"ddates",
]
self.df = pd.DataFrame(
dict(
dbool1=np.random.choice([0, 1], size=self.size),
dbool2=np.random.choice(["a", "b"], size=self.size),
duniques=["x{}".format(i) for i in range(self.size)],
dcategoricals=[
"a".format(i)
if i % 2 == 0
else "b".format(i)
if i % 3 == 0
else "c".format(i)
for i in range(self.size)
],
dnumerics1=range(self.size),
dnumerics2=range(self.size, 2 * self.size),
dnumerics3=list(range(self.size - self.size // 10))
+ list(range(-self.size // 10, 0)),
dmissing=missing,
dconstant=["a"] * self.size,
ddates=pd.date_range("2010-01-01", periods=self.size, freq="1M"),
)
)
self.column_stats = df_processors.get_df_column_stats(self.df)
self.columns_types = df_processors.get_df_columns_types(self.column_stats)
def test_get_columns_works_as_expected(self):
assert len(df_processors.get_df_columns(self.df, df_processors.ALL)) == 10
assert (
len(
df_processors.get_df_columns(
self.df,
df_processors.INCLUDE,
["dnumerics1", "dnumerics2", "dnumerics3"],
)
)
== 3
)
assert (
len(
df_processors.get_df_columns(
self.df,
df_processors.EXCLUDE,
["dnumerics1", "dnumerics2", "dnumerics3"],
)
)
== 7
)
def test_column_types_works_as_expected(self):
expected = pd.Series(index=self.types, data=[4, 2, 1, 1, 1, 1], name="types")
assert_series_equal(self.columns_types[self.types], expected[self.types])
def test_column_stats_works_as_expected(self):
self.assertTupleEqual(self.column_stats.shape, (5, 10))
# counts
expected = pd.Series(
index=self.columns, data=self.size, name="counts", dtype="object"
)
expected["dmissing"] -= 100
assert_series_equal(
self.column_stats[self.columns].loc["counts"], expected[self.columns]
)
# uniques
expected = pd.Series(
index=self.columns, data=self.size, name="uniques", dtype="object"
)
expected[["dbool1", "dbool2"]] = 2
expected[["dcategoricals"]] = 3
expected[["dconstant"]] = 1
expected[["dmissing"]] = 10
assert_series_equal(
self.column_stats[self.columns].loc["uniques"].sort_index(),
expected[self.columns].sort_index(),
check_dtype=False,
)
# missing
expected = pd.Series(index=self.columns, data=0, name="missing", dtype="object")
expected[["dmissing"]] = 100
assert_series_equal(
self.column_stats[self.columns].loc["missing"],
expected[self.columns],
check_dtype=False,
)
# missing_perc
expected = pd.Series(
index=self.columns, data=["0%"] * 10, name="missing_perc", dtype="object"
)
expected[["dmissing"]] = "10%"
assert_series_equal(
self.column_stats[self.columns].loc["missing_perc"], expected[self.columns]
)
# types
expected = pd.Series(
index=self.columns, data=[np.nan] * 10, name="types", dtype="object"
)
expected[["dbool1", "dbool2"]] = df_processors.DF_TYPE_BOOL
expected[["dcategoricals"]] = df_processors.DF_TYPE_CATEGORICAL
expected[["dconstant"]] = df_processors.DF_TYPE_CONSTANT
expected[["ddates"]] = df_processors.DF_TYPE_DATE
expected[["duniques"]] = df_processors.DF_TYPE_UNIQUE
expected[
["dnumerics1", "dnumerics2", "dnumerics3", "dmissing"]
] = df_processors.DF_TYPE_NUMERIC
assert_series_equal(
self.column_stats[self.columns].loc["types"], expected[self.columns]
)
def test_uniques_summary(self):
expected = pd.Series(
index=["counts", "uniques", "missing", "missing_perc", "types"],
data=[self.size, self.size, 0, "0%", df_processors.DF_TYPE_UNIQUE],
name="duniques",
dtype=object,
)
assert_series_equal(
df_processors.get_df_column_summary(self.df, "duniques"), expected
)
def test_constant_summary(self):
self.assertEqual(
df_processors.get_df_column_summary(self.df, "dconstant"),
"This is a constant value: a",
)
def test_bool1_summary(self):
count_values = self.df["dbool1"].value_counts()
total_count = self.df["dbool1"].count()
count0 = count_values[0]
count1 = count_values[1]
perc0 = to_percentage(count0 / total_count)
perc1 = to_percentage(count1 / total_count)
expected = pd.Series(
index=[
'"0" count',
'"0" perc',
'"1" count',
'"1" perc',
"counts",
"uniques",
"missing",
"missing_perc",
"types",
],
data=[
str(count0),
perc0,
str(count1),
perc1,
self.size,
2,
0,
"0%",
df_processors.DF_TYPE_BOOL,
],
name="dbool1",
dtype=object,
)
assert_series_equal(
df_processors.get_df_column_summary(self.df, "dbool1"), expected
)
def test_bool2_summary(self):
count_values = self.df["dbool2"].value_counts()
total_count = self.df["dbool2"].count()
count0 = count_values["a"]
count1 = count_values["b"]
perc0 = to_percentage(count0 / total_count)
perc1 = to_percentage(count1 / total_count)
expected = pd.Series(
index=[
'"a" count',
'"a" perc',
'"b" count',
'"b" perc',
"counts",
"uniques",
"missing",
"missing_perc",
"types",
],
data=[
str(count0),
perc0,
str(count1),
perc1,
self.size,
2,
0,
"0%",
df_processors.DF_TYPE_BOOL,
],
name="dbool2",
dtype=object,
)
assert_series_equal(
df_processors.get_df_column_summary(self.df, "dbool2"), expected
)
def test_categorical_summary(self):
expected = pd.Series(
index=["top", "counts", "uniques", "missing", "missing_perc", "types"],
data=["a: 500", self.size, 3, 0, "0%", df_processors.DF_TYPE_CATEGORICAL],
name="dcategoricals",
dtype=object,
)
assert_series_equal(
df_processors.get_df_column_summary(self.df, "dcategoricals"), expected
)
def test_dates_summary(self):
dmin = self.df["ddates"].min()
dmax = self.df["ddates"].max()
expected = pd.Series(
index=[
"max",
"min",
"range",
"counts",
"uniques",
"missing",
"missing_perc",
"types",
],
data=[
dmax,
dmin,
dmax - dmin,
self.size,
self.size,
0,
"0%",
df_processors.DF_TYPE_DATE,
],
name="ddates",
dtype=object,
).sort_index()
tmp = df_processors.get_df_column_summary(self.df, "ddates").sort_index()
assert_series_equal(tmp, expected)
def test_numerics_summary(self):
num1 = self.df["dnumerics1"]
dm, dmp = df_processors.get_deviation_of_mean(num1)
dam, damp = df_processors.get_median_absolute_deviation(num1)
expected = pd.Series(
index=[
"mean",
"std",
"variance",
"min",
"max",
"mode",
"5%",
"25%",
"50%",
"75%",
"95%",
"iqr",
"kurtosis",
"skewness",
"sum",
"mad",
"cv",
"zeros_num",
"zeros_perc",
"deviating_of_mean",
"deviating_of_mean_perc",
"deviating_of_median",
"deviating_of_median_perc",
"top_correlations",
"counts",
"uniques",
"missing",
"missing_perc",
"types",
],
data=[
num1.mean(),
num1.std(),
num1.var(),
num1.min(),
num1.max(),
num1.mode()[0],
num1.quantile(0.05),
num1.quantile(0.25),
num1.quantile(0.5),
num1.quantile(0.75),
num1.quantile(0.95),
num1.quantile(0.75) - num1.quantile(0.25),
num1.kurt(),
num1.skew(),
num1.sum(),
num1.mad(),
num1.std() / num1.mean() if num1.mean() else np.nan,
self.size - np.count_nonzero(num1),
to_percentage((self.size - np.count_nonzero(num1)) / self.size),
dm,
dmp,
dam,
damp,
"dnumerics2: 100%",
self.size,
self.size,
0,
"0%",
df_processors.DF_TYPE_NUMERIC,
],
name="dnumerics1",
dtype=object,
)
assert_series_equal(
df_processors.get_df_column_summary(self.df, "dnumerics1"), expected
)
|
|
# Copyright 2016 Joel Dunham
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Contains the :class:`SyntacticcategoriesController` and its auxiliary functions.
.. module:: syntacticcategories
:synopsis: Contains the syntactic category controller and its auxiliary functions.
"""
import logging
import datetime
import simplejson as json
from pylons import request, response, config
from formencode.validators import Invalid
from onlinelinguisticdatabase.lib.base import BaseController
from onlinelinguisticdatabase.lib.schemata import SyntacticCategorySchema
import onlinelinguisticdatabase.lib.helpers as h
from onlinelinguisticdatabase.lib.SQLAQueryBuilder import SQLAQueryBuilder
from onlinelinguisticdatabase.model.meta import Session
from onlinelinguisticdatabase.model import SyntacticCategory
from forms import update_forms_containing_this_form_as_morpheme
log = logging.getLogger(__name__)
class SyntacticcategoriesController(BaseController):
"""Generate responses to requests on syntactic category resources.
REST Controller styled on the Atom Publishing Protocol.
.. note::
The ``h.jsonify`` decorator converts the return value of the methods to
JSON.
"""
query_builder = SQLAQueryBuilder('SyntacticCategory', config=config)
@h.jsonify
@h.restrict('GET')
@h.authenticate
def index(self):
"""Get all syntactic category resources.
:URL: ``GET /syntacticcategorys`` with optional query string parameters
for ordering and pagination.
:returns: a list of all syntactic category resources.
.. note::
See :func:`utils.add_order_by` and :func:`utils.add_pagination` for the
query string parameters that affect ordering and pagination.
"""
try:
query = Session.query(SyntacticCategory)
query = h.add_order_by(query, dict(request.GET), self.query_builder)
return h.add_pagination(query, dict(request.GET))
except Invalid, e:
response.status_int = 400
return {'errors': e.unpack_errors()}
@h.jsonify
@h.restrict('POST')
@h.authenticate
@h.authorize(['administrator', 'contributor'])
def create(self):
"""Create a new syntactic category resource and return it.
:URL: ``POST /syntacticcategorys``
:request body: JSON object representing the syntactic category to create.
:returns: the newly created syntactic category.
"""
try:
schema = SyntacticCategorySchema()
values = json.loads(unicode(request.body, request.charset))
data = schema.to_python(values)
syntactic_category = create_new_syntactic_category(data)
Session.add(syntactic_category)
Session.commit()
return syntactic_category
except h.JSONDecodeError:
response.status_int = 400
return h.JSONDecodeErrorResponse
except Invalid, e:
response.status_int = 400
return {'errors': e.unpack_errors()}
@h.jsonify
@h.restrict('GET')
@h.authenticate
@h.authorize(['administrator', 'contributor'])
def new(self):
"""Return the data necessary to create a new syntactic category.
:URL: ``GET /syntacticcategorys/new``.
:returns: a dictionary containing the valid syntactic category types as
defined in :mod:`onlinelinguisticdatabase.lib.utils`.
"""
return {'syntactic_category_types': h.syntactic_category_types}
@h.jsonify
@h.restrict('PUT')
@h.authenticate
@h.authorize(['administrator', 'contributor'])
def update(self, id):
"""Update a syntactic category and return it.
:URL: ``PUT /syntacticcategorys/id``
:Request body: JSON object representing the syntactic category with updated attribute values.
:param str id: the ``id`` value of the syntactic category to be updated.
:returns: the updated syntactic category model.
"""
syntactic_category = Session.query(SyntacticCategory).get(int(id))
if syntactic_category:
try:
old_name = syntactic_category.name
schema = SyntacticCategorySchema()
values = json.loads(unicode(request.body, request.charset))
state = h.get_state_object(values)
state.id = id
data = schema.to_python(values, state)
syntactic_category = update_syntactic_category(syntactic_category, data)
# syntactic_category will be False if there are no changes (cf. update_syntactic_category).
if syntactic_category:
Session.add(syntactic_category)
Session.commit()
if syntactic_category.name != old_name:
update_forms_referencing_this_category(syntactic_category)
return syntactic_category
else:
response.status_int = 400
return {'error':
u'The update request failed because the submitted data were not new.'}
except h.JSONDecodeError:
response.status_int = 400
return h.JSONDecodeErrorResponse
except Invalid, e:
response.status_int = 400
return {'errors': e.unpack_errors()}
else:
response.status_int = 404
return {'error': 'There is no syntactic category with id %s' % id}
@h.jsonify
@h.restrict('DELETE')
@h.authenticate
@h.authorize(['administrator', 'contributor'])
def delete(self, id):
"""Delete an existing syntactic category and return it.
:URL: ``DELETE /syntacticcategorys/id``
:param str id: the ``id`` value of the syntactic category to be deleted.
:returns: the deleted syntactic category model.
"""
syntactic_category = Session.query(SyntacticCategory).get(id)
if syntactic_category:
Session.delete(syntactic_category)
Session.commit()
update_forms_referencing_this_category(syntactic_category)
return syntactic_category
else:
response.status_int = 404
return {'error': 'There is no syntactic category with id %s' % id}
@h.jsonify
@h.restrict('GET')
@h.authenticate
def show(self, id):
"""Return a syntactic category.
:URL: ``GET /syntacticcategorys/id``
:param str id: the ``id`` value of the syntactic category to be returned.
:returns: a syntactic category model object.
"""
syntactic_category = Session.query(SyntacticCategory).get(id)
if syntactic_category:
return syntactic_category
else:
response.status_int = 404
return {'error': 'There is no syntactic category with id %s' % id}
@h.jsonify
@h.restrict('GET')
@h.authenticate
@h.authorize(['administrator', 'contributor'])
def edit(self, id):
"""Return a syntactic category resource and the data needed to update it.
:URL: ``GET /syntacticcategorys/edit``
:param str id: the ``id`` value of the syntactic category that will be updated.
:returns: a dictionary of the form::
{"syntactic_category": {...}, "data": {...}}
where the value of the ``syntactic_category`` key is a dictionary
representation of the syntactic category and the value of the
``data`` key is a dictionary of valid syntactic category types as
defined in :mod:`onlinelinguisticdatabase.lib.utils`.
"""
syntactic_category = Session.query(SyntacticCategory).get(id)
if syntactic_category:
return {
'data': {'syntactic_category_types': h.syntactic_category_types},
'syntactic_category': syntactic_category
}
else:
response.status_int = 404
return {'error': 'There is no syntactic category with id %s' % id}
################################################################################
# SyntacticCategory Create & Update Functions
################################################################################
def create_new_syntactic_category(data):
"""Create a new syntactic category.
:param dict data: the data for the syntactic category to be created.
:returns: an SQLAlchemy model object representing the syntactic category.
"""
syntactic_category = SyntacticCategory()
syntactic_category.name = h.normalize(data['name'])
syntactic_category.type = data['type']
syntactic_category.description = h.normalize(data['description'])
syntactic_category.datetime_modified = datetime.datetime.utcnow()
return syntactic_category
def update_syntactic_category(syntactic_category, data):
"""Update a syntactic category.
:param syntactic_category: the syntactic category model to be updated.
:param dict data: representation of the updated syntactic category.
:returns: the updated syntactic category model or, if ``changed`` has not
been set to ``True``, ``False``.
"""
changed = False
# Unicode Data
changed = syntactic_category.set_attr('name', h.normalize(data['name']), changed)
changed = syntactic_category.set_attr('type', h.normalize(data['type']), changed)
changed = syntactic_category.set_attr('description', h.normalize(data['description']), changed)
if changed:
syntactic_category.datetime_modified = datetime.datetime.utcnow()
return syntactic_category
return changed
def update_forms_referencing_this_category(syntactic_category):
"""Update all forms that reference a syntactic category.
:param syntactic_category: a syntactic category model object.
:returns: ``None``
.. note::
This function is only called when a syntactic category is deleted or
when its name is changed.
"""
forms_of_this_category = syntactic_category.forms
for form in forms_of_this_category:
update_forms_containing_this_form_as_morpheme(form)
|
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tensorflow.ops.math_ops.matrix_inverse."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes as dtypes_lib
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gradient_checker
from tensorflow.python.ops import linalg_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.platform import test
class SelfAdjointEigTest(test.TestCase):
def testWrongDimensions(self):
# The input to self_adjoint_eig should be a tensor of
# at least rank 2.
scalar = constant_op.constant(1.)
with self.assertRaises(ValueError):
linalg_ops.self_adjoint_eig(scalar)
vector = constant_op.constant([1., 2.])
with self.assertRaises(ValueError):
linalg_ops.self_adjoint_eig(vector)
def SortEigenDecomposition(e, v):
if v.ndim < 2:
return e, v
else:
perm = np.argsort(e, -1)
return np.take(e, perm, -1), np.take(v, perm, -1)
def NormalizeEigenvectorsPhase(v):
"""Normalizes the phase of the Eigenvectors stored in the columns of `v`.
(complex) Eigenvectors are only unique up to an arbitrary phase.
We normalize the vectors such that the first component has phase 0.
Args:
v: `np.ndarray` with Eigenvectors as returned from `np.linalg.eigh`.
Returns:
`np.ndarray` normalized Eigenvectors.
"""
reference = v / np.linalg.norm(v[..., 0:1, :], axis=-1, keepdims=True)
return v * reference.conj()
def _GetSelfAdjointEigTest(dtype_, shape_, compute_v_):
def CompareEigenVectors(self, x, y, tol):
x = NormalizeEigenvectorsPhase(x)
y = NormalizeEigenvectorsPhase(y)
self.assertAllClose(x, y, atol=tol, rtol=tol)
def CompareEigenDecompositions(self, x_e, x_v, y_e, y_v, tol):
num_batches = int(np.prod(x_e.shape[:-1]))
n = x_e.shape[-1]
x_e = np.reshape(x_e, [num_batches] + [n])
x_v = np.reshape(x_v, [num_batches] + [n, n])
y_e = np.reshape(y_e, [num_batches] + [n])
y_v = np.reshape(y_v, [num_batches] + [n, n])
for i in range(num_batches):
x_ei, x_vi = SortEigenDecomposition(x_e[i, :], x_v[i, :, :])
y_ei, y_vi = SortEigenDecomposition(y_e[i, :], y_v[i, :, :])
self.assertAllClose(x_ei, y_ei, atol=tol, rtol=tol)
CompareEigenVectors(self, x_vi, y_vi, tol)
def Test(self):
np.random.seed(1)
n = shape_[-1]
batch_shape = shape_[:-2]
np_dtype = dtype_.as_numpy_dtype
a = np.random.uniform(
low=-1.0, high=1.0, size=n * n).reshape([n, n]).astype(np_dtype)
if dtype_.is_complex:
a += 1j * np.random.uniform(
low=-1.0, high=1.0, size=n * n).reshape([n, n]).astype(np_dtype)
a += np.conj(a.T)
a = np.tile(a, batch_shape + (1, 1))
if dtype_ in (dtypes_lib.float32, dtypes_lib.complex64):
atol = 1e-4
else:
atol = 1e-12
np_e, np_v = np.linalg.eigh(a)
with self.test_session():
if compute_v_:
tf_e, tf_v = linalg_ops.self_adjoint_eig(constant_op.constant(a))
# Check that V*diag(E)*V^T is close to A.
a_ev = math_ops.matmul(
math_ops.matmul(tf_v, array_ops.matrix_diag(tf_e)),
tf_v,
adjoint_b=True)
self.assertAllClose(a_ev.eval(), a, atol=atol)
# Compare to numpy.linalg.eigh.
CompareEigenDecompositions(self, np_e, np_v,
tf_e.eval(), tf_v.eval(), atol)
else:
tf_e = linalg_ops.self_adjoint_eigvals(constant_op.constant(a))
self.assertAllClose(
np.sort(np_e, -1), np.sort(tf_e.eval(), -1), atol=atol)
return Test
class SelfAdjointEigGradTest(test.TestCase):
pass # Filled in below
def _GetSelfAdjointEigGradTest(dtype_, shape_, compute_v_):
def Test(self):
np.random.seed(1)
n = shape_[-1]
batch_shape = shape_[:-2]
np_dtype = dtype_.as_numpy_dtype
a = np.random.uniform(
low=-1.0, high=1.0, size=n * n).reshape([n, n]).astype(np_dtype)
if dtype_.is_complex:
a += 1j * np.random.uniform(
low=-1.0, high=1.0, size=n * n).reshape([n, n]).astype(np_dtype)
a += np.conj(a.T)
a = np.tile(a, batch_shape + (1, 1))
# Optimal stepsize for central difference is O(epsilon^{1/3}).
epsilon = np.finfo(np_dtype).eps
delta = 0.1 * epsilon**(1.0 / 3.0)
# tolerance obtained by looking at actual differences using
# np.linalg.norm(theoretical-numerical, np.inf) on -mavx build
if dtype_ in (dtypes_lib.float32, dtypes_lib.complex64):
tol = 1e-2
else:
tol = 1e-7
with self.test_session():
tf_a = constant_op.constant(a)
if compute_v_:
tf_e, tf_v = linalg_ops.self_adjoint_eig(tf_a)
# (complex) Eigenvectors are only unique up to an arbitrary phase
# We normalize the vectors such that the first component has phase 0.
reference = tf_v / linalg_ops.norm(
tf_v[..., 0:1, :], axis=-1, keep_dims=True)
tf_v *= math_ops.conj(reference)
outputs = [tf_e, tf_v]
else:
tf_e = linalg_ops.self_adjoint_eigvals(tf_a)
outputs = [tf_e,]
for b in outputs:
x_init = np.random.uniform(
low=-1.0, high=1.0, size=n * n).reshape([n, n]).astype(np_dtype)
if dtype_.is_complex:
x_init += 1j * np.random.uniform(
low=-1.0, high=1.0, size=n * n).reshape([n, n]).astype(np_dtype)
x_init += np.conj(x_init.T)
x_init = np.tile(x_init, batch_shape + (1, 1))
theoretical, numerical = gradient_checker.compute_gradient(
tf_a,
tf_a.get_shape().as_list(),
b,
b.get_shape().as_list(),
x_init_value=x_init,
delta=delta)
self.assertAllClose(theoretical, numerical, atol=tol, rtol=tol)
return Test
if __name__ == '__main__':
for compute_v in [True, False]:
for dtype in (
dtypes_lib.float32, dtypes_lib.float64,
dtypes_lib.complex64, dtypes_lib.complex128):
for size in 1, 2, 5, 10:
for batch_dims in [(), (3,)] + [(3, 2)] * (max(size, size) < 10):
shape = batch_dims + (size, size)
name = '%s_%s_%s' % (dtype, '_'.join(map(str, shape)), compute_v)
setattr(SelfAdjointEigTest, 'testSelfAdjointEig_' + name,
_GetSelfAdjointEigTest(dtype, shape, compute_v))
setattr(SelfAdjointEigGradTest, 'testSelfAdjointEigGrad_' + name,
_GetSelfAdjointEigGradTest(dtype, shape, compute_v))
test.main()
|
|
#pylint: skip-file
"""
This module handles all the phishing related operations for
Wifiphisher.py
"""
import os
from wifiphisher.common.constants import *
from shutil import copyfile
import ConfigParser
def config_section_map(config_file, section):
"""
Map the values of a config file to a dictionary.
"""
config = ConfigParser.ConfigParser()
config.read(config_file)
dict1 = {}
if section not in config.sections():
return dict1
options = config.options(section)
for option in options:
try:
dict1[option] = config.get(section, option)
except:
dict1[option] = None
return dict1
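# Illustrative example (hypothetical file contents): given a template
# config.ini such as
#
#   [info]
#   name = Example Portal
#   description = A sample captive portal page
#
# config_section_map(config_path, 'info') returns
# {'name': 'Example Portal', 'description': 'A sample captive portal page'}
# and an empty dict if the section is missing.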
class InvalidTemplate(Exception):
""" Exception class to raise in case of a invalid template """
def __init__(self):
Exception.__init__(self, "The given template is either invalid or " +
"not available locally!")
class PhishingTemplate(object):
""" This class represents phishing templates """
def __init__(self, name):
"""
Construct object.
:param self: A PhishingTemplate object
:type self: PhishingScenario
:return: None
:rtype: None
.. todo:: Maybe add a category field
"""
# setup all the variables
config_path = os.path.join(PHISHING_PAGES_DIR, name, 'config.ini')
info = config_section_map(config_path, 'info')
self._name = name
self._display_name = info['name']
self._description = info['description']
self._payload = False
if 'payloadpath' in info:
self._payload = info['payloadpath']
self._path = PHISHING_PAGES_DIR + self._name.lower() + "/"
self._path_static = PHISHING_PAGES_DIR + self._name.lower() + "/static/"
self._context = config_section_map(config_path, 'context')
self._extra_files = []
def merge_context(self, context):
"""
Merge the given context dict with the current one.
In case of conflict, always keep the current values.
"""
context.update(self._context)
self._context = context
def get_context(self):
"""
Return the context of the template.
:param self: A PhishingTemplate object
:type self: PhishingTemplate
:return: the context of the template
:rtype: dict
"""
return self._context
def get_display_name(self):
"""
Return the display name of the template.
:param self: A PhishingTemplate object
:type self: PhishingTemplate
:return: the display name of the template
:rtype: str
"""
return self._display_name
def get_payload_path(self):
"""
Return the payload path of the template.
:param self: A PhishingTemplate object
:type self: PhishingTemplate
:return: the payload path of the template, or False if it has none
:rtype: str or bool
"""
return self._payload
def has_payload(self):
"""
Return whether the template has a payload.
:param self: A PhishingTemplate object
:type self: PhishingTemplate
:return: True if the template has a payload, False otherwise
:rtype: bool
"""
if self._payload:
return True
return False
def get_description(self):
"""
Return the description of the template.
:param self: A PhishingTemplate object
:type self: PhishingTemplate
:return: the description of the template
:rtype: str
"""
return self._description
def get_path(self):
"""
Return the path of the template files.
:param self: A PhishingTemplate object
:type self: PhishingTemplate
:return: the path of template files
:rtype: str
"""
return self._path
def get_path_static(self):
"""
Return the path of the static template files.
JS, CSS and image files live there.
:param self: A PhishingTemplate object
:type self: PhishingTemplate
:return: the path of static template files
:rtype: str
"""
return self._path_static
def use_file(self, path):
"""
Copies a file in the filesystem to the path
of the template files.
:param self: A PhishingTemplate object
:type self: PhishingTemplate
:param path: path of the file that is to be copied
:type path: str
:return: the path of the file under the template files
:rtype: str
"""
if path is not None and os.path.isfile(path):
filename = os.path.basename(path)
copyfile(path, self.get_path_static() + filename)
self._extra_files.append(self.get_path_static() + filename)
return filename
def remove_extra_files(self):
"""
Removes extra used files (if any)
:param self: A PhishingTemplate object
:type self: PhishingTemplate
:return: None
:rtype: None
"""
for f in self._extra_files:
if os.path.isfile(f):
os.remove(f)
def __str__(self):
"""
Return a string representation of the template.
:param self: A PhishingTemplate object
:type self: PhishingTemplate
:return: the name followed by the description of the template
:rtype: str
"""
return (self._display_name + "\n\t" + self._description + "\n")
class TemplateManager(object):
""" This class handles all the template management operations """
def __init__(self):
"""
Construct object.
:param self: A TemplateManager object
:type self: TemplateManager
:return: None
:rtype: None
"""
# setup the templates
self._template_directory = PHISHING_PAGES_DIR
page_dirs = os.listdir(PHISHING_PAGES_DIR)
self._templates = {}
for page in page_dirs:
if os.path.isdir(os.path.join(self._template_directory, page)):
self._templates[page] = PhishingTemplate(page)
# add all the user templates to the database
self.add_user_templates()
def get_templates(self):
"""
Return all the available templates.
:param self: A TemplateManager object
:type self: TemplateManager
:return: all the available templates
:rtype: dict
"""
return self._templates
def find_user_templates(self):
"""
Return all the user's templates available.
:param self: A TemplateManager object
:type self: TemplateManager
:return: all the local templates available
:rtype: list
.. todo:: check to make sure directory contains HTML files
"""
# a list to store file names in
local_templates = []
# loop through the directory content
for name in os.listdir(self._template_directory):
# check to see if it is a directory and not in the database
if (os.path.isdir(os.path.join(self._template_directory, name)) and
name not in self._templates):
# add it to the list
local_templates.append(name)
return local_templates
def add_user_templates(self):
"""
Add all the user templates to the database.
:param self: A TemplateManager object
:type self: TemplateManager
:return: None
:rtype: None
"""
# get all the user's templates
user_templates = self.find_user_templates()
# loop through the templates
for template in user_templates:
# create a template object and add it to the database
local_template = PhishingTemplate(template)
self._templates[template] = local_template
def on_exit(self):
"""
Delete any extra files on exit
:param self: A TemplateManager object
:type self: TemplateManager
:return: None
:rtype: None
"""
for templ_name, templ_obj in self._templates.iteritems():
templ_obj.remove_extra_files()
|
|
#!/usr/bin/env python2.7
# -*- coding: utf-8 -*-
"""
timer.py
Created on Wed Apr 24 12:00:00 2013
@author: kyle
"""
from __future__ import print_function
import forseti2
import configurator
import json
import lcm
import threading
import time
import random
import os
import settings
import util
import LCMNode
Node = LCMNode.Node
LCMNode = LCMNode.LCMNode
class Timer(object):
def __init__(self):
self.segments = []
self.segment_start_time = time.time()
self.running = False
def _this_segment_time(self):
return time.time() - self.segment_start_time
def time(self):
if self.running:
return sum(self.segments) + self._this_segment_time()
else:
return sum(self.segments)
def start(self):
if not self.running:
self.running = True
self.segment_start_time = time.time()
return self
def pause(self):
if not self.running:
return self
self.running = False
self.segments.append(self._this_segment_time())
return self
def add(self, additional_time):
self.segments.append(additional_time)
def subtract(self, less_time):
self.segments.append(-less_time)
def reset(self):
self.__init__()
return self
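# Illustrative usage (not part of the original module): a Timer accumulates
# the lengths of completed segments and, while running, also counts the
# elapsed time of the current segment:
#
#   t = Timer().start()
#   # ... roughly one second later ...
#   t.pause()     # records a ~1s segment
#   t.add(5)      # manual adjustment
#   t.time()      # ~6.0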
class Period(object):
def __init__(self, name, length, autocontinue=False):
self.name = name
self.length = length
self.autocontinue = autocontinue
class MatchTimer(LCMNode):
def __init__(self, lc, match):
self.stage_ended = False
self.lc = lc
self.match = match
self.stages = [Period('Setup', 0),
Period('Autonomous', settings.AUTONOMOUS_LENGTH_SECONDS, True), Period('Paused', 0),
Period('Teleop', settings.TELEOP_LENGTH_SECONDS, True), Period('End', 0)]
self.stage_index = 0
self.match_timer = Timer()
self.stage_timer = Timer()
self.lc.subscribe('Timer/Control', self.handle_control)
self.lc.subscribe('Match/Init', self.handle_init)
self.start_thread()
self.on_stage_change(None, self.stages[0])
def reset(self):
self.stage_index = 0
self.match_timer.reset()
self.stage_timer.reset()
self.on_stage_change(self.stages[self.stage_index], self.stages[0])
self.stage_ended = True
def current_stage(self):
return self.stages[self.stage_index]
def check_for_stage_change(self):
if self.stage_timer.time() > self.current_stage().length:
self.stage_timer.reset()
self.stage_ended = True
if self.current_stage().autocontinue:
self.stage_index += 1
self.on_stage_change(self.stages[self.stage_index - 1],
self.stages[self.stage_index])
self.pause()
else:
#print('Stage = ', self.current_stage().name)
self.pause()
def on_stage_change(self, old_stage, new_stage):
if new_stage.name == 'Setup':
self.match.stage = 'Autonomous'
self.match.disable_all()
self.pause()
elif new_stage.name == 'Autonomous':
self.match.stage = 'Autonomous'
self.match.enable_all()
elif new_stage.name == 'Paused':
self.match.stage = 'Paused'
self.match.disable_all()
self.pause()
elif new_stage.name == 'Teleop':
self.match.stage = 'Teleop'
self.match.enable_all()
elif new_stage.name == 'End':
self.match.stage = 'End'
self.pause()
def start(self):
self.match_timer.start()
self.stage_timer.start()
if self.stage_ended and self.stage_index + 1 < len(self.stages):
self.stage_index += 1
self.stage_ended = False
self.on_stage_change(self.stages[self.stage_index - 1],
self.stages[self.stage_index])
def pause(self):
self.stage_timer.pause()
self.match_timer.pause()
def reset_stage(self):
# Timer has no stop(); pause the match timer instead
self.match_timer.pause()
self.stage_timer.reset()
def reset_match(self):
self.stage_index = 0
self.stage_timer.reset()
self.match_timer.reset()
def run(self):
while self.stage_index < len(self.stages):
time.sleep(0.3)
self.check_for_stage_change()
self.match.time = int(self.match_timer.time())
msg = forseti2.Time()
msg.game_time_so_far = self.match_timer.time()
msg.stage_time_so_far = self.stage_timer.time()
msg.total_stage_time = self.current_stage().length
msg.stage_name = self.current_stage().name
self.lc.publish('Timer/Time', msg.encode())
def handle_control(self, channel, data):
msg = forseti2.TimeControl.decode(data)
print('Received command', msg.command_name)
func = {
'pause': self.pause,
'start': self.start,
'reset_match': self.reset_match,
'reset_stage': self.reset_stage
}[msg.command_name]
func()
def handle_init(self, channel, data):
msg = forseti2.Match.decode(data)
self.match.teams = [Team(msg.team_numbers[i], msg.team_names[i]) for i in range(4)]
self.reset()
class Team(object):
def __init__(self, number, name=None):
self.number = number
if name is None:
self.name = configurator.get_team_name(number)
else:
self.name = name
self.teleop = False
self.halt_radio = False
self.auto = False
self.enabled = False
def toggle(self):
self.enabled = not self.enabled
class Match(object):
def __init__(self, team_numbers):
self.teams = [Team(num) for num in team_numbers]
self.stage = 'Setup'
self.time = 0
def get_team(self, team_number):
for team in self.teams:
if team.number == team_number:
return team
def enable_all(self):
for team in self.teams:
team.enabled = True
def disable_all(self):
for team in self.teams:
team.enabled = False
class ControlDataSender(Node):
def __init__(self, lc, match, timer):
self.lc = lc
self.match = match
self.timer = timer
self.thread = threading.Thread()
self.thread.daemon = True
self.start_thread()
self.seq = 0;
def _loop(self):
while True:
time.sleep(0.5)
for i in range(len(self.match.teams)):
self.send(i + 1, self.match.teams[i])
def send(self, piemos_num, team):
#print('Sending')
msg = forseti2.ControlData()
msg.TeleopEnabled = self.match.stage in ['Teleop', 'Paused']
msg.HaltRadio = False
msg.AutonomousEnabled = self.match.stage == 'Autonomous'
msg.RobotEnabled = self.timer.match_timer.running
msg.Stage = self.match.stage
msg.Time = self.match.time
"""
msg = forseti2.piemos_cmd()
msg.header = forseti2.header()
msg.header.seq = self.seq;
self.seq += 1;
msg.header.time = time.time()
msg.auton = self.match.stage == 'Autonomous'
msg.enabled = self.timer.match_timer.running"""
self.lc.publish('piemos/Control', msg.encode())
'''
TODO This does not appear to be used anywhere.
'''
class RemoteTimer(object):
def __init__(self):
self.lc = lcm.LCM(settings.LCM_URI)
def send(self, command):
print('Sending', command)
msg = forseti2.TimeControl()
msg.command_name = command
self.lc.publish('Timer/Control', msg.encode())
def pause(self):
self.send('pause')
def start(self):
self.send('start')
def reset_match(self):
self.send('reset_match')
def reset_stage(self):
self.send('reset_stage')
def main():
lc = lcm.LCM(settings.LCM_URI)
match = Match([0] * 4)
timer = MatchTimer(lc, match)
cd_sender = ControlDataSender(lc, match, timer)
timer.run()
if __name__ == '__main__':
main()
|
|
"""
drfjsonapi.filters
~~~~~~~~~~~~~~~~~~~
DRF filter backends to assist with a JSON API spec
compliant API.
"""
import itertools
import re
from django.db.models import Q
from rest_framework.exceptions import ValidationError
from rest_framework.filters import BaseFilterBackend, OrderingFilter
from .exceptions import InvalidFilterParam, InvalidIncludeParam, InvalidSortParam
from .utils import _get_relationship, _to_set
class JsonApiBackend(object):
""" For easy `isinstance` checks """
pass
class JsonApiFieldFilter(JsonApiBackend, BaseFilterBackend):
""" Support the filtering of arbitrary resource fields """
fields = {}
max_params = 15
def filter_queryset(self, request, queryset, view):
""" DRF entry point into the custom FilterBackend """
filters = self.to_internal_value(request)
filters = self.validate(filters)
return self.apply_filter(queryset, filters)
def apply_filter(self, queryset, filters):
""" Turn the vetted query param filters into Q object expressions """
q_filter = Q()
for param, value in filters.items():
q_filter.add(Q((param, value)), Q.AND)
return queryset.filter(q_filter)
def to_internal_value(self, request):
""" Coerce & validate the query params & values
Loop through all the query parameters & use a regular
expression to find all the filters that match a format
of:
filter[<field>__<lookup>]=<value>
An example filter of `filter[home__city__exact]=Orlando`
would return a dict of:
{'home__city__exact': 'Orlando'}
"""
filters = {}
regex = re.compile(r'^filter\[([A-Za-z0-9_.]+)\]$')
for param, value in request.query_params.items():
try:
param = regex.match(param).groups()[0]
filters[param] = value
except (AttributeError, IndexError):
continue
return filters
def validate(self, filters):
""" Hook to validate the coerced data """
if len(filters) > self.max_params:
msg = 'The request has "%s" filter query parameters which ' \
'exceeds the max number of "%s" that can be requested.' \
% (len(filters), self.max_params)
raise InvalidFilterParam(msg)
return {k: self.validate_filter(k, v) for k, v in filters.items()}
def validate_filter(self, param, value):
""" Coerce & validate each query param & value one-by-one """
# pylint: disable=invalid-name,unused-variable
field, _, lookup = param.rpartition('__')
try:
validator = self.fields[field]
return validator.validate(lookup, value)
except KeyError:
msg = 'The "%s" filter query parameter is invalid, the ' \
'"%s" field either does not exist on the requested ' \
'resource or you are not allowed to filter on it.' \
% (param, field)
raise InvalidFilterParam(msg)
except ValidationError as exc:
            msg = 'The "%s" filter query parameter value failed ' \
'validation checks with the following error(s): ' \
'%s' % (param, ' '.join(exc.detail))
raise InvalidFilterParam(msg)
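# Usage sketch for JsonApiFieldFilter above (the `ResourceFilter` backend and
# the `SomeFieldValidator` for 'home__city' are illustrative assumptions, not
# part of this module):
#
#     class ResourceFilter(JsonApiFieldFilter):
#         fields = {'home__city': SomeFieldValidator()}
#
#     # GET /resources?filter[home__city__exact]=Orlando
#     # to_internal_value() -> {'home__city__exact': 'Orlando'}
#     # validate_filter() -> SomeFieldValidator().validate('exact', 'Orlando')
#     # apply_filter()    -> queryset.filter(Q(('home__city__exact', 'Orlando')))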
class JsonApiIncludeFilter(JsonApiBackend, BaseFilterBackend):
""" Support the inclusion of compound documents
    The sanitized include query params will be available on the
request object via a `jsonapi_include` attribute.
"""
fields = {}
max_params = 15
def filter_queryset(self, request, queryset, view):
""" DRF entry point into the custom FilterBackend """
include = self.to_internal_value(request)
include = self.validate(include)
request.jsonapi_include = include
return self.apply_filter(queryset, include)
def apply_filter(self, queryset, include):
""" Return a filtered queryset for the query params """
return queryset.prefetch_related(*include)
def to_internal_value(self, request):
""" Return the sanitized `include` query parameters
        Handles comma-separated & multiple include params & returns
        a tuple of duplicate-free strings.
"""
include = request.query_params.getlist('include')
include = (name.split(',') for name in include)
include = list(itertools.chain(*include))
return tuple(set(include))
def to_representation(self, serializer, context=None):
""" Return the JSON API include array """
try:
include = context['request'].jsonapi_include
except (AttributeError, KeyError, TypeError):
include = []
if not include or not serializer.instance:
return []
        # de-duplicate serializers & models by using the
        # serializer as a key & a set of models as the value
icache = {v: set() for k, v in self.fields.items()}
models = _to_set(serializer.instance)
for model in models:
for field in include:
cache_set = icache[self.fields[field]]
cache_set.update(_to_set(_get_relationship(model, field)))
# prune dupes in the include cache that are also present
# in the primary data.
_class = serializer.__class__
if _class in icache:
icache[_class] = icache[_class].difference(models)
return [
serializer(context=context).to_representation(model)
for serializer, models in icache.items() for model in models
]
def validate(self, include):
""" Hook to validate the coerced include """
if len(include) > self.max_params:
msg = 'The request has "%s" include query parameters which ' \
'exceeds the max number of "%s" that can be requested.' \
% (len(include), self.max_params)
raise InvalidIncludeParam(msg)
for name in include:
if name not in self.fields:
msg = 'The "%s" include query parameter is not supported ' \
'by this endpoint.' % name
raise InvalidIncludeParam(msg)
return include
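# Include handling sketch for JsonApiIncludeFilter above ('author' and
# 'comments' are illustrative relationship names): a request like
#   GET /resources?include=author&include=comments,author
# gives to_internal_value() == ('author', 'comments') (deduped, order not
# guaranteed), validate() checks each name against `fields`, and
# apply_filter() calls queryset.prefetch_related('author', 'comments').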
class JsonApiSortFilter(JsonApiBackend, OrderingFilter):
""" Override default OrderingFilter to be JSON API compliant
If the global `max_sorts` property limit is not exceeded then
    each sort is tested for eligibility. To be eligible it MUST
meet all of the following criteria:
1. The sort cannot be a relationship sort. That is not
supported currently & is generally frowned upon.
2. Be present in the list of the DRF superclasses
`get_valid_fields` method
    Step 2 is standard DRF OrderingFilter logic, so read its
documentation for more info.
"""
max_sorts = 3
ordering_param = 'sort'
relation_sep = '.'
def remove_invalid_fields(self, queryset, fields, view, request):
""" Override the default to support exception handling """
if len(fields) > self.max_sorts:
msg = 'Sorting on "%s" fields exceeds the maximum number of ' \
'"%s" sortable fields' % (len(fields), self.max_sorts)
raise InvalidSortParam(msg)
allow = [i[0] for i in self.get_valid_fields(queryset, view)]
for field in fields:
if not field.lstrip('-') in allow:
msg = 'The "%s" sort query parameter either does not ' \
'exist or you are not allowed to sort on it' % field
raise InvalidSortParam(msg)
elif self.relation_sep in field:
msg = 'The "%s" sort query parameter is not allowed due to ' \
'unpredictable results when sorting on relationships' % field
raise InvalidSortParam(msg)
return fields
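# Sort handling sketch for JsonApiSortFilter above ('created', 'title' and
# 'author.name' are illustrative field names): with ordering_param = 'sort',
# a request like GET /resources?sort=-created,title is accepted when both
# fields are returned by get_valid_fields(); a sort such as 'author.name' is
# rejected (either absent from the valid fields or containing the
# relation_sep '.'), and more than max_sorts (3) fields raises
# InvalidSortParam.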
|
|
from __future__ import division
import math, random, re
from collections import defaultdict
class Table:
def __init__(self, columns):
self.columns = columns
self.rows = []
def __repr__(self):
"""pretty representation of the table: columns then rows"""
return str(self.columns) + "\n" + "\n".join(map(str, self.rows))
def insert(self, row_values):
if len(row_values) != len(self.columns):
raise TypeError("wrong number of elements")
row_dict = dict(zip(self.columns, row_values))
self.rows.append(row_dict)
def update(self, updates, predicate):
for row in self.rows:
if predicate(row):
for column, new_value in updates.iteritems():
row[column] = new_value
def delete(self, predicate=lambda row: True):
"""delete all rows matching predicate
or all rows if no predicate supplied"""
self.rows = [row for row in self.rows if not(predicate(row))]
def select(self, keep_columns=None, additional_columns=None):
if keep_columns is None: # if no columns specified,
keep_columns = self.columns # return all columns
if additional_columns is None:
additional_columns = {}
# new table for results
result_table = Table(keep_columns + additional_columns.keys())
for row in self.rows:
new_row = [row[column] for column in keep_columns]
for column_name, calculation in additional_columns.iteritems():
new_row.append(calculation(row))
result_table.insert(new_row)
return result_table
def where(self, predicate=lambda row: True):
"""return only the rows that satisfy the supplied predicate"""
where_table = Table(self.columns)
where_table.rows = filter(predicate, self.rows)
return where_table
def limit(self, num_rows=None):
"""return only the first num_rows rows"""
limit_table = Table(self.columns)
limit_table.rows = (self.rows[:num_rows]
if num_rows is not None
else self.rows)
return limit_table
def group_by(self, group_by_columns, aggregates, having=None):
grouped_rows = defaultdict(list)
# populate groups
for row in self.rows:
key = tuple(row[column] for column in group_by_columns)
grouped_rows[key].append(row)
result_table = Table(group_by_columns + aggregates.keys())
for key, rows in grouped_rows.iteritems():
if having is None or having(rows):
new_row = list(key)
for aggregate_name, aggregate_fn in aggregates.iteritems():
new_row.append(aggregate_fn(rows))
result_table.insert(new_row)
return result_table
def order_by(self, order):
new_table = self.select() # make a copy
new_table.rows.sort(key=order)
return new_table
def join(self, other_table, left_join=False):
join_on_columns = [c for c in self.columns # columns in
if c in other_table.columns] # both tables
additional_columns = [c for c in other_table.columns # columns only
if c not in join_on_columns] # in right table
# all columns from left table + additional_columns from right table
join_table = Table(self.columns + additional_columns)
for row in self.rows:
def is_join(other_row):
return all(other_row[c] == row[c] for c in join_on_columns)
other_rows = other_table.where(is_join).rows
# each other row that matches this one produces a result row
for other_row in other_rows:
join_table.insert([row[c] for c in self.columns] +
[other_row[c] for c in additional_columns])
# if no rows match and it's a left join, output with Nones
if left_join and not other_rows:
join_table.insert([row[c] for c in self.columns] +
[None for c in additional_columns])
return join_table
if __name__ == "__main__":
users = Table(["user_id", "name", "num_friends"])
users.insert([0, "Hero", 0])
users.insert([1, "Dunn", 2])
users.insert([2, "Sue", 3])
users.insert([3, "Chi", 3])
users.insert([4, "Thor", 3])
users.insert([5, "Clive", 2])
users.insert([6, "Hicks", 3])
users.insert([7, "Devin", 2])
users.insert([8, "Kate", 2])
users.insert([9, "Klein", 3])
users.insert([10, "Jen", 1])
print "users table"
print users
print
# SELECT
print "users.select()"
print users.select()
print
print "users.limit(2)"
print users.limit(2)
print
print "users.select(keep_columns=[\"user_id\"])"
print users.select(keep_columns=["user_id"])
print
print 'where(lambda row: row["name"] == "Dunn")'
print users.where(lambda row: row["name"] == "Dunn") \
.select(keep_columns=["user_id"])
print
def name_len(row): return len(row["name"])
print 'with name_length:'
print users.select(keep_columns=[],
additional_columns = { "name_length" : name_len })
print
# GROUP BY
def min_user_id(rows): return min(row["user_id"] for row in rows)
stats_by_length = users \
.select(additional_columns={"name_len" : name_len}) \
.group_by(group_by_columns=["name_len"],
aggregates={ "min_user_id" : min_user_id,
"num_users" : len })
print "stats by length"
print stats_by_length
print
def first_letter_of_name(row):
return row["name"][0] if row["name"] else ""
def average_num_friends(rows):
return sum(row["num_friends"] for row in rows) / len(rows)
def enough_friends(rows):
return average_num_friends(rows) > 1
avg_friends_by_letter = users \
.select(additional_columns={'first_letter' : first_letter_of_name}) \
.group_by(group_by_columns=['first_letter'],
aggregates={ "avg_num_friends" : average_num_friends },
having=enough_friends)
print "avg friends by letter"
print avg_friends_by_letter
print
def sum_user_ids(rows): return sum(row["user_id"] for row in rows)
user_id_sum = users \
.where(lambda row: row["user_id"] > 1) \
.group_by(group_by_columns=[],
aggregates={ "user_id_sum" : sum_user_ids })
print "user id sum"
print user_id_sum
print
# ORDER BY
friendliest_letters = avg_friends_by_letter \
.order_by(lambda row: -row["avg_num_friends"]) \
.limit(4)
print "friendliest letters"
print friendliest_letters
print
# JOINs
user_interests = Table(["user_id", "interest"])
user_interests.insert([0, "SQL"])
user_interests.insert([0, "NoSQL"])
user_interests.insert([2, "SQL"])
user_interests.insert([2, "MySQL"])
sql_users = users \
.join(user_interests) \
.where(lambda row: row["interest"] == "SQL") \
.select(keep_columns=["name"])
print "sql users"
print sql_users
print
def count_interests(rows):
"""counts how many rows have non-None interests"""
return len([row for row in rows if row["interest"] is not None])
user_interest_counts = users \
.join(user_interests, left_join=True) \
.group_by(group_by_columns=["user_id"],
aggregates={"num_interests" : count_interests })
print "user interest counts"
print user_interest_counts
# SUBQUERIES
likes_sql_user_ids = user_interests \
.where(lambda row: row["interest"] == "SQL") \
.select(keep_columns=['user_id'])
likes_sql_user_ids.group_by(group_by_columns=[],
aggregates={ "min_user_id" : min_user_id })
print "likes sql user ids"
print likes_sql_user_ids
|
|
# Copyright 2015 Cloudbase Solutions Srl
#
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from unittest import mock
import ddt
from os_win import _utils
from os_win import exceptions
from os_win.tests.unit import test_base
from os_win.utils import win32utils
from os_win.utils.winapi import constants as w_const
from os_win.utils.winapi import wintypes
@ddt.ddt
class Win32UtilsTestCase(test_base.BaseTestCase):
def setUp(self):
super(Win32UtilsTestCase, self).setUp()
self._setup_lib_mocks()
self._win32_utils = win32utils.Win32Utils()
self.addCleanup(mock.patch.stopall)
def _setup_lib_mocks(self):
self._ctypes = mock.Mock()
# This is used in order to easily make assertions on the variables
# passed by reference.
self._ctypes.byref = lambda x: (x, "byref")
self._ctypes_patcher = mock.patch.multiple(
win32utils, ctypes=self._ctypes)
self._ctypes_patcher.start()
mock.patch.multiple(win32utils,
kernel32=mock.DEFAULT,
create=True).start()
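    # Example of the byref stub above (illustrative names): because byref is
    # patched to `lambda x: (x, "byref")`, a test can assert that an argument
    # was passed by reference with a plain equality check such as
    #     some_mock.assert_called_once_with(self._ctypes.byref(buf))
    # instead of matching a real ctypes byref object (see
    # test_get_error_message below for the actual usage).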
@mock.patch.object(win32utils.Win32Utils, 'get_error_message')
@mock.patch.object(win32utils.Win32Utils, 'get_last_error')
def _test_run_and_check_output(self, mock_get_last_err, mock_get_err_msg,
ret_val=0, expected_exc=None,
**kwargs):
self._ctypes_patcher.stop()
mock_func = mock.Mock()
mock_func.return_value = ret_val
if expected_exc:
self.assertRaises(expected_exc,
self._win32_utils.run_and_check_output,
mock_func,
mock.sentinel.arg,
kwarg=mock.sentinel.kwarg,
**kwargs)
else:
actual_ret_val = self._win32_utils.run_and_check_output(
mock_func,
mock.sentinel.arg,
kwarg=mock.sentinel.kwarg,
**kwargs)
self.assertEqual(ret_val, actual_ret_val)
mock_func.assert_called_once_with(mock.sentinel.arg,
kwarg=mock.sentinel.kwarg)
return mock_get_last_err, mock_get_err_msg
def test_run_and_check_output(self):
self._test_run_and_check_output()
def test_run_and_check_output_fail_on_nonzero_ret_val(self):
ret_val = 1
(mock_get_last_err,
mock_get_err_msg) = self._test_run_and_check_output(
ret_val=ret_val,
expected_exc=exceptions.VHDWin32APIException,
failure_exc=exceptions.VHDWin32APIException)
mock_get_err_msg.assert_called_once_with(ret_val)
def test_run_and_check_output_explicit_error_ret_vals(self):
ret_val = 1
error_ret_vals = [ret_val]
(mock_get_last_err,
mock_get_err_msg) = self._test_run_and_check_output(
ret_val=ret_val,
error_ret_vals=error_ret_vals,
ret_val_is_err_code=False,
expected_exc=exceptions.Win32Exception)
mock_get_err_msg.assert_called_once_with(
win32utils.ctypes.c_ulong(mock_get_last_err).value)
def test_run_and_check_output_ignored_error(self):
ret_val = 1
ignored_err_codes = [ret_val]
self._test_run_and_check_output(ret_val=ret_val,
ignored_error_codes=ignored_err_codes)
def test_run_and_check_output_kernel32_lib_func(self):
ret_val = 0
self._test_run_and_check_output(ret_val=ret_val,
expected_exc=exceptions.Win32Exception,
kernel32_lib_func=True)
def test_run_and_check_output_with_err_msg_dict(self):
self._ctypes_patcher.stop()
err_code = 1
err_msg = 'fake_err_msg'
err_msg_dict = {err_code: err_msg}
mock_func = mock.Mock()
mock_func.return_value = err_code
try:
self._win32_utils.run_and_check_output(mock_func,
mock.sentinel.arg,
error_msg_src=err_msg_dict)
except Exception as ex:
self.assertIsInstance(ex, exceptions.Win32Exception)
self.assertIn(err_msg, ex.message)
@mock.patch.object(win32utils.Win32Utils, '_run_and_check_output')
def test_run_and_check_output_eventlet_nb_mode_disabled(self, mock_helper):
self._win32_utils.run_and_check_output(
mock.sentinel.func,
mock.sentinel.arg,
eventlet_nonblocking_mode=False)
mock_helper.assert_called_once_with(mock.sentinel.func,
mock.sentinel.arg)
@mock.patch.object(_utils, 'avoid_blocking_call')
def test_run_and_check_output_eventlet_nb_mode_enabled(self, mock_helper):
self._win32_utils.run_and_check_output(
mock.sentinel.func,
mock.sentinel.arg,
eventlet_nonblocking_mode=True)
mock_helper.assert_called_once_with(
self._win32_utils._run_and_check_output,
mock.sentinel.func,
mock.sentinel.arg)
def test_get_error_message(self):
err_msg = self._win32_utils.get_error_message(mock.sentinel.err_code)
fake_msg_buff = win32utils.ctypes.c_char_p.return_value
expected_flags = (w_const.FORMAT_MESSAGE_FROM_SYSTEM |
w_const.FORMAT_MESSAGE_ALLOCATE_BUFFER |
w_const.FORMAT_MESSAGE_IGNORE_INSERTS)
win32utils.kernel32.FormatMessageA.assert_called_once_with(
expected_flags, None, mock.sentinel.err_code, 0,
win32utils.ctypes.byref(fake_msg_buff), 0, None)
self.assertEqual(fake_msg_buff.value, err_msg)
def test_get_last_error(self):
last_err = self._win32_utils.get_last_error()
self.assertEqual(win32utils.kernel32.GetLastError.return_value,
last_err)
win32utils.kernel32.SetLastError.assert_called_once_with(0)
@ddt.data(0, 1)
@mock.patch.object(win32utils.LOG, 'exception')
def test_local_free(self, ret_val, mock_log_exc):
mock_localfree = win32utils.kernel32.LocalFree
mock_localfree.return_value = ret_val
self._win32_utils.local_free(mock.sentinel.handle)
mock_localfree.assert_any_call(mock.sentinel.handle)
self.assertEqual(bool(ret_val), mock_log_exc.called)
@mock.patch.object(win32utils.Win32Utils, 'run_and_check_output')
def test_wait_for_multiple_objects(self, mock_helper):
fake_handles = [10, 11]
ret_val = self._win32_utils.wait_for_multiple_objects(
fake_handles, mock.sentinel.wait_all, mock.sentinel.milliseconds)
mock_helper.assert_called_once_with(
win32utils.kernel32.WaitForMultipleObjects,
len(fake_handles),
mock.ANY,
mock.sentinel.wait_all,
mock.sentinel.milliseconds,
kernel32_lib_func=True,
error_ret_vals=[w_const.WAIT_FAILED])
self.assertEqual(mock_helper.return_value, ret_val)
handles_arg = mock_helper.call_args_list[0][0][2]
self.assertIsInstance(handles_arg,
wintypes.HANDLE * len(fake_handles))
self.assertEqual(fake_handles, handles_arg[:])
@mock.patch.object(win32utils.Win32Utils, 'run_and_check_output')
def test_wait_for_multiple_objects_timeout(self, mock_helper):
fake_handles = [10]
mock_helper.return_value = w_const.ERROR_WAIT_TIMEOUT
self.assertRaises(
exceptions.Timeout,
self._win32_utils.wait_for_multiple_objects,
fake_handles, mock.sentinel.wait_all,
mock.sentinel.milliseconds)
@mock.patch.object(win32utils.Win32Utils, 'run_and_check_output')
def test_wait_for_single_object(self, mock_helper):
ret_val = self._win32_utils.wait_for_single_object(
mock.sentinel.handle, mock.sentinel.milliseconds)
mock_helper.assert_called_once_with(
win32utils.kernel32.WaitForSingleObject,
mock.sentinel.handle,
mock.sentinel.milliseconds,
kernel32_lib_func=True,
error_ret_vals=[w_const.WAIT_FAILED])
self.assertEqual(mock_helper.return_value, ret_val)
@mock.patch.object(win32utils.Win32Utils, 'run_and_check_output')
def test_wait_for_single_object_timeout(self, mock_helper):
mock_helper.return_value = w_const.ERROR_WAIT_TIMEOUT
self.assertRaises(
exceptions.Timeout,
self._win32_utils.wait_for_single_object,
mock.sentinel.timeout,
mock.sentinel.milliseconds)
|
|
# Copyright (c) 2020, Apple Inc. All rights reserved.
#
# Use of this source code is governed by a BSD-3-clause license that can be
# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause
import logging
from coremltools.converters.mil.frontend.tensorflow.basic_graph_ops import (
disconnect_edge,
connect_edge,
delete_node,
replace_node,
replace_dest,
connect_edge_at_index,
)
from coremltools.converters.mil.frontend.tensorflow.parsed_tf_node import ParsedTFNode
def _rename_node_in_fn(node, new_name, fn):
"""
    Rename a node and all of its connections.
Parameters
----------
node: ParsedTFNode
Node to rename.
new_name: str
New name of the node.
fn: SSAFunction
Function that contains graph to operate on.
"""
old_name = node.name
node.name = new_name
for i in node.inputs:
idx = fn.graph[i].outputs.index(old_name)
fn.graph[i].outputs[idx] = new_name
if old_name in fn.graph[i].control_outputs:
idx = fn.graph[i].control_outputs.index(old_name)
fn.graph[i].control_outputs[idx] = new_name
for o in node.outputs:
idx = fn.graph[o].inputs.index(old_name)
fn.graph[o].inputs[idx] = new_name
if old_name in fn.graph[o].control_inputs:
idx = fn.graph[o].control_inputs.index(old_name)
fn.graph[o].control_inputs[idx] = new_name
for i in node.control_inputs:
if old_name in fn.graph[i].control_outputs:
idx = fn.graph[i].control_outputs.index(old_name)
fn.graph[i].control_outputs[idx] = new_name
for o in node.control_outputs:
if old_name in fn.graph[o].control_inputs:
idx = fn.graph[o].control_inputs.index(old_name)
fn.graph[o].control_inputs[idx] = new_name
fn.graph[new_name] = fn.graph.pop(old_name)
def _flatten_sub_graph_namespaces(tf_ssa, fn_name):
"""
A pass to flatten namespaces for sub-graphs of the control flow while_loop
    op. For example, a while_loop has two sub-graphs, "cond" and "body"; all
    the nodes in those sub-graphs get prefixed with the sub-graph's name. This
pass is required for converting control flow v2 ops (enabled by default in
TensorFlow 2.0+) as the original sub-graphs will contain duplicated names.
Parameters
----------
tf_ssa: NetworkEnsemble
An object that contains multiple functions / sub-graphs.
fn_name: str
Name of the function / sub-graph to operate on.
"""
count = 0
fn = tf_ssa.functions.get(fn_name)
for name, node in fn.graph.copy().items():
if node.op not in {"StatelessWhile", "While", "StatelessIf", "If"}:
continue
if node.op in {"StatelessWhile", "While"}:
sub_fn_names = [node.attr.get("cond"), node.attr.get("body")]
else:
sub_fn_names = [node.attr.get("then_branch"), node.attr.get("else_branch")]
for sf_name in sub_fn_names:
sf = tf_ssa.functions.get(sf_name)
prefix = "{}/{}".format(node.name, sf_name)
for old_name, n in sf.graph.copy().items():
_rename_node_in_fn(n, "{}/{}".format(prefix, old_name), sf)
count += 1
ios = set(sf.inputs + sf.outputs)
io_name_mappings = {n: "{}/{}".format(prefix, n) for n in ios}
sf.inputs = [io_name_mappings[n] for n in sf.inputs]
sf.outputs = [io_name_mappings[n] for n in sf.outputs]
_flatten_sub_graph_namespaces(tf_ssa, sf_name)
msg = "flatten_sub_graph_namespaces: {} nodes renamed in '{}'"
    logging.info(msg.format(count, fn_name))
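# Renaming sketch for the pass above: for a "While" node named "while" whose
# body sub-graph is named "body", a node "sub" inside that sub-graph becomes
# "while/body/sub", and the sub-graph's inputs/outputs lists are remapped to
# the same prefixed names via io_name_mappings.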
def _insert_op(fn, op, name, attr=None):
"""
Create a node with given attributes, then insert to the target graph in
given function.
Parameters
----------
fn: SSAFunction
Function that contains graph to operate on.
op: str
Type of the operation for the new node.
name: str
Name of the new node.
attr: dict or None (optional)
Attributes of the new node.
Returns
-------
node: ParsedTFNode
New node object.
"""
node = ParsedTFNode()
node.op = op
node.name = name
if attr is not None:
node.attr = attr
fn.graph[node.name] = node
return node
def _insert_function_entry(fn):
return _insert_op(fn=fn, op="function_entry", name="entry")
def _insert_return(fn):
return _insert_op(fn=fn, op="return", name="return")
def _insert_make_tuple(fn, name=None):
name = "make_tuple" if name is None else name
return _insert_op(fn=fn, op="make_tuple", name=name)
def _insert_get_tuple(fn, name, idx):
return _insert_op(fn=fn, op="get_tuple", name=name, attr={"index": idx})
def _rewrite_cond_functions(tf_ssa, fn):
r"""
Rewrite tf.cond's sub-graphs with get_tuple, make_tuple, function_entry and
return ops. This rewrite is required in order to convert functional form
control flow v2 nodes 'StatelessIf' and 'If'.
Parameters
----------
tf_ssa: NetworkEnsemble
An object that contains multiple functions / sub-graphs.
fn: SSAFunction
Function that contains graph to operate on.
Examples
--------
Input:
Before pass "main" graph:
[const/greater/y] ---------\
[placeholder/args_0] -> [greater] -> [if] -> [identity]
\------------------/ \--> [identity]
[placeholder/args_1] ----------------/
Before pass "then" graph:
[const/sub/y] ---------------\
[placeholder/sub_args_0] -> [sub]
[placeholder/sub_args_1] -> [identity]
Before pass "else" graph:
[const/add/y] ---------------\
[placeholder/add_args_0] -> [add]
[const/mul/y] ---------------\
[placeholder/add_args_1] -> [mul]
Output:
After pass "main" graph:
[const/greater/y] ---------\
[placeholder/args_0] -> [greater] -> [make_tuple] -> [if] -> [get_tuple] -> [identity]
\---------------------/ \--> [get_tuple] -> [identity]
[placeholder/args_1] -------------------/
After pass "then" graph:
[const/sub/y] ---------------\
[entry] -> [get_tuple] -> [placeholder/sub_args_0] -> [sub] -> [make_tuple] -> [return]
-> [get_tuple] -> [placeholder/sub_args_1] -----------------/
After pass "else" graph:
[const/add/y] ---------------\
[entry] -> [get_tuple] -> [placeholder/add_args_0] -> [add] -> [make_tuple] -> [return]
-> [get_tuple] -> [placeholder/add_args_1] -> [mul] --------/
[const/mul/y] ---------------/
"""
for cond_name, cond_node in fn.graph.copy().items():
if cond_node.op not in {"StatelessIf", "If"}:
continue
then_fn_name = cond_node.attr.get("then_branch")
else_fn_name = cond_node.attr.get("else_branch")
msg = "Rewriting '{}' ({}) sub-graphs: then '{}', else '{}'"
logging.info(
msg.format(cond_node.name, cond_node.op, then_fn_name, else_fn_name)
)
then_fn = tf_ssa.functions.get(then_fn_name)
else_fn = tf_ssa.functions.get(else_fn_name)
# insert function entry nodes
then_entry = _insert_function_entry(then_fn)
else_entry = _insert_function_entry(else_fn)
# pack node inputs to a single tuple
cond_input = _insert_make_tuple(fn, "make_tuple/{}".format(cond_name))
for ci in cond_node.inputs:
disconnect_edge(fn.graph, ci, cond_node.name)
connect_edge(fn.graph, ci, cond_input)
connect_edge(fn.graph, cond_input, cond_node.name)
# unpack node outputs to multiple get_tuples
for i, co in enumerate(cond_node.outputs):
# utilize FunctionDef's ret to make sure function outputs and
            # node outputs order match when there are multiple outputs.
            # Fall back to the original cond_node.outputs order if that fails.
o_original = fn.graph[co].original_node
if o_original:
c_input = [n for n in o_original.input if str(n).startswith(cond_name)][
0
]
if ":" in c_input:
identity_postfix = "identity_{}".format(c_input.split(":")[-1])
else: # access identity "0"
identity_postfix = "identity"
identity_keys = [t for t in then_fn.ret.keys() if t.endswith(identity_postfix)]
if len(identity_keys) != 1:
raise NotImplementedError("Branch not found.")
mapped_name = then_fn.ret[identity_keys[0]].split(":")[0]
if mapped_name in then_fn.outputs:
idx = then_fn.outputs.index(mapped_name)
else: # in else_fn.outputs
idx = else_fn.outputs.index(mapped_name)
else:
idx = i
cond_output = _insert_get_tuple(
fn, "get_tuple/{}/{}".format(idx, cond_name), idx
)
edge_idx = fn.graph[co].inputs.index(cond_node.name)
replace_dest(fn.graph, cond_node, co, cond_output)
connect_edge_at_index(fn.graph, cond_output, co, edge_idx)
# fetch inputs using get_tuple for then branch
for i, ti in enumerate(then_fn.inputs):
then_input = _insert_get_tuple(
then_fn, "get_tuple/{}/{}".format(i, ti), i + 1
)
connect_edge(then_fn.graph, then_entry, then_input)
replace_node(then_fn.graph, ti, then_input)
delete_node(then_fn.graph, ti)
# fetch inputs using get_tuple for else branch
for i, ei in enumerate(else_fn.inputs):
else_input = _insert_get_tuple(
else_fn, "get_tuple/{}/{}".format(i, ei), i + 1
)
connect_edge(else_fn.graph, else_entry, else_input)
replace_node(else_fn.graph, ei, else_input)
delete_node(else_fn.graph, ei)
# returns a tuple of value(s) as output for then branch
then_output = _insert_make_tuple(then_fn)
for to in then_fn.outputs:
if to not in then_fn.graph.keys():
# from identity, map back to get_tuple node
to = "get_tuple/{}/{}".format(then_fn.inputs.index(to), to)
connect_edge(then_fn.graph, to, then_output.name)
then_return = _insert_return(then_fn)
connect_edge(then_fn.graph, then_output.name, then_return.name)
# returns a tuple of value(s) as output for else branch
else_output = _insert_make_tuple(else_fn)
for eo in else_fn.outputs:
if eo not in else_fn.graph.keys():
# from identity, map back to get_tuple node
eo = "get_tuple/{}/{}".format(else_fn.inputs.index(eo), eo)
connect_edge(else_fn.graph, eo, else_output.name)
else_return = _insert_return(else_fn)
connect_edge(else_fn.graph, else_output.name, else_return.name)
def _eliminate_loop_cond_nodes(tf_ssa, fn):
"""
Eliminate loop condition nodes, such as loop_counters, max_iterations from
the cond sub-graph and body sub-graph of tf.while_loop.
Parameters
----------
tf_ssa: NetworkEnsemble
An object that contains multiple functions / sub-graphs.
fn: SSAFunction
Function that contains graph to operate on.
Examples
--------
Input:
Before pass "main" graph:
[while/maximum_iterations] -----\
[while/loop_counter] -------> [while] --> [identity]
[placeholder/args_0] ----------/
Before pass "cond" graph:
[const/mean] -------\
[placeholder] --> [mean] --> [greater]
[const/greater/y] --------------/
[while_maximum_iterations], [while_loop_counter] (not connected)
Before pass "body" graph:
[const/sub/y] ------\
[placeholder] ---> [sub]
[const/add/y] ------------\
[while_loop_counter] --> [add]
[while_maximum_iterations] (not connected)
Output:
After pass "main" graph:
[placeholder/args_0] --> [while] --> [identity]
After pass "cond" graph:
[const/mean] -------\
[placeholder] --> [mean] --> [greater]
[const/greater/y] --------------/
After pass "body" graph:
[const/sub/y] ------\
[placeholder] ---> [sub]
"""
for name, node in fn.graph.copy().items():
if node.op not in {"StatelessWhile", "While"}:
continue
cond_fn = tf_ssa.functions.get(node.attr.get("cond"))
body_fn = tf_ssa.functions.get(node.attr.get("body"))
cond_lc_nodes = {cond_fn.inputs.pop(0), cond_fn.inputs.pop(0)}
logging.info("Removing {} from cond graph".format(cond_lc_nodes))
for n in cond_lc_nodes:
delete_node(cond_fn.graph, n)
body_lc_nodes = {body_fn.inputs.pop(0), body_fn.inputs.pop(0)}
q = list(body_lc_nodes)
# delete entire sub-fn
while len(q) > 0:
n = body_fn.graph[q.pop(0)]
for o in n.outputs:
if o not in body_lc_nodes:
q.append(o)
body_lc_nodes.add(o)
for i in body_fn.graph[o].inputs:
if i not in body_lc_nodes:
q.append(i)
body_lc_nodes.add(i)
# remove if in outputs
for n in body_lc_nodes:
if n in body_fn.outputs:
msg = "Removing '{}' ({}) from body fn outputs"
logging.info(msg.format(n, body_fn.graph[n].op))
body_fn.outputs.remove(n)
logging.info("Removing {} from body graph".format(body_lc_nodes))
for n in body_lc_nodes:
delete_node(body_fn.graph, n)
def _rewrite_while_loop_functions(tf_ssa, fn):
"""
Rewrite tf.while_loop's sub-graphs with get_tuple, make_tuple,
function_entry and return ops. This rewrite is required in order to convert
functional form control flow v2 nodes 'StatelessWhile' and 'While'.
Parameters
----------
tf_ssa: NetworkEnsemble
An object that contains multiple functions / sub-graphs.
fn: SSAFunction
Function that contains graph to operate on.
Example
-------
Input:
Before pass "main" graph:
[placeholder/args_0] --> [while] --> [identity]
Before pass "cond" graph:
[const/mean] -------\
[placeholder] --> [mean] --> [greater]
[const/greater/y] --------------/
Before pass "body" graph:
[const/sub/y] ------\
[placeholder] ---> [sub]
Output:
After pass "main" graph:
[placeholder/args_0] --> [make_tuple] --> [while] --> [get_tuple] --> [identity]
After pass "cond" graph:
[const/mean] ------\
[entry] -> [get_tuple] -> [placeholder] -> [mean] -> [greater] -> [make_tuple] -> [return]
[const/greater/y] ------------/
After pass "body" graph:
[const/sub/y] ----\
[entry] -> [get_tuple] -> [placeholder] -> [sub] -> [make_tuple] -> [return]
"""
for while_name, while_node in fn.graph.copy().items():
if while_node.op not in {"StatelessWhile", "While"}:
continue
cond_fn_name = while_node.attr.get("cond")
body_fn_name = while_node.attr.get("body")
msg = "Rewriting '{}' ({}) sub-graphs: cond '{}', body '{}'"
logging.info(
msg.format(while_node.name, while_node.op, cond_fn_name, body_fn_name)
)
cond_fn = tf_ssa.functions.get(cond_fn_name)
body_fn = tf_ssa.functions.get(body_fn_name)
# insert function entry nodes
cond_entry = _insert_function_entry(cond_fn)
body_entry = _insert_function_entry(body_fn)
# pack node inputs to a single tuple
while_input_tuple = _insert_make_tuple(fn, "make_tuple/{}".format(while_name))
for wi in while_node.inputs:
disconnect_edge(fn.graph, wi, while_node.name)
connect_edge(fn.graph, wi, while_input_tuple)
connect_edge(fn.graph, while_input_tuple, while_node.name)
# unpack node outputs to multiple get_tuples
for i, wo in enumerate(while_node.outputs):
# utilize FunctionDef's ret to make sure function outputs and
            # node outputs order match when there are multiple outputs.
o_original = fn.graph[wo].original_node
while_input = [
n for n in o_original.input if str(n).startswith(while_name)
][0]
while_index = while_input.split(":")[-1]
            if while_index != "0":
identity_postfix = "identity_{}".format(while_index)
else: # access identity "0"
identity_postfix = "identity"
identity_keys = [t for t in body_fn.ret.keys() if t.endswith(identity_postfix)]
if len(identity_keys) != 1:
raise NotImplementedError("Branch not found.")
mapped_name = body_fn.ret[identity_keys[0]].split(":")[0]
idx = body_fn.outputs.index(mapped_name)
loop_output = _insert_get_tuple(
fn, "get_tuple/{}/{}".format(idx, while_input), idx
)
edge_idx = fn.graph[wo].inputs.index(while_node.name)
replace_dest(fn.graph, while_node, wo, loop_output)
connect_edge_at_index(fn.graph, loop_output, wo, edge_idx)
# fetch inputs using get_tuple for cond fn
for i, ci in enumerate(cond_fn.inputs):
cond_input = _insert_get_tuple(cond_fn, "get_tuple/{}/{}".format(i, ci), i)
connect_edge(cond_fn.graph, cond_entry, cond_input)
replace_node(cond_fn.graph, ci, cond_input)
delete_node(cond_fn.graph, ci)
# fetch inputs using get_tuple for body fn
for i, bi in enumerate(body_fn.inputs):
new_name = "get_tuple/{}/{}".format(i, bi)
if bi in body_fn.outputs: # input is also an output
body_fn.outputs[body_fn.outputs.index(bi)] = new_name
body_input = _insert_get_tuple(body_fn, new_name, i)
connect_edge(body_fn.graph, body_entry, body_input)
replace_node(body_fn.graph, bi, body_input)
delete_node(body_fn.graph, bi)
# returns a tuple of value(s) as output for cond fn
cond_output = _insert_make_tuple(cond_fn)
for co in cond_fn.outputs:
connect_edge(cond_fn.graph, co, cond_output.name)
cond_return = _insert_return(cond_fn)
connect_edge(cond_fn.graph, cond_output.name, cond_return.name)
# returns a tuple of value(s) as output for body branch
body_output = _insert_make_tuple(body_fn)
for bo in body_fn.outputs:
connect_edge(body_fn.graph, bo, body_output.name)
body_return = _insert_return(body_fn)
connect_edge(body_fn.graph, body_output.name, body_return.name)
def rewrite_control_flow_functions(tf_ssa):
for fn_name, fn in tf_ssa.functions.items():
_rewrite_cond_functions(tf_ssa, fn)
for fn_name, fn in tf_ssa.functions.items():
_eliminate_loop_cond_nodes(tf_ssa, fn)
_rewrite_while_loop_functions(tf_ssa, fn)
def flatten_sub_graph_namespaces(tf_ssa):
_flatten_sub_graph_namespaces(tf_ssa, fn_name="main")
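# Driver sketch (an assumption about usage; the exact call site lives in the
# TF loader, not in this module): given a parsed NetworkEnsemble `tf_ssa`,
# the passes are typically applied as
#   flatten_sub_graph_namespaces(tf_ssa)
#   rewrite_control_flow_functions(tf_ssa)
# i.e. first de-duplicate sub-graph node names, then rewrite the cond /
# while_loop sub-graphs with function_entry, get_tuple, make_tuple and
# return ops.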
|
|
# Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Movielens 1-M dataset.
Movielens 1-M dataset contains 1 million ratings from 6000 users on 4000
movies, which was collected by GroupLens Research. This module will download
Movielens 1-M dataset from
http://files.grouplens.org/datasets/movielens/ml-1m.zip and parse training
set and test set into paddle reader creators.
"""
import zipfile
import paddle.v2.dataset.common
import re
import random
import functools
__all__ = [
'train', 'test', 'get_movie_title_dict', 'max_movie_id', 'max_user_id',
'age_table', 'movie_categories', 'max_job_id', 'user_info', 'movie_info',
'convert'
]
age_table = [1, 18, 25, 35, 45, 50, 56]
URL = 'http://files.grouplens.org/datasets/movielens/ml-1m.zip'
MD5 = 'c4d9eecfca2ab87c1945afe126590906'
class MovieInfo(object):
"""
Movie id, title and categories information are stored in MovieInfo.
"""
def __init__(self, index, categories, title):
self.index = int(index)
self.categories = categories
self.title = title
def value(self):
"""
Get information from a movie.
"""
return [
self.index, [CATEGORIES_DICT[c] for c in self.categories],
[MOVIE_TITLE_DICT[w.lower()] for w in self.title.split()]
]
def __str__(self):
return "<MovieInfo id(%d), title(%s), categories(%s)>" % (
self.index, self.title, self.categories)
def __repr__(self):
return self.__str__()
class UserInfo(object):
"""
User id, gender, age, and job information are stored in UserInfo.
"""
def __init__(self, index, gender, age, job_id):
self.index = int(index)
self.is_male = gender == 'M'
self.age = age_table.index(int(age))
self.job_id = int(job_id)
def value(self):
"""
Get information from a user.
"""
return [self.index, 0 if self.is_male else 1, self.age, self.job_id]
def __str__(self):
return "<UserInfo id(%d), gender(%s), age(%d), job(%d)>" % (
self.index, "M"
if self.is_male else "F", age_table[self.age], self.job_id)
def __repr__(self):
return str(self)
MOVIE_INFO = None
MOVIE_TITLE_DICT = None
CATEGORIES_DICT = None
USER_INFO = None
def __initialize_meta_info__():
fn = paddle.v2.dataset.common.download(URL, "movielens", MD5)
global MOVIE_INFO
if MOVIE_INFO is None:
pattern = re.compile(r'^(.*)\((\d+)\)$')
with zipfile.ZipFile(file=fn) as package:
for info in package.infolist():
assert isinstance(info, zipfile.ZipInfo)
MOVIE_INFO = dict()
title_word_set = set()
categories_set = set()
with package.open('ml-1m/movies.dat') as movie_file:
for i, line in enumerate(movie_file):
movie_id, title, categories = line.strip().split('::')
categories = categories.split('|')
for c in categories:
categories_set.add(c)
title = pattern.match(title).group(1)
MOVIE_INFO[int(movie_id)] = MovieInfo(
index=movie_id, categories=categories, title=title)
for w in title.split():
title_word_set.add(w.lower())
global MOVIE_TITLE_DICT
MOVIE_TITLE_DICT = dict()
for i, w in enumerate(title_word_set):
MOVIE_TITLE_DICT[w] = i
global CATEGORIES_DICT
CATEGORIES_DICT = dict()
for i, c in enumerate(categories_set):
CATEGORIES_DICT[c] = i
global USER_INFO
USER_INFO = dict()
with package.open('ml-1m/users.dat') as user_file:
for line in user_file:
uid, gender, age, job, _ = line.strip().split("::")
USER_INFO[int(uid)] = UserInfo(
index=uid, gender=gender, age=age, job_id=job)
return fn
def __reader__(rand_seed=0, test_ratio=0.1, is_test=False):
fn = __initialize_meta_info__()
rand = random.Random(x=rand_seed)
with zipfile.ZipFile(file=fn) as package:
with package.open('ml-1m/ratings.dat') as rating:
for line in rating:
if (rand.random() < test_ratio) == is_test:
uid, mov_id, rating, _ = line.strip().split("::")
uid = int(uid)
mov_id = int(mov_id)
rating = float(rating) * 2 - 5.0
mov = MOVIE_INFO[mov_id]
usr = USER_INFO[uid]
yield usr.value() + mov.value() + [[rating]]
def __reader_creator__(**kwargs):
return lambda: __reader__(**kwargs)
train = functools.partial(__reader_creator__, is_test=False)
test = functools.partial(__reader_creator__, is_test=True)
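# Record layout sketch: each sample yielded by the train()/test() readers is
# usr.value() + mov.value() + [[rating]], i.e.
#   [user_id, gender flag (0=M, 1=F), age bucket index, job_id,
#    movie_id, [category ids], [title word ids], [scaled rating]]
# where the raw 1-5 star rating is rescaled by `rating * 2 - 5.0`
# (1 -> -3.0, 5 -> 5.0).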
def get_movie_title_dict():
"""
Get movie title dictionary.
"""
__initialize_meta_info__()
return MOVIE_TITLE_DICT
def __max_index_info__(a, b):
if a.index > b.index:
return a
else:
return b
def max_movie_id():
"""
Get the maximum value of movie id.
"""
__initialize_meta_info__()
return reduce(__max_index_info__, MOVIE_INFO.viewvalues()).index
def max_user_id():
"""
Get the maximum value of user id.
"""
__initialize_meta_info__()
return reduce(__max_index_info__, USER_INFO.viewvalues()).index
def __max_job_id_impl__(a, b):
if a.job_id > b.job_id:
return a
else:
return b
def max_job_id():
"""
Get the maximum value of job id.
"""
__initialize_meta_info__()
return reduce(__max_job_id_impl__, USER_INFO.viewvalues()).job_id
def movie_categories():
"""
    Get movie categories dictionary.
"""
__initialize_meta_info__()
return CATEGORIES_DICT
def user_info():
"""
Get user info dictionary.
"""
__initialize_meta_info__()
return USER_INFO
def movie_info():
"""
Get movie info dictionary.
"""
__initialize_meta_info__()
return MOVIE_INFO
def unittest():
for train_count, _ in enumerate(train()()):
pass
for test_count, _ in enumerate(test()()):
pass
print train_count, test_count
def fetch():
paddle.v2.dataset.common.download(URL, "movielens", MD5)
def convert(path):
"""
Converts dataset to recordio format
"""
paddle.v2.dataset.common.convert(path, train(), 1000, "movielens_train")
paddle.v2.dataset.common.convert(path, test(), 1000, "movielens_test")
if __name__ == '__main__':
unittest()
|
|
import time
import datetime
import dateutil
import stripe
import hashlib
import re
import redis
import uuid
import mongoengine as mongo
from django.db import models
from django.db import IntegrityError
from django.db.utils import DatabaseError
from django.db.models.signals import post_save
from django.db.models import Sum, Avg, Count
from django.conf import settings
from django.contrib.auth import authenticate
from django.contrib.auth.models import User
from django.core.mail import EmailMultiAlternatives
from django.core.urlresolvers import reverse
from django.template.loader import render_to_string
from apps.rss_feeds.models import Feed, MStory, MStarredStory
from apps.rss_feeds.tasks import SchedulePremiumSetup
from apps.feed_import.models import GoogleReaderImporter, OPMLExporter
from apps.reader.models import UserSubscription
from apps.reader.models import RUserStory
from utils import log as logging
from utils import json_functions as json
from utils.user_functions import generate_secret_token
from utils.feed_functions import chunks
from vendor.timezones.fields import TimeZoneField
from vendor.paypal.standard.ipn.signals import subscription_signup, payment_was_successful, recurring_payment
from vendor.paypal.standard.ipn.signals import payment_was_flagged
from vendor.paypal.standard.ipn.models import PayPalIPN
from vendor.paypalapi.interface import PayPalInterface
from vendor.paypalapi.exceptions import PayPalAPIResponseError
from zebra.signals import zebra_webhook_customer_subscription_created
from zebra.signals import zebra_webhook_charge_succeeded
class Profile(models.Model):
user = models.OneToOneField(User, unique=True, related_name="profile")
is_premium = models.BooleanField(default=False)
premium_expire = models.DateTimeField(blank=True, null=True)
send_emails = models.BooleanField(default=True)
preferences = models.TextField(default="{}")
view_settings = models.TextField(default="{}")
collapsed_folders = models.TextField(default="[]")
feed_pane_size = models.IntegerField(default=242)
tutorial_finished = models.BooleanField(default=False)
hide_getting_started = models.NullBooleanField(default=False, null=True, blank=True)
has_setup_feeds = models.NullBooleanField(default=False, null=True, blank=True)
has_found_friends = models.NullBooleanField(default=False, null=True, blank=True)
has_trained_intelligence = models.NullBooleanField(default=False, null=True, blank=True)
last_seen_on = models.DateTimeField(default=datetime.datetime.now)
last_seen_ip = models.CharField(max_length=50, blank=True, null=True)
dashboard_date = models.DateTimeField(default=datetime.datetime.now)
timezone = TimeZoneField(default="America/New_York")
secret_token = models.CharField(max_length=12, blank=True, null=True)
stripe_4_digits = models.CharField(max_length=4, blank=True, null=True)
stripe_id = models.CharField(max_length=24, blank=True, null=True)
def __unicode__(self):
return "%s <%s> (Premium: %s)" % (self.user, self.user.email, self.is_premium)
@property
def unread_cutoff(self, force_premium=False):
if self.is_premium or force_premium:
return datetime.datetime.utcnow() - datetime.timedelta(days=settings.DAYS_OF_UNREAD)
return datetime.datetime.utcnow() - datetime.timedelta(days=settings.DAYS_OF_UNREAD_FREE)
@property
def unread_cutoff_premium(self):
return datetime.datetime.utcnow() - datetime.timedelta(days=settings.DAYS_OF_UNREAD)
def canonical(self):
return {
'is_premium': self.is_premium,
'premium_expire': int(self.premium_expire.strftime('%s')) if self.premium_expire else 0,
'preferences': json.decode(self.preferences),
'tutorial_finished': self.tutorial_finished,
'hide_getting_started': self.hide_getting_started,
'has_setup_feeds': self.has_setup_feeds,
'has_found_friends': self.has_found_friends,
'has_trained_intelligence': self.has_trained_intelligence,
'dashboard_date': self.dashboard_date
}
def save(self, *args, **kwargs):
if not self.secret_token:
self.secret_token = generate_secret_token(self.user.username, 12)
try:
super(Profile, self).save(*args, **kwargs)
except DatabaseError:
print " ---> Profile not saved. Table isn't there yet."
def delete_user(self, confirm=False, fast=False):
if not confirm:
print " ---> You must pass confirm=True to delete this user."
return
logging.user(self.user, "Deleting user: %s / %s" % (self.user.email, self.user.profile.last_seen_ip))
try:
self.cancel_premium()
except:
logging.user(self.user, "~BR~SK~FWError cancelling premium renewal for: %s" % self.user.username)
from apps.social.models import MSocialProfile, MSharedStory, MSocialSubscription
from apps.social.models import MActivity, MInteraction
try:
social_profile = MSocialProfile.objects.get(user_id=self.user.pk)
logging.user(self.user, "Unfollowing %s followings and %s followers" %
(social_profile.following_count,
social_profile.follower_count))
for follow in social_profile.following_user_ids:
social_profile.unfollow_user(follow)
for follower in social_profile.follower_user_ids:
follower_profile = MSocialProfile.objects.get(user_id=follower)
follower_profile.unfollow_user(self.user.pk)
social_profile.delete()
except MSocialProfile.DoesNotExist:
logging.user(self.user, " ***> No social profile found. S'ok, moving on.")
pass
shared_stories = MSharedStory.objects.filter(user_id=self.user.pk)
logging.user(self.user, "Deleting %s shared stories" % shared_stories.count())
for story in shared_stories:
try:
if not fast:
original_story = MStory.objects.get(story_hash=story.story_hash)
original_story.sync_redis()
except MStory.DoesNotExist:
pass
story.delete()
subscriptions = MSocialSubscription.objects.filter(subscription_user_id=self.user.pk)
logging.user(self.user, "Deleting %s social subscriptions" % subscriptions.count())
subscriptions.delete()
interactions = MInteraction.objects.filter(user_id=self.user.pk)
logging.user(self.user, "Deleting %s interactions for user." % interactions.count())
interactions.delete()
interactions = MInteraction.objects.filter(with_user_id=self.user.pk)
logging.user(self.user, "Deleting %s interactions with user." % interactions.count())
interactions.delete()
activities = MActivity.objects.filter(user_id=self.user.pk)
logging.user(self.user, "Deleting %s activities for user." % activities.count())
activities.delete()
activities = MActivity.objects.filter(with_user_id=self.user.pk)
logging.user(self.user, "Deleting %s activities with user." % activities.count())
activities.delete()
starred_stories = MStarredStory.objects.filter(user_id=self.user.pk)
logging.user(self.user, "Deleting %s starred stories." % starred_stories.count())
starred_stories.delete()
logging.user(self.user, "Deleting user: %s" % self.user)
self.user.delete()
def activate_premium(self, never_expire=False):
from apps.profile.tasks import EmailNewPremium
EmailNewPremium.delay(user_id=self.user.pk)
was_premium = self.is_premium
self.is_premium = True
self.save()
self.user.is_active = True
self.user.save()
# Only auto-enable every feed if a free user is moving to premium
subs = UserSubscription.objects.filter(user=self.user)
if not was_premium:
for sub in subs:
if sub.active: continue
sub.active = True
try:
sub.save()
except (IntegrityError, Feed.DoesNotExist):
pass
try:
scheduled_feeds = [sub.feed.pk for sub in subs]
except Feed.DoesNotExist:
scheduled_feeds = []
logging.user(self.user, "~SN~FMTasking the scheduling immediate premium setup of ~SB%s~SN feeds..." %
len(scheduled_feeds))
SchedulePremiumSetup.apply_async(kwargs=dict(feed_ids=scheduled_feeds))
UserSubscription.queue_new_feeds(self.user)
self.setup_premium_history()
if never_expire:
self.premium_expire = None
self.save()
logging.user(self.user, "~BY~SK~FW~SBNEW PREMIUM ACCOUNT! WOOHOO!!! ~FR%s subscriptions~SN!" % (subs.count()))
return True
def deactivate_premium(self):
self.is_premium = False
self.save()
subs = UserSubscription.objects.filter(user=self.user)
for sub in subs:
sub.active = False
try:
sub.save()
# Don't bother recalculating feed's subs, as it will do that on next fetch
# sub.feed.setup_feed_for_premium_subscribers()
except (IntegrityError, Feed.DoesNotExist):
pass
logging.user(self.user, "~BY~FW~SBBOO! Deactivating premium account: ~FR%s subscriptions~SN!" % (subs.count()))
def activate_free(self):
if self.user.is_active:
return
self.user.is_active = True
self.user.save()
self.send_new_user_queue_email()
def setup_premium_history(self, alt_email=None, check_premium=False, force_expiration=False):
paypal_payments = []
stripe_payments = []
total_stripe_payments = 0
existing_history = PaymentHistory.objects.filter(user=self.user,
payment_provider__in=['paypal', 'stripe'])
if existing_history.count():
logging.user(self.user, "~BY~SN~FRDeleting~FW existing history: ~SB%s payments" % existing_history.count())
existing_history.delete()
# Record Paypal payments
paypal_payments = PayPalIPN.objects.filter(custom=self.user.username,
payment_status='Completed',
txn_type='subscr_payment')
if not paypal_payments.count():
paypal_payments = PayPalIPN.objects.filter(payer_email=self.user.email,
payment_status='Completed',
txn_type='subscr_payment')
if alt_email and not paypal_payments.count():
paypal_payments = PayPalIPN.objects.filter(payer_email=alt_email,
payment_status='Completed',
txn_type='subscr_payment')
if paypal_payments.count():
# Make sure this doesn't happen again, so let's use Paypal's email.
self.user.email = alt_email
self.user.save()
seen_txn_ids = set()
for payment in paypal_payments:
if payment.txn_id in seen_txn_ids: continue
seen_txn_ids.add(payment.txn_id)
PaymentHistory.objects.create(user=self.user,
payment_date=payment.payment_date,
payment_amount=payment.payment_gross,
payment_provider='paypal')
# Record Stripe payments
if self.stripe_id:
self.retrieve_stripe_ids()
stripe.api_key = settings.STRIPE_SECRET
seen_payments = set()
for stripe_id_model in self.user.stripe_ids.all():
stripe_id = stripe_id_model.stripe_id
stripe_customer = stripe.Customer.retrieve(stripe_id)
stripe_payments = stripe.Charge.all(customer=stripe_customer.id).data
for payment in stripe_payments:
created = datetime.datetime.fromtimestamp(payment.created)
if payment.status == 'failed': continue
if created in seen_payments: continue
seen_payments.add(created)
total_stripe_payments += 1
PaymentHistory.objects.create(user=self.user,
payment_date=created,
payment_amount=payment.amount / 100.0,
payment_provider='stripe')
# Calculate payments in last year, then add together
payment_history = PaymentHistory.objects.filter(user=self.user)
last_year = datetime.datetime.now() - datetime.timedelta(days=364)
recent_payments_count = 0
oldest_recent_payment_date = None
free_lifetime_premium = False
for payment in payment_history:
if payment.payment_amount == 0:
free_lifetime_premium = True
if payment.payment_date > last_year:
recent_payments_count += 1
if not oldest_recent_payment_date or payment.payment_date < oldest_recent_payment_date:
oldest_recent_payment_date = payment.payment_date
if free_lifetime_premium:
self.premium_expire = None
self.save()
elif oldest_recent_payment_date:
new_premium_expire = (oldest_recent_payment_date +
datetime.timedelta(days=365*recent_payments_count))
# Only move premium expire forward, never earlier. Also set expiration if not premium.
if (force_expiration or
(check_premium and not self.premium_expire) or
(self.premium_expire and new_premium_expire > self.premium_expire)):
self.premium_expire = new_premium_expire
self.save()
logging.user(self.user, "~BY~SN~FWFound ~SB~FB%s paypal~FW~SN and ~SB~FC%s stripe~FW~SN payments (~SB%s payments expire: ~SN~FB%s~FW)" % (
len(paypal_payments), total_stripe_payments, len(payment_history), self.premium_expire))
if (check_premium and not self.is_premium and
(not self.premium_expire or self.premium_expire > datetime.datetime.now())):
self.activate_premium()
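    # Expiration math sketch for setup_premium_history above (dates are
    # illustrative): with two payments inside the last 364 days, e.g.
    # 2015-03-01 and 2015-09-01, recent_payments_count == 2 and
    # oldest_recent_payment_date == 2015-03-01, so new_premium_expire is
    # 2015-03-01 + 365 * 2 days (about 2017-02-28); the expiration only
    # moves forward unless force_expiration is passed.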
@classmethod
def reimport_stripe_history(cls, limit=10, days=7, starting_after=None):
stripe.api_key = settings.STRIPE_SECRET
week = (datetime.datetime.now() - datetime.timedelta(days=days)).strftime('%s')
failed = []
i = 0
while True:
logging.debug(" ---> At %s / %s" % (i, starting_after))
i += 1
try:
data = stripe.Charge.all(created={'gt': week}, count=limit, starting_after=starting_after)
except stripe.APIConnectionError:
time.sleep(10)
continue
charges = data['data']
if not len(charges):
logging.debug("At %s (%s), finished" % (i, starting_after))
break
starting_after = charges[-1]["id"]
customers = [c['customer'] for c in charges if 'customer' in c]
for customer in customers:
if not customer:
print " ***> No customer!"
continue
try:
profile = Profile.objects.get(stripe_id=customer)
user = profile.user
except Profile.DoesNotExist:
logging.debug(" ***> Couldn't find stripe_id=%s" % customer)
failed.append(customer)
continue
except Profile.MultipleObjectsReturned:
logging.debug(" ***> Multiple stripe_id=%s" % customer)
failed.append(customer)
continue
try:
user.profile.setup_premium_history()
except stripe.APIConnectionError:
logging.debug(" ***> Failed: %s" % user.username)
failed.append(user.username)
time.sleep(2)
continue
return ','.join(failed)
def refund_premium(self, partial=False):
refunded = False
if self.stripe_id:
stripe.api_key = settings.STRIPE_SECRET
stripe_customer = stripe.Customer.retrieve(self.stripe_id)
stripe_payments = stripe.Charge.all(customer=stripe_customer.id).data
if partial:
stripe_payments[0].refund(amount=1200)
refunded = 12
else:
stripe_payments[0].refund()
self.cancel_premium()
refunded = stripe_payments[0].amount/100
logging.user(self.user, "~FRRefunding stripe payment: $%s" % refunded)
else:
self.cancel_premium()
paypal_opts = {
'API_ENVIRONMENT': 'PRODUCTION',
'API_USERNAME': settings.PAYPAL_API_USERNAME,
'API_PASSWORD': settings.PAYPAL_API_PASSWORD,
'API_SIGNATURE': settings.PAYPAL_API_SIGNATURE,
'API_CA_CERTS': False,
}
paypal = PayPalInterface(**paypal_opts)
transactions = PayPalIPN.objects.filter(custom=self.user.username,
txn_type='subscr_payment'
).order_by('-payment_date')
if not transactions:
transactions = PayPalIPN.objects.filter(payer_email=self.user.email,
txn_type='subscr_payment'
).order_by('-payment_date')
if transactions:
transaction = transactions[0]
refund = paypal.refund_transaction(transaction.txn_id)
try:
refunded = int(float(refund.raw['TOTALREFUNDEDAMOUNT'][0]))
except KeyError:
refunded = int(transaction.payment_gross)
logging.user(self.user, "~FRRefunding paypal payment: $%s" % refunded)
else:
logging.user(self.user, "~FRCouldn't refund paypal payment: not found by username or email")
refunded = 0
return refunded
def cancel_premium(self):
paypal_cancel = self.cancel_premium_paypal()
stripe_cancel = self.cancel_premium_stripe()
return stripe_cancel or paypal_cancel
def cancel_premium_paypal(self, second_most_recent_only=False):
transactions = PayPalIPN.objects.filter(custom=self.user.username,
txn_type='subscr_signup').order_by('-subscr_date')
if not transactions:
return
paypal_opts = {
'API_ENVIRONMENT': 'PRODUCTION',
'API_USERNAME': settings.PAYPAL_API_USERNAME,
'API_PASSWORD': settings.PAYPAL_API_PASSWORD,
'API_SIGNATURE': settings.PAYPAL_API_SIGNATURE,
'API_CA_CERTS': False,
}
paypal = PayPalInterface(**paypal_opts)
if second_most_recent_only:
# Check if user has an active subscription. If so, cancel it because a new one came in.
if len(transactions) > 1:
transaction = transactions[1]
else:
return False
else:
transaction = transactions[0]
profileid = transaction.subscr_id
try:
paypal.manage_recurring_payments_profile_status(profileid=profileid, action='Cancel')
except PayPalAPIResponseError:
logging.user(self.user, "~FRUser ~SBalready~SN canceled Paypal subscription: %s" % profileid)
else:
if second_most_recent_only:
logging.user(self.user, "~FRCanceling ~BR~FWsecond-oldest~SB~FR Paypal subscription: %s" % profileid)
else:
logging.user(self.user, "~FRCanceling Paypal subscription: %s" % profileid)
return True
def cancel_premium_stripe(self):
if not self.stripe_id:
return
stripe.api_key = settings.STRIPE_SECRET
stripe_customer = stripe.Customer.retrieve(self.stripe_id)
try:
stripe_customer.cancel_subscription()
except stripe.InvalidRequestError:
logging.user(self.user, "~FRFailed to cancel Stripe subscription")
logging.user(self.user, "~FRCanceling Stripe subscription")
return True
def retrieve_stripe_ids(self):
if not self.stripe_id:
return
stripe.api_key = settings.STRIPE_SECRET
stripe_customer = stripe.Customer.retrieve(self.stripe_id)
stripe_email = stripe_customer.email
stripe_ids = set()
for email in set([stripe_email, self.user.email]):
customers = stripe.Customer.list(email=email)
for customer in customers:
stripe_ids.add(customer.stripe_id)
self.user.stripe_ids.all().delete()
for stripe_id in stripe_ids:
self.user.stripe_ids.create(stripe_id=stripe_id)
@property
def latest_paypal_email(self):
ipn = PayPalIPN.objects.filter(custom=self.user.username)
if not len(ipn):
return
return ipn[0].payer_email
def activate_ios_premium(self, product_identifier, transaction_identifier, amount=36):
payments = PaymentHistory.objects.filter(user=self.user,
payment_identifier=transaction_identifier,
payment_date__gte=datetime.datetime.now()-datetime.timedelta(days=3))
if len(payments):
# Already paid
logging.user(self.user, "~FG~BBAlready paid iOS premium subscription: $%s~FW" % transaction_identifier)
return False
PaymentHistory.objects.create(user=self.user,
payment_date=datetime.datetime.now(),
payment_amount=amount,
payment_provider='ios-subscription',
payment_identifier=transaction_identifier)
self.setup_premium_history(check_premium=True)
if not self.is_premium:
self.activate_premium()
logging.user(self.user, "~FG~BBNew iOS premium subscription: $%s~FW" % product_identifier)
return True
@classmethod
def clear_dead_spammers(self, days=30, confirm=False):
users = User.objects.filter(date_joined__gte=datetime.datetime.now()-datetime.timedelta(days=days)).order_by('-date_joined')
usernames = set()
numerics = re.compile(r'[0-9]+')
for user in users:
opens = UserSubscription.objects.filter(user=user).aggregate(sum=Sum('feed_opens'))['sum']
reads = RUserStory.read_story_count(user.pk)
has_numbers = numerics.search(user.username)
try:
has_profile = user.profile.last_seen_ip
except Profile.DoesNotExist:
usernames.add(user.username)
print " ---> Missing profile: %-20s %-30s %-6s %-6s" % (user.username, user.email, opens, reads)
continue
if opens is None and not reads and has_numbers:
usernames.add(user.username)
print " ---> Numerics: %-20s %-30s %-6s %-6s" % (user.username, user.email, opens, reads)
elif not has_profile:
usernames.add(user.username)
print " ---> No IP: %-20s %-30s %-6s %-6s" % (user.username, user.email, opens, reads)
if not confirm: return usernames
for username in usernames:
u = User.objects.get(username=username)
u.profile.delete_user(confirm=True)
RNewUserQueue.user_count()
RNewUserQueue.activate_all()
@classmethod
def count_feed_subscribers(self, feed_id=None, user_id=None, verbose=True):
SUBSCRIBER_EXPIRE = datetime.datetime.now() - datetime.timedelta(days=settings.SUBSCRIBER_EXPIRE)
r = redis.Redis(connection_pool=settings.REDIS_FEED_SUB_POOL)
entire_feed_counted = False
if verbose:
feed = Feed.get_by_id(feed_id)
logging.debug(" ---> [%-30s] ~SN~FBCounting subscribers for feed:~SB~FM%s~SN~FB user:~SB~FM%s" % (feed.log_title[:30], feed_id, user_id))
if feed_id:
feed_ids = [feed_id]
elif user_id:
feed_ids = [us['feed_id'] for us in UserSubscription.objects.filter(user=user_id, active=True).values('feed_id')]
else:
assert False, "feed_id or user_id required"
if feed_id and not user_id:
entire_feed_counted = True
for feed_id in feed_ids:
total = 0
premium = 0
active = 0
active_premium = 0
key = 's:%s' % feed_id
premium_key = 'sp:%s' % feed_id
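            # Redis key scheme: 's:<feed_id>' is a sorted set of all subscriber
            # user_ids scored by last-seen timestamp; 'sp:<feed_id>' holds the
            # premium subset with the same scoring.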
if user_id:
                active = UserSubscription.objects.only('active').get(feed_id=feed_id, user_id=user_id).active
user_ids = dict([(user_id, active)])
else:
user_ids = dict([(us.user_id, us.active)
for us in UserSubscription.objects.filter(feed_id=feed_id).only('user', 'active')])
profiles = Profile.objects.filter(user_id__in=user_ids.keys()).values('user_id', 'last_seen_on', 'is_premium')
feed = Feed.get_by_id(feed_id)
if entire_feed_counted:
r.delete(key)
r.delete(premium_key)
for profiles_group in chunks(profiles, 20):
pipeline = r.pipeline()
for profile in profiles_group:
last_seen_on = int(profile['last_seen_on'].strftime('%s'))
muted_feed = not bool(user_ids[profile['user_id']])
if muted_feed:
last_seen_on = 0
pipeline.zadd(key, profile['user_id'], last_seen_on)
total += 1
if profile['is_premium']:
pipeline.zadd(premium_key, profile['user_id'], last_seen_on)
premium += 1
else:
pipeline.zrem(premium_key, profile['user_id'])
if profile['last_seen_on'] > SUBSCRIBER_EXPIRE and not muted_feed:
active += 1
if profile['is_premium']:
active_premium += 1
pipeline.execute()
if entire_feed_counted:
now = int(datetime.datetime.now().strftime('%s'))
r.zadd(key, -1, now)
r.expire(key, settings.SUBSCRIBER_EXPIRE*24*60*60)
r.zadd(premium_key, -1, now)
r.expire(premium_key, settings.SUBSCRIBER_EXPIRE*24*60*60)
logging.info(" ---> [%-30s] ~SN~FBCounting subscribers, storing in ~SBredis~SN: ~FMt:~SB~FM%s~SN a:~SB%s~SN p:~SB%s~SN ap:~SB%s" %
(feed.log_title[:30], total, active, premium, active_premium))
@classmethod
def count_all_feed_subscribers_for_user(self, user):
r = redis.Redis(connection_pool=settings.REDIS_FEED_SUB_POOL)
if not isinstance(user, User):
user = User.objects.get(pk=user)
active_feed_ids = [us['feed_id'] for us in UserSubscription.objects.filter(user=user.pk, active=True).values('feed_id')]
muted_feed_ids = [us['feed_id'] for us in UserSubscription.objects.filter(user=user.pk, active=False).values('feed_id')]
logging.user(user, "~SN~FBRefreshing user last_login_on for ~SB%s~SN/~SB%s subscriptions~SN" %
(len(active_feed_ids), len(muted_feed_ids)))
for feed_ids in [active_feed_ids, muted_feed_ids]:
for feeds_group in chunks(feed_ids, 20):
pipeline = r.pipeline()
for feed_id in feeds_group:
key = 's:%s' % feed_id
premium_key = 'sp:%s' % feed_id
last_seen_on = int(user.profile.last_seen_on.strftime('%s'))
if feed_ids is muted_feed_ids:
last_seen_on = 0
pipeline.zadd(key, user.pk, last_seen_on)
if user.profile.is_premium:
pipeline.zadd(premium_key, user.pk, last_seen_on)
else:
pipeline.zrem(premium_key, user.pk)
pipeline.execute()
def import_reader_starred_items(self, count=20):
importer = GoogleReaderImporter(self.user)
importer.import_starred_items(count=count)
def send_new_user_email(self):
if not self.user.email or not self.send_emails:
return
user = self.user
text = render_to_string('mail/email_new_account.txt', locals())
html = render_to_string('mail/email_new_account.xhtml', locals())
subject = "Welcome to NewsBlur, %s" % (self.user.username)
msg = EmailMultiAlternatives(subject, text,
from_email='NewsBlur <%s>' % settings.HELLO_EMAIL,
to=['%s <%s>' % (user, user.email)])
msg.attach_alternative(html, "text/html")
msg.send(fail_silently=True)
logging.user(self.user, "~BB~FM~SBSending email for new user: %s" % self.user.email)
def send_opml_export_email(self, reason=None, force=False):
if not self.user.email:
return
emails_sent = MSentEmail.objects.filter(receiver_user_id=self.user.pk,
email_type='opml_export')
day_ago = datetime.datetime.now() - datetime.timedelta(days=1)
for email in emails_sent:
if email.date_sent > day_ago and not force:
logging.user(self.user, "~SN~FMNot sending opml export email, already sent today.")
return
MSentEmail.record(receiver_user_id=self.user.pk, email_type='opml_export')
exporter = OPMLExporter(self.user)
opml = exporter.process()
params = {
'feed_count': UserSubscription.objects.filter(user=self.user).count(),
'reason': reason,
}
user = self.user
text = render_to_string('mail/email_opml_export.txt', params)
html = render_to_string('mail/email_opml_export.xhtml', params)
subject = "Backup OPML file of your NewsBlur sites"
        filename = 'NewsBlur Subscriptions - %s.xml' % datetime.datetime.now().strftime('%Y-%m-%d')
msg = EmailMultiAlternatives(subject, text,
from_email='NewsBlur <%s>' % settings.HELLO_EMAIL,
to=['%s <%s>' % (user, user.email)])
msg.attach_alternative(html, "text/html")
msg.attach(filename, opml, 'text/xml')
msg.send(fail_silently=True)
logging.user(self.user, "~BB~FM~SBSending OPML backup email to: %s" % self.user.email)
def send_first_share_to_blurblog_email(self, force=False):
from apps.social.models import MSocialProfile, MSharedStory
if not self.user.email:
return
params = dict(receiver_user_id=self.user.pk, email_type='first_share')
try:
MSentEmail.objects.get(**params)
if not force:
# Return if email already sent
return
except MSentEmail.DoesNotExist:
MSentEmail.objects.create(**params)
social_profile = MSocialProfile.objects.get(user_id=self.user.pk)
params = {
'shared_stories': MSharedStory.objects.filter(user_id=self.user.pk).count(),
'blurblog_url': social_profile.blurblog_url,
'blurblog_rss': social_profile.blurblog_rss
}
user = self.user
text = render_to_string('mail/email_first_share_to_blurblog.txt', params)
html = render_to_string('mail/email_first_share_to_blurblog.xhtml', params)
subject = "Your shared stories on NewsBlur are available on your Blurblog"
msg = EmailMultiAlternatives(subject, text,
from_email='NewsBlur <%s>' % settings.HELLO_EMAIL,
to=['%s <%s>' % (user, user.email)])
msg.attach_alternative(html, "text/html")
msg.send(fail_silently=True)
logging.user(self.user, "~BB~FM~SBSending first share to blurblog email to: %s" % self.user.email)
def send_new_premium_email(self, force=False):
# subs = UserSubscription.objects.filter(user=self.user)
# message = """Woohoo!
#
# User: %(user)s
# Feeds: %(feeds)s
#
# Sincerely,
# NewsBlur""" % {'user': self.user.username, 'feeds': subs.count()}
# mail_admins('New premium account', message, fail_silently=True)
if not self.user.email or not self.send_emails:
return
params = dict(receiver_user_id=self.user.pk, email_type='new_premium')
try:
MSentEmail.objects.get(**params)
if not force:
# Return if email already sent
return
except MSentEmail.DoesNotExist:
MSentEmail.objects.create(**params)
user = self.user
text = render_to_string('mail/email_new_premium.txt', locals())
html = render_to_string('mail/email_new_premium.xhtml', locals())
subject = "Thanks for going premium on NewsBlur!"
msg = EmailMultiAlternatives(subject, text,
from_email='NewsBlur <%s>' % settings.HELLO_EMAIL,
to=['%s <%s>' % (user, user.email)])
msg.attach_alternative(html, "text/html")
msg.send(fail_silently=True)
logging.user(self.user, "~BB~FM~SBSending email for new premium: %s" % self.user.email)
def send_forgot_password_email(self, email=None):
if not self.user.email and not email:
print "Please provide an email address."
return
if not self.user.email and email:
self.user.email = email
self.user.save()
user = self.user
text = render_to_string('mail/email_forgot_password.txt', locals())
html = render_to_string('mail/email_forgot_password.xhtml', locals())
subject = "Forgot your password on NewsBlur?"
msg = EmailMultiAlternatives(subject, text,
from_email='NewsBlur <%s>' % settings.HELLO_EMAIL,
to=['%s <%s>' % (user, user.email)])
msg.attach_alternative(html, "text/html")
msg.send(fail_silently=True)
logging.user(self.user, "~BB~FM~SBSending email for forgotten password: %s" % self.user.email)
def send_new_user_queue_email(self, force=False):
if not self.user.email:
print "Please provide an email address."
return
params = dict(receiver_user_id=self.user.pk, email_type='new_user_queue')
try:
MSentEmail.objects.get(**params)
if not force:
# Return if email already sent
return
except MSentEmail.DoesNotExist:
MSentEmail.objects.create(**params)
user = self.user
text = render_to_string('mail/email_new_user_queue.txt', locals())
html = render_to_string('mail/email_new_user_queue.xhtml', locals())
subject = "Your free account is now ready to go on NewsBlur"
msg = EmailMultiAlternatives(subject, text,
from_email='NewsBlur <%s>' % settings.HELLO_EMAIL,
to=['%s <%s>' % (user, user.email)])
msg.attach_alternative(html, "text/html")
msg.send(fail_silently=True)
logging.user(self.user, "~BB~FM~SBSending email for new user queue: %s" % self.user.email)
def send_upload_opml_finished_email(self, feed_count):
if not self.user.email:
print "Please provide an email address."
return
user = self.user
text = render_to_string('mail/email_upload_opml_finished.txt', locals())
html = render_to_string('mail/email_upload_opml_finished.xhtml', locals())
subject = "Your OPML upload is complete. Get going with NewsBlur!"
msg = EmailMultiAlternatives(subject, text,
from_email='NewsBlur <%s>' % settings.HELLO_EMAIL,
to=['%s <%s>' % (user, user.email)])
msg.attach_alternative(html, "text/html")
msg.send()
logging.user(self.user, "~BB~FM~SBSending email for OPML upload: %s" % self.user.email)
def send_import_reader_finished_email(self, feed_count):
if not self.user.email:
print "Please provide an email address."
return
user = self.user
text = render_to_string('mail/email_import_reader_finished.txt', locals())
html = render_to_string('mail/email_import_reader_finished.xhtml', locals())
subject = "Your Google Reader import is complete. Get going with NewsBlur!"
msg = EmailMultiAlternatives(subject, text,
from_email='NewsBlur <%s>' % settings.HELLO_EMAIL,
to=['%s <%s>' % (user, user.email)])
msg.attach_alternative(html, "text/html")
msg.send()
logging.user(self.user, "~BB~FM~SBSending email for Google Reader import: %s" % self.user.email)
def send_import_reader_starred_finished_email(self, feed_count, starred_count):
if not self.user.email:
print "Please provide an email address."
return
user = self.user
text = render_to_string('mail/email_import_reader_starred_finished.txt', locals())
html = render_to_string('mail/email_import_reader_starred_finished.xhtml', locals())
subject = "Your Google Reader starred stories import is complete. Get going with NewsBlur!"
msg = EmailMultiAlternatives(subject, text,
from_email='NewsBlur <%s>' % settings.HELLO_EMAIL,
to=['%s <%s>' % (user, user.email)])
msg.attach_alternative(html, "text/html")
msg.send()
logging.user(self.user, "~BB~FM~SBSending email for Google Reader starred stories import: %s" % self.user.email)
def send_launch_social_email(self, force=False):
if not self.user.email or not self.send_emails:
logging.user(self.user, "~FM~SB~FRNot~FM sending launch social email for user, %s: %s" % (self.user.email and 'opt-out: ' or 'blank', self.user.email))
return
params = dict(receiver_user_id=self.user.pk, email_type='launch_social')
try:
MSentEmail.objects.get(**params)
if not force:
# Return if email already sent
logging.user(self.user, "~FM~SB~FRNot~FM sending launch social email for user, sent already: %s" % self.user.email)
return
except MSentEmail.DoesNotExist:
MSentEmail.objects.create(**params)
delta = datetime.datetime.now() - self.last_seen_on
months_ago = delta.days / 30
user = self.user
data = dict(user=user, months_ago=months_ago)
text = render_to_string('mail/email_launch_social.txt', data)
html = render_to_string('mail/email_launch_social.xhtml', data)
subject = "NewsBlur is now a social news reader"
msg = EmailMultiAlternatives(subject, text,
from_email='NewsBlur <%s>' % settings.HELLO_EMAIL,
to=['%s <%s>' % (user, user.email)])
msg.attach_alternative(html, "text/html")
msg.send(fail_silently=True)
logging.user(self.user, "~BB~FM~SBSending launch social email for user: %s months, %s" % (months_ago, self.user.email))
def send_launch_turntouch_email(self, force=False):
if not self.user.email or not self.send_emails:
logging.user(self.user, "~FM~SB~FRNot~FM sending launch TT email for user, %s: %s" % (self.user.email and 'opt-out: ' or 'blank', self.user.email))
return
params = dict(receiver_user_id=self.user.pk, email_type='launch_turntouch')
try:
MSentEmail.objects.get(**params)
if not force:
# Return if email already sent
logging.user(self.user, "~FM~SB~FRNot~FM sending launch social email for user, sent already: %s" % self.user.email)
return
except MSentEmail.DoesNotExist:
MSentEmail.objects.create(**params)
delta = datetime.datetime.now() - self.last_seen_on
months_ago = delta.days / 30
user = self.user
data = dict(user=user, months_ago=months_ago)
text = render_to_string('mail/email_launch_turntouch.txt', data)
html = render_to_string('mail/email_launch_turntouch.xhtml', data)
subject = "Introducing Turn Touch for NewsBlur"
msg = EmailMultiAlternatives(subject, text,
from_email='NewsBlur <%s>' % settings.HELLO_EMAIL,
to=['%s <%s>' % (user, user.email)])
msg.attach_alternative(html, "text/html")
msg.send(fail_silently=True)
logging.user(self.user, "~BB~FM~SBSending launch TT email for user: %s months, %s" % (months_ago, self.user.email))
def send_launch_turntouch_end_email(self, force=False):
if not self.user.email or not self.send_emails:
logging.user(self.user, "~FM~SB~FRNot~FM sending launch TT end email for user, %s: %s" % (self.user.email and 'opt-out: ' or 'blank', self.user.email))
return
params = dict(receiver_user_id=self.user.pk, email_type='launch_turntouch_end')
try:
MSentEmail.objects.get(**params)
if not force:
# Return if email already sent
logging.user(self.user, "~FM~SB~FRNot~FM sending launch TT end email for user, sent already: %s" % self.user.email)
return
except MSentEmail.DoesNotExist:
MSentEmail.objects.create(**params)
delta = datetime.datetime.now() - self.last_seen_on
months_ago = delta.days / 30
user = self.user
data = dict(user=user, months_ago=months_ago)
text = render_to_string('mail/email_launch_turntouch_end.txt', data)
html = render_to_string('mail/email_launch_turntouch_end.xhtml', data)
subject = "Last day to back Turn Touch: NewsBlur's beautiful remote"
msg = EmailMultiAlternatives(subject, text,
from_email='NewsBlur <%s>' % settings.HELLO_EMAIL,
to=['%s <%s>' % (user, user.email)])
msg.attach_alternative(html, "text/html")
msg.send(fail_silently=True)
logging.user(self.user, "~BB~FM~SBSending launch TT end email for user: %s months, %s" % (months_ago, self.user.email))
def grace_period_email_sent(self, force=False):
emails_sent = MSentEmail.objects.filter(receiver_user_id=self.user.pk,
email_type='premium_expire_grace')
        year_ago = datetime.datetime.now() - datetime.timedelta(days=360)
        for email in emails_sent:
            if email.date_sent > year_ago and not force:
logging.user(self.user, "~SN~FMNot sending premium expire grace email, already sent before.")
return True
def send_premium_expire_grace_period_email(self, force=False):
if not self.user.email:
logging.user(self.user, "~FM~SB~FRNot~FM~SN sending premium expire grace for user: %s" % (self.user))
return
if self.grace_period_email_sent(force=force):
return
if self.premium_expire and self.premium_expire < datetime.datetime.now():
self.premium_expire = datetime.datetime.now()
self.save()
delta = datetime.datetime.now() - self.last_seen_on
months_ago = delta.days / 30
user = self.user
data = dict(user=user, months_ago=months_ago)
text = render_to_string('mail/email_premium_expire_grace.txt', data)
html = render_to_string('mail/email_premium_expire_grace.xhtml', data)
subject = "Your premium account on NewsBlur has one more month!"
msg = EmailMultiAlternatives(subject, text,
from_email='NewsBlur <%s>' % settings.HELLO_EMAIL,
to=['%s <%s>' % (user, user.email)])
msg.attach_alternative(html, "text/html")
msg.send(fail_silently=True)
MSentEmail.record(receiver_user_id=self.user.pk, email_type='premium_expire_grace')
logging.user(self.user, "~BB~FM~SBSending premium expire grace email for user: %s months, %s" % (months_ago, self.user.email))
def send_premium_expire_email(self, force=False):
if not self.user.email:
logging.user(self.user, "~FM~SB~FRNot~FM sending premium expire for user: %s" % (self.user))
return
emails_sent = MSentEmail.objects.filter(receiver_user_id=self.user.pk,
email_type='premium_expire')
        year_ago = datetime.datetime.now() - datetime.timedelta(days=360)
        for email in emails_sent:
            if email.date_sent > year_ago and not force:
logging.user(self.user, "~FM~SBNot sending premium expire email, already sent before.")
return
delta = datetime.datetime.now() - self.last_seen_on
months_ago = delta.days / 30
user = self.user
data = dict(user=user, months_ago=months_ago)
text = render_to_string('mail/email_premium_expire.txt', data)
html = render_to_string('mail/email_premium_expire.xhtml', data)
subject = "Your premium account on NewsBlur has expired"
msg = EmailMultiAlternatives(subject, text,
from_email='NewsBlur <%s>' % settings.HELLO_EMAIL,
to=['%s <%s>' % (user, user.email)])
msg.attach_alternative(html, "text/html")
msg.send(fail_silently=True)
MSentEmail.record(receiver_user_id=self.user.pk, email_type='premium_expire')
logging.user(self.user, "~BB~FM~SBSending premium expire email for user: %s months, %s" % (months_ago, self.user.email))
def autologin_url(self, next=None):
return reverse('autologin', kwargs={
'username': self.user.username,
'secret': self.secret_token
}) + ('?' + next + '=1' if next else '')
@classmethod
def doublecheck_paypal_payments(cls, days=14):
payments = PayPalIPN.objects.filter(txn_type='subscr_payment',
updated_at__gte=datetime.datetime.now()
- datetime.timedelta(days)
).order_by('-created_at')
for payment in payments:
try:
profile = Profile.objects.get(user__username=payment.custom)
except Profile.DoesNotExist:
logging.debug(" ---> ~FRCouldn't find user: ~SB~FC%s" % payment.custom)
continue
profile.setup_premium_history(check_premium=True)
class StripeIds(models.Model):
user = models.ForeignKey(User, related_name='stripe_ids')
stripe_id = models.CharField(max_length=24, blank=True, null=True)
def __unicode__(self):
return "%s: %s" % (self.user.username, self.stripe_id)
def create_profile(sender, instance, created, **kwargs):
if created:
Profile.objects.create(user=instance)
else:
Profile.objects.get_or_create(user=instance)
post_save.connect(create_profile, sender=User)
def paypal_signup(sender, **kwargs):
ipn_obj = sender
try:
user = User.objects.get(username__iexact=ipn_obj.custom)
except User.DoesNotExist:
user = User.objects.get(email__iexact=ipn_obj.payer_email)
logging.user(user, "~BC~SB~FBPaypal subscription signup")
try:
if not user.email:
user.email = ipn_obj.payer_email
user.save()
except:
pass
user.profile.activate_premium()
user.profile.cancel_premium_stripe()
user.profile.cancel_premium_paypal(second_most_recent_only=True)
subscription_signup.connect(paypal_signup)
def paypal_payment_history_sync(sender, **kwargs):
ipn_obj = sender
try:
user = User.objects.get(username__iexact=ipn_obj.custom)
except User.DoesNotExist:
user = User.objects.get(email__iexact=ipn_obj.payer_email)
logging.user(user, "~BC~SB~FBPaypal subscription payment")
try:
user.profile.setup_premium_history(check_premium=True)
except:
return {"code": -1, "message": "User doesn't exist."}
payment_was_successful.connect(paypal_payment_history_sync)
def paypal_payment_was_flagged(sender, **kwargs):
ipn_obj = sender
try:
user = User.objects.get(username__iexact=ipn_obj.custom)
except User.DoesNotExist:
if ipn_obj.payer_email:
user = User.objects.get(email__iexact=ipn_obj.payer_email)
try:
user.profile.setup_premium_history(check_premium=True)
logging.user(user, "~BC~SB~FBPaypal subscription payment flagged")
except:
return {"code": -1, "message": "User doesn't exist."}
payment_was_flagged.connect(paypal_payment_was_flagged)
def paypal_recurring_payment_history_sync(sender, **kwargs):
ipn_obj = sender
try:
user = User.objects.get(username__iexact=ipn_obj.custom)
except User.DoesNotExist:
user = User.objects.get(email__iexact=ipn_obj.payer_email)
logging.user(user, "~BC~SB~FBPaypal subscription recurring payment")
try:
user.profile.setup_premium_history(check_premium=True)
except:
return {"code": -1, "message": "User doesn't exist."}
recurring_payment.connect(paypal_recurring_payment_history_sync)
def stripe_signup(sender, full_json, **kwargs):
stripe_id = full_json['data']['object']['customer']
try:
profile = Profile.objects.get(stripe_id=stripe_id)
logging.user(profile.user, "~BC~SB~FBStripe subscription signup")
profile.activate_premium()
profile.cancel_premium_paypal()
profile.retrieve_stripe_ids()
except Profile.DoesNotExist:
return {"code": -1, "message": "User doesn't exist."}
zebra_webhook_customer_subscription_created.connect(stripe_signup)
def stripe_payment_history_sync(sender, full_json, **kwargs):
stripe_id = full_json['data']['object']['customer']
try:
profile = Profile.objects.get(stripe_id=stripe_id)
logging.user(profile.user, "~BC~SB~FBStripe subscription payment")
profile.setup_premium_history(check_premium=True)
except Profile.DoesNotExist:
return {"code": -1, "message": "User doesn't exist."}
zebra_webhook_charge_succeeded.connect(stripe_payment_history_sync)
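# Returns -1 when neither the old password nor the blank-password/username
# fallback authenticates, and 1 when the check succeeds (the new password is
# saved unless only_check=True).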
def change_password(user, old_password, new_password, only_check=False):
user_db = authenticate(username=user.username, password=old_password)
if user_db is None:
blank = blank_authenticate(user.username)
if blank and not only_check:
user.set_password(new_password or user.username)
user.save()
if user_db is None:
user_db = authenticate(username=user.username, password=user.username)
if not user_db:
return -1
else:
if not only_check:
user_db.set_password(new_password)
user_db.save()
return 1
def blank_authenticate(username, password=""):
try:
user = User.objects.get(username__iexact=username)
except User.DoesNotExist:
return
if user.password == "!":
return user
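    # Legacy Django password format: 'sha1$<salt>$<hash>'; a "blank" account is
    # one whose stored hash equals sha1(salt + '').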
algorithm, salt, hash = user.password.split('$', 2)
encoded_blank = hashlib.sha1(salt + password).hexdigest()
encoded_username = authenticate(username=username, password=username)
if encoded_blank == hash or encoded_username == user:
return user
# Unfinished
class MEmailUnsubscribe(mongo.Document):
user_id = mongo.IntField()
email_type = mongo.StringField()
date = mongo.DateTimeField(default=datetime.datetime.now)
EMAIL_TYPE_FOLLOWS = 'follows'
EMAIL_TYPE_REPLIES = 'replies'
    EMAIL_TYPE_PRODUCT = 'product'
meta = {
'collection': 'email_unsubscribes',
'allow_inheritance': False,
'indexes': ['user_id',
{'fields': ['user_id', 'email_type'],
'unique': True,
'types': False}],
}
def __unicode__(self):
return "%s unsubscribed from %s on %s" % (self.user_id, self.email_type, self.date)
@classmethod
def user(cls, user_id):
unsubs = cls.objects(user_id=user_id)
return unsubs
@classmethod
def unsubscribe(cls, user_id, email_type):
        cls.objects.create(user_id=user_id, email_type=email_type)
class MSentEmail(mongo.Document):
sending_user_id = mongo.IntField()
receiver_user_id = mongo.IntField()
email_type = mongo.StringField()
date_sent = mongo.DateTimeField(default=datetime.datetime.now)
meta = {
'collection': 'sent_emails',
'allow_inheritance': False,
'indexes': ['sending_user_id', 'receiver_user_id', 'email_type'],
}
def __unicode__(self):
return "%s sent %s email to %s" % (self.sending_user_id, self.email_type, self.receiver_user_id)
@classmethod
def record(cls, email_type, receiver_user_id, sending_user_id=None):
cls.objects.create(email_type=email_type,
receiver_user_id=receiver_user_id,
sending_user_id=sending_user_id)
class PaymentHistory(models.Model):
user = models.ForeignKey(User, related_name='payments')
payment_date = models.DateTimeField()
payment_amount = models.IntegerField()
payment_provider = models.CharField(max_length=20)
payment_identifier = models.CharField(max_length=100, null=True)
def __unicode__(self):
return "[%s] $%s/%s" % (self.payment_date.strftime("%Y-%m-%d"), self.payment_amount,
self.payment_provider)
class Meta:
ordering = ['-payment_date']
def canonical(self):
return {
'payment_date': self.payment_date.strftime('%Y-%m-%d'),
'payment_amount': self.payment_amount,
'payment_provider': self.payment_provider,
}
@classmethod
def report(cls, months=26):
output = ""
def _counter(start_date, end_date, output, payments=None):
if not payments:
payments = PaymentHistory.objects.filter(payment_date__gte=start_date, payment_date__lte=end_date)
payments = payments.aggregate(avg=Avg('payment_amount'),
sum=Sum('payment_amount'),
count=Count('user'))
output += "%s-%02d-%02d - %s-%02d-%02d:\t$%.2f\t$%-6s\t%-4s\n" % (
start_date.year, start_date.month, start_date.day,
end_date.year, end_date.month, end_date.day,
round(payments['avg'] if payments['avg'] else 0, 2), payments['sum'] if payments['sum'] else 0, payments['count'])
return payments, output
output += "\nMonthly Totals:\n"
for m in reversed(range(months)):
now = datetime.datetime.now()
start_date = datetime.datetime(now.year, now.month, 1) - dateutil.relativedelta.relativedelta(months=m)
end_time = start_date + datetime.timedelta(days=31)
end_date = datetime.datetime(end_time.year, end_time.month, 1) - datetime.timedelta(seconds=1)
total, output = _counter(start_date, end_date, output)
total = total['sum']
output += "\nMTD Totals:\n"
years = datetime.datetime.now().year - 2009
this_mtd_avg = 0
last_mtd_avg = 0
last_mtd_sum = 0
this_mtd_sum = 0
last_mtd_count = 0
this_mtd_count = 0
for y in reversed(range(years)):
now = datetime.datetime.now()
start_date = datetime.datetime(now.year, now.month, 1) - dateutil.relativedelta.relativedelta(years=y)
end_date = now - dateutil.relativedelta.relativedelta(years=y)
if end_date > now: end_date = now
count, output = _counter(start_date, end_date, output)
if end_date.year != now.year:
last_mtd_avg = count['avg'] or 0
last_mtd_sum = count['sum'] or 0
last_mtd_count = count['count']
else:
this_mtd_avg = count['avg'] or 0
this_mtd_sum = count['sum'] or 0
this_mtd_count = count['count']
output += "\nCurrent Month Totals:\n"
years = datetime.datetime.now().year - 2009
last_month_avg = 0
last_month_sum = 0
last_month_count = 0
for y in reversed(range(years)):
now = datetime.datetime.now()
start_date = datetime.datetime(now.year, now.month, 1) - dateutil.relativedelta.relativedelta(years=y)
end_time = start_date + datetime.timedelta(days=31)
end_date = datetime.datetime(end_time.year, end_time.month, 1) - datetime.timedelta(seconds=1)
if end_date > now:
payments = {'avg': this_mtd_avg / (max(1, last_mtd_avg) / float(max(1, last_month_avg))),
'sum': int(round(this_mtd_sum / (max(1, last_mtd_sum) / float(max(1, last_month_sum))))),
'count': int(round(this_mtd_count / (max(1, last_mtd_count) / float(max(1, last_month_count)))))}
_, output = _counter(start_date, end_date, output, payments=payments)
else:
count, output = _counter(start_date, end_date, output)
last_month_avg = count['avg']
last_month_sum = count['sum']
last_month_count = count['count']
output += "\nYTD Totals:\n"
years = datetime.datetime.now().year - 2009
this_ytd_avg = 0
last_ytd_avg = 0
this_ytd_sum = 0
last_ytd_sum = 0
this_ytd_count = 0
last_ytd_count = 0
for y in reversed(range(years)):
now = datetime.datetime.now()
start_date = datetime.datetime(now.year, 1, 1) - dateutil.relativedelta.relativedelta(years=y)
end_date = now - dateutil.relativedelta.relativedelta(years=y)
count, output = _counter(start_date, end_date, output)
if end_date.year != now.year:
last_ytd_avg = count['avg'] or 0
last_ytd_sum = count['sum'] or 0
last_ytd_count = count['count']
else:
this_ytd_avg = count['avg'] or 0
this_ytd_sum = count['sum'] or 0
this_ytd_count = count['count']
output += "\nYearly Totals:\n"
years = datetime.datetime.now().year - 2009
last_year_avg = 0
last_year_sum = 0
last_year_count = 0
annual = 0
for y in reversed(range(years)):
now = datetime.datetime.now()
start_date = datetime.datetime(now.year, 1, 1) - dateutil.relativedelta.relativedelta(years=y)
end_date = datetime.datetime(now.year, 1, 1) - dateutil.relativedelta.relativedelta(years=y-1) - datetime.timedelta(seconds=1)
if end_date > now:
payments = {'avg': this_ytd_avg / (max(1, last_ytd_avg) / float(max(1, last_year_avg))),
'sum': int(round(this_ytd_sum / (max(1, last_ytd_sum) / float(max(1, last_year_sum))))),
'count': int(round(this_ytd_count / (max(1, last_ytd_count) / float(max(1, last_year_count)))))}
count, output = _counter(start_date, end_date, output, payments=payments)
annual = count['sum']
else:
count, output = _counter(start_date, end_date, output)
last_year_avg = count['avg'] or 0
last_year_sum = count['sum'] or 0
last_year_count = count['count']
total = cls.objects.all().aggregate(sum=Sum('payment_amount'))
output += "\nTotal: $%s\n" % total['sum']
print output
return {'annual': annual, 'output': output}
class MGiftCode(mongo.Document):
gifting_user_id = mongo.IntField()
receiving_user_id = mongo.IntField()
gift_code = mongo.StringField(max_length=12)
duration_days = mongo.IntField()
payment_amount = mongo.IntField()
created_date = mongo.DateTimeField(default=datetime.datetime.now)
meta = {
'collection': 'gift_codes',
'allow_inheritance': False,
'indexes': ['gifting_user_id', 'receiving_user_id', 'created_date'],
}
def __unicode__(self):
return "%s gifted %s on %s: %s (redeemed %s times)" % (self.gifting_user_id, self.receiving_user_id, self.created_date, self.gift_code, self.redeemed)
@property
def redeemed(self):
redeemed_code = MRedeemedCode.objects.filter(gift_code=self.gift_code)
return len(redeemed_code)
@staticmethod
def create_code(gift_code=None):
u = unicode(uuid.uuid4())
code = u[:8] + u[9:13]
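        # e.g. a hypothetical uuid '1a2b3c4d-5e6f-47a8-...' yields '1a2b3c4d5e6f'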
if gift_code:
code = gift_code + code[len(gift_code):]
return code
@classmethod
def add(cls, gift_code=None, duration=0, gifting_user_id=None, receiving_user_id=None, payment=0):
return cls.objects.create(gift_code=cls.create_code(gift_code),
gifting_user_id=gifting_user_id,
receiving_user_id=receiving_user_id,
duration_days=duration,
payment_amount=payment)
class MRedeemedCode(mongo.Document):
user_id = mongo.IntField()
gift_code = mongo.StringField()
redeemed_date = mongo.DateTimeField(default=datetime.datetime.now)
meta = {
'collection': 'redeemed_codes',
'allow_inheritance': False,
'indexes': ['user_id', 'gift_code', 'redeemed_date'],
}
def __unicode__(self):
return "%s redeemed %s on %s" % (self.user_id, self.gift_code, self.redeemed_date)
@classmethod
def record(cls, user_id, gift_code):
cls.objects.create(user_id=user_id,
gift_code=gift_code)
@classmethod
def redeem(cls, user, gift_code):
newsblur_gift_code = MGiftCode.objects.filter(gift_code__iexact=gift_code)
if newsblur_gift_code:
newsblur_gift_code = newsblur_gift_code[0]
PaymentHistory.objects.create(user=user,
payment_date=datetime.datetime.now(),
payment_amount=newsblur_gift_code.payment_amount,
payment_provider='newsblur-gift')
else:
# Thinkup / Good Web Bundle
PaymentHistory.objects.create(user=user,
payment_date=datetime.datetime.now(),
payment_amount=12,
payment_provider='good-web-bundle')
cls.record(user.pk, gift_code)
user.profile.activate_premium()
logging.user(user, "~FG~BBRedeeming gift code: %s~FW" % gift_code)
class MCustomStyling(mongo.Document):
user_id = mongo.IntField(unique=True)
custom_css = mongo.StringField()
custom_js = mongo.StringField()
updated_date = mongo.DateTimeField(default=datetime.datetime.now)
meta = {
'collection': 'custom_styling',
'allow_inheritance': False,
'indexes': ['user_id'],
}
def __unicode__(self):
return "%s custom style %s/%s %s" % (self.user_id, len(self.custom_css) if self.custom_css else "-",
len(self.custom_js) if self.custom_js else "-", self.updated_date)
def canonical(self):
return {
'css': self.custom_css,
'js': self.custom_js,
}
@classmethod
def get_user(cls, user_id):
try:
styling = cls.objects.get(user_id=user_id)
except cls.DoesNotExist:
return None
return styling
@classmethod
def save_user(cls, user_id, css, js):
styling = cls.get_user(user_id)
if not css and not js:
if styling:
styling.delete()
return
if not styling:
styling = cls.objects.create(user_id=user_id)
styling.custom_css = css
styling.custom_js = js
styling.save()
class RNewUserQueue:
KEY = "new_user_queue"
@classmethod
def activate_next(cls):
count = cls.user_count()
if not count:
return
user_id = cls.pop_user()
try:
user = User.objects.get(pk=user_id)
except User.DoesNotExist:
logging.debug("~FRCan't activate free account, can't find user ~SB%s~SN. ~FB%s still in queue." % (user_id, count-1))
return
logging.user(user, "~FBActivating free account (%s / %s). %s still in queue." % (user.email, user.profile.last_seen_ip, (count-1)))
user.profile.activate_free()
@classmethod
def activate_all(cls):
count = cls.user_count()
if not count:
logging.debug("~FBNo users to activate, sleeping...")
return
for i in range(count):
cls.activate_next()
@classmethod
def add_user(cls, user_id):
r = redis.Redis(connection_pool=settings.REDIS_FEED_UPDATE_POOL)
now = time.time()
r.zadd(cls.KEY, user_id, now)
@classmethod
def user_count(cls):
r = redis.Redis(connection_pool=settings.REDIS_FEED_UPDATE_POOL)
count = r.zcard(cls.KEY)
return count
@classmethod
def user_position(cls, user_id):
r = redis.Redis(connection_pool=settings.REDIS_FEED_UPDATE_POOL)
position = r.zrank(cls.KEY, user_id)
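        # zrank is 0-based (hence the +1 for a 1-based queue position) and
        # returns None when the user is not in the queue.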
if position >= 0:
return position + 1
@classmethod
def pop_user(cls):
r = redis.Redis(connection_pool=settings.REDIS_FEED_UPDATE_POOL)
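        # zrange(KEY, 0, 0) returns the member with the lowest score, i.e. the
        # user who has been waiting in the queue the longest.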
user = r.zrange(cls.KEY, 0, 0)[0]
r.zrem(cls.KEY, user)
return user
|
|
# -*- coding: utf-8 -*-
import os # noqa: F401
import re
import shutil
import time
import unittest
from configparser import ConfigParser
from os import environ
from installed_clients.DataFileUtilClient import DataFileUtil
from GenomeFileUtil.GenomeFileUtilImpl import GenomeFileUtil
from GenomeFileUtil.GenomeFileUtilImpl import SDKConfig
from GenomeFileUtil.GenomeFileUtilServer import MethodContext
from GenomeFileUtil.authclient import KBaseAuth as _KBaseAuth
from GenomeFileUtil.core.FastaGFFToGenome import FastaGFFToGenome
from installed_clients.WorkspaceClient import Workspace as workspaceService
class FastaGFFToGenomeUploadTest(unittest.TestCase):
@classmethod
def setUpClass(cls):
print('setting up class')
cls.token = environ.get('KB_AUTH_TOKEN')
config_file = environ['KB_DEPLOYMENT_CONFIG']
cls.cfg = {}
config = ConfigParser()
config.read(config_file)
for nameval in config.items('GenomeFileUtil'):
cls.cfg[nameval[0]] = nameval[1]
authServiceUrl = cls.cfg.get('auth-service-url',
"https://kbase.us/services/authorization/Sessions/Login")
auth_client = _KBaseAuth(authServiceUrl)
cls.user_id = auth_client.get_user(cls.token)
cls.ctx = MethodContext(None)
cls.ctx.update({'token': cls.token,
'user_id': cls.user_id,
'provenance': [
{'service': 'GenomeFileUtil',
'method': 'please_never_use_it_in_production',
'method_params': []
}],
'authenticated': 1})
cls.wsURL = cls.cfg['workspace-url']
cls.wsClient = workspaceService(cls.wsURL, token=cls.token)
cls.serviceImpl = GenomeFileUtil(cls.cfg)
cls.dfu = DataFileUtil(os.environ['SDK_CALLBACK_URL'], token=cls.token)
cls.scratch = cls.cfg['scratch']
cls.shockURL = cls.cfg['shock-url']
cls.gfu_cfg = SDKConfig(cls.cfg)
cls.prepare_data()
@classmethod
def tearDownClass(cls):
if hasattr(cls, 'wsName'):
cls.wsClient.delete_workspace({'workspace': cls.wsName})
print('Test workspace was deleted')
def getWsClient(self):
return self.__class__.wsClient
def getWsName(self):
if hasattr(self.__class__, 'wsName'):
return self.__class__.wsName
suffix = int(time.time() * 1000)
wsName = "test_GenomeFileUtil_" + str(suffix)
ret = self.getWsClient().create_workspace({'workspace': wsName}) # noqa
self.__class__.wsName = wsName
return wsName
def getImpl(self):
return self.__class__.serviceImpl
def getContext(self):
return self.__class__.ctx
@classmethod
def prepare_data(cls):
cls.importer = FastaGFFToGenome(cls.gfu_cfg)
cls.gff_filename = 'Test_v1.0.gene.gff3.gz'
cls.gff_path = os.path.join(cls.scratch, cls.gff_filename)
shutil.copy(os.path.join("data", "fasta_gff", "JGI", "Plant_Data", cls.gff_filename), cls.gff_path)
cls.fa_filename = 'Test_v1.0.fa.gz'
cls.fa_path = os.path.join(cls.scratch, cls.fa_filename)
shutil.copy(os.path.join("data", "fasta_gff", "JGI", "Plant_Data", cls.fa_filename), cls.fa_path)
cls.fungal_gff_filename = 'Neucr2.filtered_proteins.BroadModels.gff3.gz'
cls.fungal_gff_path = os.path.join(cls.scratch, cls.fungal_gff_filename)
shutil.copy(os.path.join("data", "fasta_gff", "JGI", "Fungal_Data", cls.fungal_gff_filename),
cls.fungal_gff_path)
cls.fungal_fa_filename = 'Neucr2_AssemblyScaffolds.fasta.gz'
cls.fungal_fa_path = os.path.join(cls.scratch, cls.fungal_fa_filename)
shutil.copy(os.path.join("data", "fasta_gff", "JGI", "Fungal_Data", cls.fungal_fa_filename),
cls.fungal_fa_path)
cls.jgi_bacterial_gff_filename = '2547132501.gff.gz'
cls.jgi_bacterial_gff_path = os.path.join(cls.scratch, cls.jgi_bacterial_gff_filename)
shutil.copy(os.path.join("data", "fasta_gff", "JGI", "Bacterial_Data", cls.jgi_bacterial_gff_filename),
cls.jgi_bacterial_gff_path)
cls.jgi_bacterial_fa_filename = '2547132501.fna.gz'
cls.jgi_bacterial_fa_path = os.path.join(cls.scratch, cls.jgi_bacterial_fa_filename)
shutil.copy(os.path.join("data", "fasta_gff", "JGI", "Bacterial_Data", cls.jgi_bacterial_fa_filename),
cls.jgi_bacterial_fa_path)
cls.jgi_bacterial_gff2_filename = '91705.assembled.gff'
cls.jgi_bacterial_gff2_path = os.path.join(cls.scratch, cls.jgi_bacterial_gff2_filename)
shutil.copy(os.path.join("data", "fasta_gff", "JGI", "Bacterial_Data", cls.jgi_bacterial_gff2_filename),
cls.jgi_bacterial_gff2_path)
cls.jgi_bacterial_fa2_filename = '91705.assembled.fna'
cls.jgi_bacterial_fa2_path = os.path.join(cls.scratch, cls.jgi_bacterial_fa2_filename)
shutil.copy(os.path.join("data", "fasta_gff", "JGI", "Bacterial_Data", cls.jgi_bacterial_fa2_filename),
cls.jgi_bacterial_fa2_path)
cls.patric_bacterial_gff_filename = '1240778.3.PATRIC.gff.gz'
cls.patric_bacterial_gff_path = os.path.join(cls.scratch, cls.patric_bacterial_gff_filename)
shutil.copy(os.path.join("data", "fasta_gff", "PATRIC", "Ecoli_O104", cls.patric_bacterial_gff_filename),
cls.patric_bacterial_gff_path)
cls.patric_bacterial_fa_filename = '1240778.3.fna.gz'
cls.patric_bacterial_fa_path = os.path.join(cls.scratch, cls.patric_bacterial_fa_filename)
shutil.copy(os.path.join("data", "fasta_gff", "PATRIC", "Ecoli_O104", cls.patric_bacterial_fa_filename),
cls.patric_bacterial_fa_path)
cls.refseq_bacterial_gff_filename = 'NC_021490.gff.gz'
cls.refseq_bacterial_gff_path = os.path.join(cls.scratch, cls.refseq_bacterial_gff_filename)
shutil.copy(os.path.join("data", "fasta_gff", "RefSeq", "Bacterial_Data", cls.refseq_bacterial_gff_filename),
cls.refseq_bacterial_gff_path)
cls.refseq_bacterial_fa_filename = 'NC_021490.fasta.gz'
cls.refseq_bacterial_fa_path = os.path.join(cls.scratch, cls.refseq_bacterial_fa_filename)
shutil.copy(os.path.join("data", "fasta_gff", "RefSeq", "Bacterial_Data", cls.refseq_bacterial_fa_filename),
cls.refseq_bacterial_fa_path)
def check_minimal_items_exist(self, result):
self.assertTrue('genome_info' in result)
self.assertTrue('genome_ref' in result)
genome_info = result['genome_info']
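        # genome_info is a workspace object_info tuple; index 10 is the
        # metadata dict whose fields are asserted below.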
self.assertEqual(genome_info[10]['Domain'], 'Unknown')
self.assertEqual(genome_info[10]['Genetic code'], '11')
self.assertEqual(genome_info[10]['Name'], 'unknown_taxon')
self.assertEqual(genome_info[10]['Source'], 'Genbank')
self.assertTrue('GC content' in genome_info[10])
self.assertTrue(re.match(r"^\d+?\.\d+?$", genome_info[10]['GC content']) is not None)
self.assertTrue('Number of Protein Encoding Genes' in genome_info[10])
self.assertTrue(genome_info[10]['Number of Protein Encoding Genes'].isdigit())
self.assertTrue('Size' in genome_info[10])
self.assertTrue(genome_info[10]['Size'].isdigit())
self.assertEqual(genome_info[10]['Taxonomy'], 'Unconfirmed Organism: unknown_taxon')
def print_genome_warnings(self, result):
data_file_cli = DataFileUtil(os.environ['SDK_CALLBACK_URL'],
service_ver='dev')
genome = data_file_cli.get_objects({'object_refs': [result['genome_ref']]})['data'][0]['data']
if 'warnings' in genome:
print("Genome warnings:" + str(genome['warnings']))
def check_CDS_warnings(self, result, test_name):
data_file_cli = DataFileUtil(os.environ['SDK_CALLBACK_URL'],
service_ver='dev')
genome = data_file_cli.get_objects({'object_refs': [result['genome_ref']]})['data'][0]['data']
print("IN TEST NAME : " + str(test_name))
cds_warning_count = 0
cds_with_warning_count = 0
if 'cdss' in genome:
total_cds_count = len(genome['cdss'])
for feature in genome['cdss']:
if 'warnings' in feature:
if test_name == "test_jgi_bacterial_fasta_gff2_to_genome":
print(str(feature['id']) + " warnings:" + str(feature['warnings']))
print("Location: " + str(feature['location']))
print("Translation: " + feature['protein_translation'])
print("DNA Sequence: " + feature["dna_sequence"])
cds_with_warning_count = cds_with_warning_count + 1
cds_warning_count = cds_warning_count + len(feature['warnings'])
print("Total CDS: " + str(total_cds_count))
print("CDS Warning Count: " + str(cds_warning_count))
print("CDSs with a warning Count: " + str(cds_with_warning_count))
print("Percent CDS with warning: " + str((cds_with_warning_count/float(total_cds_count)) * 100))
def test_simple_fasta_gff_to_genome_w_null_params(self):
input_params = {
"fasta_file": {'path': self.fa_path},
"gff_file": {'path': self.gff_path},
"workspace_name": self.getWsName(),
"genome_name": 'MyGenome',
"scientific_name": None,
"taxon_reference": None,
"genetic_code": None,
"source": None,
"taxon_wsname": None,
"release": None,
"type": None
}
result = self.getImpl().fasta_gff_to_genome(self.getContext(), input_params)[0]
self.assertTrue('genome_info' in result)
self.assertTrue('genome_ref' in result)
genome_info = result['genome_info']
self.assertEqual(genome_info[10]['Domain'], 'Unknown')
self.assertEqual(genome_info[10]['Genetic code'], '11')
self.assertEqual(genome_info[10]['Name'], 'unknown_taxon')
self.assertEqual(genome_info[10]['Source'], 'User')
self.assertTrue('GC content' in genome_info[10])
self.assertTrue(re.match(r"^\d+?\.\d+?$", genome_info[10]['GC content']) is not None)
self.assertTrue('Number of Protein Encoding Genes' in genome_info[10])
self.assertTrue(genome_info[10]['Number of Protein Encoding Genes'].isdigit())
self.assertTrue('Size' in genome_info[10])
self.assertTrue(genome_info[10]['Size'].isdigit())
self.assertEqual(genome_info[10]['Taxonomy'], 'Unconfirmed Organism')
def test_fasta_gff_to_genome_json(self):
input_params = {
'fasta_file': {'path': self.fa_path},
'gff_file': {'path': self.gff_path},
'genome_name': 'Plant',
'workspace_name': self.getWsName(),
'source': 'Genbank',
'type': 'Reference',
'taxon_id': 3694,
'scientific_name': 'Populus trichocarpa'
}
genome_json = self.getImpl().fasta_gff_to_genome_json(self.getContext(), input_params)[0][0]
assert 'features' in genome_json
assert 'feature_counts' in genome_json
assert 'genome_tiers' in genome_json
self.assertEqual(genome_json['domain'], 'Eukaryota')
self.assertEqual(genome_json['genetic_code'], 1)
self.assertEqual(genome_json['scientific_name'], 'Populus trichocarpa')
self.assertEqual(genome_json['source'], 'Genbank')
self.assertTrue('gc_content' in genome_json)
self.assertTrue('dna_size' in genome_json)
self.assertEqual(genome_json['taxonomy'],
'cellular organisms; Eukaryota; Viridiplantae; Streptophyta; ' +
'Streptophytina; Embryophyta; Tracheophyta; Euphyllophyta; ' +
'Spermatophyta; Magnoliopsida; Mesangiospermae; eudicotyledons; ' +
'Gunneridae; Pentapetalae; rosids; fabids; Malpighiales; Salicaceae; ' +
'Saliceae; Populus')
|
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for DCT operations."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import importlib
from absl.testing import parameterized
import numpy as np
from tensorflow.python.framework import test_util
from tensorflow.python.ops import spectral_ops_test_util
from tensorflow.python.ops.signal import dct_ops
from tensorflow.python.platform import test
from tensorflow.python.platform import tf_logging
def try_import(name): # pylint: disable=invalid-name
module = None
try:
module = importlib.import_module(name)
except ImportError as e:
tf_logging.warning("Could not import %s: %s" % (name, str(e)))
return module
fftpack = try_import("scipy.fftpack")
def _modify_input_for_dct(signals, n=None):
""" This is a supporting function for the numpy implementation
of DCT operations. If n < signal size, it returns the first n elements,
else it pads the signal with zeros.
"""
signal = np.array(signals)
if n is None or n == signal.shape[-1]:
signal_mod = signal
elif n >= 1:
signal_len = signal.shape[-1]
if n <= signal_len:
signal_mod = signal[..., 0:n]
else:
output_shape = list(signal.shape)
output_shape[-1] = n
signal_mod = np.zeros(output_shape)
signal_mod[..., 0:signal.shape[-1]] = signal
if n:
assert signal_mod.shape[-1] == n
return signal_mod
def _np_dct1(signals, n=None, norm=None):
"""Computes the DCT-I manually with NumPy."""
# X_k = (x_0 + (-1)**k * x_{N-1} +
# 2 * sum_{n=0}^{N-2} x_n * cos(\frac{pi}{N-1} * n * k) k=0,...,N-1
del norm
signals_mod = _modify_input_for_dct(signals, n=n)
dct_size = signals_mod.shape[-1]
dct = np.zeros_like(signals_mod)
for k in range(dct_size):
phi = np.cos(np.pi * np.arange(1, dct_size - 1) * k / (dct_size - 1))
dct[..., k] = 2 * np.sum(
signals_mod[..., 1:-1] * phi, axis=-1) + (
signals_mod[..., 0] + (-1)**k * signals_mod[..., -1])
return dct
def _np_dct2(signals, n=None, norm=None):
"""Computes the DCT-II manually with NumPy."""
# X_k = sum_{n=0}^{N-1} x_n * cos(\frac{pi}{N} * (n + 0.5) * k) k=0,...,N-1
signals_mod = _modify_input_for_dct(signals, n=n)
dct_size = signals_mod.shape[-1]
dct = np.zeros_like(signals_mod)
for k in range(dct_size):
phi = np.cos(np.pi * (np.arange(dct_size) + 0.5) * k / dct_size)
dct[..., k] = np.sum(signals_mod * phi, axis=-1)
# SciPy's `dct` has a scaling factor of 2.0 which we follow.
# https://github.com/scipy/scipy/blob/v0.15.1/scipy/fftpack/src/dct.c.src
if norm == "ortho":
# The orthonormal scaling includes a factor of 0.5 which we combine with
# the overall scaling of 2.0 to cancel.
dct[..., 0] *= np.sqrt(1.0 / dct_size)
dct[..., 1:] *= np.sqrt(2.0 / dct_size)
else:
dct *= 2.0
return dct
def _np_dct3(signals, n=None, norm=None):
"""Computes the DCT-III manually with NumPy."""
# SciPy's `dct` has a scaling factor of 2.0 which we follow.
# https://github.com/scipy/scipy/blob/v0.15.1/scipy/fftpack/src/dct.c.src
signals_mod = _modify_input_for_dct(signals, n=n)
dct_size = signals_mod.shape[-1]
signals_mod = np.array(signals_mod) # make a copy so we can modify
if norm == "ortho":
signals_mod[..., 0] *= np.sqrt(4.0 / dct_size)
signals_mod[..., 1:] *= np.sqrt(2.0 / dct_size)
else:
signals_mod *= 2.0
dct = np.zeros_like(signals_mod)
# X_k = 0.5 * x_0 +
# sum_{n=1}^{N-1} x_n * cos(\frac{pi}{N} * n * (k + 0.5)) k=0,...,N-1
half_x0 = 0.5 * signals_mod[..., 0]
for k in range(dct_size):
phi = np.cos(np.pi * np.arange(1, dct_size) * (k + 0.5) / dct_size)
dct[..., k] = half_x0 + np.sum(signals_mod[..., 1:] * phi, axis=-1)
return dct
NP_DCT = {1: _np_dct1, 2: _np_dct2, 3: _np_dct3}
NP_IDCT = {1: _np_dct1, 2: _np_dct3, 3: _np_dct2}
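# DCT-I is its own inverse and DCT-II/DCT-III are inverses of one another
# (up to scaling), hence the swapped type mapping in the IDCT table.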
class DCTOpsTest(parameterized.TestCase, test.TestCase):
def _compare(self, signals, n, norm, dct_type, atol=5e-4, rtol=5e-4):
"""Compares (I)DCT to SciPy (if available) and a NumPy implementation."""
np_dct = NP_DCT[dct_type](signals, n=n, norm=norm)
tf_dct = dct_ops.dct(signals, n=n, type=dct_type, norm=norm).eval()
self.assertAllClose(np_dct, tf_dct, atol=atol, rtol=rtol)
np_idct = NP_IDCT[dct_type](signals, n=None, norm=norm)
tf_idct = dct_ops.idct(signals, type=dct_type, norm=norm).eval()
self.assertAllClose(np_idct, tf_idct, atol=atol, rtol=rtol)
if fftpack:
scipy_dct = fftpack.dct(signals, n=n, type=dct_type, norm=norm)
self.assertAllClose(scipy_dct, tf_dct, atol=atol, rtol=rtol)
scipy_idct = fftpack.idct(signals, type=dct_type, norm=norm)
self.assertAllClose(scipy_idct, tf_idct, atol=atol, rtol=rtol)
# Verify inverse(forward(s)) == s, up to a normalization factor.
    # Since `n` is not implemented for the IDCT operation, recompute tf_dct without n.
tf_dct = dct_ops.dct(signals, type=dct_type, norm=norm).eval()
tf_idct_dct = dct_ops.idct(
tf_dct, type=dct_type, norm=norm).eval()
tf_dct_idct = dct_ops.dct(
tf_idct, type=dct_type, norm=norm).eval()
if norm is None:
if dct_type == 1:
tf_idct_dct *= 0.5 / (signals.shape[-1] - 1)
tf_dct_idct *= 0.5 / (signals.shape[-1] - 1)
else:
tf_idct_dct *= 0.5 / signals.shape[-1]
tf_dct_idct *= 0.5 / signals.shape[-1]
self.assertAllClose(signals, tf_idct_dct, atol=atol, rtol=rtol)
self.assertAllClose(signals, tf_dct_idct, atol=atol, rtol=rtol)
@parameterized.parameters([
[[2]], [[3]], [[10]], [[2, 20]], [[2, 3, 25]]])
@test_util.run_deprecated_v1
def test_random(self, shape):
"""Test randomly generated batches of data."""
with spectral_ops_test_util.fft_kernel_label_map():
with self.session(use_gpu=True):
signals = np.random.rand(*shape).astype(np.float32)
n = np.random.randint(1, 2 * signals.shape[-1])
n = np.random.choice([None, n])
# Normalization not implemented for orthonormal.
self._compare(signals, n, norm=None, dct_type=1)
for norm in (None, "ortho"):
self._compare(signals, n=n, norm=norm, dct_type=2)
self._compare(signals, n=n, norm=norm, dct_type=3)
def test_error(self):
signals = np.random.rand(10)
# Unsupported type.
with self.assertRaises(ValueError):
dct_ops.dct(signals, type=5)
# Invalid n.
with self.assertRaises(ValueError):
dct_ops.dct(signals, n=-2)
# DCT-I normalization not implemented.
with self.assertRaises(ValueError):
dct_ops.dct(signals, type=1, norm="ortho")
# DCT-I requires at least two inputs.
with self.assertRaises(ValueError):
dct_ops.dct(np.random.rand(1), type=1)
# Unknown normalization.
with self.assertRaises(ValueError):
dct_ops.dct(signals, norm="bad")
with self.assertRaises(NotImplementedError):
dct_ops.dct(signals, axis=0)
if __name__ == "__main__":
test.main()
|
|
from __future__ import absolute_import, division, print_function
from itertools import count
from operator import getitem
from .compatibility import zip_longest
from .core import add, inc # noqa: F401
from .core import (istask, get_dependencies, subs, toposort, flatten,
reverse_dict, ishashable, preorder_traversal)
from .rewrite import END
def cull(dsk, keys):
""" Return new dask with only the tasks required to calculate keys.
In other words, remove unnecessary tasks from dask.
``keys`` may be a single key or list of keys.
Examples
--------
>>> d = {'x': 1, 'y': (inc, 'x'), 'out': (add, 'x', 10)}
>>> dsk, dependencies = cull(d, 'out') # doctest: +SKIP
>>> dsk # doctest: +SKIP
{'x': 1, 'out': (add, 'x', 10)}
>>> dependencies # doctest: +SKIP
{'x': set(), 'out': set(['x'])}
Returns
-------
dsk: culled dask graph
dependencies: Dict mapping {key: [deps]}. Useful side effect to accelerate
other optimizations, notably fuse.
"""
if not isinstance(keys, (list, set)):
keys = [keys]
out = dict()
seen = set()
dependencies = dict()
stack = list(set(flatten(keys)))
while stack:
key = stack.pop()
out[key] = dsk[key]
deps = get_dependencies(dsk, key, as_list=True) # fuse needs lists
dependencies[key] = deps
unseen = [d for d in deps if d not in seen]
stack.extend(unseen)
seen.update(unseen)
return out, dependencies
def fuse(dsk, keys=None, dependencies=None):
""" Return new dask graph with linear sequence of tasks fused together.
If specified, the keys in ``keys`` keyword argument are *not* fused.
Supply ``dependencies`` from output of ``cull`` if available to avoid
recomputing dependencies.
Parameters
----------
dsk: dict
keys: list
dependencies: dict, optional
{key: [list-of-keys]}. Must be a list to provide count of each key
This optional input often comes from ``cull``
Examples
--------
>>> d = {'a': 1, 'b': (inc, 'a'), 'c': (inc, 'b')}
>>> dsk, dependencies = fuse(d)
>>> dsk # doctest: +SKIP
{'c': (inc, (inc, 1))}
>>> dsk, dependencies = fuse(d, keys=['b'])
>>> dsk # doctest: +SKIP
{'b': (inc, 1), 'c': (inc, 'b')}
Returns
-------
dsk: output graph with keys fused
dependencies: dict mapping dependencies after fusion. Useful side effect
to accelerate other downstream optimizations.
"""
if keys is not None and not isinstance(keys, set):
if not isinstance(keys, list):
keys = [keys]
keys = set(flatten(keys))
if dependencies is None:
dependencies = dict((key, get_dependencies(dsk, key, as_list=True))
for key in dsk)
# locate all members of linear chains
child2parent = {}
unfusible = set()
for parent in dsk:
deps = dependencies[parent]
has_many_children = len(deps) > 1
for child in deps:
if keys is not None and child in keys:
unfusible.add(child)
elif child in child2parent:
del child2parent[child]
unfusible.add(child)
elif has_many_children:
unfusible.add(child)
elif child not in unfusible:
child2parent[child] = parent
# construct the chains from ancestor to descendant
chains = []
parent2child = dict(map(reversed, child2parent.items()))
while child2parent:
child, parent = child2parent.popitem()
chain = [child, parent]
while parent in child2parent:
parent = child2parent.pop(parent)
del parent2child[parent]
chain.append(parent)
chain.reverse()
while child in parent2child:
child = parent2child.pop(child)
del child2parent[child]
chain.append(child)
chains.append(chain)
dependencies = dict((k, set(v)) for k, v in dependencies.items())
# create a new dask with fused chains
rv = {}
fused = set()
for chain in chains:
child = chain.pop()
val = dsk[child]
while chain:
parent = chain.pop()
dependencies[parent].update(dependencies.pop(child))
dependencies[parent].remove(child)
val = subs(dsk[parent], child, val)
fused.add(child)
child = parent
fused.add(child)
rv[child] = val
for key, val in dsk.items():
if key not in fused:
rv[key] = val
return rv, dependencies
def _flat_set(x):
if x is None:
return set()
elif isinstance(x, set):
return x
elif not isinstance(x, (list, set)):
x = [x]
return set(x)
def inline(dsk, keys=None, inline_constants=True, dependencies=None):
""" Return new dask with the given keys inlined with their values.
Inlines all constants if ``inline_constants`` keyword is True. Note that
the constant keys will remain in the graph; to remove them, follow
``inline`` with ``cull``.
Examples
--------
>>> d = {'x': 1, 'y': (inc, 'x'), 'z': (add, 'x', 'y')}
>>> inline(d) # doctest: +SKIP
{'x': 1, 'y': (inc, 1), 'z': (add, 1, 'y')}
>>> inline(d, keys='y') # doctest: +SKIP
{'x': 1, 'y': (inc, 1), 'z': (add, 1, (inc, 1))}
>>> inline(d, keys='y', inline_constants=False) # doctest: +SKIP
{'x': 1, 'y': (inc, 1), 'z': (add, 'x', (inc, 'x'))}
"""
if dependencies and isinstance(next(iter(dependencies.values())), list):
dependencies = {k: set(v) for k, v in dependencies.items()}
keys = _flat_set(keys)
if dependencies is None:
dependencies = dict((k, get_dependencies(dsk, k)) for k in dsk)
if inline_constants:
keys.update(k for k, v in dsk.items() if
(ishashable(v) and v in dsk) or
(not dependencies[k] and not istask(v)))
# Keys may depend on other keys, so determine replace order with toposort.
# The values stored in `keysubs` do not include other keys.
replaceorder = toposort(dict((k, dsk[k]) for k in keys if k in dsk),
dependencies=dependencies)
keysubs = {}
for key in replaceorder:
val = dsk[key]
for dep in keys & dependencies[key]:
if dep in keysubs:
replace = keysubs[dep]
else:
replace = dsk[dep]
val = subs(val, dep, replace)
keysubs[key] = val
# Make new dask with substitutions
dsk2 = keysubs.copy()
for key, val in dsk.items():
if key not in dsk2:
for item in keys & dependencies[key]:
val = subs(val, item, keysubs[item])
dsk2[key] = val
return dsk2
def inline_functions(dsk, output, fast_functions=None, inline_constants=False,
dependencies=None):
""" Inline cheap functions into larger operations
Examples
--------
>>> dsk = {'out': (add, 'i', 'd'), # doctest: +SKIP
... 'i': (inc, 'x'),
... 'd': (double, 'y'),
... 'x': 1, 'y': 1}
>>> inline_functions(dsk, [], [inc]) # doctest: +SKIP
{'out': (add, (inc, 'x'), 'd'),
'd': (double, 'y'),
'x': 1, 'y': 1}
Protect output keys. In the example below ``i`` is not inlined because it
is marked as an output key.
>>> inline_functions(dsk, ['i', 'out'], [inc, double]) # doctest: +SKIP
{'out': (add, 'i', (double, 'y')),
'i': (inc, 'x'),
'x': 1, 'y': 1}
"""
if not fast_functions:
return dsk
output = set(output)
fast_functions = set(fast_functions)
if dependencies is None:
dependencies = dict((k, get_dependencies(dsk, k)) for k in dsk)
dependents = reverse_dict(dependencies)
keys = [k for k, v in dsk.items()
if istask(v)
and functions_of(v).issubset(fast_functions)
and dependents[k]
and k not in output]
if keys:
dsk = inline(dsk, keys, inline_constants=inline_constants,
dependencies=dependencies)
for k in keys:
del dsk[k]
return dsk
def functions_of(task):
""" Set of functions contained within nested task
Examples
--------
>>> task = (add, (mul, 1, 2), (inc, 3)) # doctest: +SKIP
>>> functions_of(task) # doctest: +SKIP
set([add, mul, inc])
"""
if istask(task):
args = set.union(*map(functions_of, task[1:])) if task[1:] else set()
return set([unwrap_partial(task[0])]) | args
if isinstance(task, (list, tuple)):
if not task:
return set()
return set.union(*map(functions_of, task))
return set()
def unwrap_partial(func):
while hasattr(func, 'func'):
func = func.func
return func
def dealias(dsk, keys=None, dependencies=None):
""" Remove aliases from dask
Removes and renames aliases using ``inline``. Optional ``keys`` keyword
argument protects keys from being deleted. This is useful to protect keys
that would be expected by a scheduler. If not provided, all inlined aliases
are removed.
Examples
--------
>>> dsk = {'a': (range, 5),
... 'b': 'a',
... 'c': 'b',
... 'd': (sum, 'c'),
... 'e': 'd',
... 'f': (inc, 'd')}
>>> dealias(dsk) # doctest: +SKIP
{'a': (range, 5),
'd': (sum, 'a'),
'f': (inc, 'd')}
>>> dsk = {'a': (range, 5),
... 'b': 'a'}
>>> dealias(dsk) # doctest: +SKIP
{'a': (range, 5)}
>>> dealias(dsk, keys=['a', 'b']) # doctest: +SKIP
{'a': (range, 5),
'b': 'a'}
"""
keys = keys or set()
if not isinstance(keys, set):
keys = set(keys)
if not dependencies:
dependencies = dict((k, get_dependencies(dsk, k)) for k in dsk)
aliases = set(k for k, task in dsk.items() if
ishashable(task) and task in dsk)
dsk2 = inline(dsk, aliases, inline_constants=False)
for k in aliases.difference(keys):
del dsk2[k]
return dsk2
def equivalent(term1, term2, subs=None):
"""Determine if two terms are equivalent, modulo variable substitution.
Equivalent to applying substitutions in `subs` to `term2`, then checking if
`term1 == term2`.
If a subterm doesn't support comparison (i.e. `term1 == term2` errors),
returns `False`.
Parameters
----------
term1, term2 : terms
subs : dict, optional
Mapping of substitutions from `term2` to `term1`
Examples
--------
>>> from operator import add
>>> term1 = (add, 'a', 'b')
>>> term2 = (add, 'x', 'y')
>>> subs = {'x': 'a', 'y': 'b'}
>>> equivalent(term1, term2, subs)
True
>>> subs = {'x': 'a'}
>>> equivalent(term1, term2, subs)
False
"""
# Quick escape for special cases
head_type = type(term1)
if type(term2) != head_type:
# If terms aren't same type, fail
return False
elif head_type not in (tuple, list):
# For literals, just compare
try:
# `is` is tried first, to allow objects that don't implement `==`
# to work for cases where term1 is term2. If `is` returns False,
# and `==` errors, then the only thing we can do is return False.
return term1 is term2 or term1 == term2
except:
return False
pot1 = preorder_traversal(term1)
pot2 = preorder_traversal(term2)
subs = {} if subs is None else subs
for t1, t2 in zip_longest(pot1, pot2, fillvalue=END):
if t1 is END or t2 is END:
# If terms aren't same length: fail
return False
elif ishashable(t2) and t2 in subs:
val = subs[t2]
else:
val = t2
try:
if t1 is not t2 and t1 != val:
return False
except:
return False
return True
def dependency_dict(dsk):
"""Create a dict matching ordered dependencies to keys.
Examples
--------
>>> from operator import add
>>> dsk = {'a': 1, 'b': 2, 'c': (add, 'a', 'a'), 'd': (add, 'b', 'a')}
>>> dependency_dict(dsk) # doctest: +SKIP
{(): ['a', 'b'], ('a', 'a'): ['c'], ('b', 'a'): ['d']}
"""
dep_dict = {}
for key in dsk:
deps = tuple(get_dependencies(dsk, key, True))
dep_dict.setdefault(deps, []).append(key)
return dep_dict
def _possible_matches(dep_dict, deps, subs):
deps2 = []
for d in deps:
v = subs.get(d, None)
if v is not None:
deps2.append(v)
else:
return []
deps2 = tuple(deps2)
return dep_dict.get(deps2, [])
def _sync_keys(dsk1, dsk2, dsk2_topo):
dep_dict1 = dependency_dict(dsk1)
subs = {}
for key2 in toposort(dsk2):
deps = tuple(get_dependencies(dsk2, key2, True))
# List of keys in dsk1 that have terms that *may* match key2
possible_matches = _possible_matches(dep_dict1, deps, subs)
if possible_matches:
val2 = dsk2[key2]
for key1 in possible_matches:
val1 = dsk1[key1]
if equivalent(val1, val2, subs):
subs[key2] = key1
break
return subs
def sync_keys(dsk1, dsk2):
"""Return a dict matching keys in `dsk2` to equivalent keys in `dsk1`.
Parameters
----------
dsk1, dsk2 : dict
Examples
--------
>>> from operator import add, mul
>>> dsk1 = {'a': 1, 'b': (add, 'a', 10), 'c': (mul, 'b', 5)}
>>> dsk2 = {'x': 1, 'y': (add, 'x', 10), 'z': (mul, 'y', 2)}
>>> sync_keys(dsk1, dsk2) # doctest: +SKIP
{'x': 'a', 'y': 'b'}
"""
return _sync_keys(dsk1, dsk2, toposort(dsk2))
def merge_sync(dsk1, dsk2):
"""Merge two dasks together, combining equivalent tasks.
If a task in `dsk2` exists in `dsk1`, the task and key from `dsk1` are used.
If a task in `dsk2` has the same key as a task in `dsk1` (and they aren't
equivalent tasks), then a new key is created for the task in `dsk2`. This
prevents name conflicts.
Parameters
----------
dsk1, dsk2 : dict
Variable names in `dsk2` are replaced with equivalent ones in `dsk1`
before merging.
Returns
-------
new_dsk : dict
The merged dask.
key_map : dict
A mapping between the keys from `dsk2` to their new names in `new_dsk`.
Examples
--------
>>> from operator import add, mul
>>> dsk1 = {'a': 1, 'b': (add, 'a', 10), 'c': (mul, 'b', 5)}
>>> dsk2 = {'x': 1, 'y': (add, 'x', 10), 'z': (mul, 'y', 2)}
>>> new_dsk, key_map = merge_sync(dsk1, dsk2)
>>> new_dsk # doctest: +SKIP
{'a': 1, 'b': (add, 'a', 10), 'c': (mul, 'b', 5), 'z': (mul, 'b', 2)}
>>> key_map # doctest: +SKIP
{'x': 'a', 'y': 'b', 'z': 'z'}
Conflicting names are replaced with auto-generated names upon merging.
>>> dsk1 = {'a': 1, 'res': (add, 'a', 1)}
>>> dsk2 = {'x': 1, 'res': (add, 'x', 2)}
>>> new_dsk, key_map = merge_sync(dsk1, dsk2)
>>> new_dsk # doctest: +SKIP
{'a': 1, 'res': (add, 'a', 1), 'merge_1': (add, 'a', 2)}
>>> key_map # doctest: +SKIP
{'x': 'a', 'res': 'merge_1'}
"""
dsk2_topo = toposort(dsk2)
sd = _sync_keys(dsk1, dsk2, dsk2_topo)
new_dsk = dsk1.copy()
for key in dsk2_topo:
if key in sd:
new_key = sd[key]
else:
if key in dsk1:
new_key = next(merge_sync.names)
else:
new_key = key
sd[key] = new_key
task = dsk2[key]
for a, b in sd.items():
task = subs(task, a, b)
new_dsk[new_key] = task
return new_dsk, sd
# store the name iterator in the function
merge_sync.names = ('merge_%d' % i for i in count(1))
def fuse_selections(dsk, head1, head2, merge):
"""Fuse selections with lower operation.
Handles graphs of the form:
``{key1: (head1, key2, ...), key2: (head2, ...)}``
Parameters
----------
dsk : dict
dask graph
head1 : function
The first element of task1
head2 : function
The first element of task2
merge : function
Takes ``task1`` and ``task2`` and returns a merged task to
replace ``task1``.
>>> def load(store, partition, columns):
... pass
>>> dsk = {'x': (load, 'store', 'part', ['a', 'b']),
... 'y': (getitem, 'x', 'a')}
>>> merge = lambda t1, t2: (load, t2[1], t2[2], t1[2])
>>> dsk2 = fuse_selections(dsk, getitem, load, merge)
>>> cull(dsk2, 'y')[0]
{'y': (<function load at ...>, 'store', 'part', 'a')}
"""
dsk2 = dict()
for k, v in dsk.items():
try:
if (istask(v) and v[0] == head1 and v[1] in dsk and
istask(dsk[v[1]]) and dsk[v[1]][0] == head2):
dsk2[k] = merge(v, dsk[v[1]])
else:
dsk2[k] = v
except TypeError:
dsk2[k] = v
return dsk2
def fuse_getitem(dsk, func, place):
""" Fuse getitem with lower operation
Parameters
----------
dsk: dict
dask graph
func: function
A function in a task to merge
place: int
Location in task to insert the getitem key
>>> def load(store, partition, columns):
... pass
>>> dsk = {'x': (load, 'store', 'part', ['a', 'b']),
... 'y': (getitem, 'x', 'a')}
>>> dsk2 = fuse_getitem(dsk, load, 3) # columns in arg place 3
>>> cull(dsk2, 'y')[0]
{'y': (<function load at ...>, 'store', 'part', 'a')}
"""
return fuse_selections(dsk, getitem, func,
lambda a, b: tuple(b[:place]) + (a[2],) + tuple(b[place + 1:]))
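# Illustrative sketch (not part of the original module): chaining ``cull`` and
# ``fuse`` on a toy graph, reusing the ``inc``/``add`` helpers imported above.
# The graph and key names below are made up for demonstration.
if __name__ == '__main__':
    demo = {'x': 1, 'y': (inc, 'x'), 'z': (inc, 'y'),
            'out': (add, 'z', 10), 'unused': (inc, 'x')}
    culled, deps = cull(demo, 'out')               # 'unused' is dropped
    fused, deps = fuse(culled, dependencies=deps)  # the x -> y -> z -> out chain collapses
    print(sorted(culled))                          # ['out', 'x', 'y', 'z']
    print(fused)                                   # a single fused task under 'out'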
|
|
from functools import reduce
import torch
from torch._utils import _accumulate
from ..function import Function, InplaceFunction
class Index(Function):
def __init__(self, index):
super(Index, self).__init__()
self.index = index
def forward(self, i):
self.input_size = i.size()
result = i.index(self.index)
self.mark_shared_storage((i, result))
return result
def backward(self, grad_output):
grad_input = grad_output.new(self.input_size).zero_()
grad_input._set_index(self.index, grad_output)
return grad_input
class SetItem(InplaceFunction):
def __init__(self, index, value=None):
super(SetItem, self).__init__(True)
self.index = index
self.value = value
def forward(self, i, value=None):
self.mark_dirty(i)
if value is None: # value is scalar
value = self.value
else: # value is Tensor
self.value_size = value.size()
i._set_index(self.index, value)
return i
def backward(self, grad_output):
if self.value is None: # value is Tensor
grad_input = grad_output.clone()
grad_input._set_index(self.index, 0)
grad_value = grad_output.index(self.index).clone()
grad_value = grad_value.view(self.value_size)
return grad_input, grad_value
else:
grad_input = grad_output.clone()
grad_input._set_index(self.index, 0)
return grad_input
class NoGrad(Function):
def forward(self, i):
result = i.new(i)
self.mark_non_differentiable(result)
self.mark_shared_storage((i, result))
return result
def backward(self, grad_output):
assert False, "backward of NoGrad should never be called"
def _do_forward(self, *args, **kwargs):
result = super(NoGrad, self)._do_forward(*args, **kwargs)
self.requires_grad = False
return result
__call__ = _do_forward
class Transpose(Function):
def __init__(self, *dims):
super(Transpose, self).__init__()
assert len(dims) == 2
self.dims = dims
def forward(self, i):
result = i.transpose(*self.dims)
self.mark_shared_storage((i, result))
return result
def backward(self, grad_output):
return grad_output.transpose(*self.dims)
class View(Function):
def __init__(self, *sizes):
super(View, self).__init__()
self.sizes = sizes
def forward(self, i):
self.input_size = i.size()
result = i.view(*self.sizes)
self.mark_shared_storage((i, result))
return result
def backward(self, grad_output):
# TODO: not sure if this clone is necessary
return grad_output.contiguous().view(self.input_size)
class Expand(Function):
def __init__(self, sizes):
super(Expand, self).__init__()
self.sizes = sizes
self.expanded_dims = []
def forward(self, i):
result = i.expand(*self.sizes)
# Dimensions prepended by expand (absent from the input) and dimensions
# broadcast from size 1 both have to be summed out in backward.
self.num_unsqueezed = len(self.sizes) - i.dim()
self.expanded_dims = [dim for dim, (expanded, original)
in enumerate(zip(self.sizes[self.num_unsqueezed:], i.size()))
if expanded != original]
self.mark_shared_storage((i, result))
return result
def backward(self, grad_output):
grad_input = grad_output
for i in range(self.num_unsqueezed):
grad_input = grad_input.sum(0).squeeze(0)
for dim in self.expanded_dims:
grad_input = grad_input.sum(dim)
return grad_input
class Type(Function):
def __init__(self, dest_type):
super(Type, self).__init__()
self.dest_type = dest_type
def forward(self, i):
assert self.dest_type != type(i)
self.input_type = type(i)
return i.type(self.dest_type)
def backward(self, grad_output):
return grad_output.type(self.input_type)
class CudaTransfer(Function):
def __init__(self, device_id=None, async=False):
super(CudaTransfer, self).__init__()
self.device_id = device_id
self.async = async
def forward(self, i):
self.source_device = -1 if not i.is_cuda else i.get_device()
self.source_was_cuda = i.is_cuda
if self.device_id:
return i.cuda(self.device_id, async=self.async)
else:
return i.cuda(async=self.async)
def backward(self, grad_output):
if self.source_device != -1:
return grad_output.cuda(self.source_device)
elif self.source_was_cuda:
return grad_output
else:
return grad_output.cpu()
class Permute(Function):
def __init__(self, dim_indices):
super(Permute, self).__init__()
self.dim_indices = dim_indices
self.rev_dim_indices = [None for _ in range(len(dim_indices))]
for i, dim_idx in enumerate(self.dim_indices):
self.rev_dim_indices[dim_idx] = i
def forward(self, i):
result = i.permute(*self.dim_indices)
self.mark_shared_storage((i, result))
return result
def backward(self, grad_output):
return grad_output.permute(*self.rev_dim_indices)
class IndexAdd(InplaceFunction):
def __init__(self, dim, inplace=False):
super(IndexAdd, self).__init__(inplace)
self.dim = dim
def forward(self, tensor1, index, tensor2):
assert not self.needs_input_grad[1]
if self.needs_input_grad[2]:
self.save_for_backward(index)
if not self.inplace:
tensor1 = tensor1.clone()
else:
self.mark_dirty(tensor1)
return tensor1.index_add_(self.dim, index, tensor2)
def backward(self, grad_output):
grad_tensor1 = grad_tensor2 = None
if self.needs_input_grad[0]:
grad_tensor1 = grad_output
if self.needs_input_grad[2]:
index, = self.saved_tensors
grad_tensor2 = grad_output.index_select(self.dim, index)
return grad_tensor1, None, grad_tensor2
class IndexCopy(InplaceFunction):
def __init__(self, dim, inplace=False):
super(IndexCopy, self).__init__(inplace)
self.dim = dim
def forward(self, tensor1, index, tensor2):
assert not self.needs_input_grad[1]
if any(self.needs_input_grad):
self.save_for_backward(index)
if not self.inplace:
tensor1 = tensor1.clone()
else:
self.mark_dirty(tensor1)
return tensor1.index_copy_(self.dim, index, tensor2)
def backward(self, grad_output):
grad_tensor1 = grad_tensor2 = None
if any(self.needs_input_grad):
index, = self.saved_tensors
if self.needs_input_grad[0]:
grad_tensor1 = grad_output.clone().index_fill_(self.dim, index, 0)
if self.needs_input_grad[2]:
grad_tensor2 = grad_output.index_select(self.dim, index)
return grad_tensor1, None, grad_tensor2
class IndexFill(InplaceFunction):
def __init__(self, dim, value, inplace=False):
super(IndexFill, self).__init__(inplace)
self.dim = dim
self.value = value
def forward(self, tensor, index):
assert not self.needs_input_grad[1]
if self.needs_input_grad[0]:
self.save_for_backward(index)
if not self.inplace:
tensor = tensor.clone()
else:
self.mark_dirty(tensor)
return tensor.index_fill_(self.dim, index, self.value)
def backward(self, grad_output):
grad_tensor = None
if self.needs_input_grad[0]:
index, = self.saved_tensors
grad_tensor = grad_output.clone().index_fill_(self.dim, index, 0)
return grad_tensor, None
class IndexSelect(Function):
def __init__(self, dim):
super(IndexSelect, self).__init__()
self.dim = dim
def forward(self, tensor, index):
assert not self.needs_input_grad[1]
if self.needs_input_grad[0]:
self.save_for_backward(index)
self.input_size = tensor.size()
return tensor.index_select(self.dim, index)
def backward(self, grad_output):
grad_tensor = None
if self.needs_input_grad[0]:
index, = self.saved_tensors
grad_tensor = grad_output.new(*self.input_size).zero_()
grad_tensor.index_add_(self.dim, index, grad_output)
return grad_tensor, None
class Concat(Function):
def __init__(self, dim):
super(Concat, self).__init__()
self.dim = dim
def forward(self, *inputs):
self.input_sizes = [i.size(self.dim) for i in inputs]
return torch.cat(inputs, self.dim)
def backward(self, grad_output):
return tuple(grad_output.narrow(self.dim, end - size, size) for size, end
in zip(self.input_sizes, _accumulate(self.input_sizes)))
class Resize(Function):
def __init__(self, *sizes):
super(Resize, self).__init__()
self.sizes = sizes
self.numel = reduce(lambda x, y: x * y, sizes, 1)
def forward(self, tensor):
if tensor.numel() != self.numel:
raise RuntimeError(("requested resize to {} ({} elements in total), "
"but the given tensor has a size of {} ({} elements). "
"autograd's resize can only change the shape of a given "
"tensor, while preserving the number of elements. ").format(
'x'.join(map(str, self.sizes)), self.numel,
'x'.join(map(str, tensor.size())), tensor.numel()))
self.input_sizes = tensor.size()
result = tensor.new(tensor).resize_(*self.sizes)
self.mark_shared_storage((tensor, result))
return result
def backward(self, grad_output):
assert grad_output.numel() == self.numel
return grad_output.new(grad_output).resize_(self.input_sizes)
class Clone(Function):
def forward(self, input):
return input.clone()
def backward(self, grad_output):
return grad_output
class Squeeze(Function):
def __init__(self, dim=None):
super(Squeeze, self).__init__()
self.dim = dim
def forward(self, input):
self.input_size = input.size()
self.numel = input.numel()
if self.dim is not None:
result = input.squeeze(self.dim)
else:
result = input.squeeze()
self.mark_shared_storage((input, result))
return result
def backward(self, grad_output):
assert grad_output.numel() == self.numel
return grad_output.contiguous().view(self.input_size)
class Unsqueeze(Function):
def __init__(self, dim):
super(Unsqueeze, self).__init__()
self.dim = dim
def forward(self, input):
result = input.unsqueeze(self.dim)
self.mark_shared_storage((input, result))
return result
def backward(self, grad_output):
return grad_output.squeeze(self.dim)
class MaskedCopy(InplaceFunction):
def forward(self, tensor1, mask, tensor2):
assert not self.needs_input_grad[1], "MaskedCopy can't differentiate " \
"the mask"
if not self.inplace:
tensor1 = tensor1.clone()
else:
self.mark_dirty(tensor1)
self.save_for_backward(mask)
return tensor1.masked_copy_(mask, tensor2)
def backward(self, grad_output):
mask, = self.saved_tensors
grad_tensor1 = grad_tensor2 = None
if self.needs_input_grad[0]:
grad_tensor1 = grad_output.clone().masked_fill_(mask, 0)
if self.needs_input_grad[2]:
grad_tensor2 = grad_output.masked_select(mask)
return grad_tensor1, None, grad_tensor2
class MaskedFill(InplaceFunction):
def __init__(self, value, inplace=False):
super(MaskedFill, self).__init__(inplace)
self.value = value
def forward(self, tensor, mask):
assert not self.needs_input_grad[1], "MaskedFill can't differentiate " \
"the mask"
if not self.inplace:
tensor = tensor.clone()
else:
self.mark_dirty(tensor)
self.save_for_backward(mask)
return tensor.masked_fill_(mask, self.value)
def backward(self, grad_output):
mask, = self.saved_tensors
grad_tensor = None
if self.needs_input_grad[0]:
grad_tensor = grad_output.clone().masked_fill_(mask, 0)
return grad_tensor, None
class MaskedSelect(Function):
def forward(self, tensor, mask):
assert not self.needs_input_grad[1], "MaskedSelect can't differentiate " \
"the mask"
self.input_size = tensor.size()
self.save_for_backward(mask)
return tensor.masked_select(mask)
def backward(self, grad_output):
mask, = self.saved_tensors
grad_tensor = None
if self.needs_input_grad[0]:
# TODO: remove zero
grad_tensor = grad_output.new(self.input_size).zero_()
grad_tensor.masked_copy_(mask, grad_output)
return grad_tensor, None
class _MultiSelectionFunction(Function):
def __init__(self, dim, return_indices):
super(_MultiSelectionFunction, self).__init__()
self.dim = dim
self.return_indices = return_indices
def forward(self, input):
# Dispatch to the tensor method named after the subclass, e.g. Sort -> input.sort().
fn = getattr(input, self.__class__.__name__.lower())
self.input_size = input.size()
output, indices = fn(*self.args)
if self.return_indices:
self.save_for_backward(indices)
self.mark_non_differentiable(indices)
return output, indices
else:
self.indices = indices
return output
def backward(self, grad_output, grad_indices=None):
grad_input = grad_output.new(self.input_size).zero_()
if self.return_indices:
indices, = self.saved_tensors
else:
indices = self.indices
dim = self.dim if self.dim is not None else grad_output.dim() - 1
return grad_input.scatter_(dim, indices, grad_output)
class Sort(_MultiSelectionFunction):
def __init__(self, dim=None, descending=False, return_indices=True):
super(Sort, self).__init__(dim, return_indices)
self.descending = descending
def forward(self, input):
dim = self.dim if self.dim is not None else input.dim() - 1
self.args = (dim, self.descending)
return super(Sort, self).forward(input)
class Topk(_MultiSelectionFunction):
def __init__(self, k, dim=None, largest=True, sort=True, return_indices=True):
super(Topk, self).__init__(dim, return_indices)
self.k = k
self.largest = largest
self.sort = sort
def forward(self, input):
dim = self.dim if self.dim is not None else input.dim() - 1
self.args = (self.k, dim, self.largest, self.sort)
return super(Topk, self).forward(input)
class Chunk(Function):
def __init__(self, num_chunks, dim=0):
super(Chunk, self).__init__()
self.num_chunks = num_chunks
self.dim = dim
def forward(self, i):
self.input_size = i.size()
result = i.chunk(self.num_chunks, self.dim)
self.mark_shared_storage(*((i, chunk) for chunk in result))
return result
def backward(self, *grad_output):
grad_input = grad_output[0].new(self.input_size)
offset = 0
for grad in grad_output:
grad_size = grad.size(self.dim)
grad_input.narrow(self.dim, offset, grad_size).copy_(grad)
offset += grad_size
return grad_input
class Gather(Function):
def __init__(self, dim):
super(Gather, self).__init__()
self.dim = dim
def forward(self, input, index):
assert not self.needs_input_grad[1], "Gather can't differentiate " \
"the index"
self.input_size = input.size()
self.save_for_backward(index)
return input.gather(self.dim, index)
def backward(self, grad_output):
index, = self.saved_tensors
grad_input = grad_output.new(self.input_size).zero_()
return grad_input.scatter_(self.dim, index, grad_output), None
class Scatter(InplaceFunction):
def __init__(self, dim, inplace=False):
super(Scatter, self).__init__(inplace)
self.dim = dim
def forward(self, input, index, source):
assert not self.needs_input_grad[1], "Scatter can't differentiate " \
"the index"
if self.inplace:
self.mark_dirty(input)
else:
input = input.clone()
self.save_for_backward(index)
return input.scatter_(self.dim, index, source)
def backward(self, grad_output):
index, = self.saved_tensors
grad_input = grad_source = None
if self.needs_input_grad[0]:
grad_input = grad_output.clone()
grad_input.scatter_(self.dim, index, 0)
if self.needs_input_grad[2]:
grad_source = grad_output.gather(self.dim, index)
return grad_input, None, grad_source
class Repeat(Function):
def __init__(self, repeats):
super(Repeat, self).__init__()
self.repeats = repeats
def forward(self, input):
return input.repeat(self.repeats)
def backward(self, grad_output):
grad_input = grad_output
for dim, repeat in enumerate(self.repeats):
if repeat == 1:
continue
grad_input = sum(grad_input.chunk(repeat, dim))
return grad_input
class Cumsum(Function):
def __init__(self, dim):
super(Cumsum, self).__init__()
self.dim = dim
def forward(self, input):
return torch.cumsum(input, dim=self.dim)
def backward(self, grad_output):
# The gradient of cumsum is the suffix (reversed cumulative) sum of grad_output;
# compute it via cumsum(-g), shift by its last entry, then add g back so that
# entry i ends up as sum(g[i:]) along self.dim.
grad_input = torch.cumsum(-grad_output, dim=self.dim)
end_idx = grad_input.size(self.dim) - 1
grad_sum = grad_input.narrow(self.dim, end_idx, 1)
grad_input -= grad_sum.expand_as(grad_input)
grad_input += grad_output
return grad_input
# TODO: unfold
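# Illustrative sketch (not part of the original module), assuming the legacy
# pre-0.2 torch.autograd API that these classes target, where a Function
# instance is constructed with its options and then called directly on
# Variables (modern torch.autograd.Function uses static methods instead).
if __name__ == '__main__':
    from torch.autograd import Variable
    x = Variable(torch.randn(2, 3), requires_grad=True)
    y = Transpose(0, 1)(x)  # forward: swap dims 0 and 1
    y.sum().backward()      # backward: gradients flow through Transpose.backward
    print(x.grad)           # a (2, 3) gradient of ones under this legacy API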
|
|
import ui
import dice
class Cell(object):
alias = None
default_kick_msg = "Ouch! You hurt your foot!"
default_open_msg = "You see nothing you could open."
default_close_msg = "You see nothing you could close."
default_enter_msg = "You don't see stairs."
default_descr = "<no-descr>"
default_properties = {}
def __init__(self, **kwargs):
self.__dict__.update(self.default_properties)
self.__dict__.update(kwargs)
def gfx(self):
return self.default_gfx
def describe(self):
return self.default_descr
def is_opaque(self):
return self.default_is_opaque
def is_walkable(self):
return self.default_is_walkable
# Actions on cell by monsters
def kick(self, level, who, x, y):
ui.ifpc(who, self.default_kick_msg)
def walk(self, level, who, x, y):
if self.is_walkable():
level.move_monster(who, x, y)
def open(self, level, who, x, y):
ui.ifpc(who, self.default_open_msg)
def close(self, level, who, x, y):
ui.ifpc(who, self.default_close_msg)
def enter(self, level, who, x, y):
ui.ifpc(who, self.default_enter_msg)
class Door(Cell):
alias = '+'
default_properties = {'state': 'closed', 'material': 'metal'}
def is_walkable(self):
return self.state in ('open', 'smashed')
def is_opaque(self):
return self.state == 'closed'
def kick(self, level, who, x, y):
if self.state in ('open', 'smashed'):
ui.ifpc(who, "You kick at air.")
elif self.state == 'closed':
ui.ifpc(who, "You smash the door to pieces.")
self.state = 'smashed'
level.set_cell(x, y, self)
def walk(self, level, who, x, y):
if self.state != 'closed':
super(Door, self).walk(level, who, x, y)
else:
bonk_tab = {'wood': "*bong!*", 'metal': "*plang!*"}
ui.ifpc(who, bonk_tab[self.material])
def open(self, level, who, x, y):
if self.state == "closed":
ui.ifpc(who, "You open the door.")
self.state = 'open'
else:
ui.ifpc(who, "The door is already open.")
level.set_cell(x, y, self)
def close(self, level, who, x, y):
if self.state == "open":
ui.ifpc(who, "You close the door.")
self.state = 'closed'
else:
ui.ifpc(who, "The door is already closed.")
level.set_cell(x, y, self)
_ch_map = {'open': '/', 'closed': '+', 'smashed': '.'}
_col_map = {'wood': 6, 'metal': 8}
def gfx(self):
return (Door._ch_map[self.state], Door._col_map[self.material])
class Floor(Cell):
alias = '.'
default_is_walkable = True
default_is_opaque = False
default_gfx = ('.', 7)
default_descr = "floor."
class Grass(Cell):
default_is_walkable = True
default_is_opaque = False
default_gfx = ('.', 10)
default_descr = "grass"
class Tree(Cell):
alias = 'T'
default_is_walkable = True
default_is_opaque = False
default_gfx = ('T', 10)
default_descr = "tree"
default_properties = {'looted': False}
def kick(self, level, who, x, y):
ui.ifpc(who, "You kick the tree.")
if dice.chance(20):
ui.ifpc(who, "You hurt your foot!")
if not self.looted:
if dice.chance(20):
ui.ifpc(who, "Some apples drop down.")
if dice.chance(20):
self.looted = True
class Wall(Cell):
alias = '#'
default_is_walkable = False
default_is_opaque = True
default_gfx = ('#', 7)
default_descr = "a wall"
class Water(Cell):
alias = '='
default_is_walkable = False
default_is_opaque = False
default_gfx = ('=', 1)
default_descr = "water"
def kick(self, level, who, x, y):
ui.ifpc(who, "*splash*")
class Lava(Cell):
default_is_walkable = False
default_is_opaque = False
default_gfx = ('=', 4)
default_descr = "lava"
class Altar(Cell):
alias = '_'
default_is_walkable = True
default_is_opaque = False
default_properties = {'alignment': 'neutral'}
_col_map = {'lawful': 15, 'neutral': 8, 'evil': 7}
def gfx(self):
return '_', Altar._col_map[self.alignment]
class AbstractPortal(Cell):
default_is_walkable = True
default_is_opaque = False
default_properties = {'connection': (None, 0, 0)} # (level, x, y)
def enter(self, level, who, x, y):
if not who.is_pc: return
new_level, x, y = self.connection
if not new_level:
ui.ifpc(who, "This path leads nowhere...")
else:
level.remove_monster(who)
new_level.nudge_monster_at(x, y)
new_level.add_monster(who, x, y)
class StairsUp(AbstractPortal):
alias = '<'
default_gfx = ('<', 7)
class StairsDown(AbstractPortal):
alias = '>'
default_gfx = ('>', 7)
class Tunnel(AbstractPortal):
alias = '*'
default_gfx = ('*', 7)
# Throne, Ice
####################################################################
def gfx(cell):
return cell.gfx()
def describe(cell):
return cell.describe()
def is_walkable(cell):
return cell.is_walkable()
def is_opaque(cell):
return cell.is_opaque()
def walk(level, who, cell, x, y):
return cell.walk(level, who, x, y)
def kick(level, who, cell, x, y):
return cell.kick(level, who, x, y)
def open(level, who, cell, x, y):
return cell.open(level, who, x, y)
def close(level, who, cell, x, y):
return cell.close(level, who, x, y)
def enter(level, who, cell, x, y):
return cell.enter(level, who, x, y)
####################################################################
db = {
'floor': Floor,
'grass': Grass,
'wall': Wall,
'door': Door,
'water': Water,
'tree': Tree,
'stairs_up': StairsUp,
'stairs_down': StairsDown,
'tunnel': Tunnel,
'altar': Altar,
}
db_alias = {cell_class.alias: cell_class for cell_class in db.values() if cell_class.alias is not None}
def make_cell_from_alias(ch, *args, **kwargs):
return db_alias.get(ch, Floor)(*args, **kwargs)
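# Illustrative sketch (not part of the original module), assuming the ``ui``
# and ``dice`` modules imported above are importable: build a row of cells
# from a made-up ASCII map line via the alias table, then query the helpers.
if __name__ == '__main__':
    row = "#..+..#"
    cells = [make_cell_from_alias(ch) for ch in row]
    print([type(c).__name__ for c in cells])   # Wall, Floor, Floor, Door, ...
    print([gfx(c) for c in cells])             # (character, colour) pairs
    print([is_walkable(c) for c in cells])     # walls block; a closed door does too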
|
|
from typing import List
import numpy as np
from numpy import ndarray
from gdsfactory.components.bend_euler import bend_euler
from gdsfactory.cross_section import strip
from gdsfactory.geometry.functions import path_length
from gdsfactory.routing.manhattan import (
_is_horizontal,
_is_vertical,
remove_flat_angles,
)
from gdsfactory.types import ComponentFactory, CrossSectionFactory
def path_length_matched_points(
list_of_waypoints: List[ndarray],
margin: float = 0.0,
modify_segment_i: int = -2,
extra_length: float = 0.0,
nb_loops: int = 1,
bend: ComponentFactory = bend_euler,
cross_section: CrossSectionFactory = strip,
**kwargs,
) -> List[ndarray]:
"""
Several types of paths won't match correctly.
We do not try to handle all the corner cases here.
You will need to modify the input list of waypoints in some cases.
Args:
list_of_waypoints: [[p1, p2, p3,...], [q1, q2, q3,...], ...]
the number of turns has to be identical
(usually this means the same number of points;
the exception is when there are some flat angles)
margin: some extra space to budget for in addition to the bend radius
in most cases, the default is fine
modify_segment_i: index of the segment which accommodates the new turns
default is next to last segment (-2)
extra_length: distance added to all path length compensation.
Useful if we want to add space for an extra taper on all branches
nb_loops: number of extra loops added in the path
if nb_loops==0, no extra loop is added; instead, in each route,
the segment indexed by `modify_segment_i` is elongated to match
the longest route in `list_of_waypoints`
bend: bend function
cross_section: cross_section factory
**kwargs
Returns: another list of waypoints where
- the path_length of each waypoints list is identical
- the number of turns is identical
"""
if nb_loops >= 1:
return path_length_matched_points_add_waypoints(
list_of_waypoints=list_of_waypoints,
modify_segment_i=modify_segment_i,
bend=bend,
margin=margin,
extra_length=extra_length,
nb_loops=nb_loops,
cross_section=cross_section,
**kwargs,
)
else:
return path_length_matched_points_modify_segment(
list_of_waypoints=list_of_waypoints,
modify_segment_i=modify_segment_i,
extra_length=extra_length,
)
def path_length_matched_points_modify_segment(
list_of_waypoints,
modify_segment_i,
extra_length,
):
if not isinstance(list_of_waypoints, list):
raise ValueError(
"list_of_waypoints should be a list, got {}".format(type(list_of_waypoints))
)
list_of_waypoints = [
remove_flat_angles(waypoints) for waypoints in list_of_waypoints
]
lengths = [path_length(waypoints) for waypoints in list_of_waypoints]
L0 = max(lengths)
N = len(list_of_waypoints[0])
# Find how many turns there are per path
nb_turns = [len(waypoints) - 2 for waypoints in list_of_waypoints]
# The paths have to have the same number of turns, otherwise this algo
# cannot path length match
if min(nb_turns) != max(nb_turns):
raise ValueError(
f"Number of turns in paths have to be identical got {nb_turns}"
)
if modify_segment_i < 0:
modify_segment_i = modify_segment_i + N + 1
list_new_waypoints = []
# For each list of waypoints, modify one segment in-place
for i, waypoints in enumerate(list_of_waypoints):
p_s0, p_s1, p_next = waypoints[modify_segment_i - 1 : modify_segment_i + 2]
p_s0 = np.array(p_s0)
p_s1 = np.array(p_s1)
L = lengths[i]
# Path length compensation length
dL = (L0 - L) / 2
# Additional fixed length
dL = dL + extra_length
# Modify the segment to accommodate the path length matching
# Two cases: vertical or horizontal segment
if _is_vertical(p_s0, p_s1):
sx = np.sign(p_next[0] - p_s1[0])
dx = -sx * dL
dp = (dx, 0)
# Sequence of displacements to apply
elif _is_horizontal(p_s0, p_s1):
sy = np.sign(p_next[1] - p_s1[1])
dy = -sy * dL
dp = (0, dy)
waypoints[modify_segment_i - 1] = p_s0 + dp
waypoints[modify_segment_i] = p_s1 + dp
list_new_waypoints += [waypoints]
return list_new_waypoints
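# Illustrative sketch (plain numpy, not part of the original module): the
# compensation applied above is dL = (L0 - L) / 2 + extra_length per route,
# where L0 is the longest path length. The helper below is hypothetical and
# the route coordinates passed to it would be made up.
def _demo_compensation_lengths(routes, extra_length=0.0):
    """Hypothetical helper, for illustration only."""
    lengths = [
        float(np.sum(np.hypot(*np.diff(np.asarray(r, dtype=float), axis=0).T)))
        for r in routes
    ]
    longest = max(lengths)
    return [(longest - length) / 2 + extra_length for length in lengths]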
def path_length_matched_points_add_waypoints(
list_of_waypoints: List[ndarray],
modify_segment_i: int = -2,
bend: ComponentFactory = bend_euler,
margin: float = 0.0,
extra_length: float = 0.0,
nb_loops: int = 1,
cross_section: CrossSectionFactory = strip,
**kwargs,
) -> List[ndarray]:
"""
Args:
list_of_waypoints: a list of list_of_points:
[[p1, p2, p3,...], [q1, q2, q3,...], ...]
- the number of turns has to be identical
(usually this means the same number of points; the exception is
when there are some flat angles)
modify_segment_i: index of the segment which accommodates the new turns
default is next to last segment
bend: for bends
margin: some extra space to budget for in addition to the bend radius
in most cases, the default is fine
extra_length: distance added to all path length compensation.
Useful if we want to add space for an extra taper on all branches
nb_loops: number of extra loops added in the path
cross_section: factory
**kwargs: cross_section settings
returns:
another list of waypoints where:
- the path_length of each waypoints list is identical
- the number of turns is identical
Several types of paths won't match correctly. We do not try to handle
all the corner cases here. If the paths are not well behaved,
the input list_of_waypoints needs to be modified.
To have flexibility in the path length, we need to add 4 bends
One path has to be converted in this way:
.. code::
__
| |
| | This length is adjusted to make all path with the same length
| |
___| |___
"""
if not isinstance(list_of_waypoints, list):
raise ValueError(
f"list_of_waypoints should be a list, got {type(list_of_waypoints)}"
)
list_of_waypoints = [
remove_flat_angles(waypoints) for waypoints in list_of_waypoints
]
lengths = [path_length(waypoints) for waypoints in list_of_waypoints]
L0 = max(lengths)
N = len(list_of_waypoints[0])
# Find how many turns there are per path
nb_turns = [len(waypoints) - 2 for waypoints in list_of_waypoints]
# The paths have to have the same number of turns, otherwise this algorithm
# cannot path-length match them
if min(nb_turns) != max(nb_turns):
raise ValueError(
f"Number of turns in paths have to be identical got {nb_turns}"
)
# Get the points for the segment we need to modify
bend90 = bend(cross_section=cross_section, **kwargs)
a = margin + bend90.info["dy"]
if modify_segment_i < 0:
modify_segment_i = modify_segment_i + N + 1
list_new_waypoints = []
for i, waypoints in enumerate(list_of_waypoints):
p_s0, p_s1, p_next = waypoints[modify_segment_i - 2 : modify_segment_i + 1]
p_s1 = np.array(p_s1)
L = lengths[i]
# Path length compensation length
dL = (L0 - L) / (2 * nb_loops)
# Additional fixed length
dL = dL + extra_length
# Generate a new sequence of points which will replace this segment
# Two cases: vertical or horizontal segment
if _is_vertical(p_s0, p_s1):
sx = np.sign(p_next[0] - p_s1[0])
sy = np.sign(p_s1[1] - p_s0[1])
dx = sx * (2 * a + dL)
dy = sy * 2 * a
# First new point to insert
q0 = p_s1 + (0, -2 * nb_loops * dy)
# Sequence of displacements to apply
seq = [(dx, 0), (0, dy), (-dx, 0), (0, dy)] * nb_loops
seq.pop() # Remove last point to avoid flat angle with next point
elif _is_horizontal(p_s0, p_s1):
sy = np.sign(p_next[1] - p_s1[1])
sx = np.sign(p_s1[0] - p_s0[0])
dx = sx * 2 * a
dy = sy * (2 * a + dL)
# First new point to insert
q0 = p_s1 + (-2 * dx * nb_loops, 0)
# Sequence of displacements to apply
seq = [(0, dy), (dx, 0), (0, -dy), (dx, 0)] * nb_loops
seq.pop() # Remove last point to avoid flat angle with next point
# Generate points to insert
qs = [q0]
for dp in seq:
qs += [qs[-1] + dp]
inserted_points = np.stack(qs, axis=0)
waypoints = np.array(waypoints)
# Insert the points
new_points = np.vstack(
[
waypoints[: modify_segment_i - 1],
inserted_points,
waypoints[modify_segment_i - 1 :],
]
)
list_new_waypoints += [new_points]
return list_new_waypoints
if __name__ == "__main__":
import gdsfactory as gf
c = gf.Component()
c1 = c << gf.components.straight_array(n=4, spacing=50)
c2 = c << gf.components.straight_array(n=4, spacing=20)
c1.y = 0
c2.y = 0
c2.x = 300
routes = gf.routing.get_bundle_path_length_match(
c1.get_ports_list(prefix="E"),
c2.get_ports_list(prefix="W"),
radius=5,
layer=(2, 0),
)
for route in routes:
c.add(route.references)
c.show()
|
|
# -*- coding: utf-8 -*-
"""
pygments.lexers.ml
~~~~~~~~~~~~~~~~~~
Lexers for ML family languages.
:copyright: Copyright 2006-2017 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import re
from pygments.lexer import RegexLexer, include, bygroups, default, words
from pygments.token import Text, Comment, Operator, Keyword, Name, String, \
Number, Punctuation, Error
__all__ = ['SMLLexer', 'OcamlLexer', 'OpaLexer']
class SMLLexer(RegexLexer):
"""
For the Standard ML language.
.. versionadded:: 1.5
"""
name = 'Standard ML'
aliases = ['sml']
filenames = ['*.sml', '*.sig', '*.fun']
mimetypes = ['text/x-standardml', 'application/x-standardml']
alphanumid_reserved = set((
# Core
'abstype', 'and', 'andalso', 'as', 'case', 'datatype', 'do', 'else',
'end', 'exception', 'fn', 'fun', 'handle', 'if', 'in', 'infix',
'infixr', 'let', 'local', 'nonfix', 'of', 'op', 'open', 'orelse',
'raise', 'rec', 'then', 'type', 'val', 'with', 'withtype', 'while',
# Modules
'eqtype', 'functor', 'include', 'sharing', 'sig', 'signature',
'struct', 'structure', 'where',
))
symbolicid_reserved = set((
# Core
':', r'\|', '=', '=>', '->', '#',
# Modules
':>',
))
nonid_reserved = set(('(', ')', '[', ']', '{', '}', ',', ';', '...', '_'))
alphanumid_re = r"[a-zA-Z][\w']*"
symbolicid_re = r"[!%&$#+\-/:<=>?@\\~`^|*]+"
# A character constant is a sequence of the form #s, where s is a string
# constant denoting a string of size one character. This setup just parses
# the entire string as either a String.Double or a String.Char (depending
# on the argument), even if the String.Char is an erroneous
# multiple-character string.
def stringy(whatkind):
return [
(r'[^"\\]', whatkind),
(r'\\[\\"abtnvfr]', String.Escape),
# Control-character notation is used for codes < 32,
# where \^@ == \000
(r'\\\^[\x40-\x5e]', String.Escape),
# Docs say 'decimal digits'
(r'\\[0-9]{3}', String.Escape),
(r'\\u[0-9a-fA-F]{4}', String.Escape),
(r'\\\s+\\', String.Interpol),
(r'"', whatkind, '#pop'),
]
# Callbacks for distinguishing tokens and reserved words
def long_id_callback(self, match):
if match.group(1) in self.alphanumid_reserved:
token = Error
else:
token = Name.Namespace
yield match.start(1), token, match.group(1)
yield match.start(2), Punctuation, match.group(2)
def end_id_callback(self, match):
if match.group(1) in self.alphanumid_reserved:
token = Error
elif match.group(1) in self.symbolicid_reserved:
token = Error
else:
token = Name
yield match.start(1), token, match.group(1)
def id_callback(self, match):
str = match.group(1)
if str in self.alphanumid_reserved:
token = Keyword.Reserved
elif str in self.symbolicid_reserved:
token = Punctuation
else:
token = Name
yield match.start(1), token, str
tokens = {
# Whitespace and comments are (almost) everywhere
'whitespace': [
(r'\s+', Text),
(r'\(\*', Comment.Multiline, 'comment'),
],
'delimiters': [
# This lexer treats these delimiters specially:
# Delimiters define scopes, and the scope is how the meaning of
# the `|' is resolved - is it a case/handle expression, or function
# definition by cases? (This is not how the Definition works, but
# it's how MLton behaves, see http://mlton.org/SMLNJDeviations)
(r'\(|\[|\{', Punctuation, 'main'),
(r'\)|\]|\}', Punctuation, '#pop'),
(r'\b(let|if|local)\b(?!\')', Keyword.Reserved, ('main', 'main')),
(r'\b(struct|sig|while)\b(?!\')', Keyword.Reserved, 'main'),
(r'\b(do|else|end|in|then)\b(?!\')', Keyword.Reserved, '#pop'),
],
'core': [
# Punctuation that doesn't overlap symbolic identifiers
(r'(%s)' % '|'.join(re.escape(z) for z in nonid_reserved),
Punctuation),
# Special constants: strings, floats, numbers in decimal and hex
(r'#"', String.Char, 'char'),
(r'"', String.Double, 'string'),
(r'~?0x[0-9a-fA-F]+', Number.Hex),
(r'0wx[0-9a-fA-F]+', Number.Hex),
(r'0w\d+', Number.Integer),
(r'~?\d+\.\d+[eE]~?\d+', Number.Float),
(r'~?\d+\.\d+', Number.Float),
(r'~?\d+[eE]~?\d+', Number.Float),
(r'~?\d+', Number.Integer),
# Labels
(r'#\s*[1-9][0-9]*', Name.Label),
(r'#\s*(%s)' % alphanumid_re, Name.Label),
(r'#\s+(%s)' % symbolicid_re, Name.Label),
# Some reserved words trigger a special, local lexer state change
(r'\b(datatype|abstype)\b(?!\')', Keyword.Reserved, 'dname'),
(r'(?=\b(exception)\b(?!\'))', Text, ('ename')),
(r'\b(functor|include|open|signature|structure)\b(?!\')',
Keyword.Reserved, 'sname'),
(r'\b(type|eqtype)\b(?!\')', Keyword.Reserved, 'tname'),
# Regular identifiers, long and otherwise
(r'\'[\w\']*', Name.Decorator),
(r'(%s)(\.)' % alphanumid_re, long_id_callback, "dotted"),
(r'(%s)' % alphanumid_re, id_callback),
(r'(%s)' % symbolicid_re, id_callback),
],
'dotted': [
(r'(%s)(\.)' % alphanumid_re, long_id_callback),
(r'(%s)' % alphanumid_re, end_id_callback, "#pop"),
(r'(%s)' % symbolicid_re, end_id_callback, "#pop"),
(r'\s+', Error),
(r'\S+', Error),
],
# Main parser (prevents errors in files that have scoping errors)
'root': [
default('main')
],
# In this scope, I expect '|' to not be followed by a function name,
# and I expect 'and' to be followed by a binding site
'main': [
include('whitespace'),
# Special behavior of val/and/fun
(r'\b(val|and)\b(?!\')', Keyword.Reserved, 'vname'),
(r'\b(fun)\b(?!\')', Keyword.Reserved,
('#pop', 'main-fun', 'fname')),
include('delimiters'),
include('core'),
(r'\S+', Error),
],
# In this scope, I expect '|' and 'and' to be followed by a function
'main-fun': [
include('whitespace'),
(r'\s', Text),
(r'\(\*', Comment.Multiline, 'comment'),
# Special behavior of val/and/fun
(r'\b(fun|and)\b(?!\')', Keyword.Reserved, 'fname'),
(r'\b(val)\b(?!\')', Keyword.Reserved,
('#pop', 'main', 'vname')),
# Special behavior of '|' and '|'-manipulating keywords
(r'\|', Punctuation, 'fname'),
(r'\b(case|handle)\b(?!\')', Keyword.Reserved,
('#pop', 'main')),
include('delimiters'),
include('core'),
(r'\S+', Error),
],
# Character and string parsers
'char': stringy(String.Char),
'string': stringy(String.Double),
'breakout': [
(r'(?=\b(%s)\b(?!\'))' % '|'.join(alphanumid_reserved), Text, '#pop'),
],
# Dealing with what comes after module system keywords
'sname': [
include('whitespace'),
include('breakout'),
(r'(%s)' % alphanumid_re, Name.Namespace),
default('#pop'),
],
# Dealing with what comes after the 'fun' (or 'and' or '|') keyword
'fname': [
include('whitespace'),
(r'\'[\w\']*', Name.Decorator),
(r'\(', Punctuation, 'tyvarseq'),
(r'(%s)' % alphanumid_re, Name.Function, '#pop'),
(r'(%s)' % symbolicid_re, Name.Function, '#pop'),
# Ignore interesting function declarations like "fun (x + y) = ..."
default('#pop'),
],
# Dealing with what comes after the 'val' (or 'and') keyword
'vname': [
include('whitespace'),
(r'\'[\w\']*', Name.Decorator),
(r'\(', Punctuation, 'tyvarseq'),
(r'(%s)(\s*)(=(?!%s))' % (alphanumid_re, symbolicid_re),
bygroups(Name.Variable, Text, Punctuation), '#pop'),
(r'(%s)(\s*)(=(?!%s))' % (symbolicid_re, symbolicid_re),
bygroups(Name.Variable, Text, Punctuation), '#pop'),
(r'(%s)' % alphanumid_re, Name.Variable, '#pop'),
(r'(%s)' % symbolicid_re, Name.Variable, '#pop'),
# Ignore interesting patterns like 'val (x, y)'
default('#pop'),
],
# Dealing with what comes after the 'type' (or 'and') keyword
'tname': [
include('whitespace'),
include('breakout'),
(r'\'[\w\']*', Name.Decorator),
(r'\(', Punctuation, 'tyvarseq'),
(r'=(?!%s)' % symbolicid_re, Punctuation, ('#pop', 'typbind')),
(r'(%s)' % alphanumid_re, Keyword.Type),
(r'(%s)' % symbolicid_re, Keyword.Type),
(r'\S+', Error, '#pop'),
],
# A type binding includes most identifiers
'typbind': [
include('whitespace'),
(r'\b(and)\b(?!\')', Keyword.Reserved, ('#pop', 'tname')),
include('breakout'),
include('core'),
(r'\S+', Error, '#pop'),
],
# Dealing with what comes after the 'datatype' (or 'and') keyword
'dname': [
include('whitespace'),
include('breakout'),
(r'\'[\w\']*', Name.Decorator),
(r'\(', Punctuation, 'tyvarseq'),
(r'(=)(\s*)(datatype)',
bygroups(Punctuation, Text, Keyword.Reserved), '#pop'),
(r'=(?!%s)' % symbolicid_re, Punctuation,
('#pop', 'datbind', 'datcon')),
(r'(%s)' % alphanumid_re, Keyword.Type),
(r'(%s)' % symbolicid_re, Keyword.Type),
(r'\S+', Error, '#pop'),
],
# common case - A | B | C of int
'datbind': [
include('whitespace'),
(r'\b(and)\b(?!\')', Keyword.Reserved, ('#pop', 'dname')),
(r'\b(withtype)\b(?!\')', Keyword.Reserved, ('#pop', 'tname')),
(r'\b(of)\b(?!\')', Keyword.Reserved),
(r'(\|)(\s*)(%s)' % alphanumid_re,
bygroups(Punctuation, Text, Name.Class)),
(r'(\|)(\s+)(%s)' % symbolicid_re,
bygroups(Punctuation, Text, Name.Class)),
include('breakout'),
include('core'),
(r'\S+', Error),
],
# Dealing with what comes after an exception
'ename': [
include('whitespace'),
(r'(exception|and)\b(\s+)(%s)' % alphanumid_re,
bygroups(Keyword.Reserved, Text, Name.Class)),
(r'(exception|and)\b(\s*)(%s)' % symbolicid_re,
bygroups(Keyword.Reserved, Text, Name.Class)),
(r'\b(of)\b(?!\')', Keyword.Reserved),
include('breakout'),
include('core'),
(r'\S+', Error),
],
'datcon': [
include('whitespace'),
(r'(%s)' % alphanumid_re, Name.Class, '#pop'),
(r'(%s)' % symbolicid_re, Name.Class, '#pop'),
(r'\S+', Error, '#pop'),
],
# Series of type variables
'tyvarseq': [
(r'\s', Text),
(r'\(\*', Comment.Multiline, 'comment'),
(r'\'[\w\']*', Name.Decorator),
(alphanumid_re, Name),
(r',', Punctuation),
(r'\)', Punctuation, '#pop'),
(symbolicid_re, Name),
],
'comment': [
(r'[^(*)]', Comment.Multiline),
(r'\(\*', Comment.Multiline, '#push'),
(r'\*\)', Comment.Multiline, '#pop'),
(r'[(*)]', Comment.Multiline),
],
}
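# Illustrative sketch (not part of the original module), assuming a standard
# Pygments installation: tokenise a tiny, made-up SML snippet with the lexer
# defined above through the stock ``Lexer.get_tokens`` API.
if __name__ == '__main__':
    _sample = "fun inc x = x + 1\nval y = inc 41\n"
    for _token, _value in SMLLexer().get_tokens(_sample):
        print(_token, repr(_value))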
class OcamlLexer(RegexLexer):
"""
For the OCaml language.
.. versionadded:: 0.7
"""
name = 'OCaml'
aliases = ['ocaml']
filenames = ['*.ml', '*.mli', '*.mll', '*.mly']
mimetypes = ['text/x-ocaml']
keywords = (
'as', 'assert', 'begin', 'class', 'constraint', 'do', 'done',
'downto', 'else', 'end', 'exception', 'external', 'false',
'for', 'fun', 'function', 'functor', 'if', 'in', 'include',
'inherit', 'initializer', 'lazy', 'let', 'match', 'method',
'module', 'mutable', 'new', 'object', 'of', 'open', 'private',
'raise', 'rec', 'sig', 'struct', 'then', 'to', 'true', 'try',
'type', 'value', 'val', 'virtual', 'when', 'while', 'with',
)
keyopts = (
'!=', '#', '&', '&&', r'\(', r'\)', r'\*', r'\+', ',', '-',
r'-\.', '->', r'\.', r'\.\.', ':', '::', ':=', ':>', ';', ';;', '<',
'<-', '=', '>', '>]', r'>\}', r'\?', r'\?\?', r'\[', r'\[<', r'\[>',
r'\[\|', ']', '_', '`', r'\{', r'\{<', r'\|', r'\|]', r'\}', '~'
)
operators = r'[!$%&*+\./:<=>?@^|~-]'
word_operators = ('and', 'asr', 'land', 'lor', 'lsl', 'lxor', 'mod', 'or')
prefix_syms = r'[!?~]'
infix_syms = r'[=<>@^|&+\*/$%-]'
primitives = ('unit', 'int', 'float', 'bool', 'string', 'char', 'list', 'array')
tokens = {
'escape-sequence': [
(r'\\[\\"\'ntbr]', String.Escape),
(r'\\[0-9]{3}', String.Escape),
(r'\\x[0-9a-fA-F]{2}', String.Escape),
],
'root': [
(r'\s+', Text),
(r'false|true|\(\)|\[\]', Name.Builtin.Pseudo),
(r'\b([A-Z][\w\']*)(?=\s*\.)', Name.Namespace, 'dotted'),
(r'\b([A-Z][\w\']*)', Name.Class),
(r'\(\*(?![)])', Comment, 'comment'),
(r'\b(%s)\b' % '|'.join(keywords), Keyword),
(r'(%s)' % '|'.join(keyopts[::-1]), Operator),
(r'(%s|%s)?%s' % (infix_syms, prefix_syms, operators), Operator),
(r'\b(%s)\b' % '|'.join(word_operators), Operator.Word),
(r'\b(%s)\b' % '|'.join(primitives), Keyword.Type),
(r"[^\W\d][\w']*", Name),
(r'-?\d[\d_]*(.[\d_]*)?([eE][+\-]?\d[\d_]*)', Number.Float),
(r'0[xX][\da-fA-F][\da-fA-F_]*', Number.Hex),
(r'0[oO][0-7][0-7_]*', Number.Oct),
(r'0[bB][01][01_]*', Number.Bin),
(r'\d[\d_]*', Number.Integer),
(r"'(?:(\\[\\\"'ntbr ])|(\\[0-9]{3})|(\\x[0-9a-fA-F]{2}))'",
String.Char),
(r"'.'", String.Char),
(r"'", Keyword), # a stray quote is another syntax element
(r'"', String.Double, 'string'),
(r'[~?][a-z][\w\']*:', Name.Variable),
],
'comment': [
(r'[^(*)]+', Comment),
(r'\(\*', Comment, '#push'),
(r'\*\)', Comment, '#pop'),
(r'[(*)]', Comment),
],
'string': [
(r'[^\\"]+', String.Double),
include('escape-sequence'),
(r'\\\n', String.Double),
(r'"', String.Double, '#pop'),
],
'dotted': [
(r'\s+', Text),
(r'\.', Punctuation),
(r'[A-Z][\w\']*(?=\s*\.)', Name.Namespace),
(r'[A-Z][\w\']*', Name.Class, '#pop'),
(r'[a-z_][\w\']*', Name, '#pop'),
default('#pop'),
],
}
class OpaLexer(RegexLexer):
"""
Lexer for the Opa language (http://opalang.org).
.. versionadded:: 1.5
"""
name = 'Opa'
aliases = ['opa']
filenames = ['*.opa']
mimetypes = ['text/x-opa']
# most of these aren't strictly keywords
# but if you color only real keywords, you might just
# as well not color anything
keywords = (
'and', 'as', 'begin', 'case', 'client', 'css', 'database', 'db', 'do',
'else', 'end', 'external', 'forall', 'function', 'if', 'import',
'match', 'module', 'or', 'package', 'parser', 'rec', 'server', 'then',
'type', 'val', 'with', 'xml_parser',
)
# matches both stuff and `stuff`
ident_re = r'(([a-zA-Z_]\w*)|(`[^`]*`))'
op_re = r'[.=\-<>,@~%/+?*&^!]'
punc_re = r'[()\[\],;|]' # '{' and '}' are treated elsewhere
# because they are also used for inserts
tokens = {
# copied from the caml lexer, should be adapted
'escape-sequence': [
(r'\\[\\"\'ntr}]', String.Escape),
(r'\\[0-9]{3}', String.Escape),
(r'\\x[0-9a-fA-F]{2}', String.Escape),
],
# factorizing these rules, because they are inserted many times
'comments': [
(r'/\*', Comment, 'nested-comment'),
(r'//.*?$', Comment),
],
'comments-and-spaces': [
include('comments'),
(r'\s+', Text),
],
'root': [
include('comments-and-spaces'),
# keywords
(words(keywords, prefix=r'\b', suffix=r'\b'), Keyword),
# directives
# we could parse the actual set of directives instead of anything
# starting with @, but this is troublesome
# because it needs to be adjusted all the time
# and assuming we parse only sources that compile, it is useless
(r'@' + ident_re + r'\b', Name.Builtin.Pseudo),
# number literals
(r'-?.[\d]+([eE][+\-]?\d+)', Number.Float),
(r'-?\d+.\d*([eE][+\-]?\d+)', Number.Float),
(r'-?\d+[eE][+\-]?\d+', Number.Float),
(r'0[xX][\da-fA-F]+', Number.Hex),
(r'0[oO][0-7]+', Number.Oct),
(r'0[bB][01]+', Number.Bin),
(r'\d+', Number.Integer),
# color literals
(r'#[\da-fA-F]{3,6}', Number.Integer),
# string literals
(r'"', String.Double, 'string'),
# char literal, should be checked because this is the regexp from
# the caml lexer
(r"'(?:(\\[\\\"'ntbr ])|(\\[0-9]{3})|(\\x[0-9a-fA-F]{2})|.)'",
String.Char),
# this is meant to deal with embedded exprs in strings
# every time we find a '}' we pop a state so that if we were
# inside a string, we are back in the string state
# as a consequence, we must also push a state every time we find a
# '{' or else we will have errors when parsing {} for instance
(r'\{', Operator, '#push'),
(r'\}', Operator, '#pop'),
# html literals
# this is much stricter than the actual parser,
# since a<b would not be parsed as html
# but then again, the parser is way too lax, and we can't hope
# to have something as tolerant
(r'<(?=[a-zA-Z>])', String.Single, 'html-open-tag'),
# db path
# matching the '[_]' in '/a[_]' because it is a part
# of the syntax of the db path definition
# unfortunately, i don't know how to match the ']' in
# /a[1], so this is somewhat inconsistent
(r'[@?!]?(/\w+)+(\[_\])?', Name.Variable),
# putting the same color on <- as on db path, since
# it can be used only to mean Db.write
(r'<-(?!'+op_re+r')', Name.Variable),
# 'modules'
# although modules are not distinguished by their names as in caml
# the standard library seems to follow the convention that only
# module names are capitalized
(r'\b([A-Z]\w*)(?=\.)', Name.Namespace),
# operators
# = has a special role because this is the only
# way to syntactically distinguish binding constructions
# unfortunately, this colors the equal in {x=2} too
(r'=(?!'+op_re+r')', Keyword),
(r'(%s)+' % op_re, Operator),
(r'(%s)+' % punc_re, Operator),
# coercions
(r':', Operator, 'type'),
# type variables
# we need this rule because we don't parse type definitions
# specially, so in "type t('a) = ...", "'a" is parsed by 'root'
("'"+ident_re, Keyword.Type),
# id literal, #something, or #{expr}
(r'#'+ident_re, String.Single),
(r'#(?=\{)', String.Single),
# identifiers
# this avoids coloring '2' in 'a2' as an integer
(ident_re, Text),
# default, not sure if that is needed or not
# (r'.', Text),
],
# it is quite painful to have to parse types to know where they end
# this is the general rule for a type
# a type is either:
# * -> ty
# * type-with-slash
# * type-with-slash -> ty
# * type-with-slash (, type-with-slash)+ -> ty
#
# the code is pretty funky in here, but this code would roughly
# translate in caml to:
# let rec type stream =
# match stream with
# | [< "->"; stream >] -> type stream
# | [< ""; stream >] ->
# type_with_slash stream
# type_lhs_1 stream;
# and type_1 stream = ...
'type': [
include('comments-and-spaces'),
(r'->', Keyword.Type),
default(('#pop', 'type-lhs-1', 'type-with-slash')),
],
# parses all the atomic or closed constructions in the syntax of type
# expressions: record types, tuple types, type constructors, basic type
# and type variables
'type-1': [
include('comments-and-spaces'),
(r'\(', Keyword.Type, ('#pop', 'type-tuple')),
(r'~?\{', Keyword.Type, ('#pop', 'type-record')),
(ident_re+r'\(', Keyword.Type, ('#pop', 'type-tuple')),
(ident_re, Keyword.Type, '#pop'),
("'"+ident_re, Keyword.Type),
# this case is not in the syntax but sometimes
# we think we are parsing types when in fact we are parsing
# some css, so we just pop the states until we get back into
# the root state
default('#pop'),
],
# type-with-slash is either:
# * type-1
# * type-1 (/ type-1)+
'type-with-slash': [
include('comments-and-spaces'),
default(('#pop', 'slash-type-1', 'type-1')),
],
'slash-type-1': [
include('comments-and-spaces'),
('/', Keyword.Type, ('#pop', 'type-1')),
# same remark as above
default('#pop'),
],
# we go in this state after having parsed a type-with-slash
# while trying to parse a type
# and at this point we must determine if we are parsing an arrow
# type (in which case we must continue parsing) or not (in which
# case we stop)
'type-lhs-1': [
include('comments-and-spaces'),
(r'->', Keyword.Type, ('#pop', 'type')),
(r'(?=,)', Keyword.Type, ('#pop', 'type-arrow')),
default('#pop'),
],
'type-arrow': [
include('comments-and-spaces'),
# the lookahead here allows parsing f(x : int, y : float -> truc)
# correctly
(r',(?=[^:]*?->)', Keyword.Type, 'type-with-slash'),
(r'->', Keyword.Type, ('#pop', 'type')),
# same remark as above
default('#pop'),
],
# no need to do precise parsing for tuples and records
# because they are closed constructions, so we can simply
# find the closing delimiter
# note that this would not work if the source
# contained identifiers like `{)` (although it could be patched
# to support it)
'type-tuple': [
include('comments-and-spaces'),
(r'[^()/*]+', Keyword.Type),
(r'[/*]', Keyword.Type),
(r'\(', Keyword.Type, '#push'),
(r'\)', Keyword.Type, '#pop'),
],
'type-record': [
include('comments-and-spaces'),
(r'[^{}/*]+', Keyword.Type),
(r'[/*]', Keyword.Type),
(r'\{', Keyword.Type, '#push'),
(r'\}', Keyword.Type, '#pop'),
],
# 'type-tuple': [
# include('comments-and-spaces'),
# (r'\)', Keyword.Type, '#pop'),
# default(('#pop', 'type-tuple-1', 'type-1')),
# ],
# 'type-tuple-1': [
# include('comments-and-spaces'),
# (r',?\s*\)', Keyword.Type, '#pop'), # ,) is a valid end of tuple, in (1,)
# (r',', Keyword.Type, 'type-1'),
# ],
# 'type-record':[
# include('comments-and-spaces'),
# (r'\}', Keyword.Type, '#pop'),
# (r'~?(?:\w+|`[^`]*`)', Keyword.Type, 'type-record-field-expr'),
# ],
# 'type-record-field-expr': [
#
# ],
'nested-comment': [
(r'[^/*]+', Comment),
(r'/\*', Comment, '#push'),
(r'\*/', Comment, '#pop'),
(r'[/*]', Comment),
],
# the copy pasting between string and single-string
# is kinda sad. Is there a way to avoid that??
'string': [
(r'[^\\"{]+', String.Double),
(r'"', String.Double, '#pop'),
(r'\{', Operator, 'root'),
include('escape-sequence'),
],
'single-string': [
(r'[^\\\'{]+', String.Double),
(r'\'', String.Double, '#pop'),
(r'\{', Operator, 'root'),
include('escape-sequence'),
],
# all the html stuff
# can't really reuse some existing html parser
# because we must be able to parse embedded expressions
# we are in this state after someone parsed the '<' that
# started the html literal
'html-open-tag': [
(r'[\w\-:]+', String.Single, ('#pop', 'html-attr')),
(r'>', String.Single, ('#pop', 'html-content')),
],
# we are in this state after someone parsed the '</' that
# started the end of the closing tag
'html-end-tag': [
# this is a star, because </> is allowed
(r'[\w\-:]*>', String.Single, '#pop'),
],
# we are in this state after having parsed '<ident(:ident)?'
# we thus parse a possibly empty list of attributes
'html-attr': [
(r'\s+', Text),
(r'[\w\-:]+=', String.Single, 'html-attr-value'),
(r'/>', String.Single, '#pop'),
(r'>', String.Single, ('#pop', 'html-content')),
],
'html-attr-value': [
(r"'", String.Single, ('#pop', 'single-string')),
(r'"', String.Single, ('#pop', 'string')),
(r'#'+ident_re, String.Single, '#pop'),
(r'#(?=\{)', String.Single, ('#pop', 'root')),
(r'[^"\'{`=<>]+', String.Single, '#pop'),
(r'\{', Operator, ('#pop', 'root')), # this is a tail call!
],
# we should probably deal with '\' escapes here
'html-content': [
(r'<!--', Comment, 'html-comment'),
(r'</', String.Single, ('#pop', 'html-end-tag')),
(r'<', String.Single, 'html-open-tag'),
(r'\{', Operator, 'root'),
(r'[^<{]+', String.Single),
],
'html-comment': [
(r'-->', Comment, '#pop'),
(r'[^\-]+|-', Comment),
],
}
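# Minimal usage sketch for the token table above. It assumes this table
# belongs to Pygments' Opa lexer (exposed as ``OpaLexer``; the class itself is
# not shown in this excerpt) and simply runs the state machine over a tiny
# input to show how '{' pushes back into 'root' inside string literals.
if __name__ == '__main__':
    from pygments.lexers import OpaLexer  # assumed class name/location
    sample = 'x = "value: {1 + 2}"'
    for token_type, value in OpaLexer().get_tokens(sample):
        print(token_type, repr(value))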
|
|
from modularodm.exceptions import ValidationError
from modularodm import Q
from rest_framework import exceptions
from rest_framework import serializers as ser
from api.base.exceptions import Conflict
from api.base.serializers import (
JSONAPISerializer, IDField,
LinksField, RelationshipField, DateByVersion,
)
from api.base.utils import absolute_reverse, get_user_auth
from api.taxonomies.serializers import TaxonomyField
from api.nodes.serializers import (
NodeCitationSerializer,
NodeLicenseSerializer,
get_license_details
)
from framework.exceptions import PermissionsError
from website.util import permissions
from website.exceptions import NodeStateError
from website.project import signals as project_signals
from osf.models import BaseFileNode, PreprintService, PreprintProvider, Node, NodeLicense
class PrimaryFileRelationshipField(RelationshipField):
def get_object(self, file_id):
return BaseFileNode.load(file_id)
def to_internal_value(self, data):
file = self.get_object(data)
return {'primary_file': file}
class NodeRelationshipField(RelationshipField):
def get_object(self, node_id):
return Node.load(node_id)
def to_internal_value(self, data):
node = self.get_object(data)
return {'node': node}
class PreprintProviderRelationshipField(RelationshipField):
def get_object(self, node_id):
return PreprintProvider.load(node_id)
def to_internal_value(self, data):
provider = self.get_object(data)
return {'provider': provider}
class PreprintLicenseRelationshipField(RelationshipField):
def to_internal_value(self, license_id):
license = NodeLicense.load(license_id)
if license:
return {'license_type': license}
raise exceptions.NotFound('Unable to find specified license.')
class PreprintSerializer(JSONAPISerializer):
filterable_fields = frozenset([
'id',
'date_created',
'date_modified',
'date_published',
'provider',
'is_published',
'subjects',
])
id = IDField(source='_id', read_only=True)
subjects = ser.SerializerMethodField()
date_created = DateByVersion(read_only=True)
date_modified = DateByVersion(read_only=True)
date_published = DateByVersion(read_only=True)
doi = ser.CharField(source='article_doi', required=False, allow_null=True)
is_published = ser.BooleanField(required=False)
is_preprint_orphan = ser.BooleanField(read_only=True)
license_record = NodeLicenseSerializer(required=False, source='license')
citation = RelationshipField(
related_view='preprints:preprint-citation',
related_view_kwargs={'preprint_id': '<_id>'}
)
identifiers = RelationshipField(
related_view='preprints:identifier-list',
related_view_kwargs={'preprint_id': '<_id>'}
)
node = NodeRelationshipField(
related_view='nodes:node-detail',
related_view_kwargs={'node_id': '<node._id>'},
read_only=False
)
license = PreprintLicenseRelationshipField(
related_view='licenses:license-detail',
related_view_kwargs={'license_id': '<license.node_license._id>'},
read_only=False
)
provider = PreprintProviderRelationshipField(
related_view='preprint_providers:preprint_provider-detail',
related_view_kwargs={'provider_id': '<provider._id>'},
read_only=False
)
primary_file = PrimaryFileRelationshipField(
related_view='files:file-detail',
related_view_kwargs={'file_id': '<primary_file._id>'},
lookup_url_kwarg='file_id',
read_only=False
)
links = LinksField(
{
'self': 'get_preprint_url',
'html': 'get_absolute_html_url',
'doi': 'get_article_doi_url',
'preprint_doi': 'get_preprint_doi_url'
}
)
class Meta:
type_ = 'preprints'
def get_subjects(self, obj):
return [
[
TaxonomyField().to_representation(subj) for subj in hier
] for hier in obj.subject_hierarchy
]
def get_preprint_url(self, obj):
return absolute_reverse('preprints:preprint-detail', kwargs={'preprint_id': obj._id, 'version': self.context['request'].parser_context['kwargs']['version']})
def get_absolute_url(self, obj):
return self.get_preprint_url(obj)
def get_article_doi_url(self, obj):
return 'https://dx.doi.org/{}'.format(obj.article_doi) if obj.article_doi else None
def get_preprint_doi_url(self, obj):
doi_identifier = obj.get_identifier('doi')
return 'https://dx.doi.org/{}'.format(doi_identifier.value) if doi_identifier else None
def run_validation(self, *args, **kwargs):
# Overrides constructor for validated_data to allow writes to a SerializerMethodField
# Validation for `subjects` happens in the model
_validated_data = super(PreprintSerializer, self).run_validation(*args, **kwargs)
if 'subjects' in self.initial_data:
_validated_data['subjects'] = self.initial_data['subjects']
return _validated_data
def update(self, preprint, validated_data):
assert isinstance(preprint, PreprintService), 'You must specify a valid preprint to be updated'
assert isinstance(preprint.node, Node), 'You must specify a preprint with a valid node to be updated.'
auth = get_user_auth(self.context['request'])
if not preprint.node.has_permission(auth.user, 'admin'):
raise exceptions.PermissionDenied(detail='User must be an admin to update a preprint.')
save_node = False
save_preprint = False
recently_published = False
primary_file = validated_data.pop('primary_file', None)
if primary_file:
self.set_field(preprint.set_primary_file, primary_file, auth)
save_node = True
if 'subjects' in validated_data:
subjects = validated_data.pop('subjects', None)
self.set_field(preprint.set_subjects, subjects, auth)
save_preprint = True
if 'article_doi' in validated_data:
preprint.node.preprint_article_doi = validated_data['article_doi']
save_node = True
if 'license_type' in validated_data or 'license' in validated_data:
license_details = get_license_details(preprint, validated_data)
self.set_field(preprint.set_preprint_license, license_details, auth)
save_preprint = True
published = validated_data.pop('is_published', None)
if published is not None:
self.set_field(preprint.set_published, published, auth)
save_preprint = True
recently_published = published
if save_node:
try:
preprint.node.save()
except ValidationError as e:
# Raised from invalid DOI
raise exceptions.ValidationError(detail=e.messages[0])
if save_preprint:
preprint.save()
# Send preprint confirmation email signal to new authors on preprint! -- only when published
# TODO: Some more thought might be required on this; preprints made from existing
# nodes will send emails making it seem like a new node.
if recently_published:
for author in preprint.node.contributors:
if author != auth.user:
project_signals.contributor_added.send(preprint.node, contributor=author, auth=auth, email_template='preprint')
return preprint
def set_field(self, func, val, auth, save=False):
try:
func(val, auth)
except PermissionsError as e:
raise exceptions.PermissionDenied(detail=e.message)
except ValueError as e:
raise exceptions.ValidationError(detail=e.message)
except NodeStateError as e:
raise exceptions.ValidationError(detail=e.message)
class PreprintCreateSerializer(PreprintSerializer):
# Overrides PreprintSerializer to make id nullable, adds `create`
id = IDField(source='_id', required=False, allow_null=True)
def create(self, validated_data):
node = validated_data.pop('node', None)
if not node:
raise exceptions.NotFound('Unable to find Node with specified id.')
elif node.is_deleted:
raise exceptions.ValidationError('Cannot create a preprint from a deleted node.')
auth = get_user_auth(self.context['request'])
if not node.has_permission(auth.user, permissions.ADMIN):
raise exceptions.PermissionDenied
primary_file = validated_data.pop('primary_file', None)
if not primary_file:
raise exceptions.ValidationError(detail='You must specify a valid primary_file to create a preprint.')
provider = validated_data.pop('provider', None)
if not provider:
raise exceptions.ValidationError(detail='You must specify a valid provider to create a preprint.')
if PreprintService.find(Q('node', 'eq', node) & Q('provider', 'eq', provider)).count():
conflict = PreprintService.find_one(Q('node', 'eq', node) & Q('provider', 'eq', provider))
raise Conflict('Only one preprint per provider can be submitted for a node. Check `meta[existing_resource_id]`.', meta={'existing_resource_id': conflict._id})
preprint = PreprintService(node=node, provider=provider)
self.set_field(preprint.set_primary_file, primary_file, auth)
preprint.save()
preprint.node._has_abandoned_preprint = True
preprint.node.save()
return self.update(preprint, validated_data)
class PreprintCitationSerializer(NodeCitationSerializer):
class Meta:
type_ = 'preprint-citation'
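# Rough sketch of the JSON-API request body that PreprintCreateSerializer is
# meant to accept: ``node``, ``provider`` and ``primary_file`` are
# relationships, ``subjects`` is validated by the model, and ``is_published``
# is a plain attribute. The ids are placeholders and the envelope shape is an
# assumption based on common JSON-API conventions, not something defined in
# this module.
EXAMPLE_PREPRINT_CREATE_PAYLOAD = {
    'data': {
        'type': 'preprints',
        'attributes': {
            'doi': '10.1234/example-doi',
            'is_published': False,
            'subjects': [['<subject-id>']],
        },
        'relationships': {
            'node': {'data': {'type': 'nodes', 'id': '<node-id>'}},
            'provider': {
                'data': {'type': 'preprint_providers', 'id': '<provider-id>'}},
            'primary_file': {'data': {'type': 'files', 'id': '<file-id>'}},
        },
    },
}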
|
|
from collections.abc import Iterable
from numbers import Integral, Real
import numpy as np
import openmc.checkvalue as cv
from openmc.mixin import EqualityMixin
from .data import EV_PER_MEV
class ProbabilityTables(EqualityMixin):
r"""Unresolved resonance region probability tables.
Parameters
----------
energy : Iterable of float
Energies in eV at which probability tables exist
table : numpy.ndarray
Probability tables for each energy. This array is of shape (N, 6, M)
where N is the number of energies and M is the number of bands. The
second dimension indicates whether the value is for the cumulative
probability (0), total (1), elastic (2), fission (3), :math:`(n,\gamma)`
(4), or heating number (5).
interpolation : {2, 5}
Interpolation scheme between tables
inelastic_flag : int
A value less than zero indicates that the inelastic cross section is
zero within the unresolved energy range. A value greater than zero
indicates the MT number for a reaction whose cross section is to be used
in the unresolved range.
absorption_flag : int
A value less than zero indicates that the "other absorption" cross
section is zero within the unresolved energy range. A value greater than
zero indicates the MT number for a reaction whose cross section is to be
used in the unresolved range.
multiply_smooth : bool
Indicate whether probability table values are cross sections (False) or
whether they must be multiplied by the corresponding "smooth" cross
sections (True).
Attributes
----------
energy : Iterable of float
Energies in eV at which probability tables exist
table : numpy.ndarray
Probability tables for each energy. This array is of shape (N, 6, M)
where N is the number of energies and M is the number of bands. The
second dimension indicates whether the value is for the cumulative
probability (0), total (1), elastic (2), fission (3), :math:`(n,\gamma)`
(4), or heating number (5).
interpolation : {2, 5}
Interpolation scheme between tables
inelastic_flag : int
A value less than zero indicates that the inelastic cross section is
zero within the unresolved energy range. A value greater than zero
indicates the MT number for a reaction whose cross section is to be used
in the unresolved range.
absorption_flag : int
A value less than zero indicates that the "other absorption" cross
section is zero within the unresolved energy range. A value greater than
zero indicates the MT number for a reaction whose cross section is to be
used in the unresolved range.
multiply_smooth : bool
Indicate whether probability table values are cross sections (False) or
whether they must be multiplied by the corresponding "smooth" cross
sections (True).
"""
def __init__(self, energy, table, interpolation, inelastic_flag=-1,
absorption_flag=-1, multiply_smooth=False):
self.energy = energy
self.table = table
self.interpolation = interpolation
self.inelastic_flag = inelastic_flag
self.absorption_flag = absorption_flag
self.multiply_smooth = multiply_smooth
@property
def absorption_flag(self):
return self._absorption_flag
@property
def energy(self):
return self._energy
@property
def inelastic_flag(self):
return self._inelastic_flag
@property
def interpolation(self):
return self._interpolation
@property
def multiply_smooth(self):
return self._multiply_smooth
@property
def table(self):
return self._table
@absorption_flag.setter
def absorption_flag(self, absorption_flag):
cv.check_type('absorption flag', absorption_flag, Integral)
self._absorption_flag = absorption_flag
@energy.setter
def energy(self, energy):
cv.check_type('probability table energies', energy, Iterable, Real)
self._energy = energy
@inelastic_flag.setter
def inelastic_flag(self, inelastic_flag):
cv.check_type('inelastic flag', inelastic_flag, Integral)
self._inelastic_flag = inelastic_flag
@interpolation.setter
def interpolation(self, interpolation):
cv.check_value('interpolation', interpolation, [2, 5])
self._interpolation = interpolation
@multiply_smooth.setter
def multiply_smooth(self, multiply_smooth):
cv.check_type('multiply by smooth', multiply_smooth, bool)
self._multiply_smooth = multiply_smooth
@table.setter
def table(self, table):
cv.check_type('probability tables', table, np.ndarray)
self._table = table
def to_hdf5(self, group):
"""Write probability tables to an HDF5 group
Parameters
----------
group : h5py.Group
HDF5 group to write to
"""
group.attrs['interpolation'] = self.interpolation
group.attrs['inelastic'] = self.inelastic_flag
group.attrs['absorption'] = self.absorption_flag
group.attrs['multiply_smooth'] = int(self.multiply_smooth)
group.create_dataset('energy', data=self.energy)
group.create_dataset('table', data=self.table)
@classmethod
def from_hdf5(cls, group):
"""Generate probability tables from HDF5 data
Parameters
----------
group : h5py.Group
HDF5 group to read from
Returns
-------
openmc.data.ProbabilityTables
Probability tables
"""
interpolation = group.attrs['interpolation']
inelastic_flag = group.attrs['inelastic']
absorption_flag = group.attrs['absorption']
multiply_smooth = bool(group.attrs['multiply_smooth'])
energy = group['energy'].value
table = group['table'].value
return cls(energy, table, interpolation, inelastic_flag,
absorption_flag, multiply_smooth)
@classmethod
def from_ace(cls, ace):
"""Generate probability tables from an ACE table
Parameters
----------
ace : openmc.data.ace.Table
ACE table to read from
Returns
-------
openmc.data.ProbabilityTables
Unresolved resonance region probability tables
"""
# Check if URR probability tables are present
idx = ace.jxs[23]
if idx == 0:
return None
N = int(ace.xss[idx]) # Number of incident energies
M = int(ace.xss[idx+1]) # Length of probability table
interpolation = int(ace.xss[idx+2])
inelastic_flag = int(ace.xss[idx+3])
absorption_flag = int(ace.xss[idx+4])
multiply_smooth = (int(ace.xss[idx+5]) == 1)
idx += 6
# Get energies at which tables exist
energy = ace.xss[idx : idx+N]*EV_PER_MEV
idx += N
# Get probability tables
table = ace.xss[idx : idx+N*6*M].copy()
table.shape = (N, 6, M)
# Convert units on heating numbers
table[:,5,:] *= EV_PER_MEV
return cls(energy, table, interpolation, inelastic_flag,
absorption_flag, multiply_smooth)
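# Minimal construction sketch to make the (N, 6, M) table layout concrete:
# two incident energies (N=2) and three probability bands (M=3). The numbers
# are dummies, not physical data; real tables normally come from from_ace()
# or from_hdf5().
if __name__ == '__main__':
    dummy_energy = [1.0e3, 1.0e4]              # eV
    dummy_table = np.zeros((2, 6, 3))          # (N energies, 6 quantities, M bands)
    dummy_table[:, 0, :] = [1.0 / 3, 2.0 / 3, 1.0]  # cumulative probabilities
    tables = ProbabilityTables(dummy_energy, dummy_table, interpolation=2)
    print(tables.table.shape, tables.interpolation)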
|
|
from rpython.rlib.rarithmetic import r_singlefloat, r_uint
from rpython.rtyper.lltypesystem import lltype, rffi
from rpython.translator.tool.cbuild import ExternalCompilationInfo
r_uint32 = rffi.r_uint
assert r_uint32.BITS == 32
UINT32MAX = 2 ** 32 - 1
# keep in sync with the C code in pypy__decay_jit_counters below
ENTRY = lltype.Struct('timetable_entry',
('times', lltype.FixedSizeArray(rffi.FLOAT, 5)),
('subhashes', lltype.FixedSizeArray(rffi.USHORT, 5)))
class JitCounter:
"""A process translated with the JIT contains one prebuilt instance
of this class. It is used for three things:
* It maps greenkey hashes to counters, to know when we have seen this
greenkey enough to reach the 'threshold' or 'function_threshold'
parameters. This is done in a lossy way by a fixed-size 'timetable'.
* It handles the counters on the failing guards, for 'trace_eagerness'.
This is done in the same 'timetable'.
* It records the JitCell objects that are created when we compile
a loop, in a non-lossy dictionary-like structure. This is done
in the 'celltable'.
The 'timetable' is a table of DEFAULT_SIZE entries, each of which
containing 5 entries. From a hash value, we use the index number
'_get_index(hash)', and then we look in all five entries for a
matching '_get_subhash(hash)'. The five entries are roughly kept
sorted by decreasing recorded time. The hash value itself should be
computed accordingly: we only use bits 21:32 for _get_index and
bits 0:16 for _get_subhash. (This organization is "probably good"
to get not-too-random behavior; another motivation for it was for
the STM branch, to avoid pointless conflicts between threads.)
The time value stored in the timetable is a (short-precision)
floating-point number. The idea is that a value of 0.0 means
absent, and values go up to the maximum of 1.0.
'compute_threshold(threshold)' returns basically the fraction
1.0/threshold, corresponding to the 'increment' value for the
following APIs.
'tick(hash, increment)' adds 'increment' to the time value stored
with the 'hash'. Remember that only bits 0:16,21:32 of the hash
are used; in case of collision between two hashes, they will grow
twice as fast, because each tick() call will contribute to the
colliding time value.
'fetch_next_hash()' returns a "random" hash value suitable for
using in tick() later. Used when compiling guards; when the
guard actually fails, we'll tick() the guard's stored random hash.
'reset(hash)', 'change_current_fraction(hash, new_time_value)'
change the time value associated with a hash. The former resets
it to zero, and the latter changes it to the given value (which
should be a value close to 1.0).
'set_decay(decay)', 'decay_all_counters()' is used to globally
reduce all the stored time values. They all get multiplied by
a fraction close to (but smaller than) 1.0, computed from the
'decay' parameter.
'install_new_cell(hash, newcell)' adds the new JitCell to the
celltable, at the index given by 'hash' (bits 21:32). Unlike
the timetable, the celltable stores a linked list of JitCells
for every entry, and so it is not lossy.
'lookup_chain(hash)' returns the first JitCell at 'hash'. You can
then walk the chain by following the '.next' attributes until you
reach None.
'cleanup_chain(hash)' resets the timetable's 'hash' entry and
cleans up the celltable at 'hash'. It removes those JitCells
for which 'cell.should_remove_jitcell()' returns True.
"""
DEFAULT_SIZE = 2048
def __init__(self, size=DEFAULT_SIZE, translator=None):
"NOT_RPYTHON"
self.size = size
self.shift = 16
while (UINT32MAX >> self.shift) != size - 1:
self.shift += 1
assert self.shift < 999, "size is not a power of two <= 2**16"
#
# The table of timings. This is a 5-way associative cache.
# We index into it using a number between 0 and (size - 1),
# and we get a 32-byte-long entry; this entry
# contains 5 possible ways, each occupying 6 bytes: 4 bytes
# for a float, and the 2 lowest bytes from the original hash.
self.timetable = lltype.malloc(rffi.CArray(ENTRY), self.size,
flavor='raw', zero=True,
track_allocation=False)
self._nexthash = r_uint(0)
#
# The table of JitCell entries, recording already-compiled loops
self.celltable = [None] * size
#
if translator is not None:
class Glob:
step = 0
glob = Glob()
def invoke_after_minor_collection():
# After 32 minor collections, we call decay_all_counters().
# The "--jit decay=N" option measures the amount the
# counters are then reduced by.
glob.step += 1
if glob.step == 32:
glob.step = 0
self.decay_all_counters()
if not hasattr(translator, '_jit2gc'):
translator._jit2gc = {}
translator._jit2gc['invoke_after_minor_collection'] = (
invoke_after_minor_collection)
def compute_threshold(self, threshold):
"""Return the 'increment' value corresponding to the given number."""
if threshold <= 0:
return 0.0 # no increment, never reach 1.0
return 1.0 / (threshold - 0.001)
def _get_index(self, hash):
"""Return the index (< self.size) from a hash. This truncates
the hash to 32 bits, and then keeps the *highest* remaining bits.
Be sure that hash is computed correctly, by multiplying with
a large odd number or by fetch_next_hash()."""
hash32 = r_uint(r_uint32(hash)) # mask off the bits higher than 32
index = hash32 >> self.shift # shift, resulting in a value < size
return index # return the result as a r_uint
_get_index._always_inline_ = True
@staticmethod
def _get_subhash(hash):
return hash & 65535
def fetch_next_hash(self):
result = self._nexthash
# note: all three "1" bits in the following constant are needed
# to make test_counter.test_fetch_next_index pass. The first
# is to increment the "subhash" (lower 16 bits of the hash).
# The second is to increment the "index" portion of the hash.
# The third is so that after 65536 passes, the "index" is
# incremented by one more (by overflow), so that the next
# 65536 passes don't end up with the same subhashes.
self._nexthash = result + r_uint(1 | (1 << self.shift) |
(1 << (self.shift - 16)))
return result
def _swap(self, p_entry, n):
if float(p_entry.times[n]) > float(p_entry.times[n + 1]):
return n + 1
else:
x = p_entry.times[n]
p_entry.times[n] = p_entry.times[n + 1]
p_entry.times[n + 1] = x
x = p_entry.subhashes[n]
p_entry.subhashes[n] = p_entry.subhashes[n + 1]
p_entry.subhashes[n + 1] = x
return n
_swap._always_inline_ = True
def _tick_slowpath(self, p_entry, subhash):
if p_entry.subhashes[1] == subhash:
n = self._swap(p_entry, 0)
elif p_entry.subhashes[2] == subhash:
n = self._swap(p_entry, 1)
elif p_entry.subhashes[3] == subhash:
n = self._swap(p_entry, 2)
elif p_entry.subhashes[4] == subhash:
n = self._swap(p_entry, 3)
else:
n = 4
while n > 0 and float(p_entry.times[n - 1]) == 0.0:
n -= 1
p_entry.subhashes[n] = rffi.cast(rffi.USHORT, subhash)
p_entry.times[n] = r_singlefloat(0.0)
return n
def tick(self, hash, increment):
p_entry = self.timetable[self._get_index(hash)]
subhash = self._get_subhash(hash)
#
if p_entry.subhashes[0] == subhash:
n = 0
else:
n = self._tick_slowpath(p_entry, subhash)
#
counter = float(p_entry.times[n]) + increment
if counter < 1.0:
p_entry.times[n] = r_singlefloat(counter)
return False
else:
# when the bound is reached, we immediately reset the value to 0.0
self.reset(hash)
return True
tick._always_inline_ = True
def change_current_fraction(self, hash, new_fraction):
"""Change the value stored for 'hash' to be the given 'new_fraction',
which should be a float equal to or slightly lower than 1.0.
"""
p_entry = self.timetable[self._get_index(hash)]
subhash = self._get_subhash(hash)
# find in 'n' the index that will be overwritten: the first within
# range(5) that contains either the right subhash, or a null time
# (or, if there isn't any, then just n == 4 will do).
n = 0
while n < 4 and (p_entry.subhashes[n] != subhash and
float(p_entry.times[n]) != 0.0):
n += 1
# move one step to the right all elements [n - 1, n - 2, ..., 0],
# (this overwrites the old item at index 'n')
while n > 0:
n -= 1
p_entry.subhashes[n + 1] = p_entry.subhashes[n]
p_entry.times[n + 1] = p_entry.times[n]
# insert the new hash at index 0. This is a good approximation,
# because change_current_fraction() should be used for
# new_fraction == value close to 1.0.
p_entry.subhashes[0] = rffi.cast(rffi.USHORT, subhash)
p_entry.times[0] = r_singlefloat(new_fraction)
def reset(self, hash):
p_entry = self.timetable[self._get_index(hash)]
subhash = self._get_subhash(hash)
for i in range(5):
if p_entry.subhashes[i] == subhash:
p_entry.times[i] = r_singlefloat(0.0)
def lookup_chain(self, hash):
return self.celltable[self._get_index(hash)]
def cleanup_chain(self, hash):
self.reset(hash)
self.install_new_cell(hash, None)
def install_new_cell(self, hash, newcell):
index = self._get_index(hash)
cell = self.celltable[index]
keep = newcell
while cell is not None:
nextcell = cell.next
if not cell.should_remove_jitcell():
cell.next = keep
keep = cell
cell = nextcell
self.celltable[index] = keep
def set_decay(self, decay):
"""Set the decay, from 0 (none) to 1000 (max)."""
if decay < 0:
decay = 0
elif decay > 1000:
decay = 1000
self.decay_by_mult = 1.0 - (decay * 0.001)
def decay_all_counters(self):
# Called during a minor collection by the GC, to gradually decay
# counters that didn't reach their maximum. Thus if a counter
# is incremented very slowly, it will never reach the maximum.
# This avoids altogether the JIT compilation of rare paths.
# We also call this function when any maximum bound is reached,
# to avoid sudden bursts of JIT-compilation (the next one will
# not reach the maximum bound immediately after). This is
# important in corner cases where we would suddenly compile more
# than one loop because all counters reach the bound at the same
# time, but where compiling all but the first one is pointless.
p = rffi.cast(rffi.CCHARP, self.timetable)
pypy__decay_jit_counters(p, self.decay_by_mult, self.size)
# this function is written directly in C; gcc will optimize it using SSE
eci = ExternalCompilationInfo(post_include_bits=["""
static void pypy__decay_jit_counters(char *data, double f1, long size) {
struct rpy_jitcnt { float times[5]; unsigned short subhashes[5]; };
struct rpy_jitcnt *p = (struct rpy_jitcnt *)data;
float f = (float)f1;
long i;
for (i=0; i<size; i++) {
p->times[0] *= f;
p->times[1] *= f;
p->times[2] *= f;
p->times[3] *= f;
p->times[4] *= f;
++p;
}
}
"""])
pypy__decay_jit_counters = rffi.llexternal(
"pypy__decay_jit_counters", [rffi.CCHARP, lltype.Float, lltype.Signed],
lltype.Void, compilation_info=eci, _nowrapper=True, sandboxsafe=True)
# ____________________________________________________________
#
# A non-RPython version that avoids issues with rare random collisions,
# which make all tests brittle
class DeterministicJitCounter(JitCounter):
def __init__(self):
from collections import defaultdict
JitCounter.__init__(self, size=8)
def make_null_entry():
return lltype.malloc(ENTRY, immortal=True, zero=True)
self.timetable = defaultdict(make_null_entry)
self.celltable = defaultdict(lambda: None)
def _get_index(self, hash):
"NOT_RPYTHON"
return hash
def decay_all_counters(self):
"NOT_RPYTHON"
pass
def _clear_all(self):
self.timetable.clear()
self.celltable.clear()
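# Untranslated sketch of the counter API described in the JitCounter
# docstring: compute_threshold() turns a threshold into an increment, and
# tick() returns True once the accumulated value reaches 1.0 (and resets it).
# The deterministic subclass is used so the demo does not depend on the hash
# layout; it assumes the rpython toolchain is importable outside translation.
if __name__ == '__main__':
    counter = DeterministicJitCounter()
    increment = counter.compute_threshold(3)   # roughly 1/3 per tick
    h = counter.fetch_next_hash()
    ticks = 1
    while not counter.tick(h, increment):
        ticks += 1
    print('bound reached after %d ticks' % ticks)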
|
|
# Copyright 2014-2017 Insight Software Consortium.
# Copyright 2004-2009 Roman Yakovenko.
# Distributed under the Boost Software License, Version 1.0.
# See http://www.boost.org/LICENSE_1_0.txt
import os
from . import templates
from . import matcher_base_t
from . import variable
from . import cpptypes
from . import namespace
from . import calldef
from . import calldef_members
from .. import utils
class declaration_matcher_t(matcher_base_t):
"""
Instance of this class will match declarations by the following criteria:
- declaration name, also could be fully qualified name
Example: `wstring` or `::std::wstring`
- declaration type
Example: :class:`class_t`, :class:`namespace_t`,
:class:`enumeration_t`
- location within the file system (file or directory)
"""
def __init__(
self,
name=None,
decl_type=None,
header_dir=None,
header_file=None):
"""
:param decl_type: declaration type to match by. For example
:class:`enumeration_t`.
:type decl_type: any class that derives from :class:`declaration_t`
class
:param name: declaration name, could be full name.
:type name: str
:param header_dir: absolute directory path
:type header_dir: str
:param header_file: absolute file path
:type header_file: str
"""
# Another option is that pygccxml could create an absolute path using
# the os.path.abspath function. But I think this is just wrong, because
# abspath builds the path using the current working directory. This
# behavior is fragile and makes bugs very difficult to find.
matcher_base_t.__init__(self)
self.decl_type = decl_type
self.__name = None
self.__opt_is_tmpl_inst = None
self.__opt_tmpl_name = None
self.__opt_is_full_name = None
self.__decl_name_only = None
# Set the name through the setter.
self.name = name
self.header_dir = header_dir
self.header_file = header_file
if self.header_dir:
self.header_dir = utils.normalize_path(self.header_dir)
if not os.path.isabs(self.header_dir):
raise RuntimeError(
"Path to header directory should be absolute!")
if self.header_file:
self.header_file = utils.normalize_path(self.header_file)
if not os.path.isabs(self.header_file):
raise RuntimeError("Path to header file should be absolute!")
@property
def name(self):
return self.__name
@name.setter
def name(self, name):
self.__name = name
if not self.__name:
self.__opt_is_tmpl_inst = None
self.__opt_tmpl_name = None
self.__opt_is_full_name = None
self.__decl_name_only = None
else:
self.__opt_is_tmpl_inst = templates.is_instantiation(self.__name)
self.__opt_tmpl_name = templates.name(self.__name)
if self.__opt_is_tmpl_inst:
if '::' in self.__opt_tmpl_name:
self.__opt_is_full_name = True
self.__decl_name_only = \
self.__opt_tmpl_name.split('::')[-1]
else:
self.__opt_is_full_name = False
self.__decl_name_only = self.__opt_tmpl_name
self.__name = templates.normalize(name)
else:
if '::' in self.__name:
self.__opt_is_full_name = True
self.__decl_name_only = self.__name.split('::')[-1]
else:
self.__opt_is_full_name = False
self.__decl_name_only = self.__name
def __str__(self):
msg = []
if self.decl_type is not None:
msg.append('(decl type==%s)' % self.decl_type.__name__)
if self.name is not None:
msg.append('(name==%s)' % self.name)
if self.header_dir is not None:
msg.append('(header dir==%s)' % self.header_dir)
if self.header_file is not None:
msg.append('(header file==%s)' % self.header_file)
if not msg:
msg.append('any')
return ' and '.join(msg)
def __call__(self, decl):
if self.decl_type is not None:
if not isinstance(decl, self.decl_type):
return False
if self.name is not None:
if not self.check_name(decl):
return False
if self.header_dir is not None:
if decl.location:
decl_dir = os.path.abspath(
os.path.dirname(decl.location.file_name))
decl_dir = utils.normalize_path(decl_dir)
if decl_dir[:len(self.header_dir)] != self.header_dir:
return False
else:
return False
if self.header_file is not None:
if decl.location:
decl_file = os.path.abspath(decl.location.file_name)
decl_file = utils.normalize_path(decl_file)
if decl_file != self.header_file:
return False
else:
return False
return True
def check_name(self, decl):
assert self.name is not None
if self.__opt_is_tmpl_inst:
if not self.__opt_is_full_name:
if self.name != templates.normalize_name(decl) \
and self.name != templates.normalize_partial_name(decl):
return False
else:
if self.name != templates.normalize_full_name_true(decl) and \
self.name != templates.normalize_full_name_false(decl):
return False
else:
if not self.__opt_is_full_name:
if self.name != decl.name and self.name != decl.partial_name:
return False
else:
if self.name != templates.normalize_full_name_true(decl) and \
self.name != templates.normalize_full_name_false(decl):
return False
return True
def is_full_name(self):
return self.__opt_is_full_name
@property
def decl_name_only(self):
return self.__decl_name_only
class variable_matcher_t(declaration_matcher_t):
"""
Instance of this class will match variables by the following criteria:
- :class:`declaration_matcher_t` criteria
- variable type. Example: :class:`int_t` or 'int'
"""
def __init__(
self,
name=None,
decl_type=None,
header_dir=None,
header_file=None):
"""
:param decl_type: variable type
:type decl_type: string or instance of :class:`type_t` derived class
"""
declaration_matcher_t.__init__(
self,
name=name,
decl_type=variable.variable_t,
header_dir=header_dir,
header_file=header_file)
self._decl_type = decl_type
def __call__(self, decl):
if not super(variable_matcher_t, self).__call__(decl):
return False
if self._decl_type is not None:
if isinstance(self._decl_type, cpptypes.type_t):
if self._decl_type != decl.decl_type:
return False
else:
if self._decl_type != decl.decl_type.decl_string:
return False
return True
def __str__(self):
msg = [super(variable_matcher_t, self).__str__()]
if msg == ['any']:
msg = []
if self._decl_type is not None:
msg.append('(value type==%s)' % str(self._decl_type))
if not msg:
msg.append('any')
return ' and '.join(msg)
class namespace_matcher_t(declaration_matcher_t):
"""Instance of this class will match namespaces by name."""
def __init__(self, name=None):
declaration_matcher_t.__init__(
self,
name=name,
decl_type=namespace.namespace_t)
def __call__(self, decl):
if self.name and decl.name == '':
# unnamed namespaces have the same name as their parent; we should
# prevent a match here. The price is that the user has to search for
# an unnamed namespace directly.
return False
return super(namespace_matcher_t, self).__call__(decl)
class calldef_matcher_t(declaration_matcher_t):
"""
Instance of this class will match callables by the following criteria:
* :class:`declaration_matcher_t` criteria
* return type. For example: :class:`int_t` or 'int'
* argument types
"""
def __init__(
self,
name=None,
return_type=None,
arg_types=None,
decl_type=None,
header_dir=None,
header_file=None):
"""
:param return_type: callable return type
:type return_type: string or instance of :class:`type_t` derived class
:type arg_types: list
:param arg_types: list of function argument types. Any item within the
list can be a string or an instance of a
:class:`type_t` derived class. If you don't want
some argument to participate in the match you can
put None.
For example:
.. code-block:: python
calldef_matcher_t( arg_types=[ 'int &', None ] )
will match all functions that take 2 arguments, where the first one is a
reference to an integer and the second is anything
"""
if None is decl_type:
decl_type = calldef.calldef_t
declaration_matcher_t.__init__(
self,
name=name,
decl_type=decl_type,
header_dir=header_dir,
header_file=header_file)
self.return_type = return_type
self.arg_types = arg_types
def __call__(self, decl):
if not super(calldef_matcher_t, self).__call__(decl):
return False
if self.return_type is not None \
and not self.__compare_types(self.return_type, decl.return_type):
return False
if self.arg_types:
if isinstance(self.arg_types, (list, tuple)):
if len(self.arg_types) != len(decl.arguments):
return False
for type_or_str, arg in zip(self.arg_types, decl.arguments):
if type_or_str is None:
continue
else:
if not self.__compare_types(
type_or_str, arg.decl_type):
return False
return True
@staticmethod
def __compare_types(type_or_str, decl_type):
assert type_or_str
if decl_type is None:
return False
if isinstance(type_or_str, cpptypes.type_t):
if type_or_str != decl_type:
return False
else:
if type_or_str != decl_type.decl_string:
return False
return True
def __str__(self):
msg = [super(calldef_matcher_t, self).__str__()]
if msg == ['any']:
msg = []
if self.return_type is not None:
msg.append('(return type==%s)' % str(self.return_type))
if self.arg_types:
for i, arg_type in enumerate(self.arg_types):
if arg_type is None:
msg.append('(arg %d type==any)' % i)
else:
msg.append('(arg %d type==%s)' % (i, str(arg_type)))
if not msg:
msg.append('any')
return ' and '.join(msg)
class operator_matcher_t(calldef_matcher_t):
"""
Instance of this class will match operators by the following criteria:
* :class:`calldef_matcher_t` criteria
* operator symbol: =, !=, (), [], etc.
"""
def __init__(
self,
name=None,
symbol=None,
return_type=None,
arg_types=None,
decl_type=None,
header_dir=None,
header_file=None):
"""
:param symbol: operator symbol
:type symbol: str
"""
if None is decl_type:
decl_type = calldef_members.operator_t
calldef_matcher_t.__init__(
self,
name=name,
return_type=return_type,
arg_types=arg_types,
decl_type=decl_type,
header_dir=header_dir,
header_file=header_file)
self.symbol = symbol
def __call__(self, decl):
if not super(operator_matcher_t, self).__call__(decl):
return False
if self.symbol is not None:
if self.symbol != decl.symbol:
return False
return True
def __str__(self):
msg = [super(operator_matcher_t, self).__str__()]
if msg == ['any']:
msg = []
if self.symbol is not None:
msg.append('(symbol==%s)' % str(self.symbol))
if not msg:
msg.append('any')
return ' and '.join(msg)
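# Minimal sketch of the matcher API above. Matchers are normally passed to
# pygccxml's query functions together with parsed declarations; constructing
# one and printing it only exercises the criteria handling shown here, without
# needing a parsed header. The criteria values are arbitrary examples.
if __name__ == '__main__':
    matcher = calldef_matcher_t(
        name='do_something',
        return_type='void',
        arg_types=['int &', None])  # None means "any type" for that argument
    print(matcher)  # e.g. "(decl type==calldef_t) and (name==do_something) ..."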
|
|
# ===============================================================================
# Copyright 2012 Jake Ross
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ===============================================================================
# ============= enthought library imports =======================
from __future__ import absolute_import
from traits.api import Str, Float, Int
from traitsui.api import Item
# ============= standard library imports ========================
# ============= local library imports ==========================
from pychron.pyscripts.commands.core import Command
from pychron.pyscripts.commands.valve import ValveCommand
class Lock(Command):
pass
class Ramp(Command):
pass
class SetLight(Command):
pass
class Unlock(Command):
pass
class Wake(Command):
pass
class Disable(Command):
pass
class DrillPoint(Command):
pass
class Enable(Command):
pass
class MovingExtract(Command):
pass
class Prepare(Command):
pass
class SetMotor(Command):
pass
class SetMotorLock(Command):
pass
class Snapshot(Command):
pass
class Autofocus(Command):
pass
class StartVideoRecording(Command):
pass
class StopVideoRecording(Command):
pass
class VideoRecording(Command):
pass
class TracePath(Command):
pass
class Degas(Command):
pass
class PowerMap(Command):
pass
class Open(ValveCommand):
description = "Open a valve"
example = """1. open("V")
2. open(description="Bone to Turbo")
"""
class Close(ValveCommand):
description = "Close a valve"
example = """1. open("V")
2. close(description="Bone to Turbo")
"""
class Unlock(ValveCommand):
description = "Unlock a valve"
example = """1. unlock("V")
2. unlock(description="Bone to Turbo")
"""
class Lock(ValveCommand):
description = "Lock a valve"
example = """1. lock("V")
2. lock(description="Bone to Turbo")
"""
class IsOpen(ValveCommand):
description = "Check if a valve is Open"
example = """1. is_open("V")
2. is_open(description="Bone to Turbo")
"""
class IsClosed(ValveCommand):
description = "Check if a valve is Closed"
example = """1. is_closed("V")
2. is_closed(description="Bone to Turbo")
"""
class NameCommand(Command):
name = Str
def _get_view(self):
return Item("name", width=300)
def _to_string(self):
return self._keyword("name", self.name)
class Release(NameCommand):
description = ""
example = ""
class Acquire(NameCommand):
description = "Acquire a resource"
example = """acquire('foo')"""
class MoveToPosition(Command):
position = Str
def _get_view(self):
return Item("position")
def _to_string(self):
return "{}".format(self.position)
class ExecutePattern(Command):
description = "Execute a pattern"
example = 'execute_pattern("diamond")'
class ValueCommand(Command):
value = Float
def _get_view(self):
return Item("value")
def _to_string(self):
return "{}".format(self.value)
class Extract(ValueCommand):
description = "Set extraction device to specified value"
example = ""
class EndExtract(Command):
description = ""
example = ""
class SetTray(Command):
description = ""
example = ""
class SetResource(Command):
description = ""
example = ""
class GetResourceValue(Command):
description = ""
example = ""
class SetPositionCommand(ValueCommand):
pass
class SetX(SetPositionCommand):
pass
class SetY(SetPositionCommand):
pass
class SetZ(SetPositionCommand):
pass
class SetXy(Command):
xvalue = Float
yvalue = Float
def _get_view(self):
return Item("xvalue", "yvalue")
def _to_string(self):
return "{},{}".format(self.xvalue, self.yvalue)
class GetValue(Command):
pass
class Waitfor(Command):
timeout = Int
def _get_view(self):
return Item("timeout")
def _to_string(self):
return "waitfor(timeout={})".format(self.timeout)
class LoadPipette(Command):
pipette_name = Str
def _get_view(self):
return Item("pipette_name")
def _to_string(self):
return "load_pipette('{}')".format(self.pipette_name)
class ExtractPipette(Command):
pipette_name = Str
def _get_view(self):
return Item("pipette_name")
def _to_string(self):
return "extract_pipette('{}')".format(self.pipette_name)
# class HeatSample(Command):
# value = Float
# def _get_view(self):
# return Item('value')
#
# def _to_string(self):
# return '{}'.format(self.value)
# ============= EOF =============================================
|
|
# Copyright (C) 2010 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import googlecl
import inspect
import logging
import os
import sys
from googlecl.base import Task
safe_encode = googlecl.safe_encode
service_name = __name__.split('.')[-1]
LOGGER_NAME = __name__
SECTION_HEADER = service_name.upper()
LOG = logging.getLogger(LOGGER_NAME)
class BaseFormatter(object):
"""Base class for formatters."""
def __init__(self, avail_fields, fields, sep=','):
"""Init formatter
Args:
avail_fields: list of tuples [(field_name, format_spec), ...] for all
possible fields
fields: string, <sep>-separated list of requested field names.
sep: string, separator, comma by default
"""
if fields:
self.fields = fields.split(sep)
else:
self.fields = [item[0] for item in avail_fields]
self.avail_fields = avail_fields
avail_dict = dict(avail_fields)
self.format = ' '.join(avail_dict[name] for name in self.fields)
@property
def header(self):
"""Make output header.
Uses the names of the available fields as column headers, replacing
'_' with ' ' and capitalizing them. Uses the same format string as
the body lines: self.format
Returns: string, header.
"""
return self.format % \
dict([(item[0], item[0].replace('_', ' ').capitalize()) \
for item in self.avail_fields])
def get_line(self, entry):
"""Get formatted entry. Abstract method.
Args:
entry: entry object
Returns:
string, formatted entry.
"""
raise NotImplementedError("Abstract method %s.%s called" % \
(self.__class__.__name__,
inspect.stack()[0][3] ))
def output(self, entries, stream=sys.stdout):
"""Output list of entries to the output stream.
Args:
entries: list of entries.
stream: output stream.
"""
if self.header:
stream.write(self.header + os.linesep)
for entry in entries:
stream.write(self.get_line(entry) + os.linesep)
class PortfolioFormatter(BaseFormatter):
avail_fields = [('id', '%(id)3s'), ('title', '%(title)-15s'),
('curr', '%(curr)-4s'),
('gain', '%(gain)-10s'),
('gain_persent', '%(gain_persent)-14s'),
('cost_basis', '%(cost_basis)-10s'),
('days_gain', '%(days_gain)-10s'),
('market_value', '%(market_value)-10s')]
def __init__(self, fields):
super(self.__class__, self).__init__(self.avail_fields, fields)
def get_line(self, entry):
data = entry.portfolio_data
return self.format % \
{'id': entry.portfolio_id, 'title': entry.portfolio_title,
'curr': data.currency_code,
'gain': data.gain and data.gain.money[0].amount,
'gain_persent': '%-14.2f' % (float(data.gain_percentage) * 100,),
'cost_basis': data.cost_basis and data.cost_basis.money[0].amount,
'days_gain': data.days_gain and data.days_gain.money[0].amount,
'market_value': data.market_value and data.market_value.money[0].amount
}
class PositionFormatter(BaseFormatter):
avail_fields = [('ticker', '%(ticker)-14s'), ('shares', '%(shares)-10s'),
('gain', '%(gain)-10s'),
('gain_persent', '%(gain_persent)-14s'),
('cost_basis', '%(cost_basis)-10s'),
('days_gain', '%(days_gain)-10s'),
('market_value', '%(market_value)-10s')]
def __init__(self, fields):
super(self.__class__, self).__init__(self.avail_fields, fields)
def get_line(self, entry):
data = entry.position_data
return self.format % \
{'ticker': entry.ticker_id, 'shares': data.shares,
'gain': data.gain and data.gain.money[0].amount,
'gain_persent': '%-14.2f' % (float(data.gain_percentage) * 100,),
'cost_basis': data.cost_basis and data.cost_basis.money[0].amount,
'days_gain': data.days_gain and data.days_gain.money[0].amount,
'market_value': data.market_value and data.market_value.money[0].amount
}
class TransactionFormatter(BaseFormatter):
avail_fields = [('id', '%(id)-3s'), ('type', '%(type)-12s'),
('shares', '%(shares)-10s'), ('price', '%(price)-10s'),
('commission', '%(commission)-10s'),
('date', '%(date)-10s'), ('notes', '%(notes)-30s')]
def __init__(self, fields):
super(self.__class__, self).__init__(self.avail_fields, fields)
def get_line(self, entry):
data = entry.transaction_data
if data.date:
data.date = data.date[:10] # strip isoformat tail
return self.format % \
{'id': entry.transaction_id, 'type': data.type, 'shares': data.shares,
'price': data.price.money[0].amount,
'commission': data.commission.money[0].amount,
'date': data.date or '', 'notes': data.notes or ''}
#===============================================================================
# Each of the following _run_* functions execute a particular task.
#
# Keyword arguments:
# client: Client to the service being used.
# options: Contains all attributes required to perform the task
# args: Additional arguments passed in on the command line, may or may not be
# required
#===============================================================================
# Portfolio-related tasks
def _run_create(client, options, args):
client.CreatePortfolio(options.title, options.currency)
def _run_delete(client, options, args):
entries = client.get_portfolio_entries(options.title, positions=True)
if entries:
client.DeleteEntryList(entries, 'portfolio', options.prompt)
def _run_list(client, options, args):
entries = client.get_portfolio_entries(returns=True)
if entries:
PortfolioFormatter(options.fields).output(entries)
else:
LOG.info('No portfolios found')
# Position-related tasks
def _run_create_position(client, options, args):
# Quote from Developer's Guide:
# You can't directly create, update, or delete position entries;
# positions are derived from transactions.
# Therefore, to create or modify a position, send appropriate
# transactions on that position.
pfl = client.get_portfolio(options.title, positions=True)
if pfl:
# create empty transaction
client.create_transaction(pfl, "Buy", options.ticker)
def _run_delete_positions(client, options, args):
positions = client.get_positions(portfolio_title=options.title,
ticker_id=options.ticker)
client.DeleteEntryList(positions, 'position', options.prompt,
callback=lambda pos: client.DeletePosition(position_entry=pos))
def _run_list_positions(client, options, args):
positions = client.get_positions(options.title, options.ticker,
include_returns=True)
if positions:
PositionFormatter(options.fields).output(positions)
else:
LOG.info('No positions found in this portfolio')
# Transaction-related tasks
def _run_create_transaction(client, options, args):
pfl = client.get_portfolio(options.title)
if pfl:
client.create_transaction(pfl, options.ttype, options.ticker,
options.shares, options.price,
options.currency, options.commission,
options.date, options.notes)
def _run_delete_transactions(client, options, args):
transactions = client.get_transactions(portfolio_title=options.title,
ticker_id=options.ticker,
transaction_id=options.txnid)
client.DeleteEntryList(transactions, 'transaction', options.prompt)
def _run_list_transactions(client, options, args):
transactions = client.get_transactions(portfolio_title=options.title,
ticker_id=options.ticker,
transaction_id=options.txnid)
TransactionFormatter(options.fields).output(transactions)
TASKS = {'create': Task('Create a portfolio',
callback=_run_create,
required=['title', 'currency']),
'delete': Task('Delete portfolios',
callback=_run_delete,
required=['title']),
'list': Task('List portfolios',
callback=_run_list,
optional=['fields']),
'create-pos': Task('Create position',
callback=_run_create_position,
required=['title', 'ticker']),
'delete-pos': Task('Delete positions',
callback=_run_delete_positions,
required=['title'],
optional=['ticker']),
'list-pos': Task('List positions',
callback=_run_list_positions,
required=['title'],
optional=['fields']),
'create-txn': Task('Create transaction',
callback=_run_create_transaction,
required=['title', 'ticker', 'ttype',
'shares', 'price'],
optional=['shares', 'price', 'date',
'commission', 'currency', 'notes']),
'list-txn': Task('List transactions',
callback=_run_list_transactions,
required=['title', 'ticker']),
'delete-txn': Task('Delete transactions',
callback=_run_delete_transactions,
required=['title', 'ticker'],
optional=['txnid']),
}
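# Minimal sketch of the formatter machinery documented in BaseFormatter: a
# '%'-style format string is built from avail_fields, and a header can be
# rendered without any gdata entries. Passing a comma-separated ``fields``
# string restricts the columns.
if __name__ == '__main__':
    full = PortfolioFormatter(fields=None)
    print(full.header)                        # all columns, capitalized names
    narrow = TransactionFormatter(fields='id,type,price')
    print(narrow.header)                      # only the requested columns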
|
|
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
__all__ = [
'Zone',
'Record',
'DNSDriver'
]
from libcloud.common.base import ConnectionUserAndKey, BaseDriver
from libcloud.dns.types import RecordType
class Zone(object):
"""
DNS zone.
"""
def __init__(self, id, domain, type, ttl, driver, extra=None):
"""
:param id: Zone id.
:type id: ``str``
:param domain: The name of the domain.
:type domain: ``str``
:param type: Zone type (master, slave).
:type type: ``str``
:param ttl: Default TTL for records in this zone (in seconds).
:type ttl: ``int``
:param driver: DNSDriver instance.
:type driver: :class:`DNSDriver`
:param extra: (optional) Extra attributes (driver specific).
:type extra: ``dict``
"""
self.id = str(id) if id else None
self.domain = domain
self.type = type
self.ttl = ttl or None
self.driver = driver
self.extra = extra or {}
def list_records(self):
return self.driver.list_records(zone=self)
def create_record(self, name, type, data, extra=None):
return self.driver.create_record(name=name, zone=self, type=type,
data=data, extra=extra)
def update(self, domain=None, type=None, ttl=None, extra=None):
return self.driver.update_zone(zone=self, domain=domain, type=type,
ttl=ttl, extra=extra)
def delete(self):
return self.driver.delete_zone(zone=self)
def __repr__(self):
return ('<Zone: domain=%s, ttl=%s, provider=%s ...>' %
(self.domain, self.ttl, self.driver.name))
class Record(object):
"""
Zone record / resource.
"""
def __init__(self, id, name, type, data, zone, driver, extra=None):
"""
:param id: Record id
:type id: ``str``
:param name: Hostname or FQDN.
:type name: ``str``
:param type: DNS record type (A, AAAA, ...).
:type type: :class:`RecordType`
:param data: Data for the record (depends on the record type).
:type data: ``str``
:param zone: Zone instance.
:type zone: :class:`Zone`
:param driver: DNSDriver instance.
:type driver: :class:`DNSDriver`
:param extra: (optional) Extra attributes (driver specific).
:type extra: ``dict``
"""
self.id = str(id) if id else None
self.name = name
self.type = type
self.data = data
self.zone = zone
self.driver = driver
self.extra = extra or {}
def update(self, name=None, type=None, data=None, extra=None):
return self.driver.update_record(record=self, name=name, type=type,
data=data, extra=extra)
def delete(self):
return self.driver.delete_record(record=self)
def __repr__(self):
return ('<Record: zone=%s, name=%s, type=%s, data=%s, provider=%s '
'...>' %
(self.zone.id, self.name, RecordType.__repr__(self.type),
self.data, self.driver.name))
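# Minimal sketch of the two value objects above. The stub driver exists only
# so that Zone.__repr__ has a provider name; real code always receives these
# objects from a concrete DNSDriver subclass rather than building them by
# hand.
if __name__ == '__main__':
    class _StubDNSDriver(object):
        name = 'stub-provider'
    zone = Zone(id=1, domain='example.com', type='master', ttl=3600,
                driver=_StubDNSDriver())
    record = Record(id=1, name='www', type=RecordType.A, data='192.0.2.1',
                    zone=zone, driver=zone.driver)
    print(zone)                       # <Zone: domain=example.com, ttl=3600, ...>
    print(record.name, record.data)   # www 192.0.2.1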
class DNSDriver(BaseDriver):
"""
A base DNSDriver class to derive from
This class is always subclassed by a specific driver.
"""
connectionCls = ConnectionUserAndKey
name = None
website = None
def __init__(self, key, secret=None, secure=True, host=None, port=None,
**kwargs):
"""
:param key: API key or username to be used (required)
:type key: ``str``
:param secret: Secret password to be used (required)
:type secret: ``str``
        :param secure: Whether to use HTTPS or HTTP. Note: Some providers
only support HTTPS, and it is on by default.
:type secure: ``bool``
:param host: Override hostname used for connections.
:type host: ``str``
:param port: Override port used for connections.
:type port: ``int``
:return: ``None``
"""
super(DNSDriver, self).__init__(key=key, secret=secret, secure=secure,
host=host, port=port, **kwargs)
def list_record_types(self):
"""
Return a list of RecordType objects supported by the provider.
:return: ``list`` of :class:`RecordType`
"""
return list(self.RECORD_TYPE_MAP.keys())
def iterate_zones(self):
"""
Return a generator to iterate over available zones.
:rtype: ``generator`` of :class:`Zone`
"""
raise NotImplementedError(
'iterate_zones not implemented for this driver')
def list_zones(self):
"""
Return a list of zones.
:return: ``list`` of :class:`Zone`
"""
return list(self.iterate_zones())
def iterate_records(self, zone):
"""
Return a generator to iterate over records for the provided zone.
:param zone: Zone to list records for.
:type zone: :class:`Zone`
:rtype: ``generator`` of :class:`Record`
"""
raise NotImplementedError(
'iterate_records not implemented for this driver')
def list_records(self, zone):
"""
Return a list of records for the provided zone.
:param zone: Zone to list records for.
        :type zone: :class:`Zone`
:return: ``list`` of :class:`Record`
"""
return list(self.iterate_records(zone))
def get_zone(self, zone_id):
"""
Return a Zone instance.
:param zone_id: ID of the required zone
:type zone_id: ``str``
:rtype: :class:`Zone`
"""
raise NotImplementedError(
'get_zone not implemented for this driver')
def get_record(self, zone_id, record_id):
"""
Return a Record instance.
:param zone_id: ID of the required zone
:type zone_id: ``str``
:param record_id: ID of the required record
:type record_id: ``str``
:rtype: :class:`Record`
"""
raise NotImplementedError(
'get_record not implemented for this driver')
def create_zone(self, domain, type='master', ttl=None, extra=None):
"""
Create a new zone.
:param domain: Zone domain name (e.g. example.com)
:type domain: ``str``
:param type: Zone type (master / slave).
:type type: ``str``
:param ttl: TTL for new records. (optional)
:type ttl: ``int``
:param extra: Extra attributes (driver specific). (optional)
:type extra: ``dict``
:rtype: :class:`Zone`
"""
raise NotImplementedError(
'create_zone not implemented for this driver')
def update_zone(self, zone, domain, type='master', ttl=None, extra=None):
"""
        Update an existing zone.
:param zone: Zone to update.
:type zone: :class:`Zone`
:param domain: Zone domain name (e.g. example.com)
:type domain: ``str``
:param type: Zone type (master / slave).
:type type: ``str``
:param ttl: TTL for new records. (optional)
:type ttl: ``int``
:param extra: Extra attributes (driver specific). (optional)
:type extra: ``dict``
:rtype: :class:`Zone`
"""
raise NotImplementedError(
'update_zone not implemented for this driver')
def create_record(self, name, zone, type, data, extra=None):
"""
Create a new record.
:param name: Record name without the domain name (e.g. www).
Note: If you want to create a record for a base domain
name, you should specify empty string ('') for this
argument.
:type name: ``str``
:param zone: Zone where the requested record is created.
:type zone: :class:`Zone`
:param type: DNS record type (A, AAAA, ...).
:type type: :class:`RecordType`
:param data: Data for the record (depends on the record type).
:type data: ``str``
:param extra: Extra attributes (driver specific). (optional)
:type extra: ``dict``
:rtype: :class:`Record`
"""
raise NotImplementedError(
'create_record not implemented for this driver')
def update_record(self, record, name, type, data, extra):
"""
Update an existing record.
:param record: Record to update.
:type record: :class:`Record`
:param name: Record name without the domain name (e.g. www).
Note: If you want to create a record for a base domain
name, you should specify empty string ('') for this
argument.
:type name: ``str``
:param type: DNS record type (A, AAAA, ...).
:type type: :class:`RecordType`
:param data: Data for the record (depends on the record type).
:type data: ``str``
:param extra: (optional) Extra attributes (driver specific).
:type extra: ``dict``
:rtype: :class:`Record`
"""
raise NotImplementedError(
'update_record not implemented for this driver')
def delete_zone(self, zone):
"""
Delete a zone.
Note: This will delete all the records belonging to this zone.
:param zone: Zone to delete.
:type zone: :class:`Zone`
:rtype: ``bool``
"""
raise NotImplementedError(
'delete_zone not implemented for this driver')
def delete_record(self, record):
"""
Delete a record.
:param record: Record to delete.
:type record: :class:`Record`
:rtype: ``bool``
"""
raise NotImplementedError(
'delete_record not implemented for this driver')
def _string_to_record_type(self, string):
"""
        Convert a string representation of a DNS record type to a
libcloud RecordType ENUM.
"""
string = string.upper()
record_type = getattr(RecordType, string)
return record_type
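# --- Illustrative sketch (not part of libcloud) -----------------------------
# A minimal, hypothetical driver showing how the base class above is usually
# specialised: a concrete driver fills in RECORD_TYPE_MAP and overrides the
# NotImplementedError stubs. The DummyDNSDriver name and its in-memory zone
# store are assumptions made purely for illustration; it only reuses Zone,
# DNSDriver and RecordType as defined/imported above.
class DummyDNSDriver(DNSDriver):
    name = 'Dummy DNS'
    website = 'http://example.com'

    RECORD_TYPE_MAP = {
        RecordType.A: 'A',
        RecordType.AAAA: 'AAAA',
    }

    def __init__(self, *args, **kwargs):
        # No real connection is needed for this sketch, so the BaseDriver
        # constructor is deliberately skipped.
        self._zones = {}

    def iterate_zones(self):
        # list_zones() in the base class simply wraps this generator.
        for zone in self._zones.values():
            yield zone

    def create_zone(self, domain, type='master', ttl=None, extra=None):
        zone = Zone(id=domain, domain=domain, type=type, ttl=ttl,
                    driver=self, extra=extra)
        self._zones[domain] = zone
        return zone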
|
|
import numpy as np
import helper
import problem_unittests as tests
from collections import Counter
from distutils.version import LooseVersion
import warnings
import tensorflow as tf
from tensorflow.contrib import seq2seq
data_dir = './data/simpsons/moes_tavern_lines.txt'
text = helper.load_data(data_dir)
# Ignore notice, since we don't use it for analysing the data
text = text[81:]
view_sentence_range = (0, 10)
print('Dataset Stats')
print('Roughly the number of unique words: {}'.format(len({word: None for word in text.split()})))
scenes = text.split('\n\n')
print('Number of scenes: {}'.format(len(scenes)))
sentence_count_scene = [scene.count('\n') for scene in scenes]
print('Average number of sentences in each scene: {}'.format(np.average(sentence_count_scene)))
sentences = [sentence for scene in scenes for sentence in scene.split('\n')]
print('Number of lines: {}'.format(len(sentences)))
word_count_sentence = [len(sentence.split()) for sentence in sentences]
print('Average number of words in each line: {}'.format(np.average(word_count_sentence)))
print()
print('The sentences {} to {}:'.format(*view_sentence_range))
print('\n'.join(text.split('\n')[view_sentence_range[0]:view_sentence_range[1]]))
def create_lookup_tables(text):
"""
Create lookup tables for vocabulary
:param text: The text of tv scripts split into words
:return: A tuple of dicts (vocab_to_int, int_to_vocab)
"""
counter = Counter(text)
vocab = sorted(counter, key=counter.get, reverse=True)
vocab_to_int = {word: ii for ii, word in enumerate(vocab)}
int_to_vocab = {ii: word for ii, word in enumerate(vocab)}
return vocab_to_int, int_to_vocab
tests.test_create_lookup_tables(create_lookup_tables)
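# Illustrative sanity check (assumption: purely for demonstration, not part of
# the original notebook): the two lookup tables should be exact inverses.
_example_words = ['to', 'be', 'or', 'not', 'to', 'be']
_example_v2i, _example_i2v = create_lookup_tables(_example_words)
assert len(_example_v2i) == len(_example_i2v) == 4
assert all(_example_i2v[_example_v2i[word]] == word for word in _example_words)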
def token_lookup():
"""
Generate a dict to turn punctuation into a token.
:return: Tokenize dictionary where the key is the punctuation and the value is the token
"""
return {
".": "||period||",
",": "||comma||",
"\"": "||quotation_mark||",
";": "||semicolon||",
"!": "||exclamation_mark||",
"?": "||question_mark||",
"(": "||left_parentheses||",
")": "||right_parentheses||",
"--": "||dash||",
"\n": "||return||"
}
tests.test_tokenize(token_lookup)
helper.preprocess_and_save_data(data_dir, token_lookup, create_lookup_tables)
int_text, vocab_to_int, int_to_vocab, token_dict = helper.load_preprocess()
# Check TensorFlow Version
assert LooseVersion(tf.__version__) >= LooseVersion('1.0'), 'Please use TensorFlow version 1.0 or newer'
print('TensorFlow Version: {}'.format(tf.__version__))
# Check for a GPU
if not tf.test.gpu_device_name():
warnings.warn('No GPU found. Please use a GPU to train your neural network.')
else:
print('Default GPU Device: {}'.format(tf.test.gpu_device_name()))
def get_inputs():
"""
Create TF Placeholders for input, targets, and learning rate.
:return: Tuple (input, targets, learning rate)
"""
inputs = tf.placeholder(tf.int32, [None, None], name="input")
targets = tf.placeholder(tf.int32, [None, None], name="targets")
rate = tf.placeholder(tf.float32, name="learning_rate")
return inputs, targets, rate
tests.test_get_inputs(get_inputs)
def get_init_cell(batch_size, rnn_size):
"""
Create an RNN Cell and initialize it.
:param batch_size: Size of batches
:param rnn_size: Size of RNNs
    :return: Tuple (cell, initial state)
"""
lstm = tf.contrib.rnn.BasicLSTMCell(rnn_size)
cell = tf.contrib.rnn.MultiRNNCell([lstm])
initial_state = cell.zero_state(batch_size, tf.float32)
initial_state = tf.identity(initial_state, name="initial_state")
return cell, initial_state
tests.test_get_init_cell(get_init_cell)
def get_embed(input_data, vocab_size, embed_dim):
"""
Create embedding for <input_data>.
:param input_data: TF placeholder for text input.
:param vocab_size: Number of words in vocabulary.
:param embed_dim: Number of embedding dimensions
:return: Embedded input.
"""
embedding = tf.Variable(tf.random_uniform((vocab_size, embed_dim), -1, 1))
embed = tf.nn.embedding_lookup(embedding, input_data)
return embed
tests.test_get_embed(get_embed)
def build_rnn(cell, inputs):
"""
    Create an RNN using an RNN Cell
:param cell: RNN Cell
:param inputs: Input text data
:return: Tuple (Outputs, Final State)
"""
outputs, final_state = tf.nn.dynamic_rnn(cell, inputs, dtype=tf.float32)
return outputs, tf.identity(final_state, name="final_state")
tests.test_build_rnn(build_rnn)
def build_nn(cell, rnn_size, input_data, vocab_size, embed_dim):
"""
Build part of the neural network
:param cell: RNN cell
:param rnn_size: Size of rnns
:param input_data: Input data
:param vocab_size: Vocabulary size
:param embed_dim: Number of embedding dimensions
:return: Tuple (Logits, FinalState)
"""
# TODO: Implement Function
embed = get_embed(input_data, vocab_size, embed_dim)
outputs, final_state = build_rnn(cell, embed)
logits = tf.contrib.layers.fully_connected(outputs, vocab_size, activation_fn=None)
return logits, final_state
tests.test_build_nn(build_nn)
def get_batches(int_text, batch_size, seq_length):
"""
Return batches of input and target
:param int_text: Text with the words replaced by their ids
:param batch_size: The size of batch
:param seq_length: The length of sequence
:return: Batches as a Numpy array
"""
# TODO: Implement Function
words_per_batch = (batch_size*seq_length)
word_batches = len(int_text) // words_per_batch
word_to_use = int_text[:word_batches*words_per_batch]
batches = [np.array([np.zeros((batch_size, seq_length)), np.zeros((batch_size, seq_length))]) for i in range(0, word_batches)]
batches = np.array(batches)
seq = 0
for idx in range(0, len(word_to_use), word_batches*seq_length):
batch_index = 0
for ii in range(idx, idx+(word_batches*seq_length), seq_length):
batches[batch_index][0][seq] = np.add(batches[batch_index][0][seq], np.array(word_to_use[ii: ii+seq_length]))
if ii+seq_length+1 > len(word_to_use):
last = word_to_use[ii + 1: ii + seq_length]
last.extend([word_to_use[0]])
batches[batch_index][1][seq] = np.add(batches[batch_index][1][seq],
np.array(last))
else:
batches[batch_index][1][seq] = np.add(batches[batch_index][1][seq],
np.array(word_to_use[ii+1: ii+seq_length+1]))
batch_index += 1
seq += 1
return batches
tests.test_get_batches(get_batches)
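# Illustrative shape check (assumption: a tiny fake corpus used only for
# demonstration). The returned array is laid out as
# (number of batches, 2 [inputs, targets], batch_size, seq_length).
_example_batches = get_batches(list(range(1, 25)), 2, 3)
assert _example_batches.shape == (4, 2, 2, 3)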
# Number of Epochs
num_epochs = 100
# Batch Size
batch_size = 128
# RNN Size
rnn_size = 256
# Embedding Dimension Size
embed_dim = 500
# Sequence Length
seq_length = 20
# Learning Rate
learning_rate = 0.01
# Show stats for every n number of batches
show_every_n_batches = 50
save_dir = './save'
train_graph = tf.Graph()
with train_graph.as_default():
vocab_size = len(int_to_vocab)
input_text, targets, lr = get_inputs()
input_data_shape = tf.shape(input_text)
cell, initial_state = get_init_cell(input_data_shape[0], rnn_size)
logits, final_state = build_nn(cell, rnn_size, input_text, vocab_size, embed_dim)
# Probabilities for generating words
probs = tf.nn.softmax(logits, name='probs')
# Loss function
cost = seq2seq.sequence_loss(
logits,
targets,
tf.ones([input_data_shape[0], input_data_shape[1]]))
# Optimizer
optimizer = tf.train.AdamOptimizer(lr)
# Gradient Clipping
gradients = optimizer.compute_gradients(cost)
capped_gradients = [(tf.clip_by_value(grad, -1., 1.), var) for grad, var in gradients if grad is not None]
train_op = optimizer.apply_gradients(capped_gradients)
batches = get_batches(int_text, batch_size, seq_length)
with tf.Session(graph=train_graph) as sess:
sess.run(tf.global_variables_initializer())
for epoch_i in range(num_epochs):
state = sess.run(initial_state, {input_text: batches[0][0]})
for batch_i, (x, y) in enumerate(batches):
feed = {
input_text: x,
targets: y,
initial_state: state,
lr: learning_rate}
train_loss, state, _ = sess.run([cost, final_state, train_op], feed)
# Show every <show_every_n_batches> batches
if (epoch_i * len(batches) + batch_i) % show_every_n_batches == 0:
print('Epoch {:>3} Batch {:>4}/{} train_loss = {:.3f}'.format(
epoch_i,
batch_i,
len(batches),
train_loss))
# Save Model
saver = tf.train.Saver()
saver.save(sess, save_dir)
print('Model Trained and Saved')
helper.save_params((seq_length, save_dir))
|
|
# -*- coding: utf-8 -*-
"""
werkzeug.http
~~~~~~~~~~~~~
Werkzeug comes with a bunch of utilities that help Werkzeug to deal with
HTTP data. Most of the classes and functions provided by this module are
used by the wrappers, but they are useful on their own, too, especially if
the response and request objects are not used.
This covers some of the more HTTP centric features of WSGI, some other
utilities such as cookie handling are documented in the `werkzeug.utils`
module.
:copyright: (c) 2011 by the Werkzeug Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
import re
from time import time
try:
from email.utils import parsedate_tz
except ImportError: # pragma: no cover
from email.Utils import parsedate_tz
from urllib2 import parse_http_list as _parse_list_header
from datetime import datetime, timedelta
try:
from hashlib import md5
except ImportError: # pragma: no cover
from md5 import new as md5
#: HTTP_STATUS_CODES is "exported" from this module.
#: XXX: move to werkzeug.consts or something
from werkzeug._internal import HTTP_STATUS_CODES, _dump_date, \
_ExtendedCookie, _ExtendedMorsel, _decode_unicode
_accept_re = re.compile(r'([^\s;,]+)(?:[^,]*?;\s*q=(\d*(?:\.\d+)?))?')
_token_chars = frozenset("!#$%&'*+-.0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ"
'^_`abcdefghijklmnopqrstuvwxyz|~')
_etag_re = re.compile(r'([Ww]/)?(?:"(.*?)"|(.*?))(?:\s*,\s*|$)')
_unsafe_header_chars = set('()<>@,;:\"/[]?={} \t')
_quoted_string_re = r'"[^"\\]*(?:\\.[^"\\]*)*"'
_option_header_piece_re = re.compile(r';\s*([^\s;=]+|%s)\s*(?:=\s*([^;]+|%s))?\s*' %
(_quoted_string_re, _quoted_string_re))
_entity_headers = frozenset([
'allow', 'content-encoding', 'content-language', 'content-length',
'content-location', 'content-md5', 'content-range', 'content-type',
'expires', 'last-modified'
])
_hop_by_pop_headers = frozenset([
'connection', 'keep-alive', 'proxy-authenticate',
'proxy-authorization', 'te', 'trailers', 'transfer-encoding',
'upgrade'
])
def quote_header_value(value, extra_chars='', allow_token=True):
"""Quote a header value if necessary.
.. versionadded:: 0.5
:param value: the value to quote.
:param extra_chars: a list of extra characters to skip quoting.
:param allow_token: if this is enabled token values are returned
unchanged.
"""
value = str(value)
if allow_token:
token_chars = _token_chars | set(extra_chars)
if set(value).issubset(token_chars):
return value
return '"%s"' % value.replace('\\', '\\\\').replace('"', '\\"')
def unquote_header_value(value, is_filename=False):
r"""Unquotes a header value. (Reversal of :func:`quote_header_value`).
This does not use the real unquoting but what browsers are actually
using for quoting.
.. versionadded:: 0.5
:param value: the header value to unquote.
"""
if value and value[0] == value[-1] == '"':
# this is not the real unquoting, but fixing this so that the
# RFC is met will result in bugs with internet explorer and
# probably some other browsers as well. IE for example is
# uploading files with "C:\foo\bar.txt" as filename
value = value[1:-1]
# if this is a filename and the starting characters look like
# a UNC path, then just return the value without quotes. Using the
# replace sequence below on a UNC path has the effect of turning
# the leading double slash into a single slash and then
# _fix_ie_filename() doesn't work correctly. See #458.
if not is_filename or value[:2] != '\\\\':
return value.replace('\\\\', '\\').replace('\\"', '"')
return value
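# Illustrative behaviour of the two helpers above (doctest-style, shown as
# comments only so the module is unchanged at import time):
#
#   >>> quote_header_value('token')
#   'token'
#   >>> quote_header_value('bar baz')
#   '"bar baz"'
#   >>> unquote_header_value('"bar baz"')
#   'bar baz'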
def dump_options_header(header, options):
"""The reverse function to :func:`parse_options_header`.
:param header: the header to dump
:param options: a dict of options to append.
"""
segments = []
if header is not None:
segments.append(header)
for key, value in options.iteritems():
if value is None:
segments.append(key)
else:
segments.append('%s=%s' % (key, quote_header_value(value)))
return '; '.join(segments)
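# Illustrative behaviour (doctest-style, comments only):
#
#   >>> dump_options_header('text/html', {'charset': 'utf-8'})
#   'text/html; charset=utf-8'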
def dump_header(iterable, allow_token=True):
"""Dump an HTTP header again. This is the reversal of
:func:`parse_list_header`, :func:`parse_set_header` and
:func:`parse_dict_header`. This also quotes strings that include an
    equals sign unless you pass it as a dict of key, value pairs.
>>> dump_header({'foo': 'bar baz'})
'foo="bar baz"'
>>> dump_header(('foo', 'bar baz'))
'foo, "bar baz"'
:param iterable: the iterable or dict of values to quote.
:param allow_token: if set to `False` tokens as values are disallowed.
See :func:`quote_header_value` for more details.
"""
if isinstance(iterable, dict):
items = []
for key, value in iterable.iteritems():
if value is None:
items.append(key)
else:
items.append('%s=%s' % (
key,
quote_header_value(value, allow_token=allow_token)
))
else:
items = [quote_header_value(x, allow_token=allow_token)
for x in iterable]
return ', '.join(items)
def parse_list_header(value):
"""Parse lists as described by RFC 2068 Section 2.
In particular, parse comma-separated lists where the elements of
the list may include quoted-strings. A quoted-string could
contain a comma. A non-quoted string could have quotes in the
middle. Quotes are removed automatically after parsing.
It basically works like :func:`parse_set_header` just that items
may appear multiple times and case sensitivity is preserved.
The return value is a standard :class:`list`:
>>> parse_list_header('token, "quoted value"')
['token', 'quoted value']
To create a header from the :class:`list` again, use the
:func:`dump_header` function.
:param value: a string with a list header.
:return: :class:`list`
"""
result = []
for item in _parse_list_header(value):
if item[:1] == item[-1:] == '"':
item = unquote_header_value(item[1:-1])
result.append(item)
return result
def parse_dict_header(value):
"""Parse lists of key, value pairs as described by RFC 2068 Section 2 and
convert them into a python dict:
>>> d = parse_dict_header('foo="is a fish", bar="as well"')
>>> type(d) is dict
True
>>> sorted(d.items())
[('bar', 'as well'), ('foo', 'is a fish')]
If there is no value for a key it will be `None`:
>>> parse_dict_header('key_without_value')
{'key_without_value': None}
To create a header from the :class:`dict` again, use the
:func:`dump_header` function.
:param value: a string with a dict header.
:return: :class:`dict`
"""
result = {}
for item in _parse_list_header(value):
if '=' not in item:
result[item] = None
continue
name, value = item.split('=', 1)
if value[:1] == value[-1:] == '"':
value = unquote_header_value(value[1:-1])
result[name] = value
return result
def parse_options_header(value):
"""Parse a ``Content-Type`` like header into a tuple with the content
type and the options:
>>> parse_options_header('Content-Type: text/html; mimetype=text/html')
('Content-Type:', {'mimetype': 'text/html'})
This should not be used to parse ``Cache-Control`` like headers that use
a slightly different format. For these headers use the
:func:`parse_dict_header` function.
.. versionadded:: 0.5
:param value: the header to parse.
:return: (str, options)
"""
def _tokenize(string):
for match in _option_header_piece_re.finditer(string):
key, value = match.groups()
key = unquote_header_value(key)
if value is not None:
value = unquote_header_value(value, key == 'filename')
yield key, value
if not value:
return '', {}
parts = _tokenize(';' + value)
name = parts.next()[0]
extra = dict(parts)
return name, extra
def parse_accept_header(value, cls=None):
    """Parses an HTTP Accept-* header. This does not implement a completely
valid algorithm but one that supports at least value and quality
extraction.
Returns a new :class:`Accept` object (basically a list of ``(value, quality)``
tuples sorted by the quality with some additional accessor methods).
The second parameter can be a subclass of :class:`Accept` that is created
with the parsed values and returned.
:param value: the accept header string to be parsed.
:param cls: the wrapper class for the return value (can be
:class:`Accept` or a subclass thereof)
:return: an instance of `cls`.
"""
if cls is None:
cls = Accept
if not value:
return cls(None)
result = []
for match in _accept_re.finditer(value):
quality = match.group(2)
if not quality:
quality = 1
else:
quality = max(min(float(quality), 1), 0)
result.append((match.group(1), quality))
return cls(result)
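# Illustrative behaviour (doctest-style, comments only; assumes the `best`
# accessor provided by werkzeug.datastructures.Accept): entries without an
# explicit quality default to 1, so the first entry below wins.
#
#   >>> accept = parse_accept_header('text/html,application/xml;q=0.9,*/*;q=0.8')
#   >>> accept.best
#   'text/html'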
def parse_cache_control_header(value, on_update=None, cls=None):
    """Parse a cache control header. The RFC differentiates between response
    and request cache control; this method does not. It's your responsibility
to not use the wrong control statements.
.. versionadded:: 0.5
The `cls` was added. If not specified an immutable
:class:`~werkzeug.datastructures.RequestCacheControl` is returned.
:param value: a cache control header to be parsed.
:param on_update: an optional callable that is called every time a value
on the :class:`~werkzeug.datastructures.CacheControl`
object is changed.
:param cls: the class for the returned object. By default
:class:`~werkzeug.datastructures.RequestCacheControl` is used.
:return: a `cls` object.
"""
if cls is None:
cls = RequestCacheControl
if not value:
return cls(None, on_update)
return cls(parse_dict_header(value), on_update)
def parse_set_header(value, on_update=None):
"""Parse a set-like header and return a
:class:`~werkzeug.datastructures.HeaderSet` object:
>>> hs = parse_set_header('token, "quoted value"')
The return value is an object that treats the items case-insensitively
and keeps the order of the items:
>>> 'TOKEN' in hs
True
>>> hs.index('quoted value')
1
>>> hs
HeaderSet(['token', 'quoted value'])
To create a header from the :class:`HeaderSet` again, use the
:func:`dump_header` function.
:param value: a set header to be parsed.
:param on_update: an optional callable that is called every time a
value on the :class:`~werkzeug.datastructures.HeaderSet`
object is changed.
:return: a :class:`~werkzeug.datastructures.HeaderSet`
"""
if not value:
return HeaderSet(None, on_update)
return HeaderSet(parse_list_header(value), on_update)
def parse_authorization_header(value):
"""Parse an HTTP basic/digest authorization header transmitted by the web
browser. The return value is either `None` if the header was invalid or
not given, otherwise an :class:`~werkzeug.datastructures.Authorization`
object.
:param value: the authorization header to parse.
:return: a :class:`~werkzeug.datastructures.Authorization` object or `None`.
"""
if not value:
return
try:
auth_type, auth_info = value.split(None, 1)
auth_type = auth_type.lower()
except ValueError:
return
if auth_type == 'basic':
try:
username, password = auth_info.decode('base64').split(':', 1)
except Exception, e:
return
return Authorization('basic', {'username': username,
'password': password})
elif auth_type == 'digest':
auth_map = parse_dict_header(auth_info)
for key in 'username', 'realm', 'nonce', 'uri', 'nc', 'cnonce', \
'response':
if not key in auth_map:
return
return Authorization('digest', auth_map)
def parse_www_authenticate_header(value, on_update=None):
"""Parse an HTTP WWW-Authenticate header into a
:class:`~werkzeug.datastructures.WWWAuthenticate` object.
:param value: a WWW-Authenticate header to parse.
:param on_update: an optional callable that is called every time a value
on the :class:`~werkzeug.datastructures.WWWAuthenticate`
object is changed.
:return: a :class:`~werkzeug.datastructures.WWWAuthenticate` object.
"""
if not value:
return WWWAuthenticate(on_update=on_update)
try:
auth_type, auth_info = value.split(None, 1)
auth_type = auth_type.lower()
except (ValueError, AttributeError):
return WWWAuthenticate(value.strip().lower(), on_update=on_update)
return WWWAuthenticate(auth_type, parse_dict_header(auth_info),
on_update)
def parse_if_range_header(value):
"""Parses an if-range header which can be an etag or a date. Returns
a :class:`~werkzeug.datastructures.IfRange` object.
.. versionadded:: 0.7
"""
if not value:
return IfRange()
date = parse_date(value)
if date is not None:
return IfRange(date=date)
# drop weakness information
return IfRange(unquote_etag(value)[0])
def parse_range_header(value, make_inclusive=True):
"""Parses a range header into a :class:`~werkzeug.datastructures.Range`
object. If the header is missing or malformed `None` is returned.
`ranges` is a list of ``(start, stop)`` tuples where the ranges are
non-inclusive.
.. versionadded:: 0.7
"""
if not value or '=' not in value:
return None
ranges = []
last_end = 0
units, rng = value.split('=', 1)
units = units.strip().lower()
for item in rng.split(','):
item = item.strip()
if '-' not in item:
return None
if item.startswith('-'):
if last_end < 0:
return None
begin = int(item)
end = None
last_end = -1
elif '-' in item:
begin, end = item.split('-', 1)
begin = int(begin)
if begin < last_end or last_end < 0:
return None
if end:
end = int(end) + 1
if begin >= end:
return None
else:
end = None
last_end = end
ranges.append((begin, end))
return Range(units, ranges)
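# Illustrative behaviour (doctest-style, comments only; relies on the `units`
# and `ranges` attributes of werkzeug.datastructures.Range): parsed ranges are
# half-open, i.e. the stop value is exclusive.
#
#   >>> rv = parse_range_header('bytes=0-499')
#   >>> rv.units, rv.ranges
#   ('bytes', [(0, 500)])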
def parse_content_range_header(value, on_update=None):
"""Parses a range header into a
:class:`~werkzeug.datastructures.ContentRange` object or `None` if
parsing is not possible.
.. versionadded:: 0.7
:param value: a content range header to be parsed.
:param on_update: an optional callable that is called every time a value
on the :class:`~werkzeug.datastructures.ContentRange`
object is changed.
"""
if value is None:
return None
try:
units, rangedef = (value or '').strip().split(None, 1)
except ValueError:
return None
if '/' not in rangedef:
return None
rng, length = rangedef.split('/', 1)
if length == '*':
length = None
elif length.isdigit():
length = int(length)
else:
return None
if rng == '*':
return ContentRange(units, None, None, length, on_update=on_update)
elif '-' not in rng:
return None
start, stop = rng.split('-', 1)
try:
start = int(start)
stop = int(stop) + 1
except ValueError:
return None
if is_byte_range_valid(start, stop, length):
return ContentRange(units, start, stop, length, on_update=on_update)
def quote_etag(etag, weak=False):
"""Quote an etag.
:param etag: the etag to quote.
:param weak: set to `True` to tag it "weak".
"""
if '"' in etag:
raise ValueError('invalid etag')
etag = '"%s"' % etag
if weak:
etag = 'w/' + etag
return etag
def unquote_etag(etag):
"""Unquote a single etag:
>>> unquote_etag('w/"bar"')
('bar', True)
>>> unquote_etag('"bar"')
('bar', False)
:param etag: the etag identifier to unquote.
:return: a ``(etag, weak)`` tuple.
"""
if not etag:
return None, None
etag = etag.strip()
weak = False
if etag[:2] in ('w/', 'W/'):
weak = True
etag = etag[2:]
if etag[:1] == etag[-1:] == '"':
etag = etag[1:-1]
return etag, weak
def parse_etags(value):
"""Parse an etag header.
:param value: the tag header to parse
:return: an :class:`~werkzeug.datastructures.ETags` object.
"""
if not value:
return ETags()
strong = []
weak = []
end = len(value)
pos = 0
while pos < end:
match = _etag_re.match(value, pos)
if match is None:
break
is_weak, quoted, raw = match.groups()
if raw == '*':
return ETags(star_tag=True)
elif quoted:
raw = quoted
if is_weak:
weak.append(raw)
else:
strong.append(raw)
pos = match.end()
return ETags(strong, weak)
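# Illustrative behaviour (doctest-style, comments only; assumes the contains()
# and contains_weak() helpers of werkzeug.datastructures.ETags): strong and
# weak tags end up in separate buckets.
#
#   >>> etags = parse_etags('"foo", w/"bar"')
#   >>> etags.contains('foo'), etags.contains_weak('bar')
#   (True, True)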
def generate_etag(data):
"""Generate an etag for some data."""
return md5(data).hexdigest()
def parse_date(value):
"""Parse one of the following date formats into a datetime object:
.. sourcecode:: text
Sun, 06 Nov 1994 08:49:37 GMT ; RFC 822, updated by RFC 1123
Sunday, 06-Nov-94 08:49:37 GMT ; RFC 850, obsoleted by RFC 1036
Sun Nov 6 08:49:37 1994 ; ANSI C's asctime() format
If parsing fails the return value is `None`.
:param value: a string with a supported date format.
:return: a :class:`datetime.datetime` object.
"""
if value:
t = parsedate_tz(value.strip())
if t is not None:
try:
year = t[0]
# unfortunately that function does not tell us if two digit
# years were part of the string, or if they were prefixed
# with two zeroes. So what we do is to assume that 69-99
# refer to 1900, and everything below to 2000
if year >= 0 and year <= 68:
year += 2000
elif year >= 69 and year <= 99:
year += 1900
return datetime(*((year,) + t[1:7])) - \
timedelta(seconds=t[-1] or 0)
except (ValueError, OverflowError):
return None
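# Illustrative behaviour (doctest-style, comments only): an RFC 822 date parses
# to a timezone-naive UTC datetime, while unparsable input yields None.
#
#   >>> parse_date('Sun, 06 Nov 1994 08:49:37 GMT')
#   datetime.datetime(1994, 11, 6, 8, 49, 37)
#   >>> parse_date('not a date') is None
#   True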
def cookie_date(expires=None):
"""Formats the time to ensure compatibility with Netscape's cookie
standard.
    Accepts a floating point number expressed in seconds since the epoch, a
datetime object or a timetuple. All times in UTC. The :func:`parse_date`
function can be used to parse such a date.
Outputs a string in the format ``Wdy, DD-Mon-YYYY HH:MM:SS GMT``.
:param expires: If provided that date is used, otherwise the current.
"""
return _dump_date(expires, '-')
def http_date(timestamp=None):
"""Formats the time to match the RFC1123 date format.
    Accepts a floating point number expressed in seconds since the epoch, a
datetime object or a timetuple. All times in UTC. The :func:`parse_date`
function can be used to parse such a date.
Outputs a string in the format ``Wdy, DD Mon YYYY HH:MM:SS GMT``.
:param timestamp: If provided that date is used, otherwise the current.
"""
return _dump_date(timestamp, ' ')
def is_resource_modified(environ, etag=None, data=None, last_modified=None):
"""Convenience method for conditional requests.
:param environ: the WSGI environment of the request to be checked.
:param etag: the etag for the response for comparison.
:param data: or alternatively the data of the response to automatically
generate an etag using :func:`generate_etag`.
:param last_modified: an optional date of the last modification.
:return: `True` if the resource was modified, otherwise `False`.
"""
if etag is None and data is not None:
etag = generate_etag(data)
elif data is not None:
raise TypeError('both data and etag given')
if environ['REQUEST_METHOD'] not in ('GET', 'HEAD'):
return False
unmodified = False
if isinstance(last_modified, basestring):
last_modified = parse_date(last_modified)
# ensure that microsecond is zero because the HTTP spec does not transmit
# that either and we might have some false positives. See issue #39
if last_modified is not None:
last_modified = last_modified.replace(microsecond=0)
modified_since = parse_date(environ.get('HTTP_IF_MODIFIED_SINCE'))
if modified_since and last_modified and last_modified <= modified_since:
unmodified = True
if etag:
if_none_match = parse_etags(environ.get('HTTP_IF_NONE_MATCH'))
if if_none_match:
unmodified = if_none_match.contains_raw(etag)
return not unmodified
def remove_entity_headers(headers, allowed=('expires', 'content-location')):
"""Remove all entity headers from a list or :class:`Headers` object. This
operation works in-place. `Expires` and `Content-Location` headers are
by default not removed. The reason for this is :rfc:`2616` section
10.3.5 which specifies some entity headers that should be sent.
.. versionchanged:: 0.5
added `allowed` parameter.
:param headers: a list or :class:`Headers` object.
:param allowed: a list of headers that should still be allowed even though
they are entity headers.
"""
allowed = set(x.lower() for x in allowed)
headers[:] = [(key, value) for key, value in headers if
not is_entity_header(key) or key.lower() in allowed]
def remove_hop_by_hop_headers(headers):
"""Remove all HTTP/1.1 "Hop-by-Hop" headers from a list or
:class:`Headers` object. This operation works in-place.
.. versionadded:: 0.5
:param headers: a list or :class:`Headers` object.
"""
headers[:] = [(key, value) for key, value in headers if
not is_hop_by_hop_header(key)]
def is_entity_header(header):
"""Check if a header is an entity header.
.. versionadded:: 0.5
:param header: the header to test.
:return: `True` if it's an entity header, `False` otherwise.
"""
return header.lower() in _entity_headers
def is_hop_by_hop_header(header):
"""Check if a header is an HTTP/1.1 "Hop-by-Hop" header.
.. versionadded:: 0.5
:param header: the header to test.
    :return: `True` if it's a hop-by-hop header, `False` otherwise.
"""
return header.lower() in _hop_by_pop_headers
def parse_cookie(header, charset='utf-8', errors='replace',
cls=None):
"""Parse a cookie. Either from a string or WSGI environ.
    By default encoding errors are replaced. If you want a different behavior
    you can set `errors` to ``'ignore'`` or ``'strict'``. In strict mode a
:exc:`HTTPUnicodeError` is raised.
.. versionchanged:: 0.5
This function now returns a :class:`TypeConversionDict` instead of a
regular dict. The `cls` parameter was added.
:param header: the header to be used to parse the cookie. Alternatively
this can be a WSGI environment.
:param charset: the charset for the cookie values.
:param errors: the error behavior for the charset decoding.
:param cls: an optional dict class to use. If this is not specified
or `None` the default :class:`TypeConversionDict` is
used.
"""
if isinstance(header, dict):
header = header.get('HTTP_COOKIE', '')
if cls is None:
cls = TypeConversionDict
cookie = _ExtendedCookie()
cookie.load(header)
result = {}
# decode to unicode and skip broken items. Our extended morsel
# and extended cookie will catch CookieErrors and convert them to
# `None` items which we have to skip here.
for key, value in cookie.iteritems():
if value.value is not None:
result[key] = _decode_unicode(unquote_header_value(value.value),
charset, errors)
return cls(result)
def dump_cookie(key, value='', max_age=None, expires=None, path='/',
domain=None, secure=None, httponly=False, charset='utf-8',
sync_expires=True):
"""Creates a new Set-Cookie header without the ``Set-Cookie`` prefix
The parameters are the same as in the cookie Morsel object in the
Python standard library but it accepts unicode data, too.
:param max_age: should be a number of seconds, or `None` (default) if
the cookie should last only as long as the client's
browser session. Additionally `timedelta` objects
are accepted, too.
:param expires: should be a `datetime` object or unix timestamp.
:param path: limits the cookie to a given path, per default it will
span the whole domain.
:param domain: Use this if you want to set a cross-domain cookie. For
example, ``domain=".example.com"`` will set a cookie
that is readable by the domain ``www.example.com``,
``foo.example.com`` etc. Otherwise, a cookie will only
be readable by the domain that set it.
:param secure: The cookie will only be available via HTTPS
:param httponly: disallow JavaScript to access the cookie. This is an
extension to the cookie standard and probably not
supported by all browsers.
:param charset: the encoding for unicode values.
:param sync_expires: automatically set expires if max_age is defined
but expires not.
"""
try:
key = str(key)
except UnicodeError:
raise TypeError('invalid key %r' % key)
if isinstance(value, unicode):
value = value.encode(charset)
value = quote_header_value(value)
morsel = _ExtendedMorsel(key, value)
if isinstance(max_age, timedelta):
max_age = (max_age.days * 60 * 60 * 24) + max_age.seconds
if expires is not None:
if not isinstance(expires, basestring):
expires = cookie_date(expires)
morsel['expires'] = expires
elif max_age is not None and sync_expires:
morsel['expires'] = cookie_date(time() + max_age)
if domain and ':' in domain:
# The port part of the domain should NOT be used. Strip it
domain = domain.split(':', 1)[0]
if domain:
        assert '.' in domain, (
            "Setting \"domain\" for a cookie on a server running locally (ex: "
            "localhost) is not supported by complying browsers. You should "
"have something like: \"127.0.0.1 localhost dev.localhost\" on "
"your hosts file and then point your server to run on "
"\"dev.localhost\" and also set \"domain\" for \"dev.localhost\""
)
for k, v in (('path', path), ('domain', domain), ('secure', secure),
('max-age', max_age), ('httponly', httponly)):
if v is not None and v is not False:
morsel[k] = str(v)
return morsel.output(header='').lstrip()
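# Illustrative behaviour (approximate, comments only; the exact attribute
# order comes from the underlying Morsel): with only the default path set the
# header value comes out roughly as
#
#   >>> dump_cookie('foo', 'bar')
#   'foo=bar; Path=/'
#
# parse_cookie() above is the counterpart for reading such headers back.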
def is_byte_range_valid(start, stop, length):
"""Checks if a given byte content range is valid for the given length.
.. versionadded:: 0.7
"""
if (start is None) != (stop is None):
return False
elif start is None:
return length is None or length >= 0
elif length is None:
return 0 <= start < stop
elif start >= stop:
return False
return 0 <= start < length
# circular dependency fun
from werkzeug.datastructures import Accept, HeaderSet, ETags, Authorization, \
WWWAuthenticate, TypeConversionDict, IfRange, Range, ContentRange, \
RequestCacheControl
# DEPRECATED
# backwards compatible imports
from werkzeug.datastructures import MIMEAccept, CharsetAccept, \
LanguageAccept, Headers
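# A small self-test sketch (illustrative only, not part of the original
# module), exercising a few of the parsers defined above now that the
# circular imports have been resolved.
if __name__ == '__main__':
    name, options = parse_options_header('text/html; charset=utf-8')
    assert name == 'text/html' and options['charset'] == 'utf-8'

    hs = parse_set_header('gzip, deflate')
    assert 'GZIP' in hs  # HeaderSet lookups are case-insensitive

    assert dump_header(['foo', 'bar baz']) == 'foo, "bar baz"'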
|
|
from netlib.http import cookies
from netlib.tutils import raises
def test_read_token():
tokens = [
[("foo", 0), ("foo", 3)],
[("foo", 1), ("oo", 3)],
[(" foo", 1), ("foo", 4)],
[(" foo;", 1), ("foo", 4)],
[(" foo=", 1), ("foo", 4)],
[(" foo=bar", 1), ("foo", 4)],
]
for q, a in tokens:
assert cookies._read_token(*q) == a
def test_read_quoted_string():
tokens = [
[('"foo" x', 0), ("foo", 5)],
[('"f\oo" x', 0), ("foo", 6)],
[(r'"f\\o" x', 0), (r"f\o", 6)],
[(r'"f\\" x', 0), (r"f" + '\\', 5)],
[('"fo\\\"" x', 0), ("fo\"", 6)],
[('"foo" x', 7), ("", 8)],
]
for q, a in tokens:
assert cookies._read_quoted_string(*q) == a
def test_read_pairs():
vals = [
[
"one",
[["one", None]]
],
[
"one=two",
[["one", "two"]]
],
[
"one=",
[["one", ""]]
],
[
'one="two"',
[["one", "two"]]
],
[
'one="two"; three=four',
[["one", "two"], ["three", "four"]]
],
[
'one="two"; three=four; five',
[["one", "two"], ["three", "four"], ["five", None]]
],
[
'one="\\"two"; three=four',
[["one", '"two'], ["three", "four"]]
],
]
for s, lst in vals:
ret, off = cookies._read_pairs(s)
assert ret == lst
def test_pairs_roundtrips():
pairs = [
[
"",
[]
],
[
"one=uno",
[["one", "uno"]]
],
[
"one",
[["one", None]]
],
[
"one=uno; two=due",
[["one", "uno"], ["two", "due"]]
],
[
'one="uno"; two="\due"',
[["one", "uno"], ["two", "due"]]
],
[
'one="un\\"o"',
[["one", 'un"o']]
],
[
'one="uno,due"',
[["one", 'uno,due']]
],
[
"one=uno; two; three=tre",
[["one", "uno"], ["two", None], ["three", "tre"]]
],
[
"_lvs2=zHai1+Hq+Tc2vmc2r4GAbdOI5Jopg3EwsdUT9g=; "
"_rcc2=53VdltWl+Ov6ordflA==;",
[
["_lvs2", "zHai1+Hq+Tc2vmc2r4GAbdOI5Jopg3EwsdUT9g="],
["_rcc2", "53VdltWl+Ov6ordflA=="]
]
]
]
for s, lst in pairs:
ret, off = cookies._read_pairs(s)
assert ret == lst
s2 = cookies._format_pairs(lst)
ret, off = cookies._read_pairs(s2)
assert ret == lst
def test_cookie_roundtrips():
pairs = [
[
"one=uno",
[["one", "uno"]]
],
[
"one=uno; two=due",
[["one", "uno"], ["two", "due"]]
],
]
for s, lst in pairs:
ret = cookies.parse_cookie_header(s)
assert ret == lst
s2 = cookies.format_cookie_header(ret)
ret = cookies.parse_cookie_header(s2)
assert ret == lst
def test_parse_set_cookie_pairs():
pairs = [
[
"one=uno",
[
["one", "uno"]
]
],
[
"one=un\x20",
[
["one", "un\x20"]
]
],
[
"one=uno; foo",
[
["one", "uno"],
["foo", None]
]
],
[
"mun=1.390.f60; "
"expires=sun, 11-oct-2015 12:38:31 gmt; path=/; "
"domain=b.aol.com",
[
["mun", "1.390.f60"],
["expires", "sun, 11-oct-2015 12:38:31 gmt"],
["path", "/"],
["domain", "b.aol.com"]
]
],
[
r'rpb=190%3d1%2616726%3d1%2634832%3d1%2634874%3d1; '
'domain=.rubiconproject.com; '
'expires=mon, 11-may-2015 21:54:57 gmt; '
'path=/',
[
['rpb', r'190%3d1%2616726%3d1%2634832%3d1%2634874%3d1'],
['domain', '.rubiconproject.com'],
['expires', 'mon, 11-may-2015 21:54:57 gmt'],
['path', '/']
]
],
]
for s, lst in pairs:
ret = cookies._parse_set_cookie_pairs(s)
assert ret == lst
s2 = cookies._format_set_cookie_pairs(ret)
ret2 = cookies._parse_set_cookie_pairs(s2)
assert ret2 == lst
def test_parse_set_cookie_header():
vals = [
[
"", None
],
[
";", None
],
[
"one=uno",
("one", "uno", ())
],
[
"one=uno; foo=bar",
("one", "uno", (("foo", "bar"),))
],
[
"one=uno; foo=bar; foo=baz",
("one", "uno", (("foo", "bar"), ("foo", "baz")))
],
]
for s, expected in vals:
ret = cookies.parse_set_cookie_header(s)
if expected:
assert ret[0] == expected[0]
assert ret[1] == expected[1]
assert ret[2].items(multi=True) == expected[2]
s2 = cookies.format_set_cookie_header(*ret)
ret2 = cookies.parse_set_cookie_header(s2)
assert ret2[0] == expected[0]
assert ret2[1] == expected[1]
assert ret2[2].items(multi=True) == expected[2]
else:
assert ret is None
def test_refresh_cookie():
# Invalid expires format, sent to us by Reddit.
c = "rfoo=bar; Domain=reddit.com; expires=Thu, 31 Dec 2037 23:59:59 GMT; Path=/"
assert cookies.refresh_set_cookie_header(c, 60)
c = "MOO=BAR; Expires=Tue, 08-Mar-2011 00:20:38 GMT; Path=foo.com; Secure"
assert "00:21:38" in cookies.refresh_set_cookie_header(c, 60)
c = "foo,bar"
with raises(ValueError):
cookies.refresh_set_cookie_header(c, 60)
# https://github.com/mitmproxy/mitmproxy/issues/773
c = ">=A"
assert cookies.refresh_set_cookie_header(c, 60)
# https://github.com/mitmproxy/mitmproxy/issues/1118
c = "foo:bar=bla"
assert cookies.refresh_set_cookie_header(c, 0)
c = "foo/bar=bla"
assert cookies.refresh_set_cookie_header(c, 0)
|
|
"""
Test Results for discrete models from Stata
"""
import os
import numpy as np
#### Discrete Model Tests ####
# Note that there is a slight refactor of the classes, so that one dataset
# might be used for more than one model
cur_dir = os.path.abspath(os.path.dirname(__file__))
class Anes(object):
def __init__(self):
"""
Results are from Stata 11 (checked vs R nnet package).
"""
self.nobs = 944
def mnlogit_basezero(self):
params = [-.01153598, .29771435, -.024945, .08249144, .00519655,
-.37340167, -.08875065, .39166864, -.02289784, .18104276,
.04787398, -2.2509132, -.1059667, .57345051, -.01485121,
-.00715242, .05757516, -3.6655835, -.0915567, 1.2787718,
-.00868135, .19982796, .08449838, -7.6138431, -.0932846,
1.3469616, -.01790407, .21693885, .08095841, -7.0604782,
-.14088069, 2.0700801, -.00943265, .3219257, .10889408,
-12.105751]
self.params = np.reshape(params, (6,-1), order='F')
bse = [.0342823657, .093626795, .0065248584, .0735865799,
.0176336937, .6298376313, .0391615553, .1082386919,
.0079144618, .0852893563, .0222809297, .7631899491,
.0570382292, .1585481337, .0113313133, .1262913234,
.0336142088, 1.156541492, .0437902764, .1288965854,
.0084187486, .0941250559, .0261963632, .9575809602,
.0393516553, .1171860107, .0076110152, .0850070091,
.0229760791, .8443638283, .042138047, .1434089089,
.0081338625, .0910979921, .025300888, 1.059954821]
self.bse = np.reshape(bse, (6,-1), order='F')
self.yhat = np.loadtxt(os.path.join(cur_dir,'yhat_mnlogit.csv'))
self.phat = np.loadtxt(os.path.join(cur_dir,'phat_mnlogit.csv'))
self.cov_params = None
self.llf = -1461.922747312
self.llnull = -1750.34670999
self.llr = 576.8479253554
self.llr_pvalue = 1.8223179e-102
self.prsquared = .1647810465387
self.df_model = 30
self.df_resid = 944 - 36
self.J = 7
self.K = 6
self.aic = 2995.84549462
self.bic = 3170.45003661
z = [-.3364988051, 3.179798597, -3.823070772, 1.121012042,
.2946945327, -.5928538661, -2.266269864, 3.618564069,
-2.893164162, 2.122688754, 2.148652536, -2.949348555,
-1.857818873, 3.616885888, -1.310634214, -.0566342868,
1.712822091, -3.169435381, -2.090799808, 9.920912816,
-1.031191864, 2.123004903, 3.225576554, -7.951122047,
-2.370538224, 11.49421878, -2.352389066, 2.552011323,
3.523595639, -8.361890935, -3.34331327, 14.43480847,
-1.159676452, 3.533839715, 4.303962885, -11.42100649]
self.z = np.reshape(z, (6,-1), order='F')
pvalues = [0.7364947525, 0.0014737744, 0.0001317999, 0.2622827367,
0.7682272401, 0.5532789548, 0.0234348654, 0.0002962422,
0.0038138191, 0.0337799420, 0.0316619538, 0.0031844460,
0.0631947400, 0.0002981687, 0.1899813744, 0.9548365214,
0.0867452747, 0.0015273542, 0.0365460134, 3.37654e-23,
0.3024508550, 0.0337534410, 0.0012571921, 1.84830e-15,
0.0177622072, 1.41051e-30, 0.0186532528, 0.0107103038,
0.0004257334, 6.17209e-17, 0.0008278439, 3.12513e-47,
0.2461805610, 0.0004095694, 0.0000167770, 3.28408e-30]
self.pvalues = np.reshape(pvalues, (6,-1), order='F')
conf_int = [[[-0.0787282, 0.0556562], [0.1142092, 0.4812195],
[-0.0377335, -0.0121565], [-0.0617356, 0.2267185], [-0.0293649,
0.0397580], [-1.6078610, 0.8610574]], [[-0.1655059, -0.0119954],
[0.1795247, 0.6038126], [-0.0384099, -0.0073858], [0.0138787,
0.3482068], [0.0042042, 0.0915438], [-3.7467380, -0.7550884]],
[[-0.2177596, 0.0058262], [0.2627019, 0.8841991], [-0.0370602,
0.0073578], [-0.2546789, 0.2403740], [-0.0083075, 0.1234578],
[-5.9323630,-1.3988040]],[[-0.1773841, -0.0057293], [1.0261390,
1.5314040], [-0.0251818, 0.0078191], [0.0153462, 0.3843097],
[0.0331544, 0.1358423], [-9.4906670, -5.7370190]], [[-0.1704124,
-0.0161568], [1.1172810, 1.5766420], [-0.0328214, -0.0029868],
[0.0503282, 0.3835495], [0.0359261, 0.1259907], [-8.7154010,
-5.4055560]], [[-0.2234697, -0.0582916], [1.7890040, 2.3511560],
[-0.0253747, 0.0065094], [0.1433769, 0.5004745], [0.0593053,
0.1584829], [-14.1832200, -10.0282800]]]
self.conf_int = np.asarray(conf_int)
# margins, dydx(*) predict(outcome(#))
self.margeff_dydx_overall = np.array([
[0.00868085993550, -0.09779854015456, 0.00272556969847,
-0.01992376579372, -0.00603133322764],
[0.00699386733148, -0.05022430802614, -0.00211003909752,
-0.00536980000265, -0.00554366741814],
[-0.00391040848820, -0.02824717135857, -0.00100551299310,
0.00664337806861, 0.00097987356999],
[-0.00182580888015, -0.00573744730031, -0.00004249256428,
-0.00546669558488, 0.00054101121854],
[-0.00098558129923, 0.01985550937033, 0.00047972250012,
0.00172605778905, 0.00211291403209],
[-0.00153469551647, 0.03755346502013, -0.00068531143399,
0.00472471794347, 0.00254733486106],
[-0.00741820702809, 0.12459834487569, 0.00063806819375,
0.01766610701188, 0.00539385283759]
]).T
self.margeff_dydx_overall_se = np.array([
[.0038581061, .0080471125, .0007068488, .0082318967, .0020261706],
[.003904378, .0073600286, .000756431, .0084381578, .0020482238],
[.003137126, .0056813182, .0006601377, .0068932588, .0018481806],
[.0019427783, .0031904763, .0003865411, .004361789, .0011523221],
[.0029863227, .0054076092, .0005886612, .0064426365, .0018886818],
[.0035806552, .0069497362, .000722511, .0078287717, .0022352393],
[.0033641608, .008376629, .0006774697, .0073505286, .0021660086]
]).T
self.margeff_dydx_mean = np.array([
[0.01149887431225, -0.13784207091973, 0.00273313385873,
-0.02542974260540, -0.00855346837482],
[0.01114846831102, -0.09864273512889, -0.00222435063712,
-0.01214617126321, -0.00903581444579],
[-0.00381702868421, -0.05132297961269, -0.00116763216994,
0.00624203027060, 0.00021912081810],
[-0.00233455327258, -0.00928554037343, -0.00000206561214,
-0.00775415690571, 0.00060004460394],
[-0.00352579921274, 0.06412187169362, 0.00073938948643,
0.00747778063206, 0.00459965010365],
[-0.00574308219449, 0.11126535089794, -0.00057337915464,
0.01467424346725, 0.00641760846097],
[-0.00722687818452, 0.12170608820238, 0.00049490419675,
0.01693601418978, 0.00575285798725]]).T
self.margeff_dydx_mean_se = np.array([
[.0043729758, .0110343353, .0008149907, .0092551389, .0023752071],
[.004875051, .0124746358, .0009613152, .0105665812, .0026524426],
[.0040718954, .0103613938, .0008554615, .0089931297, .0024374625],
[.0026430804, .0070845916, .0005364369, .0057654258, .0015988838],
[.0037798151, .0103849291, .0007393481, .0082021938, .0023489261],
[.0045654631, .0130329403, .0009128134, .0100053262, .0028048602],
[.0027682389, .0113292677, .0005325113, .0061289353, .0017330763]
]).T
self.margeff_dydx_dummy_overall = np.array([
[0.00549149574321, -0.05348235321783, 0.00298963549049,
-0.01479461677951, -0.00332167981255, -0.26502967041815],
[0.00345677928276, -0.00950322030929, -0.00189456107189,
0.00033893662061, -0.00314690167350, -0.21040878091828],
[-0.00645089013284, 0.00401746940204, -0.00083948249351,
0.01114202556889, 0.00277069841472, -0.15967397659686],
[-0.00215436802341, -0.00366545199370, -0.00000002297812,
-0.00457368049644, 0.00065303026027, -0.00094772782001],
[0.00058038428936, -0.00369080100124, 0.00035948233235,
-0.00018863693013, 0.00079351293461, 0.12640653743480],
[0.00217597030999, -0.01279456622853, -0.00091882392767,
0.00001651192759, -0.00037998290789, 0.27175070356670],
[-0.00309932483642, 0.07911868907484, 0.00030378521102,
0.00805941631677, 0.00263129901425, 0.23790291475181]]).T
self.margeff_dydx_dummy_overall_se = np.array([
[.0037314453, .0094102332, .000688838, .0079744554, .0019365971,
.0243914836],
[.0038215262, .0095938828, .0007410885, .008259353, .0019984087,
.0317628806],
[.0031045718, .00785814, .0006504353, .0067892866, .0018060332,
0.0262803561],
[.0019756086, .0051031194, .0003862449, .0043621673, .0011796953,
.0219999601],
[.0029714074, .0081732018, .0005715192, .0064742872, .0019130195,
.0331694192],
[.0034443743, .0097296187, .0006774867, .0075996454, .0021993881,
.038600835],
[.0032003518, .0098741227, .0006335772, .0070902078, .0021003227,
.0255727127]]).T
self.margeff_eydx_dummy_overall = np.array([
[.03939188, -.65758371, .01750922, -.12131806, -.03613241,
-3.2132513],
[.02752366, -.383165, -.00830021, -.03652935, -.03286046,
-1.8741853],
[-.05006681, -.2719659, -.00626481, .06525323, .01012554,
-2.0058029],
[-.05239558, -.22549142, .00025015, -.13104416, .01114517,
-.27052009],
[-.00296374, .25627809, .00140513, .03358712, .02296041,
1.3302701],
[.00328283, .2800168, -.0083912, .04332782, .01575863,
1.8441023],
[-.03257068, .98346111, -.00122118, .10847807, .0406456,
2.9119099]]).T
self.margeff_eydx_dummy_overall_se = np.array([
[.0272085605, .0777760394, .0052427952, .0584011446, .0148618012,
.5796921383],
[.0262290023, .0724479385, .005174736, .0567743614, .0144447083,
.3015738731],
[.0321415498, .0895589422, .0067480662, .0701460193, .0190451865,
.3904138447],
[.0511305319, .1420904068, .0102342163, .1129912244, .0308618233,
.3693799595],
[.0340186217, .0991711703, .0065812158, .0737441012, .0212966336,
.2346982385],
[.0289250212, .0840662279, .0056743561, .0631772185, .0177278895,
.2089516714],
[.0318251305, .1085637405, .0062400589, .0699123044, .0201045606,
.3727166284]]).T
# taken from gretl
self.resid = np.loadtxt(os.path.join(cur_dir,'mnlogit_resid.csv'),
delimiter=",")
class DiscreteL1(object):
def __init__(self):
"""
Special results for L1 models
Uses the Spector data and a script to generate the baseline results
"""
pass
def logit(self):
"""
Results generated with:
data = sm.datasets.spector.load()
data.exog = sm.add_constant(data.exog, prepend=True)
alpha = 3 * np.array([0, 1, 1, 1])
res2 = sm.Logit(data.endog, data.exog).fit_regularized(
method="l1", alpha=alpha, disp=0, trim_mode='size',
size_trim_tol=1e-5, acc=1e-10, maxiter=1000)
"""
nan = np.nan
self.params = [-4.10271595, 0., 0.15493781, 0.]
self.conf_int = [[-9.15205122, 0.94661932], [nan, nan],
[-0.06539482, 0.37527044], [ nan, nan]]
self.bse = [ 2.5762388 , nan, 0.11241668, nan]
self.nnz_params = 2
self.aic = 42.091439368583671
self.bic = 45.022911174183122
self.cov_params = [[ 6.63700638, nan, -0.28636261, nan],
[nan, nan, nan, nan], [-0.28636261, nan, 0.01263751, nan],
[nan, nan, nan, nan]]
def sweep(self):
"""
Results generated with
params = np.zeros((3, 4))
alphas = np.array(
[[0.1, 0.1, 0.1, 0.1],
[0.4, 0.4, 0.5, 0.5], [0.5, 0.5, 1, 1]])
model = sm.Logit(data.endog, data.exog)
for i in range(3):
alpha = alphas[i, :]
res2 = model.fit_regularized(method="l1", alpha=alpha, disp=0, acc=1e-10,
maxiter=1000, trim_mode='off')
params[i, :] = res2.params
print params
"""
self.params = [[-10.37593611, 2.27080968, 0.06670638, 2.05723691],
[ -5.32670811, 1.18216019, 0.01402395, 1.45178712],
[ -3.92630318, 0.90126958, -0. , 1.09498178]]
def probit(self):
"""
Results generated with
data = sm.datasets.spector.load()
data.exog = sm.add_constant(data.exog, prepend=True)
alpha = np.array([0.1, 0.2, 0.3, 10])
res2 = sm.Probit(data.endog, data.exog).fit_regularized(
method="l1", alpha=alpha, disp=0, trim_mode='auto',
auto_trim_tol=0.02, acc=1e-10, maxiter=1000)
"""
nan = np.nan
self.params = [-5.40476992, 1.25018458, 0.04744558, 0. ]
self.conf_int = [[-9.44077951, -1.36876033],
[ 0.03716721, 2.46320194],
[-0.09727571, 0.19216687],
[ np.nan, np.nan]]
self.bse = [ 2.05922641, 0.61889778, 0.07383875, np.nan]
self.nnz_params = 3
self.aic = 38.399773877542927
self.bic = 42.796981585942106
self.cov_params = [[ 4.24041339, -0.83432592, -0.06827915, nan],
[-0.83432592, 0.38303447, -0.01700249, nan],
[-0.06827915, -0.01700249, 0.00545216, nan],
[ nan, nan, nan, nan]]
def mnlogit(self):
"""
Results generated with
anes_data = sm.datasets.anes96.load()
anes_exog = anes_data.exog
anes_exog = sm.add_constant(anes_exog, prepend=False)
mlogit_mod = sm.MNLogit(anes_data.endog, anes_exog)
alpha = 10 * np.ones((mlogit_mod.J - 1, mlogit_mod.K))
alpha[-1,:] = 0
mlogit_l1_res = mlogit_mod.fit_regularized(
method='l1', alpha=alpha, trim_mode='auto', auto_trim_tol=0.02,
acc=1e-10)
"""
self.params = [[ 0.00100163, -0.05864195, -0.06147822, -0.04769671, -0.05222987,
-0.09522432],
[ 0. , 0.03186139, 0.12048999, 0.83211915, 0.92330292,
1.5680646 ],
[-0.0218185 , -0.01988066, -0.00808564, -0.00487463, -0.01400173,
-0.00562079],
[ 0. , 0.03306875, 0. , 0.02362861, 0.05486435,
0.14656966],
[ 0. , 0.04448213, 0.03252651, 0.07661761, 0.07265266,
0.0967758 ],
[ 0.90993803, -0.50081247, -2.08285102, -5.26132955, -4.86783179,
-9.31537963]]
self.conf_int = [[[ -0.0646223 , 0.06662556],
[ np.nan, np.nan],
[ -0.03405931, -0.00957768],
[ np.nan, np.nan],
[ np.nan, np.nan],
[ 0.26697895, 1.55289711]],
[[ -0.1337913 , 0.01650741],
[ -0.14477255, 0.20849532],
[ -0.03500303, -0.00475829],
[ -0.11406121, 0.18019871],
[ 0.00479741, 0.08416684],
[ -1.84626136, 0.84463642]],
[[ -0.17237962, 0.04942317],
[ -0.15146029, 0.39244026],
[ -0.02947379, 0.01330252],
[ np.nan, np.nan],
[ -0.02501483, 0.09006785],
[ -3.90379391, -0.26190812]],
[[ -0.12938296, 0.03398954],
[ 0.62612955, 1.03810876],
[ -0.02046322, 0.01071395],
[ -0.13738534, 0.18464256],
[ 0.03017236, 0.12306286],
[ -6.91227465, -3.61038444]],
[[ -0.12469773, 0.02023799],
[ 0.742564 , 1.10404183],
[ -0.02791975, -0.00008371],
[ -0.08491561, 0.19464431],
[ 0.0332926 , 0.11201273],
[ -6.29331126, -3.44235233]],
[[ -0.17165567, -0.01879296],
[ 1.33994079, 1.79618841],
[ -0.02027503, 0.00903345],
[ -0.00267819, 0.29581751],
[ 0.05343135, 0.14012026],
[-11.10419107, -7.52656819]]]
self.bse = [[ 0.03348221, 0.03834221, 0.05658338, 0.04167742, 0.03697408,
0.03899631],
[ np.nan, 0.09012101, 0.13875269, 0.10509867, 0.09221543,
0.11639184],
[ 0.00624543, 0.00771564, 0.01091253, 0.00795351, 0.00710116,
0.00747679],
[ np.nan, 0.07506769, np.nan, 0.08215148, 0.07131762,
0.07614826],
[ np.nan, 0.02024768, 0.02935837, 0.02369699, 0.02008204,
0.02211492],
[ 0.32804638, 0.68646613, 0.92906957, 0.84233441, 0.72729881,
0.91267567]]
self.nnz_params = 32
self.aic = 3019.4391360294126
self.bic = 3174.6431733460686
class Spector(object):
"""
Results are from Stata 11
"""
def __init__(self):
self.nobs = 32
def logit(self):
self.params = [2.82611297201, .0951576702557, 2.37868772835,
-13.0213483201]
self.cov_params = [[1.59502033639, -.036920566629, .427615725153,
-4.57347950298], [-.036920566629, .0200375937069,
.0149126464275, -.346255757562], [.427615725153 ,
.0149126464275, 1.13329715236, -2.35916128427],
[-4.57347950298, -.346255757562, -2.35916128427,
24.3179625937]]
self.bse = [1.26294114526, .141554207662, 1.06456430165, 4.93132462871]
self.resid_pearson = [-.1652382, -.2515266, -.4800059, -.1630655,
.8687437, -.1900454, -.165002, -.2331563,
-.3535812, .6647838, -.1583799, -.4843181,
-.689527, 2.043449, -.7516119, -.1764176,
-.2380445, -.2003426, -1.199277, .7164842,
-.255713, .3242821, -.5646816, -2.400189,
.4392082, 1.038473, .75747, -.6659256,
.4336657, .2404583, -1.060033, 2.829577]
self.resid_dev = [-.2321102, -.3502712, -.6439626, -.2290982,
1.060478, -.2663844, -.2317827, -.3253788, -.4853875,
.8555557, -.2225972, -.6491808, -.8819993, 1.813269,
-.9463985, -.247583, -.3320177, -.2805444, -1.335131,
.9103027, -.3559217, .4471892, -.744005, -1.955074,
.5939538, 1.209638, .952332, -.8567857, .5870719, .335292,
-1.227311, 2.096639]
# from gretl
self.resid_generalized = [-0.026578, -0.059501, -0.187260,
-0.025902, 0.430107, -0.034858, -0.026504, -0.051559,
-0.111127, 0.306489, -0.024470, -0.189997, -0.322240,
0.806789, -0.360990, -0.030184, -0.053626, -0.038588,
-0.589872, 0.339214, -0.061376, 0.095153, -0.241772,
-0.852091, 0.161709, 0.518867, 0.364579, -0.307219,
0.158296, 0.054660, -0.529117, 0.888969]
self.phat = np.array([ .02657799236476,
.05950126051903,
.18725991249084,
.02590163610876,
.56989300251007,
.03485824912786,
.02650404907763,
.05155897513032,
.11112663894892,
.69351142644882,
.02447037212551,
.18999740481377,
.32223951816559,
.1932111531496,
.36098992824554,
.03018374741077,
.05362640321255,
.03858831897378,
.58987241983414,
.66078591346741,
.06137581542134,
.90484726428986,
.24177247285843,
.85209089517593,
.8382905125618,
.48113295435905,
.63542068004608,
.30721867084503,
.84170418977737,
.94534027576447,
.52911710739136,
.1110308393836])
self.yhat = np.array([-3.6007342338562,
-2.7604126930237,
-1.4679137468338,
-3.6272060871124,
.28141465783119,
-3.3209850788116,
-3.6035962104797,
-2.9120934009552,
-2.0792844295502,
.81658720970154,
-3.6855175495148,
-1.4500269889832,
-.74349880218506,
-1.429278254509,
-.57107019424438,
-3.4698030948639,
-2.8705959320068,
-3.2154531478882,
.36343798041344,
.66679841279984,
-2.7273993492126,
2.2522828578949,
-1.1429864168167,
1.7510952949524,
1.6455633640289,
-.07550399750471,
.55554306507111,
-.81315463781357,
1.6709630489349,
2.8504176139832,
.11660042405128,
-2.0802545547485])
self.llf = -12.8896334653335
self.llnull = -20.5917296966173
self.df_model = 3
self.df_resid = 32 - 4 #TODO: is this right? not reported in stata
self.llr = 15.4041924625676
self.prsquared = .374038332124624
self.llr_pvalue = .00150187761112892
self.aic = 33.779266930667
self.bic = 39.642210541866
self.z = [2.237723415, 0.6722348408, 2.234423721, -2.640537645]
self.conf_int = [[.3507938,5.301432],[-.1822835,.3725988],[.29218,
4.465195],[-22.68657,-3.35613]]
self.pvalues = [.0252390974, .5014342039, .0254552063, .0082774596]
# taken from margins command
self.margeff_nodummy_dydx = [.36258084688424,.01220841099085,
.30517768382304]
self.margeff_nodummy_dydx_se = [.1094412, .0177942, .0923796]
self.margeff_nodummy_dydxmean = [.53385885781692,.01797548988961,
.44933926079386]
self.margeff_nodummy_dydxmean_se = [.237038, .0262369, .1967626]
self.margeff_nodummy_dydxmedian = [.25009492465091,.00842091261329,
.2105003352955]
self.margeff_nodummy_dydxmedian_se = [.1546708, .0134314, .0928183]
self.margeff_nodummy_dydxzero = [6.252993785e-06,2.105437138e-07,
5.263030788e-06]
self.margeff_nodummy_dydxzero_se = [.0000288, 9.24e-07, .000025]
self.margeff_nodummy_dyex = [1.1774000792198,.27896245178384,
.16960002159996]
self.margeff_nodummy_dyex_se = [.3616481, .4090679, .0635583]
self.margeff_nodummy_dyexmean = [1.6641381583512,.39433730945339,
.19658592659731]
self.margeff_nodummy_dyexmean_se = [.7388917, .5755722, .0860836]
#NOTE: PSI at median should be a NaN or 'omitted'
self.margeff_nodummy_dyexmedian = [.76654095836557,.18947053379898,0]
self.margeff_nodummy_dyexmedian_se = [ .4740659, .302207, 0]
#NOTE: all should be NaN
self.margeff_nodummy_dyexzero = [0,0,0]
self.margeff_nodummy_dyexzero_se = [0,0,0]
self.margeff_nodummy_eydx = [1.8546366266779,.06244722072812,
1.5610138123033]
self.margeff_nodummy_eydx_se = [.847903, .0930901, .7146715]
self.margeff_nodummy_eydxmean = [2.1116143062702,.0710998816585,
1.7773072368626]
self.margeff_nodummy_eydxmean_se = [ 1.076109, .1081501, .9120842]
self.margeff_nodummy_eydxmedian = [2.5488082240624,.0858205793373,
2.1452853812126]
self.margeff_nodummy_eydxmedian_se = [1.255377, .1283771, 1.106872]
self.margeff_nodummy_eydxzero = [2.8261067189993,.0951574597115,
2.3786824653103]
self.margeff_nodummy_eydxzero_se = [1.262961, .1415544, 1.064574]
self.margeff_nodummy_eyex = [5.4747106798973,1.3173389907576,
.44600395466634]
self.margeff_nodummy_eyex_se = [2.44682, 1.943525, .1567618]
self.margeff_nodummy_eyexmean = [6.5822977203268,1.5597536538833,
.77757191612739]
self.margeff_nodummy_eyexmean_se = [3.354433, 2.372543, .3990368]
self.margeff_nodummy_eyexmedian = [7.8120973525952,1.9309630350892,0]
self.margeff_nodummy_eyexmedian_se = [3.847731951, 2.888485089, 0]
self.margeff_nodummy_eyexzero = [0,0,0]
self.margeff_nodummy_eyexzero_se = [0,0,0]
# for below GPA = 2.0, psi = 1
self.margeff_nodummy_atexog1 = [.1456333017086,.00490359933927,
.12257689308426]
self.margeff_nodummy_atexog1_se = [.145633, .0111226, .1777101]
# for below GPA at mean, tuce = 21, psi = 0
self.margeff_nodummy_atexog2 = [.25105129214546,.00845311433473,
.2113052923675]
self.margeff_nodummy_atexog2_se = [.1735778, .012017, .0971515]
# must get this from older margeff or i.psi then margins
self.margeff_dummy_dydx = [.36258084688424,.01220841099085,
.35751515254729]
self.margeff_dummy_dydx_se = [.1094412, .0177942, .1420034]
self.margeff_dummy_dydxmean = [.53385885781692,.01797548988961,
.4564984096959]
self.margeff_dummy_dydxmean_se = [.237038, .0262369, .1810537]
#self.margeff_dummy_dydxmedian
# from margeff
self.margeff_dummy_count_dydx_median = [0.250110487483923,
0.008426867847905, 0.441897738279663]
self.margeff_dummy_count_dydx_median_se = [.1546736661, .0134551951,
.1792363708]
# estimate with i.psi for the below then use margins
self.margeff_dummy_eydx = [1.8546366266779,.06244722072812,
1.5549034398832]
self.margeff_dummy_eydx_se = [.847903, .0930901, .7283702]
# ie
# margins, eydx(*) at((mean) _all)
self.margeff_dummy_eydxmean = [2.1116143062702,.0710998816585,
1.6631775707188]
self.margeff_dummy_eydxmean_se = [1.076109, .1081501, .801205]
# Factor variables not allowed in below
# test raises
#self.margeff_dummy_dydxzero
#self.margeff_dummy_eydxmedian
#self.margeff_dummy_eydxzero
#self.margeff_dummy_dyex
#self.margeff_dummy_dyexmean
#self.margeff_dummy_dyexmedian
#self.margeff_dummy_dyexzero
#self.margeff_dummy_eyex
#self.margeff_count_dummy_dydx_median
#self.margeff_count_dummy_dydx_median_se
#NOTE: need old version of margeff for nodisc, but the at option is broken
# stata command is margeff, count nodisc
# this can be replicated with the new results by margeff
# and then using margins for the last value
self.margeff_count_dydx = [.3625767598018, .0122068569914, .3051777]
self.margeff_count_dydx_se = [.1094379569, .0177869773, .0923796]
# middle value taken from margeff, the rest from margins
self.margeff_count_dydxmean = [.5338588, 0.01797186545386,
.4493393 ]
self.margeff_count_dydxmean_se = [.237038, .0262211, .1967626]
# with new version of margeff this is just a call to
# margeff
# mat list e(margeff_b), nonames format(%17.16g)
self.margeff_count_dummy_dydxoverall = [.362576759801767,
.012206856991439, .357515163621704]
# AFAICT, an easy way to get se is
# mata
# V = st_matrix("e(margeff_V)")
# se = diagonal(cholesky(diag(V)))
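# numpy equivalent of the mata lines above (added note; V stands for the same
# covariance matrix e(margeff_V)): se = np.sqrt(np.diag(V))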
# last SE taken from margins with i.psi; don't know why margeff
# is different, but trust the official results
self.margeff_count_dummy_dydxoverall_se = [.1094379569, .0177869773,
.1420034]
#.1574340751 ]
# from new margeff
self.margeff_count_dummy_dydxmean = [0.533849340033768,
0.017971865453858, 0.456498405282412]
self.margeff_count_dummy_dydxmean_se = [.2370202503, .0262210796,
.1810536852 ]
# for below GPA = 2.0, psi = 1
self.margeff_dummy_atexog1 = [.1456333017086,.00490359933927,
.0494715429937]
self.margeff_dummy_atexog1_se = [.145633, .0111226, .0731368]
# for below GPA at mean, tuce = 21, psi = 0
self.margeff_dummy_atexog2 = [.25105129214546,.00845311433473,
.44265645632553]
self.margeff_dummy_atexog2_se = [.1735778, .012017, .1811925]
#The test for the prediction table was taken from Gretl
#Gretl Output matched the Stata output here for params and SE
self.pred_table = np.array([[18, 3], [3, 8]])
def probit(self):
self.params = [1.62581025407, .051728948442, 1.42633236818,
-7.45232041607]
self.cov_params = [[.481472955383, -.01891350017, .105439226234,
-1.1696681354], [-.01891350017, .00703757594, .002471864882,
-.101172838897], [.105439226234, .002471864882, .354070126802,
-.594791776765], [-1.1696681354, -.101172838897, -.594791776765,
6.46416639958]]
self.bse = [.693882522754, .083890261293, .595037920474, 2.54247249731]
self.llf = -12.8188033249334
self.llnull = -20.5917296966173
self.df_model = 3
self.df_resid = 32 - 4
self.llr = 15.5458527433678
self.prsquared = .377478069409622
self.llr_pvalue = .00140489496775855
self.aic = 33.637606649867
self.bic = 39.500550261066
self.z = [ 2.343062695, .6166263836, 2.397044489, -2.931131182]
self.conf_int = [[.2658255,2.985795],[-.1126929,.2161508],[.2600795,
2.592585],[-12.43547,-2.469166]]
self.pvalues = [.0191261688, .537481188, .0165279168, .0033773013]
self.phat = [.0181707, .0530805, .1899263, .0185707, .5545748,
.0272331, .0185033, .0445714, .1088081, .6631207,
.0161024, .1935566, .3233282, .1951826, .3563406,
.0219654, .0456943, .0308513, .5934023, .6571863,
.0619288, .9045388, .2731908, .8474501, .8341947,
.488726, .6424073, .3286732, .8400168, .9522446,
.5399595, .123544]
self.yhat = np.array([-2.0930860042572,
-1.615691781044,
-.87816804647446,
-2.0842070579529,
.13722851872444,
-1.9231110811234,
-2.0856919288635,
-1.6999372243881,
-1.2328916788101,
.42099541425705,
-2.1418602466583,
-.86486464738846,
-.45841211080551,
-.85895526409149,
-.36825761198997,
-2.0147502422333,
-1.6881184577942,
-1.8684275150299,
.23630557954311,
.40479621291161,
-1.538782119751,
1.3078554868698,
-.60319095849991,
1.025558590889,
.97087496519089,
-.02826354466379,
.36490100622177,
-.44357979297638,
.99452745914459,
1.6670187711716,
.10033150017262,
-1.1574513912201])
self.resid_dev = [-.191509, -.3302762, -.6490455, -.1936247, 1.085867,
-.2349926, -.1932698, -.3019776, -.4799906, .9064196,
-.1801855, -.6559291, -.8838201, 1.807661, -.9387071,
-.2107617, -.3058469, -.2503485, -1.341589, .9162835,
-.3575735, .447951, -.7988633, -1.939208, .6021435,
1.196623, .9407793, -.8927477, .59048, .3128364,
-1.246147, 2.045071]
# Stata doesn't have it, but I think it's just an oversight
self.resid_pearson = None
# generalized residuals from gretl
self.resid_generalized = [-0.045452, -0.114220, -0.334908,
-0.046321, 0.712624, -0.064538,
-0.046175, -0.098447, -0.209349,
0.550593, -0.040906, -0.340339,
-0.530763, 1.413373, -0.579170,
-0.053593, -0.100556, -0.071855,
-0.954156, 0.559294, -0.130167,
0.187523, -0.457597, -1.545643,
0.298511, 0.815964, 0.581013,
-0.538579, 0.289631, 0.104405,
-0.862836, 1.652638]
self.pred_table = np.array([[18, 3], [3, 8]])
class RandHIE(object):
"""
Results obtained from Stata 11
"""
def __init__(self):
self.nobs = 20190
def poisson(self):
self.params = [-.052535114675, -.247086797633, .035290201794,
-.03457750643, .271713973711, .033941474461, -.012635035534,
.054056326828, .206115121809, .700352877227]
self.cov_params = None
self.bse = [.00288398915279, .01061725196728, .00182833684966,
.00161284852954, .01223913844387, .00056476496963,
.00925061122826, .01530987068312, .02627928267502,
.01116266712362]
predict = np.loadtxt(os.path.join(cur_dir, 'yhat_poisson.csv'),
delimiter=",")
self.phat = predict[:,0]
self.yhat = predict[:,1]
self.llf = -62419.588535018
self.llnull = -66647.181687959
self.df_model = 9
self.df_resid = self.nobs - self.df_model - 1
self.llr = 8455.186305881856
self.prsquared = .0634324369893758
self.llr_pvalue = 0
self.aic = 124859.17707
self.bic = 124938.306497
self.z = [-18.21612769, -23.27219872, 19.30180524, -21.43878101,
22.20041672, 60.09840604, -1.36585953, 3.53081538, 7.84325525,
62.74063980]
self.conf_int = [[ -.0581876, -.0468826],[-0.2678962, -0.2262774],
[0.0317067, 0.0388737],[-0.0377386, -0.0314164],
[0.2477257, 0.2957022], [0.0328346, 0.0350484],[-0.0307659,
0.0054958], [0.0240495, 0.0840631],[0.1546087, 0.2576216],
[0.6784745, 0.7222313]]
self.pvalues = [3.84415e-74, 8.4800e-120, 5.18652e-83, 5.8116e-102,
3.4028e-109, 0, .1719830562, .0004142808, 4.39014e-15, 0]
# from stata
# use margins and put i. in front of dummies
self.margeff_dummy_overall = [-0.15027280560599, -0.66568074771099,
0.10094500919706, -0.09890639687842,
0.77721770295360, 0.09708707452600,
-0.03608195237609, 0.15804581481115,
0.65104087597053]
self.margeff_dummy_overall_se = [.008273103, .0269856266,
.0052466639, .0046317555, .0351582169, .0016652181,
.0263736472, .0457480115, .0913901155]
# just use margins
self.margeff_nodummy_overall = [-0.15027280560599, -0.70677348928158,
0.10094500919705, -0.09890639687842,
0.77721770295359, 0.09708707452600,
-0.03614158359367, 0.15462412033340,
0.58957704430148]
self.margeff_nodummy_overall_se = [.008273103, .0305119343,
.0052466639, .0046317555,
.0351582168, .0016652181,
.0264611158, .0437974779,
.0752099666]
# taken from gretl
self.resid = np.loadtxt(os.path.join(cur_dir,'poisson_resid.csv'),
delimiter=",")
def negativebinomial_nb2_bfgs(self):
# R 2.15.1 MASS 7.3-22 glm.nb()
self.params = [-0.0579469537244314,
-0.267787718814838, 0.0412060770911646, -0.0381376804392121,
0.268915772213171, 0.0381637446219235, -0.0441338846217674,
0.0172521803400544, 0.177960787443151,0.663556087183864,
# lnalpha from stata
1.292953339909746
]
# alpha and stderr from stata
self.lnalpha_std_err = .0143932
self.lnalpha = 0.256929012449
self.bse = [0.00607085853920512, 0.0226125368090765,
0.00405542008282773, 0.00344455937127785, 0.0298855063286547,
0.00142421904710063, 0.0199374393307107, 0.0358416931939136,
0.0741013728607101, 0.0250354082637892,
# from stata
.0186098
]
self.z = [-9.54510030998327, -11.8424447940467,
10.1607419822296, -11.071860382846, 8.99820030672628,
26.7962605187844, -2.21361850384595, 0.481343898758222,
2.40158556546135, 26.5047040652267]
self.pvalues = [1.35975947860026e-21,
2.35486776488278e-32, 2.96808970292151e-24,
1.71796558863781e-28, 2.2944789508802e-19,
3.57231639404726e-158, 0.0268550333379416, 0.630272102021494,
0.0163241908407114, 8.55476622951356e-155]
self.fittedvalues = [0.892904166867786, 0.892904166867786, 0.892904166867786,
0.892904166867786, 0.892904166867786, 0.937038051489553,
0.937038051489553, 0.937038051489553, 0.937038051489553,
0.937038051489553]
#self.aic = 86789.3241530713 # This is what R reports
self.aic = 86789.32415307125484 # from Stata
self.df_resid = 20180
self.df_model = 9
# R conf_int: 1.96 * bse, not profile likelihood via R's confint()
self.conf_int = [
# from Stata
[-.0698826, -.0460113],
[-.3122654, -.2233101],
[ .0330781, .049334],
[-.0448006, -.0314748],
[ .2102246, .3276069],
[ .0352959, .0410316],
[-.0834356, -.0048321],
[-.0535908, .0880951],
[ .0324115, .3235101],
[ .6150055, .7121067],
# from Stata
[ 1.256989, 1.329947]
]
self.bic = 86876.36652289562335 # stata
self.llnull = -44199.27443563430279 # stata
self.llr = 1631.224718197351 # stata
self.llf = -43383.66207653563 # stata
self.df_model = 9.0
self.llr_pvalue = 0.0
def negativebinomial_nb1_bfgs(self):
# Unpublished implementation intended for R's COUNT package. Sent by
# J.Hilbe (of Cambridge UP NBin book) and Andrew Robinson to Vincent
# Arel-Bundock on 2012-12-06.
#self.params = [-0.065309744899923, -0.296016207412261,
# 0.0411724098925173, -0.0320460533573259, 0.19083354227553,
# 0.0318566232844115, -0.0331972813313092, -0.0484691550721231,
# 0.111971860837541, 0.757560143822609,
# 3.73086958562569]
# from Stata
self.params = [-.065317260803762961, -.296023807893893376,
.041187021258044826, -.032028789543547605,
.19065933246421754, .031871625115758778,
-.033250849053302826, -.04850769174426571,
.111813637465757343, .757277086555503409,
3.731151380800305]
# lnalpha and lnalpha_std_err are from stata
self.lnalpha = 1.316716867203
self.lnalpha_std_err = .0168876692
self.bse = [0.00536019929563678,
0.0196998350459769, 0.00335779098766272, 0.00301145915122889,
0.0237984097096245, 0.00107360844112751, 0.0167174614755359,
0.0298037989274781, 0.0546838603596457,0.0214703279904911,
0.0630011409376052]
self.z = [-12.1842008660173, -15.0263292419148,
12.2617548393554, -10.6413707601675, 8.0187518663633,
29.6724784046551, -1.98578482623631, -1.62627439508848,
2.04762173155154, 35.2840508145997,
# From R, this is alpha/bse(alpha)
59.2190796881069
# taken from Stata even though they don't report it
# lnalpha/bse(lnalpha)
#77.968995
]
self.conf_int = [
[-0.075815736,-0.0548037543],
[-0.334627884,-0.2574045307],
[ 0.034591140, 0.0477536802],
[-0.037948513,-0.0261435934],
[ 0.144188659, 0.2374784253],
[ 0.029752351, 0.0339608958],
[-0.065963506,-0.0004310568],
[-0.106884601, 0.0099462908],
[ 0.004791495, 0.2191522271],
[ 3.607387349, 3.8543518219],
[ 0.715478301, 0.7996419867]]
# from Stata
self.llf = -43278.75612911823
self.llnull = -44199.2744356343
self.llr = 1841.036613032149
self.aic = 86579.51225823645655
self.bic = 86666.55462806082505
self.llr_pvalue = 0.0
self.df_model = 9.0
self.df_resid = 20180.0
# Smoke tests TODO: check against other stats package
self.pvalues = [3.65557865e-034, 5.24431864e-051,
1.42921171e-034, 2.09797259e-026, 1.15949461e-015,
1.56785415e-193, 4.71746349e-002, 1.04731854e-001,
4.07534831e-002, 1.95504975e-272, 0.00000000e+000]
self.conf_int = [[-.0758236, -.054811],
[-.3346363, -.2574113],
[ .0346053, .0477687],
[-.0379314, -.0261261],
[ .1440119, .2373067],
[ .0297667, .0339766],
[-.0660178, -.0004839],
[-.1069241, .0099087],
[ .0046266, .2190007],
[ .7151889, .7993652],
# from stata for alpha, not lnalpha
[ 3.609675, 3.856716]]
#[ 1.28360034e+00, 1.34979803e+00]]
self.fittedvalues = [ 0.8487497 , 0.8487497 , 0.8487497 , 0.8487497,
0.8487497 , 0.88201746, 0.88201746, 0.88201746, 0.88201746,
0.88201746]
def negativebinomial_geometric_bfgs(self):
# Smoke tests TODO: Cross check with other stats package
self.params = [-0.05768894, -0.26646696, 0.04088528, -0.03795503,
0.26885821, 0.03802523, -0.04308456, 0.01931675, 0.18051684,
0.66469896]
self.bse = [ 0.00553867, 0.02061988, 0.00375937, 0.0030924 ,
0.02701658, 0.00132201, 0.01821646, 0.03271784, 0.06666231,
0.02250053]
self.pvalues = [ 2.10310916e-025, 3.34666368e-038, 1.50697768e-027,
1.25468406e-034, 2.48155744e-023, 6.18745348e-182,
1.80230194e-002, 5.54919603e-001, 6.77044178e-003,
8.44913440e-192]
self.z = [-10.41567024, -12.92281571, 10.8755779 , -12.27364916,
9.95160202, 28.76323587, -2.36514487, 0.59040434,
2.70792943, 29.54148082]
self.aic = 87101.159433012392 # old value 87101.160011780419
self.bic = 87180.288860125467 # old value 87180.289438893495
self.df_model = 9.0
self.df_resid = 20180.0
self.llf = -43540.58000589021
self.llnull = -44586.650971362695 # old value -44199.27443567125
self.llr = 2092.1425097129977 # old value 1317.3888595620811
self.llr_pvalue = 0 # old value 5.4288002863296022e-278
self.fittedvalues = [ 0.89348994, 0.89348994, 0.89348994,
0.89348994, 0.89348994, 0.9365745 , 0.9365745 , 0.9365745 ,
0.9365745 , 0.9365745 ]
self.conf_int = [[-0.06854453, -0.04683335],
[-0.30688118, -0.22605273],
[ 0.03351706, 0.04825351],
[-0.04401602, -0.03189404],
[ 0.21590669, 0.32180972],
[ 0.03543415, 0.04061632],
[-0.07878816, -0.00738096],
[-0.04480903, 0.08344253],
[ 0.04986111, 0.31117258],
[ 0.62059873, 0.70879919]]
|
|
from __future__ import division
from sympy.assumptions.ask import Q
from sympy.core.numbers import oo
from sympy.core.relational import Equality
from sympy.core.singleton import S
from sympy.core.symbol import (Dummy, symbols)
from sympy.sets.sets import (EmptySet, Interval, Union)
from sympy.simplify.simplify import simplify
from sympy.logic.boolalg import (
And, Boolean, Equivalent, ITE, Implies, Nand, Nor, Not, Or,
POSform, SOPform, Xor, conjuncts, disjuncts,
distribute_or_over_and, distribute_and_over_or,
eliminate_implications, is_nnf, is_cnf, is_dnf, simplify_logic,
to_nnf, to_cnf, to_dnf, to_int_repr, bool_map, true, false,
BooleanAtom, is_literal, term_to_integer, integer_to_term,
truth_table)
from sympy.utilities.pytest import raises, XFAIL
from sympy.utilities import cartes
A, B, C, D = symbols('A,B,C,D')
def test_overloading():
"""Test that |, & are overloaded as expected"""
assert A & B == And(A, B)
assert A | B == Or(A, B)
assert (A & B) | C == Or(And(A, B), C)
assert A >> B == Implies(A, B)
assert A << B == Implies(B, A)
assert ~A == Not(A)
assert A ^ B == Xor(A, B)
def test_And():
assert And() is true
assert And(A) == A
assert And(True) is true
assert And(False) is false
assert And(True, True ) is true
assert And(True, False) is false
assert And(False, False) is false
assert And(True, A) == A
assert And(False, A) is false
assert And(True, True, True) is true
assert And(True, True, A) == A
assert And(True, False, A) is false
assert And(2, A) == A
assert And(2, 3) is true
assert And(A < 1, A >= 1) is false
e = A > 1
assert And(e, e.canonical) == e.canonical
g, l, ge, le = A > B, B < A, A >= B, B <= A
assert And(g, l, ge, le) == And(l, le)
def test_Or():
assert Or() is false
assert Or(A) == A
assert Or(True) is true
assert Or(False) is false
assert Or(True, True ) is true
assert Or(True, False) is true
assert Or(False, False) is false
assert Or(True, A) is true
assert Or(False, A) == A
assert Or(True, False, False) is true
assert Or(True, False, A) is true
assert Or(False, False, A) == A
assert Or(2, A) is true
assert Or(A < 1, A >= 1) is true
e = A > 1
assert Or(e, e.canonical) == e
g, l, ge, le = A > B, B < A, A >= B, B <= A
assert Or(g, l, ge, le) == Or(g, ge)
def test_Xor():
assert Xor() is false
assert Xor(A) == A
assert Xor(A, A) is false
assert Xor(True, A, A) is true
assert Xor(A, A, A, A, A) == A
assert Xor(True, False, False, A, B) == ~Xor(A, B)
assert Xor(True) is true
assert Xor(False) is false
assert Xor(True, True ) is false
assert Xor(True, False) is true
assert Xor(False, False) is false
assert Xor(True, A) == ~A
assert Xor(False, A) == A
assert Xor(True, False, False) is true
assert Xor(True, False, A) == ~A
assert Xor(False, False, A) == A
assert isinstance(Xor(A, B), Xor)
assert Xor(A, B, Xor(C, D)) == Xor(A, B, C, D)
assert Xor(A, B, Xor(B, C)) == Xor(A, C)
assert Xor(A < 1, A >= 1, B) == Xor(0, 1, B) == Xor(1, 0, B)
e = A > 1
assert Xor(e, e.canonical) == Xor(0, 0) == Xor(1, 1)
def test_Not():
raises(TypeError, lambda: Not(True, False))
assert Not(True) is false
assert Not(False) is true
assert Not(0) is true
assert Not(1) is false
assert Not(2) is false
def test_Nand():
assert Nand() is false
assert Nand(A) == ~A
assert Nand(True) is false
assert Nand(False) is true
assert Nand(True, True ) is false
assert Nand(True, False) is true
assert Nand(False, False) is true
assert Nand(True, A) == ~A
assert Nand(False, A) is true
assert Nand(True, True, True) is false
assert Nand(True, True, A) == ~A
assert Nand(True, False, A) is true
def test_Nor():
assert Nor() is true
assert Nor(A) == ~A
assert Nor(True) is false
assert Nor(False) is true
assert Nor(True, True ) is false
assert Nor(True, False) is false
assert Nor(False, False) is true
assert Nor(True, A) is false
assert Nor(False, A) == ~A
assert Nor(True, True, True) is false
assert Nor(True, True, A) is false
assert Nor(True, False, A) is false
def test_Implies():
raises(ValueError, lambda: Implies(A, B, C))
assert Implies(True, True) is true
assert Implies(True, False) is false
assert Implies(False, True) is true
assert Implies(False, False) is true
assert Implies(0, A) is true
assert Implies(1, 1) is true
assert Implies(1, 0) is false
assert A >> B == B << A
assert (A < 1) >> (A >= 1) == (A >= 1)
assert (A < 1) >> (S(1) > A) is true
assert A >> A is true
def test_Equivalent():
assert Equivalent(A, B) == Equivalent(B, A) == Equivalent(A, B, A)
assert Equivalent() is true
assert Equivalent(A, A) == Equivalent(A) is true
assert Equivalent(True, True) == Equivalent(False, False) is true
assert Equivalent(True, False) == Equivalent(False, True) is false
assert Equivalent(A, True) == A
assert Equivalent(A, False) == Not(A)
assert Equivalent(A, B, True) == A & B
assert Equivalent(A, B, False) == ~A & ~B
assert Equivalent(1, A) == A
assert Equivalent(0, A) == Not(A)
assert Equivalent(A, Equivalent(B, C)) != Equivalent(Equivalent(A, B), C)
assert Equivalent(A < 1, A >= 1) is false
assert Equivalent(A < 1, A >= 1, 0) is false
assert Equivalent(A < 1, A >= 1, 1) is false
assert Equivalent(A < 1, S(1) > A) == Equivalent(1, 1) == Equivalent(0, 0)
assert Equivalent(Equality(A, B), Equality(B, A)) is true
def test_equals():
assert Not(Or(A, B)).equals( And(Not(A), Not(B)) ) is True
assert Equivalent(A, B).equals((A >> B) & (B >> A)) is True
assert ((A | ~B) & (~A | B)).equals((~A & ~B) | (A & B)) is True
assert (A >> B).equals(~A >> ~B) is False
assert (A >> (B >> A)).equals(A >> (C >> A)) is False
raises(NotImplementedError, lambda: And(A, A < B).equals(And(A, B > A)))
def test_simplification():
"""
Test working of simplification methods.
"""
set1 = [[0, 0, 1], [0, 1, 1], [1, 0, 0], [1, 1, 0]]
set2 = [[0, 0, 0], [0, 1, 0], [1, 0, 1], [1, 1, 1]]
from sympy.abc import w, x, y, z
assert SOPform([x, y, z], set1) == Or(And(Not(x), z), And(Not(z), x))
assert Not(SOPform([x, y, z], set2)) == Not(Or(And(Not(x), Not(z)), And(x, z)))
assert POSform([x, y, z], set1 + set2) is true
assert SOPform([x, y, z], set1 + set2) is true
assert SOPform([Dummy(), Dummy(), Dummy()], set1 + set2) is true
minterms = [[0, 0, 0, 1], [0, 0, 1, 1], [0, 1, 1, 1], [1, 0, 1, 1],
[1, 1, 1, 1]]
dontcares = [[0, 0, 0, 0], [0, 0, 1, 0], [0, 1, 0, 1]]
assert (
SOPform([w, x, y, z], minterms, dontcares) ==
Or(And(Not(w), z), And(y, z)))
assert POSform([w, x, y, z], minterms, dontcares) == And(Or(Not(w), y), z)
# test simplification
ans = And(A, Or(B, C))
assert simplify_logic(A & (B | C)) == ans
assert simplify_logic((A & B) | (A & C)) == ans
assert simplify_logic(Implies(A, B)) == Or(Not(A), B)
assert simplify_logic(Equivalent(A, B)) == \
Or(And(A, B), And(Not(A), Not(B)))
assert simplify_logic(And(Equality(A, 2), C)) == And(Equality(A, 2), C)
assert simplify_logic(And(Equality(A, 2), A)) == And(Equality(A, 2), A)
assert simplify_logic(And(Equality(A, B), C)) == And(Equality(A, B), C)
assert simplify_logic(Or(And(Equality(A, 3), B), And(Equality(A, 3), C))) \
== And(Equality(A, 3), Or(B, C))
e = And(A, x**2 - x)
assert simplify_logic(e) == And(A, x*(x - 1))
assert simplify_logic(e, deep=False) == e
# check input
ans = SOPform([x, y], [[1, 0]])
assert SOPform([x, y], [[1, 0]]) == ans
assert POSform([x, y], [[1, 0]]) == ans
raises(ValueError, lambda: SOPform([x], [[1]], [[1]]))
assert SOPform([x], [[1]], [[0]]) is true
assert SOPform([x], [[0]], [[1]]) is true
assert SOPform([x], [], []) is false
raises(ValueError, lambda: POSform([x], [[1]], [[1]]))
assert POSform([x], [[1]], [[0]]) is true
assert POSform([x], [[0]], [[1]]) is true
assert POSform([x], [], []) is false
# check working of simplify
assert simplify((A & B) | (A & C)) == And(A, Or(B, C))
assert simplify(And(x, Not(x))) == False
assert simplify(Or(x, Not(x))) == True
def test_bool_map():
"""
Test working of bool_map function.
"""
minterms = [[0, 0, 0, 1], [0, 0, 1, 1], [0, 1, 1, 1], [1, 0, 1, 1],
[1, 1, 1, 1]]
from sympy.abc import a, b, c, w, x, y, z
assert bool_map(Not(Not(a)), a) == (a, {a: a})
assert bool_map(SOPform([w, x, y, z], minterms),
POSform([w, x, y, z], minterms)) == \
(And(Or(Not(w), y), Or(Not(x), y), z), {x: x, w: w, z: z, y: y})
assert bool_map(SOPform([x, z, y], [[1, 0, 1]]),
SOPform([a, b, c], [[1, 0, 1]])) != False
function1 = SOPform([x, z, y], [[1, 0, 1], [0, 0, 1]])
function2 = SOPform([a, b, c], [[1, 0, 1], [1, 0, 0]])
assert bool_map(function1, function2) == \
(function1, {y: a, z: b})
def test_bool_symbol():
"""Test that mixing symbols with boolean values
works as expected"""
assert And(A, True) == A
assert And(A, True, True) == A
assert And(A, False) is false
assert And(A, True, False) is false
assert Or(A, True) is true
assert Or(A, False) == A
def test_is_boolean():
assert true.is_Boolean
assert (A & B).is_Boolean
assert (A | B).is_Boolean
assert (~A).is_Boolean
assert (A ^ B).is_Boolean
def test_subs():
assert (A & B).subs(A, True) == B
assert (A & B).subs(A, False) is false
assert (A & B).subs(B, True) == A
assert (A & B).subs(B, False) is false
assert (A & B).subs({A: True, B: True}) is true
assert (A | B).subs(A, True) is true
assert (A | B).subs(A, False) == B
assert (A | B).subs(B, True) is true
assert (A | B).subs(B, False) == A
assert (A | B).subs({A: True, B: True}) is true
"""
we test for axioms of boolean algebra
see http://en.wikipedia.org/wiki/Boolean_algebra_(structure)
"""
def test_commutative():
"""Test for commutativity of And and Or"""
A, B = map(Boolean, symbols('A,B'))
assert A & B == B & A
assert A | B == B | A
def test_and_associativity():
"""Test for associativity of And"""
assert (A & B) & C == A & (B & C)
def test_or_associativity():
"""Test for associativity of Or"""
assert ((A | B) | C) == (A | (B | C))
def test_double_negation():
a = Boolean()
assert ~(~a) == a
# test methods
def test_eliminate_implications():
from sympy.abc import A, B, C, D
assert eliminate_implications(Implies(A, B, evaluate=False)) == (~A) | B
assert eliminate_implications(
A >> (C >> Not(B))) == Or(Or(Not(B), Not(C)), Not(A))
assert eliminate_implications(Equivalent(A, B, C, D)) == \
(~A | B) & (~B | C) & (~C | D) & (~D | A)
def test_conjuncts():
assert conjuncts(A & B & C) == set([A, B, C])
assert conjuncts((A | B) & C) == set([A | B, C])
assert conjuncts(A) == set([A])
assert conjuncts(True) == set([True])
assert conjuncts(False) == set([False])
def test_disjuncts():
assert disjuncts(A | B | C) == set([A, B, C])
assert disjuncts((A | B) & C) == set([(A | B) & C])
assert disjuncts(A) == set([A])
assert disjuncts(True) == set([True])
assert disjuncts(False) == set([False])
def test_distribute():
assert distribute_and_over_or(Or(And(A, B), C)) == And(Or(A, C), Or(B, C))
assert distribute_or_over_and(And(A, Or(B, C))) == Or(And(A, B), And(A, C))
def test_to_nnf():
assert to_nnf(true) is true
assert to_nnf(false) is false
assert to_nnf(A) == A
assert to_nnf(A | ~A | B) is true
assert to_nnf(A & ~A & B) is false
assert to_nnf(A >> B) == ~A | B
assert to_nnf(Equivalent(A, B, C)) == (~A | B) & (~B | C) & (~C | A)
assert to_nnf(A ^ B ^ C) == \
(A | B | C) & (~A | ~B | C) & (A | ~B | ~C) & (~A | B | ~C)
assert to_nnf(ITE(A, B, C)) == (~A | B) & (A | C)
assert to_nnf(Not(A | B | C)) == ~A & ~B & ~C
assert to_nnf(Not(A & B & C)) == ~A | ~B | ~C
assert to_nnf(Not(A >> B)) == A & ~B
assert to_nnf(Not(Equivalent(A, B, C))) == And(Or(A, B, C), Or(~A, ~B, ~C))
assert to_nnf(Not(A ^ B ^ C)) == \
(~A | B | C) & (A | ~B | C) & (A | B | ~C) & (~A | ~B | ~C)
assert to_nnf(Not(ITE(A, B, C))) == (~A | ~B) & (A | ~C)
assert to_nnf((A >> B) ^ (B >> A)) == (A & ~B) | (~A & B)
assert to_nnf((A >> B) ^ (B >> A), False) == \
(~A | ~B | A | B) & ((A & ~B) | (~A & B))
def test_to_cnf():
assert to_cnf(~(B | C)) == And(Not(B), Not(C))
assert to_cnf((A & B) | C) == And(Or(A, C), Or(B, C))
assert to_cnf(A >> B) == (~A) | B
assert to_cnf(A >> (B & C)) == (~A | B) & (~A | C)
assert to_cnf(A & (B | C) | ~A & (B | C), True) == B | C
assert to_cnf(Equivalent(A, B)) == And(Or(A, Not(B)), Or(B, Not(A)))
assert to_cnf(Equivalent(A, B & C)) == \
(~A | B) & (~A | C) & (~B | ~C | A)
assert to_cnf(Equivalent(A, B | C), True) == \
And(Or(Not(B), A), Or(Not(C), A), Or(B, C, Not(A)))
def test_to_dnf():
assert to_dnf(~(B | C)) == And(Not(B), Not(C))
assert to_dnf(A & (B | C)) == Or(And(A, B), And(A, C))
assert to_dnf(A >> B) == (~A) | B
assert to_dnf(A >> (B & C)) == (~A) | (B & C)
assert to_dnf(Equivalent(A, B), True) == \
Or(And(A, B), And(Not(A), Not(B)))
assert to_dnf(Equivalent(A, B & C), True) == \
Or(And(A, B, C), And(Not(A), Not(B)), And(Not(A), Not(C)))
def test_to_int_repr():
x, y, z = map(Boolean, symbols('x,y,z'))
def sorted_recursive(arg):
try:
return sorted(sorted_recursive(x) for x in arg)
except TypeError: # arg is not a sequence
return arg
assert sorted_recursive(to_int_repr([x | y, z | x], [x, y, z])) == \
sorted_recursive([[1, 2], [1, 3]])
assert sorted_recursive(to_int_repr([x | y, z | ~x], [x, y, z])) == \
sorted_recursive([[1, 2], [3, -1]])
def test_is_nnf():
from sympy.abc import A, B
assert is_nnf(true) is True
assert is_nnf(A) is True
assert is_nnf(~A) is True
assert is_nnf(A & B) is True
assert is_nnf((A & B) | (~A & A) | (~B & B) | (~A & ~B), False) is True
assert is_nnf((A | B) & (~A | ~B)) is True
assert is_nnf(Not(Or(A, B))) is False
assert is_nnf(A ^ B) is False
assert is_nnf((A & B) | (~A & A) | (~B & B) | (~A & ~B), True) is False
def test_is_cnf():
x, y, z = symbols('x,y,z')
assert is_cnf(x) is True
assert is_cnf(x | y | z) is True
assert is_cnf(x & y & z) is True
assert is_cnf((x | y) & z) is True
assert is_cnf((x & y) | z) is False
def test_is_dnf():
x, y, z = symbols('x,y,z')
assert is_dnf(x) is True
assert is_dnf(x | y | z) is True
assert is_dnf(x & y & z) is True
assert is_dnf((x & y) | z) is True
assert is_dnf((x | y) & z) is False
def test_ITE():
A, B, C = map(Boolean, symbols('A,B,C'))
assert ITE(True, False, True) is false
assert ITE(True, True, False) is true
assert ITE(False, True, False) is false
assert ITE(False, False, True) is true
assert isinstance(ITE(A, B, C), ITE)
A = True
assert ITE(A, B, C) == B
A = False
assert ITE(A, B, C) == C
B = True
assert ITE(And(A, B), B, C) == C
assert ITE(Or(A, False), And(B, True), False) is false
def test_ITE_diff():
# analogous to Piecewise.diff
x = symbols('x')
assert ITE(x > 0, x**2, x).diff(x) == ITE(x > 0, 2*x, 1)
def test_is_literal():
assert is_literal(True) is True
assert is_literal(False) is True
assert is_literal(A) is True
assert is_literal(~A) is True
assert is_literal(Or(A, B)) is False
assert is_literal(Q.zero(A)) is True
assert is_literal(Not(Q.zero(A))) is True
assert is_literal(Or(A, B)) is False
assert is_literal(And(Q.zero(A), Q.zero(B))) is False
def test_operators():
# Mostly test __and__, __rand__, and so on
assert True & A == A & True == A
assert False & A == A & False == False
assert A & B == And(A, B)
assert True | A == A | True == True
assert False | A == A | False == A
assert A | B == Or(A, B)
assert ~A == Not(A)
assert True >> A == A << True == A
assert False >> A == A << False == True
assert A >> True == True << A == True
assert A >> False == False << A == ~A
assert A >> B == B << A == Implies(A, B)
assert True ^ A == A ^ True == ~A
assert False ^ A == A ^ False == A
assert A ^ B == Xor(A, B)
def test_true_false():
x = symbols('x')
assert true is S.true
assert false is S.false
assert true is not True
assert false is not False
assert true
assert not false
assert true == True
assert false == False
assert not (true == False)
assert not (false == True)
assert not (true == false)
assert hash(true) == hash(True)
assert hash(false) == hash(False)
assert len(set([true, True])) == len(set([false, False])) == 1
assert isinstance(true, BooleanAtom)
assert isinstance(false, BooleanAtom)
# We don't want to subclass from bool, because bool subclasses from
# int. But operators like &, |, ^, <<, >>, and ~ act differently on 0 and
# 1 than we want them to on true and false. See the docstrings of the
# various And, Or, etc. functions for examples.
assert not isinstance(true, bool)
assert not isinstance(false, bool)
# Note: using 'is' comparison is important here. We want these to return
# true and false, not True and False
assert Not(true) is false
assert Not(True) is false
assert Not(false) is true
assert Not(False) is true
assert ~true is false
assert ~false is true
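# (added illustration) On plain Python bools, ~ is bitwise because bool
# subclasses int, so ~True is the integer -2 rather than a negation; on the
# BooleanAtom singletons it is logical negation, as asserted above.
assert ~True == -2
assert ~False == -1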
for T, F in cartes([True, true], [False, false]):
assert And(T, F) is false
assert And(F, T) is false
assert And(F, F) is false
assert And(T, T) is true
assert And(T, x) == x
assert And(F, x) is false
if not (T is True and F is False):
assert T & F is false
assert F & T is false
if F is not False:
assert F & F is false
if T is not True:
assert T & T is true
assert Or(T, F) is true
assert Or(F, T) is true
assert Or(F, F) is false
assert Or(T, T) is true
assert Or(T, x) is true
assert Or(F, x) == x
if not (T is True and F is False):
assert T | F is true
assert F | T is true
if F is not False:
assert F | F is false
if T is not True:
assert T | T is true
assert Xor(T, F) is true
assert Xor(F, T) is true
assert Xor(F, F) is false
assert Xor(T, T) is false
assert Xor(T, x) == ~x
assert Xor(F, x) == x
if not (T is True and F is False):
assert T ^ F is true
assert F ^ T is true
if F is not False:
assert F ^ F is false
if T is not True:
assert T ^ T is false
assert Nand(T, F) is true
assert Nand(F, T) is true
assert Nand(F, F) is true
assert Nand(T, T) is false
assert Nand(T, x) == ~x
assert Nand(F, x) is true
assert Nor(T, F) is false
assert Nor(F, T) is false
assert Nor(F, F) is true
assert Nor(T, T) is false
assert Nor(T, x) is false
assert Nor(F, x) == ~x
assert Implies(T, F) is false
assert Implies(F, T) is true
assert Implies(F, F) is true
assert Implies(T, T) is true
assert Implies(T, x) == x
assert Implies(F, x) is true
assert Implies(x, T) is true
assert Implies(x, F) == ~x
if not (T is True and F is False):
assert T >> F is false
assert F << T is false
assert F >> T is true
assert T << F is true
if F is not False:
assert F >> F is true
assert F << F is true
if T is not True:
assert T >> T is true
assert T << T is true
assert Equivalent(T, F) is false
assert Equivalent(F, T) is false
assert Equivalent(F, F) is true
assert Equivalent(T, T) is true
assert Equivalent(T, x) == x
assert Equivalent(F, x) == ~x
assert Equivalent(x, T) == x
assert Equivalent(x, F) == ~x
assert ITE(T, T, T) is true
assert ITE(T, T, F) is true
assert ITE(T, F, T) is false
assert ITE(T, F, F) is false
assert ITE(F, T, T) is true
assert ITE(F, T, F) is false
assert ITE(F, F, T) is true
assert ITE(F, F, F) is false
def test_bool_as_set():
x = symbols('x')
assert And(x <= 2, x >= -2).as_set() == Interval(-2, 2)
assert Or(x >= 2, x <= -2).as_set() == Interval(-oo, -2) + Interval(2, oo)
assert Not(x > 2).as_set() == Interval(-oo, 2)
# issue 10240
assert Not(And(x > 2, x < 3)).as_set() == \
Union(Interval(-oo, 2), Interval(3, oo))
assert true.as_set() == S.UniversalSet
assert false.as_set() == EmptySet()
@XFAIL
def test_multivariate_bool_as_set():
x, y = symbols('x,y')
assert And(x >= 0, y >= 0).as_set() == Interval(0, oo)*Interval(0, oo)
assert Or(x >= 0, y >= 0).as_set() == S.Reals*S.Reals - \
Interval(-oo, 0, True, True)*Interval(-oo, 0, True, True)
def test_all_or_nothing():
x = symbols('x', real=True)
args = x >= -oo, x <= oo
v = And(*args)
if v.func is And:
assert len(v.args) == len(args) - args.count(S.true)
else:
assert v == True
v = Or(*args)
if v.func is Or:
assert len(v.args) == 2
else:
assert v == True
def test_canonical_atoms():
assert true.canonical == true
assert false.canonical == false
def test_issue_8777():
x = symbols('x')
assert And(x > 2, x < oo).as_set() == Interval(2, oo, left_open=True)
assert And(x >= 1, x < oo).as_set() == Interval(1, oo)
assert (x < oo).as_set() == Interval(-oo, oo)
assert (x > -oo).as_set() == Interval(-oo, oo)
def test_issue_8975():
x = symbols('x')
assert Or(And(-oo < x, x <= -2), And(2 <= x, x < oo)).as_set() == \
Interval(-oo, -2) + Interval(2, oo)
def test_term_to_integer():
assert term_to_integer([1, 0, 1, 0, 0, 1, 0]) == 82
assert term_to_integer('0010101000111001') == 10809
def test_integer_to_term():
assert integer_to_term(777) == [1, 1, 0, 0, 0, 0, 1, 0, 0, 1]
assert integer_to_term(123, 3) == [1, 1, 1, 1, 0, 1, 1]
assert integer_to_term(456, 16) == [0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 0, 0, 1, 0, 0, 0]
def test_truth_table():
x, y = symbols('x,y')
assert list(truth_table(And(x, y), [x, y], input=False)) == [False, False, False, True]
assert list(truth_table(x | y, [x, y], input=False)) == [False, True, True, True]
assert list(truth_table(x >> y, [x, y], input=False)) == [True, True, False, True]
def test_issue_8571():
x = symbols('x')
for t in (S.true, S.false):
raises(TypeError, lambda: +t)
raises(TypeError, lambda: -t)
raises(TypeError, lambda: abs(t))
# use int(bool(t)) to get 0 or 1
raises(TypeError, lambda: int(t))
for o in [S.Zero, S.One, x]:
for _ in range(2):
raises(TypeError, lambda: o + t)
raises(TypeError, lambda: o - t)
raises(TypeError, lambda: o % t)
raises(TypeError, lambda: o*t)
raises(TypeError, lambda: o/t)
raises(TypeError, lambda: o**t)
o, t = t, o # do again in reversed order
|
|
# Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Module for operating different options on the weight matrix."""
import abc
import textwrap
from typing import List, Optional, Sequence, Tuple, Union
import deprecation
import ml_collections
import numpy as np
from reservoir_nn.utils import weight_properties
from scipy import stats
class WeightTransformation(abc.ABC):
"""Base class for WeightTransformation.
A WeightTransformation operates on one or more np.ndarray's.
"""
@abc.abstractmethod
def apply_transform(self, *args: np.ndarray) -> np.ndarray:
raise NotImplementedError()
@deprecation.deprecated(
details='Use WeightTransformation.apply_transform(*args). cl/366874369')
def __call__(self, *args: np.ndarray) -> np.ndarray:
return self.apply_transform(*args)
def batch_apply(
self, *args: Union[np.ndarray, Tuple[np.ndarray,
...]]) -> List[np.ndarray]:
"""Applies the transform to each of the args."""
r = []
for inputs in args:
if not isinstance(inputs, tuple):
inputs = (inputs,)
r.append(self.apply_transform(*inputs))
return r
class Chain(WeightTransformation, list):
"""Chains transformations to apply them in sequence."""
def __init__(self,
transformations: Optional[Sequence[WeightTransformation]] = None
):
if transformations is None:
list.__init__(self, [])
else:
list.__init__(self, transformations)
def __repr__(self):
transformation_str = ',\n'.join([repr(t) for t in self])
transformation_str = textwrap.indent(transformation_str, ' ')
return '{}([\n{}\n])'.format(self.__class__.__name__, transformation_str)
def apply_transform(self, *args: np.ndarray) -> np.ndarray:
inputs = args
for i, transformation in enumerate(self):
try:
output = transformation.apply_transform(*inputs)
# output will become inputs for the next transformation:
inputs = (output,)
except Exception as e:
raise Exception(
f'Error in {transformation}, the transformation at index position '
f'{i} of this Chain') from e
return output
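# A minimal usage sketch (added for illustration, not part of the original
# module): Chain applies its members in order, each output feeding the next
# apply_transform call. The two toy transformations below are assumptions
# made up only for this example.
def _example_chain_usage():
  class _AddOne(WeightTransformation):
    def apply_transform(self, matrix: np.ndarray) -> np.ndarray:
      return matrix + 1.0

  class _Double(WeightTransformation):
    def apply_transform(self, matrix: np.ndarray) -> np.ndarray:
      return matrix * 2.0

  chain = Chain([_AddOne(), _Double()])
  # Every element goes through (0 + 1) * 2 == 2.
  return chain.apply_transform(np.zeros((3, 3)))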
class FlipSignEntireRows(WeightTransformation):
"""Flips signs of a number of rows of the matrix to negative.
Attributes:
flip_proportion: proportion of the number of rows to be flipped
"""
def __init__(self, flip_proportion: float):
self.flip_proportion = flip_proportion
def apply_transform(self, matrix: np.ndarray, seed: int = -1) -> np.ndarray:
# number of rows
num_rows = matrix.shape[0]
# number of rows to flip sign
num_flips = int(num_rows * self.flip_proportion)
# randomly pick ids of rows to flip sign
if seed > -1:
np.random.seed(seed)
ids_row_flip = np.random.choice(num_rows, num_flips, replace=False)
# flip sign of the selected rows
matrix = np.copy(matrix)
matrix[ids_row_flip] *= -1
return matrix
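# Usage sketch (added; the values are illustrative assumptions): with a 4-row
# matrix and flip_proportion=0.5, int(4 * 0.5) == 2 randomly chosen rows are
# multiplied by -1; the seed argument makes the choice reproducible.
def _example_flip_sign_entire_rows():
  matrix = np.ones((4, 3))
  flipped = FlipSignEntireRows(flip_proportion=0.5).apply_transform(
      matrix, seed=0)
  assert (flipped < 0).all(axis=1).sum() == 2
  return flipped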
class ScaleWeightByDistance(WeightTransformation):
"""Scales connection weights of neurons with their distances.
Attributes:
scaling_function: either 'linear', 'quadratic', or 'exponential'.
box_size: length of one side of the cubic box, which is the region that
contains the neurons. Only used for scaling_function = 'exponential'.
Returns:
weights scaled with distances.
Raises:
ValueError: if wrong input for scaling_function is provided.
"""
def __init__(self, scaling_function, box_size=None):
valid_functions = {'linear', 'quadratic', 'exponential'}
if scaling_function not in valid_functions:
raise ValueError(
f'Wrong input for scale_weight_by_distance in scaling_function: '
f'{scaling_function}. Must be one of {valid_functions}.')
elif (scaling_function == 'exponential') and (box_size is None):
raise ValueError(
f'Missing argument for scale_weight_by_distance: scaling_function is '
f'{scaling_function}, but box_size is {box_size}')
self.scaling_function = scaling_function
self.box_size = box_size
def apply_transform(self, weights: np.ndarray,
distances: np.ndarray) -> np.ndarray:
"""Runs the step.
Args:
weights: weight matrix, each element is the connection weight between two
neurons (currently representing segments).
distances: distance matrix of the same size as weights, each element is
the distance between the two connected neurons.
Returns:
Scaled weight matrix.
"""
size = weights.shape[0]
# pad diagonal of distances with 1.0 since they are zero:
for n in range(size):
distances[n][n] = 1.0
if self.scaling_function == 'linear':
weights = weights / distances
elif self.scaling_function == 'quadratic':
weights = weights / distances**2
else: # self.scaling_function == 'exponential'
weights = weights * np.exp(-2 * distances / self.box_size)
return weights
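# Usage sketch (added; numbers are assumptions for illustration): with the
# 'linear' option each off-diagonal weight is divided by its distance, while
# the diagonal survives unchanged because the zero self-distances are padded
# with 1.0 first.
def _example_scale_weight_by_distance_linear():
  weights = np.array([[1.0, 4.0], [6.0, 1.0]])
  distances = np.array([[0.0, 2.0], [3.0, 0.0]])
  scaled = ScaleWeightByDistance('linear').apply_transform(weights, distances)
  # Expected result: [[1.0, 2.0], [2.0, 1.0]]
  return scaled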
class RandomizeNonZeroWeight(WeightTransformation):
"""Starts with a weight matrix and makes it random uniformly.
Based on the sparsity of the weight matrix, replace it with another one
with the same sparsity, but the non-zero values are chosen randomly
and placed at random locations.
"""
def __init__(self):
pass
def apply_transform(self, matrix: np.ndarray) -> np.ndarray:
"""Executes the transformation.
Args:
matrix: a square numpy array.
Returns:
A random numpy array of the same size and sparsity with those of input.
"""
# calculate how many values of weight matrix are zero
num_zero = np.sum((matrix == 0))
# initialize a 1D random matrix
random_weight = np.random.uniform(size=matrix.size)
# select randomly indices of values that will be made zero
ids_zero = np.random.choice(range(matrix.size), num_zero, replace=False)
# make these values zero
random_weight[ids_zero] = 0
return random_weight.reshape(matrix.shape)
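# Usage sketch (added): the output keeps the shape and the number of zero
# entries of the input, but the non-zero values and their positions are drawn
# at random.
def _example_randomize_non_zero_weight():
  matrix = np.array([[0.0, 0.5, 0.0], [0.2, 0.0, 0.9], [0.0, 0.0, 0.3]])
  randomized = RandomizeNonZeroWeight().apply_transform(matrix)
  assert randomized.shape == matrix.shape
  assert np.sum(randomized == 0) == np.sum(matrix == 0)
  return randomized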
class RandomizeNonZeroWeightKde(WeightTransformation):
"""Randomizes the weight matrix using kernel-density estimate.
This function produces a random weight matrix of the same size and sparsity
with those of the original matrix. The non-zero elements are sampled from
the probability density function of the original non-zero elements.
Attributes:
rng: The random state to use for generating arrays of random numbers.
Default is the global np.random module.
"""
def __init__(self, rng: np.random.RandomState = np.random):
self.rng = rng
def apply_transform(self, matrix: np.ndarray, seed: int = -1) -> np.ndarray:
"""Executes the transformation.
Args:
matrix: a square numpy array.
seed: Optional seed for np.random.seed()
Returns:
A random numpy array of the same sparsity and size with those of input.
"""
if seed > -1:
np.random.seed(seed)
# indices of the non-zero:
id_non_zeros = (matrix != 0)
# get non-zero elements
weight_non_zero = matrix[id_non_zeros].ravel()
# number of non-zero elements:
num_non_zeros = weight_non_zero.size
# There must be at least 2 non-zero elements:
if num_non_zeros < 2:
raise ValueError(
f'Expecting matrix of at least 2 non-zeros, but got {num_non_zeros}.')
# Non-zero elements must not all be the same:
if not np.sum(weight_non_zero != weight_non_zero[0]):
raise ValueError(
f'Expecting different non-zeros, but got only {weight_non_zero[0]}.')
# calculate the probability density function:
density = stats.gaussian_kde(weight_non_zero)
# get new non-zero weights:
weight_non_zero = density.resample(num_non_zeros).ravel()
# initiate a new random weight matrix:
random_weight = np.zeros(matrix.size)
# select randomly indices of values that will be made non-zero
id_non_zeros = self.rng.choice(
range(matrix.size), num_non_zeros, replace=False)
# assign non-zero weights from a sample of the density function:
random_weight[id_non_zeros] = weight_non_zero
return random_weight.reshape(matrix.shape)
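# Usage sketch (added): non-zero entries of the output are resampled from a
# Gaussian KDE fitted to the non-zero entries of the input, so the sparsity is
# preserved exactly while the value distribution is preserved approximately.
def _example_randomize_non_zero_weight_kde():
  rng = np.random.RandomState(seed=1)
  matrix = np.array([[0.0, 1.0, 0.0], [2.0, 0.0, 3.0], [0.0, 4.0, 0.0]])
  randomized = RandomizeNonZeroWeightKde(rng).apply_transform(matrix)
  assert np.sum(randomized != 0) == np.sum(matrix != 0)
  return randomized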
class CutOffSmallWeightsInRows(WeightTransformation):
"""Cuts off smallest weights of the weight matrix.
So that number of non-zeros per row does not exceed non_zeros_per_row_limit.
Attributes:
non_zeros_per_row_limit: limit of non-zeros to be retained per row.
"""
def __init__(self, non_zeros_per_row_limit: int):
self.non_zeros_per_row_limit = non_zeros_per_row_limit
def apply_transform(self, matrix: np.ndarray) -> np.ndarray:
"""Executes the transformation.
Args:
matrix: the weight matrix, a 2d square numpy array.
Returns:
matrix_transformed: the transformed weight matrix.
"""
num_row, num_col = matrix.shape
# number of smallest elements to be removed per row:
num_remove_per_row = num_col - self.non_zeros_per_row_limit
matrix_transformed = matrix.copy()
if num_remove_per_row > 0:
for i in range(num_row):
weight_row = matrix_transformed[i]
# get indices of num_remove_per_row smallest elements of this row
small_weight_indices = weight_row.argsort()[:num_remove_per_row]
# and set these values zero:
weight_row[small_weight_indices] = 0.0
return matrix_transformed
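# Usage sketch (added; the matrix is an illustrative assumption): with
# non_zeros_per_row_limit=2 on a 3x3 matrix, the single smallest entry of each
# row is zeroed out.
def _example_cutoff_small_weights_in_rows():
  matrix = np.array([[0.1, 0.4, 0.3],
                     [0.8, 0.5, 0.7],
                     [0.2, 0.9, 0.6]])
  trimmed = CutOffSmallWeightsInRows(2).apply_transform(matrix)
  # Expected: [[0.0, 0.4, 0.3], [0.8, 0.0, 0.7], [0.0, 0.9, 0.6]]
  return trimmed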
class ScaleToZeroOne(WeightTransformation):
"""Scales the weight matrix to the [0, 1] range."""
def __init__(self):
pass
def apply_transform(self, matrix: np.ndarray) -> np.ndarray:
matrix = matrix - matrix.min()
return matrix / matrix.max()
class ScaleSpectralRadius(WeightTransformation):
"""Scales the weight matrix to specified spectral radius."""
def __init__(self, spectral_radius: float = 1.0):
self.spectral_radius = spectral_radius
def apply_transform(self, matrix: np.ndarray) -> np.ndarray:
matrix_spectral_radius = weight_properties.get_spectral_radius(matrix)
return matrix / matrix_spectral_radius * self.spectral_radius
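# Usage sketch (added): ScaleToZeroOne maps the smallest entry to 0 and the
# largest to 1; ScaleSpectralRadius divides by the current spectral radius and
# multiplies by the requested one.
def _example_scaling_transforms():
  matrix = np.array([[1.0, 3.0], [2.0, 5.0]])
  unit_interval = ScaleToZeroOne().apply_transform(matrix)
  # Expected: [[0.0, 0.5], [0.25, 1.0]]
  rescaled = ScaleSpectralRadius(spectral_radius=1.0).apply_transform(matrix)
  return unit_interval, rescaled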
class GetSubMatrix(WeightTransformation):
"""Returns a sub-matrix composed of `num_neurons` first rows/columns.
If the original matrix already has a number of rows/columns less than or
equal to the input `num_neurons`, the original matrix is returned.
Attributes:
num_neurons: Number of first rows/columns to be retained.
"""
def __init__(self, num_neurons: int):
self.num_neurons = num_neurons
def apply_transform(self, matrix: np.ndarray) -> np.ndarray:
# numbers of rows and columns in the original matrix
num_rows, num_columns = matrix.shape
if num_rows != num_columns:
raise ValueError(f'A square matrix is expected, but input has number '
f'of rows = {num_rows}, which is different from '
f'number of columns = {num_columns}')
if num_rows > self.num_neurons:
return matrix[:self.num_neurons, :self.num_neurons]
return matrix
class ResizeNumRows(WeightTransformation):
"""Resizes the weight matrix to the target number of rows.
If the original matrix already has a number of rows larger than or equal
to the target number of rows, it is trimmed. Otherwise, new rows are added
that are built to maintain the sparsity and KDE distribution of the original.
Attributes:
target_num_rows: The target number of rows.
rng: The random state to use for generating arrays of random numbers.
Default is the global np.random module.
"""
def __init__(self,
target_num_rows: int,
rng: np.random.RandomState = np.random):
if target_num_rows < 1:
raise ValueError(
f'Expecting `target_num_rows` > 0, but getting {target_num_rows}')
self.target_num_rows = target_num_rows
self.rng = rng
def apply_transform(self, matrix: np.ndarray) -> np.ndarray:
original_num_rows = matrix.shape[0]
# If the original matrix is larger than or equal to the target
if original_num_rows >= self.target_num_rows:
return matrix[:self.target_num_rows, :]
# Otherwise:
# Number of new rows to be added to the matrix
num_additional_rows = self.target_num_rows - original_num_rows
# Number of whole blocks of the original size needed to cover the addition
num_new_blocks = num_additional_rows // original_num_rows
new_matrix = [matrix]
for _ in range(num_new_blocks):
# The addition is of the same sparsity and KDE distribution
random_kde = RandomizeNonZeroWeightKde(self.rng).apply_transform(matrix)
new_matrix.append(random_kde)
# Add the remainders
num_remainder_rows = num_additional_rows % original_num_rows
if num_remainder_rows:
random_kde = RandomizeNonZeroWeightKde(self.rng).apply_transform(
matrix[:num_remainder_rows, :])
new_matrix.append(random_kde)
return np.vstack(new_matrix)
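# Usage sketch (added): growing a 2x3 matrix to 5 rows stacks one full
# KDE-randomized copy (2 rows) plus a 1-row KDE-randomized remainder under the
# original block.
def _example_resize_num_rows():
  rng = np.random.RandomState(seed=2)
  matrix = np.array([[0.0, 1.0, 2.0], [3.0, 0.0, 4.0]])
  resized = ResizeNumRows(target_num_rows=5, rng=rng).apply_transform(matrix)
  assert resized.shape == (5, 3)
  return resized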
class ResizeNumColumns(WeightTransformation):
"""Resizes the weight matrix to the target number of columns.
If the original matrix already has a number of columns larger than or equal
to the target number of columns, it is trimmed. Otherwise, new columns are
added while maintaining the sparsity and KDE distribution of the original matrix.
Attributes:
target_num_columns: The target number of columns.
rng: The random state to use for generating arrays of random numbers.
Default is the global np.random module.
"""
def __init__(self,
target_num_columns: int,
rng: np.random.RandomState = np.random):
if target_num_columns < 1:
raise ValueError(
f'Expecting target_num_columns > 0, but getting {target_num_columns}')
self.target_num_columns = target_num_columns
self.rng = rng
def apply_transform(self, matrix: np.ndarray) -> np.ndarray:
original_num_columns = matrix.shape[1]
# If the original matrix is larger than or equal to the target
if original_num_columns >= self.target_num_columns:
return matrix[:, :self.target_num_columns]
# Otherwise:
# Number of new columns to be added to the matrix
num_additional_columns = self.target_num_columns - original_num_columns
# Number of whole blocks of the original size needed to cover the addition
num_new_blocks = num_additional_columns // original_num_columns
new_matrix = [matrix]
for _ in range(num_new_blocks):
# The addition is of the same sparsity and KDE distribution
random_kde = RandomizeNonZeroWeightKde(self.rng).apply_transform(matrix)
new_matrix.append(random_kde)
# Add the remainders
num_remainder_columns = num_additional_columns % original_num_columns
if num_remainder_columns:
random_kde = RandomizeNonZeroWeightKde(self.rng).apply_transform(
matrix[:, :num_remainder_columns])
new_matrix.append(random_kde)
return np.hstack(new_matrix)
def flip_sign_entire_rows(matrix: np.ndarray,
flip_proportion: float) -> np.ndarray:
"""Flips signs of a number of rows of the matrix to negative.
Args:
matrix: the matrix to be transformed in numpy array
flip_proportion: proportion of the number of rows to be flipped
Returns:
The weight matrix with signs randomly flipped in some rows.
"""
return FlipSignEntireRows(flip_proportion).apply_transform(matrix)
def randomize_non_zero_weights_kde(matrix: np.ndarray) -> np.ndarray:
"""Randomizes the weight matrix using kernel-density estimate.
This function produces a random weight matrix of the same size and sparsity
with those of the original matrix. The non-zero elements are sampled from
the probability density function of the original non-zero elements.
Args:
matrix: a square numpy array.
Returns:
A random numpy array of the same sparsity and size with those of input.
"""
return RandomizeNonZeroWeightKde().apply_transform(matrix)
def randomize_non_zero_weights(matrix: np.ndarray) -> np.ndarray:
"""Starts with a weight matrix and makes it random uniformly.
Based on the sparsity of the weight matrix, replace it with another one
with the same sparsity, but the non-zero values are chosen randomly
and placed at random locations.
Args:
matrix: A square matrix.
Returns:
A random numpy array of the same size and sparsity with those of input.
"""
return RandomizeNonZeroWeight().apply_transform(matrix)
def shuffle_weights(matrix: np.ndarray) -> np.ndarray:
"""Shuffles the weights in the weight matrix.
Args:
matrix: the weight matrix.
Returns:
matrix_shuffled: A shuffled matrix.
"""
nrows, ncols = matrix.shape
matrix_shuffled = np.reshape(matrix, (nrows * ncols))
np.random.shuffle(matrix_shuffled)
matrix_shuffled = np.reshape(matrix_shuffled, (nrows, ncols))
return matrix_shuffled
def assign_random_signs(matrix: np.ndarray,
inhibitory_proportion: float) -> np.ndarray:
"""Assigns plus or minus signs randomly to the weight matrix.
given the proportion of connections that should be inhibitory.
Args:
matrix: The weight matrix.
inhibitory_proportion: A [0, 1] number, proportion of inhibitory
connections.
Returns:
The resulting matrix.
"""
# Make all the connections positive
matrix = np.abs(matrix)
# Generate random matrix
random_matrix = np.random.random(matrix.shape)
# Select the portion randomly to reverse the sign
inhibitory_mask = random_matrix < inhibitory_proportion
matrix[inhibitory_mask] = -matrix[inhibitory_mask]
return matrix
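# Usage sketch (added): roughly inhibitory_proportion of the entries end up
# negative; the exact count varies because the mask is drawn at random, but
# the magnitudes are unchanged.
def _example_assign_random_signs():
  matrix = np.full((10, 10), 0.5)
  signed = assign_random_signs(matrix, inhibitory_proportion=0.3)
  assert signed.shape == matrix.shape
  assert np.all(np.abs(signed) == 0.5)
  return signed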
def scale_weight_by_distance(weights,
distances,
scaling_function,
box_size=None):
"""Scales connection weights of neurons with their distances.
Args:
weights: weight matrix, each element is the connection weight between two
neurons (currently representing segments).
distances: distance matrix of the same size as weights, each element is the
distance between the two connected neurons.
scaling_function: either 'linear', 'quadratic', or 'exponential'.
box_size: length of one side of the cubic box, which is the region that
contains the neurons. Only used for scaling_function = 'exponential'.
Returns:
weights scaled with distances.
"""
return ScaleWeightByDistance(scaling_function,
box_size).apply_transform(weights, distances)
def make_sparse(weight_array: np.ndarray,
zero_weight_proportion: float) -> np.ndarray:
"""Sets an arbitrary percentage (pct) of weights in an input matrix to 0.
Args:
weight_array: The weight array
zero_weight_proportion: A [0,1] number, proportion of weights that should
be set to 0
Returns:
The resulting array.
"""
num_zeros_initial = np.sum(weight_array == 0)
if num_zeros_initial >= (weight_array.size * zero_weight_proportion):
print('This matrix is already sparser than requested')
return weight_array
idx = np.random.choice(
np.arange(weight_array.size),
replace=False,
size=int((weight_array.size * zero_weight_proportion) -
num_zeros_initial))
weight_array[np.unravel_index(idx, weight_array.shape)] = 0
return weight_array
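# Illustrative usage sketch (not part of the original module): zero out half
# of the entries of a small dense matrix. Note that make_sparse also modifies
# the passed-in array in place.
def _example_make_sparse() -> np.ndarray:
  weights = np.arange(1, 17, dtype=float).reshape(4, 4)
  sparse = make_sparse(weights, zero_weight_proportion=0.5)
  # 8 of the 16 entries are now exactly 0.
  return sparse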
def cutoff_small_weights_in_rows(matrix: np.ndarray,
non_zeros_per_row_limit: int) -> np.ndarray:
"""Cuts off smallest weights of the weight matrix.
So that number of non-zeros per row does not exceed non_zeros_per_row_limit.
Args:
matrix: the weight matrix, a 2d square numpy array.
non_zeros_per_row_limit: limit of non-zeros to be retained per row.
Returns:
matrix_transformed: the transformed weight matrix.
"""
return CutOffSmallWeightsInRows(non_zeros_per_row_limit).apply_transform(
matrix)
def transform_weight(weight_matrix: np.ndarray, distance_matrix: np.ndarray,
params: ml_collections.ConfigDict) -> np.ndarray:
"""Transforms the weight matrix with the following step.
1. Scale the connection weights with the distances between connected neurons.
2. Only keep largest weights on each row.
3. If signaled, a random weight matrix is generated to replace the original
while retaining the size and sparsity of the original.
4. Scale the weight matrix to [0, 1] assuming sparsity > 0.
  5. Convert a proportion of neurons to inhibitory (all elements in the row
     made negative).
Parameters to provide:
distance_scaling_function - name of method to scale weight by distance.
Values are 'none', 'linear', 'quadratic', 'exponential'.
box_size - length of one side of the cubic brain region from which
the weight and distance matrices were extracted.
num_cutoff - number of non zeros to keep on each row of the weight matrix,
the rest are made zero. Make num_cutoff arbitrarily large to keep all.
    random_weights - boolean, whether to replace the original weight matrix
      with a random matrix of the same size and sparsity as the original.
    kde_random_weights - boolean. If random_weights is True, this parameter
      decides whether the random weights are generated with the same
      distribution as those in the original weight matrix.
    inhibitory_neurons_proportion - proportion of rows that are made negative.
Args:
weight_matrix: 2D square numpy array.
distance_matrix: 2D square numpy array of the same shape of weight_matrix.
params: ConfigDict of parameters used for transformation, listed above.
Returns:
The transformed weight matrix.
"""
transformation = setup_weight_transformation(params)
if params.distance_scaling_function != 'none':
return transformation.apply_transform(weight_matrix, distance_matrix)
return transformation.apply_transform(weight_matrix)
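# Illustrative usage sketch (parameter values are hypothetical, not part of
# the original module): build a ConfigDict with the fields documented above
# and run the full chain of transformations on random weight and distance
# matrices.
def _example_transform_weight() -> np.ndarray:
  rng = np.random.default_rng(0)
  weights = rng.random((20, 20))
  distances = rng.random((20, 20))
  params = ml_collections.ConfigDict({
      'distance_scaling_function': 'linear',
      'box_size': 1.0,
      'num_cutoff': 5,
      'random_weights': False,
      'kde_random_weights': False,
      'inhibitory_neurons_proportion': 0.2,
  })
  return transform_weight(weights, distances, params)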
def setup_weight_transformation(params: ml_collections.ConfigDict) -> Chain:
"""Sets up the chain of transformations to transform the weight matrix.
The chain might include:
1. Scale the connection weights with the distances between connected neurons.
2. Only keep largest weights on each row.
3. If signaled, a random weight matrix is generated to replace the original
while retaining the size and sparsity of the original.
4. Scale the weight matrix to [0, 1] assuming sparsity > 0.
  5. Convert a proportion of neurons to inhibitory (all elements in the row
     made negative).
Args:
    params: ConfigDict of parameters used for transformation, including:
      distance_scaling_function - name of method to scale weight by distance.
        Values are 'none', 'linear', 'quadratic', 'exponential'.
      box_size - length of one side of the cubic brain region from which the
        weight and distance matrices were extracted.
      num_cutoff - number of non zeros to keep on each row of the weight
        matrix, the rest are made zero. Make num_cutoff arbitrarily large to
        keep all.
      random_weights - boolean, whether to replace the original weight matrix
        with a random matrix of the same size and sparsity as the original.
      kde_random_weights - boolean. If random_weights is True, this parameter
        decides whether the random weights are generated with the same
        distribution as those in the original weight matrix.
      inhibitory_neurons_proportion - proportion of rows that are made
        negative.
Returns:
The chained transformations.
"""
transformation = Chain()
# scale connection weight by distance between connected neurons:
if params.distance_scaling_function != 'none':
transformation.append(
ScaleWeightByDistance(params.distance_scaling_function,
params.box_size))
# only keep largest weights on each row:
transformation.append(CutOffSmallWeightsInRows(params.num_cutoff))
# if random weights should be used
if params.random_weights:
# if kernel-density estimate is used to randomize non-zero elements:
if params.kde_random_weights:
transformation.append(RandomizeNonZeroWeightKde())
else:
transformation.append(RandomizeNonZeroWeight())
# scale weights to [0, 1] range:
transformation.append(ScaleToZeroOne())
# convert neurons to inhibitory:
if params.inhibitory_neurons_proportion > 0:
transformation.append(
FlipSignEntireRows(params.inhibitory_neurons_proportion))
return transformation
def chain_weight_matrices(matrices: List[np.ndarray]) -> List[np.ndarray]:
"""Chains the matrices in the list.
The purpose is to trim the matrices so their multiplication can be done with
their order in the chain.
  Suppose we have two matrices in the list, matrix A represents the connections
  between two sets of neurons Set_1 and Set_2 (the two sets might fully or
  partially overlap, or be completely separate), and matrix B represents the
  connections between Set_3 and Set_4. Ideally, to realistically represent the
  actual brain, we would find the common subset of Set_2 and Set_3 and trim
  both A and B according to this subset. For simplicity, we assume either Set_2
  is a subset of Set_3 or vice versa depending on their sizes.
In practice, we often have matrices from separate regions and the
post-synaptic neurons of the preceding regions might not overlap with the
pre-synaptic neurons of the succeeding regions. In this case, we don't have a
good way to tell which neurons are important to keep. For simplicity, we keep
neurons that appear first and discard those that appear last in the matrix.
Therefore, for each pair of adjacent matrices, the number of columns of the
preceding matrix is compared to the number of rows of the succeeding matrix,
and the larger is trimmed down to be equal to the smaller, and in this
process, the neurons that are trimmed appear last in the original matrix.
Args:
matrices: The list of matrices to be chained.
Returns:
The list of chained matrices.
"""
num_matrices = len(matrices)
if num_matrices < 2:
return matrices
new_matrices = []
for i in range(num_matrices - 1):
# compare num_columns of the preceding to num_rows of the succeeding:
smaller_num_neurons = min(matrices[i].shape[1], matrices[i + 1].shape[0])
# append the new matrix at position i to the new list:
new_matrices.append(matrices[i][:, :smaller_num_neurons])
# update the matrix at position i+1 in the old list:
matrices[i + 1] = matrices[i + 1][:smaller_num_neurons, :]
# append the last matrix to the new list:
new_matrices.append(matrices[num_matrices - 1])
return new_matrices
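# Illustrative usage sketch (not part of the original module): the 8 columns
# of `a` are trimmed down to the 6 rows of `b`, so the chained matrices have
# shapes (5, 6) and (6, 4) and can be multiplied in order.
def _example_chain_weight_matrices() -> np.ndarray:
  a = np.ones((5, 8))
  b = np.ones((6, 4))
  chained = chain_weight_matrices([a, b])
  return chained[0] @ chained[1]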
def resize_weight_matrices(
reservoir_weights: List[np.ndarray],
reservoirs_num_neurons: Tuple[int, ...],
) -> List[np.ndarray]:
"""Resizes the weight matrices to the target numbers of neurons.
Args:
reservoir_weights: The weight matrices to be transformed.
reservoirs_num_neurons: The target numbers of neurons of the reservoirs. The
number of elements must be the same as that of `reservoir_weights`.
Returns:
The transformed weight matrices.
"""
# Check length matching of `reservoir_weights` and `reservoirs_num_neurons`
if len(reservoir_weights) != len(reservoirs_num_neurons):
raise ValueError(
f'`reservoirs_num_neurons` has {len(reservoirs_num_neurons)} elements '
f'but `reservoir_weights` has {len(reservoir_weights)} elements.')
for i, num_neurons in enumerate(reservoirs_num_neurons):
resize_rows = ResizeNumRows(num_neurons)
reservoir_weights[i] = resize_rows.apply_transform(reservoir_weights[i])
resize_columns = ResizeNumColumns(num_neurons)
reservoir_weights[i] = resize_columns.apply_transform(reservoir_weights[i])
return reservoir_weights
|
|
# Copyright 2018 Google LLC.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from this
# software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
"""Tests for sam_reader CLIF python wrappers."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import sys
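# The next two lines appear to work around a namespace-package clash: if a
# stub `google` package was imported before `google.protobuf`, it is dropped
# so that the proto modules imported below can find google.protobuf. (This is
# our reading of the workaround; the original file does not document it.)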
if 'google' in sys.modules and 'google.protobuf' not in sys.modules:
del sys.modules['google']
from absl.testing import absltest
from absl.testing import parameterized
from third_party.nucleus.io import clif_postproc
from third_party.nucleus.io.python import sam_reader
from third_party.nucleus.protos import reads_pb2
from third_party.nucleus.protos import reference_pb2
from third_party.nucleus.testing import test_utils
from third_party.nucleus.util import ranges
class SamReaderTest(parameterized.TestCase):
def setUp(self):
self.bam = test_utils.genomics_core_testdata('test.bam')
self.options = reads_pb2.SamReaderOptions()
def test_bam_iterate(self):
reader = sam_reader.SamReader.from_file(
reads_path=self.bam, ref_path='', options=self.options)
with reader:
iterable = reader.iterate()
self.assertIsInstance(iterable, clif_postproc.WrappedCppIterable)
self.assertEqual(test_utils.iterable_len(iterable), 106)
def test_bam_query(self):
reader = sam_reader.SamReader.from_file(
reads_path=self.bam, ref_path='', options=self.options)
expected = [(ranges.parse_literal('chr20:10,000,000-10,000,100'), 106),
(ranges.parse_literal('chr20:10,000,000-10,000,000'), 45)]
with reader:
for interval, n_expected in expected:
with reader.query(interval) as iterable:
self.assertIsInstance(iterable, clif_postproc.WrappedCppIterable)
self.assertEqual(test_utils.iterable_len(iterable), n_expected)
def test_bam_samples(self):
reader = sam_reader.SamReader.from_file(
reads_path=self.bam, ref_path='', options=self.options)
with reader:
self.assertLen(reader.header.read_groups, 1)
self.assertEqual(reader.header.read_groups[0].sample_id, 'NA12878')
def test_sam_contigs(self):
reader = sam_reader.SamReader.from_file(
reads_path=self.bam, ref_path='', options=self.options)
with reader:
self.assertEqual([
reference_pb2.ContigInfo(name='chrM', pos_in_fasta=0, n_bases=16571),
reference_pb2.ContigInfo(
name='chr1', pos_in_fasta=1, n_bases=249250621),
reference_pb2.ContigInfo(
name='chr2', pos_in_fasta=2, n_bases=243199373),
reference_pb2.ContigInfo(
name='chr3', pos_in_fasta=3, n_bases=198022430),
reference_pb2.ContigInfo(
name='chr4', pos_in_fasta=4, n_bases=191154276),
reference_pb2.ContigInfo(
name='chr5', pos_in_fasta=5, n_bases=180915260),
reference_pb2.ContigInfo(
name='chr6', pos_in_fasta=6, n_bases=171115067),
reference_pb2.ContigInfo(
name='chr7', pos_in_fasta=7, n_bases=159138663),
reference_pb2.ContigInfo(
name='chr8', pos_in_fasta=8, n_bases=146364022),
reference_pb2.ContigInfo(
name='chr9', pos_in_fasta=9, n_bases=141213431),
reference_pb2.ContigInfo(
name='chr10', pos_in_fasta=10, n_bases=135534747),
reference_pb2.ContigInfo(
name='chr11', pos_in_fasta=11, n_bases=135006516),
reference_pb2.ContigInfo(
name='chr12', pos_in_fasta=12, n_bases=133851895),
reference_pb2.ContigInfo(
name='chr13', pos_in_fasta=13, n_bases=115169878),
reference_pb2.ContigInfo(
name='chr14', pos_in_fasta=14, n_bases=107349540),
reference_pb2.ContigInfo(
name='chr15', pos_in_fasta=15, n_bases=102531392),
reference_pb2.ContigInfo(
name='chr16', pos_in_fasta=16, n_bases=90354753),
reference_pb2.ContigInfo(
name='chr17', pos_in_fasta=17, n_bases=81195210),
reference_pb2.ContigInfo(
name='chr18', pos_in_fasta=18, n_bases=78077248),
reference_pb2.ContigInfo(
name='chr19', pos_in_fasta=19, n_bases=59128983),
reference_pb2.ContigInfo(
name='chr20', pos_in_fasta=20, n_bases=63025520),
reference_pb2.ContigInfo(
name='chr21', pos_in_fasta=21, n_bases=48129895),
reference_pb2.ContigInfo(
name='chr22', pos_in_fasta=22, n_bases=51304566),
reference_pb2.ContigInfo(
name='chrX', pos_in_fasta=23, n_bases=155270560),
reference_pb2.ContigInfo(
name='chrY', pos_in_fasta=24, n_bases=59373566),
], list(reader.header.contigs))
def test_context_manager(self):
"""Test that we can use context manager to do two queries in sequence."""
reader = sam_reader.SamReader.from_file(
reads_path=self.bam, ref_path='', options=self.options)
region = ranges.parse_literal('chr20:10,000,000-10,000,100')
with reader:
with reader.query(region) as query_iterable1:
self.assertIsNotNone(query_iterable1)
self.assertIsInstance(query_iterable1, clif_postproc.WrappedCppIterable)
with reader.query(region) as query_iterable2:
self.assertIsNotNone(query_iterable2)
self.assertIsInstance(query_iterable2, clif_postproc.WrappedCppIterable)
def test_from_file_raises_with_missing_bam(self):
# redacted
with self.assertRaisesRegex(ValueError, 'Could not open missing.bam'):
sam_reader.SamReader.from_file(
reads_path='missing.bam', ref_path='', options=self.options)
def test_ops_on_closed_reader_raise(self):
reader = sam_reader.SamReader.from_file(
reads_path=self.bam, ref_path='', options=self.options)
with reader:
pass
# At this point the reader is closed.
with self.assertRaisesRegex(ValueError, 'Cannot Iterate a closed'):
reader.iterate()
with self.assertRaisesRegex(ValueError, 'Cannot Query a closed'):
reader.query(ranges.parse_literal('chr20:10,000,000-10,000,100'))
@parameterized.parameters('test.sam', 'unindexed.bam')
def test_query_without_index_raises(self, unindexed_file_name):
path = test_utils.genomics_core_testdata(unindexed_file_name)
window = ranges.parse_literal('chr20:10,000,000-10,000,100')
with sam_reader.SamReader.from_file(
reads_path=path, ref_path='', options=self.options) as reader:
with self.assertRaisesRegex(ValueError, 'Cannot query without an index'):
reader.query(window)
def test_query_raises_with_bad_range(self):
with sam_reader.SamReader.from_file(
reads_path=self.bam, ref_path='', options=self.options) as reader:
with self.assertRaisesRegex(ValueError, 'Unknown reference_name'):
reader.query(ranges.parse_literal('XXX:1-10'))
with self.assertRaisesRegex(ValueError, 'unknown reference interval'):
reader.query(ranges.parse_literal('chr20:10-5'))
def test_sam_iterate_raises_on_malformed_record(self):
malformed = test_utils.genomics_core_testdata('malformed.sam')
reader = sam_reader.SamReader.from_file(
reads_path=malformed, ref_path='', options=self.options)
iterable = iter(reader.iterate())
self.assertIsNotNone(next(iterable))
with self.assertRaises(ValueError):
list(iterable)
def test_headless_sam_raises(self):
headerless = test_utils.genomics_core_testdata('headerless.sam')
with self.assertRaisesRegex(ValueError,
'Could not parse file with bad SAM header'):
sam_reader.SamReader.from_file(
reads_path=headerless, ref_path='', options=self.options)
if __name__ == '__main__':
absltest.main()
|
|
# Copyright 2012 OpenStack Foundation
# Copyright 2013 IBM Corp.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import logging
import os
import subprocess
# Default client libs
import cinderclient.client
import glanceclient
import heatclient.client
import keystoneclient.apiclient.exceptions
import keystoneclient.v2_0.client
import netaddr
from neutronclient.common import exceptions as exc
import neutronclient.v2_0.client
import novaclient.client
from novaclient import exceptions as nova_exceptions
import swiftclient
from tempest.api.network import common as net_common
from tempest.common import isolated_creds
from tempest.common.utils import data_utils
from tempest.common.utils.linux.remote_client import RemoteClient
from tempest import exceptions
import tempest.manager
from tempest.openstack.common import log
import tempest.test
import fusionclient.client
LOG = log.getLogger(__name__)
# NOTE(afazekas): Workaround for the stdout logging
LOG_nova_client = logging.getLogger('novaclient.client')
LOG_nova_client.addHandler(log.NullHandler())
LOG_cinder_client = logging.getLogger('cinderclient.client')
LOG_cinder_client.addHandler(log.NullHandler())
class OfficialClientManager(tempest.manager.Manager):
"""
Manager that provides access to the official python clients for
calling various OpenStack APIs.
"""
NOVACLIENT_VERSION = '2'
CINDERCLIENT_VERSION = '1'
HEATCLIENT_VERSION = '1'
FUSIONCLIENT_VERSION = '1'
def __init__(self, username, password, tenant_name):
super(OfficialClientManager, self).__init__()
self.compute_client = self._get_compute_client(username,
password,
tenant_name)
self.identity_client = self._get_identity_client(username,
password,
tenant_name)
# self.image_client = self._get_image_client()
self.network_client = self._get_network_client()
self.volume_client = self._get_volume_client(username,
password,
tenant_name)
# self.object_storage_client = self._get_object_storage_client(
# username,
# password,
# tenant_name)
self.orchestration_client = self._get_orchestration_client(
username,
password,
tenant_name)
self.fusion_client = self._get_fusion_client(
username,
password,
tenant_name)
def _get_compute_client(self, username, password, tenant_name):
# Novaclient will not execute operations for anyone but the
# identified user, so a new client needs to be created for
# each user that operations need to be performed for.
self._validate_credentials(username, password, tenant_name)
auth_url = self.config.identity.uri
dscv = self.config.identity.disable_ssl_certificate_validation
region = self.config.identity.region
client_args = (username, password, tenant_name, auth_url)
# Create our default Nova client to use in testing
service_type = self.config.compute.catalog_type
return novaclient.client.Client(self.NOVACLIENT_VERSION,
*client_args,
service_type=service_type,
region_name=region,
no_cache=True,
insecure=dscv,
http_log_debug=True)
def _get_image_client(self):
token = self.identity_client.auth_token
region = self.config.identity.region
endpoint = self.identity_client.service_catalog.url_for(
attr='region', filter_value=region,
service_type='image', endpoint_type='publicURL')
dscv = self.config.identity.disable_ssl_certificate_validation
return glanceclient.Client('1', endpoint=endpoint, token=token,
insecure=dscv)
def _get_volume_client(self, username, password, tenant_name):
auth_url = self.config.identity.uri
region = self.config.identity.region
return cinderclient.client.Client(self.CINDERCLIENT_VERSION,
username,
password,
tenant_name,
auth_url,
region_name=region,
http_log_debug=True)
def _get_object_storage_client(self, username, password, tenant_name):
auth_url = self.config.identity.uri
# add current tenant to Member group.
keystone_admin = self._get_identity_client(
self.config.identity.admin_username,
self.config.identity.admin_password,
self.config.identity.admin_tenant_name)
# enable test user to operate swift by adding Member role to him.
roles = keystone_admin.roles.list()
member_role = [role for role in roles if role.name == 'Member'][0]
        # NOTE(maurosr): This is surrounded by a try-except block because
        # neutron tests don't have tenant isolation.
try:
keystone_admin.roles.add_user_role(self.identity_client.user_id,
member_role.id,
self.identity_client.tenant_id)
except keystoneclient.apiclient.exceptions.Conflict:
pass
return swiftclient.Connection(auth_url, username, password,
tenant_name=tenant_name,
auth_version='2')
def _get_orchestration_client(self, username=None, password=None,
tenant_name=None):
if not username:
username = self.config.identity.admin_username
if not password:
password = self.config.identity.admin_password
if not tenant_name:
tenant_name = self.config.identity.tenant_name
self._validate_credentials(username, password, tenant_name)
keystone = self._get_identity_client(username, password, tenant_name)
region = self.config.identity.region
token = keystone.auth_token
try:
endpoint = keystone.service_catalog.url_for(
attr='region',
filter_value=region,
service_type='orchestration',
endpoint_type='publicURL')
except keystoneclient.exceptions.EndpointNotFound:
return None
else:
return heatclient.client.Client(self.HEATCLIENT_VERSION,
endpoint,
token=token,
username=username,
password=password)
def _get_fusion_client(self, username=None, password=None,
tenant_name=None):
if not username:
username = self.config.identity.admin_username
if not password:
password = self.config.identity.admin_password
if not tenant_name:
tenant_name = self.config.identity.tenant_name
self._validate_credentials(username, password, tenant_name)
keystone = self._get_identity_client(username, password, tenant_name)
region = self.config.identity.region
token = keystone.auth_token
try:
endpoint = keystone.service_catalog.url_for(
attr='region',
filter_value=region,
service_type='orchestration',
endpoint_type='publicURL')
except keystoneclient.exceptions.EndpointNotFound:
return None
else:
return fusionclient.client.Client(self.FUSIONCLIENT_VERSION,
endpoint,
token=token,
username=username,
password=password)
def _get_identity_client(self, username, password, tenant_name):
# This identity client is not intended to check the security
# of the identity service, so use admin credentials by default.
self._validate_credentials(username, password, tenant_name)
auth_url = self.config.identity.uri
dscv = self.config.identity.disable_ssl_certificate_validation
return keystoneclient.v2_0.client.Client(username=username,
password=password,
tenant_name=tenant_name,
auth_url=auth_url,
insecure=dscv)
def _get_network_client(self):
# The intended configuration is for the network client to have
# admin privileges and indicate for whom resources are being
# created via a 'tenant_id' parameter. This will often be
# preferable to authenticating as a specific user because
# working with certain resources (public routers and networks)
# often requires admin privileges anyway.
username = self.config.identity.admin_username
password = self.config.identity.admin_password
tenant_name = self.config.identity.admin_tenant_name
self._validate_credentials(username, password, tenant_name)
auth_url = self.config.identity.uri
dscv = self.config.identity.disable_ssl_certificate_validation
return neutronclient.v2_0.client.Client(username=username,
password=password,
tenant_name=tenant_name,
auth_url=auth_url,
insecure=dscv)
class OfficialClientTest(tempest.test.BaseTestCase):
"""
Official Client test base class for scenario testing.
Official Client tests are tests that have the following characteristics:
* Test basic operations of an API, typically in an order that
a regular user would perform those operations
* Test only the correct inputs and action paths -- no fuzz or
random input data is sent, only valid inputs.
* Use only the default client tool for calling an API
"""
@classmethod
def setUpClass(cls):
super(OfficialClientTest, cls).setUpClass()
cls.isolated_creds = isolated_creds.IsolatedCreds(
__name__, tempest_client=False,
network_resources=cls.network_resources)
username, password, tenant_name = cls.credentials()
cls.manager = OfficialClientManager(username, password, tenant_name)
cls.compute_client = cls.manager.compute_client
# cls.image_client = cls.manager.image_client
cls.identity_client = cls.manager.identity_client
# cls.network_client = cls.manager.network_client
# cls.volume_client = cls.manager.volume_client
# cls.object_storage_client = cls.manager.object_storage_client
cls.orchestration_client = cls.manager.orchestration_client
cls.fusion_client = cls.manager.fusion_client
cls.resource_keys = {}
cls.os_resources = []
@classmethod
def _get_credentials(cls, get_creds, prefix):
if cls.config.compute.allow_tenant_isolation:
username, tenant_name, password = get_creds()
else:
username = getattr(cls.config.identity, prefix + 'username')
password = getattr(cls.config.identity, prefix + 'password')
tenant_name = getattr(cls.config.identity, prefix + 'tenant_name')
return username, password, tenant_name
@classmethod
def credentials(cls):
return cls._get_credentials(cls.isolated_creds.get_primary_creds, '')
@classmethod
def alt_credentials(cls):
return cls._get_credentials(cls.isolated_creds.get_alt_creds, 'alt_')
@classmethod
def admin_credentials(cls):
return cls._get_credentials(cls.isolated_creds.get_admin_creds,
'admin_')
@classmethod
def tearDownClass(cls):
# NOTE(jaypipes): Because scenario tests are typically run in a
# specific order, and because test methods in scenario tests
# generally create resources in a particular order, we destroy
# resources in the reverse order in which resources are added to
# the scenario test class object
while cls.os_resources:
thing = cls.os_resources.pop()
LOG.debug("Deleting %r from shared resources of %s" %
(thing, cls.__name__))
try:
# OpenStack resources are assumed to have a delete()
# method which destroys the resource...
thing.delete()
except Exception as e:
# If the resource is already missing, mission accomplished.
# add status code as workaround for bug 1247568
                if (e.__class__.__name__ == 'NotFound' or
                        (hasattr(e, 'status_code') and
                         e.status_code == 404)):
continue
raise
def is_deletion_complete():
# Deletion testing is only required for objects whose
# existence cannot be checked via retrieval.
if isinstance(thing, dict):
return True
try:
thing.get()
except Exception as e:
# Clients are expected to return an exception
# called 'NotFound' if retrieval fails.
if e.__class__.__name__ == 'NotFound':
return True
raise
return False
# Block until resource deletion has completed or timed-out
tempest.test.call_until_true(is_deletion_complete, 10, 1)
cls.isolated_creds.clear_isolated_creds()
super(OfficialClientTest, cls).tearDownClass()
@classmethod
def set_resource(cls, key, thing):
LOG.debug("Adding %r to shared resources of %s" %
(thing, cls.__name__))
cls.resource_keys[key] = thing
cls.os_resources.append(thing)
@classmethod
def get_resource(cls, key):
return cls.resource_keys[key]
@classmethod
def remove_resource(cls, key):
thing = cls.resource_keys[key]
cls.os_resources.remove(thing)
del cls.resource_keys[key]
def status_timeout(self, things, thing_id, expected_status,
error_status='ERROR',
not_found_exception=nova_exceptions.NotFound):
"""
Given a thing and an expected status, do a loop, sleeping
for a configurable amount of time, checking for the
expected status to show. At any time, if the returned
status of the thing is ERROR, fail out.
"""
self._status_timeout(things, thing_id,
expected_status=expected_status,
error_status=error_status,
not_found_exception=not_found_exception)
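    # Hypothetical usage sketch (mirrors how create_server below waits for a
    # server to boot; `client` and `server` are illustrative names):
    #
    #   self.status_timeout(client.servers, server.id, 'ACTIVE')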
def delete_timeout(self, things, thing_id,
error_status='ERROR',
not_found_exception=nova_exceptions.NotFound):
"""
Given a thing, do a loop, sleeping
for a configurable amount of time, checking for the
deleted status to show. At any time, if the returned
status of the thing is ERROR, fail out.
"""
self._status_timeout(things,
thing_id,
allow_notfound=True,
error_status=error_status,
not_found_exception=not_found_exception)
def _status_timeout(self,
things,
thing_id,
expected_status=None,
allow_notfound=False,
error_status='ERROR',
not_found_exception=nova_exceptions.NotFound):
log_status = expected_status if expected_status else ''
if allow_notfound:
log_status += ' or NotFound' if log_status != '' else 'NotFound'
def check_status():
# python-novaclient has resources available to its client
# that all implement a get() method taking an identifier
# for the singular resource to retrieve.
try:
thing = things.get(thing_id)
except not_found_exception:
if allow_notfound:
return True
else:
raise
new_status = thing.status
# Some components are reporting error status in lower case
# so case sensitive comparisons can really mess things
# up.
if new_status.lower() == error_status.lower():
message = ("%s failed to get to expected status. "
"In %s state.") % (thing, new_status)
raise exceptions.BuildErrorException(message)
elif new_status == expected_status and expected_status is not None:
return True # All good.
LOG.debug("Waiting for %s to get to %s status. "
"Currently in %s status",
thing, log_status, new_status)
if not tempest.test.call_until_true(
check_status,
self.config.compute.build_timeout,
self.config.compute.build_interval):
message = ("Timed out waiting for thing %s "
"to become %s") % (thing_id, log_status)
raise exceptions.TimeoutException(message)
def _create_loginable_secgroup_rule_nova(self, client=None,
secgroup_id=None):
if client is None:
client = self.compute_client
if secgroup_id is None:
sgs = client.security_groups.list()
for sg in sgs:
if sg.name == 'default':
secgroup_id = sg.id
# These rules are intended to permit inbound ssh and icmp
# traffic from all sources, so no group_id is provided.
# Setting a group_id would only permit traffic from ports
# belonging to the same security group.
rulesets = [
{
# ssh
'ip_protocol': 'tcp',
'from_port': 22,
'to_port': 22,
'cidr': '0.0.0.0/0',
},
{
# ping
'ip_protocol': 'icmp',
'from_port': -1,
'to_port': -1,
'cidr': '0.0.0.0/0',
}
]
rules = list()
for ruleset in rulesets:
sg_rule = client.security_group_rules.create(secgroup_id,
**ruleset)
self.set_resource(sg_rule.id, sg_rule)
rules.append(sg_rule)
return rules
    def create_server(self, client=None, name=None, image=None, flavor=None,
                      create_kwargs=None):
        # Use None instead of a mutable default argument so the dict is not
        # shared between calls.
        if create_kwargs is None:
            create_kwargs = {}
if client is None:
client = self.compute_client
if name is None:
name = data_utils.rand_name('scenario-server-')
if image is None:
image = self.config.compute.image_ref
if flavor is None:
flavor = self.config.compute.flavor_ref
LOG.debug("Creating a server (name: %s, image: %s, flavor: %s)",
name, image, flavor)
server = client.servers.create(name, image, flavor, **create_kwargs)
self.assertEqual(server.name, name)
self.set_resource(name, server)
self.status_timeout(client.servers, server.id, 'ACTIVE')
# The instance retrieved on creation is missing network
# details, necessitating retrieval after it becomes active to
# ensure correct details.
server = client.servers.get(server.id)
self.set_resource(name, server)
LOG.debug("Created server: %s", server)
return server
def create_volume(self, client=None, size=1, name=None,
snapshot_id=None, imageRef=None):
if client is None:
client = self.volume_client
if name is None:
name = data_utils.rand_name('scenario-volume-')
LOG.debug("Creating a volume (size: %s, name: %s)", size, name)
volume = client.volumes.create(size=size, display_name=name,
snapshot_id=snapshot_id,
imageRef=imageRef)
self.set_resource(name, volume)
self.assertEqual(name, volume.display_name)
self.status_timeout(client.volumes, volume.id, 'available')
LOG.debug("Created volume: %s", volume)
return volume
def create_server_snapshot(self, server, compute_client=None,
image_client=None, name=None):
if compute_client is None:
compute_client = self.compute_client
if image_client is None:
image_client = self.image_client
if name is None:
name = data_utils.rand_name('scenario-snapshot-')
LOG.debug("Creating a snapshot image for server: %s", server.name)
image_id = compute_client.servers.create_image(server, name)
self.addCleanup(image_client.images.delete, image_id)
self.status_timeout(image_client.images, image_id, 'active')
snapshot_image = image_client.images.get(image_id)
self.assertEqual(name, snapshot_image.name)
LOG.debug("Created snapshot image %s for server %s",
snapshot_image.name, server.name)
return snapshot_image
def create_keypair(self, client=None, name=None):
if client is None:
client = self.compute_client
if name is None:
name = data_utils.rand_name('scenario-keypair-')
keypair = client.keypairs.create(name)
self.assertEqual(keypair.name, name)
self.set_resource(name, keypair)
return keypair
def get_remote_client(self, server_or_ip, username=None, private_key=None):
if isinstance(server_or_ip, basestring):
ip = server_or_ip
else:
network_name_for_ssh = self.config.compute.network_for_ssh
ip = server_or_ip.networks[network_name_for_ssh][0]
if username is None:
username = self.config.scenario.ssh_user
if private_key is None:
private_key = self.keypair.private_key
return RemoteClient(ip, username, pkey=private_key)
def _log_console_output(self, servers=None):
if not servers:
servers = self.compute_client.servers.list()
for server in servers:
LOG.debug('Console output for %s', server.id)
LOG.debug(server.get_console_output())
class NetworkScenarioTest(OfficialClientTest):
"""
Base class for network scenario tests
"""
@classmethod
def check_preconditions(cls):
if (cls.config.service_available.neutron):
cls.enabled = True
# verify that neutron_available is telling the truth
try:
cls.network_client.list_networks()
except exc.EndpointNotFound:
cls.enabled = False
raise
else:
cls.enabled = False
msg = 'Neutron not available'
raise cls.skipException(msg)
@classmethod
def setUpClass(cls):
super(NetworkScenarioTest, cls).setUpClass()
if cls.config.compute.allow_tenant_isolation:
cls.tenant_id = cls.isolated_creds.get_primary_tenant().id
else:
cls.tenant_id = cls.manager._get_identity_client(
cls.config.identity.username,
cls.config.identity.password,
cls.config.identity.tenant_name).tenant_id
def _create_network(self, tenant_id, namestart='network-smoke-'):
name = data_utils.rand_name(namestart)
body = dict(
network=dict(
name=name,
tenant_id=tenant_id,
),
)
result = self.network_client.create_network(body=body)
network = net_common.DeletableNetwork(client=self.network_client,
**result['network'])
self.assertEqual(network.name, name)
self.set_resource(name, network)
return network
def _list_networks(self):
nets = self.network_client.list_networks()
return nets['networks']
def _list_subnets(self):
subnets = self.network_client.list_subnets()
return subnets['subnets']
def _list_routers(self):
routers = self.network_client.list_routers()
return routers['routers']
def _list_ports(self):
ports = self.network_client.list_ports()
return ports['ports']
def _get_tenant_own_network_num(self, tenant_id):
nets = self._list_networks()
ownnets = [value for value in nets if tenant_id == value['tenant_id']]
return len(ownnets)
def _get_tenant_own_subnet_num(self, tenant_id):
subnets = self._list_subnets()
ownsubnets = ([value for value in subnets
if tenant_id == value['tenant_id']])
return len(ownsubnets)
def _get_tenant_own_port_num(self, tenant_id):
ports = self._list_ports()
ownports = ([value for value in ports
if tenant_id == value['tenant_id']])
return len(ownports)
def _create_subnet(self, network, namestart='subnet-smoke-'):
"""
Create a subnet for the given network within the cidr block
configured for tenant networks.
"""
cfg = self.config.network
tenant_cidr = netaddr.IPNetwork(cfg.tenant_network_cidr)
result = None
# Repeatedly attempt subnet creation with sequential cidr
# blocks until an unallocated block is found.
for subnet_cidr in tenant_cidr.subnet(cfg.tenant_network_mask_bits):
body = dict(
subnet=dict(
ip_version=4,
network_id=network.id,
tenant_id=network.tenant_id,
cidr=str(subnet_cidr),
),
)
try:
result = self.network_client.create_subnet(body=body)
break
except exc.NeutronClientException as e:
is_overlapping_cidr = 'overlaps with another subnet' in str(e)
if not is_overlapping_cidr:
raise
self.assertIsNotNone(result, 'Unable to allocate tenant network')
subnet = net_common.DeletableSubnet(client=self.network_client,
**result['subnet'])
self.assertEqual(subnet.cidr, str(subnet_cidr))
self.set_resource(data_utils.rand_name(namestart), subnet)
return subnet
def _create_port(self, network, namestart='port-quotatest-'):
name = data_utils.rand_name(namestart)
body = dict(
port=dict(name=name,
network_id=network.id,
tenant_id=network.tenant_id))
result = self.network_client.create_port(body=body)
self.assertIsNotNone(result, 'Unable to allocate port')
port = net_common.DeletablePort(client=self.network_client,
**result['port'])
self.set_resource(name, port)
return port
def _get_server_port_id(self, server):
result = self.network_client.list_ports(device_id=server.id)
ports = result.get('ports', [])
self.assertEqual(len(ports), 1,
"Unable to determine which port to target.")
return ports[0]['id']
def _create_floating_ip(self, server, external_network_id):
port_id = self._get_server_port_id(server)
body = dict(
floatingip=dict(
floating_network_id=external_network_id,
port_id=port_id,
tenant_id=server.tenant_id,
)
)
result = self.network_client.create_floatingip(body=body)
floating_ip = net_common.DeletableFloatingIp(
client=self.network_client,
**result['floatingip'])
self.set_resource(data_utils.rand_name('floatingip-'), floating_ip)
return floating_ip
def _associate_floating_ip(self, floating_ip, server):
port_id = self._get_server_port_id(server)
floating_ip.update(port_id=port_id)
self.assertEqual(port_id, floating_ip.port_id)
return floating_ip
def _disassociate_floating_ip(self, floating_ip):
"""
:param floating_ip: type DeletableFloatingIp
"""
floating_ip.update(port_id=None)
self.assertEqual(None, floating_ip.port_id)
return floating_ip
def _ping_ip_address(self, ip_address, should_succeed=True):
cmd = ['ping', '-c1', '-w1', ip_address]
def ping():
proc = subprocess.Popen(cmd,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
proc.wait()
return (proc.returncode == 0) == should_succeed
return tempest.test.call_until_true(
ping, self.config.compute.ping_timeout, 1)
def _check_vm_connectivity(self, ip_address,
username=None,
private_key=None,
should_connect=True):
"""
:param ip_address: server to test against
:param username: server's ssh username
:param private_key: server's ssh private key to be used
:param should_connect: True/False indicates positive/negative test
positive - attempt ping and ssh
negative - attempt ping and fail if succeed
        :raises: AssertionError if the result of the connectivity check does
            not match the value of the should_connect param
"""
if should_connect:
msg = "Timed out waiting for %s to become reachable" % ip_address
else:
msg = "ip address %s is reachable" % ip_address
self.assertTrue(self._ping_ip_address(ip_address,
should_succeed=should_connect),
msg=msg)
if should_connect:
# no need to check ssh for negative connectivity
linux_client = self.get_remote_client(ip_address, username,
private_key)
linux_client.validate_authentication()
def _create_security_group_nova(self, client=None,
namestart='secgroup-smoke-',
tenant_id=None):
if client is None:
client = self.compute_client
# Create security group
sg_name = data_utils.rand_name(namestart)
sg_desc = sg_name + " description"
secgroup = client.security_groups.create(sg_name, sg_desc)
self.assertEqual(secgroup.name, sg_name)
self.assertEqual(secgroup.description, sg_desc)
self.set_resource(sg_name, secgroup)
# Add rules to the security group
self._create_loginable_secgroup_rule_nova(client, secgroup.id)
return secgroup
def _create_security_group_neutron(self, tenant_id, client=None,
namestart='secgroup-smoke-'):
if client is None:
client = self.network_client
secgroup = self._create_empty_security_group(namestart=namestart,
client=client,
tenant_id=tenant_id)
# Add rules to the security group
rules = self._create_loginable_secgroup_rule_neutron(secgroup=secgroup)
for rule in rules:
self.assertEqual(tenant_id, rule.tenant_id)
self.assertEqual(secgroup.id, rule.security_group_id)
return secgroup
def _create_empty_security_group(self, tenant_id, client=None,
namestart='secgroup-smoke-'):
"""Create a security group without rules.
Default rules will be created:
- IPv4 egress to any
- IPv6 egress to any
:param tenant_id: secgroup will be created in this tenant
:returns: DeletableSecurityGroup -- containing the secgroup created
"""
if client is None:
client = self.network_client
sg_name = data_utils.rand_name(namestart)
sg_desc = sg_name + " description"
sg_dict = dict(name=sg_name,
description=sg_desc)
sg_dict['tenant_id'] = tenant_id
body = dict(security_group=sg_dict)
result = client.create_security_group(body=body)
secgroup = net_common.DeletableSecurityGroup(
client=client,
**result['security_group']
)
self.assertEqual(secgroup.name, sg_name)
self.assertEqual(tenant_id, secgroup.tenant_id)
self.assertEqual(secgroup.description, sg_desc)
self.set_resource(sg_name, secgroup)
return secgroup
def _default_security_group(self, tenant_id, client=None):
"""Get default secgroup for given tenant_id.
:returns: DeletableSecurityGroup -- default secgroup for given tenant
"""
if client is None:
client = self.network_client
sgs = [
sg for sg in client.list_security_groups().values()[0]
if sg['tenant_id'] == tenant_id and sg['name'] == 'default'
]
msg = "No default security group for tenant %s." % (tenant_id)
self.assertTrue(len(sgs) > 0, msg)
if len(sgs) > 1:
msg = "Found %d default security groups" % len(sgs)
raise exc.NeutronClientNoUniqueMatch(msg=msg)
return net_common.DeletableSecurityGroup(client=client,
**sgs[0])
def _create_security_group_rule(self, client=None, secgroup=None,
tenant_id=None, **kwargs):
"""Create a rule from a dictionary of rule parameters.
Create a rule in a secgroup. if secgroup not defined will search for
default secgroup in tenant_id.
        :param secgroup: type DeletableSecurityGroup; if not passed, the
            default secgroup for the given tenant_id is used
:param tenant_id: if secgroup not passed -- the tenant in which to
search for default secgroup
:param kwargs: a dictionary containing rule parameters:
for example, to allow incoming ssh:
            rule = {
                'direction': 'ingress',
                'protocol': 'tcp',
                'port_range_min': 22,
                'port_range_max': 22,
            }
"""
if client is None:
client = self.network_client
if secgroup is None:
secgroup = self._default_security_group(tenant_id)
ruleset = dict(security_group_id=secgroup.id,
tenant_id=secgroup.tenant_id,
)
ruleset.update(kwargs)
body = dict(security_group_rule=dict(ruleset))
sg_rule = client.create_security_group_rule(body=body)
sg_rule = net_common.DeletableSecurityGroupRule(
client=client,
**sg_rule['security_group_rule']
)
self.set_resource(sg_rule.id, sg_rule)
self.assertEqual(secgroup.tenant_id, sg_rule.tenant_id)
self.assertEqual(secgroup.id, sg_rule.security_group_id)
return sg_rule
def _create_loginable_secgroup_rule_neutron(self, client=None,
secgroup=None):
"""These rules are intended to permit inbound ssh and icmp
traffic from all sources, so no group_id is provided.
Setting a group_id would only permit traffic from ports
belonging to the same security group.
"""
if client is None:
client = self.network_client
rules = []
rulesets = [
dict(
# ssh
protocol='tcp',
port_range_min=22,
port_range_max=22,
),
dict(
# ping
protocol='icmp',
)
]
for ruleset in rulesets:
for r_direction in ['ingress', 'egress']:
ruleset['direction'] = r_direction
try:
sg_rule = self._create_security_group_rule(
client=client, secgroup=secgroup, **ruleset)
except exc.NeutronClientException as ex:
                    # if the rule already exists - skip it and continue
                    if not (ex.status_code == 409 and 'Security group rule'
                            ' already exists' in ex.message):
raise ex
else:
self.assertEqual(r_direction, sg_rule.direction)
rules.append(sg_rule)
return rules
def _ssh_to_server(self, server, private_key):
ssh_login = self.config.compute.image_ssh_user
return self.get_remote_client(server,
username=ssh_login,
private_key=private_key)
def _show_quota_network(self, tenant_id):
quota = self.network_client.show_quota(tenant_id)
return quota['quota']['network']
def _show_quota_subnet(self, tenant_id):
quota = self.network_client.show_quota(tenant_id)
return quota['quota']['subnet']
def _show_quota_port(self, tenant_id):
quota = self.network_client.show_quota(tenant_id)
return quota['quota']['port']
def _get_router(self, tenant_id):
"""Retrieve a router for the given tenant id.
If a public router has been configured, it will be returned.
If a public router has not been configured, but a public
network has, a tenant router will be created and returned that
routes traffic to the public network.
"""
router_id = self.config.network.public_router_id
network_id = self.config.network.public_network_id
if router_id:
result = self.network_client.show_router(router_id)
return net_common.AttributeDict(**result['router'])
elif network_id:
router = self._create_router(tenant_id)
router.add_gateway(network_id)
return router
else:
raise Exception("Neither of 'public_router_id' or "
"'public_network_id' has been defined.")
def _create_router(self, tenant_id, namestart='router-smoke-'):
name = data_utils.rand_name(namestart)
body = dict(
router=dict(
name=name,
admin_state_up=True,
tenant_id=tenant_id,
),
)
result = self.network_client.create_router(body=body)
router = net_common.DeletableRouter(client=self.network_client,
**result['router'])
self.assertEqual(router.name, name)
self.set_resource(name, router)
return router
def _create_networks(self, tenant_id=None):
"""Create a network with a subnet connected to a router.
:returns: network, subnet, router
"""
if tenant_id is None:
tenant_id = self.tenant_id
network = self._create_network(tenant_id)
router = self._get_router(tenant_id)
subnet = self._create_subnet(network)
subnet.add_to_router(router.id)
self.networks.append(network)
self.subnets.append(subnet)
self.routers.append(router)
return network, subnet, router
class OrchestrationScenarioTest(OfficialClientTest):
"""
Base class for orchestration scenario tests
"""
@classmethod
def setUpClass(cls):
super(OrchestrationScenarioTest, cls).setUpClass()
if not cls.config.service_available.heat:
raise cls.skipException("Heat support is required")
@classmethod
def credentials(cls):
username = cls.config.identity.admin_username
password = cls.config.identity.admin_password
tenant_name = cls.config.identity.tenant_name
return username, password, tenant_name
def _load_template(self, base_file, file_name):
filepath = os.path.join(os.path.dirname(os.path.realpath(base_file)),
file_name)
with open(filepath) as f:
return f.read()
@classmethod
def _stack_rand_name(cls):
return data_utils.rand_name(cls.__name__ + '-')
@classmethod
def _get_default_network(cls):
networks = cls.network_client.list_networks()
for net in networks['networks']:
if net['name'] == cls.config.compute.fixed_network_name:
return net
class FusionScenarioTest(OfficialClientTest):
"""
    Base class for fusion scenario tests
"""
@classmethod
def setUpClass(cls):
super(FusionScenarioTest, cls).setUpClass()
if not cls.config.service_available.heat:
raise cls.skipException("Heat support is required")
@classmethod
def credentials(cls):
username = cls.config.identity.admin_username
password = cls.config.identity.admin_password
tenant_name = cls.config.identity.tenant_name
return username, password, tenant_name
def _load_template(self, base_file, file_name):
filepath = os.path.join(os.path.dirname(os.path.realpath(base_file)),
file_name)
with open(filepath) as f:
return f.read()
@classmethod
def _stack_rand_name(cls):
return data_utils.rand_name(cls.__name__ + '-')
# @classmethod
# def _get_default_network(cls):
# networks = cls.network_client.list_networks()
# for net in networks['networks']:
# if net['name'] == cls.config.compute.fixed_network_name:
# return net
#
|
|
# Copyright 2015 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Define API Subscriptions."""
from gcloud.exceptions import NotFound
from gcloud.pubsub._helpers import topic_name_from_path
from gcloud.pubsub.message import Message
class Subscription(object):
"""Subscriptions receive messages published to their topics.
See:
https://cloud.google.com/pubsub/reference/rest/v1/projects.subscriptions
:type name: string
:param name: the name of the subscription
:type topic: :class:`gcloud.pubsub.topic.Topic`
    :param topic: the topic to which the subscription belongs.
:type ack_deadline: int
:param ack_deadline: the deadline (in seconds) by which messages pulled
from the back-end must be acknowledged.
:type push_endpoint: string
:param push_endpoint: URL to which messages will be pushed by the back-end.
If not set, the application must pull messages.
"""
def __init__(self, name, topic, ack_deadline=None, push_endpoint=None):
self.name = name
self.topic = topic
self.ack_deadline = ack_deadline
self.push_endpoint = push_endpoint
@classmethod
def from_api_repr(cls, resource, client, topics=None):
"""Factory: construct a topic given its API representation
:type resource: dict
:param resource: topic resource representation returned from the API
:type client: :class:`gcloud.pubsub.client.Client`
:param client: Client which holds credentials and project
configuration for a topic.
:type topics: dict or None
:param topics: A mapping of topic names -> topics. If not passed,
the subscription will have a newly-created topic.
:rtype: :class:`gcloud.pubsub.subscription.Subscription`
:returns: Subscription parsed from ``resource``.
"""
if topics is None:
topics = {}
topic_path = resource['topic']
topic = topics.get(topic_path)
if topic is None:
# NOTE: This duplicates behavior from Topic.from_api_repr to avoid
# an import cycle.
topic_name = topic_name_from_path(topic_path, client.project)
topic = topics[topic_path] = client.topic(topic_name)
_, _, _, name = resource['name'].split('/')
ack_deadline = resource.get('ackDeadlineSeconds')
push_config = resource.get('pushConfig', {})
push_endpoint = push_config.get('pushEndpoint')
return cls(name, topic, ack_deadline, push_endpoint)
@property
def path(self):
"""URL path for the subscription's APIs"""
project = self.topic.project
return '/projects/%s/subscriptions/%s' % (project, self.name)
def _require_client(self, client):
"""Check client or verify over-ride.
:type client: :class:`gcloud.pubsub.client.Client` or ``NoneType``
:param client: the client to use. If not passed, falls back to the
``client`` stored on the topic of the
current subscription.
:rtype: :class:`gcloud.pubsub.client.Client`
:returns: The client passed in or the currently bound client.
"""
if client is None:
client = self.topic._client
return client
def create(self, client=None):
"""API call: create the subscription via a PUT request
See:
https://cloud.google.com/pubsub/reference/rest/v1/projects.subscriptions/create
:type client: :class:`gcloud.pubsub.client.Client` or ``NoneType``
:param client: the client to use. If not passed, falls back to the
``client`` stored on the current subscription's topic.
"""
data = {'topic': self.topic.full_name}
if self.ack_deadline is not None:
data['ackDeadlineSeconds'] = self.ack_deadline
if self.push_endpoint is not None:
data['pushConfig'] = {'pushEndpoint': self.push_endpoint}
client = self._require_client(client)
client.connection.api_request(method='PUT', path=self.path, data=data)
def exists(self, client=None):
"""API call: test existence of the subscription via a GET request
        See:
https://cloud.google.com/pubsub/reference/rest/v1/projects.subscriptions/get
:type client: :class:`gcloud.pubsub.client.Client` or ``NoneType``
:param client: the client to use. If not passed, falls back to the
``client`` stored on the current subscription's topic.
"""
client = self._require_client(client)
try:
client.connection.api_request(method='GET', path=self.path)
except NotFound:
return False
else:
return True
def reload(self, client=None):
"""API call: sync local subscription configuration via a GET request
        See:
https://cloud.google.com/pubsub/reference/rest/v1/projects.subscriptions/get
:type client: :class:`gcloud.pubsub.client.Client` or ``NoneType``
:param client: the client to use. If not passed, falls back to the
``client`` stored on the current subscription's topic.
"""
client = self._require_client(client)
data = client.connection.api_request(method='GET', path=self.path)
self.ack_deadline = data.get('ackDeadlineSeconds')
push_config = data.get('pushConfig', {})
self.push_endpoint = push_config.get('pushEndpoint')
def modify_push_configuration(self, push_endpoint, client=None):
"""API call: update the push endpoint for the subscription.
See:
https://cloud.google.com/pubsub/reference/rest/v1/projects.subscriptions/modifyPushConfig
:type push_endpoint: string
:param push_endpoint: URL to which messages will be pushed by the
back-end. If None, the application must pull
messages.
:type client: :class:`gcloud.pubsub.client.Client` or ``NoneType``
:param client: the client to use. If not passed, falls back to the
``client`` stored on the current subscription's topic.
"""
client = self._require_client(client)
data = {}
config = data['pushConfig'] = {}
if push_endpoint is not None:
config['pushEndpoint'] = push_endpoint
client.connection.api_request(
method='POST', path='%s:modifyPushConfig' % (self.path,),
data=data)
self.push_endpoint = push_endpoint
def pull(self, return_immediately=False, max_messages=1, client=None):
"""API call: retrieve messages for the subscription.
See:
https://cloud.google.com/pubsub/reference/rest/v1/projects.subscriptions/pull
:type return_immediately: boolean
:param return_immediately: if True, the back-end returns even if no
messages are available; if False, the API
call blocks until one or more messages are
available.
:type max_messages: int
:param max_messages: the maximum number of messages to return.
:type client: :class:`gcloud.pubsub.client.Client` or ``NoneType``
:param client: the client to use. If not passed, falls back to the
``client`` stored on the current subscription's topic.
:rtype: list of (ack_id, message) tuples
:returns: sequence of tuples: ``ack_id`` is the ID to be used in a
subsequent call to :meth:`acknowledge`, and ``message``
is an instance of :class:`gcloud.pubsub.message.Message`.
"""
client = self._require_client(client)
data = {'returnImmediately': return_immediately,
'maxMessages': max_messages}
response = client.connection.api_request(
method='POST', path='%s:pull' % (self.path,), data=data)
return [(info['ackId'], Message.from_api_repr(info['message']))
for info in response.get('receivedMessages', ())]
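    # Hypothetical usage sketch (``subscription`` is an instance of this
    # class and ``process`` is an application-specific callable, neither of
    # which is defined in this module):
    #
    #   pulled = subscription.pull(max_messages=10)
    #   for ack_id, message in pulled:
    #       process(message.data)
    #   if pulled:
    #       subscription.acknowledge([ack_id for ack_id, _ in pulled])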
def acknowledge(self, ack_ids, client=None):
"""API call: acknowledge retrieved messages for the subscription.
See:
https://cloud.google.com/pubsub/reference/rest/v1/projects.subscriptions/acknowledge
:type ack_ids: list of string
:param ack_ids: ack IDs of messages being acknowledged
:type client: :class:`gcloud.pubsub.client.Client` or ``NoneType``
:param client: the client to use. If not passed, falls back to the
``client`` stored on the current subscription's topic.
"""
client = self._require_client(client)
data = {'ackIds': ack_ids}
client.connection.api_request(
method='POST', path='%s:acknowledge' % (self.path,), data=data)
def modify_ack_deadline(self, ack_id, ack_deadline, client=None):
"""API call: update acknowledgement deadline for a retrieved message.
See:
        https://cloud.google.com/pubsub/reference/rest/v1/projects.subscriptions/modifyAckDeadline
:type ack_id: string
:param ack_id: ack ID of message being updated
:type ack_deadline: int
:param ack_deadline: new deadline for the message, in seconds
:type client: :class:`gcloud.pubsub.client.Client` or ``NoneType``
:param client: the client to use. If not passed, falls back to the
``client`` stored on the current subscription's topic.
"""
client = self._require_client(client)
data = {'ackIds': [ack_id], 'ackDeadlineSeconds': ack_deadline}
client.connection.api_request(
method='POST', path='%s:modifyAckDeadline' % (self.path,),
data=data)
def delete(self, client=None):
"""API call: delete the subscription via a DELETE request.
See:
https://cloud.google.com/pubsub/reference/rest/v1/projects.subscriptions/delete
:type client: :class:`gcloud.pubsub.client.Client` or ``NoneType``
:param client: the client to use. If not passed, falls back to the
``client`` stored on the current subscription's topic.
"""
client = self._require_client(client)
client.connection.api_request(method='DELETE', path=self.path)
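# Hedged usage sketch, not part of the library: it assumes a ``subscription``
# object for an existing topic has already been constructed elsewhere, and it
# only exercises the ``pull`` and ``acknowledge`` methods defined above.
def _demo_pull_and_acknowledge(subscription, client=None):
    """Pull up to ten messages and acknowledge whatever was received."""
    received = subscription.pull(return_immediately=True, max_messages=10,
                                 client=client)
    if not received:
        return []
    ack_ids = [ack_id for ack_id, _message in received]
    subscription.acknowledge(ack_ids, client=client)
    return received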
|
|
"""Support for Epson projector."""
import logging
from epson_projector.const import (
BACK,
BUSY,
CMODE,
CMODE_LIST,
CMODE_LIST_SET,
DEFAULT_SOURCES,
EPSON_CODES,
FAST,
INV_SOURCES,
MUTE,
PAUSE,
PLAY,
POWER,
SOURCE,
SOURCE_LIST,
STATE_UNAVAILABLE as EPSON_STATE_UNAVAILABLE,
TURN_OFF,
TURN_ON,
VOL_DOWN,
VOL_UP,
VOLUME,
)
import voluptuous as vol
from homeassistant.components.media_player import PLATFORM_SCHEMA, MediaPlayerEntity
from homeassistant.components.media_player.const import (
SUPPORT_NEXT_TRACK,
SUPPORT_PREVIOUS_TRACK,
SUPPORT_SELECT_SOURCE,
SUPPORT_TURN_OFF,
SUPPORT_TURN_ON,
SUPPORT_VOLUME_MUTE,
SUPPORT_VOLUME_STEP,
)
from homeassistant.config_entries import SOURCE_IMPORT
from homeassistant.const import CONF_HOST, CONF_NAME, CONF_PORT, STATE_OFF, STATE_ON
from homeassistant.helpers import entity_platform
import homeassistant.helpers.config_validation as cv
from .const import ATTR_CMODE, DEFAULT_NAME, DOMAIN, SERVICE_SELECT_CMODE
_LOGGER = logging.getLogger(__name__)
SUPPORT_EPSON = (
SUPPORT_TURN_ON
| SUPPORT_TURN_OFF
| SUPPORT_SELECT_SOURCE
| SUPPORT_VOLUME_MUTE
| SUPPORT_VOLUME_STEP
| SUPPORT_NEXT_TRACK
| SUPPORT_PREVIOUS_TRACK
)
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
{
vol.Required(CONF_HOST): cv.string,
vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,
vol.Optional(CONF_PORT, default=80): cv.port,
}
)
async def async_setup_entry(hass, config_entry, async_add_entities):
"""Set up the Epson projector from a config entry."""
unique_id = config_entry.entry_id
projector = hass.data[DOMAIN][unique_id]
projector_entity = EpsonProjectorMediaPlayer(
projector, config_entry.title, unique_id
)
async_add_entities([projector_entity], True)
platform = entity_platform.current_platform.get()
platform.async_register_entity_service(
SERVICE_SELECT_CMODE,
{vol.Required(ATTR_CMODE): vol.All(cv.string, vol.Any(*CMODE_LIST_SET))},
SERVICE_SELECT_CMODE,
)
async def async_setup_platform(hass, config, async_add_entities, discovery_info=None):
"""Set up the Epson projector."""
hass.async_create_task(
hass.config_entries.flow.async_init(
DOMAIN, context={"source": SOURCE_IMPORT}, data=config
)
)
class EpsonProjectorMediaPlayer(MediaPlayerEntity):
"""Representation of Epson Projector Device."""
def __init__(self, projector, name, unique_id):
"""Initialize entity to control Epson projector."""
self._name = name
self._projector = projector
self._available = False
self._cmode = None
self._source_list = list(DEFAULT_SOURCES.values())
self._source = None
self._volume = None
self._state = None
self._unique_id = unique_id
async def async_update(self):
"""Update state of device."""
power_state = await self._projector.get_property(POWER)
_LOGGER.debug("Projector status: %s", power_state)
if not power_state or power_state == EPSON_STATE_UNAVAILABLE:
self._available = False
return
self._available = True
if power_state == EPSON_CODES[POWER]:
self._state = STATE_ON
self._source_list = list(DEFAULT_SOURCES.values())
cmode = await self._projector.get_property(CMODE)
self._cmode = CMODE_LIST.get(cmode, self._cmode)
source = await self._projector.get_property(SOURCE)
self._source = SOURCE_LIST.get(source, self._source)
volume = await self._projector.get_property(VOLUME)
if volume:
self._volume = volume
elif power_state == BUSY:
self._state = STATE_ON
else:
self._state = STATE_OFF
@property
def name(self):
"""Return the name of the device."""
return self._name
@property
def unique_id(self):
"""Return unique ID."""
return self._unique_id
@property
def state(self):
"""Return the state of the device."""
return self._state
@property
def available(self):
"""Return if projector is available."""
return self._available
@property
def supported_features(self):
"""Flag media player features that are supported."""
return SUPPORT_EPSON
async def async_turn_on(self):
"""Turn on epson."""
if self._state == STATE_OFF:
await self._projector.send_command(TURN_ON)
async def async_turn_off(self):
"""Turn off epson."""
if self._state == STATE_ON:
await self._projector.send_command(TURN_OFF)
@property
def source_list(self):
"""List of available input sources."""
return self._source_list
@property
def source(self):
"""Get current input sources."""
return self._source
@property
def volume_level(self):
"""Return the volume level of the media player (0..1)."""
return self._volume
async def select_cmode(self, cmode):
"""Set color mode in Epson."""
await self._projector.send_command(CMODE_LIST_SET[cmode])
async def async_select_source(self, source):
"""Select input source."""
selected_source = INV_SOURCES[source]
await self._projector.send_command(selected_source)
async def async_mute_volume(self, mute):
"""Mute (true) or unmute (false) sound."""
await self._projector.send_command(MUTE)
async def async_volume_up(self):
"""Increase volume."""
await self._projector.send_command(VOL_UP)
async def async_volume_down(self):
"""Decrease volume."""
await self._projector.send_command(VOL_DOWN)
async def async_media_play(self):
"""Play media via Epson."""
await self._projector.send_command(PLAY)
async def async_media_pause(self):
"""Pause media via Epson."""
await self._projector.send_command(PAUSE)
async def async_media_next_track(self):
"""Skip to next."""
await self._projector.send_command(FAST)
async def async_media_previous_track(self):
"""Skip to previous."""
await self._projector.send_command(BACK)
@property
def device_state_attributes(self):
"""Return device specific state attributes."""
if self._cmode is None:
return {}
return {ATTR_CMODE: self._cmode}
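# Hedged illustration, not part of the integration: a standalone sketch of the
# voluptuous pattern used above for the SERVICE_SELECT_CMODE schema. The
# allowed values below are arbitrary placeholders, not the real keys of
# CMODE_LIST_SET; it reuses the ``vol`` and ``ATTR_CMODE`` imports from the
# top of this module, and nothing runs until the function is called.
def _demo_cmode_schema():
    demo_cmodes = ("cinema", "dynamic", "natural")
    demo_schema = vol.Schema(
        {vol.Required(ATTR_CMODE): vol.All(str, vol.Any(*demo_cmodes))}
    )
    # A valid payload passes through unchanged; an unknown value would raise
    # a voluptuous Invalid error, which is how a bad service call is rejected.
    return demo_schema({ATTR_CMODE: "cinema"})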
|
|
# sqlalchemy/events.py
# Copyright (C) 2005-2016 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""Core event interfaces."""
from . import event, exc
from .pool import Pool
from .engine import Connectable, Engine, Dialect
from .sql.base import SchemaEventTarget
class DDLEvents(event.Events):
"""
Define event listeners for schema objects,
that is, :class:`.SchemaItem` and other :class:`.SchemaEventTarget`
subclasses, including :class:`.MetaData`, :class:`.Table`,
:class:`.Column`.
:class:`.MetaData` and :class:`.Table` support events
specifically regarding when CREATE and DROP
DDL is emitted to the database.
Attachment events are also provided to customize
behavior whenever a child schema element is associated
with a parent, such as, when a :class:`.Column` is associated
with its :class:`.Table`, when a :class:`.ForeignKeyConstraint`
is associated with a :class:`.Table`, etc.
Example using the ``after_create`` event::
from sqlalchemy import event
            from sqlalchemy import Table, Column, MetaData, Integer
m = MetaData()
some_table = Table('some_table', m, Column('data', Integer))
def after_create(target, connection, **kw):
connection.execute("ALTER TABLE %s SET name=foo_%s" %
(target.name, target.name))
event.listen(some_table, "after_create", after_create)
DDL events integrate closely with the
:class:`.DDL` class and the :class:`.DDLElement` hierarchy
of DDL clause constructs, which are themselves appropriate
as listener callables::
from sqlalchemy import DDL
event.listen(
some_table,
"after_create",
DDL("ALTER TABLE %(table)s SET name=foo_%(table)s")
)
The methods here define the name of an event as well
as the names of members that are passed to listener
functions.
See also:
:ref:`event_toplevel`
:class:`.DDLElement`
:class:`.DDL`
:ref:`schema_ddl_sequences`
"""
_target_class_doc = "SomeSchemaClassOrObject"
_dispatch_target = SchemaEventTarget
def before_create(self, target, connection, **kw):
"""Called before CREATE statements are emitted.
:param target: the :class:`.MetaData` or :class:`.Table`
object which is the target of the event.
:param connection: the :class:`.Connection` where the
CREATE statement or statements will be emitted.
:param \**kw: additional keyword arguments relevant
to the event. The contents of this dictionary
may vary across releases, and include the
list of tables being generated for a metadata-level
event, the checkfirst flag, and other
elements used by internal events.
"""
def after_create(self, target, connection, **kw):
"""Called after CREATE statements are emitted.
:param target: the :class:`.MetaData` or :class:`.Table`
object which is the target of the event.
:param connection: the :class:`.Connection` where the
CREATE statement or statements have been emitted.
:param \**kw: additional keyword arguments relevant
to the event. The contents of this dictionary
may vary across releases, and include the
list of tables being generated for a metadata-level
event, the checkfirst flag, and other
elements used by internal events.
"""
def before_drop(self, target, connection, **kw):
"""Called before DROP statements are emitted.
:param target: the :class:`.MetaData` or :class:`.Table`
object which is the target of the event.
:param connection: the :class:`.Connection` where the
DROP statement or statements will be emitted.
:param \**kw: additional keyword arguments relevant
to the event. The contents of this dictionary
may vary across releases, and include the
list of tables being generated for a metadata-level
event, the checkfirst flag, and other
elements used by internal events.
"""
def after_drop(self, target, connection, **kw):
"""Called after DROP statements are emitted.
:param target: the :class:`.MetaData` or :class:`.Table`
object which is the target of the event.
:param connection: the :class:`.Connection` where the
DROP statement or statements have been emitted.
:param \**kw: additional keyword arguments relevant
to the event. The contents of this dictionary
may vary across releases, and include the
list of tables being generated for a metadata-level
event, the checkfirst flag, and other
elements used by internal events.
"""
def before_parent_attach(self, target, parent):
"""Called before a :class:`.SchemaItem` is associated with
a parent :class:`.SchemaItem`.
:param target: the target object
:param parent: the parent to which the target is being attached.
:func:`.event.listen` also accepts a modifier for this event:
:param propagate=False: When True, the listener function will
be established for any copies made of the target object,
i.e. those copies that are generated when
:meth:`.Table.tometadata` is used.
"""
def after_parent_attach(self, target, parent):
"""Called after a :class:`.SchemaItem` is associated with
a parent :class:`.SchemaItem`.
:param target: the target object
:param parent: the parent to which the target is being attached.
:func:`.event.listen` also accepts a modifier for this event:
:param propagate=False: When True, the listener function will
be established for any copies made of the target object,
i.e. those copies that are generated when
:meth:`.Table.tometadata` is used.
"""
def column_reflect(self, inspector, table, column_info):
"""Called for each unit of 'column info' retrieved when
a :class:`.Table` is being reflected.
The dictionary of column information as returned by the
dialect is passed, and can be modified. The dictionary
is that returned in each element of the list returned
by :meth:`.reflection.Inspector.get_columns`.
The event is called before any action is taken against
this dictionary, and the contents can be modified.
The :class:`.Column` specific arguments ``info``, ``key``,
and ``quote`` can also be added to the dictionary and
will be passed to the constructor of :class:`.Column`.
Note that this event is only meaningful if either
associated with the :class:`.Table` class across the
board, e.g.::
from sqlalchemy.schema import Table
from sqlalchemy import event
def listen_for_reflect(inspector, table, column_info):
"receive a column_reflect event"
# ...
event.listen(
Table,
'column_reflect',
listen_for_reflect)
...or with a specific :class:`.Table` instance using
the ``listeners`` argument::
def listen_for_reflect(inspector, table, column_info):
"receive a column_reflect event"
# ...
t = Table(
'sometable',
autoload=True,
listeners=[
('column_reflect', listen_for_reflect)
])
        This is because the reflection process initiated by ``autoload=True``
completes within the scope of the constructor for :class:`.Table`.
"""
class PoolEvents(event.Events):
"""Available events for :class:`.Pool`.
The methods here define the name of an event as well
as the names of members that are passed to listener
functions.
e.g.::
from sqlalchemy import event
def my_on_checkout(dbapi_conn, connection_rec, connection_proxy):
"handle an on checkout event"
event.listen(Pool, 'checkout', my_on_checkout)
In addition to accepting the :class:`.Pool` class and
:class:`.Pool` instances, :class:`.PoolEvents` also accepts
:class:`.Engine` objects and the :class:`.Engine` class as
targets, which will be resolved to the ``.pool`` attribute of the
given engine or the :class:`.Pool` class::
engine = create_engine("postgresql://scott:tiger@localhost/test")
# will associate with engine.pool
event.listen(engine, 'checkout', my_on_checkout)
"""
_target_class_doc = "SomeEngineOrPool"
_dispatch_target = Pool
@classmethod
def _accept_with(cls, target):
if isinstance(target, type):
if issubclass(target, Engine):
return Pool
elif issubclass(target, Pool):
return target
elif isinstance(target, Engine):
return target.pool
else:
return target
def connect(self, dbapi_connection, connection_record):
"""Called at the moment a particular DBAPI connection is first
created for a given :class:`.Pool`.
This event allows one to capture the point directly after which
the DBAPI module-level ``.connect()`` method has been used in order
to produce a new DBAPI connection.
:param dbapi_connection: a DBAPI connection.
:param connection_record: the :class:`._ConnectionRecord` managing the
DBAPI connection.
"""
def first_connect(self, dbapi_connection, connection_record):
"""Called exactly once for the first time a DBAPI connection is
checked out from a particular :class:`.Pool`.
The rationale for :meth:`.PoolEvents.first_connect` is to determine
information about a particular series of database connections based
on the settings used for all connections. Since a particular
:class:`.Pool` refers to a single "creator" function (which in terms
of a :class:`.Engine` refers to the URL and connection options used),
it is typically valid to make observations about a single connection
that can be safely assumed to be valid about all subsequent
connections, such as the database version, the server and client
encoding settings, collation settings, and many others.
:param dbapi_connection: a DBAPI connection.
:param connection_record: the :class:`._ConnectionRecord` managing the
DBAPI connection.
"""
def checkout(self, dbapi_connection, connection_record, connection_proxy):
"""Called when a connection is retrieved from the Pool.
:param dbapi_connection: a DBAPI connection.
:param connection_record: the :class:`._ConnectionRecord` managing the
DBAPI connection.
:param connection_proxy: the :class:`._ConnectionFairy` object which
will proxy the public interface of the DBAPI connection for the
lifespan of the checkout.
If you raise a :class:`~sqlalchemy.exc.DisconnectionError`, the current
connection will be disposed and a fresh connection retrieved.
Processing of all checkout listeners will abort and restart
using the new connection.
.. seealso:: :meth:`.ConnectionEvents.engine_connect` - a similar event
which occurs upon creation of a new :class:`.Connection`.
"""
def checkin(self, dbapi_connection, connection_record):
"""Called when a connection returns to the pool.
Note that the connection may be closed, and may be None if the
connection has been invalidated. ``checkin`` will not be called
for detached connections. (They do not return to the pool.)
:param dbapi_connection: a DBAPI connection.
:param connection_record: the :class:`._ConnectionRecord` managing the
DBAPI connection.
"""
def reset(self, dbapi_connection, connection_record):
"""Called before the "reset" action occurs for a pooled connection.
This event represents
when the ``rollback()`` method is called on the DBAPI connection
before it is returned to the pool. The behavior of "reset" can
be controlled, including disabled, using the ``reset_on_return``
pool argument.
The :meth:`.PoolEvents.reset` event is usually followed by the
        :meth:`.PoolEvents.checkin` event, except in those
cases where the connection is discarded immediately after reset.
:param dbapi_connection: a DBAPI connection.
:param connection_record: the :class:`._ConnectionRecord` managing the
DBAPI connection.
.. versionadded:: 0.8
.. seealso::
:meth:`.ConnectionEvents.rollback`
:meth:`.ConnectionEvents.commit`
"""
def invalidate(self, dbapi_connection, connection_record, exception):
"""Called when a DBAPI connection is to be "invalidated".
This event is called any time the :meth:`._ConnectionRecord.invalidate`
method is invoked, either from API usage or via "auto-invalidation",
without the ``soft`` flag.
The event occurs before a final attempt to call ``.close()`` on the
connection occurs.
:param dbapi_connection: a DBAPI connection.
:param connection_record: the :class:`._ConnectionRecord` managing the
DBAPI connection.
:param exception: the exception object corresponding to the reason
for this invalidation, if any. May be ``None``.
.. versionadded:: 0.9.2 Added support for connection invalidation
listening.
.. seealso::
:ref:`pool_connection_invalidation`
"""
def soft_invalidate(self, dbapi_connection, connection_record, exception):
"""Called when a DBAPI connection is to be "soft invalidated".
This event is called any time the :meth:`._ConnectionRecord.invalidate`
method is invoked with the ``soft`` flag.
Soft invalidation refers to when the connection record that tracks
this connection will force a reconnect after the current connection
is checked in. It does not actively close the dbapi_connection
at the point at which it is called.
.. versionadded:: 1.0.3
"""
def close(self, dbapi_connection, connection_record):
"""Called when a DBAPI connection is closed.
The event is emitted before the close occurs.
The close of a connection can fail; typically this is because
the connection is already closed. If the close operation fails,
the connection is discarded.
The :meth:`.close` event corresponds to a connection that's still
associated with the pool. To intercept close events for detached
connections use :meth:`.close_detached`.
.. versionadded:: 1.1
"""
def detach(self, dbapi_connection, connection_record):
"""Called when a DBAPI connection is "detached" from a pool.
This event is emitted after the detach occurs. The connection
is no longer associated with the given connection record.
.. versionadded:: 1.1
"""
def close_detached(self, dbapi_connection):
"""Called when a detached DBAPI connection is closed.
The event is emitted before the close occurs.
The close of a connection can fail; typically this is because
the connection is already closed. If the close operation fails,
the connection is discarded.
.. versionadded:: 1.1
"""
class ConnectionEvents(event.Events):
"""Available events for :class:`.Connectable`, which includes
:class:`.Connection` and :class:`.Engine`.
The methods here define the name of an event as well as the names of
members that are passed to listener functions.
An event listener can be associated with any :class:`.Connectable`
class or instance, such as an :class:`.Engine`, e.g.::
from sqlalchemy import event, create_engine
def before_cursor_execute(conn, cursor, statement, parameters, context,
executemany):
log.info("Received statement: %s", statement)
engine = create_engine('postgresql://scott:tiger@localhost/test')
event.listen(engine, "before_cursor_execute", before_cursor_execute)
or with a specific :class:`.Connection`::
with engine.begin() as conn:
@event.listens_for(conn, 'before_cursor_execute')
def before_cursor_execute(conn, cursor, statement, parameters,
context, executemany):
log.info("Received statement: %s", statement)
When the methods are called with a `statement` parameter, such as in
:meth:`.after_cursor_execute`, :meth:`.before_cursor_execute` and
:meth:`.dbapi_error`, the statement is the exact SQL string that was
prepared for transmission to the DBAPI ``cursor`` in the connection's
:class:`.Dialect`.
The :meth:`.before_execute` and :meth:`.before_cursor_execute`
events can also be established with the ``retval=True`` flag, which
allows modification of the statement and parameters to be sent
to the database. The :meth:`.before_cursor_execute` event is
particularly useful here to add ad-hoc string transformations, such
as comments, to all executions::
from sqlalchemy.engine import Engine
from sqlalchemy import event
@event.listens_for(Engine, "before_cursor_execute", retval=True)
def comment_sql_calls(conn, cursor, statement, parameters,
context, executemany):
statement = statement + " -- some comment"
return statement, parameters
.. note:: :class:`.ConnectionEvents` can be established on any
combination of :class:`.Engine`, :class:`.Connection`, as well
as instances of each of those classes. Events across all
four scopes will fire off for a given instance of
:class:`.Connection`. However, for performance reasons, the
:class:`.Connection` object determines at instantiation time
whether or not its parent :class:`.Engine` has event listeners
established. Event listeners added to the :class:`.Engine`
class or to an instance of :class:`.Engine` *after* the instantiation
of a dependent :class:`.Connection` instance will usually
*not* be available on that :class:`.Connection` instance. The newly
added listeners will instead take effect for :class:`.Connection`
instances created subsequent to those event listeners being
established on the parent :class:`.Engine` class or instance.
:param retval=False: Applies to the :meth:`.before_execute` and
:meth:`.before_cursor_execute` events only. When True, the
user-defined event function must have a return value, which
is a tuple of parameters that replace the given statement
and parameters. See those methods for a description of
specific return arguments.
.. versionchanged:: 0.8 :class:`.ConnectionEvents` can now be associated
with any :class:`.Connectable` including :class:`.Connection`,
in addition to the existing support for :class:`.Engine`.
"""
_target_class_doc = "SomeEngine"
_dispatch_target = Connectable
@classmethod
def _listen(cls, event_key, retval=False):
target, identifier, fn = \
event_key.dispatch_target, event_key.identifier, \
event_key._listen_fn
target._has_events = True
if not retval:
if identifier == 'before_execute':
orig_fn = fn
def wrap_before_execute(conn, clauseelement,
multiparams, params):
orig_fn(conn, clauseelement, multiparams, params)
return clauseelement, multiparams, params
fn = wrap_before_execute
elif identifier == 'before_cursor_execute':
orig_fn = fn
def wrap_before_cursor_execute(conn, cursor, statement,
parameters, context,
executemany):
orig_fn(conn, cursor, statement,
parameters, context, executemany)
return statement, parameters
fn = wrap_before_cursor_execute
elif retval and \
identifier not in ('before_execute',
'before_cursor_execute', 'handle_error'):
raise exc.ArgumentError(
"Only the 'before_execute', "
"'before_cursor_execute' and 'handle_error' engine "
"event listeners accept the 'retval=True' "
"argument.")
event_key.with_wrapper(fn).base_listen()
def before_execute(self, conn, clauseelement, multiparams, params):
"""Intercept high level execute() events, receiving uncompiled
SQL constructs and other objects prior to rendering into SQL.
This event is good for debugging SQL compilation issues as well
as early manipulation of the parameters being sent to the database,
as the parameter lists will be in a consistent format here.
This event can be optionally established with the ``retval=True``
flag. The ``clauseelement``, ``multiparams``, and ``params``
arguments should be returned as a three-tuple in this case::
@event.listens_for(Engine, "before_execute", retval=True)
            def before_execute(conn, clauseelement, multiparams, params):
# do something with clauseelement, multiparams, params
return clauseelement, multiparams, params
:param conn: :class:`.Connection` object
:param clauseelement: SQL expression construct, :class:`.Compiled`
instance, or string statement passed to :meth:`.Connection.execute`.
:param multiparams: Multiple parameter sets, a list of dictionaries.
:param params: Single parameter set, a single dictionary.
See also:
:meth:`.before_cursor_execute`
"""
def after_execute(self, conn, clauseelement, multiparams, params, result):
"""Intercept high level execute() events after execute.
:param conn: :class:`.Connection` object
:param clauseelement: SQL expression construct, :class:`.Compiled`
instance, or string statement passed to :meth:`.Connection.execute`.
:param multiparams: Multiple parameter sets, a list of dictionaries.
:param params: Single parameter set, a single dictionary.
:param result: :class:`.ResultProxy` generated by the execution.
"""
def before_cursor_execute(self, conn, cursor, statement,
parameters, context, executemany):
"""Intercept low-level cursor execute() events before execution,
receiving the string SQL statement and DBAPI-specific parameter list to
be invoked against a cursor.
This event is a good choice for logging as well as late modifications
to the SQL string. It's less ideal for parameter modifications except
for those which are specific to a target backend.
This event can be optionally established with the ``retval=True``
flag. The ``statement`` and ``parameters`` arguments should be
returned as a two-tuple in this case::
@event.listens_for(Engine, "before_cursor_execute", retval=True)
def before_cursor_execute(conn, cursor, statement,
parameters, context, executemany):
# do something with statement, parameters
return statement, parameters
See the example at :class:`.ConnectionEvents`.
:param conn: :class:`.Connection` object
:param cursor: DBAPI cursor object
:param statement: string SQL statement, as to be passed to the DBAPI
:param parameters: Dictionary, tuple, or list of parameters being
passed to the ``execute()`` or ``executemany()`` method of the
DBAPI ``cursor``. In some cases may be ``None``.
:param context: :class:`.ExecutionContext` object in use. May
be ``None``.
:param executemany: boolean, if ``True``, this is an ``executemany()``
call, if ``False``, this is an ``execute()`` call.
See also:
:meth:`.before_execute`
:meth:`.after_cursor_execute`
"""
def after_cursor_execute(self, conn, cursor, statement,
parameters, context, executemany):
"""Intercept low-level cursor execute() events after execution.
:param conn: :class:`.Connection` object
:param cursor: DBAPI cursor object. Will have results pending
if the statement was a SELECT, but these should not be consumed
as they will be needed by the :class:`.ResultProxy`.
:param statement: string SQL statement, as passed to the DBAPI
:param parameters: Dictionary, tuple, or list of parameters being
passed to the ``execute()`` or ``executemany()`` method of the
DBAPI ``cursor``. In some cases may be ``None``.
:param context: :class:`.ExecutionContext` object in use. May
be ``None``.
:param executemany: boolean, if ``True``, this is an ``executemany()``
call, if ``False``, this is an ``execute()`` call.
"""
def dbapi_error(self, conn, cursor, statement, parameters,
context, exception):
"""Intercept a raw DBAPI error.
This event is called with the DBAPI exception instance
received from the DBAPI itself, *before* SQLAlchemy wraps the
        exception with its own exception wrappers, and before any
other operations are performed on the DBAPI cursor; the
existing transaction remains in effect as well as any state
on the cursor.
The use case here is to inject low-level exception handling
into an :class:`.Engine`, typically for logging and
debugging purposes.
.. warning::
Code should **not** modify
any state or throw any exceptions here as this will
interfere with SQLAlchemy's cleanup and error handling
routines. For exception modification, please refer to the
new :meth:`.ConnectionEvents.handle_error` event.
Subsequent to this hook, SQLAlchemy may attempt any
number of operations on the connection/cursor, including
closing the cursor, rolling back of the transaction in the
case of connectionless execution, and disposing of the entire
connection pool if a "disconnect" was detected. The
exception is then wrapped in a SQLAlchemy DBAPI exception
wrapper and re-thrown.
:param conn: :class:`.Connection` object
:param cursor: DBAPI cursor object
:param statement: string SQL statement, as passed to the DBAPI
:param parameters: Dictionary, tuple, or list of parameters being
passed to the ``execute()`` or ``executemany()`` method of the
DBAPI ``cursor``. In some cases may be ``None``.
:param context: :class:`.ExecutionContext` object in use. May
be ``None``.
:param exception: The **unwrapped** exception emitted directly from the
DBAPI. The class here is specific to the DBAPI module in use.
.. deprecated:: 0.9.7 - replaced by
:meth:`.ConnectionEvents.handle_error`
"""
def handle_error(self, exception_context):
"""Intercept all exceptions processed by the :class:`.Connection`.
This includes all exceptions emitted by the DBAPI as well as
within SQLAlchemy's statement invocation process, including
encoding errors and other statement validation errors. Other areas
in which the event is invoked include transaction begin and end,
result row fetching, cursor creation.
Note that :meth:`.handle_error` may support new kinds of exceptions
and new calling scenarios at *any time*. Code which uses this
event must expect new calling patterns to be present in minor
releases.
To support the wide variety of members that correspond to an exception,
as well as to allow extensibility of the event without backwards
incompatibility, the sole argument received is an instance of
:class:`.ExceptionContext`. This object contains data members
representing detail about the exception.
Use cases supported by this hook include:
* read-only, low-level exception handling for logging and
debugging purposes
* exception re-writing
The hook is called while the cursor from the failed operation
(if any) is still open and accessible. Special cleanup operations
can be called on this cursor; SQLAlchemy will attempt to close
this cursor subsequent to this hook being invoked. If the connection
is in "autocommit" mode, the transaction also remains open within
the scope of this hook; the rollback of the per-statement transaction
also occurs after the hook is called.
The user-defined event handler has two options for replacing
the SQLAlchemy-constructed exception into one that is user
defined. It can either raise this new exception directly, in
which case all further event listeners are bypassed and the
        exception will be raised, after appropriate cleanup has taken
place::
@event.listens_for(Engine, "handle_error")
def handle_exception(context):
if isinstance(context.original_exception,
psycopg2.OperationalError) and \\
"failed" in str(context.original_exception):
raise MySpecialException("failed operation")
.. warning:: Because the :meth:`.ConnectionEvents.handle_error`
event specifically provides for exceptions to be re-thrown as
the ultimate exception raised by the failed statement,
**stack traces will be misleading** if the user-defined event
handler itself fails and throws an unexpected exception;
the stack trace may not illustrate the actual code line that
failed! It is advised to code carefully here and use
logging and/or inline debugging if unexpected exceptions are
occurring.
Alternatively, a "chained" style of event handling can be
used, by configuring the handler with the ``retval=True``
modifier and returning the new exception instance from the
function. In this case, event handling will continue onto the
next handler. The "chained" exception is available using
:attr:`.ExceptionContext.chained_exception`::
@event.listens_for(Engine, "handle_error", retval=True)
def handle_exception(context):
if context.chained_exception is not None and \\
"special" in context.chained_exception.message:
return MySpecialException("failed",
cause=context.chained_exception)
Handlers that return ``None`` may remain within this chain; the
last non-``None`` return value is the one that continues to be
passed to the next handler.
When a custom exception is raised or returned, SQLAlchemy raises
this new exception as-is, it is not wrapped by any SQLAlchemy
object. If the exception is not a subclass of
:class:`sqlalchemy.exc.StatementError`,
certain features may not be available; currently this includes
the ORM's feature of adding a detail hint about "autoflush" to
exceptions raised within the autoflush process.
:param context: an :class:`.ExceptionContext` object. See this
class for details on all available members.
.. versionadded:: 0.9.7 Added the
:meth:`.ConnectionEvents.handle_error` hook.
.. versionchanged:: 1.0.0 The :meth:`.handle_error` event is now
invoked when an :class:`.Engine` fails during the initial
call to :meth:`.Engine.connect`, as well as when a
:class:`.Connection` object encounters an error during a
reconnect operation.
.. versionchanged:: 1.0.0 The :meth:`.handle_error` event is
not fired off when a dialect makes use of the
``skip_user_error_events`` execution option. This is used
by dialects which intend to catch SQLAlchemy-specific exceptions
within specific operations, such as when the MySQL dialect detects
a table not present within the ``has_table()`` dialect method.
Prior to 1.0.0, code which implements :meth:`.handle_error` needs
to ensure that exceptions thrown in these scenarios are re-raised
without modification.
"""
def engine_connect(self, conn, branch):
"""Intercept the creation of a new :class:`.Connection`.
This event is called typically as the direct result of calling
the :meth:`.Engine.connect` method.
It differs from the :meth:`.PoolEvents.connect` method, which
refers to the actual connection to a database at the DBAPI level;
a DBAPI connection may be pooled and reused for many operations.
In contrast, this event refers only to the production of a higher level
:class:`.Connection` wrapper around such a DBAPI connection.
It also differs from the :meth:`.PoolEvents.checkout` event
in that it is specific to the :class:`.Connection` object, not the
DBAPI connection that :meth:`.PoolEvents.checkout` deals with, although
this DBAPI connection is available here via the
:attr:`.Connection.connection` attribute. But note there can in fact
be multiple :meth:`.PoolEvents.checkout` events within the lifespan
of a single :class:`.Connection` object, if that :class:`.Connection`
is invalidated and re-established. There can also be multiple
:class:`.Connection` objects generated for the same already-checked-out
DBAPI connection, in the case that a "branch" of a :class:`.Connection`
is produced.
:param conn: :class:`.Connection` object.
:param branch: if True, this is a "branch" of an existing
:class:`.Connection`. A branch is generated within the course
of a statement execution to invoke supplemental statements, most
typically to pre-execute a SELECT of a default value for the purposes
of an INSERT statement.
.. versionadded:: 0.9.0
.. seealso::
:ref:`pool_disconnects_pessimistic` - illustrates how to use
:meth:`.ConnectionEvents.engine_connect`
to transparently ensure pooled connections are connected to the
database.
:meth:`.PoolEvents.checkout` the lower-level pool checkout event
for an individual DBAPI connection
:meth:`.ConnectionEvents.set_connection_execution_options` - a copy
of a :class:`.Connection` is also made when the
:meth:`.Connection.execution_options` method is called.
"""
def set_connection_execution_options(self, conn, opts):
"""Intercept when the :meth:`.Connection.execution_options`
method is called.
This method is called after the new :class:`.Connection` has been
produced, with the newly updated execution options collection, but
before the :class:`.Dialect` has acted upon any of those new options.
Note that this method is not called when a new :class:`.Connection`
is produced which is inheriting execution options from its parent
:class:`.Engine`; to intercept this condition, use the
:meth:`.ConnectionEvents.engine_connect` event.
:param conn: The newly copied :class:`.Connection` object
:param opts: dictionary of options that were passed to the
:meth:`.Connection.execution_options` method.
.. versionadded:: 0.9.0
.. seealso::
:meth:`.ConnectionEvents.set_engine_execution_options` - event
which is called when :meth:`.Engine.execution_options` is called.
"""
def set_engine_execution_options(self, engine, opts):
"""Intercept when the :meth:`.Engine.execution_options`
method is called.
The :meth:`.Engine.execution_options` method produces a shallow
copy of the :class:`.Engine` which stores the new options. That new
:class:`.Engine` is passed here. A particular application of this
method is to add a :meth:`.ConnectionEvents.engine_connect` event
handler to the given :class:`.Engine` which will perform some per-
:class:`.Connection` task specific to these execution options.
        :param engine: The newly copied :class:`.Engine` object
        :param opts: dictionary of options that were passed to the
         :meth:`.Engine.execution_options` method.
.. versionadded:: 0.9.0
.. seealso::
:meth:`.ConnectionEvents.set_connection_execution_options` - event
which is called when :meth:`.Connection.execution_options` is
called.
"""
def engine_disposed(self, engine):
"""Intercept when the :meth:`.Engine.dispose` method is called.
The :meth:`.Engine.dispose` method instructs the engine to
"dispose" of it's connection pool (e.g. :class:`.Pool`), and
replaces it with a new one. Disposing of the old pool has the
effect that existing checked-in connections are closed. The new
pool does not establish any new connections until it is first used.
This event can be used to indicate that resources related to the
:class:`.Engine` should also be cleaned up, keeping in mind that the
:class:`.Engine` can still be used for new requests in which case
it re-acquires connection resources.
.. versionadded:: 1.0.5
"""
def begin(self, conn):
"""Intercept begin() events.
:param conn: :class:`.Connection` object
"""
def rollback(self, conn):
"""Intercept rollback() events, as initiated by a
:class:`.Transaction`.
Note that the :class:`.Pool` also "auto-rolls back"
a DBAPI connection upon checkin, if the ``reset_on_return``
flag is set to its default value of ``'rollback'``.
To intercept this
rollback, use the :meth:`.PoolEvents.reset` hook.
:param conn: :class:`.Connection` object
.. seealso::
:meth:`.PoolEvents.reset`
"""
def commit(self, conn):
"""Intercept commit() events, as initiated by a
:class:`.Transaction`.
Note that the :class:`.Pool` may also "auto-commit"
a DBAPI connection upon checkin, if the ``reset_on_return``
flag is set to the value ``'commit'``. To intercept this
commit, use the :meth:`.PoolEvents.reset` hook.
:param conn: :class:`.Connection` object
"""
def savepoint(self, conn, name):
"""Intercept savepoint() events.
:param conn: :class:`.Connection` object
:param name: specified name used for the savepoint.
"""
def rollback_savepoint(self, conn, name, context):
"""Intercept rollback_savepoint() events.
:param conn: :class:`.Connection` object
:param name: specified name used for the savepoint.
:param context: :class:`.ExecutionContext` in use. May be ``None``.
"""
def release_savepoint(self, conn, name, context):
"""Intercept release_savepoint() events.
:param conn: :class:`.Connection` object
:param name: specified name used for the savepoint.
:param context: :class:`.ExecutionContext` in use. May be ``None``.
"""
def begin_twophase(self, conn, xid):
"""Intercept begin_twophase() events.
:param conn: :class:`.Connection` object
:param xid: two-phase XID identifier
"""
def prepare_twophase(self, conn, xid):
"""Intercept prepare_twophase() events.
:param conn: :class:`.Connection` object
:param xid: two-phase XID identifier
"""
def rollback_twophase(self, conn, xid, is_prepared):
"""Intercept rollback_twophase() events.
:param conn: :class:`.Connection` object
:param xid: two-phase XID identifier
:param is_prepared: boolean, indicates if
:meth:`.TwoPhaseTransaction.prepare` was called.
"""
def commit_twophase(self, conn, xid, is_prepared):
"""Intercept commit_twophase() events.
:param conn: :class:`.Connection` object
:param xid: two-phase XID identifier
:param is_prepared: boolean, indicates if
:meth:`.TwoPhaseTransaction.prepare` was called.
"""
class DialectEvents(event.Events):
"""event interface for execution-replacement functions.
These events allow direct instrumentation and replacement
of key dialect functions which interact with the DBAPI.
.. note::
:class:`.DialectEvents` hooks should be considered **semi-public**
and experimental.
These hooks are not for general use and are only for those situations
where intricate re-statement of DBAPI mechanics must be injected onto
an existing dialect. For general-use statement-interception events,
please use the :class:`.ConnectionEvents` interface.
.. seealso::
:meth:`.ConnectionEvents.before_cursor_execute`
:meth:`.ConnectionEvents.before_execute`
:meth:`.ConnectionEvents.after_cursor_execute`
:meth:`.ConnectionEvents.after_execute`
.. versionadded:: 0.9.4
"""
_target_class_doc = "SomeEngine"
_dispatch_target = Dialect
@classmethod
def _listen(cls, event_key, retval=False):
target, identifier, fn = \
event_key.dispatch_target, event_key.identifier, event_key.fn
target._has_events = True
event_key.base_listen()
@classmethod
def _accept_with(cls, target):
if isinstance(target, type):
if issubclass(target, Engine):
return Dialect
elif issubclass(target, Dialect):
return target
elif isinstance(target, Engine):
return target.dialect
else:
return target
def do_connect(self, dialect, conn_rec, cargs, cparams):
"""Receive connection arguments before a connection is made.
Return a DBAPI connection to halt further events from invoking;
the returned connection will be used.
Alternatively, the event can manipulate the cargs and/or cparams
collections; cargs will always be a Python list that can be mutated
in-place and cparams a Python dictionary. Return None to
allow control to pass to the next event handler and ultimately
to allow the dialect to connect normally, given the updated
arguments.
.. versionadded:: 1.0.3
"""
def do_executemany(self, cursor, statement, parameters, context):
"""Receive a cursor to have executemany() called.
Return the value True to halt further events from invoking,
and to indicate that the cursor execution has already taken
place within the event handler.
"""
def do_execute_no_params(self, cursor, statement, context):
"""Receive a cursor to have execute() with no parameters called.
Return the value True to halt further events from invoking,
and to indicate that the cursor execution has already taken
place within the event handler.
"""
def do_execute(self, cursor, statement, parameters, context):
"""Receive a cursor to have execute() called.
Return the value True to halt further events from invoking,
and to indicate that the cursor execution has already taken
place within the event handler.
"""
|
|
import theano
import logging
from theano import tensor as T
from blocks.bricks.base import application
from blocks.initialization import (Constant, Uniform)
import numpy as np
from toolz.itertoolz import interleave
from collections import Counter
from blocks.bricks.conv import (Convolutional, ConvolutionalSequence,
Flattener)
from blocks.bricks import (Initializable, Rectifier, FeedforwardSequence,
MLP, Activation, Softmax, Linear)
from blocks.model import Model
from blocks.graph import ComputationGraph
from blocks.algorithms import (Adam, GradientDescent, CompositeRule,
StepClipping, Scale)
from newblocks import (AsyncUpdate, AsyncRMSProp)
from blocks.bricks.recurrent import LSTM
logger = logging.getLogger(__name__)
class SharedA3CConvNet(FeedforwardSequence, Initializable):
""" Implements the Shared Layers of the Actor-Critic
Parameters
----------
conv_activations : list of `blocks.bricks.base.brick`
activation functions after every convolutional layers
num_channels : int
input channels in the first convolution layer. It is the number
of historic frames used as the input state of the agent.
image_shape : list of int
width and height shape of the resized image
    filter_sizes : list of (int, int)  # FIXME: change the name
        size of the filters (width, height) at each convolutional layer
    feature_maps : list of int
        number of filters at each convolutional layer
    pooling_sizes : list of (int, int)  # FIXME: not used
        size of the pooling layer. One element per convolutional layer
mlp_hiddens: list of int
size of the output layer of the hidden layers. One element per
hidden layer.
mlp_activations: list of `blocks.bricks.base.brick`
activation functions at each hidden layer. One element per layer
conv_step: list of (int, int)
typically called stride
border_mode : str
full or valid are accepted by Blocks. Full will be usually employed.
"""
def __init__(self, conv_activations, num_channels, image_shape,
filter_sizes, feature_maps, pooling_sizes, mlp_hiddens,
mlp_activations, conv_step=None, border_mode='valid',
**kwargs):
if conv_step is None:
self.conv_step = [(1, 1) for i in range(len(conv_activations))]
else:
self.conv_step = conv_step
self.num_channels = num_channels
self.image_shape = image_shape
self.border_mode = border_mode
self.top_mlp_activations = mlp_activations
conv_parameters = zip(filter_sizes, feature_maps)
# Build convolutional layers with corresponding parameters
self.layers = list(interleave([
(Convolutional(filter_size=filter_size,
num_filters=num_filter,
step=self.conv_step[i],
border_mode=self.border_mode,
name='conv_{}'.format(i))
for i, (filter_size, num_filter)
in enumerate(conv_parameters)),
conv_activations]))
# Build the sequence of conv layers
self.conv_sequence = ConvolutionalSequence(self.layers, num_channels,
image_size=image_shape)
# We need to flatten the output of the last convolutional layer.
# This brick accepts a tensor of dimension (batch_size, ...) and
# returns a matrix (batch_size, features)
self.top_mlp_dims = mlp_hiddens
# Flatten the output so it can be used by DenseLayers
self.flattener = Flattener()
self.top_mlp = MLP(self.top_mlp_activations, self.top_mlp_dims)
application_methods = [self.conv_sequence.apply,
self.flattener.apply,
self.top_mlp.apply]
# FIXME this line was commented
# self.children = [self.conv_sequence, self.flattener, self.top_mlp]
super(SharedA3CConvNet, self).__init__(application_methods, **kwargs)
@property
def output_dim(self):
return self.top_mlp_dims[-1]
@output_dim.setter
def output_dim(self, value):
self.top_mlp_dims[-1] = value
def _push_allocation_config(self):
self.conv_sequence._push_allocation_config()
conv_out_dim = self.conv_sequence.get_dim('output')
print "Input to MLP hidden layer ", [np.prod(conv_out_dim)]
self.top_mlp.activations = self.top_mlp_activations
self.top_mlp.dims = [np.prod(conv_out_dim)] + self.top_mlp_dims
print "Dimensions of hidden layer", self.top_mlp.dims
class PolicyAndValueA3C(Initializable):
"""
Parameters
----------
conv_activations : list of `blocks.bricks.base.brick`
activation functions after every convolutional layers
num_channels : int
input channels in the first convolution layer. It is the number
of historic frames used as the input state of the agent.
image_shape : list of int
width and height shape of the resized image
    filter_sizes : list of (int, int)  # FIXME: change the name
        size of the filters (width, height) at each convolutional layer
    feature_maps : list of int
        number of filters at each convolutional layer
    pooling_sizes : list of (int, int)  # FIXME: not used
        size of the pooling layer. One element per convolutional layer
mlp_hiddens: list of int
size of the output layer of the hidden layers. One element per
hidden layer.
number_actions: int
number of actions of the Actor (output of the policy network)
mlp_activations: list of `blocks.bricks.base.brick`
activation functions at each hidden layer. One element per layer
activation_policy: instance of :class: `blocks.bricks.base.brick`
        activation at the policy layer. Typically a Softmax because we
want the probabilities of each action
activation_value: instance of :class: `blocks.bricks.base.brick`
the original function is a Linear one which is the default in Blocks.
So None is the default.
conv_step: list of (int, int)
typically called stride
border_mode : str
full or valid are accepted by Blocks. Full will be usually employed.
beta: float
entropy error modulator. Default is 0.01
"""
def __init__(self, conv_activations, num_channels, image_shape,
filter_sizes, feature_maps, pooling_sizes, mlp_hiddens,
number_actions, mlp_activations,
activation_policy=Softmax(), activation_value=None,
conv_step=None, border_mode='valid', beta=1e-2, **kwargs):
self.activation_policy = activation_policy
self.activation_value = activation_value
self.beta = beta
self.shared_a3c = SharedA3CConvNet(conv_activations=conv_activations,
num_channels=num_channels,
image_shape=image_shape,
filter_sizes=filter_sizes,
feature_maps=feature_maps,
pooling_sizes=pooling_sizes,
mlp_hiddens=mlp_hiddens,
mlp_activations=mlp_activations,
conv_step=conv_step,
border_mode=border_mode, **kwargs)
# We build now the policy/value separated networks
print("Dimenson of the last shared layer {}".format(
self.shared_a3c.top_mlp_dims[-1]))
# Policy has one dimension per each action
self.policy = MLP([activation_policy], [
self.shared_a3c.top_mlp_dims[-1]] +
[number_actions], name="mlp_policy")
# The critic has one dimension in the output layer
self.value = MLP([activation_value], [
self.shared_a3c.top_mlp_dims[-1]] + [1],
name="mlp_value")
super(PolicyAndValueA3C, self).__init__(**kwargs)
#self.children = [ self.shared_a3c, self.fork]
self.children = [self.shared_a3c, self.policy, self.value]
@application(inputs=['input_image'], outputs=['output_policy'])
def apply_policy(self, input_image):
output_policy = self.policy.apply(self.shared_a3c.apply(input_image))
return output_policy
@application(inputs=['input_image'], outputs=['output_value'])
def apply_value(self, input_image):
output_value = self.value.apply(self.shared_a3c.apply(input_image))
return output_value
# FIXME: remove this function
@application(inputs=['input_image', 'input_actions', 'input_reward'],
outputs=['total_error', 'p_loss', 'v_loss', 'entropy',
'log_prob', 'advantage', 'v_value',
'sum_p_value'])
def extra_cost(self, input_image, input_actions, input_reward):
p_value = self.policy.apply(self.shared_a3c.apply(input_image))
log_prob = T.log(T.sum(p_value * input_actions, axis=1, keepdims=True))
v_value = self.value.apply(self.shared_a3c.apply(input_image))
advantage = (input_reward[:, None] - v_value)
p_loss = -1 * log_prob * advantage
entropy = -T.sum(p_value * T.log(p_value), axis=1, keepdims=True)
p_loss = p_loss - self.beta * entropy # add entropy penalty
v_loss = 0.5 * T.sqr(input_reward[:, None] - v_value)
total_error = T.mean(p_loss + (0.5 * v_loss.reshape(p_loss.shape)))
return (total_error, p_loss, v_loss, entropy, log_prob, advantage,
v_value, T.sum(p_value, axis=1))
@application(inputs=['input_image', 'input_actions', 'input_reward'],
outputs=['total_error'])
def cost(self, input_image, input_actions, input_reward):
p_value = (self.policy.apply(self.shared_a3c.apply(input_image)))
log_prob = T.log(T.sum((p_value) * input_actions,
axis=1, keepdims=True))
v_value = self.value.apply(self.shared_a3c.apply(input_image))
p_loss = -log_prob * theano.gradient.disconnected_grad(
input_reward[:, None] - v_value)
entropy = -T.sum(p_value * T.log(p_value), axis=1,
keepdims=True)
        # encourage action diversity by subtracting entropy
p_loss = p_loss - self.beta * entropy
v_loss = T.sqr(input_reward[:, None] - v_value)
total_error = T.mean(p_loss + (0.5 * v_loss))
return total_error
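# Hedged numeric sketch, not part of the model: recomputes the terms of
# cost() above with NumPy for one toy sample, making the loss explicit:
# policy loss -log pi(a|s) * (R - V) with the advantage treated as a
# constant, minus beta * entropy, plus 0.5 * (R - V)^2 for the critic.
# All numbers are arbitrary.
def _demo_a3c_loss_numpy(beta=1e-2):
    p_value = np.array([[0.7, 0.2, 0.1]])        # softmax policy output
    input_actions = np.array([[0.0, 1.0, 0.0]])  # one-hot action taken
    input_reward = np.array([1.5])               # discounted return R
    v_value = np.array([[1.0]])                  # critic estimate V(s)
    log_prob = np.log(np.sum(p_value * input_actions, axis=1, keepdims=True))
    advantage = input_reward[:, None] - v_value  # gradient-blocked in cost()
    entropy = -np.sum(p_value * np.log(p_value), axis=1, keepdims=True)
    p_loss = -log_prob * advantage - beta * entropy
    v_loss = np.square(input_reward[:, None] - v_value)
    return np.mean(p_loss + 0.5 * v_loss)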
class PolicyAndValueA3CLSTM(Initializable):
"""
Parameters
----------
conv_activations : list of `blocks.bricks.base.brick`
activation functions after every convolutional layers
num_channels : int
input channels in the first convolution layer. It is the number
of historic frames used as the input state of the agent.
image_shape : list of int
width and height shape of the resized image
    filter_sizes : list of (int, int)  # FIXME: change the name
        size of the filters (width, height) at each convolutional layer
    feature_maps : list of int
        number of filters at each convolutional layer
    pooling_sizes : list of (int, int)  # FIXME: not used
        size of the pooling layer. One element per convolutional layer
mlp_hiddens: list of int
size of the output layer of the hidden layers. One element per
hidden layer.
number_actions: int
number of actions of the Actor (output of the policy network)
mlp_activations: list of `blocks.bricks.base.brick`
activation functions at each hidden layer. One element per layer
activation_policy: instance of :class: `blocks.bricks.base.brick`
        activation at the policy layer. Typically a Softmax because we
want the probabilities of each action
activation_value: instance of :class: `blocks.bricks.base.brick`
the original function is a Linear one which is the default in Blocks.
So None is the default.
conv_step: list of (int, int)
typically called stride
border_mode : str
full or valid are accepted by Blocks. Full will be usually employed.
beta: float
entropy error modulator. Default is 0.01
lstm_output_units: int
number of LSTM output units
"""
def __init__(self, conv_activations, num_channels, image_shape,
filter_sizes, feature_maps, pooling_sizes, mlp_hiddens,
number_actions, mlp_activations,
activation_policy=Softmax(), activation_value=None,
conv_step=None, border_mode='valid', beta=1e-2,
lstm_output_units=None, **kwargs):
self.activation_policy = activation_policy
self.activation_value = activation_value
self.beta = beta
self.lstm_output_units = lstm_output_units
self.shared_a3c = SharedA3CConvNet(conv_activations=conv_activations,
num_channels=num_channels,
image_shape=image_shape,
filter_sizes=filter_sizes,
feature_maps=feature_maps,
pooling_sizes=pooling_sizes,
mlp_hiddens=mlp_hiddens,
mlp_activations=mlp_activations,
conv_step=conv_step,
border_mode=border_mode, **kwargs)
# We build now the policy/value separated networks
print("Dimenson of the last shared layer {}".format(
self.shared_a3c.top_mlp_dims[-1]))
# LSTM block
# Preparation to LSTM
print "LSTM UNITS ", self.lstm_output_units
self.linear_to_lstm = Linear(self.shared_a3c.top_mlp_dims[-1],
self.lstm_output_units * 4,
name='linear_to_lstm')
self.lstm_block = LSTM(lstm_output_units, name='lstm')
# activation=Rectifier())
# Policy has one dimension per each action
self.policy = MLP([activation_policy], [
lstm_output_units] +
[number_actions], name="mlp_policy")
# The critic has one dimension in the output layer
self.value = MLP([activation_value], [
lstm_output_units] + [1],
name="mlp_value")
super(PolicyAndValueA3CLSTM, self).__init__(**kwargs)
self.children = [self.shared_a3c, self.linear_to_lstm,
self.lstm_block,
self.policy, self.value]
@application(inputs=['input_image', 'states', 'cells'],
outputs=['output_policy', 'states', 'cells'])
def apply_policy(self, input_image, states, cells):
h, c = self.lstm_block.apply(inputs=self.linear_to_lstm.apply(
self.shared_a3c.apply(input_image)),
states=states, cells=cells)
h = h.sum(axis=1)
c = c.sum(axis=1)
output_policy = self.policy.apply(h)
return output_policy, h, c
@application(inputs=['input_image', 'states', 'cells'],
outputs=['output_value'])
def apply_value(self, input_image, states, cells):
h, c = self.lstm_block.apply(inputs=self.linear_to_lstm.apply(
self.shared_a3c.apply(input_image)),
states=states, cells=cells)
h = h.sum(axis=1)
c = c.sum(axis=1)
output_value = self.value.apply(h)
return output_value
@application(inputs=['input_image', 'input_actions', 'input_reward',
'states', 'cells'],
outputs=['total_error'])
def cost(self, input_image, input_actions, input_reward, states, cells):
h, c = self.lstm_block.apply(inputs=self.linear_to_lstm.apply(
self.shared_a3c.apply(input_image)),
states=states, cells=cells)
h = h.sum(axis=1)
c = c.sum(axis=1)
p_value = self.policy.apply(h)
log_prob = T.log(T.sum((p_value) * input_actions,
axis=1, keepdims=True))
v_value = self.value.apply(h)
p_loss = -log_prob * theano.gradient.disconnected_grad(
input_reward[:, None] - v_value)
entropy = -T.sum(p_value * T.log(p_value), axis=1,
keepdims=True)
# encourage action diversity by subtracting the entropy term
p_loss = p_loss - self.beta * entropy
v_loss = T.sqr(input_reward[:, None] - v_value)
total_error = T.mean(p_loss + (0.5 * v_loss))
return total_error
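# Hedged illustration (not part of the original module): a NumPy sketch of the
# scalar loss that cost() assembles symbolically above -- a policy-gradient term
# weighted by the advantage treated as a constant (the role of
# disconnected_grad), an entropy bonus scaled by beta, and half the squared
# value error. The helper name and shapes are assumptions for the example;
# numpy is assumed to be imported as np, as elsewhere in this module.
def _a3c_loss_sketch(p, actions, reward, value, beta=1e-2):
    """p: (batch, n_actions) probabilities; actions: one-hot (batch, n_actions);
    reward: (batch,) returns; value: (batch, 1) critic estimates."""
    log_prob = np.log(np.sum(p * actions, axis=1, keepdims=True))  # log pi(a|s)
    advantage = reward[:, None] - value                            # constant w.r.t. the policy
    p_loss = -log_prob * advantage
    entropy = -np.sum(p * np.log(p), axis=1, keepdims=True)        # exploration bonus
    p_loss = p_loss - beta * entropy
    v_loss = np.square(reward[:, None] - value)
    return np.mean(p_loss + 0.5 * v_loss)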
def build_a3c_network(feature_maps=[16, 32],
conv_sizes=[8, 4],
pool_sizes=[4, 2],
# FIXME: used image_shape elsewhere
image_size=(80, 80),
step_size=[4, 2],
num_channels=10,
mlp_hiddens=[256],
num_actions=10,
lr=0.00025,
clip_c=0.8,
border_mode='full',
async_update=False):
""" Builds the agent networks/functions
Parameters:
-----------
feature_maps : list of int
number of filters (feature maps) at each convolutional layer
conv_sizes: list of int # FIXME: change the name
width/height of the (square) filters at each convolutional layer
pooling_sizes: list of int # FIXME: not used
size of the pooling layer. One element per convolutional layer
image_size : list of int
width and height shape of the resized image
step_size: list of int
typically called stride
num_channels : int
input channels in the first convolution layer. It is the number
of historic frames used as the input state of the agent.
mlp_hiddens: list of int
output size of each hidden layer. One element per hidden layer.
num_actions: int
number of actions of the Actor (output of the policy network)
lr : float
learning rate of async rmsprop
clip_c : float
> 0 if gradient should be clipped. FIXME: actually not used
border_mode : str
'full' or 'valid' are accepted by Blocks. 'full' will usually be employed.
async_update: bool
True if the network to be created is the shared worker, False if it is
an ordinary worker.
"""
# Activation functions
conv_activations = [Rectifier() for _ in feature_maps]
mlp_activations = [Rectifier() for _ in mlp_hiddens]
conv_subsample = [[step, step] for step in step_size]
policy_and_value_net = PolicyAndValueA3C(
conv_activations,
num_channels,
image_size,
filter_sizes=zip(conv_sizes, conv_sizes),
feature_maps=feature_maps,
pooling_sizes=zip(pool_sizes, pool_sizes),
mlp_hiddens=mlp_hiddens,
number_actions=num_actions,
mlp_activations=mlp_activations,
conv_step=conv_subsample,
border_mode='full',
weights_init=Uniform(width=.2),
biases_init=Constant(.0))
# We push initialization config to set different initialization schemes
# for convolutional layers.
policy_and_value_net.shared_a3c.push_initialization_config()
policy_and_value_net.push_initialization_config()
# Xavier initialization
for i in range(len(policy_and_value_net.shared_a3c.layers)):
if i == 0:
policy_and_value_net.shared_a3c.layers[i].weights_init = Uniform(
std=1.0/np.sqrt((image_size[0] *
image_size[1] *
num_channels)))
else:
policy_and_value_net.shared_a3c.layers[i].weights_init = Uniform(
std=1.0/np.sqrt((conv_sizes[(i-1)/2] *
conv_sizes[(i-1)/2] *
feature_maps[(i-1)/2])))
policy_and_value_net.shared_a3c.layers[i].bias_init = Constant(.1)
for i in range(len(policy_and_value_net.shared_a3c.
top_mlp.linear_transformations)):
policy_and_value_net.shared_a3c.top_mlp.linear_transformations[
i].weights_init = Uniform(std=1.0/np.sqrt((conv_sizes[-1] *
conv_sizes[-1] *
feature_maps[-1])))
policy_and_value_net.shared_a3c.top_mlp.linear_transformations[
i].bias_init = Constant(.0)
policy_and_value_net.policy.weights_init = Uniform(
std=1.0/np.sqrt(mlp_hiddens[-1]))
policy_and_value_net.value.weights_init = Uniform(
std=1.0/np.sqrt(mlp_hiddens[-1]))
policy_and_value_net.shared_a3c.initialize()
policy_and_value_net.initialize()
logging.info("Input dim: {} {} {}".format(
*policy_and_value_net.shared_a3c.children[0].get_dim('input_')))
for i, layer in enumerate(policy_and_value_net.shared_a3c.layers):
if isinstance(layer, Activation):
logging.info("Layer {} ({})".format(
i, layer.__class__.__name__))
else:
logging.info("Layer {} ({}) dim: {} {} {}".format(
i, layer.__class__.__name__, *layer.get_dim('output')))
th_input_image = T.tensor4('input_image')
th_reward = T.fvector('input_reward')
th_actions = T.imatrix('input_actions')
policy_network = policy_and_value_net.apply_policy(th_input_image)
value_network = policy_and_value_net.apply_value(th_input_image)
cost_network = policy_and_value_net.cost(th_input_image, th_actions,
th_reward)
# FIXME: added for debug, remove
extracost_network = policy_and_value_net.extra_cost(th_input_image,
th_actions,
th_reward) # DEBUG
cg_policy = ComputationGraph(policy_network)
cg_value = ComputationGraph(value_network)
# Perform some optimization step
cg = ComputationGraph(cost_network)
# FIXME: Remove
cg_extra = ComputationGraph(extracost_network) # DEBUG
# Print shapes of network parameters
shapes = [param.get_value().shape for param in cg.parameters]
logger.info("Parameter shapes: ")
for shape, count in Counter(shapes).most_common():
logger.info(' {:15}: {}'.format(shape, count))
logger.info("Total number of parameters: {}".format(len(shapes)))
# Set up training algorithm
logger.info("Initializing training algorithm")
cost_model = Model(cost_network)
value_model = Model(value_network)
if not async_update:
# A threaded worker: steepest gradient descent
# A trick is used here to reuse existing bricks. The system performs
# steepest descent to aggregate the gradients. However, the gradients
# are averaged over a minibatch (instead of simply being added), so the
# agent performs the following operations for each minibatch:
# 1) steepest descent with a learning rate of 1, only to aggregate the
# gradients.
# 2) undo the update operation to obtain the average gradient:
# gradient = parameter_before_minibatch - parameter_after_minibatch
# 3) multiply the gradient by the length of the minibatch to obtain the
# exact gradient for each minibatch (worked example just below).
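# Worked example of steps 2)-3) with illustrative numbers only: with a
# learning rate of 1 and a gradient averaged over a minibatch of length N,
#   param_after = param_before - g_avg
#   g_avg       = param_before - param_after
#   g_total     = g_avg * N   # accumulated gradient handed to the shared worker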
algorithm = GradientDescent(
cost=cost_network, parameters=cg.parameters,
step_rule=Scale())
else:
# Async update for the shared worker
# The other part of the trick. A custom optimization block was
# developed
# here to receive as inputs the acc. gradients at each worker
algorithm = AsyncUpdate(parameters=cg.parameters,
inputs=cost_model.get_parameter_dict().keys(),
step_rule=AsyncRMSProp(learning_rate=lr,
# FIXME: put as
# parameter
decay_rate=0.99,
max_scaling=10))
algorithm.initialize()
f_cost = theano.function(inputs=cg.inputs, outputs=cg.outputs)
f_policy = theano.function(inputs=cg_policy.inputs,
outputs=cg_policy.outputs)
f_value = theano.function(inputs=cg_value.inputs, outputs=cg_value.outputs)
# f_extracost = theano.function(inputs=cg_extra.inputs,
# outputs=cg_extra.outputs)
return cost_model, f_policy, f_value, algorithm, f_cost
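# Hedged usage sketch (hypothetical helper, not part of the original module):
# how a worker might consume the objects returned by build_a3c_network. Shapes
# follow the defaults above (num_channels=10 historic frames of 80x80 pixels);
# numpy is assumed to be imported as np, as elsewhere in this module.
def _example_build_a3c_network_usage(num_actions=6):
    cost_model, f_policy, f_value, algorithm, f_cost = build_a3c_network(
        num_actions=num_actions, async_update=False)
    frames = np.zeros((1, 10, 80, 80), dtype="float32")  # (batch, channels, height, width)
    action_probs = f_policy(frames)[0]                   # softmax over the actions
    state_value = f_value(frames)[0]                     # critic estimate of the state
    return action_probs, state_value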
def build_a3c_network_lstm(feature_maps=[16, 32],
conv_sizes=[8, 4],
pool_sizes=[4, 2],
# FIXME: used image_shape elsewhere
image_size=(80, 80),
step_size=[4, 2],
num_channels=10,
mlp_hiddens=[256],
lstm_output_units=256,
num_actions=10,
lr=0.00025,
clip_c=0.8,
border_mode='full',
async_update=False):
""" Builds the agent networks/functions
Parameters:
-----------
feature_maps : list of int
number of filters (feature maps) at each convolutional layer
conv_sizes: list of int # FIXME: change the name
width/height of the (square) filters at each convolutional layer
pooling_sizes: list of int # FIXME: not used
size of the pooling layer. One element per convolutional layer
image_size : list of int
width and height shape of the resized image
step_size: list of int
typically called stride
num_channels : int
input channels in the first convolution layer. It is the number
of historic frames used as the input state of the agent.
mlp_hiddens: list of int
output size of each hidden layer. One element per hidden layer.
lstm_output_units: int
number of units in the lstm output
num_actions: int
number of actions of the Actor (output of the policy network)
lr : float
learning rate of async rmsprop
clip_c : float
> 0 if gradient should be clipped. FIXME: actually not used
border_mode : str
'full' or 'valid' are accepted by Blocks. 'full' will usually be employed.
async_update: bool
True if the network to be created is the shared worker, False if it is
an ordinary worker.
"""
# Activation functions
conv_activations = [Rectifier() for _ in feature_maps]
mlp_activations = [Rectifier() for _ in mlp_hiddens]
conv_subsample = [[step, step] for step in step_size]
policy_and_value_net = PolicyAndValueA3CLSTM(
conv_activations,
num_channels,
image_size,
filter_sizes=zip(conv_sizes, conv_sizes),
feature_maps=feature_maps,
pooling_sizes=zip(pool_sizes, pool_sizes),
mlp_hiddens=mlp_hiddens,
lstm_output_units=lstm_output_units,
number_actions=num_actions,
mlp_activations=mlp_activations,
conv_step=conv_subsample,
border_mode='full',
weights_init=Uniform(width=.2),
biases_init=Constant(.0))
# We push initialization config to set different initialization schemes
# for convolutional layers.
policy_and_value_net.shared_a3c.push_initialization_config()
policy_and_value_net.push_initialization_config()
# Xavier initialization
for i in range(len(policy_and_value_net.shared_a3c.layers)):
if i == 0:
policy_and_value_net.shared_a3c.layers[i].weights_init = Uniform(
std=1.0/np.sqrt((image_size[0] *
image_size[1] *
num_channels)))
else:
policy_and_value_net.shared_a3c.layers[i].weights_init = Uniform(
std=1.0/np.sqrt((conv_sizes[(i-1)/2] *
conv_sizes[(i-1)/2] *
feature_maps[(i-1)/2])))
policy_and_value_net.shared_a3c.layers[i].bias_init = Constant(.1)
for i in range(len(policy_and_value_net.shared_a3c.
top_mlp.linear_transformations)):
policy_and_value_net.shared_a3c.top_mlp.linear_transformations[
i].weights_init = Uniform(std=1.0/np.sqrt((conv_sizes[-1] *
conv_sizes[-1] *
feature_maps[-1])))
policy_and_value_net.shared_a3c.top_mlp.linear_transformations[
i].bias_init = Constant(.0)
policy_and_value_net.linear_to_lstm.weights_init = Uniform(
std=1.0/np.sqrt(mlp_hiddens[-1]))
policy_and_value_net.linear_to_lstm.biases_init = Constant(.0)
policy_and_value_net.linear_to_lstm.initialize()
policy_and_value_net.lstm_block.weights_init = Uniform(
std=1.0/np.sqrt(mlp_hiddens[-1]))
policy_and_value_net.lstm_block.biases_init = Constant(.0)
policy_and_value_net.lstm_block.initialize()
policy_and_value_net.policy.weights_init = Uniform(
std=1.0/np.sqrt(lstm_output_units))
policy_and_value_net.value.weights_init = Uniform(
std=1.0/np.sqrt(lstm_output_units))
policy_and_value_net.shared_a3c.initialize()
policy_and_value_net.initialize()
logging.info("Input dim: {} {} {}".format(
*policy_and_value_net.shared_a3c.children[0].get_dim('input_')))
for i, layer in enumerate(policy_and_value_net.shared_a3c.layers):
if isinstance(layer, Activation):
logging.info("Layer {} ({})".format(
i, layer.__class__.__name__))
else:
logging.info("Layer {} ({}) dim: {} {} {}".format(
i, layer.__class__.__name__, *layer.get_dim('output')))
th_input_image = T.tensor4('input_image')
th_reward = T.fvector('input_reward')
th_actions = T.imatrix('input_actions')
th_states = T.matrix('states')
th_cells = T.matrix('cells')
policy_network = policy_and_value_net.apply_policy(th_input_image,
th_states,
th_cells)
value_network = policy_and_value_net.apply_value(th_input_image,
th_states,
th_cells)
cost_network = policy_and_value_net.cost(th_input_image, th_actions,
th_reward, th_states,
th_cells)
cg_policy = ComputationGraph(policy_network)
cg_value = ComputationGraph(value_network)
print "POLICY INPUTS ", cg_policy.inputs
print "VALUE INPUTS ", cg_value.inputs
print "POLICY OUTPUTS ", cg_policy.outputs
print "VALUE OUTPUTS ", cg_value.outputs
# Perform some optimization step
cg = ComputationGraph(cost_network)
# Print shapes of network parameters
shapes = [param.get_value().shape for param in cg.parameters]
logger.info("Parameter shapes: ")
for shape, count in Counter(shapes).most_common():
logger.info(' {:15}: {}'.format(shape, count))
logger.info("Total number of parameters: {}".format(len(shapes)))
# Set up training algorithm
logger.info("Initializing training algorithm")
cost_model = Model(cost_network)
value_model = Model(value_network) # FIXME: delete
if not async_update:
# A threaded worker: steepest gradient descent
# A trick is used here to reuse existing bricks. The system performs
# steepest descent to aggregate the gradients. However, the gradients
# are averaged over a minibatch (instead of simply being added), so the
# agent performs the following operations for each minibatch:
# 1) steepest descent with a learning rate of 1, only to aggregate the
# gradients.
# 2) undo the update operation to obtain the average gradient:
# gradient = parameter_before_minibatch - parameter_after_minibatch
# 3) multiply the gradient by the length of the minibatch to obtain the
# exact gradient for each minibatch.
algorithm = GradientDescent(
cost=cost_network, parameters=cg.parameters,
step_rule=Scale())
else:
# Async update for the shared worker
# The other part of the trick. A custom optimization block was
# developed
# here to receive as inputs the acc. gradients at each worker
algorithm = AsyncUpdate(parameters=cg.parameters,
inputs=cost_model.get_parameter_dict().keys(),
step_rule=AsyncRMSProp(learning_rate=lr,
# FIXME: put as
# parameter
decay_rate=0.99,
max_scaling=10))
algorithm.initialize()
print "COST_INPUTS ", cg.inputs
f_cost = theano.function(inputs=cg.inputs, outputs=cg.outputs)
f_policy = theano.function(inputs=cg_policy.inputs,
outputs=cg_policy.outputs)
f_value = theano.function(inputs=cg_value.inputs, outputs=cg_value.outputs)
return cost_model, f_policy, f_value, algorithm, f_cost
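# Hedged sketch (hypothetical helper, not part of the original module) of how a
# worker threads the recurrent state with the LSTM variant: the h and c returned
# for one frame are fed back as states/cells for the next frame. The positional
# argument order of f_policy is an assumption here -- it must match the
# cg_policy.inputs list printed above.
def _example_lstm_policy_rollout(f_policy, frames, lstm_output_units=256):
    h = np.zeros((1, lstm_output_units), dtype="float32")
    c = np.zeros((1, lstm_output_units), dtype="float32")
    probs_per_step = []
    for frame in frames:                        # frame: (num_channels, height, width)
        probs, h, c = f_policy(frame[None], h, c)
        probs_per_step.append(probs)
    return probs_per_step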
if __name__ == "__main__":
""" A small code snippet to test the network """
feature_maps = [32, 64]
conv_sizes = [8, 4]
pool_sizes = [4, 2]
image_size = (80, 80)
step_size = [4, 2]
num_channels = 10
mlp_hiddens = [500, 256]
num_actions = 10
# dropout and gradient clipping
dropout = 0.2
clip_c = 0.8
# Initialize policy network
# Shared A3C
# Initialize value network
conv_activations = [Rectifier() for _ in feature_maps]
mlp_activations = [Rectifier() for _ in mlp_hiddens]
conv_subsample = [[step, step] for step in step_size]
policy_and_value_net = PolicyAndValueA3C(conv_activations, num_channels,
image_size,
filter_sizes=zip(
conv_sizes, conv_sizes),
feature_maps=feature_maps,
pooling_sizes=zip(
pool_sizes, pool_sizes),
mlp_hiddens=mlp_hiddens,
number_actions=num_actions,
mlp_activations=mlp_activations,
conv_step=conv_subsample,
border_mode='full',
weights_init=Uniform(width=.2),
biases_init=Constant(0))
# We push initialization config to set different initialization schemes
# for convolutional layers.
policy_and_value_net.shared_a3c.push_initialization_config()
policy_and_value_net.push_initialization_config()
policy_and_value_net.shared_a3c.layers[0].weights_init = Uniform(width=.2)
policy_and_value_net.shared_a3c.layers[1].weights_init = Uniform(width=.09)
policy_and_value_net.shared_a3c.top_mlp.linear_transformations[
0].weights_init = Uniform(width=.08)
policy_and_value_net.policy.weights_init = Uniform(width=.15)
policy_and_value_net.value.weights_init = Uniform(width=.15)
policy_and_value_net.shared_a3c.initialize()
policy_and_value_net.policy.initialize()
policy_and_value_net.value.initialize()
policy_and_value_net.initialize()
logging.info("Input dim: {} {} {}".format(
*policy_and_value_net.shared_a3c.children[0].get_dim('input_')))
for i, layer in enumerate(policy_and_value_net.shared_a3c.layers):
if isinstance(layer, Activation):
logging.info("Layer {} ({})".format(
i, layer.__class__.__name__))
else:
logging.info("Layer {} ({}) dim: {} {} {}".format(
i, layer.__class__.__name__, *layer.get_dim('output')))
x = T.tensor4('features')
policy = policy_and_value_net.apply_policy(x)
value = policy_and_value_net.apply_value(x)
num_batches = 32
random_data = np.array(np.random.randint(128,
size=(num_batches, num_channels,
image_size[0],
image_size[1])),
dtype="float32")
pol_result = policy.eval({x: random_data})
val_result = value.eval({x: random_data})
print "POLICY SHAPE ", np.shape(pol_result)
print "VALUE SHAPE ", np.shape(val_result)
th_reward = T.vector('ereward')
th_actions = T.imatrix('actions')
reward = np.array(np.random.rand((num_batches)), dtype="float32")
actions = np.zeros((num_batches, num_actions), dtype="int32")
for i in range(0, num_batches):
index_action = np.random.randint(num_actions)
actions[i, index_action] = 1
cost_network = policy_and_value_net.cost(x, th_actions, th_reward)
cost_results = cost_network.eval(
{x: random_data, th_actions: actions, th_reward: reward})
# Perform some optimization step
cg = ComputationGraph(cost_network)
# Print shapes
shapes = [param.get_value().shape for param in cg.parameters]
logger.info("Parameter shapes: ")
for shape, count in Counter(shapes).most_common():
logger.info(' {:15}: {}'.format(shape, count))
logger.info("Total number of parameters: {}".format(len(shapes)))
# Set up training algorithm
logger.info("Initializing training algorithm")
algorithm = GradientDescent(
cost=cost_network, parameters=cg.parameters,
step_rule=CompositeRule([StepClipping(clip_c),
Adam()]))
cost_model = Model(cost_network)
logger.info("Cost Model ".format(cost_model.get_parameter_dict()))
# Check A3C-LSTM network
lstm_output_units = mlp_hiddens[-1]
policy_and_value_net_lstm = PolicyAndValueA3CLSTM(
conv_activations, num_channels,
image_size,
filter_sizes=zip(
conv_sizes, conv_sizes),
feature_maps=feature_maps,
pooling_sizes=zip(
pool_sizes, pool_sizes),
mlp_hiddens=mlp_hiddens,
number_actions=num_actions,
mlp_activations=mlp_activations,
conv_step=conv_subsample,
border_mode='full',
weights_init=Uniform(width=.2),
biases_init=Constant(0),
lstm_output_units=lstm_output_units)
# We push initialization config to set different initialization schemes
# for convolutional layers.
policy_and_value_net_lstm.shared_a3c.push_initialization_config()
policy_and_value_net_lstm.push_initialization_config()
policy_and_value_net_lstm.shared_a3c.layers[
0].weights_init = Uniform(width=.2)
policy_and_value_net_lstm.shared_a3c.layers[
1].weights_init = Uniform(width=.09)
policy_and_value_net_lstm.shared_a3c.top_mlp.linear_transformations[
0].weights_init = Uniform(width=.08)
policy_and_value_net_lstm.policy.weights_init = Uniform(width=.15)
policy_and_value_net_lstm.value.weights_init = Uniform(width=.15)
policy_and_value_net_lstm.shared_a3c.initialize()
policy_and_value_net_lstm.policy.initialize()
policy_and_value_net_lstm.value.initialize()
policy_and_value_net_lstm.linear_to_lstm.weights_init = Uniform(width=.15)
policy_and_value_net_lstm.linear_to_lstm.biases_init = Constant(.0)
policy_and_value_net_lstm.linear_to_lstm.initialize()
policy_and_value_net_lstm.lstm_block.initialize()
policy_and_value_net_lstm.initialize()
logging.info("Input dim: {} {} {}".format(
*policy_and_value_net_lstm.shared_a3c.children[0].get_dim('input_')))
for i, layer in enumerate(policy_and_value_net_lstm.shared_a3c.layers):
if isinstance(layer, Activation):
logging.info("Layer {} ({})".format(
i, layer.__class__.__name__))
else:
logging.info("Layer {} ({}) dim: {} {} {}".format(
i, layer.__class__.__name__, *layer.get_dim('output')))
x = T.tensor4('features')
th_states = T.matrix('states')
th_cells = T.matrix('cells')
policy = policy_and_value_net_lstm.apply_policy(
x, th_states,
th_cells)
value = policy_and_value_net_lstm.apply_value(
x, th_states,
th_cells)
num_batches = 32
random_data = np.array(np.random.randint(128,
size=(num_batches, num_channels,
image_size[0],
image_size[1])),
dtype="float32")
random_states = np.array(np.random.rand(1, lstm_output_units),
dtype="float32")
random_cells = np.array(np.random.rand(1, lstm_output_units),
dtype="float32")
pol_result = policy[0].eval(
{x: random_data,
th_states: random_states,
th_cells: random_cells})
val_result = value.eval(
{x: random_data,
th_states: random_states,
th_cells: random_cells})
h_result = policy[1].eval(
{x: random_data,
th_states: random_states,
th_cells: random_cells})
c_result = policy[2].eval(
{x: random_data,
th_states: random_states,
th_cells: random_cells})
print "POLICY SHAPE LSTM ", np.shape(pol_result)
print "VALUE SHAPE LSTM ", np.shape(val_result)
print "H SHAPE LSTM ", np.shape(h_result)
print "C SHAPE LSTM ", np.shape(c_result)
th_reward = T.vector('ereward')
th_actions = T.imatrix('actions')
reward = np.array(np.random.rand((num_batches)), dtype="float32")
actions = np.zeros((num_batches, num_actions), dtype="int32")
for i in range(0, num_batches):
index_action = np.random.randint(num_actions)
actions[i, index_action] = 1
cost_network = policy_and_value_net_lstm.cost(x, th_actions, th_reward,
th_states, th_cells)
cost_results = cost_network.eval(
{x: random_data, th_actions: actions, th_reward: reward,
th_states: random_states, th_cells: random_cells})
# Perform some optimization step
cg = ComputationGraph(cost_network)
# Print shapes
shapes = [param.get_value().shape for param in cg.parameters]
logger.info("Parameter shapes: ")
for shape, count in Counter(shapes).most_common():
logger.info(' {:15}: {}'.format(shape, count))
logger.info("Total number of parameters: {}".format(len(shapes)))
# Set up training algorithm
logger.info("Initializing training algorithm")
algorithm = GradientDescent(
cost=cost_network, parameters=cg.parameters,
step_rule=CompositeRule([StepClipping(clip_c),
Adam()]))
cost_model = Model(cost_network)
logger.info("Cost Model ".format(cost_model.get_parameter_dict()))
# Check for different results with a batch
random_data = np.array(np.random.randint(128,
size=(1, num_channels,
image_size[0],
image_size[1])),
dtype="float32")
random_data = np.concatenate((random_data, random_data),
axis=0)
print "RANDOM_INPUT_", np.shape(random_data)
random_states = np.array(np.random.rand(1, lstm_output_units),
dtype="float32")
random_cells = np.array(np.random.rand(1, lstm_output_units),
dtype="float32")
pol_result = policy[0].eval(
{x: random_data,
th_states: random_states,
th_cells: random_cells})
next_state = policy[1].eval(
{x: random_data,
th_states: random_states,
th_cells: random_cells})
next_cell = policy[2].eval(
{x: random_data,
th_states: random_states,
th_cells: random_cells})
print "POLRESULT ", pol_result
print "NEXT_STATE {} SUM0 {} SUM1 {}".format(np.shape(next_state),
np.sum(next_state[0]),
np.sum(next_state[1]))
print "NEXT_CELL {} SUM0 {} SUM1 {}".format(np.shape(next_cell),
np.sum(next_cell[0]),
np.sum(next_cell[1]))
# Do the same step by step
prev_state = random_states
prev_cell = random_cells
pol_result = policy[0].eval(
{x: [random_data[0]],
th_states: prev_state,
th_cells: prev_cell})
next_state = policy[1].eval(
{x: [random_data[0]],
th_states: prev_state,
th_cells: prev_cell})
next_cell = policy[2].eval(
{x: [random_data[0]],
th_states: prev_state,
th_cells: prev_cell})
print "NEXT_STATE {} SUM1 {}".format(np.shape(next_state),
np.sum(next_state))
print "NEXT_CELL {} SUM1 {}".format(np.shape(next_cell),
np.sum(next_cell))
|
|
#!/Users/lcladm/anaconda2/bin/python
# TODO - python path - assuming conda here
## HEALTH WARNING - BETA CODE IN DEVELOPMENT ##
'''
This standalone application will build a mesh from a nifti classification file.
To keep the procedure as similar as possible to the way mrMesh used to do this,
we will keep this as a standalone application. Matlab reads in the segmented
nifti file using vistasoft's own nifti class handler meshBuild>mrmBuild>
meshBuildFromClass - we just don't use the old build_mesh mex file - we do that
bit and any smoothing in this application and send a mesh structure back to
matlab.
AG 2017
'''
import os,sys
import scipy
import vtk
from numpy import *
from scipy.io import loadmat, savemat
from vtk.util import numpy_support
debug = False
#TODO error handling
fileToLoad = sys.argv[1]
fileToSave = sys.argv[2]
# load the voxel data that has been dumped to disk
voxels = scipy.io.loadmat(fileToLoad)
mmPerVox = voxels['mmPerVox'][0]
if debug: print mmPerVox
voxels = voxels['voxels'] #unpack
if debug: print voxels
if debug: print shape(voxels)
extent = shape(voxels)
if debug: print extent
if debug: print extent[0]
if debug: print extent[1]
if debug: print extent[2]
'''
### ------------------------------------------------------------------------------
### this is faster but for now exactly replicate the way mrMesh sets up the volume array
# import voxels to vtk
dataImporter = vtk.vtkImageImport()
data_string = voxels.tostring()
dataImporter.CopyImportVoidPointer(data_string, len(data_string))
dataImporter.SetDataScalarTypeToUnsignedChar()
dataImporter.SetDataExtent(0, extent[2]-1, 0, extent[1]-1, 0, extent[0]-1) # TODO have to work this out
dataImporter.SetWholeExtent(0, extent[2]-1, 0, extent[1]-1, 0, extent[0]-1) # TODO have to work this out
dataImporter.SetDataSpacing(mmPerVox[0],mmPerVox[1],mmPerVox[2]) # TODO have to work this out
dataImporter.Update()
if debug: print dataImporter.GetOutput()
### ------------------------------------------------------------------------------
'''
### ------- the way mrMesh did it in mesh_build --------------------------------
pArray = map(ord,voxels.tostring()) #unpack
pDims = shape(voxels)
scale = mmPerVox
iSizes = [pDims[0]+2, pDims[1]+2, pDims[2]+2]
nTotalValues = iSizes[0] * iSizes[1] * iSizes[2]
pClassValues = vtk.vtkUnsignedCharArray()
pClassValues.SetNumberOfValues(nTotalValues)
pClassData = vtk.vtkStructuredPoints()
pClassData.SetDimensions(iSizes[0], iSizes[1], iSizes[2])
pClassData.SetOrigin(-scale[0], -scale[1], -scale[2]) #???
pClassData.SetOrigin(-1, -1, -1) #???
pClassData.SetSpacing(scale[0], scale[1], scale[2])
for iSrcZ in range(pDims[2]):
for iSrcY in range(pDims[1]):
iSrcIndex = iSrcZ * pDims[1] * pDims[0] + iSrcY * pDims[0]
iDstIndex = (iSrcZ+1) * iSizes[1] * iSizes[0] + (iSrcY+1) * iSizes[0] + 1
for iSrcX in range(pDims[0]):
fTemp = int(pArray[iSrcIndex])
#if debug: print fTemp, 'iSrcIndex', iSrcIndex, 'iDstIndex', iDstIndex
if fTemp>0:
pClassValues.SetValue(iDstIndex, 0)
else:
pClassValues.SetValue(iDstIndex, 1)
iSrcIndex+=1
iDstIndex+=1
pClassData.GetPointData().SetScalars(pClassValues)
pClassData.Modified()
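# A roughly equivalent vectorised construction (illustrative sketch only, not a
# drop-in replacement; it assumes the one-voxel border should stay background):
#   import numpy as np
#   padded = np.pad(voxels, 1, mode='constant', constant_values=0)
#   class_values = np.where(padded > 0, 0, 1).astype('uint8')  # tissue -> 0, background -> 1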
if debug:
spw = vtk.vtkStructuredPointsWriter()
spw.SetFileTypeToASCII()
spw.SetInputData(pClassData)
spw.SetFileName("/tmp/test-mrMeshPy-structuredPoints.vtk")
spw.Write()
spw.Update()
### ------ Data volume is loaded and constructed - extract some surfaces -------------
mc = vtk.vtkMarchingCubes()
# mc = vtk.vtkContourFilter() #- could use a contour filter instead?
# mc.SetInputConnection(dataImporter.GetOutputPort()) # later - for use with direct imagedata import
mc.SetInputData(pClassData)
mc.SetValue(0,0.5) #extract 0-th surface at 0.5?
mc.ComputeGradientsOff()
mc.ComputeNormalsOff()
mc.ComputeScalarsOff()
mc.Update()
if debug:
print mc.GetOutput()
write = vtk.vtkPolyDataWriter()
write.SetFileName('/tmp/test-mrMeshPy-marchingCubesOutput.txt')
write.SetFileTypeToASCII()
write.SetInputData(mc.GetOutput())
write.Write()
write.Update()
# ---- extract center surface - edges are normally extracted too (the cube around the edge of the volume)--------
confilter = vtk.vtkPolyDataConnectivityFilter()
confilter.SetInputConnection(mc.GetOutputPort())
confilter.SetExtractionModeToClosestPointRegion()
confilter.SetClosestPoint(extent[0]/2.0,extent[1]/2.0,extent[2]/2.0) # center of volume
confilter.Update()
# ---- Normals ---------------------
# normals already computed by mc algorithm so this code is obsolete
normals = vtk.vtkPolyDataNormals()
normals.ComputePointNormalsOn()
normals.SplittingOff()
normals.SetInputConnection(confilter.GetOutputPort())
#normals.SetInputData(discrete.GetOutput())
normals.Update()
print normals.GetOutput()
norm = normals.GetOutput().GetPointData().GetNormals()
output_normals = array(numpy_support.vtk_to_numpy(norm).transpose(),'d')
####if debug: print output_normals
# ---- Initial vertices - unsmoothed ---------------------
init_verts = normals.GetOutput().GetPoints().GetData()
output_init_verts = array(numpy_support.vtk_to_numpy(init_verts).transpose(),'d')
if debug: print output_init_verts
# ---- Polys (triangles) ---------------------
triangles = normals.GetOutput().GetPolys().GetData()
tmp_triangles = numpy_support.vtk_to_numpy(triangles)
# N.B. the polygon data returned here have 4 values per poly - the first is the number
# of vertices that describe the polygon (always 3 here) and the next 3 are the
# indices of the vertices that make up the polygon
# so first we need to reshape data from a vector
tmp_triangles = reshape(tmp_triangles,(len(tmp_triangles)/4,4))
# and then we drop the first column (all 3's)
output_triangles = array((tmp_triangles[:,1:4]).transpose(),'d') #remember zero index here, add one for matlab
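# e.g. (illustrative values) the flat cell array [3, 10, 11, 12, 3, 12, 11, 20]
# reshapes to [[3, 10, 11, 12], [3, 12, 11, 20]]; dropping the leading count
# column leaves the vertex index triples [[10, 11, 12], [12, 11, 20]].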
if debug: print output_triangles
# -------- smoothed version of mesh ----------------
smooth = vtk.vtkSmoothPolyDataFilter()
smooth.SetNumberOfIterations(32) #standard value used in old mrMesh
smooth.SetRelaxationFactor(0.5) #standard value used in old mrMesh
smooth.FeatureEdgeSmoothingOff()
smooth.SetFeatureAngle(45)
smooth.SetEdgeAngle(15)
smooth.SetBoundarySmoothing(1)
smooth.SetInputConnection(normals.GetOutputPort())
smooth.Update()
# different smoothing option?
'''
smooth = vtk.vtkWindowedSincPolyDataFilter()
smooth.SetInputConnection(mc.GetOutputPort())
smooth.SetNumberOfIterations(30)
smooth.SetPassBand(0.5)
smooth.SetFeatureAngle(45)
smooth.SetEdgeAngle(15)
smooth.SetBoundarySmoothing(1)
smooth.SetFeatureEdgeSmoothing(0)
smooth.Update()
'''
# ---- Vertices - smoothed ---------------------
smooth_verts = smooth.GetOutput().GetPoints().GetData()
output_smooth_verts = array(numpy_support.vtk_to_numpy(smooth_verts).transpose(),'d')
if debug: print output_smooth_verts
# ---- Curvature ---------------------
curvature = vtk.vtkCurvatures()
curvature.SetInputConnection(smooth.GetOutputPort())
curvature.SetCurvatureTypeToMean()
curvature.Update()
curv = curvature.GetOutput().GetPointData().GetScalars()
output_curvature = array(numpy_support.vtk_to_numpy(curv).transpose(),'d')
if debug: print min(output_curvature)
if debug: print max(output_curvature)
if debug: print output_curvature
# -------- colours based on curvature ------------
# turn curvature into color
tmp_colors = output_curvature.copy()
#min_curv = min(tmp_colors)
#max_curv = max(tmp_colors)
#tmp_colors = (tmp_colors -min_curv) / (max_curv-min_curv) *255
tmp_colors[tmp_colors>=0] = 85 #standard value used in old mrMesh
tmp_colors[tmp_colors<0] = 160 #standard value used in old mrMesh
output_colors = vstack((tmp_colors, tmp_colors, tmp_colors, ones((1,len(tmp_colors)))*255))
output_colors = array(output_colors,'d')
if debug: print output_colors
# OK we have all the data we need now, lets write it out to file
data = {} #empty dictionary
data['initVertices'] = output_init_verts
data['initialvertices'] = output_init_verts
data['vertices'] = output_smooth_verts
data['colors'] = output_colors
data['normals'] = output_normals
data['triangles'] = output_triangles
data['curvature'] = output_curvature
# save it out
savemat(fileToSave,data)
# data have been sent, but let's view them here
pdm = vtk.vtkPolyDataMapper()
pdm.SetInputConnection(confilter.GetOutputPort())
actor = vtk.vtkActor()
actor.SetMapper(pdm)
ren = vtk.vtkRenderer()
renWin = vtk.vtkRenderWindow()
renWin.AddRenderer(ren)
iren = vtk.vtkRenderWindowInteractor()
iren.SetRenderWindow(renWin)
ren.AddActor(actor)
ren.SetBackground(1,1,1)
renWin.SetSize(500,500)
iren.Initialize()
iren.Start()
pdm = vtk.vtkPolyDataMapper()
pdm.SetInputConnection(curvature.GetOutputPort())
actor = vtk.vtkActor()
actor.SetMapper(pdm)
ren = vtk.vtkRenderer()
renWin = vtk.vtkRenderWindow()
renWin.AddRenderer(ren)
iren = vtk.vtkRenderWindowInteractor()
iren.SetRenderWindow(renWin)
ren.AddActor(actor)
ren.SetBackground(1,1,1)
renWin.SetSize(500,500)
iren.Initialize()
iren.Start()
|
|
# -*- coding: utf-8 -*-
from operator import attrgetter
from pyangbind.lib.yangtypes import RestrictedPrecisionDecimalType
from pyangbind.lib.yangtypes import RestrictedClassType
from pyangbind.lib.yangtypes import TypedListType
from pyangbind.lib.yangtypes import YANGBool
from pyangbind.lib.yangtypes import YANGListType
from pyangbind.lib.yangtypes import YANGDynClass
from pyangbind.lib.yangtypes import ReferenceType
from pyangbind.lib.base import PybindBase
from collections import OrderedDict
from decimal import Decimal
from bitarray import bitarray
import six
# PY3 support of some PY2 keywords (needs improvement)
if six.PY3:
import builtins as __builtin__
long = int
elif six.PY2:
import __builtin__
from . import state
class link_protection_type(PybindBase):
"""
This class was auto-generated by the PythonClass plugin for PYANG
from YANG module openconfig-network-instance - based on the path /network-instances/network-instance/protocols/protocol/isis/levels/level/link-state-database/lsp/tlvs/tlv/isis-neighbor-attribute/neighbors/neighbor/subTLVs/subTLVs/link-protection-type. Each member element of
the container is represented as a class variable - with a specific
YANG type.
YANG Description: ISIS LSDB parameters relating to the type of link protection
offered.
"""
__slots__ = ("_path_helper", "_extmethods", "__state")
_yang_name = "link-protection-type"
_pybind_generated_by = "container"
def __init__(self, *args, **kwargs):
self._path_helper = False
self._extmethods = False
self.__state = YANGDynClass(
base=state.state,
is_container="container",
yang_name="state",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
extensions=None,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="container",
is_config=False,
)
load = kwargs.pop("load", None)
if args:
if len(args) > 1:
raise TypeError("cannot create a YANG container with >1 argument")
all_attr = True
for e in self._pyangbind_elements:
if not hasattr(args[0], e):
all_attr = False
break
if not all_attr:
raise ValueError("Supplied object did not have the correct attributes")
for e in self._pyangbind_elements:
nobj = getattr(args[0], e)
if nobj._changed() is False:
continue
setmethod = getattr(self, "_set_%s" % e)
if load is None:
setmethod(getattr(args[0], e))
else:
setmethod(getattr(args[0], e), load=load)
def _path(self):
if hasattr(self, "_parent"):
return self._parent._path() + [self._yang_name]
else:
return [
"network-instances",
"network-instance",
"protocols",
"protocol",
"isis",
"levels",
"level",
"link-state-database",
"lsp",
"tlvs",
"tlv",
"isis-neighbor-attribute",
"neighbors",
"neighbor",
"subTLVs",
"subTLVs",
"link-protection-type",
]
def _get_state(self):
"""
Getter method for state, mapped from YANG variable /network_instances/network_instance/protocols/protocol/isis/levels/level/link_state_database/lsp/tlvs/tlv/isis_neighbor_attribute/neighbors/neighbor/subTLVs/subTLVs/link_protection_type/state (container)
YANG Description: State parameters of sub-TLV 20.
"""
return self.__state
def _set_state(self, v, load=False):
"""
Setter method for state, mapped from YANG variable /network_instances/network_instance/protocols/protocol/isis/levels/level/link_state_database/lsp/tlvs/tlv/isis_neighbor_attribute/neighbors/neighbor/subTLVs/subTLVs/link_protection_type/state (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_state is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_state() directly.
YANG Description: State parameters of sub-TLV 20.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(
v,
base=state.state,
is_container="container",
yang_name="state",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
extensions=None,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="container",
is_config=False,
)
except (TypeError, ValueError):
raise ValueError(
{
"error-string": """state must be of a type compatible with container""",
"defined-type": "container",
"generated-type": """YANGDynClass(base=state.state, is_container='container', yang_name="state", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='container', is_config=False)""",
}
)
self.__state = t
if hasattr(self, "_set"):
self._set()
def _unset_state(self):
self.__state = YANGDynClass(
base=state.state,
is_container="container",
yang_name="state",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
extensions=None,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="container",
is_config=False,
)
state = __builtin__.property(_get_state)
_pyangbind_elements = OrderedDict([("state", state)])
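# Minimal usage sketch (hypothetical, not produced by pyangbind): the container
# exposes its single child as a read-only property, so callers read it directly
# while backends populate it through the private setter.
#   lpt = link_protection_type()
#   current = lpt.state          # the YANGDynClass-wrapped 'state' container
#   lpt._set_state(new_state)    # how a backend would load this read-only data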
from . import state
class link_protection_type(PybindBase):
"""
This class was auto-generated by the PythonClass plugin for PYANG
from YANG module openconfig-network-instance-l2 - based on the path /network-instances/network-instance/protocols/protocol/isis/levels/level/link-state-database/lsp/tlvs/tlv/isis-neighbor-attribute/neighbors/neighbor/subTLVs/subTLVs/link-protection-type. Each member element of
the container is represented as a class variable - with a specific
YANG type.
YANG Description: ISIS LSDB parameters relating to the type of link protection
offered.
"""
__slots__ = ("_path_helper", "_extmethods", "__state")
_yang_name = "link-protection-type"
_pybind_generated_by = "container"
def __init__(self, *args, **kwargs):
self._path_helper = False
self._extmethods = False
self.__state = YANGDynClass(
base=state.state,
is_container="container",
yang_name="state",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
extensions=None,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="container",
is_config=False,
)
load = kwargs.pop("load", None)
if args:
if len(args) > 1:
raise TypeError("cannot create a YANG container with >1 argument")
all_attr = True
for e in self._pyangbind_elements:
if not hasattr(args[0], e):
all_attr = False
break
if not all_attr:
raise ValueError("Supplied object did not have the correct attributes")
for e in self._pyangbind_elements:
nobj = getattr(args[0], e)
if nobj._changed() is False:
continue
setmethod = getattr(self, "_set_%s" % e)
if load is None:
setmethod(getattr(args[0], e))
else:
setmethod(getattr(args[0], e), load=load)
def _path(self):
if hasattr(self, "_parent"):
return self._parent._path() + [self._yang_name]
else:
return [
"network-instances",
"network-instance",
"protocols",
"protocol",
"isis",
"levels",
"level",
"link-state-database",
"lsp",
"tlvs",
"tlv",
"isis-neighbor-attribute",
"neighbors",
"neighbor",
"subTLVs",
"subTLVs",
"link-protection-type",
]
def _get_state(self):
"""
Getter method for state, mapped from YANG variable /network_instances/network_instance/protocols/protocol/isis/levels/level/link_state_database/lsp/tlvs/tlv/isis_neighbor_attribute/neighbors/neighbor/subTLVs/subTLVs/link_protection_type/state (container)
YANG Description: State parameters of sub-TLV 20.
"""
return self.__state
def _set_state(self, v, load=False):
"""
Setter method for state, mapped from YANG variable /network_instances/network_instance/protocols/protocol/isis/levels/level/link_state_database/lsp/tlvs/tlv/isis_neighbor_attribute/neighbors/neighbor/subTLVs/subTLVs/link_protection_type/state (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_state is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_state() directly.
YANG Description: State parameters of sub-TLV 20.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(
v,
base=state.state,
is_container="container",
yang_name="state",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
extensions=None,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="container",
is_config=False,
)
except (TypeError, ValueError):
raise ValueError(
{
"error-string": """state must be of a type compatible with container""",
"defined-type": "container",
"generated-type": """YANGDynClass(base=state.state, is_container='container', yang_name="state", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='container', is_config=False)""",
}
)
self.__state = t
if hasattr(self, "_set"):
self._set()
def _unset_state(self):
self.__state = YANGDynClass(
base=state.state,
is_container="container",
yang_name="state",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
extensions=None,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="container",
is_config=False,
)
state = __builtin__.property(_get_state)
_pyangbind_elements = OrderedDict([("state", state)])
|
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Admin views for managing volumes.
"""
from collections import OrderedDict
from django.core.urlresolvers import reverse
from django.core.urlresolvers import reverse_lazy
from django.utils.translation import ugettext_lazy as _
from horizon import exceptions
from horizon import forms
from horizon import tables
from horizon.utils import memoized
from horizon import views
from openstack_dashboard import api
from openstack_dashboard.dashboards.admin.volume_types \
import forms as volume_types_forms
from openstack_dashboard.dashboards.admin.volume_types \
import tables as volume_types_tables
from openstack_dashboard.dashboards.project.volumes \
import views as volumes_views
from openstack_dashboard import policy
class VolumeTypesView(tables.MultiTableView, volumes_views.VolumeTableMixIn):
table_classes = (volume_types_tables.VolumeTypesTable,
volume_types_tables.QosSpecsTable)
page_title = _("Volume Types")
template_name = "admin/volume_types/volume_types_tables.html"
def get_volume_types_data(self):
try:
volume_types = \
api.cinder.volume_type_list_with_qos_associations(self.request)
except Exception:
volume_types = []
exceptions.handle(self.request,
_("Unable to retrieve volume types"))
encryption_allowed = policy.check(
(("volume", "volume_extension:volume_type_encryption"),),
self.request)
if encryption_allowed:
# Gather volume type encryption information
try:
vol_type_enc_list = api.cinder.volume_encryption_type_list(
self.request)
except Exception:
vol_type_enc_list = []
msg = _(
'Unable to retrieve volume type encryption information.')
exceptions.handle(self.request, msg)
vol_type_enc_dict = OrderedDict([(e.volume_type_id, e) for e in
vol_type_enc_list])
for volume_type in volume_types:
vol_type_enc = vol_type_enc_dict.get(volume_type.id, None)
if vol_type_enc is not None:
volume_type.encryption = vol_type_enc
volume_type.encryption.name = volume_type.name
else:
volume_type.encryption = None
return volume_types
def get_qos_specs_data(self):
try:
qos_specs = api.cinder.qos_spec_list(self.request)
except Exception:
qos_specs = []
exceptions.handle(self.request,
_("Unable to retrieve QoS specs"))
return qos_specs
INDEX_URL = 'horizon:admin:volume_types:index'
class CreateVolumeTypeView(forms.ModalFormView):
form_class = volume_types_forms.CreateVolumeType
modal_id = "create_volume_type_modal"
template_name = 'admin/volume_types/create_volume_type.html'
submit_label = _("Create Volume Type")
submit_url = reverse_lazy("horizon:admin:volume_types:create_type")
success_url = reverse_lazy('horizon:admin:volume_types:index')
page_title = _("Create Volume Type")
class VolumeTypeEncryptionDetailView(views.HorizonTemplateView):
template_name = "admin/volume_types/volume_encryption_type_detail.html"
page_title = _("Volume Type Encryption Details")
def get_context_data(self, **kwargs):
context = super(VolumeTypeEncryptionDetailView, self).\
get_context_data(**kwargs)
context["volume_type_encryption"] = self.get_data()
return context
@memoized.memoized_method
def get_data(self):
try:
volume_type_id = self.kwargs['volume_type_id']
self._volume_type_encryption = api.cinder.\
volume_encryption_type_get(self.request, volume_type_id)
volume_type_list = api.cinder.volume_type_list(self.request)
for volume_type in volume_type_list:
if volume_type.id == volume_type_id:
self.name = volume_type.name
self._volume_type_encryption.name = self.name
except Exception:
redirect = reverse(INDEX_URL)
exceptions.handle(self.request,
_('Unable to retrieve volume type encryption'
' details.'),
redirect=redirect)
return None
return self._volume_type_encryption
class CreateVolumeTypeEncryptionView(forms.ModalFormView):
form_class = volume_types_forms.CreateVolumeTypeEncryption
form_id = "create_volume_form"
modal_id = "create_volume_type_modal"
template_name = "admin/volume_types/create_volume_type_encryption.html"
submit_label = _("Create Volume Type Encryption")
submit_url = "horizon:admin:volume_types:create_type_encryption"
success_url = reverse_lazy(INDEX_URL)
page_title = _("Create an Encrypted Volume Type")
@memoized.memoized_method
def get_name(self):
if not hasattr(self, "name"):
self.name = _get_volume_type_name(self.request, self.kwargs)
return self.name
def get_context_data(self, **kwargs):
context = super(CreateVolumeTypeEncryptionView, self).\
get_context_data(**kwargs)
context['volume_type_id'] = self.kwargs['volume_type_id']
args = (self.kwargs['volume_type_id'],)
context['submit_url'] = reverse(self.submit_url, args=args)
return context
def get_initial(self):
name = self.get_name()
return {'name': name,
'volume_type_id': self.kwargs['volume_type_id']}
class EditVolumeTypeView(forms.ModalFormView):
form_class = volume_types_forms.EditVolumeType
template_name = 'admin/volume_types/update_volume_type.html'
success_url = reverse_lazy('horizon:admin:volume_types:index')
cancel_url = reverse_lazy('horizon:admin:volume_types:index')
submit_label = _('Edit')
@memoized.memoized_method
def get_data(self):
try:
volume_type_id = self.kwargs['type_id']
volume_type = api.cinder.volume_type_get(self.request,
volume_type_id)
except Exception:
error_message = _(
'Unable to retrieve volume type for: "%s"') \
% volume_type_id
exceptions.handle(self.request,
error_message,
redirect=self.success_url)
return volume_type
def get_context_data(self, **kwargs):
context = super(EditVolumeTypeView, self).get_context_data(**kwargs)
context['volume_type'] = self.get_data()
return context
def get_initial(self):
volume_type = self.get_data()
return {'id': self.kwargs['type_id'],
'name': volume_type.name,
'is_public': getattr(volume_type, 'is_public', True),
'description': getattr(volume_type, 'description', "")}
def _get_volume_type_name(request, kwargs):
try:
volume_type_list = api.cinder.volume_type_list(request)
for volume_type in volume_type_list:
if volume_type.id == kwargs['volume_type_id']:
return volume_type.name
except Exception:
msg = _('Unable to retrieve volume type name.')
url = reverse(INDEX_URL)
exceptions.handle(request, msg, redirect=url)
class UpdateVolumeTypeEncryptionView(forms.ModalFormView):
form_class = volume_types_forms.UpdateVolumeTypeEncryption
form_id = "update_volume_form"
modal_id = "update_volume_type_modal"
template_name = "admin/volume_types/update_volume_type_encryption.html"
page_title = _("Update an Encrypted Volume Type")
submit_label = _("Update Volume Type Encryption")
submit_url = "horizon:admin:volume_types:update_type_encryption"
success_url = reverse_lazy('horizon:admin:volume_types:index')
def get_object(self):
if not hasattr(self, "_object"):
try:
self._object = api.cinder.\
volume_encryption_type_get(self.request,
self.kwargs['volume_type_id'])
except Exception:
msg = _('Unable to retrieve encryption type.')
url = reverse('horizon:admin:volume_types:index')
exceptions.handle(self.request, msg, redirect=url)
return self._object
@memoized.memoized_method
def get_name(self):
if not hasattr(self, "name"):
self.name = _get_volume_type_name(self.request, self.kwargs)
return self.name
def get_context_data(self, **kwargs):
context = super(UpdateVolumeTypeEncryptionView, self).\
get_context_data(**kwargs)
context['volume_type_id'] = self.kwargs['volume_type_id']
args = (self.kwargs['volume_type_id'],)
context['submit_url'] = reverse(self.submit_url, args=args)
return context
def get_initial(self):
encryption_type = self.get_object()
name = self.get_name()
return {'volume_type_id': encryption_type.volume_type_id,
'control_location': encryption_type.control_location,
'key_size': encryption_type.key_size,
'provider': encryption_type.provider,
'cipher': encryption_type.cipher,
'name': name}
class CreateQosSpecView(forms.ModalFormView):
form_class = volume_types_forms.CreateQosSpec
modal_id = "create_volume_type_modal"
template_name = 'admin/volume_types/create_qos_spec.html'
success_url = reverse_lazy('horizon:admin:volume_types:index')
page_title = _("Create QoS Spec")
submit_label = _("Create")
submit_url = reverse_lazy("horizon:admin:volume_types:create_qos_spec")
class EditQosSpecConsumerView(forms.ModalFormView):
form_class = volume_types_forms.EditQosSpecConsumer
modal_id = "edit_qos_spec_modal"
template_name = 'admin/volume_types/edit_qos_spec_consumer.html'
submit_label = _("Modify Consumer")
submit_url = "horizon:admin:volume_types:edit_qos_spec_consumer"
success_url = reverse_lazy('horizon:admin:volume_types:index')
page_title = _("Edit QoS Spec Consumer")
def get_context_data(self, **kwargs):
context = super(EditQosSpecConsumerView, self).\
get_context_data(**kwargs)
context['qos_spec_id'] = self.kwargs["qos_spec_id"]
args = (self.kwargs['qos_spec_id'],)
context['submit_url'] = reverse(self.submit_url, args=args)
return context
@memoized.memoized_method
def get_object(self, *args, **kwargs):
qos_spec_id = self.kwargs['qos_spec_id']
try:
self._object = api.cinder.qos_spec_get(self.request, qos_spec_id)
except Exception:
msg = _('Unable to retrieve QoS Spec details.')
exceptions.handle(self.request, msg)
return self._object
def get_initial(self):
qos_spec = self.get_object()
qos_spec_id = self.kwargs['qos_spec_id']
return {'qos_spec_id': qos_spec_id,
'qos_spec': qos_spec}
class ManageQosSpecAssociationView(forms.ModalFormView):
form_class = volume_types_forms.ManageQosSpecAssociation
modal_id = "associate_qos_spec_modal"
template_name = 'admin/volume_types/associate_qos_spec.html'
submit_label = _("Associate")
submit_url = "horizon:admin:volume_types:manage_qos_spec_association"
success_url = reverse_lazy('horizon:admin:volume_types:index')
page_title = _("Associate QoS Spec with Volume Type")
def get_context_data(self, **kwargs):
context = super(ManageQosSpecAssociationView, self).\
get_context_data(**kwargs)
context['type_id'] = self.kwargs["type_id"]
args = (self.kwargs['type_id'],)
context['submit_url'] = reverse(self.submit_url, args=args)
return context
@memoized.memoized_method
def get_object(self, *args, **kwargs):
type_id = self.kwargs['type_id']
try:
self._object = api.cinder.volume_type_get(self.request, type_id)
except Exception:
msg = _('Unable to retrieve volume type details.')
exceptions.handle(self.request, msg)
return self._object
@memoized.memoized_method
def get_qos_specs(self, *args, **kwargs):
try:
return api.cinder.qos_spec_list(self.request)
except Exception:
exceptions.handle(self.request,
_('Unable to retrieve QoS Specs.'))
def find_current_qos_spec_association(self, vol_type_id):
qos_specs = self.get_qos_specs()
if qos_specs:
try:
# find out which QOS Spec is currently associated with this
# volume type, if any
# NOTE - volume type can only have ONE QOS Spec association
for qos_spec in qos_specs:
type_ids = \
api.cinder.qos_spec_get_associations(self.request,
qos_spec.id)
for vtype in type_ids:
if vtype.id == vol_type_id:
return qos_spec
except Exception:
exceptions.handle(
self.request,
_('Unable to retrieve QoS Spec association.'))
return None
def get_initial(self):
volume_type = self.get_object()
vol_type_id = self.kwargs['type_id']
cur_qos_spec_id = None
cur_qos_spec_name = None
qos_spec = self.find_current_qos_spec_association(vol_type_id)
if qos_spec:
cur_qos_spec_id = qos_spec.id
cur_qos_spec_name = qos_spec.name
return {'type_id': vol_type_id,
'name': getattr(volume_type, 'name', None),
'cur_qos_spec_id': cur_qos_spec_id,
'cur_qos_spec_name': cur_qos_spec_name,
'qos_specs': self.get_qos_specs()}
class EditAccessView(forms.ModalFormView):
form_class = volume_types_forms.EditTypeAccessForm
template_name = 'admin/volume_types/update_access.html'
submit_label = _("Save")
submit_url = "horizon:admin:volume_types:edit_access"
success_url = reverse_lazy('horizon:admin:volume_types:index')
cancel_url = reverse_lazy('horizon:admin:volume_types:index')
page_title = _("Edit Volume Type Access")
def get_context_data(self, **kwargs):
context = super(EditAccessView, self).get_context_data(**kwargs)
context['volume_type_id'] = self.kwargs["volume_type_id"]
args = (self.kwargs['volume_type_id'],)
context['submit_url'] = reverse(self.submit_url, args=args)
return context
def get_initial(self):
return {'volume_type_id': self.kwargs['volume_type_id']}
|
|
from ._base import BaseAPIDispatch, BaseAPIEndpoint
class Usergroups(BaseAPIDispatch):
pass
@Usergroups.register('users')
class Users(BaseAPIDispatch):
pass
@Usergroups.register('create')
class UsergroupsCreate(BaseAPIEndpoint):
"""This method is used to create a User Group.
If successful, the command returns a usergroup object, including preferences:
.. code-block:: json
{
"ok": true,
"usergroup": {
"id": "S0615G0KT",
"team_id": "T060RNRCH",
"is_usergroup": true,
"name": "Marketing Team",
"description": "Marketing gurus, PR experts and product advocates.",
"handle": "marketing-team",
"is_external": false,
"date_create": 1446746793,
"date_update": 1446746793,
"date_delete": 0,
"auto_type": null,
"created_by": "U060RNRCZ",
"updated_by": "U060RNRCZ",
"deleted_by": null,
"prefs": {
"channels": [
],
"groups": [
]
},
"user_count": "0"
}
}
For more information see https://api.slack.com/methods/usergroups.create
"""
endpoint = 'usergroups.create'
required_args = {
'name',
}
optional_args = {
'channels',
'description',
'handle',
'include_count',
}
options = {
'include_token': True,
}
# Scope Information
scopes = {
'all': {
'usergroups:write',
},
'bot': set(),
'user': set(),
}
def __call__(self,
name,
channels=None,
description=None,
handle=None,
include_count=None,
):
"""Create a User Group
:param name: Required. A name for the User Group. Must be unique among User Groups. e.g. My Test Team
:param channels: Optional. A comma separated string of encoded channel IDs for which the User Group uses as a default. e.g. C1234567890,C2345678901,C3456789012
:param description: Optional. A short description of the User Group. e.g.
:param handle: Optional. A mention handle. Must be unique among channels, users and User Groups. e.g. marketing
:param include_count: Optional. Include the number of users in each User Group. e.g. true
"""
optional_kwargs = {}
if channels is not None:
optional_kwargs['channels'] = channels
if description is not None:
optional_kwargs['description'] = description
if handle is not None:
optional_kwargs['handle'] = handle
if include_count is not None:
optional_kwargs['include_count'] = include_count
return BaseAPIEndpoint.__call__(self,
name=name,
**optional_kwargs
)
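# Hypothetical wiring (illustrative only; the surrounding dispatch classes
# determine the real call path): once registered, the endpoint is invoked
# with its required argument plus any optional keywords, e.g.
#   usergroups.create(name='Marketing Team', handle='marketing-team')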
@Usergroups.register('disable')
class UsergroupsDisable(BaseAPIEndpoint):
"""This method disables an existing User Group.
.. code-block:: json
{
"ok": true,
"usergroup": {
"id": "S0615G0KT",
"team_id": "T060RNRCH",
"is_usergroup": true,
"name": "Marketing Team",
"description": "Marketing gurus, PR experts and product advocates.",
"handle": "marketing-team",
"is_external": false,
"date_create": 1446746793,
"date_update": 1446747568,
"date_delete": 1446747568,
"auto_type": null,
"created_by": "U060RNRCZ",
"updated_by": "U060RNRCZ",
"deleted_by": "U060RNRCZ",
"prefs": {
"channels": [
],
"groups": [
]
},
"user_count": "0"
}
}
When a User Group has been disabled its date_delete parameter will be non-zero.
For more information see https://api.slack.com/methods/usergroups.disable
"""
endpoint = 'usergroups.disable'
required_args = {
'usergroup',
}
optional_args = {
'include_count',
}
options = {
'include_token': True,
}
# Scope Information
scopes = {
'all': {
'usergroups:write',
},
'bot': set(),
'user': set(),
}
def __call__(self,
usergroup,
include_count=None,
):
"""Disable an existing User Group
:param usergroup: Required. The encoded ID of the User Group to disable. e.g. S0604QSJC
:param include_count: Optional. Include the number of users in the User Group. e.g. true
"""
optional_kwargs = {}
if include_count is not None:
optional_kwargs['include_count'] = include_count
return BaseAPIEndpoint.__call__(self,
usergroup=usergroup,
**optional_kwargs
)
@Usergroups.register('enable')
class UsergroupsEnable(BaseAPIEndpoint):
"""This method enables a User Group which was previously disabled.
.. code-block:: json
{
"ok": true,
"usergroup": {
"id": "S0615G0KT",
"team_id": "T060RNRCH",
"is_usergroup": true,
"name": "Marketing Team",
"description": "Marketing gurus, PR experts and product advocates.",
"handle": "marketing-team",
"is_external": false,
"date_create": 1446746793,
"date_update": 1446747767,
"date_delete": 0,
"auto_type": null,
"created_by": "U060RNRCZ",
"updated_by": "U060RNRCZ",
"deleted_by": null,
"prefs": {
"channels": [
],
"groups": [
]
},
"user_count": "0"
}
}
When a User Group is enabled, its date_delete parameter will be 0 (zero).
For more information see https://api.slack.com/methods/usergroups.enable
"""
endpoint = 'usergroups.enable'
required_args = {
'usergroup',
}
optional_args = {
'include_count',
}
options = {
'include_token': True,
}
# Scope Information
scopes = {
'all': {
'usergroups:write',
},
'bot': set(),
'user': set(),
}
def __call__(self,
usergroup,
include_count=None,
):
"""Enable a User Group
:param usergroup: Required. The encoded ID of the User Group to enable. e.g. S0604QSJC
:param include_count: Optional. Include the number of users in the User Group. e.g. true
"""
optional_kwargs = {}
if include_count is not None:
optional_kwargs['include_count'] = include_count
return BaseAPIEndpoint.__call__(self,
usergroup=usergroup,
**optional_kwargs
)
@Usergroups.register('list')
class UsergroupsList(BaseAPIEndpoint):
"""This method returns a list of all User Groups in the team. This can optionally include disabled User Groups.
Returns a list of usergroup objects, in no particular order:
.. code-block:: json
{
"ok": true,
"usergroups": [
{
"id": "S0614TZR7",
"team_id": "T060RNRCH",
"is_usergroup": true,
"name": "Team Admins",
"description": "A group of all Administrators on your team.",
"handle": "admins",
"is_external": false,
"date_create": 1446598059,
"date_update": 1446670362,
"date_delete": 0,
"auto_type": "admin",
"created_by": "USLACKBOT",
"updated_by": "U060RNRCZ",
"deleted_by": null,
"prefs": {
"channels": [
],
"groups": [
]
},
"user_count": "2"
},
{
"id": "S06158AV7",
"team_id": "T060RNRCH",
"is_usergroup": true,
"name": "Team Owners",
"description": "A group of all Owners on your team.",
"handle": "owners",
"is_external": false,
"date_create": 1446678371,
"date_update": 1446678371,
"date_delete": 0,
"auto_type": "owner",
"created_by": "USLACKBOT",
"updated_by": "USLACKBOT",
"deleted_by": null,
"prefs": {
"channels": [
],
"groups": [
]
},
"user_count": "1"
},
{
"id": "S0615G0KT",
"team_id": "T060RNRCH",
"is_usergroup": true,
"name": "Marketing Team",
"description": "Marketing gurus, PR experts and product advocates.",
"handle": "marketing-team",
"is_external": false,
"date_create": 1446746793,
"date_update": 1446747767,
"date_delete": 1446748865,
"auto_type": null,
"created_by": "U060RNRCZ",
"updated_by": "U060RNRCZ",
"deleted_by": null,
"prefs": {
"channels": [
],
"groups": [
]
},
"user_count": "0"
}
]
}
For more information see https://api.slack.com/methods/usergroups.list
"""
endpoint = 'usergroups.list'
required_args = {}
optional_args = {
'include_count',
'include_disabled',
'include_users',
}
options = {
'include_token': True,
}
# Scope Information
scopes = {
'all': {
'usergroups:read',
},
'bot': set(),
'user': set(),
}
def __call__(self,
include_count=None,
include_disabled=None,
include_users=None,
):
"""List all User Groups for a team
:param include_count: Optional. Include the number of users in each User Group. e.g. true
:param include_disabled: Optional. Include disabled User Groups. e.g. true
:param include_users: Optional. Include the list of users for each User Group. e.g. true
"""
optional_kwargs = {}
if include_count is not None:
optional_kwargs['include_count'] = include_count
if include_disabled is not None:
optional_kwargs['include_disabled'] = include_disabled
if include_users is not None:
optional_kwargs['include_users'] = include_users
return BaseAPIEndpoint.__call__(self,
**optional_kwargs
)
@Usergroups.register('update')
class UsergroupsUpdate(BaseAPIEndpoint):
"""This method updates the properties of an existing User Group.
.. code-block:: json
{
"ok": true,
"usergroup": {
"id": "S0615G0KT",
"team_id": "T060RNRCH",
"is_usergroup": true,
"name": "Marketing Gurus",
"description": "Marketing gurus, PR experts and product advocates.",
"handle": "marketing-team",
"is_external": false,
"date_create": 1446746793,
"date_update": 1446748574,
"date_delete": 0,
"auto_type": null,
"created_by": "U060RNRCZ",
"updated_by": "U060RNRCZ",
"deleted_by": null,
"prefs": {
"channels": [
],
"groups": [
]
},
"user_count": "0"
}
}
For more information see https://api.slack.com/methods/usergroups.update
"""
endpoint = 'usergroups.update'
required_args = {
'usergroup',
}
optional_args = {
'channels',
'description',
'handle',
'include_count',
'name',
}
options = {
'include_token': True,
}
# Scope Information
scopes = {
'all': {
'usergroups:write',
},
'bot': set(),
'user': set(),
}
def __call__(self,
usergroup,
channels=None,
description=None,
handle=None,
include_count=None,
name=None,
):
"""Update an existing User Group
:param usergroup: Required. The encoded ID of the User Group to update. e.g. S0604QSJC
:param channels: Optional. A comma separated string of encoded channel IDs that the User Group uses as a default. e.g. C1234567890,C2345678901,C3456789012
:param description: Optional. A short description of the User Group. e.g.
:param handle: Optional. A mention handle. Must be unique among channels, users and User Groups. e.g. marketing
:param include_count: Optional. Include the number of users in the User Group. e.g. true
:param name: Optional. A name for the User Group. Must be unique among User Groups. e.g. My Test Team
"""
optional_kwargs = {}
if channels is not None:
optional_kwargs['channels'] = channels
if description is not None:
optional_kwargs['description'] = description
if handle is not None:
optional_kwargs['handle'] = handle
if include_count is not None:
optional_kwargs['include_count'] = include_count
if name is not None:
optional_kwargs['name'] = name
return BaseAPIEndpoint.__call__(self,
usergroup=usergroup,
**optional_kwargs
)
@Users.register('list')
class UsersList(BaseAPIEndpoint):
"""This method returns a list of all users within a User Group.
.. code-block:: json
{
"ok": true,
"users": [
"U060R4BJ4"
]
}
For more information see https://api.slack.com/methods/usergroups.users.list
"""
endpoint = 'usergroups.users.list'
required_args = {
'usergroup',
}
optional_args = {
'include_disabled',
}
options = {
'include_token': True,
}
# Scope Information
scopes = {
'all': {
'usergroups:read',
},
'bot': set(),
'user': set(),
}
def __call__(self,
usergroup,
include_disabled=None,
):
"""List all users in a User Group
:param usergroup: Required. The encoded ID of the User Group to update. e.g. S0604QSJC
:param include_disabled: Optional. Allow results that involve disabled User Groups. e.g. true
"""
optional_kwargs = {}
if include_disabled is not None:
optional_kwargs['include_disabled'] = include_disabled
return BaseAPIEndpoint.__call__(self,
usergroup=usergroup,
**optional_kwargs
)
@Users.register('update')
class UsersUpdate(BaseAPIEndpoint):
"""This method updates the list of users that belong to a User Group. This method replaces all users in a User Group with the list of users provided in the users parameter.
.. code-block:: json
{
"ok": true,
"usergroup": {
"id": "S0616NG6M",
"team_id": "T060R4BHN",
"is_usergroup": true,
"name": "Marketing Team",
"description": "Marketing gurus, PR experts and product advocates.",
"handle": "marketing-team",
"is_external": false,
"date_create": 1447096577,
"date_update": 1447102109,
"date_delete": 0,
"auto_type": null,
"created_by": "U060R4BJ4",
"updated_by": "U060R4BJ4",
"deleted_by": null,
"prefs": {
"channels": [
],
"groups": [
]
},
"users": [
"U060R4BJ4",
"U060RNRCZ"
],
"user_count": 1
}
}
For more information see https://api.slack.com/methods/usergroups.users.update
"""
endpoint = 'usergroups.users.update'
required_args = {
'usergroup',
'users',
}
optional_args = {
'include_count',
}
options = {
'include_token': True,
}
# Scope Information
scopes = {
'all': {
'usergroups:write',
},
'bot': set(),
'user': set(),
}
def __call__(self,
usergroup,
users,
include_count=None,
):
"""Update the list of users for a User Group
:param usergroup: Required. The encoded ID of the User Group to update. e.g. S0604QSJC
:param users: Required. A comma separated string of encoded user IDs that represent the entire list of users for the User Group. e.g. U060R4BJ4,U060RNRCZ
:param include_count: Optional. Include the number of users in the User Group. e.g. true
"""
optional_kwargs = {}
if include_count is not None:
optional_kwargs['include_count'] = include_count
return BaseAPIEndpoint.__call__(self,
usergroup=usergroup,
users=users,
**optional_kwargs
)
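# A minimal usage sketch, assuming the nested ``users`` dispatcher registered above
# is reachable as ``usergroups.users``; the dotted access path is an assumption
# about BaseAPIDispatch, while the arguments and example IDs mirror
# UsersUpdate.__call__ and its docstring.
def _example_replace_usergroup_members(usergroups):
    """Set the complete member list of a User Group to two users."""
    return usergroups.users.update(
        usergroup='S0604QSJC',
        users='U060R4BJ4,U060RNRCZ',
        include_count=True,
    )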
|
|
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
script: univariate-analysis-2.py
following univariate-analysis.py, used to explore more features of univariate
analysis
In this script we show how to load/compute univariate analysis on other
types of observables, and how to build good-looking autocorrelation functions
by using the stationary hypothesis, where only time differences matter, which
in practice increases the effective sample size.
"""
from __future__ import print_function
from builtins import input
import argparse
import time
from tqdm import tqdm
import matplotlib.pyplot as plt
from tunacell import Experiment, Observable, FilterSet
from tunacell.base.observable import FunctionalObservable
from tunacell.filters.cells import FilterCellIDparity
from tunacell.stats.api import (compute_univariate, load_univariate,
compute_stationary, load_stationary, NoValidTimes)
from tunacell.stats.single import UnivariateIOError, StationaryUnivariateIOError
from tunacell.stats.utils import Regions, CompuParams
from tunacell.plotting.dynamics import plot_onepoint, plot_twopoints, plot_stationary
# close all open plots
plt.close('all')
# Arguments
argparser = argparse.ArgumentParser()
argparser.add_argument('-e', '--experiment', type=str,
help='Path to experiment root folder',
default='~/tmptunacell/simutest')
argparser.add_argument('-i', '--interactive',
help='Ask user to press Enter between parts',
action='store_true')
argparser.add_argument('--time', type=float,
help='Time per figure when non-interactive mode is on',
default=3)
args = argparser.parse_args()
single_plot_timing = args.time
msg = ('==============tunacell=tutorial==============\n'
'== ==\n'
'== Univariate analysis (2/2) ==\n'
'== ==\n'
'== This tutorial shows more details about ==\n'
'== the univariate analysis (statistics of ==\n'
'== single, dynamic observable): ==\n'
'== * import/export of results ==\n'
'== * details of stationary analysis ==\n'
'== * time-lapse, cell cycle observables ==\n'
'== (refer to comments in code to get more ==\n'
'== details) ==\n'
'== ==\n'
'==============tunacell=tutorial==============\n')
print(msg)
print()
# =============================================================================
# We start with the same settings as in univariate-analysis.py.
# We first load the univariate analysis that we performed, and exported
# in univariate-analysis.py (please run that script before starting this one)
# =============================================================================
msg = 'Loading experiment with evenID condition (from part 1/2)'
dashes = len(msg) * '*'
print(msg + '\n' + dashes)
# define the exp instance, no filter applied
path_to_exp = args.experiment
exp = Experiment(path_to_exp)
# define a condition
even = FilterCellIDparity('even')
condition = FilterSet(label='evenID', filtercell=even)
ou = Observable(name='exact-growth-rate', raw='ou')
# Reference values
md = exp.metadata
params = md['ornstein_uhlenbeck_params']
ref_mean = params['target']
ref_var = params['noise']/(2 * params['spring'])
ref_decayrate = params['spring']
print('Loading univariate results (computed and exported in part 1/2)')
# loading univariate analysis for the ou observable
univariate = load_univariate(exp, ou, cset=[condition, ])
# =============================================================================
# The last command would raise an exception of type UnivariateIOError if
# computations had not been exported before. We can use this property
# to try loading results, and if it fails, start the computation.
# Below we do so for a few other observables
# =============================================================================
# TIME-LAPSE OBSERVABLES (time-series per cell)
print('Defining a bunch of observables, time-lapse, and cell-cycle')
# local estimate of growth rate obtained by differentiating the size measurement
# (the raw column 'exp_ou_int' plays the role of cell size in our simulations)
gr = Observable(name='approx-growth-rate', raw='exp_ou_int',
differentiate=True, scale='log',
local_fit=True, time_window=15.)
# dynamic, functional observable: twice the growth rate
ou2 = FunctionalObservable(name='double-growth-rate', f=lambda x : 2 * x, observables=[ou, ])
# time-aligned upon root cell division for size analysis
# fixing tref allows aligning time series to a common origin; the 'root' option
# means that it will be aligned to each colony root cell division time
size = Observable(name='size', raw='exp_ou_int', tref='root')
continuous_obs = [ou, gr, ou2, size]
# SOME CELL-CYCLE TYPE OBSERVABLES (one value per cell)
# cell-cycle average growth rate
average_gr = Observable(name='average-growth-rate', raw='ou',
differentiate=False, scale='linear',
local_fit=False, mode='average', timing='g')
# size at cell division
division_size = Observable(name='division-size', raw='exp_ou_int',
differentiate=False, scale='log',
local_fit=False, mode='division', timing='g')
# increase in cell size timed at division time
increase = Observable(name='added-size', raw='exp_ou_int',
mode='net-increase-additive', timing='d')
cycle_obs = [average_gr, division_size, increase]
# Start computations
univariates_store = {}
figs = []
msg = 'Computing dynamic univariate statistics...'
dashes = len(msg) * '*'
print(msg + '\n' + dashes)
for obs in continuous_obs + cycle_obs:
print('* {} ...'.format(obs.name))
try:
univ = load_univariate(exp, obs, cset=[condition, ])
except UnivariateIOError:
univ = compute_univariate(exp, obs, cset=[condition, ])
univ.export_text() # save as text files
# store univariate object in a dic indexed by observable
univariates_store[obs] = univ
# some options for plotting functions
trefs = [40., 80., 150.]
grefs = [1, 2]
if obs in [ou, gr]:
kwargs = {'mean_ref': ref_mean,
'var_ref': ref_var}
kwargs2 = {'show_exp_decay': ref_decayrate,
'trefs': trefs}
elif obs in [ou2, ]:
kwargs = {'mean_ref': 2 * ref_mean,
'var_ref': 4 * ref_var}
kwargs2 = {'show_exp_decay': ref_decayrate,
'trefs': trefs}
elif obs in [size, increase]:
kwargs = {}
kwargs2 = {'trefs': trefs}
elif obs in [average_gr, ]:
kwargs = {'mean_ref': ref_mean}
kwargs2 = {'trefs': grefs}
else:
kwargs = {}
kwargs2 = {'trefs': grefs}
# print('Ok')
fig = plot_onepoint(univ, show_ci=True, save=True, verbose=False, **kwargs)
fig.show()
figs.append(fig)
fig2 = plot_twopoints(univ, save=True, verbose=False, **kwargs2)
# figs.append(fig2)  # commented: too many figures
if args.interactive:
ans = input('Press Enter to close these figures and proceed to stationary autocorrelation analysis')
else:
for seconds in tqdm(range(10*len(figs)), desc='waiting'):
time.sleep(single_plot_timing/10)
plt.close('all')
# =============================================================================
# A look at the onepoint functions allows the user to identify regions of time
# where the process looks stationary. There is a function to define such
# regions, and in fact, we already use one in the previous computations,
# defined by default: the region 'ALL' that comprises all time values.
# Here we mention the region used explicitly, and we stick to the 'ALL'
# region since we find the process stationary on the entire time course
# =============================================================================
regions = Regions(exp)
regions.reset() # eliminate all regions except 'ALL'
steady_region = regions.get('ALL')
# and we need to use some computation options (more on that elsewhere)
# define computation options
options = CompuParams() # leaving to default is safe
# =============================================================================
# Now we proceed in the same way: try to load, if it fails, compute.
# We call the plotting function accordingly.
# =============================================================================
msg = 'Computing stationary autocorrelation functions...'
dashes = len(msg) * '*'
print(msg + '\n' + dashes)
figs = []
for obs in continuous_obs + cycle_obs:
print('* {} ...'.format(obs.name))
# need the univariate object to compute stationary statistics
univ = univariates_store[obs]
try:
stat = load_stationary(univ, steady_region, options)
except StationaryUnivariateIOError:
try:
stat = compute_stationary(univ, steady_region, options)
stat.export_text() # save as text files
except NoValidTimes:
stat = None
# print('Ok')
# plotting features
if obs in [ou, gr, ou2]:
kwargs = {'show_exp_decay': ref_decayrate}
else:
kwargs = {}
if stat is not None:
fig = plot_stationary(stat, save=True, verbose=False, **kwargs)
fig.show()
figs.append(fig)
if args.interactive:
ans = input('Press Enter to close these figures and proceed')
else:
for seconds in tqdm(range(10*len(figs)), desc='waiting'):
time.sleep(single_plot_timing/10)
plt.close('all')
# =============================================================================
# For the sake of demonstration, we define here another, smaller region
# =============================================================================
msg = 'Selecting a smaller region of time'
dashes = len(msg) * '*'
print(msg + '\n' + dashes)
regions.add(name='beginning', tmin=0., tmax=100.)
reg = regions.get('beginning')
# we just start the computation for exact-growth-rate
univ = univariates_store[ou]
try:
stat = load_stationary(univ, reg, options)
except StationaryUnivariateIOError:
stat = compute_stationary(univ, reg, options)
stat.export_text()
fig = plot_stationary(stat, save=True, verbose=False, show_exp_decay=ref_decayrate)
fig.show()
if args.interactive:
ans = input('Press Enter to close these figures and terminate script')
else:
for seconds in tqdm(range(10), desc='waiting'):
time.sleep(single_plot_timing/10)
plt.close('all')
|
|
from __future__ import unicode_literals
from future.builtins import super
from datetime import timedelta
from django.contrib.auth.models import User
from django.contrib.messages import info, error
from django.shortcuts import get_object_or_404, redirect
from django.utils.timezone import now
from django.views.generic import ListView, CreateView, DetailView, TemplateView
from mezzanine.accounts import get_profile_model
from mezzanine.conf import settings
from mezzanine.generic.models import ThreadedComment, Keyword
from mezzanine.utils.views import paginate
from drum.links.forms import LinkForm
from drum.links.models import Link, WaveSurfComment
from drum.links.utils import order_by_score
# Returns the name to be used for reverse profile lookups from the user
# object. That's "profile" for the ``drum.links.Profile``, but otherwise
# depends on the model specified in ``AUTH_PROFILE_MODULE``.
USER_PROFILE_RELATED_NAME = get_profile_model().user.field.related_query_name()
class UserFilterView(ListView):
"""
List view that puts a ``profile_user`` variable into the context,
which is optionally retrieved by a ``username`` urlpattern var.
If a user is loaded, ``object_list`` is filtered by the loaded
user. Used for showing lists of links and comments.
"""
def get_context_data(self, **kwargs):
context = super(UserFilterView, self).get_context_data(**kwargs)
try:
username = self.kwargs["username"]
except KeyError:
profile_user = None
else:
users = User.objects.select_related(USER_PROFILE_RELATED_NAME)
lookup = {"username__iexact": username, "is_active": True}
profile_user = get_object_or_404(users, **lookup)
qs = context["object_list"].filter(user=profile_user)
context["object_list"] = qs
# Update context_object_name variable
context_object_name = self.get_context_object_name(context["object_list"])
context[context_object_name] = context["object_list"]
context["profile_user"] = profile_user
context["no_data"] = ("Whoa, there's like, literally no data here, "
"like seriously, I totally got nothin.")
return context
class ScoreOrderingView(UserFilterView):
"""
List view that optionally orders ``object_list`` by calculated
score. Subclasses must defined a ``date_field`` attribute for the
related model, that's used to determine time-scaled scoring.
Ordering by score is the default behaviour, but can be
overridden by passing ``False`` to the ``by_score`` arg in
urlpatterns, in which case ``object_list`` is sorted by most
recent, using the ``date_field`` attribute. Used for showing lists
of links and comments.
"""
def get_context_data(self, **kwargs):
context = super(ScoreOrderingView, self).get_context_data(**kwargs)
qs = context["object_list"]
context["by_score"] = self.kwargs.get("by_score", True)
if context["by_score"]:
qs = order_by_score(qs, self.score_fields, self.date_field)
else:
qs = qs.order_by("-" + self.date_field)
context["object_list"] = paginate(qs, self.request.GET.get("page", 1),
settings.ITEMS_PER_PAGE, settings.MAX_PAGING_LINKS)
# Update context_object_name variable
context_object_name = self.get_context_object_name(context["object_list"])
context[context_object_name] = context["object_list"]
context["title"] = self.get_title(context)
return context
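# A minimal urlpattern sketch of the ``by_score`` switch described above, assuming
# Django's ``url()`` helper; the regex and route name are illustrative only, the
# extra-kwargs dict is what feeds ``self.kwargs.get("by_score", True)``:
#
#     url(r"^newest/$", LinkList.as_view(), {"by_score": False}, name="link_list_newest"),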
class LinkView(object):
"""
List and detail view mixin for links - just defines the correct
queryset.
"""
def get_queryset(self):
return Link.objects.published().select_related(
"user",
"user__%s" % USER_PROFILE_RELATED_NAME
)
class LinkList(LinkView, ScoreOrderingView):
"""
List view for links, which can be for all users (homepage) or
a single user (links from user's profile page). Links can be
order by score (homepage, profile links) or by most recently
created ("newest" main nav item).
"""
date_field = "publish_date"
score_fields = ["rating_sum", "comments_count"]
def get_queryset(self):
queryset = super(LinkList, self).get_queryset()
tag = self.kwargs.get("tag")
if tag:
queryset = queryset.filter(keywords__keyword__slug=tag)
return queryset.prefetch_related("keywords__keyword")
def get_title(self, context):
tag = self.kwargs.get("tag")
if tag:
return get_object_or_404(Keyword, slug=tag).title
if context["by_score"]:
return "" # Homepage
if context["profile_user"]:
return "Links by %s" % getattr(
context["profile_user"],
USER_PROFILE_RELATED_NAME
)
else:
return "Newest"
class LinkCreate(CreateView):
"""
Link creation view - assigns the user to the new link, as well
as setting Mezzanine's ``gen_description`` attribute to ``False``,
so that we can provide our own descriptions.
"""
form_class = LinkForm
model = Link
def form_valid(self, form):
hours = getattr(settings, "ALLOWED_DUPLICATE_LINK_HOURS", None)
if hours and form.instance.link:
lookup = {
"link": form.instance.link,
"publish_date__gt": now() - timedelta(hours=hours),
}
try:
link = Link.objects.get(**lookup)
except Link.DoesNotExist:
pass
else:
error(self.request, "Link exists")
return redirect(link)
form.instance.user = self.request.user
form.instance.gen_description = False
info(self.request, "Link created")
return super(LinkCreate, self).form_valid(form)
class LinkDetail(LinkView, DetailView):
"""
Link detail view - threaded comments and rating are implemented
in its template.
"""
pass
class CommentList(ScoreOrderingView):
"""
List view for comments, which can be for all users ("comments" and
"best" main nav items) or a single user (comments from user's
profile page). Comments can be ordered by score ("best" main nav item)
or by most recently created ("comments" main nav item, profile
comments).
"""
date_field = "submit_date"
score_fields = ["rating_sum"]
def get_queryset(self):
qs = ThreadedComment.objects.filter(is_removed=False, is_public=True)
select = ["user", "user__%s" % (USER_PROFILE_RELATED_NAME)]
prefetch = ["content_object"]
return qs.select_related(*select).prefetch_related(*prefetch)
def get_title(self, context):
if context["profile_user"]:
return "Comments by %s" % getattr(
context["profile_user"],
USER_PROFILE_RELATED_NAME
)
elif context["by_score"]:
return "Best comments"
else:
return "Latest comments"
class WaveSurfCommentList(ScoreOrderingView):
date_field = "submit_date"
score_fields = ["rating_sum"]
def get_queryset(self):
qs = WaveSurfComment.objects.filter(is_removed=False, is_public=True)
select = ["user", "user__%s" % (USER_PROFILE_RELATED_NAME)]
prefetch = ["content_object"]
return qs.select_related(*select).prefetch_related(*prefetch)
def get_title(self, context):
if context["profile_user"]:
return "Comments by %s" % getattr(
context["profile_user"],
USER_PROFILE_RELATED_NAME
)
elif context["by_score"]:
return "Best comments"
else:
return "Latest comments"
class TagList(TemplateView):
template_name = "links/tag_list.html"
|
|
import copy
import pymc3 as pm
from lasagne import init
from gelato.specs.base import DistSpec, get_default_testval, smart_init
__all__ = [
'get_default_spec',
'set_default_spec',
'PartialSpec',
'UniformSpec',
'FlatSpec',
'NormalSpec',
'BetaSpec',
'ExponentialSpec',
'LaplaceSpec',
'StudentTSpec',
'CauchySpec',
'HalfCauchySpec',
'GammaSpec',
'WeibullSpec',
'LognormalSpec',
'ChiSquaredSpec',
'HalfNormalSpec',
'WaldSpec',
'ParetoSpec',
'InverseGammaSpec',
'ExGaussianSpec',
'VonMisesSpec',
'SkewNormalSpec',
# 'HalfStudentTSpec',
# 'NormalMixtureSpec'
]
_default_spec = DistSpec(pm.Normal, mu=0, sd=10, testval=smart_init)
def get_default_spec(testval=None):
# to avoid init collision
cp = copy.deepcopy(_default_spec)
if testval is None and cp.testval is None:
cp.testval = get_default_testval()
elif testval is not None:
cp.testval = testval
else:
pass
return cp
def set_default_spec(spec):
global _default_spec
_default_spec = spec
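# A minimal usage sketch for swapping the module-wide default prior. ``DistSpec``
# and ``smart_init`` are imported above; the Laplace parameterization mirrors
# ``LaplaceSpec`` further down, so only the choice of scale (b=10) is an assumption.
def _example_use_laplace_default():
    """Replace the default Normal(mu=0, sd=10) spec with a Laplace(mu=0, b=10) one."""
    set_default_spec(DistSpec(pm.Laplace, mu=0, b=10, testval=smart_init))
    return get_default_spec()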
class PartialSpec(DistSpec):
spec = None
def __init__(self, *args, **kwargs):
super(PartialSpec, self).__init__(self.spec, *args, **kwargs)
class UniformSpec(PartialSpec):
spec = pm.Uniform
__doc__ = """Gelato DistSpec with {dist} prior\n\n{doc}""".format(
dist=spec.__name__,
doc=spec.__doc__
)
def __init__(self, lower=0, upper=1):
super(UniformSpec, self).__init__(lower=lower, upper=upper)
class FlatSpec(PartialSpec):
spec = pm.Flat
__doc__ = """Gelato DistSpec with {dist} prior\n\n{doc}""".format(
dist=spec.__name__,
doc=spec.__doc__
)
def __init__(self):
super(FlatSpec, self).__init__(testval=init.Uniform(1))
class NormalSpec(PartialSpec):
spec = pm.Normal
__doc__ = """Gelato DistSpec with {dist} prior\n\n{doc}""".format(
dist=spec.__name__,
doc=spec.__doc__
)
def __init__(self, mu=0, sd=1):
super(NormalSpec, self).__init__(mu=mu, sd=sd)
class BetaSpec(PartialSpec):
spec = pm.Beta
__doc__ = """Gelato DistSpec with {dist} prior\n\n{doc}""".format(
dist=spec.__name__,
doc=spec.__doc__
)
def __init__(self, alpha=1, beta=1):
super(BetaSpec, self).__init__(alpha=alpha, beta=beta)
class ExponentialSpec(PartialSpec):
spec = pm.Exponential
__doc__ = """Gelato DistSpec with {dist} prior\n\n{doc}""".format(
dist=spec.__name__,
doc=spec.__doc__
)
def __init__(self, lam=1):
super(ExponentialSpec, self).__init__(lam=lam)
class LaplaceSpec(PartialSpec):
spec = pm.Laplace
__doc__ = """Gelato DistSpec with {dist} prior\n\n{doc}""".format(
dist=spec.__name__,
doc=spec.__doc__
)
def __init__(self, mu=0, b=1):
super(LaplaceSpec, self).__init__(mu=mu, b=b)
class StudentTSpec(PartialSpec):
spec = pm.StudentT
__doc__ = """Gelato DistSpec with {dist} prior\n\n{doc}""".format(
dist=spec.__name__,
doc=spec.__doc__
)
def __init__(self, nu, mu=0, sd=1):
super(StudentTSpec, self).__init__(nu=nu, mu=mu, sd=sd)
class CauchySpec(PartialSpec):
spec = pm.Cauchy
__doc__ = """Gelato DistSpec with {dist} prior\n\n{doc}""".format(
dist=spec.__name__,
doc=spec.__doc__
)
def __init__(self, alpha=0, beta=1):
super(CauchySpec, self).__init__(alpha=alpha, beta=beta)
class HalfCauchySpec(PartialSpec):
spec = pm.HalfCauchy
__doc__ = """Gelato DistSpec with {dist} prior\n\n{doc}""".format(
dist=spec.__name__,
doc=spec.__doc__
)
def __init__(self, beta):
super(HalfCauchySpec, self).__init__(beta=beta)
class GammaSpec(PartialSpec):
spec = pm.Gamma
__doc__ = """Gelato DistSpec with {dist} prior\n\n{doc}""".format(
dist=spec.__name__,
doc=spec.__doc__
)
def __init__(self, alpha, beta):
super(GammaSpec, self).__init__(alpha=alpha, beta=beta)
class WeibullSpec(PartialSpec):
spec = pm.Weibull
__doc__ = """Gelato DistSpec with {dist} prior\n\n{doc}""".format(
dist=spec.__name__,
doc=spec.__doc__
)
def __init__(self, alpha, beta):
super(WeibullSpec, self).__init__(alpha=alpha, beta=beta)
class LognormalSpec(PartialSpec):
spec = pm.Lognormal
__doc__ = """Gelato DistSpec with {dist} prior\n\n{doc}""".format(
dist=spec.__name__,
doc=spec.__doc__
)
def __init__(self, mu=0, sd=1):
super(LognormalSpec, self).__init__(mu=mu, sd=sd)
class ChiSquaredSpec(PartialSpec):
spec = pm.ChiSquared
__doc__ = """Gelato DistSpec with {dist} prior\n\n{doc}""".format(
dist=spec.__name__,
doc=spec.__doc__
)
def __init__(self, nu):
super(ChiSquaredSpec, self).__init__(nu=nu)
class HalfNormalSpec(PartialSpec):
spec = pm.HalfNormal
__doc__ = """Gelato DistSpec with {dist} prior\n\n{doc}""".format(
dist=spec.__name__,
doc=spec.__doc__
)
def __init__(self, sd=1):
super(HalfNormalSpec, self).__init__(sd=sd)
class WaldSpec(PartialSpec):
spec = pm.Wald
__doc__ = """Gelato DistSpec with {dist} prior\n\n{doc}""".format(
dist=spec.__name__,
doc=spec.__doc__
)
def __init__(self, mu, lam, alpha=0.):
super(WaldSpec, self).__init__(mu=mu, lam=lam, alpha=alpha)
class ParetoSpec(PartialSpec):
spec = pm.Pareto
__doc__ = """Gelato DistSpec with {dist} prior\n\n{doc}""".format(
dist=spec.__name__,
doc=spec.__doc__
)
def __init__(self, alpha, m):
super(ParetoSpec, self).__init__(alpha=alpha, m=m)
class InverseGammaSpec(PartialSpec):
spec = pm.InverseGamma
__doc__ = """Gelato DistSpec with {dist} prior\n\n{doc}""".format(
dist=spec.__name__,
doc=spec.__doc__
)
def __init__(self, alpha, beta=1):
super(InverseGammaSpec, self).__init__(alpha=alpha, beta=beta)
class ExGaussianSpec(PartialSpec):
spec = pm.ExGaussian
__doc__ = """Gelato DistSpec with {dist} prior\n\n{doc}""".format(
dist=spec.__name__,
doc=spec.__doc__
)
def __init__(self, mu, sd, nu):
super(ExGaussianSpec, self).__init__(mu=mu, sigma=sd, nu=nu)
class VonMisesSpec(PartialSpec):
spec = pm.VonMises
__doc__ = """Gelato DistSpec with {dist} prior\n\n{doc}""".format(
dist=spec.__name__,
doc=spec.__doc__
)
def __init__(self, mu, kappa):
super(VonMisesSpec, self).__init__(mu=mu, kappa=kappa)
class SkewNormalSpec(PartialSpec):
spec = pm.SkewNormal
__doc__ = """Gelato DistSpec with {dist} prior\n\n{doc}""".format(
dist=spec.__name__,
doc=spec.__doc__
)
def __init__(self, mu=0.0, sd=1, alpha=1):
super(SkewNormalSpec, self).__init__(mu=mu, sd=sd, alpha=alpha)
'''
class HalfStudentTSpec(PartialSpec):
__doc__ = """Gelato DistSpec with {dist} prior\n\n{doc}""".format(
dist=pm.HalfStudentT.distribution.__name__,
doc="""Bounded StudentT with support on [0, +inf]\n{doc}""".format(
doc=pm.StudentT.__doc__
)
)
spec = pm.HalfStudentT
def __init__(self, nu, mu=0, sd=1):
super(HalfStudentTSpec, self).__init__(nu=nu, mu=mu, sd=sd)
'''
'''
class NormalMixtureSpec(PartialSpec):
spec = pm.NormalMixture
__doc__ = """Gelato DistSpec with {dist} prior\n\n{doc}""".format(
dist=spec.__name__,
doc=spec.__doc__
)
def __init__(self, w, mu, sd=None, tau=None):
w = np.asarray(w)
mu = np.asarray(mu)
if sd is not None:
sd = np.asarray(sd)
if tau is not None:
tau = np.asarray(tau)
_, sd = get_tau_sd(tau, sd)
super(NormalMixtureSpec, self).__init__(w=w, mu=mu, sd=sd)
'''
|
|
# Copyright (C) 2016 EMC Corporation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Tests for group_snapshot code.
"""
import mock
import webob
from cinder.api.v3 import group_snapshots as v3_group_snapshots
from cinder import context
from cinder import db
from cinder import exception
from cinder.group import api as group_api
from cinder import objects
from cinder import test
from cinder.tests.unit.api import fakes
from cinder.tests.unit import fake_constants as fake
from cinder.tests.unit import utils
import cinder.volume
GROUP_MICRO_VERSION = '3.14'
class GroupSnapshotsAPITestCase(test.TestCase):
"""Test Case for group_snapshots API."""
def setUp(self):
super(GroupSnapshotsAPITestCase, self).setUp()
self.controller = v3_group_snapshots.GroupSnapshotsController()
self.volume_api = cinder.volume.API()
self.context = context.get_admin_context()
self.context.project_id = fake.PROJECT_ID
self.context.user_id = fake.USER_ID
self.user_ctxt = context.RequestContext(
fake.USER_ID, fake.PROJECT_ID, auth_token=True)
def test_show_group_snapshot(self):
group = utils.create_group(
self.context,
group_type_id=fake.GROUP_TYPE_ID,
volume_type_ids=[fake.VOLUME_TYPE_ID],)
volume_id = utils.create_volume(
self.context,
group_id=group.id,
volume_type_id=fake.VOLUME_TYPE_ID)['id']
group_snapshot = utils.create_group_snapshot(
self.context, group_id=group.id)
req = fakes.HTTPRequest.blank('/v3/%s/group_snapshots/%s' %
(fake.PROJECT_ID, group_snapshot.id),
version=GROUP_MICRO_VERSION)
res_dict = self.controller.show(req, group_snapshot.id)
self.assertEqual(1, len(res_dict))
self.assertEqual('this is a test group snapshot',
res_dict['group_snapshot']['description'])
self.assertEqual('test_group_snapshot',
res_dict['group_snapshot']['name'])
self.assertEqual('creating', res_dict['group_snapshot']['status'])
group_snapshot.destroy()
db.volume_destroy(context.get_admin_context(),
volume_id)
group.destroy()
def test_show_group_snapshot_with_group_snapshot_NotFound(self):
req = fakes.HTTPRequest.blank('/v3/%s/group_snapshots/%s' %
(fake.PROJECT_ID,
fake.WILL_NOT_BE_FOUND_ID),
version=GROUP_MICRO_VERSION)
self.assertRaises(exception.GroupSnapshotNotFound,
self.controller.show,
req, fake.WILL_NOT_BE_FOUND_ID)
def test_list_group_snapshots_json(self):
group = utils.create_group(
self.context,
group_type_id=fake.GROUP_TYPE_ID,
volume_type_ids=[fake.VOLUME_TYPE_ID],)
volume_id = utils.create_volume(
self.context,
group_id=group.id,
volume_type_id=fake.VOLUME_TYPE_ID)['id']
group_snapshot1 = utils.create_group_snapshot(
self.context, group_id=group.id)
group_snapshot2 = utils.create_group_snapshot(
self.context, group_id=group.id)
group_snapshot3 = utils.create_group_snapshot(
self.context, group_id=group.id)
req = fakes.HTTPRequest.blank('/v3/%s/group_snapshots' %
fake.PROJECT_ID,
version=GROUP_MICRO_VERSION)
res_dict = self.controller.index(req)
self.assertEqual(1, len(res_dict))
self.assertEqual(group_snapshot1.id,
res_dict['group_snapshots'][0]['id'])
self.assertEqual('test_group_snapshot',
res_dict['group_snapshots'][0]['name'])
self.assertEqual(group_snapshot2.id,
res_dict['group_snapshots'][1]['id'])
self.assertEqual('test_group_snapshot',
res_dict['group_snapshots'][1]['name'])
self.assertEqual(group_snapshot3.id,
res_dict['group_snapshots'][2]['id'])
self.assertEqual('test_group_snapshot',
res_dict['group_snapshots'][2]['name'])
group_snapshot3.destroy()
group_snapshot2.destroy()
group_snapshot1.destroy()
db.volume_destroy(context.get_admin_context(),
volume_id)
group.destroy()
def test_list_group_snapshots_detail_json(self):
group = utils.create_group(
self.context,
group_type_id=fake.GROUP_TYPE_ID,
volume_type_ids=[fake.VOLUME_TYPE_ID],)
volume_id = utils.create_volume(
self.context,
group_id=group.id,
volume_type_id=fake.VOLUME_TYPE_ID)['id']
group_snapshot1 = utils.create_group_snapshot(
self.context, group_id=group.id)
group_snapshot2 = utils.create_group_snapshot(
self.context, group_id=group.id)
group_snapshot3 = utils.create_group_snapshot(
self.context, group_id=group.id)
req = fakes.HTTPRequest.blank('/v3/%s/group_snapshots/detail' %
fake.PROJECT_ID,
version=GROUP_MICRO_VERSION)
res_dict = self.controller.detail(req)
self.assertEqual(1, len(res_dict))
self.assertEqual(3, len(res_dict['group_snapshots']))
self.assertEqual('this is a test group snapshot',
res_dict['group_snapshots'][0]['description'])
self.assertEqual('test_group_snapshot',
res_dict['group_snapshots'][0]['name'])
self.assertEqual(group_snapshot1.id,
res_dict['group_snapshots'][0]['id'])
self.assertEqual('creating',
res_dict['group_snapshots'][0]['status'])
self.assertEqual('this is a test group snapshot',
res_dict['group_snapshots'][1]['description'])
self.assertEqual('test_group_snapshot',
res_dict['group_snapshots'][1]['name'])
self.assertEqual(group_snapshot2.id,
res_dict['group_snapshots'][1]['id'])
self.assertEqual('creating',
res_dict['group_snapshots'][1]['status'])
self.assertEqual('this is a test group snapshot',
res_dict['group_snapshots'][2]['description'])
self.assertEqual('test_group_snapshot',
res_dict['group_snapshots'][2]['name'])
self.assertEqual(group_snapshot3.id,
res_dict['group_snapshots'][2]['id'])
self.assertEqual('creating',
res_dict['group_snapshots'][2]['status'])
group_snapshot3.destroy()
group_snapshot2.destroy()
group_snapshot1.destroy()
db.volume_destroy(context.get_admin_context(),
volume_id)
group.destroy()
@mock.patch(
'cinder.api.openstack.wsgi.Controller.validate_name_and_description')
@mock.patch('cinder.db.volume_type_get')
@mock.patch('cinder.quota.VolumeTypeQuotaEngine.reserve')
def test_create_group_snapshot_json(self, mock_quota, mock_vol_type,
mock_validate):
group = utils.create_group(
self.context,
group_type_id=fake.GROUP_TYPE_ID,
volume_type_ids=[fake.VOLUME_TYPE_ID],)
volume_id = utils.create_volume(
self.context,
group_id=group.id,
volume_type_id=fake.VOLUME_TYPE_ID)['id']
body = {"group_snapshot": {"name": "group_snapshot1",
"description":
"Group Snapshot 1",
"group_id": group.id}}
req = fakes.HTTPRequest.blank('/v3/%s/group_snapshots' %
fake.PROJECT_ID,
version=GROUP_MICRO_VERSION)
res_dict = self.controller.create(req, body)
self.assertEqual(1, len(res_dict))
self.assertIn('id', res_dict['group_snapshot'])
self.assertTrue(mock_validate.called)
group.destroy()
group_snapshot = objects.GroupSnapshot.get_by_id(
context.get_admin_context(), res_dict['group_snapshot']['id'])
db.volume_destroy(context.get_admin_context(),
volume_id)
group_snapshot.destroy()
@mock.patch(
'cinder.api.openstack.wsgi.Controller.validate_name_and_description')
@mock.patch('cinder.db.volume_type_get')
def test_create_group_snapshot_when_volume_in_error_status(
self, mock_vol_type, mock_validate):
group = utils.create_group(
self.context,
group_type_id=fake.GROUP_TYPE_ID,
volume_type_ids=[fake.VOLUME_TYPE_ID],)
volume_id = utils.create_volume(
self.context,
status='error',
group_id=group.id,
volume_type_id=fake.VOLUME_TYPE_ID)['id']
body = {"group_snapshot": {"name": "group_snapshot1",
"description":
"Group Snapshot 1",
"group_id": group.id}}
req = fakes.HTTPRequest.blank('/v3/%s/group_snapshots' %
fake.PROJECT_ID,
version=GROUP_MICRO_VERSION)
self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create,
req, body)
self.assertTrue(mock_validate.called)
group.destroy()
db.volume_destroy(context.get_admin_context(),
volume_id)
def test_create_group_snapshot_with_no_body(self):
# omit body from the request
req = fakes.HTTPRequest.blank('/v3/%s/group_snapshots' %
fake.PROJECT_ID,
version=GROUP_MICRO_VERSION)
self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create,
req, None)
@mock.patch.object(group_api.API, 'create_group_snapshot',
side_effect=exception.InvalidGroupSnapshot(
reason='Invalid group snapshot'))
def test_create_with_invalid_group_snapshot(self, mock_create_group_snap):
group = utils.create_group(
self.context,
group_type_id=fake.GROUP_TYPE_ID,
volume_type_ids=[fake.VOLUME_TYPE_ID],)
volume_id = utils.create_volume(
self.context,
status='error',
group_id=group.id,
volume_type_id=fake.VOLUME_TYPE_ID)['id']
body = {"group_snapshot": {"name": "group_snapshot1",
"description":
"Group Snapshot 1",
"group_id": group.id}}
req = fakes.HTTPRequest.blank('/v3/%s/group_snapshots' %
fake.PROJECT_ID,
version=GROUP_MICRO_VERSION)
self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create,
req, body)
group.destroy()
db.volume_destroy(context.get_admin_context(),
volume_id)
@mock.patch.object(group_api.API, 'create_group_snapshot',
side_effect=exception.GroupSnapshotNotFound(
group_snapshot_id='invalid_id'))
def test_create_with_group_snapshot_not_found(self, mock_create_grp_snap):
group = utils.create_group(
self.context,
group_type_id=fake.GROUP_TYPE_ID,
volume_type_ids=[fake.VOLUME_TYPE_ID],)
volume_id = utils.create_volume(
self.context,
status='error',
group_id=group.id,
volume_type_id=fake.VOLUME_TYPE_ID)['id']
body = {"group_snapshot": {"name": "group_snapshot1",
"description":
"Group Snapshot 1",
"group_id": group.id}}
req = fakes.HTTPRequest.blank('/v3/%s/group_snapshots' %
fake.PROJECT_ID,
version=GROUP_MICRO_VERSION)
self.assertRaises(exception.GroupSnapshotNotFound,
self.controller.create,
req, body)
group.destroy()
db.volume_destroy(context.get_admin_context(),
volume_id)
def test_create_group_snapshot_from_empty_group(self):
group = utils.create_group(
self.context,
group_type_id=fake.GROUP_TYPE_ID,
volume_type_ids=[fake.VOLUME_TYPE_ID],)
body = {"group_snapshot": {"name": "group_snapshot1",
"description":
"Group Snapshot 1",
"group_id": group.id}}
req = fakes.HTTPRequest.blank('/v3/%s/group_snapshots' %
fake.PROJECT_ID,
version=GROUP_MICRO_VERSION)
self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create,
req, body)
group.destroy()
def test_delete_group_snapshot_available(self):
group = utils.create_group(
self.context,
group_type_id=fake.GROUP_TYPE_ID,
volume_type_ids=[fake.VOLUME_TYPE_ID],)
volume_id = utils.create_volume(
self.context,
group_id=group.id,
volume_type_id=fake.VOLUME_TYPE_ID)['id']
group_snapshot = utils.create_group_snapshot(
self.context,
group_id=group.id,
status='available')
req = fakes.HTTPRequest.blank('/v3/%s/group_snapshots/%s' %
(fake.PROJECT_ID, group_snapshot.id),
version=GROUP_MICRO_VERSION)
res_dict = self.controller.delete(req, group_snapshot.id)
group_snapshot = objects.GroupSnapshot.get_by_id(self.context,
group_snapshot.id)
self.assertEqual(202, res_dict.status_int)
self.assertEqual('deleting', group_snapshot.status)
group_snapshot.destroy()
db.volume_destroy(context.get_admin_context(),
volume_id)
group.destroy()
def test_delete_group_snapshot_available_used_as_source(self):
group = utils.create_group(
self.context,
group_type_id=fake.GROUP_TYPE_ID,
volume_type_ids=[fake.VOLUME_TYPE_ID],)
volume_id = utils.create_volume(
self.context,
group_id=group.id,
volume_type_id=fake.VOLUME_TYPE_ID)['id']
group_snapshot = utils.create_group_snapshot(
self.context,
group_id=group.id,
status='available')
group2 = utils.create_group(
self.context, status='creating',
group_snapshot_id=group_snapshot.id,
group_type_id=fake.GROUP_TYPE_ID,
volume_type_ids=[fake.VOLUME_TYPE_ID],)
req = fakes.HTTPRequest.blank('/v3/%s/group_snapshots/%s' %
(fake.PROJECT_ID, group_snapshot.id),
version=GROUP_MICRO_VERSION)
self.assertRaises(webob.exc.HTTPBadRequest, self.controller.delete,
req, group_snapshot.id)
group_snapshot.destroy()
db.volume_destroy(context.get_admin_context(),
volume_id)
group.destroy()
group2.destroy()
def test_delete_group_snapshot_with_group_snapshot_NotFound(self):
req = fakes.HTTPRequest.blank('/v3/%s/group_snapshots/%s' %
(fake.PROJECT_ID,
fake.WILL_NOT_BE_FOUND_ID),
version=GROUP_MICRO_VERSION)
self.assertRaises(exception.GroupSnapshotNotFound,
self.controller.delete,
req, fake.WILL_NOT_BE_FOUND_ID)
def test_delete_group_snapshot_with_Invalid_group_snapshot(self):
group = utils.create_group(
self.context,
group_type_id=fake.GROUP_TYPE_ID,
volume_type_ids=[fake.VOLUME_TYPE_ID],)
volume_id = utils.create_volume(
self.context,
group_id=group.id,
volume_type_id=fake.VOLUME_TYPE_ID)['id']
group_snapshot = utils.create_group_snapshot(
self.context,
group_id=group.id,
status='invalid')
req = fakes.HTTPRequest.blank('/v3/%s/group_snapshots/%s' %
(fake.PROJECT_ID, group_snapshot.id),
version=GROUP_MICRO_VERSION)
self.assertRaises(webob.exc.HTTPBadRequest, self.controller.delete,
req, group_snapshot.id)
group_snapshot.destroy()
db.volume_destroy(context.get_admin_context(),
volume_id)
group.destroy()
|
|
from tcga_encoder.definitions.locations import *
from tcga_encoder.utils.helpers import *
from tcga_encoder.data.data import *
from tcga_encoder.analyses.dna_functions import *
from sklearn.model_selection import StratifiedKFold
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import average_precision_score, precision_recall_curve
from tcga_encoder.analyses.survival_functions import *
def load_data_and_fill( data_location, results_location ):
input_sources = ["RNA","miRNA","METH"]
data_store = load_store( data_location, "data.h5")
model_store = load_store( results_location, "full_vae_model.h5")
fill_store = load_store( results_location, "full_vae_fill.h5")
subtypes = load_subtypes( data_store )
#tissues = data_store["/CLINICAL/TISSUE"].loc[barcodes]
survival = PanCancerSurvival( data_store )
#pdb.set_trace()
Z,Z_std = load_latent( fill_store )
model_barcodes = Z.index.values
H = load_hidden( fill_store, model_barcodes )
RNA_fair, miRNA_fair, METH_fair = load_fair_data( data_store, model_barcodes )
RNA_scale, miRNA_scale, METH_scale = load_scaled_data( fill_store, model_barcodes )
rna_names = RNA_scale.columns
mirna_names = miRNA_scale.columns
meth_names = METH_scale.columns
h_names = H.columns
z_names = Z.columns
n_rna = len(rna_names)
n_mirna = len(mirna_names)
n_meth = len(meth_names)
n_h = len(h_names)
n_z = len(z_names)
everything_dir = os.path.join( os.path.join( HOME_DIR, results_location ), "everything2" )
check_and_mkdir(everything_dir)
data = EverythingObject()
data.input_sources = input_sources
data.data_store = data_store
data.model_store = model_store
data.fill_store = fill_store
data.subtypes = subtypes
data.survival = survival
data.Z = Z
try:
data.T=data.data_store["/CLINICAL_USED/TISSUE"].loc[ Z.index ]
except:
data.T=data.data_store["/CLINICAL/TISSUE"].loc[ Z.index ]
data.Z_std = Z_std
data.H = H
data.W_input2h = get_hidden_weights( model_store, input_sources, data_store )
data.W_h2z = get_hidden2z_weights( model_store )
data.weighted_W_h2z = join_weights( data.W_h2z, data.W_input2h )
data.dna = data.data_store["/DNA/channel/0"].loc[data.Z.index].fillna(0)
data.RNA_scale = RNA_scale
data.miRNA_scale = miRNA_scale
data.METH_scale = METH_scale
data.RNA_fair = RNA_fair
data.miRNA_fair = miRNA_fair
data.METH_fair = METH_fair
data.rna_names = rna_names
data.mirna_names = mirna_names
data.meth_names = meth_names
data.h_names = h_names
data.z_names = z_names
data.n_rna = n_rna
data.n_mirna = n_mirna
data.n_meth = n_meth
data.n_h = n_h
data.n_z = n_z
data.save_dir = everything_dir
data.data_store.close()
data.fill_store.close()
data.model_store.close()
return data
def merge_tissues( T, tissues ):
ids = np.zeros( len(T), dtype=bool)
s = ""
for tissue in tissues:
s += tissue
ids |= (T[ tissue ]==1).values
T = T.drop(tissue,axis=1)
T[s] = ids.astype(float)
return T
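# NOTE: ``pearsonr`` below normalizes each column by its L2 norm only, so it assumes
# the columns of X and Y are already centered (zero mean); otherwise the result is
# cosine similarity rather than Pearson correlation. The returned ``p_values`` are
# simply 1 - |r|, a ranking heuristic rather than true statistical p-values.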
def pearsonr( X, Y ):
XN = X / np.sqrt(np.sum( X*X,0))
YN = Y / np.sqrt(np.sum( Y*Y,0))
pearson = np.dot( XN.T, YN )
p_values = 1.0 - np.abs(pearson)
return pearson,p_values
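# ``auc_and_pvalue`` compares the observed ROC AUC against the chance level of 0.5:
# the standard errors come from ``auc_standard_error`` (imported from dna_functions
# and not shown here, presumably a Hanley-McNeil-style approximation), and
# ``auc_p_value`` converts the difference into a p-value.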
def auc_and_pvalue( true_y, z_values ):
n_1 = true_y.sum()
n_0 = len(true_y) - n_1
auc = roc_auc_score( true_y, z_values )
if auc < 0.5:
se_auc = auc_standard_error( auc, n_0, n_1 )
else:
se_auc = auc_standard_error( auc, n_1, n_0 )
se_random = auc_standard_error( 0.5, n_1, n_0 )
p_value = auc_p_value( auc, 0.5, se_auc, se_random )
return auc, p_value
class LogisticBinaryClassifierKFold(object):
def __init__(self, K=5, random_state = None ):
self.random_state = random_state
self.K = K
self.M = []
for k in range(K):
self.M.append( LogisticBinaryClassifier() )
def fit_and_prob( self, y, X, penalty = 'l2', C = 0.0 ):
print "LogisticBinaryClassifierKFold penalty/C = ", penalty, C
self.folds = StratifiedKFold(n_splits=self.K, shuffle = True, random_state=self.random_state)
y_prob = np.zeros( y.shape )
k = 0
for train_split, test_split in self.folds.split( X, y ):
self.M[k].fit( y[train_split], X[train_split,:], penalty = penalty, C=C )
y_est = self.M[k].prob( X[test_split,:] )
#pdb.set_trace()
# if np.any(np.isnan(y_est)):
# pdb.set_trace()
y_prob[test_split] = y_est
return y_prob
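# A minimal usage sketch, assuming ``dna_vector`` is a binary numpy array of shape
# (n_samples,) and ``Z_values`` a float array of shape (n_samples, n_z); both names
# are hypothetical and only the estimator API defined above is used.
def _example_out_of_fold_mutation_probs(dna_vector, Z_values):
    """Out-of-fold mutation probabilities from the latent factors, scored by AUC."""
    clf = LogisticBinaryClassifierKFold(K=5, random_state=0)
    y_prob = clf.fit_and_prob(dna_vector, Z_values, penalty='l2', C=1.0)
    return auc_and_pvalue(dna_vector, y_prob)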
class LogisticBinaryClassifier(object):
def __init__(self):
pass
def fit( self, y, X, penalty = 'l2', C = 0.0, fit_intercept=True, class_weight="balanced" ):
self.dim = X.shape[1]
self.n = len(y)
self.n_1 = y.sum()
self.n_0 = self.n-self.n_1
self.penalty = penalty
self.C = C
self.M = LogisticRegression(penalty=self.penalty,\
C=self.C, \
intercept_scaling=1.0, \
fit_intercept=fit_intercept, \
class_weight = class_weight)
self.mean = X.mean(0)
self.std = X.std(0)
self.M.fit( self.normalize(X), y )
self.coef_ = self.M.coef_
def normalize(self,X):
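# NOTE: normalization is effectively disabled here -- X is returned unchanged, and
# the standardization statement after ``return X`` is unreachable dead code.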
return X
return (X-self.mean)/self.std
def predict( self, X ):
return self.M.predict(self.normalize(X)).astype(int)
def prob( self, X ):
return self.M.predict_proba(self.normalize(X))[:,1]
def log_prob( self, X ):
return self.M.predict_log_proba(self.normalize(X))
class GenerativeBinaryClassifierKFold(object):
def __init__(self, K=5, random_state = None ):
self.random_state = random_state
self.K = K
self.M = []
for k in range(K):
self.M.append( GenerativeBinaryClassifier() )
def fit_and_prob( self, y, X, cov_type = "full", ridge = 0.0 ):
print "GenerativeBinaryClassifierKFold ridge = ", ridge
self.folds = StratifiedKFold(n_splits=self.K, shuffle = True, random_state=self.random_state)
y_prob = np.zeros( y.shape )
k = 0
for train_split, test_split in self.folds.split( X, y ):
#print "\t\t\tINFO (%s): running fold %d of %d"%(dna_gene,fold_idx, n_folds)
self.M[k].fit( y[train_split], X[train_split,:], cov_type, ridge )
y_est = self.M[k].prob( X[test_split,:] )
if np.any(np.isnan(y_est)):
pdb.set_trace()
y_prob[test_split] = y_est
return y_prob
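# ``GenerativeBinaryClassifier`` below is a Gaussian class-conditional model (QDA for
# cov_type="full"/"diag", LDA-like for cov_type="shared"): it fits a prior pi_c, mean
# mu_c and an (optionally ridge-regularized) covariance Sigma_c per class, and
# ``log_prob`` returns
#     log p(y=1|x) = a_1 - log(exp(a_1) + exp(a_0)),
#     a_c = log pi_c - 0.5*log|2*pi*Sigma_c| - 0.5*(x - mu_c)^T Sigma_c^{-1} (x - mu_c),
# using the max-subtraction trick for a numerically stable log-sum-exp.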
class GenerativeBinaryClassifier(object):
def __init__(self):
pass
def fit( self, y, X, cov_type = "full", ridge = 0.0 ):
self.dim = X.shape[1]
self.n = len(y)
self.n_1 = y.sum()
self.n_0 = self.n-self.n_1
self.ridge = ridge
self.pi_1 = float(self.n_1)/float(self.n)
self.pi_0 = float(self.n_0)/float(self.n)
self.log_pi_1 = np.log(self.pi_1)
self.log_pi_0 = np.log(self.pi_0)
self.class_1 = y==1
self.class_0 = y==0
self.class_1_ids = pp.find(self.class_1)
self.class_0_ids = pp.find(self.class_0)
self.mean_1 = X[self.class_1].mean(0)
self.mean_0 = X[self.class_0].mean(0)
if cov_type == "full":
self.cov_1 = np.cov( X[self.class_1].T ) + self.ridge*np.eye(self.dim)
self.cov_0 = np.cov( X[self.class_0].T ) + ridge*np.eye(self.dim)
elif cov_type == "diag":
self.cov_1 = np.diag( X[self.class_1].var(0) ) + self.ridge*np.eye(self.dim)
self.cov_0 = np.diag( X[self.class_0].var(0) )+ self.ridge*np.eye(self.dim)
elif cov_type == "shared":
self.cov_1 = np.cov( X.T ) + self.ridge*np.eye(self.dim)
self.cov_0 = self.cov_1
self.inv_cov_1 = np.linalg.inv(self.cov_1)
self.inv_cov_0 = np.linalg.inv(self.cov_0)
#pdb.set_trace()
def predict( self, X ):
return self.prob(X).astype(int)
def prob( self, X ):
return np.exp(self.log_prob(X))
def log_prob( self, X ):
log_prob_1 = self.log_prob_class( X, self.log_pi_1, self.cov_1, self.inv_cov_1, self.mean_1 )
log_prob_0 = self.log_prob_class( X, self.log_pi_0, self.cov_0, self.inv_cov_0, self.mean_0 )
if np.any(np.isnan(log_prob_1)) or np.any(np.isnan(log_prob_0)):
print self.mean_0
print self.mean_1
print self.cov_1
print self.cov_0
print self.ridge
print np.cov( X[self.class_1].T )
print np.cov( X[self.class_0].T )
pdb.set_trace()
#log_denom = np.log( np.exp(log_prob_1)+np.exp(log_prob_0))
max_ = np.maximum( log_prob_1, log_prob_0 )
log_denom = max_ + np.log( np.exp( log_prob_1-max_ )+np.exp( log_prob_0-max_ ))
# if log_prob_1 > log_prob_0:
# log_denom = log_prob_1 + np.log( 1.0 + np.exp(log_prob_0-log_prob_1) )
# else:
# log_denom = log_prob_0 + np.log( 1.0 + np.exp(log_prob_1-log_prob_0) )
#pdb.set_trace()
return log_prob_1 - log_denom
def log_prob_class(self, X, log_pi, cov, invcov, mean ):
dif = X-mean[np.newaxis,:]
a = log_pi
b = -0.5*np.log( np.linalg.det( 2*np.pi*cov))
c = -0.5*np.sum( np.dot( dif, invcov )*dif, 1 )
return a+b+c
class EverythingObject(object):
def __init__(self):
self.results = {}
def load_store( location, name, mode="r" ):
store_path = os.path.join( HOME_DIR, location )
store_name = os.path.join( store_path, name )
return pd.HDFStore( store_name, mode )
def load_scaled_data( fill_store, barcodes ):
RNA_scale = fill_store["/scaled/RNA"].loc[barcodes]
miRNA_scale = fill_store["/scaled/miRNA"].loc[barcodes]
METH_scale = fill_store["/scaled/METH"].loc[barcodes]
return RNA_scale, miRNA_scale, METH_scale
def load_fair_data( data_store, barcodes ):
RNA_fair = data_store["/RNA/FAIR"].loc[barcodes]
miRNA_fair = data_store["/miRNA/FAIR"].loc[barcodes]
METH_fair = data_store["/METH/FAIR"].loc[barcodes]
return RNA_fair, miRNA_fair, METH_fair
def load_subtypes( data_store ):
sub_bcs = np.array([ x+"_"+y for x,y in np.array(data_store["/CLINICAL/data"]["patient.stage_event.pathologic_stage"].index.tolist(),dtype=str)] )
sub_values = np.array( data_store["/CLINICAL/data"]["patient.stage_event.pathologic_stage"].values, dtype=str )
subtypes = pd.Series( sub_values, index = sub_bcs, name="subtypes")
return subtypes
def load_latent( fill_store ):
Z_train = fill_store["/Z/TRAIN/Z/mu"]
Z_val = fill_store["/Z/VAL/Z/mu"]
Z = pd.concat( [Z_train, Z_val], axis = 0 )
Z_train = fill_store["/Z/TRAIN/Z/var"]
Z_val = fill_store["/Z/VAL/Z/var"]
Z_var = pd.concat( [Z_train, Z_val], axis = 0 )
Z_std = np.sqrt(Z_var)
return Z, Z_std
def load_hidden( fill_store, barcodes ):
try:
H = fill_store["hidden"].loc[barcodes]
except:
print "found no hidden"
H = pd.DataFrame( [], index = barcodes )
return H
def join_weights( W_hidden2z, W_hidden ):
W = {}
n_z = W_hidden2z.shape[1]
columns = np.array( ["z_%d"%i for i in range(n_z)])
for input_source, source_w in W_hidden.iteritems():
#pdb.set_trace()
W[ input_source ] = pd.DataFrame( np.dot( source_w, W_hidden2z ), index = source_w.index, columns = columns )
#pdb.set_trace()
return W
def get_hidden2z_weights( model_store ):
layer = "rec_z_space"
model_store.open()
w = model_store[ "%s"%(layer) + "/W/w%d"%(0)].values
model_store.close()
return w
def get_hidden_weights( model_store, input_sources, data_store ):
rna_genes = data_store["/RNA/FAIR"].columns
meth_genes = ["M_"+s for s in data_store["/METH/FAIR"].columns]
mirna_hsas = data_store["/miRNA/FAIR"].columns
post_fix = "_scaled"
idx=1
n_sources = len(input_sources)
W = {}
for w_idx, input_source in zip( range(n_sources), input_sources ):
w = model_store[ "rec_hidden" + "/W/w%d"%(w_idx)].values
#pdb.set_trace()
d,k = w.shape
columns = np.array( ["h_%d"%i for i in range(k)])
if input_source == "RNA":
rows = rna_genes
print input_source, w.shape, len(rows), len(columns)
W[ input_source ] = pd.DataFrame( w, index=rows, columns = columns )
if input_source == "miRNA":
rows = mirna_hsas
print input_source, w.shape, len(rows), len(columns)
W[ input_source ] = pd.DataFrame( w, index=rows, columns = columns )
if input_source == "METH":
rows = meth_genes
#rows = np.array( [ "M-%s"%g for g in meth_genes], dtype=str )
print input_source, w.shape, len(rows), len(columns)
W[ input_source ] = pd.DataFrame( w, index=rows, columns = columns )
if input_source == "TISSUE":
rows = tissue_names
print input_source, w.shape, len(rows), len(columns)
W[ input_source ] = pd.DataFrame( w, index=rows, columns = columns )
model_store.close()
return W
def quantize( Z, q_range =[0,0.2, 0.4,0.6,0.8,1.0] ):
#n_z = len(Z)
n_z = len(Z.columns)
#quantiles = (len(Z)*np.array( [0,0.33, 0.66, 1.0] )).astype(int)
quantiles = (len(Z)*np.array( q_range )).astype(int)
#quantiles = (len(Z)*np.array( [0,0.1, 0.2,0.3,0.4,0.6,0.7,0.8,0.9,1.0] )).astype(int)
n_quantiles = len(quantiles)-1
start_q_id = -(n_quantiles-1)/2
#Z=Z.loc[barcodes]
Z_values = Z.values
argsort_Z = np.argsort( Z_values, 0 )
Z_quantized = np.zeros( Z_values.shape, dtype=int )
for start_q, end_q in zip( quantiles[:-1], quantiles[1:] ):
for z_idx in range(n_z):
z_idx_order = argsort_Z[:,z_idx]
Z_quantized[ z_idx_order[start_q:end_q], z_idx] = start_q_id
start_q_id+=1
Z_quantized = pd.DataFrame(Z_quantized, index=Z.index, columns=Z.columns )
return Z_quantized
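# Illustrative note (added): quantize() bins each latent column into signed
# integer levels centred on zero. With the default q_range (five equal bins)
# and, say, 100 rows, the 20 lowest values of each column are mapped to -2,
# the next 20 to -1, and so on up to +2 for the 20 highest values.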
def normalize( Z ):
Z_values = Z.values
Z_values -= Z_values.mean(0)
Z_values /= Z_values.std(0)
Z_normalized = pd.DataFrame(Z_values, index=Z.index, columns=Z.columns )
return Z_normalized
def normalize_by_tissue(X,T):
XV = X.values
#X2 =
for tissue_name in T.columns:
#print "working ", tissue_name
ids = pp.find( T[tissue_name]==1 )
n_ids = len(ids); n_tissue=n_ids
if n_ids==0:
continue
XV[ids,:] -= XV[ids,:].mean(0)
XV[ids,:] /= XV[ids,:].std(0)
return pd.DataFrame( XV, index = X.index, columns = X.columns )
def ids_with_at_least_n_mutations( dna, tissue, n = 1 ):
ok_ids = np.zeros( len(dna), dtype=bool )
for tissue_name in tissue.columns:
#print "working ", tissue_name
ids = pp.find( tissue[tissue_name]==1 )
n_ids = len(ids); n_tissue=n_ids
if n_ids==0:
continue
n_mutations = dna[ids].sum()
        if n_mutations >= n:
ok_ids[ ids ] = True
return ok_ids
def ids_with_at_least_p_mutations( dna, tissue, p = 1 ):
ok_ids = np.zeros( len(dna), dtype=bool )
relevant_tissues=[]
for tissue_name in tissue.columns:
#print "working ", tissue_name
ids = pp.find( tissue[tissue_name]==1 )
n_ids = len(ids); n_tissue=n_ids
if n_ids==0:
continue
n_mutations = dna[ids].sum()
if float(n_mutations)/float(n_ids) >= p:
ok_ids[ ids ] = True
relevant_tissues.append(tissue_name)
return ok_ids,relevant_tissues
def tissue_level_performance( ids, y_true, y_est, relevant_tissues, tissues ):
performances = []
mutations = int(np.sum(y_true[ids]))
wildtype = len(y_est[ids])-mutations
#pdb.set_trace()
auc_y_est, p_value_y_est = auc_and_pvalue(y_true[ids], y_est[ids] )
mean_precision = average_precision_score(y_true[ids], y_est[ids] )
performances.append( pd.Series( [mean_precision,auc_y_est,p_value_y_est,wildtype,mutations],index = ["AUPRC","AUROC","p-value","wildtype","mutations"], name = "PAN" ) )
for tissue in relevant_tissues:
tissue_query = tissues[tissue]==1
tissue_ids = pp.find(tissue_query) #ids_with_n[ tissue_query ]
#tissue_bcs = gene_bcs[ tissue_query ]
y_true_tissue = y_true[tissue_ids]
y_est_tissue = y_est[tissue_ids]
n = len(y_true_tissue)
mutations = int(np.sum(y_true_tissue))
wildtype = n-mutations
if mutations==0 or wildtype==0:
print "skipping ",tissue
continue
#pdb.set_trace()
auc_y_est, p_value_y_est = auc_and_pvalue(y_true_tissue, y_est_tissue )
mean_precision = average_precision_score(y_true_tissue, y_est_tissue )
performances.append( pd.Series( [mean_precision,auc_y_est,p_value_y_est,n,wildtype,mutations],index = ["AUPRC","AUROC","p-value","n","wildtype","mutations"], name = tissue ) )
performances = pd.concat(performances, axis=1).T
#pdb.set_trace()
print performances
return performances
def plot_pr_tissues( gene_dir, gene, bcs, y_est_full, y_true_full, relevant_tissues, tissues, performance, ax=None, save=True ):
if ax is None:
f=pp.figure()
ax=f.add_subplot(111)
best_tissues = performance["AUPRC"].sort_values().index.values
for tissue in best_tissues:
if tissue == "PAN":
continue
tissue_bcs = tissues[ tissues[tissue]==1 ].index.values
y_est = y_est_full.loc[tissue_bcs]
y_true = y_true_full.loc[tissue_bcs]
precision, recall, _ = precision_recall_curve(y_true.values, y_est.values )
ax.plot( recall, precision, '-', lw=1, label='%s %0.2f' % (tissue,performance.loc[tissue]["AUPRC"]) )
y_est = y_est_full.loc[bcs]
y_true = y_true_full.loc[bcs]
precision, recall, _ = precision_recall_curve(y_true.values, y_est.values )
ax.plot( recall, precision, 'k-', lw=4, label = 'PAN %0.2f'%(performance.loc["PAN"]["AUPRC"]) )
#ax.plot([0, 1], [mean_precision, mean_precision], color='navy', lw=1, linestyle='--')
#ax.plot( recall, precision, 'r-', lw=2, label='PR curve (mean = %0.2f)' % mean_precision )
ax.set_xlim([0.0, 1.0])
ax.set_ylim([0.0, 1.05])
ax.set_xlabel('Recall')
ax.set_ylabel('Precision')
ax.set_title('%s'%(gene))
ax.legend(loc="lower right")
if save is True:
        pp.savefig( gene_dir + "/precision_recall_tissues.png", format='png', dpi=300, bbox_inches='tight')
return ax
def plot_roc_tissues( gene_dir, gene, bcs, y_est_full, y_true_full, relevant_tissues, tissues, performance, ax=None, save=True ):
if ax is None:
f=pp.figure()
ax=f.add_subplot(111)
best_tissues = performance["AUROC"].sort_values().index.values
for tissue in best_tissues:
if tissue == "PAN":
continue
tissue_bcs = tissues[ tissues[tissue]==1 ].index.values
y_est = y_est_full.loc[tissue_bcs]
y_true = y_true_full.loc[tissue_bcs]
#precision, recall, _ = precision_recall_curve(y_true.values, y_est.values )
fpr, tpr, _ = roc_curve(y_true.values, y_est.values )
ax.plot( fpr, tpr, '-', lw=1, label='%s %0.2f' % (tissue,performance.loc[tissue]["AUROC"]) )
y_est = y_est_full.loc[bcs]
y_true = y_true_full.loc[bcs]
fpr, tpr, _ = roc_curve(y_true.values, y_est.values )
#precision, recall, _ = precision_recall_curve(y_true.values, y_est.values )
ax.plot( fpr, tpr, 'k-', lw=4, label='PAN %0.2f'%(performance.loc["PAN"]["AUROC"]) )
#ax.plot([0, 1], [mean_precision, mean_precision], color='navy', lw=1, linestyle='--')
#ax.plot( recall, precision, 'r-', lw=2, label='PR curve (mean = %0.2f)' % mean_precision )
ax.set_xlim([0.0, 1.0])
ax.set_ylim([0.0, 1.05])
ax.set_xlabel('FPR')
ax.set_ylabel('TPR')
ax.set_title('%s'%(gene))
ax.legend(loc="lower right")
if save is True:
        pp.savefig( gene_dir + "/roc_tissues.png", format='png', dpi=300, bbox_inches='tight')
return ax
#
# def auc_standard_error( theta, nA, nN ):
# # from: Hanley and McNeil (1982), The Meaning and Use of the Area under the ROC Curve
# # theta: estimated AUC, can be 0.5 for a random test
# # nA size of population A
# # nN size of population N
#
# Q1=theta/(2.0-theta); Q2=2*theta*theta/(1+theta)
#
# SE = np.sqrt( (theta*(1-theta)+(nA-1)*(Q1-theta*theta) + (nN-1)*(Q2-theta*theta) )/(nA*nN) )
#
# return SE
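# An executable version (added) of the Hanley & McNeil (1982) standard-error
# formula sketched in the commented-out draft above; kept separate so the
# original draft stays untouched. Only numpy (np) is required.
def auc_standard_error(theta, nA, nN):
    # theta: estimated AUC (0.5 for a random test); nA, nN: population sizes
    Q1 = theta / (2.0 - theta)
    Q2 = 2 * theta * theta / (1 + theta)
    return np.sqrt((theta * (1 - theta)
                    + (nA - 1) * (Q1 - theta * theta)
                    + (nN - 1) * (Q2 - theta * theta)) / (nA * nN))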
|
|
# -*- coding: utf-8 -*-
# Copyright 2013 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from datetime import datetime
from decorator import decorator
from sqlalchemy import exc as sa_exc
import web
from nailgun.api.v1.validators.base import BasicValidator
from nailgun.db import db
from nailgun.errors import errors
from nailgun.logger import logger
from nailgun import objects
from nailgun.objects.serializers.base import BasicSerializer
from nailgun.openstack.common import jsonutils
def check_client_content_type(handler):
content_type = web.ctx.env.get("CONTENT_TYPE", "application/json")
if web.ctx.path.startswith("/api")\
and not content_type.startswith("application/json"):
raise handler.http(415)
return handler()
def forbid_client_caching(handler):
if web.ctx.path.startswith("/api"):
web.header('Cache-Control',
                   'no-store, no-cache, must-revalidate,'
' post-check=0, pre-check=0')
web.header('Pragma', 'no-cache')
dt = datetime.fromtimestamp(0).strftime(
'%a, %d %b %Y %H:%M:%S GMT'
)
web.header('Expires', dt)
return handler()
def load_db_driver(handler):
"""Wrap all handlers calls in a special construction, that's call
rollback if something wrong or commit changes otherwise. Please note,
only HTTPError should be rised up from this function. All another
possible errors should be handle.
"""
try:
# execute handler and commit changes if all is ok
response = handler()
db.commit()
return response
except web.HTTPError:
# a special case: commit changes if http error ends with
# 200, 201, 202, etc
if web.ctx.status.startswith('2'):
db.commit()
else:
db.rollback()
raise
except (sa_exc.IntegrityError, sa_exc.DataError) as exc:
# respond a "400 Bad Request" if database constraints were broken
db.rollback()
raise BaseHandler.http(400, exc.message)
except Exception:
db.rollback()
raise
finally:
db.remove()
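# Usage sketch (illustrative only -- the real wiring lives in the application
# bootstrap, not in this module): load_db_driver is intended to be attached as
# a web.py processor so that every request is wrapped in the commit/rollback
# logic above:
#
#     app = web.application(urls, globals())
#     app.add_processor(load_db_driver)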
@decorator
def content_json(func, *args, **kwargs):
try:
data = func(*args, **kwargs)
except web.notmodified:
raise
except web.HTTPError as http_error:
web.header('Content-Type', 'application/json')
if isinstance(http_error.data, (dict, list)):
http_error.data = build_json_response(http_error.data)
raise
web.header('Content-Type', 'application/json')
return build_json_response(data)
def build_json_response(data):
web.header('Content-Type', 'application/json')
if type(data) in (dict, list):
return jsonutils.dumps(data)
return data
class BaseHandler(object):
validator = BasicValidator
serializer = BasicSerializer
fields = []
@classmethod
def render(cls, instance, fields=None):
return cls.serializer.serialize(
instance,
fields=fields or cls.fields
)
@classmethod
def http(cls, status_code, message='', headers=None):
"""Raise an HTTP status code, as specified. Useful for returning status
codes like 401 Unauthorized or 403 Forbidden.
:param status_code: the HTTP status code as an integer
:param message: the message to send along, as a string
:param headers: the headers to send along, as a dictionary
"""
class _nocontent(web.HTTPError):
message = 'No Content'
def __init__(self, message=''):
super(_nocontent, self).__init__(
status='204 No Content',
data=message or self.message
)
exc_status_map = {
200: web.ok,
201: web.created,
202: web.accepted,
204: _nocontent,
301: web.redirect,
302: web.found,
400: web.badrequest,
401: web.unauthorized,
403: web.forbidden,
404: web.notfound,
405: web.nomethod,
406: web.notacceptable,
409: web.conflict,
410: web.gone,
415: web.unsupportedmediatype,
500: web.internalerror,
}
exc = exc_status_map[status_code]()
exc.data = message
headers = headers or {}
for key, value in headers.items():
web.header(key, value)
return exc
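    # Example (hypothetical handler code; request_id is made up): the exception
    # returned by http() is meant to be raised by the caller, optionally with
    # extra response headers:
    #
    #     raise self.http(404, u'Node not found',
    #                     headers={'X-Request-Id': request_id})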
def checked_data(self, validate_method=None, **kwargs):
try:
data = kwargs.pop('data', web.data())
method = validate_method or self.validator.validate
valid_data = method(data, **kwargs)
except (
errors.InvalidInterfacesInfo,
errors.InvalidMetadata
) as exc:
objects.Notification.create({
"topic": "error",
"message": exc.message
})
raise self.http(400, exc.message)
except (
errors.NotAllowed,
) as exc:
raise self.http(403, exc.message)
except (
errors.AlreadyExists
) as exc:
raise self.http(409, exc.message)
except (
errors.InvalidData,
errors.NodeOffline,
) as exc:
raise self.http(400, exc.message)
except (
errors.ObjectNotFound,
) as exc:
raise self.http(404, exc.message)
except Exception as exc:
raise
return valid_data
def get_object_or_404(self, obj, *args, **kwargs):
"""Get object instance by ID
:http: 404 when not found
:returns: object instance
"""
log_404 = kwargs.pop("log_404", None)
log_get = kwargs.pop("log_get", None)
uid = kwargs.get("id", (args[0] if args else None))
if uid is None:
if log_404:
getattr(logger, log_404[0])(log_404[1])
raise self.http(404, u'Invalid ID specified')
else:
instance = obj.get_by_uid(uid)
if not instance:
raise self.http(404, u'{0} not found'.format(obj.__name__))
if log_get:
getattr(logger, log_get[0])(log_get[1])
return instance
def get_objects_list_or_404(self, obj, ids):
"""Get list of objects
:param model: model object
:param ids: list of ids
:http: 404 when not found
:returns: list of object instances
"""
node_query = obj.filter_by_id_list(None, ids)
objects_count = obj.count(node_query)
if len(set(ids)) != objects_count:
raise self.http(404, '{0} not found'.format(obj.__name__))
return list(node_query)
class SingleHandler(BaseHandler):
validator = BasicValidator
single = None
@content_json
def GET(self, obj_id):
""":returns: JSONized REST object.
:http: * 200 (OK)
* 404 (object not found in db)
"""
obj = self.get_object_or_404(self.single, obj_id)
return self.single.to_json(obj)
@content_json
def PUT(self, obj_id):
""":returns: JSONized REST object.
:http: * 200 (OK)
* 404 (object not found in db)
"""
obj = self.get_object_or_404(self.single, obj_id)
data = self.checked_data(
self.validator.validate_update,
instance=obj
)
self.single.update(obj, data)
return self.single.to_json(obj)
def DELETE(self, obj_id):
""":returns: Empty string
:http: * 204 (object successfully deleted)
* 404 (object not found in db)
"""
obj = self.get_object_or_404(
self.single,
obj_id
)
try:
self.validator.validate_delete(obj)
except errors.CannotDelete as exc:
raise self.http(400, exc.message)
self.single.delete(obj)
raise self.http(204)
class CollectionHandler(BaseHandler):
validator = BasicValidator
collection = None
eager = ()
@content_json
def GET(self):
""":returns: Collection of JSONized REST objects.
:http: * 200 (OK)
"""
q = self.collection.eager(None, self.eager)
return self.collection.to_json(q)
@content_json
def POST(self):
""":returns: JSONized REST object.
:http: * 201 (object successfully created)
* 400 (invalid object data specified)
* 409 (object with such parameters already exists)
"""
data = self.checked_data()
try:
new_obj = self.collection.create(data)
except errors.CannotCreate as exc:
raise self.http(400, exc.message)
raise self.http(201, self.collection.single.to_json(new_obj))
# TODO(enchantner): rewrite more handlers to inherit from this
# and move more common code here
class DeferredTaskHandler(BaseHandler):
"""Abstract Deferred Task Handler
"""
validator = BasicValidator
single = objects.Task
log_message = u"Starting deferred task on environment '{env_id}'"
log_error = u"Error during execution of deferred task " \
u"on environment '{env_id}': {error}"
task_manager = None
@content_json
def PUT(self, cluster_id):
""":returns: JSONized Task object.
:http: * 202 (task successfully executed)
* 400 (invalid object data specified)
* 404 (environment is not found)
* 409 (task with such parameters already exists)
"""
cluster = self.get_object_or_404(
objects.Cluster,
cluster_id,
log_404=(
u"warning",
u"Error: there is no cluster "
u"with id '{0}' in DB.".format(cluster_id)
)
)
logger.info(self.log_message.format(env_id=cluster_id))
try:
task_manager = self.task_manager(cluster_id=cluster.id)
task = task_manager.execute()
except (
errors.AlreadyExists,
errors.StopAlreadyRunning
) as exc:
raise self.http(409, exc.message)
except (
errors.DeploymentNotRunning,
errors.WrongNodeStatus
) as exc:
raise self.http(400, exc.message)
except Exception as exc:
logger.error(
self.log_error.format(
env_id=cluster_id,
error=str(exc)
)
)
# let it be 500
raise
raise self.http(202, self.single.to_json(task))
|
|
# -*- coding: utf-8 -*-
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import warnings
from typing import Awaitable, Callable, Dict, Optional, Sequence, Tuple, Union
from google.api_core import gapic_v1
from google.api_core import grpc_helpers_async
from google.auth import credentials as ga_credentials # type: ignore
from google.auth.transport.grpc import SslCredentials # type: ignore
import grpc # type: ignore
from grpc.experimental import aio # type: ignore
from google.cloud.recommendationengine_v1beta1.types import prediction_service
from .base import PredictionServiceTransport, DEFAULT_CLIENT_INFO
from .grpc import PredictionServiceGrpcTransport
class PredictionServiceGrpcAsyncIOTransport(PredictionServiceTransport):
"""gRPC AsyncIO backend transport for PredictionService.
Service for making recommendation prediction.
This class defines the same methods as the primary client, so the
primary client can load the underlying transport implementation
and call it.
It sends protocol buffers over the wire using gRPC (which is built on
top of HTTP/2); the ``grpcio`` package must be installed.
"""
_grpc_channel: aio.Channel
_stubs: Dict[str, Callable] = {}
@classmethod
def create_channel(
cls,
host: str = "recommendationengine.googleapis.com",
credentials: ga_credentials.Credentials = None,
credentials_file: Optional[str] = None,
scopes: Optional[Sequence[str]] = None,
quota_project_id: Optional[str] = None,
**kwargs,
) -> aio.Channel:
"""Create and return a gRPC AsyncIO channel object.
Args:
host (Optional[str]): The host for the channel to use.
credentials (Optional[~.Credentials]): The
authorization credentials to attach to requests. These
credentials identify this application to the service. If
none are specified, the client will attempt to ascertain
the credentials from the environment.
credentials_file (Optional[str]): A file with credentials that can
be loaded with :func:`google.auth.load_credentials_from_file`.
This argument is ignored if ``channel`` is provided.
            scopes (Optional[Sequence[str]]): An optional list of scopes needed for this
service. These are only used when credentials are not specified and
are passed to :func:`google.auth.default`.
quota_project_id (Optional[str]): An optional project to use for billing
and quota.
kwargs (Optional[dict]): Keyword arguments, which are passed to the
channel creation.
Returns:
aio.Channel: A gRPC AsyncIO channel object.
"""
return grpc_helpers_async.create_channel(
host,
credentials=credentials,
credentials_file=credentials_file,
quota_project_id=quota_project_id,
default_scopes=cls.AUTH_SCOPES,
scopes=scopes,
default_host=cls.DEFAULT_HOST,
**kwargs,
)
def __init__(
self,
*,
host: str = "recommendationengine.googleapis.com",
credentials: ga_credentials.Credentials = None,
credentials_file: Optional[str] = None,
scopes: Optional[Sequence[str]] = None,
channel: aio.Channel = None,
api_mtls_endpoint: str = None,
client_cert_source: Callable[[], Tuple[bytes, bytes]] = None,
ssl_channel_credentials: grpc.ChannelCredentials = None,
client_cert_source_for_mtls: Callable[[], Tuple[bytes, bytes]] = None,
quota_project_id=None,
client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
always_use_jwt_access: Optional[bool] = False,
) -> None:
"""Instantiate the transport.
Args:
host (Optional[str]):
The hostname to connect to.
credentials (Optional[google.auth.credentials.Credentials]): The
authorization credentials to attach to requests. These
credentials identify the application to the service; if none
are specified, the client will attempt to ascertain the
credentials from the environment.
This argument is ignored if ``channel`` is provided.
credentials_file (Optional[str]): A file with credentials that can
be loaded with :func:`google.auth.load_credentials_from_file`.
This argument is ignored if ``channel`` is provided.
            scopes (Optional[Sequence[str]]): An optional list of scopes needed for this
service. These are only used when credentials are not specified and
are passed to :func:`google.auth.default`.
channel (Optional[aio.Channel]): A ``Channel`` instance through
which to make calls.
api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint.
If provided, it overrides the ``host`` argument and tries to create
a mutual TLS channel with client SSL credentials from
``client_cert_source`` or application default SSL credentials.
client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]):
Deprecated. A callback to provide client SSL certificate bytes and
private key bytes, both in PEM format. It is ignored if
``api_mtls_endpoint`` is None.
ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials
for the grpc channel. It is ignored if ``channel`` is provided.
client_cert_source_for_mtls (Optional[Callable[[], Tuple[bytes, bytes]]]):
A callback to provide client certificate bytes and private key bytes,
both in PEM format. It is used to configure a mutual TLS channel. It is
ignored if ``channel`` or ``ssl_channel_credentials`` is provided.
quota_project_id (Optional[str]): An optional project to use for billing
and quota.
client_info (google.api_core.gapic_v1.client_info.ClientInfo):
The client info used to send a user-agent string along with
API requests. If ``None``, then default info will be used.
Generally, you only need to set this if you're developing
your own client library.
always_use_jwt_access (Optional[bool]): Whether self signed JWT should
be used for service account credentials.
Raises:
google.auth.exceptions.MutualTlsChannelError: If mutual TLS transport
creation failed for any reason.
google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials``
and ``credentials_file`` are passed.
"""
self._grpc_channel = None
self._ssl_channel_credentials = ssl_channel_credentials
self._stubs: Dict[str, Callable] = {}
if api_mtls_endpoint:
warnings.warn("api_mtls_endpoint is deprecated", DeprecationWarning)
if client_cert_source:
warnings.warn("client_cert_source is deprecated", DeprecationWarning)
if channel:
# Ignore credentials if a channel was passed.
credentials = False
# If a channel was explicitly provided, set it.
self._grpc_channel = channel
self._ssl_channel_credentials = None
else:
if api_mtls_endpoint:
host = api_mtls_endpoint
# Create SSL credentials with client_cert_source or application
# default SSL credentials.
if client_cert_source:
cert, key = client_cert_source()
self._ssl_channel_credentials = grpc.ssl_channel_credentials(
certificate_chain=cert, private_key=key
)
else:
self._ssl_channel_credentials = SslCredentials().ssl_credentials
else:
if client_cert_source_for_mtls and not ssl_channel_credentials:
cert, key = client_cert_source_for_mtls()
self._ssl_channel_credentials = grpc.ssl_channel_credentials(
certificate_chain=cert, private_key=key
)
# The base transport sets the host, credentials and scopes
super().__init__(
host=host,
credentials=credentials,
credentials_file=credentials_file,
scopes=scopes,
quota_project_id=quota_project_id,
client_info=client_info,
always_use_jwt_access=always_use_jwt_access,
)
if not self._grpc_channel:
self._grpc_channel = type(self).create_channel(
self._host,
# use the credentials which are saved
credentials=self._credentials,
# Set ``credentials_file`` to ``None`` here as
# the credentials that we saved earlier should be used.
credentials_file=None,
scopes=self._scopes,
ssl_credentials=self._ssl_channel_credentials,
quota_project_id=quota_project_id,
options=[
("grpc.max_send_message_length", -1),
("grpc.max_receive_message_length", -1),
],
)
# Wrap messages. This must be done after self._grpc_channel exists
self._prep_wrapped_messages(client_info)
@property
def grpc_channel(self) -> aio.Channel:
"""Create the channel designed to connect to this service.
This property caches on the instance; repeated calls return
the same channel.
"""
# Return the channel from cache.
return self._grpc_channel
@property
def predict(
self,
) -> Callable[
[prediction_service.PredictRequest],
Awaitable[prediction_service.PredictResponse],
]:
r"""Return a callable for the predict method over gRPC.
Makes a recommendation prediction. If using API Key based
authentication, the API Key must be registered using the
[PredictionApiKeyRegistry][google.cloud.recommendationengine.v1beta1.PredictionApiKeyRegistry]
service. `Learn
more </recommendations-ai/docs/setting-up#register-key>`__.
Returns:
Callable[[~.PredictRequest],
Awaitable[~.PredictResponse]]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "predict" not in self._stubs:
self._stubs["predict"] = self.grpc_channel.unary_unary(
"/google.cloud.recommendationengine.v1beta1.PredictionService/Predict",
request_serializer=prediction_service.PredictRequest.serialize,
response_deserializer=prediction_service.PredictResponse.deserialize,
)
return self._stubs["predict"]
def close(self):
return self.grpc_channel.close()
__all__ = ("PredictionServiceGrpcAsyncIOTransport",)
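# Usage sketch (illustrative; this transport is normally constructed for you by
# the generated async client rather than instantiated directly, and credentials
# here are taken from the environment):
#
#     transport = PredictionServiceGrpcAsyncIOTransport()
#     channel = transport.grpc_channel    # cached aio.Channel
#     predict_rpc = transport.predict     # unary-unary stub; awaitable when called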
|
|
#!/usr/bin/python
import sys
import copy
import numpy as np
import pos_wrappers
from pos_wrapper_skel import generic_workflow
import pos_parameters
from pos_deformable_wrappers import blank_slice_deformation_wrapper
"""
* Assigning weights for the images by reading them from files or
applying weighting functions.
* Preprocessing images: calculating images that will be used to
perform registration based on the resliced images from previous
iteration.
* Launching actual registration process and calculating deformation
fields.
"""
class deformable_reconstruction_iteration(generic_workflow):
_f = {
'src_slice' : pos_parameters.filename('src_slice', work_dir='00_src_slices', str_template='{idx:04d}.nii.gz'),
'processed' : pos_parameters.filename('processed', work_dir='01_process_slices', str_template='{idx:04d}.nii.gz'),
'outline' : pos_parameters.filename('outline', work_dir='02_outline', str_template='{idx:04d}.nii.gz'),
'poutline' : pos_parameters.filename('poutline', work_dir='03_poutline', str_template='{idx:04d}.nii.gz'),
'cmask' : pos_parameters.filename('cmask', work_dir='04_cmask', str_template='{idx:04d}.nii.gz'),
'pcmask' : pos_parameters.filename('pcmask', work_dir='05_pcmask', str_template='{idx:04d}.nii.gz'),
'transform' : pos_parameters.filename('transform', work_dir='11_transformations', str_template='{idx:04d}Warp.nii.gz'),
'out_naming' : pos_parameters.filename('out_naming', work_dir='11_transformations', str_template='{idx:04d}'),
'resliced' : pos_parameters.filename('resliced', work_dir='21_resliced', str_template='{idx:04d}.nii.gz'),
'resliced_outline' : pos_parameters.filename('resliced_outline', work_dir='22_resliced_outline', str_template='{idx:04d}.nii.gz'),
'resliced_custom' : pos_parameters.filename('resliced_custom', work_dir='24_resliced_custom', str_template='{idx:04d}.nii.gz')
}
__IMAGE_DIMENSION = 2
def __init__(self, options, args):
super(self.__class__, self).__init__(options, args)
start, end, eps = self._get_edges()
self.slice_range = range(start, end + 1)
# Convert the number of iterations string to list of integers
self.options.antsIterations = \
map(int, self.options.antsIterations.strip().split("x"))
        # Load data for outlier removal routines
self._load_subset_file()
self._read_custom_registration_assignment()
def _read_custom_registration_assignment(self):
"""
Helper method for correcting outlier slices.
"""
        # If a custom mask is provided and its weight is greater than zero, the
        # 'outlier removal mechanism' should be used. This mechanism corrects
        # only the slices enumerated in self.options.maskedVolumeFile,
        # registering them to their assigned fixed slices under the masks.
if self.options.maskedVolume and \
self.options.maskedVolumeWeight > 0 and \
self.options.maskedVolumeFile:
self.masked_registraion = \
self._get_outliers_registration_assignment(
self.options.maskedVolumeFile)
self.subset = self.masked_registraion.keys()
else:
self.masked_registraion = {}
def _get_outliers_registration_assignment(self, filename, delimiter=" "):
"""
:param filename: filename to read the outliers assignment from
:type filename: str
:param delimiter: field delimiter
:type delimiter: str
:returns: Settings for the coregistration of the given moving section.
:rtype: dict
        Reads a file containing the correction parameters for the outlier
        slices. The registration parameters are used to correct individual
        images instead of correcting the whole series of sections. The
        parameters are required to be passed via a CSV file. Depending on the
        number of parameters in a single line, one of two workflows is used.
        The parameters may be passed in two ways.
        1. If a row contains two values, they are interpreted as: (1) moving
        file index and (2) fixed file index. This means that the outlier
        correction process will rely on warping the moving image to the fixed
        image. The registration process will be driven according to the
        parameters passed via the command line, thus all images will be
        registered using the same set of parameters.
        2. If a row contains eight fields, they are interpreted in the following way:
           1. Moving image index,
           2. Fixed image index,
           3. Image similarity metric,
           4. Image similarity metric parameter,
           5. Registration iterations,
           6. Gradient step for SyN transformation,
           7. Regularization type,
           8. Amount of regularization.
        Unlike the former scheme, the latter approach allows each pair of
        sections to be coregistered using different settings.
"""
returnDictionary = {}
columns = {'fixed': 1, 'moving': 0, 'metric': 2, 'met_opt': 3,
'iters': 4, 'trval': 5, 'regtype': 6, 'regam': 7}
for sourceLine in open(filename):
if sourceLine.strip().startswith('#') or sourceLine.strip() == "":
continue
line = sourceLine.split("#")[0].strip().split(delimiter)
key = int(line[columns['moving']])
# There are two options possible, either
# 1) There is only moving_image => fixed image assignment
# 2) There are full registration settings provided for
# each of the entries
            # The two options can be mixed within a single assignment file.
            # Check that there is only one assignment per moving slice:
if key in returnDictionary:
print >> sys.stderr, \
"Entry %s defined more than once. Skipping..." % key
continue
if len(line) > 2:
value = {}
value['moving'] = key
value['fixed'] = int(line[columns['fixed']])
value['metric'] = line[columns['metric']]
value['met_opt'] = float(line[columns['met_opt']])
value['iters'] = map(int, line[columns['iters']].split("x"))
value['trval'] = float(line[columns['trval']])
value['regtype'] = line[columns['regtype']]
value['regam'] = map(float, line[columns['regam']].split(","))
elif len(line) == 2:
value = {}
value['moving'] = key
value['fixed'] = int(line[columns['fixed']])
value['metric'] = self.options.antsImageMetric
value['met_opt'] = self.options.antsImageMetricOpt
value['iters'] = self.options.antsIterations
value['trval'] = self.options.antsTransformation
value['regtype'] = self.options.antsRegularizationType
value['regam'] = self.options.antsRegularization
returnDictionary[key] = value
return returnDictionary
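    # Illustrative assignment file (made-up values, space-delimited), mixing the
    # two supported row formats described in the docstring above:
    #
    #     # moving fixed [metric met_opt iters trval regtype regam]
    #     42 41
    #     43 41 CC 4 1000x1000x0 0.25 Gauss 3.0,0.0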
def _get_edges(self):
"""
        Convenience function for returning frequently used numbers.
        :returns: The first and the last slice index of the reconstruction
                  process, as well as epsilon (the neighbourhood size).
        :rtype: tuple
"""
return (self.options.startSlice,
self.options.endSlice,
self.options.neighbourhood)
def _load_subset_file(self):
"""
Loads a subset of slices from a given file. When the additional file
is provided, only slices with indices from the file will be registered.
"""
if self.options.registerSubset:
subset = np.loadtxt(self.options.registerSubset)
self.subset = list(subset)
else:
self.subset = self.slice_range
def _assign_weights_from_func(self):
"""
Assing weights for image averaging. Currently just constants weights
are assigned and that seems to be quite a good solution.
"""
start, end, eps = self._get_edges()
self.weights = {}
for i in self.slice_range:
for j in range(i - eps, i + eps + 1):
self.weights[(i, j)] = 1
def _assign_weights(self):
self._assign_weights_from_func()
def get_weight(self, i, j):
return self.weights[(i, j)]
def _preprocess_images(self):
return self._average_images()
def _average_images(self):
start, end, eps = self._get_edges()
if self.options.inputVolume and self.options.inputVolumeWeight > 0:
commands = []
for i in self.slice_range:
files_to_average = []
weights = []
for j in range(i - eps, i + eps + 1):
if j != i and j <= end and j >= start:
files_to_average.append(self.f['src_slice'](idx=j))
weights.append(self.get_weight(i, j))
if j < start:
files_to_average.append(self.f['src_slice'](idx=start))
weights.append(self.get_weight(i, start))
if j > end:
files_to_average.append(self.f['src_slice'](idx=end))
weights.append(self.get_weight(i, end))
command = pos_wrappers.images_weighted_average(
dimension=2,
input_images=files_to_average,
weights=weights,
output_type='float',
output_image=self.f['processed'](idx=i))
commands.append(copy.deepcopy(command))
self.execute(commands)
if self.options.outlineVolume and self.options.outlineVolumeWeight > 0:
commands = []
for i in self.slice_range:
files_to_average = []
weights = []
for j in range(i - eps, i + eps + 1):
if j != i and j <= end and j >= start:
files_to_average.append(self.f['outline'](idx=j))
weights.append(self.get_weight(i, j))
if j < start:
                        files_to_average.append(self.f['outline'](idx=start))
weights.append(self.get_weight(i, start))
if j > end:
                        files_to_average.append(self.f['outline'](idx=end))
weights.append(self.get_weight(i, end))
command = pos_wrappers.images_weighted_average(
dimension=2,
input_images=files_to_average,
weights=weights,
output_type='float',
output_image=self.f['poutline'](idx=i))
commands.append(copy.deepcopy(command))
self.execute(commands)
def _get_default_reg_settings(self):
return (self.options.antsImageMetric,
self.options.antsImageMetricOpt,
self.options.antsIterations,
self.options.antsTransformation,
self.options.antsRegularizationType,
self.options.antsRegularization)
def _get_custom_reg_settings(self, mov_slice_idx):
src = self.masked_registraion[mov_slice_idx]
return (src['metric'], src['met_opt'], src['iters'],
src['trval'], src['regtype'], src['regam'])
def _calculate_transformations_masked(self):
"""
Generate and invoke commands for generating deformation fields.
        Commands are generated based on a number of factors. The actual
        dependencies (what is registered to what, and how) are quite
        complicated, and it is my sweet secret how it is actually calculated.
"""
start, end, eps = self._get_edges()
commands = []
for i in self.slice_range:
metrics = []
j_data = self.masked_registraion.get(i, None)
if j_data is None:
fixed_image_type = 'processed'
fixed_outline_type = 'poutline'
mask_image = None
j = i
r_metric, parameter, iterations, transf_grad, reg_type, reg_ammount =\
self._get_default_reg_settings()
else:
fixed_image_type = 'src_slice'
fixed_outline_type = 'outline'
j = j_data['fixed']
mask_image = self.f['cmask'](idx=j)
r_metric, parameter, iterations, transf_grad, reg_type, reg_ammount =\
self._get_custom_reg_settings(i)
if self.options.inputVolume and self.options.inputVolumeWeight > 0:
metric = pos_wrappers.ants_intensity_meric(
fixed_image=self.f[fixed_image_type](idx=j),
moving_image=self.f['src_slice'](idx=i),
metric=r_metric,
weight=self.options.inputVolumeWeight,
parameter=parameter)
metrics.append(copy.deepcopy(metric))
if self.options.outlineVolume and self.options.outlineVolumeWeight > 0:
outline_metric = pos_wrappers.ants_intensity_meric(
fixed_image=self.f[fixed_outline_type](idx=j),
moving_image=self.f['outline'](idx=i),
metric=r_metric,
weight=self.options.outlineVolumeWeight,
parameter=parameter)
metrics.append(copy.deepcopy(outline_metric))
if self.options.referenceVolume and self.options.referenceVolumeWeight > 0:
reference_metric = pos_wrappers.ants_intensity_meric(
fixed_image=self.parent_process.f['ref_custom'](idx=j),
moving_image=self.f['src_slice'](idx=i),
metric=r_metric,
weight=self.options.referenceVolumeWeight,
parameter=parameter)
metrics.append(copy.deepcopy(reference_metric))
if i in self.subset:
registration = pos_wrappers.ants_registration(
dimension=self.__IMAGE_DIMENSION,
outputNaming=self.f['out_naming'](idx=i),
iterations=iterations,
transformation=('SyN', [transf_grad]),
regularization=(reg_type, reg_ammount),
affineIterations=[0], # 0: Hardcoded as this is a
continueAffine=False, # deformable reconstruction
rigidAffine=False, # workflow
imageMetrics=metrics,
maskImage=mask_image,
allMetricsConverge=True)
else:
registration = blank_slice_deformation_wrapper(
input_image=self.f['src_slice'](idx=i),
output_image=self.f['transform'](idx=i))
commands.append(copy.deepcopy(registration))
self.execute(commands)
def launch(self):
self._assign_weights()
self._preprocess_images()
self._calculate_transformations_masked()
def __call__(self, *args, **kwargs):
return self.launch()
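# Usage sketch (illustrative only; in the full pipeline the parent deformable
# reconstruction workflow builds the `options` and `args` namespaces and drives
# the successive iterations):
#
#     iteration = deformable_reconstruction_iteration(options, args)
#     iteration()    # equivalent to iteration.launch()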
|
|
#
# Routes requests to the Data Access Framework through Python Thrift.
#
#
#
# SOFTWARE HISTORY
#
# Date Ticket# Engineer Description
# ------------ ---------- ----------- --------------------------
# 05/21/13 2023 dgilling Initial Creation.
# 01/06/14 2537 bsteffen Share geometry WKT.
# 03/03/14 2673 bsteffen Add ability to query only ref times.
# 07/22/14 3185 njensen Added optional/default args to newDataRequest
# 07/23/14 3185 njensen Added new methods
# 07/30/14 3185 njensen Renamed valid identifiers to optional
# 06/30/15 4569 nabowle Use hex WKB for geometries.
# 04/13/15 5379 tgurney Add getIdentifierValues()
# 06/01/16 5587 tgurney Add new signatures for
# getRequiredIdentifiers() and
# getOptionalIdentifiers()
# 08/01/16 2416 tgurney Add getNotificationFilter()
# 10/13/16 5916 bsteffen Correct grid shape, allow lazy grid lat/lon
# 10/26/16 5919 njensen Speed up geometry creation in getGeometryData()
#
import numpy
import shapely.wkb
from dynamicserialize.dstypes.com.raytheon.uf.common.dataaccess.impl import DefaultDataRequest
from dynamicserialize.dstypes.com.raytheon.uf.common.dataaccess.request import GetAvailableLocationNamesRequest
from dynamicserialize.dstypes.com.raytheon.uf.common.dataaccess.request import GetAvailableTimesRequest
from dynamicserialize.dstypes.com.raytheon.uf.common.dataaccess.request import GetGeometryDataRequest
from dynamicserialize.dstypes.com.raytheon.uf.common.dataaccess.request import GetGridDataRequest
from dynamicserialize.dstypes.com.raytheon.uf.common.dataaccess.request import GetGridLatLonRequest
from dynamicserialize.dstypes.com.raytheon.uf.common.dataaccess.request import GetAvailableParametersRequest
from dynamicserialize.dstypes.com.raytheon.uf.common.dataaccess.request import GetAvailableLevelsRequest
from dynamicserialize.dstypes.com.raytheon.uf.common.dataaccess.request import GetRequiredIdentifiersRequest
from dynamicserialize.dstypes.com.raytheon.uf.common.dataaccess.request import GetOptionalIdentifiersRequest
from dynamicserialize.dstypes.com.raytheon.uf.common.dataaccess.request import GetIdentifierValuesRequest
from dynamicserialize.dstypes.com.raytheon.uf.common.dataaccess.request import GetSupportedDatatypesRequest
from dynamicserialize.dstypes.com.raytheon.uf.common.dataaccess.request import GetNotificationFilterRequest
from awips import ThriftClient
from awips.dataaccess import PyGeometryData
from awips.dataaccess import PyGridData
class LazyGridLatLon(object):
def __init__(self, client, nx, ny, envelope, crsWkt):
self._latLonGrid = None
self._client = client
self._request = GetGridLatLonRequest()
self._request.setNx(nx)
self._request.setNy(ny)
self._request.setEnvelope(envelope)
self._request.setCrsWkt(crsWkt)
def __call__(self):
        # It's important that the data is cached internally so that if multiple
# GridData are sharing the same delegate then they can also share a
# single request for the LatLon information.
if self._latLonGrid is None:
response = self._client.sendRequest(self._request)
nx = response.getNx()
ny = response.getNy()
latData = numpy.reshape(numpy.array(response.getLats()), (ny, nx))
lonData = numpy.reshape(numpy.array(response.getLons()), (ny, nx))
self._latLonGrid = (lonData, latData)
return self._latLonGrid
class ThriftClientRouter(object):
def __init__(self, host='localhost'):
self._client = ThriftClient.ThriftClient(host)
self._lazyLoadGridLatLon = False
def setLazyLoadGridLatLon(self, lazyLoadGridLatLon):
self._lazyLoadGridLatLon = lazyLoadGridLatLon
def getAvailableTimes(self, request, refTimeOnly):
timesRequest = GetAvailableTimesRequest()
timesRequest.setRequestParameters(request)
timesRequest.setRefTimeOnly(refTimeOnly)
response = self._client.sendRequest(timesRequest)
return response
def getGridData(self, request, times):
gridDataRequest = GetGridDataRequest()
gridDataRequest.setIncludeLatLonData(not self._lazyLoadGridLatLon)
gridDataRequest.setRequestParameters(request)
# if we have an iterable times instance, then the user must have asked
# for grid data with the List of DataTime objects
# else, we assume it was a single TimeRange that was meant for the
# request
try:
iter(times)
gridDataRequest.setRequestedTimes(times)
except TypeError:
gridDataRequest.setRequestedPeriod(times)
response = self._client.sendRequest(gridDataRequest)
locSpecificData = {}
locNames = list(response.getSiteNxValues().keys())
for location in locNames:
nx = response.getSiteNxValues()[location]
ny = response.getSiteNyValues()[location]
if self._lazyLoadGridLatLon:
envelope = response.getSiteEnvelopes()[location]
crsWkt = response.getSiteCrsWkt()[location]
delegate = LazyGridLatLon(
self._client, nx, ny, envelope, crsWkt)
locSpecificData[location] = (nx, ny, delegate)
else:
latData = numpy.reshape(numpy.array(
response.getSiteLatGrids()[location]), (ny, nx))
lonData = numpy.reshape(numpy.array(
response.getSiteLonGrids()[location]), (ny, nx))
locSpecificData[location] = (nx, ny, (lonData, latData))
retVal = []
for gridDataRecord in response.getGridData():
locationName = gridDataRecord.getLocationName()
locData = locSpecificData[locationName]
if self._lazyLoadGridLatLon:
retVal.append(PyGridData.PyGridData(gridDataRecord, locData[
0], locData[1], latLonDelegate=locData[2]))
else:
retVal.append(PyGridData.PyGridData(
gridDataRecord, locData[0], locData[1], locData[2]))
return retVal
def getGeometryData(self, request, times):
geoDataRequest = GetGeometryDataRequest()
geoDataRequest.setRequestParameters(request)
# if we have an iterable times instance, then the user must have asked
# for geometry data with the List of DataTime objects
# else, we assume it was a single TimeRange that was meant for the
# request
try:
iter(times)
geoDataRequest.setRequestedTimes(times)
except TypeError:
geoDataRequest.setRequestedPeriod(times)
response = self._client.sendRequest(geoDataRequest)
geometries = []
for wkb in response.getGeometryWKBs():
# the wkb is a numpy.ndarray of dtype int8
# convert the bytearray to a byte string and load it
geometries.append(shapely.wkb.loads(wkb.tostring()))
retVal = []
for geoDataRecord in response.getGeoData():
geom = geometries[geoDataRecord.getGeometryWKBindex()]
retVal.append(PyGeometryData.PyGeometryData(geoDataRecord, geom))
return retVal
def getAvailableLocationNames(self, request):
locNamesRequest = GetAvailableLocationNamesRequest()
locNamesRequest.setRequestParameters(request)
response = self._client.sendRequest(locNamesRequest)
return response
def getAvailableParameters(self, request):
paramReq = GetAvailableParametersRequest()
paramReq.setRequestParameters(request)
response = self._client.sendRequest(paramReq)
return response
def getAvailableLevels(self, request):
levelReq = GetAvailableLevelsRequest()
levelReq.setRequestParameters(request)
response = self._client.sendRequest(levelReq)
return response
def getRequiredIdentifiers(self, request):
if str(request) == request:
# Handle old version getRequiredIdentifiers(str)
request = self.newDataRequest(request)
idReq = GetRequiredIdentifiersRequest()
idReq.setRequest(request)
response = self._client.sendRequest(idReq)
return response
def getOptionalIdentifiers(self, request):
if str(request) == request:
# Handle old version getOptionalIdentifiers(str)
request = self.newDataRequest(request)
idReq = GetOptionalIdentifiersRequest()
idReq.setRequest(request)
response = self._client.sendRequest(idReq)
return response
def getIdentifierValues(self, request, identifierKey):
idValReq = GetIdentifierValuesRequest()
idValReq.setIdentifierKey(identifierKey)
idValReq.setRequestParameters(request)
response = self._client.sendRequest(idValReq)
return response
def newDataRequest(self, datatype, parameters=[], levels=[], locationNames=[], envelope=None, **kwargs):
req = DefaultDataRequest()
if datatype:
req.setDatatype(datatype)
if parameters:
req.setParameters(*parameters)
if levels:
req.setLevels(*levels)
if locationNames:
req.setLocationNames(*locationNames)
if envelope:
req.setEnvelope(envelope)
if kwargs:
# any args leftover are assumed to be identifiers
req.identifiers = kwargs
return req
def getSupportedDatatypes(self):
response = self._client.sendRequest(GetSupportedDatatypesRequest())
return response
def getNotificationFilter(self, request):
notifReq = GetNotificationFilterRequest()
notifReq.setRequestParameters(request)
response = self._client.sendRequest(notifReq)
return response
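# Usage sketch (hostname, datatype and parameters below are placeholders):
#
#     router = ThriftClientRouter('edex-cloud.unidata.ucar.edu')
#     req = router.newDataRequest('grid', parameters=['T'], levels=['2FHAG'])
#     times = router.getAvailableTimes(req, refTimeOnly=False)
#     grids = router.getGridData(req, times[-1:])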
|
|
# -*- coding: utf-8 -*-
"""
Class for reading the old data format from Plexon
acquisition system (.plx)
Note that Plexon now uses a new format, PL2, which is NOT
supported by this IO.
Compatible with versions 100 to 106.
Other versions have not been tested.
This IO is developed thanks to the header file downloadable from:
http://www.plexon.com/software-downloads
This IO was rewritten in 2017, and it was a huge pain because
the underlying file format is really inefficient.
The rewrite is now based on numpy dtype and not on Python struct.
This should be faster.
If, one day, somebody uses it, consider offering me a beer.
Author: Samuel Garcia
"""
from __future__ import print_function, division, absolute_import
# "from __future__ import unicode_literals" is not compatible with numpy.dtype on both py2 and py3
from .baserawio import (BaseRawIO, _signal_channel_dtype, _unit_channel_dtype,
_event_channel_dtype)
import numpy as np
from collections import OrderedDict
import datetime
class PlexonRawIO(BaseRawIO):
extensions = ['plx']
rawmode = 'one-file'
def __init__(self, filename=''):
BaseRawIO.__init__(self)
self.filename = filename
def _source_name(self):
return self.filename
def _parse_header(self):
# global header
with open(self.filename, 'rb') as fid:
offset0 = 0
global_header = read_as_dict(fid, GlobalHeader, offset=offset0)
rec_datetime = datetime.datetime(global_header['Year'],
global_header['Month'],
global_header['Day'],
global_header['Hour'],
global_header['Minute'],
global_header['Second'])
# dsp channels header = spikes and waveforms
nb_unit_chan = global_header['NumDSPChannels']
offset1 = np.dtype(GlobalHeader).itemsize
dspChannelHeaders = np.memmap(self.filename, dtype=DspChannelHeader, mode='r',
offset=offset1, shape=(nb_unit_chan,))
# event channel header
nb_event_chan = global_header['NumEventChannels']
offset2 = offset1 + np.dtype(DspChannelHeader).itemsize * nb_unit_chan
eventHeaders = np.memmap(self.filename, dtype=EventChannelHeader, mode='r',
offset=offset2, shape=(nb_event_chan,))
# slow channel header = signal
nb_sig_chan = global_header['NumSlowChannels']
offset3 = offset2 + np.dtype(EventChannelHeader).itemsize * nb_event_chan
slowChannelHeaders = np.memmap(self.filename, dtype=SlowChannelHeader, mode='r',
offset=offset3, shape=(nb_sig_chan,))
offset4 = offset3 + np.dtype(SlowChannelHeader).itemsize * nb_sig_chan
# loop over data blocks and put them by type and channel
block_headers = {1: {c: [] for c in dspChannelHeaders['Channel']},
4: {c: [] for c in eventHeaders['Channel']},
5: {c: [] for c in slowChannelHeaders['Channel']},
}
block_pos = {1: {c: [] for c in dspChannelHeaders['Channel']},
4: {c: [] for c in eventHeaders['Channel']},
5: {c: [] for c in slowChannelHeaders['Channel']},
}
data = self._memmap = np.memmap(self.filename, dtype='u1', offset=0, mode='r')
pos = offset4
while pos < data.size:
bl_header = data[pos:pos + 16].view(DataBlockHeader)[0]
length = bl_header['NumberOfWaveforms'] * bl_header['NumberOfWordsInWaveform'] * 2 + 16
bl_type = int(bl_header['Type'])
chan_id = int(bl_header['Channel'])
block_headers[bl_type][chan_id].append(bl_header)
block_pos[bl_type][chan_id].append(pos)
pos += length
self._last_timestamps = bl_header['UpperByteOf5ByteTimestamp'] * \
2 ** 32 + bl_header['TimeStamp']
# ... and finalize them in self._data_blocks
        # for faster access depending on the block type (1, 4, 5)
self._data_blocks = {}
dt_base = [('pos', 'int64'), ('timestamp', 'int64'), ('size', 'int64')]
dtype_by_bltype = {
# Spikes and waveforms
1: np.dtype(dt_base + [('unit_id', 'uint16'), ('n1', 'uint16'), ('n2', 'uint16'), ]),
# Events
4: np.dtype(dt_base + [('label', 'uint16'), ]),
# Signals
5: np.dtype(dt_base + [('cumsum', 'int64'), ]),
}
for bl_type in block_headers:
self._data_blocks[bl_type] = {}
for chan_id in block_headers[bl_type]:
bl_header = np.array(block_headers[bl_type][chan_id], dtype=DataBlockHeader)
bl_pos = np.array(block_pos[bl_type][chan_id], dtype='int64')
timestamps = bl_header['UpperByteOf5ByteTimestamp'] * \
2 ** 32 + bl_header['TimeStamp']
n1 = bl_header['NumberOfWaveforms']
n2 = bl_header['NumberOfWordsInWaveform']
dt = dtype_by_bltype[bl_type]
data_block = np.empty(bl_pos.size, dtype=dt)
data_block['pos'] = bl_pos + 16
data_block['timestamp'] = timestamps
data_block['size'] = n1 * n2 * 2
if bl_type == 1: # Spikes and waveforms
data_block['unit_id'] = bl_header['Unit']
data_block['n1'] = n1
data_block['n2'] = n2
elif bl_type == 4: # Events
data_block['label'] = bl_header['Unit']
elif bl_type == 5: # Signals
if data_block.size > 0:
                        # cumulative sum of sample counts for fast access to chunks
data_block['cumsum'][0] = 0
data_block['cumsum'][1:] = np.cumsum(data_block['size'][:-1]) // 2
self._data_blocks[bl_type][chan_id] = data_block
# signals channels
sig_channels = []
all_sig_length = []
for chan_index in range(nb_sig_chan):
h = slowChannelHeaders[chan_index]
name = h['Name'].decode('utf8')
chan_id = h['Channel']
length = self._data_blocks[5][chan_id]['size'].sum() // 2
if length == 0:
continue # channel not added
all_sig_length.append(length)
sampling_rate = float(h['ADFreq'])
sig_dtype = 'int16'
            units = ''  # I don't know the units
if global_header['Version'] in [100, 101]:
gain = 5000. / (2048 * h['Gain'] * 1000.)
elif global_header['Version'] in [102]:
gain = 5000. / (2048 * h['Gain'] * h['PreampGain'])
elif global_header['Version'] >= 103:
gain = global_header['SlowMaxMagnitudeMV'] / (
.5 * (2 ** global_header['BitsPerSpikeSample']) *
h['Gain'] * h['PreampGain'])
offset = 0.
group_id = 0
sig_channels.append((name, chan_id, sampling_rate, sig_dtype,
units, gain, offset, group_id))
if len(all_sig_length) > 0:
self._signal_length = min(all_sig_length)
sig_channels = np.array(sig_channels, dtype=_signal_channel_dtype)
self._global_ssampling_rate = global_header['ADFrequency']
if slowChannelHeaders.size > 0:
assert np.unique(slowChannelHeaders['ADFreq']
                             ).size == 1, 'Signals do not have the same sampling rate'
self._sig_sampling_rate = float(slowChannelHeaders['ADFreq'][0])
# Determine number of units per channels
self.internal_unit_ids = []
for chan_id, data_clock in self._data_blocks[1].items():
unit_ids = np.unique(data_clock['unit_id'])
for unit_id in unit_ids:
self.internal_unit_ids.append((chan_id, unit_id))
# Spikes channels
unit_channels = []
for unit_index, (chan_id, unit_id) in enumerate(self.internal_unit_ids):
c = np.nonzero(dspChannelHeaders['Channel'] == chan_id)[0][0]
h = dspChannelHeaders[c]
name = h['Name'].decode('utf8')
_id = 'ch{}#{}'.format(chan_id, unit_id)
wf_units = ''
if global_header['Version'] < 103:
wf_gain = 3000. / (2048 * h['Gain'] * 1000.)
elif 103 <= global_header['Version'] < 105:
wf_gain = global_header['SpikeMaxMagnitudeMV'] / (
.5 * 2. ** (global_header['BitsPerSpikeSample']) *
h['Gain'] * 1000.)
elif global_header['Version'] >= 105:
wf_gain = global_header['SpikeMaxMagnitudeMV'] / (
.5 * 2. ** (global_header['BitsPerSpikeSample']) *
h['Gain'] * global_header['SpikePreAmpGain'])
wf_offset = 0.
            wf_left_sweep = -1  # not known
wf_sampling_rate = global_header['WaveformFreq']
unit_channels.append((name, _id, wf_units, wf_gain, wf_offset,
wf_left_sweep, wf_sampling_rate))
unit_channels = np.array(unit_channels, dtype=_unit_channel_dtype)
# Event channels
event_channels = []
for chan_index in range(nb_event_chan):
h = eventHeaders[chan_index]
chan_id = h['Channel']
name = h['Name'].decode('utf8')
_id = h['Channel']
event_channels.append((name, _id, 'event'))
event_channels = np.array(event_channels, dtype=_event_channel_dtype)
        # fill into the header dict
self.header = {}
self.header['nb_block'] = 1
self.header['nb_segment'] = [1]
self.header['signal_channels'] = sig_channels
self.header['unit_channels'] = unit_channels
self.header['event_channels'] = event_channels
# Annotations
self._generate_minimal_annotations()
bl_annotations = self.raw_annotations['blocks'][0]
seg_annotations = bl_annotations['segments'][0]
for d in (bl_annotations, seg_annotations):
d['rec_datetime'] = rec_datetime
d['plexon_version'] = global_header['Version']
def _segment_t_start(self, block_index, seg_index):
return 0.
def _segment_t_stop(self, block_index, seg_index):
t_stop1 = float(self._last_timestamps) / self._global_ssampling_rate
if hasattr(self, '_signal_length'):
t_stop2 = self._signal_length / self._sig_sampling_rate
return max(t_stop1, t_stop2)
else:
return t_stop1
def _get_signal_size(self, block_index, seg_index, channel_indexes):
return self._signal_length
def _get_signal_t_start(self, block_index, seg_index, channel_indexes):
return 0.
def _get_analogsignal_chunk(self, block_index, seg_index, i_start, i_stop, channel_indexes):
if i_start is None:
i_start = 0
if i_stop is None:
i_stop = self._signal_length
if channel_indexes is None:
channel_indexes = np.arange(self.header['signal_channels'].size)
raw_signals = np.zeros((i_stop - i_start, len(channel_indexes)), dtype='int16')
for c, channel_index in enumerate(channel_indexes):
chan_header = self.header['signal_channels'][channel_index]
chan_id = chan_header['id']
data_blocks = self._data_blocks[5][chan_id]
# loop over data blocks and get chunks
bl0 = np.searchsorted(data_blocks['cumsum'], i_start, side='left')
bl1 = np.searchsorted(data_blocks['cumsum'], i_stop, side='left')
ind = 0
for bl in range(bl0, bl1):
ind0 = data_blocks[bl]['pos']
ind1 = data_blocks[bl]['size'] + ind0
data = self._memmap[ind0:ind1].view('int16')
if bl == bl1 - 1:
# right border
                    # be careful: bl could be both bl0 and bl1!
border = data.size - (i_stop - data_blocks[bl]['cumsum'])
data = data[:-border]
if bl == bl0:
# left border
border = i_start - data_blocks[bl]['cumsum']
data = data[border:]
raw_signals[ind:data.size + ind, c] = data
ind += data.size
return raw_signals
def _get_internal_mask(self, data_block, t_start, t_stop):
timestamps = data_block['timestamp']
if t_start is None:
lim0 = 0
else:
lim0 = int(t_start * self._global_ssampling_rate)
if t_stop is None:
lim1 = self._last_timestamps
else:
lim1 = int(t_stop * self._global_ssampling_rate)
keep = (timestamps >= lim0) & (timestamps <= lim1)
return keep
def _spike_count(self, block_index, seg_index, unit_index):
chan_id, unit_id = self.internal_unit_ids[unit_index]
data_block = self._data_blocks[1][chan_id]
nb_spike = np.sum(data_block['unit_id'] == unit_id)
return nb_spike
def _get_spike_timestamps(self, block_index, seg_index, unit_index, t_start, t_stop):
chan_id, unit_id = self.internal_unit_ids[unit_index]
data_block = self._data_blocks[1][chan_id]
keep = self._get_internal_mask(data_block, t_start, t_stop)
keep &= data_block['unit_id'] == unit_id
spike_timestamps = data_block[keep]['timestamp']
return spike_timestamps
def _rescale_spike_timestamp(self, spike_timestamps, dtype):
spike_times = spike_timestamps.astype(dtype)
spike_times /= self._global_ssampling_rate
return spike_times
def _get_spike_raw_waveforms(self, block_index, seg_index, unit_index, t_start, t_stop):
chan_id, unit_id = self.internal_unit_ids[unit_index]
data_block = self._data_blocks[1][chan_id]
n1 = data_block['n1'][0]
n2 = data_block['n2'][0]
keep = self._get_internal_mask(data_block, t_start, t_stop)
keep &= data_block['unit_id'] == unit_id
data_block = data_block[keep]
nb_spike = data_block.size
waveforms = np.zeros((nb_spike, n1, n2), dtype='int16')
for i, db in enumerate(data_block):
ind0 = db['pos']
ind1 = db['size'] + ind0
data = self._memmap[ind0:ind1].view('int16').reshape(n1, n2)
waveforms[i, :, :] = data
return waveforms
def _event_count(self, block_index, seg_index, event_channel_index):
chan_id = int(self.header['event_channels'][event_channel_index]['id'])
nb_event = self._data_blocks[4][chan_id].size
return nb_event
def _get_event_timestamps(self, block_index, seg_index, event_channel_index, t_start, t_stop):
chan_id = int(self.header['event_channels'][event_channel_index]['id'])
data_block = self._data_blocks[4][chan_id]
keep = self._get_internal_mask(data_block, t_start, t_stop)
db = data_block[keep]
timestamps = db['timestamp']
labels = db['label'].astype('U')
durations = None
return timestamps, durations, labels
def _rescale_event_timestamp(self, event_timestamps, dtype):
event_times = event_timestamps.astype(dtype)
event_times /= self._global_ssampling_rate
return event_times
def read_as_dict(fid, dtype, offset=None):
"""
Given a file descriptor
and a numpy.dtype of the binary struct return a dict.
Make conversion for strings.
"""
if offset is not None:
fid.seek(offset)
dt = np.dtype(dtype)
h = np.frombuffer(fid.read(dt.itemsize), dt)[0]
info = OrderedDict()
for k in dt.names:
v = h[k]
if dt[k].kind == 'S':
v = v.decode('utf8')
v = v.replace('\x03', '')
v = v.replace('\x00', '')
info[k] = v
return info
GlobalHeader = [
('MagicNumber', 'uint32'),
('Version', 'int32'),
('Comment', 'S128'),
('ADFrequency', 'int32'),
('NumDSPChannels', 'int32'),
('NumEventChannels', 'int32'),
('NumSlowChannels', 'int32'),
('NumPointsWave', 'int32'),
('NumPointsPreThr', 'int32'),
('Year', 'int32'),
('Month', 'int32'),
('Day', 'int32'),
('Hour', 'int32'),
('Minute', 'int32'),
('Second', 'int32'),
('FastRead', 'int32'),
('WaveformFreq', 'int32'),
('LastTimestamp', 'float64'),
# version >103
('Trodalness', 'uint8'),
('DataTrodalness', 'uint8'),
('BitsPerSpikeSample', 'uint8'),
('BitsPerSlowSample', 'uint8'),
('SpikeMaxMagnitudeMV', 'uint16'),
('SlowMaxMagnitudeMV', 'uint16'),
# version 105
('SpikePreAmpGain', 'uint16'),
# version 106
('AcquiringSoftware', 'S18'),
('ProcessingSoftware', 'S18'),
('Padding', 'S10'),
    # all versions
('TSCounts', 'int32', (650,)),
('WFCounts', 'int32', (650,)),
('EVCounts', 'int32', (512,)),
]
DspChannelHeader = [
('Name', 'S32'),
('SIGName', 'S32'),
('Channel', 'int32'),
('WFRate', 'int32'),
('SIG', 'int32'),
('Ref', 'int32'),
('Gain', 'int32'),
('Filter', 'int32'),
('Threshold', 'int32'),
('Method', 'int32'),
('NUnits', 'int32'),
('Template', 'uint16', (320,)),
('Fit', 'int32', (5,)),
('SortWidth', 'int32'),
('Boxes', 'uint16', (40,)),
('SortBeg', 'int32'),
# version 105
('Comment', 'S128'),
# version 106
('SrcId', 'uint8'),
('reserved', 'uint8'),
('ChanId', 'uint16'),
('Padding', 'int32', (10,)),
]
EventChannelHeader = [
('Name', 'S32'),
('Channel', 'int32'),
# version 105
('Comment', 'S128'),
# version 106
('SrcId', 'uint8'),
('reserved', 'uint8'),
('ChanId', 'uint16'),
('Padding', 'int32', (32,)),
]
SlowChannelHeader = [
('Name', 'S32'),
('Channel', 'int32'),
('ADFreq', 'int32'),
('Gain', 'int32'),
('Enabled', 'int32'),
('PreampGain', 'int32'),
# version 104
('SpikeChannel', 'int32'),
# version 105
('Comment', 'S128'),
# version 106
('SrcId', 'uint8'),
('reserved', 'uint8'),
('ChanId', 'uint16'),
('Padding', 'int32', (27,)),
]
DataBlockHeader = [
('Type', 'uint16'),
('UpperByteOf5ByteTimestamp', 'uint16'),
('TimeStamp', 'int32'),
('Channel', 'uint16'),
('Unit', 'uint16'),
('NumberOfWaveforms', 'uint16'),
('NumberOfWordsInWaveform', 'uint16'),
] # 16 bytes
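# Illustrative only: how the struct layouts above pair with read_as_dict().
# This guarded block is not part of the reader; it simply shows that the
# global header at the start of a .plx file can be parsed with the
# GlobalHeader description. The file path is taken from the command line.
if __name__ == '__main__':
    import sys
    if len(sys.argv) > 1:
        with open(sys.argv[1], 'rb') as fid:
            global_header = read_as_dict(fid, GlobalHeader, offset=0)
            print('Plexon file version %s, last timestamp %s' %
                  (global_header['Version'], global_header['LastTimestamp']))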
|
|
"""Functions and classes related to optimization (weight updates)."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import re
import tensorflow.compat.v1 as tf
from REDACTED.tensorflow_models.mlperf.models.rough.bert import lamb_optimizer
def create_optimizer(loss,
init_lr,
num_train_steps,
num_warmup_steps,
use_tpu,
optimizer_name="adamw",
poly_power=1.0,
start_warmup_step=0,
lamb_weight_decay_rate=0.01,
lamb_beta_1=0.9,
lamb_beta_2=0.999,
log_epsilon=-6,
use_bfloat16_all_reduce=False):
"""Creates an optimizer training op."""
global_step = tf.train.get_or_create_global_step()
learning_rate = tf.constant(value=init_lr, shape=[], dtype=tf.float32)
# Implements linear decay of the learning rate.
learning_rate = tf.train.polynomial_decay(
learning_rate,
global_step,
num_train_steps,
end_learning_rate=0.0,
power=poly_power,
cycle=False)
# Implements linear warmup. I.e., if global_step - start_warmup_step <
# num_warmup_steps, the learning rate will be
# `(global_step - start_warmup_step)/num_warmup_steps * init_lr`.
if num_warmup_steps:
tf.logging.info("++++++ warmup starts at step " + str(start_warmup_step)
+ ", for " + str(num_warmup_steps) + " steps ++++++")
global_steps_int = tf.cast(global_step, tf.int32)
start_warm_int = tf.constant(start_warmup_step, dtype=tf.int32)
global_steps_int = global_steps_int - start_warm_int
warmup_steps_int = tf.constant(num_warmup_steps, dtype=tf.int32)
global_steps_float = tf.cast(global_steps_int, tf.float32)
warmup_steps_float = tf.cast(warmup_steps_int, tf.float32)
warmup_percent_done = global_steps_float / warmup_steps_float
warmup_learning_rate = init_lr * warmup_percent_done
is_warmup = tf.cast(global_steps_int < warmup_steps_int, tf.float32)
learning_rate = (
(1.0 - is_warmup) * learning_rate + is_warmup * warmup_learning_rate)
  # It is OK to use this optimizer for fine-tuning, since this is how the
  # model was pre-trained (note that the Adam m/v variables are NOT loaded
  # from init_checkpoint). It is also OK to fine-tune with AdamW even if the
  # model was pre-trained with LAMB. As reported in the public BERT GitHub
  # repository, typical learning rates for SQuAD 1.1 fine-tuning are 3e-5,
  # 4e-5 or 5e-5. For LAMB, 3e-4, 4e-4 or 5e-4 work for a batch size of 64.
if optimizer_name == "adamw":
tf.logging.info("using adamw")
optimizer = AdamWeightDecayOptimizer(
learning_rate=learning_rate,
weight_decay_rate=0.01,
beta_1=0.9,
beta_2=0.999,
epsilon=10**(log_epsilon),
exclude_from_weight_decay=["LayerNorm", "layer_norm", "bias"])
elif optimizer_name == "lamb":
tf.logging.info("using lamb")
optimizer = lamb_optimizer.LAMBOptimizer(
learning_rate=learning_rate,
weight_decay_rate=lamb_weight_decay_rate,
beta_1=lamb_beta_1,
beta_2=lamb_beta_2,
epsilon=10**(log_epsilon),
exclude_from_weight_decay=["LayerNorm", "layer_norm", "bias"])
else:
raise ValueError("Not supported optimizer: ", optimizer)
tvars = tf.trainable_variables()
grads = tf.gradients(loss, tvars)
# This is how the model was pre-trained.
(grads, _) = tf.clip_by_global_norm(grads, clip_norm=1.0)
grads_tvars = zip(grads, tvars)
if use_tpu:
if use_bfloat16_all_reduce:
grads_tvars = [(tf.cast(
tf.tpu.cross_replica_sum(tf.cast(g, tf.bfloat16)), tf.float32), v)
for g, v in grads_tvars]
else:
grads_tvars = [(tf.tpu.cross_replica_sum(g), v) for g, v in grads_tvars]
train_op = optimizer.apply_gradients(grads_tvars, global_step=global_step)
if optimizer_name == "adamw":
# Normally the global step update is done inside of `apply_gradients`.
# However, `AdamWeightDecayOptimizer` does not do this.
new_global_step = global_step + 1
train_op = tf.group(train_op, [global_step.assign(new_global_step)])
return train_op
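# Illustrative only: a plain-Python mirror of the schedule assembled above
# (polynomial decay to zero plus linear warmup). It is not used by
# create_optimizer; it exists as a reference for sanity-checking the graph,
# and the argument names simply echo the ones above.
def reference_learning_rate(step, init_lr, num_train_steps, num_warmup_steps,
                            start_warmup_step=0, poly_power=1.0):
  """Returns the learning rate the ops above produce at integer `step`."""
  # Mirrors tf.train.polynomial_decay with end_learning_rate=0.0, cycle=False.
  decay_step = min(step, num_train_steps)
  decayed = init_lr * (1.0 - float(decay_step) / num_train_steps) ** poly_power
  if num_warmup_steps and (step - start_warmup_step) < num_warmup_steps:
    # Linear warmup: (step - start_warmup_step) / num_warmup_steps * init_lr.
    return init_lr * float(step - start_warmup_step) / num_warmup_steps
  return decayed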
class AdamWeightDecayOptimizer(tf.train.Optimizer):
"""A basic Adam optimizer that includes "correct" L2 weight decay."""
def __init__(self,
learning_rate,
weight_decay_rate=0.0,
beta_1=0.9,
beta_2=0.999,
epsilon=1e-6,
exclude_from_weight_decay=None,
name="AdamWeightDecayOptimizer"):
"""Constructs a AdamWeightDecayOptimizer."""
super(AdamWeightDecayOptimizer, self).__init__(False, name)
self.learning_rate = learning_rate
self.weight_decay_rate = weight_decay_rate
self.beta_1 = beta_1
self.beta_2 = beta_2
self.epsilon = epsilon
self.exclude_from_weight_decay = exclude_from_weight_decay
def apply_gradients(self, grads_and_vars, global_step=None, name=None):
"""See base class."""
assignments = []
for (grad, param) in grads_and_vars:
if grad is None or param is None:
continue
param_name = self._get_variable_name(param.name)
m = tf.get_variable(
name=param_name + "/adam_m",
shape=param.shape.as_list(),
dtype=tf.float32,
trainable=False,
initializer=tf.zeros_initializer())
v = tf.get_variable(
name=param_name + "/adam_v",
shape=param.shape.as_list(),
dtype=tf.float32,
trainable=False,
initializer=tf.zeros_initializer())
# Standard Adam update.
next_m = (
tf.multiply(self.beta_1, m) + tf.multiply(1.0 - self.beta_1, grad))
next_v = (
tf.multiply(self.beta_2, v) + tf.multiply(1.0 - self.beta_2,
tf.square(grad)))
update = next_m / (tf.sqrt(next_v) + self.epsilon)
      # Just adding the square of the weights to the loss function is *not*
      # the correct way of using L2 regularization/weight decay with Adam,
      # since that will interact with the m and v parameters in strange ways.
      #
      # Instead we want to decay the weights in a manner that doesn't interact
      # with the m/v parameters. This is equivalent to adding the square
      # of the weights to the loss with plain (non-momentum) SGD. A minimal
      # NumPy sketch of this decoupled update follows the class below.
if self._do_use_weight_decay(param_name):
update += self.weight_decay_rate * param
update_with_lr = self.learning_rate * update
next_param = param - update_with_lr
assignments.extend(
[param.assign(next_param),
m.assign(next_m),
v.assign(next_v)])
return tf.group(*assignments, name=name)
def _do_use_weight_decay(self, param_name):
"""Whether to use L2 weight decay for `param_name`."""
if not self.weight_decay_rate:
return False
if self.exclude_from_weight_decay:
for r in self.exclude_from_weight_decay:
if re.search(r, param_name) is not None:
return False
return True
def _get_variable_name(self, param_name):
"""Get the variable name from the tensor name."""
m = re.match("^(.*):\\d+$", param_name)
if m is not None:
param_name = m.group(1)
return param_name
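# Illustrative only: a NumPy sketch of a single decoupled weight-decay Adam
# step, mirroring AdamWeightDecayOptimizer.apply_gradients above (no bias
# correction, decay applied directly to the parameter rather than through the
# loss). The function name and default values are assumptions for illustration.
def adamw_step_numpy(param, grad, m, v, lr=1e-3, beta_1=0.9, beta_2=0.999,
                     epsilon=1e-6, weight_decay_rate=0.01):
  """Returns (next_param, next_m, next_v) for one AdamW-style update."""
  import numpy as np
  next_m = beta_1 * m + (1.0 - beta_1) * grad
  next_v = beta_2 * v + (1.0 - beta_2) * np.square(grad)
  update = next_m / (np.sqrt(next_v) + epsilon)
  # The decay term stays out of the m/v statistics on purpose.
  update = update + weight_decay_rate * param
  return param - lr * update, next_m, next_v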
|
|
"""Tests for certbot._internal.client."""
import datetime
import contextlib
import platform
import shutil
import tempfile
import unittest
from unittest.mock import MagicMock
from josepy import interfaces
from certbot import errors
from certbot import util
from certbot._internal.display import obj as display_obj
from certbot._internal import account
from certbot._internal import constants
from certbot.compat import os
import certbot.tests.util as test_util
try:
import mock
except ImportError: # pragma: no cover
from unittest import mock
KEY = test_util.load_vector("rsa512_key.pem")
CSR_SAN = test_util.load_vector("csr-san_512.pem")
# pylint: disable=line-too-long
class DetermineUserAgentTest(test_util.ConfigTestCase):
"""Tests for certbot._internal.client.determine_user_agent."""
def _call(self):
from certbot._internal.client import determine_user_agent
return determine_user_agent(self.config)
@mock.patch.dict(os.environ, {"CERTBOT_DOCS": "1"})
def test_docs_value(self):
self._test(expect_doc_values=True)
@mock.patch.dict(os.environ, {})
def test_real_values(self):
self._test(expect_doc_values=False)
def _test(self, expect_doc_values):
ua = self._call()
if expect_doc_values:
doc_value_check = self.assertIn
real_value_check = self.assertNotIn
else:
doc_value_check = self.assertNotIn
real_value_check = self.assertIn
doc_value_check("OS_NAME OS_VERSION", ua)
doc_value_check("major.minor.patchlevel", ua)
real_value_check(util.get_os_info_ua(), ua)
real_value_check(platform.python_version(), ua)
class RegisterTest(test_util.ConfigTestCase):
"""Tests for certbot._internal.client.register."""
def setUp(self):
super().setUp()
self.config.rsa_key_size = 1024
self.config.register_unsafely_without_email = False
self.config.email = "[email protected]"
self.account_storage = account.AccountMemoryStorage()
with mock.patch("zope.component.provideUtility"):
display_obj.set_display(MagicMock())
def _call(self):
from certbot._internal.client import register
tos_cb = mock.MagicMock()
return register(self.config, self.account_storage, tos_cb)
@staticmethod
def _public_key_mock():
m = mock.Mock(__class__=interfaces.JSONDeSerializable)
m.to_partial_json.return_value = '{"a": 1}'
return m
@staticmethod
def _new_acct_dir_mock():
return "/acme/new-account"
@staticmethod
def _true_mock():
return True
@staticmethod
def _false_mock():
return False
@staticmethod
@contextlib.contextmanager
def _patched_acme_client():
# This function is written this way to avoid deprecation warnings that
# are raised when BackwardsCompatibleClientV2 is accessed on the real
# acme.client module.
with mock.patch('certbot._internal.client.acme_client') as mock_acme_client:
yield mock_acme_client.BackwardsCompatibleClientV2
def test_no_tos(self):
with self._patched_acme_client() as mock_client:
mock_client.new_account_and_tos().terms_of_service = "http://tos"
mock_client().external_account_required.side_effect = self._false_mock
with mock.patch("certbot._internal.eff.prepare_subscription") as mock_prepare:
mock_client().new_account_and_tos.side_effect = errors.Error
self.assertRaises(errors.Error, self._call)
self.assertIs(mock_prepare.called, False)
mock_client().new_account_and_tos.side_effect = None
self._call()
self.assertIs(mock_prepare.called, True)
@test_util.patch_display_util()
def test_it(self, unused_mock_get_utility):
with self._patched_acme_client() as mock_client:
mock_client().external_account_required.side_effect = self._false_mock
with mock.patch("certbot._internal.eff.handle_subscription"):
self._call()
@mock.patch("certbot._internal.client.display_ops.get_email")
def test_email_retry(self, mock_get_email):
from acme import messages
self.config.noninteractive_mode = False
msg = "DNS problem: NXDOMAIN looking up MX for example.com"
mx_err = messages.Error.with_code('invalidContact', detail=msg)
with self._patched_acme_client() as mock_client:
mock_client().external_account_required.side_effect = self._false_mock
with mock.patch("certbot._internal.eff.prepare_subscription") as mock_prepare:
mock_client().new_account_and_tos.side_effect = [mx_err, mock.MagicMock()]
self._call()
self.assertEqual(mock_get_email.call_count, 1)
self.assertIs(mock_prepare.called, True)
def test_email_invalid_noninteractive(self):
from acme import messages
self.config.noninteractive_mode = True
msg = "DNS problem: NXDOMAIN looking up MX for example.com"
mx_err = messages.Error.with_code('invalidContact', detail=msg)
with self._patched_acme_client() as mock_client:
mock_client().external_account_required.side_effect = self._false_mock
with mock.patch("certbot._internal.eff.handle_subscription"):
mock_client().new_account_and_tos.side_effect = [mx_err, mock.MagicMock()]
self.assertRaises(errors.Error, self._call)
def test_needs_email(self):
self.config.email = None
self.assertRaises(errors.Error, self._call)
@mock.patch("certbot._internal.client.logger")
def test_without_email(self, mock_logger):
with mock.patch("certbot._internal.eff.prepare_subscription") as mock_prepare:
with self._patched_acme_client() as mock_client:
mock_client().external_account_required.side_effect = self._false_mock
self.config.email = None
self.config.register_unsafely_without_email = True
self.config.dry_run = False
self._call()
mock_logger.debug.assert_called_once_with(mock.ANY)
self.assertIs(mock_prepare.called, True)
@mock.patch("certbot._internal.client.display_ops.get_email")
def test_dry_run_no_staging_account(self, mock_get_email):
"""Tests dry-run for no staging account, expect account created with no email"""
with self._patched_acme_client() as mock_client:
mock_client().external_account_required.side_effect = self._false_mock
with mock.patch("certbot._internal.eff.handle_subscription"):
self.config.dry_run = True
self._call()
# check Certbot did not ask the user to provide an email
self.assertIs(mock_get_email.called, False)
# check Certbot created an account with no email. Contact should return empty
self.assertFalse(mock_client().new_account_and_tos.call_args[0][0].contact)
@test_util.patch_display_util()
def test_with_eab_arguments(self, unused_mock_get_utility):
with self._patched_acme_client() as mock_client:
mock_client().client.directory.__getitem__ = mock.Mock(
side_effect=self._new_acct_dir_mock
)
mock_client().external_account_required.side_effect = self._false_mock
with mock.patch("certbot._internal.eff.handle_subscription"):
target = "certbot._internal.client.messages.ExternalAccountBinding.from_data"
with mock.patch(target) as mock_eab_from_data:
self.config.eab_kid = "test-kid"
self.config.eab_hmac_key = "J2OAqW4MHXsrHVa_PVg0Y-L_R4SYw0_aL1le6mfblbE"
self._call()
self.assertIs(mock_eab_from_data.called, True)
@test_util.patch_display_util()
def test_without_eab_arguments(self, unused_mock_get_utility):
with self._patched_acme_client() as mock_client:
mock_client().external_account_required.side_effect = self._false_mock
with mock.patch("certbot._internal.eff.handle_subscription"):
target = "certbot._internal.client.messages.ExternalAccountBinding.from_data"
with mock.patch(target) as mock_eab_from_data:
self.config.eab_kid = None
self.config.eab_hmac_key = None
self._call()
self.assertIs(mock_eab_from_data.called, False)
def test_external_account_required_without_eab_arguments(self):
with self._patched_acme_client() as mock_client:
mock_client().client.net.key.public_key = mock.Mock(side_effect=self._public_key_mock)
mock_client().external_account_required.side_effect = self._true_mock
with mock.patch("certbot._internal.eff.handle_subscription"):
with mock.patch("certbot._internal.client.messages.ExternalAccountBinding.from_data"):
self.config.eab_kid = None
self.config.eab_hmac_key = None
self.assertRaises(errors.Error, self._call)
def test_unsupported_error(self):
from acme import messages
msg = "Test"
mx_err = messages.Error.with_code("malformed", detail=msg, title="title")
with self._patched_acme_client() as mock_client:
mock_client().client.directory.__getitem__ = mock.Mock(
side_effect=self._new_acct_dir_mock
)
mock_client().external_account_required.side_effect = self._false_mock
with mock.patch("certbot._internal.eff.handle_subscription") as mock_handle:
mock_client().new_account_and_tos.side_effect = [mx_err, mock.MagicMock()]
self.assertRaises(messages.Error, self._call)
self.assertIs(mock_handle.called, False)
class ClientTestCommon(test_util.ConfigTestCase):
"""Common base class for certbot._internal.client.Client tests."""
def setUp(self):
super().setUp()
self.config.no_verify_ssl = False
self.config.allow_subset_of_names = False
self.account = mock.MagicMock(**{"key.pem": KEY})
from certbot._internal.client import Client
with mock.patch("certbot._internal.client.acme_client") as acme:
self.acme_client = acme.BackwardsCompatibleClientV2
self.acme = self.acme_client.return_value = mock.MagicMock()
self.client_network = acme.ClientNetwork
self.client = Client(
config=self.config, account_=self.account,
auth=None, installer=None)
class ClientTest(ClientTestCommon):
"""Tests for certbot._internal.client.Client."""
def setUp(self):
super().setUp()
self.config.allow_subset_of_names = False
self.config.dry_run = False
self.config.strict_permissions = True
self.eg_domains = ["example.com", "www.example.com"]
self.eg_order = mock.MagicMock(
authorizations=[None],
csr_pem=mock.sentinel.csr_pem)
def test_init_acme_verify_ssl(self):
self.assertIs(self.client_network.call_args[1]['verify_ssl'], True)
def _mock_obtain_certificate(self):
self.client.auth_handler = mock.MagicMock()
self.client.auth_handler.handle_authorizations.return_value = [None]
self.client.auth_handler.deactivate_valid_authorizations.return_value = ([], [])
self.acme.finalize_order.return_value = self.eg_order
self.acme.new_order.return_value = self.eg_order
self.eg_order.update.return_value = self.eg_order
def _check_obtain_certificate(self, auth_count=1):
if auth_count == 1:
self.client.auth_handler.handle_authorizations.assert_called_once_with(
self.eg_order,
self.config,
self.config.allow_subset_of_names)
else:
self.assertEqual(self.client.auth_handler.handle_authorizations.call_count, auth_count)
self.acme.finalize_order.assert_called_once_with(
self.eg_order, mock.ANY,
fetch_alternative_chains=self.config.preferred_chain is not None)
@mock.patch("certbot._internal.client.crypto_util")
@mock.patch("certbot._internal.client.logger")
def test_obtain_certificate_from_csr(self, mock_logger, mock_crypto_util):
self._mock_obtain_certificate()
test_csr = util.CSR(form="pem", file=None, data=CSR_SAN)
auth_handler = self.client.auth_handler
self._set_mock_from_fullchain(mock_crypto_util.cert_and_chain_from_fullchain)
orderr = self.acme.new_order(test_csr.data)
auth_handler.handle_authorizations(orderr, self.config, False)
self.assertEqual(
(mock.sentinel.cert, mock.sentinel.chain),
self.client.obtain_certificate_from_csr(
test_csr,
orderr=orderr))
mock_crypto_util.find_chain_with_issuer.assert_not_called()
# and that the cert was obtained correctly
self._check_obtain_certificate()
# Test that --preferred-chain results in chain selection
self.config.preferred_chain = "some issuer"
self.assertEqual(
(mock.sentinel.cert, mock.sentinel.chain),
self.client.obtain_certificate_from_csr(
test_csr,
orderr=orderr))
mock_crypto_util.find_chain_with_issuer.assert_called_once_with(
[orderr.fullchain_pem] + orderr.alternative_fullchains_pem,
"some issuer", True)
self.config.preferred_chain = None
# Test for default issuance_timeout
expected_deadline = \
datetime.datetime.now() + datetime.timedelta(
seconds=constants.CLI_DEFAULTS["issuance_timeout"])
self.client.obtain_certificate_from_csr(test_csr, orderr=orderr)
((_, deadline), _) = self.client.acme.finalize_order.call_args
self.assertTrue(
abs(expected_deadline - deadline) <= datetime.timedelta(seconds=1))
# Test for specific issuance_timeout (300 seconds)
expected_deadline = \
datetime.datetime.now() + datetime.timedelta(seconds=300)
self.config.issuance_timeout = 300
self.client.obtain_certificate_from_csr(test_csr, orderr=orderr)
((_, deadline), _) = self.client.acme.finalize_order.call_args
self.assertTrue(
abs(expected_deadline - deadline) <= datetime.timedelta(seconds=1))
# Test for orderr=None
self.assertEqual(
(mock.sentinel.cert, mock.sentinel.chain),
self.client.obtain_certificate_from_csr(
test_csr,
orderr=None))
auth_handler.handle_authorizations.assert_called_with(self.eg_order, self.config, False)
# Test for no auth_handler
self.client.auth_handler = None
self.assertRaises(
errors.Error,
self.client.obtain_certificate_from_csr,
test_csr)
mock_logger.error.assert_called_once_with(mock.ANY)
@mock.patch("certbot._internal.client.crypto_util")
def test_obtain_certificate(self, mock_crypto_util):
csr = util.CSR(form="pem", file=None, data=CSR_SAN)
mock_crypto_util.generate_csr.return_value = csr
mock_crypto_util.generate_key.return_value = mock.sentinel.key
self._set_mock_from_fullchain(mock_crypto_util.cert_and_chain_from_fullchain)
self._test_obtain_certificate_common(mock.sentinel.key, csr)
mock_crypto_util.generate_key.assert_called_once_with(
key_size=self.config.rsa_key_size,
key_dir=self.config.key_dir,
key_type=self.config.key_type,
elliptic_curve="secp256r1",
strict_permissions=True,
)
mock_crypto_util.generate_csr.assert_called_once_with(
mock.sentinel.key, self.eg_domains, self.config.csr_dir, False, True)
mock_crypto_util.cert_and_chain_from_fullchain.assert_called_once_with(
self.eg_order.fullchain_pem)
@mock.patch("certbot._internal.client.crypto_util")
@mock.patch("certbot.compat.os.remove")
def test_obtain_certificate_partial_success(self, mock_remove, mock_crypto_util):
csr = util.CSR(form="pem", file=mock.sentinel.csr_file, data=CSR_SAN)
key = util.CSR(form="pem", file=mock.sentinel.key_file, data=CSR_SAN)
mock_crypto_util.generate_csr.return_value = csr
mock_crypto_util.generate_key.return_value = key
self._set_mock_from_fullchain(mock_crypto_util.cert_and_chain_from_fullchain)
authzr = self._authzr_from_domains(["example.com"])
self.config.allow_subset_of_names = True
self._test_obtain_certificate_common(key, csr, authzr_ret=authzr, auth_count=2)
self.assertEqual(mock_crypto_util.generate_key.call_count, 2)
self.assertEqual(mock_crypto_util.generate_csr.call_count, 2)
self.assertEqual(mock_remove.call_count, 2)
self.assertEqual(mock_crypto_util.cert_and_chain_from_fullchain.call_count, 1)
@mock.patch("certbot._internal.client.crypto_util")
@mock.patch("certbot._internal.client.acme_crypto_util")
def test_obtain_certificate_dry_run(self, mock_acme_crypto, mock_crypto):
csr = util.CSR(form="pem", file=None, data=CSR_SAN)
mock_acme_crypto.make_csr.return_value = CSR_SAN
mock_crypto.make_key.return_value = mock.sentinel.key_pem
key = util.Key(file=None, pem=mock.sentinel.key_pem)
self._set_mock_from_fullchain(mock_crypto.cert_and_chain_from_fullchain)
self.client.config.dry_run = True
self._test_obtain_certificate_common(key, csr)
mock_crypto.make_key.assert_called_once_with(
bits=self.config.rsa_key_size,
elliptic_curve="secp256r1",
key_type=self.config.key_type,
)
mock_acme_crypto.make_csr.assert_called_once_with(
mock.sentinel.key_pem, self.eg_domains, self.config.must_staple)
mock_crypto.generate_key.assert_not_called()
mock_crypto.generate_csr.assert_not_called()
self.assertEqual(mock_crypto.cert_and_chain_from_fullchain.call_count, 1)
@mock.patch("certbot._internal.client.logger")
@mock.patch("certbot._internal.client.crypto_util")
@mock.patch("certbot._internal.client.acme_crypto_util")
def test_obtain_certificate_dry_run_authz_deactivations_failed(self, mock_acme_crypto,
mock_crypto, mock_log):
from acme import messages
csr = util.CSR(form="pem", file=None, data=CSR_SAN)
mock_acme_crypto.make_csr.return_value = CSR_SAN
mock_crypto.make_key.return_value = mock.sentinel.key_pem
key = util.Key(file=None, pem=mock.sentinel.key_pem)
self._set_mock_from_fullchain(mock_crypto.cert_and_chain_from_fullchain)
self._mock_obtain_certificate()
self.client.config.dry_run = True
# Two authzs that are already valid and should get deactivated (dry run)
authzrs = self._authzr_from_domains(["example.com", "www.example.com"])
for authzr in authzrs:
authzr.body.status = messages.STATUS_VALID
# One deactivation succeeds, one fails
auth_handler = self.client.auth_handler
auth_handler.deactivate_valid_authorizations.return_value = ([authzrs[0]], [authzrs[1]])
# Certificate should get issued despite one failed deactivation
self.eg_order.authorizations = authzrs
self.client.auth_handler.handle_authorizations.return_value = authzrs
with test_util.patch_display_util():
result = self.client.obtain_certificate(self.eg_domains)
self.assertEqual(result, (mock.sentinel.cert, mock.sentinel.chain, key, csr))
self._check_obtain_certificate(1)
# Deactivation success/failure should have been handled properly
self.assertEqual(auth_handler.deactivate_valid_authorizations.call_count, 1,
"Deactivate authorizations should be called")
self.assertEqual(self.acme.new_order.call_count, 2,
"Order should be recreated due to successfully deactivated authorizations")
mock_log.warning.assert_called_with("Certbot was unable to obtain fresh authorizations for"
" every domain. The dry run will continue, but results"
" may not be accurate.")
def _set_mock_from_fullchain(self, mock_from_fullchain):
mock_cert = mock.Mock()
mock_cert.encode.return_value = mock.sentinel.cert
mock_chain = mock.Mock()
mock_chain.encode.return_value = mock.sentinel.chain
mock_from_fullchain.return_value = (mock_cert, mock_chain)
def _authzr_from_domains(self, domains):
authzr = []
# domain ordering should not be affected by authorization order
for domain in reversed(domains):
authzr.append(
mock.MagicMock(
body=mock.MagicMock(
identifier=mock.MagicMock(
value=domain))))
return authzr
def _test_obtain_certificate_common(self, key, csr, authzr_ret=None, auth_count=1):
self._mock_obtain_certificate()
        # _mock_obtain_certificate() essentially sets the return_value to
        # (None, None), which breaks this test, so the next lines override it.
authzr = authzr_ret or self._authzr_from_domains(self.eg_domains)
self.eg_order.authorizations = authzr
self.client.auth_handler.handle_authorizations.return_value = authzr
with test_util.patch_display_util():
result = self.client.obtain_certificate(self.eg_domains)
self.assertEqual(
result,
(mock.sentinel.cert, mock.sentinel.chain, key, csr))
self._check_obtain_certificate(auth_count)
@mock.patch('certbot._internal.client.Client.obtain_certificate')
@mock.patch('certbot._internal.storage.RenewableCert.new_lineage')
def test_obtain_and_enroll_certificate(self,
mock_storage, mock_obtain_certificate):
domains = ["*.example.com", "example.com"]
mock_obtain_certificate.return_value = (mock.MagicMock(),
mock.MagicMock(), mock.MagicMock(), None)
self.client.config.dry_run = False
self.assertTrue(self.client.obtain_and_enroll_certificate(domains, "example_cert"))
self.assertTrue(self.client.obtain_and_enroll_certificate(domains, None))
self.assertTrue(self.client.obtain_and_enroll_certificate(domains[1:], None))
self.client.config.dry_run = True
self.assertFalse(self.client.obtain_and_enroll_certificate(domains, None))
names = [call[0][0] for call in mock_storage.call_args_list]
self.assertEqual(names, ["example_cert", "example.com", "example.com"])
@mock.patch("certbot._internal.cli.helpful_parser")
def test_save_certificate(self, mock_parser):
certs = ["cert_512.pem", "cert-san_512.pem"]
tmp_path = tempfile.mkdtemp()
cert_pem = test_util.load_vector(certs[0])
chain_pem = (test_util.load_vector(certs[0]) + test_util.load_vector(certs[1]))
candidate_cert_path = os.path.join(tmp_path, "certs", "cert_512.pem")
candidate_chain_path = os.path.join(tmp_path, "chains", "chain.pem")
candidate_fullchain_path = os.path.join(tmp_path, "chains", "fullchain.pem")
mock_parser.verb = "certonly"
mock_parser.args = ["--cert-path", candidate_cert_path,
"--chain-path", candidate_chain_path,
"--fullchain-path", candidate_fullchain_path]
cert_path, chain_path, fullchain_path = self.client.save_certificate(
cert_pem, chain_pem, candidate_cert_path, candidate_chain_path,
candidate_fullchain_path)
self.assertEqual(os.path.dirname(cert_path),
os.path.dirname(candidate_cert_path))
self.assertEqual(os.path.dirname(chain_path),
os.path.dirname(candidate_chain_path))
self.assertEqual(os.path.dirname(fullchain_path),
os.path.dirname(candidate_fullchain_path))
with open(cert_path, "rb") as cert_file:
cert_contents = cert_file.read()
self.assertEqual(cert_contents, test_util.load_vector(certs[0]))
with open(chain_path, "rb") as chain_file:
chain_contents = chain_file.read()
self.assertEqual(chain_contents, test_util.load_vector(certs[0]) +
test_util.load_vector(certs[1]))
shutil.rmtree(tmp_path)
@test_util.patch_display_util()
def test_deploy_certificate_success(self, mock_util):
self.assertRaises(errors.Error, self.client.deploy_certificate,
["foo.bar"], "key", "cert", "chain", "fullchain")
installer = mock.MagicMock()
self.client.installer = installer
self.client.deploy_certificate(["foo.bar"], "key", "cert", "chain", "fullchain")
installer.deploy_cert.assert_called_once_with(
cert_path=os.path.abspath("cert"),
chain_path=os.path.abspath("chain"),
domain='foo.bar',
fullchain_path='fullchain',
key_path=os.path.abspath("key"))
self.assertEqual(installer.save.call_count, 2)
installer.restart.assert_called_once_with()
@mock.patch('certbot._internal.client.display_util.notify')
@test_util.patch_display_util()
def test_deploy_certificate_failure(self, mock_util, mock_notify):
installer = mock.MagicMock()
self.client.installer = installer
self.config.installer = "foobar"
installer.deploy_cert.side_effect = errors.PluginError
self.assertRaises(errors.PluginError, self.client.deploy_certificate,
["foo.bar"], "key", "cert", "chain", "fullchain")
installer.recovery_routine.assert_called_once_with()
mock_notify.assert_any_call('Deploying certificate')
@test_util.patch_display_util()
def test_deploy_certificate_save_failure(self, mock_util):
installer = mock.MagicMock()
self.client.installer = installer
installer.save.side_effect = errors.PluginError
self.assertRaises(errors.PluginError, self.client.deploy_certificate,
["foo.bar"], "key", "cert", "chain", "fullchain")
installer.recovery_routine.assert_called_once_with()
@mock.patch('certbot._internal.client.display_util.notify')
@test_util.patch_display_util()
def test_deploy_certificate_restart_failure(self, mock_get_utility, mock_notify):
installer = mock.MagicMock()
installer.restart.side_effect = [errors.PluginError, None]
self.client.installer = installer
self.assertRaises(errors.PluginError, self.client.deploy_certificate,
["foo.bar"], "key", "cert", "chain", "fullchain")
mock_notify.assert_called_with(
'We were unable to install your certificate, however, we successfully restored '
'your server to its prior configuration.')
installer.rollback_checkpoints.assert_called_once_with()
self.assertEqual(installer.restart.call_count, 2)
@mock.patch('certbot._internal.client.logger')
@test_util.patch_display_util()
def test_deploy_certificate_restart_failure2(self, mock_get_utility, mock_logger):
installer = mock.MagicMock()
installer.restart.side_effect = errors.PluginError
installer.rollback_checkpoints.side_effect = errors.ReverterError
self.client.installer = installer
self.assertRaises(errors.PluginError, self.client.deploy_certificate,
["foo.bar"], "key", "cert", "chain", "fullchain")
self.assertEqual(mock_logger.error.call_count, 1)
self.assertIn(
'An error occurred and we failed to restore your config',
mock_logger.error.call_args[0][0])
installer.rollback_checkpoints.assert_called_once_with()
self.assertEqual(installer.restart.call_count, 1)
class EnhanceConfigTest(ClientTestCommon):
"""Tests for certbot._internal.client.Client.enhance_config."""
def setUp(self):
super().setUp()
self.config.hsts = False
self.config.redirect = False
self.config.staple = False
self.config.uir = False
self.domain = "example.org"
def test_no_installer(self):
self.assertRaises(
errors.Error, self.client.enhance_config, [self.domain], None)
def test_unsupported(self):
self.client.installer = mock.MagicMock()
self.client.installer.supported_enhancements.return_value = []
self.config.redirect = None
self.config.hsts = True
with mock.patch("certbot._internal.client.logger") as mock_logger:
self.client.enhance_config([self.domain], None)
self.assertEqual(mock_logger.error.call_count, 1)
self.client.installer.enhance.assert_not_called()
@mock.patch("certbot._internal.client.logger")
def test_already_exists_header(self, mock_log):
self.config.hsts = True
self._test_with_already_existing()
self.assertIs(mock_log.info.called, True)
self.assertEqual(mock_log.info.call_args[0][1],
'Strict-Transport-Security')
@mock.patch("certbot._internal.client.logger")
def test_already_exists_redirect(self, mock_log):
self.config.redirect = True
self._test_with_already_existing()
self.assertIs(mock_log.info.called, True)
self.assertEqual(mock_log.info.call_args[0][1],
'redirect')
@mock.patch("certbot._internal.client.logger")
def test_config_set_no_warning_redirect(self, mock_log):
self.config.redirect = False
self._test_with_already_existing()
self.assertIs(mock_log.warning.called, False)
@mock.patch("certbot._internal.client.logger")
def test_no_warn_redirect(self, mock_log):
self.config.redirect = None
self._test_with_all_supported()
self.assertIs(mock_log.warning.called, False)
def test_no_ask_hsts(self):
self.config.hsts = True
self._test_with_all_supported()
self.client.installer.enhance.assert_called_with(
self.domain, "ensure-http-header", "Strict-Transport-Security")
def test_no_ask_redirect(self):
self.config.redirect = True
self._test_with_all_supported()
self.client.installer.enhance.assert_called_with(
self.domain, "redirect", None)
def test_no_ask_staple(self):
self.config.staple = True
self._test_with_all_supported()
self.client.installer.enhance.assert_called_with(
self.domain, "staple-ocsp", None)
def test_no_ask_uir(self):
self.config.uir = True
self._test_with_all_supported()
self.client.installer.enhance.assert_called_with(
self.domain, "ensure-http-header", "Upgrade-Insecure-Requests")
def test_enhance_failure(self):
self.client.installer = mock.MagicMock()
self.client.installer.enhance.side_effect = errors.PluginError
self._test_error(enhance_error=True)
self.client.installer.recovery_routine.assert_called_once_with()
def test_save_failure(self):
self.client.installer = mock.MagicMock()
self.client.installer.save.side_effect = errors.PluginError
self._test_error()
self.client.installer.recovery_routine.assert_called_once_with()
self.client.installer.save.assert_called_once_with(mock.ANY)
def test_restart_failure(self):
self.client.installer = mock.MagicMock()
self.client.installer.restart.side_effect = [errors.PluginError, None]
self._test_error_with_rollback()
def test_restart_failure2(self):
installer = mock.MagicMock()
installer.restart.side_effect = errors.PluginError
installer.rollback_checkpoints.side_effect = errors.ReverterError
self.client.installer = installer
self._test_error_with_rollback()
def _test_error_with_rollback(self):
self._test_error()
self.assertIs(self.client.installer.restart.called, True)
def _test_error(self, enhance_error=False, restart_error=False):
self.config.redirect = True
with mock.patch('certbot._internal.client.logger') as mock_logger, \
test_util.patch_display_util() as mock_gu:
self.assertRaises(
errors.PluginError, self._test_with_all_supported)
if enhance_error:
self.assertEqual(mock_logger.error.call_count, 1)
self.assertEqual('Unable to set the %s enhancement for %s.', mock_logger.error.call_args_list[0][0][0])
if restart_error:
mock_logger.critical.assert_called_with(
'Rolling back to previous server configuration...')
def _test_with_all_supported(self):
if self.client.installer is None:
self.client.installer = mock.MagicMock()
self.client.installer.supported_enhancements.return_value = [
"ensure-http-header", "redirect", "staple-ocsp"]
self.client.enhance_config([self.domain], None)
self.assertEqual(self.client.installer.save.call_count, 1)
self.assertEqual(self.client.installer.restart.call_count, 1)
def _test_with_already_existing(self):
self.client.installer = mock.MagicMock()
self.client.installer.supported_enhancements.return_value = [
"ensure-http-header", "redirect", "staple-ocsp"]
self.client.installer.enhance.side_effect = errors.PluginEnhancementAlreadyPresent()
self.client.enhance_config([self.domain], None)
class RollbackTest(unittest.TestCase):
"""Tests for certbot._internal.client.rollback."""
def setUp(self):
self.m_install = mock.MagicMock()
@classmethod
def _call(cls, checkpoints, side_effect):
from certbot._internal.client import rollback
with mock.patch("certbot._internal.client.plugin_selection.pick_installer") as mpi:
mpi.side_effect = side_effect
rollback(None, checkpoints, {}, mock.MagicMock())
def test_no_problems(self):
self._call(1, self.m_install)
self.assertEqual(self.m_install().rollback_checkpoints.call_count, 1)
self.assertEqual(self.m_install().restart.call_count, 1)
def test_no_installer(self):
self._call(1, None) # Just make sure no exceptions are raised
if __name__ == "__main__":
unittest.main() # pragma: no cover
|
|
"""Define the `mydriver.main` way of controlling experiments.
"""
import sys
import logging
from .tools import flatten
from .api0 import open_db as sql_db, parse_dbstring
from .sql import HOST, HOST_WORKDIR, EXPERIMENT, FUCKED_UP
from .sql import insert_dict, hash_state
logging.basicConfig(level=logging.DEBUG, stream=sys.stderr)
class Cmd(object):
""" A callable object that attaches documentation strings to command functions.
This class is a helper for the decorators `cmd` and `cmd_desc`.
"""
def __init__(self, f, desc):
self.f = f
self.desc = desc
def __call__(self, *args, **kwargs):
return self.f(*args, **kwargs)
cmd_dct = {}
def cmd(f):
"""Declare a function `f` as a `mydriver.main` command.
The docstring of `f` is taken as the description of the command.
"""
cmd_dct[f.__name__] = Cmd(f, f.__doc__)
return f
mydriver_cmd = cmd
def cmd_desc(desc):
"""Declare a function `f` as a `mydriver.main` command, and provide an explicit description to appear to the right of your command when running the 'help' command.
"""
def deco(f):
cmd_dct[f.__name__] = Cmd(f, desc)
return f
return deco
mydriver_cmd_desc = cmd_desc
def help(db, **kwargs):
"""Print help for this program"""
print "Usage: %s <cmd>" % sys.argv[0]
# TODO
print "Commands available:"
for name, cmd in cmd_dct.iteritems():
print "%20s - %s" % (name, cmd.desc)
@cmd
def clear_db(db, **kwargs):
"""Delete all entries from the database """
    class y (object):
        # Python 2's input() evaluates the reply, so typing `y` at the prompt
        # below returns this class object and makes the `is` check succeed.
        pass
really_clear_db = False
n_records = len([i for i in db])
try:
if y is input('Are you sure you want to DELETE ALL %i records from %s? (N/y)' %
(n_records, kwargs['dbstring'])):
really_clear_db = True
except:
print 'not deleting anything'
if really_clear_db:
print 'deleting all...'
for d in db:
print d.id
d.delete()
@cmd_desc('Insert the job sequence into the database')
def insert(db, dbstring, argv, job_fn, job_dct_seq, exp_root, **kwargs):
if ('-h' in argv or '' in argv):
print """Ensure that all jobs in the job sequence have been inserted into the database.
Optional arguments to cmd 'insert':
--dry: don't actually commit any transaction with the database, just print how many duplicates there are.
--dbi: print to stdout the command necessary to launch all new jobs using dbidispatch.
"""
return
dryrun = ('--dry' in argv)
didsomething = True
pos = 0
full_job_fn_name = job_fn.__module__ + '.' + job_fn.__name__
S = db.session()
for i, dct in enumerate(job_dct_seq):
# TODO: use hashlib, not the builtin hash function. Requires changing in .sql as well, maybe more places?
# Also, will break old experiment code because inserts will all work
# even though jobs have already run.
state = dict(flatten(dct))
if EXPERIMENT in state:
if state[EXPERIMENT] != full_job_fn_name:
raise Exception('Inconsistency: state element %s does not match experiment %s' % (
EXPERIMENT, full_job_fn_name))
else:
state[EXPERIMENT] = full_job_fn_name
if HOST in state or HOST_WORKDIR in state:
raise ValueError(('State dictionary has HOST/HOST_WORKDIR already set,'
' use a lower-level insertion function if you really want to do this.'),
state)
jobhash = hash_state(state)
if dryrun:
# TODO: detect if dct is a duplicate or not
if (None is S.query(db._Dict).filter(db._Dict.hash == jobhash).filter(db._Dict.status != FUCKED_UP).first()):
is_dup = False
else:
is_dup = True
# print 'DEBUG', inserted, jobhash
else:
if None is insert_dict(state, db, force_dup=False, priority=1, session=S):
is_dup = True
else:
is_dup = False
if is_dup:
sys.stdout.write('-')
else:
pos += 1
sys.stdout.write('.')
# print ' #', jobhash,':', experiment
# print '\n'
sys.stdout.write('\n')
S.close()
print '***************************************'
if dryrun:
print '* Summary [DRY RUN] *'
else:
print '* Summary *'
print '***************************************'
print '* Inserted %i/%i jobs in database' % (pos, i + 1)
print '***************************************'
    if '--dbi' in argv:
dbi_index = argv.index('--dbi')
cmd = 'dbidispatch --repeat_jobs=%i %s' % (pos, argv[dbi_index + 1])
print 'TODO: run ', cmd, 'jobman sql', dbstring, exp_root
@cmd
def create_view(db, tablename, **kwargs):
"""Create a view (WRITEME)"""
db.createView(tablename + 'view')
@cmd
def status(db, **kwargs):
"""List counts of jobs that are queued, running, done, etc."""
sts = {0: 0, 1: 0, 2: 0, 3: 0, 4: 0, 5: 0, 666: 0}
for d in db:
cnt = sts.get(d['jobman.status'], 0)
sts[d['jobman.status']] = cnt + 1
print 'QUEUED :', sts[0]
del sts[0]
print 'RUNNING :', sts[1]
del sts[1]
print 'DONE :', sts[2]
del sts[2]
print 'ERR_START:', sts[3]
del sts[3]
print 'ERR_SYNC :', sts[4]
del sts[4]
print 'CANCELED :', sts[5]
del sts[5]
print 'MESSED :', sts[666]
del sts[666]
if sts:
print 'WARNING: other status counts:', sts
@cmd
def dbstring(dbstring, **kwargs):
"""List what database we are using"""
print dbstring
def main(argv, dbstring, exp_root, job_fn, job_dct_seq):
db = sql_db(dbstring)
job_dct_seq = tuple(job_dct_seq)
try:
cmd = cmd_dct[argv[1]]
except:
cmd = help
cmd(**locals())
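# Illustrative only: the shape of a driver script built on `main` above. The
# dbstring, exp_root and the (state, channel) job signature are the usual
# jobman conventions assumed here for illustration; they are placeholders,
# not values defined by this module.
if __name__ == '__main__':
    def example_job(state, channel):
        # A job reads its hyper-parameters from `state`, writes results back
        # into it, and reports completion through `channel`.
        state['result'] = state['x'] * 2
        return channel.COMPLETE
    example_jobs = [dict(x=x) for x in range(4)]
    main(sys.argv,
         dbstring='postgres://user:pass@host/dbname?table=example',
         exp_root='./example_exp_root',
         job_fn=example_job,
         job_dct_seq=example_jobs)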
|
|
# jsb/plugs/core/core.py
#
#
""" core bot commands. """
## jsb imports
from jsb.lib.aliases import setalias
from jsb.lib.config import getmainconfig
from jsb.utils.statdict import StatDict
from jsb.utils.log import setloglevel, getloglevel
from jsb.utils.timeutils import elapsedstring
from jsb.utils.exception import handle_exception
from jsb.lib.commands import cmnds
from jsb.lib.examples import examples
from jsb.lib.plugins import plugs
from jsb.lib.boot import plugin_packages, getpluginlist, boot, getcmndtable, whatcommands, getshorttable
from jsb.lib.persist import Persist
from jsb.lib.reboot import reboot, reboot_stateful
from jsb.lib.eventhandler import mainhandler
from jsb.lib.fleet import getfleet
from jsb.lib.partyline import partyline
from jsb.lib.exit import globalshutdown
from jsb.lib.runner import defaultrunner, cmndrunner, longrunner, apirunner, threadrunner
from jsb.lib.errors import NoSuchPlugin
from jsb.utils.statdict import StatDict
from jsb.contrib.natural import date
from jsb.lib.threadloop import threadloops
from jsb.lib.threads import getname
## basic imports
import time
import threading
import sys
import re
import os
import copy
import cgi
import logging
import functools
## defines
cpy = copy.deepcopy
## reboot command
def handle_reboot(bot, ievent):
""" no arguments - reboot the bot. """
ievent.reply("rebooting")
#time.sleep(3)
if ievent.rest == "cold": stateful = False
else: stateful = True
if stateful:
if bot.type == "tornado":
callback = functools.partial(reboot_stateful, bot, ievent, getfleet(), partyline)
bot.server.io_loop.add_callback(callback)
else: mainhandler.put(0, reboot_stateful, bot, ievent, getfleet(), partyline)
else:
getfleet().exit()
mainhandler.put(0, reboot)
cmnds.add("reboot", handle_reboot, "OPER")
examples.add("reboot", "reboot the bot.", "reboot")
## ping command
def handle_ping(bot, event):
event.reply("pong")
cmnds.add("ping", handle_ping, ["OPER", "USER"])
examples.add("ping", "ping/pong", "ping")
## quit command
def handle_quit(bot, ievent):
""" no arguments - disconnect from the server. """
ievent.reply("quiting")
bot.exit()
cmnds.add("quit", handle_quit, "OPER")
examples.add("quit", "quit the bot.", "quit")
## encoding command
def handle_encoding(bot, ievent):
""" not arguments - show default encoding. """
ievent.reply('default encoding is %s' % bot.encoding or sys.getdefaultencoding())
cmnds.add('encoding', handle_encoding, ['USER', 'GUEST'])
examples.add('encoding', 'show default encoding', 'encoding')
## uptime command
def handle_uptime(bot, ievent):
""" no arguments - show uptime. """
ievent.reply("<b>uptime is %s</b>" % elapsedstring(time.time()-bot.starttime))
cmnds.add('uptime', handle_uptime, ['USER', 'GUEST'])
examples.add('uptime', 'show uptime of the bot', 'uptime')
## list command
def handle_available(bot, ievent):
""" no arguments - show available plugins .. to enable use !plug-enable. """
if ievent.rest: ievent.reply("%s plugin has the following commands: " % ievent.rest, whatcommands(ievent.rest))
else: ievent.reply("available plugins: ", getpluginlist(), raw=True) ; return
cmnds.add('list', handle_available, ['USER', 'GUEST'])
examples.add('list', 'list available plugins', 'list')
## commands command
def handle_commands(bot, ievent):
""" arguments: [<plugname>] - show commands of plugin. """
try: plugin = ievent.args[0].lower()
except IndexError: plugin = ""
result = []
cmnds = getcmndtable()
for cmnd, plugname in cmnds.iteritems():
if plugname:
if not plugin or plugin in plugname: result.append(cmnd)
if result:
result.sort()
if not plugin: plugin = "JSONBOT"
ievent.reply('%s has the following commands: ' % plugin, result)
else: ievent.reply('no commands found for plugin %s' % plugin)
cmnds.add('commands', handle_commands, ['USER', 'GUEST'])
examples.add('commands', 'show commands of <plugin>', '1) commands core')
## perm command
def handle_perm(bot, ievent):
""" arguments: <cmnd> - get permission of command. """
try:cmnd = ievent.args[0]
except IndexError:
ievent.missing("<cmnd>")
return
try: perms = cmnds.perms(cmnd)
except KeyError:
ievent.reply("no %sw command registered")
return
if perms: ievent.reply("%s command needs %s permission" % (cmnd, perms))
else: ievent.reply("can't find perm for %s" % cmnd)
cmnds.add('perm', handle_perm, ['USER', 'GUEST'])
examples.add('perm', 'show permission of command', 'perm quit')
## version command
def handle_version(bot, ievent):
""" no arguments - show bot's version. """
from jsb.version import getversion
version = getversion(bot.type.upper())
cfg = getmainconfig()
if cfg.dbenable: version += " " + cfg.dbtype.upper()
if ievent.rest and ievent.rest == "repo":
try:
from mercurial import context, hg, node, repo, ui
repository = hg.repository(ui.ui(), '.')
ctx = context.changectx(repository)
tip = str(ctx.rev())
except: tip = None
if tip: version2 = version + " HG " + tip
else: version2 = version
ievent.reply(version2)
cmnds.add('version', handle_version, ['USER', 'GUEST'])
examples.add('version', 'show version of the bot', 'version')
setalias('v', "version")
## short command
def handle_short(bot, ievent):
""" arguments: <cmnd> - give short possibilities for a command. """
try: cmnd = ievent.args[0]
except IndexError: ievent.missing('<cmnd>') ; return
try: ievent.reply("short commands for %s" % cmnd, getshorttable()[cmnd])
except KeyError: ievent.reply("no commands found for %s" % cmnd)
cmnds.add('short', handle_short, ['USER', 'GUEST'])
examples.add('short', 'find full commands', 'short admin-exceptions')
## whereis command
def handle_whereis(bot, ievent):
""" arguments: <cmnd> - locate a command. """
try: cmnd = ievent.args[0]
except IndexError:
ievent.missing('<cmnd>')
return
cmnds.reloadcheck(bot, ievent, cmnd)
plugin = cmnds.whereis(cmnd)
if plugin: ievent.reply("%s command is in: %s" % (cmnd, plugin))
else: ievent.reply("can't find " + cmnd)
cmnds.add('whereis', handle_whereis, ['USER', 'GUEST'])
examples.add('whereis', 'show in which plugins <what> is', 'whereis test')
## help-plug command
def handle_helpplug(bot, ievent):
""" arguments: <plugname> - how help on plugin/command or show basic help msg. """
try: what = ievent.args[0]
except (IndexError, TypeError):
ievent.reply("available plugins: ", getpluginlist())
ievent.reply("see !help <plugin> to get help on a plugin.")
return
ievent.untildone = True
cmnds.reloadcheck(bot, ievent, what)
plugin = None
modname = ""
perms = []
for package in plugin_packages:
try:
modname = "%s.%s" % (package, what)
try:
plugin = plugs.load_mod(modname)
if plugin: break
except NoSuchPlugin: continue
except(KeyError, ImportError): pass
if not plugin:
ievent.reply("no %s plugin loaded" % what)
return
try: phelp = plugin.__doc__
except (KeyError, AttributeError):
ievent.reply('no description of %s plugin available' % what)
return
cmndresult = []
if phelp:
counter = 1
for i, j in cmnds.iteritems():
if what == j.plugname:
try:
descr = j.func.__doc__
if not descr: descr = "no description provided"
try: cmndresult.append(u" <b>!%s</b> - <i>%s</i> - perms: %s" % (i, descr, j.perms))
except KeyError: pass
except AttributeError: pass
counter += 1
if cmndresult and phelp:
res = []
for r in cmndresult:
if bot.type in ['web', ]: res.append("%s<br>" % r)
elif bot.type in ['irc', ]: res.append(r.strip())
else: res.append(r)
res.sort()
what = what.upper()
ievent.reply('<br><b>plugin %s: </b><br>%s' % (what, phelp))
ievent.reply("<b>commands: </b>", res, dot="count")
else:
if perms: ievent.reply('no commands available for permissions: %s' % ", ".join(perms))
else: ievent.reply("can't find help on %s" % what)
ievent.done(silent=True)
cmnds.add('help-plug', handle_helpplug, ['USER', 'GUEST'], how="msg")
examples.add('help-plug', 'get help on <plugin>', '1) help-plug 2) help-plug misc')
## help-site command
def handle_helpsite(bot, event):
""" arguments: <cmnd> - help commands that gives a url to the docs. """
if event.rest:
target = cmnds.whereis(event.rest)
target = target or event.rest
where = bot.plugs.getmodule(target)
if where:
theplace = os.sep.join(where.split(".")[-2:])
event.reply("help for %s is at http://jsonbot.org/plugins/%s.html" % (event.rest.upper(), theplace))
else: event.reply("can't find a help url for %s" % event.rest)
else:
event.reply("documentation for jsonbot can be found at http://jsonbot.org or http://jsonbot.appspot.com/docs")
event.reply('see !list for loaded plugins and "!help plugin" for a url to the plugin docs.')
cmndhelp = cmnds.gethelp(event.rest)
if cmndhelp: event.reply("<br><b>%s docstring: </b><br>" % event.rest, cmndhelp.split("\n"))
cmnds.add("help-site", handle_helpsite, ["OPER", "USER", "GUEST"])
examples.add("help-site", "show url pointing to the docs", "1) help 2) help rss")
## help command
def handle_help(bot, event):
""" arguments: [<cmndname or plugname>] - show help. """
if not event.args:
event.reply("documentation for jsonbot can be found at http://jsonbot.org")
event.reply('see !list for loaded plugins and "!help plugin" for help on the plugin.')
return
t = event.args[0]
logging.warn("help target is %s" % t)
cmnds.reloadcheck(bot, event, t)
where = cmnds.whereis(t)
cmndhelp = cmnds.gethelp(t)
if not cmndhelp:
ex = examples.get(t)
if ex: cmndhelp = ex.descr
if not cmndhelp: handle_helpplug(bot, event) ; return
try: ex = examples[t].example
except KeyError: ex = None
p = cmnds.perms(t)
res = "permissions: %s - location: %s - examples: %s" % (", ".join(p), where, ex)
event.reply('<br><b>%s command:</b> %s' % (event.rest, cmndhelp), res.split(" - "), dot="<br>")
cmnds.add("help", handle_help, ["OPER", "USER", "GUEST"])
examples.add("help", "show help of a command", "help rss-list")
## apro command
def handle_apro(bot, ievent):
""" arguments: <searchtxt> - apropos (search) for commands. """
try: what = ievent.args[0]
except IndexError:
ievent.missing('<what>')
return
result = cmnds.apropos(what)
result.sort()
if result:
ievent.reply("commands matching %s: " % what, result)
else: ievent.reply('no matching commands found for %s' % what)
cmnds.add('apro', handle_apro, ['USER', 'GUEST'])
examples.add('apro', 'apro <what> .. search for commands that contain <what>', 'apro com')
## whatcommands command
def handle_whatcommands(bot, ievent):
""" arguments: <permission. - show all commands with permission. """
if not ievent.rest:
ievent.missing('<perm>')
return
result = cmnds
res = []
for cmnd in result.values():
if cmnd and cmnd.perms and ievent.rest in cmnd.perms:
res.append(cmnd.cmnd)
res.sort()
if not res: ievent.reply('no commands known for permission %s' % ievent.rest)
else: ievent.reply('commands known for permission %s: ' % ievent.rest, res)
cmnds.add('whatcommands', handle_whatcommands, ['USER', 'GUEST'])
examples.add('whatcommands', 'show commands with permission <perm>', 'whatcommands USER')
## versions command
def handle_versions(bot, ievent):
""" no arguments - show versions of all loaded modules (if available). """
versions = {}
allowed = ["jsb.", ]
for mod in copy.copy(sys.modules):
for allow in allowed:
if mod.startswith(allow):
try: versions[mod] = sys.modules[mod].__version__
except AttributeError, ex: pass
try: versions['python'] = sys.version
except AttributeError, ex: pass
ievent.reply("versions ==> %s" % unicode(versions))
cmnds.add('versions', handle_versions, ['USER', 'GUEST'])
examples.add('versions', 'show versions of all loaded modules', 'versions')
## loglevel command
def handle_loglevel(bot, event):
""" arguments: <loglevel> - change loglevel of the bot. loglevel is on of debug, info, warn or error. """
if not event.rest: event.reply("loglevel is %s" % getloglevel()) ; return
from jsb.lib.config import getmainconfig
mainconfig = getmainconfig()
mainconfig.loglevel = event.rest
mainconfig.save()
#mainhandler.put(4, setloglevel, event.rest)
setloglevel(event.rest)
event.done()
cmnds.add("loglevel", handle_loglevel, "OPER")
examples.add("loglevel", "set loglevel ot on of debug, info, warning or error", "loglevel warn")
## activity command
def handle_activity(bot, ievent):
""" no arguments - show running threads. """
try: import threading
except ImportError:
ievent.reply("threading is not enabled.")
return
result = {}
todo = threadloops
for thread in threadloops:
name = "%s_%s" % (getname(type(thread)), thread.name)
try: result[name] = date.duration(thread.lastiter, plain=True)
except Exception, ex: logging.warn("%s - %s" % (name, str(ex)))
    for b in getfleet().bots:
        try: result[b.cfg.name] = date.duration(b.lastiter, plain=True)
        except Exception, ex: logging.warn("%s - %s" % (b.cfg.name, str(ex)))
ievent.reply("last iterations: ", result)
cmnds.add('activity', handle_activity, ['USER', 'GUEST'])
examples.add('activity', 'show time past last thread iteration', 'activity')
## threads command
def handle_threads(bot, ievent):
""" no arguments - show running threads. """
try: import threading
except ImportError:
ievent.reply("threading is not enabled.")
return
stats = StatDict()
threadlist = threading.enumerate()
for thread in threadlist: stats.upitem(thread.getName())
result = []
for item in stats.top(): result.append("%s = %s" % (item[0], item[1]))
result.sort()
ievent.reply("threads running: ", result)
cmnds.add('threads', handle_threads, ['USER', 'GUEST'])
examples.add('threads', 'show running threads', 'threads')
## loaded command
def handle_loaded(bot, event):
""" no arguments - show plugins in cache. """
res = []
for plug in plugs.keys(): res.append(plug.split(".")[-1])
event.reply("loaded plugins (cache): ", res)
cmnds.add('loaded', handle_loaded, ['USER', 'GUEST'])
examples.add('loaded', 'show list of loaded plugins', 'loaded')
## statusline command
def handle_statusline(bot, event):
""" no arguments - show a status line. """
event.reply("<b>controlchars:</b> %s - <b>perms:</b> %s" % (event.chan.data.cc, ", ".join(event.user.data.perms)))
cmnds.add('statusline', handle_statusline, ['USER', 'GUEST'])
examples.add('statusline', 'show status line', 'statusline')
## topper command
def handle_topper(bot, event):
""" no arguments - show a 'topper' startus line. """
event.reply("<b>forwards:</b> %s - <b>watched:</b> %s - <b>feeds:</b> %s" % (", ".join(event.chan.data.forwards) or "none", ", ".join(event.chan.data.watched) or "none", ", ".join([unicode(x) for x in event.chan.data.feeds]) or "none"))
cmnds.add('topper', handle_topper, ['USER', 'GUEST'])
examples.add('topper', 'show topper line', 'topper')
## running command
def handle_running(bot, event):
""" no arguments - show running tasks. """
event.reply("<b>callbacks:</b> %s - <b>commands:</b> %s - <b>longrunning:</b> %s - <b>apirunner:</b> %s - <b>threadrunner:</b> %s" % (defaultrunner.running(), cmndrunner.running(), longrunner.running(), apirunner.running(), threadrunner.running()))
cmnds.add('running', handle_running, ['USER', 'GUEST'])
examples.add('running', "show running tasks", "running")
## descriptions command
def handle_descriptions(bot, event):
""" no arguments - show descriptions of all plugins. """
bot.plugs.loadall()
result = []
target = bot.plugs.keys()
target.sort()
for modname in target:
plug = bot.plugs.get(modname)
if plug.__doc__: txt = plug.__doc__.replace("\n", "<br>")
else: txt = "no docstring available"
result.append("* %s plugin (%s) - %s" % (modname.split(".")[-1], modname, txt))
event.reply("descriptions: <br>", result, dot="<br><br>")
cmnds.add('descriptions', handle_descriptions, ['USER', 'GUEST'])
examples.add('descriptions', "show descriptions of all plugins", "descriptions")
## stats command
def handle_stats(bot, event):
    """ arguments: [<module>] - show stats of JSONBOT modules/plugins. """
totalstats = StatDict()
counter = 0
for modname in sys.modules:
if modname.startswith("jsb"):
plugname = modname.split(".")[-1]
if event.args and plugname not in event.args: continue
try: modstats = getattr(sys.modules[modname], "stats")
except AttributeError: continue
totalstats += modstats
counter += 1
event.reply("stats results from %s modules: " % counter, totalstats)
cmnds.add("stats", handle_stats, ["OPER", "USER"])
examples.add("stats", "show stats for all JSONBOT modules/plugins or specify a module/plugin", "1) stats 2) stats rss")
|
|
"""
Tests the TextReader class in parsers.pyx, which
is integral to the C engine in parsers.py
"""
from io import BytesIO, StringIO
import os
import numpy as np
import pytest
import pandas._libs.parsers as parser
from pandas._libs.parsers import TextReader
from pandas import DataFrame
import pandas._testing as tm
from pandas.io.parsers import TextFileReader, read_csv
class TestTextReader:
@pytest.fixture(autouse=True)
def setup_method(self, datapath):
self.dirpath = datapath("io", "parser", "data")
self.csv1 = os.path.join(self.dirpath, "test1.csv")
self.csv2 = os.path.join(self.dirpath, "test2.csv")
self.xls1 = os.path.join(self.dirpath, "test.xls")
def test_file_handle(self):
with open(self.csv1, "rb") as f:
reader = TextReader(f)
reader.read()
def test_string_filename(self):
reader = TextReader(self.csv1, header=None)
reader.read()
def test_file_handle_mmap(self):
with open(self.csv1, "rb") as f:
reader = TextReader(f, memory_map=True, header=None)
reader.read()
def test_StringIO(self):
with open(self.csv1, "rb") as f:
text = f.read()
src = BytesIO(text)
reader = TextReader(src, header=None)
reader.read()
def test_string_factorize(self):
# should this be optional?
data = "a\nb\na\nb\na"
reader = TextReader(StringIO(data), header=None)
result = reader.read()
assert len(set(map(id, result[0]))) == 2
def test_skipinitialspace(self):
data = "a, b\na, b\na, b\na, b"
reader = TextReader(StringIO(data), skipinitialspace=True, header=None)
result = reader.read()
tm.assert_numpy_array_equal(
result[0], np.array(["a", "a", "a", "a"], dtype=np.object_)
)
tm.assert_numpy_array_equal(
result[1], np.array(["b", "b", "b", "b"], dtype=np.object_)
)
def test_parse_booleans(self):
data = "True\nFalse\nTrue\nTrue"
reader = TextReader(StringIO(data), header=None)
result = reader.read()
assert result[0].dtype == np.bool_
def test_delimit_whitespace(self):
data = 'a b\na\t\t "b"\n"a"\t \t b'
reader = TextReader(StringIO(data), delim_whitespace=True, header=None)
result = reader.read()
tm.assert_numpy_array_equal(
result[0], np.array(["a", "a", "a"], dtype=np.object_)
)
tm.assert_numpy_array_equal(
result[1], np.array(["b", "b", "b"], dtype=np.object_)
)
def test_embedded_newline(self):
data = 'a\n"hello\nthere"\nthis'
reader = TextReader(StringIO(data), header=None)
result = reader.read()
expected = np.array(["a", "hello\nthere", "this"], dtype=np.object_)
tm.assert_numpy_array_equal(result[0], expected)
def test_euro_decimal(self):
data = "12345,67\n345,678"
reader = TextReader(StringIO(data), delimiter=":", decimal=",", header=None)
result = reader.read()
expected = np.array([12345.67, 345.678])
tm.assert_almost_equal(result[0], expected)
def test_integer_thousands(self):
data = "123,456\n12,500"
reader = TextReader(StringIO(data), delimiter=":", thousands=",", header=None)
result = reader.read()
expected = np.array([123456, 12500], dtype=np.int64)
tm.assert_almost_equal(result[0], expected)
def test_integer_thousands_alt(self):
data = "123.456\n12.500"
reader = TextFileReader(
StringIO(data), delimiter=":", thousands=".", header=None
)
result = reader.read()
expected = DataFrame([123456, 12500])
tm.assert_frame_equal(result, expected)
def test_skip_bad_lines(self, capsys):
# too many lines, see #2430 for why
data = "a:b:c\nd:e:f\ng:h:i\nj:k:l:m\nl:m:n\no:p:q:r"
reader = TextReader(StringIO(data), delimiter=":", header=None)
msg = r"Error tokenizing data\. C error: Expected 3 fields in line 4, saw 4"
with pytest.raises(parser.ParserError, match=msg):
reader.read()
reader = TextReader(
StringIO(data),
delimiter=":",
header=None,
error_bad_lines=False,
warn_bad_lines=False,
)
result = reader.read()
expected = {
0: np.array(["a", "d", "g", "l"], dtype=object),
1: np.array(["b", "e", "h", "m"], dtype=object),
2: np.array(["c", "f", "i", "n"], dtype=object),
}
assert_array_dicts_equal(result, expected)
reader = TextReader(
StringIO(data),
delimiter=":",
header=None,
error_bad_lines=False,
warn_bad_lines=True,
)
reader.read()
captured = capsys.readouterr()
assert "Skipping line 4" in captured.err
assert "Skipping line 6" in captured.err
def test_header_not_enough_lines(self):
data = "skip this\nskip this\na,b,c\n1,2,3\n4,5,6"
reader = TextReader(StringIO(data), delimiter=",", header=2)
header = reader.header
expected = [["a", "b", "c"]]
assert header == expected
recs = reader.read()
expected = {
0: np.array([1, 4], dtype=np.int64),
1: np.array([2, 5], dtype=np.int64),
2: np.array([3, 6], dtype=np.int64),
}
assert_array_dicts_equal(recs, expected)
def test_escapechar(self):
data = '\\"hello world"\n\\"hello world"\n\\"hello world"'
reader = TextReader(StringIO(data), delimiter=",", header=None, escapechar="\\")
result = reader.read()
expected = {0: np.array(['"hello world"'] * 3, dtype=object)}
assert_array_dicts_equal(result, expected)
def test_eof_has_eol(self):
# handling of new line at EOF
pass
def test_na_substitution(self):
pass
def test_numpy_string_dtype(self):
data = """\
a,1
aa,2
aaa,3
aaaa,4
aaaaa,5"""
def _make_reader(**kwds):
return TextReader(StringIO(data), delimiter=",", header=None, **kwds)
reader = _make_reader(dtype="S5,i4")
result = reader.read()
assert result[0].dtype == "S5"
ex_values = np.array(["a", "aa", "aaa", "aaaa", "aaaaa"], dtype="S5")
assert (result[0] == ex_values).all()
assert result[1].dtype == "i4"
reader = _make_reader(dtype="S4")
result = reader.read()
assert result[0].dtype == "S4"
ex_values = np.array(["a", "aa", "aaa", "aaaa", "aaaa"], dtype="S4")
assert (result[0] == ex_values).all()
assert result[1].dtype == "S4"
def test_pass_dtype(self):
data = """\
one,two
1,a
2,b
3,c
4,d"""
def _make_reader(**kwds):
return TextReader(StringIO(data), delimiter=",", **kwds)
reader = _make_reader(dtype={"one": "u1", 1: "S1"})
result = reader.read()
assert result[0].dtype == "u1"
assert result[1].dtype == "S1"
reader = _make_reader(dtype={"one": np.uint8, 1: object})
result = reader.read()
assert result[0].dtype == "u1"
assert result[1].dtype == "O"
reader = _make_reader(dtype={"one": np.dtype("u1"), 1: np.dtype("O")})
result = reader.read()
assert result[0].dtype == "u1"
assert result[1].dtype == "O"
def test_usecols(self):
data = """\
a,b,c
1,2,3
4,5,6
7,8,9
10,11,12"""
def _make_reader(**kwds):
return TextReader(StringIO(data), delimiter=",", **kwds)
reader = _make_reader(usecols=(1, 2))
result = reader.read()
exp = _make_reader().read()
assert len(result) == 2
assert (result[1] == exp[1]).all()
assert (result[2] == exp[2]).all()
def test_cr_delimited(self):
def _test(text, **kwargs):
nice_text = text.replace("\r", "\r\n")
result = TextReader(StringIO(text), **kwargs).read()
expected = TextReader(StringIO(nice_text), **kwargs).read()
assert_array_dicts_equal(result, expected)
data = "a,b,c\r1,2,3\r4,5,6\r7,8,9\r10,11,12"
_test(data, delimiter=",")
data = "a b c\r1 2 3\r4 5 6\r7 8 9\r10 11 12"
_test(data, delim_whitespace=True)
data = "a,b,c\r1,2,3\r4,5,6\r,88,9\r10,11,12"
_test(data, delimiter=",")
sample = (
"A,B,C,D,E,F,G,H,I,J,K,L,M,N,O\r"
"AAAAA,BBBBB,0,0,0,0,0,0,0,0,0,0,0,0,0\r"
",BBBBB,0,0,0,0,0,0,0,0,0,0,0,0,0"
)
_test(sample, delimiter=",")
data = "A B C\r 2 3\r4 5 6"
_test(data, delim_whitespace=True)
data = "A B C\r2 3\r4 5 6"
_test(data, delim_whitespace=True)
def test_empty_field_eof(self):
data = "a,b,c\n1,2,3\n4,,"
result = TextReader(StringIO(data), delimiter=",").read()
expected = {
0: np.array([1, 4], dtype=np.int64),
1: np.array(["2", ""], dtype=object),
2: np.array(["3", ""], dtype=object),
}
assert_array_dicts_equal(result, expected)
# GH5664
a = DataFrame([["b"], [np.nan]], columns=["a"], index=["a", "c"])
b = DataFrame([[1, 1, 1, 0], [1, 1, 1, 0]], columns=list("abcd"), index=[1, 1])
c = DataFrame(
[
[1, 2, 3, 4],
[6, np.nan, np.nan, np.nan],
[8, 9, 10, 11],
[13, 14, np.nan, np.nan],
],
columns=list("abcd"),
index=[0, 5, 7, 12],
)
for _ in range(100):
df = read_csv(StringIO("a,b\nc\n"), skiprows=0, names=["a"], engine="c")
tm.assert_frame_equal(df, a)
df = read_csv(
StringIO("1,1,1,1,0\n" * 2 + "\n" * 2), names=list("abcd"), engine="c"
)
tm.assert_frame_equal(df, b)
df = read_csv(
StringIO("0,1,2,3,4\n5,6\n7,8,9,10,11\n12,13,14"),
names=list("abcd"),
engine="c",
)
tm.assert_frame_equal(df, c)
def test_empty_csv_input(self):
# GH14867
df = read_csv(StringIO(), chunksize=20, header=None, names=["a", "b", "c"])
assert isinstance(df, TextFileReader)
def assert_array_dicts_equal(left, right):
for k, v in left.items():
tm.assert_numpy_array_equal(np.asarray(v), np.asarray(right[k]))
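# Illustrative sketch (not part of the original suite): TextReader.read() returns a
# plain dict mapping column index to a numpy array, which is exactly the shape that
# assert_array_dicts_equal above compares column by column.
def test_read_returns_column_dict_sketch():
    data = "1,2,3\n4,5,6"
    result = TextReader(StringIO(data), delimiter=",", header=None).read()
    expected = {
        0: np.array([1, 4], dtype=np.int64),
        1: np.array([2, 5], dtype=np.int64),
        2: np.array([3, 6], dtype=np.int64),
    }
    assert_array_dicts_equal(result, expected)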
|
|
import ast
import builtins
import traceback
from enum import Enum
from sys import version_info
from textwrap import TextWrapper
from taichi.lang.exception import (TaichiCompilationError, TaichiNameError,
TaichiSyntaxError,
handle_exception_from_cpp)
class Builder:
def __call__(self, ctx, node):
method = getattr(self, 'build_' + node.__class__.__name__, None)
try:
if method is None:
error_msg = f'Unsupported node "{node.__class__.__name__}"'
raise TaichiSyntaxError(error_msg)
return method(ctx, node)
except Exception as e:
if ctx.raised or not isinstance(node, (ast.stmt, ast.expr)):
raise e.with_traceback(None)
ctx.raised = True
e = handle_exception_from_cpp(e)
if not isinstance(e, TaichiCompilationError):
msg = ctx.get_pos_info(node) + traceback.format_exc()
raise TaichiCompilationError(msg) from None
msg = ctx.get_pos_info(node) + str(e)
raise type(e)(msg) from None
class VariableScopeGuard:
def __init__(self, scopes):
self.scopes = scopes
def __enter__(self):
self.scopes.append({})
def __exit__(self, exc_type, exc_val, exc_tb):
self.scopes.pop()
class StaticScopeStatus:
def __init__(self):
self.is_in_static_scope = False
class StaticScopeGuard:
def __init__(self, status):
self.status = status
def __enter__(self):
self.prev = self.status.is_in_static_scope
self.status.is_in_static_scope = True
def __exit__(self, exc_type, exc_val, exc_tb):
self.status.is_in_static_scope = self.prev
class NonStaticControlFlowStatus:
def __init__(self):
self.is_in_non_static_control_flow = False
class NonStaticControlFlowGuard:
def __init__(self, status):
self.status = status
def __enter__(self):
self.prev = self.status.is_in_non_static_control_flow
self.status.is_in_non_static_control_flow = True
def __exit__(self, exc_type, exc_val, exc_tb):
self.status.is_in_non_static_control_flow = self.prev
class LoopStatus(Enum):
Normal = 0
Break = 1
Continue = 2
class LoopScopeAttribute:
def __init__(self, is_static):
self.is_static = is_static
self.status = LoopStatus.Normal
class LoopScopeGuard:
def __init__(self, scopes, non_static_guard=None):
self.scopes = scopes
self.non_static_guard = non_static_guard
def __enter__(self):
self.scopes.append(LoopScopeAttribute(self.non_static_guard is None))
if self.non_static_guard:
self.non_static_guard.__enter__()
def __exit__(self, exc_type, exc_val, exc_tb):
self.scopes.pop()
if self.non_static_guard:
self.non_static_guard.__exit__(exc_type, exc_val, exc_tb)
class ASTTransformerContext:
def __init__(self,
excluded_parameters=(),
is_kernel=True,
func=None,
arg_features=None,
global_vars=None,
argument_data=None,
file=None,
src=None,
start_lineno=None,
ast_builder=None,
is_real_function=False):
self.func = func
self.local_scopes = []
self.loop_scopes = []
self.excluded_parameters = excluded_parameters
self.is_kernel = is_kernel
self.arg_features = arg_features
self.returns = None
self.global_vars = global_vars
self.argument_data = argument_data
self.return_data = None
self.file = file
self.src = src
self.indent = 0
for c in self.src[0]:
if c == ' ':
self.indent += 1
else:
break
self.lineno_offset = start_lineno - 1
self.raised = False
self.non_static_control_flow_status = NonStaticControlFlowStatus()
self.static_scope_status = StaticScopeStatus()
self.returned = False
self.ast_builder = ast_builder
self.visited_funcdef = False
self.is_real_function = is_real_function
# e.g.: FunctionDef, Module, Global
def variable_scope_guard(self):
return VariableScopeGuard(self.local_scopes)
# e.g.: For, While
def loop_scope_guard(self, is_static=False):
if is_static:
return LoopScopeGuard(self.loop_scopes)
return LoopScopeGuard(self.loop_scopes,
self.non_static_control_flow_guard())
def non_static_control_flow_guard(self):
return NonStaticControlFlowGuard(self.non_static_control_flow_status)
def static_scope_guard(self):
return StaticScopeGuard(self.static_scope_status)
def current_scope(self):
return self.local_scopes[-1]
def current_loop_scope(self):
return self.loop_scopes[-1]
def loop_status(self):
if self.loop_scopes:
return self.loop_scopes[-1].status
return LoopStatus.Normal
def set_loop_status(self, status):
self.loop_scopes[-1].status = status
def is_in_static_for(self):
if self.loop_scopes:
return self.loop_scopes[-1].is_static
return False
def is_in_non_static_control_flow(self):
return self.non_static_control_flow_status.is_in_non_static_control_flow
def is_in_static_scope(self):
return self.static_scope_status.is_in_static_scope
def is_var_declared(self, name):
for s in self.local_scopes:
if name in s:
return True
return False
def create_variable(self, name, var):
if name in self.current_scope():
raise TaichiSyntaxError("Recreating variables is not allowed")
self.current_scope()[name] = var
def check_loop_var(self, loop_var):
if self.is_var_declared(loop_var):
raise TaichiSyntaxError(
f"Variable '{loop_var}' is already declared in the outer scope and cannot be used as loop variable"
)
def get_var_by_name(self, name):
for s in reversed(self.local_scopes):
if name in s:
return s[name]
if name in self.global_vars:
return self.global_vars[name]
try:
return getattr(builtins, name)
except AttributeError:
raise TaichiNameError(f'Name "{name}" is not defined')
def get_pos_info(self, node):
msg = f'On line {node.lineno + self.lineno_offset} of file "{self.file}", in {self.func.func.__name__}:\n'
if version_info < (3, 8):
msg += self.src[node.lineno - 1] + "\n"
return msg
col_offset = self.indent + node.col_offset
end_col_offset = self.indent + node.end_col_offset
wrapper = TextWrapper(width=80)
def gen_line(code, hint):
hint += ' ' * (len(code) - len(hint))
code = wrapper.wrap(code)
hint = wrapper.wrap(hint)
if not len(code):
return "\n\n"
return "".join([c + '\n' + h + '\n' for c, h in zip(code, hint)])
if node.lineno == node.end_lineno:
hint = ' ' * col_offset + '^' * (end_col_offset - col_offset)
msg += gen_line(self.src[node.lineno - 1], hint)
else:
node_type = node.__class__.__name__
if node_type in ["For", "While", "FunctionDef", "If"]:
end_lineno = max(node.body[0].lineno - 1, node.lineno)
else:
end_lineno = node.end_lineno
for i in range(node.lineno - 1, end_lineno):
last = len(self.src[i])
while last > 0 and (self.src[i][last - 1].isspace() or
not self.src[i][last - 1].isprintable()):
last -= 1
first = 0
while first < len(self.src[i]) and (
self.src[i][first].isspace()
or not self.src[i][first].isprintable()):
first += 1
if i == node.lineno - 1:
hint = ' ' * col_offset + '^' * (last - col_offset)
elif i == node.end_lineno - 1:
hint = ' ' * first + '^' * (end_col_offset - first)
elif first < last:
hint = ' ' * first + '^' * (last - first)
else:
hint = ''
msg += gen_line(self.src[i], hint)
return msg
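# Illustrative sketch (not part of taichi): how the guard classes above compose.
# A LoopScopeGuard built with a NonStaticControlFlowGuard marks the new loop scope
# as non-static and flips the shared status flag for the duration of the with-block.
if __name__ == "__main__":
    scopes = []
    status = NonStaticControlFlowStatus()
    with LoopScopeGuard(scopes, NonStaticControlFlowGuard(status)):
        assert scopes[-1].is_static is False
        assert status.is_in_non_static_control_flow is True
    assert scopes == []
    assert status.is_in_non_static_control_flow is False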
|
|
#!/usr/bin/python3
""" DHT_publish_AWS
##Copyright 2016 Clint H. O'Connor
##
## Licensed under the Apache License, Version 2.0 (the "License");
## you may not use this file except in compliance with the License.
## You may obtain a copy of the License at
##
## http://www.apache.org/licenses/LICENSE-2.0
##
## Unless required by applicable law or agreed to in writing, software
## distributed under the License is distributed on an "AS IS" BASIS,
## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
## See the License for the specific language governing permissions and
## limitations under the License.
"""
##----- imports -----------------------------------------------------------------
import os
import sys
import ast
import time
import json
import argparse
from Sensors import Sensors, Sensor_DHT
from AwsPubSub import AwsClient
from Configuration import Configuration
##----- testing -----------------------------------------------------------------
debugthis = True
##----- globals -----------------------------------------------------------------
cfg = None
state = None
thismodule = None
##----- configuration -----------------------------------------------------------------
new_configuration = {
'name' : 'DHT_THING', # name of thing
'publish' : 'unassigned', # topic to publish on
'subscribe' : 'unassigned_reply', # topic to reply on
'interval' : '3600', # default interval = 1 hour = 60*60 secs
'qos' : '0', # quality of service needed for this thing
'hwpin' : '0',
'hwtype' : ''
}
##----- defs -----------------------------------------------------------------
def subscribe_callback(client, userdata, message):
global cfg, state
# MQTT message callback
try:
control = ast.literal_eval(message.payload.decode())
if debugthis: print ("control ", control)
if 'state' in control and control['state'] == '?':
if debugthis: print ("state ", state)
client.publish(
cfg.configuration['publish'],
json.dumps({'ack':'state '+state}),
1
)
elif 'configuration' in control:
if control['configuration'] == '?':
if debugthis: print ("configuration ", cfg.configuration)
client.publish(
cfg.configuration['publish'],
json.dumps(cfg.configuration),
1
)
elif control['configuration'] == 'save':
if debugthis: print ("saved configuration file")
cfg.write()
client.publish(
cfg.configuration['publish'],
json.dumps({'ack':'saved configuration file'}),
1
)
elif 'interval' in control and int(control['interval']) >= 1:
cfg.configuration['interval'] = control['interval']
if debugthis: print ("new interval ", cfg.configuration['interval'])
client.publish(
cfg.configuration['publish'],
json.dumps({'ack':'interval '+cfg.configuration['interval']}),
1
)
elif 'hwpin' in control and int(control['hwpin']) > 0:
cfg.configuration['hwpin'] = control['hwpin']
state = 'ready'
if debugthis: print ("new hwpin ", cfg.configuration['hwpin'])
client.publish(
cfg.configuration['publish'],
json.dumps({'ack':'hwpin '+cfg.configuration['hwpin']}),
1
)
        elif 'hwtype' in control and control['hwtype'] != "":
cfg.configuration['hwtype'] = control['hwtype']
state = 'ready'
if debugthis: print ("new hwtype ", cfg.configuration['hwtype'])
client.publish(
cfg.configuration['publish'],
                json.dumps({'ack':'hwtype '+cfg.configuration['hwtype']}),
1
)
elif 'publish' in control:
# unsubscribe and disconnect immediately
client.publish(
cfg.configuration['publish'],
json.dumps({'ack':'changing topic to '+control['publish']}),
1
)
client.unsubscribe(cfg.configuration['subscribe'])
client.disconnect()
# set up new mqtt channels
cfg.configuration['publish'] = control['publish']
cfg.configuration['subscribe'] = control['publish'] + '_reply'
if debugthis:
print ("new publish ", cfg.configuration['publish'])
print ("new subscribe ", cfg.configuration['subscribe'])
state = 'connect'
elif 'Reb00t' in control and control['Reb00t'] == 'True':
client.publish(
cfg.configuration['publish'],
json.dumps({'ack':'rebooting...'}),
1
)
command = "/usr/bin/sudo /sbin/shutdown -r now"
import subprocess
process = subprocess.Popen(command.split(), stdout=subprocess.PIPE)
output = process.communicate()[0]
except:
if debugthis:
print("Unprocessed message: ")
print(message.payload)
print("from topic: ")
print(message.topic)
print("--------------\n\n")
pass
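## Illustrative sketch (not part of the original script): the callback above expects the
## subscribe topic to carry the string form of a python dict, parsed with
## ast.literal_eval(). Only the first matching key in each message is handled, e.g.:
##     "{'state': '?'}" - report the current state
##     "{'configuration': '?'}" - publish the running configuration
##     "{'configuration': 'save'}" - persist the configuration file
##     "{'interval': '600'}" - change the publish interval in seconds
##     "{'hwpin': '4'}" - change the sensor GPIO pin
##     "{'hwtype': 'DHT22'}" - change the sensor type (valid values depend on Sensor_DHT.DHT)
##     "{'publish': 'new_topic'}" - move to a new publish/reply topic pair
##     "{'Reb00t': 'True'}" - reboot the host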
def process_command_line_parameters():
""" process command line parameters
-e endpoint
-r root CA
-c cert
-k private key
-p port
"""
global thismodule
# process command line parameters
parser = argparse.ArgumentParser("Description: " + thismodule)
    parser.add_argument('-e','--endpoint', help='AWS IoT endpoint',required=True)
parser.add_argument('-r','--rootCA', help='Root CA file path',required=True)
parser.add_argument('-c','--cert', help='Certificate file path',required=True)
parser.add_argument('-k','--key', help='Private key file path',required=True)
parser.add_argument('-p','--port', help='Port number',required=True)
args = parser.parse_args()
if debugthis:
print("endpoint ", args.endpoint)
print("rootCA ", args.rootCA)
print("cert ", args.cert)
print("key ", args.key)
print("port ", args.port)
return args
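## Illustrative sketch (not part of the original script): a typical invocation with the
## parameters above (endpoint, file paths and port are placeholders):
##     python3 DHT_publish_AWS.py -e abc123.iot.us-east-1.amazonaws.com \
##         -r root-CA.crt -c thing.cert.pem -k thing.private.key -p 8883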
##----- main -----------------------------------------------------------------
def __main__(argv):
global cfg, state, thismodule
args = None
state = 'init'
thismodule = os.path.splitext(os.path.basename(sys.argv[0]))[0]
if debugthis and len(sys.argv) == 1:
import args_gitignore
for arg1 in args_gitignore.test_args:
sys.argv.extend(arg1)
print (sys.argv)
with open('call.txt', 'w') as fileout:
fileout.write(str(sys.argv))
    while state != 'stop':
        if state == 'init':
try:
args = process_command_line_parameters()
cfile = os.path.dirname(os.path.realpath(__file__)) \
+ '/' + thismodule + '.conf'
if debugthis:
print ("configfile ", cfile)
cfg = Configuration(cfile, new_configuration)
if cfg.read() is not None:
state = 'connect'
except:
raise
        if state == 'connect':
try:
# get instance of AWS IOT services
mymqtt = AwsClient(
thismodule,
args.endpoint,
int(args.port),
args.rootCA,
args.cert,
args.key
)
# connect to AWS
connect_attempts = 3
while mymqtt.connect() == False and connect_attempts > 0:
time.sleep(5)
connect_attempts -= 1
if connect_attempts > 0:
state = 'subscribe'
except:
raise
        if state == 'subscribe':
try:
# subscribe to control topic, QoS must be 1 to ensure responsiveness
connect_attempts = 3
if debugthis:
print("state subscribe")
print("subscribe ", cfg.configuration['subscribe'])
while (mymqtt.subscribe(
cfg.configuration['subscribe'],
1,
subscribe_callback
)
== False and connect_attempts > 0):
time.sleep(5)
connect_attempts -= 1
if connect_attempts > 0:
state = 'ready'
except:
raise
        if state == 'ready':
try:
if debugthis:
print ("hwpin ", cfg.configuration['hwpin'])
print ("hwtype ", cfg.configuration['hwtype'], Sensor_DHT.DHT[cfg.configuration['hwtype']])
with Sensor_DHT(
pin=cfg.configuration['hwpin'],
type=Sensor_DHT.DHT[cfg.configuration['hwtype']]
) as myDHT:
if myDHT is not None:
state = 'active'
                    while state == 'active':
try:
# get DHT readings and timestamp
readtime, humidity, temperature = myDHT.read()
dht = {
'timestamp' : readtime,
'humidity' : humidity,
'temperature' : temperature
}
# publish the data
json_data = json.dumps(dht)
mymqtt.publish(
cfg.configuration['publish'],
json_data,
int(cfg.configuration['qos'])
)
# wait for the next interval
now = time.time()
while time.time() < now + int(cfg.configuration['interval']):
time.sleep(1)
except:
raise
except:
raise
## if state is 'disconnect':
## try:
## mymqtt.unsubscribe(cfg.configuration['subscribe'])
## mymqtt.disconnect()
## state = 'connect'
## except:
## return True
if __name__ == "__main__":
__main__(sys.argv[1:])
|
|
# -*- coding: utf-8 -*-
# ########################## Copyrights and license ############################
# #
# Copyright 2012 Vincent Jacques <[email protected]> #
# Copyright 2012 Zearin <[email protected]> #
# Copyright 2013 AKFish <[email protected]> #
# Copyright 2013 Michael Stead <[email protected]> #
# Copyright 2013 Vincent Jacques <[email protected]> #
# Copyright 2013 martinqt <[email protected]> #
# #
# This file is part of PyGithub. http://jacquev6.github.com/PyGithub/ #
# #
# PyGithub is free software: you can redistribute it and/or modify it under #
# the terms of the GNU Lesser General Public License as published by the Free #
# Software Foundation, either version 3 of the License, or (at your option) #
# any later version. #
# #
# PyGithub is distributed in the hope that it will be useful, but WITHOUT ANY #
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS #
# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more #
# details. #
# #
# You should have received a copy of the GNU Lesser General Public License #
# along with PyGithub. If not, see <http://www.gnu.org/licenses/>. #
# #
# ##############################################################################
import github.GithubObject
import github.NamedUser
class PullRequestComment(github.GithubObject.CompletableGithubObject):
"""
This class represents PullRequestComments. The reference can be found here http://developer.github.com/v3/pulls/comments/
"""
@property
def body(self):
"""
:type: string
"""
self._completeIfNotSet(self._body)
return self._body.value
@property
def commit_id(self):
"""
:type: string
"""
self._completeIfNotSet(self._commit_id)
return self._commit_id.value
@property
def created_at(self):
"""
:type: datetime.datetime
"""
self._completeIfNotSet(self._created_at)
return self._created_at.value
@property
def diff_hunk(self):
"""
:type: string
"""
self._completeIfNotSet(self._diff_hunk)
return self._diff_hunk.value
@property
def id(self):
"""
:type: integer
"""
self._completeIfNotSet(self._id)
return self._id.value
@property
def original_commit_id(self):
"""
:type: string
"""
self._completeIfNotSet(self._original_commit_id)
return self._original_commit_id.value
@property
def original_position(self):
"""
:type: integer
"""
self._completeIfNotSet(self._original_position)
return self._original_position.value
@property
def path(self):
"""
:type: string
"""
self._completeIfNotSet(self._path)
return self._path.value
@property
def position(self):
"""
:type: integer
"""
self._completeIfNotSet(self._position)
return self._position.value
@property
def pull_request_url(self):
"""
:type: string
"""
self._completeIfNotSet(self._pull_request_url)
return self._pull_request_url.value
@property
def updated_at(self):
"""
:type: datetime.datetime
"""
self._completeIfNotSet(self._updated_at)
return self._updated_at.value
@property
def url(self):
"""
:type: string
"""
self._completeIfNotSet(self._url)
return self._url.value
@property
def html_url(self):
"""
:type: string
"""
self._completeIfNotSet(self._html_url)
return self._html_url.value
@property
def user(self):
"""
:type: :class:`github.NamedUser.NamedUser`
"""
self._completeIfNotSet(self._user)
return self._user.value
def delete(self):
"""
:calls: `DELETE /repos/:owner/:repo/pulls/comments/:number <http://developer.github.com/v3/pulls/comments>`_
:rtype: None
"""
headers, data = self._requester.requestJsonAndCheck(
"DELETE",
self.url
)
def edit(self, body):
"""
:calls: `PATCH /repos/:owner/:repo/pulls/comments/:number <http://developer.github.com/v3/pulls/comments>`_
:param body: string
:rtype: None
"""
assert isinstance(body, (str, unicode)), body
post_parameters = {
"body": body,
}
headers, data = self._requester.requestJsonAndCheck(
"PATCH",
self.url,
input=post_parameters
)
self._useAttributes(data)
def _initAttributes(self):
self._body = github.GithubObject.NotSet
self._commit_id = github.GithubObject.NotSet
self._created_at = github.GithubObject.NotSet
self._diff_hunk = github.GithubObject.NotSet
self._id = github.GithubObject.NotSet
self._original_commit_id = github.GithubObject.NotSet
self._original_position = github.GithubObject.NotSet
self._path = github.GithubObject.NotSet
self._position = github.GithubObject.NotSet
self._pull_request_url = github.GithubObject.NotSet
self._updated_at = github.GithubObject.NotSet
self._url = github.GithubObject.NotSet
self._html_url = github.GithubObject.NotSet
self._user = github.GithubObject.NotSet
def _useAttributes(self, attributes):
if "body" in attributes: # pragma no branch
self._body = self._makeStringAttribute(attributes["body"])
if "commit_id" in attributes: # pragma no branch
self._commit_id = self._makeStringAttribute(attributes["commit_id"])
if "created_at" in attributes: # pragma no branch
self._created_at = self._makeDatetimeAttribute(attributes["created_at"])
if "diff_hunk" in attributes: # pragma no branch
self._diff_hunk = self._makeStringAttribute(attributes["diff_hunk"])
if "id" in attributes: # pragma no branch
self._id = self._makeIntAttribute(attributes["id"])
if "original_commit_id" in attributes: # pragma no branch
self._original_commit_id = self._makeStringAttribute(attributes["original_commit_id"])
if "original_position" in attributes: # pragma no branch
self._original_position = self._makeIntAttribute(attributes["original_position"])
if "path" in attributes: # pragma no branch
self._path = self._makeStringAttribute(attributes["path"])
if "position" in attributes: # pragma no branch
self._position = self._makeIntAttribute(attributes["position"])
if "pull_request_url" in attributes: # pragma no branch
self._pull_request_url = self._makeStringAttribute(attributes["pull_request_url"])
if "updated_at" in attributes: # pragma no branch
self._updated_at = self._makeDatetimeAttribute(attributes["updated_at"])
if "url" in attributes: # pragma no branch
self._url = self._makeStringAttribute(attributes["url"])
if "html_url" in attributes: # pragma no branch
self._html_url = self._makeStringAttribute(attributes["html_url"])
if "user" in attributes: # pragma no branch
self._user = self._makeClassAttribute(github.NamedUser.NamedUser, attributes["user"])
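# Illustrative sketch (not part of PyGithub): PullRequestComment objects are normally
# obtained through a PullRequest and then modified with the edit()/delete() methods
# defined above. The token, repository name and pull number are placeholders.
if __name__ == "__main__":
    import os
    from github import Github

    gh = Github(os.environ["GITHUB_TOKEN"])        # assumed to be set in the environment
    pull = gh.get_repo("owner/repo").get_pull(1)   # placeholder repository and PR number
    for comment in pull.get_review_comments():     # yields PullRequestComment instances
        print("%s:%s %s" % (comment.path, comment.position, comment.body))
        # comment.edit(comment.body + " (reviewed)")  # PATCH via edit() above
        # comment.delete()                            # DELETE via delete() above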
|
|
import numpy as np
import logging
import collections
from pystella.rf import band
from pystella.rf.lc import SetLightCurve, LightCurve
__author__ = 'bakl'
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
err_prefix = ('er', 'er_', 'err', 'err_')
def read_table_header_float(fname, header=None, skip=0):
if header is None:
i = 0
with open(fname, "r") as f:
for line in f:
i += 1
if i <= skip:
continue
header = line
break
names = [s for s in header.split()]
dt = np.dtype({'names': names, 'formats': [np.float64] * len(names)})
block = np.loadtxt(fname, skiprows=skip+1, dtype=dt, comments='#')
return block
def read_obs_table_header(fname, header=None, skip=0, colt=('time', 'JD', 'MJD'),
include_names=None, include_patterns=None,
is_out=False, comments='#'):
"""
Load tabular data from file.
:param fname: The name of the file with data
:param header: str, optional
The string is used to build data-type of the resulting array.
Default: None.
:param skip: int, optional
Skip the first rows before header.
Default: 0.
:param colt: list, optional.
Possible names for time column.
Default: ('time', 'JD', 'MJD')
:param include_names: list or None
Which columns to read.
Default None, results in all columns being read.
Example: ['B','V','R']
The columns with errors, like 'err'+use_names, also will be read.
:param include_patterns: list or None
Which columns to read as the pattern of regular expression.
Default None, results in all columns being read.
Example: ['Vel\d+','Vel.*']
The columns with errors, like 'err'+use_names, also will be read.
:param is_out: bool, optional
If True the skipped, header and first file-rows are printed.
Default: False
:param comments: str or sequence, optional
Default: '#'.
:return: ndarray - data is read from the file
"""
lskip = 0
if isinstance(colt, str):
colt = [colt]
if header is None:
with open(fname, "r") as f:
i = 0
for line in f:
i += 1
if is_out:
print(line.strip())
if i <= skip:
continue
if line.strip().startswith(comments):
if not line.strip().startswith('###'):
lskip += 1
continue
else:
line = line.replace('###', '')
header = line
break
else:
raise ValueError('Could not get header. Check the file: {}. Probably skip [{}] is too large.'
.format(fname, skip))
# print first lines
if is_out:
line = f.readline().strip()
print(line)
cols_names = header.split()
cols = {i: nm for i, nm in enumerate(cols_names)}
is_time = False
cols_used = {}
cols_data = {}
def check_col_nm(nm, names, patterns):
import re
if names is None and patterns is None:
return True
        if names is not None and nm in names:
return True
if patterns is not None:
for n in patterns:
if re.match(n, nm):
return True
return False
for k, v in list(cols.items()):
# time
if not is_time and v.lower() in map(str.lower, colt):
cols_used[k] = v
is_time = True
# data
elif check_col_nm(v, include_names, include_patterns):
cols_used[k] = v
cols_data[k] = v
# error
for err_name in (es+v for es in err_prefix):
for i, bn in list(cols.items()):
if err_name.upper() == bn.upper():
cols_used[i] = bn
cols.pop(i)
break
od = collections.OrderedDict(sorted(cols_used.items()))
usecols = list(od.keys())
names = list(od.values())
dt = np.dtype({'names': names, 'formats': [np.float64] * len(names)})
block = np.loadtxt(fname, skiprows=max(lskip, skip)+1, dtype=dt, comments=comments, usecols=usecols)
return block, cols_data
def table2curves(name, tbl, bands=None, colt=('time', 'JD', 'MJD'), is_filter_zero=True):
# time = None
for nm in colt:
if nm in tbl.dtype.names:
time = tbl[nm]
break
else:
raise ValueError("THe table should contain a column with name in [{0}]".format(', '.join(colt)))
curves = SetLightCurve(name)
if bands is None:
bands = [n for n in tbl.dtype.names if band.is_exist(n)]
for bname in bands:
b = band.band_by_name(bname)
mag = tbl[bname]
mask = ~np.isnan(mag)
# filter
if is_filter_zero:
mask = np.logical_and(mask, mag != 0) # filter out values not equal 0
mask = np.logical_and(mask, mag < 99)
t = time[mask]
m = mag[mask]
for err_name in (prefix+bname for prefix in err_prefix):
if err_name in tbl.dtype.names:
err = tbl[err_name]
e = err[mask]
lc = LightCurve(b, t, m, e)
break
else:
lc = LightCurve(b, t, m)
curves.add(lc)
return curves
def curves2table(curves):
def add(a, vals):
# a = np.append(a, [lc.Mag], axis=0)
# # a[:, :-1] = lc.Mag
combined = np.vstack((a, vals))
return combined
# a = np.array(curves.TimeCommon, dtype=[('time', np.float64, (len(curves.TimeCommon)))])
# a = np.empty([0, len(curves.TimeCommon)])
# a = np.array([0,100])
# a = np.append(a, [curves.TimeCommon], axis=0)
a = np.array(curves.TimeCommon)
names = ['time']
for lc in curves:
a = add(a, lc.Mag)
names.append(lc.Band.Name)
if lc.IsErr:
a = add(a, lc.MagErr)
names.append('err'+lc.Band.Name)
# dt = {'names': names, 'formats': [np.float64] * len(names)}
dt = list(zip(names, [np.float64] * len(names)))
a = a.T
a.dtype = np.dtype(dt)
# a.dtype = np.dtype(dt)
# a.dtype.names = names
return a
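# Illustrative sketch (not part of pystella): typical flow through the helpers above.
# 'obs.dat' is a placeholder file whose header row names the time column and the bands.
if __name__ == "__main__":
    tbl, cols_data = read_obs_table_header('obs.dat', include_names=['B', 'V', 'R'],
                                           is_out=True)
    curves = table2curves('SN_example', tbl)   # SetLightCurve with one LightCurve per band
    for lc in curves:
        print(lc.Band.Name, len(lc.Mag))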
|
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""TensorFlow estimators for Linear and DNN joined training models."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import six
from tensorflow.contrib import layers
from tensorflow.contrib.framework.python.ops import variables as contrib_variables
from tensorflow.contrib.layers.python.layers import feature_column_ops
from tensorflow.contrib.learn.python.learn.estimators import composable_model
from tensorflow.contrib.learn.python.learn.estimators import estimator
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import logging_ops
from tensorflow.python.ops import nn
from tensorflow.python.ops import parsing_ops
from tensorflow.python.ops import state_ops
from tensorflow.python.ops import variables
from tensorflow.python.training import training
# TODO(ispir): Increase test coverage
class _DNNLinearCombinedBaseEstimator(estimator.BaseEstimator):
"""An estimator for TensorFlow Linear and DNN joined training models.
Input of `fit`, `train`, and `evaluate` should have following features,
otherwise there will be a `KeyError`:
if `weight_column_name` is not `None`, a feature with
`key=weight_column_name` whose value is a `Tensor`.
for each `column` in `dnn_feature_columns` + `linear_feature_columns`:
- if `column` is a `SparseColumn`, a feature with `key=column.name`
whose `value` is a `SparseTensor`.
- if `column` is a `WeightedSparseColumn`, two features: the first with
`key` the id column name, the second with `key` the weight column
name. Both features' `value` must be a `SparseTensor`.
    - if `column` is a `RealValuedColumn`, a feature with `key=column.name`
whose `value` is a `Tensor`.
"""
def __init__(self,
target_column,
model_dir=None,
linear_feature_columns=None,
linear_optimizer=None,
dnn_feature_columns=None,
dnn_optimizer=None,
dnn_hidden_units=None,
dnn_activation_fn=nn.relu,
dnn_dropout=None,
gradient_clip_norm=None,
enable_centered_bias=True,
config=None):
"""Initializes a _DNNLinearCombinedBaseEstimator instance.
Args:
target_column: A _TargetColumn object.
model_dir: Directory to save model parameters, graph and etc.
linear_feature_columns: An iterable containing all the feature columns
used by linear part of the model. All items in the set should be
instances of classes derived from `FeatureColumn`.
linear_optimizer: An instance of `tf.Optimizer` used to apply gradients to
the linear part of the model. If `None`, will use a FTRL optimizer.
dnn_feature_columns: An iterable containing all the feature columns used
by deep part of the model. All items in the set should be instances of
classes derived from `FeatureColumn`.
dnn_optimizer: An instance of `tf.Optimizer` used to apply gradients to
the deep part of the model. If `None`, will use an Adagrad optimizer.
dnn_hidden_units: List of hidden units per layer. All layers are fully
connected.
dnn_activation_fn: Activation function applied to each layer. If `None`,
will use `tf.nn.relu`.
dnn_dropout: When not None, the probability we will drop out
a given coordinate.
gradient_clip_norm: A float > 0. If provided, gradients are clipped
to their global norm with this clipping ratio. See
tf.clip_by_global_norm for more details.
enable_centered_bias: A bool. If True, estimator will learn a centered
bias variable for each class. Rest of the model structure learns the
residual after centered bias.
config: RunConfig object to configure the runtime settings.
Raises:
ValueError: If both linear_feature_columns and dnn_features_columns are
empty at the same time.
"""
super(_DNNLinearCombinedBaseEstimator, self).__init__(model_dir=model_dir,
config=config)
num_ps_replicas = config.num_ps_replicas if config else 0
self._linear_model = composable_model.LinearComposableModel(
num_label_columns=target_column.num_label_columns,
optimizer=linear_optimizer,
gradient_clip_norm=gradient_clip_norm,
num_ps_replicas=num_ps_replicas)
self._dnn_model = composable_model.DNNComposableModel(
num_label_columns=target_column.num_label_columns,
hidden_units=dnn_hidden_units,
optimizer=dnn_optimizer,
activation_fn=dnn_activation_fn,
dropout=dnn_dropout,
gradient_clip_norm=gradient_clip_norm,
num_ps_replicas=num_ps_replicas) if dnn_hidden_units else None
self._linear_feature_columns = linear_feature_columns
self._linear_optimizer = linear_optimizer
self._linear_weight_collection = (
self._linear_model.get_weight_collection_name())
self._dnn_feature_columns = dnn_feature_columns
self._dnn_hidden_units = dnn_hidden_units
self._centered_bias_weight_collection = "centered_bias"
self._enable_centered_bias = enable_centered_bias
self._target_column = target_column
@property
def linear_weights_(self):
"""Returns weights per feature of the linear part."""
all_variables = self.get_variable_names()
# TODO(ispir): Figure out a better way to retrieve variables for features.
# for example using feature info / columns.
values = {}
for name in all_variables:
if (name.startswith("linear/") and name.rfind("/") == 6 and
name != "linear/bias_weight"):
values[name] = self.get_variable_value(name)
if len(values) == 1:
return values[list(values.keys())[0]]
return values
@property
def linear_bias_(self):
"""Returns bias of the linear part."""
return (self.get_variable_value("linear/bias_weight") +
self.get_variable_value("centered_bias_weight"))
@property
def dnn_weights_(self):
"""Returns weights of deep neural network part."""
return [self.get_variable_value("hiddenlayer_%d/weights" % i)
for i, _ in enumerate(self._dnn_hidden_units)] + [
self.get_variable_value("dnn_logits/weights")]
@property
def dnn_bias_(self):
"""Returns bias of deep neural network part."""
return [self.get_variable_value("hiddenlayer_%d/biases" % i)
for i, _ in enumerate(self._dnn_hidden_units)] + [
self.get_variable_value("dnn_logits/biases"),
self.get_variable_value("centered_bias_weight")]
def _get_feature_dict(self, features):
if isinstance(features, dict):
return features
return {"": features}
def _get_train_ops(self, features, targets):
"""See base class."""
global_step = contrib_variables.get_global_step()
assert global_step
features = self._get_feature_dict(features)
logits = self._logits(features, is_training=True)
if self._enable_centered_bias:
centered_bias_step = [self._centered_bias_step(targets, features)]
else:
centered_bias_step = []
with ops.control_dependencies(centered_bias_step):
loss = self._target_column.loss(logits, targets, features)
logging_ops.scalar_summary("loss", loss)
linear_train_step = self._linear_model.get_train_step(loss)
dnn_train_step = (self._dnn_model.get_train_step(loss)
if self._dnn_model else [])
with ops.control_dependencies(linear_train_step + dnn_train_step):
with ops.get_default_graph().colocate_with(global_step):
return state_ops.assign_add(global_step, 1).op, loss
def _get_eval_ops(self, features, targets, metrics=None):
raise NotImplementedError
def _get_predict_ops(self, features):
"""See base class."""
features = self._get_feature_dict(features)
logits = self._logits(features)
return self._target_column.logits_to_predictions(logits, proba=True)
def _get_feature_ops_from_example(self, examples_batch):
column_types = layers.create_feature_spec_for_parsing((
self._get_linear_feature_columns() or []) + (
self._get_dnn_feature_columns() or []))
features = parsing_ops.parse_example(examples_batch, column_types)
return features
def _get_linear_feature_columns(self):
if not self._linear_feature_columns:
return None
feature_column_ops.check_feature_columns(self._linear_feature_columns)
return sorted(set(self._linear_feature_columns), key=lambda x: x.key)
def _get_dnn_feature_columns(self):
if not self._dnn_feature_columns:
return None
feature_column_ops.check_feature_columns(self._dnn_feature_columns)
return sorted(set(self._dnn_feature_columns), key=lambda x: x.key)
def _dnn_logits(self, features, is_training):
return self._dnn_model.build_model(
features, self._dnn_feature_columns, is_training)
def _linear_logits(self, features, is_training):
return self._linear_model.build_model(
features, self._linear_feature_columns, is_training)
def _centered_bias(self):
centered_bias = variables.Variable(
array_ops.zeros([self._target_column.num_label_columns]),
collections=[self._centered_bias_weight_collection,
ops.GraphKeys.VARIABLES],
name="centered_bias_weight")
logging_ops.scalar_summary(
["centered_bias_%d" % cb for cb in range(
self._target_column.num_label_columns)],
array_ops.reshape(centered_bias, [-1]))
return centered_bias
def _centered_bias_step(self, targets, features):
centered_bias = ops.get_collection(self._centered_bias_weight_collection)
batch_size = array_ops.shape(targets)[0]
logits = array_ops.reshape(
array_ops.tile(centered_bias[0], [batch_size]),
[batch_size, self._target_column.num_label_columns])
loss = self._target_column.loss(logits, targets, features)
    # Learn central bias by an optimizer. 0.1 is a conservative lr for a single
# variable.
return training.AdagradOptimizer(0.1).minimize(loss, var_list=centered_bias)
def _logits(self, features, is_training=False):
linear_feature_columns = self._get_linear_feature_columns()
dnn_feature_columns = self._get_dnn_feature_columns()
if not (linear_feature_columns or dnn_feature_columns):
raise ValueError("Either linear_feature_columns or dnn_feature_columns "
"should be defined.")
if linear_feature_columns and dnn_feature_columns:
logits = (self._linear_logits(features, is_training) +
self._dnn_logits(features, is_training))
elif dnn_feature_columns:
logits = self._dnn_logits(features, is_training)
else:
logits = self._linear_logits(features, is_training)
if self._enable_centered_bias:
return nn.bias_add(logits, self._centered_bias())
else:
return logits
def _get_optimizer(self, optimizer, default_optimizer, default_learning_rate):
if optimizer is None:
optimizer = default_optimizer
if isinstance(optimizer, six.string_types):
optimizer = layers.OPTIMIZER_CLS_NAMES[optimizer](
learning_rate=default_learning_rate)
return optimizer
class DNNLinearCombinedClassifier(_DNNLinearCombinedBaseEstimator):
"""A classifier for TensorFlow Linear and DNN joined training models.
Example:
```python
education = sparse_column_with_hash_bucket(column_name="education",
hash_bucket_size=1000)
occupation = sparse_column_with_hash_bucket(column_name="occupation",
hash_bucket_size=1000)
education_x_occupation = crossed_column(columns=[education, occupation],
hash_bucket_size=10000)
education_emb = embedding_column(sparse_id_column=education, dimension=16,
combiner="sum")
occupation_emb = embedding_column(sparse_id_column=occupation, dimension=16,
combiner="sum")
estimator = DNNLinearCombinedClassifier(
# common settings
n_classes=n_classes,
weight_column_name=weight_column_name,
# wide settings
linear_feature_columns=[education_x_occupation],
linear_optimizer=tf.train.FtrlOptimizer(...),
# deep settings
dnn_feature_columns=[education_emb, occupation_emb],
dnn_hidden_units=[1000, 500, 100],
dnn_optimizer=tf.train.AdagradOptimizer(...))
# Input builders
  def input_fn_train(): # returns x, y
    ...
  def input_fn_eval(): # returns x, y
    ...
estimator.fit(input_fn=input_fn_train)
estimator.evaluate(input_fn=input_fn_eval)
estimator.predict(x=x)
```
Input of `fit` and `evaluate` should have following features,
otherwise there will be a `KeyError`:
if `weight_column_name` is not `None`, a feature with
`key=weight_column_name` whose value is a `Tensor`.
for each `column` in `dnn_feature_columns` + `linear_feature_columns`:
- if `column` is a `SparseColumn`, a feature with `key=column.name`
whose `value` is a `SparseTensor`.
- if `column` is a `WeightedSparseColumn`, two features: the first with
`key` the id column name, the second with `key` the weight column name.
Both features' `value` must be a `SparseTensor`.
    - if `column` is a `RealValuedColumn`, a feature with `key=column.name`
whose `value` is a `Tensor`.
"""
def __init__(self,
model_dir=None,
n_classes=2,
weight_column_name=None,
linear_feature_columns=None,
linear_optimizer=None,
dnn_feature_columns=None,
dnn_optimizer=None,
dnn_hidden_units=None,
dnn_activation_fn=nn.relu,
dnn_dropout=None,
gradient_clip_norm=None,
enable_centered_bias=True,
config=None):
"""Constructs a DNNLinearCombinedClassifier instance.
Args:
model_dir: Directory to save model parameters, graph and etc.
n_classes: number of target classes. Default is binary classification.
weight_column_name: A string defining feature column name representing
weights. It is used to down weight or boost examples during training.
It will be multiplied by the loss of the example.
linear_feature_columns: An iterable containing all the feature columns
used by linear part of the model. All items in the set must be
instances of classes derived from `FeatureColumn`.
linear_optimizer: An instance of `tf.Optimizer` used to apply gradients to
the linear part of the model. If `None`, will use a FTRL optimizer.
dnn_feature_columns: An iterable containing all the feature columns used
by deep part of the model. All items in the set must be instances of
classes derived from `FeatureColumn`.
dnn_optimizer: An instance of `tf.Optimizer` used to apply gradients to
the deep part of the model. If `None`, will use an Adagrad optimizer.
dnn_hidden_units: List of hidden units per layer. All layers are fully
connected.
dnn_activation_fn: Activation function applied to each layer. If `None`,
will use `tf.nn.relu`.
dnn_dropout: When not None, the probability we will drop out
a given coordinate.
gradient_clip_norm: A float > 0. If provided, gradients are clipped
to their global norm with this clipping ratio. See
tf.clip_by_global_norm for more details.
enable_centered_bias: A bool. If True, estimator will learn a centered
bias variable for each class. Rest of the model structure learns the
residual after centered bias.
config: RunConfig object to configure the runtime settings.
Raises:
ValueError: If `n_classes` < 2.
ValueError: If both `linear_feature_columns` and `dnn_features_columns`
are empty at the same time.
"""
if n_classes < 2:
raise ValueError("n_classes should be greater than 1. Given: {}".format(
n_classes))
target_column = layers.multi_class_target(
n_classes=n_classes,
weight_column_name=weight_column_name)
super(DNNLinearCombinedClassifier, self).__init__(
model_dir=model_dir,
linear_feature_columns=linear_feature_columns,
linear_optimizer=linear_optimizer,
dnn_feature_columns=dnn_feature_columns,
dnn_optimizer=dnn_optimizer,
dnn_hidden_units=dnn_hidden_units,
dnn_activation_fn=dnn_activation_fn,
dnn_dropout=dnn_dropout,
gradient_clip_norm=gradient_clip_norm,
enable_centered_bias=enable_centered_bias,
target_column=target_column,
config=config)
def predict(self, x=None, input_fn=None, batch_size=None):
"""Returns predictions for given features.
Args:
x: features.
input_fn: Input function. If set, x must be None.
batch_size: Override default batch size.
Returns:
Numpy array of predicted classes or regression values.
"""
predictions = super(DNNLinearCombinedClassifier, self).predict(
x=x, input_fn=input_fn, batch_size=batch_size)
predictions = np.argmax(predictions, axis=1)
return predictions
def predict_proba(self, x=None, input_fn=None, batch_size=None):
"""Returns prediction probabilities for given features.
Args:
x: features.
input_fn: Input function. If set, x and y must be None.
batch_size: Override default batch size.
Returns:
Numpy array of predicted probabilities.
"""
return super(DNNLinearCombinedClassifier, self).predict(
x=x, input_fn=input_fn, batch_size=batch_size)
def _get_eval_ops(self, features, targets, metrics=None):
"""See base class."""
features = self._get_feature_dict(features)
logits = self._logits(features)
return self._target_column.get_eval_ops(features, logits, targets, metrics)
class DNNLinearCombinedRegressor(_DNNLinearCombinedBaseEstimator):
"""A regressor for TensorFlow Linear and DNN joined training models.
Example:
```python
education = sparse_column_with_hash_bucket(column_name="education",
hash_bucket_size=1000)
occupation = sparse_column_with_hash_bucket(column_name="occupation",
hash_bucket_size=1000)
education_x_occupation = crossed_column(columns=[education, occupation],
hash_bucket_size=10000)
education_emb = embedding_column(sparse_id_column=education, dimension=16,
combiner="sum")
occupation_emb = embedding_column(sparse_id_column=occupation, dimension=16,
combiner="sum")
  estimator = DNNLinearCombinedRegressor(
      # common settings
      weight_column_name=weight_column_name,
# wide settings
linear_feature_columns=[education_x_occupation],
linear_optimizer=tf.train.FtrlOptimizer(...),
# deep settings
dnn_feature_columns=[education_emb, occupation_emb],
dnn_hidden_units=[1000, 500, 100],
dnn_optimizer=tf.train.ProximalAdagradOptimizer(...))
# To apply L1 and L2 regularization, you can set optimizers as follows:
tf.train.ProximalAdagradOptimizer(
learning_rate=0.1,
l1_regularization_strength=0.001,
l2_regularization_strength=0.001)
# It is same for FtrlOptimizer.
# Input builders
  def input_fn_train(): # returns x, y
    ...
  def input_fn_eval(): # returns x, y
    ...
estimator.train(input_fn_train)
estimator.evaluate(input_fn_eval)
estimator.predict(x)
```
Input of `fit`, `train`, and `evaluate` should have following features,
otherwise there will be a `KeyError`:
if `weight_column_name` is not `None`, a feature with
`key=weight_column_name` whose value is a `Tensor`.
for each `column` in `dnn_feature_columns` + `linear_feature_columns`:
- if `column` is a `SparseColumn`, a feature with `key=column.name`
whose `value` is a `SparseTensor`.
- if `column` is a `WeightedSparseColumn`, two features: the first with
`key` the id column name, the second with `key` the weight column name.
Both features' `value` must be a `SparseTensor`.
    - if `column` is a `RealValuedColumn`, a feature with `key=column.name`
whose `value` is a `Tensor`.
"""
def __init__(self,
model_dir=None,
weight_column_name=None,
linear_feature_columns=None,
linear_optimizer=None,
dnn_feature_columns=None,
dnn_optimizer=None,
dnn_hidden_units=None,
dnn_activation_fn=nn.relu,
dnn_dropout=None,
gradient_clip_norm=None,
enable_centered_bias=True,
target_dimension=1,
config=None):
"""Initializes a DNNLinearCombinedRegressor instance.
Args:
model_dir: Directory to save model parameters, graph and etc.
weight_column_name: A string defining feature column name representing
weights. It is used to down weight or boost examples during training. It
will be multiplied by the loss of the example.
linear_feature_columns: An iterable containing all the feature columns
used by linear part of the model. All items in the set must be
instances of classes derived from `FeatureColumn`.
linear_optimizer: An instance of `tf.Optimizer` used to apply gradients to
the linear part of the model. If `None`, will use a FTRL optimizer.
dnn_feature_columns: An iterable containing all the feature columns used
by deep part of the model. All items in the set must be instances of
classes derived from `FeatureColumn`.
dnn_optimizer: An instance of `tf.Optimizer` used to apply gradients to
the deep part of the model. If `None`, will use an Adagrad optimizer.
dnn_hidden_units: List of hidden units per layer. All layers are fully
connected.
dnn_activation_fn: Activation function applied to each layer. If None,
will use `tf.nn.relu`.
dnn_dropout: When not None, the probability we will drop out
a given coordinate.
gradient_clip_norm: A float > 0. If provided, gradients are clipped
to their global norm with this clipping ratio. See
tf.clip_by_global_norm for more details.
enable_centered_bias: A bool. If True, the estimator will learn a centered
bias variable for each class. The rest of the model structure learns the
residual after the centered bias.
target_dimension: TODO(zakaria): dimension of the target for multilabels.
config: RunConfig object to configure the runtime settings.
Raises:
ValueError: If both linear_feature_columns and dnn_feature_columns are
empty at the same time.
"""
target_column = layers.regression_target(
weight_column_name=weight_column_name,
target_dimension=target_dimension)
super(DNNLinearCombinedRegressor, self).__init__(
model_dir=model_dir,
linear_feature_columns=linear_feature_columns,
linear_optimizer=linear_optimizer,
dnn_feature_columns=dnn_feature_columns,
dnn_optimizer=dnn_optimizer,
dnn_hidden_units=dnn_hidden_units,
dnn_activation_fn=dnn_activation_fn,
dnn_dropout=dnn_dropout,
gradient_clip_norm=gradient_clip_norm,
enable_centered_bias=enable_centered_bias,
target_column=target_column,
config=config)
def _get_eval_ops(self, features, targets, metrics=None):
"""See base class."""
features = self._get_feature_dict(features)
logits = self._logits(features)
return self._target_column.get_eval_ops(features, logits, targets, metrics)
|
|
# Copyright 2011 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This module contains the BaseHandler."""
__author__ = \
'[email protected] (Jeff Posnick) and [email protected] (JJ Behrens)'
from cgi import escape
from apiclient.errors import HttpError
from gdata.service import RequestError
from google.appengine.api import app_identity, users
from google.appengine.ext import webapp
import httplib2
from oauth2client.appengine import CredentialsModel, OAuth2Decorator, \
StorageByKeyName
from playlistpicker import model
from playlistpicker.utils import googleplus as googleplusutils
from playlistpicker.utils import memcache as memcacheutils
from playlistpicker.utils import web as webutils
from playlistpicker.utils import youtube as youtubeutils
class BaseHandler(webapp.RequestHandler):
"""This is the base request handler.
It contains code that's common to several request handlers. Some of these
things couldn't be moved into the utils directory because they depend too
heavily on the request handler's environment.
"""
# The client_id and client_secret are copied from the API Access tab on
# the Google APIs Console <http://code.google.com/apis/console>
oauth2_decorator = OAuth2Decorator(
client_id='205496663185.apps.googleusercontent.com',
client_secret='-84bG7a8jGJDRCqD6f8ug_c0',
scope=" ".join([
"https://www.googleapis.com/auth/plus.me",
"http://gdata.youtube.com"
]),
user_agent=app_identity.get_application_id())
def __init__(self):
"""Set some instance variables.
I hate to set these here (especially to None!) because they're actually
set in various decorators. However, I'm trying to appease PyCharm because
it's really useful to have it statically figure out where stuff is coming
from.
"""
self.current_user_id = None
self.current_display_name = None
self.people = None
self.owner_oauth_token = None
self.playlist_uri = None
self.playlist_entry_id = None
@staticmethod
def playlist_entry_uri_required(handler_method):
"""This is a decorator to parse the uri parameter.
Set self.playlist_uri and self.playlist_entry_id. Automatically handle
any errors.
"""
def handle(self, *args, **kargs):
uri = self.request.get("uri")
match = youtubeutils.PLAYLIST_ENTRY_URL_RE.match(uri)
if not match:
self.error(400)
self.response.out.write("Invalid uri parameter: %s" % escape(uri))
return
self.playlist_uri, self.playlist_entry_id = match.groups()
return handler_method(self, *args, **kargs)
return handle
@staticmethod
def authorize_playlist(handler_method):
"""Lookup the playlist and check authorization.
The owner of the playlist can always edit the playlist. Other users can
only edit the playlist if they have the right uuid in the URL.
This decorator wraps handler methods. The handler method must receive a
playlist_id argument, and it must use @decorator.oauth_required before
this decorator.
This decorator will set:
- self.playlist_metadata
- self.people
- self.current_user_id
- self.current_display_name
- self.owner_oauth_token
"""
def handle(self, playlist_id):
try:
self.playlist_metadata = model.PlaylistMetadata.gql(
"WHERE playlist_id = :1", playlist_id)[0]
except IndexError:
self.error(404)
self.response.out.write("Party Playlist Picker does not know about playlist %s." %
escape(playlist_id))
return
if users.get_current_user() != self.playlist_metadata.owner:
if self.request.get("uuid", -1) != self.playlist_metadata.uuid:
self.error(401)
self.response.out.write("You are not authorized to view this page.")
return
owner_id = self.playlist_metadata.owner.user_id()
owner_credentials = StorageByKeyName(CredentialsModel, owner_id,
'credentials').get()
self.owner_oauth_token = owner_credentials.access_token
try:
me = memcacheutils.cache_call(
key=self.oauth2_decorator.credentials.access_token,
namespace="oauth2_token_to_user",
time=memcacheutils.USER_EXPIRATION_SECS,
f=lambda: googleplusutils.service.people().get(userId="me").execute(
webutils.create_authorized_http_with_timeout(
self.oauth2_decorator.credentials)))
except HttpError, e:
if e.resp['status'] == 404:
webutils.render_to_response(self, "no_profile.html")
return
raise
self.current_user_id = me["id"]
self.current_display_name = me["displayName"]
# TODO: Front some of the following datastore lookups with memcache.
query = model.Person.all().filter("user_id = ", me["id"])
person = query.get()
if person is None:
person = model.Person(
user_id=me["id"],
display_name=me["displayName"],
image_url=me.get("image", {}).get("url",
"/static/images/default_profile.jpg"),
profile_url=me["url"]
)
person.put()
query = model.PlaylistEditors.all().filter("playlist_id = ",
playlist_id).ancestor(person)
if query.get() is None:
model.PlaylistEditors(parent=person, playlist_id=playlist_id).put()
# We'll probably end up moving this out of the decorator entirely.
self.people = []
playlist_editors = model.PlaylistEditors.all().filter("playlist_id = ",
playlist_id)
for playlist_editor in playlist_editors:
person = playlist_editor.parent()
self.people.append(dict(
user_id=person.user_id,
display_name=person.display_name,
image_url=person.image_url,
profile_url=person.profile_url
))
handler_method(self, playlist_id)
return handle
def handle_exception(self, exception, debug_mode):
"""Handle certain global exceptions such as OAuth2 problems."""
if (isinstance(exception, RequestError) and
exception.args[0]["status"] == 401):
body = exception.args[0]["body"]
if "Stateless token expired" in body:
self._force_refresh()
self.redirect(self.request.url)
return
if "NoLinkedYouTubeAccount" in body:
webutils.render_to_response(self, "unlinked_account.html")
return
webapp.RequestHandler.handle_exception(self, exception, debug_mode)
@oauth2_decorator.oauth_required
def _force_refresh(self):
"""HACK: Force the refresh of the OAuth2 token."""
self.oauth2_decorator.credentials._refresh(httplib2.Http().request)
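# Illustrative sketch only (not part of the application): how a concrete
# handler typically combines the decorators defined on BaseHandler. The
# handler class name and template name are hypothetical. Note the ordering:
# oauth_required is listed first so that OAuth2 credentials are available by
# the time authorize_playlist runs, as its docstring requires.
class ExamplePlaylistHandler(BaseHandler):
  """A hypothetical handler that renders a page for a single playlist."""
  @BaseHandler.oauth2_decorator.oauth_required
  @BaseHandler.authorize_playlist
  def get(self, playlist_id):
    # authorize_playlist has already populated self.playlist_metadata,
    # self.people, self.current_user_id, self.current_display_name, and
    # self.owner_oauth_token by the time this method body runs.
    webutils.render_to_response(self, "example_playlist.html")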
|
|
# Copyright 2013 IBM Corp.
# Copyright (c) 2013 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
IBM Storage driver is a unified Volume driver for IBM XIV, Spectrum Accelerate,
FlashSystem A9000, FlashSystem A9000R and DS8000 storage systems.
"""
from oslo_config import cfg
from oslo_log import log as logging
from oslo_utils import importutils
from cinder import exception
from cinder import interface
from cinder.volume import configuration
from cinder.volume import driver
from cinder.volume.drivers.san import san
from cinder.zonemanager import utils as fczm_utils
driver_opts = [
cfg.StrOpt(
'proxy',
default='cinder.volume.drivers.ibm.ibm_storage.proxy.IBMStorageProxy',
help='Proxy driver that connects to the IBM Storage Array'),
cfg.StrOpt(
'connection_type',
default='iscsi',
choices=['fibre_channel', 'iscsi'],
help='Connection type to the IBM Storage Array'),
cfg.StrOpt(
'chap',
default='disabled',
choices=['disabled', 'enabled'],
help='CHAP authentication mode, effective only for iscsi'
' (disabled|enabled)'),
cfg.StrOpt(
'management_ips',
default='',
help='List of Management IP addresses (separated by commas)'),
]
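# Illustrative example (not consumed by this module): how the options above
# are typically set in cinder.conf for a backend that uses this driver. The
# backend name, addresses and credentials are placeholders, and the
# volume_driver module path may differ between releases.
#
#   [ibm_backend_1]
#   volume_driver = cinder.volume.drivers.ibm.ibm_storage.ibm_storage.IBMStorageDriver
#   volume_backend_name = ibm_backend_1
#   san_ip = 10.0.0.10
#   san_login = admin
#   san_password = secret
#   san_clustername = pool_a
#   connection_type = iscsi
#   chap = disabled
#   proxy = cinder.volume.drivers.ibm.ibm_storage.proxy.IBMStorageProxy
#   management_ips = 10.0.0.11,10.0.0.12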
CONF = cfg.CONF
CONF.register_opts(driver_opts, group=configuration.SHARED_CONF_GROUP)
LOG = logging.getLogger(__name__)
@interface.volumedriver
class IBMStorageDriver(san.SanDriver,
driver.ManageableVD,
driver.MigrateVD,
driver.CloneableImageVD):
"""IBM Storage driver
IBM Storage driver is a unified Volume driver for IBM XIV, Spectrum
Accelerate, FlashSystem A9000, FlashSystem A9000R and DS8000 storage
systems.
Version history:
.. code-block:: none
2.0 - First open source driver version
2.1.0 - Support Consistency groups through Generic volume groups
- Support XIV/A9000 Volume independent QoS
- Support Consistency groups replication
2.3.0 - Support Report backend state
"""
VERSION = "2.3.0"
# ThirdPartySystems wiki page
CI_WIKI_NAME = "IBM_STORAGE_CI"
def __init__(self, *args, **kwargs):
"""Initialize the driver."""
super(IBMStorageDriver, self).__init__(*args, **kwargs)
self.configuration.append_config_values(driver_opts)
proxy = importutils.import_class(self.configuration.proxy)
active_backend_id = kwargs.get('active_backend_id', None)
# Additional driver flags should be specified in cinder.conf, preferably
# in each backend's configuration section.
self.proxy = proxy(
{
"user": self.configuration.san_login,
"password": self.configuration.san_password,
"address": self.configuration.san_ip,
"vol_pool": self.configuration.san_clustername,
"connection_type": self.configuration.connection_type,
"chap": self.configuration.chap,
"management_ips": self.configuration.management_ips
},
LOG,
exception,
driver=self,
active_backend_id=active_backend_id,
host=self.host)
@staticmethod
def get_driver_options():
return driver_opts
def do_setup(self, context):
"""Setup and verify connection to IBM Storage."""
self.proxy.setup(context)
def ensure_export(self, context, volume):
"""Ensure an export."""
return self.proxy.ensure_export(context, volume)
def create_export(self, context, volume, connector):
"""Create an export."""
return self.proxy.create_export(context, volume)
def create_volume(self, volume):
"""Create a volume on the IBM Storage system."""
return self.proxy.create_volume(volume)
def delete_volume(self, volume):
"""Delete a volume on the IBM Storage system."""
self.proxy.delete_volume(volume)
def remove_export(self, context, volume):
"""Disconnect a volume from an attached instance."""
return self.proxy.remove_export(context, volume)
def initialize_connection(self, volume, connector):
"""Map the created volume."""
conn_info = self.proxy.initialize_connection(volume, connector)
fczm_utils.add_fc_zone(conn_info)
return conn_info
def terminate_connection(self, volume, connector, **kwargs):
"""Terminate a connection to a volume."""
conn_info = self.proxy.terminate_connection(volume, connector)
fczm_utils.remove_fc_zone(conn_info)
return conn_info
def create_volume_from_snapshot(self, volume, snapshot):
"""Create a volume from a snapshot."""
return self.proxy.create_volume_from_snapshot(
volume,
snapshot)
def create_snapshot(self, snapshot):
"""Create a snapshot."""
return self.proxy.create_snapshot(snapshot)
def delete_snapshot(self, snapshot):
"""Delete a snapshot."""
return self.proxy.delete_snapshot(snapshot)
def get_volume_stats(self, refresh=False):
"""Get volume stats."""
return self.proxy.get_volume_stats(refresh)
def create_cloned_volume(self, tgt_volume, src_volume):
"""Create Cloned Volume."""
return self.proxy.create_cloned_volume(tgt_volume, src_volume)
def extend_volume(self, volume, new_size):
"""Extend Created Volume."""
self.proxy.extend_volume(volume, new_size)
def migrate_volume(self, context, volume, host):
"""Migrate the volume to the specified host."""
return self.proxy.migrate_volume(context, volume, host)
def manage_existing(self, volume, existing_ref):
"""Brings an existing backend storage object to Cinder management."""
return self.proxy.manage_volume(volume, existing_ref)
def manage_existing_get_size(self, volume, existing_ref):
"""Return size of volume to be managed by manage_existing."""
return self.proxy.manage_volume_get_size(volume, existing_ref)
def unmanage(self, volume):
"""Removes the specified volume from Cinder management."""
return self.proxy.unmanage_volume(volume)
def freeze_backend(self, context):
"""Notify the backend that it's frozen. """
return self.proxy.freeze_backend(context)
def thaw_backend(self, context):
"""Notify the backend that it's unfrozen/thawed. """
return self.proxy.thaw_backend(context)
def failover_host(self, context, volumes, secondary_id=None, groups=None):
"""Failover a backend to a secondary replication target. """
return self.proxy.failover_host(
context, volumes, secondary_id, groups)
def get_replication_status(self, context, volume):
"""Return replication status."""
return self.proxy.get_replication_status(context, volume)
def retype(self, ctxt, volume, new_type, diff, host):
"""Convert the volume to be of the new type."""
return self.proxy.retype(ctxt, volume, new_type, diff, host)
def revert_to_snapshot(self, ctxt, volume, snapshot):
"""Revert volume to snapshot."""
return self.proxy.revert_to_snapshot(ctxt, volume, snapshot)
def create_group(self, context, group):
"""Creates a group."""
return self.proxy.create_group(context, group)
def delete_group(self, context, group, volumes):
"""Deletes a group."""
return self.proxy.delete_group(context, group, volumes)
def create_group_snapshot(self, context, group_snapshot, snapshots):
"""Creates a group snapshot."""
return self.proxy.create_group_snapshot(
context, group_snapshot, snapshots)
def delete_group_snapshot(self, context, group_snapshot, snapshots):
"""Deletes a group snapshot."""
return self.proxy.delete_group_snapshot(
context, group_snapshot, snapshots)
def update_group(self, context, group, add_volumes, remove_volumes):
"""Adds or removes volume(s) to/from an existing group."""
return self.proxy.update_group(
context, group, add_volumes, remove_volumes)
def create_group_from_src(
self, context, group, volumes, group_snapshot, snapshots,
source_cg=None, source_vols=None):
"""Creates a group from source."""
return self.proxy.create_group_from_src(
context, group, volumes, group_snapshot, snapshots,
source_cg, source_vols)
def enable_replication(self, context, group, volumes):
"""Enable replication."""
return self.proxy.enable_replication(context, group, volumes)
def disable_replication(self, context, group, volumes):
"""Disable replication."""
return self.proxy.disable_replication(context, group, volumes)
def failover_replication(self, context, group, volumes,
secondary_backend_id):
"""Failover replication."""
return self.proxy.failover_replication(context, group, volumes,
secondary_backend_id)
def get_replication_error_status(self, context, groups):
"""Returns error info for replicated groups and its volumes."""
return self.proxy.get_replication_error_status(context, groups)
|