from xml.sax.handler import ContentHandler
from xml.sax.xmlreader import Locator
import sys
import xml.sax # @UnusedImport
import xml.sax.handler # @UnusedImport
class AimlParserError(Exception): pass
class AimlHandler(ContentHandler):
# The legal states of the AIML parser
_STATE_OutsideAiml = 0
_STATE_InsideAiml = 1
_STATE_InsideCategory = 2
_STATE_InsidePattern = 3
_STATE_AfterPattern = 4
_STATE_InsideThat = 5
_STATE_AfterThat = 6
_STATE_InsideTemplate = 7
_STATE_AfterTemplate = 8
def __init__(self, encoding = "UTF-8"):
self.categories = {}
self._encoding = encoding
self._state = self._STATE_OutsideAiml
self._version = ""
self._namespace = ""
self._forwardCompatibleMode = False
self._currentPattern = ""
self._currentThat = ""
self._currentTopic = ""
self._insideTopic = False
self._currentUnknown = "" # the name of the current unknown element
# This is set to true when a parse error occurs in a category.
self._skipCurrentCategory = False
# Counts the number of parse errors in a particular AIML document.
        # Query it with getNumErrors(). If 0, the document is AIML-compliant.
self._numParseErrors = 0
# TODO: select the proper validInfo table based on the version number.
self._validInfo = self._validationInfo101
# This stack of bools is used when parsing <li> elements inside
# <condition> elements, to keep track of whether or not an
# attribute-less "default" <li> element has been found yet. Only
# one default <li> is allowed in each <condition> element. We need
# a stack in order to correctly handle nested <condition> tags.
self._foundDefaultLiStack = []
# This stack of strings indicates what the current whitespace-handling
# behavior should be. Each string in the stack is either "default" or
# "preserve". When a new AIML element is encountered, a new string is
# pushed onto the stack, based on the value of the element's "xml:space"
# attribute (if absent, the top of the stack is pushed again). When
# ending an element, pop an object off the stack.
self._whitespaceBehaviorStack = ["default"]
self._elemStack = []
self._locator = Locator()
self.setDocumentLocator(self._locator)
def getNumErrors(self):
"Return the number of errors found while parsing the current document."
return self._numParseErrors
def setEncoding(self, encoding):
"""Set the text encoding to use when encoding strings read from XML.
Defaults to 'UTF-8'.
"""
self._encoding = encoding
def _location(self):
"Return a string describing the current location in the source file."
line = self._locator.getLineNumber()
column = self._locator.getColumnNumber()
return "(line %d, column %d)" % (line, column)
def _pushWhitespaceBehavior(self, attr):
"""Push a new string onto the whitespaceBehaviorStack.
The string's value is taken from the "xml:space" attribute, if it exists
and has a legal value ("default" or "preserve"). Otherwise, the previous
stack element is duplicated.
"""
assert len(self._whitespaceBehaviorStack) > 0, "Whitespace behavior stack should never be empty!"
try:
if attr["xml:space"] == "default" or attr["xml:space"] == "preserve":
self._whitespaceBehaviorStack.append(attr["xml:space"])
else:
raise AimlParserError("Invalid value for xml:space attribute "+self._location())
except KeyError:
self._whitespaceBehaviorStack.append(self._whitespaceBehaviorStack[-1])
def startElementNS(self, name, qname, attr):
print("QNAME:", qname)
print("NAME:", name)
uri,elem = name # @UnusedVariable
if (elem == "bot"): print("name:", attr.getValueByQName("name"), "a'ite?")
self.startElement(elem, attr)
pass
def startElement(self, name, attr):
# Wrapper around _startElement, which catches errors in _startElement()
# and keeps going.
# If we're inside an unknown element, ignore everything until we're
# out again.
if self._currentUnknown != "":
return
# If we're skipping the current category, ignore everything until
# it's finished.
if self._skipCurrentCategory:
return
# process this start-element.
try: self._startElement(name, attr)
except AimlParserError as msg:
# Print the error message
sys.stderr.write("PARSE ERROR: %s\n" % msg)
self._numParseErrors += 1 # increment error count
# In case of a parse error, if we're inside a category, skip it.
if self._state >= self._STATE_InsideCategory:
self._skipCurrentCategory = True
def _startElement(self, name, attr):
if name == "aiml":
# <aiml> tags are only legal in the OutsideAiml state
if self._state != self._STATE_OutsideAiml:
raise AimlParserError("Unexpected <aiml> tag "+self._location())
self._state = self._STATE_InsideAiml
self._insideTopic = False
self._currentTopic = ""
try: self._version = attr["version"]
except KeyError:
# This SHOULD be a syntax error, but so many AIML sets out there are missing
# "version" attributes that it just seems nicer to let it slide.
#raise AimlParserError, "Missing 'version' attribute in <aiml> tag "+self._location()
#print "WARNING: Missing 'version' attribute in <aiml> tag "+self._location()
#print " Defaulting to version 1.0"
self._version = "1.0"
self._forwardCompatibleMode = (self._version != "1.0.1")
self._pushWhitespaceBehavior(attr)
# Not sure about this namespace business yet...
#try:
# self._namespace = attr["xmlns"]
# if self._version == "1.0.1" and self._namespace != "http://alicebot.org/2001/AIML-1.0.1":
# raise AimlParserError, "Incorrect namespace for AIML v1.0.1 "+self._location()
#except KeyError:
# if self._version != "1.0":
# raise AimlParserError, "Missing 'version' attribute(s) in <aiml> tag "+self._location()
elif self._state == self._STATE_OutsideAiml:
# If we're outside of an AIML element, we ignore all tags.
return
elif name == "topic":
# <topic> tags are only legal in the InsideAiml state, and only
# if we're not already inside a topic.
if (self._state != self._STATE_InsideAiml) or self._insideTopic:
raise AimlParserError("Unexpected <topic> tag").with_traceback(self._location())
try: self._currentTopic = str(attr['name'])
except KeyError:
raise AimlParserError("Required \"name\" attribute missing in <topic> element "+self._location())
self._insideTopic = True
elif name == "category":
# <category> tags are only legal in the InsideAiml state
if self._state != self._STATE_InsideAiml:
raise AimlParserError("Unexpected <category> tag "+self._location())
self._state = self._STATE_InsideCategory
self._currentPattern = ""
self._currentThat = ""
# If we're not inside a topic, the topic is implicitly set to *
if not self._insideTopic: self._currentTopic = "*"
self._elemStack = []
self._pushWhitespaceBehavior(attr)
elif name == "pattern":
# <pattern> tags are only legal in the InsideCategory state
if self._state != self._STATE_InsideCategory:
raise AimlParserError("Unexpected <pattern> tag "+self._location())
self._state = self._STATE_InsidePattern
elif name == "that" and self._state == self._STATE_AfterPattern:
# <that> are legal either inside a <template> element, or
# inside a <category> element, between the <pattern> and the
# <template> elements. This clause handles the latter case.
self._state = self._STATE_InsideThat
elif name == "template":
# <template> tags are only legal in the AfterPattern and AfterThat
# states
if self._state not in [self._STATE_AfterPattern, self._STATE_AfterThat]:
raise AimlParserError("Unexpected <template> tag "+self._location())
# if no <that> element was specified, it is implicitly set to *
if self._state == self._STATE_AfterPattern:
self._currentThat = "*"
self._state = self._STATE_InsideTemplate
self._elemStack.append(['template',{}])
self._pushWhitespaceBehavior(attr)
elif self._state == self._STATE_InsidePattern:
# Certain tags are allowed inside <pattern> elements.
if name == "bot" and "name" in attr and attr["name"] == "name":
# Insert a special character string that the PatternMgr will
# replace with the bot's name.
self._currentPattern += " BOT_NAME "
else:
raise AimlParserError(("Unexpected <%s> tag " % name)+self._location())
elif self._state == self._STATE_InsideThat:
# Certain tags are allowed inside <that> elements.
if name == "bot" and "name" in attr and attr["name"] == "name":
# Insert a special character string that the PatternMgr will
# replace with the bot's name.
self._currentThat += " BOT_NAME "
else:
raise AimlParserError(("Unexpected <%s> tag " % name)+self._location())
elif self._state == self._STATE_InsideTemplate and name in self._validInfo:
# Starting a new element inside the current pattern. First
# we need to convert 'attr' into a native Python dictionary,
# so it can later be marshaled.
attrDict = {}
for k,v in list(attr.items()):
#attrDict[k[1].encode(self._encoding)] = v.encode(self._encoding)
#attrDict[k.encode(self._encoding)] = str(v) # This makes the attr's bytes, and they don't work with comparing strings.
attrDict[str(k)] = str(v)
self._validateElemStart(name, attrDict, self._version)
# Push the current element onto the element stack.
#self._elemStack.append([name.encode(self._encoding),attrDict])
self._elemStack.append([name,attrDict])
self._pushWhitespaceBehavior(attr)
# If this is a condition element, push a new entry onto the
# foundDefaultLiStack
if name == "condition":
self._foundDefaultLiStack.append(False)
else:
# we're now inside an unknown element.
if self._forwardCompatibleMode:
# In Forward Compatibility Mode, we ignore the element and its
# contents.
self._currentUnknown = name
else:
# Otherwise, unknown elements are grounds for error!
raise AimlParserError(("Unexpected <%s> tag " % name)+self._location())
def characters(self, ch):
# Wrapper around _characters which catches errors in _characters()
# and keeps going.
if self._state == self._STATE_OutsideAiml:
# If we're outside of an AIML element, we ignore all text
return
if self._currentUnknown != "":
# If we're inside an unknown element, ignore all text
return
if self._skipCurrentCategory:
# If we're skipping the current category, ignore all text.
return
try: self._characters(ch)
except AimlParserError as msg:
# Print the message
sys.stderr.write("PARSE ERROR: %s\n" % msg)
self._numParseErrors += 1 # increment error count
# In case of a parse error, if we're inside a category, skip it.
if self._state >= self._STATE_InsideCategory:
self._skipCurrentCategory = True
def _characters(self, ch):
text = str(ch)
if self._state == self._STATE_InsidePattern:
self._currentPattern += text
elif self._state == self._STATE_InsideThat:
self._currentThat += text
elif self._state == self._STATE_InsideTemplate:
# First, see whether the element at the top of the element stack
# is permitted to contain text.
try:
parent = self._elemStack[-1][0]
parentAttr = self._elemStack[-1][1]
required, optional, canBeParent = self._validInfo[parent] # @UnusedVariable
nonBlockStyleCondition = (parent == "condition" and not ("name" in parentAttr and "value" in parentAttr))
if not canBeParent:
raise AimlParserError(("Unexpected text inside <%s> element "%parent)+self._location())
elif parent == "random" or nonBlockStyleCondition:
# <random> elements can only contain <li> subelements. However,
# there's invariably some whitespace around the <li> that we need
# to ignore. Same for non-block-style <condition> elements (i.e.
# those which don't have both a "name" and a "value" attribute).
if len(text.strip()) == 0:
# ignore whitespace inside these elements.
return
else:
# non-whitespace text inside these elements is a syntax error.
raise AimlParserError(("Unexpected text inside <%s> element "%parent)+self._location())
except IndexError:
# the element stack is empty. This should never happen.
raise AimlParserError("Element stack is empty while validating text "+self._location())
# Add a new text element to the element at the top of the element
# stack. If there's already a text element there, simply append the
# new characters to its contents.
try: textElemOnStack = (self._elemStack[-1][-1][0] == "text")
except IndexError: textElemOnStack = False
except KeyError: textElemOnStack = False
if textElemOnStack:
self._elemStack[-1][-1][2] += text
else:
self._elemStack[-1].append(["text", {"xml:space": self._whitespaceBehaviorStack[-1]}, text])
else:
# all other text is ignored
pass
def endElementNS(self, name, qname):
uri, elem = name # @UnusedVariable
self.endElement(elem)
def endElement(self, name):
"""Wrapper around _endElement which catches errors in _characters()
and keeps going.
"""
if self._state == self._STATE_OutsideAiml:
# If we're outside of an AIML element, ignore all tags
return
if self._currentUnknown != "":
# see if we're at the end of an unknown element. If so, we can
# stop ignoring everything.
if name == self._currentUnknown:
self._currentUnknown = ""
return
if self._skipCurrentCategory:
# If we're skipping the current category, see if it's ending. We
# stop on ANY </category> tag, since we're not keeping track of
# state in ignore-mode.
if name == "category":
self._skipCurrentCategory = False
self._state = self._STATE_InsideAiml
return
try: self._endElement(name)
except AimlParserError as msg:
# Print the message
sys.stderr.write("PARSE ERROR: %s\n" % msg)
self._numParseErrors += 1 # increment error count
# In case of a parse error, if we're inside a category, skip it.
if self._state >= self._STATE_InsideCategory:
self._skipCurrentCategory = True
def _endElement(self, name):
"""Verify that an AIML end element is valid in the current
context.
Raises an AimlParserError if an illegal end element is encountered.
"""
if name == "aiml":
# </aiml> tags are only legal in the InsideAiml state
if self._state != self._STATE_InsideAiml:
raise AimlParserError("Unexpected </aiml> tag "+self._location())
self._state = self._STATE_OutsideAiml
self._whitespaceBehaviorStack.pop()
elif name == "topic":
# </topic> tags are only legal in the InsideAiml state, and
# only if _insideTopic is true.
if self._state != self._STATE_InsideAiml or not self._insideTopic:
raise AimlParserError("Unexpected </topic> tag "+self._location())
self._insideTopic = False
self._currentTopic = ""
elif name == "category":
# </category> tags are only legal in the AfterTemplate state
if self._state != self._STATE_AfterTemplate:
raise AimlParserError("Unexpected </category> tag "+self._location())
self._state = self._STATE_InsideAiml
# End the current category. Store the current pattern/that/topic and
# element in the categories dictionary.
key = (self._currentPattern.strip(), self._currentThat.strip(),self._currentTopic.strip())
self.categories[key] = self._elemStack[-1]
self._whitespaceBehaviorStack.pop()
elif name == "pattern":
# </pattern> tags are only legal in the InsidePattern state
if self._state != self._STATE_InsidePattern:
raise AimlParserError("Unexpected </pattern> tag "+self._location())
self._state = self._STATE_AfterPattern
elif name == "that" and self._state == self._STATE_InsideThat:
# </that> tags are only allowed inside <template> elements or in
# the InsideThat state. This clause handles the latter case.
self._state = self._STATE_AfterThat
elif name == "template":
# </template> tags are only allowed in the InsideTemplate state.
if self._state != self._STATE_InsideTemplate:
raise AimlParserError("Unexpected </template> tag "+self._location())
self._state = self._STATE_AfterTemplate
self._whitespaceBehaviorStack.pop()
elif self._state == self._STATE_InsidePattern:
# Certain tags are allowed inside <pattern> elements.
if name not in ["bot"]:
raise AimlParserError(("Unexpected </%s> tag " % name)+self._location())
elif self._state == self._STATE_InsideThat:
# Certain tags are allowed inside <that> elements.
if name not in ["bot"]:
raise AimlParserError(("Unexpected </%s> tag " % name)+self._location())
elif self._state == self._STATE_InsideTemplate:
# End of an element inside the current template. Append the
# element at the top of the stack onto the one beneath it.
elem = self._elemStack.pop()
self._elemStack[-1].append(elem)
self._whitespaceBehaviorStack.pop()
# If the element was a condition, pop an item off the
# foundDefaultLiStack as well.
if elem[0] == "condition": self._foundDefaultLiStack.pop()
else:
# Unexpected closing tag
raise AimlParserError(("Unexpected </%s> tag " % name)+self._location())
# A dictionary containing a validation information for each AIML
# element. The keys are the names of the elements. The values are a
# tuple of three items. The first is a list containing the names of
# REQUIRED attributes, the second is a list of OPTIONAL attributes,
# and the third is a boolean value indicating whether or not the
# element can contain other elements and/or text (if False, the
# element can only appear in an atomic context, such as <date/>).
_validationInfo101 = {
"bot": ( ["name"], [], False ),
"condition": ( [], ["name", "value"], True ), # can only contain <li> elements
"date": ( [], [], False ),
"formal": ( [], [], True ),
"gender": ( [], [], True ),
"get": ( ["name"], [], False ),
"gossip": ( [], [], True ),
"id": ( [], [], False ),
"input": ( [], ["index"], False ),
"javascript": ( [], [], True ),
"learn": ( [], [], True ),
"li": ( [], ["name", "value"], True ),
"lowercase": ( [], [], True ),
"person": ( [], [], True ),
"person2": ( [], [], True ),
"random": ( [], [], True ), # can only contain <li> elements
"sentence": ( [], [], True ),
"set": ( ["name"], [], True),
"size": ( [], [], False ),
"sr": ( [], [], False ),
"srai": ( [], [], True ),
"star": ( [], ["index"], False ),
"system": ( [], [], True ),
"template": ( [], [], True ), # needs to be in the list because it can be a parent.
"that": ( [], ["index"], False ),
"thatstar": ( [], ["index"], False ),
"think": ( [], [], True ),
"topicstar": ( [], ["index"], False ),
"uppercase": ( [], [], True ),
"version": ( [], [], False ),
}
def _validateElemStart(self, name, attr, version):
"""Test the validity of an element starting inside a <template>
element.
        This function raises an AimlParserError exception if the tag is
invalid. Otherwise, no news is good news.
"""
# Check the element's attributes. Make sure that all required
# attributes are present, and that any remaining attributes are
# valid options.
required, optional, canBeParent = self._validInfo[name] # @UnusedVariable
for a in required:
if a not in attr and not self._forwardCompatibleMode:
raise AimlParserError(("Required \"%s\" attribute missing in <%s> element " % (a,name))+self._location())
for a in attr:
if a in required: continue
if a[0:4] == "xml:": continue # attributes in the "xml" namespace can appear anywhere
if a not in optional and not self._forwardCompatibleMode:
raise AimlParserError(("Unexpected \"%s\" attribute in <%s> element " % (a,name))+self._location())
# special-case: several tags contain an optional "index" attribute.
# This attribute's value must be a positive integer.
if name in ["star", "thatstar", "topicstar"]:
for k,v in list(attr.items()):
if k == "index":
temp = 0
try: temp = int(v)
                    except (ValueError, TypeError):
raise AimlParserError(("Bad type for \"%s\" attribute (expected integer, found \"%s\") " % (k,v))+self._location())
if temp < 1:
raise AimlParserError(("\"%s\" attribute must have non-negative value " % (k))+self._location())
# See whether the containing element is permitted to contain
# subelements. If not, this element is invalid no matter what it is.
try:
parent = self._elemStack[-1][0]
parentAttr = self._elemStack[-1][1]
except IndexError:
# If the stack is empty, no parent is present. This should never
# happen.
raise AimlParserError(("Element stack is empty while validating <%s> " % name)+self._location())
required, optional, canBeParent = self._validInfo[parent]
nonBlockStyleCondition = (parent == "condition" and not ("name" in parentAttr and "value" in parentAttr))
if not canBeParent:
raise AimlParserError(("<%s> elements cannot have any contents "%parent)+self._location())
# Special-case test if the parent element is <condition> (the
# non-block-style variant) or <random>: these elements can only
# contain <li> subelements.
elif (parent == "random" or nonBlockStyleCondition) and name!="li":
raise AimlParserError(("<%s> elements can only contain <li> subelements "%parent)+self._location())
# Special-case test for <li> elements, which can only be contained
# by non-block-style <condition> and <random> elements, and whose
# required attributes are dependent upon which attributes are
# present in the <condition> parent.
elif name=="li":
if not (parent=="random" or nonBlockStyleCondition):
raise AimlParserError(("Unexpected <li> element contained by <%s> element "%parent)+self._location())
if nonBlockStyleCondition:
if "name" in parentAttr:
# Single-predicate condition. Each <li> element except the
# last must have a "value" attribute.
if len(attr) == 0:
# This could be the default <li> element for this <condition>,
# unless we've already found one.
if self._foundDefaultLiStack[-1]:
raise AimlParserError("Unexpected default <li> element inside <condition> "+self._location())
else:
self._foundDefaultLiStack[-1] = True
elif len(attr) == 1 and "value" in attr:
pass # this is the valid case
else:
raise AimlParserError("Invalid <li> inside single-predicate <condition> "+self._location())
elif len(parentAttr) == 0:
# Multi-predicate condition. Each <li> element except the
# last must have a "name" and a "value" attribute.
if len(attr) == 0:
# This could be the default <li> element for this <condition>,
# unless we've already found one.
if self._foundDefaultLiStack[-1]:
raise AimlParserError("Unexpected default <li> element inside <condition> "+self._location())
else:
self._foundDefaultLiStack[-1] = True
elif len(attr) == 2 and "value" in attr and "name" in attr:
pass # this is the valid case
else:
raise AimlParserError("Invalid <li> inside multi-predicate <condition> "+self._location())
# All is well!
return True
def create_parser():
"""Create and return an AIML parser object."""
parser = xml.sax.make_parser()
handler = AimlHandler("UTF-8")
parser.setContentHandler(handler)
#parser.setFeature(xml.sax.handler.feature_namespaces, True)
return parser
def main():
parser = create_parser()
handler = parser.getContentHandler()
handler.setEncoding("utf-8")
try:
parser.parse("C:\\Users\\jason\\My Workspaces.new\\In-House\\pyaiml-3\\test.aiml")
except xml.sax.SAXException as msg:
err = "\nFATAL PARSE ERROR in file :\n%s\n" % (msg)
sys.stderr.write(err)
for key, tem in handler.categories.items():
print("Key = %s, tem = %s" % (key, tem))
if __name__ == '__main__':
main()
# Copyright (c) 2012 OpenStack Foundation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import contextlib
import mock
import webob.exc as wexc
from neutron.api.v2 import base
from neutron import context
from neutron.extensions import portbindings
from neutron import manager
from neutron.openstack.common import log as logging
from neutron.plugins.common import constants as p_const
from neutron.plugins.ml2 import driver_api as api
from neutron.plugins.ml2 import driver_context
from neutron.plugins.ml2.drivers.cisco.nexus import config as cisco_config
from neutron.plugins.ml2.drivers.cisco.nexus import exceptions as c_exc
from neutron.plugins.ml2.drivers.cisco.nexus import mech_cisco_nexus
from neutron.plugins.ml2.drivers.cisco.nexus import nexus_db_v2
from neutron.plugins.ml2.drivers.cisco.nexus import nexus_network_driver
from neutron.tests.unit.ml2 import test_ml2_plugin
LOG = logging.getLogger(__name__)
PHYS_NET = 'physnet1'
COMP_HOST_NAME = 'testhost'
COMP_HOST_NAME_2 = 'testhost_2'
VLAN_START = 1000
VLAN_END = 1100
NEXUS_IP_ADDR = '1.1.1.1'
NETWORK_NAME = 'test_network'
NETWORK_NAME_2 = 'test_network_2'
NEXUS_INTERFACE = '1/1'
NEXUS_INTERFACE_2 = '1/2'
CIDR_1 = '10.0.0.0/24'
CIDR_2 = '10.0.1.0/24'
DEVICE_ID_1 = '11111111-1111-1111-1111-111111111111'
DEVICE_ID_2 = '22222222-2222-2222-2222-222222222222'
DEVICE_OWNER = 'compute:None'
BOUND_SEGMENT1 = {api.NETWORK_TYPE: p_const.TYPE_VLAN,
api.PHYSICAL_NETWORK: PHYS_NET,
api.SEGMENTATION_ID: VLAN_START}
BOUND_SEGMENT2 = {api.NETWORK_TYPE: p_const.TYPE_VLAN,
api.PHYSICAL_NETWORK: PHYS_NET,
api.SEGMENTATION_ID: VLAN_START + 1}
class CiscoML2MechanismTestCase(test_ml2_plugin.Ml2PluginV2TestCase):
_mechanism_drivers = ['cisco_nexus']
def setUp(self):
"""Configure for end-to-end neutron testing using a mock ncclient.
This setup includes:
- Configure the ML2 plugin to use VLANs in the range of 1000-1100.
- Configure the Cisco mechanism driver to use an imaginary switch
at NEXUS_IP_ADDR.
- Create a mock NETCONF client (ncclient) for the Cisco mechanism
driver
"""
# Configure the Cisco Nexus mechanism driver
nexus_config = {
(NEXUS_IP_ADDR, 'username'): 'admin',
(NEXUS_IP_ADDR, 'password'): 'mySecretPassword',
(NEXUS_IP_ADDR, 'ssh_port'): 22,
(NEXUS_IP_ADDR, COMP_HOST_NAME): NEXUS_INTERFACE,
(NEXUS_IP_ADDR, COMP_HOST_NAME_2): NEXUS_INTERFACE_2}
nexus_patch = mock.patch.dict(
cisco_config.ML2MechCiscoConfig.nexus_dict,
nexus_config)
nexus_patch.start()
self.addCleanup(nexus_patch.stop)
# The NETCONF client module is not included in the DevStack
# distribution, so mock this module for unit testing.
self.mock_ncclient = mock.Mock()
mock.patch.object(nexus_network_driver.CiscoNexusDriver,
'_import_ncclient',
return_value=self.mock_ncclient).start()
# Mock port context values for bound_segments and 'status'.
self.mock_bound_segment = mock.patch.object(
driver_context.PortContext,
'bottom_bound_segment',
new_callable=mock.PropertyMock).start()
self.mock_bound_segment.return_value = BOUND_SEGMENT1
self.mock_original_bound_segment = mock.patch.object(
driver_context.PortContext,
'original_bottom_bound_segment',
new_callable=mock.PropertyMock).start()
self.mock_original_bound_segment.return_value = None
# Use _is_status_active method to determine bind state.
def _mock_check_bind_state(port_context):
if (port_context[portbindings.VIF_TYPE] !=
portbindings.VIF_TYPE_UNBOUND):
return True
else:
return False
self.mock_status = mock.patch.object(
mech_cisco_nexus.CiscoNexusMechanismDriver,
'_is_status_active').start()
self.mock_status.side_effect = _mock_check_bind_state
super(CiscoML2MechanismTestCase, self).setUp()
self.port_create_status = 'DOWN'
def _create_deviceowner_mock(self):
# Mock deviceowner method for UT's that expect update precommit
# failures. This allows control of delete_port_pre/postcommit()
# actions.
mock_deviceowner = mock.patch.object(
mech_cisco_nexus.CiscoNexusMechanismDriver,
'_is_deviceowner_compute').start()
mock_deviceowner.return_value = False
self.addCleanup(mock_deviceowner.stop)
@contextlib.contextmanager
def _patch_ncclient(self, attr, value):
"""Configure an attribute on the mock ncclient module.
This method can be used to inject errors by setting a side effect
or a return value for an ncclient method.
:param attr: ncclient attribute (typically method) to be configured.
:param value: Value to be configured on the attribute.
"""
# Configure attribute.
config = {attr: value}
self.mock_ncclient.configure_mock(**config)
# Continue testing
yield
# Unconfigure attribute
config = {attr: None}
self.mock_ncclient.configure_mock(**config)
@staticmethod
def _config_dependent_side_effect(match_config, exc):
"""Generates a config-dependent side effect for ncclient edit_config.
This method generates a mock side-effect function which can be
configured on the mock ncclient module for the edit_config method.
This side effect will cause a given exception to be raised whenever
the XML config string that is passed to edit_config contains all
words in a given match config string.
:param match_config: String containing keywords to be matched
:param exc: Exception to be raised when match is found
:return: Side effect function for the mock ncclient module's
edit_config method.
"""
keywords = match_config.split()
def _side_effect_function(target, config):
if all(word in config for word in keywords):
raise exc
return _side_effect_function
def _is_in_nexus_cfg(self, words):
"""Check if any config sent to Nexus contains all words in a list."""
for call in (self.mock_ncclient.connect.return_value.
edit_config.mock_calls):
configlet = call[2]['config']
if all(word in configlet for word in words):
return True
return False
def _is_in_last_nexus_cfg(self, words):
"""Confirm last config sent to Nexus contains specified keywords."""
last_cfg = (self.mock_ncclient.connect.return_value.
edit_config.mock_calls[-1][2]['config'])
return all(word in last_cfg for word in words)
def _is_vlan_configured(self, vlan_creation_expected=True,
first_vlan_addition=False):
"""Confirm if VLAN was configured or not."""
vlan_created = self._is_in_nexus_cfg(['vlan', 'vlan-name'])
add_appears = self._is_in_last_nexus_cfg(['add'])
# The first VLAN being configured should be done without the
# ADD keyword. Thereafter additional VLANs to be configured
# should be done with the ADD keyword.
add_keyword_expected = not first_vlan_addition
return (self._is_in_last_nexus_cfg(['allowed', 'vlan']) and
vlan_created == vlan_creation_expected and
add_appears == add_keyword_expected)
def _is_vlan_unconfigured(self, vlan_deletion_expected=True):
vlan_deleted = self._is_in_last_nexus_cfg(
['no', 'vlan', 'vlan-id-create-delete'])
return (self._is_in_nexus_cfg(['allowed', 'vlan', 'remove']) and
vlan_deleted == vlan_deletion_expected)
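# A standalone sketch (not part of the original test suite) of the
# config-dependent mocking pattern used by _config_dependent_side_effect()
# and _is_in_nexus_cfg() above, relying only on the module-level mock
# import. The mock stands in for ncclient's edit_config(): it raises only
# when the config string contains all of the chosen keywords, and its call
# history can be searched afterwards. All names and config strings here are
# illustrative.
def _example_config_dependent_mock():
    client = mock.Mock()

    def _fail_on_add(target, config):
        if all(word in config for word in ['allowed', 'vlan', 'add']):
            raise Exception("add keyword rejected")

    client.edit_config.side_effect = _fail_on_add
    # A config without 'add' passes; one with 'add' raises and is retried
    # here without the keyword, mimicking the driver behavior under test.
    client.edit_config(target='running',
                       config='switchport trunk allowed vlan 1000')
    try:
        client.edit_config(target='running',
                           config='switchport trunk allowed vlan add 1001')
    except Exception:
        client.edit_config(target='running',
                           config='switchport trunk allowed vlan 1001')
    # The recorded calls can then be inspected, as in _is_in_last_nexus_cfg().
    last_cfg = client.edit_config.mock_calls[-1][2]['config']
    return 'add' not in last_cfg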
class TestCiscoBasicGet(CiscoML2MechanismTestCase,
test_ml2_plugin.TestMl2BasicGet):
pass
class TestCiscoV2HTTPResponse(CiscoML2MechanismTestCase,
test_ml2_plugin.TestMl2V2HTTPResponse):
pass
class TestCiscoPortsV2(CiscoML2MechanismTestCase,
test_ml2_plugin.TestMl2PortsV2):
@contextlib.contextmanager
def _create_resources(self, name=NETWORK_NAME, cidr=CIDR_1,
device_id=DEVICE_ID_1,
host_id=COMP_HOST_NAME,
expected_failure=False):
"""Create network, subnet, and port resources for test cases.
Create a network, subnet, port and then update the port, yield the
result, then delete the port, subnet and network.
:param name: Name of network to be created.
:param cidr: cidr address of subnetwork to be created.
:param device_id: Device ID to use for port to be created/updated.
:param host_id: Host ID to use for port create/update.
:param expected_failure: Set to True when an update_port_precommit
failure is expected. Results in no actions being taken in
delete_port_pre/postcommit() methods.
"""
with self.network(name=name) as network:
with self.subnet(network=network, cidr=cidr) as subnet:
with self.port(subnet=subnet, cidr=cidr) as port:
data = {'port': {portbindings.HOST_ID: host_id,
'device_id': device_id,
'device_owner': DEVICE_OWNER,
'admin_state_up': True}}
req = self.new_update_request('ports', data,
port['port']['id'])
yield req.get_response(self.api)
if expected_failure:
self._create_deviceowner_mock()
self._delete('ports', port['port']['id'])
self._delete('networks', network['network']['id'])
def _assertExpectedHTTP(self, status, exc):
"""Confirm that an HTTP status corresponds to an expected exception.
        Confirm that an HTTP status which has been returned for a
neutron API request matches the HTTP status corresponding
to an expected exception.
:param status: HTTP status
:param exc: Expected exception
"""
if exc in base.FAULT_MAP:
expected_http = base.FAULT_MAP[exc].code
else:
expected_http = wexc.HTTPInternalServerError.code
self.assertEqual(status, expected_http)
def _mock_config_first_trunk(self):
"""Mock the behavior for the first VLAN addition.
        When the first VLAN is added to the interface, use of the ADD
        keyword should raise an exception stating that ADD cannot be used
        until a VLAN list has been created, and that the VLAN list must
        first be created with a configuration that omits the ADD keyword.
"""
config = "switchport trunk allowed vlan add"
exc = Exception("switchport trunk allowed vlan list is empty, "
"please specify a list using "
"'switchport trunk allowed vlan X' "
"before using the add option")
return (self._patch_ncclient(
'connect.return_value.edit_config.side_effect',
self._config_dependent_side_effect(config, exc)))
def test_create_ports_bulk_emulated_plugin_failure(self):
real_has_attr = hasattr
#ensures the API chooses the emulation code path
def fakehasattr(item, attr):
if attr.endswith('__native_bulk_support'):
return False
return real_has_attr(item, attr)
with mock.patch('__builtin__.hasattr',
new=fakehasattr):
plugin_obj = manager.NeutronManager.get_plugin()
orig = plugin_obj.create_port
with mock.patch.object(plugin_obj,
'_create_port_db') as patched_plugin:
def side_effect(*args, **kwargs):
return self._fail_second_call(patched_plugin, orig,
*args, **kwargs)
patched_plugin.side_effect = side_effect
with self.network() as net:
res = self._create_port_bulk(self.fmt, 2,
net['network']['id'],
'test',
True)
# Expect an internal server error as we injected a fault
self._validate_behavior_on_bulk_failure(
res,
'ports',
wexc.HTTPInternalServerError.code)
def test_create_ports_bulk_native(self):
if self._skip_native_bulk:
self.skipTest("Plugin does not support native bulk port create")
def test_create_ports_bulk_emulated(self):
if self._skip_native_bulk:
self.skipTest("Plugin does not support native bulk port create")
def test_create_ports_bulk_native_plugin_failure(self):
if self._skip_native_bulk:
self.skipTest("Plugin does not support native bulk port create")
ctx = context.get_admin_context()
with self.network() as net:
plugin_obj = manager.NeutronManager.get_plugin()
orig = plugin_obj.create_port
with mock.patch.object(plugin_obj,
'_create_port_db') as patched_plugin:
def side_effect(*args, **kwargs):
return self._fail_second_call(patched_plugin, orig,
*args, **kwargs)
patched_plugin.side_effect = side_effect
res = self._create_port_bulk(self.fmt, 2, net['network']['id'],
'test', True, context=ctx)
# We expect an internal server error as we injected a fault
self._validate_behavior_on_bulk_failure(
res,
'ports',
wexc.HTTPInternalServerError.code)
def test_nexus_enable_vlan_cmd_on_same_host(self):
"""Verify the syntax of the command to enable a vlan on an intf.
Test of the following ml2_conf_cisco_ini config:
[ml2_mech_cisco_nexus:1.1.1.1]
Resource A on host=COMP_HOST_NAME with vlan_id = 1000
Resource B on host=COMP_HOST_NAME with vlan_id = 1001
Confirm that when configuring the first VLAN on a Nexus interface,
the final command string sent to the switch does not contain the
keyword 'add'. The initial attempt will contain 'add' but when
the switch rejects it, the re-attempt shouldn't contain the 'add'.
Confirm that for the second VLAN configured on a Nexus interface,
the command string sent to the switch contains the keyword 'add'
since it is on the same host.
"""
# First vlan should be configured without 'add' keyword and an
# exception should be raised when it is done with the 'add'
# thereby triggering a re-attempt without the 'add'.
with self._mock_config_first_trunk():
with self._create_resources():
self.assertTrue(self._is_vlan_configured(
vlan_creation_expected=True,
first_vlan_addition=True))
self.mock_ncclient.reset_mock()
self.mock_bound_segment.return_value = BOUND_SEGMENT2
# Second vlan should be configured with the 'add' keyword
# when on first host.
with(self._patch_ncclient(
'connect.return_value.edit_config.side_effect',
None)):
with self._create_resources(name=NETWORK_NAME_2,
device_id=DEVICE_ID_2,
cidr=CIDR_2,
host_id=COMP_HOST_NAME):
self.assertTrue(self._is_vlan_configured(
vlan_creation_expected=True,
first_vlan_addition=False
))
# Return to first segment for delete port calls.
self.mock_bound_segment.return_value = BOUND_SEGMENT1
def test_nexus_enable_vlan_cmd_on_different_hosts(self):
"""Verify the syntax of the command to enable a vlan on an intf.
Test of the following ml2_conf_cisco_ini config:
[ml2_mech_cisco_nexus:1.1.1.1]
Resource A on host=COMP_HOST_NAME with vlan_id = 1000
Resource B on host=COMP_HOST_NAME_2 with vlan_id = 1001
Confirm that when configuring the first VLAN on a Nexus interface,
the final command string sent to the switch does not contain the
keyword 'add'. The initial attempt will contain 'add' but when
the switch rejects it, the re-attempt shouldn't contain the 'add'.
Confirm that for the second VLAN configured on a Nexus interface,
the command string sent to the switch does not contain the
keyword 'add' since it is on a different host.
"""
# First vlan should be configured without 'add' keyword and an
# exception should be raised when it is done with the 'add'
# thereby triggering a re-attempt without the 'add'.
with self._mock_config_first_trunk():
with self._create_resources():
self.assertTrue(self._is_vlan_configured(
vlan_creation_expected=True,
first_vlan_addition=True))
self.mock_ncclient.reset_mock()
self.mock_bound_segment.return_value = BOUND_SEGMENT2
# Second vlan should be configured without the 'add' keyword
# when on second host.
with self._create_resources(name=NETWORK_NAME_2,
device_id=DEVICE_ID_2,
cidr=CIDR_2,
host_id=COMP_HOST_NAME_2):
self.assertTrue(self._is_vlan_configured(
vlan_creation_expected=True,
first_vlan_addition=True
))
# Return to first segment for delete port calls.
self.mock_bound_segment.return_value = BOUND_SEGMENT1
def test_ncclient_version_detect(self):
"""Test ability to handle connection to old and new-style ncclient.
We used to require a custom version of the ncclient library. However,
recent contributions to the ncclient make this unnecessary. Our
driver was modified to be able to establish a connection via both
the old and new type of ncclient.
The new style ncclient.connect() function takes one additional
parameter.
The ML2 driver uses this to detect whether we are dealing with an
old or new ncclient installation.
"""
# The code we are exercising calls connect() twice, if there is a
# TypeError on the first call (if the old ncclient is installed).
# The second call should succeed. That's what we are simulating here.
orig_connect_return_val = self.mock_ncclient.connect.return_value
with self._patch_ncclient('connect.side_effect',
[TypeError, orig_connect_return_val]):
with self._create_resources() as result:
self.assertEqual(result.status_int,
wexc.HTTPOk.code)
def test_ncclient_fail_on_second_connect(self):
"""Test that other errors during connect() sequences are still handled.
If the old ncclient is installed, we expect to get a TypeError first,
but should still handle other errors in the usual way, whether they
appear on the first or second call to connect().
"""
with self._patch_ncclient('connect.side_effect',
[TypeError, IOError]):
with self._create_resources() as result:
self._assertExpectedHTTP(result.status_int,
c_exc.NexusConnectFailed)
def test_nexus_connect_fail(self):
"""Test failure to connect to a Nexus switch.
While creating a network, subnet, and port, simulate a connection
failure to a nexus switch. Confirm that the expected HTTP code
is returned for the create port operation.
"""
with self._patch_ncclient('connect.side_effect',
AttributeError):
with self._create_resources() as result:
self._assertExpectedHTTP(result.status_int,
c_exc.NexusConnectFailed)
def test_nexus_vlan_config_two_hosts(self):
"""Verify config/unconfig of vlan on two compute hosts."""
@contextlib.contextmanager
def _create_port_check_vlan(comp_host_name, device_id,
vlan_creation_expected=True):
with self.port(subnet=subnet, fmt=self.fmt) as port:
data = {'port': {portbindings.HOST_ID: comp_host_name,
'device_id': device_id,
'device_owner': DEVICE_OWNER,
'admin_state_up': True}}
req = self.new_update_request('ports', data,
port['port']['id'])
req.get_response(self.api)
self.assertTrue(self._is_vlan_configured(
vlan_creation_expected=vlan_creation_expected,
first_vlan_addition=True))
self.mock_ncclient.reset_mock()
yield
self._delete('ports', port['port']['id'])
# Create network and subnet
with self._mock_config_first_trunk():
with self.network(name=NETWORK_NAME) as network:
with self.subnet(network=network, cidr=CIDR_1) as subnet:
# Create an instance on first compute host
with _create_port_check_vlan(COMP_HOST_NAME, DEVICE_ID_1,
vlan_creation_expected=True):
# Create an instance on second compute host
with _create_port_check_vlan(
COMP_HOST_NAME_2,
DEVICE_ID_2,
vlan_creation_expected=False):
pass
# Instance on second host is now terminated.
# Vlan should be untrunked from port, but vlan should
# still exist on the switch.
self.assertTrue(self._is_vlan_unconfigured(
vlan_deletion_expected=False))
self.mock_ncclient.reset_mock()
# Instance on first host is now terminated.
# Vlan should be untrunked from port and vlan should have
# been deleted from the switch.
self.assertTrue(self._is_vlan_unconfigured(
vlan_deletion_expected=True))
def test_nexus_vm_migration(self):
"""Verify VM (live) migration.
Simulate the following:
Nova informs neutron of live-migration with port-update(new host).
This should trigger two update_port_pre/postcommit() calls.
The first one should only change the current host_id and remove the
binding resulting in the mechanism drivers receiving:
PortContext.original['binding:host_id']: previous value
PortContext.original_bottom_bound_segment: previous value
PortContext.current['binding:host_id']: current (new) value
PortContext.bottom_bound_segment: None
The second one binds the new host resulting in the mechanism
drivers receiving:
PortContext.original['binding:host_id']: previous value
PortContext.original_bottom_bound_segment: None
PortContext.current['binding:host_id']: previous value
PortContext.bottom_bound_segment: new value
"""
# Create network, subnet and port.
with self._create_resources() as result:
# Verify initial database entry.
# Use port_id to verify that 1st host name was used.
binding = nexus_db_v2.get_nexusvm_bindings(VLAN_START,
DEVICE_ID_1)[0]
intf_type, nexus_port = binding.port_id.split(':')
self.assertEqual(nexus_port, NEXUS_INTERFACE)
port = self.deserialize(self.fmt, result)
port_id = port['port']['id']
# Trigger update event to unbind segment.
# Results in port being deleted from nexus DB and switch.
data = {'port': {portbindings.HOST_ID: COMP_HOST_NAME_2}}
self.mock_bound_segment.return_value = None
self.mock_original_bound_segment.return_value = BOUND_SEGMENT1
self.new_update_request('ports', data,
port_id).get_response(self.api)
# Verify that port entry has been deleted.
self.assertRaises(c_exc.NexusPortBindingNotFound,
nexus_db_v2.get_nexusvm_bindings,
VLAN_START, DEVICE_ID_1)
# Trigger update event to bind segment with new host.
self.mock_bound_segment.return_value = BOUND_SEGMENT1
self.mock_original_bound_segment.return_value = None
self.new_update_request('ports', data,
port_id).get_response(self.api)
# Verify that port entry has been added using new host name.
# Use port_id to verify that 2nd host name was used.
binding = nexus_db_v2.get_nexusvm_bindings(VLAN_START,
DEVICE_ID_1)[0]
intf_type, nexus_port = binding.port_id.split(':')
self.assertEqual(nexus_port, NEXUS_INTERFACE_2)
def test_nexus_config_fail(self):
"""Test a Nexus switch configuration failure.
While creating a network, subnet, and port, simulate a nexus
switch configuration error. Confirm that the expected HTTP code
is returned for the create port operation.
"""
with self._patch_ncclient(
'connect.return_value.edit_config.side_effect',
AttributeError):
with self._create_resources() as result:
self._assertExpectedHTTP(result.status_int,
c_exc.NexusConfigFailed)
def test_nexus_extended_vlan_range_failure(self):
"""Test that extended VLAN range config errors are ignored.
Some versions of Nexus switch do not allow state changes for
the extended VLAN range (1006-4094), but these errors can be
ignored (default values are appropriate). Test that such errors
are ignored by the Nexus plugin.
"""
def mock_edit_config_a(target, config):
if all(word in config for word in ['state', 'active']):
raise Exception("Can't modify state for extended")
with self._patch_ncclient(
'connect.return_value.edit_config.side_effect',
mock_edit_config_a):
with self._create_resources() as result:
self.assertEqual(result.status_int, wexc.HTTPOk.code)
def mock_edit_config_b(target, config):
if all(word in config for word in ['no', 'shutdown']):
raise Exception("Command is only allowed on VLAN")
with self._patch_ncclient(
'connect.return_value.edit_config.side_effect',
mock_edit_config_b):
with self._create_resources() as result:
self.assertEqual(result.status_int, wexc.HTTPOk.code)
def test_nexus_vlan_config_rollback(self):
"""Test rollback following Nexus VLAN state config failure.
Test that the Cisco Nexus plugin correctly deletes the VLAN
on the Nexus switch when the 'state active' command fails (for
a reason other than state configuration change is rejected
for the extended VLAN range).
"""
vlan_state_configs = ['state active', 'no shutdown']
for config in vlan_state_configs:
with self._patch_ncclient(
'connect.return_value.edit_config.side_effect',
self._config_dependent_side_effect(config, ValueError)):
with self._create_resources() as result:
# Confirm that the last configuration sent to the Nexus
# switch was deletion of the VLAN.
self.assertTrue(
self._is_in_last_nexus_cfg(['<no>', '<vlan>'])
)
self._assertExpectedHTTP(result.status_int,
c_exc.NexusConfigFailed)
def test_nexus_host_not_configured(self):
"""Test handling of a NexusComputeHostNotConfigured exception.
Test the Cisco NexusComputeHostNotConfigured exception by using
a fictitious host name during port creation.
"""
with self._create_resources(host_id='fake_host',
expected_failure=True) as result:
self._assertExpectedHTTP(result.status_int,
c_exc.NexusComputeHostNotConfigured)
def test_nexus_missing_fields(self):
"""Test handling of a NexusMissingRequiredFields exception.
Test the Cisco NexusMissingRequiredFields exception by using
empty device_id value during port creation.
"""
with self._create_resources(device_id='',
expected_failure=True) as result:
self._assertExpectedHTTP(result.status_int,
c_exc.NexusMissingRequiredFields)
class TestCiscoNetworksV2(CiscoML2MechanismTestCase,
test_ml2_plugin.TestMl2NetworksV2):
def test_create_networks_bulk_emulated_plugin_failure(self):
real_has_attr = hasattr
def fakehasattr(item, attr):
if attr.endswith('__native_bulk_support'):
return False
return real_has_attr(item, attr)
plugin_obj = manager.NeutronManager.get_plugin()
orig = plugin_obj.create_network
#ensures the API choose the emulation code path
with mock.patch('__builtin__.hasattr',
new=fakehasattr):
with mock.patch.object(plugin_obj,
'_create_network_db') as patched_plugin:
def side_effect(*args, **kwargs):
return self._fail_second_call(patched_plugin, orig,
*args, **kwargs)
patched_plugin.side_effect = side_effect
res = self._create_network_bulk(self.fmt, 2, 'test', True)
LOG.debug("response is %s" % res)
# We expect an internal server error as we injected a fault
self._validate_behavior_on_bulk_failure(
res,
'networks',
wexc.HTTPInternalServerError.code)
def test_create_networks_bulk_native_plugin_failure(self):
if self._skip_native_bulk:
self.skipTest("Plugin does not support native bulk network create")
plugin_obj = manager.NeutronManager.get_plugin()
orig = plugin_obj.create_network
with mock.patch.object(plugin_obj,
'_create_network_db') as patched_plugin:
def side_effect(*args, **kwargs):
return self._fail_second_call(patched_plugin, orig,
*args, **kwargs)
patched_plugin.side_effect = side_effect
res = self._create_network_bulk(self.fmt, 2, 'test', True)
# We expect an internal server error as we injected a fault
self._validate_behavior_on_bulk_failure(
res,
'networks',
wexc.HTTPInternalServerError.code)
class TestCiscoSubnetsV2(CiscoML2MechanismTestCase,
test_ml2_plugin.TestMl2SubnetsV2):
def test_create_subnets_bulk_emulated_plugin_failure(self):
real_has_attr = hasattr
#ensures the API choose the emulation code path
def fakehasattr(item, attr):
if attr.endswith('__native_bulk_support'):
return False
return real_has_attr(item, attr)
with mock.patch('__builtin__.hasattr',
new=fakehasattr):
plugin_obj = manager.NeutronManager.get_plugin()
orig = plugin_obj.create_subnet
with mock.patch.object(plugin_obj,
'_create_subnet_db') as patched_plugin:
def side_effect(*args, **kwargs):
self._fail_second_call(patched_plugin, orig,
*args, **kwargs)
patched_plugin.side_effect = side_effect
with self.network() as net:
res = self._create_subnet_bulk(self.fmt, 2,
net['network']['id'],
'test')
# We expect an internal server error as we injected a fault
self._validate_behavior_on_bulk_failure(
res,
'subnets',
wexc.HTTPInternalServerError.code)
def test_create_subnets_bulk_native_plugin_failure(self):
if self._skip_native_bulk:
self.skipTest("Plugin does not support native bulk subnet create")
plugin_obj = manager.NeutronManager.get_plugin()
orig = plugin_obj.create_subnet
with mock.patch.object(plugin_obj,
'_create_subnet_db') as patched_plugin:
def side_effect(*args, **kwargs):
return self._fail_second_call(patched_plugin, orig,
*args, **kwargs)
patched_plugin.side_effect = side_effect
with self.network() as net:
res = self._create_subnet_bulk(self.fmt, 2,
net['network']['id'],
'test')
# We expect an internal server error as we injected a fault
self._validate_behavior_on_bulk_failure(
res,
'subnets',
wexc.HTTPInternalServerError.code)
#!/usr/bin/env python
"""
__author__ = "Axelle Apvrille"
__status__ = "Beta"
__copyright__ = "Copyright 2015, Fortinet, Fortiguard Labs"
__license__ = "The MIT License (MIT)"
This utility helps work with Fitbit fitness trackers
Connect the USB dongle to your host, and place your tracker nearby.
Use responsibly.
Use at your own risk!!!
"""
import usb.core
import usb.util
import sys
import time
import argparse
import random
VID = 0x2687
PID = 0xfb01
device = 0 # Global variable for USB device
mytracker = None # Global variable for selected tracker
# USB connection -----------------------------------------------
def connectUSB(VID=0x2687, PID=0xfb01):
    '''Connects to the USB device and returns it'''
dev = usb.core.find(idVendor=VID, idProduct=PID)
if dev is None:
raise ValueError('Device not found')
return dev
def detach(dev, interface=0, show_packets=False):
'''Detach the interface'''
if dev.is_kernel_driver_active(interface):
if show_packets:
print "Detaching kernel driver for interface %d" % (interface)
dev.detach_kernel_driver(interface)
def unclaim(dev, interface=0, show_packets=False):
if show_packets:
print "Unclaiming interface %d " % (interface)
usb.util.release_interface(dev, interface)
def unclaimFitbit(show_packets=False):
'''Unclaim a fitbit device'''
global device
device = connectUSB()
for interface in range(0,2):
detach(device, interface, show_packets)
unclaim(device, interface, show_packets)
# Message display ----------------------------------------
def displayLongHex(buffer, bytes_per_line=16):
'''Formats an hexadecimal string with bytes per line at most
Example:
28 02 00 00 01 00 44 04 00 00 d2 c0 56 2e 15 07
19 7f 2b f6 f6 49 f9 f2 a5 fc 88 38 18 a6 9c 50
fb 01 00 00
'''
hexstring = ''.join('{:02x} '.format(x) for x in buffer)
newstring = ''
hexstring_len = len(hexstring)
pos = 0
while pos < hexstring_len:
maxpos = pos + (3*bytes_per_line)
if maxpos >= hexstring_len:
maxpos = hexstring_len
newstring = newstring + hexstring[pos:maxpos] + '\n'
pos = pos + (3*bytes_per_line)
return newstring
def displayCommandId(packet, endpoint):
assert len(packet) > 1, "Packet length too small"
tag = ''
if packet[0] == 0x20 and packet[1] == 0x01:
return 'Status Message'
else:
if endpoint == 0x01:
if packet[0] == 0xc0 and packet[1] == 0x04:
tag = 'Handle Secret Req'
if packet[0] == 0xc0 and packet[1] == 0x05:
tag = 'Alert User Req'
if packet[0] == 0xc0 and packet[1] == 0x06:
tag = 'Display Code Req'
if packet[0] == 0xc0 and packet[1] == 0x09:
tag = 'Echo Req'
if packet[0] == 0xc0 and packet[1] == 0x0a:
tag = 'Init AirLink Req'
if packet[0] == 0xc0 and packet[1] == 0x10:
tag = 'Get Dump Req'
if packet[0] == 0xc0 and packet[1] == 0x24:
tag = 'Data Transmit Req'
if endpoint == 0x81:
if packet[0] == 0xc0 and packet[1] == 0x02:
tag = 'Ack Resp'
if packet[0] == 0xc0 and packet[1] == 0x03:
tag = 'Error Resp'
if packet[0] == 0xc0 and packet[1] == 0x05:
tag = 'Alert User Resp'
if packet[0] == 0xc0 and packet[1] == 0x09:
tag = 'Echo Resp'
if packet[0] == 0xc0 and packet[1] == 0x12:
tag = '1st block ack Resp'
if packet[0] == 0xc0 and packet[1] == 0x13:
tag = 'Next block ack Resp'
if packet[0] == 0xc0 and packet[1] == 0x14:
tag = 'AirLink Init Resp'
if packet[0] == 0xc0 and packet[1] == 0x0b:
tag = 'Toggle Tx Resp'
if packet[0] == 0xc0 and packet[1] == 0x41:
tag = 'Start Dump Resp'
if packet[0] == 0xc0 and packet[1] == 0x42:
tag = 'End Dump Resp'
if endpoint == 0x02:
if packet[1] == 0x06:
tag = 'Establish Link Req'
if packet[1] == 0x08:
tag = 'Toggle Tx Req'
if packet[1] == 0x04:
tag = 'Start Discovery Req'
if packet[1] == 0x05:
tag = 'Cancel Discovery Req'
if packet[1] == 0x01:
tag = 'Get Dongle Info Req'
if packet[1] == 0x07:
tag = 'Terminate AirLink'
if endpoint == 0x82:
if packet[1] == 0x04:
tag = 'Establish Link Resp'
if packet[1] == 0x03:
if (len(packet) < 19):
tag = 'Bad discovered tracker resp'
else:
tag = 'Discovered Tracker Resp'
if packet[1] == 0x02:
tag = 'Finished Discovery Resp'
if packet[1] == 0x06:
tag = 'AirLink Test Resp'
if packet[1] == 0x08:
tag = 'Dongle Info Resp'
return tag
def displayPacket(packet, endpoint):
'''Displays status messages as strings if possible,
otherwise, displays the message as bytes'''
assert len(packet) > 1, "Packet length too small"
if packet[0] == 0x20 and packet[1] == 0x01:
print "[%02x] Status Message : %s" % (endpoint, ''.join(chr(i) for i in packet[2:]))
else:
tag=displayCommandId(packet, endpoint)
if endpoint == 0x82 and packet[1] == 0x03 and len(packet) >= 19:
tracker = Tracker(packet)
print tracker
print "[%02x] %25s: %s" % (endpoint, tag, ''.join('{:02x} '.format(x) for x in packet))
def displayUsefulContent(packet, endpoint):
    '''Returns a string showing only the useful-length part of the packet'''
ul = 0
if packet[0] == 0xc0:
ul = packet[0x20-1]
else:
ul = packet[0]
tag = displayCommandId(packet, endpoint)
return "[%02x] %25s: '%s'" % (endpoint, tag, ''.join(map(chr, packet[2:ul])))
#
def readResponse(endpoint=0x82, length=32, timeout=2000, show_packets=False):
'''Reads data of given length on USB endpoint.
    Waits at most timeout milliseconds for data; if nothing is read, the
    timeout exception is caught and a warning is displayed.
Assumes USB device is connected and set.
'''
assert device != 0, "Please call connectUSB() first"
response=None
try:
response = device.read(endpoint, length, timeout)
if show_packets:
displayPacket(response, endpoint)
except usb.core.USBError as usbexception:
if usbexception.errno != 110: # TIMEOUT
raise
else:
print "Warning: no response (timeout=%d)" % (timeout)
return response
def exhaustPipe(endpoint=0x82,length=32,timeout=2000, show_packets=False):
'''Reads incoming data packets of given length on USB endpoint.
    Loops reading until nothing more arrives within timeout milliseconds.
Assumes USB device (device) is connected and set'''
assert device != 0, "Please call connectUSB() first"
fullResponse = None
while True:
try:
response = device.read(endpoint, length, timeout)
if response is None:
break
else:
if fullResponse is None:
fullResponse = []
fullResponse.extend(response)
if show_packets:
displayPacket(response, endpoint)
except usb.core.USBError as usbexception:
if usbexception.errno != 110: # TIMEOUT
raise
# else
# we have exhausted the pipe
break
return fullResponse
def sendData(endpoint=0x02, data=0, timeout=500, show_packets=False):
'''Writes data on USB endpoint
Assumes USB device (device) is connected and set
'''
assert device != 0, "Please call connectUSB() first"
try:
if show_packets:
displayPacket(data, endpoint)
device.write(endpoint, data, timeout)
except usb.core.USBError:
print "sendData(): Resource busy usually means you need to unclaim the device"
# --------------------------------- Classes --------------------
class Tracker(object):
    @classmethod
    def fromFields(cls, id, addr, rssi, attr, suuid):
        '''Alternate constructor from already-parsed tracker fields.'''
        tracker = cls.__new__(cls)
        tracker.trackerid = id
        tracker.addrtype = addr
        tracker.rssi = rssi
        tracker.attr = attr
        tracker.suuid = suuid
        return tracker
def __init__(self, packet):
assert packet[0] != 0xc0, "This is not a dongle message"
assert packet[1] == 0x03, "This is not a discovered tracker response"
assert len(packet) >= 19, "Bad length for discovered tracker response"
self.trackerid = packet[2:8]
self.addrtype = packet[8]
self.rssi = packet[9]
self.attr = packet[10:12]
self.suuid = packet[17:19]
def __str__(self):
return "TrackerId: %s AddrType: %d RSSI: %d Attr: %s SUUID: %s" % (''.join('{:02x} '.format(x) for x in self.trackerid), self.addrtype, self.rssi, ''.join('{:02x} '.format(x) for x in self.attr), ''.join('{:02x} '.format(x) for x in self.suuid))
class megadump(object):
'''This class represents a megadump packet '''
type = 0x0d
def __init__(self, dump):
assert len(dump)>16, "This is not a valid dump"
self.device_type = dump[0]
self.device_version = dump[1]
self.seq_counter = dump[6:10]
self.model = dump[10:16]
self.encrypted = dump[16:]
def getDeviceName(self):
options = { 0x28 : "Flex",
0xf4 : "Zip",
0x26 : "One" }
if self.device_type in options:
return options[self.device_type]
return 'unknown'
def __str__(self):
return "Device Type: %s\nVersion : %d\nSeq Counter: %s\nModel : %s\nEncrypted blob:\n%s" % (self.getDeviceName(), self.device_version, ''.join('{:02x} '.format(x) for x in self.seq_counter), ''.join('{:02x} '.format(x) for x in self.model), displayLongHex(self.encrypted))
class minidump(object):
''' This class represents a microdump/minidump packet'''
type = 0x03
def __init__(self, dump):
assert len(dump) == 0x7b, "Invalid minidump length"
assert dump[0] == 0x30, "This is not a minidump"
self.seq_counter = dump[6:10]
self.model = dump[10:16]
self.encrypted = dump[16:121]
def __str__(self):
return "Seq Counter: %s\nModel : %s\nEncrypted blob:\n%s" % (''.join('{:02x} '.format(x) for x in self.seq_counter), ''.join('{:02x} '.format(x) for x in self.model), displayLongHex(self.encrypted))
# --------------------------------- Dongle messages --------------------
def dongleReset(timeout=500, show_packets=False, display=False):
'''Resets/disconnects the dongle.
    Usually, the dongle replies with a Cancel Discovery information message
    and possibly with a Terminate Link.
'''
if display:
print "resetting dongle..."
sendData(endpoint=0x02, data=[0x02, 0x02], timeout=timeout, show_packets=show_packets)
# cancel discovery
response = device.read(0x82, 32, timeout)
if show_packets:
displayPacket(response, 0x82)
# we might receive a Terminate Link, but this is optional
# we exhaust the pipe to be in a clear state
exhaustPipe(endpoint=0x82, show_packets=show_packets, timeout=4000)
def cancelDiscovery(timeout=500, show_packets=False):
'''Sends a cancel discovery message'''
if show_packets:
print "Cancel Discovery..."
sendData(endpoint=0x02, data=[0x02, 0x05], timeout=timeout, show_packets=show_packets)
# we expect a cancel discovery status message
readResponse(show_packets=show_packets)
def discover(timeout=500, show_packets=False, cancel=True):
'''Sends a device discovery message
Waits for a start discovery information response
And responses from trackers.
When all trackers have answered, cancel discovery.
'''
listOfTrackers = []
# UUID of Fitbit
# then service 0xfb00, 0xfb001, 0xfb002
data = [ 0x1a,0x04,0xba,0x56,0x89,0xa6,0xfa,0xbf,0xa2,0xbd,0x01,0x46,0x7d,0x6e,0x00,0x00,0xab,0xad,0x00,0xfb,0x01,0xfb,0x02,0xfb,0xa0,0x0f ]
sendData(endpoint=0x02, data=data, timeout=timeout, show_packets=show_packets)
# read responses
# we should receive: StartDiscovery, messages from trackers, and
# amount of trackers found
while True:
response = device.read(0x82, 32, 4000)
if response is None:
if show_packets:
print "Warning: we have exhausted the pipe"
break
if show_packets:
displayPacket(response, 0x82)
if response[1] == 0x02:
if show_packets:
print "Amount of trackers found: %d " % (response[2])
break
if response[0] != 0xc0 and response[1] == 0x03:
tracker = Tracker(response)
if show_packets:
print tracker
listOfTrackers.append(tracker)
# in most cases, you want to properly finish the discovery
if cancel:
cancelDiscovery(timeout, show_packets=show_packets)
return listOfTrackers
def togglePipe(value=0, timeout=500, show_packets=False):
'''Send Toggle Tx Pipe
value is 0 for disable, 1 for enable
'''
if show_packets:
print "Toggle Pipe..."
data = [ 0x03, 0x08 ]
data.extend([value])
sendData(endpoint=0x02, data=data, timeout=timeout, show_packets=show_packets)
# c0 0b
readResponse(endpoint=0x81,timeout=10000,show_packets=show_packets)
def establishLink(trackerId, addrType, serviceUuid, timeout=500, show_packets=False):
'''Sends an Establish Link request to a given tracker, and reads the response'''
if show_packets:
print "Establish Link with tracker %s and serviceUUID=%s" % (trackerId, serviceUuid)
endpoint = 0x02
data = [ 0x00, 0x06 ]
data.extend(list(bytearray.fromhex(trackerId)))
data.extend([addrType])
data.extend(list(bytearray.fromhex(serviceUuid)))
data[0] = len(data) # 0B
sendData(endpoint, data, timeout, show_packets)
# in this one, the tracker responds 'EstablishLink called'
readResponse(show_packets=show_packets) # 20 01 EstablishLink called
# we need to wait longer for ACK
readResponse(timeout=5000, show_packets=show_packets) # EstablishLink ack
# we need to wait even longer for this one
readResponse(timeout=8000, show_packets=show_packets) # GAP_LINK_ESTABLISHED_EVENT
# 02 07 Now it is established!
readResponse(show_packets=show_packets)
def terminateAirLink(timeout=500, show_packets=False):
'''Terminates the air link, reads the potential responses'''
sendData(endpoint=0x02, data=[0x02, 0x07], timeout=timeout, show_packets=show_packets)
exhaustPipe(show_packets=show_packets)
# ------------------------------------- Tracker messages ---------------------------
def prepareTrackerPacket(id=0x01, data=[0x00]*29, useful_len=2):
'''Prepares a tracker packet c0 ...
Expands payload with trailing 0x00 if necessary
The useful packet length must include the length of C0 id so
it is payload length + 2.
This does not send nor print the packet. It only returns it.
'''
assert useful_len <= 0xff, "Implementation does not support length on more than a byte"
packet = [0xc0, id]
packet.extend(data)
if len(data) < 29:
packet.extend([0x00] * (29-len(data)))
packet.extend([useful_len])
return packet
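# A minimal sketch (illustration only, never called by the tool) of the framing
# produced by prepareTrackerPacket(): header c0 + id, payload padded with 0x00
# up to 29 bytes, then a trailing useful-length byte.
def _examplePrepareEchoPacket():
    '''Hypothetical helper, for illustration only.'''
    # two payload bytes -> c0 09 41 42, 27 zero bytes, useful length 0x04
    pkt = prepareTrackerPacket(id=0x09, data=[0x41, 0x42], useful_len=4)
    assert len(pkt) == 32 and pkt[0] == 0xc0 and pkt[-1] == 0x04
    return pkt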
def initAirLink(timeout=500, show_packets=False):
'''Init Air Link message'''
if show_packets:
print "Init Air Link..."
#data = [ 0xc0, 0x0a, 0x0a, 0x00, 0x06, 0x00, 0x06, 0x00, 0x00, 0x00, 0xc8, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0c ]
#sendData(endpoint=0x01,data=data,timeout=timeout, show_packets=show_packets)
data = [ 0x0a, 0x00, 0x06, 0x00, 0x06, 0x00, 0x00, 0x00, 0xc8, 0x00 ]
packet = prepareTrackerPacket(0x0a, data, useful_len=0x0c)
sendData(endpoint=0x01, data=packet, timeout=timeout, show_packets=show_packets)
# AirLink test response up: 08 06 00 00 00 0xc8...
readResponse(endpoint=0x82,timeout=10000, show_packets=show_packets)
# AirLink is initialized c0 14 0c ...
readResponse(endpoint=0x81, show_packets=show_packets)
def getDump(dumptype=0x0d, timeout=500,show_packets=False):
'''
Sends a request for a given dumptype
and reads the answer until it has retrieved the full dump
Returns it.
'''
packet = prepareTrackerPacket(id=0x10, data=[dumptype], useful_len=3)
sendData(endpoint=0x01, data=packet, timeout=timeout, show_packets=show_packets)
dump = []
while True:
response = device.read(0x81, 32, 2000)
if response is None:
if show_packets:
print "Warning: we have exhausted the pipe"
break
if not (response[0] == 0xc0 and response[1] == 0x41) and not (response[0] == 0xc0 and response[1] == 0x42):
            # the Start Dump (c0 41) and End Dump (c0 42) markers are not part
            # of the dump itself, so only the data packets are accumulated
dump.extend(response[:response[31]])
if show_packets:
displayPacket(response, 0x81)
if response[0] == 0xc0 and response[1] == 0x42:
if show_packets:
print "End of Dump packet"
break
return dump
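# Dump transfer sketch (inferred from the getDump() loop above): the tracker
# answers the 0x10 request with a Start Dump marker (c0 41 ...), a series of
# 32-byte data packets whose useful length sits in byte 31, and an End Dump
# marker (c0 42 ...) that terminates the loop; only the data packets are kept.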
def echo(data=[0x0],timeout=500,show_packets=False):
'''According to my research, echo is 0x09, despite
http://samdmarshall.com/blog/fitbit_re.html
It is consistent with
https://bitbucket.org/benallard/galileo/wiki/Communicationprotocol
Sends an Echo Request and reads the response.
'''
if show_packets:
print "Sending Echo..."
data = prepareTrackerPacket(id=0x09, data=data,useful_len=len(data)+2)
sendData(endpoint=0x01,data=data,timeout=timeout,show_packets=show_packets)
response = exhaustPipe(endpoint=0x81, timeout=timeout, show_packets=show_packets)
if response is not None and len(response) >= 0x20:
ul = response[0x20-1]
print "Echo Message: %s" % (''.join(map(chr, response[2:ul])))
# ---------------- Helper funcs -------------------------------
def getAirLink(show_packets=False):
'''A helper func that resets the link and re-initializes air link'''
global mytracker
if mytracker is None:
if show_packets:
"Trying automatic selection of tracker"
dongleReset(show_packets=show_packets)
listOfTracker = discover(show_packets=show_packets)
        assert len(listOfTracker) == 1, "We don't know which tracker to establish link to"
mytracker = listOfTracker[0]
print "Establishing link with %s..." % (mytracker)
establishLink(show_packets=show_packets, trackerId=''.join('{:02x}'.format(x) for x in mytracker.trackerid), addrType=mytracker.addrtype, serviceUuid=''.join('{:02x}'.format(x) for x in mytracker.suuid))
togglePipe(value=1, show_packets=show_packets)
initAirLink(show_packets=show_packets)
def sendExhaustReinit(data=[], send_timeout=500, read_timeout=10000,show_packets=False):
'''
1. re-init air link with tracker
2. send data to tracker using send_timeout
3. exhaust pipe using read_timeout
'''
getAirLink(show_packets=False)
sendData(endpoint=0x01,data=data,timeout=send_timeout, show_packets=show_packets)
response = exhaustPipe(endpoint=0x81, show_packets=show_packets, timeout=read_timeout)
if response is not None:
print "THERE IS A RESPONSE !"
# --------------------- "Hacks" --------------------------
def getRandom(seed=None, get_air_link=True, show_packets=False):
    '''Using the tracker like a random number generator.
    There is no guarantee this will provide good entropy.'''
    if get_air_link:
        getAirLink(False)
    # copy the seed so the caller's list (or a shared default) is never mutated
    payload = list(seed) if seed is not None else []
    # the payload needs to be at least 8 bytes long; pad with the index value
    for i in range(len(payload), 8):
        payload.append(i)
assert len(payload) >= 8, "payload is too small"
# let's send the client challenge
useful_len = 2 + len(payload)
packet = prepareTrackerPacket(id=0x50, data=payload, useful_len=useful_len)
sendData(endpoint=0x01,data=packet,timeout=500,show_packets=show_packets)
# tracker is expected to respond with a tracker challenge
response = exhaustPipe(endpoint=0x81, timeout=2000, show_packets=show_packets)
# the random part is 8 byte long
if show_packets:
print ''.join('{:02x} '.format(x) for x in response[2:10])
return response[2:10]
# Functions called by the menu ------------------------------
def doDiscover(show_packets=False):
print "Discovering trackers nearby..."
dongleReset(show_packets=show_packets)
listOfTracker = discover(show_packets=show_packets)
for t in listOfTracker:
print t
def selectTracker(show_packets=False):
print "Getting list of available trackers..."
dongleReset(show_packets=show_packets)
listOfTracker = discover(show_packets=show_packets)
if (len(listOfTracker) > 0):
num = 1
for t in listOfTracker:
print "%d- %s" % (num, t)
num += 1
try:
choice = int(raw_input("Select tracker's num: "))
except ValueError:
print "Please enter a number!"
quit()
assert choice >= 1 and choice <= len(listOfTracker), "Please select a valid tracker"
global mytracker
mytracker = listOfTracker[choice-1]
def getMegadump(show_packets=False):
print "Initializing Air Link..."
getAirLink(show_packets=show_packets)
print "Getting tracker data..."
packet = getDump(megadump.type, show_packets=show_packets)
print "Received tracker data: "
print megadump(packet)
def getMinidump(show_packets=False):
print "Initializing Air Link..."
getAirLink(show_packets=show_packets)
print "Getting tracker firmware..."
packet = getDump(minidump.type, show_packets=show_packets)
print "Received tracker firmware: "
print minidump(packet)
def doRng(show_packets=False):
print "Initializing air link..."
random_buffer = getRandom(get_air_link=True, show_packets=show_packets)
print "Getting random data..."
print ''.join('{:02x} '.format(x) for x in random_buffer)
for i in range(0, 100):
random_buffer = getRandom(get_air_link=False, show_packets=show_packets)
print ''.join('{:02x} '.format(x) for x in random_buffer)
def doDongleInfo(show_packets=False):
'''Sends a Get Dongle Information request
Reads the Get Dongle Information response - waiting at most for timeout seconds
Displays the dongle version, MAC address and response packet
Returns the response packet.
'''
print "Get Dongle Info :"
# send Get Dongle Info Request
data = [ 0x02, 0x01 ]
sendData(endpoint=0x02, data=data, timeout=500, show_packets=show_packets)
# read Get Dongle Info Response
response = device.read(0x82, 32, 500)
if show_packets:
displayPacket(response, 0x82)
assert len(response) > 10, "Bad Dongle Info Response!"
maj = response[2]
min = response[3]
mac = response[4:10]
print "Dongle : version %d.%d" % (maj, min)
print "Dongle MAC address: %s " % (':'.join('{:02x}'.format(x) for x in mac))
def doDongleStatus(show_packets=False):
print "Get Dongle Status :"
# send Get Dongle Status
data = [ 0x02, 0x11 ]
sendData(endpoint=0x02, data=data, timeout=500, show_packets=show_packets)
# read responses
scan = False
disc = False
ble = False
    for i in range(0, 6):
        response = readResponse(endpoint=0x82, show_packets=show_packets)
        if response is None:
            # readResponse() timed out: nothing more to read
            break
        if response[0] == 0x20 and response[1] == 0x01:
ul = response[0]
message = ''.join(map(chr, response[2:ul]))
if scan:
print "Scan state\t\t: %s" % (message)
if disc:
print "Service discovery state\t: %s" % (message)
if ble:
print "Bluetooth state\t\t: %s" % (message)
if message.startswith('scanInProgress:'):
scan = True
else:
scan = False
if message.startswith('svcDiscoveryState:'):
disc = True
else:
disc = False
if message.startswith('dongleBLEState:'):
ble = True
else:
ble = False
#print "scan %d disc %d ble %d " % (scan, disc, ble)
exhaustPipe(endpoint=0x82, show_packets=show_packets)
def doEcho(show_packets=False):
text = raw_input("Enter short string to echo: ");
text = text[:20]
print "Initializing air link with tracker..."
getAirLink(show_packets=False)
data = []
for i in range(0, len(text)):
data.extend([ord(text[i])])
print "Sending echo with '%s'" % (text)
echo(data=data, show_packets=show_packets)
def doHandleSecret(show_packets=False, display=True):
'''Testing the command Handle Secret'''
print "Initializing air link with tracker..."
getAirLink(show_packets=False)
if display:
print "Sending Handle Secret - Display..."
else:
print "Sending Handle Secret - Clear..."
data = prepareTrackerPacket(id=0x04, data=[display],useful_len=3)
sendData(endpoint=0x01,data=data,show_packets=show_packets)
response = exhaustPipe(endpoint=0x81, show_packets=show_packets)
print displayUsefulContent(response, endpoint=0x81)
def doAlert(show_packets=False):
print "Initializing air link with tracker..."
getAirLink(show_packets=False)
print "Sending Alert User..."
data = prepareTrackerPacket(id=0x05, data=[],useful_len=2)
sendData(endpoint=0x01,data=data,show_packets=show_packets)
response = exhaustPipe(endpoint=0x81, show_packets=show_packets)
print displayUsefulContent(response, endpoint=0x81)
def doDisplayCode(show_packets=False, code='1234'):
print "Initializing air link with tracker..."
getAirLink(show_packets=False)
print "Sending Display Code: %s..." % (code)
data = []
for i in range(0, len(code)):
data.extend([ord(code[i])])
packet = prepareTrackerPacket(id=0x06, data=data,useful_len=len(data)+2)
sendData(endpoint=0x01,data=packet,show_packets=show_packets)
response = exhaustPipe(endpoint=0x81, show_packets=show_packets)
print displayUsefulContent(response, endpoint=0x81)
def doReset(show_packets=False):
print "Resetting dongle..."
dongleReset(show_packets=show_packets)
def doQuit(show_packets=False):
print "Bye!"
quit()
# User Interface ---------------------------------------------------
def get_arguments():
'''Read arguments for the program and returns the ArgumentParser'''
parser = argparse.ArgumentParser(description='Standalone tool to talk to the Fitbit Flex', prog='talk2flex')
parser.add_argument('-v', '--verbose', help='display packets and various debug messages', action='store_true')
parser.add_argument('-o','--output', help='output file', action='store')
args = parser.parse_args()
return args
def displayMenu(show_packets=False):
if show_packets:
print "displayMenu()"
print "=== talk2flex - a FitBit Flex Linux utility tool ===";
print "Dongle commands:"
print "1- Unclaim dongle"
print "2- Get dongle info"
print "3- Get dongle status"
print "Tracker commands: "
print "5- Detect trackers"
print "6- Select tracker"
print "7- Get tracker data"
print "8- Get firmware data"
print "9- Echo"
print "10- Handle Secret"
print "11- Alert"
print "12- Display Code"
print "14- RNG"
print "Misc: "
print "15- Reset"
print "16- Quit"
try:
response = int(raw_input("Your choice? "))
except ValueError:
print "Please enter a number!"
quit()
global device
if (device == 0):
device = connectUSB()
actions = { 1 : unclaimFitbit,
2: doDongleInfo,
3: doDongleStatus,
5: doDiscover,
6: selectTracker,
7: getMegadump,
8: getMinidump,
9: doEcho,
10: doHandleSecret,
11: doAlert,
12: doDisplayCode,
14: doRng,
15: doReset,
16: doQuit,
}
assert response in actions, "Unavailable choice"
actions[response](show_packets=show_packets)
# Main ---------------------------
if __name__ == "__main__":
args = get_arguments()
while True:
displayMenu(show_packets=args.verbose)
|
|
'''Actors communicate with each other by sending and receiving messages.
The :mod:`pulsar.async.mailbox` module implements the message passing layer
via a bidirectional socket connections between the :class:`.Arbiter`
and any :class:`.Actor`.
Message sending is asynchronous and safe, the message is guaranteed to
eventually reach the recipient, provided that the recipient exists.
The implementation details are outlined below:
* Messages are sent via the :func:`.send` function, which is a proxy for
the actor :meth:`~.Actor.send` method.
Here is how you ping actor ``abc`` in a coroutine::
from pulsar import send
async def example():
result = await send('abc', 'ping')
* The :class:`.Arbiter` :attr:`~pulsar.Actor.mailbox` is a :class:`.TcpServer`
accepting connections from remote actors.
* The :attr:`.Actor.mailbox` is a :class:`.MailboxClient` of the arbiter
mailbox server.
* When an actor sends a message to another actor, the arbiter mailbox behaves
as a proxy server by routing the message to the targeted actor.
* Communication is bidirectional and there is **only one connection** between
the arbiter and any given actor.
* Messages are encoded and decoded using the unmasked websocket protocol
implemented in :func:`.frame_parser`.
* If, for some reason, the connection between an actor and the arbiter
  gets broken, the actor will eventually stop running and be garbage collected.
Implementation
=========================
For the curious this is how the internal protocol is implemented
Protocol
~~~~~~~~~~~~
.. autoclass:: MailboxProtocol
:members:
:member-order: bysource
Client
~~~~~~~~~~~~
.. autoclass:: MailboxClient
:members:
:member-order: bysource
'''
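# A short sketch (not part of pulsar itself) of what the ``send('abc', 'ping')``
# example above puts on the wire: the command is wrapped in a ``Message`` whose
# ``data`` dict is pickled and encoded as an unmasked websocket frame by
# ``frame_parser`` before ``MailboxProtocol._write`` hands it to the transport;
# the arbiter decodes it in ``data_received`` and routes it to the target actor.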
import socket
import pickle
import asyncio
from collections import namedtuple
from pulsar import ProtocolError, CommandError
from pulsar.utils.internet import nice_address
from pulsar.utils.websocket import frame_parser
from pulsar.utils.string import gen_unique_id
from .access import get_actor, isawaitable
from .futures import Future, task
from .proxy import actor_identity, get_proxy, get_command, ActorProxy
from .protocols import Protocol
from .clients import AbstractClient
CommandRequest = namedtuple('CommandRequest', 'actor caller connection')
def create_aid():
return gen_unique_id()[:8]
@asyncio.coroutine
def command_in_context(command, caller, target, args, kwargs, connection=None):
cmnd = get_command(command)
if not cmnd:
raise CommandError('unknown %s' % command)
request = CommandRequest(target, caller, connection)
result = cmnd(request, args, kwargs)
if isawaitable(result):
result = yield from result
return result
class ProxyMailbox:
'''A proxy for the arbiter :class:`Mailbox`.
'''
active_connections = 0
def __init__(self, actor):
mailbox = actor.monitor.mailbox
if isinstance(mailbox, ProxyMailbox):
mailbox = mailbox.mailbox
self.mailbox = mailbox
def __repr__(self):
return self.mailbox.__repr__()
def __str__(self):
return self.mailbox.__str__()
def __getattr__(self, name):
return getattr(self.mailbox, name)
def _run(self):
pass
def close(self):
pass
class Message:
'''A message which travels from actor to actor.
'''
def __init__(self, data, waiter=None):
self.data = data
self.waiter = waiter
def __repr__(self):
return self.data.get('command', 'unknown')
__str__ = __repr__
@classmethod
def command(cls, command, sender, target, args, kwargs):
command = get_command(command)
data = {'command': command.__name__,
'sender': actor_identity(sender),
'target': actor_identity(target),
'args': args if args is not None else (),
'kwargs': kwargs if kwargs is not None else {}}
waiter = Future()
if command.ack:
data['ack'] = create_aid()
else:
waiter.set_result(None)
return cls(data, waiter)
@classmethod
def callback(cls, result, ack):
data = {'command': 'callback', 'result': result, 'ack': ack}
return cls(data)
class MailboxProtocol(Protocol):
'''The :class:`.Protocol` for internal message passing between actors.
Encoding and decoding uses the unmasked websocket protocol.
'''
def __init__(self, **kw):
super().__init__(**kw)
self._pending_responses = {}
self._parser = frame_parser(kind=2, pyparser=True)
actor = get_actor()
if actor.is_arbiter():
self.bind_event('connection_lost', self._connection_lost)
def request(self, command, sender, target, args, kwargs):
'''Used by the server to send messages to the client.'''
req = Message.command(command, sender, target, args, kwargs)
self._start(req)
return req.waiter
def data_received(self, data):
# Feed data into the parser
msg = self._parser.decode(data)
while msg:
try:
message = pickle.loads(msg.body)
except Exception as e:
raise ProtocolError('Could not decode message body: %s' % e)
self._on_message(message)
msg = self._parser.decode()
########################################################################
# INTERNALS
def _start(self, req):
if req.waiter and 'ack' in req.data:
self._pending_responses[req.data['ack']] = req.waiter
try:
self._write(req)
except Exception as exc:
req.waiter.set_exception(exc)
else:
self._write(req)
def _connection_lost(self, _, exc=None):
if exc:
actor = get_actor()
if actor.is_running():
actor.logger.warning('Connection lost with actor.')
@task
def _on_message(self, message):
actor = get_actor()
command = message.get('command')
ack = message.get('ack')
if command == 'callback':
if not ack:
raise ProtocolError('A callback without id')
try:
pending = self._pending_responses.pop(ack)
except KeyError:
raise KeyError('Callback %s not in pending callbacks' % ack)
pending.set_result(message.get('result'))
else:
try:
target = actor.get_actor(message['target'])
if target is None:
raise CommandError('cannot execute "%s", unknown actor '
'"%s"' % (command, message['target']))
# Get the caller proxy without throwing
caller = get_proxy(actor.get_actor(message['sender']),
safe=True)
if isinstance(target, ActorProxy):
# route the message to the actor proxy
if caller is None:
raise CommandError(
"'%s' got message from unknown '%s'"
% (actor, message['sender']))
result = yield from actor.send(target, command,
*message['args'],
**message['kwargs'])
else:
result = yield from command_in_context(command, caller,
target,
message['args'],
message['kwargs'],
self)
except CommandError as exc:
self.logger.warning('Command error: %s' % exc)
result = None
except Exception as exc:
self.logger.exception('Unhandled exception')
result = None
if ack:
self._start(Message.callback(result, ack))
def _write(self, req):
obj = pickle.dumps(req.data, protocol=2)
data = self._parser.encode(obj, opcode=2)
try:
self._transport.write(data)
except socket.error:
actor = get_actor()
if actor.is_running():
if actor.is_arbiter():
raise
else:
actor.logger.warning('Lost connection with arbiter')
actor._loop.stop()
class MailboxClient(AbstractClient):
'''Used by actors to send messages to other actors via the arbiter.
'''
protocol_factory = MailboxProtocol
def __init__(self, address, actor, loop):
super().__init__(loop)
self.address = address
self.name = 'Mailbox for %s' % actor
self._connection = None
def response(self, request):
resp = super().response
self._consumer = resp(request, self._consumer, False)
return self._consumer
def connect(self):
return self.create_connection(self.address)
def __repr__(self):
return '%s %s' % (self.name, nice_address(self.address))
@task
def request(self, command, sender, target, args, kwargs):
# the request method
if self._connection is None:
self._connection = yield from self.connect()
self._connection.bind_event('connection_lost', self._lost)
req = Message.command(command, sender, target, args, kwargs)
self._connection._start(req)
response = yield from req.waiter
return response
def start_serving(self):
pass
def close(self):
if self._connection:
self._connection.close()
def _lost(self, _, exc=None):
# When the connection is lost, stop the event loop
if self._loop.is_running():
self._loop.stop()
|
|
import sys
import copy
import functools
import datetime
import decimal
from functools import update_wrapper
from inspect import getargspec
from django import forms
from django.utils.encoding import force_unicode
from django.conf import settings
from django.contrib import messages
from django.core.exceptions import ValidationError
from django.core.serializers.json import DjangoJSONEncoder
from django.core.urlresolvers import reverse
from django.http import HttpResponse
from django.template import Context, Template
from django.template.response import TemplateResponse
from django.utils.datastructures import SortedDict
from django.utils.decorators import method_decorator, classonlymethod
from django.utils.encoding import smart_unicode
from django.utils.http import urlencode
from django.utils.itercompat import is_iterable
from django.utils.safestring import mark_safe
from django.utils.text import capfirst
from django.utils.translation import ugettext as _
from django.views.decorators.csrf import csrf_protect
from django.views.generic import View
from xadmin.util import static, json, vendor, sortkeypicker
csrf_protect_m = method_decorator(csrf_protect)
class IncorrectPluginArg(Exception):
pass
def filter_chain(filters, token, func, *args, **kwargs):
if token == -1:
return func()
else:
def _inner_method():
fm = filters[token]
fargs = getargspec(fm)[0]
if len(fargs) == 1:
# Only self arg
result = func()
if result is None:
return fm()
else:
                    raise IncorrectPluginArg(u'Plugin filter method needs an arg to receive the parent method result.')
else:
return fm(func if fargs[1] == '__' else func(), *args, **kwargs)
return filter_chain(filters, token - 1, _inner_method, *args, **kwargs)
def filter_hook(func):
tag = func.__name__
func.__doc__ = "``filter_hook``\n\n" + (func.__doc__ or "")
@functools.wraps(func)
def method(self, *args, **kwargs):
def _inner_method():
return func(self, *args, **kwargs)
if self.plugins:
filters = [(getattr(getattr(p, tag), 'priority', 10), getattr(p, tag))
for p in self.plugins if callable(getattr(p, tag, None))]
filters = [f for p, f in sorted(filters, key=lambda x:x[0])]
return filter_chain(filters, len(filters) - 1, _inner_method, *args, **kwargs)
else:
return _inner_method()
return method
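# A minimal sketch (hypothetical plugin, not shipped with this module) of the
# two plugin-method signatures filter_chain() dispatches on: a method whose
# second argument is named '__' receives the parent *callable*, any other
# name receives the parent method's *result*.
#
#   class ExamplePlugin(BaseAdminPlugin):
#       def get_context(self, context):      # gets the parent result
#           context['example_flag'] = True
#           return context
#
#       def get_media(self, __):             # gets the parent callable
#           return __() + vendor('xadmin.plugin.example.js')   # hypothetical tag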
def inclusion_tag(file_name, context_class=Context, takes_context=False):
def wrap(func):
@functools.wraps(func)
def method(self, context, nodes, *arg, **kwargs):
_dict = func(self, context, nodes, *arg, **kwargs)
from django.template.loader import get_template, select_template
if isinstance(file_name, Template):
t = file_name
elif not isinstance(file_name, basestring) and is_iterable(file_name):
t = select_template(file_name)
else:
t = get_template(file_name)
new_context = context_class(_dict, **{
'autoescape': context.autoescape,
'current_app': context.current_app,
'use_l10n': context.use_l10n,
'use_tz': context.use_tz,
})
new_context['admin_view'] = context['admin_view']
csrf_token = context.get('csrf_token', None)
if csrf_token is not None:
new_context['csrf_token'] = csrf_token
nodes.append(t.render(new_context))
return method
return wrap
class JSONEncoder(DjangoJSONEncoder):
def default(self, o):
if isinstance(o, datetime.date):
return o.strftime('%Y-%m-%d')
elif isinstance(o, datetime.datetime):
return o.strftime('%Y-%m-%d %H:%M:%S')
elif isinstance(o, decimal.Decimal):
return str(o)
else:
try:
return super(JSONEncoder, self).default(o)
except Exception:
return smart_unicode(o)
class BaseAdminObject(object):
def get_view(self, view_class, option_class=None, *args, **kwargs):
opts = kwargs.pop('opts', {})
return self.admin_site.get_view_class(view_class, option_class, **opts)(self.request, *args, **kwargs)
def get_model_view(self, view_class, model, *args, **kwargs):
return self.get_view(view_class, self.admin_site._registry.get(model), *args, **kwargs)
def get_admin_url(self, name, *args, **kwargs):
return reverse('%s:%s' % (self.admin_site.app_name, name), args=args, kwargs=kwargs)
def get_model_url(self, model, name, *args, **kwargs):
return reverse(
'%s:%s_%s_%s' % (self.admin_site.app_name, model._meta.app_label,
model._meta.model_name, name),
args=args, kwargs=kwargs, current_app=self.admin_site.name)
def get_model_perm(self, model, name):
return '%s.%s_%s' % (model._meta.app_label, name, model._meta.model_name)
def has_model_perm(self, model, name, user=None):
user = user or self.user
return user.has_perm(self.get_model_perm(model, name)) or (name == 'view' and self.has_model_perm(model, 'change', user))
def get_query_string(self, new_params=None, remove=None):
if new_params is None:
new_params = {}
if remove is None:
remove = []
p = dict(self.request.GET.items()).copy()
for r in remove:
for k in p.keys():
if k.startswith(r):
del p[k]
for k, v in new_params.items():
if v is None:
if k in p:
del p[k]
else:
p[k] = v
return '?%s' % urlencode(p)
def get_form_params(self, new_params=None, remove=None):
if new_params is None:
new_params = {}
if remove is None:
remove = []
p = dict(self.request.GET.items()).copy()
for r in remove:
for k in p.keys():
if k.startswith(r):
del p[k]
for k, v in new_params.items():
if v is None:
if k in p:
del p[k]
else:
p[k] = v
return mark_safe(''.join(
'<input type="hidden" name="%s" value="%s"/>' % (k, v) for k, v in p.items() if v))
def render_response(self, content, response_type='json'):
if response_type == 'json':
response = HttpResponse(content_type="application/json; charset=UTF-8")
response.write(
json.dumps(content, cls=JSONEncoder, ensure_ascii=False))
return response
return HttpResponse(content)
def template_response(self, template, context):
return TemplateResponse(self.request, template, context, current_app=self.admin_site.name)
def message_user(self, message, level='info'):
"""
Send a message to the user. The default implementation
posts a message using the django.contrib.messages backend.
"""
if hasattr(messages, level) and callable(getattr(messages, level)):
getattr(messages, level)(self.request, message)
def static(self, path):
return static(path)
def vendor(self, *tags):
return vendor(*tags)
class BaseAdminPlugin(BaseAdminObject):
def __init__(self, admin_view):
self.admin_view = admin_view
self.admin_site = admin_view.admin_site
if hasattr(admin_view, 'model'):
self.model = admin_view.model
self.opts = admin_view.model._meta
def init_request(self, *args, **kwargs):
pass
class BaseAdminView(BaseAdminObject, View):
""" Base Admin view, support some comm attrs."""
base_template = 'xadmin/base.html'
need_site_permission = True
def __init__(self, request, *args, **kwargs):
self.request = request
self.request_method = request.method.lower()
self.user = request.user
self.base_plugins = [p(self) for p in getattr(self,
"plugin_classes", [])]
self.args = args
self.kwargs = kwargs
self.init_plugin(*args, **kwargs)
self.init_request(*args, **kwargs)
@classonlymethod
def as_view(cls):
def view(request, *args, **kwargs):
self = cls(request, *args, **kwargs)
if hasattr(self, 'get') and not hasattr(self, 'head'):
self.head = self.get
if self.request_method in self.http_method_names:
handler = getattr(
self, self.request_method, self.http_method_not_allowed)
else:
handler = self.http_method_not_allowed
return handler(request, *args, **kwargs)
# take name and docstring from class
update_wrapper(view, cls, updated=())
view.need_site_permission = cls.need_site_permission
return view
def init_request(self, *args, **kwargs):
pass
def init_plugin(self, *args, **kwargs):
plugins = []
for p in self.base_plugins:
p.request = self.request
p.user = self.user
p.args = self.args
p.kwargs = self.kwargs
result = p.init_request(*args, **kwargs)
if result is not False:
plugins.append(p)
self.plugins = plugins
@filter_hook
def get_context(self):
return {'admin_view': self, 'media': self.media, 'base_template': self.base_template}
@property
def media(self):
return self.get_media()
@filter_hook
def get_media(self):
return forms.Media()
class CommAdminView(BaseAdminView):
base_template = 'xadmin/base_site.html'
menu_template = 'xadmin/includes/sitemenu_default.html'
site_title = None
global_models_icon = {}
default_model_icon = None
apps_label_title = {}
apps_icons = {}
def get_site_menu(self):
return None
@filter_hook
def get_nav_menu(self):
site_menu = list(self.get_site_menu() or [])
had_urls = []
def get_url(menu, had_urls):
if 'url' in menu:
had_urls.append(menu['url'])
if 'menus' in menu:
for m in menu['menus']:
get_url(m, had_urls)
get_url({'menus': site_menu}, had_urls)
nav_menu = SortedDict()
for model, model_admin in self.admin_site._registry.items():
if getattr(model_admin, 'hidden_menu', False):
continue
app_label = model._meta.app_label
app_icon = None
model_dict = {
'title': unicode(capfirst(model._meta.verbose_name_plural)),
'url': self.get_model_url(model, "changelist"),
'icon': self.get_model_icon(model),
'perm': self.get_model_perm(model, 'view'),
'order': model_admin.order,
}
if model_dict['url'] in had_urls:
continue
app_key = "app:%s" % app_label
if app_key in nav_menu:
nav_menu[app_key]['menus'].append(model_dict)
else:
# Find app title
app_title = unicode(app_label.title())
if app_label.lower() in self.apps_label_title:
app_title = self.apps_label_title[app_label.lower()]
else:
mods = model.__module__.split('.')
if len(mods) > 1:
mod = '.'.join(mods[0:-1])
if mod in sys.modules:
mod = sys.modules[mod]
if 'verbose_name' in dir(mod):
app_title = getattr(mod, 'verbose_name')
elif 'app_title' in dir(mod):
app_title = getattr(mod, 'app_title')
#find app icon
if app_label.lower() in self.apps_icons:
app_icon = self.apps_icons[app_label.lower()]
nav_menu[app_key] = {
'title': app_title,
'menus': [model_dict],
}
app_menu = nav_menu[app_key]
if app_icon:
app_menu['first_icon'] = app_icon
elif ('first_icon' not in app_menu or
app_menu['first_icon'] == self.default_model_icon) and model_dict.get('icon'):
app_menu['first_icon'] = model_dict['icon']
if 'first_url' not in app_menu and model_dict.get('url'):
app_menu['first_url'] = model_dict['url']
for menu in nav_menu.values():
menu['menus'].sort(key=sortkeypicker(['order', 'title']))
nav_menu = nav_menu.values()
nav_menu.sort(key=lambda x: x['title'])
site_menu.extend(nav_menu)
return site_menu
@filter_hook
def get_context(self):
context = super(CommAdminView, self).get_context()
if not settings.DEBUG and 'nav_menu' in self.request.session:
nav_menu = json.loads(self.request.session['nav_menu'])
else:
menus = copy.copy(self.get_nav_menu())
def check_menu_permission(item):
need_perm = item.pop('perm', None)
if need_perm is None:
return True
elif callable(need_perm):
return need_perm(self.user)
elif need_perm == 'super':
return self.user.is_superuser
else:
return self.user.has_perm(need_perm)
def filter_item(item):
if 'menus' in item:
item['menus'] = [filter_item(
i) for i in item['menus'] if check_menu_permission(i)]
return item
nav_menu = [filter_item(item) for item in menus if check_menu_permission(item)]
nav_menu = filter(lambda i: bool(i['menus']), nav_menu)
if not settings.DEBUG:
self.request.session['nav_menu'] = json.dumps(nav_menu)
self.request.session.modified = True
def check_selected(menu, path):
selected = False
if 'url' in menu:
chop_index = menu['url'].find('?')
if chop_index == -1:
selected = path.startswith(menu['url'])
else:
selected = path.startswith(menu['url'][:chop_index])
if 'menus' in menu:
for m in menu['menus']:
_s = check_selected(m, path)
if _s:
selected = True
if selected:
menu['selected'] = True
return selected
for menu in nav_menu:
check_selected(menu, self.request.path)
context.update({
'menu_template': self.menu_template,
'nav_menu': nav_menu,
'site_title': self.site_title or _(u'Django Xadmin'),
'breadcrumbs': self.get_breadcrumb()
})
return context
@filter_hook
def get_model_icon(self, model):
icon = self.global_models_icon.get(model)
if icon is None and model in self.admin_site._registry:
icon = getattr(self.admin_site._registry[model],
'model_icon', self.default_model_icon)
return icon
@filter_hook
def get_breadcrumb(self):
return [{
'url': self.get_admin_url('index'),
'title': _('Home')
}]
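    # A configuration sketch (hypothetical project code, not part of this
    # module) for the attributes get_nav_menu()/get_context() read above:
    #
    #   class GlobalSettings(object):
    #       site_title = u'My Site Admin'
    #       global_models_icon = {SomeModel: 'fa fa-cube'}
    #       apps_label_title = {'auth': u'Accounts'}
    #       apps_icons = {'auth': 'fa fa-users'}
    #
    #   xadmin.site.register(CommAdminView, GlobalSettings)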
class ModelAdminView(CommAdminView):
fields = None
exclude = None
ordering = None
model = None
remove_permissions = []
def __init__(self, request, *args, **kwargs):
self.opts = self.model._meta
self.app_label = self.model._meta.app_label
self.model_name = self.model._meta.model_name
self.model_info = (self.app_label, self.model_name)
super(ModelAdminView, self).__init__(request, *args, **kwargs)
@filter_hook
def get_context(self):
new_context = {
"opts": self.opts,
"app_label": self.app_label,
"module_name": self.model_name,
"verbose_name": force_unicode(self.opts.verbose_name),
'model_icon': self.get_model_icon(self.model),
}
context = super(ModelAdminView, self).get_context()
context.update(new_context)
return context
@filter_hook
def get_breadcrumb(self):
bcs = super(ModelAdminView, self).get_breadcrumb()
item = {'title': self.opts.verbose_name_plural}
if self.has_view_permission():
item['url'] = self.model_admin_url('changelist')
bcs.append(item)
return bcs
@filter_hook
def get_object(self, object_id):
"""
Get model object instance by object_id, used for change admin view
"""
        # start from the base admin view queryset (by default the model's default manager queryset)
queryset = self.queryset()
model = queryset.model
try:
object_id = model._meta.pk.to_python(object_id)
return queryset.get(pk=object_id)
except (model.DoesNotExist, ValidationError):
return None
@filter_hook
def get_object_url(self, obj):
if self.has_change_permission(obj):
return self.model_admin_url("change", getattr(obj, self.opts.pk.attname))
elif self.has_view_permission(obj):
return self.model_admin_url("detail", getattr(obj, self.opts.pk.attname))
else:
return None
def model_admin_url(self, name, *args, **kwargs):
return reverse(
"%s:%s_%s_%s" % (self.admin_site.app_name, self.opts.app_label,
self.model_name, name), args=args, kwargs=kwargs)
def get_model_perms(self):
"""
Returns a dict of all perms for this model. This dict has the keys
``add``, ``change``, and ``delete`` mapping to the True/False for each
of those actions.
"""
return {
'view': self.has_view_permission(),
'add': self.has_add_permission(),
'change': self.has_change_permission(),
'delete': self.has_delete_permission(),
}
def get_template_list(self, template_name):
opts = self.opts
return (
"xadmin/%s/%s/%s" % (
opts.app_label, opts.object_name.lower(), template_name),
"xadmin/%s/%s" % (opts.app_label, template_name),
"xadmin/%s" % template_name,
)
def get_ordering(self):
"""
Hook for specifying field ordering.
"""
return self.ordering or () # otherwise we might try to *None, which is bad ;)
def queryset(self):
"""
Returns a QuerySet of all model instances that can be edited by the
admin site. This is used by changelist_view.
"""
return self.model._default_manager.get_queryset()
def has_view_permission(self, obj=None):
return ('view' not in self.remove_permissions) and (self.user.has_perm('%s.view_%s' % self.model_info) or \
self.user.has_perm('%s.change_%s' % self.model_info))
def has_add_permission(self):
return ('add' not in self.remove_permissions) and self.user.has_perm('%s.add_%s' % self.model_info)
def has_change_permission(self, obj=None):
return ('change' not in self.remove_permissions) and self.user.has_perm('%s.change_%s' % self.model_info)
def has_delete_permission(self, obj=None):
return ('delete' not in self.remove_permissions) and self.user.has_perm('%s.delete_%s' % self.model_info)
|
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from oslo_config import cfg
from oslo_log import log as logging
from oslo_utils import timeutils
from oslo_utils import uuidutils
import testtools
from senlin.engine.actions import base as action_base
from senlin.engine import cluster
from senlin.engine import node
from senlin.events import base
from senlin.events import message as MSG
from senlin.objects import notification as nobj
from senlin.tests.unit.common import utils
CLUSTER_ID = '2c5139a6-24ba-4a6f-bd53-a268f61536de'
class TestMessageEvent(testtools.TestCase):
def setUp(self):
super(TestMessageEvent, self).setUp()
self.ctx = utils.dummy_context()
@mock.patch.object(nobj.NotificationBase, '_emit')
def test_notify_cluster_action(self, mock_emit):
cluster_id = uuidutils.generate_uuid()
profile_id = uuidutils.generate_uuid()
cluster_init = timeutils.utcnow(True)
action_id = uuidutils.generate_uuid()
cluster_params = {
'id': cluster_id,
'init_at': cluster_init,
'min_size': 1,
'max_size': 10,
'timeout': 4,
'status': 'ACTIVE',
'status_reason': 'Good',
'user': 'user1',
'project': 'project1',
}
c1 = cluster.Cluster('fake_name', 5, profile_id, **cluster_params)
action_params = {
'id': action_id,
'name': 'fake_name',
'start_time': 1.23,
'status': 'RUNNING',
'status_reason': 'Good',
'user': 'user1',
'project': 'project1',
}
action = action_base.Action(cluster_id, 'CLUSTER_CREATE', self.ctx,
**action_params)
publisher_id = 'senlin-engine:%s' % cfg.CONF.host
expected_payload = {
'senlin_object.data': {
'action': {
'senlin_object.data': {
'action': 'CLUSTER_CREATE',
'created_at': None,
'data': '{}',
'end_time': None,
'id': action_id,
'inputs': '{}',
'name': 'fake_name',
'outputs': '{}',
'project': self.ctx.project_id,
'start_time': 1.23,
'status': 'RUNNING',
'status_reason': 'Good',
'target': cluster_id,
'timeout': 3600,
'user': self.ctx.user_id,
},
'senlin_object.name': 'ActionPayload',
'senlin_object.namespace': 'senlin',
'senlin_object.version': '1.0'
},
'cluster': {
'senlin_object.data': {
'created_at': None,
'data': '{}',
'dependents': '{}',
'desired_capacity': 5,
'domain': '',
'id': cluster_id,
'init_at': mock.ANY,
'max_size': 10,
'metadata': '{}',
'min_size': 1,
'name': 'fake_name',
'profile_id': profile_id,
'project': u'project1',
'status': u'ACTIVE',
'status_reason': u'Good',
'timeout': 4,
'updated_at': None,
'user': u'user1'
},
'senlin_object.name': 'ClusterPayload',
'senlin_object.namespace': 'senlin',
'senlin_object.version': '1.0'
},
'exception': None
},
'senlin_object.name': 'ClusterActionPayload',
'senlin_object.namespace': 'senlin',
'senlin_object.version': '1.0'
}
res = MSG.MessageEvent._notify_cluster_action(
self.ctx, logging.INFO, c1, action, phase='start')
self.assertIsNone(res)
mock_emit.assert_called_once_with(
self.ctx, 'cluster.create.start', publisher_id, mock.ANY)
payload = mock_emit.call_args[0][3]
self.assertEqual(expected_payload, payload)
@mock.patch.object(nobj.NotificationBase, '_emit')
def test_notify_node_action(self, mock_emit):
node_id = uuidutils.generate_uuid()
profile_id = uuidutils.generate_uuid()
node_init = timeutils.utcnow(True)
action_id = uuidutils.generate_uuid()
node_params = {
'id': node_id,
'cluster_id': '',
'index': -1,
'init_at': node_init,
'status': 'ACTIVE',
'status_reason': 'Good',
'user': 'user1',
'project': 'project1',
}
n1 = node.Node('fake_name', profile_id, **node_params)
action_params = {
'id': action_id,
'name': 'fake_name',
'start_time': 1.23,
'status': 'RUNNING',
'status_reason': 'Good',
}
action = action_base.Action(node_id, 'NODE_CREATE', self.ctx,
**action_params)
publisher_id = 'senlin-engine:%s' % cfg.CONF.host
expected_payload = {
'senlin_object.data': {
'action': {
'senlin_object.data': {
'action': 'NODE_CREATE',
'created_at': None,
'data': '{}',
'end_time': None,
'id': action_id,
'inputs': '{}',
'name': 'fake_name',
'outputs': '{}',
'project': self.ctx.project_id,
'start_time': 1.23,
'status': 'RUNNING',
'status_reason': 'Good',
'target': node_id,
'timeout': 3600,
'user': self.ctx.user_id,
},
'senlin_object.name': 'ActionPayload',
'senlin_object.namespace': 'senlin',
'senlin_object.version': '1.0'
},
'node': {
'senlin_object.data': {
'cluster_id': '',
'created_at': None,
'data': '{}',
'dependents': '{}',
'domain': '',
'id': node_id,
'index': -1,
'init_at': mock.ANY,
'metadata': '{}',
'name': 'fake_name',
'physical_id': None,
'profile_id': profile_id,
'project': 'project1',
'role': '',
'status': 'ACTIVE',
'status_reason': 'Good',
'updated_at': None,
'user': 'user1',
},
'senlin_object.name': 'NodePayload',
'senlin_object.namespace': 'senlin',
'senlin_object.version': '1.0'
},
'exception': None
},
'senlin_object.name': 'NodeActionPayload',
'senlin_object.namespace': 'senlin',
'senlin_object.version': '1.0'
}
res = MSG.MessageEvent._notify_node_action(
self.ctx, logging.INFO, n1, action, phase='start')
self.assertIsNone(res)
mock_emit.assert_called_once_with(
self.ctx, 'node.create.start', publisher_id, mock.ANY)
payload = mock_emit.call_args[0][3]
self.assertEqual(expected_payload, payload)
@mock.patch.object(MSG.MessageEvent, '_notify_cluster_action')
@mock.patch.object(base.EventBackend, '_check_entity')
def test_dump_cluster_action_event(self, mock_check, mock_notify):
mock_check.return_value = 'CLUSTER'
entity = mock.Mock()
action = mock.Mock(context=self.ctx, entity=entity)
res = MSG.MessageEvent.dump(logging.INFO, action)
self.assertIsNone(res)
mock_check.assert_called_once_with(entity)
mock_notify.assert_called_once_with(self.ctx, logging.INFO, entity,
action)
@mock.patch.object(MSG.MessageEvent, '_notify_cluster_action')
@mock.patch.object(base.EventBackend, '_check_entity')
def test_dump_cluster_action_event_warn(self, mock_check, mock_notify):
mock_check.return_value = 'CLUSTER'
entity = mock.Mock()
action = mock.Mock(context=self.ctx, entity=entity)
res = MSG.MessageEvent.dump(logging.WARNING, action)
self.assertIsNone(res)
mock_check.assert_called_once_with(entity)
mock_notify.assert_called_once_with(self.ctx, logging.WARNING,
entity, action)
@mock.patch.object(MSG.MessageEvent, '_notify_node_action')
@mock.patch.object(base.EventBackend, '_check_entity')
def test_dump_node_action_event(self, mock_check, mock_notify):
mock_check.return_value = 'NODE'
entity = mock.Mock()
action = mock.Mock(context=self.ctx, entity=entity)
res = MSG.MessageEvent.dump(logging.INFO, action)
self.assertIsNone(res)
mock_check.assert_called_once_with(entity)
mock_notify.assert_called_once_with(self.ctx, logging.INFO, entity,
action)
|
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, AsyncIterable, Callable, Dict, Generic, Optional, TypeVar, Union
import warnings
from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest
from azure.core.polling import AsyncLROPoller, AsyncNoPolling, AsyncPollingMethod
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.async_arm_polling import AsyncARMPolling
from ... import models as _models
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class ExpressRouteCircuitConnectionsOperations:
"""ExpressRouteCircuitConnectionsOperations async operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.network.v2021_02_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer) -> None:
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
async def _delete_initial(
self,
resource_group_name: str,
circuit_name: str,
peering_name: str,
connection_name: str,
**kwargs: Any
) -> None:
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2021-02-01"
accept = "application/json"
# Construct URL
url = self._delete_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'circuitName': self._serialize.url("circuit_name", circuit_name, 'str'),
'peeringName': self._serialize.url("peering_name", peering_name, 'str'),
'connectionName': self._serialize.url("connection_name", connection_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.delete(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_delete_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/expressRouteCircuits/{circuitName}/peerings/{peeringName}/connections/{connectionName}'} # type: ignore
async def begin_delete(
self,
resource_group_name: str,
circuit_name: str,
peering_name: str,
connection_name: str,
**kwargs: Any
) -> AsyncLROPoller[None]:
"""Deletes the specified Express Route Circuit Connection from the specified express route
circuit.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param circuit_name: The name of the express route circuit.
:type circuit_name: str
:param peering_name: The name of the peering.
:type peering_name: str
:param connection_name: The name of the express route circuit connection.
:type connection_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling.
Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[None]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType[None]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._delete_initial(
resource_group_name=resource_group_name,
circuit_name=circuit_name,
peering_name=peering_name,
connection_name=connection_name,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
if cls:
return cls(pipeline_response, None, {})
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'circuitName': self._serialize.url("circuit_name", circuit_name, 'str'),
'peeringName': self._serialize.url("peering_name", peering_name, 'str'),
'connectionName': self._serialize.url("connection_name", connection_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
if polling is True: polling_method = AsyncARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/expressRouteCircuits/{circuitName}/peerings/{peeringName}/connections/{connectionName}'} # type: ignore
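    # A usage sketch (assumes azure.mgmt.network.aio's NetworkManagementClient
    # and an async credential; the resource names below are illustrative):
    #
    #   async with NetworkManagementClient(credential, subscription_id) as client:
    #       poller = await client.express_route_circuit_connections.begin_delete(
    #           'my-rg', 'my-circuit', 'AzurePrivatePeering', 'my-connection')
    #       await poller.result()   # wait for the long-running delete to finish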
async def get(
self,
resource_group_name: str,
circuit_name: str,
peering_name: str,
connection_name: str,
**kwargs: Any
) -> "_models.ExpressRouteCircuitConnection":
"""Gets the specified Express Route Circuit Connection from the specified express route circuit.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param circuit_name: The name of the express route circuit.
:type circuit_name: str
:param peering_name: The name of the peering.
:type peering_name: str
:param connection_name: The name of the express route circuit connection.
:type connection_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: ExpressRouteCircuitConnection, or the result of cls(response)
:rtype: ~azure.mgmt.network.v2021_02_01.models.ExpressRouteCircuitConnection
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.ExpressRouteCircuitConnection"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2021-02-01"
accept = "application/json"
# Construct URL
url = self.get.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'circuitName': self._serialize.url("circuit_name", circuit_name, 'str'),
'peeringName': self._serialize.url("peering_name", peering_name, 'str'),
'connectionName': self._serialize.url("connection_name", connection_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('ExpressRouteCircuitConnection', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/expressRouteCircuits/{circuitName}/peerings/{peeringName}/connections/{connectionName}'} # type: ignore
async def _create_or_update_initial(
self,
resource_group_name: str,
circuit_name: str,
peering_name: str,
connection_name: str,
express_route_circuit_connection_parameters: "_models.ExpressRouteCircuitConnection",
**kwargs: Any
) -> "_models.ExpressRouteCircuitConnection":
cls = kwargs.pop('cls', None) # type: ClsType["_models.ExpressRouteCircuitConnection"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2021-02-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self._create_or_update_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'circuitName': self._serialize.url("circuit_name", circuit_name, 'str'),
'peeringName': self._serialize.url("peering_name", peering_name, 'str'),
'connectionName': self._serialize.url("connection_name", connection_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(express_route_circuit_connection_parameters, 'ExpressRouteCircuitConnection')
body_content_kwargs['content'] = body_content
request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 201]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize('ExpressRouteCircuitConnection', pipeline_response)
if response.status_code == 201:
deserialized = self._deserialize('ExpressRouteCircuitConnection', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_create_or_update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/expressRouteCircuits/{circuitName}/peerings/{peeringName}/connections/{connectionName}'} # type: ignore
async def begin_create_or_update(
self,
resource_group_name: str,
circuit_name: str,
peering_name: str,
connection_name: str,
express_route_circuit_connection_parameters: "_models.ExpressRouteCircuitConnection",
**kwargs: Any
) -> AsyncLROPoller["_models.ExpressRouteCircuitConnection"]:
"""Creates or updates a Express Route Circuit Connection in the specified express route circuits.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param circuit_name: The name of the express route circuit.
:type circuit_name: str
:param peering_name: The name of the peering.
:type peering_name: str
:param connection_name: The name of the express route circuit connection.
:type connection_name: str
:param express_route_circuit_connection_parameters: Parameters supplied to the create or update
express route circuit connection operation.
:type express_route_circuit_connection_parameters: ~azure.mgmt.network.v2021_02_01.models.ExpressRouteCircuitConnection
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling.
Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either ExpressRouteCircuitConnection or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.network.v2021_02_01.models.ExpressRouteCircuitConnection]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.ExpressRouteCircuitConnection"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._create_or_update_initial(
resource_group_name=resource_group_name,
circuit_name=circuit_name,
peering_name=peering_name,
connection_name=connection_name,
express_route_circuit_connection_parameters=express_route_circuit_connection_parameters,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize('ExpressRouteCircuitConnection', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'circuitName': self._serialize.url("circuit_name", circuit_name, 'str'),
'peeringName': self._serialize.url("peering_name", peering_name, 'str'),
'connectionName': self._serialize.url("connection_name", connection_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
if polling is True: polling_method = AsyncARMPolling(lro_delay, lro_options={'final-state-via': 'azure-async-operation'}, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/expressRouteCircuits/{circuitName}/peerings/{peeringName}/connections/{connectionName}'} # type: ignore
def list(
self,
resource_group_name: str,
circuit_name: str,
peering_name: str,
**kwargs: Any
) -> AsyncIterable["_models.ExpressRouteCircuitConnectionListResult"]:
"""Gets all global reach connections associated with a private peering in an express route
circuit.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param circuit_name: The name of the circuit.
:type circuit_name: str
:param peering_name: The name of the peering.
:type peering_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either ExpressRouteCircuitConnectionListResult or the result of cls(response)
:rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.network.v2021_02_01.models.ExpressRouteCircuitConnectionListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.ExpressRouteCircuitConnectionListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2021-02-01"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'circuitName': self._serialize.url("circuit_name", circuit_name, 'str'),
'peeringName': self._serialize.url("peering_name", peering_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize('ExpressRouteCircuitConnectionListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/expressRouteCircuits/{circuitName}/peerings/{peeringName}/connections'} # type: ignore
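# Hedged usage sketch for the async operations above; not part of the generated
# client. It assumes azure-identity is installed and that this operations group
# is exposed on the aio NetworkManagementClient as
# `express_route_circuit_connections`; the subscription id and resource names are
# placeholders.
import asyncio
from azure.identity.aio import DefaultAzureCredential
from azure.mgmt.network.aio import NetworkManagementClient

async def example_get_connection():
    credential = DefaultAzureCredential()
    client = NetworkManagementClient(credential, "<subscription-id>")
    async with client:
        connection = await client.express_route_circuit_connections.get(
            resource_group_name="example-rg",
            circuit_name="example-circuit",
            peering_name="AzurePrivatePeering",
            connection_name="example-connection",
        )
        print(connection.name)
    await credential.close()

if __name__ == "__main__":
    asyncio.run(example_get_connection())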
|
|
from skimage.io import imread
import numpy as np
import math
class Img:
@staticmethod
def load_image(path, as_grey = False, to_float = True):
# Load image
image = imread(path, as_gray=as_grey)
if to_float:
# Convert to floating point matrix
image = image.astype(np.float32)
return image
@staticmethod
def sticht_images(img_left, img_right):
"""Place img_left and img_right side by side on a shared black canvas."""
width_left_img = img_left.shape[1]
width = width_left_img + img_right.shape[1]
height = np.max([img_left.shape[0], img_right.shape[0]])
stichted_img = np.zeros((height, width, img_left.shape[2]))
for x in range(width_left_img):
for y in range(img_left.shape[0]):
stichted_img[y, x] = img_left[y, x]
for x in range(img_right.shape[1]):
for y in range(img_right.shape[0]):
stichted_img[y, x + width_left_img] = img_right[y, x]
return stichted_img
@staticmethod
def get_2d_rotation_matrix(rad):
rotation_matrix = np.zeros((2, 2))
rotation_matrix[0, 0] = math.cos(rad)
rotation_matrix[0, 1] = -math.sin(rad)
rotation_matrix[1, 0] = math.sin(rad)
rotation_matrix[1, 1] = math.cos(rad)
return rotation_matrix
@staticmethod
def get_2d_scale_matrix(scale):
scale_matrix = np.zeros((2, 2))
scale_matrix[0, 0] = scale
scale_matrix[1, 1] = scale
return scale_matrix
@staticmethod
def get_2d_x_scale_matrix(scale):
x_scale_matrix = np.zeros((2, 2))
x_scale_matrix[0, 0] = scale
x_scale_matrix[1, 1] = 1
return x_scale_matrix
@staticmethod
def get_2d_x_y_scale_matrix(x_scale, y_scale):
x_scale_matrix = np.zeros((2, 2))
x_scale_matrix[0, 0] = x_scale
x_scale_matrix[1, 1] = y_scale
return x_scale_matrix
@staticmethod
def get_x_3d_rotation_matrix(degrees):
"""Rotation matrix about the x axis (angle given in degrees)."""
# The matrix is 2-dimensional, so it is indexed with two indices; the angle
# is converted to radians before being passed to math.cos/math.sin.
rad = math.radians(degrees)
rotation_matrix = np.zeros((3, 3))
rotation_matrix[0, 0] = 1
rotation_matrix[1, 1] = math.cos(rad)
rotation_matrix[1, 2] = -math.sin(rad)
rotation_matrix[2, 1] = math.sin(rad)
rotation_matrix[2, 2] = math.cos(rad)
return rotation_matrix
@staticmethod
def get_scale_diagonal_matrix(scale_diag):
scale_diagonal_matrix = np.zeros((2, 2))
scale_diagonal_matrix[0, 0] = 1
scale_diagonal_matrix[0, 1] = scale_diag
scale_diagonal_matrix[1, 0] = 1
scale_diagonal_matrix[1, 1] = 1
return scale_diagonal_matrix
@staticmethod
def get_scale_orthogonal_matrix(scale_orthogonal):
scale_orthogonal_matrix = np.zeros((2, 2))
scale_orthogonal_matrix[0, 0] = 1
scale_orthogonal_matrix[0, 1] = 1
scale_orthogonal_matrix[1, 0] = scale_orthogonal
scale_orthogonal_matrix[1, 1] = 1
return scale_orthogonal_matrix
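# Illustrative sketch (not part of the original module): the 2x2 helpers above
# compose via matrix multiplication. Here a point is rotated by 45 degrees and
# then uniformly scaled; the angle is in radians, matching get_2d_rotation_matrix.
def _demo_compose_rotation_and_scale(rad=math.pi / 4, scale=2.0):
    rotation = Img.get_2d_rotation_matrix(rad)
    scaling = Img.get_2d_scale_matrix(scale)
    combined = np.dot(scaling, rotation)  # rotate first, then scale
    point = np.array([1.0, 0.0])
    return np.dot(combined, point)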
class Transform:
@staticmethod
def translate(matrix, trans_vector):
return matrix + trans_vector
class RestructuringMethod(object):
NearestNeighbor = 1
BilinearInterpolation = 2
@staticmethod
def affine_transform(image,
transform_matrix,
translation_vector,
restructuring_method=BilinearInterpolation):
from numpy.linalg import inv
new_x_size = int(image.shape[0] * 1.5)
new_y_size = int(image.shape[1] * 1.5)
new_image = np.zeros((new_x_size, new_y_size, 3))
# Get the inverse matrix for indirect restructuring
trans_inv = inv(transform_matrix)
for x in range(new_x_size):
for y in range(new_y_size):
new_coordinates = np.array([x, y])
# First reverse translation
new_coordinates = new_coordinates - translation_vector + np.array([0, -image.shape[1]/2])#-image.shape[0]/2
# Reverse transformation
new_coordinates = np.dot(new_coordinates, trans_inv)
new_x = new_coordinates[0]
new_y = new_coordinates[1]
if restructuring_method == RestructuringMethod.NearestNeighbor:
new_x, new_y = RestructuringMethod.nearest_neighboor(new_x, new_y)
if new_x > 0 and new_y > 0 and new_x < image.shape[0] and new_y < image.shape[1]:
if restructuring_method == RestructuringMethod.BilinearInterpolation:
new_image[x, y, :] = RestructuringMethod.bilinear_interpolation(image[:, :, :], new_x, new_y)
else:
new_image[x, y, :] = image[new_x, new_y, :]
# back casting to uint8
return new_image.astype(np.uint8)
@staticmethod
def bilinear_interpolation(image, x, y):
x_left = int(x)
x_right = int(x + 1)
y_upper = int(y)
y_lower = int(y + 1)
# Because we added 1 on x and y, we could possibly be over
# the range of the image
image_x_max_index = image.shape[0] - 1
image_y_max_index = image.shape[1] - 1
if x_right > image_x_max_index or y_lower > image_y_max_index:
# fall back to the nearest valid pixel instead of indexing with float coordinates
return image[int(x), int(y)]
# calculate areas
a1 = (x - x_left) * (y - y_upper)
a2 = (x_right - x) * (y - y_upper)
a3 = (x - x_left) * (y_lower - y)
a4 = (x_right - x) * (y_lower - y)
grey_value_left_upper = image[x_left, y_upper]
grey_value_right_upper = image[x_right, y_upper]
grey_value_left_lower = image[x_left, y_lower]
grey_value_right_lower = image[x_right, y_lower]
bilinear_interpolated_gray_value = grey_value_left_upper * a4 + grey_value_right_upper * a3 + \
grey_value_left_lower * a2 + grey_value_right_lower * a1
return bilinear_interpolated_gray_value
@staticmethod
def nearest_neighboor(x, y):
# round coordinates
new_x = int(x + 0.5)
new_y = int(y + 0.5)
return new_x, new_y
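# Hedged usage sketch for RestructuringMethod.affine_transform: rotate a small
# synthetic RGB image by 30 degrees using the helpers above. Image size, angle
# and translation are arbitrary illustration values; nothing runs on import.
def _demo_affine_rotation():
    demo_image = np.zeros((64, 64, 3), dtype=np.float32)
    demo_image[:, :, 0] = np.linspace(0, 255, 64)  # simple horizontal gradient
    rotation = Img.get_2d_rotation_matrix(math.radians(30))
    translation = np.array([10, 10])
    return RestructuringMethod.affine_transform(
        demo_image, rotation, translation,
        restructuring_method=RestructuringMethod.BilinearInterpolation)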
class DistortionCorrection(object):
@staticmethod
def generate_distort_correction_mat(points):
equalisation_matrix = np.zeros(8)
target_points = []
for point in points:
tmp_entry = [point.pass_point_x, point.pass_point_y, 1, 0, 0, 0, -point.target_point_x*point.pass_point_x,-point.target_point_x*point.pass_point_y]
equalisation_matrix = np.vstack((equalisation_matrix, tmp_entry))
tmp_entry = [0,0,0,point.pass_point_x,point.pass_point_y,1,-point.target_point_y*point.pass_point_x,-point.target_point_y*point.pass_point_y]
equalisation_matrix = np.vstack((equalisation_matrix, tmp_entry))
target_points.append(point.target_point_x)
target_points.append(point.target_point_y)
# delete first pseudo entry
equalisation_matrix = np.delete(equalisation_matrix, 0, 0)
target_points = np.transpose(target_points)
pseudo_inverse = np.linalg.pinv(equalisation_matrix)
return pseudo_inverse.dot(target_points)
@staticmethod
def distortion_correction(points, image_orig, new_image, use_bilinear_interpolation=True):
a = DistortionCorrection.generate_distort_correction_mat(points)
a1 = a[0]
a2 = a[1]
a3 = a[2]
b1 = a[3]
b2 = a[4]
b3 = a[5]
c1 = a[6]
c2 = a[7]
for y in np.arange(0, new_image.shape[0]):
for x in np.arange(0, new_image.shape[1]):
denominator = ((b1 * c2 - b2 * c1) * x) + ((a2 * c1 - a1 * c2) * y) + (a1 * b2) - (a2 * b1)
new_x = ((b2 - c2 * b3) * x + (a3 * c2 - a2) * y + a2 * b3 - a3 * b2) / denominator
new_y = ((b3 * c1 - b1) * x + (a1 - a3 * c1) * y + a3 * b1 - a1 * b3) / denominator
if new_x > 0 and new_y > 0 and new_x < image_orig.shape[1] and new_y < image_orig.shape[0]:
if use_bilinear_interpolation:
new_image[y, x, :] = RestructuringMethod.bilinear_interpolation(image_orig[:, :, :], new_y, new_x)
else:
new_image[y, x, :] = image_orig[new_y, new_x, :]
return new_image
class DistortionCorrectionPoint(object):
def __init__(self,pass_x, pass_y, target_x, target_y):
self.pass_point_x = pass_x
self.pass_point_y = pass_y
self.target_point_x = target_x
self.target_point_y = target_y
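# Hedged end-to-end sketch for the projective distortion correction above: four
# hypothetical pass/target point correspondences rectify a synthetic image. All
# coordinates and sizes are made-up illustration values.
def _demo_distortion_correction():
    points = [
        DistortionCorrectionPoint(5, 5, 0, 0),
        DistortionCorrectionPoint(90, 10, 99, 0),
        DistortionCorrectionPoint(95, 95, 99, 99),
        DistortionCorrectionPoint(10, 90, 0, 99),
    ]
    image_orig = np.random.rand(100, 100, 3).astype(np.float32)
    new_image = np.zeros_like(image_orig)
    return DistortionCorrection.distortion_correction(points, image_orig, new_image)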
|
|
# -*- coding: utf-8 -*-
from __future__ import with_statement
from django.test import TestCase
from django.http import HttpRequest, HttpResponse
from django.middleware.csrf import CsrfViewMiddleware
from django.views.decorators.csrf import csrf_exempt, requires_csrf_token, ensure_csrf_cookie
from django.core.context_processors import csrf
from django.conf import settings
from django.template import RequestContext, Template
# Response/views used for CsrfResponseMiddleware and CsrfViewMiddleware tests
def post_form_response():
resp = HttpResponse(content=u"""
<html><body><h1>\u00a1Unicode!<form method="post"><input type="text" /></form></body></html>
""", mimetype="text/html")
return resp
def post_form_view(request):
"""A view that returns a POST form (without a token)"""
return post_form_response()
# Response/views used for template tag tests
def token_view(request):
"""A view that uses {% csrf_token %}"""
context = RequestContext(request, processors=[csrf])
template = Template("{% csrf_token %}")
return HttpResponse(template.render(context))
def non_token_view_using_request_processor(request):
"""
A view that doesn't use the token, but does use the csrf view processor.
"""
context = RequestContext(request, processors=[csrf])
template = Template("")
return HttpResponse(template.render(context))
class TestingHttpRequest(HttpRequest):
"""
A version of HttpRequest that allows us to change some things
more easily
"""
def is_secure(self):
return getattr(self, '_is_secure', False)
class CsrfViewMiddlewareTest(TestCase):
# The csrf token is potentially from an untrusted source, so could have
# characters that need dealing with.
_csrf_id_cookie = "<1>\xc2\xa1"
_csrf_id = "1"
def _get_GET_no_csrf_cookie_request(self):
return TestingHttpRequest()
def _get_GET_csrf_cookie_request(self):
req = TestingHttpRequest()
req.COOKIES[settings.CSRF_COOKIE_NAME] = self._csrf_id_cookie
return req
def _get_POST_csrf_cookie_request(self):
req = self._get_GET_csrf_cookie_request()
req.method = "POST"
return req
def _get_POST_no_csrf_cookie_request(self):
req = self._get_GET_no_csrf_cookie_request()
req.method = "POST"
return req
def _get_POST_request_with_token(self):
req = self._get_POST_csrf_cookie_request()
req.POST['csrfmiddlewaretoken'] = self._csrf_id
return req
def _check_token_present(self, response, csrf_id=None):
self.assertContains(response, "name='csrfmiddlewaretoken' value='%s'" % (csrf_id or self._csrf_id))
def test_process_response_get_token_used(self):
"""
When get_token is used, check that the cookie is created and headers
patched.
"""
req = self._get_GET_no_csrf_cookie_request()
# Put tests for CSRF_COOKIE_* settings here
with self.settings(CSRF_COOKIE_NAME='myname',
CSRF_COOKIE_DOMAIN='.example.com',
CSRF_COOKIE_PATH='/test/',
CSRF_COOKIE_SECURE=True):
# token_view calls get_token() indirectly
CsrfViewMiddleware().process_view(req, token_view, (), {})
resp = token_view(req)
resp2 = CsrfViewMiddleware().process_response(req, resp)
csrf_cookie = resp2.cookies.get('myname', False)
self.assertNotEqual(csrf_cookie, False)
self.assertEqual(csrf_cookie['domain'], '.example.com')
self.assertEqual(csrf_cookie['secure'], True)
self.assertEqual(csrf_cookie['path'], '/test/')
self.assertTrue('Cookie' in resp2.get('Vary',''))
def test_process_response_get_token_not_used(self):
"""
Check that if get_token() is not called, the view middleware does not
add a cookie.
"""
# This is important to make pages cacheable. Pages which do call
# get_token(), assuming they use the token, are not cacheable because
# the token is specific to the user
req = self._get_GET_no_csrf_cookie_request()
# non_token_view_using_request_processor does not call get_token(), but
# does use the csrf request processor. By using this, we are testing
# that the view processor is properly lazy and doesn't call get_token()
# until needed.
CsrfViewMiddleware().process_view(req, non_token_view_using_request_processor, (), {})
resp = non_token_view_using_request_processor(req)
resp2 = CsrfViewMiddleware().process_response(req, resp)
csrf_cookie = resp2.cookies.get(settings.CSRF_COOKIE_NAME, False)
self.assertEqual(csrf_cookie, False)
# Check the request processing
def test_process_request_no_csrf_cookie(self):
"""
Check that if no CSRF cookies is present, the middleware rejects the
incoming request. This will stop login CSRF.
"""
req = self._get_POST_no_csrf_cookie_request()
req2 = CsrfViewMiddleware().process_view(req, post_form_view, (), {})
self.assertEqual(403, req2.status_code)
def test_process_request_csrf_cookie_no_token(self):
"""
Check that if a CSRF cookie is present but no token, the middleware
rejects the incoming request.
"""
req = self._get_POST_csrf_cookie_request()
req2 = CsrfViewMiddleware().process_view(req, post_form_view, (), {})
self.assertEqual(403, req2.status_code)
def test_process_request_csrf_cookie_and_token(self):
"""
Check that if both a cookie and a token is present, the middleware lets it through.
"""
req = self._get_POST_request_with_token()
req2 = CsrfViewMiddleware().process_view(req, post_form_view, (), {})
self.assertEqual(None, req2)
def test_process_request_csrf_cookie_no_token_exempt_view(self):
"""
Check that if a CSRF cookie is present and no token, but the csrf_exempt
decorator has been applied to the view, the middleware lets it through
"""
req = self._get_POST_csrf_cookie_request()
req2 = CsrfViewMiddleware().process_view(req, csrf_exempt(post_form_view), (), {})
self.assertEqual(None, req2)
def test_csrf_token_in_header(self):
"""
Check that we can pass in the token in a header instead of in the form
"""
req = self._get_POST_csrf_cookie_request()
req.META['HTTP_X_CSRFTOKEN'] = self._csrf_id
req2 = CsrfViewMiddleware().process_view(req, post_form_view, (), {})
self.assertEqual(None, req2)
def test_put_and_delete_rejected(self):
"""
Tests that HTTP PUT and DELETE methods have protection
"""
req = TestingHttpRequest()
req.method = 'PUT'
req2 = CsrfViewMiddleware().process_view(req, post_form_view, (), {})
self.assertEqual(403, req2.status_code)
req = TestingHttpRequest()
req.method = 'DELETE'
req2 = CsrfViewMiddleware().process_view(req, post_form_view, (), {})
self.assertEqual(403, req2.status_code)
def test_put_and_delete_allowed(self):
"""
Tests that HTTP PUT and DELETE methods can get through with
X-CSRFToken and a cookie
"""
req = self._get_GET_csrf_cookie_request()
req.method = 'PUT'
req.META['HTTP_X_CSRFTOKEN'] = self._csrf_id
req2 = CsrfViewMiddleware().process_view(req, post_form_view, (), {})
self.assertEqual(None, req2)
req = self._get_GET_csrf_cookie_request()
req.method = 'DELETE'
req.META['HTTP_X_CSRFTOKEN'] = self._csrf_id
req2 = CsrfViewMiddleware().process_view(req, post_form_view, (), {})
self.assertEqual(None, req2)
# Tests for the template tag method
def test_token_node_no_csrf_cookie(self):
"""
Check that CsrfTokenNode works when no CSRF cookie is set
"""
req = self._get_GET_no_csrf_cookie_request()
resp = token_view(req)
self.assertEqual(u"", resp.content)
def test_token_node_empty_csrf_cookie(self):
"""
Check that we get a new token if the csrf_cookie is the empty string
"""
req = self._get_GET_no_csrf_cookie_request()
req.COOKIES[settings.CSRF_COOKIE_NAME] = ""
CsrfViewMiddleware().process_view(req, token_view, (), {})
resp = token_view(req)
self.assertNotEqual(u"", resp.content)
def test_token_node_with_csrf_cookie(self):
"""
Check that CsrfTokenNode works when a CSRF cookie is set
"""
req = self._get_GET_csrf_cookie_request()
CsrfViewMiddleware().process_view(req, token_view, (), {})
resp = token_view(req)
self._check_token_present(resp)
def test_get_token_for_exempt_view(self):
"""
Check that get_token still works for a view decorated with 'csrf_exempt'.
"""
req = self._get_GET_csrf_cookie_request()
CsrfViewMiddleware().process_view(req, csrf_exempt(token_view), (), {})
resp = token_view(req)
self._check_token_present(resp)
def test_get_token_for_requires_csrf_token_view(self):
"""
Check that get_token works for a view decorated solely with requires_csrf_token
"""
req = self._get_GET_csrf_cookie_request()
resp = requires_csrf_token(token_view)(req)
self._check_token_present(resp)
def test_token_node_with_new_csrf_cookie(self):
"""
Check that CsrfTokenNode works when a CSRF cookie is created by
the middleware (when one was not already present)
"""
req = self._get_GET_no_csrf_cookie_request()
CsrfViewMiddleware().process_view(req, token_view, (), {})
resp = token_view(req)
resp2 = CsrfViewMiddleware().process_response(req, resp)
csrf_cookie = resp2.cookies[settings.CSRF_COOKIE_NAME]
self._check_token_present(resp, csrf_id=csrf_cookie.value)
def test_https_bad_referer(self):
"""
Test that a POST HTTPS request with a bad referer is rejected
"""
req = self._get_POST_request_with_token()
req._is_secure = True
req.META['HTTP_HOST'] = 'www.example.com'
req.META['HTTP_REFERER'] = 'https://www.evil.org/somepage'
req2 = CsrfViewMiddleware().process_view(req, post_form_view, (), {})
self.assertNotEqual(None, req2)
self.assertEqual(403, req2.status_code)
def test_https_good_referer(self):
"""
Test that a POST HTTPS request with a good referer is accepted
"""
req = self._get_POST_request_with_token()
req._is_secure = True
req.META['HTTP_HOST'] = 'www.example.com'
req.META['HTTP_REFERER'] = 'https://www.example.com/somepage'
req2 = CsrfViewMiddleware().process_view(req, post_form_view, (), {})
self.assertEqual(None, req2)
def test_https_good_referer_2(self):
"""
Test that a POST HTTPS request with a good referer is accepted
where the referer contains no trailing slash
"""
# See ticket #15617
req = self._get_POST_request_with_token()
req._is_secure = True
req.META['HTTP_HOST'] = 'www.example.com'
req.META['HTTP_REFERER'] = 'https://www.example.com'
req2 = CsrfViewMiddleware().process_view(req, post_form_view, (), {})
self.assertEqual(None, req2)
def test_ensures_csrf_cookie_no_middleware(self):
"""
Tests that ensures_csrf_cookie decorator fulfils its promise
with no middleware
"""
@ensure_csrf_cookie
def view(request):
# Doesn't insert a token or anything
return HttpResponse(content="")
req = self._get_GET_no_csrf_cookie_request()
resp = view(req)
self.assertTrue(resp.cookies.get(settings.CSRF_COOKIE_NAME, False))
self.assertTrue('Cookie' in resp.get('Vary',''))
def test_ensures_csrf_cookie_with_middleware(self):
"""
Tests that ensures_csrf_cookie decorator fulfils its promise
with the middleware enabled.
"""
@ensure_csrf_cookie
def view(request):
# Doesn't insert a token or anything
return HttpResponse(content="")
req = self._get_GET_no_csrf_cookie_request()
CsrfViewMiddleware().process_view(req, view, (), {})
resp = view(req)
resp2 = CsrfViewMiddleware().process_response(req, resp)
self.assertTrue(resp2.cookies.get(settings.CSRF_COOKIE_NAME, False))
self.assertTrue('Cookie' in resp2.get('Vary',''))
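# Hedged illustration of the pattern the tests above exercise (not part of the
# test suite): a POST form template must render {% csrf_token %} so that
# CsrfViewMiddleware accepts the submission. This mirrors token_view() and
# post_form_view(); the form field is a placeholder.
def protected_form_view(request):
    context = RequestContext(request, processors=[csrf])
    template = Template('<form method="post">{% csrf_token %}'
                        '<input type="text" name="q" /></form>')
    return HttpResponse(template.render(context))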
|
|
"""
process.py
"""
from helper import config as helper_config
from tornado import httpserver
from tornado import ioloop
import logging
import multiprocessing
import signal
import socket
import ssl
from tornado import version as tornado_version
from tinman import application
from tinman import config
from tinman import exceptions
LOGGER = logging.getLogger(__name__)
class Process(multiprocessing.Process):
"""The process holding the HTTPServer and Application"""
CERT_REQUIREMENTS = {config.NONE: ssl.CERT_NONE,
config.OPTIONAL: ssl.CERT_OPTIONAL,
config.REQUIRED: ssl.CERT_REQUIRED}
def __init__(self, group=None, target=None, name=None, args=(), kwargs={}):
"""Create a new instance of Process
"""
super(Process, self).__init__(group, target, name, args, kwargs)
# Passed in values
self.namespace = kwargs['namespace']
self.port = kwargs['port']
# Internal attributes holding instance information
self.app = None
self.http_server = None
self.request_counters = dict()
# Re-setup logging in the new process
self.logging_config = None
# If newrelic is passed, use it
if self.newrelic_ini_path:
self.setup_newrelic()
def create_application(self):
"""Create and return a new instance of tinman.application.Application"""
return application.Application(self.settings,
self.namespace.routes,
self.port)
def create_http_server(self):
"""Setup the HTTPServer
:rtype: tornado.httpserver.HTTPServer
"""
return self.start_http_server(self.port, self.http_config)
@property
def http_config(self):
"""Return a dictionary of HTTPServer arguments using the default values
as specified in the HTTPServer class docstrings if no values are
specified.
:param dict config: The HTTPServer specific section of the config
:rtype: dict
"""
return {config.NO_KEEP_ALIVE:
self.namespace.server.get(config.NO_KEEP_ALIVE, False),
config.SSL_OPTIONS: self.ssl_options,
config.XHEADERS: self.namespace.server.get(config.XHEADERS,
False)}
def on_sigabrt(self, signal_unused, frame_unused):
"""Stop the HTTP Server and IO Loop, shutting down the process
:param int signal_unused: Unused signal number
:param frame frame_unused: Unused frame the signal was caught in
"""
LOGGER.info('Stopping HTTP Server and IOLoop')
self.http_server.stop()
self.ioloop.stop()
def on_sighup(self, signal_unused, frame_unused):
"""Reload the configuration
:param int signal_unused: Unused signal number
:param frame frame_unused: Unused frame the signal was caught in
"""
# Update HTTP configuration
for setting in self.http_config:
if getattr(self.http_server, setting) != self.http_config[setting]:
LOGGER.debug('Changing HTTPServer %s setting', setting)
setattr(self.http_server, setting, self.http_config[setting])
# Update Application Settings
for setting in self.settings:
if self.app.settings[setting] != self.settings[setting]:
LOGGER.debug('Changing Application %s setting', setting)
self.app.settings[setting] = self.settings[setting]
# Update the routes
self.app.handlers = []
self.app.named_handlers = {}
routes = self.namespace.config.get(config.ROUTES)
self.app.add_handlers(".*$", self.app.prepare_routes(routes))
LOGGER.info('Configuration reloaded')
def run(self):
"""Called when the process has started
:param int port: The HTTP Server port
"""
LOGGER.debug('Initializing process')
# Setup logging
self.logging_config = self.setup_logging()
# Register the signal handlers
self.setup_signal_handlers()
# Create the application instance
try:
self.app = self.create_application()
except exceptions.NoRoutesException:
return
# Create the HTTPServer
self.http_server = self.create_http_server()
# Hold on to the IOLoop in case it's needed for responding to signals
self.ioloop = ioloop.IOLoop.instance()
# Start the IOLoop, blocking until it is stopped
try:
self.ioloop.start()
except KeyboardInterrupt:
pass
@property
def settings(self):
"""Return the Application configuration
:rtype: dict
"""
return dict(self.namespace.config)
def setup_logging(self):
return helper_config.LoggingConfig(self.namespace.logging)
@property
def newrelic_ini_path(self):
return self.namespace.config.get(config.NEWRELIC)
def setup_newrelic(self):
"""Setup the NewRelic python agent"""
import newrelic.agent
newrelic.agent.initialize(self.newrelic_ini_path)
def setup_signal_handlers(self):
"""Called when a child process is spawned to register the signal
handlers
"""
LOGGER.debug('Registering signal handlers')
signal.signal(signal.SIGABRT, self.on_sigabrt)
@property
def ssl_options(self):
"""Check the config to see if SSL configuration options have been passed
and replace none, option, and required with the correct values in
the certreqs attribute if it is specified.
:rtype: dict
"""
opts = self.namespace.server.get(config.SSL_OPTIONS) or dict()
if config.CERT_REQS in opts:
opts[config.CERT_REQS] = \
self.CERT_REQUIREMENTS[opts[config.CERT_REQS]]
return opts or None
def start_http_server(self, port, args):
"""Start the HTTPServer
:param int port: The port to run the HTTPServer on
:param dict args: Dictionary of arguments for HTTPServer
:rtype: tornado.httpserver.HTTPServer
"""
# Start the HTTP Server
LOGGER.info("Starting Tornado v%s HTTPServer on port %i Args: %r",
tornado_version, port, args)
http_server = httpserver.HTTPServer(self.app, **args)
http_server.bind(port, family=socket.AF_INET)
http_server.start(1)
return http_server
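# Hedged, self-contained sketch (assumes Tornado is installed) of the shutdown
# pattern used by Process.on_sigabrt above: a SIGABRT handler stops the
# HTTPServer and then the IOLoop so the process exits cleanly. The handler, port
# and process count below are placeholders, not values taken from tinman config.
def _demo_sigabrt_shutdown(port=8888):
    from tornado import web

    class PingHandler(web.RequestHandler):
        def get(self):
            self.write('pong')

    app = web.Application([(r'/ping', PingHandler)])
    server = httpserver.HTTPServer(app)
    server.bind(port, family=socket.AF_INET)
    server.start(1)
    loop = ioloop.IOLoop.instance()

    def on_sigabrt(signum, frame):
        # Mirror Process.on_sigabrt: stop the server, then the loop.
        server.stop()
        loop.add_callback_from_signal(loop.stop)

    signal.signal(signal.SIGABRT, on_sigabrt)
    loop.start()  # blocks until loop.stop() is called from the handler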
|
|
# coding=utf-8
# vim: tabstop=4 shiftwidth=4 softtabstop=4
#
# Copyright (c) 2012, Intel Performance Learning Solutions Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
The compute resource backend for OpenStack.
"""
#pylint: disable=W0232,R0201
import random
from occi import backend
from occi import exceptions
from occi_os_api.extensions import os_addon
from occi_os_api.nova_glue import vm
from occi_os_api.nova_glue import security
class OsComputeBackend(backend.MixinBackend, backend.ActionBackend):
"""
The OpenStackCompute backend.
"""
def retrieve(self, entity, extras):
"""
Add OpenStack related actions.
"""
uid = entity.attributes['occi.core.id']
context = extras['nova_ctx']
# set additional actions
if 'occi.compute.state' in entity.attributes and entity.attributes[
'occi.compute.state'] == 'active':
entity.actions.append(os_addon.OS_CREATE_IMAGE)
entity.actions.append(os_addon.OS_CHG_PWD)
# add VNC link if available
console = vm.get_vnc(uid, context)
if console:
entity.attributes['org.openstack.compute.console.vnc'] =\
console['url']
else:
entity.attributes['org.openstack.compute.console.vnc'] = 'N/A'
# also expose the exact openstack state
entity.attributes['org.openstack.compute.state'] = \
vm.get_vm(uid, context)['vm_state']
def action(self, entity, action, attributes, extras):
"""
This is called by pyssf when an action request is issued.
"""
context = extras['nova_ctx']
uid = entity.attributes['occi.core.id']
if action == os_addon.OS_CHG_PWD:
if 'org.openstack.credentials.admin_pwd' not in attributes:
msg = 'org.openstack.credentials.admin_pwd was not supplied'\
' in the request.'
raise AttributeError(msg)
new_password = attributes['org.openstack.credentials.admin_pwd']
vm.set_password_for_vm(uid, new_password, context)
elif action == os_addon.OS_CREATE_IMAGE:
if 'org.openstack.snapshot.image_name' not in attributes:
raise AttributeError('Missing image name')
image_name = attributes['org.openstack.snapshot.image_name']
vm.snapshot_vm(uid, image_name, context)
else:
raise AttributeError('Not an applicable action.')
class OsNetLinkBackend(backend.MixinBackend, backend.ActionBackend):
"""
The OpenStack network link backend.
"""
pass
class SecurityGroupBackend(backend.UserDefinedMixinBackend):
"""
Security Group backend.
"""
def init_sec_group(self, category, extras):
"""
Creates the security group as specified in the request.
"""
#do not recreate default openstack security groups
if category.scheme == \
'http://schemas.openstack.org/infrastructure/security/group#':
return
context = extras['nova_ctx']
group_name = category.term.strip()
group_description = (category.title.strip()
if category.title else group_name)
security.create_group(group_name, group_description, context)
def destroy(self, category, extras):
"""
Deletes the specified security group.
"""
context = extras['nova_ctx']
security_group = security.retrieve_group_by_name(category.term,
extras['nova_ctx'])
security.remove_group(security_group, context)
class SecurityRuleBackend(backend.KindBackend):
"""
Security rule backend.
"""
def create(self, entity, extras):
"""
Creates a security rule.
The group to add the rule to must exist.
In OCCI-speak this means the mixin must be supplied with the request
"""
sec_mixin = get_sec_mixin(entity)
context = extras['nova_ctx']
security_group = security.retrieve_group_by_name(sec_mixin.term,
context)
sg_rule = make_sec_rule(entity, security_group['id'])
if security_group_rule_exists(security_group, sg_rule):
#This rule already exists in group
msg = 'This rule already exists in group. %s' %\
str(security_group)
raise AttributeError(msg)
security.create_rule(sec_mixin.term, security_group['id'], [sg_rule],
context)
def delete(self, entity, extras):
"""
Deletes the security rule.
"""
try:
context = extras['nova_ctx']
rule = security.retrieve_rule(entity.attributes['occi.core.id'],
context)
security.remove_rule(rule, context)
except Exception as error:
raise exceptions.HTTPError(500, str(error))
def make_sec_rule(entity, sec_grp_id):
"""
Create and validate the security rule.
"""
# TODO: remove this one!
name = random.randrange(0, 99999999)
sg_rule = {'id': name,
'parent_group_id': sec_grp_id}
entity.attributes['occi.core.id'] = str(sg_rule['id'])
prot = \
entity.attributes['occi.network.security.protocol'].lower().strip()
if prot in ('tcp', 'udp', 'icmp'):
sg_rule['protocol'] = prot
else:
raise AttributeError('Invalid protocol defined:' + prot)
from_p = entity.attributes['occi.network.security.to'].strip()
from_p = int(from_p)
if (type(from_p) is int) and 0 < from_p <= 65535:
sg_rule['from_port'] = from_p
else:
raise AttributeError('No valid from port defined.')
to_p = entity.attributes['occi.network.security.to'].strip()
to_p = int(to_p)
if (type(to_p) is int) and 0 < to_p <= 65535:
sg_rule['to_port'] = to_p
else:
raise AttributeError('No valid to port defined.')
if from_p > to_p:
raise AttributeError('From port is bigger than to port defined.')
cidr = entity.attributes['occi.network.security.range'].strip()
if not cidr:
cidr = '0.0.0.0/0'
sg_rule['cidr'] = cidr
sg_rule['group'] = {}
return sg_rule
def get_sec_mixin(entity):
"""
Get the security mixin of the supplied entity.
"""
sec_mixin_present = 0
sec_mixin = None
for mixin in entity.mixins:
if os_addon.SEC_GROUP in mixin.related:
sec_mixin = mixin
sec_mixin_present += 1
if not sec_mixin_present:
# no mixin of the type security group was found
msg = 'No security group mixin was found'
raise AttributeError(msg)
if sec_mixin_present > 1:
msg = 'More than one security group mixin was found'
raise AttributeError(msg)
return sec_mixin
def security_group_rule_exists(security_group, values):
"""
Indicates whether the specified rule values are already
defined in the given security group.
"""
# Taken directly from security_groups.py as that method is not
# directly import-able.
for rule in security_group['rules']:
is_duplicate = True
keys = ('group_id', 'cidr', 'from_port', 'to_port', 'protocol')
for key in keys:
if rule.get(key) != values.get(key):
is_duplicate = False
break
if is_duplicate:
return True
return False
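# Illustrative, self-contained sketch of the duplicate-rule check above, using
# plain dictionaries shaped like the structures security_group_rule_exists
# compares; all values are made up.
def _demo_rule_exists():
    group = {'rules': [
        {'group_id': None, 'cidr': '0.0.0.0/0', 'from_port': 22,
         'to_port': 22, 'protocol': 'tcp'},
    ]}
    candidate = {'group_id': None, 'cidr': '0.0.0.0/0', 'from_port': 22,
                 'to_port': 22, 'protocol': 'tcp', 'parent_group_id': 1}
    return security_group_rule_exists(group, candidate)  # True: already defined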
|
|
import os
import subprocess
import sublime
from ..SublimeCscope import DEBUG, PACKAGE_NAME
from . import settings
from .indexer import PRIMARY_DB, SECONDARY_DB
from .cscope_results import CscopeBuildDbResult, CscopeQueryResult, CscopeResultLimitException
CSCOPE_FILE_LIST_EXT = 'files'
CSCOPE_DB_EXT = 'out'
CSCOPE_OPTIONS = {
'build_db_only': '-b',
'query_only': '-d',
'db_name' : '-f',
'inc_dir' : '-I',
'file_list': '-i',
'kernel_mode': '-k',
'line_mode_search': '-L',
'fast_index': '-q',
'force_db_rebuild': '-u',
'verbose': '-v',
'find_symbol': '-0',
'find_definition': '-1',
'find_callees': '-2',
'find_callers': '-3',
'find_string': '-4',
'find_egrep_pattern': '-6',
'find_files_including': '-8'
}
class CscopeRunner:
def __init__(self, cwd, win, results, arg_list):
self._win = win
self._cwd = cwd
self._results = results
self._arg_list = arg_list
@property
def _cscope(self):
def is_exe(f):
if not os.path.exists(f):
return False
if not os.path.isfile(f):
return False
if not os.access(f, os.X_OK):
print("%s: Found cscope candidate: %s but it is not an executable" %
(PACKAGE_NAME, f))
return False
return True
cscope = settings.get('cscope_path', self._win)
if cscope and is_exe(cscope):
return cscope
path_env = os.environ.get('PATH', os.defpath)
for path in path_env.split(os.pathsep):
cscope = os.path.join(path, 'cscope')
if 'nt' == os.name:
cscope = os.extsep.join([cscope, 'exe'])
if is_exe(cscope):
if DEBUG: print("Saving %s in settings" % cscope)
# save it for later since it will most likely not change
settings.load_settings().set('cscope_path', cscope)
return cscope
raise FileNotFoundError("cscope executable not found in PATH")
def run(self):
cmd = [self._cscope]
env = {}
if not self._cwd:
print("%s-CscopeRunner: No working directory given. Aborting")
return
tmp_folder = settings.get('tmp_folder', self._win)
kernel_mode = not bool(settings.get('search_std_include_folders', self._win))
extra_inc_folders = settings.get('extra_include_folders', self._win)
if tmp_folder:
env['TMPDIR'] = tmp_folder
if kernel_mode:
cmd.append(CSCOPE_OPTIONS['kernel_mode'])
for folder in extra_inc_folders:
cmd.extend([CSCOPE_OPTIONS['inc_dir'], folder])
cmd.extend(self._arg_list)
if DEBUG: print("%s-CscopeRunner: About to run %s" % (PACKAGE_NAME, cmd))
try:
with subprocess.Popen(cmd, cwd=self._cwd,
universal_newlines=True,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT) as p:
for line in p.stdout:
if not line:
continue
self._results.parse(line)
except subprocess.CalledProcessError as e:
print("%s: Running cscope returned an error. cmd_line: %s, cwd: %s, error_code: %d"
% (PACKAGE_NAME, e.cmd, self._cwd, e.returncode))
except CscopeResultLimitException as le:
sublime.error_message(str(le))
finally:
self._results.parse(None)
class CscopeBuildDbCommand:
def __init__(self, cwd, win=None, force_rebuild=False, name=SECONDARY_DB):
if not win:
win = sublime.active_window()
args = []
file_list = os.extsep.join([name, CSCOPE_FILE_LIST_EXT])
db_name = os.extsep.join([name, CSCOPE_DB_EXT])
args.append(CSCOPE_OPTIONS['build_db_only'])
args.append(CSCOPE_OPTIONS['fast_index'])
args.append(CSCOPE_OPTIONS['verbose'])
args.append("%s%s" % (CSCOPE_OPTIONS['file_list'], file_list))
args.append("%s%s" % (CSCOPE_OPTIONS['db_name'], db_name))
if force_rebuild:
args.append(CSCOPE_OPTIONS['force_db_rebuild'])
self._results = CscopeBuildDbResult()
self._runner = CscopeRunner(cwd, win, self._results, args)
@property
def results(self):
return self._results
def run(self):
self._runner.run()
class CscopeQueryCommand:
def __init__(self, action, search_term, win=None):
self._win = win
if not self._win:
self._win = sublime.active_window()
from .indexer import get_db_location
self._cwd = get_db_location(win)
self._results = CscopeQueryResult(settings.get('maximum_results', self._win))
self._action = action
self._search_term = search_term
def _run_once(self, db_name, file_list=None, filter=None):
args = []
if file_list:
args.append("%s%s" % (CSCOPE_OPTIONS['file_list'], file_list))
else:
args.append(CSCOPE_OPTIONS['query_only'])
args.append(CSCOPE_OPTIONS['line_mode_search'])
args.append("%s%s" % (CSCOPE_OPTIONS[self._action], self._search_term))
args.append("%s%s" % (CSCOPE_OPTIONS['db_name'], db_name))
if filter:
self._results.filter = filter
runner = self._runner = CscopeRunner(self._cwd, self._win, self._results, args)
runner.run()
@property
def results(self):
return self._results
def run(self):
file_list = os.extsep.join([PRIMARY_DB, CSCOPE_FILE_LIST_EXT])
db_name = os.extsep.join([PRIMARY_DB, CSCOPE_DB_EXT])
file_filter = []
file_list_path_name = os.path.join(self._cwd, file_list)
if os.path.isfile(file_list_path_name):
if DEBUG:
print("CscopeQueryCommand: querying primary DB")
with open(file_list_path_name) as f:
# materialise as a list so the filter can be reused after the file is closed
file_filter = [line.strip() for line in f if line.strip()]
self._run_once(db_name, file_list=file_list)
db_name = os.extsep.join([SECONDARY_DB, CSCOPE_DB_EXT])
if os.path.isfile(os.path.join(self._cwd, db_name)):
if DEBUG:
print("CscopeQueryCommand: querying secondary DB")
self._run_once(db_name, filter=file_filter)
def generate_index(cwd, win, force_rebuild=False):
build_db_command = CscopeBuildDbCommand(cwd, win=win, force_rebuild=force_rebuild)
build_db_command.run()
return build_db_command.results
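# Hedged, standalone sketch of the line-mode query command that
# CscopeQueryCommand assembles above. It assumes a `cscope` binary on PATH and a
# previously built `cscope.out` database in `cwd`; both are placeholders.
def _demo_line_mode_query(symbol, cwd='.'):
    cmd = [
        'cscope',
        CSCOPE_OPTIONS['query_only'],        # -d: use the existing database
        CSCOPE_OPTIONS['line_mode_search'],  # -L: line-oriented output
        '%s%s' % (CSCOPE_OPTIONS['find_definition'], symbol),   # -1<symbol>
        '%s%s' % (CSCOPE_OPTIONS['db_name'], 'cscope.out'),     # -fcscope.out
    ]
    output = subprocess.check_output(cmd, cwd=cwd, universal_newlines=True)
    return output.splitlines()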
|
|
"""Fabfile for the ``webfaction-django-boilerplate``.
Make sure to setup your ``fabric_settings.py`` first. As a start, just copy
``fabric_settings.py.sample``.
"""
from __future__ import with_statement
import os
from fabric.api import (
cd,
env,
lcd,
local,
run,
settings,
)
from fabric.contrib.files import append, contains, exists, sed
import fabric_settings as fab_settings
env.hosts = fab_settings.ENV_HOSTS
env.user = fab_settings.ENV_USER
BASHRC_SETTING1 = 'export VIRTUALENVWRAPPER_PYTHON=/usr/local/bin/python2.7'
BASHRC_SETTING2 = 'export WORKON_HOME=$HOME/Envs'
BASHRC_SETTING3 = 'source /home/{0}/bin/virtualenvwrapper.sh'.format(env.user)
BASHRC_SETTING4 = 'export PIP_VIRTUALENV_BASE=$WORKON_HOME'
BASHRC_SETTING5 = 'export PIP_RESPECT_VIRTUALENV=true'
PROJECT_NAME = fab_settings.PROJECT_NAME
FILE_SCRIPT_SETTINGS = 'script-settings-{0}.sh'.format(PROJECT_NAME)
FILE_DEPLOY_WEBSITE = 'deploy-website-{0}.sh'.format(PROJECT_NAME)
FILE_MYSQL_BACKUP = 'mysql-backup-{0}.sh'.format(PROJECT_NAME)
FILE_PG_BACKUP = 'pg-backup-{0}.sh'.format(PROJECT_NAME)
FILE_LOCALE_BACKUP = 'locale-backup-{0}.sh'.format(PROJECT_NAME)
FILE_RESTART_APACHE = 'restart-apache-{0}.sh'.format(PROJECT_NAME)
FILE_DJANGO_CLEANUP = 'django-cleanup-{0}.sh'.format(PROJECT_NAME)
FILE_CRONTAB = 'crontab-{0}.txt'.format(PROJECT_NAME)
FILE_SHOW_MEMORY = 'show-memory.sh'
FILE_PGPASS = '.pgpass-{0}'.format(PROJECT_NAME)
# ****************************************************************************
# HIGH LEVEL TASKS
# ****************************************************************************
def install_everything():
install_local_repo()
install_server()
local_link_repo_with_remote_repo()
first_deployment()
def first_deployment():
run_delete_previous_attempts()
run_create_virtualenv()
run_clone_repo()
run_install_scripts()
run_install_pgpass()
run_install_crontab()
run_delete_django()
run_install_requirements()
run_deploy_website(with_manage_py=False)
run_prepare_local_settings()
run_deploy_website()
run_loaddata_auth()
def install_local_repo():
local_create_new_repo()
local_init_django_project()
local_create_fab_settings()
local_initial_commit()
def install_server():
run_install_virtualenv()
run_install_mercurial()
run_add_bashrc_settings()
run_create_git_repo()
run_delete_index_files()
# ****************************************************************************
# LOCAL TASKS
# ****************************************************************************
def local_link_repo_with_remote_repo():
with lcd(fab_settings.PROJECT_ROOT):
local('git config http.sslVerify false')
local('git config http.postBuffer 524288000')
with settings(warn_only=True):
local('git remote rm origin')
local('git remote add origin'
' {0}@{0}.webfactional.com:'
'/home/{0}/webapps/git/repos/{1}'.format(
fab_settings.ENV_USER, fab_settings.GIT_REPO_NAME))
local('git push -u origin master')
def local_create_fab_settings():
fabfile_dir = os.path.join(fab_settings.PROJECT_ROOT, 'website',
'webapps', 'django', 'myproject', 'fabfile')
with lcd(fabfile_dir):
local('cp fab_settings.py.sample fab_settings.py')
local("sed -i -r -e 's/INSERT_PROJECT_NAME/{0}/g'"
" fab_settings.py".format(PROJECT_NAME))
local("sed -i -r -e 's/INSERT_ENV_USER/{0}/g'"
" fab_settings.py".format(fab_settings.ENV_USER))
def local_create_new_repo():
with lcd(fab_settings.PROJECT_ROOT):
local('rm -rf .git')
local('rm -f .gitmodules')
local('rm -rf website/webapps/django/myproject/submodules/bootstrap')
local('git init')
local('git submodule add git://github.com/twitter/bootstrap.git'
' website/webapps/django/myproject/submodules/bootstrap')
def local_init_django_project():
with lcd(fab_settings.DJANGO_PROJECT_ROOT):
# prepare local_settings.py
local('cp myproject/settings/local/local_settings.py.sample'
' myproject/settings/local/local_settings.py')
local("sed -i -r -e 's/MEDIA_APP_NAME/media/g'"
" myproject/settings/local/local_settings.py")
local("sed -i -r -e 's/STATIC_APP_NAME/static/g'"
" myproject/settings/local/local_settings.py")
local('cp fabfile/fab_settings.py.sample'
' fabfile/fab_settings.py')
# prepare wsgi.py
local("sed -i -r -e 's/ENV_USER/{0}/g'"
" myproject/wsgi.py".format(fab_settings.ENV_USER))
local("sed -i -r -e 's/VENV_NAME/{0}/g'"
" myproject/wsgi.py".format(fab_settings.VENV_NAME))
local("sed -i -r -e 's/DJANGO_APP_NAME/{0}/g'"
" myproject/wsgi.py".format(fab_settings.DJANGO_APP_NAME))
# initialize local Django project
local('python manage.py syncdb --all --noinput')
local('python manage.py migrate --fake')
local('python manage.py loaddata bootstrap_auth.json')
def local_initial_commit():
with lcd(fab_settings.PROJECT_ROOT):
local('git add .')
local('git commit -am "Initial commit."')
# ****************************************************************************
# REMOTE TASKS
# ****************************************************************************
def run_add_bashrc_settings():
with cd('$HOME'):
append('.bashrc', BASHRC_SETTING1, partial=True)
append('.bashrc', BASHRC_SETTING2, partial=True)
append('.bashrc', BASHRC_SETTING3, partial=True)
append('.bashrc', BASHRC_SETTING4, partial=True)
append('.bashrc', BASHRC_SETTING5, partial=True)
def run_clone_repo():
run('mkdir -p $HOME/src')
cloned_repo_path = '$HOME/src/{0}'.format(PROJECT_NAME)
if exists(cloned_repo_path):
run('rm -rf {0}'.format(cloned_repo_path))
with cd('$HOME/src'):
run('git clone $HOME/webapps/git/repos/{0} {1}'.format(
fab_settings.GIT_REPO_NAME, PROJECT_NAME))
with cd('$HOME/src/{0}'.format(PROJECT_NAME)):
run('git submodule init')
run('git submodule update')
def run_create_git_repo():
run('rm -rf $HOME/webapps/git/repos/{0}'.format(
fab_settings.GIT_REPO_NAME))
with cd('$HOME/webapps/git'):
run('git init --bare ./repos/{0}'.format(fab_settings.GIT_REPO_NAME))
with cd('$HOME/webapps/git/repos/{0}'.format(fab_settings.GIT_REPO_NAME)):
run('git config http.receivepack true')
def run_create_ssh_dir():
with cd('$HOME'):
with settings(warn_only=True):
run('mkdir .ssh')
run('touch .ssh/authorized_keys')
run('chmod 600 .ssh/authorized_keys')
run('chmod 700 .ssh')
def run_create_virtualenv():
with cd('$HOME'):
run('rm -rf $HOME/Envs/{0}'.format(fab_settings.VENV_NAME))
run('mkvirtualenv -p python2.7 --system-site-packages {0}'.format(
fab_settings.VENV_NAME))
def run_delete_index_files():
run('rm -f $HOME/webapps/{0}/index.html'.format(
fab_settings.MEDIA_APP_NAME))
run('rm -f $HOME/webapps/{0}/index.html'.format(
fab_settings.STATIC_APP_NAME))
def run_delete_previous_attempts():
run('rm -rf $HOME/webapps/{0}/myproject'.format(
fab_settings.DJANGO_APP_NAME))
run('rm -rf $HOME/Envs/{0}/'.format(fab_settings.VENV_NAME))
run('rm -rf $HOME/src/{0}/'.format(PROJECT_NAME))
run('rm -rf $HOME/bin/*{0}*.*'.format(PROJECT_NAME))
with cd('$HOME'):
run('touch .pgpass')
run("sed '/{0}/d' .pgpass > .pgpass_tmp".format(fab_settings.DB_NAME))
run('mv .pgpass_tmp .pgpass')
run('crontab -l > crontab_bak')
run("sed '/{0}.sh/d' crontab_bak > crontab_tmp".format(
fab_settings.PROJECT_NAME))
run('crontab crontab_tmp')
run('rm crontab_tmp')
def run_deploy_website(with_manage_py=True):
args = ' 1'
if with_manage_py:
args = ''
run('workon {0} && deploy-website-{1}.sh{2}'.format(fab_settings.VENV_NAME,
PROJECT_NAME, args))
def run_install_crontab():
run('mkdir -p $HOME/mylogs/cron/')
with cd('$HOME/bin/'):
run('crontab -l > crontab_tmp')
run('cat crontab-{0}.txt >> crontab_tmp'.format(
PROJECT_NAME))
run('crontab crontab_tmp')
run('rm crontab_tmp')
def run_install_mercurial():
with cd('$HOME'):
run('easy_install-2.7 mercurial')
def run_install_pgpass():
with cd('$HOME'):
run('touch .pgpass')
run('chmod 0600 .pgpass')
if not contains('.pgpass', fab_settings.DB_NAME):
run('cat {0} >> .pgpass'.format(FILE_PGPASS))
run('rm {0}'.format(FILE_PGPASS))
def run_install_requirements():
run('workon {0} && pip install -r $HOME/src/{1}/website/webapps/django/'
'myproject/requirements.txt --upgrade'.format(
fab_settings.VENV_NAME, PROJECT_NAME))
def run_install_scripts():
with cd('$HOME/src/{0}/scripts'.format(PROJECT_NAME)):
run('git pull origin master')
run('cp deploy-website.sh $HOME/bin/{0}'.format(FILE_DEPLOY_WEBSITE))
run('cp mysql-backup.sh $HOME/bin/{0}'.format(FILE_MYSQL_BACKUP))
run('cp pg-backup.sh $HOME/bin/{0}'.format(FILE_PG_BACKUP))
run('cp locale-backup.sh $HOME/bin/{0}'.format(FILE_LOCALE_BACKUP))
run('cp restart-apache.sh $HOME/bin/{0}'.format(FILE_RESTART_APACHE))
run('cp django-cleanup.sh $HOME/bin/{0}'.format(FILE_DJANGO_CLEANUP))
run('cp script-settings.sh $HOME/bin/{0}'.format(FILE_SCRIPT_SETTINGS))
run('cp crontab.txt $HOME/bin/{0}'.format(FILE_CRONTAB))
run('cp {0} $HOME/bin/{0}'.format(FILE_SHOW_MEMORY))
# This one goes to $HOME
run('cp .pgpass $HOME/{0}'.format(FILE_PGPASS))
with cd('$HOME/bin'):
sed(FILE_SCRIPT_SETTINGS, 'INSERT_USERNAME', fab_settings.ENV_USER)
sed(FILE_SCRIPT_SETTINGS, 'INSERT_DB_USER', fab_settings.DB_USER)
sed(FILE_SCRIPT_SETTINGS, 'INSERT_DB_NAME', fab_settings.DB_NAME)
sed(FILE_SCRIPT_SETTINGS, 'INSERT_DB_PASSWORD',
fab_settings.DB_PASSWORD)
sed(FILE_SCRIPT_SETTINGS, 'INSERT_PROJECT_NAME', PROJECT_NAME)
sed(FILE_SCRIPT_SETTINGS, 'INSERT_DJANGO_APP_NAME',
fab_settings.DJANGO_APP_NAME)
sed(FILE_SCRIPT_SETTINGS, 'INSERT_VENV_NAME', fab_settings.VENV_NAME)
sed(FILE_DEPLOY_WEBSITE, 'INSERT_PROJECTNAME', PROJECT_NAME)
sed(FILE_MYSQL_BACKUP, 'INSERT_PROJECTNAME', PROJECT_NAME)
sed(FILE_PG_BACKUP, 'INSERT_PROJECTNAME', PROJECT_NAME)
sed(FILE_LOCALE_BACKUP, 'INSERT_PROJECTNAME', PROJECT_NAME)
sed(FILE_RESTART_APACHE, 'INSERT_PROJECTNAME', PROJECT_NAME)
sed(FILE_DJANGO_CLEANUP, 'INSERT_PROJECTNAME', PROJECT_NAME)
sed(FILE_CRONTAB, 'INSERT_PROJECTNAME', PROJECT_NAME)
sed(FILE_SHOW_MEMORY, 'INSERT_PROJECTNAME', PROJECT_NAME)
run('rm -f *.bak')
with cd('$HOME'):
sed(FILE_PGPASS, 'INSERT_DB_NAME', fab_settings.DB_NAME)
sed(FILE_PGPASS, 'INSERT_DB_USER', fab_settings.DB_USER)
sed(FILE_PGPASS, 'INSERT_DB_PASSWORD', fab_settings.DB_PASSWORD)
def run_install_virtualenv():
with cd('$HOME'):
run('mkdir -p $HOME/lib/python2.7')
run('easy_install-2.7 virtualenv')
run('easy_install-2.7 pip')
run('pip install virtualenvwrapper')
run('mkdir -p $HOME/Envs')
def run_loaddata_auth():
with cd('$HOME/webapps/{0}/myproject/'.format(
fab_settings.DJANGO_APP_NAME)):
run('workon {0} && ./manage.py loaddata bootstrap_auth.json'.format(
fab_settings.VENV_NAME))
def run_prepare_local_settings():
with cd('$HOME/webapps/{0}/myproject/myproject/settings/local'.format(
fab_settings.DJANGO_APP_NAME)):
run('cp local_settings.py.sample local_settings.py')
sed('local_settings.py', 'backends.sqlite3',
'backends.postgresql_psycopg2')
sed('local_settings.py', 'db.sqlite', fab_settings.DB_NAME)
sed('local_settings.py', '"USER": ""', '"USER": "{0}"'.format(
fab_settings.DB_USER))
sed('local_settings.py', '"PASSWORD": ""', '"PASSWORD": "{0}"'.format(
fab_settings.DB_PASSWORD))
sed('local_settings.py', 'yourproject', '{0}'.format(
PROJECT_NAME))
sed('local_settings.py', '##EMAIL_BACKEND', 'EMAIL_BACKEND')
sed('local_settings.py', 'FROM_EMAIL = "[email protected]"',
'FROM_EMAIL = "{0}"'.format(fab_settings.EMAIL_DEFAULT_FROM_EMAIL))
sed('local_settings.py', 'MAILER_EMAIL_BACKEND', '#MAILER_EMAIL_BACKEND') # NOQA
sed('local_settings.py', 'TEST_EMAIL_BACKEND_RECEPIENTS', '#TEST_EMAIL_BACKEND_RECEPIENTS') # NOQA
sed('local_settings.py', 'FROM_EMAIL =', '#FROM_EMAIL =')
sed('local_settings.py', '##FROM_EMAIL', 'FROM_EMAIL')
sed('local_settings.py', 'DEFAULT_#FROM_EMAIL', 'DEFAULT_FROM_EMAIL')
sed('local_settings.py', 'EMAIL_SUBJECT_PREFIX', '#EMAIL_SUBJECT_PREFIX') # NOQA
sed('local_settings.py', '##EMAIL_SUBJECT_PREFIX', 'EMAIL_SUBJECT_PREFIX') # NOQA
sed('local_settings.py', 'EMAIL_HOST =', '#EMAIL_HOST =')
sed('local_settings.py', '##EMAIL_HOST', 'EMAIL_HOST')
sed('local_settings.py', 'EMAIL_HOST_USER = FROM_EMAIL', '#EMAIL_HOST_USER = FROM_EMAIL') # NOQA
sed('local_settings.py', '#EMAIL_HOST_USER = ""',
'EMAIL_HOST_USER = "{0}"'.format(fab_settings.EMAIL_INBOX))
sed('local_settings.py', 'EMAIL_HOST_PASSWORD', '#EMAIL_HOST_PASSWORD')
sed('local_settings.py', '##EMAIL_HOST_PASSWORD = ""',
'EMAIL_HOST_PASSWORD = "{0}"'.format(fab_settings.EMAIL_PASSWORD))
sed('local_settings.py', 'EMAIL_PORT', '#EMAIL_PORT')
sed('local_settings.py', '##EMAIL_PORT', 'EMAIL_PORT')
sed('local_settings.py', 'MEDIA_APP_NAME', fab_settings.MEDIA_APP_NAME)
sed('local_settings.py', 'STATIC_APP_NAME',
fab_settings.STATIC_APP_NAME)
sed('local_settings.py', 'yourname', fab_settings.ADMIN_NAME)
sed('local_settings.py', '[email protected]', fab_settings.ADMIN_EMAIL)
run('rm -f *.bak')
def run_delete_django():
with cd('$HOME/webapps/{0}/lib/python2.7/'.format(
fab_settings.DJANGO_APP_NAME)):
run('rm -rf django')
run('rm -rf Django*')
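# --- Illustrative addition (not part of the original fabfile) ----------------
# A hedged sketch of how the helper tasks above could be chained into a single
# bootstrap task. The task name and the ordering are assumptions; adjust them
# to the real deployment flow before use.
def run_bootstrap_webapp():
    """Hypothetical convenience task chaining the setup steps defined above."""
    run_install_virtualenv()
    run_prepare_local_settings()
    run_delete_django()
    run_loaddata_auth()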
|
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
import functools
from typing import Any, AsyncIterable, Callable, Dict, Generic, Optional, TypeVar, Union
import warnings
from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse
from azure.core.polling import AsyncLROPoller, AsyncNoPolling, AsyncPollingMethod
from azure.core.rest import HttpRequest
from azure.core.tracing.decorator import distributed_trace
from azure.core.tracing.decorator_async import distributed_trace_async
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.async_arm_polling import AsyncARMPolling
from ... import models as _models
from ..._vendor import _convert_request
from ...operations._managed_clusters_operations import build_create_or_update_request_initial, build_delete_request_initial, build_get_access_profile_request, build_get_request, build_get_upgrade_profile_request, build_list_by_resource_group_request, build_list_cluster_admin_credentials_request, build_list_cluster_monitoring_user_credentials_request, build_list_cluster_user_credentials_request, build_list_request, build_reset_aad_profile_request_initial, build_reset_service_principal_profile_request_initial, build_rotate_cluster_certificates_request_initial, build_update_tags_request_initial
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class ManagedClustersOperations:
"""ManagedClustersOperations async operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.containerservice.v2019_10_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer) -> None:
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
@distributed_trace
def list(
self,
**kwargs: Any
) -> AsyncIterable["_models.ManagedClusterListResult"]:
"""Gets a list of managed clusters in the specified subscription.
Gets a list of managed clusters in the specified subscription. The operation returns properties
of each managed cluster.
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either ManagedClusterListResult or the result of
cls(response)
:rtype:
~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.containerservice.v2019_10_01.models.ManagedClusterListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.ManagedClusterListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
def prepare_request(next_link=None):
if not next_link:
request = build_list_request(
subscription_id=self._config.subscription_id,
template_url=self.list.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
else:
request = build_list_request(
subscription_id=self._config.subscription_id,
template_url=next_link,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
request.method = "GET"
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize("ManagedClusterListResult", pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.ContainerService/managedClusters'} # type: ignore
@distributed_trace
def list_by_resource_group(
self,
resource_group_name: str,
**kwargs: Any
) -> AsyncIterable["_models.ManagedClusterListResult"]:
"""Lists managed clusters in the specified subscription and resource group.
Lists managed clusters in the specified subscription and resource group. The operation returns
properties of each managed cluster.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either ManagedClusterListResult or the result of
cls(response)
:rtype:
~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.containerservice.v2019_10_01.models.ManagedClusterListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.ManagedClusterListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
def prepare_request(next_link=None):
if not next_link:
request = build_list_by_resource_group_request(
subscription_id=self._config.subscription_id,
resource_group_name=resource_group_name,
template_url=self.list_by_resource_group.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
else:
request = build_list_by_resource_group_request(
subscription_id=self._config.subscription_id,
resource_group_name=resource_group_name,
template_url=next_link,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
request.method = "GET"
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize("ManagedClusterListResult", pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list_by_resource_group.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedClusters'} # type: ignore
@distributed_trace_async
async def get_upgrade_profile(
self,
resource_group_name: str,
resource_name: str,
**kwargs: Any
) -> "_models.ManagedClusterUpgradeProfile":
"""Gets upgrade profile for a managed cluster.
Gets the details of the upgrade profile for a managed cluster with a specified resource group
and name.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param resource_name: The name of the managed cluster resource.
:type resource_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: ManagedClusterUpgradeProfile, or the result of cls(response)
:rtype: ~azure.mgmt.containerservice.v2019_10_01.models.ManagedClusterUpgradeProfile
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.ManagedClusterUpgradeProfile"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
request = build_get_upgrade_profile_request(
subscription_id=self._config.subscription_id,
resource_group_name=resource_group_name,
resource_name=resource_name,
template_url=self.get_upgrade_profile.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('ManagedClusterUpgradeProfile', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get_upgrade_profile.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedClusters/{resourceName}/upgradeProfiles/default'} # type: ignore
@distributed_trace_async
async def get_access_profile(
self,
resource_group_name: str,
resource_name: str,
role_name: str,
**kwargs: Any
) -> "_models.ManagedClusterAccessProfile":
"""Gets an access profile of a managed cluster.
Gets the accessProfile for the specified role name of the managed cluster with a specified
resource group and name.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param resource_name: The name of the managed cluster resource.
:type resource_name: str
:param role_name: The name of the role for managed cluster accessProfile resource.
:type role_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: ManagedClusterAccessProfile, or the result of cls(response)
:rtype: ~azure.mgmt.containerservice.v2019_10_01.models.ManagedClusterAccessProfile
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.ManagedClusterAccessProfile"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
request = build_get_access_profile_request(
subscription_id=self._config.subscription_id,
resource_group_name=resource_group_name,
resource_name=resource_name,
role_name=role_name,
template_url=self.get_access_profile.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('ManagedClusterAccessProfile', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get_access_profile.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedClusters/{resourceName}/accessProfiles/{roleName}/listCredential'} # type: ignore
@distributed_trace_async
async def list_cluster_admin_credentials(
self,
resource_group_name: str,
resource_name: str,
**kwargs: Any
) -> "_models.CredentialResults":
"""Gets cluster admin credential of a managed cluster.
Gets cluster admin credential of the managed cluster with a specified resource group and name.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param resource_name: The name of the managed cluster resource.
:type resource_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: CredentialResults, or the result of cls(response)
:rtype: ~azure.mgmt.containerservice.v2019_10_01.models.CredentialResults
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.CredentialResults"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
request = build_list_cluster_admin_credentials_request(
subscription_id=self._config.subscription_id,
resource_group_name=resource_group_name,
resource_name=resource_name,
template_url=self.list_cluster_admin_credentials.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('CredentialResults', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
list_cluster_admin_credentials.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedClusters/{resourceName}/listClusterAdminCredential'} # type: ignore
@distributed_trace_async
async def list_cluster_user_credentials(
self,
resource_group_name: str,
resource_name: str,
**kwargs: Any
) -> "_models.CredentialResults":
"""Gets cluster user credential of a managed cluster.
Gets cluster user credential of the managed cluster with a specified resource group and name.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param resource_name: The name of the managed cluster resource.
:type resource_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: CredentialResults, or the result of cls(response)
:rtype: ~azure.mgmt.containerservice.v2019_10_01.models.CredentialResults
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.CredentialResults"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
request = build_list_cluster_user_credentials_request(
subscription_id=self._config.subscription_id,
resource_group_name=resource_group_name,
resource_name=resource_name,
template_url=self.list_cluster_user_credentials.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('CredentialResults', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
list_cluster_user_credentials.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedClusters/{resourceName}/listClusterUserCredential'} # type: ignore
@distributed_trace_async
async def list_cluster_monitoring_user_credentials(
self,
resource_group_name: str,
resource_name: str,
**kwargs: Any
) -> "_models.CredentialResults":
"""Gets cluster monitoring user credential of a managed cluster.
Gets cluster monitoring user credential of the managed cluster with a specified resource group
and name.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param resource_name: The name of the managed cluster resource.
:type resource_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: CredentialResults, or the result of cls(response)
:rtype: ~azure.mgmt.containerservice.v2019_10_01.models.CredentialResults
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.CredentialResults"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
request = build_list_cluster_monitoring_user_credentials_request(
subscription_id=self._config.subscription_id,
resource_group_name=resource_group_name,
resource_name=resource_name,
template_url=self.list_cluster_monitoring_user_credentials.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('CredentialResults', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
list_cluster_monitoring_user_credentials.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedClusters/{resourceName}/listClusterMonitoringUserCredential'} # type: ignore
@distributed_trace_async
async def get(
self,
resource_group_name: str,
resource_name: str,
**kwargs: Any
) -> "_models.ManagedCluster":
"""Gets a managed cluster.
Gets the details of the managed cluster with a specified resource group and name.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param resource_name: The name of the managed cluster resource.
:type resource_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: ManagedCluster, or the result of cls(response)
:rtype: ~azure.mgmt.containerservice.v2019_10_01.models.ManagedCluster
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.ManagedCluster"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
request = build_get_request(
subscription_id=self._config.subscription_id,
resource_group_name=resource_group_name,
resource_name=resource_name,
template_url=self.get.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('ManagedCluster', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedClusters/{resourceName}'} # type: ignore
async def _create_or_update_initial(
self,
resource_group_name: str,
resource_name: str,
parameters: "_models.ManagedCluster",
**kwargs: Any
) -> "_models.ManagedCluster":
cls = kwargs.pop('cls', None) # type: ClsType["_models.ManagedCluster"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]
_json = self._serialize.body(parameters, 'ManagedCluster')
request = build_create_or_update_request_initial(
subscription_id=self._config.subscription_id,
resource_group_name=resource_group_name,
resource_name=resource_name,
content_type=content_type,
json=_json,
template_url=self._create_or_update_initial.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 201]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize('ManagedCluster', pipeline_response)
if response.status_code == 201:
deserialized = self._deserialize('ManagedCluster', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_create_or_update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedClusters/{resourceName}'} # type: ignore
@distributed_trace_async
async def begin_create_or_update(
self,
resource_group_name: str,
resource_name: str,
parameters: "_models.ManagedCluster",
**kwargs: Any
) -> AsyncLROPoller["_models.ManagedCluster"]:
"""Creates or updates a managed cluster.
Creates or updates a managed cluster with the specified configuration for agents and Kubernetes
version.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param resource_name: The name of the managed cluster resource.
:type resource_name: str
:param parameters: Parameters supplied to the Create or Update a Managed Cluster operation.
:type parameters: ~azure.mgmt.containerservice.v2019_10_01.models.ManagedCluster
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
this operation to not poll, or pass in your own initialized polling object for a personal
polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either ManagedCluster or the result of
cls(response)
:rtype:
~azure.core.polling.AsyncLROPoller[~azure.mgmt.containerservice.v2019_10_01.models.ManagedCluster]
:raises: ~azure.core.exceptions.HttpResponseError
"""
content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]
polling = kwargs.pop('polling', True) # type: Union[bool, azure.core.polling.AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.ManagedCluster"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._create_or_update_initial(
resource_group_name=resource_group_name,
resource_name=resource_name,
parameters=parameters,
content_type=content_type,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
def get_long_running_output(pipeline_response):
response = pipeline_response.http_response
deserialized = self._deserialize('ManagedCluster', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
if polling is True: polling_method = AsyncARMPolling(lro_delay, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedClusters/{resourceName}'} # type: ignore
async def _update_tags_initial(
self,
resource_group_name: str,
resource_name: str,
parameters: "_models.TagsObject",
**kwargs: Any
) -> "_models.ManagedCluster":
cls = kwargs.pop('cls', None) # type: ClsType["_models.ManagedCluster"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]
_json = self._serialize.body(parameters, 'TagsObject')
request = build_update_tags_request_initial(
subscription_id=self._config.subscription_id,
resource_group_name=resource_group_name,
resource_name=resource_name,
content_type=content_type,
json=_json,
template_url=self._update_tags_initial.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('ManagedCluster', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_update_tags_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedClusters/{resourceName}'} # type: ignore
@distributed_trace_async
async def begin_update_tags(
self,
resource_group_name: str,
resource_name: str,
parameters: "_models.TagsObject",
**kwargs: Any
) -> AsyncLROPoller["_models.ManagedCluster"]:
"""Updates tags on a managed cluster.
Updates a managed cluster with the specified tags.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param resource_name: The name of the managed cluster resource.
:type resource_name: str
:param parameters: Parameters supplied to the Update Managed Cluster Tags operation.
:type parameters: ~azure.mgmt.containerservice.v2019_10_01.models.TagsObject
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
this operation to not poll, or pass in your own initialized polling object for a personal
polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either ManagedCluster or the result of
cls(response)
:rtype:
~azure.core.polling.AsyncLROPoller[~azure.mgmt.containerservice.v2019_10_01.models.ManagedCluster]
:raises: ~azure.core.exceptions.HttpResponseError
"""
content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]
polling = kwargs.pop('polling', True) # type: Union[bool, azure.core.polling.AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.ManagedCluster"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._update_tags_initial(
resource_group_name=resource_group_name,
resource_name=resource_name,
parameters=parameters,
content_type=content_type,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
def get_long_running_output(pipeline_response):
response = pipeline_response.http_response
deserialized = self._deserialize('ManagedCluster', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
if polling is True: polling_method = AsyncARMPolling(lro_delay, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_update_tags.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedClusters/{resourceName}'} # type: ignore
async def _delete_initial(
self,
resource_group_name: str,
resource_name: str,
**kwargs: Any
) -> None:
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
request = build_delete_request_initial(
subscription_id=self._config.subscription_id,
resource_group_name=resource_group_name,
resource_name=resource_name,
template_url=self._delete_initial.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [202, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_delete_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedClusters/{resourceName}'} # type: ignore
@distributed_trace_async
async def begin_delete(
self,
resource_group_name: str,
resource_name: str,
**kwargs: Any
) -> AsyncLROPoller[None]:
"""Deletes a managed cluster.
Deletes the managed cluster with a specified resource group and name.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param resource_name: The name of the managed cluster resource.
:type resource_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
this operation to not poll, or pass in your own initialized polling object for a personal
polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[None]
:raises: ~azure.core.exceptions.HttpResponseError
"""
polling = kwargs.pop('polling', True) # type: Union[bool, azure.core.polling.AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType[None]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._delete_initial(
resource_group_name=resource_group_name,
resource_name=resource_name,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
def get_long_running_output(pipeline_response):
if cls:
return cls(pipeline_response, None, {})
if polling is True: polling_method = AsyncARMPolling(lro_delay, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedClusters/{resourceName}'} # type: ignore
async def _reset_service_principal_profile_initial(
self,
resource_group_name: str,
resource_name: str,
parameters: "_models.ManagedClusterServicePrincipalProfile",
**kwargs: Any
) -> None:
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]
_json = self._serialize.body(parameters, 'ManagedClusterServicePrincipalProfile')
request = build_reset_service_principal_profile_request_initial(
subscription_id=self._config.subscription_id,
resource_group_name=resource_group_name,
resource_name=resource_name,
content_type=content_type,
json=_json,
template_url=self._reset_service_principal_profile_initial.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_reset_service_principal_profile_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedClusters/{resourceName}/resetServicePrincipalProfile'} # type: ignore
@distributed_trace_async
async def begin_reset_service_principal_profile(
self,
resource_group_name: str,
resource_name: str,
parameters: "_models.ManagedClusterServicePrincipalProfile",
**kwargs: Any
) -> AsyncLROPoller[None]:
"""Reset Service Principal Profile of a managed cluster.
Update the service principal Profile for a managed cluster.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param resource_name: The name of the managed cluster resource.
:type resource_name: str
:param parameters: Parameters supplied to the Reset Service Principal Profile operation for a
Managed Cluster.
:type parameters:
~azure.mgmt.containerservice.v2019_10_01.models.ManagedClusterServicePrincipalProfile
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
this operation to not poll, or pass in your own initialized polling object for a personal
polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[None]
:raises: ~azure.core.exceptions.HttpResponseError
"""
content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]
polling = kwargs.pop('polling', True) # type: Union[bool, azure.core.polling.AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType[None]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._reset_service_principal_profile_initial(
resource_group_name=resource_group_name,
resource_name=resource_name,
parameters=parameters,
content_type=content_type,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
def get_long_running_output(pipeline_response):
if cls:
return cls(pipeline_response, None, {})
if polling is True: polling_method = AsyncARMPolling(lro_delay, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_reset_service_principal_profile.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedClusters/{resourceName}/resetServicePrincipalProfile'} # type: ignore
async def _reset_aad_profile_initial(
self,
resource_group_name: str,
resource_name: str,
parameters: "_models.ManagedClusterAADProfile",
**kwargs: Any
) -> None:
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]
_json = self._serialize.body(parameters, 'ManagedClusterAADProfile')
request = build_reset_aad_profile_request_initial(
subscription_id=self._config.subscription_id,
resource_group_name=resource_group_name,
resource_name=resource_name,
content_type=content_type,
json=_json,
template_url=self._reset_aad_profile_initial.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_reset_aad_profile_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedClusters/{resourceName}/resetAADProfile'} # type: ignore
@distributed_trace_async
async def begin_reset_aad_profile(
self,
resource_group_name: str,
resource_name: str,
parameters: "_models.ManagedClusterAADProfile",
**kwargs: Any
) -> AsyncLROPoller[None]:
"""Reset AAD Profile of a managed cluster.
Update the AAD Profile for a managed cluster.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param resource_name: The name of the managed cluster resource.
:type resource_name: str
:param parameters: Parameters supplied to the Reset AAD Profile operation for a Managed
Cluster.
:type parameters: ~azure.mgmt.containerservice.v2019_10_01.models.ManagedClusterAADProfile
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
this operation to not poll, or pass in your own initialized polling object for a personal
polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[None]
:raises: ~azure.core.exceptions.HttpResponseError
"""
content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]
polling = kwargs.pop('polling', True) # type: Union[bool, azure.core.polling.AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType[None]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._reset_aad_profile_initial(
resource_group_name=resource_group_name,
resource_name=resource_name,
parameters=parameters,
content_type=content_type,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
def get_long_running_output(pipeline_response):
if cls:
return cls(pipeline_response, None, {})
if polling is True: polling_method = AsyncARMPolling(lro_delay, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_reset_aad_profile.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedClusters/{resourceName}/resetAADProfile'} # type: ignore
async def _rotate_cluster_certificates_initial(
self,
resource_group_name: str,
resource_name: str,
**kwargs: Any
) -> None:
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
request = build_rotate_cluster_certificates_request_initial(
subscription_id=self._config.subscription_id,
resource_group_name=resource_group_name,
resource_name=resource_name,
template_url=self._rotate_cluster_certificates_initial.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [202, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_rotate_cluster_certificates_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedClusters/{resourceName}/rotateClusterCertificates'} # type: ignore
@distributed_trace_async
async def begin_rotate_cluster_certificates(
self,
resource_group_name: str,
resource_name: str,
**kwargs: Any
) -> AsyncLROPoller[None]:
"""Rotate certificates of a managed cluster.
Rotate certificates of a managed cluster.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param resource_name: The name of the managed cluster resource.
:type resource_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
this operation to not poll, or pass in your own initialized polling object for a personal
polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[None]
:raises: ~azure.core.exceptions.HttpResponseError
"""
polling = kwargs.pop('polling', True) # type: Union[bool, azure.core.polling.AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType[None]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._rotate_cluster_certificates_initial(
resource_group_name=resource_group_name,
resource_name=resource_name,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
def get_long_running_output(pipeline_response):
if cls:
return cls(pipeline_response, None, {})
if polling is True: polling_method = AsyncARMPolling(lro_delay, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_rotate_cluster_certificates.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedClusters/{resourceName}/rotateClusterCertificates'} # type: ignore
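# --- Illustrative usage sketch (not generated code) ---------------------------
# A minimal, hedged example of how this operation group is typically reached.
# It assumes the aio ContainerServiceClient exposed by this package version and
# azure.identity's async DefaultAzureCredential; the resource group and cluster
# names are placeholders. The coroutine is only defined here, never called.
async def _example_managed_clusters_usage(subscription_id: str) -> None:
    from azure.identity.aio import DefaultAzureCredential
    from azure.mgmt.containerservice.v2019_10_01.aio import ContainerServiceClient
    async with DefaultAzureCredential() as credential:
        async with ContainerServiceClient(credential, subscription_id) as client:
            # list() returns an AsyncItemPaged; iterate it with `async for`.
            async for cluster in client.managed_clusters.list():
                print(cluster.name)
            # Long-running operations return an AsyncLROPoller; await result().
            poller = await client.managed_clusters.begin_delete(
                resource_group_name="example-rg",    # placeholder
                resource_name="example-cluster",     # placeholder
            )
            await poller.result()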
|
|
"""
Module that builds the melted_variant_schema used to construct the
materialized view of the melted variant data.
Build the schema used to build the materialized view.
"""
class SchemaBuilder(object):
"""Builder object for the schema.
Usage:
schema_builder = SchemaBuilder()
schema_builder.add_melted_variant_field(<source_col_name>,
<joined_table_col_name>, <is_null_in_variant_to_set_label>,
<user_queryable>)
schema_builder.add_melted_variant_field(...)
schema_builder.add_melted_variant_field(...)
my_schema = schema_builder.get_schema()
"""
def __init__(self):
self.schema = []
# Check for duplicates while building.
self.joined_table_col_name_set = set()
def add_melted_variant_field(self,
source_col_name,
joined_table_col_name,
is_null_in_variant_to_set_label,
is_user_queryable,
query_schema=None):
"""Updates the schema with a field to be included in computing the
denormalized materialized view.
Args:
source_col_name: Full source column name, i.e. <table>.<col>.
joined_table_col_name: Column name used in the materialized table.
is_null_in_variant_to_set_label: If True, this field is emitted as NULL
in the Variant-to-VariantSet-label view (the UNION branch that yields
Variants belonging to a VariantSet with no associated ExperimentSample).
is_user_queryable: Whether the user is allowed to query for this
field from the UI.
query_schema: Dictionary that describes the schema of this
particular field when the user queries it.
"""
assert joined_table_col_name not in self.joined_table_col_name_set
self.schema.append({
'source_col_name': source_col_name,
'joined_table_col_name': joined_table_col_name,
'is_null_in_variant_to_set_label': is_null_in_variant_to_set_label,
'is_user_queryable': is_user_queryable,
'query_schema': query_schema,
})
self.joined_table_col_name_set.add(joined_table_col_name)
def get_schema(self):
return self.schema
# User-queryable schema keys.
# These are all caps to match our code logic of capitalizing the keys in a
# user's query before handling the filter.
# TODO: Convert all uses of these keys to refer to these constants.
MELTED_SCHEMA_KEY__UID = 'UID'
MELTED_SCHEMA_KEY__POSITION = 'POSITION'
MELTED_SCHEMA_KEY__CHROMOSOME = 'CHROMOSOME'
MELTED_SCHEMA_KEY__REF = 'REF'
MELTED_SCHEMA_KEY__ALT = 'ALT'
MELTED_SCHEMA_KEY__HET = 'IS_HET'
MELTED_SCHEMA_KEY__ES_UID = 'EXPERIMENT_SAMPLE_UID'
MELTED_SCHEMA_KEY__ES_LABEL = 'EXPERIMENT_SAMPLE_LABEL'
MELTED_SCHEMA_KEY__VS_UID = 'VARIANT_SET_UID'
MELTED_SCHEMA_KEY__VS_LABEL = 'VARIANT_SET_LABEL'
MELTED_SCHEMA_KEY__VA_ID = 'VA_ID'
MELTED_SCHEMA_KEY__ES_ID = 'ES_ID'
MELTED_SCHEMA_KEY__VE_ID = 'VE_ID'
MELTED_SCHEMA_KEY__VCCD_ID = 'VCCD_ID'
MELTED_SCHEMA_KEY__ALIGNMENT_GROUP_ID = 'AG_ID'
# Used for aggregate total sample count in Postgres query.
CAST_SCHEMA_KEY__TOTAL_SAMPLE_COUNT = 'SAMPLE_COUNT'
SCHEMA_BUILDER = SchemaBuilder()
# SCHEMA_BUILDER.add_melted_variant_field(<source_col_name>,
# <joined_table_col_name>, <is_null_in_variant_to_set_label>,
# <user_queryable>)
# Variant
SCHEMA_BUILDER.add_melted_variant_field('main_variant.id', 'id', False, False)
SCHEMA_BUILDER.add_melted_variant_field('main_variant.uid', MELTED_SCHEMA_KEY__UID, False, True,
{'type': 'String', 'num': 1})
SCHEMA_BUILDER.add_melted_variant_field('main_variant.position', MELTED_SCHEMA_KEY__POSITION, False, True,
{'type': 'Integer', 'num': 1})
SCHEMA_BUILDER.add_melted_variant_field('main_chromosome.label', MELTED_SCHEMA_KEY__CHROMOSOME, False, True,
{'type': 'String', 'num': 1})
SCHEMA_BUILDER.add_melted_variant_field('main_variant.ref_value', MELTED_SCHEMA_KEY__REF, False, True,
{'type': 'String', 'num': 1})
# VariantAlternate
SCHEMA_BUILDER.add_melted_variant_field('main_variantalternate.id', MELTED_SCHEMA_KEY__VA_ID, False, False)
SCHEMA_BUILDER.add_melted_variant_field('main_variantalternate.alt_value', MELTED_SCHEMA_KEY__ALT, False, True,
{'type': 'String', 'num': 1})
# Allow filtering by AlignmentGroup.
SCHEMA_BUILDER.add_melted_variant_field(
'main_variantcallercommondata.alignment_group_id',
MELTED_SCHEMA_KEY__ALIGNMENT_GROUP_ID, True, False)
# For joining key-value data.
SCHEMA_BUILDER.add_melted_variant_field('main_variantcallercommondata.id', MELTED_SCHEMA_KEY__VCCD_ID, True, False)
SCHEMA_BUILDER.add_melted_variant_field('main_variantevidence.id', MELTED_SCHEMA_KEY__VE_ID, True, False)
# ExperimentSample
SCHEMA_BUILDER.add_melted_variant_field('main_experimentsample.id', MELTED_SCHEMA_KEY__ES_ID, True, False)
SCHEMA_BUILDER.add_melted_variant_field('main_experimentsample.uid', MELTED_SCHEMA_KEY__ES_UID, True, True,
{'type': 'String', 'num': 1})
SCHEMA_BUILDER.add_melted_variant_field('main_experimentsample.label', MELTED_SCHEMA_KEY__ES_LABEL, True, True,
{'type': 'String', 'num': 1})
# VariantSet
SCHEMA_BUILDER.add_melted_variant_field('main_variantset.uid', MELTED_SCHEMA_KEY__VS_UID, False, True,
{'type': 'String', 'num': 1})
SCHEMA_BUILDER.add_melted_variant_field('main_variantset.label', MELTED_SCHEMA_KEY__VS_LABEL, False, True,
{'type': 'String', 'num': 1})
# Build the schema.
MELTED_VARIANT_SCHEMA = SCHEMA_BUILDER.get_schema()
# Generate the SELECT clause for building the table.
MATERIALIZED_TABLE_SELECT_CLAUSE_COMPONENTS = []
for schema_obj in MELTED_VARIANT_SCHEMA:
if (schema_obj['source_col_name'] in [
'main_variantset.uid',
'main_variantset.label']):
MATERIALIZED_TABLE_SELECT_CLAUSE_COMPONENTS.append(
'array_agg(' + schema_obj['source_col_name'] + ') AS ' +
schema_obj['joined_table_col_name'])
else:
MATERIALIZED_TABLE_SELECT_CLAUSE_COMPONENTS.append(
schema_obj['source_col_name'] + ' AS ' +
schema_obj['joined_table_col_name'])
MATERIALIZED_TABLE_SELECT_CLAUSE = ', '.join(
MATERIALIZED_TABLE_SELECT_CLAUSE_COMPONENTS)
# A GROUP BY, for dealing with repeated variant sets.
MATERIALIZED_TABLE_GROUP_BY_CLAUSE_COMPONENTS = []
for schema_obj in MELTED_VARIANT_SCHEMA:
if (schema_obj['source_col_name'] in [
'main_variantset.uid',
'main_variantset.label']):
continue
MATERIALIZED_TABLE_GROUP_BY_CLAUSE_COMPONENTS.append(
schema_obj['source_col_name'])
MATERIALIZED_TABLE_GROUP_BY_CLAUSE = ', '.join(
MATERIALIZED_TABLE_GROUP_BY_CLAUSE_COMPONENTS)
# Generate the SELECT clause for the Variant to VariantSet.label view.
# We perform a UNION with this table to ensure that we yield Variants that
# are in a VariantSet without an association with any ExperimentSample.
MATERIALIZED_TABLE_VTVS_SELECT_CLAUSE_COMPONENTS = []
for schema_obj in MELTED_VARIANT_SCHEMA:
if schema_obj['is_null_in_variant_to_set_label']:
MATERIALIZED_TABLE_VTVS_SELECT_CLAUSE_COMPONENTS.append(
'NULL' + ' AS ' + schema_obj['joined_table_col_name'])
elif (schema_obj['source_col_name'] in [
'main_variantset.uid',
'main_variantset.label']):
MATERIALIZED_TABLE_VTVS_SELECT_CLAUSE_COMPONENTS.append(
'array_agg(' + schema_obj['source_col_name'] + ') AS ' +
schema_obj['joined_table_col_name'])
else:
MATERIALIZED_TABLE_VTVS_SELECT_CLAUSE_COMPONENTS.append(
schema_obj['source_col_name'] + ' AS ' +
schema_obj['joined_table_col_name'])
MATERIALIZED_TABLE_VTVS_SELECT_CLAUSE = ', '.join(
MATERIALIZED_TABLE_VTVS_SELECT_CLAUSE_COMPONENTS)
# A GROUP BY, for dealing with repeated variant sets.
MATERIALIZED_TABLE_VTVS_GROUP_BY_CLAUSE_COMPONENTS = []
for schema_obj in MELTED_VARIANT_SCHEMA:
if schema_obj['is_null_in_variant_to_set_label']:
continue
if (schema_obj['source_col_name'] in [
'main_variantset.uid',
'main_variantset.label']):
continue
MATERIALIZED_TABLE_VTVS_GROUP_BY_CLAUSE_COMPONENTS.append(
schema_obj['source_col_name'])
MATERIALIZED_TABLE_VTVS_GROUP_BY_CLAUSE = ', '.join(
MATERIALIZED_TABLE_VTVS_GROUP_BY_CLAUSE_COMPONENTS)
# Generate the SELECT clause for querying the table.
MATERIALIZED_TABLE_QUERY_SELECT_CLAUSE_COMPONENTS = [
schema_obj['joined_table_col_name']
for schema_obj in MELTED_VARIANT_SCHEMA
] + [
'va_data',  # Needed to hard-code showing INFO_EFF_GENE.
'es_data',
've_data'
]
# Map from queryable fields to schema info (e.g. type, num).
MATERIALIZED_TABLE_QUERYABLE_FIELDS_MAP = dict([
(schema_obj['joined_table_col_name'], schema_obj['query_schema'])
for schema_obj in MELTED_VARIANT_SCHEMA
if schema_obj['is_user_queryable']])
# Assert that all user queryable fields have schema defined.
for key, query_schema in MATERIALIZED_TABLE_QUERYABLE_FIELDS_MAP.iteritems():
assert query_schema is not None, (
"Missing query schema for queryable %s" % key)
|
|
"""numpy.distutils.fcompiler
Contains FCompiler, an abstract base class that defines the interface
for the numpy.distutils Fortran compiler abstraction model.
Terminology:
To be consistent, where the term 'executable' is used, it means the single
file, like 'gcc', that is executed, and should be a string. In contrast,
'command' means the entire command line, like ['gcc', '-c', 'file.c'], and
should be a list.
But note that FCompiler.executables is actually a dictionary of commands.
"""
__all__ = ['FCompiler','new_fcompiler','show_fcompilers',
'dummy_fortran_file']
import os
import sys
import re
import new
try:
set
except NameError:
from sets import Set as set
from distutils.sysconfig import get_config_var, get_python_lib
from distutils.fancy_getopt import FancyGetopt
from distutils.errors import DistutilsModuleError, \
DistutilsExecError, CompileError, LinkError, DistutilsPlatformError
from distutils.util import split_quoted, strtobool
from numpy.distutils.ccompiler import CCompiler, gen_lib_options
from numpy.distutils import log
from numpy.distutils.misc_util import is_string, all_strings, is_sequence, make_temp_file
from numpy.distutils.environment import EnvironmentConfig
from numpy.distutils.exec_command import find_executable
__metaclass__ = type
class CompilerNotFound(Exception):
pass
def flaglist(s):
if is_string(s):
return split_quoted(s)
else:
return s
def str2bool(s):
if is_string(s):
return strtobool(s)
return bool(s)
def is_sequence_of_strings(seq):
return is_sequence(seq) and all_strings(seq)
class FCompiler(CCompiler):
"""Abstract base class to define the interface that must be implemented
by real Fortran compiler classes.
Methods that subclasses may redefine:
update_executables(), find_executables(), get_version()
get_flags(), get_flags_opt(), get_flags_arch(), get_flags_debug()
get_flags_f77(), get_flags_opt_f77(), get_flags_arch_f77(),
get_flags_debug_f77(), get_flags_f90(), get_flags_opt_f90(),
get_flags_arch_f90(), get_flags_debug_f90(),
get_flags_fix(), get_flags_linker_so()
DON'T call these methods (except get_version) after
constructing a compiler instance or inside any other method.
All methods, except update_executables() and find_executables(),
may call the get_version() method.
After constructing a compiler instance, always call its customize(dist=None)
method, which finalizes compiler construction and makes the following
attributes available:
compiler_f77
compiler_f90
compiler_fix
linker_so
archiver
ranlib
libraries
library_dirs
"""
# These are the environment variables and distutils keys used.
    # Each configuration description is
# (<hook name>, <environment variable>, <key in distutils.cfg>, <convert>)
# The hook names are handled by the self._environment_hook method.
# - names starting with 'self.' call methods in this class
# - names starting with 'exe.' return the key in the executables dict
# - names like 'flags.YYY' return self.get_flag_YYY()
# convert is either None or a function to convert a string to the
    # appropriate type used.
distutils_vars = EnvironmentConfig(
distutils_section='config_fc',
noopt = (None, None, 'noopt', str2bool),
noarch = (None, None, 'noarch', str2bool),
debug = (None, None, 'debug', str2bool),
verbose = (None, None, 'verbose', str2bool),
)
command_vars = EnvironmentConfig(
distutils_section='config_fc',
compiler_f77 = ('exe.compiler_f77', 'F77', 'f77exec', None),
compiler_f90 = ('exe.compiler_f90', 'F90', 'f90exec', None),
compiler_fix = ('exe.compiler_fix', 'F90', 'f90exec', None),
version_cmd = ('exe.version_cmd', None, None, None),
linker_so = ('exe.linker_so', 'LDSHARED', 'ldshared', None),
linker_exe = ('exe.linker_exe', 'LD', 'ld', None),
archiver = (None, 'AR', 'ar', None),
ranlib = (None, 'RANLIB', 'ranlib', None),
)
flag_vars = EnvironmentConfig(
distutils_section='config_fc',
f77 = ('flags.f77', 'F77FLAGS', 'f77flags', flaglist),
f90 = ('flags.f90', 'F90FLAGS', 'f90flags', flaglist),
free = ('flags.free', 'FREEFLAGS', 'freeflags', flaglist),
fix = ('flags.fix', None, None, flaglist),
opt = ('flags.opt', 'FOPT', 'opt', flaglist),
opt_f77 = ('flags.opt_f77', None, None, flaglist),
opt_f90 = ('flags.opt_f90', None, None, flaglist),
arch = ('flags.arch', 'FARCH', 'arch', flaglist),
arch_f77 = ('flags.arch_f77', None, None, flaglist),
arch_f90 = ('flags.arch_f90', None, None, flaglist),
        debug = ('flags.debug', 'FDEBUG', 'fdebug', flaglist),
debug_f77 = ('flags.debug_f77', None, None, flaglist),
debug_f90 = ('flags.debug_f90', None, None, flaglist),
flags = ('self.get_flags', 'FFLAGS', 'fflags', flaglist),
linker_so = ('flags.linker_so', 'LDFLAGS', 'ldflags', flaglist),
linker_exe = ('flags.linker_exe', 'LDFLAGS', 'ldflags', flaglist),
ar = ('flags.ar', 'ARFLAGS', 'arflags', flaglist),
)
language_map = {'.f':'f77',
'.for':'f77',
'.F':'f77', # XXX: needs preprocessor
'.ftn':'f77',
'.f77':'f77',
'.f90':'f90',
'.F90':'f90', # XXX: needs preprocessor
'.f95':'f90',
}
language_order = ['f90','f77']
# These will be set by the subclass
compiler_type = None
compiler_aliases = ()
version_pattern = None
possible_executables = []
executables = {
'version_cmd' : ["f77", "-v"],
'compiler_f77' : ["f77"],
'compiler_f90' : ["f90"],
'compiler_fix' : ["f90", "-fixed"],
'linker_so' : ["f90", "-shared"],
'linker_exe' : ["f90"],
'archiver' : ["ar", "-cr"],
'ranlib' : None,
}
# If compiler does not support compiling Fortran 90 then it can
# suggest using another compiler. For example, gnu would suggest
# gnu95 compiler type when there are F90 sources.
suggested_f90_compiler = None
compile_switch = "-c"
object_switch = "-o " # Ending space matters! It will be stripped
# but if it is missing then object_switch
# will be prefixed to object file name by
# string concatenation.
library_switch = "-o " # Ditto!
# Switch to specify where module files are created and searched
# for USE statement. Normally it is a string and also here ending
# space matters. See above.
module_dir_switch = None
# Switch to specify where module files are searched for USE statement.
module_include_switch = '-I'
pic_flags = [] # Flags to create position-independent code
src_extensions = ['.for','.ftn','.f77','.f','.f90','.f95','.F','.F90']
obj_extension = ".o"
shared_lib_extension = get_config_var('SO') # or .dll
static_lib_extension = ".a" # or .lib
static_lib_format = "lib%s%s" # or %s%s
shared_lib_format = "%s%s"
exe_extension = ""
_exe_cache = {}
_executable_keys = ['version_cmd', 'compiler_f77', 'compiler_f90',
'compiler_fix', 'linker_so', 'linker_exe', 'archiver',
'ranlib']
# This will be set by new_fcompiler when called in
# command/{build_ext.py, build_clib.py, config.py} files.
c_compiler = None
def __init__(self, *args, **kw):
CCompiler.__init__(self, *args, **kw)
self.distutils_vars = self.distutils_vars.clone(self._environment_hook)
self.command_vars = self.command_vars.clone(self._environment_hook)
self.flag_vars = self.flag_vars.clone(self._environment_hook)
self.executables = self.executables.copy()
for e in self._executable_keys:
if e not in self.executables:
self.executables[e] = None
# Some methods depend on .customize() being called first, so
# this keeps track of whether that's happened yet.
self._is_customised = False
def __copy__(self):
obj = new.instance(self.__class__, self.__dict__)
obj.distutils_vars = obj.distutils_vars.clone(obj._environment_hook)
obj.command_vars = obj.command_vars.clone(obj._environment_hook)
obj.flag_vars = obj.flag_vars.clone(obj._environment_hook)
obj.executables = obj.executables.copy()
return obj
def copy(self):
return self.__copy__()
# Use properties for the attributes used by CCompiler. Setting them
# as attributes from the self.executables dictionary is error-prone,
# so we get them from there each time.
def _command_property(key):
def fget(self):
assert self._is_customised
return self.executables[key]
return property(fget=fget)
version_cmd = _command_property('version_cmd')
compiler_f77 = _command_property('compiler_f77')
compiler_f90 = _command_property('compiler_f90')
compiler_fix = _command_property('compiler_fix')
linker_so = _command_property('linker_so')
linker_exe = _command_property('linker_exe')
archiver = _command_property('archiver')
ranlib = _command_property('ranlib')
# Make our terminology consistent.
def set_executable(self, key, value):
self.set_command(key, value)
def set_commands(self, **kw):
for k, v in kw.items():
self.set_command(k, v)
def set_command(self, key, value):
if not key in self._executable_keys:
raise ValueError(
"unknown executable '%s' for class %s" %
(key, self.__class__.__name__))
if is_string(value):
value = split_quoted(value)
assert value is None or is_sequence_of_strings(value[1:]), (key, value)
self.executables[key] = value
######################################################################
## Methods that subclasses may redefine. But don't call these methods!
## They are private to FCompiler class and may return unexpected
## results if used elsewhere. So, you have been warned..
def find_executables(self):
"""Go through the self.executables dictionary, and attempt to
        find and assign appropriate executables.
Executable names are looked for in the environment (environment
variables, the distutils.cfg, and command line), the 0th-element of
the command list, and the self.possible_executables list.
Also, if the 0th element is "<F77>" or "<F90>", the Fortran 77
or the Fortran 90 compiler executable is used, unless overridden
by an environment setting.
        Subclasses should call this if overridden.
"""
assert self._is_customised
exe_cache = self._exe_cache
def cached_find_executable(exe):
if exe in exe_cache:
return exe_cache[exe]
fc_exe = find_executable(exe)
exe_cache[exe] = exe_cache[fc_exe] = fc_exe
return fc_exe
def verify_command_form(name, value):
if value is not None and not is_sequence_of_strings(value):
raise ValueError(
"%s value %r is invalid in class %s" %
(name, value, self.__class__.__name__))
def set_exe(exe_key, f77=None, f90=None):
cmd = self.executables.get(exe_key, None)
if not cmd:
return None
# Note that we get cmd[0] here if the environment doesn't
# have anything set
exe_from_environ = getattr(self.command_vars, exe_key)
if not exe_from_environ:
possibles = [f90, f77] + self.possible_executables
else:
possibles = [exe_from_environ] + self.possible_executables
seen = set()
unique_possibles = []
for e in possibles:
if e == '<F77>':
e = f77
elif e == '<F90>':
e = f90
if not e or e in seen:
continue
seen.add(e)
unique_possibles.append(e)
for exe in unique_possibles:
fc_exe = cached_find_executable(exe)
if fc_exe:
cmd[0] = fc_exe
return fc_exe
self.set_command(exe_key, None)
return None
ctype = self.compiler_type
f90 = set_exe('compiler_f90')
if not f90:
f77 = set_exe('compiler_f77')
if f77:
log.warn('%s: no Fortran 90 compiler found' % ctype)
else:
                raise CompilerNotFound('%s: found neither an f90 nor an f77 compiler' % ctype)
else:
f77 = set_exe('compiler_f77', f90=f90)
if not f77:
log.warn('%s: no Fortran 77 compiler found' % ctype)
set_exe('compiler_fix', f90=f90)
set_exe('linker_so', f77=f77, f90=f90)
set_exe('linker_exe', f77=f77, f90=f90)
set_exe('version_cmd', f77=f77, f90=f90)
set_exe('archiver')
set_exe('ranlib')
    def update_executables(self):
"""Called at the beginning of customisation. Subclasses should
override this if they need to set up the executables dictionary.
Note that self.find_executables() is run afterwards, so the
self.executables dictionary values can contain <F77> or <F90> as
the command, which will be replaced by the found F77 or F90
compiler.
"""
pass
def get_flags(self):
"""List of flags common to all compiler types."""
return [] + self.pic_flags
def _get_command_flags(self, key):
cmd = self.executables.get(key, None)
if cmd is None:
return []
return cmd[1:]
def get_flags_f77(self):
"""List of Fortran 77 specific flags."""
return self._get_command_flags('compiler_f77')
def get_flags_f90(self):
"""List of Fortran 90 specific flags."""
return self._get_command_flags('compiler_f90')
def get_flags_free(self):
"""List of Fortran 90 free format specific flags."""
return []
def get_flags_fix(self):
"""List of Fortran 90 fixed format specific flags."""
return self._get_command_flags('compiler_fix')
def get_flags_linker_so(self):
"""List of linker flags to build a shared library."""
return self._get_command_flags('linker_so')
def get_flags_linker_exe(self):
"""List of linker flags to build an executable."""
return self._get_command_flags('linker_exe')
def get_flags_ar(self):
"""List of archiver flags. """
return self._get_command_flags('archiver')
def get_flags_opt(self):
"""List of architecture independent compiler flags."""
return []
def get_flags_arch(self):
"""List of architecture dependent compiler flags."""
return []
def get_flags_debug(self):
"""List of compiler flags to compile with debugging information."""
return []
get_flags_opt_f77 = get_flags_opt_f90 = get_flags_opt
get_flags_arch_f77 = get_flags_arch_f90 = get_flags_arch
get_flags_debug_f77 = get_flags_debug_f90 = get_flags_debug
def get_libraries(self):
"""List of compiler libraries."""
return self.libraries[:]
def get_library_dirs(self):
"""List of compiler library directories."""
return self.library_dirs[:]
def get_version(self, force=False, ok_status=[0]):
assert self._is_customised
return CCompiler.get_version(self, force=force, ok_status=ok_status)
############################################################
## Public methods:
def customize(self, dist = None):
"""Customize Fortran compiler.
This method gets Fortran compiler specific information from
(i) class definition, (ii) environment, (iii) distutils config
files, and (iv) command line (later overrides earlier).
This method should be always called after constructing a
compiler instance. But not in __init__ because Distribution
instance is needed for (iii) and (iv).
"""
log.info('customize %s' % (self.__class__.__name__))
self._is_customised = True
self.distutils_vars.use_distribution(dist)
self.command_vars.use_distribution(dist)
self.flag_vars.use_distribution(dist)
self.update_executables()
# find_executables takes care of setting the compiler commands,
# version_cmd, linker_so, linker_exe, ar, and ranlib
self.find_executables()
noopt = self.distutils_vars.get('noopt', False)
noarch = self.distutils_vars.get('noarch', noopt)
debug = self.distutils_vars.get('debug', False)
f77 = self.command_vars.compiler_f77
f90 = self.command_vars.compiler_f90
f77flags = []
f90flags = []
freeflags = []
fixflags = []
if f77:
f77flags = self.flag_vars.f77
if f90:
f90flags = self.flag_vars.f90
freeflags = self.flag_vars.free
# XXX Assuming that free format is default for f90 compiler.
fix = self.command_vars.compiler_fix
if fix:
fixflags = self.flag_vars.fix + f90flags
oflags, aflags, dflags = [], [], []
# examine get_flags_<tag>_<compiler> for extra flags
# only add them if the method is different from get_flags_<tag>
def get_flags(tag, flags):
# note that self.flag_vars.<tag> calls self.get_flags_<tag>()
flags.extend(getattr(self.flag_vars, tag))
this_get = getattr(self, 'get_flags_' + tag)
for name, c, flagvar in [('f77', f77, f77flags),
('f90', f90, f90flags),
('f90', fix, fixflags)]:
t = '%s_%s' % (tag, name)
if c and this_get is not getattr(self, 'get_flags_' + t):
flagvar.extend(getattr(self.flag_vars, t))
if not noopt:
get_flags('opt', oflags)
if not noarch:
get_flags('arch', aflags)
if debug:
get_flags('debug', dflags)
fflags = self.flag_vars.flags + dflags + oflags + aflags
if f77:
self.set_commands(compiler_f77=[f77]+f77flags+fflags)
if f90:
self.set_commands(compiler_f90=[f90]+freeflags+f90flags+fflags)
if fix:
self.set_commands(compiler_fix=[fix]+fixflags+fflags)
#XXX: Do we need LDSHARED->SOSHARED, LDFLAGS->SOFLAGS
linker_so = self.linker_so
if linker_so:
linker_so_flags = self.flag_vars.linker_so
if sys.platform.startswith('aix'):
python_lib = get_python_lib(standard_lib=1)
ld_so_aix = os.path.join(python_lib, 'config', 'ld_so_aix')
python_exp = os.path.join(python_lib, 'config', 'python.exp')
linker_so = [ld_so_aix] + linker_so + ['-bI:'+python_exp]
self.set_commands(linker_so=linker_so+linker_so_flags)
linker_exe = self.linker_exe
if linker_exe:
linker_exe_flags = self.flag_vars.linker_exe
self.set_commands(linker_exe=linker_exe+linker_exe_flags)
ar = self.command_vars.archiver
if ar:
arflags = self.flag_vars.ar
self.set_commands(archiver=[ar]+arflags)
self.set_library_dirs(self.get_library_dirs())
self.set_libraries(self.get_libraries())
def dump_properties(self):
"""Print out the attributes of a compiler instance."""
props = []
for key in self.executables.keys() + \
['version','libraries','library_dirs',
'object_switch','compile_switch']:
if hasattr(self,key):
v = getattr(self,key)
props.append((key, None, '= '+repr(v)))
props.sort()
pretty_printer = FancyGetopt(props)
for l in pretty_printer.generate_help("%s instance properties:" \
% (self.__class__.__name__)):
if l[:4]==' --':
l = ' ' + l[4:]
print l
###################
def _compile(self, obj, src, ext, cc_args, extra_postargs, pp_opts):
"""Compile 'src' to product 'obj'."""
src_flags = {}
if is_f_file(src) and not has_f90_header(src):
flavor = ':f77'
compiler = self.compiler_f77
src_flags = get_f77flags(src)
elif is_free_format(src):
flavor = ':f90'
compiler = self.compiler_f90
if compiler is None:
raise DistutilsExecError, 'f90 not supported by %s needed for %s'\
% (self.__class__.__name__,src)
else:
flavor = ':fix'
compiler = self.compiler_fix
if compiler is None:
raise DistutilsExecError, 'f90 (fixed) not supported by %s needed for %s'\
% (self.__class__.__name__,src)
if self.object_switch[-1]==' ':
o_args = [self.object_switch.strip(),obj]
else:
o_args = [self.object_switch.strip()+obj]
assert self.compile_switch.strip()
s_args = [self.compile_switch, src]
extra_flags = src_flags.get(self.compiler_type,[])
if extra_flags:
log.info('using compile options from source: %r' \
% ' '.join(extra_flags))
command = compiler + cc_args + extra_flags + s_args + o_args \
+ extra_postargs
display = '%s: %s' % (os.path.basename(compiler[0]) + flavor,
src)
try:
self.spawn(command,display=display)
except DistutilsExecError, msg:
raise CompileError, msg
def module_options(self, module_dirs, module_build_dir):
options = []
if self.module_dir_switch is not None:
if self.module_dir_switch[-1]==' ':
options.extend([self.module_dir_switch.strip(),module_build_dir])
else:
options.append(self.module_dir_switch.strip()+module_build_dir)
else:
print 'XXX: module_build_dir=%r option ignored' % (module_build_dir)
print 'XXX: Fix module_dir_switch for ',self.__class__.__name__
if self.module_include_switch is not None:
for d in [module_build_dir]+module_dirs:
options.append('%s%s' % (self.module_include_switch, d))
else:
print 'XXX: module_dirs=%r option ignored' % (module_dirs)
print 'XXX: Fix module_include_switch for ',self.__class__.__name__
return options
def library_option(self, lib):
return "-l" + lib
def library_dir_option(self, dir):
return "-L" + dir
def link(self, target_desc, objects,
output_filename, output_dir=None, libraries=None,
library_dirs=None, runtime_library_dirs=None,
export_symbols=None, debug=0, extra_preargs=None,
extra_postargs=None, build_temp=None, target_lang=None):
objects, output_dir = self._fix_object_args(objects, output_dir)
libraries, library_dirs, runtime_library_dirs = \
self._fix_lib_args(libraries, library_dirs, runtime_library_dirs)
lib_opts = gen_lib_options(self, library_dirs, runtime_library_dirs,
libraries)
if is_string(output_dir):
output_filename = os.path.join(output_dir, output_filename)
elif output_dir is not None:
raise TypeError, "'output_dir' must be a string or None"
if self._need_link(objects, output_filename):
if self.library_switch[-1]==' ':
o_args = [self.library_switch.strip(),output_filename]
else:
o_args = [self.library_switch.strip()+output_filename]
if is_string(self.objects):
ld_args = objects + [self.objects]
else:
ld_args = objects + self.objects
ld_args = ld_args + lib_opts + o_args
if debug:
ld_args[:0] = ['-g']
if extra_preargs:
ld_args[:0] = extra_preargs
if extra_postargs:
ld_args.extend(extra_postargs)
self.mkpath(os.path.dirname(output_filename))
if target_desc == CCompiler.EXECUTABLE:
linker = self.linker_exe[:]
else:
linker = self.linker_so[:]
command = linker + ld_args
try:
self.spawn(command)
except DistutilsExecError, msg:
raise LinkError, msg
else:
log.debug("skipping %s (up-to-date)", output_filename)
def _environment_hook(self, name, hook_name):
if hook_name is None:
return None
if is_string(hook_name):
if hook_name.startswith('self.'):
hook_name = hook_name[5:]
hook = getattr(self, hook_name)
return hook()
elif hook_name.startswith('exe.'):
hook_name = hook_name[4:]
var = self.executables[hook_name]
if var:
return var[0]
else:
return None
elif hook_name.startswith('flags.'):
hook_name = hook_name[6:]
hook = getattr(self, 'get_flags_' + hook_name)
return hook()
else:
return hook_name()
## class FCompiler
_default_compilers = (
# sys.platform mappings
('win32', ('gnu','intelv','absoft','compaqv','intelev','gnu95','g95')),
('cygwin.*', ('gnu','intelv','absoft','compaqv','intelev','gnu95','g95')),
('linux.*', ('gnu','intel','lahey','pg','absoft','nag','vast','compaq',
'intele','intelem','gnu95','g95')),
('darwin.*', ('nag', 'absoft', 'ibm', 'intel', 'gnu', 'gnu95', 'g95')),
('sunos.*', ('sun','gnu','gnu95','g95')),
('irix.*', ('mips','gnu','gnu95',)),
('aix.*', ('ibm','gnu','gnu95',)),
# os.name mappings
('posix', ('gnu','gnu95',)),
('nt', ('gnu','gnu95',)),
('mac', ('gnu','gnu95',)),
)
fcompiler_class = None
fcompiler_aliases = None
def load_all_fcompiler_classes():
"""Cache all the FCompiler classes found in modules in the
numpy.distutils.fcompiler package.
"""
from glob import glob
global fcompiler_class, fcompiler_aliases
if fcompiler_class is not None:
return
pys = os.path.join(os.path.dirname(__file__), '*.py')
fcompiler_class = {}
fcompiler_aliases = {}
for fname in glob(pys):
module_name, ext = os.path.splitext(os.path.basename(fname))
module_name = 'numpy.distutils.fcompiler.' + module_name
__import__ (module_name)
module = sys.modules[module_name]
if hasattr(module, 'compilers'):
for cname in module.compilers:
klass = getattr(module, cname)
desc = (klass.compiler_type, klass, klass.description)
fcompiler_class[klass.compiler_type] = desc
for alias in klass.compiler_aliases:
if alias in fcompiler_aliases:
raise ValueError("alias %r defined for both %s and %s"
% (alias, klass.__name__,
fcompiler_aliases[alias][1].__name__))
fcompiler_aliases[alias] = desc
def _find_existing_fcompiler(compiler_types,
osname=None, platform=None,
requiref90=False,
c_compiler=None):
from numpy.distutils.core import get_distribution
dist = get_distribution(always=True)
for compiler_type in compiler_types:
v = None
try:
c = new_fcompiler(plat=platform, compiler=compiler_type,
c_compiler=c_compiler)
c.customize(dist)
v = c.get_version()
if requiref90 and c.compiler_f90 is None:
v = None
new_compiler = c.suggested_f90_compiler
if new_compiler:
                    log.warn('Trying %r compiler as suggested by %r '
                             'compiler for f90 support.' % (new_compiler,
                                                            compiler_type))
c = new_fcompiler(plat=platform, compiler=new_compiler,
c_compiler=c_compiler)
c.customize(dist)
v = c.get_version()
if v is not None:
compiler_type = new_compiler
if requiref90 and c.compiler_f90 is None:
raise ValueError('%s does not support compiling f90 codes, '
'skipping.' % (c.__class__.__name__))
except DistutilsModuleError:
log.debug("_find_existing_fcompiler: compiler_type='%s' raised DistutilsModuleError", compiler_type)
except CompilerNotFound:
log.debug("_find_existing_fcompiler: compiler_type='%s' not found", compiler_type)
if v is not None:
return compiler_type
return None
def available_fcompilers_for_platform(osname=None, platform=None):
if osname is None:
osname = os.name
if platform is None:
platform = sys.platform
matching_compiler_types = []
for pattern, compiler_type in _default_compilers:
if re.match(pattern, platform) or re.match(pattern, osname):
for ct in compiler_type:
if ct not in matching_compiler_types:
matching_compiler_types.append(ct)
if not matching_compiler_types:
matching_compiler_types.append('gnu')
return matching_compiler_types
def get_default_fcompiler(osname=None, platform=None, requiref90=False,
c_compiler=None):
"""Determine the default Fortran compiler to use for the given
platform."""
matching_compiler_types = available_fcompilers_for_platform(osname,
platform)
compiler_type = _find_existing_fcompiler(matching_compiler_types,
osname=osname,
platform=platform,
requiref90=requiref90,
c_compiler=c_compiler)
return compiler_type
def new_fcompiler(plat=None,
compiler=None,
verbose=0,
dry_run=0,
force=0,
requiref90=False,
c_compiler = None):
"""Generate an instance of some FCompiler subclass for the supplied
platform/compiler combination.
"""
load_all_fcompiler_classes()
if plat is None:
plat = os.name
if compiler is None:
compiler = get_default_fcompiler(plat, requiref90=requiref90,
c_compiler=c_compiler)
if compiler in fcompiler_class:
module_name, klass, long_description = fcompiler_class[compiler]
elif compiler in fcompiler_aliases:
module_name, klass, long_description = fcompiler_aliases[compiler]
else:
msg = "don't know how to compile Fortran code on platform '%s'" % plat
if compiler is not None:
msg = msg + " with '%s' compiler." % compiler
msg = msg + " Supported compilers are: %s)" \
% (','.join(fcompiler_class.keys()))
log.warn(msg)
return None
compiler = klass(verbose=verbose, dry_run=dry_run, force=force)
compiler.c_compiler = c_compiler
return compiler
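# A minimal usage sketch, assuming some Fortran compiler is installed: build a
# compiler for the current platform, finalize it with customize(), and then
# read the resulting command properties. Passing dist=None skips distutils
# config/command-line customization; a real build would pass the active
# Distribution. The helper name is hypothetical.
def _sketch_new_fcompiler_usage():
    fc = new_fcompiler(requiref90=False)
    if fc is None:
        return None
    # customize() may raise CompilerNotFound if no Fortran compiler exists.
    fc.customize(dist=None)
    log.info('compiler_f77 command: %r' % (fc.compiler_f77,))
    log.info('compiler_f90 command: %r' % (fc.compiler_f90,))
    return fc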
def show_fcompilers(dist=None):
"""Print list of available compilers (used by the "--help-fcompiler"
option to "config_fc").
"""
if dist is None:
from distutils.dist import Distribution
from numpy.distutils.command.config_compiler import config_fc
dist = Distribution()
dist.script_name = os.path.basename(sys.argv[0])
dist.script_args = ['config_fc'] + sys.argv[1:]
try:
dist.script_args.remove('--help-fcompiler')
except ValueError:
pass
dist.cmdclass['config_fc'] = config_fc
dist.parse_config_files()
dist.parse_command_line()
compilers = []
compilers_na = []
compilers_ni = []
if not fcompiler_class:
load_all_fcompiler_classes()
platform_compilers = available_fcompilers_for_platform()
for compiler in platform_compilers:
v = None
log.set_verbosity(-2)
try:
c = new_fcompiler(compiler=compiler, verbose=dist.verbose)
c.customize(dist)
v = c.get_version()
except (DistutilsModuleError, CompilerNotFound), e:
log.debug("show_fcompilers: %s not found" % (compiler,))
log.debug(repr(e))
if v is None:
compilers_na.append(("fcompiler="+compiler, None,
fcompiler_class[compiler][2]))
else:
c.dump_properties()
compilers.append(("fcompiler="+compiler, None,
fcompiler_class[compiler][2] + ' (%s)' % v))
compilers_ni = list(set(fcompiler_class.keys()) - set(platform_compilers))
compilers_ni = [("fcompiler="+fc, None, fcompiler_class[fc][2])
for fc in compilers_ni]
compilers.sort()
compilers_na.sort()
compilers_ni.sort()
pretty_printer = FancyGetopt(compilers)
pretty_printer.print_help("Fortran compilers found:")
pretty_printer = FancyGetopt(compilers_na)
pretty_printer.print_help("Compilers available for this "
"platform, but not found:")
if compilers_ni:
pretty_printer = FancyGetopt(compilers_ni)
pretty_printer.print_help("Compilers not available on this platform:")
print "For compiler details, run 'config_fc --verbose' setup command."
def dummy_fortran_file():
fo, name = make_temp_file(suffix='.f')
fo.write(" subroutine dummy()\n end\n")
fo.close()
return name[:-2]
is_f_file = re.compile(r'.*[.](for|ftn|f77|f)\Z',re.I).match
_has_f_header = re.compile(r'-[*]-\s*fortran\s*-[*]-',re.I).search
_has_f90_header = re.compile(r'-[*]-\s*f90\s*-[*]-',re.I).search
_has_fix_header = re.compile(r'-[*]-\s*fix\s*-[*]-',re.I).search
_free_f90_start = re.compile(r'[^c*!]\s*[^\s\d\t]',re.I).match
def is_free_format(file):
"""Check if file is in free format Fortran."""
# f90 allows both fixed and free format, assuming fixed unless
# signs of free format are detected.
result = 0
f = open(file,'r')
line = f.readline()
n = 10000 # the number of non-comment lines to scan for hints
if _has_f_header(line):
n = 0
elif _has_f90_header(line):
n = 0
result = 1
while n>0 and line:
line = line.rstrip()
if line and line[0]!='!':
n -= 1
if (line[0]!='\t' and _free_f90_start(line[:5])) or line[-1:]=='&':
result = 1
break
line = f.readline()
f.close()
return result
def has_f90_header(src):
f = open(src,'r')
line = f.readline()
f.close()
return _has_f90_header(line) or _has_fix_header(line)
_f77flags_re = re.compile(r'(c|)f77flags\s*\(\s*(?P<fcname>\w+)\s*\)\s*=\s*(?P<fflags>.*)',re.I)
def get_f77flags(src):
"""
Search the first 20 lines of fortran 77 code for line pattern
`CF77FLAGS(<fcompiler type>)=<f77 flags>`
Return a dictionary {<fcompiler type>:<f77 flags>}.
"""
flags = {}
f = open(src,'r')
i = 0
for line in f.readlines():
i += 1
if i>20: break
m = _f77flags_re.match(line)
if not m: continue
fcname = m.group('fcname').strip()
fflags = m.group('fflags').strip()
flags[fcname] = split_quoted(fflags)
f.close()
return flags
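# A minimal sketch of the directive get_f77flags() recognizes: a temporary
# fixed-form file carrying a CF77FLAGS(...) comment line. The 'gnu' compiler
# type and the flags are arbitrary example values; the helper name is
# hypothetical.
def _sketch_get_f77flags():
    fo, name = make_temp_file(suffix='.f')
    fo.write("CF77FLAGS(gnu) = -ffixed-form -O2\n")
    fo.write("      subroutine dummy()\n      end\n")
    fo.close()
    flags = get_f77flags(name)   # {'gnu': ['-ffixed-form', '-O2']}
    os.remove(name)
    return flags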
if __name__ == '__main__':
show_fcompilers()
|
|
# Copyright 2014 Red Hat, Inc
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import collections
from oslo.config import cfg
from nova import exception
from nova.i18n import _
from nova.openstack.common import jsonutils
from nova.openstack.common import log as logging
virt_cpu_opts = [
cfg.StrOpt('vcpu_pin_set',
               help='Defines which pCPUs instance vCPUs can use. '
'For example, "4-12,^8,15"'),
]
CONF = cfg.CONF
CONF.register_opts(virt_cpu_opts)
LOG = logging.getLogger(__name__)
def get_vcpu_pin_set():
"""Parsing vcpu_pin_set config.
Returns a list of pcpu ids can be used by instances.
"""
if not CONF.vcpu_pin_set:
return None
cpuset_ids = parse_cpu_spec(CONF.vcpu_pin_set)
if not cpuset_ids:
raise exception.Invalid(_("No CPUs available after parsing %r") %
CONF.vcpu_pin_set)
return sorted(cpuset_ids)
def parse_cpu_spec(spec):
"""Parse a CPU set specification.
:param spec: cpu set string eg "1-4,^3,6"
Each element in the list is either a single
CPU number, a range of CPU numbers, or a
caret followed by a CPU number to be excluded
from a previous range.
:returns: a set of CPU indexes
"""
cpuset_ids = set()
cpuset_reject_ids = set()
for rule in spec.split(','):
rule = rule.strip()
# Handle multi ','
if len(rule) < 1:
continue
# Note the count limit in the .split() call
range_parts = rule.split('-', 1)
if len(range_parts) > 1:
# So, this was a range; start by converting the parts to ints
try:
start, end = [int(p.strip()) for p in range_parts]
except ValueError:
raise exception.Invalid(_("Invalid range expression %r")
% rule)
# Make sure it's a valid range
if start > end:
raise exception.Invalid(_("Invalid range expression %r")
% rule)
# Add available CPU ids to set
cpuset_ids |= set(range(start, end + 1))
elif rule[0] == '^':
# Not a range, the rule is an exclusion rule; convert to int
try:
cpuset_reject_ids.add(int(rule[1:].strip()))
except ValueError:
raise exception.Invalid(_("Invalid exclusion "
"expression %r") % rule)
else:
# OK, a single CPU to include; convert to int
try:
cpuset_ids.add(int(rule))
except ValueError:
raise exception.Invalid(_("Invalid inclusion "
"expression %r") % rule)
# Use sets to handle the exclusion rules for us
cpuset_ids -= cpuset_reject_ids
return cpuset_ids
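# A minimal sketch using the docstring's example spec: "1-4" expands to
# {1, 2, 3, 4}, "^3" removes 3, and "6" adds a single CPU, so the result is
# set([1, 2, 4, 6]). The helper name is hypothetical.
def _sketch_parse_cpu_spec():
    return parse_cpu_spec("1-4,^3,6")   # set([1, 2, 4, 6])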
def format_cpu_spec(cpuset, allow_ranges=True):
"""Format a libvirt CPU range specification.
:param cpuset: set (or list) of CPU indexes
Format a set/list of CPU indexes as a libvirt CPU
    range specification. If allow_ranges is true, it
will try to detect continuous ranges of CPUs,
otherwise it will just list each CPU index explicitly.
:returns: a formatted CPU range string
"""
# We attempt to detect ranges, but don't bother with
# trying to do range negations to minimize the overall
# spec string length
if allow_ranges:
ranges = []
previndex = None
for cpuindex in sorted(cpuset):
if previndex is None or previndex != (cpuindex - 1):
ranges.append([])
ranges[-1].append(cpuindex)
previndex = cpuindex
parts = []
for entry in ranges:
if len(entry) == 1:
parts.append(str(entry[0]))
else:
parts.append("%d-%d" % (entry[0], entry[len(entry) - 1]))
return ",".join(parts)
else:
return ",".join(str(id) for id in sorted(cpuset))
class VirtCPUTopology(object):
def __init__(self, sockets, cores, threads):
"""Create a new CPU topology object
:param sockets: number of sockets, at least 1
:param cores: number of cores, at least 1
:param threads: number of threads, at least 1
Create a new CPU topology object representing the
number of sockets, cores and threads to use for
the virtual instance.
"""
self.sockets = sockets
self.cores = cores
self.threads = threads
def score(self, wanttopology):
"""Calculate score for the topology against a desired configuration
:param wanttopology: VirtCPUTopology instance for preferred topology
Calculate a score indicating how well this topology
matches against a preferred topology. A score of 3
indicates an exact match for sockets, cores and threads.
A score of 2 indicates a match of sockets & cores or
sockets & threads or cores and threads. A score of 1
indicates a match of sockets or cores or threads. A
score of 0 indicates no match
:returns: score in range 0 (worst) to 3 (best)
"""
score = 0
if (wanttopology.sockets != -1 and
self.sockets == wanttopology.sockets):
score = score + 1
if (wanttopology.cores != -1 and
self.cores == wanttopology.cores):
score = score + 1
if (wanttopology.threads != -1 and
self.threads == wanttopology.threads):
score = score + 1
return score
@staticmethod
def get_topology_constraints(flavor, image_meta):
"""Get the topology constraints declared in flavor or image
:param flavor: Flavor object to read extra specs from
:param image_meta: Image object to read image metadata from
Gets the topology constraints from the configuration defined
in the flavor extra specs or the image metadata. In the flavor
this will look for
hw:cpu_sockets - preferred socket count
hw:cpu_cores - preferred core count
hw:cpu_threads - preferred thread count
hw:cpu_maxsockets - maximum socket count
hw:cpu_maxcores - maximum core count
hw:cpu_maxthreads - maximum thread count
In the image metadata this will look at
hw_cpu_sockets - preferred socket count
hw_cpu_cores - preferred core count
hw_cpu_threads - preferred thread count
hw_cpu_maxsockets - maximum socket count
hw_cpu_maxcores - maximum core count
hw_cpu_maxthreads - maximum thread count
The image metadata must be strictly lower than any values
set in the flavor. All values are, however, optional.
This will return a pair of VirtCPUTopology instances,
the first giving the preferred socket/core/thread counts,
and the second giving the upper limits on socket/core/
thread counts.
exception.ImageVCPULimitsRangeExceeded will be raised
if the maximum counts set against the image exceed
the maximum counts set against the flavor
exception.ImageVCPUTopologyRangeExceeded will be raised
if the preferred counts set against the image exceed
the maximum counts set against the image or flavor
:returns: (preferred topology, maximum topology)
"""
# Obtain the absolute limits from the flavor
flvmaxsockets = int(flavor.extra_specs.get(
"hw:cpu_max_sockets", 65536))
flvmaxcores = int(flavor.extra_specs.get(
"hw:cpu_max_cores", 65536))
flvmaxthreads = int(flavor.extra_specs.get(
"hw:cpu_max_threads", 65536))
LOG.debug("Flavor limits %(sockets)d:%(cores)d:%(threads)d",
{"sockets": flvmaxsockets,
"cores": flvmaxcores,
"threads": flvmaxthreads})
# Get any customized limits from the image
maxsockets = int(image_meta.get("properties", {})
.get("hw_cpu_max_sockets", flvmaxsockets))
maxcores = int(image_meta.get("properties", {})
.get("hw_cpu_max_cores", flvmaxcores))
maxthreads = int(image_meta.get("properties", {})
.get("hw_cpu_max_threads", flvmaxthreads))
LOG.debug("Image limits %(sockets)d:%(cores)d:%(threads)d",
{"sockets": maxsockets,
"cores": maxcores,
"threads": maxthreads})
# Image limits are not permitted to exceed the flavor
# limits. ie they can only lower what the flavor defines
if ((maxsockets > flvmaxsockets) or
(maxcores > flvmaxcores) or
(maxthreads > flvmaxthreads)):
raise exception.ImageVCPULimitsRangeExceeded(
sockets=maxsockets,
cores=maxcores,
threads=maxthreads,
maxsockets=flvmaxsockets,
maxcores=flvmaxcores,
maxthreads=flvmaxthreads)
# Get any default preferred topology from the flavor
flvsockets = int(flavor.extra_specs.get("hw:cpu_sockets", -1))
flvcores = int(flavor.extra_specs.get("hw:cpu_cores", -1))
flvthreads = int(flavor.extra_specs.get("hw:cpu_threads", -1))
LOG.debug("Flavor pref %(sockets)d:%(cores)d:%(threads)d",
{"sockets": flvsockets,
"cores": flvcores,
"threads": flvthreads})
# If the image limits have reduced the flavor limits
# we might need to discard the preferred topology
# from the flavor
if ((flvsockets > maxsockets) or
(flvcores > maxcores) or
(flvthreads > maxthreads)):
flvsockets = flvcores = flvthreads = -1
# Finally see if the image has provided a preferred
# topology to use
sockets = int(image_meta.get("properties", {})
.get("hw_cpu_sockets", -1))
cores = int(image_meta.get("properties", {})
.get("hw_cpu_cores", -1))
threads = int(image_meta.get("properties", {})
.get("hw_cpu_threads", -1))
LOG.debug("Image pref %(sockets)d:%(cores)d:%(threads)d",
{"sockets": sockets,
"cores": cores,
"threads": threads})
# Image topology is not permitted to exceed image/flavor
# limits
if ((sockets > maxsockets) or
(cores > maxcores) or
(threads > maxthreads)):
raise exception.ImageVCPUTopologyRangeExceeded(
sockets=sockets,
cores=cores,
threads=threads,
maxsockets=maxsockets,
maxcores=maxcores,
maxthreads=maxthreads)
# If no preferred topology was set against the image
# then use the preferred topology from the flavor
# We use 'and' not 'or', since if any value is set
# against the image this invalidates the entire set
# of values from the flavor
if sockets == -1 and cores == -1 and threads == -1:
sockets = flvsockets
cores = flvcores
threads = flvthreads
LOG.debug("Chosen %(sockets)d:%(cores)d:%(threads)d limits "
"%(maxsockets)d:%(maxcores)d:%(maxthreads)d",
{"sockets": sockets, "cores": cores,
"threads": threads, "maxsockets": maxsockets,
"maxcores": maxcores, "maxthreads": maxthreads})
return (VirtCPUTopology(sockets, cores, threads),
VirtCPUTopology(maxsockets, maxcores, maxthreads))
@staticmethod
def get_possible_topologies(vcpus, maxtopology, allow_threads):
"""Get a list of possible topologies for a vCPU count
:param vcpus: total number of CPUs for guest instance
:param maxtopology: VirtCPUTopology for upper limits
:param allow_threads: if the hypervisor supports CPU threads
Given a total desired vCPU count and constraints on the
maximum number of sockets, cores and threads, return a
list of VirtCPUTopology instances that represent every
possible topology that satisfies the constraints.
exception.ImageVCPULimitsRangeImpossible is raised if
it is impossible to achieve the total vcpu count given
the maximum limits on sockets, cores & threads.
:returns: list of VirtCPUTopology instances
"""
# Clamp limits to number of vcpus to prevent
# iterating over insanely large list
maxsockets = min(vcpus, maxtopology.sockets)
maxcores = min(vcpus, maxtopology.cores)
maxthreads = min(vcpus, maxtopology.threads)
if not allow_threads:
maxthreads = 1
LOG.debug("Build topologies for %(vcpus)d vcpu(s) "
"%(maxsockets)d:%(maxcores)d:%(maxthreads)d",
{"vcpus": vcpus, "maxsockets": maxsockets,
"maxcores": maxcores, "maxthreads": maxthreads})
# Figure out all possible topologies that match
# the required vcpus count and satisfy the declared
# limits. If the total vCPU count were very high
# it might be more efficient to factorize the vcpu
# count and then only iterate over its factors, but
# that's overkill right now
possible = []
for s in range(1, maxsockets + 1):
for c in range(1, maxcores + 1):
for t in range(1, maxthreads + 1):
if t * c * s == vcpus:
possible.append(VirtCPUTopology(s, c, t))
# We want to
# - Minimize threads (ie larger sockets * cores is best)
# - Prefer sockets over cores
possible = sorted(possible, reverse=True,
key=lambda x: (x.sockets * x.cores,
x.sockets,
x.threads))
LOG.debug("Got %d possible topologies", len(possible))
if len(possible) == 0:
raise exception.ImageVCPULimitsRangeImpossible(vcpus=vcpus,
sockets=maxsockets,
cores=maxcores,
threads=maxthreads)
return possible
@staticmethod
def sort_possible_topologies(possible, wanttopology):
"""Sort the topologies in order of preference
:param possible: list of VirtCPUTopology instances
:param wanttopology: VirtCPUTopology for preferred topology
This takes the list of possible topologies and resorts
it such that those configurations which most closely
match the preferred topology are first.
:returns: sorted list of VirtCPUTopology instances
"""
# Look at possible topologies and score them according
# to how well they match the preferred topologies
# We don't use python's sort(), since we want to
# preserve the sorting done when populating the
# 'possible' list originally
scores = collections.defaultdict(list)
for topology in possible:
score = topology.score(wanttopology)
scores[score].append(topology)
# Build list of all possible topologies sorted
# by the match score, best match first
desired = []
desired.extend(scores[3])
desired.extend(scores[2])
desired.extend(scores[1])
desired.extend(scores[0])
return desired
@staticmethod
def get_desirable_configs(flavor, image_meta, allow_threads=True):
"""Get desired CPU topologies according to settings
:param flavor: Flavor object to query extra specs from
:param image_meta: ImageMeta object to query properties from
:param allow_threads: if the hypervisor supports CPU threads
Look at the properties set in the flavor extra specs and
the image metadata and build up a list of all possible
valid CPU topologies that can be used in the guest. Then
return this list sorted in order of preference.
:returns: sorted list of VirtCPUTopology instances
"""
LOG.debug("Getting desirable topologies for flavor %(flavor)s "
"and image_meta %(image_meta)s",
{"flavor": flavor, "image_meta": image_meta})
preferred, maximum = (
VirtCPUTopology.get_topology_constraints(flavor,
image_meta))
possible = VirtCPUTopology.get_possible_topologies(
flavor.vcpus, maximum, allow_threads)
desired = VirtCPUTopology.sort_possible_topologies(
possible, preferred)
return desired
@staticmethod
def get_best_config(flavor, image_meta, allow_threads=True):
"""Get bst CPU topology according to settings
:param flavor: Flavor object to query extra specs from
:param image_meta: ImageMeta object to query properties from
:param allow_threads: if the hypervisor supports CPU threads
Look at the properties set in the flavor extra specs and
the image metadata and build up a list of all possible
valid CPU topologies that can be used in the guest. Then
return the best topology to use
:returns: a VirtCPUTopology instance for best topology
"""
return VirtCPUTopology.get_desirable_configs(flavor,
image_meta,
allow_threads)[0]
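# A minimal sketch of the enumeration and sorting above, assuming an 8-vCPU
# guest with effectively unlimited socket/core/thread caps and a preference
# for 2 sockets. The 65536 limit mirrors the flavor default used earlier; the
# helper name is hypothetical.
def _sketch_topology_selection():
    unlimited = VirtCPUTopology(65536, 65536, 65536)
    possible = VirtCPUTopology.get_possible_topologies(8, unlimited, True)
    preferred = VirtCPUTopology(2, -1, -1)
    ordered = VirtCPUTopology.sort_possible_topologies(possible, preferred)
    # The best candidate here is 2 sockets x 4 cores x 1 thread.
    return ordered[0]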
class VirtNUMATopologyCell(object):
"""Class for reporting NUMA resources in a cell
The VirtNUMATopologyCell class represents the
hardware resources present in a NUMA cell.
"""
def __init__(self, id, cpuset, memory):
"""Create a new NUMA Cell
:param id: integer identifier of cell
:param cpuset: set containing list of CPU indexes
:param memory: RAM measured in KiB
Creates a new NUMA cell object to record the hardware
resources.
:returns: a new NUMA cell object
"""
super(VirtNUMATopologyCell, self).__init__()
self.id = id
self.cpuset = cpuset
self.memory = memory
def _to_dict(self):
return {'cpus': format_cpu_spec(self.cpuset, allow_ranges=False),
'mem': {'total': self.memory},
'id': self.id}
@classmethod
def _from_dict(cls, data_dict):
cpuset = parse_cpu_spec(data_dict.get('cpus', ''))
memory = data_dict.get('mem', {}).get('total', 0)
cell_id = data_dict.get('id')
return cls(cell_id, cpuset, memory)
class VirtNUMATopologyCellUsage(VirtNUMATopologyCell):
"""Class for reporting NUMA resources and usage in a cell
The VirtNUMATopologyCellUsage class specializes
VirtNUMATopologyCell to include information about the
utilization of hardware resources in a NUMA cell.
"""
def __init__(self, id, cpuset, memory, cpu_usage=0, memory_usage=0):
"""Create a new NUMA Cell with usage
:param id: integer identifier of cell
:param cpuset: set containing list of CPU indexes
:param memory: RAM measured in KiB
:param cpu_usage: number of CPUs allocated
:param memory_usage: RAM allocated in KiB
Creates a new NUMA cell object to record the hardware
resources and utilization. The number of CPUs specified
by the @cpu_usage parameter may be larger than the number
of bits set in @cpuset if CPU overcommit is used. Likewise
the amount of RAM specified by the @memory_usage parameter
may be larger than the available RAM in @memory if RAM
overcommit is used.
:returns: a new NUMA cell object
"""
super(VirtNUMATopologyCellUsage, self).__init__(
id, cpuset, memory)
self.cpu_usage = cpu_usage
self.memory_usage = memory_usage
def _to_dict(self):
data_dict = super(VirtNUMATopologyCellUsage, self)._to_dict()
data_dict['mem']['used'] = self.memory_usage
data_dict['cpu_usage'] = self.cpu_usage
return data_dict
@classmethod
def _from_dict(cls, data_dict):
cpuset = parse_cpu_spec(data_dict.get('cpus', ''))
cpu_usage = data_dict.get('cpu_usage', 0)
memory = data_dict.get('mem', {}).get('total', 0)
memory_usage = data_dict.get('mem', {}).get('used', 0)
cell_id = data_dict.get('id')
return cls(cell_id, cpuset, memory, cpu_usage, memory_usage)
class VirtNUMATopology(object):
"""Base class for tracking NUMA topology information
The VirtNUMATopology class represents the NUMA hardware
topology for memory and CPUs in any machine. It is
later specialized for handling either guest instance
or compute host NUMA topology.
"""
def __init__(self, cells=None):
"""Create a new NUMA topology object
:param cells: list of VirtNUMATopologyCell instances
"""
super(VirtNUMATopology, self).__init__()
self.cells = cells or []
def __len__(self):
"""Defined so that boolean testing works the same as for lists."""
return len(self.cells)
def __repr__(self):
return "<%s: %s>" % (self.__class__.__name__, str(self._to_dict()))
def _to_dict(self):
return {'cells': [cell._to_dict() for cell in self.cells]}
@classmethod
def _from_dict(cls, data_dict):
return cls(cells=[cls.cell_class._from_dict(cell_dict)
for cell_dict in data_dict.get('cells', [])])
def to_json(self):
return jsonutils.dumps(self._to_dict())
@classmethod
def from_json(cls, json_string):
return cls._from_dict(jsonutils.loads(json_string))
class VirtNUMAInstanceTopology(VirtNUMATopology):
"""Class to represent the topology configured for a guest
instance. It provides helper APIs to determine configuration
    from the metadata specified against the flavor and/or
    disk image.
"""
cell_class = VirtNUMATopologyCell
@staticmethod
def _get_flavor_or_image_prop(flavor, image_meta, propname):
flavor_val = flavor.get('extra_specs', {}).get("hw:" + propname)
image_val = image_meta.get("hw_" + propname)
if flavor_val is not None:
if image_val is not None:
raise exception.ImageNUMATopologyForbidden(
name='hw_' + propname)
return flavor_val
else:
return image_val
@classmethod
def _get_constraints_manual(cls, nodes, flavor, image_meta):
cells = []
totalmem = 0
availcpus = set(range(flavor.vcpus))
for node in range(nodes):
cpus = cls._get_flavor_or_image_prop(
flavor, image_meta, "numa_cpus.%d" % node)
mem = cls._get_flavor_or_image_prop(
flavor, image_meta, "numa_mem.%d" % node)
# We're expecting both properties set, so
# raise an error if either is missing
if cpus is None or mem is None:
raise exception.ImageNUMATopologyIncomplete()
mem = int(mem)
cpuset = parse_cpu_spec(cpus)
for cpu in cpuset:
if cpu > (flavor.vcpus - 1):
raise exception.ImageNUMATopologyCPUOutOfRange(
cpunum=cpu, cpumax=(flavor.vcpus - 1))
if cpu not in availcpus:
raise exception.ImageNUMATopologyCPUDuplicates(
cpunum=cpu)
availcpus.remove(cpu)
cells.append(VirtNUMATopologyCell(node, cpuset, mem))
totalmem = totalmem + mem
if availcpus:
raise exception.ImageNUMATopologyCPUsUnassigned(
cpuset=str(availcpus))
if totalmem != flavor.memory_mb:
raise exception.ImageNUMATopologyMemoryOutOfRange(
memsize=totalmem,
memtotal=flavor.memory_mb)
return cls(cells)
@classmethod
def _get_constraints_auto(cls, nodes, flavor, image_meta):
if ((flavor.vcpus % nodes) > 0 or
(flavor.memory_mb % nodes) > 0):
raise exception.ImageNUMATopologyAsymmetric()
cells = []
for node in range(nodes):
cpus = cls._get_flavor_or_image_prop(
flavor, image_meta, "numa_cpus.%d" % node)
mem = cls._get_flavor_or_image_prop(
flavor, image_meta, "numa_mem.%d" % node)
# We're not expecting any properties set, so
# raise an error if there are any
if cpus is not None or mem is not None:
raise exception.ImageNUMATopologyIncomplete()
ncpus = int(flavor.vcpus / nodes)
mem = int(flavor.memory_mb / nodes)
start = node * ncpus
cpuset = set(range(start, start + ncpus))
cells.append(VirtNUMATopologyCell(node, cpuset, mem))
return cls(cells)
@classmethod
def get_constraints(cls, flavor, image_meta):
nodes = cls._get_flavor_or_image_prop(
flavor, image_meta, "numa_nodes")
if nodes is None:
return None
nodes = int(nodes)
# We'll pick what path to go down based on whether
# anything is set for the first node. Both paths
# have logic to cope with inconsistent property usage
auto = cls._get_flavor_or_image_prop(
flavor, image_meta, "numa_cpus.0") is None
if auto:
return cls._get_constraints_auto(
nodes, flavor, image_meta)
else:
return cls._get_constraints_manual(
nodes, flavor, image_meta)
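# A minimal sketch of the automatic path in get_constraints(), assuming a
# flavor with 4 vCPUs, 2048 MB and 'hw:numa_nodes=2' and no per-node
# overrides: the vCPUs and memory are split evenly into two cells,
# set([0, 1])/1024 MB and set([2, 3])/1024 MB. _SketchFlavor only mimics the
# attribute and dict access this module performs; it is not a real Flavor.
class _SketchFlavor(dict):
    vcpus = 4
    memory_mb = 2048
def _sketch_numa_constraints():
    flavor = _SketchFlavor(extra_specs={'hw:numa_nodes': '2'})
    topology = VirtNUMAInstanceTopology.get_constraints(flavor, image_meta={})
    return [(cell.id, cell.cpuset, cell.memory) for cell in topology.cells]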
class VirtNUMAHostTopology(VirtNUMATopology):
"""Class represents the NUMA configuration and utilization
of a compute node. As well as exposing the overall topology
it tracks the utilization of the resources by guest instances
"""
cell_class = VirtNUMATopologyCellUsage
@classmethod
def usage_from_instances(cls, host, instances, free=False):
"""Get host topology usage
:param host: VirtNUMAHostTopology with usage information
:param instances: list of VirtNUMAInstanceTopology
:param free: If True usage of the host will be decreased
Sum the usage from all @instances to report the overall
host topology usage
:returns: VirtNUMAHostTopology including usage information
"""
if host is None:
return
instances = instances or []
cells = []
sign = -1 if free else 1
for hostcell in host.cells:
memory_usage = hostcell.memory_usage
cpu_usage = hostcell.cpu_usage
for instance in instances:
for instancecell in instance.cells:
if instancecell.id == hostcell.id:
memory_usage = (
memory_usage + sign * instancecell.memory)
cpu_usage = cpu_usage + sign * len(instancecell.cpuset)
cell = cls.cell_class(
hostcell.id, hostcell.cpuset, hostcell.memory,
max(0, cpu_usage), max(0, memory_usage))
cells.append(cell)
return cls(cells)
|
|
import sys
from mock import Mock, patch
import pytest
from remoto import backends
from remoto.backends import local
from remoto.tests import fake_module
from remoto.tests.conftest import Capture, Factory
class FakeSocket(object):
def __init__(self, gethostname, getfqdn=None):
self.gethostname = lambda: gethostname
self.getfqdn = lambda: getfqdn or gethostname
class TestJsonModuleExecute(object):
def test_execute_returns_casted_boolean(self):
conn = local.LocalConnection()
conn.remote_import_system = 'json'
remote_fake_module = conn.import_module(fake_module)
assert remote_fake_module.function(None) is True
def test_execute_can_raise_remote_exceptions(self):
conn = local.LocalConnection()
conn.remote_import_system = 'json'
remote_fake_module = conn.import_module(fake_module)
with pytest.raises(Exception) as error:
assert remote_fake_module.fails()
assert 'Exception: failure from fails() function' in str(error.value)
def test_execute_can_raise_unexpected_remote_exceptions(self):
conn = local.LocalConnection()
conn.remote_import_system = 'json'
remote_fake_module = conn.import_module(fake_module)
with pytest.raises(Exception) as error:
remote_fake_module.unexpected_fail()
assert 'error calling "unexpected_fail"' in str(error.value)
assert 'Unexpected remote exception' in str(error.value)
def test_execute_noop(self):
conn = local.LocalConnection()
conn.remote_import_system = 'json'
remote_fake_module = conn.import_module(fake_module)
assert remote_fake_module.noop() is None
def test_execute_passes_is_none(self):
conn = local.LocalConnection()
conn.remote_import_system = 'json'
remote_fake_module = conn.import_module(fake_module)
assert remote_fake_module.passes() is None
def test_execute_wrong_interpreter(self):
conn = local.LocalConnection()
conn.remote_import_system = 'json'
remote_fake_module = conn.import_module(fake_module)
remote_fake_module.python_executable = 'python9'
with pytest.raises(Exception) as error:
remote_fake_module.passes()
assert 'Failed to execute command: python9' in str(error.value)
def test_fallback_interpreter(self, monkeypatch, capsys):
monkeypatch.setattr(backends, 'check', lambda *a, **kw: ('', '', 1))
conn = local.LocalConnection()
conn.remote_import_system = 'json'
remote_fake_module = conn.import_module(fake_module)
try:
remote_fake_module.passes()
except Exception:
pass
assert remote_fake_module.python_executable is not None
class TestNeedsSsh(object):
def test_short_hostname_matches(self):
socket = FakeSocket('foo.example.org')
assert backends.needs_ssh('foo', socket) is False
def test_long_hostname_matches(self):
socket = FakeSocket('foo.example.org')
assert backends.needs_ssh('foo.example.org', socket) is False
def test_hostname_does_not_match(self):
socket = FakeSocket('foo')
assert backends.needs_ssh('meh', socket) is True
def test_fqdn_hostname_matches_short_hostname(self):
socket = FakeSocket('foo', getfqdn='foo.example.org')
assert backends.needs_ssh('foo.example.org', socket) is False
@pytest.mark.parametrize('hostname', ['localhost', '127.0.0.1', '127.0.1.1'])
def test_local_hostname(self, hostname):
assert backends.needs_ssh(hostname) is False
class FakeGateway(object):
def remote_exec(self, module):
pass
class TestLegacyRemoteModule(object):
def setup(self):
self.conn = backends.BaseConnection('localhost', sudo=True, eager=False)
self.conn.gateway = FakeGateway()
def test_importing_it_sets_it_as_remote_module(self):
self.conn.import_module(fake_module)
assert fake_module == self.conn.remote_module.module
def test_importing_it_returns_the_module_too(self):
remote_foo = self.conn.import_module(fake_module)
assert remote_foo.module == fake_module
def test_execute_the_remote_module_send(self):
stub_channel = Factory(send=Capture(), receive=Capture())
self.conn.gateway.channel = self.conn.gateway
remote_foo = self.conn.import_module(fake_module)
remote_foo.channel = stub_channel
remote_foo.function('argument')
assert stub_channel.send.calls[0]['args'][0] == "function('argument')"
def test_execute_the_remote_module_receive(self):
stub_channel = Factory(receive=Capture(return_values=[True]), send=Capture())
self.conn.gateway.channel = self.conn.gateway
remote_foo = self.conn.import_module(fake_module)
remote_foo.channel = stub_channel
assert remote_foo.function('argument') is True
class TestLegacyModuleExecuteArgs(object):
def setup(self):
self.remote_module = backends.LegacyModuleExecute(FakeGateway(), None)
def test_single_argument(self):
assert self.remote_module._convert_args(('foo',)) == "'foo'"
def test_more_than_one_argument(self):
args = ('foo', 'bar', 1)
assert self.remote_module._convert_args(args) == "'foo', 'bar', 1"
def test_dictionary_as_argument(self):
args = ({'some key': 1},)
assert self.remote_module._convert_args(args) == "{'some key': 1}"
class TestLegacyModuleExecuteGetAttr(object):
def setup(self):
self.remote_module = backends.LegacyModuleExecute(FakeGateway(), None)
def test_raise_attribute_error(self):
with pytest.raises(AttributeError) as err:
self.remote_module.foo()
assert err.value.args[0] == 'module None does not have attribute foo'
class TestMakeConnectionString(object):
def test_makes_sudo_python_no_ssh(self):
conn = backends.BaseConnection('localhost', sudo=True, eager=False, interpreter='python')
conn_string = conn._make_connection_string('localhost', _needs_ssh=lambda x: False)
assert conn_string == 'popen//python=sudo python'
def test_makes_sudo_python_with_ssh(self):
conn = backends.BaseConnection('localhost', sudo=True, eager=False, interpreter='python')
conn_string = conn._make_connection_string('localhost', _needs_ssh=lambda x: True)
assert conn_string == 'ssh=localhost//python=sudo python'
def test_makes_sudo_python_with_ssh_options(self):
conn = backends.BaseConnection(
'localhost', sudo=True, eager=False,
interpreter='python', ssh_options='-F vagrant_ssh_config')
conn_string = conn._make_connection_string('localhost', _needs_ssh=lambda x: True)
assert conn_string == 'ssh=-F vagrant_ssh_config localhost//python=sudo python'
def test_makes_python_no_ssh(self):
conn = backends.BaseConnection('localhost', sudo=False, eager=False, interpreter='python')
conn_string = conn._make_connection_string('localhost', _needs_ssh=lambda x: False)
assert conn_string == 'popen//python=python'
def test_makes_python_with_ssh(self):
conn = backends.BaseConnection('localhost', sudo=False, eager=False, interpreter='python')
conn_string = conn._make_connection_string('localhost', _needs_ssh=lambda x: True)
assert conn_string == 'ssh=localhost//python=python'
def test_makes_sudo_python_with_forced_sudo(self):
conn = backends.BaseConnection('localhost', sudo=True, eager=False, interpreter='python')
conn_string = conn._make_connection_string(
'localhost', _needs_ssh=lambda x: False, use_sudo=True
)
assert conn_string == 'popen//python=sudo python'
def test_does_not_make_sudo_python_with_forced_sudo(self):
conn = backends.BaseConnection('localhost', sudo=True, eager=False, interpreter='python')
conn_string = conn._make_connection_string(
'localhost', _needs_ssh=lambda x: False, use_sudo=False
)
assert conn_string == 'popen//python=python'
def test_detects_python3(self):
with patch.object(sys, 'version_info', (3, 5, 1)):
conn = backends.BaseConnection('localhost', sudo=True, eager=False)
conn_string = conn._make_connection_string('localhost', _needs_ssh=lambda x: False)
assert conn_string == 'popen//python=sudo python3'
def test_detects_python2(self):
with patch.object(sys, 'version_info', (2, 7, 11)):
conn = backends.BaseConnection('localhost', sudo=False, eager=False)
conn_string = conn._make_connection_string('localhost', _needs_ssh=lambda x: True)
assert conn_string == 'ssh=localhost//python=python2'
class TestDetectSudo(object):
def setup(self):
self.execnet = Mock()
self.execnet.return_value = self.execnet
self.execnet.makegateway.return_value = self.execnet
self.execnet.remote_exec.return_value = self.execnet
def test_does_not_need_sudo(self):
self.execnet.receive.return_value = 'root'
conn = backends.BaseConnection('localhost', sudo=True, eager=False)
assert conn._detect_sudo(_execnet=self.execnet) is False
def test_does_need_sudo(self):
self.execnet.receive.return_value = 'alfredo'
conn = backends.BaseConnection('localhost', sudo=True, eager=False)
assert conn._detect_sudo(_execnet=self.execnet) is True
class TestGetPythonExecutable(object):
def test_non_zero(self, monkeypatch):
monkeypatch.setattr(backends, 'check', lambda *a, **kw: ([], [], 1))
conn = local.LocalConnection()
result = backends.get_python_executable(conn)
assert result == conn.interpreter
def test_no_stdout(self, monkeypatch):
monkeypatch.setattr(backends, 'check', lambda *a, **kw: ([], [], 0))
conn = local.LocalConnection()
result = backends.get_python_executable(conn)
assert result == conn.interpreter
def test_which(self, monkeypatch):
monkeypatch.setattr(backends, 'check', lambda *a, **kw: (['/usr/bin/python17'], [], 0))
conn = local.LocalConnection()
result = backends.get_python_executable(conn)
assert result == '/usr/bin/python17'
|
|
from __future__ import unicode_literals
from django.utils import six
from djblets.testing.decorators import add_fixtures
from djblets.webapi.errors import DOES_NOT_EXIST, PERMISSION_DENIED
from reviewboard.webapi.resources import resources
from reviewboard.webapi.tests.base import BaseWebAPITestCase
from reviewboard.webapi.tests.mimetypes import (
watched_review_request_item_mimetype,
watched_review_request_list_mimetype)
from reviewboard.webapi.tests.mixins import BasicTestsMetaclass
from reviewboard.webapi.tests.urls import (
get_review_request_item_url,
get_watched_review_request_item_url,
get_watched_review_request_list_url)
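# BasicTestsMetaclass is expected to generate the boilerplate tests for the HTTP
# methods listed in test_http_methods from the setup_basic_*/check_*_result hooks
# defined on each class below; only the error cases are written out by hand.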
@six.add_metaclass(BasicTestsMetaclass)
class ResourceListTests(BaseWebAPITestCase):
"""Testing the WatchedReviewRequestResource list API tests."""
fixtures = ['test_users']
test_http_methods = ('GET', 'POST')
sample_api_url = 'users/<username>/watched/review-requests/'
resource = resources.watched_review_request
def compare_item(self, item_rsp, obj):
watched_rsp = item_rsp['watched_review_request']
self.assertEqual(watched_rsp['id'], obj.display_id)
self.assertEqual(watched_rsp['summary'], obj.summary)
#
# HTTP GET tests
#
def setup_basic_get_test(self, user, with_local_site, local_site_name,
populate_items):
if populate_items:
review_request = self.create_review_request(
with_local_site=with_local_site,
publish=True)
profile = user.get_profile()
profile.starred_review_requests.add(review_request)
items = [review_request]
else:
items = []
return (get_watched_review_request_list_url(user.username,
local_site_name),
watched_review_request_list_mimetype,
items)
@add_fixtures(['test_site'])
def test_get_with_site_does_not_exist(self):
"""Testing the GET users/<username>/watched/review-requests/ API
with a local site and Does Not Exist error
"""
self._login_user(local_site=True)
rsp = self.api_get(
get_watched_review_request_list_url(self.user.username,
self.local_site_name),
expected_status=404)
self.assertEqual(rsp['stat'], 'fail')
self.assertEqual(rsp['err']['code'], DOES_NOT_EXIST.code)
#
# HTTP POST tests
#
def setup_basic_post_test(self, user, with_local_site, local_site_name,
post_valid_data):
review_request = self.create_review_request(
with_local_site=with_local_site,
publish=True)
if post_valid_data:
post_data = {
'object_id': review_request.display_id,
}
else:
post_data = {}
return (get_watched_review_request_list_url(user.username,
local_site_name),
watched_review_request_item_mimetype,
post_data,
[review_request])
def check_post_result(self, user, rsp, review_request):
profile = user.get_profile()
self.assertTrue(review_request in
profile.starred_review_requests.all())
def test_post_with_does_not_exist_error(self):
"""Testing the POST users/<username>/watched/review-requests/
with Does Not Exist error
"""
rsp = self.api_post(
get_watched_review_request_list_url(self.user.username),
{'object_id': 999},
expected_status=404)
self.assertEqual(rsp['stat'], 'fail')
self.assertEqual(rsp['err']['code'], DOES_NOT_EXIST.code)
@add_fixtures(['test_site'])
def test_post_with_site_does_not_exist_error(self):
"""Testing the POST users/<username>/watched/review-requests/ API
with a local site and Does Not Exist error
"""
user = self._login_user(local_site=True)
rsp = self.api_post(
get_watched_review_request_list_url(user.username,
self.local_site_name),
{'object_id': 10},
expected_status=404)
self.assertEqual(rsp['stat'], 'fail')
self.assertEqual(rsp['err']['code'], DOES_NOT_EXIST.code)
@six.add_metaclass(BasicTestsMetaclass)
class ResourceItemTests(BaseWebAPITestCase):
"""Testing the WatchedReviewRequestResource item API tests."""
fixtures = ['test_users']
test_http_methods = ('DELETE', 'PUT')
sample_api_url = 'users/<username>/watched/review-requests/<id>/'
resource = resources.watched_review_request
def setup_http_not_allowed_item_test(self, user):
return get_watched_review_request_item_url(user.username, 1)
#
# HTTP DELETE tests
#
def setup_basic_delete_test(self, user, with_local_site, local_site_name):
review_request = self.create_review_request(
with_local_site=with_local_site,
publish=True)
profile = user.get_profile()
profile.starred_review_requests.add(review_request)
return (get_watched_review_request_item_url(user.username,
review_request.display_id,
local_site_name),
[profile, review_request])
def check_delete_result(self, user, profile, review_request):
self.assertFalse(review_request in
profile.starred_review_requests.all())
def test_delete_with_does_not_exist_error(self):
"""Testing the DELETE users/<username>/watched/review_request/<id>/ API
with Does Not Exist error
"""
rsp = self.api_delete(
get_watched_review_request_item_url(self.user.username, 999),
expected_status=404)
self.assertEqual(rsp['stat'], 'fail')
self.assertEqual(rsp['err']['code'], DOES_NOT_EXIST.code)
#
# HTTP GET tests
#
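    # GET on a watched item resource redirects (HTTP 302) to the underlying
    # review request resource, so these tests assert on the Location header
    # rather than on a response payload.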
def test_get(self):
"""Testing the GET users/<username>/watched/review-requests/<id>/ API
"""
review_request = self.create_review_request(publish=True)
profile = self.user.get_profile()
profile.starred_review_requests.add(review_request)
expected_url = (self.base_url +
get_review_request_item_url(review_request.display_id))
self.api_get(
get_watched_review_request_item_url(self.user.username,
review_request.display_id),
expected_status=302,
expected_headers={
'Location': expected_url,
})
@add_fixtures(['test_site'])
def test_get_with_site(self):
"""Testing the GET users/<username>/watched/review-requests/<id>/ API
with access to a local site
"""
user = self._login_user(local_site=True)
review_request = self.create_review_request(with_local_site=True,
publish=True)
profile = user.get_profile()
profile.starred_review_requests.add(review_request)
expected_url = (self.base_url +
get_review_request_item_url(review_request.display_id,
self.local_site_name))
self.api_get(
get_watched_review_request_item_url(user.username,
review_request.display_id,
self.local_site_name),
expected_status=302,
expected_headers={
'Location': expected_url,
})
@add_fixtures(['test_site'])
def test_get_with_site_no_access(self):
"""Testing the GET users/<username>/watched/review-requests/<id>/ API
without access to a local site
"""
review_request = self.create_review_request(with_local_site=True,
publish=True)
profile = self.user.get_profile()
profile.starred_review_requests.add(review_request)
rsp = self.api_get(
get_watched_review_request_item_url(self.user.username,
review_request.display_id,
self.local_site_name),
expected_status=403)
self.assertEqual(rsp['stat'], 'fail')
self.assertEqual(rsp['err']['code'], PERMISSION_DENIED.code)
|
|
# This file is part of Tryton. The COPYRIGHT file at the top level of
# this repository contains the full copyright notices and license terms.
import unittest
import trytond.tests.test_tryton
from trytond.tests.test_tryton import POOL, with_transaction
from nereid.testing import NereidTestCase
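# Each test below opens an ir.translation.set wizard session, triggers one of
# its set_* transitions (templates, code, or wtforms), and then checks either
# the number of extracted ir.translation records or the src/comments/res_id of
# a specific record bundled with the nereid_test module.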
class TestTranslation(NereidTestCase):
'Test Translation'
def setUp(self):
# Install the test module which has bundled translations which can
# be used for this test
trytond.tests.test_tryton.install_module('nereid_test')
@with_transaction()
def test_0010_nereid_template_extraction(self):
"""
        Test translation extraction from nereid templates
"""
TranslationSet = POOL.get('ir.translation.set', type='wizard')
IRTranslation = POOL.get('ir.translation')
session_id, _, _ = TranslationSet.create()
set_wizard = TranslationSet(session_id)
count_before = IRTranslation.search([
('type', '=', 'nereid_template')
], count=True)
self.assertEqual(count_before, 0)
# Set the nereid_template translations alone
set_wizard.set_nereid_template()
count_after = IRTranslation.search([
('type', '=', 'nereid_template')
], count=True)
self.assertTrue(count_after > count_before)
@with_transaction()
def test_0020_nereid_code_extraction(self):
"""
        Ensure that translations are extracted from the code
"""
TranslationSet = POOL.get('ir.translation.set', type='wizard')
IRTranslation = POOL.get('ir.translation')
session_id, _, _ = TranslationSet.create()
set_wizard = TranslationSet(session_id)
count_before = IRTranslation.search([
('type', '=', 'nereid')
], count=True)
self.assertEqual(count_before, 0)
# Set the nereid translations alone
set_wizard.set_nereid()
count_after = IRTranslation.search([
('type', '=', 'nereid')
], count=True)
self.assertTrue(count_after > count_before)
@with_transaction()
def test_0030_wtforms_builtin_extraction(self):
"""
Ensure that the builtin messages from wtforms are also extracted
"""
TranslationSet = POOL.get('ir.translation.set', type='wizard')
IRTranslation = POOL.get('ir.translation')
session_id, _, _ = TranslationSet.create()
set_wizard = TranslationSet(session_id)
count_before = IRTranslation.search([
('type', '=', 'wtforms')
], count=True)
self.assertEqual(count_before, 0)
# Set the wtforms translations alone
set_wizard.set_wtforms()
count_after = IRTranslation.search([
('type', '=', 'wtforms')
], count=True)
self.assertTrue(count_after > count_before)
@with_transaction()
def test_0040_template_gettext_using_(self):
"""
Test for gettext without comment using _
"""
TranslationSet = POOL.get('ir.translation.set', type='wizard')
IRTranslation = POOL.get('ir.translation')
session_id, _, _ = TranslationSet.create()
set_wizard = TranslationSet(session_id)
# Set the nereid_template translations alone
set_wizard.set_nereid_template()
# gettext with no comments and using _
translation, = IRTranslation.search([
('type', '=', 'nereid_template'),
('module', '=', 'nereid_test'),
('src', '=', 'gettext')
])
self.assertEqual(translation.comments, None)
self.assertEqual(translation.res_id, 7)
@with_transaction()
def test_0050_template_gettext_2(self):
"""
Test for gettext with comment before it
"""
TranslationSet = POOL.get('ir.translation.set', type='wizard')
IRTranslation = POOL.get('ir.translation')
session_id, _, _ = TranslationSet.create()
set_wizard = TranslationSet(session_id)
# Set the nereid_template translations alone
set_wizard.set_nereid_template()
translation, = IRTranslation.search([
('type', '=', 'nereid_template'),
('module', '=', 'nereid_test'),
('src', '=', 'gettext with comment b4')
])
self.assertEqual(translation.comments, translation.src)
self.assertEqual(translation.res_id, 10)
@with_transaction()
def test_0060_template_gettext_3(self):
"""
Test for gettext with comment inline
"""
TranslationSet = POOL.get('ir.translation.set', type='wizard')
IRTranslation = POOL.get('ir.translation')
session_id, _, _ = TranslationSet.create()
set_wizard = TranslationSet(session_id)
# Set the nereid_template translations alone
set_wizard.set_nereid_template()
translation, = IRTranslation.search([
('type', '=', 'nereid_template'),
('module', '=', 'nereid_test'),
('src', '=', 'gettext with comment inline')
])
self.assertEqual(translation.comments, translation.src)
self.assertEqual(translation.res_id, 12)
@with_transaction()
def test_0070_template_gettext_4(self):
"""
Test for gettext using gettext instead of _
"""
TranslationSet = POOL.get('ir.translation.set', type='wizard')
IRTranslation = POOL.get('ir.translation')
session_id, _, _ = TranslationSet.create()
set_wizard = TranslationSet(session_id)
# Set the nereid_template translations alone
set_wizard.set_nereid_template()
translation, = IRTranslation.search([
('type', '=', 'nereid_template'),
('module', '=', 'nereid_test'),
('src', '=', 'Hello World!')
])
self.assertEqual(translation.comments, None)
self.assertEqual(translation.res_id, 17)
@with_transaction()
def test_0080_template_ngettext(self):
"""
Test for ngettext
"""
TranslationSet = POOL.get('ir.translation.set', type='wizard')
IRTranslation = POOL.get('ir.translation')
session_id, _, _ = TranslationSet.create()
set_wizard = TranslationSet(session_id)
# Set the nereid_template translations alone
set_wizard.set_nereid_template()
translation, = IRTranslation.search([
('type', '=', 'nereid_template'),
('module', '=', 'nereid_test'),
('src', '=', '%(num)d apple')
])
self.assertEqual(translation.res_id, 20)
# Look for plural
translation, = IRTranslation.search([
('type', '=', 'nereid_template'),
('module', '=', 'nereid_test'),
('src', '=', '%(num)d apples')
])
self.assertEqual(translation.res_id, 20)
@with_transaction()
def test_0090_template_trans_tag(self):
"""
        Test for {% trans %}Hello {{ username }}!{% endtrans %} tag
"""
TranslationSet = POOL.get('ir.translation.set', type='wizard')
IRTranslation = POOL.get('ir.translation')
session_id, _, _ = TranslationSet.create()
set_wizard = TranslationSet(session_id)
# Set the nereid_template translations alone
set_wizard.set_nereid_template()
# XXX: See how {{ user }} changed to %(user)s
translation, = IRTranslation.search([
('type', '=', 'nereid_template'),
('module', '=', 'nereid_test'),
('src', '=', 'Hello %(username)s!'),
])
self.assertEqual(
translation.comments, 'Translation with trans tag'
)
self.assertEqual(translation.res_id, 23)
@with_transaction()
def test_0100_template_trans_tag_with_expr(self):
"""
Test for
{% trans user=user.username %}Hello {{ user }}!{% endtrans %} tag
"""
TranslationSet = POOL.get('ir.translation.set', type='wizard')
IRTranslation = POOL.get('ir.translation')
session_id, _, _ = TranslationSet.create()
set_wizard = TranslationSet(session_id)
# Set the nereid_template translations alone
set_wizard.set_nereid_template()
# XXX: See how {{ user }} changed to %(user)s
translation, = IRTranslation.search([
('type', '=', 'nereid_template'),
('module', '=', 'nereid_test'),
('src', '=', 'Hello %(name)s!')
])
self.assertEqual(
translation.comments, 'Translation with an expression'
)
self.assertEqual(translation.res_id, 26)
@with_transaction()
def test_0110_template_trans_tag_plural(self):
"""
Test for
{% trans count=list|length %}
There is {{ count }} {{ name }} object.
{% pluralize %}
There are {{ count }} {{ name }} objects.
{% endtrans %}
"""
TranslationSet = POOL.get('ir.translation.set', type='wizard')
IRTranslation = POOL.get('ir.translation')
session_id, _, _ = TranslationSet.create()
set_wizard = TranslationSet(session_id)
# Set the nereid_template translations alone
set_wizard.set_nereid_template()
translation, = IRTranslation.search([
('type', '=', 'nereid_template'),
('module', '=', 'nereid_test'),
('src', 'ilike', '%There is %(count)s %(objname)s object.%'),
])
self.assertEqual(
translation.comments, 'trans tag with pluralisation'
)
self.assertEqual(translation.res_id, 29)
# now look for the plural
translation, = IRTranslation.search([
('type', '=', 'nereid_template'),
('module', '=', 'nereid_test'),
('src', 'ilike', '%There are %(count)s %(objname)s objects.%'),
])
self.assertEqual(
translation.comments, 'trans tag with pluralisation'
)
self.assertEqual(translation.res_id, 29)
@with_transaction()
def test_0200_translation_clean(self):
"""
Check if the cleaning of translations work
"""
TranslationSet = POOL.get('ir.translation.set', type='wizard')
TranslationClean = POOL.get('ir.translation.clean', type='wizard')
IRTranslation = POOL.get('ir.translation')
IRModule = POOL.get('ir.module')
# First create all the translations
session_id, _, _ = TranslationSet.create()
set_wizard = TranslationSet(session_id)
set_wizard.transition_set_()
# Uninstall nereid_test and there should be no translations
# belonging to that module with type as nereid or
# nereid_template
nereid_test, = IRModule.search([('name', '=', 'nereid_test')])
nereid_test.state = 'uninstalled'
nereid_test.save()
session_id, _, _ = TranslationClean.create()
clean_wizard = TranslationClean(session_id)
clean_wizard.transition_clean()
count = IRTranslation.search([
('module', '=', 'nereid_test'),
('type', 'in', ('nereid', 'nereid_template'))
], count=True)
self.assertEqual(count, 0)
@with_transaction()
def test_0300_translation_update(self):
"""
Check if the update does not break this functionality
"""
TranslationSet = POOL.get('ir.translation.set', type='wizard')
TranslationUpdate = POOL.get('ir.translation.update', type='wizard')
IRTranslation = POOL.get('ir.translation')
IRLanguage = POOL.get('ir.lang')
# First create all the translations
session_id, _, _ = TranslationSet.create()
set_wizard = TranslationSet(session_id)
set_wizard.transition_set_()
# set an additional language as translatable
new_lang, = IRLanguage.search([
('translatable', '=', False)
], limit=1)
new_lang.translatable = True
new_lang.save()
count_before = IRTranslation.search([], count=True)
# Now update the translations
session_id, _, _ = TranslationUpdate.create()
update_wizard = TranslationUpdate(session_id)
update_wizard.start.language = new_lang
update_wizard.do_update(update_wizard.update.get_action())
# check the count now
count_after = IRTranslation.search([], count=True)
self.assertEqual(count_after, count_before * 2)
@with_transaction()
def test_0400_translation_export(self):
"""
Export the translations and test
"""
TranslationSet = POOL.get('ir.translation.set', type='wizard')
TranslationUpdate = POOL.get('ir.translation.update', type='wizard')
IRTranslation = POOL.get('ir.translation')
IRLanguage = POOL.get('ir.lang')
# First create all the translations
session_id, _, _ = TranslationSet.create()
set_wizard = TranslationSet(session_id)
set_wizard.transition_set_()
# set an additional language as translatable
new_lang, = IRLanguage.search([
('translatable', '=', False)
], limit=1)
new_lang.translatable = True
new_lang.save()
# Now update the translations
session_id, _, _ = TranslationUpdate.create()
update_wizard = TranslationUpdate(session_id)
update_wizard.start.language = new_lang
update_wizard.do_update(update_wizard.update.get_action())
# TODO: Check the contents of the po file
IRTranslation.translation_export(new_lang.code, 'nereid_test')
IRTranslation.translation_export(new_lang.code, 'nereid')
def suite():
"Nereid test suite"
test_suite = unittest.TestSuite()
test_suite.addTests(
unittest.TestLoader().loadTestsFromTestCase(TestTranslation)
)
return test_suite
if __name__ == '__main__':
unittest.TextTestRunner(verbosity=2).run(suite())
|
|
import json
import time
import attr
import requests
import responses
from htmlvis import HTTPSniffer, RequestsSniffer
from pytest import fixture, mark
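# RequestsSniffer is installed as a requests 'response' hook; every completed
# request/response pair is recorded as a transaction whose attributes are
# asserted below. The responses library stubs the HTTP endpoints, so no real
# network traffic is involved.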
@fixture
def success_response():
responses.add(
responses.GET,
'http://mysniffer.com/api/1/success',
json={"greeting": "hi there!"}, )
@fixture
def error_response():
responses.add(
responses.GET,
'http://mysniffer.com/api/1/notfound',
json={"error": "not found"},
status=404)
def test_implements_http_sniffer():
assert issubclass(RequestsSniffer, HTTPSniffer)
@responses.activate
def test_intercepts_one_request(success_response):
sniffing_hook = RequestsSniffer('', '')
response = requests.get(
'http://mysniffer.com/api/1/success',
hooks={'response': sniffing_hook})
response.raise_for_status()
assert len(sniffing_hook.transactions) == 1
@responses.activate
def test_intercepts_two_requests(success_response):
sniffing_hook = RequestsSniffer('', '')
first_response = requests.get(
'http://mysniffer.com/api/1/success',
hooks={'response': sniffing_hook})
first_response.raise_for_status()
second_response = requests.get(
'http://mysniffer.com/api/1/success',
hooks={'response': sniffing_hook})
second_response.raise_for_status()
assert len(sniffing_hook.transactions) == 2
@responses.activate
def test_records_the_request_method(success_response):
sniffing_hook = RequestsSniffer('', '')
requests.get(
'http://mysniffer.com/api/1/success',
hooks={'response': sniffing_hook})
transaction = sniffing_hook.transactions[0]
assert transaction.request.method == 'GET'
@responses.activate
def test_records_the_request_url_path(success_response):
sniffing_hook = RequestsSniffer('', '')
requests.get(
'http://mysniffer.com/api/1/success',
hooks={'response': sniffing_hook})
transaction = sniffing_hook.transactions[0]
assert transaction.request.url_path == '/api/1/success'
@responses.activate
@mark.parametrize("url, response_fixture",
[('http://mysniffer.com/api/1/success', success_response),
('http://mysniffer.com/api/1/notfound', error_response)])
def test_transactions_are_json_serializable(url, response_fixture):
response_fixture()
sniffing_hook = RequestsSniffer('', '')
requests.get(url, hooks={'response': sniffing_hook})
transaction = sniffing_hook.transactions[0]
json.dumps(attr.asdict(transaction))
@responses.activate
def test_records_the_request_headers(success_response):
sniffing_hook = RequestsSniffer('', '')
requests.get(
'http://mysniffer.com/api/1/success',
headers={'Accept': 'application/json',
'Better-Safe': 'Than/Sorry'},
hooks={'response': sniffing_hook})
transaction = sniffing_hook.transactions[0]
assert transaction.request.headers['Accept'] == 'application/json'
assert transaction.request.headers['Better-Safe'] == 'Than/Sorry'
@responses.activate
def test_records_the_request_body_if_content_type_is_json(success_response):
sniffing_hook = RequestsSniffer('', '')
requests.get(
'http://mysniffer.com/api/1/success',
json={'Better safe': 'Than sorry'},
hooks={'response': sniffing_hook})
transaction = sniffing_hook.transactions[0]
# FIXME: should be str instead of bytes?
assert transaction.request.body == b'{"Better safe": "Than sorry"}'
@responses.activate
def test_records_the_request_body_if_content_is_plain_text(success_response):
sniffing_hook = RequestsSniffer('', '')
requests.get(
'http://mysniffer.com/api/1/success',
data='Better safe than sorry',
hooks={'response': sniffing_hook})
transaction = sniffing_hook.transactions[0]
# FIXME: should be bytes instead of str?
assert transaction.request.body == 'Better safe than sorry'
@responses.activate
def test_measures_elapsed_time_for_one_transaction(success_response):
sniffing_hook = RequestsSniffer('', '')
start_time = time.time()
response = requests.get(
'http://mysniffer.com/api/1/success',
hooks={'response': sniffing_hook})
elapsed_time = time.time() - start_time
response.raise_for_status()
transaction = sniffing_hook.transactions[0]
assert 0 <= transaction.request.elapsed <= elapsed_time
assert 0 <= transaction.response.elapsed <= elapsed_time
assert 0 <= transaction.request.elapsed <= transaction.response.elapsed
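# The two-transaction and restart tests below patch time.time() to fixed values
# (via the mocker fixture, presumably from pytest-mock) so the recorded elapsed
# times can be bounded deterministically; note that success_response is called
# directly here instead of being injected as a fixture.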
@responses.activate
def test_measures_elapsed_time_for_two_transactions(mocker):
mocker.patch('time.time')
time.time.return_value = 0.1
sniffing_hook = RequestsSniffer('', '')
success_response()
start_time = time.time()
time.time.return_value = 0.5
response = requests.get(
'http://mysniffer.com/api/1/success',
hooks={'response': sniffing_hook})
success_response()
first_elapsed_time = time.time() - start_time
time.time.return_value = 2.2
response = requests.get(
'http://mysniffer.com/api/1/success',
hooks={'response': sniffing_hook})
second_elapsed_time = time.time() - start_time
response.raise_for_status()
sniffing_hook.transactions[0]
second_transaction = sniffing_hook.transactions[1]
assert first_elapsed_time <= second_transaction.request.elapsed <= second_elapsed_time
assert first_elapsed_time <= second_transaction.response.elapsed <= second_elapsed_time
assert first_elapsed_time <= second_transaction.request.elapsed <= second_transaction.response.elapsed
@responses.activate
def test_records_the_response_headers():
responses.add(
responses.GET,
'http://mysniffer.com/api/1/success',
adding_headers={
'Better-Safe': 'Than/Sorry',
'Content-Type': 'application/json'
})
sniffing_hook = RequestsSniffer('', '')
requests.get(
'http://mysniffer.com/api/1/success',
hooks={'response': sniffing_hook})
transaction = sniffing_hook.transactions[0]
assert transaction.response.headers['Content-Type'] == 'application/json'
assert transaction.response.headers['Better-Safe'] == 'Than/Sorry'
@responses.activate
def test_records_the_response_body_if_content_type_is_json():
responses.add(
responses.GET,
'http://mysniffer.com/api/1/success',
json={'Better safe': 'Than sorry'})
sniffing_hook = RequestsSniffer('', '')
requests.get(
'http://mysniffer.com/api/1/success',
hooks={'response': sniffing_hook})
transaction = sniffing_hook.transactions[0]
assert transaction.response.body == '{"Better safe": "Than sorry"}'
@responses.activate
def test_records_the_response_body_if_content_is_plain_text():
responses.add(
responses.GET,
'http://mysniffer.com/api/1/success',
body='Better safe than sorry')
sniffing_hook = RequestsSniffer('', '')
requests.get(
'http://mysniffer.com/api/1/success',
hooks={'response': sniffing_hook})
transaction = sniffing_hook.transactions[0]
assert transaction.response.body == 'Better safe than sorry'
@responses.activate
def test_records_the_response_status_for_a_success_response(success_response):
sniffing_hook = RequestsSniffer('', '')
requests.get(
'http://mysniffer.com/api/1/success',
hooks={'response': sniffing_hook})
transaction = sniffing_hook.transactions[0]
assert transaction.response.status == '200'
@responses.activate
def test_records_the_response_status_for_an_error_response(error_response):
sniffing_hook = RequestsSniffer('', '')
requests.get(
'http://mysniffer.com/api/1/notfound',
hooks={'response': sniffing_hook})
transaction = sniffing_hook.transactions[0]
assert transaction.response.status == '404'
@responses.activate
def test_transaction_includes_the_client_and_server_name(success_response):
sniffing_hook = RequestsSniffer('Client name', 'Server name')
requests.get(
'http://mysniffer.com/api/1/success',
hooks={'response': sniffing_hook})
transaction = sniffing_hook.transactions[0]
assert transaction.client_name == 'Client name'
assert transaction.server_name == 'Server name'
@responses.activate
def test_restart_resets_the_elapsed_time(mocker):
mocker.patch('time.time')
time.time.return_value = 0.1
sniffing_hook = RequestsSniffer('', '')
success_response()
start_time = time.time()
time.time.return_value = 0.5
response = requests.get(
'http://mysniffer.com/api/1/success',
hooks={'response': sniffing_hook})
time.time.return_value = 2.0
success_response()
time.time() - start_time
first_transaction = sniffing_hook.transactions[0]
sniffing_hook.restart()
time.time.return_value = 2.2
response = requests.get(
'http://mysniffer.com/api/1/success',
hooks={'response': sniffing_hook})
response.raise_for_status()
second_transaction = sniffing_hook.transactions[0]
assert second_transaction.request.elapsed < first_transaction.response.elapsed
@responses.activate
def test_restart_resets_the_captured_transactions(success_response):
sniffing_hook = RequestsSniffer('', '')
first_response = requests.get(
'http://mysniffer.com/api/1/success',
hooks={'response': sniffing_hook})
first_response.raise_for_status()
sniffing_hook.restart()
second_response = requests.get(
'http://mysniffer.com/api/1/success',
hooks={'response': sniffing_hook})
second_response.raise_for_status()
assert len(sniffing_hook.transactions) == 1
@fixture
def transactions_response():
responses.add(
responses.GET,
'http://mysniffer.com/sniffer/transactions',
json={
"transactions": [{
"client_name": "Client name",
"server_name": "Server name",
"request": {
"body": '{"a": 1, "b": 2}',
"elapsed": 1.2,
"headers": {
"Accept": "application/json"
},
"method": "POST",
"url_path": "/some/url",
},
"response": {
"body": "",
"elapsed": 1.4,
"headers": "",
"status": "",
}
}, {
"client_name": "Client name",
"server_name": "Server name",
"request": {
"body": "",
"elapsed": 2.3,
"headers": [""],
"method": "GET",
"url_path": "/another/url",
},
"response": {
"body": "",
"elapsed": "",
"headers": "",
"status": "",
}
}]
})
|
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import sys
if len(sys.argv) > 1:
sys.path.insert(0, sys.argv.pop(1))
import unittest
import math
import viennagrid.wrapper
from utils import equal, point_equal
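# point_equal is registered per point type via addTypeEqualityFunc so that
# assertEqual compares coordinates; equal() presumably returns an
# (is_equal, message) pair, hence the assertTrue(*equal(...)) unpacking below.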
class TestPointCartesian1D(unittest.TestCase):
def setUp(self):
self.addTypeEqualityFunc(viennagrid.wrapper.PointCartesian1D, point_equal)
def test_init(self):
"""Test constructor, and properties 'dim' and 'coords'."""
point = viennagrid.wrapper.PointCartesian1D()
self.assertEqual(point.dim, 1)
self.assertEqual(point.coords[0], 0)
def test_init_double(self):
"""Test constructor with doubles, and properties 'dim' and 'coords'."""
point = viennagrid.wrapper.PointCartesian1D(2.0)
self.assertEqual(point.dim, 1)
self.assertEqual(point.coords[0], 2.0)
def test_coord_system(self):
"""Test property 'coord_system'."""
point = viennagrid.wrapper.PointCartesian1D()
self.assertEqual(point.coord_system, 'cartesian')
def test_get_coord(self):
"""Test method 'get_coord'."""
point = viennagrid.wrapper.PointCartesian1D()
self.assertEqual(point.get_coord(0), 0)
point = viennagrid.wrapper.PointCartesian1D(2)
self.assertEqual(point.get_coord(0), 2)
def test_set_coord(self):
"""Test method 'set_coord'."""
point = viennagrid.wrapper.PointCartesian1D()
self.assertEqual(point.get_coord(0), 0)
point.set_coord(0, 5)
self.assertEqual(point.get_coord(0), 5)
def test_equal(self):
"""Test operator '=='."""
p1 = viennagrid.wrapper.PointCartesian1D()
p2 = viennagrid.wrapper.PointCartesian1D()
self.assertEqual(p1, p2)
self.assertEqual(p1.coords[0], 0.0)
self.assertEqual(p2.coords[0], 0.0)
p1 = viennagrid.wrapper.PointCartesian1D(2)
p2 = viennagrid.wrapper.PointCartesian1D(2)
self.assertEqual(p1, p2)
self.assertEqual(p1.coords[0], 2.0)
self.assertEqual(p2.coords[0], 2.0)
def test_assign(self):
"""Test operator '='."""
p1 = viennagrid.wrapper.PointCartesian1D(2)
p2 = viennagrid.wrapper.PointCartesian1D(1)
self.assertNotEqual(p1, p2)
self.assertEqual(p1.coords[0], 2.0)
self.assertEqual(p2.coords[0], 1.0)
p1 = p2
self.assertEqual(p1, p2)
self.assertEqual(p1.coords[0], 1.0)
self.assertEqual(p2.coords[0], 1.0)
def test_add(self):
"""Test operator '+' (addition)."""
p1 = viennagrid.wrapper.PointCartesian1D(2)
p2 = viennagrid.wrapper.PointCartesian1D(1)
self.assertNotEqual(p1, p2)
self.assertEqual(p1.coords[0], 2.0)
self.assertEqual(p2.coords[0], 1.0)
p3 = p2 + p1
self.assertEqual(p3.coords[0], 3.0)
def test_sub(self):
"""Test operator '-' (subtraction)."""
p1 = viennagrid.wrapper.PointCartesian1D(2)
p2 = viennagrid.wrapper.PointCartesian1D(1)
self.assertNotEqual(p1, p2)
self.assertEqual(p1.coords[0], 2.0)
self.assertEqual(p2.coords[0], 1.0)
p3 = p2 - p1
self.assertEqual(p3.coords[0], -1.0)
def test_mul(self):
"""Test operator '*' (multiplication)."""
p1 = viennagrid.wrapper.PointCartesian1D()
p2 = viennagrid.wrapper.PointCartesian1D()
p1 = p1 * 2
self.assertEqual(p1, p2)
self.assertEqual(p1.coords[0], 0.0)
p1 = viennagrid.wrapper.PointCartesian1D(1)
p2 = viennagrid.wrapper.PointCartesian1D(2)
p1 = p1 * 2
self.assertEqual(p1, p2)
self.assertEqual(p1.coords[0], 2.0)
def test_div(self):
"""Test operator '/' (division)."""
p1 = viennagrid.wrapper.PointCartesian1D()
p2 = viennagrid.wrapper.PointCartesian1D()
p1 = p1 / 2
self.assertEqual(p1, p2)
self.assertEqual(p1.coords[0], 0.0)
p1 = viennagrid.wrapper.PointCartesian1D(1)
p2 = viennagrid.wrapper.PointCartesian1D(0.5)
p1 = p1 / 2
self.assertEqual(p1, p2)
self.assertEqual(p1.coords[0], 0.5)
def test_neg(self):
"""Test unary operator '-' (__neg__ in Python)."""
p1 = viennagrid.wrapper.PointCartesian1D(1)
p2 = -p1
self.assertEqual(-p1.coords[0], p2.coords[0])
class TestPointCartesian2D(unittest.TestCase):
def setUp(self):
self.addTypeEqualityFunc(viennagrid.wrapper.PointCartesian2D, point_equal)
def test_init(self):
"""Test constructor, and properties 'dim' and 'coords'."""
point = viennagrid.wrapper.PointCartesian2D()
self.assertEqual(point.dim, 2)
self.assertEqual(point.coords[0], 0)
self.assertEqual(point.coords[1], 0)
def test_init_double(self):
"""Test constructor with doubles, and properties 'dim' and 'coords'."""
point = viennagrid.wrapper.PointCartesian2D(2.0, 5.0)
self.assertEqual(point.dim, 2)
self.assertEqual(point.coords[0], 2.0)
self.assertEqual(point.coords[1], 5.0)
def test_coord_system(self):
"""Test property 'coord_system'."""
point = viennagrid.wrapper.PointCartesian2D()
self.assertEqual(point.coord_system, 'cartesian')
def test_get_coord(self):
"""Test method 'get_coord'."""
point = viennagrid.wrapper.PointCartesian2D()
self.assertEqual(point.get_coord(0), 0)
self.assertEqual(point.get_coord(1), 0)
point = viennagrid.wrapper.PointCartesian2D(2, 5)
self.assertEqual(point.get_coord(0), 2)
self.assertEqual(point.get_coord(1), 5)
def test_set_coord(self):
"""Test method 'set_coord'."""
point = viennagrid.wrapper.PointCartesian2D()
self.assertEqual(point.get_coord(0), 0)
self.assertEqual(point.get_coord(1), 0)
point.set_coord(0, 5)
point.set_coord(1, 8)
self.assertEqual(point.get_coord(0), 5)
self.assertEqual(point.get_coord(1), 8)
def test_equal(self):
"""Test operator '=='."""
p1 = viennagrid.wrapper.PointCartesian2D()
p2 = viennagrid.wrapper.PointCartesian2D()
self.assertEqual(p1, p2)
self.assertEqual(p1.coords[0], 0.0)
self.assertEqual(p1.coords[1], 0.0)
self.assertEqual(p2.coords[0], 0.0)
self.assertEqual(p2.coords[1], 0.0)
p1 = viennagrid.wrapper.PointCartesian2D(2, 1)
p2 = viennagrid.wrapper.PointCartesian2D(2, 1)
self.assertEqual(p1, p2)
self.assertEqual(p1.coords[0], 2.0)
self.assertEqual(p1.coords[1], 1.0)
self.assertEqual(p2.coords[0], 2.0)
self.assertEqual(p2.coords[1], 1.0)
def test_assign(self):
"""Test operator '='."""
p1 = viennagrid.wrapper.PointCartesian2D(2, 1)
p2 = viennagrid.wrapper.PointCartesian2D(1, 2)
self.assertNotEqual(p1, p2)
self.assertEqual(p1.coords[0], 2.0)
self.assertEqual(p1.coords[1], 1.0)
self.assertEqual(p2.coords[0], 1.0)
self.assertEqual(p2.coords[1], 2.0)
p1 = p2
self.assertEqual(p1, p2)
self.assertEqual(p1.coords[0], 1.0)
self.assertEqual(p1.coords[1], 2.0)
self.assertEqual(p2.coords[0], 1.0)
self.assertEqual(p2.coords[1], 2.0)
def test_add(self):
"""Test operator '+' (addition)."""
p1 = viennagrid.wrapper.PointCartesian2D(2, 1)
p2 = viennagrid.wrapper.PointCartesian2D(1, 2)
self.assertNotEqual(p1, p2)
self.assertEqual(p1.coords[0], 2.0)
self.assertEqual(p1.coords[1], 1.0)
self.assertEqual(p2.coords[0], 1.0)
self.assertEqual(p2.coords[1], 2.0)
p3 = p2 + p1
self.assertEqual(p3.coords[0], 3.0)
self.assertEqual(p3.coords[1], 3.0)
def test_sub(self):
"""Test operator '-' (subtraction)."""
p1 = viennagrid.wrapper.PointCartesian2D(2, 1)
p2 = viennagrid.wrapper.PointCartesian2D(1, 2)
self.assertNotEqual(p1, p2)
self.assertEqual(p1.coords[0], 2.0)
self.assertEqual(p1.coords[1], 1.0)
self.assertEqual(p2.coords[0], 1.0)
self.assertEqual(p2.coords[1], 2.0)
p3 = p2 - p1
self.assertEqual(p3.coords[0], -1.0)
self.assertEqual(p3.coords[1], 1.0)
def test_mul(self):
"""Test operator '*' (multiplication)."""
p1 = viennagrid.wrapper.PointCartesian2D()
p2 = viennagrid.wrapper.PointCartesian2D()
p1 = p1 * 2
self.assertEqual(p1, p2)
self.assertEqual(p1.coords[0], 0.0)
self.assertEqual(p1.coords[1], 0.0)
p1 = viennagrid.wrapper.PointCartesian2D(1, 1)
p2 = viennagrid.wrapper.PointCartesian2D(2, 2)
p1 = p1 * 2
self.assertEqual(p1, p2)
self.assertEqual(p1.coords[0], 2.0)
self.assertEqual(p1.coords[1], 2.0)
def test_div(self):
"""Test operator '/' (division)."""
p1 = viennagrid.wrapper.PointCartesian2D()
p2 = viennagrid.wrapper.PointCartesian2D()
p1 = p1 / 2
self.assertEqual(p1, p2)
self.assertEqual(p1.coords[0], 0.0)
self.assertEqual(p1.coords[1], 0.0)
p1 = viennagrid.wrapper.PointCartesian2D(1, 1)
p2 = viennagrid.wrapper.PointCartesian2D(0.5, 0.5)
p1 = p1 / 2
self.assertEqual(p1, p2)
self.assertEqual(p1.coords[0], 0.5)
self.assertEqual(p1.coords[1], 0.5)
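    # to_polar converts Cartesian (x, y) to polar (r, phi) with r = sqrt(x^2 + y^2)
    # and phi = atan2(y, x); the cases below cover the origin, both axes and the
    # 45-degree diagonal.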
def test_to_polar(self):
"""Test method 'to_polar'."""
c = viennagrid.wrapper.PointCartesian2D(0, 0)
p = c.to_polar()
self.assertTrue(*equal(p.get_coord(0), 0))
self.assertTrue(*equal(p.get_coord(1), 0))
c = viennagrid.wrapper.PointCartesian2D(0, 1)
p = c.to_polar()
self.assertTrue(*equal(p.get_coord(0), 1))
self.assertTrue(*equal(p.get_coord(1), math.pi / 2))
c = viennagrid.wrapper.PointCartesian2D(1, 0)
p = c.to_polar()
self.assertTrue(*equal(p.get_coord(0), 1))
self.assertTrue(*equal(p.get_coord(1), 0))
c = viennagrid.wrapper.PointCartesian2D(1, 1)
p = c.to_polar()
self.assertTrue(*equal(p.get_coord(0), math.sqrt(2)))
self.assertTrue(*equal(p.get_coord(1), math.pi / 4))
def test_neg(self):
"""Test unary operator '-' (__neg__ in Python)."""
p1 = viennagrid.wrapper.PointCartesian2D(1, 2)
p2 = -p1
self.assertEqual(-p1.coords[0], p2.coords[0])
self.assertEqual(-p1.coords[1], p2.coords[1])
class TestPointCartesian3D(unittest.TestCase):
def setUp(self):
self.addTypeEqualityFunc(viennagrid.wrapper.PointCartesian3D, point_equal)
def test_init(self):
"""Test constructor, and properties 'dim' and 'coords'."""
point = viennagrid.wrapper.PointCartesian3D()
self.assertEqual(point.dim, 3)
self.assertEqual(point.coords[0], 0)
self.assertEqual(point.coords[1], 0)
self.assertEqual(point.coords[2], 0)
def test_init_double(self):
"""Test constructor with doubles, and properties 'dim' and 'coords'."""
point = viennagrid.wrapper.PointCartesian3D(2.0, 5.0, 4.0)
self.assertEqual(point.dim, 3)
self.assertEqual(point.coords[0], 2.0)
self.assertEqual(point.coords[1], 5.0)
self.assertEqual(point.coords[2], 4.0)
def test_coord_system(self):
"""Test property 'coord_system'."""
point = viennagrid.wrapper.PointCartesian3D()
self.assertEqual(point.coord_system, 'cartesian')
def test_get_coord(self):
"""Test method 'get_coord'."""
point = viennagrid.wrapper.PointCartesian3D()
self.assertEqual(point.get_coord(0), 0)
self.assertEqual(point.get_coord(1), 0)
self.assertEqual(point.get_coord(2), 0)
point = viennagrid.wrapper.PointCartesian3D(2, 5, 4)
self.assertEqual(point.get_coord(0), 2)
self.assertEqual(point.get_coord(1), 5)
self.assertEqual(point.get_coord(2), 4)
def test_set_coord(self):
"""Test method 'set_coord'."""
point = viennagrid.wrapper.PointCartesian3D()
self.assertEqual(point.get_coord(0), 0)
self.assertEqual(point.get_coord(1), 0)
self.assertEqual(point.get_coord(2), 0)
point.set_coord(0, 5)
point.set_coord(1, 8)
point.set_coord(2, 9)
self.assertEqual(point.get_coord(0), 5)
self.assertEqual(point.get_coord(1), 8)
self.assertEqual(point.get_coord(2), 9)
def test_equal(self):
"""Test operator '=='."""
p1 = viennagrid.wrapper.PointCartesian3D()
p2 = viennagrid.wrapper.PointCartesian3D()
self.assertEqual(p1, p2)
self.assertEqual(p1.coords[0], 0.0)
self.assertEqual(p1.coords[1], 0.0)
self.assertEqual(p1.coords[2], 0.0)
self.assertEqual(p2.coords[0], 0.0)
self.assertEqual(p2.coords[1], 0.0)
self.assertEqual(p2.coords[2], 0.0)
p1 = viennagrid.wrapper.PointCartesian3D(2, 1, 4)
p2 = viennagrid.wrapper.PointCartesian3D(2, 1, 4)
self.assertEqual(p1, p2)
self.assertEqual(p1.coords[0], 2.0)
self.assertEqual(p1.coords[1], 1.0)
self.assertEqual(p1.coords[2], 4.0)
self.assertEqual(p2.coords[0], 2.0)
self.assertEqual(p2.coords[1], 1.0)
self.assertEqual(p2.coords[2], 4.0)
def test_assign(self):
"""Test operator '='."""
p1 = viennagrid.wrapper.PointCartesian3D(2, 1, 3)
p2 = viennagrid.wrapper.PointCartesian3D(1, 2, 4)
self.assertNotEqual(p1, p2)
self.assertEqual(p1.coords[0], 2.0)
self.assertEqual(p1.coords[1], 1.0)
self.assertEqual(p1.coords[2], 3.0)
self.assertEqual(p2.coords[0], 1.0)
self.assertEqual(p2.coords[1], 2.0)
self.assertEqual(p2.coords[2], 4.0)
p1 = p2
self.assertEqual(p1, p2)
self.assertEqual(p1.coords[0], 1.0)
self.assertEqual(p1.coords[1], 2.0)
self.assertEqual(p1.coords[2], 4.0)
self.assertEqual(p2.coords[0], 1.0)
self.assertEqual(p2.coords[1], 2.0)
self.assertEqual(p2.coords[2], 4.0)
def test_add(self):
"""Test operator '+' (addition)."""
p1 = viennagrid.wrapper.PointCartesian3D(2, 1, 3)
p2 = viennagrid.wrapper.PointCartesian3D(1, 2, 4)
self.assertNotEqual(p1, p2)
self.assertEqual(p1.coords[0], 2.0)
self.assertEqual(p1.coords[1], 1.0)
self.assertEqual(p1.coords[2], 3.0)
self.assertEqual(p2.coords[0], 1.0)
self.assertEqual(p2.coords[1], 2.0)
self.assertEqual(p2.coords[2], 4.0)
p3 = p2 + p1
self.assertEqual(p3.coords[0], 3.0)
self.assertEqual(p3.coords[1], 3.0)
self.assertEqual(p3.coords[2], 7.0)
def test_sub(self):
"""Test operator '-' (subtraction)."""
p1 = viennagrid.wrapper.PointCartesian3D(2, 1, 3)
p2 = viennagrid.wrapper.PointCartesian3D(1, 2, 4)
self.assertNotEqual(p1, p2)
self.assertEqual(p1.coords[0], 2.0)
self.assertEqual(p1.coords[1], 1.0)
self.assertEqual(p1.coords[2], 3.0)
self.assertEqual(p2.coords[0], 1.0)
self.assertEqual(p2.coords[1], 2.0)
self.assertEqual(p2.coords[2], 4.0)
p3 = p2 - p1
self.assertEqual(p3.coords[0], -1.0)
self.assertEqual(p3.coords[1], 1.0)
self.assertEqual(p3.coords[2], 1.0)
def test_mul(self):
"""Test operator '*' (multiplication)."""
p1 = viennagrid.wrapper.PointCartesian3D()
p2 = viennagrid.wrapper.PointCartesian3D()
p1 = p1 * 2
self.assertEqual(p1, p2)
self.assertEqual(p1.coords[0], 0.0)
self.assertEqual(p1.coords[1], 0.0)
self.assertEqual(p1.coords[2], 0.0)
p1 = viennagrid.wrapper.PointCartesian3D(1, 1, 1)
p2 = viennagrid.wrapper.PointCartesian3D(2, 2, 2)
p1 = p1 * 2
self.assertEqual(p1, p2)
self.assertEqual(p1.coords[0], 2.0)
self.assertEqual(p1.coords[1], 2.0)
self.assertEqual(p1.coords[2], 2.0)
def test_div(self):
"""Test operator '/' (division)."""
p1 = viennagrid.wrapper.PointCartesian3D()
p2 = viennagrid.wrapper.PointCartesian3D()
p1 = p1 / 2
self.assertEqual(p1, p2)
self.assertEqual(p1.coords[0], 0.0)
self.assertEqual(p1.coords[1], 0.0)
self.assertEqual(p1.coords[2], 0.0)
p1 = viennagrid.wrapper.PointCartesian3D(1, 1, 1)
p2 = viennagrid.wrapper.PointCartesian3D(0.5, 0.5, 0.5)
p1 = p1 / 2
self.assertEqual(p1, p2)
self.assertEqual(p1.coords[0], 0.5)
self.assertEqual(p1.coords[1], 0.5)
self.assertEqual(p1.coords[2], 0.5)
def test_neg(self):
"""Test unary operator '-' (__neg__ in Python)."""
p1 = viennagrid.wrapper.PointCartesian3D(1, 2, 3)
p2 = -p1
self.assertEqual(-p1.coords[0], p2.coords[0])
self.assertEqual(-p1.coords[1], p2.coords[1])
self.assertEqual(-p1.coords[2], p2.coords[2])
class TestPointCylindrical3D(unittest.TestCase):
def setUp(self):
self.addTypeEqualityFunc(viennagrid.wrapper.PointCylindrical3D, point_equal)
def test_init(self):
"""Test constructor, and properties 'dim' and 'coords'."""
point = viennagrid.wrapper.PointCylindrical3D()
self.assertEqual(point.dim, 3)
self.assertEqual(point.coords[0], 0)
self.assertEqual(point.coords[1], 0)
self.assertEqual(point.coords[2], 0)
def test_init_double(self):
"""Test constructor with doubles, and properties 'dim' and 'coords'."""
point = viennagrid.wrapper.PointCylindrical3D(2.0, 5.0, 4.0)
self.assertEqual(point.dim, 3)
self.assertEqual(point.coords[0], 2.0)
self.assertEqual(point.coords[1], 5.0)
self.assertEqual(point.coords[2], 4.0)
def test_coord_system(self):
"""Test property 'coord_system'."""
point = viennagrid.wrapper.PointCylindrical3D()
self.assertEqual(point.coord_system, 'cylindrical')
def test_get_coord(self):
"""Test method 'get_coord'."""
point = viennagrid.wrapper.PointCylindrical3D()
self.assertEqual(point.get_coord(0), 0)
self.assertEqual(point.get_coord(1), 0)
self.assertEqual(point.get_coord(2), 0)
point = viennagrid.wrapper.PointCylindrical3D(2, 5, 4)
self.assertEqual(point.get_coord(0), 2)
self.assertEqual(point.get_coord(1), 5)
self.assertEqual(point.get_coord(2), 4)
def test_set_coord(self):
"""Test method 'set_coord'."""
point = viennagrid.wrapper.PointCylindrical3D()
self.assertEqual(point.get_coord(0), 0)
self.assertEqual(point.get_coord(1), 0)
self.assertEqual(point.get_coord(2), 0)
point.set_coord(0, 5)
point.set_coord(1, 8)
point.set_coord(2, 9)
self.assertEqual(point.get_coord(0), 5)
self.assertEqual(point.get_coord(1), 8)
self.assertEqual(point.get_coord(2), 9)
def test_equal(self):
"""Test operator '=='."""
p1 = viennagrid.wrapper.PointCylindrical3D()
p2 = viennagrid.wrapper.PointCylindrical3D()
self.assertEqual(p1, p2)
self.assertEqual(p1.coords[0], 0.0)
self.assertEqual(p1.coords[1], 0.0)
self.assertEqual(p1.coords[2], 0.0)
self.assertEqual(p2.coords[0], 0.0)
self.assertEqual(p2.coords[1], 0.0)
self.assertEqual(p2.coords[2], 0.0)
p1 = viennagrid.wrapper.PointCylindrical3D(2, 1, 4)
p2 = viennagrid.wrapper.PointCylindrical3D(2, 1, 4)
self.assertEqual(p1, p2)
self.assertEqual(p1.coords[0], 2.0)
self.assertEqual(p1.coords[1], 1.0)
self.assertEqual(p1.coords[2], 4.0)
self.assertEqual(p2.coords[0], 2.0)
self.assertEqual(p2.coords[1], 1.0)
self.assertEqual(p2.coords[2], 4.0)
def test_assign(self):
"""Test operator '='."""
p1 = viennagrid.wrapper.PointCylindrical3D(2, 1, 3)
p2 = viennagrid.wrapper.PointCylindrical3D(1, 2, 4)
self.assertNotEqual(p1, p2)
self.assertEqual(p1.coords[0], 2.0)
self.assertEqual(p1.coords[1], 1.0)
self.assertEqual(p1.coords[2], 3.0)
self.assertEqual(p2.coords[0], 1.0)
self.assertEqual(p2.coords[1], 2.0)
self.assertEqual(p2.coords[2], 4.0)
p1 = p2
self.assertEqual(p1, p2)
self.assertEqual(p1.coords[0], 1.0)
self.assertEqual(p1.coords[1], 2.0)
self.assertEqual(p1.coords[2], 4.0)
self.assertEqual(p2.coords[0], 1.0)
self.assertEqual(p2.coords[1], 2.0)
self.assertEqual(p2.coords[2], 4.0)
def test_neg(self):
"""Test unary operator '-' (__neg__ in Python)."""
pass
class TestPointPolar2D(unittest.TestCase):
def setUp(self):
self.addTypeEqualityFunc(viennagrid.wrapper.PointPolar2D, point_equal)
def test_init(self):
"""Test constructor, and properties 'dim' and 'coords'."""
point = viennagrid.wrapper.PointPolar2D()
self.assertEqual(point.dim, 2)
self.assertEqual(point.coords[0], 0)
self.assertEqual(point.coords[1], 0)
def test_init_double(self):
"""Test constructor with doubles, and properties 'dim' and 'coords'."""
point = viennagrid.wrapper.PointPolar2D(2.0, 5.0)
self.assertEqual(point.dim, 2)
self.assertEqual(point.coords[0], 2.0)
self.assertEqual(point.coords[1], 5.0)
def test_coord_system(self):
"""Test property 'coord_system'."""
point = viennagrid.wrapper.PointPolar2D()
self.assertEqual(point.coord_system, 'polar')
def test_get_coord(self):
"""Test method 'get_coord'."""
point = viennagrid.wrapper.PointPolar2D()
self.assertEqual(point.get_coord(0), 0)
self.assertEqual(point.get_coord(1), 0)
point = viennagrid.wrapper.PointPolar2D(2, 5)
self.assertEqual(point.get_coord(0), 2)
self.assertEqual(point.get_coord(1), 5)
def test_set_coord(self):
"""Test method 'set_coord'."""
point = viennagrid.wrapper.PointPolar2D()
self.assertEqual(point.get_coord(0), 0)
self.assertEqual(point.get_coord(1), 0)
point.set_coord(0, 5)
point.set_coord(1, 8)
self.assertEqual(point.get_coord(0), 5)
self.assertEqual(point.get_coord(1), 8)
def test_equal(self):
"""Test operator '=='."""
p1 = viennagrid.wrapper.PointPolar2D()
p2 = viennagrid.wrapper.PointPolar2D()
self.assertEqual(p1, p2)
self.assertEqual(p1.coords[0], 0.0)
self.assertEqual(p1.coords[1], 0.0)
self.assertEqual(p2.coords[0], 0.0)
self.assertEqual(p2.coords[1], 0.0)
p1 = viennagrid.wrapper.PointPolar2D(2, 1)
p2 = viennagrid.wrapper.PointPolar2D(2, 1)
self.assertEqual(p1, p2)
self.assertEqual(p1.coords[0], 2.0)
self.assertEqual(p1.coords[1], 1.0)
self.assertEqual(p2.coords[0], 2.0)
self.assertEqual(p2.coords[1], 1.0)
def test_assign(self):
"""Test operator '='."""
p1 = viennagrid.wrapper.PointPolar2D(2, 1)
p2 = viennagrid.wrapper.PointPolar2D(1, 2)
self.assertNotEqual(p1, p2)
self.assertEqual(p1.coords[0], 2.0)
self.assertEqual(p1.coords[1], 1.0)
self.assertEqual(p2.coords[0], 1.0)
self.assertEqual(p2.coords[1], 2.0)
p1 = p2
self.assertEqual(p1, p2)
self.assertEqual(p1.coords[0], 1.0)
self.assertEqual(p1.coords[1], 2.0)
self.assertEqual(p2.coords[0], 1.0)
self.assertEqual(p2.coords[1], 2.0)
def test_to_cartesian(self):
"""Test method 'to_cartesian'."""
p = viennagrid.wrapper.PointPolar2D(0, 0)
c = p.to_cartesian()
self.assertTrue(*equal(c.get_coord(0), 0))
self.assertTrue(*equal(c.get_coord(1), 0))
p = viennagrid.wrapper.PointPolar2D(1, 0)
c = p.to_cartesian()
self.assertTrue(*equal(c.get_coord(0), 1))
self.assertTrue(*equal(c.get_coord(1), 0))
p = viennagrid.wrapper.PointPolar2D(1, math.pi / 2)
c = p.to_cartesian()
self.assertTrue(*equal(c.get_coord(0), 0))
self.assertTrue(*equal(c.get_coord(1), 1))
p = viennagrid.wrapper.PointPolar2D(math.sqrt(2), math.pi / 4)
c = p.to_cartesian()
self.assertTrue(*equal(c.get_coord(0), 1))
self.assertTrue(*equal(c.get_coord(1), 1))
def test_neg(self):
"""Test unary operator '-' (__neg__ in Python)."""
pass
class TestPointSpherical3D(unittest.TestCase):
def setUp(self):
self.addTypeEqualityFunc(viennagrid.wrapper.PointSpherical3D, point_equal)
def test_init(self):
"""Test constructor, and properties 'dim' and 'coords'."""
point = viennagrid.wrapper.PointSpherical3D()
self.assertEqual(point.dim, 3)
self.assertEqual(point.coords[0], 0)
self.assertEqual(point.coords[1], 0)
self.assertEqual(point.coords[2], 0)
def test_init_double(self):
"""Test constructor with doubles, and properties 'dim' and 'coords'."""
point = viennagrid.wrapper.PointSpherical3D(2.0, 5.0, 4.0)
self.assertEqual(point.dim, 3)
self.assertEqual(point.coords[0], 2.0)
self.assertEqual(point.coords[1], 5.0)
self.assertEqual(point.coords[2], 4.0)
def test_coord_system(self):
"""Test property 'coord_system'."""
point = viennagrid.wrapper.PointSpherical3D()
self.assertEqual(point.coord_system, 'spherical')
def test_get_coord(self):
"""Test method 'get_coord'."""
point = viennagrid.wrapper.PointSpherical3D()
self.assertEqual(point.get_coord(0), 0)
self.assertEqual(point.get_coord(1), 0)
self.assertEqual(point.get_coord(2), 0)
point = viennagrid.wrapper.PointSpherical3D(2, 5, 4)
self.assertEqual(point.get_coord(0), 2)
self.assertEqual(point.get_coord(1), 5)
self.assertEqual(point.get_coord(2), 4)
def test_set_coord(self):
"""Test method 'set_coord'."""
point = viennagrid.wrapper.PointSpherical3D()
self.assertEqual(point.get_coord(0), 0)
self.assertEqual(point.get_coord(1), 0)
self.assertEqual(point.get_coord(2), 0)
point.set_coord(0, 5)
point.set_coord(1, 8)
point.set_coord(2, 9)
self.assertEqual(point.get_coord(0), 5)
self.assertEqual(point.get_coord(1), 8)
self.assertEqual(point.get_coord(2), 9)
def test_equal(self):
"""Test operator '=='."""
p1 = viennagrid.wrapper.PointSpherical3D()
p2 = viennagrid.wrapper.PointSpherical3D()
self.assertEqual(p1, p2)
self.assertEqual(p1.coords[0], 0.0)
self.assertEqual(p1.coords[1], 0.0)
self.assertEqual(p1.coords[2], 0.0)
self.assertEqual(p2.coords[0], 0.0)
self.assertEqual(p2.coords[1], 0.0)
self.assertEqual(p2.coords[2], 0.0)
p1 = viennagrid.wrapper.PointSpherical3D(2, 1, 4)
p2 = viennagrid.wrapper.PointSpherical3D(2, 1, 4)
self.assertEqual(p1, p2)
self.assertEqual(p1.coords[0], 2.0)
self.assertEqual(p1.coords[1], 1.0)
self.assertEqual(p1.coords[2], 4.0)
self.assertEqual(p2.coords[0], 2.0)
self.assertEqual(p2.coords[1], 1.0)
self.assertEqual(p2.coords[2], 4.0)
def test_assign(self):
"""Test operator '='."""
p1 = viennagrid.wrapper.PointSpherical3D(2, 1, 3)
p2 = viennagrid.wrapper.PointSpherical3D(1, 2, 4)
self.assertNotEqual(p1, p2)
self.assertEqual(p1.coords[0], 2.0)
self.assertEqual(p1.coords[1], 1.0)
self.assertEqual(p1.coords[2], 3.0)
self.assertEqual(p2.coords[0], 1.0)
self.assertEqual(p2.coords[1], 2.0)
self.assertEqual(p2.coords[2], 4.0)
p1 = p2
self.assertEqual(p1, p2)
self.assertEqual(p1.coords[0], 1.0)
self.assertEqual(p1.coords[1], 2.0)
self.assertEqual(p1.coords[2], 4.0)
self.assertEqual(p2.coords[0], 1.0)
self.assertEqual(p2.coords[1], 2.0)
self.assertEqual(p2.coords[2], 4.0)
def test_neg(self):
"""Test unary operator '-' (__neg__ in Python)."""
pass
if __name__ == '__main__':
unittest.main()
|
|
import os
import pprint
import unittest
import pytest
import orthauth as oa
from pyontutils import sheets
# TODO move some of this to orthauth directly
# no SCOPES
# bad path
# no path/path is null
auth_config = {'auth-variables': {'google-api-store-file': None,
'google-api-store-file-readonly': None,}}
user_configs = dict(
user_config_ok = {'auth-variables': {'google-api-store-file':
{'path': 'google api store-file'},
'google-api-store-file-readonly':
{'path': 'google api store-file-readonly'},},
'auth-stores': {'runtime': True}},
user_config_null = {'auth-variables': {'google-api-store-file': None,
'google-api-store-file-readonly': None,},
'auth-stores': {'runtime': True}},
user_config_empty = {},
user_config_no_vars = {'auth-variables': {},
'auth-stores': {'runtime': True}},
)
secrets = dict(
secrets_ok = {'google': {'api': {'store-file-readonly': '/dev/null/some-path'}}},
secrets_not_rel = {'google': {'api': {'store-file-readonly': 'some-path'}}},
secrets_null = {'google': {'api': {'store-file-readonly': None}}},
secrets_empty = {},
secrets_no_path = {'google': {'api': {}}},
)
def key_creds(e): return (isinstance(e, KeyError) and
e.args and
e.args[0] == 'google-api-creds-file')
def type_scopes(e): return (isinstance(e, TypeError) and
e.args and
e.args[0].startswith('SCOPES has not been set'))
def value_nofi(e): return (isinstance(e, ValueError) and
e.args and
e.args[0].startswith('The file (or absense of file)'))
def value_nova(e): return (isinstance(e, ValueError) and
e.args and
e.args[0].startswith('No value found'))
def value_val(e): return (isinstance(e, ValueError) and
e.args and
e.args[0].startswith('Value of secret at'))
def nbpe(e): return isinstance(e, oa.exceptions.NoBasePathError)
def default(e): return False
errors = {
('user_config_ok', 'secrets_ok'): key_creds,
('user_config_ok', 'secrets_not_rel'): nbpe,
('user_config_ok', 'secrets_null'): value_val,
('user_config_ok', 'secrets_empty'): value_nova,
('user_config_ok', 'secrets_no_path'): value_nova,
('user_config_null', 'secrets_ok'): value_nofi,
('user_config_null', 'secrets_not_rel'): value_nofi,
('user_config_null', 'secrets_null'): value_nofi,
('user_config_null', 'secrets_empty'): value_nofi,
('user_config_null', 'secrets_no_path'): value_nofi,
('user_config_empty', 'secrets_ok'): value_nofi,
('user_config_empty', 'secrets_not_rel'): value_nofi,
('user_config_empty', 'secrets_null'): value_nofi,
('user_config_empty', 'secrets_empty'): value_nofi,
('user_config_empty', 'secrets_no_path'): value_nofi,
('user_config_no_vars', 'secrets_ok'): value_nofi,
('user_config_no_vars', 'secrets_not_rel'): value_nofi,
('user_config_no_vars', 'secrets_null'): value_nofi,
('user_config_no_vars', 'secrets_empty'): value_nofi,
('user_config_no_vars', 'secrets_no_path'): value_nofi,
}
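# Each (user config, secrets) pair above maps to a predicate recognising the
# exception sheets._get_oauth_service is expected to raise for that combination;
# do_test re-raises anything the predicate does not accept.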
def do_test(expect, SCOPES='https://www.googleapis.com/auth/spreadsheets.readonly'):
try:
s = sheets._get_oauth_service(SCOPES=SCOPES)
except BaseException as e:
if not expect(e):
raise e
class TestGetOauthService(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.__auth = sheets.auth
@classmethod
def tearDownClass(cls):
sheets.auth = cls.__auth
def test(self):
keep_expect = ('user_config_no_vars',
'user_config_empty',
'user_config_null',
'secrets_not_rel',
'secrets_empty',
'secrets_no_path',
'secrets_null',
)
bads = []
for cname, cblob in user_configs.items():
for sname, sblob in secrets.items():
sheets.auth = oa.AuthConfig.runtimeConfig(auth_config, cblob, sblob)
expect = errors[cname, sname]
try:
do_test(expect)
except BaseException as e:
if (cname, sname) == ('user_config_ok', 'secrets_null'):
raise e
bads.append((cname, sname, e))
try:
expect = (expect
if cname in keep_expect or sname in keep_expect else
type_scopes)
do_test(expect, None) # FIXME some others come first
except BaseException as e:
bads.append((cname, sname, 'SCOPES=None', e))
assert not bads, pprint.pformat(bads)
class SheetToTest(sheets.Sheet):
name = 'pyontutils-test'
sheet_name = 'tests'
index_columns = 'id',
fetch_grid = True
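# TestSheets runs against a live Google Sheet named 'pyontutils-test' (hence the
# CI skip below); a read/write instance and a read-only instance are kept in sync
# via _fetch_from_other_sheet in setUp to keep the number of API fetches down.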
@pytest.mark.skipif('CI' in os.environ, reason='Google API creds required.')
class TestSheets(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.base_sheet = SheetToTest(readonly=False)
cls.base_sheet_ro = SheetToTest()
def setUp(self):
self.sheet = SheetToTest(readonly=False, fetch=False)
self.sheet_ro = SheetToTest(fetch=False)
# keep the fetch count down at risk of desync
self.sheet._fetch_from_other_sheet(self.base_sheet)
self.sheet_ro._fetch_from_other_sheet(self.base_sheet_ro)
def test_fromUrl(self):
NewSheet = sheets.Sheet.fromUrl(self.sheet._uri_human() + '#gid=0')
ns = NewSheet()
assert ns.values == self.sheet_ro.values
def test_range(self):
""" test the corners """
#rcs = ((0, 0), (0, -1), (-1, 0), (-1, -1)) # asymmetry is hard
rcs = ((0, 0, 'A1'), (-1, -1, 'C5'))
assert self.sheet.cell_object(0, -1).range != self.sheet.cell_object(-1, -1).range
for r, c, e in rcs:
cell = self.sheet.cell_object(r, c)
cell_range = cell._range
_, rr = cell.row.range.split('!', 1)
rf, rl = rr.split(':', 1)
row_range = rf if not r else rl
_, cr = cell.column.range.split('!', 1)
cf, cl = cr.split(':', 1)
column_range = cf if not c else cl
assert cell_range == row_range == e, (r, c)
assert cell_range == column_range == e, (r, c)
def test_update(self):
row = self.sheet.row_object(1)
row.name().value = 'hello there'
self.sheet.commit()
self.sheet_ro.fetch()
tv1 = self.sheet_ro.values
assert self.sheet.values == tv1
row.name().value = ''
self.sheet.commit()
self.sheet_ro.fetch()
tv2 = self.sheet_ro.values
assert self.sheet.values == tv2
assert tv1 != tv2
def test_1_upsert_up(self):
row = ['a', 'lol', 'nope']
assert row not in self.sheet.values
row_object, _ = self.sheet._row_from_index(row=row)
original_values = list(row_object.values) # dupe the list since we mutate values
assert original_values != row
try:
self.sheet.upsert(row)
self.sheet.commit()
self.sheet_ro.fetch()
tv1 = self.sheet_ro.values
assert self.sheet.values == tv1
finally:
self.sheet.upsert(original_values)
self.sheet.commit()
self.sheet_ro.fetch()
tv1 = self.sheet_ro.values
assert self.sheet.values == tv1
def test_delete(self):
row = ['d', '', '']
assert row in self.sheet.values
row_object, _ = self.sheet._row_from_index(row=row)
try:
self.sheet.delete(row)
self.sheet.commit()
self.sheet_ro.fetch()
tv1 = self.sheet_ro.values
assert self.sheet.values == tv1
assert row not in tv1
finally:
if row not in tv1:
self.sheet.insert(row) # FIXME can only append not insert back to previous location ...
self.sheet.commit()
self.sheet_ro.fetch()
tv1 = self.sheet_ro.values
assert self.sheet.values == tv1
assert row in tv1
def test_1_upsert_in(self):
row = ['e', 'lol', 'nope']
assert row not in self.sheet.values
self.sheet.upsert(row)
self.sheet.commit()
self.sheet_ro.fetch()
tv1 = self.sheet_ro.values
try:
assert self.sheet.values == tv1
finally:
if row in self.sheet.values:
self.sheet.delete(row)
self.sheet.commit()
self.sheet_ro.fetch()
tv1 = self.sheet_ro.values
assert self.sheet.values == tv1
def test_append(self):
""" make sure that you are on the version of the
test sheet that doesn't have extra rows """
rows = dict(
row1 = ['f', 'append', '1'],
row2 = ['g', 'append', '2'],
row3 = ['h', 'append', '3'],)
for row in rows.values():
assert row not in self.sheet.values
self.sheet._appendRow(row)
self.sheet.commit()
self.sheet_ro.fetch()
tv1 = self.sheet_ro.values
try:
assert self.sheet.values == tv1
finally:
to_delete = []
for row in rows.values():
if row in self.sheet.values:
to_delete.append(row)
if to_delete:
self.sheet.delete(*to_delete)
self.sheet.commit()
self.sheet_ro.fetch()
tv1 = self.sheet_ro.values
assert self.sheet.values == tv1
@pytest.mark.skip('TODO')
def test_stash(self):
# create another instance of the sheet
# update using that instance
# fetch to make sure stashing works as expected
pass
def test_set_values_update(self):
ovalues = [list(l) for l in self.sheet.values] # duh mutation is evil
_test_value = [[1, 2, 3, 4],
[1, 2, 3, 4],
[1, 2, 3, 4],
[1, 2, 3, 4],]
# haven't implemented conversion of cell values to strings yet
test_value = [[str(c) for c in r] for r in _test_value]
self.sheet.values = test_value
assert self.sheet.uncommitted() # FIXME this needs to be run by default every test
assert ovalues != test_value # FIXME this needs to be run by default every test
assert ovalues[0] != test_value[0]
try:
self.sheet.commit()
self.sheet_ro.fetch()
tv1 = self.sheet_ro.values
assert self.sheet.values == tv1
finally:
self.sheet.update(ovalues)
self.sheet.commit()
self.sheet_ro.fetch()
tv2 = self.sheet_ro.values
# FIXME I suspect this will break due to missing ends
assert self.sheet.values == tv2 == ovalues
def test_set_values_update_more_rows(self):
ovalues = [list(l) for l in self.sheet.values] # duh mutation is evil
_test_value = [[1, 2, 3, 4], # FIXME these should probably throw errors too
                       [1, 2, 3, 4], # since they break the primary key assumptions?
[1, 2, 3, 4],
[1, 2, 3, 4],
[2, 2, 3, 4],
[3, 2, 3, 4],
[4, 2, 3, 4],]
# haven't implemented conversion of cell values to strings yet
test_value = [[str(c) for c in r] for r in _test_value]
self.sheet.values = test_value
assert self.sheet.uncommitted() # FIXME this needs to be run by default every test
assert ovalues != test_value # FIXME this needs to be run by default every test
assert ovalues[0] != test_value[0]
try:
self.sheet.commit()
self.sheet_ro.fetch()
tv1 = self.sheet_ro.values
assert self.sheet.values == tv1
finally:
# FIXME need to delete the new rows not just return to the size of the old values
self.sheet.update(ovalues)
self.sheet.commit()
self.sheet_ro.fetch()
tv2 = self.sheet_ro.values
# FIXME I suspect this will break due to missing ends
assert self.sheet.values == tv2 == ovalues
def test_row(self):
r = self.sheet.row_object(0)
r.header
r = r.rowAbove()
r = r.rowBelow()
r.cell_object(1).value = 'oops'
a = r.cells
b = [c.column for c in r.cells]
repr((a, b))
def test_column(self):
c = self.sheet.column_object(0)
c.header
c = c.columnLeft()
c = c.columnRight()
c.cell_object(1).value = 'oops'
a = c.cells
b = [c.row for c in c.cells]
repr((a, b))
|
|
"""
Implements a generic plotting interface which displays either pre-built figures or figures produced by plotting functions applied to their respective data.
AnalysisPlotter is a sub-class of QWidget and can be added to any QLayout.
"""
import matplotlib
import inspect
import logging
from testbeam_analysis.gui.gui_widgets.worker import AnalysisWorker
from PyQt5 import QtWidgets, QtCore
matplotlib.use('Qt5Agg') # Make sure that we are using QT5
from matplotlib.backends.backend_qt5agg import FigureCanvasQTAgg as FigureCanvas
from matplotlib.backends.backend_qt5agg import NavigationToolbar2QT as NavigationToolbar
class AnalysisPlotter(QtWidgets.QWidget):
"""
Implements generic plotting area widget. Takes one or multiple plotting functions and their input files
and displays figures from their return values. Supports single and multiple figures as return values.
Also supports plotting from multiple functions at once and input of predefined figures. If figures are plotted
from provided plotting functions, the functions are executed on an extra thread
"""
startedPlotting = QtCore.pyqtSignal()
finishedPlotting = QtCore.pyqtSignal()
exceptionSignal = QtCore.pyqtSignal(Exception, str)
def __init__(self, input_file=None, plot_func=None, figures=None, thread=None, parent=None, **kwargs):
super(AnalysisPlotter, self).__init__(parent)
# Main layout
self.main_layout = QtWidgets.QVBoxLayout()
self.setLayout(self.main_layout)
# Input arguments
self.input_file = input_file
self.plot_func = plot_func
self.figures = figures
self.kwargs = kwargs
        # External thread provided for plotting; must be a QtCore.QThread() or None. If None, no threading is used.
# After finishing, thread is deleted
self.plotting_thread = thread
if input_file is None and plot_func is None and figures is None:
msg = 'Need input file and plotting function or figures to do plotting!'
raise ValueError(msg)
# Bool whether to plot from multiple functions at once
multi_plot = False
# Threading related
if self.plotting_thread:
self.plotting_thread.started.connect(lambda: self.startedPlotting.emit())
self.plotting_thread.finished.connect(lambda: self.finishedPlotting.emit())
self.plotting_thread.finished.connect(self.plotting_thread.deleteLater)
# Multiple plot_functions with respective input_data; dicts of plotting functions and input files
# must have same keys. If figures are given, they must be given as a dict with a key that is in the
# plot_functions keys. If kwargs are given for the plotting functions, keyword must be in plot_functions keys.
# Value must be dict with actual kwarg for plot function. Full example for multi plotting WITH kwargs
# for each plotting function would look like:
#
# self.input_file={'event': input_file_event, 'correlation': input_file_correlations}
# self.plot_func={'event': plot_events, 'correlation': plot_correlations}
# self.kwargs={'event': {'event_range': 40}, 'correlation':{'pixel_size':(250,50), 'dut_names':'Tel_0'}}
#
# which is equivalent to:
#
# AnalysisPlotter(self.input_files, self.plot_func, event={'event_range': 40}, correlation={'pixel_size':(250,50), 'dut_names':'Tel_0'})
if isinstance(self.input_file, dict) and isinstance(self.plot_func, dict):
if sorted(self.input_file.keys()) != sorted(self.plot_func.keys()):
msg = 'Different sets of keys! Can not assign input data to respective plotting function!'
raise KeyError(msg)
else:
if self.kwargs:
for key in self.kwargs.keys():
if key not in self.plot_func.keys():
msg = 'Can not assign keyword %s with argument %s to any plotting function.' \
' Keyword must be in keys of plotting function dictionary: %s.' \
% (key, str(self.kwargs[key]), ''.join(str(self.plot_func.keys())))
raise KeyError(msg)
multi_plot = True
# Whether to plot a single or multiple functions
if not multi_plot:
# Init resulting figures and worker
self.result_figs = None
self.plotting_worker = None
            # Check whether the given kwargs are actual arguments of plot_func
if self.kwargs:
self.check_kwargs(self.plot_func, self.kwargs)
if self.figures:
# Figures are already there, just add to widget
self.result_figs = self.figures
# Create respective worker instance
self._spawn_worker()
else:
# Init resulting figures and workers as dict, init counter
self.result_figs = {}
self.plotting_worker = {}
self._finished_workers = 0
            # Check whether the given kwargs are actual arguments of the respective plot_func
if self.kwargs:
for key in self.kwargs.keys():
self.check_kwargs(self.plot_func[key], self.kwargs[key])
self._add_multi_figs()
def plot(self):
"""
Starts plotting by starting self.plotting_thread or emitting self.startedPlotting signal
"""
if self.plotting_thread:
self.plotting_thread.start()
else:
self.startedPlotting.emit()
def _spawn_worker(self, multi_plot_key=None, dummy_widget=None):
"""
Method to create a worker for plotting and move it to self.plotting_thread. Workers are created
with regard to whether multiple or a single plot is created.
:param multi_plot_key: Whether worker is created for specific multi_plot_key in self.plot_func.keys() or single plot
:param dummy_widget: External widget to be plotted on for multi_plot
"""
# Single plot
if multi_plot_key is None:
self.plotting_worker = AnalysisWorker(func=self._get_figs, args=multi_plot_key)
if self.plotting_thread:
self.plotting_worker.moveToThread(self.plotting_thread)
self.plotting_thread.started.connect(self.plotting_worker.work)
else:
self.startedPlotting.connect(self.plotting_worker.work)
if dummy_widget is None:
self.plotting_worker.finished.connect(lambda: self._add_figs(figures=self.result_figs))
else:
self.plotting_worker.finished.connect(lambda: self._add_figs(figures=self.result_figs,
external_widget=dummy_widget))
# Connect exceptions signal
self.plotting_worker.exceptionSignal.connect(lambda e, trc_bck: self.emit_exception(exception=e,
trace_back=trc_bck))
# Connect to slot for quitting thread and clean-up
self.plotting_worker.finished.connect(self._finish_plotting)
self.plotting_worker.finished.connect(self.plotting_worker.deleteLater)
# Multiple plots
else:
self.plotting_worker[multi_plot_key] = AnalysisWorker(func=self._get_figs, args=multi_plot_key)
if self.plotting_thread:
self.plotting_worker[multi_plot_key].moveToThread(self.plotting_thread)
self.plotting_thread.started.connect(self.plotting_worker[multi_plot_key].work)
else:
self.startedPlotting.connect(self.plotting_worker[multi_plot_key].work)
if dummy_widget is None:
self.plotting_worker[multi_plot_key].finished.connect(
lambda: self._add_figs(figures=self.result_figs[multi_plot_key]))
else:
self.plotting_worker[multi_plot_key].finished.connect(
lambda: self._add_figs(figures=self.result_figs[multi_plot_key], external_widget=dummy_widget))
# Connect exceptions signal
self.plotting_worker[multi_plot_key].exceptionSignal.connect(
lambda e, trc_bck: self.emit_exception(exception=e, trace_back=trc_bck))
# Connect to slot for quitting thread and clean-up
self.plotting_worker[multi_plot_key].finished.connect(self._finish_plotting)
self.plotting_worker[multi_plot_key].finished.connect(self.plotting_worker[multi_plot_key].deleteLater)
def _finish_plotting(self):
"""
Quits self.plotting_thread with regard to multiple or single plot if plotting thread is provided.
Otherwise emits finished signal
"""
if isinstance(self.input_file, dict):
self._finished_workers += 1
if self._finished_workers == len(self.input_file.keys()):
if self.plotting_thread:
self.plotting_thread.quit()
else:
self.finishedPlotting.emit()
else:
if self.plotting_thread:
self.plotting_thread.quit()
else:
self.finishedPlotting.emit()
def _get_figs(self, multi_plot_key):
"""
Actual function that is run in the worker on self.plotting_thread. Saves the result figures in self.figures
        :param multi_plot_key: Whether to get figures for a specific multi_plot_key in self.plot_func.keys() or a single plot
"""
# Single plot
if multi_plot_key is None:
if not self.result_figs:
self.result_figs = self.plot_func(self.input_file, **self.kwargs)
else:
pass
# Multiple plots
else:
if multi_plot_key not in self.result_figs.keys():
if multi_plot_key in self.kwargs.keys():
self.result_figs[multi_plot_key] = self.plot_func[multi_plot_key](self.input_file[multi_plot_key],
**self.kwargs[multi_plot_key])
else:
self.result_figs[multi_plot_key] = self.plot_func[multi_plot_key](self.input_file[multi_plot_key])
else:
pass
def check_kwargs(self, plot_func, kwargs):
"""
Takes a function and keyword arguments passed to the init of this class and checks whether
or not the function takes these as arguments. If not, raise TypeError with message naming function and kwarg
:param plot_func: function
:param kwargs: dict of keyword arguments
"""
# Get plot_func's args
args = inspect.getargspec(plot_func)[0]
for kw in kwargs.keys():
if kw not in args:
msg = 'Plotting function %s got unexpected argument %s' % (plot_func.__name__, kw)
raise TypeError(msg)
else:
pass
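    # A hypothetical illustration of the check above (names are illustrative
    # only): check_kwargs(plot_events, {'event_range': 40}) passes silently if
    # plot_events(input_file, event_range=...) accepts 'event_range', while an
    # unknown keyword such as 'colour' raises a TypeError naming the function
    # and the offending keyword.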
def _add_figs(self, figures, external_widget=None):
"""
Function for plotting one or multiple plots from a single plot_func.
If the function returns multiple plots, respective widgets for navigation
through plots are created.
:param external_widget: None or QWidget; if None figs are plotted on self (single fig) or an internal
plot_widget. If QWidget figs are plotted on this widget (must have layout)
:param figures: matplotlib.Figure() or list of such figures; adds figures to plot widget
"""
if figures is None:
logging.warning('No figures returned by %s. No plotting possible' % self.plot_func.__name__)
return
# Make list of figures if not already
if isinstance(figures, list):
fig_list = figures
else:
fig_list = [figures]
# Check for multiple plots and init plot widget
if len(fig_list) > 1:
plot_widget = QtWidgets.QStackedWidget()
else:
# Plots will be on self or external_widget
plot_widget = None
# Create a dummy widget and add a figure canvas and a toolbar for each plot
for f in fig_list:
dummy_widget = QtWidgets.QWidget()
dummy_layout = QtWidgets.QVBoxLayout()
dummy_widget.setLayout(dummy_layout)
f.set_facecolor('0.99')
canvas = FigureCanvas(f)
canvas.setParent(self)
toolbar = NavigationToolbar(canvas, self)
dummy_layout.addWidget(toolbar)
dummy_layout.addWidget(canvas)
# Handle plot_widget and amount of figs
if isinstance(plot_widget, QtWidgets.QStackedWidget): # Multiple figs
plot_widget.addWidget(dummy_widget)
else: # Single fig
if external_widget is None: # Plot on self
self.main_layout.addWidget(dummy_widget)
else: # Plot on external_widget
external_widget.layout().addWidget(dummy_widget)
# If more than one fig make navigation widgets and add everything to respective widgets
if isinstance(plot_widget, QtWidgets.QStackedWidget):
# Add plot widget to external widget or self
if external_widget is None:
self.main_layout.addWidget(plot_widget)
else:
external_widget.layout().addWidget(plot_widget)
# Create buttons to navigate through different plots
layout_btn = QtWidgets.QHBoxLayout()
btn_forward = QtWidgets.QPushButton()
btn_back = QtWidgets.QPushButton()
icon_forward = btn_forward.style().standardIcon(QtWidgets.QStyle.SP_ArrowForward)
icon_back = btn_back.style().standardIcon(QtWidgets.QStyle.SP_ArrowBack)
btn_forward.setIcon(icon_forward)
btn_back.setIcon(icon_back)
btn_forward.setIconSize(QtCore.QSize(40, 40))
btn_back.setIconSize(QtCore.QSize(40, 40))
label_count = QtWidgets.QLabel('1 of %d' % plot_widget.count())
# Connect buttons
btn_forward.clicked.connect(lambda: navigate(val=1))
btn_back.clicked.connect(lambda: navigate(val=-1))
# Add buttons to layout
layout_btn.addStretch()
layout_btn.addWidget(btn_back)
layout_btn.addSpacing(20)
layout_btn.addWidget(label_count)
layout_btn.addSpacing(20)
layout_btn.addWidget(btn_forward)
layout_btn.addStretch()
# Disable back button when at first plot
if plot_widget.currentIndex() == 0:
btn_back.setDisabled(True)
# Add all to main or external layout
if external_widget is None:
self.main_layout.addLayout(layout_btn)
else:
external_widget.layout().addLayout(layout_btn)
# button slot to change plots
def navigate(val):
                if 0 <= (plot_widget.currentIndex() + val) < plot_widget.count():
index = plot_widget.currentIndex() + val
plot_widget.setCurrentIndex(index)
if index == plot_widget.count() - 1:
btn_back.setDisabled(False)
btn_forward.setDisabled(True)
elif index == 0:
btn_back.setDisabled(True)
btn_forward.setDisabled(False)
else:
btn_forward.setDisabled(False)
btn_back.setDisabled(False)
label_count.setText('%d of %d' % (index + 1, plot_widget.count()))
else:
pass
def _add_multi_figs(self):
"""
Function that allows plotting from multiple plot functions at once.
Creates a tab widget and one tab for every plot function. Uses self._add_figs() to add plots
"""
if self.figures is not None:
if isinstance(self.figures, dict):
pass
else:
msg = 'Input figures must be in dictionary! Can not assign figure(s) to respective plotting function!'
raise KeyError(msg)
tabs = QtWidgets.QTabWidget()
for key in self.input_file.keys():
dummy_widget = QtWidgets.QWidget()
dummy_widget.setLayout(QtWidgets.QVBoxLayout())
if self.figures is not None and key in self.figures.keys():
# If one of the multi_plot functions already has figures, add to result figures
if self.figures[key] is not None:
self.result_figs[key] = self.figures[key]
# Create respective worker instance
self._spawn_worker(multi_plot_key=key, dummy_widget=dummy_widget)
else:
# Create respective worker instance
self._spawn_worker(multi_plot_key=key, dummy_widget=dummy_widget)
tabs.addTab(dummy_widget, str(key).capitalize())
self.main_layout.addWidget(tabs)
def emit_exception(self, exception, trace_back):
"""
Emits exception signal
:param exception: Any Exception
:param trace_back: traceback of the exception or error
"""
self.exceptionSignal.emit(exception, trace_back)
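# The block below is a minimal, hypothetical usage sketch and is not part of
# the original module. It assumes a throwaway plotting function `plot_dummy`
# (defined here only for illustration) that returns a list of matplotlib
# figures; passing a QtCore.QThread() via `thread=` would run the plotting
# off the GUI thread instead of synchronously.
if __name__ == '__main__':
    import sys

    def plot_dummy(input_file, n_points=10):
        # Illustrative plotting function; `input_file` is ignored on purpose.
        from matplotlib.figure import Figure
        fig = Figure()
        ax = fig.add_subplot(111)
        ax.plot(range(n_points))
        return [fig]

    app = QtWidgets.QApplication(sys.argv)
    plotter = AnalysisPlotter(input_file='unused.h5', plot_func=plot_dummy,
                              n_points=25)
    plotter.show()
    plotter.plot()
    sys.exit(app.exec_())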
|
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Python script to convert Forex files into different formats (e.g. FXT/HST/HCC).
from struct import pack, pack_into, calcsize
import argparse
import bstruct
import csv
import datetime
import mmap
import os
import re
import sys
import time
class Spinner:
"""Displays an ASCII spinner"""
def __init__(self, step):
self._n = self._x = 0
self._chars = "\\|/-"
self._step = step
def spin(self):
self._n += 1
if self._n % self._step == 0:
sys.stdout.write("\b" + self._chars[self._x % 4])
sys.stdout.flush()
self._x += 1
if self._x >= 4:
self._x = 0
self._n = 0
spinner = Spinner(100000)
class Input:
def __init__(self, path):
if args.verbose:
print("[INFO] Trying to read data from %s..." % path)
try:
self.path = open(path, "r")
except OSError as e:
print(
"[ERROR] '%s' raised when tried to read the file '%s'"
% (e.strerror, e.filename)
)
sys.exit(1)
self.uniBars = []
def __del__(self):
self.path.close()
def _addBar(
self, barTimestamp, tickTimestamp, uniBar_open, high, low, close, volume
):
self.uniBars += [
{
"barTimestamp": barTimestamp,
"tickTimestamp": tickTimestamp,
"open": uniBar_open,
"high": high,
"low": low,
"close": close,
"volume": volume,
}
]
def string_to_timestamp(s):
    try_microseconds = s[20:]
    if try_microseconds != "":
        microseconds = int(try_microseconds)
    else:
        microseconds = 0
return datetime.datetime(
int(s[0:4]), # Year
int(s[5:7]), # Month
int(s[8:10]), # Day
int(s[11:13]), # Hour
int(s[14:16]), # Minute
int(s[17:19]), # Second
microseconds, # Microseconds
datetime.timezone.utc,
)
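# string_to_timestamp() slices the fixed-width "%Y.%m.%d %H:%M:%S[.fraction]"
# layout used by the CSV input, e.g. "2014.09.03 12:34:56.123"; any digits
# after position 20 are passed to datetime() as microseconds and the result is
# a timezone-aware UTC datetime.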
class CSV(Input):
def __init__(self, path):
super().__init__(path)
self._map_obj = mmap.mmap(self.path.fileno(), 0, access=mmap.ACCESS_READ)
def __iter__(self):
return self
def __next__(self):
line = self._map_obj.readline()
if line:
isLastRow = self._map_obj.tell() == self._map_obj.size()
return (self._parseLine(line), isLastRow)
raise StopIteration
def _parseLine(self, line):
tick = line.split(b",")
return {
# Storing timestamp as float to preserve its precision.
# 'timestamp': time.mktime(datetime.datetime.strptime(tick[0], '%Y.%m.%d %H:%M:%S.%f').replace(tzinfo=datetime.timezone.utc).timetuple()),
"timestamp": string_to_timestamp(tick[0]).timestamp(),
"bidPrice": float(tick[1]),
"askPrice": float(tick[2]),
"bidVolume": float(tick[3]),
"askVolume": float(tick[4]), # float() handles ending '\n' character
}
class Output:
def __init__(self, timeframe, path_suffix, symbol, output_dir):
self.deltaTimestamp = timeframe * 60
self.endTimestamp = None
self.barCount = 0
self.filename = "%s%d%s" % (symbol, timeframe, path_suffix)
self.fullname = os.path.join(output_dir, self.filename)
try:
os.remove(
self.fullname
) # Remove existing output file before creating an appended new one
except (OSError, IOError) as e:
pass
try:
self.path = open(self.fullname, "wb")
except OSError as e:
print(
"[ERROR] '%s' raised when tried to open for appending the file '%s'"
% (e.strerror, e.filename)
)
sys.exit(1)
def __del__(self):
self.path.close()
def finalize(self):
pass
def _aggregate(self, tick):
if not self.endTimestamp or tick["timestamp"] >= self.endTimestamp:
uniBar = None
if self.endTimestamp:
uniBar = {
"barTimestamp": self.startTimestamp,
"tickTimestamp": tick["timestamp"],
"open": self.open,
"high": self.high,
"low": self.low,
"close": self.close,
"volume": self.volume,
}
self.startTimestamp = (
int(tick["timestamp"]) // self.deltaTimestamp
) * self.deltaTimestamp
self.endTimestamp = self.startTimestamp + self.deltaTimestamp
self.open = self.high = self.low = self.close = tick["bidPrice"]
self.volume = tick["bidVolume"] + tick["askVolume"]
if uniBar:
return (uniBar, True)
else:
self.high = max(tick["bidPrice"], self.high)
self.low = min(tick["bidPrice"], self.low)
self.close = tick["bidPrice"]
self.volume += tick["bidVolume"] + tick["askVolume"]
uniBar = {
"barTimestamp": self.startTimestamp,
"tickTimestamp": tick["timestamp"],
"open": self.open,
"high": self.high,
"low": self.low,
"close": self.close,
"volume": self.volume,
}
return (uniBar, False)
def _aggregateWithTicks(self, tick):
if not self.endTimestamp or tick["timestamp"] >= self.endTimestamp:
self.startTimestamp = (
int(tick["timestamp"]) // self.deltaTimestamp
) * self.deltaTimestamp
self.endTimestamp = self.startTimestamp + self.deltaTimestamp
self.open = self.high = self.low = tick["bidPrice"]
self.volume = tick["bidVolume"] + tick["askVolume"]
self.barCount += 1
else:
self.high = max(tick["bidPrice"], self.high)
self.low = min(tick["bidPrice"], self.low)
self.volume += tick["bidVolume"] + tick["askVolume"]
return {
"barTimestamp": self.startTimestamp,
"tickTimestamp": tick["timestamp"],
"open": self.open,
"high": self.high,
"low": self.low,
"close": tick["bidPrice"],
"volume": self.volume,
}
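    # Note: _aggregate() only hands back a *completed* bar (flag True) once a
    # tick falls past the current bar window, while _aggregateWithTicks()
    # returns the running, partially filled bar for every tick it receives.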
class HST509(Output):
def __init__(self, path, path_suffix, output_dir, timeframe, symbol):
# Initialize variables in parent constructor
super().__init__(timeframe, path_suffix, symbol, output_dir)
# Build header (148 Bytes in total)
header = bytearray()
header += pack("<i", 400) # Version
header += bytearray(
"(C)opyright 2003, MetaQuotes Software Corp.".ljust(
64, "\x00" # Copyright
),
"latin1",
"ignore",
)
header += bytearray(symbol.ljust(12, "\x00"), "latin1", "ignore") # Symbol
header += pack("<i", timeframe) # Period
header += pack("<i", 5) # Digits, using the default value of HST format
header += pack("<i", int(time.time())) # Time of sign (database creation)
header += pack("<i", 0) # Time of last synchronization
header += bytearray(13 * 4) # Space for future use
self.path.write(header)
def pack_ticks(self, tick):
# Transform universal bar list to binary bar data (44 Bytes per bar)
(uniBar, newUniBar) = self._aggregate(tick)
if newUniBar:
self.path.write(self._packUniBar(uniBar))
def _packUniBar(self, uniBar):
bar = bytearray()
bar += pack("<i", uniBar["barTimestamp"]) # Time
bar += pack("<d", uniBar["open"]) # Open
bar += pack("<d", uniBar["low"]) # Low
bar += pack("<d", uniBar["high"]) # High
bar += pack("<d", uniBar["close"]) # Close
bar += pack("<d", max(uniBar["volume"], 1.0)) # Volume
return bar
class HST574(Output):
def __init__(self, path, path_suffix, output_dir, timeframe, symbol):
# Initialize variables in parent constructor
super().__init__(timeframe, path_suffix, symbol, output_dir)
# Build header (148 Bytes in total)
header = bytearray()
header += pack("<i", 401) # Version
header += bytearray(
"(C)opyright 2003, MetaQuotes Software Corp.".ljust(
64, "\x00" # Copyright
),
"latin1",
"ignore",
)
header += bytearray(symbol.ljust(12, "\x00"), "latin1", "ignore") # Symbol
header += pack("<i", timeframe) # Period
header += pack("<i", 5) # Digits, using the default value of HST format
header += pack("<i", int(time.time())) # Time of sign (database creation)
header += pack("<i", 0) # Time of last synchronization
header += bytearray(13 * 4) # Space for future use
self.path.write(header)
def pack_ticks(self, ticks):
# Transform universal bar list to binary bar data (60 Bytes per bar)
ticksAggregated = {
"barTimestamp": ticks[0]["timestamp"],
"tickTimestamp": ticks[0]["timestamp"],
"open": ticks[0]["bidPrice"],
"low": ticks[0]["bidPrice"],
"high": ticks[0]["bidPrice"],
"close": ticks[0]["bidPrice"],
"volume": 0,
}
for tick in ticks:
ticksAggregated["low"] = min(ticksAggregated["low"], tick["bidPrice"])
ticksAggregated["high"] = max(ticksAggregated["high"], tick["bidPrice"])
ticksAggregated["volume"] += tick["bidVolume"] + tick["askVolume"]
ticksAggregated["close"] = tick["bidPrice"]
self.path.write(self._packUniBar(ticksAggregated))
def _packUniBar(self, uniBar):
bar = bytearray()
bar += pack("<i", uniBar["barTimestamp"]) # Time
bar += bytearray(4) # Add 4 bytes of padding.
# OHLCV values.
bar += pack("<d", uniBar["open"]) # Open
bar += pack("<d", uniBar["high"]) # High
bar += pack("<d", uniBar["low"]) # Low
bar += pack("<d", uniBar["close"]) # Close
bar += pack("<Q", max(int(uniBar["volume"]), 1)) # Volume
bar += pack("<i", 0) # Spread
bar += pack("<Q", 0) # Real volume
return bar
class FXT(Output):
def __init__(
self, path, path_suffix, output_dir, timeframe, symbol, server, spread, model
):
# Initialize variables in parent constructor
super().__init__(timeframe, path_suffix, symbol, output_dir)
self._priv = (timeframe, server, symbol, spread, model)
self._firstUniBar = self._lastUniBar = None
# Build header (728 Bytes in total).
header = bytearray()
header += pack("<I", 405) # FXT header version: 405
header += bytearray(
"Copyright 2001-2015, MetaQuotes Software Corp.".ljust(
64, "\x00" # Copyright text.
),
"latin1",
"ignore",
)
header += bytearray(
server.ljust(128, "\x00"), "latin1", "ignore"
) # Account server name.
header += bytearray(
symbol.ljust(12, "\x00"), "latin1", "ignore"
) # Symbol pair.
header += pack(
"<I", timeframe
) # Period of data aggregation in minutes (timeframe).
header += pack(
"<I", model
) # Model type: 0 - every tick, 1 - control points, 2 - bar open.
header += pack("<I", 0) # Bars - amount of bars in history.
header += pack("<I", 0) # Modelling start date - date of the first tick.
header += pack("<I", 0) # Modelling end date - date of the last tick.
header += bytearray(
4
) # Add 4 bytes of padding. This potentially can be totalTicks.
header += pack("<d", 99.9) # Modeling quality (max. 99.9).
# General parameters.
header += bytearray(
"EUR".ljust(12, "\x00"), "latin1", "ignore"
) # Base currency (12 bytes).
header += pack("<I", spread) # Spread in points.
header += pack("<I", 5) # Digits, using the default value of FXT format.
header += bytearray(4) # Add 4 bytes of padding.
header += pack("<d", 1e-5) # Point size (e.g. 0.00001).
header += pack("<I", 1) # Minimal lot size in centi lots (hundredths).
header += pack("<I", 50000) # Maximal lot size in centi lots (hundredths).
header += pack("<I", 1) # Lot step in centi lots (hundredths).
header += pack("<I", 10) # Stops level value (orders stop distance in points).
header += pack(
"<I", 1
) # GTC (Good till cancel) - instruction to close pending orders at end of day (default: True).
header += bytearray(4) # Add 4 bytes of padding.
# Profit Calculation parameters.
header += pack("<d", 100000.0) # ContractSize - contract size
header += pack("<d", 0.0) # Tick value in quote currency (empty).
header += pack("<d", 0.0) # Size of one tick (empty).
header += pack(
"<I", 0
) # Profit calculation mode: 0 - Forex, 1 - CFD, 2 - Futures.
# Swap calculation
header += pack("<i", 0) # Enable swap (default: False).
header += pack(
"<I", 0
) # Swap calculation method: 0 - in points, 1 - in the symbol base currency, 2 - by interest, 3 - in the margin currency.
header += bytearray(4) # Add 4 bytes of padding.
header += pack("<d", 0.0) # Swap of the buy order - long overnight swap value.
header += pack(
"<d", 0.0
) # Swap of the sell order - short overnight swap value.
header += pack(
"<I", 3
) # Day of week to charge 3 days swap rollover. Default: WEDNESDAY (3).
# Margin calculation.
header += pack("<I", 100) # Account leverage (default: 100).
header += pack(
"<I", 1
) # Free margin calculation mode {MARGIN_DONT_USE, MARGIN_USE_ALL, MARGIN_USE_PROFIT, MARGIN_USE_LOSS}
header += pack(
"<I", 0
) # Margin calculation mode: 0 - Forex, 1 - CFD, 2 - Futures, 3 - CFD for indexes.
header += pack("<I", 30) # Margin stopout level (default: 30).
header += pack(
"<I", 0
) # Margin stop out check mode {MARGIN_TYPE_PERCENT, MARGIN_TYPE_CURRENCY}
header += pack("<d", 0.0) # Margin requirements.
header += pack("<d", 0.0) # Margin maintenance requirements.
header += pack("<d", 50000.0) # Margin requirements for hedged positions.
header += pack("<d", 1.25) # Margin divider used for leverage calculation.
header += bytearray(
"USD".ljust(12, "\x00"), "latin1", "ignore"
) # Margin currency.
header += bytearray(4) # Padding space - add 4 bytes to align the next double.
# Commission calculation.
header += pack("<d", 0.0) # Basic commission.
header += pack(
"<i", 1
) # Basic commission type {COMM_TYPE_MONEY, COMM_TYPE_PIPS, COMM_TYPE_PERCENT}.
header += pack(
"<i", 0
) # Commission per lot or per deal {COMMISSION_PER_LOT, COMMISSION_PER_DEAL}.
# For internal use.
header += pack(
"<I", 1
) # Index of the first bar at which modeling started (0 for the first bar).
header += pack(
"<I", 0
) # Index of the last bar at which modeling started (0 for the last bar).
header += pack(
"<I", 0
) # Bar index where modeling started using M1 bars (0 for the first bar).
header += pack(
"<I", 0
) # Bar index where modeling started using M5 bars (0 for the first bar).
header += pack(
"<I", 0
) # Bar index where modeling started using M15 bars (0 for the first bar).
header += pack(
"<I", 0
) # Bar index where modeling started using M30 bars (0 for the first bar).
header += pack(
"<I", 0
) # Bar index where modeling started using H1 bars (0 for the first bar).
header += pack(
"<I", 0
) # Bar index where modeling started using H4 bars (0 for the first bar).
header += pack("<I", 0) # Begin date from tester settings (must be zero).
header += pack("<I", 0) # End date from tester settings (must be zero).
header += pack("<I", 0) # Order's freeze level in points.
header += pack(
"<I", 0
) # Number of errors during model generation which needs to be fixed before testing.
header += bytearray(60 * 4) # Reserved - Space for future use.
self.path.write(header)
def write_unibar(self, tick):
# We're getting an array
uniBar = {
"barTimestamp": tick["barTimestamp"],
"tickTimestamp": tick["timestamp"],
"open": tick["bidPrice"],
"high": tick["bidPrice"],
"low": tick["bidPrice"],
"close": tick["bidPrice"],
"volume": tick["bidVolume"],
}
if not self._firstUniBar:
self._firstUniBar = uniBar # Store first and ...
self._lastUniBar = uniBar # ... last bar data for header.
self.path.write(
pack(
"<iiddddQii",
int(uniBar["barTimestamp"]), # Bar datetime.
0, # Add 4 bytes of padding.
uniBar["open"],
uniBar["high"],
uniBar["low"],
uniBar["close"], # OHLCV values.
max(
int(uniBar["volume"]), 1
), # Volume (documentation says it's a double, though it's stored as a long int).
int(uniBar["tickTimestamp"]), # The current time within a bar.
4,
)
) # Flag to launch an expert (0 - bar will be modified, but the expert will not be launched).
def pack_ticks(self, ticks):
# Transform universal bar list to binary bar data (56 Bytes per bar)
model = self._priv[4]
# Every tick model
if model == 0:
for tick in ticks:
self.write_unibar(tick)
# Control points model
elif model == 1:
startTimestamp = None
self.write_unibar(ticks[0])
lowPrice = highPrice = ticks[0]["bidPrice"]
for tick in ticks[1:]:
# Beginning of the M1 bar's timeline.
tick["barTimestamp"] = (
int(tick["timestamp"]) - int(tick["timestamp"]) % 60
)
if not startTimestamp:
startTimestamp = tick["barTimestamp"]
# Determines the end of the M1 bar.
endTimestampTimeline = startTimestamp + 60
if tick["bidPrice"] < lowPrice:
lowPrice = tick["bidPrice"]
self.write_unibar(tick)
elif tick["bidPrice"] > highPrice:
highPrice = tick["bidPrice"]
self.write_unibar(tick)
elif tick["timestamp"] >= endTimestampTimeline:
startTimestamp = tick["barTimestamp"]
self.write_unibar(tick)
# Open price model
elif model == 2:
self.write_unibar(ticks[0])
def finalize(self):
# Fixup the header.
self.path.seek(216)
fix = bytearray()
fix += pack(
"<III",
self.barCount,
int(
self._firstUniBar["barTimestamp"]
), # Modelling start date - date of the first tick.
int(self._lastUniBar["barTimestamp"]),
) # Modelling end date - date of the last tick.
self.path.write(fix)
self.path.seek(472)
fix = bytearray()
fix += pack(
"<II",
int(
self._firstUniBar["barTimestamp"]
), # Tester start date - date of the first tick.
int(self._lastUniBar["barTimestamp"]),
) # Tester end date - date of the last tick.
self.path.write(fix)
class HCC(Output):
"""Output ticks in HCC file format."""
def __init__(self, path_suffix, output_dir, timeframe, symbol):
"""Create file and write headers."""
super().__init__(timeframe, path_suffix, symbol, output_dir)
# Build header (228 Bytes in total)
header = bytearray()
header += pack("<I", 501) # Magic
header += bytearray(
"Copyright 2001-2016, MetaQuotes Software Corp.".ljust(
64, "\x00" # Copyright
),
"utf-16",
"ignore",
)[2:]
header += bytearray("History".ljust(16, "\x00"), "utf-16", "ignore")[2:] # Name
header += bytearray("EURUSD".ljust(32, "\x00"), "utf-16", "ignore")[2:] # Title
assert 228 == self.path.write(header)
# Build EMPTY table (18 Bytes in total)
table = bytearray()
table += pack("<i", 0) # unknown_0
table += pack("<i", 0) # unknown_1
table += pack("<h", 0) # unknown_2
table += pack("<i", 0) # size
table += pack("<i", 0) # off
# write main table (18 Bytes in total)
assert 18 == self.path.write(table)
self.table_end = self.path.tell()
# write an empty table record to indicate that there are no more tables
assert 18 == self.path.write(table)
# Build record header (189 Bytes in total)
record_header = bytearray()
record_header += pack("<H", 0x81) # magic
record_header += bytearray("LABEL".ljust(32, "\x00"), "utf-16", "ignore")[
2:
] # label
record_header += bytearray("UN0".ljust(9, "\x00"), "utf-16", "ignore")[
2:
] # unknown_0
record_header += pack("<I", 0) # rows
record_header += bytearray("UN1".ljust(50, "\x00"), "utf-16", "ignore")[
2:
] # unknown_1
record_header += pack("<c", b"0")
self.record_header_begin = self.path.tell()
assert 189 == self.path.write(record_header)
self.record_header_end = self.path.tell()
def pack_ticks(self, ticks):
"""Prepare and write ticks in file."""
self.count_ticks = len(ticks)
# Transform universal bar list to binary bar data (40 Bytes per bar)
for tick in ticks:
# We're getting an array
uniBar = {
"barTimestamp": tick["barTimestamp"],
"tickTimestamp": tick["timestamp"],
"open": tick["bidPrice"],
"high": tick["bidPrice"],
"low": tick["bidPrice"],
"close": tick["bidPrice"],
"volume": tick["bidVolume"],
}
self.path.write(
pack(
"<iidddd",
0x00088884, # Separator
int(uniBar["barTimestamp"]), # Bar datetime.
uniBar["open"],
uniBar["high"],
uniBar["low"],
uniBar["close"],
)
) # Values.
def finalize(self):
"""Write data count in headers."""
# fixup the table
fix = pack("<i", self.record_header_begin)
self.path.seek(self.table_end - 4)
self.path.write(fix)
# fixup the record header
fix = pack("<I", self.count_ticks)
self.path.seek(self.record_header_end - 101 - 4)
self.path.write(fix)
def config_argparser():
argumentParser = argparse.ArgumentParser(add_help=False)
argumentParser.add_argument(
"-i",
"--input-file",
action="store",
dest="inputFile",
help="Input filename (in CSV format)",
default=None,
required=True,
)
argumentParser.add_argument(
"-f",
"--output-format",
action="store",
dest="outputFormat",
help="Format of the output file (fxt/hst/hst509/hcc)",
default="fxt",
)
argumentParser.add_argument(
"-p",
"--pair",
action="store",
dest="pair",
help="Symbol pair code (max. 12 chars)",
default="FOOBAR",
)
argumentParser.add_argument(
"-t",
"--timeframe",
action="store",
dest="timeframe",
help="Timeframe (M1, M5, M15, M30, H1, H4, D1, W1, MN1)",
default="M1",
)
argumentParser.add_argument(
"-s",
"--spread",
action="store",
dest="spread",
help="Spread value in points",
default=10,
)
argumentParser.add_argument(
"-d",
"--output-dir",
action="store",
dest="outputDir",
help="Destination directory to save the output file",
default=".",
)
argumentParser.add_argument(
"-S",
"--server",
action="store",
dest="server",
help="Name of FX server",
default="default",
)
argumentParser.add_argument(
"-v",
"--verbose",
action="store_true",
dest="verbose",
help="Enables verbose messages",
)
argumentParser.add_argument(
"-D",
"--debug",
action="store_true",
dest="debug",
help="Enables debugging messages",
)
argumentParser.add_argument(
"-m",
"--model",
action="store",
dest="model",
help="Mode of modeling price for FXT format (0 - Every tick, 1 - Control points, 2 - Open prices)",
default="0",
)
argumentParser.add_argument(
"-h", "--help", action="help", help="Show this help message and exit"
)
return argumentParser
def construct_queue(timeframe_list):
"""Select the apropriate classes and begin the work."""
for timeframe in timeframe_list:
if multiple_timeframes:
print("[INFO] Queueing the {}m timeframe for conversion".format(timeframe))
# Checking output file format argument and doing conversion
if outputFormat == "hst509":
yield HST509(None, ".hst", args.outputDir, timeframe, symbol)
elif outputFormat == "hst":
yield HST574(None, ".hst", args.outputDir, timeframe, symbol)
elif outputFormat == "fxt":
for m in args.model.split(","):
yield FXT(
None,
"_{0}.fxt".format(m),
args.outputDir,
timeframe,
symbol,
server,
spread,
int(m),
)
elif outputFormat == "hcc":
yield HCC(".hcc", args.outputDir, timeframe, symbol)
else:
print("[ERROR] Unknown output file format: {}!".format(outputFormat))
sys.exit(1)
def process_queue(queue):
"""Process the queue, process all the timeframes at the same time to amortize the cost of the parsing."""
queue = list(queue)
try:
for obj in queue:
ticks = CSV(args.inputFile)
startTimestamp = None
            # We will retrieve all ticks in the timeframe into the following arrays and update their LowBid/HighBid
ticksToJoin = []
ticksToAggregate = []
for (tick, isLastRow) in ticks:
# Beginning of the bar's timeline.
tick["barTimestamp"] = (
int(tick["timestamp"]) - int(tick["timestamp"]) % obj.deltaTimestamp
)
# Tick's timestamp will be rounded to 1 for M1 and 60 for other.
tick["timestamp"] = int(
tick["timestamp"]
) # - int(tick['timestamp']) % (1 if obj.deltaTimestamp == 60 else 60)
if not startTimestamp:
startTimestamp = tick["barTimestamp"]
# Tick after this time won't be used for LowBid/HighBid aggregation.
endTimestampAggregate = startTimestamp + 60
# Determines the end of the current bar.
endTimestampTimeline = startTimestamp + obj.deltaTimestamp
if tick["timestamp"] < endTimestampTimeline:
# Tick is within the current bar's timeline, queuing for
# aggregation.
ticksToAggregate.append(tick)
else:
# Tick is beyond current bar's timeline, aggregating unaggregated
# ticks:
if len(ticksToAggregate) > 0:
obj.pack_ticks(ticksToAggregate)
# Next bar's timeline will begin from this new tick's bar
# timestamp.
startTimestamp = tick["barTimestamp"]
# Tick beyond delta timeframe will be aggregated in the next
# timeframe
ticksToAggregate = [tick]
spinner.spin()
            # Write out the remaining ticks if not yet written.
if len(ticksToAggregate) > 0:
obj.pack_ticks(ticksToAggregate)
if args.verbose:
print("[INFO] Finalizing...")
for obj in queue:
obj.finalize()
if args.verbose:
print("[INFO] Done.")
except KeyboardInterrupt as e:
print("\n[INFO] Exiting by user request...")
sys.exit()
if __name__ == "__main__":
# Parse the arguments.
arg_parser = config_argparser()
args = arg_parser.parse_args()
# Checking input file argument.
if args.verbose:
print("[INFO] Input file: %s" % args.inputFile)
# Checking symbol pair argument.
if args.pair and len(args.pair) > 12:
print("[WARNING] Symbol is more than 12 characters, cutting its end off!")
symbol = args.pair[0:12]
else:
symbol = args.pair
if args.verbose:
print("[INFO] Symbol pair name: %s" % symbol)
# Converting timeframe argument to minutes.
timeframe_list = []
timeframe_conv = {
"M": 1,
"H": 60,
"D": 24 * 60,
"W": 7 * 24 * 60,
"MN": 30 * 24 * 60,
}
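    # For example, "-t M15,H4" yields timeframe_list == [15, 240] (minutes),
    # and "MN1" maps to 30 * 24 * 60 = 43200 minutes.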
for arg in args.timeframe.strip().upper().split(","):
match_obj = re.match(r"(M|H|D|W|MN)(\d+)", arg, re.I)
if match_obj:
model = match_obj.group(1).upper()
value = int(match_obj.group(2))
timeframe_list.append(timeframe_conv[model] * value)
else:
print("[ERROR] Bad timeframe setting '{}'!".format(arg))
sys.exit(1)
if args.verbose:
print(
"[INFO] Timeframe: %s - %s minute(s)"
% (args.timeframe.upper(), timeframe_list)
)
# Checking spread argument
spread = int(args.spread)
if args.verbose:
print("[INFO] Spread: %d" % spread)
# Create output directory
os.makedirs(args.outputDir, 0o755, True)
if args.verbose:
print("[INFO] Output directory: %s" % args.outputDir)
# Checking server argument
if len(args.server) > 128:
print(
"[WARNING] Server name is longer than 128 characters, cutting its end off!"
)
server = args.server[0:128]
else:
server = args.server
if args.verbose:
print("[INFO] Server name: %s" % server)
outputFormat = args.outputFormat.strip().lower()
if args.verbose:
print("[INFO] Output format: %s" % outputFormat)
multiple_timeframes = len(timeframe_list) > 1
queue = construct_queue(timeframe_list)
process_queue(queue)
|
|
import numpy as np
util = """
#include <stdio.h>
#define PI 3.14159265
typedef struct Point {
double x;
double y;
double z;
} Point;
typedef struct Plane {
Point n;
Point c;
} Plane;
typedef struct Ray {
Point r0;
Point r1;
} Ray;
__device__ Point add(struct Point p1, struct Point p2) {
Point out = {p1.x + p2.x, p1.y + p2.y, p1.z + p2.z};
return out;
}
__device__ Point subtract(struct Point p1, struct Point p2) {
Point out = {p1.x - p2.x, p1.y - p2.y, p1.z - p2.z};
return out;
}
__device__ Point scale(struct Point p1, double s) {
Point out = {s*p1.x, s*p1.y, s*p1.z};
return out;
}
__device__ double dot(struct Point p1, struct Point p2) {
double out = p1.x*p2.x + p1.y*p2.y + p1.z*p2.z;
return out;
}
__device__ Point cross(struct Point p1, struct Point p2) {
Point out = {p1.y*p2.z - p1.z*p2.y,
                 p1.z*p2.x - p1.x*p2.z,
p1.x*p2.y - p1.y*p2.x};
return out;
}
__device__ double len(struct Point p1) {
double out = sqrt(p1.x*p1.x + p1.y*p1.y + p1.z*p1.z);
return out;
}
__device__ Point intersect(struct Ray r, struct Plane p) {
Point l = subtract(r.r1, r.r0);
double d = dot(subtract(p.c, r.r0), p.n)/dot(l, p.n);
Point out = add(scale(l, d), r.r0); // Intersection point
return out;
}
__device__ Point projectPointToPlane(struct Point r, struct Plane p) {
// Double check that p.n is the unit normal (from origin not c)
// http://math.stackexchange.com/a/445015/357869
Point no = subtract(p.n, p.c);
double d = dot(subtract(r, p.c), p.n);
printf("XXX: %lf \\n", d);
Point out = subtract(r, scale(p.n, d));
return out;
}
"""
class Point():
"""A point in a 3-dimensional Euclidean space."""
def __init__(self, x=0, y=0, z=0):
self.x = np.float64(x)
self.y = np.float64(y)
self.z = np.float64(z)
def __eq__(self, other):
        return isinstance(other, Point) and self.__dict__ == other.__dict__
def __add__(self, other):
return Point(self.x + other.x, self.y + other.y, self.z + other.z)
def __neg__(self):
return Point(-self.x, -self.y, -self.z)
def __sub__(self, other):
return self + (-other)
def __mul__(self, other):
return Point(other*self.x, other*self.y, other*self.z)
def __contains__(self, other):
return self.__eq__(other)
def __str__(self):
return 'Point(' + '{:.2f}'.format(self.x) + ', ' + '{:.2f}'.format(self.y) + ', ' + '{:.2f}'.format(self.z) + ')'
def __repr__(self):
return self.__str__()
@property
def length(self):
return np.sqrt(self.x**2 + self.y**2 + self.z**2)
def dot(self, other):
return self.x*other.x + self.y*other.y + self.z*other.z
def cross(self, other):
return Point(self.y*other.z - self.z*other.y,
self.z*other.x - self.x*other.z,
self.x*other.y - self.y*other.x)
def is_collinear(self, p2, p3):
if (self - p2).cross(self - p3).length <= 1e-6:
return True
else:
return False
def normalize(self):
return Point(self.x/self.length,
self.y/self.length,
self.z/self.length)
class Ray:
"""A half line in 3-dimensional Euclidean space from point1 to infinity in
the direction of point2."""
def __init__(self, origin=None, direction=None):
if origin == direction:
raise ValueError("Provide two unique points for the origin and direction.")
else:
self.origin = origin
self.direction = direction
# TODO: Test that direction is the same too
# TODO: Clean this method
def __eq__(self, other):
if self.origin.is_collinear(other.origin, other.direction):
if self.direction.is_collinear(other.origin, other.direction):
return True
if self.__dict__ == other.__dict__:
return True
return False
def __contains__(self, other):
if isinstance(other, Point): # Is a point on a half line
if (other == self.origin) or (other == self.direction):
return True
if self.direction.cross(self.origin - other) == Point(0, 0, 0):
if self.direction.dot(other) >= 0:
return True
else:
return False
else:
raise TypeError("Can't determine if" + str(type(other)) +
"is in Ray.")
def __str__(self):
return 'Ray(' + str(self.origin) + ', ' + str(self.direction) + ')'
def __repr__(self):
return self.__str__()
class Plane:
"""A 2-dimensional plane in 3-dimensional space. """
def __init__(self, p1, p2=None, p3=None, **kwargs):
if p2 and p3:
if p1.is_collinear(p2, p3):
raise ValueError("Provide 3 points that are not collinear.")
else:
self.p1 = p1
self.normal = (p2 - p1).cross(p3 - p2).normalize()
else:
n = kwargs.pop('normal', p2)
if isinstance(n, Point):
self.normal = n.normalize()
else:
raise ValueError("Either provide 3 3D points or a point with\
a normal vector.")
def __contains__(self, other):
if isinstance(other, Point): # Is a point on the plane?
if (other - self.p1).dot(self.normal) == 0:
return True
else:
return False
elif isinstance(other, Ray): # Is a ray in the plane?
if (other.origin in self) and (other.direction in self):
return True
else:
return False
else:
raise TypeError("Can't determine if" + str(type(other)) +
"is in Plane.")
def __str__(self):
return 'Plane(' + str(self.p1) + ', normal=' + str(self.normal) + ')'
def __repr__(self):
return self.__str__()
def intersect(self, o):
""" Change this function to return just the new ray. """
""" Returns a list of intersections with a Ray or Point."""
if isinstance(o, Point):
if o in self:
return o
else:
print("POINT")
return None
if isinstance(o, Ray):
# If ray is entirely in the plane
if o in self:
return o
# If ray is parallel to the plane
if (o.direction - o.origin).dot(self.normal) == 0:
print("PARALLEL")
return None
# If ray line has a single intersection with the plane
else:
p0 = self.p1
l0 = o.origin
n = self.normal
l = o.direction - l0
d = ((p0 - l0).dot(n))/(l.dot(n))
if d >= 0:
new_o = l*d + l0
return Ray(origin=new_o, direction=(new_o + o.direction))
# If ray is in wrong direction to intersect plane
else:
print("Wrong direction")
return None
if isinstance(o, Plane):
# TODO
return None
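# A minimal usage sketch (not part of the original module): build the z = 0
# plane from three points and intersect it with a downward-pointing ray.
if __name__ == '__main__':
    plane = Plane(Point(0, 0, 0), Point(1, 0, 0), Point(0, 1, 0))
    ray = Ray(origin=Point(0, 0, 1), direction=Point(0, 0, -1))
    # Expected output: a Ray starting at the origin and continuing along -z.
    print(plane.intersect(ray))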
|
|
# import_export_maplight/models.py
# Brought to you by We Vote. Be good.
# -*- coding: UTF-8 -*-
from datetime import datetime
from django.db import models
from election_office_measure.models import CandidateCampaign, ContestOffice, ContestOfficeManager
from exception.models import handle_record_found_more_than_one_exception, handle_record_not_saved_exception
import json
import wevote_functions.admin
logger = wevote_functions.admin.get_logger(__name__)
def validate_maplight_date(d):
try:
datetime.strptime(d, '%Y-%m-%d')
return True
except ValueError:
return False
# TODO Also create MapLightContestMeasure
class MapLightContestOffice(models.Model):
election_date = models.DateField('election date', default=None, null=True, blank=True) # "2014-09-03"
contest_id = models.CharField(
verbose_name='contest id', max_length=255, null=False, blank=False, unique=True) # "O1524"
title = models.CharField(
verbose_name='title', max_length=255, null=False, blank=False, unique=False) # "Governor - California"
type = models.CharField(
verbose_name='type', max_length=255, null=False, blank=False, unique=False) # "office"
# "http://votersedge.org/california/2014/november/state/candidates/governor"
url = models.CharField(verbose_name='url', max_length=255, null=False, blank=False, unique=False)
class MapLightContestOfficeManager(models.Model):
def __unicode__(self):
return "MapLightContestOfficeManager"
def retrieve_maplight_contest_office_from_id(self, contest_office_id):
maplight_contest_office_manager = MapLightContestOfficeManager()
return maplight_contest_office_manager.retrieve_maplight_contest_office(contest_office_id)
def fetch_maplight_contest_office_from_id_maplight(self, id_maplight):
maplight_contest_office_manager = MapLightContestOfficeManager()
results = maplight_contest_office_manager.retrieve_maplight_contest_office_from_id_maplight(id_maplight)
if results['success']:
return results['maplight_contest_office']
return MapLightContestOffice()
def retrieve_maplight_contest_office_from_id_maplight(self, id_maplight):
contest_office_id = 0
maplight_contest_office_manager = MapLightContestOfficeManager()
return maplight_contest_office_manager.retrieve_maplight_contest_office(contest_office_id, id_maplight)
def fetch_maplight_contest_office_id_from_id_maplight(self, id_maplight):
contest_office_id = 0
maplight_contest_office_manager = MapLightContestOfficeManager()
results = maplight_contest_office_manager.retrieve_maplight_contest_office(contest_office_id, id_maplight)
if results['success']:
return results['maplight_contest_office_id']
return 0
# NOTE: searching by all other variables seems to return a list of objects
def retrieve_maplight_contest_office(self, contest_office_id, id_maplight=None):
error_result = False
exception_does_not_exist = False
exception_multiple_object_returned = False
maplight_contest_office_on_stage = MapLightContestOffice()
try:
if contest_office_id > 0:
maplight_contest_office_on_stage = MapLightContestOffice.objects.get(id=contest_office_id)
contest_office_id = maplight_contest_office_on_stage.id
            elif id_maplight and len(id_maplight) > 0:
maplight_contest_office_on_stage = MapLightContestOffice.objects.get(contest_id=id_maplight)
contest_office_id = maplight_contest_office_on_stage.id
except MapLightContestOffice.MultipleObjectsReturned as e:
handle_record_found_more_than_one_exception(e, logger=logger)
exception_multiple_object_returned = True
except MapLightContestOffice.DoesNotExist as e:
exception_does_not_exist = True
results = {
'success': True if contest_office_id > 0 else False,
'error_result': error_result,
'DoesNotExist': exception_does_not_exist,
'MultipleObjectsReturned': exception_multiple_object_returned,
'maplight_contest_office_found': True if contest_office_id > 0 else False,
'contest_office_id': contest_office_id,
'maplight_contest_office': maplight_contest_office_on_stage,
}
return results
class MapLightCandidate(models.Model):
candidate_id = models.IntegerField(verbose_name='candidate id', null=False, blank=False, unique=True) # "5746"
display_name = models.CharField(
verbose_name='display name', max_length=255, null=False, blank=False, unique=False) # "Jerry Brown"
first_name = models.CharField(
verbose_name='first name', max_length=255, null=False, blank=True, unique=False)
gender = models.CharField(
verbose_name='gender', max_length=1, null=False, blank=False, default='U', unique=False) # "M"
last_funding_update = models.DateField(
verbose_name='last funding update date', default=None, null=True, blank=True) # "2014-09-03"
last_name = models.CharField(
verbose_name='last name', max_length=255, null=False, blank=True, unique=False) # "Brown"
middle_name = models.CharField(verbose_name='middle name', max_length=255, null=False, blank=False, unique=False)
name_prefix = models.CharField(verbose_name='name prefix', max_length=255, null=False, blank=True, unique=False)
name_suffix = models.CharField(verbose_name='name suffix', max_length=255, null=False, blank=True, unique=False)
original_name = models.CharField(
verbose_name='original name', max_length=255, null=False, blank=True, unique=False) # "Edmund G Brown"
party = models.CharField(
verbose_name='political party', max_length=255, null=False, blank=True, unique=False) # "Democratic"
# "http://votersedge.org/sites/all/modules/map/modules/map_proposition/images/politicians/2633.jpg?v"
photo = models.CharField(
verbose_name='photo url', max_length=255, null=False, blank=True, unique=False)
politician_id = models.IntegerField(verbose_name='politician id', null=False, blank=False, unique=True) # "2633"
roster_name = models.CharField(
verbose_name='roster name', max_length=255, null=False, blank=True, unique=False) # "Jerry Brown"
type = models.CharField(verbose_name='type', max_length=255, null=False, blank=True, unique=False)
# "http://votersedge.org/california/2014/november/state/candidates/governor/2633-jerry-brown"
url = models.CharField(verbose_name='url', max_length=255, null=False, blank=True, unique=False)
class MapLightCandidateManager(models.Model):
def __unicode__(self):
return "MapLightCandidateManager"
def retrieve_maplight_candidate_from_id(self, candidate_id):
maplight_candidate_manager = MapLightCandidateManager()
return maplight_candidate_manager.retrieve_maplight_candidate(candidate_id)
def retrieve_maplight_candidate_from_candidate_id_maplight(self, candidate_id_maplight):
candidate_id = 0
politician_id_maplight = 0
maplight_candidate_manager = MapLightCandidateManager()
return maplight_candidate_manager.retrieve_maplight_candidate(
candidate_id, candidate_id_maplight, politician_id_maplight)
def fetch_maplight_candidate_from_candidate_id_maplight(self, candidate_id_maplight):
maplight_candidate_manager = MapLightCandidateManager()
results = maplight_candidate_manager.retrieve_maplight_candidate_from_candidate_id_maplight(
candidate_id_maplight)
if results['success']:
return results['maplight_candidate']
else:
return MapLightCandidate()
def retrieve_maplight_candidate_from_politician_id_maplight(self, politician_id_maplight):
candidate_id = 0
candidate_id_maplight = 0
maplight_candidate_manager = MapLightCandidateManager()
return maplight_candidate_manager.retrieve_maplight_candidate(
candidate_id, candidate_id_maplight, politician_id_maplight)
def fetch_maplight_candidate_from_politician_id_maplight(self, politician_id_maplight):
maplight_candidate_manager = MapLightCandidateManager()
results = maplight_candidate_manager.retrieve_maplight_candidate_from_politician_id_maplight(
politician_id_maplight)
if results['success']:
return results['maplight_candidate']
else:
return MapLightCandidate()
# NOTE: searching by all other variables seems to return a list of objects
def retrieve_maplight_candidate(self, candidate_id, candidate_id_maplight=None, politician_id_maplight=None):
error_result = False
exception_does_not_exist = False
exception_multiple_object_returned = False
maplight_candidate_on_stage = MapLightCandidate()
try:
if candidate_id > 0:
maplight_candidate_on_stage = MapLightCandidate.objects.get(id=candidate_id)
candidate_id = maplight_candidate_on_stage.id
            elif candidate_id_maplight:  # also guards against a None / empty value
maplight_candidate_on_stage = MapLightCandidate.objects.get(candidate_id=candidate_id_maplight)
candidate_id = maplight_candidate_on_stage.id
            elif politician_id_maplight:  # also guards against a None / empty value
maplight_candidate_on_stage = MapLightCandidate.objects.get(politician_id=politician_id_maplight)
candidate_id = maplight_candidate_on_stage.id
except MapLightCandidate.MultipleObjectsReturned as e:
handle_record_found_more_than_one_exception(e, logger=logger)
exception_multiple_object_returned = True
except MapLightCandidate.DoesNotExist as e:
exception_does_not_exist = True
results = {
'success': True if candidate_id > 0 else False,
'error_result': error_result,
'DoesNotExist': exception_does_not_exist,
'MultipleObjectsReturned': exception_multiple_object_returned,
'maplight_candidate_found': True if candidate_id > 0 else False,
'candidate_id': candidate_id,
'maplight_candidate': maplight_candidate_on_stage,
}
return results
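# Editor's illustrative sketch, not part of the original module: one way a caller
# might use the fetch_* convenience wrappers above. The politician id value is a
# placeholder; fetch_* always returns a MapLightCandidate instance, and an unsaved
# instance (pk is None) signals "not found".
def _example_fetch_maplight_candidate(politician_id_maplight="2633"):
    maplight_candidate_manager = MapLightCandidateManager()
    candidate = maplight_candidate_manager.fetch_maplight_candidate_from_politician_id_maplight(
        politician_id_maplight)
    # An empty, unsaved MapLightCandidate means no matching row was found
    return candidate if candidate.pk else None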
|
|
# scoping.py
# Copyright (C) the SQLAlchemy authors and contributors
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
import sqlalchemy.exceptions as sa_exc
from sqlalchemy.util import ScopedRegistry, ThreadLocalRegistry, \
to_list, get_cls_kwargs, deprecated
from sqlalchemy.orm import (
EXT_CONTINUE, MapperExtension, class_mapper, object_session
)
from sqlalchemy.orm import exc as orm_exc
from sqlalchemy.orm.session import Session
__all__ = ['ScopedSession']
class ScopedSession(object):
"""Provides thread-local management of Sessions.
Usage::
Session = scoped_session(sessionmaker())
... use Session normally.
The internal registry is accessible as well,
and by default is an instance of :class:`.ThreadLocalRegistry`.
"""
def __init__(self, session_factory, scopefunc=None):
self.session_factory = session_factory
if scopefunc:
self.registry = ScopedRegistry(session_factory, scopefunc)
else:
self.registry = ThreadLocalRegistry(session_factory)
self.extension = _ScopedExt(self)
def __call__(self, **kwargs):
if kwargs:
scope = kwargs.pop('scope', False)
if scope is not None:
if self.registry.has():
raise sa_exc.InvalidRequestError("Scoped session is already present; no new arguments may be specified.")
else:
sess = self.session_factory(**kwargs)
self.registry.set(sess)
return sess
else:
return self.session_factory(**kwargs)
else:
return self.registry()
def remove(self):
"""Dispose of the current contextual session."""
if self.registry.has():
self.registry().close()
self.registry.clear()
@deprecated("0.5", ":meth:`.ScopedSession.mapper` is deprecated. "
"Please see http://www.sqlalchemy.org/trac/wiki/UsageRecipes/SessionAwareMapper "
"for information on how to replicate its behavior.")
def mapper(self, *args, **kwargs):
"""return a :func:`.mapper` function which associates this ScopedSession with the Mapper.
"""
from sqlalchemy.orm import mapper
extension_args = dict((arg, kwargs.pop(arg))
for arg in get_cls_kwargs(_ScopedExt)
if arg in kwargs)
kwargs['extension'] = extension = to_list(kwargs.get('extension', []))
if extension_args:
extension.append(self.extension.configure(**extension_args))
else:
extension.append(self.extension)
return mapper(*args, **kwargs)
def configure(self, **kwargs):
"""reconfigure the sessionmaker used by this ScopedSession."""
self.session_factory.configure(**kwargs)
def query_property(self, query_cls=None):
"""return a class property which produces a `Query` object against the
class when called.
e.g.::
Session = scoped_session(sessionmaker())
class MyClass(object):
query = Session.query_property()
# after mappers are defined
result = MyClass.query.filter(MyClass.name=='foo').all()
Produces instances of the session's configured query class by
default. To override and use a custom implementation, provide
a ``query_cls`` callable. The callable will be invoked with
the class's mapper as a positional argument and a session
keyword argument.
There is no limit to the number of query properties placed on
a class.
"""
class query(object):
def __get__(s, instance, owner):
try:
mapper = class_mapper(owner)
if mapper:
if query_cls:
# custom query class
return query_cls(mapper, session=self.registry())
else:
# session's configured query class
return self.registry().query(mapper)
except orm_exc.UnmappedClassError:
return None
return query()
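# Editor's illustrative sketch, not part of SQLAlchemy: supplying a custom
# ``query_cls`` to ``query_property``. ``CountingQuery`` is a made-up subclass;
# the callable is invoked with the class's mapper as a positional argument and
# the contextual session as a keyword argument, as described above.
def _example_custom_query_property():
    from sqlalchemy.orm import sessionmaker
    from sqlalchemy.orm.query import Query
    class CountingQuery(Query):
        def total(self):
            # trivial customization: alias for count()
            return self.count()
    Session = ScopedSession(sessionmaker())
    # the returned descriptor would normally be assigned on a mapped class, e.g.
    #     class User(object): query = Session.query_property(query_cls=CountingQuery)
    return Session.query_property(query_cls=CountingQuery)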
def instrument(name):
def do(self, *args, **kwargs):
return getattr(self.registry(), name)(*args, **kwargs)
return do
for meth in Session.public_methods:
setattr(ScopedSession, meth, instrument(meth))
def makeprop(name):
def set(self, attr):
setattr(self.registry(), name, attr)
def get(self):
return getattr(self.registry(), name)
return property(get, set)
for prop in ('bind', 'dirty', 'deleted', 'new', 'identity_map', 'is_active', 'autoflush'):
setattr(ScopedSession, prop, makeprop(prop))
def clslevel(name):
def do(cls, *args, **kwargs):
return getattr(Session, name)(*args, **kwargs)
return classmethod(do)
for prop in ('close_all', 'object_session', 'identity_key'):
setattr(ScopedSession, prop, clslevel(prop))
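# Editor's note (illustrative, not part of SQLAlchemy): the ``instrument``,
# ``makeprop`` and ``clslevel`` helpers above make ScopedSession a thin proxy.
# Assuming ``Session = ScopedSession(sessionmaker())``, a call such as
# ``Session.add(obj)`` is forwarded to the thread-local session, i.e. it behaves
# like ``Session.registry().add(obj)``, and an attribute such as ``Session.dirty``
# reads the same attribute off that contextual session.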
class _ScopedExt(MapperExtension):
def __init__(self, context, validate=False, save_on_init=True):
self.context = context
self.validate = validate
self.save_on_init = save_on_init
self.set_kwargs_on_init = True
def validating(self):
return _ScopedExt(self.context, validate=True)
def configure(self, **kwargs):
return _ScopedExt(self.context, **kwargs)
def instrument_class(self, mapper, class_):
class query(object):
def __getattr__(s, key):
return getattr(self.context.registry().query(class_), key)
def __call__(s):
return self.context.registry().query(class_)
def __get__(self, instance, cls):
return self
if not 'query' in class_.__dict__:
class_.query = query()
if self.set_kwargs_on_init and class_.__init__ is object.__init__:
class_.__init__ = self._default__init__(mapper)
def _default__init__(ext, mapper):
def __init__(self, **kwargs):
for key, value in kwargs.iteritems():
if ext.validate:
if not mapper.get_property(key, resolve_synonyms=False,
raiseerr=False):
raise sa_exc.ArgumentError(
"Invalid __init__ argument: '%s'" % key)
setattr(self, key, value)
return __init__
def init_instance(self, mapper, class_, oldinit, instance, args, kwargs):
if self.save_on_init:
session = kwargs.pop('_sa_session', None)
if session is None:
session = self.context.registry()
session._save_without_cascade(instance)
return EXT_CONTINUE
def init_failed(self, mapper, class_, oldinit, instance, args, kwargs):
sess = object_session(instance)
if sess:
sess.expunge(instance)
return EXT_CONTINUE
def dispose_class(self, mapper, class_):
if hasattr(class_, 'query'):
delattr(class_, 'query')
|
|
#!/usr/bin/env python3
# Copyright (c) 2014-2017 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test mempool persistence.
By default, bitcoind will dump mempool on shutdown and
then reload it on startup. This can be overridden with
the -persistmempool=0 command line option.
Test is as follows:
- start node0, node1 and node2. node1 has -persistmempool=0
- create 5 transactions on node2 to its own address. Note that these
are not sent to node0 or node1 addresses because we don't want
them to be saved in the wallet.
- check that node0 and node1 have 5 transactions in their mempools
- shutdown all nodes.
- startup node0. Verify that it still has 5 transactions
in its mempool. Shutdown node0. This tests that by default the
mempool is persistent.
- startup node1. Verify that its mempool is empty. Shutdown node1.
This tests that with -persistmempool=0, the mempool is not
dumped to disk when the node is shut down.
- Restart node0 with -persistmempool=0. Verify that its mempool is
empty. Shutdown node0. This tests that with -persistmempool=0,
the mempool is not loaded from disk on start up.
- Restart node0 with -persistmempool. Verify that it has 5
transactions in its mempool. This tests that -persistmempool=0
does not overwrite a previously valid mempool stored on disk.
- Remove node0 mempool.dat and verify savemempool RPC recreates it
  and verify that node1 can load it and has 5 transactions in its
  mempool.
- Verify that the savemempool RPC throws an error if
  node1 can't write to disk.
"""
import os
import time
import logging
import test_framework.loginit
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import *
class MempoolPersistTest(BitcoinTestFramework):
def set_test_params(self):
self.num_nodes = 4
def run_test(self):
chain_height = self.nodes[0].getblockcount()
assert_equal(chain_height, 200)
########## Check the memory pool persistence ###########
logging.info("Mine a single block to get out of IBD")
self.nodes[0].generate(1)
self.sync_all()
logging.info("Send 5 transactions from node2 (to its own address)")
for i in range(5):
self.nodes[2].sendtoaddress(self.nodes[2].getnewaddress(), Decimal("10"))
self.sync_all()
logging.info("Verify that node0 and node1 have 5 transactions in their mempools")
assert_equal(len(self.nodes[0].getrawmempool()), 5)
assert_equal(len(self.nodes[1].getrawmempool()), 5)
logging.info("Stop-start node0 and node1. Verify that node0 has the transactions in its mempool and node1 does not.")
stop_nodes(self.nodes)
wait_bitcoinds()
node_args = [[ ], ['-persistmempool=0']]
self.nodes = start_nodes(2, self.options.tmpdir, node_args)
waitFor(10, lambda: len(self.nodes[0].getrawmempool()) == 5)
assert_equal(len(self.nodes[1].getrawmempool()), 0)
logging.info("Stop-start node0 with -persistmempool=0. Verify that it doesn't load its mempool.dat file.")
stop_nodes(self.nodes)
wait_bitcoinds()
node_args = [['-persistmempool=0']]
self.nodes = start_nodes(1, self.options.tmpdir, node_args)
# Give bitcoind a second to reload the mempool
time.sleep(1)
assert_equal(len(self.nodes[0].getrawmempool()), 0)
logging.info("Stop-start node0. Verify that it has the transactions in its mempool.")
stop_nodes(self.nodes)
wait_bitcoinds()
self.nodes = start_nodes(1, self.options.tmpdir)
waitFor(10, lambda: len(self.nodes[0].getrawmempool()) == 5)
mempooldat0 = os.path.join(self.options.tmpdir, 'node0', 'regtest', 'mempool.dat')
mempooldat1 = os.path.join(self.options.tmpdir, 'node1', 'regtest', 'mempool.dat')
logging.info("Remove the mempool.dat file. Verify that savemempool to disk via RPC re-creates it")
os.remove(mempooldat0)
self.nodes[0].savemempool()
assert os.path.isfile(mempooldat0)
logging.info("Stop nodes, make node1 use mempool.dat from node0. Verify it has 5 transactions")
os.rename(mempooldat0, mempooldat1)
stop_nodes(self.nodes)
wait_bitcoinds()
self.nodes = start_nodes(2, self.options.tmpdir)
waitFor(10, lambda: len(self.nodes[1].getrawmempool()) == 5)
logging.info("Prevent bitcoind from writing mempool.dat to disk. Verify that `savemempool` fails")
# try to dump mempool content on a directory rather than a file
# which is an implementation detail that could change and break this test
mempooldotnew1 = mempooldat1 + '.new'
os.mkdir(mempooldotnew1)
assert_raises_rpc_error(-1, "Unable to dump mempool to disk", self.nodes[1].savemempool)
os.rmdir(mempooldotnew1)
########## Check the orphan pool persistence ###########
stop_nodes(self.nodes)
wait_bitcoinds()
        # start both nodes with the debug categories needed for the orphan pool checks
        node_args = [["-debug=net", "-debug=mempool"], ["-debug=net", "-debug=mempool"]]
        self.nodes = start_nodes(2, self.options.tmpdir, node_args)
connect_nodes_full(self.nodes)
self.sync_blocks()
#create coins that we can use for creating multi input transactions
BCH_UNCONF_DEPTH = 50
DELAY_TIME = 240
self.relayfee = self.nodes[1].getnetworkinfo()['relayfee']
utxo_count = BCH_UNCONF_DEPTH * 3 + 1
startHeight = self.nodes[1].getblockcount()
logging.info("Starting at %d blocks" % startHeight)
utxos = create_confirmed_utxos(self.relayfee, self.nodes[1], utxo_count)
startHeight = self.nodes[1].getblockcount()
logging.info("Initial sync to %d blocks" % startHeight)
        # Create chained multi-input transactions. Any transaction that exceeds the
        # BCH default unconfirmed chain limit is kept out of the mempool and is
        # admitted to the orphan pool instead.
tx_amount = 0
for i in range(1, BCH_UNCONF_DEPTH + 6):
try:
inputs = []
inputs.append(utxos.pop())
if (i == 1):
inputs.append(utxos.pop())
else:
inputs.append({ "txid" : txid, "vout" : 0})
outputs = {}
if (i == 1):
tx_amount = inputs[0]["amount"] + inputs[1]["amount"] - self.relayfee
else:
tx_amount = inputs[0]["amount"] + tx_amount - self.relayfee
outputs[self.nodes[1].getnewaddress()] = tx_amount
rawtx = self.nodes[1].createrawtransaction(inputs, outputs)
signed_tx = self.nodes[1].signrawtransaction(rawtx)["hex"]
txid = self.nodes[1].sendrawtransaction(signed_tx, False, "standard", True)
logging.info("tx depth %d" % i) # Keep travis from timing out
except JSONRPCException as e: # an exception you don't catch is a testing error
print(str(e))
raise
waitFor(DELAY_TIME, lambda: self.nodes[0].getorphanpoolinfo()["size"] == 0, lambda: print (getNodeInfo(self.nodes[0])))
waitFor(DELAY_TIME, lambda: self.nodes[1].getorphanpoolinfo()["size"] == 5, lambda: print (getNodeInfo(self.nodes[1])))
#stop and start nodes and verify that the orphanpool was resurrected
stop_nodes(self.nodes)
wait_bitcoinds()
self.nodes = start_nodes(2, self.options.tmpdir)
waitFor(DELAY_TIME, lambda: self.nodes[0].getorphanpoolinfo()["size"] == 0, lambda: print (getNodeInfo(self.nodes[0])))
waitFor(DELAY_TIME, lambda: self.nodes[1].getorphanpoolinfo()["size"] == 5, lambda: print (getNodeInfo(self.nodes[1])))
orphanpooldat0 = os.path.join(self.options.tmpdir, 'node0', 'regtest', 'orphanpool.dat')
orphanpooldat1 = os.path.join(self.options.tmpdir, 'node1', 'regtest', 'orphanpool.dat')
logging.info("Remove the orphanpool.dat file. Verify that saveorphanpool to disk via RPC re-creates it")
os.remove(orphanpooldat0)
self.nodes[0].saveorphanpool()
assert os.path.isfile(orphanpooldat0)
logging.info("Stop nodes, make node1 use orphanpool.dat from node0. Verify it has 5 transactions")
os.rename(orphanpooldat0, orphanpooldat1)
stop_nodes(self.nodes)
wait_bitcoinds()
self.nodes = start_nodes(2, self.options.tmpdir)
waitFor(10, lambda: len(self.nodes[1].getraworphanpool()) == 5)
logging.info("Prevent bitcoind from writing orphanpool.dat to disk. Verify that `saveorphanpool` fails")
# try to dump orphanpool content on a directory rather than a file
# which is an implementation detail that could change and break this test
orphanpooldotnew1 = orphanpooldat1 + '.new'
os.mkdir(orphanpooldotnew1)
assert_raises_rpc_error(-1, "Unable to dump orphanpool to disk", self.nodes[1].saveorphanpool)
os.rmdir(orphanpooldotnew1)
#stop and start with persistmempool off and verify that the orphan pool was not resurrected
stop_nodes(self.nodes)
wait_bitcoinds()
node_args = [['-persistmempool=0'], ['-persistmempool=0']]
self.nodes = start_nodes(2, self.options.tmpdir, node_args)
waitFor(DELAY_TIME, lambda: self.nodes[0].getorphanpoolinfo()["size"] == 0)
waitFor(DELAY_TIME, lambda: self.nodes[1].getorphanpoolinfo()["size"] == 0)
if __name__ == '__main__':
MempoolPersistTest().main()
def Test():
t = MempoolPersistTest()
t.drop_to_pdb = True
bitcoinConf = {
"debug": ["blk", "mempool", "net", "req", "-event"],
"logtimemicros": 1
}
flags = standardFlags()
t.main(flags, bitcoinConf, None)
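# Editor's note (illustrative): Test() lets this file be driven from an interactive
# session, e.g.
#     import mempool_persist  # hypothetical module name for this file
#     mempool_persist.Test()
# with drop_to_pdb enabled so a failing assertion drops into the debugger.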
|
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tensor summaries for exporting information about a model.
See the @{$python/summary} guide.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from google.protobuf import json_format as _json_format
# exports Summary, SummaryDescription, Event, TaggedRunMetadata, SessionLog
# pylint: disable=unused-import
from tensorflow.core.framework.summary_pb2 import Summary
from tensorflow.core.framework.summary_pb2 import SummaryDescription
from tensorflow.core.util.event_pb2 import Event
from tensorflow.core.util.event_pb2 import SessionLog
from tensorflow.core.util.event_pb2 import TaggedRunMetadata
# pylint: enable=unused-import
from tensorflow.python.eager import context as _context
from tensorflow.python.framework import constant_op as _constant_op
from tensorflow.python.framework import dtypes as _dtypes
from tensorflow.python.framework import ops as _ops
from tensorflow.python.ops import gen_logging_ops as _gen_logging_ops
from tensorflow.python.ops import gen_summary_ops as _gen_summary_ops # pylint: disable=unused-import
from tensorflow.python.ops import summary_op_util as _summary_op_util
# exports tensor-related summaries
# pylint: disable=unused-import
from tensorflow.python.ops.summary_ops import tensor_summary
# pylint: enable=unused-import
# exports text
# pylint: disable=unused-import
from tensorflow.python.summary.text_summary import text_summary as text
# pylint: enable=unused-import
# exports FileWriter, FileWriterCache
# pylint: disable=unused-import
from tensorflow.python.summary.writer.writer import FileWriter
from tensorflow.python.summary.writer.writer_cache import FileWriterCache
# pylint: enable=unused-import
from tensorflow.python.util import compat as _compat
from tensorflow.python.util.tf_export import tf_export
@tf_export('summary.scalar')
def scalar(name, tensor, collections=None, family=None):
"""Outputs a `Summary` protocol buffer containing a single scalar value.
The generated Summary has a Tensor.proto containing the input Tensor.
Args:
name: A name for the generated node. Will also serve as the series name in
TensorBoard.
tensor: A real numeric Tensor containing a single value.
collections: Optional list of graph collections keys. The new summary op is
added to these collections. Defaults to `[GraphKeys.SUMMARIES]`.
family: Optional; if provided, used as the prefix of the summary tag name,
which controls the tab name used for display on Tensorboard.
Returns:
    A scalar `Tensor` of type `string`, which contains a `Summary` protobuf.
Raises:
ValueError: If tensor has the wrong shape or type.
"""
if _summary_op_util.skip_summary():
return _constant_op.constant('')
with _summary_op_util.summary_scope(
name, family, values=[tensor]) as (tag, scope):
val = _gen_logging_ops.scalar_summary(tags=tag, values=tensor, name=scope)
_summary_op_util.collect(val, collections, [_ops.GraphKeys.SUMMARIES])
return val
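# Editor's illustrative sketch, not part of this module: builds a tiny graph with
# one scalar summary. Names and values are placeholders; user code would normally
# call this op as ``tf.summary.scalar``.
def _example_scalar_summary():
  graph = _ops.Graph()
  with graph.as_default():
    loss = _constant_op.constant(0.25, name='loss')
    summary_op = scalar('loss', loss)
  return graph, summary_op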
@tf_export('summary.image')
def image(name, tensor, max_outputs=3, collections=None, family=None):
"""Outputs a `Summary` protocol buffer with images.
The summary has up to `max_outputs` summary values containing images. The
images are built from `tensor` which must be 4-D with shape `[batch_size,
height, width, channels]` and where `channels` can be:
* 1: `tensor` is interpreted as Grayscale.
* 3: `tensor` is interpreted as RGB.
* 4: `tensor` is interpreted as RGBA.
The images have the same number of channels as the input tensor. For float
input, the values are normalized one image at a time to fit in the range
`[0, 255]`. `uint8` values are unchanged. The op uses two different
normalization algorithms:
* If the input values are all positive, they are rescaled so the largest one
is 255.
* If any input value is negative, the values are shifted so input value 0.0
is at 127. They are then rescaled so that either the smallest value is 0,
or the largest one is 255.
The `tag` in the outputted Summary.Value protobufs is generated based on the
name, with a suffix depending on the max_outputs setting:
* If `max_outputs` is 1, the summary value tag is '*name*/image'.
* If `max_outputs` is greater than 1, the summary value tags are
generated sequentially as '*name*/image/0', '*name*/image/1', etc.
Args:
name: A name for the generated node. Will also serve as a series name in
TensorBoard.
tensor: A 4-D `uint8` or `float32` `Tensor` of shape `[batch_size, height,
width, channels]` where `channels` is 1, 3, or 4.
max_outputs: Max number of batch elements to generate images for.
collections: Optional list of ops.GraphKeys. The collections to add the
summary to. Defaults to [_ops.GraphKeys.SUMMARIES]
family: Optional; if provided, used as the prefix of the summary tag name,
which controls the tab name used for display on Tensorboard.
Returns:
A scalar `Tensor` of type `string`. The serialized `Summary` protocol
buffer.
"""
if _summary_op_util.skip_summary():
return _constant_op.constant('')
with _summary_op_util.summary_scope(
name, family, values=[tensor]) as (tag, scope):
val = _gen_logging_ops.image_summary(
tag=tag, tensor=tensor, max_images=max_outputs, name=scope)
_summary_op_util.collect(val, collections, [_ops.GraphKeys.SUMMARIES])
return val
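# Editor's illustrative sketch, not part of this module: records a single 1x2x2x1
# grayscale image built from placeholder uint8 values.
def _example_image_summary():
  graph = _ops.Graph()
  with graph.as_default():
    img = _constant_op.constant(
        [[[[0], [255]], [[255], [0]]]], dtype=_dtypes.uint8)
    summary_op = image('checkerboard', img, max_outputs=1)
  return graph, summary_op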
@tf_export('summary.histogram')
def histogram(name, values, collections=None, family=None):
# pylint: disable=line-too-long
"""Outputs a `Summary` protocol buffer with a histogram.
Adding a histogram summary makes it possible to visualize your data's
distribution in TensorBoard. You can see a detailed explanation of the
TensorBoard histogram dashboard
[here](https://www.tensorflow.org/get_started/tensorboard_histograms).
The generated
[`Summary`](https://www.tensorflow.org/code/tensorflow/core/framework/summary.proto)
has one summary value containing a histogram for `values`.
This op reports an `InvalidArgument` error if any value is not finite.
Args:
name: A name for the generated node. Will also serve as a series name in
TensorBoard.
values: A real numeric `Tensor`. Any shape. Values to use to
build the histogram.
collections: Optional list of graph collections keys. The new summary op is
added to these collections. Defaults to `[GraphKeys.SUMMARIES]`.
family: Optional; if provided, used as the prefix of the summary tag name,
which controls the tab name used for display on Tensorboard.
Returns:
A scalar `Tensor` of type `string`. The serialized `Summary` protocol
buffer.
"""
if _summary_op_util.skip_summary():
return _constant_op.constant('')
with _summary_op_util.summary_scope(
name, family, values=[values],
default_name='HistogramSummary') as (tag, scope):
val = _gen_logging_ops.histogram_summary(
tag=tag, values=values, name=scope)
_summary_op_util.collect(val, collections, [_ops.GraphKeys.SUMMARIES])
return val
@tf_export('summary.audio')
def audio(name, tensor, sample_rate, max_outputs=3, collections=None,
family=None):
# pylint: disable=line-too-long
"""Outputs a `Summary` protocol buffer with audio.
The summary has up to `max_outputs` summary values containing audio. The
audio is built from `tensor` which must be 3-D with shape `[batch_size,
frames, channels]` or 2-D with shape `[batch_size, frames]`. The values are
assumed to be in the range of `[-1.0, 1.0]` with a sample rate of
`sample_rate`.
The `tag` in the outputted Summary.Value protobufs is generated based on the
name, with a suffix depending on the max_outputs setting:
* If `max_outputs` is 1, the summary value tag is '*name*/audio'.
* If `max_outputs` is greater than 1, the summary value tags are
generated sequentially as '*name*/audio/0', '*name*/audio/1', etc
Args:
name: A name for the generated node. Will also serve as a series name in
TensorBoard.
tensor: A 3-D `float32` `Tensor` of shape `[batch_size, frames, channels]`
or a 2-D `float32` `Tensor` of shape `[batch_size, frames]`.
sample_rate: A Scalar `float32` `Tensor` indicating the sample rate of the
signal in hertz.
max_outputs: Max number of batch elements to generate audio for.
collections: Optional list of ops.GraphKeys. The collections to add the
summary to. Defaults to [_ops.GraphKeys.SUMMARIES]
family: Optional; if provided, used as the prefix of the summary tag name,
which controls the tab name used for display on Tensorboard.
Returns:
A scalar `Tensor` of type `string`. The serialized `Summary` protocol
buffer.
"""
if _summary_op_util.skip_summary():
return _constant_op.constant('')
with _summary_op_util.summary_scope(
name, family=family, values=[tensor]) as (tag, scope):
sample_rate = _ops.convert_to_tensor(
sample_rate, dtype=_dtypes.float32, name='sample_rate')
val = _gen_logging_ops.audio_summary_v2(
tag=tag, tensor=tensor, max_outputs=max_outputs,
sample_rate=sample_rate, name=scope)
_summary_op_util.collect(val, collections, [_ops.GraphKeys.SUMMARIES])
return val
@tf_export('summary.merge')
def merge(inputs, collections=None, name=None):
# pylint: disable=line-too-long
"""Merges summaries.
This op creates a
[`Summary`](https://www.tensorflow.org/code/tensorflow/core/framework/summary.proto)
protocol buffer that contains the union of all the values in the input
summaries.
When the Op is run, it reports an `InvalidArgument` error if multiple values
in the summaries to merge use the same tag.
Args:
inputs: A list of `string` `Tensor` objects containing serialized `Summary`
protocol buffers.
collections: Optional list of graph collections keys. The new summary op is
added to these collections. Defaults to `[]`.
name: A name for the operation (optional).
Returns:
A scalar `Tensor` of type `string`. The serialized `Summary` protocol
buffer resulting from the merging.
Raises:
RuntimeError: If called with eager mode enabled.
@compatibility(eager)
Not compatible with eager execution. To write TensorBoard
summaries under eager execution, use `tf.contrib.summary` instead.
  @end_compatibility
"""
# pylint: enable=line-too-long
if _context.executing_eagerly():
raise RuntimeError(
'Merging tf.summary.* ops is not compatible with eager execution. '
'Use tf.contrib.summary instead.')
if _summary_op_util.skip_summary():
return _constant_op.constant('')
name = _summary_op_util.clean_tag(name)
with _ops.name_scope(name, 'Merge', inputs):
val = _gen_logging_ops.merge_summary(inputs=inputs, name=name)
_summary_op_util.collect(val, collections, [])
return val
@tf_export('summary.merge_all')
def merge_all(key=_ops.GraphKeys.SUMMARIES, scope=None):
"""Merges all summaries collected in the default graph.
Args:
key: `GraphKey` used to collect the summaries. Defaults to
`GraphKeys.SUMMARIES`.
scope: Optional scope used to filter the summary ops, using `re.match`
Returns:
If no summaries were collected, returns None. Otherwise returns a scalar
`Tensor` of type `string` containing the serialized `Summary` protocol
buffer resulting from the merging.
Raises:
RuntimeError: If called with eager execution enabled.
@compatibility(eager)
Not compatible with eager execution. To write TensorBoard
summaries under eager execution, use `tf.contrib.summary` instead.
  @end_compatibility
"""
if _context.executing_eagerly():
raise RuntimeError(
'Merging tf.summary.* ops is not compatible with eager execution. '
'Use tf.contrib.summary instead.')
summary_ops = _ops.get_collection(key, scope=scope)
if not summary_ops:
return None
else:
return merge(summary_ops)
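# Editor's illustrative sketch, not part of this module: the common graph-mode
# pattern of creating several summaries, merging them with merge_all(), and
# pointing a FileWriter at a placeholder log directory.
def _example_merge_all_and_write(logdir='/tmp/summary_demo'):
  graph = _ops.Graph()
  with graph.as_default():
    scalar('learning_rate', _constant_op.constant(0.01))
    histogram('weights', _constant_op.constant([0.1, 0.2, 0.3]))
    merged = merge_all()
    writer = FileWriter(logdir, graph=graph)
  return merged, writer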
@tf_export('summary.get_summary_description')
def get_summary_description(node_def):
"""Given a TensorSummary node_def, retrieve its SummaryDescription.
When a Summary op is instantiated, a SummaryDescription of associated
metadata is stored in its NodeDef. This method retrieves the description.
Args:
node_def: the node_def_pb2.NodeDef of a TensorSummary op
Returns:
a summary_pb2.SummaryDescription
Raises:
ValueError: if the node is not a summary op.
@compatibility(eager)
Not compatible with eager execution. To write TensorBoard
summaries under eager execution, use `tf.contrib.summary` instead.
  @end_compatibility
"""
if node_def.op != 'TensorSummary':
raise ValueError("Can't get_summary_description on %s" % node_def.op)
description_str = _compat.as_str_any(node_def.attr['description'].s)
summary_description = SummaryDescription()
_json_format.Parse(description_str, summary_description)
return summary_description
|
|
"""Tests of http client with custom Connector"""
import asyncio
import gc
import os.path
import platform
import shutil
import socket
import ssl
import tempfile
import unittest
from unittest import mock
import pytest
from yarl import URL
import aiohttp
from aiohttp import client, helpers, web
from aiohttp.client import ClientRequest
from aiohttp.connector import Connection
from aiohttp.test_utils import unused_port
@pytest.fixture
def key():
    """Connection key: (host, port, is_ssl)."""
    return ('localhost1', 80, False)
@pytest.fixture
def key2():
    """Connection key: (host, port, is_ssl)."""
    return ('localhost2', 80, False)
@pytest.fixture
def ssl_key():
    """SSL connection key: (host, port, is_ssl)."""
    return ('localhost', 80, True)
def test_del(loop):
conn = aiohttp.BaseConnector(loop=loop)
proto = mock.Mock(should_close=False)
conn._release('a', proto)
conns_impl = conn._conns
exc_handler = mock.Mock()
loop.set_exception_handler(exc_handler)
with pytest.warns(ResourceWarning):
del conn
gc.collect()
assert not conns_impl
proto.close.assert_called_with()
msg = {'connector': mock.ANY, # conn was deleted
'connections': mock.ANY,
'message': 'Unclosed connector'}
if loop.get_debug():
msg['source_traceback'] = mock.ANY
exc_handler.assert_called_with(loop, msg)
@pytest.mark.xfail
@asyncio.coroutine
def test_del_with_scheduled_cleanup(loop):
loop.set_debug(True)
conn = aiohttp.BaseConnector(loop=loop, keepalive_timeout=0.01)
transp = mock.Mock()
conn._conns['a'] = [(transp, 'proto', 123)]
conns_impl = conn._conns
exc_handler = mock.Mock()
loop.set_exception_handler(exc_handler)
with pytest.warns(ResourceWarning):
        # deleting the connector alone does not trigger cleanup here, because
        # the loop still holds a strong reference to its instance method
del conn
yield from asyncio.sleep(0.01, loop=loop)
gc.collect()
assert not conns_impl
transp.close.assert_called_with()
msg = {'connector': mock.ANY, # conn was deleted
'message': 'Unclosed connector'}
if loop.get_debug():
msg['source_traceback'] = mock.ANY
exc_handler.assert_called_with(loop, msg)
def test_del_with_closed_loop(loop):
conn = aiohttp.BaseConnector(loop=loop)
transp = mock.Mock()
conn._conns['a'] = [(transp, 'proto', 123)]
conns_impl = conn._conns
exc_handler = mock.Mock()
loop.set_exception_handler(exc_handler)
loop.close()
with pytest.warns(ResourceWarning):
del conn
gc.collect()
assert not conns_impl
assert not transp.close.called
assert exc_handler.called
def test_del_empty_connector(loop):
conn = aiohttp.BaseConnector(loop=loop)
exc_handler = mock.Mock()
loop.set_exception_handler(exc_handler)
del conn
assert not exc_handler.called
@asyncio.coroutine
def test_create_conn(loop):
conn = aiohttp.BaseConnector(loop=loop)
with pytest.raises(NotImplementedError):
yield from conn._create_connection(object())
def test_context_manager(loop):
conn = aiohttp.BaseConnector(loop=loop)
conn.close = mock.Mock()
with conn as c:
assert conn is c
assert conn.close.called
def test_ctor_loop():
with mock.patch('aiohttp.connector.asyncio') as m_asyncio:
session = aiohttp.BaseConnector()
assert session._loop is m_asyncio.get_event_loop.return_value
def test_close(loop):
proto = mock.Mock()
conn = aiohttp.BaseConnector(loop=loop)
assert not conn.closed
conn._conns[('host', 8080, False)] = [(proto, object())]
conn.close()
assert not conn._conns
assert proto.close.called
assert conn.closed
def test_get(loop):
conn = aiohttp.BaseConnector(loop=loop)
assert conn._get(1) is None
proto = mock.Mock()
conn._conns[1] = [(proto, loop.time())]
assert conn._get(1) == proto
conn.close()
def test_get_expired(loop):
conn = aiohttp.BaseConnector(loop=loop)
assert conn._get(('localhost', 80, False)) is None
proto = mock.Mock()
conn._conns[('localhost', 80, False)] = [(proto, loop.time() - 1000)]
assert conn._get(('localhost', 80, False)) is None
assert not conn._conns
conn.close()
def test_get_expired_ssl(loop):
conn = aiohttp.BaseConnector(loop=loop, enable_cleanup_closed=True)
assert conn._get(('localhost', 80, True)) is None
proto = mock.Mock()
conn._conns[('localhost', 80, True)] = [(proto, loop.time() - 1000)]
assert conn._get(('localhost', 80, True)) is None
assert not conn._conns
assert conn._cleanup_closed_transports == [proto.close.return_value]
conn.close()
def test_release_acquired(loop, key):
proto = mock.Mock()
conn = aiohttp.BaseConnector(loop=loop, limit=5)
conn._release_waiter = mock.Mock()
conn._acquired.add(proto)
conn._acquired_per_host[key].add(proto)
conn._release_acquired(key, proto)
assert 0 == len(conn._acquired)
assert 0 == len(conn._acquired_per_host)
assert conn._release_waiter.called
conn._release_acquired(key, proto)
assert 0 == len(conn._acquired)
assert 0 == len(conn._acquired_per_host)
conn.close()
def test_release_acquired_closed(loop, key):
proto = mock.Mock()
conn = aiohttp.BaseConnector(loop=loop, limit=5)
conn._release_waiter = mock.Mock()
conn._acquired.add(proto)
conn._acquired_per_host[key].add(proto)
conn._closed = True
conn._release_acquired(key, proto)
assert 1 == len(conn._acquired)
assert 1 == len(conn._acquired_per_host[key])
assert not conn._release_waiter.called
conn.close()
def test_release(loop, key):
loop.time = mock.Mock(return_value=10)
conn = aiohttp.BaseConnector(loop=loop)
conn._release_waiter = mock.Mock()
proto = mock.Mock(should_close=False)
conn._acquired.add(proto)
conn._acquired_per_host[key].add(proto)
conn._release(key, proto)
assert conn._release_waiter.called
assert conn._conns[key][0] == (proto, 10)
assert not conn._cleanup_closed_transports
conn.close()
def test_release_ssl_transport(loop, ssl_key):
loop.time = mock.Mock(return_value=10)
conn = aiohttp.BaseConnector(loop=loop, enable_cleanup_closed=True)
conn._release_waiter = mock.Mock()
proto = mock.Mock()
conn._acquired.add(proto)
conn._acquired_per_host[ssl_key].add(proto)
conn._release(ssl_key, proto, should_close=True)
assert conn._cleanup_closed_transports == [proto.close.return_value]
conn.close()
def test_release_already_closed(loop):
conn = aiohttp.BaseConnector(loop=loop)
proto = mock.Mock()
key = 1
conn._acquired.add(proto)
conn.close()
conn._release_waiters = mock.Mock()
conn._release_acquired = mock.Mock()
conn._release(key, proto)
assert not conn._release_waiters.called
assert not conn._release_acquired.called
def test_release_waiter(loop, key, key2):
# limit is 0
conn = aiohttp.BaseConnector(limit=0, loop=loop)
w = mock.Mock()
w.done.return_value = False
conn._waiters[key].append(w)
conn._release_waiter()
assert len(conn._waiters) == 1
assert not w.done.called
conn.close()
# release first available
conn = aiohttp.BaseConnector(loop=loop)
w1, w2 = mock.Mock(), mock.Mock()
w1.done.return_value = False
w2.done.return_value = False
conn._waiters[key].append(w2)
conn._waiters[key2].append(w1)
conn._release_waiter()
assert (w1.set_result.called and not w2.set_result.called or
not w1.set_result.called and w2.set_result.called)
conn.close()
# limited available
conn = aiohttp.BaseConnector(loop=loop, limit=1)
w1, w2 = mock.Mock(), mock.Mock()
w1.done.return_value = False
w2.done.return_value = False
conn._waiters[key] = [w1, w2]
conn._release_waiter()
assert w1.set_result.called
assert not w2.set_result.called
conn.close()
    # limited available: the first waiter is already done, so nothing gets woken
conn = aiohttp.BaseConnector(loop=loop, limit=1)
w1, w2 = mock.Mock(), mock.Mock()
w1.done.return_value = True
w2.done.return_value = False
conn._waiters[key] = [w1, w2]
conn._release_waiter()
assert not w1.set_result.called
assert not w2.set_result.called
conn.close()
def test_release_waiter_per_host(loop, key, key2):
# no limit
conn = aiohttp.BaseConnector(loop=loop, limit=0, limit_per_host=2)
w1, w2 = mock.Mock(), mock.Mock()
w1.done.return_value = False
w2.done.return_value = False
conn._waiters[key] = [w1]
conn._waiters[key2] = [w2]
conn._release_waiter()
assert ((w1.set_result.called and not w2.set_result.called) or
(not w1.set_result.called and w2.set_result.called))
conn.close()
def test_release_close(loop):
conn = aiohttp.BaseConnector(loop=loop)
proto = mock.Mock(should_close=True)
key = ('localhost', 80, False)
conn._acquired.add(proto)
conn._release(key, proto)
assert not conn._conns
assert proto.close.called
@asyncio.coroutine
def test_tcp_connector_resolve_host_use_dns_cache(loop):
conn = aiohttp.TCPConnector(loop=loop, use_dns_cache=True)
res = yield from conn._resolve_host('localhost', 8080)
assert res
for rec in res:
if rec['family'] == socket.AF_INET:
assert rec['host'] == '127.0.0.1'
assert rec['hostname'] == 'localhost'
assert rec['port'] == 8080
elif rec['family'] == socket.AF_INET6:
assert rec['hostname'] == 'localhost'
assert rec['port'] == 8080
if platform.system() == 'Darwin':
assert rec['host'] in ('::1', 'fe80::1', 'fe80::1%lo0')
else:
assert rec['host'] == '::1'
@asyncio.coroutine
def test_tcp_connector_resolve_host_twice_use_dns_cache(loop):
conn = aiohttp.TCPConnector(loop=loop, use_dns_cache=True)
res = yield from conn._resolve_host('localhost', 8080)
res2 = yield from conn._resolve_host('localhost', 8080)
assert res is res2
def test_get_pop_empty_conns(loop):
# see issue #473
conn = aiohttp.BaseConnector(loop=loop)
key = ('127.0.0.1', 80, False)
conn._conns[key] = []
proto = conn._get(key)
assert proto is None
assert not conn._conns
def test_release_close_do_not_add_to_pool(loop):
# see issue #473
conn = aiohttp.BaseConnector(loop=loop)
key = ('127.0.0.1', 80, False)
proto = mock.Mock(should_close=True)
conn._acquired.add(proto)
conn._release(key, proto)
assert not conn._conns
def test_release_close_do_not_delete_existing_connections(loop):
key = ('127.0.0.1', 80, False)
proto1 = mock.Mock()
conn = aiohttp.BaseConnector(loop=loop)
conn._conns[key] = [(proto1, 1)]
proto = mock.Mock(should_close=True)
conn._acquired.add(proto)
conn._release(key, proto)
assert conn._conns[key] == [(proto1, 1)]
assert proto.close.called
conn.close()
def test_release_not_started(loop):
loop.time = mock.Mock(return_value=10)
conn = aiohttp.BaseConnector(loop=loop)
proto = mock.Mock(should_close=False)
key = 1
conn._acquired.add(proto)
conn._release(key, proto)
assert conn._conns == {1: [(proto, 10)]}
assert not proto.close.called
conn.close()
def test_release_not_opened(loop):
conn = aiohttp.BaseConnector(loop=loop)
proto = mock.Mock()
key = ('localhost', 80, False)
conn._acquired.add(proto)
conn._release(key, proto)
assert proto.close.called
@asyncio.coroutine
def test_connect(loop):
proto = mock.Mock()
proto.is_connected.return_value = True
req = ClientRequest('GET', URL('http://host:80'), loop=loop)
conn = aiohttp.BaseConnector(loop=loop)
key = ('host', 80, False)
conn._conns[key] = [(proto, loop.time())]
conn._create_connection = mock.Mock()
conn._create_connection.return_value = helpers.create_future(loop)
conn._create_connection.return_value.set_result(proto)
connection = yield from conn.connect(req)
assert not conn._create_connection.called
assert connection._protocol is proto
assert connection.transport is proto.transport
assert isinstance(connection, Connection)
connection.close()
@asyncio.coroutine
def test_connect_oserr(loop):
conn = aiohttp.BaseConnector(loop=loop)
conn._create_connection = mock.Mock()
conn._create_connection.return_value = helpers.create_future(loop)
err = OSError(1, 'permission error')
conn._create_connection.return_value.set_exception(err)
with pytest.raises(aiohttp.ClientOSError) as ctx:
req = mock.Mock()
yield from conn.connect(req)
assert 1 == ctx.value.errno
assert ctx.value.strerror.startswith('Cannot connect to')
assert ctx.value.strerror.endswith('[permission error]')
def test_ctor_cleanup():
loop = mock.Mock()
loop.time.return_value = 1.5
conn = aiohttp.BaseConnector(
loop=loop, keepalive_timeout=10, enable_cleanup_closed=True)
assert conn._cleanup_handle is None
assert conn._cleanup_closed_handle is not None
def test_cleanup():
key = ('localhost', 80, False)
testset = {
key: [(mock.Mock(), 10),
(mock.Mock(), 300)],
}
testset[key][0][0].is_connected.return_value = True
testset[key][1][0].is_connected.return_value = False
loop = mock.Mock()
loop.time.return_value = 300
conn = aiohttp.BaseConnector(loop=loop)
conn._conns = testset
existing_handle = conn._cleanup_handle = mock.Mock()
conn._cleanup()
assert existing_handle.cancel.called
assert conn._conns == {}
assert conn._cleanup_handle is not None
def test_cleanup_close_ssl_transport():
proto = mock.Mock()
key = ('localhost', 80, True)
testset = {key: [(proto, 10)]}
loop = mock.Mock()
loop.time.return_value = 300
conn = aiohttp.BaseConnector(loop=loop, enable_cleanup_closed=True)
conn._conns = testset
existing_handle = conn._cleanup_handle = mock.Mock()
conn._cleanup()
assert existing_handle.cancel.called
assert conn._conns == {}
assert conn._cleanup_closed_transports == [proto.close.return_value]
def test_cleanup2():
testset = {1: [(mock.Mock(), 300)]}
testset[1][0][0].is_connected.return_value = True
loop = mock.Mock()
loop.time.return_value = 300
conn = aiohttp.BaseConnector(loop=loop, keepalive_timeout=10)
conn._conns = testset
conn._cleanup()
assert conn._conns == testset
assert conn._cleanup_handle is not None
loop.call_at.assert_called_with(310, mock.ANY, mock.ANY)
conn.close()
def test_cleanup3():
key = ('localhost', 80, False)
testset = {key: [(mock.Mock(), 290.1),
(mock.Mock(), 305.1)]}
testset[key][0][0].is_connected.return_value = True
loop = mock.Mock()
loop.time.return_value = 308.5
conn = aiohttp.BaseConnector(loop=loop, keepalive_timeout=10)
conn._conns = testset
conn._cleanup()
assert conn._conns == {key: [testset[key][1]]}
assert conn._cleanup_handle is not None
loop.call_at.assert_called_with(319, mock.ANY, mock.ANY)
conn.close()
def test_cleanup_closed(loop, mocker):
mocker.spy(loop, 'call_at')
conn = aiohttp.BaseConnector(loop=loop, enable_cleanup_closed=True)
tr = mock.Mock()
conn._cleanup_closed_handle = cleanup_closed_handle = mock.Mock()
conn._cleanup_closed_transports = [tr]
conn._cleanup_closed()
assert tr.abort.called
assert not conn._cleanup_closed_transports
assert loop.call_at.called
assert cleanup_closed_handle.cancel.called
def test_cleanup_closed_disabled(loop, mocker):
conn = aiohttp.BaseConnector(
loop=loop, enable_cleanup_closed=False)
tr = mock.Mock()
conn._cleanup_closed_transports = [tr]
conn._cleanup_closed()
assert tr.abort.called
assert not conn._cleanup_closed_transports
def test_tcp_connector_ctor(loop):
conn = aiohttp.TCPConnector(loop=loop)
assert conn.verify_ssl
assert conn.fingerprint is None
assert conn.use_dns_cache
assert conn.family == 0
assert conn.cached_hosts == {}
def test_tcp_connector_ctor_fingerprint_valid(loop):
valid = b'\xa2\x06G\xad\xaa\xf5\xd8\\J\x99^by;\x06='
conn = aiohttp.TCPConnector(loop=loop, fingerprint=valid)
assert conn.fingerprint == valid
def test_tcp_connector_fingerprint_invalid(loop):
invalid = b'\x00'
with pytest.raises(ValueError):
aiohttp.TCPConnector(loop=loop, fingerprint=invalid)
def test_tcp_connector_clear_dns_cache(loop):
conn = aiohttp.TCPConnector(loop=loop)
info = object()
conn._cached_hosts[('localhost', 123)] = info
conn._cached_hosts[('localhost', 124)] = info
conn.clear_dns_cache('localhost', 123)
assert conn.cached_hosts == {('localhost', 124): info}
conn.clear_dns_cache('localhost', 123)
assert conn.cached_hosts == {('localhost', 124): info}
conn.clear_dns_cache()
assert conn.cached_hosts == {}
def test_tcp_connector_clear_dns_cache_bad_args(loop):
conn = aiohttp.TCPConnector(loop=loop)
with pytest.raises(ValueError):
conn.clear_dns_cache('localhost')
def test_ambiguous_verify_ssl_and_ssl_context(loop):
with pytest.raises(ValueError):
aiohttp.TCPConnector(
verify_ssl=False,
ssl_context=ssl.SSLContext(ssl.PROTOCOL_SSLv23),
loop=loop)
def test_dont_recreate_ssl_context(loop):
conn = aiohttp.TCPConnector(loop=loop)
ctx = conn.ssl_context
assert ctx is conn.ssl_context
def test_respect_precreated_ssl_context(loop):
ctx = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
conn = aiohttp.TCPConnector(loop=loop, ssl_context=ctx)
assert ctx is conn.ssl_context
def test_close_twice(loop):
proto = mock.Mock()
conn = aiohttp.BaseConnector(loop=loop)
conn._conns[1] = [(proto, object())]
conn.close()
assert not conn._conns
assert proto.close.called
assert conn.closed
conn._conns = 'Invalid' # fill with garbage
conn.close()
assert conn.closed
def test_close_cancels_cleanup_handle(loop):
conn = aiohttp.BaseConnector(loop=loop)
conn._release(1, mock.Mock(should_close=False))
assert conn._cleanup_handle is not None
conn.close()
assert conn._cleanup_handle is None
def test_close_abort_closed_transports(loop):
tr = mock.Mock()
conn = aiohttp.BaseConnector(loop=loop)
conn._cleanup_closed_transports.append(tr)
conn.close()
assert not conn._cleanup_closed_transports
assert tr.abort.called
assert conn.closed
def test_close_cancels_cleanup_closed_handle(loop):
conn = aiohttp.BaseConnector(loop=loop, enable_cleanup_closed=True)
assert conn._cleanup_closed_handle is not None
conn.close()
assert conn._cleanup_closed_handle is None
def test_ctor_with_default_loop():
loop = asyncio.new_event_loop()
asyncio.set_event_loop(loop)
conn = aiohttp.BaseConnector()
assert loop is conn._loop
loop.close()
@asyncio.coroutine
def test_connect_with_limit(loop, key):
proto = mock.Mock()
proto.is_connected.return_value = True
req = ClientRequest('GET', URL('http://localhost1:80'),
loop=loop,
response_class=mock.Mock())
conn = aiohttp.BaseConnector(loop=loop, limit=1)
conn._conns[key] = [(proto, loop.time())]
conn._create_connection = mock.Mock()
conn._create_connection.return_value = helpers.create_future(loop)
conn._create_connection.return_value.set_result(proto)
connection1 = yield from conn.connect(req)
assert connection1._protocol == proto
assert 1 == len(conn._acquired)
assert proto in conn._acquired
assert key in conn._acquired_per_host
assert proto in conn._acquired_per_host[key]
acquired = False
@asyncio.coroutine
def f():
nonlocal acquired
connection2 = yield from conn.connect(req)
acquired = True
assert 1 == len(conn._acquired)
assert 1 == len(conn._acquired_per_host[key])
connection2.release()
task = helpers.ensure_future(f(), loop=loop)
yield from asyncio.sleep(0.01, loop=loop)
assert not acquired
connection1.release()
yield from asyncio.sleep(0, loop=loop)
assert acquired
yield from task
conn.close()
@asyncio.coroutine
def test_connect_with_limit_and_limit_per_host(loop, key):
proto = mock.Mock()
proto.is_connected.return_value = True
req = ClientRequest('GET', URL('http://localhost1:80'), loop=loop)
conn = aiohttp.BaseConnector(loop=loop, limit=1000, limit_per_host=1)
conn._conns[key] = [(proto, loop.time())]
conn._create_connection = mock.Mock()
conn._create_connection.return_value = helpers.create_future(loop)
conn._create_connection.return_value.set_result(proto)
acquired = False
connection1 = yield from conn.connect(req)
@asyncio.coroutine
def f():
nonlocal acquired
connection2 = yield from conn.connect(req)
acquired = True
assert 1 == len(conn._acquired)
assert 1 == len(conn._acquired_per_host[key])
connection2.release()
task = helpers.ensure_future(f(), loop=loop)
yield from asyncio.sleep(0.01, loop=loop)
assert not acquired
connection1.release()
yield from asyncio.sleep(0, loop=loop)
assert acquired
yield from task
conn.close()
@asyncio.coroutine
def test_connect_with_no_limit_and_limit_per_host(loop, key):
proto = mock.Mock()
proto.is_connected.return_value = True
req = ClientRequest('GET', URL('http://localhost1:80'), loop=loop)
conn = aiohttp.BaseConnector(loop=loop, limit=0, limit_per_host=1)
conn._conns[key] = [(proto, loop.time())]
conn._create_connection = mock.Mock()
conn._create_connection.return_value = helpers.create_future(loop)
conn._create_connection.return_value.set_result(proto)
acquired = False
connection1 = yield from conn.connect(req)
@asyncio.coroutine
def f():
nonlocal acquired
connection2 = yield from conn.connect(req)
acquired = True
connection2.release()
task = helpers.ensure_future(f(), loop=loop)
yield from asyncio.sleep(0.01, loop=loop)
assert not acquired
connection1.release()
yield from asyncio.sleep(0, loop=loop)
assert acquired
yield from task
conn.close()
@asyncio.coroutine
def test_connect_with_no_limits(loop, key):
proto = mock.Mock()
proto.is_connected.return_value = True
req = ClientRequest('GET', URL('http://localhost1:80'), loop=loop)
conn = aiohttp.BaseConnector(loop=loop, limit=0, limit_per_host=0)
conn._conns[key] = [(proto, loop.time())]
conn._create_connection = mock.Mock()
conn._create_connection.return_value = helpers.create_future(loop)
conn._create_connection.return_value.set_result(proto)
acquired = False
connection1 = yield from conn.connect(req)
@asyncio.coroutine
def f():
nonlocal acquired
connection2 = yield from conn.connect(req)
acquired = True
assert 1 == len(conn._acquired)
assert 1 == len(conn._acquired_per_host[key])
connection2.release()
task = helpers.ensure_future(f(), loop=loop)
yield from asyncio.sleep(0.01, loop=loop)
assert acquired
connection1.release()
yield from task
conn.close()
@asyncio.coroutine
def test_connect_with_limit_cancelled(loop):
proto = mock.Mock()
proto.is_connected.return_value = True
req = ClientRequest('GET', URL('http://host:80'), loop=loop)
conn = aiohttp.BaseConnector(loop=loop, limit=1)
key = ('host', 80, False)
conn._conns[key] = [(proto, loop.time())]
conn._create_connection = mock.Mock()
conn._create_connection.return_value = helpers.create_future(loop)
conn._create_connection.return_value.set_result(proto)
connection = yield from conn.connect(req)
assert connection._protocol == proto
assert connection.transport == proto.transport
assert 1 == len(conn._acquired)
with pytest.raises(asyncio.TimeoutError):
# limit exhausted
yield from asyncio.wait_for(conn.connect(req), 0.01,
loop=loop)
connection.close()
@asyncio.coroutine
def test_connect_with_capacity_release_waiters(loop):
    # check_with_exc contains ``yield from``, so it must be a coroutine that is
    # explicitly driven below; otherwise its body and assertions never execute.
    @asyncio.coroutine
    def check_with_exc(err):
conn = aiohttp.BaseConnector(limit=1, loop=loop)
conn._create_connection = mock.Mock()
conn._create_connection.return_value = \
helpers.create_future(loop)
conn._create_connection.return_value.set_exception(err)
with pytest.raises(Exception):
req = mock.Mock()
yield from conn.connect(req)
assert not conn._waiters
    yield from check_with_exc(OSError(1, 'permission error'))
    yield from check_with_exc(RuntimeError())
    yield from check_with_exc(asyncio.TimeoutError())
@asyncio.coroutine
def test_connect_with_limit_concurrent(loop):
proto = mock.Mock()
proto.should_close = False
proto.is_connected.return_value = True
req = ClientRequest('GET', URL('http://host:80'), loop=loop)
max_connections = 2
num_connections = 0
conn = aiohttp.BaseConnector(limit=max_connections, loop=loop)
# Use a real coroutine for _create_connection; a mock would mask
# problems that only happen when the method yields.
@asyncio.coroutine
def create_connection(req):
nonlocal num_connections
num_connections += 1
yield from asyncio.sleep(0, loop=loop)
# Make a new transport mock each time because acquired
# transports are stored in a set. Reusing the same object
# messes with the count.
proto = mock.Mock(should_close=False)
proto.is_connected.return_value = True
return proto
conn._create_connection = create_connection
# Simulate something like a crawler. It opens a connection, does
# something with it, closes it, then creates tasks that make more
# connections and waits for them to finish. The crawler is started
# with multiple concurrent requests and stops when it hits a
# predefined maximum number of requests.
max_requests = 10
num_requests = 0
start_requests = max_connections + 1
@asyncio.coroutine
def f(start=True):
nonlocal num_requests
if num_requests == max_requests:
return
num_requests += 1
if not start:
connection = yield from conn.connect(req)
yield from asyncio.sleep(0, loop=loop)
connection.release()
tasks = [
helpers.ensure_future(f(start=False), loop=loop)
for i in range(start_requests)
]
yield from asyncio.wait(tasks, loop=loop)
yield from f()
conn.close()
assert max_connections == num_connections
@asyncio.coroutine
def test_close_with_acquired_connection(loop):
proto = mock.Mock()
proto.is_connected.return_value = True
req = ClientRequest('GET', URL('http://host:80'), loop=loop)
conn = aiohttp.BaseConnector(loop=loop, limit=1)
key = ('host', 80, False)
conn._conns[key] = [(proto, loop.time())]
conn._create_connection = mock.Mock()
conn._create_connection.return_value = helpers.create_future(loop)
conn._create_connection.return_value.set_result(proto)
connection = yield from conn.connect(req)
assert 1 == len(conn._acquired)
conn.close()
assert 0 == len(conn._acquired)
assert conn.closed
proto.close.assert_called_with()
assert not connection.closed
connection.close()
assert connection.closed
def test_default_force_close(loop):
connector = aiohttp.BaseConnector(loop=loop)
assert not connector.force_close
def test_limit_property(loop):
conn = aiohttp.BaseConnector(loop=loop, limit=15)
assert 15 == conn.limit
conn.close()
def test_limit_by_host_property(loop):
conn = aiohttp.BaseConnector(loop=loop, limit_per_host=15)
assert 15 == conn.limit_per_host
conn.close()
def test_limit_property_default(loop):
conn = aiohttp.BaseConnector(loop=loop)
assert conn.limit == 100
conn.close()
def test_limit_per_host_property_default(loop):
conn = aiohttp.BaseConnector(loop=loop)
assert conn.limit_per_host == 0
conn.close()
def test_force_close_and_explicit_keep_alive(loop):
with pytest.raises(ValueError):
aiohttp.BaseConnector(loop=loop, keepalive_timeout=30,
force_close=True)
conn = aiohttp.BaseConnector(loop=loop, force_close=True,
keepalive_timeout=None)
assert conn
conn = aiohttp.BaseConnector(loop=loop, force_close=True)
assert conn
@asyncio.coroutine
def test_tcp_connector(test_client, loop):
@asyncio.coroutine
def handler(request):
return web.HTTPOk()
app = web.Application()
app.router.add_get('/', handler)
client = yield from test_client(app)
r = yield from client.get('/')
assert r.status == 200
def test_default_use_dns_cache(loop):
conn = aiohttp.TCPConnector(loop=loop)
assert conn.use_dns_cache
class TestHttpClientConnector(unittest.TestCase):
def setUp(self):
self.handler = None
self.loop = asyncio.new_event_loop()
asyncio.set_event_loop(None)
def tearDown(self):
if self.handler:
self.loop.run_until_complete(self.handler.finish_connections())
self.loop.stop()
self.loop.run_forever()
self.loop.close()
gc.collect()
@asyncio.coroutine
def create_server(self, method, path, handler):
app = web.Application()
app.router.add_route(method, path, handler)
port = unused_port()
self.handler = app.make_handler(loop=self.loop, tcp_keepalive=False)
srv = yield from self.loop.create_server(
self.handler, '127.0.0.1', port)
url = "http://127.0.0.1:{}".format(port) + path
self.addCleanup(srv.close)
return app, srv, url
@asyncio.coroutine
def create_unix_server(self, method, path, handler):
tmpdir = tempfile.mkdtemp()
self.addCleanup(shutil.rmtree, tmpdir)
app = web.Application()
app.router.add_route(method, path, handler)
self.handler = app.make_handler(
loop=self.loop, tcp_keepalive=False, access_log=None)
sock_path = os.path.join(tmpdir, 'socket.sock')
srv = yield from self.loop.create_unix_server(
self.handler, sock_path)
url = "http://127.0.0.1" + path
self.addCleanup(srv.close)
return app, srv, url, sock_path
def test_tcp_connector_uses_provided_local_addr(self):
@asyncio.coroutine
def handler(request):
return web.HTTPOk()
app, srv, url = self.loop.run_until_complete(
self.create_server('get', '/', handler)
)
port = unused_port()
conn = aiohttp.TCPConnector(loop=self.loop,
local_addr=('127.0.0.1', port))
session = aiohttp.ClientSession(connector=conn)
r = self.loop.run_until_complete(
session.request('get', url)
)
r.release()
first_conn = next(iter(conn._conns.values()))[0][0]
self.assertEqual(
first_conn.transport._sock.getsockname(), ('127.0.0.1', port))
r.close()
session.close()
conn.close()
@unittest.skipUnless(hasattr(socket, 'AF_UNIX'), 'requires unix')
def test_unix_connector(self):
@asyncio.coroutine
def handler(request):
return web.HTTPOk()
app, srv, url, sock_path = self.loop.run_until_complete(
self.create_unix_server('get', '/', handler))
connector = aiohttp.UnixConnector(sock_path, loop=self.loop)
self.assertEqual(sock_path, connector.path)
session = client.ClientSession(
connector=connector, loop=self.loop)
r = self.loop.run_until_complete(
session.request('get', url))
self.assertEqual(r.status, 200)
r.close()
session.close()
def test_resolver_not_called_with_address_is_ip(self):
resolver = mock.MagicMock()
connector = aiohttp.TCPConnector(resolver=resolver, loop=self.loop)
req = ClientRequest('GET',
URL('http://127.0.0.1:{}'.format(unused_port())),
loop=self.loop,
response_class=mock.Mock())
with self.assertRaises(OSError):
self.loop.run_until_complete(connector.connect(req))
resolver.resolve.assert_not_called()
|
|
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from __future__ import absolute_import
import logging
import os
import shutil
from six.moves import input # pylint: disable=redefined-builtin
from telemetry.core import exceptions
from telemetry.core import platform as platform_module
from telemetry.internal.backends.chrome import gpu_compositing_checker
from telemetry.internal.browser import browser_info as browser_info_module
from telemetry.internal.browser import browser_interval_profiling_controller
from telemetry.internal.platform import android_device
from telemetry.page import cache_temperature
from telemetry.page import legacy_page_test
from telemetry.page import traffic_setting
from telemetry import story as story_module
from telemetry.util import screenshot
class SharedPageState(story_module.SharedState):
"""
This class contains all specific logic necessary to run a Chrome browser
benchmark.
"""
_device_type = None
def __init__(self, test, finder_options, story_set, possible_browser):
super(SharedPageState, self).__init__(
test, finder_options, story_set, possible_browser)
self._page_test = None
if issubclass(type(test), legacy_page_test.LegacyPageTest):
# We only need a page_test for legacy measurements that involve running
# some commands before/after starting the browser or navigating to a page.
# This is not needed for newer timeline (tracing) based benchmarks which
# just collect a trace, then measurements are done after the fact by
# analysing the trace itself.
self._page_test = test
self._page_test_results = None
if (self._device_type == 'desktop' and
platform_module.GetHostPlatform().GetOSName() == 'chromeos'):
self._device_type = 'chromeos'
if (possible_browser.browser_type == 'web-engine-shell' or
possible_browser.browser_type == 'fuchsia-chrome'):
self._device_type = None
browser_options = finder_options.browser_options
browser_options.browser_user_agent_type = self._device_type
if self._page_test:
self._page_test.CustomizeBrowserOptions(browser_options)
self._browser = None
self._extra_browser_args = None
self._finder_options = finder_options
self._first_browser = True
self._current_page = None
self._current_tab = None
if self._page_test:
self._page_test.SetOptions(self._finder_options)
self._extra_wpr_args = browser_options.extra_wpr_args
if (hasattr(finder_options, 'use_local_wpr') and
finder_options.use_local_wpr):
self._extra_wpr_args.append('--use-local-wpr')
if (hasattr(finder_options, 'disable_fuzzy_url_matching') and
finder_options.disable_fuzzy_url_matching):
self._extra_wpr_args.append('--disable-fuzzy-url-matching')
profiling_mod = browser_interval_profiling_controller
self._interval_profiling_controller = (
profiling_mod.BrowserIntervalProfilingController(
possible_browser=self._possible_browser,
process_name=finder_options.interval_profiling_target,
periods=finder_options.interval_profiling_periods,
frequency=finder_options.interval_profiling_frequency,
profiler_options=finder_options.interval_profiler_options))
self.platform.SetPerformanceMode(finder_options.performance_mode)
self._perf_mode_set = (finder_options.performance_mode !=
android_device.KEEP_PERFORMANCE_MODE)
self.platform.network_controller.Open(self.wpr_mode)
self.platform.Initialize()
self._video_recording_enabled = (self._finder_options.capture_screen_video
and self.platform.CanRecordVideo())
@property
def interval_profiling_controller(self):
return self._interval_profiling_controller
@property
def browser(self):
return self._browser
def DumpStateUponStoryRunFailure(self, results):
# Dump browser standard output and log.
if self._browser:
self._browser.DumpStateUponFailure()
else:
logging.warning('Cannot dump browser state: No browser.')
# Capture a screenshot
if self._finder_options.browser_options.take_screenshot_for_failed_page:
fh = screenshot.TryCaptureScreenShot(self.platform, self._current_tab)
if fh is not None:
with results.CaptureArtifact('screenshot.png') as path:
shutil.move(fh.GetAbsPath(), path)
else:
logging.warning('Taking screenshots upon failures disabled.')
def DidRunStory(self, results):
self._AllowInteractionForStage('after-run-story')
try:
if not self.ShouldReuseBrowserForAllStoryRuns():
self._StopBrowser()
elif self._current_tab:
# We might hang while trying to close the connection, and need to
# guarantee the page will get cleaned up to avoid future tests failing
# in weird ways.
try:
if self._current_tab.IsAlive():
self._current_tab.CloseConnections()
except Exception as exc: # pylint: disable=broad-except
logging.warning(
'%s raised while closing tab connections; tab will be closed.',
type(exc).__name__)
self._current_tab.Close()
self._interval_profiling_controller.GetResults(
self._current_page.file_safe_name, results)
finally:
self._current_page = None
self._current_tab = None
if self._video_recording_enabled:
with results.CaptureArtifact('recording.mp4') as video_path:
self.platform.StopVideoRecording(video_path)
def ShouldReuseBrowserForAllStoryRuns(self):
"""Whether a single browser instance should be reused to run all stories.
This should return False in most situations in order to help maintain
independence between measurements taken on different story runs.
The default implementation only allows reusing the browser in ChromeOs,
where bringing up the browser for each story is expensive.
"""
return self.platform.GetOSName() == 'chromeos'
@property
def platform(self):
return self._possible_browser.platform
def _AllowInteractionForStage(self, stage):
if self._finder_options.pause == stage:
input('Pausing for interaction at %s... Press Enter to continue.' %
stage)
def _StartBrowser(self, page):
assert self._browser is None
self._AllowInteractionForStage('before-start-browser')
if self._page_test:
self._page_test.WillStartBrowser(self.platform)
# Create a deep copy of browser_options so that we can add page-level
# arguments and url to it without polluting the run for the next page.
browser_options = self._finder_options.browser_options.Copy()
self._extra_browser_args = page.extra_browser_args
browser_options.AppendExtraBrowserArgs(page.extra_browser_args)
self._possible_browser.SetUpEnvironment(browser_options)
# Clear caches before starting browser.
self.platform.FlushDnsCache()
if browser_options.flush_os_page_caches_on_start:
self._possible_browser.FlushOsPageCaches()
self._browser = self._possible_browser.Create()
if self._page_test:
self._page_test.DidStartBrowser(self.browser)
if browser_options.assert_gpu_compositing:
gpu_compositing_checker.AssertGpuCompositingEnabled(
self._browser.GetSystemInfo())
if self._first_browser:
self._first_browser = False
# Cut back on mostly redundant logs length per crbug.com/943650.
self._finder_options.browser_options.trim_logs = True
self._AllowInteractionForStage('after-start-browser')
def WillRunStory(self, story):
reusing_browser = self.browser is not None
page = story
# Make sure we don't have accidentally diverging browser args.
if reusing_browser and self._extra_browser_args != page._extra_browser_args:
self._StopBrowser()
reusing_browser = False
if not self.platform.tracing_controller.is_tracing_running:
# For TimelineBasedMeasurement benchmarks, tracing has already started.
# For PageTest benchmarks, tracing has not yet started. We need to make
# sure no tracing state is left before starting the browser for PageTest
# benchmarks.
self.platform.tracing_controller.ClearStateIfNeeded()
self._current_page = page
archive_path = page.story_set.WprFilePathForStory(
page, self.platform.GetOSName())
# TODO(crbug.com/1029785): Ideally we should just let the network
# controller raise an exception when the archive_path is not found.
if archive_path is not None and not os.path.isfile(archive_path):
logging.warning('WPR archive missing: %s', archive_path)
archive_path = None
self.platform.network_controller.StartReplay(
archive_path, page.make_javascript_deterministic, self._extra_wpr_args)
if self._video_recording_enabled:
self.platform.StartVideoRecording()
if reusing_browser:
assert self._extra_browser_args == page._extra_browser_args
else:
self._StartBrowser(page)
if self.browser.supports_tab_control:
if reusing_browser:
# Try to close all previous tabs to maintain some independence between
# individual story runs. Note that the final tab.Close(keep_one=True)
# will create a fresh new tab before the last one is closed.
while len(self.browser.tabs) > 1:
self.browser.tabs[-1].Close()
self.browser.tabs[-1].Close(keep_one=True)
else:
# Create a tab if there's none.
if len(self.browser.tabs) == 0:
self.browser.tabs.New()
# Must wait for tab to commit otherwise it can commit after the next
# navigation has begun and RenderFrameHostManager::DidNavigateMainFrame()
# will cancel the next navigation because it's pending. This manifests as
# the first navigation in a PageSet freezing indefinitely because the
# navigation was silently canceled when |self.browser.tabs[0]| was
# committed.
self.browser.tabs[0].WaitForDocumentReadyStateToBeComplete()
# Reset traffic shaping to speed up cache temperature setup.
self.platform.network_controller.UpdateTrafficSettings(0, 0, 0)
cache_temperature.EnsurePageCacheTemperature(
self._current_page, self.browser)
if self._current_page.traffic_setting != traffic_setting.NONE:
s = traffic_setting.NETWORK_CONFIGS[self._current_page.traffic_setting]
self.platform.network_controller.UpdateTrafficSettings(
round_trip_latency_ms=s.round_trip_latency_ms,
download_bandwidth_kbps=s.download_bandwidth_kbps,
upload_bandwidth_kbps=s.upload_bandwidth_kbps)
self._AllowInteractionForStage('before-run-story')
def CanRunStory(self, story):
return self.CanRunOnBrowser(browser_info_module.BrowserInfo(self.browser),
story)
def CanRunOnBrowser(self, browser_info, page):
"""Override this to return whether the browser brought up by this state
instance is suitable for running the given page.
Args:
browser_info: an instance of telemetry.core.browser_info.BrowserInfo
page: an instance of telemetry.page.Page
"""
del browser_info, page # unused
return True
def _GetCurrentTab(self):
try:
return self.browser.tabs[0]
# The tab may have gone away in some cases, so we create a new tab and retry
# (See crbug.com/496280)
except exceptions.DevtoolsTargetCrashException as e:
logging.error('Tab may have crashed: %s' % str(e))
self.browser.tabs.New()
# See below in WillRunStory for why this waiting is needed.
self.browser.tabs[0].WaitForDocumentReadyStateToBeComplete()
return self.browser.tabs[0]
def _PreparePage(self):
self._current_tab = self._GetCurrentTab()
if self._current_page.is_file:
self.platform.SetHTTPServerDirectories(
self._current_page.story_set.serving_dirs
| {self._current_page.serving_dir},
self._current_page.story_set.request_handler_class)
@property
def current_page(self):
return self._current_page
@property
def current_tab(self):
return self._current_tab
def NavigateToPage(self, action_runner, page):
# Method called by page.Run(), lives in shared_state to avoid exposing
# references to the legacy self._page_test object.
if self._page_test:
self._page_test.WillNavigateToPage(page, action_runner.tab)
with self.interval_profiling_controller.SamplePeriod(
'navigation', action_runner):
page.RunNavigateSteps(action_runner)
if self._page_test:
self._page_test.DidNavigateToPage(page, action_runner.tab)
def RunPageInteractions(self, action_runner, page):
# The purpose is similar to NavigateToPage.
with self.interval_profiling_controller.SamplePeriod(
'interactions', action_runner):
page.RunPageInteractions(action_runner)
if self._page_test:
self._page_test.ValidateAndMeasurePage(
page, action_runner.tab, self._page_test_results)
def RunStory(self, results):
self._PreparePage()
self._page_test_results = results
self._current_page.Run(self)
self._page_test_results = None
def TearDownState(self):
self._StopBrowser()
self.platform.StopAllLocalServers()
self.platform.network_controller.Close()
if self._perf_mode_set:
self.platform.SetPerformanceMode(android_device.NORMAL_PERFORMANCE_MODE)
def _StopBrowser(self):
if self._browser:
self._browser.Close()
self._browser = None
if self._possible_browser:
self._possible_browser.CleanUpEnvironment()
class SharedMobilePageState(SharedPageState):
_device_type = 'mobile'
class SharedDesktopPageState(SharedPageState):
_device_type = 'desktop'
class SharedTabletPageState(SharedPageState):
_device_type = 'tablet'
class Shared10InchTabletPageState(SharedPageState):
_device_type = 'tablet_10_inch'
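# Note on the subclasses above (descriptive only, not part of the original module):
# benchmarks select a user agent by picking one of these SharedPageState variants;
# each one only overrides the single _device_type attribute consumed in __init__.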
|
|
import random
import string
import sys
import click
from collections import OrderedDict
import hvac
import os
import requests.packages.urllib3
file_name = "vault_token.txt"
def get_vault_client():
"""
Return a vault client if possible.
"""
# Disable warnings for the insecure calls
requests.packages.urllib3.disable_warnings()
token_type = "GITHUB|SANCTUARY"
vault_addr = os.getenv("VAULT_ADDR", "https://vault.drud.com:8200")
sanctuary_token_path = os.path.join('/var/jenkins_home/workspace/ops-create-sanctuary-token/', file_name)
if os.path.exists(sanctuary_token_path):
with open(sanctuary_token_path, 'r') as fp:
vault_token = fp.read().strip()
token_type = "SANCTUARY"
else:
vault_token = os.getenv("GITHUB_TOKEN")
token_type = "GITHUB"
if not vault_addr or not vault_token:
print "You must provide a GITHUB_TOKEN environment variables."
print "(Have you authenticated with drud using `drud auth github` to create your GITHUB_TOKEN?)"
sys.exit(1)
if token_type == "SANCTUARY":
vault_client = hvac.Client(url=vault_addr, token=vault_token, verify=False)
elif token_type == "GITHUB":
vault_client = hvac.Client(url=vault_addr, verify=False)
vault_client.auth_github(vault_token)
else: # The token value was not overridden
print "Something went wrong."
sys.exit(1)
if vault_client.is_initialized() and vault_client.is_sealed():
print "Vault is initialized but sealed."
sys.exit(1)
if not vault_client.is_authenticated():
print "Could not get auth."
sys.exit(1)
print "Using {t_type} for authentication.".format(t_type=token_type.lower())
return vault_client
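# Hedged usage sketch (values below are placeholders, not real credentials):
# outside of Jenkins the client is typically obtained with a GitHub token, e.g.
#   VAULT_ADDR=https://vault.drud.com:8200 GITHUB_TOKEN=<token> python <this script> ...
# while on the Jenkins master the sanctuary token file checked above takes precedence.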
def to_boolean(x):
return x.lower() == 'true'
def rand_string(num_digits=16, string_type=string.hexdigits):
if string_type == "base64":
string_type = string.ascii_letters + string.digits + "+" + "/"
return ''.join([random.choice(string_type) for _ in range(num_digits)])
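# Hedged note on rand_string (outputs are illustrative): the default call returns
# 16 characters drawn from string.hexdigits, while rand_string(48, "base64") returns
# a 48-character string over letters/digits/+/ and /, which is how the *_key/*_salt
# and password fields below are generated.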
# sitename = sys.argv[4]
# site_type = sys.argv[5]
# db_server_local = sys.argv[6]
# db_server_staging = sys.argv[7]
# db_server_production = sys.argv[8]
# admin_username = sys.argv[9]
# #production_domain = sys.argv[10]
# new_site = to_boolean(sys.argv[10])
# wp_active_theme = sys.argv[14]
# wp_multisite = to_boolean(sys.argv[13])
@click.command()
@click.option('--sitename', type=click.STRING)
@click.option('--site-type', type=click.Choice(['wp', 'd7', 'd8', 'none']))
@click.option('--db-server-local', type=click.STRING)
@click.option('--db-server-staging', type=click.STRING)
@click.option('--db-server-production', type=click.STRING)
@click.option('--admin-username', type=click.STRING)
@click.option('--new-site', type=click.BOOL)
@click.option('--wp-active-theme', type=click.STRING)
@click.option('--wp-multisite', type=click.BOOL)
@click.option('--staging-url', type=click.STRING)
@click.option('--production-url', type=click.STRING)
@click.option('--admin-mail', type=click.STRING)
@click.option('--github-org', type=click.STRING)
def create_bag(sitename,
site_type,
db_server_local,
db_server_staging,
db_server_production,
admin_username,
new_site,
wp_active_theme,
wp_multisite,
staging_url,
production_url,
admin_mail,
github_org):
if site_type == 'd7' or site_type == 'd8':
site_type = 'drupal'
# values that are consistent across environment and platform
common = {
'sitename': sitename,
'site_type': site_type,
'apache_owner': 'nginx',
'apache_group': 'nginx',
'admin_mail': admin_mail,
'db_name': sitename,
'db_username': sitename + '_db',
'php': {
'version': "5.6"
},
'docroot': '/var/www/' + sitename + '/current/docroot'
}
# values that are different per environment
default = {
'admin_username': admin_username,
'admin_password': rand_string(),
'repository': '[email protected]:{org}/{site}.git'.format(org=github_org, site=sitename),
'revision': 'staging',
'db_host': db_server_local,
'db_user_password': rand_string(),
'server_aliases': [
"localhost"
],
'hosts': [
"localhost"
]
}
staging = {
'admin_username': admin_username,
'admin_password': rand_string(),
'repository': '[email protected]:{org}/{site}.git'.format(org=github_org, site=sitename),
'revision': 'staging',
'db_host': db_server_staging,
'db_user_password': rand_string(),
'search_replace': [
'http://localhost:1025',
],
'server_aliases': [
staging_url
]
}
production = {
'admin_username': admin_username,
'admin_password': rand_string(),
'repository': '[email protected]:{org}/{site}.git'.format(org=github_org, site=sitename),
'revision': 'master',
'db_host': db_server_production,
'db_user_password': rand_string(),
'search_replace': [
'http://'+staging_url,
'https://'+staging_url
],
'server_aliases': [
production_url
]
}
client_metadata = {
'company_legal_name': '',
'company_city': '',
'company_state': '',
'primary_contact': {
'name': '',
'phone_number': '',
'email_address': ''
},
'production_url': '',
'we_are_hosting': True,
'transactional_email': {
'provider': '',
'username': '',
'password': ''
},
'dns': {
'provider': '',
'username': '',
'password': ''
},
'ssl': {
'provider': '',
'username': '',
'password': ''
},
'theme_license': {
'item_title': '',
'item_url': '',
'item_purchase_code': '',
'purchase_date': ''
}
}
# new site
if new_site:
new_site = {
'new_site': True,
'install_profile': sitename
}
else:
new_site = {}
# Need to set new_site in all envs - add to common
common = dict(common.items() + new_site.items())
# values that are different per platform
if site_type == 'wp':
type_keys_default = {
'auth_key': rand_string(48, 'base64'),
'secure_auth_key': rand_string(48, 'base64'),
'logged_in_key': rand_string(48, 'base64'),
'nonce_key': rand_string(48, 'base64'),
'auth_salt': rand_string(48, 'base64'),
'secure_auth_salt': rand_string(48, 'base64'),
'logged_in_salt': rand_string(48, 'base64'),
'nonce_salt': rand_string(48, 'base64'),
'url': 'http://localhost:1025',
'active_theme': wp_active_theme,
'multisite': wp_multisite
}
type_keys_staging = {
'auth_key': rand_string(48, 'base64'),
'secure_auth_key': rand_string(48, 'base64'),
'logged_in_key': rand_string(48, 'base64'),
'nonce_key': rand_string(48, 'base64'),
'auth_salt': rand_string(48, 'base64'),
'secure_auth_salt': rand_string(48, 'base64'),
'logged_in_salt': rand_string(48, 'base64'),
'nonce_salt': rand_string(48, 'base64'),
'url': 'https://' + staging_url,
'active_theme': wp_active_theme,
'multisite': wp_multisite
}
type_keys_production = {
'auth_key': rand_string(48, 'base64'),
'secure_auth_key': rand_string(48, 'base64'),
'logged_in_key': rand_string(48, 'base64'),
'nonce_key': rand_string(48, 'base64'),
'auth_salt': rand_string(48, 'base64'),
'secure_auth_salt': rand_string(48, 'base64'),
'logged_in_salt': rand_string(48, 'base64'),
'nonce_salt': rand_string(48, 'base64'),
'url': 'https://' + production_url,
'active_theme': wp_active_theme,
'multisite': wp_multisite
}
elif site_type == 'drupal':
type_keys_default = {
'hash_salt': rand_string(48, 'base64'),
'cmi_sync': '/var/www/' + sitename + '/current/sync',
}
type_keys_staging = {
'hash_salt': rand_string(48, 'base64'),
'cmi_sync': '/var/www/' + sitename + '/current/sync',
}
type_keys_production = {
'hash_salt': rand_string(48, 'base64'),
'cmi_sync': '/var/www/' + sitename + '/current/sync',
}
else:
type_keys_default = {}
type_keys_staging = {}
type_keys_production = {}
# # Construct a new data bag
# bag_item = Chef::DataBagItem.new
bag_item = OrderedDict()
# bag_item.data_bag('nmdhosting')
bag_item['id'] = sitename
bag_item['_default'] = dict(common.items() + default.items() + type_keys_default.items())
bag_item['staging'] = dict(common.items() + staging.items() + type_keys_staging.items())
bag_item['production'] = dict(common.items() + production.items() + type_keys_production.items())
bag_item['client_metadata'] = client_metadata
client = get_vault_client()
is_drud_jenkins = bool(int(os.getenv("IS_DRUD_JENKINS", "0")))
folder = 'drudhosting' if is_drud_jenkins else 'nmdhosting'
secret_path = 'secret/databags/{folder}/{site}'.format(folder=folder,site=sitename)
secret = client.read(secret_path)
if secret and 'data' in secret:
raise Exception("A secret already exists at path: {path}. Please run this job again with a new site name.".format(path=secret_path))
client.write(secret_path, **bag_item)
if __name__ == '__main__':
create_bag()
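# Hedged CLI sketch (the script name and every option value below are illustrative):
#   python create_bag.py --sitename example --site-type wp \
#       --db-server-local localhost --db-server-staging db-staging \
#       --db-server-production db-prod --admin-username admin \
#       --new-site true --wp-active-theme mytheme --wp-multisite false \
#       --staging-url staging.example.com --production-url www.example.com \
#       --admin-mail admin@example.com --github-org exampleorg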
|
|
"""
This module provides decorator functions which can be applied to test objects
in order to skip those objects when certain conditions occur. A sample use case
is to detect if the platform is missing ``matplotlib``. If so, any test objects
which require ``matplotlib`` and are decorated with ``@td.skip_if_no_mpl`` will be
skipped by ``pytest`` during the execution of the test suite.
To illustrate, after importing this module:
import pandas.util._test_decorators as td
The decorators can be applied to classes:
@td.skip_if_some_reason
class Foo():
...
Or individual functions:
@td.skip_if_some_reason
def test_foo():
...
For more information, refer to the ``pytest`` documentation on ``skipif``.
"""
from distutils.version import LooseVersion
import locale
import pytest
from pandas.compat import (
PY3, import_lzma, is_platform_32bit, is_platform_windows)
from pandas.compat.numpy import _np_version_under1p15
from pandas.core.computation.expressions import (
_NUMEXPR_INSTALLED, _USE_NUMEXPR)
def safe_import(mod_name, min_version=None):
"""
Parameters
----------
mod_name : str
Name of the module to be imported
min_version : str, default None
Minimum required version of the specified mod_name
Returns
-------
object
The imported module if successful, or False
"""
try:
mod = __import__(mod_name)
except ImportError:
return False
if not min_version:
return mod
else:
import sys
try:
version = getattr(sys.modules[mod_name], '__version__')
except AttributeError:
# xlrd uses a capitalized attribute name
version = getattr(sys.modules[mod_name], '__VERSION__')
if version:
from distutils.version import LooseVersion
if LooseVersion(version) >= LooseVersion(min_version):
return mod
return False
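# Hedged usage sketch (module name and version are illustrative): the return value
# doubles as a truthiness flag, e.g.
#   mod = safe_import("numpy", min_version="1.15.0")
#   if not mod:
#       ...  # dependency missing or too old; skip the dependent code path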
def _skip_if_no_mpl():
mod = safe_import("matplotlib")
if mod:
mod.use("Agg", warn=False)
else:
return True
def _skip_if_mpl_2_2():
mod = safe_import("matplotlib")
if mod:
v = mod.__version__
if LooseVersion(v) > LooseVersion('2.1.2'):
return True
else:
mod.use("Agg", warn=False)
def _skip_if_has_locale():
lang, _ = locale.getlocale()
if lang is not None:
return True
def _skip_if_not_us_locale():
lang, _ = locale.getlocale()
if lang != 'en_US':
return True
def _skip_if_no_scipy():
return not (safe_import('scipy.stats') and
safe_import('scipy.sparse') and
safe_import('scipy.interpolate') and
safe_import('scipy.signal'))
def _skip_if_no_lzma():
try:
import_lzma()
except ImportError:
return True
def skip_if_no(package, min_version=None):
"""
Generic function to help skip test functions when required packages are not
present on the testing system.
Intended for use as a decorator, this function will wrap the decorated
function with a pytest ``skipif`` mark. During a pytest test suite
execution, that mark will attempt to import the specified ``package`` and
optionally ensure it meets the ``min_version``. If the import and version
check are unsuccessful, then the decorated function will be skipped.
Parameters
----------
package: str
The name of the package required by the decorated function
min_version: str or None, default None
Optional minimum version of the package required by the decorated
function
Returns
-------
decorated_func: function
The decorated function wrapped within a pytest ``skipif`` mark
"""
def decorated_func(func):
msg = "Could not import '{}'".format(package)
if min_version:
msg += " satisfying a min_version of {}".format(min_version)
return pytest.mark.skipif(
not safe_import(package, min_version=min_version), reason=msg
)(func)
return decorated_func
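# Hedged usage sketch (the package name, version and test below are illustrative,
# not part of the pandas test suite): the mark produced by skip_if_no makes pytest
# skip the test whenever the import or version check fails.
@skip_if_no("scipy", min_version="0.19.0")
def test_example_requires_scipy():
    import scipy  # noqa: F401  # safe: skipped when scipy is absent or too old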
skip_if_no_mpl = pytest.mark.skipif(_skip_if_no_mpl(),
reason="Missing matplotlib dependency")
skip_if_np_lt_115 = pytest.mark.skipif(_np_version_under1p15,
reason="NumPy 1.15 or greater required")
skip_if_mpl = pytest.mark.skipif(not _skip_if_no_mpl(),
reason="matplotlib is present")
xfail_if_mpl_2_2 = pytest.mark.xfail(_skip_if_mpl_2_2(),
reason="matplotlib 2.2")
skip_if_32bit = pytest.mark.skipif(is_platform_32bit(),
reason="skipping for 32 bit")
skip_if_windows = pytest.mark.skipif(is_platform_windows(),
reason="Running on Windows")
skip_if_windows_python_3 = pytest.mark.skipif(is_platform_windows() and PY3,
reason=("not used on python3/"
"win32"))
skip_if_has_locale = pytest.mark.skipif(_skip_if_has_locale(),
reason="Specific locale is set {lang}"
.format(lang=locale.getlocale()[0]))
skip_if_not_us_locale = pytest.mark.skipif(_skip_if_not_us_locale(),
reason="Specific locale is set "
"{lang}".format(
lang=locale.getlocale()[0]))
skip_if_no_scipy = pytest.mark.skipif(_skip_if_no_scipy(),
reason="Missing SciPy requirement")
skip_if_no_lzma = pytest.mark.skipif(_skip_if_no_lzma(),
reason="need backports.lzma to run")
skip_if_no_ne = pytest.mark.skipif(not _USE_NUMEXPR,
reason="numexpr enabled->{enabled}, "
"installed->{installed}".format(
enabled=_USE_NUMEXPR,
installed=_NUMEXPR_INSTALLED))
def parametrize_fixture_doc(*args):
"""
Intended for use as a decorator for a parametrized fixture,
this function wraps the decorated fixture function and formats its
docstring by replacing the placeholders {0}, {1}, etc. with the
parameters passed as arguments.
Parameters
----------
args: iterable
Positional arguments for docstring.
Returns
-------
documented_fixture: function
The decorated fixture function with its docstring formatted.
"""
def documented_fixture(fixture):
fixture.__doc__ = fixture.__doc__.format(*args)
return fixture
return documented_fixture
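# Hedged usage sketch (the fixture name and params are illustrative): the decorator
# fills the {0}/{1} placeholders in the fixture docstring with its arguments.
@parametrize_fixture_doc("gzip", "bz2")
@pytest.fixture(params=["gzip", "bz2"])
def _example_compression(request):
    """Illustrative fixture parametrized over the {0} and {1} formats."""
    return request.param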
|
|
# third party
from nacl.signing import SigningKey
from nacl.signing import VerifyKey
import pytest
# syft absolute
import syft as sy
from syft import ReprMessage
from syft import serialize
from syft.core.common.message import SignedImmediateSyftMessageWithoutReply
from syft.util import get_fully_qualified_name
def get_signing_key() -> SigningKey:
# return the signing key used to sign the get_signed_message_bytes fixture
key = "e89ff2e651b42393b6ecb5956419088781309d953d72bd73a0968525a3a6a951"
return SigningKey(bytes.fromhex(key))
def get_signed_message_bytes() -> bytes:
# return a signed message fixture containing the uid from get_uid
blob = (
b"\n?syft.core.common.message.SignedImmediateSyftMessageWithoutReply"
+ b"\x12\xd5\x02\n\x12\n\x10o\xdeI!\x8eH@\xf7\x89xQ7\x8dWN\x8b\x12"
+ b"Lsyft.core.node.common.node_service.testing_services.repr_service.ReprMessage"
+ b"\x1a@\xfe\xc3\xc9\xe4\xb7a\xc1n\xa8t\xb9\xe6n\x0c\x89\xd4Om~c\xb4\xfe\xb5\x9e\xa5"
+ b"\x19\xdeD\x18\xa8\x82zd\x11\xd9bZ<\xa6\xf4\xcb\xf6v\xc9P\xeb\x91`N\x8b\x13%\xd1\xc41"
+ b'\xbe\x18\xa22\x81B\x8f\xc2\x04" \x81\xff\xcc\xfc7\xc4U.\x8a*\x1f"=0\x10\xc4\xef\x88\xc80'
+ b"\x01\xf0}3\x0b\xd4\x97\xad/P\x8f\x0f*\x8c\x01"
+ b"\nLsyft.core.node.common.node_service.testing_services.repr_service.ReprMessage"
+ b"\x12<\n\x12\n\x10o\xdeI!\x8eH@\xf7\x89xQ7\x8dWN\x8b\x12&\n\x05alice(\x012\x1b\n\x12\n\x10"
+ b'\x8b\x8cU\x94\xad@E\x95\x8f\x9a\x8c\x10#"\x12\xb7\x12\x05alice'
)
return blob
def get_repr_message_bytes() -> bytes:
blob = (
b"\nLsyft.core.node.common.node_service.testing_services.repr_service.ReprMessage\x12<\n"
+ b"\x12\n\x10o\xdeI!\x8eH@\xf7\x89xQ7\x8dWN\x8b\x12&\n\x05alice(\x012\x1b\n\x12\n\x10\x8b"
+ b'\x8cU\x94\xad@E\x95\x8f\x9a\x8c\x10#"\x12\xb7\x12\x05alice'
)
return blob
def get_repr_message() -> ReprMessage:
# return a repr message fixture
blob = get_repr_message_bytes()
return sy.deserialize(blob=blob, from_bytes=True)
def get_verify_key() -> VerifyKey:
# return the verification key derived from the get_signing_key signing key
return get_signing_key().verify_key
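# Note on the fixtures above: the signing key is an Ed25519 SigningKey (PyNaCl)
# built from a fixed hex seed, and get_verify_key simply exposes its .verify_key
# counterpart, so the tests below operate on a deterministic key pair.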
def test_create_signed_message() -> None:
"""Tests that SignedMessage can be created and serialized"""
# we will be signing this serializable object
msg = get_repr_message()
signing_key = get_signing_key()
sig_msg = msg.sign(signing_key=signing_key)
assert sig_msg.obj_type == get_fully_qualified_name(obj=msg)
assert (
str(sig_msg.obj_type)
== "syft.core.node.common.node_service.testing_services.repr_service.ReprMessage"
)
assert type(sig_msg.signature) == bytes
assert get_fully_qualified_name(obj=msg) in str(type(sig_msg.message))
assert len(sig_msg.signature) > 0
assert sig_msg.verify_key == get_verify_key()
assert sig_msg.message == msg
assert sig_msg.serialized_message == serialize(msg, to_bytes=True)
def test_deserialize_signed_message() -> None:
"""Tests that SignedMessage can be deserialized"""
sig_msg_blob = get_signed_message_bytes()
sig_msg = sy.deserialize(blob=sig_msg_blob, from_bytes=True)
msg = get_repr_message()
signing_key = get_signing_key()
sig_msg_comp = msg.sign(signing_key=signing_key)
assert sig_msg.obj_type == sig_msg_comp.obj_type
assert (
str(sig_msg.obj_type)
== "syft.core.node.common.node_service.testing_services.repr_service.ReprMessage"
)
assert type(sig_msg.signature) == bytes
assert type(sig_msg.verify_key) == VerifyKey
assert sig_msg.signature == sig_msg_comp.signature
assert sig_msg.verify_key == get_verify_key()
assert sig_msg.message == msg
assert sig_msg.message == sig_msg_comp.message
assert sig_msg.serialized_message == sig_msg_comp.serialized_message
def test_serde_matches() -> None:
"""Tests that the nested serde is reversible at all levels"""
# serial
sig_msg_blob = get_signed_message_bytes()
# deserial should be expected type
sig_msg = sy.deserialize(blob=sig_msg_blob, from_bytes=True)
assert type(sig_msg) == SignedImmediateSyftMessageWithoutReply
# reserial should be same as original fixture
comp_blob = serialize(sig_msg, to_bytes=True)
assert type(comp_blob) == bytes
assert comp_blob == sig_msg_blob
# now try sub message
msg = sig_msg.message
assert type(msg) == ReprMessage
# resign and the result should be the same
signing_key = get_signing_key()
sig_msg_comp = msg.sign(signing_key=signing_key)
assert type(sig_msg_comp) == SignedImmediateSyftMessageWithoutReply
assert type(sig_msg_comp) == type(sig_msg)
# make sure they have the same message id for comparison
sig_msg_comp._id = sig_msg.id
# identical (except the auto generated UID for the envelope)
assert sig_msg_comp == sig_msg
def test_verify_message() -> None:
"""Tests that SignedMessage can be verified"""
blob = get_signed_message_bytes()
sig_msg = sy.deserialize(blob=blob, from_bytes=True)
veri_msg = sig_msg.message
obj = get_repr_message()
assert veri_msg == obj
def test_verify_message_fails_key() -> None:
"""Tests that SignedMessage cant be verified with the wrong verification key"""
blob = get_signed_message_bytes()
sig_msg = sy.deserialize(blob=blob, from_bytes=True)
# everything is good
assert sig_msg.is_valid is True
# change verify_key
signing_key = SigningKey.generate()
sig_msg.verify_key = signing_key.verify_key
# not so good
assert sig_msg.is_valid is False
def test_verify_message_fails_sig() -> None:
"""Tests that SignedMessage cant be verified with the wrong signature"""
blob = get_signed_message_bytes()
sig_msg = sy.deserialize(blob=blob, from_bytes=True)
# everything is good
assert sig_msg.is_valid is True
# change signature
sig = list(sig_msg.signature)
sig[-1] = 0 # change last byte
sig_msg.signature = bytes(sig)
# not so good
assert sig_msg.is_valid is False
def test_verify_message_fails_message() -> None:
"""Tests that SignedMessage cant be verified with the wrong message"""
blob = get_signed_message_bytes()
sig_msg = sy.deserialize(blob=blob, from_bytes=True)
# everything is good
assert sig_msg.is_valid is True
# change message
sig_msg.serialized_message += b"a"
# not so good
assert sig_msg.is_valid is False
def test_verify_message_fails_empty() -> None:
"""Tests that SignedMessage cant be verified with empty sig"""
blob = get_signed_message_bytes()
sig_msg = sy.deserialize(blob=blob, from_bytes=True)
# everything is good
assert sig_msg.is_valid is True
# change message
sig_msg.signature = b""
# not so good
with pytest.raises(ValueError):
assert sig_msg.is_valid is False
def test_decode_message() -> None:
"""Tests that SignedMessage serialized_message is not encrypted"""
blob = get_signed_message_bytes()
sig_msg = sy.deserialize(blob=blob, from_bytes=True)
nonveri_msg = sy.deserialize(blob=sig_msg.serialized_message, from_bytes=True)
obj = get_repr_message()
assert nonveri_msg == obj
def test_get_message() -> None:
"""Tests that SignedMessage verification can be ignored"""
blob = get_signed_message_bytes()
sig_msg = sy.deserialize(blob=blob, from_bytes=True)
sig_msg.signature += b"a"
nonveri_msg = sig_msg.message
obj = get_repr_message()
assert nonveri_msg == obj
|
|
import taichi as ti
from tests import test_utils
n = 128
def run_atomic_add_global_case(vartype, step, valproc=lambda x: x):
x = ti.field(vartype)
y = ti.field(vartype)
c = ti.field(vartype)
ti.root.dense(ti.i, n).place(x, y)
ti.root.place(c)
# Make Taichi correctly infer the type
# TODO: Taichi seems to treat numpy.int32 as a float type, fix that.
init_ck = 0 if vartype == ti.i32 else 0.0
@ti.kernel
def func():
ck = init_ck
for i in range(n):
x[i] = ti.atomic_add(c[None], step)
y[i] = ti.atomic_add(ck, step)
func()
assert valproc(c[None]) == n * step
x_actual = sorted(x.to_numpy())
y_actual = sorted(y.to_numpy())
expect = [i * step for i in range(n)]
for (xa, ya, e) in zip(x_actual, y_actual, expect):
print(xa, ya, e)
assert valproc(xa) == e
assert valproc(ya) == e
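# Note on the helper above: ti.atomic_add returns the value of the destination
# *before* the addition, so after n sequential adds of `step`, the sorted x and y
# arrays should both equal the sequence 0, step, 2*step, ..., (n-1)*step.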
@test_utils.test()
def test_atomic_add_global_i32():
run_atomic_add_global_case(ti.i32, 42)
@test_utils.test()
def test_atomic_add_global_f32():
run_atomic_add_global_case(
ti.f32, 4.2, valproc=lambda x: test_utils.approx(x, rel=1e-5))
@test_utils.test(arch=[ti.cpu, ti.cuda])
def test_atomic_min_max_uint():
x = ti.field(ti.u64, shape=100)
@ti.kernel
def test0():
for I in x:
x[I] = 0
x[1] = ti.cast(1, ti.u64) << 63
for I in x:
ti.atomic_max(x[0], x[I])
test0()
assert x[0] == 9223372036854775808
@ti.kernel
def test1():
for I in x:
x[I] = ti.cast(1, ti.u64) << 63
x[1] = 100
for I in x:
ti.atomic_min(x[0], x[I])
test1()
assert x[0] == 100
@test_utils.test()
def test_atomic_add_expr_evaled():
c = ti.field(ti.i32)
step = 42
ti.root.place(c)
@ti.kernel
def func():
for i in range(n):
# this is an expr with side effect, make sure it's not optimized out.
ti.atomic_add(c[None], step)
func()
assert c[None] == n * step
@test_utils.test()
def test_atomic_add_demoted():
# Ensure demoted atomics do not crash the program.
x = ti.field(ti.i32)
y = ti.field(ti.i32)
step = 42
ti.root.dense(ti.i, n).place(x, y)
@ti.kernel
def func():
for i in range(n):
s = i
# Both adds should get demoted.
x[i] = ti.atomic_add(s, step)
y[i] = ti.atomic_add(s, step)
func()
for i in range(n):
assert x[i] == i
assert y[i] == i + step
@test_utils.test()
def test_atomic_add_with_local_store_simplify1():
# Test for the following LocalStoreStmt simplification case:
#
# local store [$a <- ...]
# atomic add ($a, ...)
# local store [$a <- ...]
#
# Specifically, the second store should not suppress the first one, because
# atomic_add can return its value.
x = ti.field(ti.i32)
y = ti.field(ti.i32)
step = 42
ti.root.dense(ti.i, n).place(x, y)
@ti.kernel
def func():
for i in range(n):
# do a local store
j = i
x[i] = ti.atomic_add(j, step)
# do another local store, make sure the previous one is not optimized out
j = x[i]
y[i] = j
func()
for i in range(n):
assert x[i] == i
assert y[i] == i
@test_utils.test()
def test_atomic_add_with_local_store_simplify2():
# Test for the following LocalStoreStmt simplification case:
#
# local store [$a <- ...]
# atomic add ($a, ...)
#
# Specifically, the local store should not be removed, because
# atomic_add can return its value.
x = ti.field(ti.i32)
step = 42
ti.root.dense(ti.i, n).place(x)
@ti.kernel
def func():
for i in range(n):
j = i
x[i] = ti.atomic_add(j, step)
func()
for i in range(n):
assert x[i] == i
@test_utils.test()
def test_atomic_add_with_if_simplify():
# Make sure IfStmt simplification doesn't move stmts depending on the result
# of atomic_add()
x = ti.field(ti.i32)
step = 42
ti.root.dense(ti.i, n).place(x)
boundary = n / 2
@ti.kernel
def func():
for i in range(n):
if i > boundary:
# A sequence of commands designed so that atomic_add() is the only
# thing that decides whether the if branch can be simplified.
s = i
j = ti.atomic_add(s, s)
k = j + s
x[i] = k
else:
# If we look at the IR, this branch should be simplified, since nobody
# is using atomic_add's result.
ti.atomic_add(x[i], i)
x[i] += step
func()
for i in range(n):
expect = i * 3 if i > boundary else (i + step)
assert x[i] == expect
@test_utils.test()
def test_local_atomic_with_if():
ret = ti.field(dtype=ti.i32, shape=())
@ti.kernel
def test():
if True:
x = 0
x += 1
ret[None] = x
test()
assert ret[None] == 1
@test_utils.test()
def test_atomic_sub_expr_evaled():
c = ti.field(ti.i32)
step = 42
ti.root.place(c)
@ti.kernel
def func():
for i in range(n):
# this is an expr with side effect, make sure it's not optimized out.
ti.atomic_sub(c[None], step)
func()
assert c[None] == -n * step
@test_utils.test()
def test_atomic_max_expr_evaled():
c = ti.field(ti.i32)
step = 42
ti.root.place(c)
@ti.kernel
def func():
for i in range(n):
# this is an expr with side effect, make sure it's not optimized out.
ti.atomic_max(c[None], i * step)
func()
assert c[None] == (n - 1) * step
@test_utils.test()
def test_atomic_min_expr_evaled():
c = ti.field(ti.i32)
step = 42
ti.root.place(c)
@ti.kernel
def func():
c[None] = 1000
for i in range(n):
# this is an expr with side effect, make sure it's not optimized out.
ti.atomic_min(c[None], i * step)
func()
assert c[None] == 0
@test_utils.test()
def test_atomic_and_expr_evaled():
c = ti.field(ti.i32)
step = 42
ti.root.place(c)
max_int = 2147483647
@ti.kernel
def func():
c[None] = 1023
for i in range(10):
# this is an expr with side effect, make sure it's not optimized out.
ti.atomic_and(c[None], max_int - 2**i)
func()
assert c[None] == 0
@test_utils.test()
def test_atomic_or_expr_evaled():
c = ti.field(ti.i32)
step = 42
ti.root.place(c)
@ti.kernel
def func():
c[None] = 0
for i in range(10):
# this is an expr with side effect, make sure it's not optimized out.
ti.atomic_or(c[None], 2**i)
func()
assert c[None] == 1023
@test_utils.test()
def test_atomic_xor_expr_evaled():
c = ti.field(ti.i32)
step = 42
ti.root.place(c)
@ti.kernel
def func():
c[None] = 1023
for i in range(10):
# this is an expr with side effect, make sure it's not optimized out.
ti.atomic_xor(c[None], 2**i)
func()
assert c[None] == 0
|
|
# Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for `tfr_gen` module."""
# pylint: disable=missing-function-docstring
# pylint: disable=invalid-name
# pylint: disable=g-direct-tensorflow-import
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import sys
from tensorflow.compiler.mlir.python.mlir_wrapper import filecheck_wrapper as fw
from tensorflow.compiler.mlir.tfr.python import composite
from tensorflow.compiler.mlir.tfr.python.tfr_gen import tfr_gen_from_module as tfr_gen
from tensorflow.compiler.mlir.tfr.resources import gen_test_ops as test_ops
from tensorflow.python.ops import gen_array_ops as array_ops
from tensorflow.python.ops import gen_math_ops as math_ops
from tensorflow.python.platform import test
Composite = composite.Composite
#--- test fn for mlir location ---
@Composite('TestInputNOp')
def _tfr_loc_test(x):
n = 10
x_sum = x[0]
for i in range(1, n):
x_sum = math_ops.Add(x_sum, x[i])
return x_sum
#--- test fn for tfr tensors ---
@composite.Composite('TestNoOp')
def _tfr_tensor_empty_arg():
pass
@composite.Composite('TestIdentityOp')
def _tfr_tensor_tensor(x):
return x
@composite.Composite('TestIdentityNOp')
def _tfr_tensor_tensor_list(x):
return x
@composite.Composite('TestInputNOp')
def _tfr_tensor_tensor_list_get_elt(x):
return x[1]
@composite.Composite('TestOutputNOp')
def _tfr_tensor_tensor_list_output(x):
return [x, x]
@composite.Composite('TestTwoInputsOp')
def _tfr_tensor_tensor_list_split(x, y, pred):
z, _ = array_ops.Split(axis=0, value=x, num_split=2)
(y, pred) # pylint: disable=pointless-statement
return z
@composite.Composite('TestTwoOutputsOp')
def _tfr_tensor_two_output(x):
z = array_ops.Split(axis=0, value=x, num_split=2)
return z[0], z[1]
@composite.Composite('TestNumAttrsOp')
def _tfr_tensor_tensor_with_cst(x1, y1, x2, y2):
x = array_ops.OneHot(
indices=[0, 2, -1, x1], depth=y1, on_value=True, off_value=False)
(x, x2, y2) # pylint: disable=pointless-statement
return
#--- test fn for scf control flow ---
@composite.Composite('TestTwoInputsOp')
def _tfr_control_flow_if(x, y, pred):
if pred:
return x
else:
return y
@composite.Composite('TestThreeInputsOp')
def _tfr_control_flow_nested_if(x, y, z, select):
if select == 'x':
return x
elif select == 'y':
return y
else:
return z
@composite.Composite('TestInputNOp')
def _tfr_control_flow_range_for(x):
# TODO(fengliuai): use len(x) instead
n = 10
x_sum = x[0]
for i in range(1, n):
x_sum = math_ops.Add(x_sum, x[i])
return x_sum
@composite.Composite('TestInputNOp')
def _tfr_control_flow_tensor_list_size(ins):
n = len(ins)
if n == 1:
return ins[0]
else:
return math_ops.AddN(ins)
#--- test fn for tf ops ---
@composite.Composite('TestComplexTFOp')
def _tfr_tf_ops_complex(lhs, rhs):
left_padding, _ = array_ops.SplitV(
value=lhs, size_splits=[rhs, -1], axis=0, num_split=2)
_, right_padding = array_ops.SplitV(
value=lhs, size_splits=[rhs, rhs], axis=1, num_split=2)
return [left_padding, right_padding]
@composite.Composite('TestIdentityOp')
def _tfr_tf_ops_tensor(x):
return array_ops.Identity(x)
@composite.Composite('TestTwoInputsOp')
def _tfr_tf_ops_tensors(x, y, pred):
if pred:
return math_ops.Add(x, y)
else:
return array_ops.Concat(0, [x, y])
@composite.Composite('TestInputNOp')
def _tfr_tf_ops_with_defaults(ins):
return test_ops.TestTwoInputsOp(ins[0], ins[1])
#--- test fn for tfr attributes ---
@composite.Composite('TestNumAttrsOp')
def _tfr_attrs_num_type(x, y, x1, y1):
# int
z0 = [x, y]
z1 = x == y
z2 = x < y
z3 = x <= y
z4 = x > y
z5 = x >= y
z6 = x != y
z7 = x + y
z8 = x - y
z8 += x
z8 += 1
(z0, z1, z2, z3, z4, z5, z6, z7, z8) # pylint: disable=pointless-statement
# float
z9 = x1 > y1
z10 = x1 + y1
z11 = [x1, y1]
(z9, z10, z11) # pylint: disable=pointless-statement
return
@composite.Composite('TestNonNumAttrsOp')
def _tfr_attrs_tfr_type(x, y, z):
z1 = x == y
z2 = x == 'test'
z3 = y == z
(z1, z2, z3) # pylint: disable=pointless-statement
return
#--- test fn for shapes ---
@composite.Composite('TestIdentityOp')
def _tfr_shapes(x):
s1 = x.shape
s3 = x.shape.as_list()
for i in range(len(s3)):
s3[i] # pylint: disable=pointless-statement
for i in range(1, len(s3), 2):
s3[i] # pylint: disable=pointless-statement
s5 = array_ops.Shape(x)
(s1, s3, s5) # pylint: disable=pointless-statement
return x
#--- test fn for nested functions ---
@composite.Composite('TestIdentityNOp')
def _tfr_temp_op(x):
return x
@composite.Composite('TestIdentityOp')
def _tfr_temp_use_op(x):
y = _tfr_temp_op([x])
return y[0]
class TFRGenTestBase(test.TestCase):
def _check_code(self, tfr_code, exp_tfr_code):
return self.assertTrue(fw.check(str(tfr_code), exp_tfr_code), str(tfr_code))
class TFRGenTensorTest(TFRGenTestBase):
"""MLIR Generation Tests for MLIR TFR Program."""
def test_tfr_loc(self):
mlir_code = tfr_gen(sys.modules[__name__], '_tfr_loc', [test_ops])
mlir_code_exp = r"""
CHECK-LABEL: tfr.func @tf__test_input_n_op(%x: !tfr.tensor_list) -> (!tfr.tensor) {
CHECK-NEXT: %[[n:.*]] = constant 10 : i64
CHECK-SAME loc("tfr_gen_test.py":%{{.*}}:6)
CHECK-NEXT: %[[cst:.*]] = constant 0 : index
CHECK-SAME loc("tfr_gen_test.py":%[[sum_line:.*]]:10)
CHECK-NEXT: %[[elt:.*]] = tfr.get_element %x[%[[cst]]] : (!tfr.tensor_list, index) -> !tfr.tensor
CHECK-SAME loc("tfr_gen_test.py":%[[sum_line]]:10)
CHECK-NEXT: %[[cst_1:.*]] = constant 1 : i64
CHECK-SAME loc("tfr_gen_test.py":%[[for_line:.*]]:2)
CHECK-NEXT: %[[begin:.*]] = index_cast %[[cst_1]] : i64 to index
CHECK-SAME loc("tfr_gen_test.py":%[[for_line]]:2)
CHECK-NEXT: %[[end:.*]] = index_cast %[[n]] : i64 to index
CHECK-SAME loc("tfr_gen_test.py":%[[for_line]]:2)
CHECK-NEXT: %[[step:.*]] = constant 1 : index
CHECK-SAME loc("tfr_gen_test.py":%[[for_line]]:2)
CHECK-NEXT: %[[for_stmt:.*]] = scf.for %[[itr_1:.*]] = %[[begin]] to %[[end]] step %[[step]]
CHECK-SAME: iter_args(%[[it_arg:.*]] = %[[elt]]) -> (!tfr.tensor) {
CHECK-NEXT: %[[elt_1:.*]] = tfr.get_element %x[%itr_1] : (!tfr.tensor_list, index) -> !tfr.tensor
CHECK-SAME loc("tfr_gen_test.py":%[[add_line:.*]]:34)
CHECK-NEXT: %[[Add:.*]] = tfr.call @tf__add(%[[it_arg]], %[[elt_1]]) : (!tfr.tensor, !tfr.tensor) -> (!tfr.tensor)
CHECK-SAME loc("tfr_gen_test.py":%[[add_line]]:12)
CHECK-NEXT: scf.yield %[[Add]] : !tfr.tensor
CHECK-SAME loc(unknown)
CHECK-NEXT: }
CHECK-SAME loc("tfr_gen_test.py":%[[for_line]]:2)
CHECK-NEXT: %{{.*}} = constant true
CHECK-SAME loc(unknown)
CHECK-NEXT: tfr.return %[[for_stmt]] : !tfr.tensor
CHECK-SAME loc(unknown)
CHECK-NEXT: }
CHECK-SAME loc("tfr_gen_test.py":%{{def_line:.*}}:0)
"""
self._check_code(mlir_code, mlir_code_exp)
def test_tfr_tensors(self):
mlir_code = tfr_gen(sys.modules[__name__], '_tfr_tensor', [test_ops])
mlir_code_exp = r"""
CHECK-LABEL: tfr.func @tf__test_no_op() -> () {
CHECK-NEXT: tfr.return
CHECK-NEXT: }
CHECK-LABEL: tfr.func @tf__test_identity_op(%x: !tfr.tensor) -> (!tfr.tensor) {
CHECK-NEXT: constant true
CHECK-NEXT: tfr.return %x : !tfr.tensor
CHECK-NEXT: }
CHECK-LABEL: tfr.func @tf__test_identity_n_op(%x: !tfr.tensor_list) -> (!tfr.tensor_list) {
CHECK-NEXT: constant true
CHECK-NEXT: tfr.return %x : !tfr.tensor_list
CHECK-NEXT: }
CHECK-LABEL: tfr.func @tf__test_input_n_op(%x: !tfr.tensor_list) -> (!tfr.tensor) {
CHECK-NEXT: constant true
CHECK-NEXT: %[[index:.*]] = constant 1 : index
CHECK-NEXT: %[[sub:.*]] = tfr.get_element %x[%cst_1] : (!tfr.tensor_list, index) -> !tfr.tensor
CHECK-NEXT: tfr.return %[[sub]] : !tfr.tensor
CHECK-NEXT: }
CHECK-LABEL: tfr.func @tf__test_output_n_op(%x: !tfr.tensor) -> (!tfr.tensor_list) {
CHECK-NEXT: constant true
CHECK-NEXT: %[[list:.*]] = "tfr.build_list"(%x, %x) : (!tfr.tensor, !tfr.tensor) -> !tfr.tensor_list
CHECK-NEXT: tfr.return %[[list]] : !tfr.tensor_list
CHECK-NEXT: }
CHECK-LABEL: tfr.func @tf__test_two_inputs_op(%x: !tfr.tensor, %y: !tfr.tensor, %pred: i1{tfr.name="pred",tfr.default=false}) -> (!tfr.tensor) {
CHECK-NEXT: %[[cst:.*]] = constant 0 : i64
CHECK-NEXT: %[[cst_1:.*]] = constant 2 : i64
CHECK-NEXT: %[[cst_2:.*]] = "tfr.constant_tensor"(%[[cst]]) : (i64) -> !tfr.tensor
CHECK-NEXT: %[[Split:.*]] = tfr.call @tf__split(%[[cst_2]], %x, %[[cst_1]]) : (!tfr.tensor, !tfr.tensor, i64) -> (!tfr.tensor_list)
CHECK-NEXT: %[[cst_4:.*]] = constant 0 : index
CHECK-NEXT: %[[elt:.*]] = tfr.get_element %[[Split]][%idx] : (!tfr.tensor_list, index) -> !tfr.tensor
CHECK-NEXT: %[[cst_5:.*]] = constant 1 : index
CHECK-NEXT: %[[elt_1:.*]] = tfr.get_element %[[Split]][%idx_1] : (!tfr.tensor_list, index) -> !tfr.tensor
CHECK-NEXT: constant true
CHECK-NEXT: tfr.return %[[elt]] : !tfr.tensor
CHECK-NEXT: }
CHECK-LABEL: tfr.func @tf__test_two_outputs_op(%x: !tfr.tensor) -> (!tfr.tensor, !tfr.tensor) {
CHECK-NEXT: %[[cst:.*]] = constant 0 : i64
CHECK-NEXT: %[[cst_1:.*]] = constant 2 : i64
CHECK-NEXT: %[[cst_2:.*]] = "tfr.constant_tensor"(%[[cst]]) : (i64) -> !tfr.tensor
CHECK-NEXT: %[[Split:.*]] = tfr.call @tf__split(%[[cst_2]], %x, %[[cst_1]]) : (!tfr.tensor, !tfr.tensor, i64) -> (!tfr.tensor_list)
CHECK-NEXT: constant true
CHECK-NEXT: %[[cst_4:.*]] = constant 0 : index
CHECK-NEXT: %[[elt:.*]] = tfr.get_element %[[Split]][%cst_4] : (!tfr.tensor_list, index) -> !tfr.tensor
CHECK-NEXT: %[[cst_5:.*]] = constant 1 : index
CHECK-NEXT: %[[elt_1:.*]] = tfr.get_element %[[Split]][%cst_5] : (!tfr.tensor_list, index) -> !tfr.tensor
CHECK-NEXT: tfr.return %[[elt]], %[[elt_1]] : !tfr.tensor, !tfr.tensor
CHECK-NEXT: }
CHECK-LABEL: tfr.func @tf__test_num_attrs_op(%x1: i64{tfr.name="x1",tfr.default=-10}, %y1: i64{tfr.name="y1",tfr.default=1}, %x2: f32{tfr.name="x2",tfr.default=0.0}, %y2: f32{tfr.name="y2",tfr.default=-3.0}) -> () {
CHECK-NEXT: %[[cst:.*]] = constant 0 : i64
CHECK-NEXT: %[[cst_1:.*]] = constant 2 : i64
CHECK-NEXT: %[[cst_2:.*]] = constant 1 : i64
CHECK-NEXT: %[[zero:.*]] = constant 0 : i64
CHECK-NEXT: %[[cst_3:.*]] = subi %zero, %cst_2 : i64
CHECK-NEXT: %[[list:.*]] = "tfr.build_list"(%[[cst]], %[[cst_1]], %[[cst_3]], %x1) : (i64, i64, i64, i64) -> !tfr.attr
CHECK-NEXT: %[[cst_4:.*]] = constant true
CHECK-NEXT: %[[cst_5:.*]] = constant false
CHECK-NEXT: %[[cst_6:.*]] = "tfr.constant_tensor"(%[[list]]) : (!tfr.attr) -> !tfr.tensor
CHECK-NEXT: %[[cst_7:.*]] = "tfr.constant_tensor"(%y1) : (i64) -> !tfr.tensor
CHECK-NEXT: %[[cst_8:.*]] = "tfr.constant_tensor"(%[[cst_4]]) : (i1) -> !tfr.tensor
CHECK-NEXT: %[[cst_9:.*]] = "tfr.constant_tensor"(%[[cst_5]]) : (i1) -> !tfr.tensor
CHECK-NEXT: %[[cst_10:.*]] = constant -1 : i64
CHECK-NEXT: %[[OneHot:.*]] = tfr.call @tf__one_hot(%[[cst_6]], %[[cst_7]], %[[cst_8]], %[[cst_9]], %[[cst_10]])
CHECK-SAME: (!tfr.tensor, !tfr.tensor, !tfr.tensor, !tfr.tensor, i64) -> (!tfr.tensor)
CHECK-NEXT: constant true
CHECK-NEXT: tfr.return
CHECK-NEXT: }
"""
self._check_code(mlir_code, mlir_code_exp)
def test_tfr_control_flow(self):
mlir_code = tfr_gen(sys.modules[__name__], '_tfr_control_flow', [test_ops])
mlir_code_exp = r"""
CHECK-LABEL: tfr.func @tf__test_two_inputs_op(%x: !tfr.tensor, %y: !tfr.tensor,
CHECK-SAME: %pred: i1{tfr.name="pred",tfr.default=false}) -> (!tfr.tensor) {
CHECK-NEXT: %[[if:.*]] = scf.if %pred -> (!tfr.tensor) {
CHECK-NEXT: constant true
CHECK-NEXT: scf.yield %x : !tfr.tensor
CHECK-NEXT: } else {
CHECK-NEXT: constant true
CHECK-NEXT: scf.yield %y : !tfr.tensor
CHECK-NEXT: }
CHECK-NEXT: tfr.return %if_stmt : !tfr.tensor
CHECK-NEXT: }
CHECK-LABEL: tfr.func @tf__test_three_inputs_op(%x: !tfr.tensor, %y: !tfr.tensor, %z: !tfr.tensor,
CHECK-SAME: %select: !tfr.attr{tfr.name="act",tfr.default="z"}) -> (!tfr.tensor) {
CHECK-NEXT: %[[cst:.*]] = tfr.constant "x" -> !tfr.attr
CHECK-NEXT: %[[eq:.*]] = tfr.equal %select, %[[cst]] -> i1
CHECK-NEXT: %[[if_stmt:.*]] = scf.if %[[eq]] -> (!tfr.tensor) {
CHECK-NEXT: %[[cst_1:.*]] = constant true
CHECK-NEXT: scf.yield %x : !tfr.tensor
CHECK-NEXT: } else {
CHECK-NEXT: %[[cst_2:.*]] = tfr.constant "y" -> !tfr.attr
CHECK-NEXT: %[[eq_1:.*]] = tfr.equal %select, %[[cst_2]] -> i1
CHECK-NEXT: %[[if_stmt1:.*]] = scf.if %[[eq_1]] -> (!tfr.tensor) {
CHECK-NEXT: %[[cst_3:.*]] = constant true
CHECK-NEXT: scf.yield %y : !tfr.tensor
CHECK-NEXT: } else {
CHECK-NEXT: %[[cst_4:.*]] = constant true
CHECK-NEXT: scf.yield %z : !tfr.tensor
CHECK-NEXT: }
CHECK-NEXT: scf.yield %[[if_stmt1]] : !tfr.tensor
CHECK-NEXT: }
CHECK-NEXT: tfr.return %[[if_stmt]] : !tfr.tensor
CHECK-NEXT: }
CHECK-LABEL: tfr.func @tf__test_input_n_op(%x: !tfr.tensor_list) -> (!tfr.tensor) {
CHECK-NEXT: %[[n:.*]] = constant 10 : i64
CHECK-NEXT: %[[cst:.*]] = constant 0 : index
CHECK-NEXT: %[[elt:.*]] = tfr.get_element %x[%[[cst]]] : (!tfr.tensor_list, index) -> !tfr.tensor
CHECK-NEXT: %[[cst_1:.*]] = constant 1 : i64
CHECK-NEXT: %[[begin:.*]] = index_cast %[[cst_1]] : i64 to index
CHECK-NEXT: %[[end:.*]] = index_cast %[[n]] : i64 to index
CHECK-NEXT: %[[step:.*]] = constant 1 : index
CHECK-NEXT: %[[for_stmt:.*]] = scf.for %[[itr_1:.*]] = %[[begin]] to %[[end]] step %[[step]]
CHECK-SAME: iter_args(%[[it_arg:.*]] = %[[elt]]) -> (!tfr.tensor) {
CHECK-NEXT: %[[elt_1:.*]] = tfr.get_element %x[%itr_1] : (!tfr.tensor_list, index) -> !tfr.tensor
CHECK-NEXT: %[[Add:.*]] = tfr.call @tf__add(%[[it_arg]], %[[elt_1]]) : (!tfr.tensor, !tfr.tensor) -> (!tfr.tensor)
CHECK-NEXT: scf.yield %[[Add]] : !tfr.tensor
CHECK-NEXT: }
CHECK-NEXT: %{{.*}} = constant true
CHECK-NEXT: tfr.return %[[for_stmt]] : !tfr.tensor
CHECK-NEXT: }
CHECK-LABEL: tfr.func @tf__test_input_n_op(%ins: !tfr.tensor_list) -> (!tfr.tensor) {
CHECK-NEXT: %[[len:.*]] = tfr.get_length %ins -> index
CHECK-NEXT: %[[cst:.*]] = constant 1 : i64
CHECK-NEXT: %[[casted:.*]] = index_cast %[[cst]] : i64 to index
CHECK-NEXT: %[[eq:.*]] = cmpi "eq", %[[len]], %[[casted]] : index
CHECK-NEXT: %[[if:.*]] = scf.if %[[eq]] -> (!tfr.tensor) {
CHECK-NEXT: %{{.*}} = constant true
CHECK-NEXT: %{{.*}} = constant 0 : index
CHECK-NEXT: %[[elt:.*]] = tfr.get_element %ins[%cst_2] : (!tfr.tensor_list, index) -> !tfr.tensor
CHECK-NEXT: scf.yield %[[elt]] : !tfr.tensor
CHECK-NEXT: } else {
CHECK-NEXT: %{{.*}} = constant true
CHECK-NEXT: %[[AddN:.*]] = tfr.call @tf__add_n(%ins) : (!tfr.tensor_list) -> (!tfr.tensor)
CHECK-NEXT: scf.yield %[[AddN]] : !tfr.tensor
CHECK-NEXT: }
CHECK-NEXT: tfr.return %[[if_stmt]] : !tfr.tensor
CHECK-NEXT: }
"""
self._check_code(mlir_code, mlir_code_exp)
def test_tfr_tf_ops(self):
mlir_code = tfr_gen(sys.modules[__name__], '_tfr_tf_ops', [test_ops])
mlir_code_exp = r"""
CHECK-LABEL: tfr.func @tf__test_complex_tf_op(%lhs: !tfr.tensor, %rhs: !tfr.tensor) -> (!tfr.tensor_list) {
CHECK-NEXT: %[[cst:.*]] = constant 1 : i64
CHECK-NEXT: %[[zero:.*]] = constant 0 : i64
CHECK-NEXT: %[[cst_1:.*]] = subi %[[zero]], %cst : i64
CHECK-NEXT: %[[cst_2:.*]] = "tfr.constant_tensor"(%[[cst_1]]) : (i64) -> !tfr.tensor
CHECK-NEXT: %[[list:.*]] = "tfr.build_list"(%rhs, %[[cst_2]]) : (!tfr.tensor, !tfr.tensor) -> !tfr.tensor_list
CHECK-NEXT: %[[cst_3:.*]] = constant 0 : i64
CHECK-NEXT: %[[cst_4:.*]] = constant 2 : i64
CHECK-NEXT: %[[zero_1:.*]] = constant 0 : i64
CHECK-NEXT: %[[pack:.*]] = tfr.call @tf__pack(%[[list]], %[[zero_1]]) : (!tfr.tensor_list, i64) -> !tfr.tensor
CHECK-NEXT: %[[cst_5:.*]] = "tfr.constant_tensor"(%[[cst_3]]) : (i64) -> !tfr.tensor
CHECK-NEXT: %[[SplitV:.*]] = tfr.call @tf__split_v(%lhs, %[[pack]], %[[cst_5]], %[[cst_4]])
CHECK-NEXT: %[[idx:.*]] = constant 0 : index
CHECK-NEXT: %[[elt:.*]] = tfr.get_element %SplitV[%idx] : (!tfr.tensor_list, index) -> !tfr.tensor
CHECK-NEXT: %[[idx_1:.*]] = constant 1 : index
CHECK-NEXT: %[[elt_1:.*]] = tfr.get_element %SplitV[%idx_1] : (!tfr.tensor_list, index) -> !tfr.tensor
CHECK-NEXT: %[[list_1:.*]] = "tfr.build_list"(%rhs, %rhs) : (!tfr.tensor, !tfr.tensor) -> !tfr.tensor_list
CHECK-NEXT: %[[cst_6:.*]] = constant 1 : i64
CHECK-NEXT: %[[cst_7:.*]] = constant 2 : i64
CHECK-NEXT: %[[zero_2:.*]] = constant 0 : i64
CHECK-NEXT: %[[pack_1:.*]] = tfr.call @tf__pack(%[[list_1]], %[[zero_2]]) : (!tfr.tensor_list, i64) -> !tfr.tensor
CHECK-NEXT: %[[cst_8:.*]] = "tfr.constant_tensor"(%[[cst_6]]) : (i64) -> !tfr.tensor
CHECK-NEXT: %[[SplitV_1:.*]] = tfr.call @tf__split_v(%lhs, %[[pack_1]], %[[cst_8]], %[[cst_7]])
CHECK-NEXT: %[[idx_2:.*]] = constant 0 : index
CHECK-NEXT: %[[elt_2:.*]] = tfr.get_element %SplitV_1[%idx_2] : (!tfr.tensor_list, index) -> !tfr.tensor
CHECK-NEXT: %[[idx_3:.*]] = constant 1 : index
CHECK-NEXT: %[[elt_3:.*]] = tfr.get_element %SplitV_1[%idx_3] : (!tfr.tensor_list, index) -> !tfr.tensor
CHECK-NEXT: %[[cst_9:.*]] = constant true
CHECK-NEXT: %[[list_2:.*]] = "tfr.build_list"(%[[elt]], %[[elt_3]]) : (!tfr.tensor, !tfr.tensor) -> !tfr.tensor_list
CHECK-NEXT: tfr.return %[[list_2]] : !tfr.tensor_list
CHECK-NEXT: }
CHECK-LABEL: tfr.func @tf__test_identity_op(%x: !tfr.tensor) -> (!tfr.tensor) {
CHECK-NEXT: %cst = constant true
CHECK-NEXT: %[[Id:.*]] = tfr.call @tf__identity(%x) : (!tfr.tensor) -> (!tfr.tensor)
CHECK-NEXT: tfr.return %[[Id]] : !tfr.tensor
CHECK-NEXT: }
CHECK-LABEL: tfr.func @tf__test_two_inputs_op(%x: !tfr.tensor, %y: !tfr.tensor,
CHECK-SAME: %pred: i1{tfr.name="pred",tfr.default=false}) -> (!tfr.tensor) {
CHECK-NEXT: %[[if_stmt:.*]] = scf.if %pred -> (!tfr.tensor) {
CHECK-NEXT: %cst = constant true
CHECK-NEXT: %[[Add:.*]] = tfr.call @tf__add(%x, %y) : (!tfr.tensor, !tfr.tensor) -> (!tfr.tensor)
CHECK-NEXT: scf.yield %[[Add]] : !tfr.tensor
CHECK-NEXT: } else {
CHECK-NEXT: %cst_1 = constant true
CHECK-NEXT: %[[cst_2:.*]] = constant 0 : i64
CHECK-NEXT: %[[list:.*]] = "tfr.build_list"(%x, %y) : (!tfr.tensor, !tfr.tensor) -> !tfr.tensor_list
CHECK-NEXT: %[[Concat:.*]] = tfr.call @tf__concat(%[[cst_2]], %[[list]]) : (i64, !tfr.tensor_list) -> (!tfr.tensor)
CHECK-NEXT: scf.yield %[[Concat]] : !tfr.tensor
CHECK-NEXT: }
CHECK-NEXT: tfr.return %[[if_stmt]] : !tfr.tensor
CHECK-NEXT: }
CHECK-LABEL: tfr.func @tf__test_input_n_op(%ins: !tfr.tensor_list) -> (!tfr.tensor) {
CHECK-NEXT: %cst = constant true
CHECK-NEXT: %[[cst_1:.*]] = constant 0 : index
CHECK-NEXT: %[[elt:.*]] = tfr.get_element %ins[%cst_1] : (!tfr.tensor_list, index) -> !tfr.tensor
CHECK-NEXT: %[[cst_2:.*]] = constant 1 : index
CHECK-NEXT: %[[elt_1:.*]] = tfr.get_element %ins[%cst_2] : (!tfr.tensor_list, index) -> !tfr.tensor
CHECK-NEXT: %[[cst_3:.*]] = constant false
CHECK-NEXT: %[[call:.*]] = tfr.call @tf__test_two_inputs_op(
CHECK-SAME: %[[elt]], %[[elt_1]], %[[cst_3]]) : (!tfr.tensor, !tfr.tensor, i1) -> (!tfr.tensor)
CHECK-NEXT: tfr.return %[[call]] : !tfr.tensor
CHECK-NEXT: }
CHECK-LABEL: tfr.func @tf__add_(!tfr.tensor<T>,!tfr.tensor<T>) -> (!tfr.tensor<T>) attributes {T}
CHECK-LABEL: tfr.func @tf__concat_(!tfr.tensor<i32_>,!tfr.tensor_list<N,T>) -> (!tfr.tensor<T>) attributes {N,T,i32_}
CHECK-LABEL: tfr.func @tf__identity_(!tfr.tensor<T>) -> (!tfr.tensor<T>) attributes {T}
CHECK-LABEL: tfr.func @tf__pack_(!tfr.tensor_list<N,T>,i64{tfr.name="axis"}) -> (!tfr.tensor<T>) attributes {N,T,axis}
CHECK-LABEL: tfr.func @tf__split_v_(!tfr.tensor<T>,!tfr.tensor<Tlen>,!tfr.tensor<i32_>,i64{tfr.name="num_split"}) -> (!tfr.tensor_list<num_split,T>) attributes {T,Tlen,i32_,num_split}
CHECK-LABEL: tfr.func @tf__test_two_inputs_op_(!tfr.tensor<T>,!tfr.tensor<T>,i1{tfr.name="pred"}) -> (!tfr.tensor<T>) attributes {T,pred}
CHECK-LABEL: tfr.func @tf__test_complex_tf_op_(!tfr.tensor<T>,!tfr.tensor<Tlen>,i64{tfr.name="N"}) -> (!tfr.tensor_list<N,T>) attributes {N,T,Tlen}
CHECK-LABEL: tfr.func @tf__test_identity_op_(!tfr.tensor<T>) -> (!tfr.tensor<T>) attributes {T}
CHECK-LABEL: tfr.func @tf__test_two_inputs_op_(!tfr.tensor<T>,!tfr.tensor<T>,i1{tfr.name="pred"}) -> (!tfr.tensor<T>) attributes {T,pred}
CHECK-LABEL: tfr.func @tf__test_input_n_op_(!tfr.tensor_list<N,T>) -> (!tfr.tensor<T>) attributes {N,T}
"""
self._check_code(mlir_code, mlir_code_exp)
def test_tfr_attrs(self):
mlir_code = tfr_gen(sys.modules[__name__], '_tfr_attrs', [test_ops])
mlir_code_exp = r"""
CHECK-LABEL: tfr.func @tf__test_num_attrs_op(
CHECK-SAME: %x: i64{tfr.name="x1",tfr.default=-10},
CHECK-SAME: %y: i64{tfr.name="y1",tfr.default=1},
CHECK-SAME: %x1: f32{tfr.name="x2",tfr.default=0.0},
CHECK-SAME: %y1: f32{tfr.name="y2",tfr.default=-3.0}) -> () {
CHECK-NEXT: %{{.*}} = "tfr.build_list"(%x, %y) : (i64, i64) -> !tfr.attr
CHECK-NEXT: %{{.*}} = cmpi "eq", %x, %y : i64
CHECK-NEXT: %{{.*}} = cmpi "ult", %x, %y : i64
CHECK-NEXT: %{{.*}} = cmpi "ule", %x, %y : i64
CHECK-NEXT: %{{.*}} = cmpi "ugt", %x, %y : i64
CHECK-NEXT: %{{.*}} = cmpi "uge", %x, %y : i64
CHECK-NEXT: %{{.*}} = cmpi "ne", %x, %y : i64
CHECK-NEXT: %{{.*}} = addi %x, %y : i64
CHECK-NEXT: %{{.*}} = subi %x, %y : i64
CHECK-NEXT: %[[add_1:.*]] = addi %sub, %x : i64
CHECK-NEXT: %[[cst:.*]] = constant 1 : i64
CHECK-NEXT: %{{.*}} = addi %[[add_1]], %[[cst]] : i64
CHECK-NEXT: %{{.*}} = cmpf "ugt", %x1, %y1 : f32
CHECK-NEXT: %{{.*}} = addf %x1, %y1 : f32
CHECK-NEXT: %{{.*}} = "tfr.build_list"(%x1, %y1) : (f32, f32) -> !tfr.attr
CHECK-NEXT: %{{.*}} = constant true
CHECK-NEXT: tfr.return
CHECK-NEXT: }
CHECK-LABEL: tfr.func @tf__test_non_num_attrs_op(
CHECK-SAME: %x: !tfr.attr{tfr.name="z"},
CHECK-SAME: %y: !tfr.attr{tfr.name="x",tfr.default="hello"},
CHECK-SAME: %z: !tfr.attr{tfr.name="y",tfr.default=f32}) -> () {
CHECK-NEXT: %{{.*}} = tfr.equal %x, %y -> i1
CHECK-NEXT: %[[cst:.*]] = tfr.constant "test" -> !tfr.attr
CHECK-NEXT: %{{.*}} = tfr.equal %x, %[[cst]] -> i1
CHECK-NEXT: %{{.*}} = tfr.equal %y, %z -> i1
CHECK-NEXT: %{{.*}} = constant true
CHECK-NEXT: tfr.return
CHECK-NEXT: }
"""
self._check_code(mlir_code, mlir_code_exp)
def test_tf_tensor_shape(self):
mlir_code = tfr_gen(sys.modules[__name__], '_tfr_shapes', [test_ops])
mlir_code_exp = r"""
CHECK-LABEL: tfr.func @tf__test_identity_op(%x: !tfr.tensor) -> (!tfr.tensor) {
CHECK-NEXT: %[[shape:.*]] = tfr.get_shape %x -> !shape.shape
CHECK-NEXT: %[[shape_1:.*]] = tfr.get_shape %x -> !shape.shape
CHECK-NEXT: %[[len:.*]] = shape.rank %[[shape_1]] : !shape.shape -> !shape.size
CHECK-NEXT: %[[index:.*]] = shape.size_to_index %[[len]] : !shape.size
CHECK-NEXT: %[[begin:.*]] = constant 0 : index
CHECK-NEXT: %[[step:.*]] = constant 1 : index
CHECK-NEXT: scf.for %[[itr_1:.*]] = %[[begin]] to %[[index]] step %[[step]] {
CHECK-NEXT: %[[size:.*]] = shape.get_extent %[[shape_1]], %[[itr_1]]: !shape.shape, index -> !shape.size
CHECK-NEXT: %[[elt:.*]] = shape.size_to_index %[[size]] : !shape.size
CHECK-NEXT: scf.yield
CHECK-NEXT: }
CHECK-NEXT: %[[cst:.*]] = constant 1 : i64
CHECK-NEXT: %[[len_1:.*]] = shape.rank %shape_1 : !shape.shape -> !shape.size
CHECK-NEXT: %[[len_size_1:.*]] = shape.size_to_index %[[len_1]] : !shape.size
CHECK-NEXT: %[[cst_1:.*]] = constant 2 : i64
CHECK-NEXT: %[[begin_1:.*]] = index_cast %[[cst]] : i64 to index
CHECK-NEXT: %[[step_1:.*]] = index_cast %[[cst_1]] : i64 to index
CHECK-NEXT: scf.for %[[itr_3:.*]] = %[[begin_1]] to %[[len_size_1]] step %[[step_1]]
CHECK: %[[cst:.*]] = tfr.constant i32 -> !tfr.attr
CHECK-NEXT: %[[Shape:.*]] = tfr.call @tf__shape(%x, %[[cst]]) : (!tfr.tensor, !tfr.attr) -> (!tfr.tensor)
CHECK-NEXT: %{{.*}} = constant true
CHECK-NEXT: tfr.return %x : !tfr.tensor
CHECK-NEXT: }
"""
self._check_code(mlir_code, mlir_code_exp)
def test_temp_function(self):
mlir_code = tfr_gen(sys.modules[__name__], '_tfr_temp', [test_ops])
mlir_code_exp = r"""
CHECK-LABEL: tfr.func @tf__test_identity_n_op(%x: !tfr.tensor_list) -> (!tfr.tensor_list)
CHECK-LABEL: tfr.func @tf__test_identity_op(%x: !tfr.tensor) -> (!tfr.tensor) {
CHECK-NEXT: %[[list:.*]] = "tfr.build_list"(%x) : (!tfr.tensor) -> !tfr.tensor_list
CHECK-NEXT: %[[call:.*]] = tfr.call @tf__test_identity_n_op(%[[list]]) : (!tfr.tensor_list)
"""
self._check_code(mlir_code, mlir_code_exp)
if __name__ == '__main__':
test.main()
|
|
# Authors: Nicolas Goix <[email protected]>
# Alexandre Gramfort <[email protected]>
# License: BSD 3 clause
import numbers
import numpy as np
from scipy.sparse import issparse
from warnings import warn
from ..tree import ExtraTreeRegressor
from ..utils import (
check_random_state,
check_array,
gen_batches,
get_chunk_n_rows,
)
from ..utils.fixes import _joblib_parallel_args
from ..utils.validation import check_is_fitted, _num_samples
from ..base import OutlierMixin
from ._bagging import BaseBagging
__all__ = ["IsolationForest"]
class IsolationForest(OutlierMixin, BaseBagging):
"""
Isolation Forest Algorithm.
    Return the anomaly score of each sample using the IsolationForest algorithm.
The IsolationForest 'isolates' observations by randomly selecting a feature
and then randomly selecting a split value between the maximum and minimum
values of the selected feature.
Since recursive partitioning can be represented by a tree structure, the
number of splittings required to isolate a sample is equivalent to the path
length from the root node to the terminating node.
This path length, averaged over a forest of such random trees, is a
measure of normality and our decision function.
Random partitioning produces noticeably shorter paths for anomalies.
Hence, when a forest of random trees collectively produce shorter path
lengths for particular samples, they are highly likely to be anomalies.
Read more in the :ref:`User Guide <isolation_forest>`.
.. versionadded:: 0.18
Parameters
----------
n_estimators : int, default=100
The number of base estimators in the ensemble.
max_samples : "auto", int or float, default="auto"
The number of samples to draw from X to train each base estimator.
- If int, then draw `max_samples` samples.
- If float, then draw `max_samples * X.shape[0]` samples.
- If "auto", then `max_samples=min(256, n_samples)`.
If max_samples is larger than the number of samples provided,
all samples will be used for all trees (no sampling).
contamination : 'auto' or float, default='auto'
The amount of contamination of the data set, i.e. the proportion
of outliers in the data set. Used when fitting to define the threshold
on the scores of the samples.
- If 'auto', the threshold is determined as in the
original paper.
- If float, the contamination should be in the range (0, 0.5].
.. versionchanged:: 0.22
The default value of ``contamination`` changed from 0.1
to ``'auto'``.
max_features : int or float, default=1.0
The number of features to draw from X to train each base estimator.
- If int, then draw `max_features` features.
- If float, then draw `max_features * X.shape[1]` features.
bootstrap : bool, default=False
If True, individual trees are fit on random subsets of the training
data sampled with replacement. If False, sampling without replacement
is performed.
n_jobs : int, default=None
The number of jobs to run in parallel for both :meth:`fit` and
:meth:`predict`. ``None`` means 1 unless in a
:obj:`joblib.parallel_backend` context. ``-1`` means using all
processors. See :term:`Glossary <n_jobs>` for more details.
random_state : int, RandomState instance or None, default=None
Controls the pseudo-randomness of the selection of the feature
and split values for each branching step and each tree in the forest.
Pass an int for reproducible results across multiple function calls.
See :term:`Glossary <random_state>`.
verbose : int, default=0
Controls the verbosity of the tree building process.
warm_start : bool, default=False
When set to ``True``, reuse the solution of the previous call to fit
and add more estimators to the ensemble, otherwise, just fit a whole
new forest. See :term:`the Glossary <warm_start>`.
.. versionadded:: 0.21
Attributes
----------
base_estimator_ : ExtraTreeRegressor instance
The child estimator template used to create the collection of
fitted sub-estimators.
estimators_ : list of ExtraTreeRegressor instances
The collection of fitted sub-estimators.
estimators_features_ : list of ndarray
The subset of drawn features for each base estimator.
estimators_samples_ : list of ndarray
The subset of drawn samples (i.e., the in-bag samples) for each base
estimator.
max_samples_ : int
The actual number of samples.
offset_ : float
Offset used to define the decision function from the raw scores. We
have the relation: ``decision_function = score_samples - offset_``.
``offset_`` is defined as follows. When the contamination parameter is
set to "auto", the offset is equal to -0.5 as the scores of inliers are
close to 0 and the scores of outliers are close to -1. When a
contamination parameter different than "auto" is provided, the offset
        is defined in such a way that we obtain the expected number of outliers
(samples with decision function < 0) in training.
.. versionadded:: 0.20
n_features_ : int
The number of features when ``fit`` is performed.
.. deprecated:: 1.0
Attribute `n_features_` was deprecated in version 1.0 and will be
removed in 1.2. Use `n_features_in_` instead.
n_features_in_ : int
Number of features seen during :term:`fit`.
.. versionadded:: 0.24
Notes
-----
The implementation is based on an ensemble of ExtraTreeRegressor. The
maximum depth of each tree is set to ``ceil(log_2(n))`` where
:math:`n` is the number of samples used to build the tree
(see (Liu et al., 2008) for more details).
References
----------
.. [1] Liu, Fei Tony, Ting, Kai Ming and Zhou, Zhi-Hua. "Isolation forest."
Data Mining, 2008. ICDM'08. Eighth IEEE International Conference on.
.. [2] Liu, Fei Tony, Ting, Kai Ming and Zhou, Zhi-Hua. "Isolation-based
anomaly detection." ACM Transactions on Knowledge Discovery from
Data (TKDD) 6.1 (2012): 3.
See Also
    --------
sklearn.covariance.EllipticEnvelope : An object for detecting outliers in a
Gaussian distributed dataset.
sklearn.svm.OneClassSVM : Unsupervised Outlier Detection.
Estimate the support of a high-dimensional distribution.
The implementation is based on libsvm.
sklearn.neighbors.LocalOutlierFactor : Unsupervised Outlier Detection
using Local Outlier Factor (LOF).
Examples
--------
>>> from sklearn.ensemble import IsolationForest
>>> X = [[-1.1], [0.3], [0.5], [100]]
>>> clf = IsolationForest(random_state=0).fit(X)
>>> clf.predict([[0.1], [0], [90]])
array([ 1, 1, -1])
"""
def __init__(self, *,
n_estimators=100,
max_samples="auto",
contamination="auto",
max_features=1.,
bootstrap=False,
n_jobs=None,
random_state=None,
verbose=0,
warm_start=False):
super().__init__(
base_estimator=ExtraTreeRegressor(
max_features=1,
splitter='random',
random_state=random_state),
# here above max_features has no links with self.max_features
bootstrap=bootstrap,
bootstrap_features=False,
n_estimators=n_estimators,
max_samples=max_samples,
max_features=max_features,
warm_start=warm_start,
n_jobs=n_jobs,
random_state=random_state,
verbose=verbose)
self.contamination = contamination
def _set_oob_score(self, X, y):
raise NotImplementedError("OOB score not supported by iforest")
def _parallel_args(self):
# ExtraTreeRegressor releases the GIL, so it's more efficient to use
# a thread-based backend rather than a process-based backend so as
# to avoid suffering from communication overhead and extra memory
# copies.
return _joblib_parallel_args(prefer='threads')
def fit(self, X, y=None, sample_weight=None):
"""
Fit estimator.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
The input samples. Use ``dtype=np.float32`` for maximum
efficiency. Sparse matrices are also supported, use sparse
``csc_matrix`` for maximum efficiency.
y : Ignored
Not used, present for API consistency by convention.
sample_weight : array-like of shape (n_samples,), default=None
Sample weights. If None, then samples are equally weighted.
Returns
-------
self : object
Fitted estimator.
"""
X = self._validate_data(X, accept_sparse=['csc'])
if issparse(X):
# Pre-sort indices to avoid that each individual tree of the
# ensemble sorts the indices.
X.sort_indices()
rnd = check_random_state(self.random_state)
y = rnd.uniform(size=X.shape[0])
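        # The base ExtraTreeRegressor needs some regression target to fit
        # against; a uniformly random y is used here because, with
        # max_features=1 and splitter='random', each node evaluates a single
        # random candidate split, so the target does not shape the trees.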
        # ensure that max_samples is in [1, n_samples]:
n_samples = X.shape[0]
if self.contamination != 'auto':
if not(0. < self.contamination <= .5):
raise ValueError("contamination must be in (0, 0.5], "
"got: %f" % self.contamination)
if isinstance(self.max_samples, str):
if self.max_samples == 'auto':
max_samples = min(256, n_samples)
else:
raise ValueError('max_samples (%s) is not supported.'
'Valid choices are: "auto", int or'
'float' % self.max_samples)
elif isinstance(self.max_samples, numbers.Integral):
if self.max_samples > n_samples:
warn("max_samples (%s) is greater than the "
"total number of samples (%s). max_samples "
"will be set to n_samples for estimation."
% (self.max_samples, n_samples))
max_samples = n_samples
else:
max_samples = self.max_samples
else: # float
if not 0. < self.max_samples <= 1.:
raise ValueError("max_samples must be in (0, 1], got %r"
% self.max_samples)
max_samples = int(self.max_samples * X.shape[0])
self.max_samples_ = max_samples
max_depth = int(np.ceil(np.log2(max(max_samples, 2))))
super()._fit(X, y, max_samples,
max_depth=max_depth,
sample_weight=sample_weight)
if self.contamination == "auto":
# 0.5 plays a special role as described in the original paper.
# we take the opposite as we consider the opposite of their score.
self.offset_ = -0.5
return self
# else, define offset_ wrt contamination parameter
self.offset_ = np.percentile(self.score_samples(X),
100. * self.contamination)
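        # For example, with contamination=0.1 the offset is the 10th
        # percentile of score_samples(X), so roughly 10% of the training
        # samples end up with a negative decision_function.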
return self
def predict(self, X):
"""
Predict if a particular sample is an outlier or not.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
The input samples. Internally, it will be converted to
``dtype=np.float32`` and if a sparse matrix is provided
to a sparse ``csr_matrix``.
Returns
-------
is_inlier : ndarray of shape (n_samples,)
For each observation, tells whether or not (+1 or -1) it should
be considered as an inlier according to the fitted model.
"""
check_is_fitted(self)
X = self._validate_data(X, accept_sparse='csr', reset=False)
is_inlier = np.ones(X.shape[0], dtype=int)
is_inlier[self.decision_function(X) < 0] = -1
return is_inlier
def decision_function(self, X):
"""
Average anomaly score of X of the base classifiers.
The anomaly score of an input sample is computed as
the mean anomaly score of the trees in the forest.
The measure of normality of an observation given a tree is the depth
of the leaf containing this observation, which is equivalent to
the number of splittings required to isolate this point. In case of
several observations n_left in the leaf, the average path length of
a n_left samples isolation tree is added.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
The input samples. Internally, it will be converted to
``dtype=np.float32`` and if a sparse matrix is provided
to a sparse ``csr_matrix``.
Returns
-------
scores : ndarray of shape (n_samples,)
The anomaly score of the input samples.
The lower, the more abnormal. Negative scores represent outliers,
positive scores represent inliers.
"""
# We subtract self.offset_ to make 0 be the threshold value for being
# an outlier:
return self.score_samples(X) - self.offset_
def score_samples(self, X):
"""
Opposite of the anomaly score defined in the original paper.
The anomaly score of an input sample is computed as
the mean anomaly score of the trees in the forest.
The measure of normality of an observation given a tree is the depth
of the leaf containing this observation, which is equivalent to
the number of splittings required to isolate this point. In case of
several observations n_left in the leaf, the average path length of
a n_left samples isolation tree is added.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
The input samples.
Returns
-------
scores : ndarray of shape (n_samples,)
The anomaly score of the input samples.
The lower, the more abnormal.
"""
# code structure from ForestClassifier/predict_proba
check_is_fitted(self)
# Check data
X = self._validate_data(X, accept_sparse='csr', reset=False)
# Take the opposite of the scores as bigger is better (here less
# abnormal)
return -self._compute_chunked_score_samples(X)
def _compute_chunked_score_samples(self, X):
n_samples = _num_samples(X)
if self._max_features == X.shape[1]:
subsample_features = False
else:
subsample_features = True
# We get as many rows as possible within our working_memory budget
# (defined by sklearn.get_config()['working_memory']) to store
# self._max_features in each row during computation.
#
# Note:
# - this will get at least 1 row, even if 1 row of score will
# exceed working_memory.
# - this does only account for temporary memory usage while loading
# the data needed to compute the scores -- the returned scores
# themselves are 1D.
chunk_n_rows = get_chunk_n_rows(row_bytes=16 * self._max_features,
max_n_rows=n_samples)
slices = gen_batches(n_samples, chunk_n_rows)
scores = np.zeros(n_samples, order="f")
for sl in slices:
# compute score on the slices of test samples:
scores[sl] = self._compute_score_samples(X[sl], subsample_features)
return scores
def _compute_score_samples(self, X, subsample_features):
"""
Compute the score of each samples in X going through the extra trees.
Parameters
----------
X : array-like or sparse matrix
Data matrix.
subsample_features : bool
Whether features should be subsampled.
"""
n_samples = X.shape[0]
depths = np.zeros(n_samples, order="f")
for tree, features in zip(self.estimators_, self.estimators_features_):
X_subset = X[:, features] if subsample_features else X
leaves_index = tree.apply(X_subset)
node_indicator = tree.decision_path(X_subset)
n_samples_leaf = tree.tree_.n_node_samples[leaves_index]
depths += (
np.ravel(node_indicator.sum(axis=1))
+ _average_path_length(n_samples_leaf)
- 1.0
)
denominator = (
len(self.estimators_) * _average_path_length([self.max_samples_])
)
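        # This implements the anomaly score of Liu et al. (2008),
        #   s(x, psi) = 2 ** (-E[h(x)] / c(psi)),
        # where depths / denominator is the path length averaged over the
        # trees, normalized by the average path length c(max_samples_).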
scores = 2 ** (
# For a single training sample, denominator and depth are 0.
# Therefore, we set the score manually to 1.
-np.divide(depths, denominator, out=np.ones_like(depths),
where=denominator != 0)
)
return scores
def _more_tags(self):
return {
'_xfail_checks': {
'check_sample_weights_invariance':
('zero sample_weight is not equivalent to removing samples'),
}
}
def _average_path_length(n_samples_leaf):
"""
The average path length in a n_samples iTree, which is equal to
the average path length of an unsuccessful BST search since the
latter has the same structure as an isolation tree.
Parameters
----------
n_samples_leaf : array-like of shape (n_samples,)
The number of training samples in each test sample leaf, for
        each estimator.
Returns
-------
average_path_length : ndarray of shape (n_samples,)
"""
n_samples_leaf = check_array(n_samples_leaf, ensure_2d=False)
n_samples_leaf_shape = n_samples_leaf.shape
n_samples_leaf = n_samples_leaf.reshape((1, -1))
average_path_length = np.zeros(n_samples_leaf.shape)
mask_1 = n_samples_leaf <= 1
mask_2 = n_samples_leaf == 2
not_mask = ~np.logical_or(mask_1, mask_2)
average_path_length[mask_1] = 0.
average_path_length[mask_2] = 1.
average_path_length[not_mask] = (
2.0 * (np.log(n_samples_leaf[not_mask] - 1.0) + np.euler_gamma)
- 2.0 * (n_samples_leaf[not_mask] - 1.0) / n_samples_leaf[not_mask]
)
return average_path_length.reshape(n_samples_leaf_shape)
|
|
import os
import subprocess
from conda_concourse_ci import execute
import conda_concourse_ci
from conda_concourse_ci.utils import HashableDict
import pytest
import yaml
from .utils import test_data_dir, graph_data_dir, test_config_dir
def test_collect_tasks(mocker, testing_conda_resolve, testing_graph):
mocker.patch.object(execute, 'Resolve')
mocker.patch.object(execute, 'get_build_index')
mocker.patch.object(conda_concourse_ci.compute_build_graph, '_installable')
execute.Resolve.return_value = testing_conda_resolve
conda_concourse_ci.compute_build_graph._installable.return_value = True
task_graph = execute.collect_tasks(graph_data_dir, folders=['a'],
matrix_base_dir=test_config_dir)
build_platforms = os.listdir(os.path.join(test_config_dir, 'build_platforms.d'))
# one build, one test per platform, uploads only for builds.
n_platforms = len(build_platforms)
# minimum args means build and test provided folders. Two tasks.
assert len(task_graph.nodes()) == n_platforms
boilerplate_test_vars = {'base-name': 'steve',
'aws-bucket': '123',
'aws-key-id': 'abc',
'aws-secret-key': 'weee',
'aws-region-name': 'frank'}
def test_get_build_task(testing_graph):
# ensure that our channels make it into the args
node = 'b-on-linux'
meta = testing_graph.nodes[node]['meta']
worker = testing_graph.nodes[node]['worker']
meta.config.channel_urls = ['conda_build_test']
task = execute.get_build_task(node, meta, worker)
assert task['config']['platform'] == 'linux'
assert task['config']['inputs'] == [{'name': 'rsync-recipes'}]
assert 'rsync-recipes/b-on-linux' in task['config']['run']['args'][-1]
assert 'conda_build_test' in task['config']['run']['args'][-1]
def test_graph_to_plan_with_jobs(mocker, testing_graph):
# stub out uploads, since it depends on config file stuff and we want to manipulate it
# get_upload = mocker.patch.object(execute, "get_upload_tasks")
# get_upload.return_value = []
with open(os.path.join(test_config_dir, 'config.yml')) as f:
config_vars = yaml.safe_load(f)
pipeline = execute.graph_to_plan_with_jobs(graph_data_dir, testing_graph, 'abc123',
test_config_dir, config_vars)
# rsync-recipes, rsync-source, rsync-stats, and one artifact resource per build
assert len(pipeline.resources) == 6
# a, b, c
assert len(pipeline.jobs) == 3
def test_submit(mocker):
mocker.patch.object(execute, 'subprocess')
mocker.patch.object(conda_concourse_ci.concourse, 'subprocess')
pipeline_file = os.path.join(test_config_dir, 'plan_director.yml')
execute.submit(pipeline_file, base_name="test", pipeline_name="test-pipeline",
src_dir='.', config_root_dir=os.path.join(test_data_dir, 'config-test'))
@pytest.mark.serial
def test_submit_one_off(mocker):
mocker.patch.object(conda_concourse_ci.concourse, 'subprocess')
check_call = mocker.patch.object(execute.subprocess, 'check_call')
execute.submit_one_off('frank', os.path.join(test_data_dir, 'one-off-recipes'),
folders=('bzip2', 'pytest', 'pytest-cov'),
config_root_dir=test_config_dir)
# basically what we're checking here is that the config_overrides have been passed correctly
check_call.assert_has_calls([mocker.call(['rsync', '--delete', '-av', '-e',
mocker.ANY, # ssh command that we don't care about much
mocker.ANY, # temp source directory that we don't care about
mocker.ANY, # -p (makes chmod flags work)
mocker.ANY, # chmod flags
('your-intermediate-user@your-intermediate-server:'
# this is what we care about. The middle entry here
# needs 'test' replaced with 'frank'. Also, we're syncing a
# plan and recipe folder, not a config folder
'/ci/frank/plan_and_recipes')
])])
@pytest.mark.serial
def test_submit_batch(mocker):
mocker.patch.object(execute, 'subprocess')
mocker.patch.object(conda_concourse_ci.concourse, 'subprocess')
submit_one_off = mocker.patch.object(execute, 'submit_one_off')
get_activate_builds = mocker.patch.object(execute, '_get_activate_builds', return_value=3)
execute.submit_batch(
os.path.join(test_data_dir, 'batch_sample.txt'),
os.path.join(test_data_dir, 'one-off-recipes'),
config_root_dir=test_config_dir,
max_builds=999, poll_time=0, build_lookback=500, label_prefix='sentinel_')
# submit_one_off should be called twice
submit_one_off.assert_has_calls([
mocker.call('sentinel_bzip', mocker.ANY, ['bzip'],
mocker.ANY, pass_throughs=None, clobber_sections_file='example.yaml'),
mocker.call('sentinel_pytest', mocker.ANY, ['pytest', 'pytest-cov'],
mocker.ANY, pass_throughs=None),
])
get_activate_builds.assert_called()
def test_bootstrap(mocker, testing_workdir):
execute.bootstrap('frank')
assert os.path.isfile('plan_director.yml')
assert os.path.isdir('frank')
assert os.path.isfile('frank/config.yml')
assert os.path.isdir('frank/uploads.d')
assert os.path.isdir('frank/build_platforms.d')
assert os.path.isdir('frank/test_platforms.d')
def test_compute_builds(testing_workdir, mocker, monkeypatch):
monkeypatch.chdir(test_data_dir)
output = os.path.join(testing_workdir, 'output')
# neutralize git checkout so we're really testing the HEAD commit
execute.compute_builds('.', 'config-name',
folders=['python_test', 'conda_forge_style_recipe'],
matrix_base_dir=os.path.join(test_data_dir, 'linux-config-test'),
output_dir=output)
assert os.path.isdir(output)
files = os.listdir(output)
assert 'plan.yml' in files
assert os.path.isfile(os.path.join(output, 'frank-1.0-python_2.7-on-centos5-64', 'meta.yaml'))
assert os.path.isfile(os.path.join(output, 'frank-1.0-python_2.7-on-centos5-64/', 'conda_build_config.yaml'))
assert os.path.isfile(os.path.join(output, 'frank-1.0-python_3.6-on-centos5-64', 'meta.yaml'))
assert os.path.isfile(os.path.join(output, 'frank-1.0-python_3.6-on-centos5-64/', 'conda_build_config.yaml'))
assert os.path.isfile(os.path.join(output, 'dummy_conda_forge_test-1.0-on-centos5-64', 'meta.yaml'))
with open(os.path.join(output, 'dummy_conda_forge_test-1.0-on-centos5-64/', 'conda_build_config.yaml')) as f:
cfg = f.read()
assert cfg is not None
if hasattr(cfg, 'decode'):
cfg = cfg.decode()
assert "HashableDict" not in cfg
def test_compute_builds_intradependencies(testing_workdir, monkeypatch, mocker):
"""When we build stuff, and upstream dependencies are part of the batch, but they're
also already installable, then we do extra work to make sure that we order our build
so that downstream builds depend on upstream builds (and don't directly use the
already-available packages.)"""
monkeypatch.chdir(os.path.join(test_data_dir, 'intradependencies'))
# neutralize git checkout so we're really testing the HEAD commit
output_dir = os.path.join(testing_workdir, 'output')
execute.compute_builds('.', 'config-name',
folders=['zlib', 'uses_zlib'],
matrix_base_dir=os.path.join(test_data_dir, 'linux-config-test'),
output_dir=output_dir)
assert os.path.isdir(output_dir)
files = os.listdir(output_dir)
assert 'plan.yml' in files
with open(os.path.join(output_dir, 'plan.yml')) as f:
plan = yaml.safe_load(f)
uses_zlib_job = [job for job in plan['jobs'] if job['name'] == 'uses_zlib-1.0-on-centos5-64'][0]
assert any(task.get('passed') == ['zlib_wannabe-1.2.8-on-centos5-64']
for task in uses_zlib_job['plan'])
def test_python_build_matrix_expansion(monkeypatch):
monkeypatch.chdir(test_data_dir)
tasks = execute.collect_tasks('.', matrix_base_dir=os.path.join(test_data_dir, 'linux-config-test'),
folders=['python_test'])
assert len(tasks.nodes()) == 2
assert 'frank-1.0-python_2.7-on-centos5-64' in tasks.nodes()
assert 'frank-1.0-python_3.6-on-centos5-64' in tasks.nodes()
def test_subpackage_matrix_no_subpackages(monkeypatch):
"""Subpackages should not constitute new entries in the build graph. They should be lumped in
with their parent recipe. However, we have to include them initially for the sake of
dependency ordering. Thus we initially include them as though they were full packages, but
    then we squish them together and re-assign any dependency edges."""
monkeypatch.chdir(test_data_dir)
tasks = execute.collect_tasks('.', matrix_base_dir=os.path.join(test_data_dir, 'linux-config-test'),
folders=['has_subpackages', 'depends_on_subpackage'])
assert len(tasks.nodes()) == 2
assert 'has_subpackages_toplevel-1.0-on-centos5-64' in tasks.nodes()
assert 'depends_on_subpackage-1.0-on-centos5-64' in tasks.nodes()
assert 'has_subpackages_subpackage-1.0-on-centos5-64' not in tasks.nodes()
# this is the actual dependency
assert ('depends_on_subpackage-1.0-on-centos5-64', 'has_subpackages_subpackage-1.0-on-centos5-64') not in tasks.edges()
# this is what we remap it to
assert ('depends_on_subpackage-1.0-on-centos5-64', 'has_subpackages_toplevel-1.0-on-centos5-64') in tasks.edges()
def test_dependency_with_selector_cross_compile(testing_conda_resolve):
g = execute.collect_tasks(test_data_dir, ['selector_run', 'functools32-feedstock'],
matrix_base_dir=os.path.join(test_data_dir, 'config-win'),
variant_config_files=os.path.join(test_data_dir, 'conda_build_config.yaml'))
assert len(g.nodes()) == 6
# native edge
assert ('test_run_deps_with_selector-1.0-python_2.7-on-win-64',
'functools32_wannabe-3.2.3.2-python_2.7-on-win-64') in g.edges()
# cross edge
assert ('test_run_deps_with_selector-1.0-python_2.7-target_win-32-on-win-64',
'functools32_wannabe-3.2.3.2-python_2.7-target_win-32-on-win-64') in g.edges()
def test_collapse_with_win_matrix_and_subpackages(monkeypatch):
monkeypatch.chdir(test_data_dir)
tasks = execute.collect_tasks('.', matrix_base_dir=os.path.join(test_data_dir, 'config-win'),
folders=['win_split_outputs_compiler_reduction'])
# 8 subpackages, but 4 total builds - 2 subpackages per build
assert len(tasks.nodes()) == 4
assert 'postgresql-split-10.1-c_compiler_vs2008-on-win-64' in tasks.nodes()
assert 'postgresql-split-10.1-c_compiler_vs2015-on-win-64' in tasks.nodes()
assert 'postgresql-split-10.1-c_compiler_vs2008-target_win-32-on-win-64' in tasks.nodes()
assert 'postgresql-split-10.1-c_compiler_vs2015-target_win-32-on-win-64' in tasks.nodes()
def test_collapse_noarch_python():
path = os.path.join(test_data_dir, 'noarch_python_recipes')
folders = ['pkg_a', 'pkg_b']
variant_file = os.path.join(test_data_dir, 'noarch_python_recipes', 'conda_build_config.yaml')
tasks = execute.collect_tasks(path, folders, matrix_base_dir=test_config_dir,
variant_config_files=variant_file)
# 9 nodes,
# * 1 for the noarch: python pkg_a build on centos5-64
# * 2 for the noarch: python pkg_a tests on osx-109 and win-32
    # * 6 for pkg_b (3 platforms x 2 python versions)
print(tasks.nodes())
assert len(tasks.nodes()) == 9
assert 'pkg_a-1.0.0-on-centos5-64' in tasks.nodes()
assert 'test-pkg_a-1.0.0-on-osx-109' in tasks.nodes()
assert 'test-pkg_a-1.0.0-on-win-32' in tasks.nodes()
assert 'pkg_b-1.0.0-python_3.6-on-osx-109' in tasks.nodes()
assert 'pkg_b-1.0.0-python_2.7-on-osx-109' in tasks.nodes()
assert 'pkg_b-1.0.0-python_3.6-on-win-32' in tasks.nodes()
assert 'pkg_b-1.0.0-python_2.7-on-win-32' in tasks.nodes()
assert 'pkg_b-1.0.0-python_3.6-on-centos5-64' in tasks.nodes()
assert 'pkg_b-1.0.0-python_2.7-on-centos5-64' in tasks.nodes()
# test nodes should be labeled as such
assert tasks.nodes['test-pkg_a-1.0.0-on-osx-109']['test_only'] == True
assert tasks.nodes['test-pkg_a-1.0.0-on-win-32']['test_only'] == True
# 8 edges
# * 6 pkg_b nodes have an edge to the pkg_a build node
# * 2 pkg_a tests nodes with edges to the pkg_a build_node
print(tasks.edges())
assert len(tasks.edges()) == 8
a_build_node = 'pkg_a-1.0.0-on-centos5-64'
assert ('test-pkg_a-1.0.0-on-osx-109', a_build_node) in tasks.edges()
assert ('test-pkg_a-1.0.0-on-win-32', a_build_node) in tasks.edges()
assert ('pkg_b-1.0.0-python_3.6-on-osx-109', a_build_node) in tasks.edges()
assert ('pkg_b-1.0.0-python_2.7-on-osx-109', a_build_node) in tasks.edges()
assert ('pkg_b-1.0.0-python_3.6-on-win-32', a_build_node) in tasks.edges()
assert ('pkg_b-1.0.0-python_2.7-on-win-32', a_build_node) in tasks.edges()
assert ('pkg_b-1.0.0-python_3.6-on-centos5-64', a_build_node) in tasks.edges()
assert ('pkg_b-1.0.0-python_2.7-on-centos5-64', a_build_node) in tasks.edges()
|
|
import unittest
import os
import random
from toil.lib.bioio import getTempFile
from toil.job import Job, JobGraphDeadlockException
from toil.test import ToilTest
class JobTest(ToilTest):
"""
Tests testing the job class
"""
def testStatic(self):
"""
Create a DAG of jobs non-dynamically and run it. DAG is:
A -> F
\-------
B -> D \
\ \
------- C -> E
Follow on is marked by ->
"""
#Temporary file
outFile = getTempFile(rootDir=os.getcwd())
#Create the jobs
A = Job.wrapFn(f, "A", outFile)
B = Job.wrapFn(f, A.rv(0), outFile)
C = Job.wrapFn(f, B.rv(0), outFile)
D = Job.wrapFn(f, C.rv(0), outFile)
E = Job.wrapFn(f, D.rv(0), outFile)
F = Job.wrapFn(f, E.rv(0), outFile)
#Connect them into a workflow
A.addChild(B)
A.addChild(C)
B.addChild(C)
B.addFollowOn(E)
C.addFollowOn(D)
A.addFollowOn(F)
#Create the runner for the workflow.
options = Job.Runner.getDefaultOptions()
options.logLevel = "INFO"
#Run the workflow, the return value being the number of failed jobs
self.assertEquals(Job.Runner.startToil(A, options), 0)
Job.Runner.cleanup(options) #This removes the jobStore
#Check output
self.assertEquals(open(outFile, 'r').readline(), "ABCDEF")
#Cleanup
os.remove(outFile)
def testDeadlockDetection(self):
"""
Randomly generate job graphs with various types of cycle in them and
check they cause an exception properly. Also check that multiple roots
        cause a deadlock exception.
"""
for test in xrange(100):
#Make a random DAG for the set of child edges
nodeNumber = random.choice(xrange(2, 20))
childEdges = self.makeRandomDAG(nodeNumber)
#Get an adjacency list representation and check is acyclic
adjacencyList = self.getAdjacencyList(nodeNumber, childEdges)
self.assertTrue(self.isAcyclic(adjacencyList))
#Add in follow on edges - these are returned as a list, and as a set
#of augmented edges in the adjacency list
followOnEdges = self.addRandomFollowOnEdges(adjacencyList)
self.assertTrue(self.isAcyclic(adjacencyList))
#Make the job graph
rootJob = self.makeJobGraph(nodeNumber, childEdges, followOnEdges, None)
rootJob.checkJobGraphAcylic() #This should not throw an exception
rootJob.checkJobGraphConnected() #Nor this
#Check root detection explicitly
self.assertEquals(rootJob.getRootJobs(), set((rootJob,)))
#Test making multiple roots
childEdges2 = childEdges.copy()
childEdges2.add((nodeNumber, 1)) #This creates an extra root at "nodeNumber"
rootJob2 = self.makeJobGraph(nodeNumber+1, childEdges2, followOnEdges, None)
try:
rootJob2.checkJobGraphConnected()
self.assertTrue(False) #Multiple roots were not detected
except JobGraphDeadlockException:
pass #This is the expected behaviour
def checkChildEdgeCycleDetection(fNode, tNode):
childEdges.add((fNode, tNode)) #Create a cycle
adjacencyList[fNode].add(tNode)
self.assertTrue(not self.isAcyclic(adjacencyList))
try:
self.makeJobGraph(nodeNumber, childEdges,
followOnEdges, None).checkJobGraphAcylic()
self.assertTrue(False) #A cycle was not detected
except JobGraphDeadlockException:
pass #This is the expected behaviour
#Remove the edges
childEdges.remove((fNode, tNode))
adjacencyList[fNode].remove(tNode)
#Check is now acyclic again
self.makeJobGraph(nodeNumber, childEdges,
followOnEdges, None).checkJobGraphAcylic()
#Now try adding edges that create a cycle
##Try adding a child edge from a descendant to an ancestor
fNode, tNode = self.getRandomEdge(nodeNumber)
while fNode not in self.reachable(tNode, adjacencyList):
fNode, tNode = self.getRandomEdge(nodeNumber)
checkChildEdgeCycleDetection(fNode, tNode)
##Try adding a self child edge
node = random.choice(xrange(nodeNumber))
checkChildEdgeCycleDetection(node, node)
def checkFollowOnEdgeCycleDetection(fNode, tNode):
followOnEdges.add((fNode, tNode)) #Create a cycle
try:
self.makeJobGraph(nodeNumber, childEdges,
followOnEdges, None).checkJobGraphAcylic()
#self.assertTrue(False) #The cycle was not detected
except JobGraphDeadlockException:
pass #This is the expected behaviour
#Remove the edges
followOnEdges.remove((fNode, tNode))
#Check is now acyclic again
self.makeJobGraph(nodeNumber, childEdges,
followOnEdges, None).checkJobGraphAcylic()
##Try adding a follow on edge from a descendant to an ancestor
fNode, tNode = self.getRandomEdge(nodeNumber)
while fNode not in self.reachable(tNode, adjacencyList):
fNode, tNode = self.getRandomEdge(nodeNumber)
checkFollowOnEdgeCycleDetection(fNode, tNode)
##Try adding a self follow on edge
node = random.choice(xrange(nodeNumber))
checkFollowOnEdgeCycleDetection(node, node)
##Try adding a follow on edge between two nodes with shared descendants
fNode, tNode = self.getRandomEdge(nodeNumber)
if (len(self.reachable(tNode, adjacencyList).\
intersection(self.reachable(fNode, adjacencyList))) > 0 and
(fNode, tNode) not in childEdges):
checkFollowOnEdgeCycleDetection(fNode, tNode)
def testEvaluatingRandomDAG(self):
"""
Randomly generate test input then check that the ordering of the running
respected the constraints.
"""
for test in xrange(30):
#Temporary file
outFile = getTempFile(rootDir=os.getcwd())
#Make a random DAG for the set of child edges
nodeNumber = random.choice(xrange(2, 20))
childEdges = self.makeRandomDAG(nodeNumber)
#Get an adjacency list representation and check is acyclic
adjacencyList = self.getAdjacencyList(nodeNumber, childEdges)
self.assertTrue(self.isAcyclic(adjacencyList))
#Add in follow on edges - these are returned as a list, and as a set
#of augmented edges in the adjacency list
followOnEdges = self.addRandomFollowOnEdges(adjacencyList)
self.assertTrue(self.isAcyclic(adjacencyList))
#Make the job graph
rootJob = self.makeJobGraph(nodeNumber, childEdges, followOnEdges, outFile)
#Run the job graph
options = Job.Runner.getDefaultOptions()
failedJobs = Job.Runner.startToil(rootJob, options)
self.assertEquals(failedJobs, 0)
#Get the ordering add the implied ordering to the graph
with open(outFile, 'r') as fH:
ordering = map(int, fH.readline().split())
#Check all the jobs were run
self.assertEquals(set(ordering), set(xrange(nodeNumber)))
#Add the ordering to the graph
for i in xrange(nodeNumber-1):
adjacencyList[ordering[i]].add(ordering[i+1])
#Check the ordering retains an acyclic graph
if not self.isAcyclic(adjacencyList):
print "ORDERING", ordering
print "CHILD EDGES", childEdges
print "FOLLOW ON EDGES", followOnEdges
print "ADJACENCY LIST", adjacencyList
self.assertTrue(self.isAcyclic(adjacencyList))
#Cleanup
os.remove(outFile)
@staticmethod
def getRandomEdge(nodeNumber):
assert nodeNumber > 1
fNode = random.choice(xrange(nodeNumber-1))
return (fNode, random.choice(xrange(fNode,nodeNumber)))
@staticmethod
def makeRandomDAG(nodeNumber):
"""
Makes a random dag with "nodeNumber" nodes in which all nodes are
connected. Return value is list of edges, each of form (a, b),
where a and b are integers >= 0 < nodeNumber
referring to nodes and the edge is from a to b.
"""
#Pick number of total edges to create
edgeNumber = random.choice(xrange(nodeNumber-1, 1 + (nodeNumber * (nodeNumber-1)) / 2))
#Make a spanning tree of edges so that nodes are connected
edges = set(map(lambda i : (random.choice(xrange(i)), i), xrange(1, nodeNumber)))
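        #Each node i > 0 gets a parent chosen from nodes 0..i-1, so the graph
        #is connected and every edge goes from a lower to a higher index,
        #which keeps it acyclic.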
#Add extra random edges until there are edgeNumber edges
        while len(edges) < edgeNumber:
            fNode, tNode = JobTest.getRandomEdge(nodeNumber)
            if fNode != tNode: #Skip self-edges, which would create a cycle
                edges.add((fNode, tNode))
return edges
@staticmethod
def getAdjacencyList(nodeNumber, edges):
"""
Make adjacency list representation of edges
"""
adjacencyList = [ set() for i in xrange(nodeNumber) ]
for fNode, tNode in edges:
adjacencyList[fNode].add(tNode)
return adjacencyList
@staticmethod
def reachable(node, adjacencyList, followOnAdjacencyList=None):
"""
Find the set of nodes reachable from this node (including the node).
Return is a set of integers.
"""
visited = set()
def dfs(fNode):
if fNode not in visited:
visited.add(fNode)
for tNode in adjacencyList[fNode]:
dfs(tNode)
if followOnAdjacencyList != None:
for tNode in followOnAdjacencyList[fNode]:
dfs(tNode)
dfs(node)
return visited
@staticmethod
def addRandomFollowOnEdges(childAdjacencyList):
"""
Adds random follow on edges to the graph, represented as an adjacency list.
The follow on edges are returned as a set and their augmented edges
are added to the adjacency list.
"""
#This function makes the augmented adjacency list
def makeAugmentedAdjacencyList():
augmentedAdjacencyList = map(lambda i : childAdjacencyList[i].union(followOnAdjacencyList[i]),
range(len(childAdjacencyList)))
def addImpliedEdges(node, followOnEdges):
visited = set()
def f(node):
if node not in visited:
visited.add(node)
for i in followOnEdges:
augmentedAdjacencyList[node].add(i)
map(f, childAdjacencyList[node])
map(f, followOnAdjacencyList[node])
map(f, childAdjacencyList[node])
for node in xrange(len(followOnAdjacencyList)):
addImpliedEdges(node, followOnAdjacencyList[node])
return augmentedAdjacencyList
followOnEdges = set()
followOnAdjacencyList = map(lambda i : set(), childAdjacencyList)
#Loop to create the follow on edges (try 1000 times)
while random.random() > 0.001:
fNode, tNode = JobTest.getRandomEdge(len(childAdjacencyList))
#Get the descendants of fNode not on a path of edges starting with a follow-on
#edge from fNode
fDescendants = reduce(lambda i, j : i.union(j), map(lambda c :
JobTest.reachable(c, childAdjacencyList, followOnAdjacencyList), childAdjacencyList[fNode]), set())
fDescendants.add(fNode)
#Make an adjacency list including augmented edges and proposed
#follow on edge
#Add the new follow on edge
followOnAdjacencyList[fNode].add(tNode)
augmentedAdjacencyList = makeAugmentedAdjacencyList()
#If the augmented adjacency doesn't contain a cycle then add the follow on edge
#to the list of follow ons else remove the follow on edge from the follow on
#adjacency list
if JobTest.isAcyclic(augmentedAdjacencyList):
followOnEdges.add((fNode, tNode))
else:
followOnAdjacencyList[fNode].remove(tNode)
#Update adjacency list adding in augmented edges
childAdjacencyList[:] = makeAugmentedAdjacencyList()[:]
return followOnEdges
@staticmethod
def makeJobGraph(nodeNumber, childEdges, followOnEdges, outFile):
"""
Converts a DAG into a job graph. childEdges and followOnEdges are
the lists of child and followOn edges.
"""
jobs = map(lambda i : Job.wrapFn(f, str(i) + " ", outFile), xrange(nodeNumber))
for fNode, tNode in childEdges:
jobs[fNode].addChild(jobs[tNode])
for fNode, tNode in followOnEdges:
jobs[fNode].addFollowOn(jobs[tNode])
return jobs[0]
@staticmethod
def isAcyclic(adjacencyList):
"""
        Returns true if there are no cycles in the graph, which is represented as an
adjacency list.
"""
def cyclic(fNode, visited, stack):
if fNode not in visited:
visited.add(fNode)
assert fNode not in stack
stack.append(fNode)
for tNode in adjacencyList[fNode]:
if cyclic(tNode, visited, stack):
return True
assert stack.pop() == fNode
return fNode in stack
visited = set()
for i in xrange(len(adjacencyList)):
if cyclic(i, visited, []):
return False
return True
def f(string, outFile):
"""
Function appends string to output file, then returns the
next ascii character of the first character in the string, e.g.
if string is "AA" returns "B"
"""
fH = open(outFile, 'a')
fH.write(string)
fH.close()
return chr(ord(string[0])+1)
if __name__ == '__main__':
unittest.main()
|
|
# Natural Language Toolkit: Dependency Grammars
#
# Copyright (C) 2001-2017 NLTK Project
# Author: Jason Narad <[email protected]>
# Steven Bird <[email protected]> (modifications)
#
# URL: <http://nltk.org/>
# For license information, see LICENSE.TXT
#
"""
Tools for reading and writing dependency trees.
The input is assumed to be in Malt-TAB format
(http://stp.lingfil.uu.se/~nivre/research/MaltXML.html).
"""
from __future__ import print_function, unicode_literals
from collections import defaultdict
from itertools import chain
from pprint import pformat
import subprocess
import warnings
from six import string_types
from nltk.tree import Tree
from nltk.compat import python_2_unicode_compatible
#################################################################
# DependencyGraph Class
#################################################################
@python_2_unicode_compatible
class DependencyGraph(object):
"""
A container for the nodes and labelled edges of a dependency structure.
"""
def __init__(self, tree_str=None, cell_extractor=None, zero_based=False, cell_separator=None, top_relation_label='ROOT'):
"""Dependency graph.
We place a dummy `TOP` node with the index 0, since the root node is
often assigned 0 as its head. This also means that the indexing of the
nodes corresponds directly to the Malt-TAB format, which starts at 1.
        If zero_based is True, the input is assumed to be Malt-TAB-like with
        node numbers starting at 0 and the root node assigned -1 (as produced
        by, e.g., zpar).
:param str cell_separator: the cell separator. If not provided, cells
are split by whitespace.
:param str top_relation_label: the label by which the top relation is
            identified, for example, `ROOT`, `null` or `TOP`.
"""
self.nodes = defaultdict(lambda: {'address': None,
'word': None,
'lemma': None,
'ctag': None,
'tag': None,
'feats': None,
'head': None,
'deps': defaultdict(list),
'rel': None,
})
self.nodes[0].update(
{
'ctag': 'TOP',
'tag': 'TOP',
'address': 0,
}
)
self.root = None
if tree_str:
self._parse(
tree_str,
cell_extractor=cell_extractor,
zero_based=zero_based,
cell_separator=cell_separator,
top_relation_label=top_relation_label,
)
def remove_by_address(self, address):
"""
Removes the node with the given address. References
to this node in others will still exist.
"""
del self.nodes[address]
def redirect_arcs(self, originals, redirect):
"""
Redirects arcs to any of the nodes in the originals list
to the redirect node address.
"""
for node in self.nodes.values():
new_deps = []
for dep in node['deps']:
if dep in originals:
new_deps.append(redirect)
else:
new_deps.append(dep)
node['deps'] = new_deps
def add_arc(self, head_address, mod_address):
"""
Adds an arc from the node specified by head_address to the
node specified by the mod address.
"""
relation = self.nodes[mod_address]['rel']
self.nodes[head_address]['deps'].setdefault(relation, [])
self.nodes[head_address]['deps'][relation].append(mod_address)
#self.nodes[head_address]['deps'].append(mod_address)
def connect_graph(self):
"""
Fully connects all non-root nodes. All nodes are set to be dependents
of the root node.
"""
for node1 in self.nodes.values():
for node2 in self.nodes.values():
if node1['address'] != node2['address'] and node2['rel'] != 'TOP':
relation = node2['rel']
node1['deps'].setdefault(relation, [])
node1['deps'][relation].append(node2['address'])
#node1['deps'].append(node2['address'])
def get_by_address(self, node_address):
"""Return the node with the given address."""
return self.nodes[node_address]
def contains_address(self, node_address):
"""
Returns true if the graph contains a node with the given node
address, false otherwise.
"""
return node_address in self.nodes
def to_dot(self):
"""Return a dot representation suitable for using with Graphviz.
>>> dg = DependencyGraph(
... 'John N 2\\n'
... 'loves V 0\\n'
... 'Mary N 2'
... )
>>> print(dg.to_dot())
digraph G{
edge [dir=forward]
node [shape=plaintext]
<BLANKLINE>
0 [label="0 (None)"]
0 -> 2 [label="ROOT"]
1 [label="1 (John)"]
2 [label="2 (loves)"]
2 -> 1 [label=""]
2 -> 3 [label=""]
3 [label="3 (Mary)"]
}
"""
# Start the digraph specification
s = 'digraph G{\n'
s += 'edge [dir=forward]\n'
s += 'node [shape=plaintext]\n'
# Draw the remaining nodes
for node in sorted(self.nodes.values(), key=lambda v: v['address']):
s += '\n%s [label="%s (%s)"]' % (node['address'], node['address'], node['word'])
for rel, deps in node['deps'].items():
for dep in deps:
if rel is not None:
s += '\n%s -> %s [label="%s"]' % (node['address'], dep, rel)
else:
s += '\n%s -> %s ' % (node['address'], dep)
s += "\n}"
return s
def _repr_svg_(self):
"""Show SVG representation of the transducer (IPython magic).
>>> dg = DependencyGraph(
... 'John N 2\\n'
... 'loves V 0\\n'
... 'Mary N 2'
... )
>>> dg._repr_svg_().split('\\n')[0]
'<?xml version="1.0" encoding="UTF-8" standalone="no"?>'
"""
dot_string = self.to_dot()
try:
process = subprocess.Popen(
['dot', '-Tsvg'],
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
universal_newlines=True,
)
except OSError:
raise Exception('Cannot find the dot binary from Graphviz package')
out, err = process.communicate(dot_string)
if err:
raise Exception(
'Cannot create svg representation by running dot from string: {}'
''.format(dot_string))
return out
def __str__(self):
return pformat(self.nodes)
def __repr__(self):
return "<DependencyGraph with {0} nodes>".format(len(self.nodes))
@staticmethod
def load(filename, zero_based=False, cell_separator=None, top_relation_label='ROOT'):
"""
:param filename: a name of a file in Malt-TAB format
:param zero_based: nodes in the input file are numbered starting from 0
rather than 1 (as produced by, e.g., zpar)
:param str cell_separator: the cell separator. If not provided, cells
are split by whitespace.
:param str top_relation_label: the label by which the top relation is
            identified, for example, `ROOT`, `null` or `TOP`.
:return: a list of DependencyGraphs
"""
with open(filename) as infile:
return [
DependencyGraph(
tree_str,
zero_based=zero_based,
cell_separator=cell_separator,
top_relation_label=top_relation_label,
)
for tree_str in infile.read().split('\n\n')
]
def left_children(self, node_index):
"""
Returns the number of left children under the node specified
by the given address.
"""
children = chain.from_iterable(self.nodes[node_index]['deps'].values())
index = self.nodes[node_index]['address']
return sum(1 for c in children if c < index)
def right_children(self, node_index):
"""
Returns the number of right children under the node specified
by the given address.
"""
children = chain.from_iterable(self.nodes[node_index]['deps'].values())
index = self.nodes[node_index]['address']
return sum(1 for c in children if c > index)
def add_node(self, node):
if not self.contains_address(node['address']):
self.nodes[node['address']].update(node)
def _parse(self, input_, cell_extractor=None, zero_based=False, cell_separator=None, top_relation_label='ROOT'):
"""Parse a sentence.
        :param cell_extractor: a function that given a tuple of cells returns a
7-tuple, where the values are ``word, lemma, ctag, tag, feats, head,
rel``.
:param str cell_separator: the cell separator. If not provided, cells
are split by whitespace.
:param str top_relation_label: the label by which the top relation is
            identified, for example, `ROOT`, `null` or `TOP`.
"""
def extract_3_cells(cells, index):
word, tag, head = cells
return index, word, word, tag, tag, '', head, ''
def extract_4_cells(cells, index):
word, tag, head, rel = cells
return index, word, word, tag, tag, '', head, rel
def extract_7_cells(cells, index):
line_index, word, lemma, tag, _, head, rel = cells
try:
index = int(line_index)
except ValueError:
# index can't be parsed as an integer, use default
pass
return index, word, lemma, tag, tag, '', head, rel
def extract_10_cells(cells, index):
line_index, word, lemma, ctag, tag, feats, head, rel, _, _ = cells
try:
index = int(line_index)
except ValueError:
# index can't be parsed as an integer, use default
pass
return index, word, lemma, ctag, tag, feats, head, rel
extractors = {
3: extract_3_cells,
4: extract_4_cells,
7: extract_7_cells,
10: extract_10_cells,
}
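        # For example, a 4-cell Malt-TAB line such as "John NNP 2 SUB" is
        # mapped by extract_4_cells to
        # (index, 'John', 'John', 'NNP', 'NNP', '', '2', 'SUB'): the lemma
        # defaults to the word, the ctag to the tag and feats to ''.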
if isinstance(input_, string_types):
input_ = (line for line in input_.split('\n'))
lines = (l.rstrip() for l in input_)
lines = (l for l in lines if l)
cell_number = None
for index, line in enumerate(lines, start=1):
cells = line.split(cell_separator)
if cell_number is None:
cell_number = len(cells)
else:
assert cell_number == len(cells)
if cell_extractor is None:
try:
cell_extractor = extractors[cell_number]
except KeyError:
raise ValueError(
'Number of tab-delimited fields ({0}) not supported by '
'CoNLL(10) or Malt-Tab(4) format'.format(cell_number)
)
try:
index, word, lemma, ctag, tag, feats, head, rel = cell_extractor(cells, index)
except (TypeError, ValueError):
# cell_extractor doesn't take 2 arguments or doesn't return 8
# values; assume the cell_extractor is an older external
# extractor and doesn't accept or return an index.
word, lemma, ctag, tag, feats, head, rel = cell_extractor(cells)
if head == '_':
continue
head = int(head)
if zero_based:
head += 1
self.nodes[index].update(
{
'address': index,
'word': word,
'lemma': lemma,
'ctag': ctag,
'tag': tag,
'feats': feats,
'head': head,
'rel': rel,
}
)
# Make sure that the fake root node has labeled dependencies.
if (cell_number == 3) and (head == 0):
rel = top_relation_label
self.nodes[head]['deps'][rel].append(index)
if self.nodes[0]['deps'][top_relation_label]:
root_address = self.nodes[0]['deps'][top_relation_label][0]
self.root = self.nodes[root_address]
self.top_relation_label = top_relation_label
else:
warnings.warn(
"The graph doesn't contain a node "
"that depends on the root element."
)
def _word(self, node, filter=True):
w = node['word']
if filter:
if w != ',':
return w
return w
def _tree(self, i):
""" Turn dependency graphs into NLTK trees.
:param int i: index of a node
:return: either a word (if the indexed node is a leaf) or a ``Tree``.
"""
node = self.get_by_address(i)
word = node['word']
deps = sorted(chain.from_iterable(node['deps'].values()))
if deps:
return Tree(word, [self._tree(dep) for dep in deps])
else:
return word
def tree(self):
"""
Starting with the ``root`` node, build a dependency tree using the NLTK
``Tree`` constructor. Dependency labels are omitted.
"""
node = self.root
word = node['word']
deps = sorted(chain.from_iterable(node['deps'].values()))
return Tree(word, [self._tree(dep) for dep in deps])
def triples(self, node=None):
"""
Extract dependency triples of the form:
((head word, head tag), rel, (dep word, dep tag))
"""
if not node:
node = self.root
head = (node['word'], node['ctag'])
for i in sorted(chain.from_iterable(node['deps'].values())):
dep = self.get_by_address(i)
yield (head, dep['rel'], (dep['word'], dep['ctag']))
for triple in self.triples(node=dep):
yield triple
def _hd(self, i):
try:
return self.nodes[i]['head']
except IndexError:
return None
def _rel(self, i):
try:
return self.nodes[i]['rel']
except IndexError:
return None
# what's the return type? Boolean or list?
def contains_cycle(self):
"""Check whether there are cycles.
>>> dg = DependencyGraph(treebank_data)
>>> dg.contains_cycle()
False
>>> cyclic_dg = DependencyGraph()
>>> top = {'word': None, 'deps': [1], 'rel': 'TOP', 'address': 0}
>>> child1 = {'word': None, 'deps': [2], 'rel': 'NTOP', 'address': 1}
>>> child2 = {'word': None, 'deps': [4], 'rel': 'NTOP', 'address': 2}
>>> child3 = {'word': None, 'deps': [1], 'rel': 'NTOP', 'address': 3}
>>> child4 = {'word': None, 'deps': [3], 'rel': 'NTOP', 'address': 4}
>>> cyclic_dg.nodes = {
... 0: top,
... 1: child1,
... 2: child2,
... 3: child3,
... 4: child4,
... }
>>> cyclic_dg.root = top
>>> cyclic_dg.contains_cycle()
[3, 1, 2, 4]
"""
distances = {}
for node in self.nodes.values():
for dep in node['deps']:
key = tuple([node['address'], dep])
distances[key] = 1
for _ in self.nodes:
new_entries = {}
for pair1 in distances:
for pair2 in distances:
if pair1[1] == pair2[0]:
key = tuple([pair1[0], pair2[1]])
new_entries[key] = distances[pair1] + distances[pair2]
for pair in new_entries:
distances[pair] = new_entries[pair]
if pair[0] == pair[1]:
path = self.get_cycle_path(self.get_by_address(pair[0]), pair[0])
return path
return False # return []?
def get_cycle_path(self, curr_node, goal_node_index):
for dep in curr_node['deps']:
if dep == goal_node_index:
return [curr_node['address']]
for dep in curr_node['deps']:
path = self.get_cycle_path(self.get_by_address(dep), goal_node_index)
if len(path) > 0:
path.insert(0, curr_node['address'])
return path
return []
def to_conll(self, style):
"""
The dependency graph in CoNLL format.
:param style: the style to use for the format (3, 4, 10 columns)
:type style: int
:rtype: str
"""
if style == 3:
template = '{word}\t{tag}\t{head}\n'
elif style == 4:
template = '{word}\t{tag}\t{head}\t{rel}\n'
elif style == 10:
template = '{i}\t{word}\t{lemma}\t{ctag}\t{tag}\t{feats}\t{head}\t{rel}\t_\t_\n'
else:
raise ValueError(
'Number of tab-delimited fields ({0}) not supported by '
'CoNLL(10) or Malt-Tab(4) format'.format(style)
)
return ''.join(template.format(i=i, **node) for i, node in sorted(self.nodes.items()) if node['tag'] != 'TOP')
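    # Output sketch for to_conll() (illustrative only):
    #   style=3  -> "word<TAB>tag<TAB>head" per token
    #   style=4  -> "word<TAB>tag<TAB>head<TAB>rel" per token
    #   style=10 -> full ten-column CoNLL rows, with the last two columns '_'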
def nx_graph(self):
"""Convert the data in a ``nodelist`` into a networkx labeled directed graph."""
import networkx
nx_nodelist = list(range(1, len(self.nodes)))
nx_edgelist = [
(n, self._hd(n), self._rel(n))
for n in nx_nodelist if self._hd(n)
]
self.nx_labels = {}
for n in nx_nodelist:
self.nx_labels[n] = self.nodes[n]['word']
g = networkx.MultiDiGraph()
g.add_nodes_from(nx_nodelist)
g.add_edges_from(nx_edgelist)
return g
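# A minimal, hedged round-trip sketch for DependencyGraph; it relies only on
# the demo data and methods defined in this module:
#
#     dg = DependencyGraph(treebank_data)
#     dg.tree().pprint()     # dependency structure rendered as an NLTK Tree
#     print(dg.to_conll(4))  # and back out to 4-column Malt-TAB text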
class DependencyGraphError(Exception):
"""Dependency graph exception."""
def demo():
malt_demo()
conll_demo()
conll_file_demo()
cycle_finding_demo()
def malt_demo(nx=False):
"""
A demonstration of the result of reading a dependency
version of the first sentence of the Penn Treebank.
"""
dg = DependencyGraph("""Pierre NNP 2 NMOD
Vinken NNP 8 SUB
, , 2 P
61 CD 5 NMOD
years NNS 6 AMOD
old JJ 2 NMOD
, , 2 P
will MD 0 ROOT
join VB 8 VC
the DT 11 NMOD
board NN 9 OBJ
as IN 9 VMOD
a DT 15 NMOD
nonexecutive JJ 15 NMOD
director NN 12 PMOD
Nov. NNP 9 VMOD
29 CD 16 NMOD
. . 9 VMOD
""")
tree = dg.tree()
tree.pprint()
if nx:
# currently doesn't work
import networkx
from matplotlib import pylab
g = dg.nx_graph()
g.info()
pos = networkx.spring_layout(g, dim=1)
networkx.draw_networkx_nodes(g, pos, node_size=50)
# networkx.draw_networkx_edges(g, pos, edge_color='k', width=8)
networkx.draw_networkx_labels(g, pos, dg.nx_labels)
pylab.xticks([])
pylab.yticks([])
pylab.savefig('tree.png')
pylab.show()
def conll_demo():
"""
A demonstration of how to read a string representation of
a CoNLL format dependency tree.
"""
dg = DependencyGraph(conll_data1)
tree = dg.tree()
tree.pprint()
print(dg)
print(dg.to_conll(4))
def conll_file_demo():
print('Mass conll_read demo...')
graphs = [DependencyGraph(entry)
for entry in conll_data2.split('\n\n') if entry]
for graph in graphs:
tree = graph.tree()
print('\n')
tree.pprint()
def cycle_finding_demo():
dg = DependencyGraph(treebank_data)
print(dg.contains_cycle())
cyclic_dg = DependencyGraph()
cyclic_dg.add_node({'word': None, 'deps': [1], 'rel': 'TOP', 'address': 0})
cyclic_dg.add_node({'word': None, 'deps': [2], 'rel': 'NTOP', 'address': 1})
cyclic_dg.add_node({'word': None, 'deps': [4], 'rel': 'NTOP', 'address': 2})
cyclic_dg.add_node({'word': None, 'deps': [1], 'rel': 'NTOP', 'address': 3})
cyclic_dg.add_node({'word': None, 'deps': [3], 'rel': 'NTOP', 'address': 4})
print(cyclic_dg.contains_cycle())
treebank_data = """Pierre NNP 2 NMOD
Vinken NNP 8 SUB
, , 2 P
61 CD 5 NMOD
years NNS 6 AMOD
old JJ 2 NMOD
, , 2 P
will MD 0 ROOT
join VB 8 VC
the DT 11 NMOD
board NN 9 OBJ
as IN 9 VMOD
a DT 15 NMOD
nonexecutive JJ 15 NMOD
director NN 12 PMOD
Nov. NNP 9 VMOD
29 CD 16 NMOD
. . 9 VMOD
"""
conll_data1 = """
1 Ze ze Pron Pron per|3|evofmv|nom 2 su _ _
2 had heb V V trans|ovt|1of2of3|ev 0 ROOT _ _
3 met met Prep Prep voor 8 mod _ _
4 haar haar Pron Pron bez|3|ev|neut|attr 5 det _ _
5 moeder moeder N N soort|ev|neut 3 obj1 _ _
6 kunnen kan V V hulp|ott|1of2of3|mv 2 vc _ _
7 gaan ga V V hulp|inf 6 vc _ _
8 winkelen winkel V V intrans|inf 11 cnj _ _
9 , , Punc Punc komma 8 punct _ _
10 zwemmen zwem V V intrans|inf 11 cnj _ _
11 of of Conj Conj neven 7 vc _ _
12 terrassen terras N N soort|mv|neut 11 cnj _ _
13 . . Punc Punc punt 12 punct _ _
"""
conll_data2 = """1 Cathy Cathy N N eigen|ev|neut 2 su _ _
2 zag zie V V trans|ovt|1of2of3|ev 0 ROOT _ _
3 hen hen Pron Pron per|3|mv|datofacc 2 obj1 _ _
4 wild wild Adj Adj attr|stell|onverv 5 mod _ _
5 zwaaien zwaai N N soort|mv|neut 2 vc _ _
6 . . Punc Punc punt 5 punct _ _
1 Ze ze Pron Pron per|3|evofmv|nom 2 su _ _
2 had heb V V trans|ovt|1of2of3|ev 0 ROOT _ _
3 met met Prep Prep voor 8 mod _ _
4 haar haar Pron Pron bez|3|ev|neut|attr 5 det _ _
5 moeder moeder N N soort|ev|neut 3 obj1 _ _
6 kunnen kan V V hulp|ott|1of2of3|mv 2 vc _ _
7 gaan ga V V hulp|inf 6 vc _ _
8 winkelen winkel V V intrans|inf 11 cnj _ _
9 , , Punc Punc komma 8 punct _ _
10 zwemmen zwem V V intrans|inf 11 cnj _ _
11 of of Conj Conj neven 7 vc _ _
12 terrassen terras N N soort|mv|neut 11 cnj _ _
13 . . Punc Punc punt 12 punct _ _
1 Dat dat Pron Pron aanw|neut|attr 2 det _ _
2 werkwoord werkwoord N N soort|ev|neut 6 obj1 _ _
3 had heb V V hulp|ovt|1of2of3|ev 0 ROOT _ _
4 ze ze Pron Pron per|3|evofmv|nom 6 su _ _
5 zelf zelf Pron Pron aanw|neut|attr|wzelf 3 predm _ _
6 uitgevonden vind V V trans|verldw|onverv 3 vc _ _
7 . . Punc Punc punt 6 punct _ _
1 Het het Pron Pron onbep|neut|zelfst 2 su _ _
2 hoorde hoor V V trans|ovt|1of2of3|ev 0 ROOT _ _
3 bij bij Prep Prep voor 2 ld _ _
4 de de Art Art bep|zijdofmv|neut 6 det _ _
5 warme warm Adj Adj attr|stell|vervneut 6 mod _ _
6 zomerdag zomerdag N N soort|ev|neut 3 obj1 _ _
7 die die Pron Pron betr|neut|zelfst 6 mod _ _
8 ze ze Pron Pron per|3|evofmv|nom 12 su _ _
9 ginds ginds Adv Adv gew|aanw 12 mod _ _
10 achter achter Adv Adv gew|geenfunc|stell|onverv 12 svp _ _
11 had heb V V hulp|ovt|1of2of3|ev 7 body _ _
12 gelaten laat V V trans|verldw|onverv 11 vc _ _
13 . . Punc Punc punt 12 punct _ _
1 Ze ze Pron Pron per|3|evofmv|nom 2 su _ _
2 hadden heb V V trans|ovt|1of2of3|mv 0 ROOT _ _
3 languit languit Adv Adv gew|geenfunc|stell|onverv 11 mod _ _
4 naast naast Prep Prep voor 11 mod _ _
5 elkaar elkaar Pron Pron rec|neut 4 obj1 _ _
6 op op Prep Prep voor 11 ld _ _
7 de de Art Art bep|zijdofmv|neut 8 det _ _
8 strandstoelen strandstoel N N soort|mv|neut 6 obj1 _ _
9 kunnen kan V V hulp|inf 2 vc _ _
10 gaan ga V V hulp|inf 9 vc _ _
11 liggen lig V V intrans|inf 10 vc _ _
12 . . Punc Punc punt 11 punct _ _
1 Zij zij Pron Pron per|3|evofmv|nom 2 su _ _
2 zou zal V V hulp|ovt|1of2of3|ev 7 cnj _ _
3 mams mams N N soort|ev|neut 4 det _ _
4 rug rug N N soort|ev|neut 5 obj1 _ _
5 ingewreven wrijf V V trans|verldw|onverv 6 vc _ _
6 hebben heb V V hulp|inf 2 vc _ _
7 en en Conj Conj neven 0 ROOT _ _
8 mam mam V V trans|ovt|1of2of3|ev 7 cnj _ _
9 de de Art Art bep|zijdofmv|neut 10 det _ _
10 hare hare Pron Pron bez|3|ev|neut|attr 8 obj1 _ _
11 . . Punc Punc punt 10 punct _ _
1 Of of Conj Conj onder|metfin 0 ROOT _ _
2 ze ze Pron Pron per|3|evofmv|nom 3 su _ _
3 had heb V V hulp|ovt|1of2of3|ev 0 ROOT _ _
4 gewoon gewoon Adj Adj adv|stell|onverv 10 mod _ _
5 met met Prep Prep voor 10 mod _ _
6 haar haar Pron Pron bez|3|ev|neut|attr 7 det _ _
7 vriendinnen vriendin N N soort|mv|neut 5 obj1 _ _
8 rond rond Adv Adv deelv 10 svp _ _
9 kunnen kan V V hulp|inf 3 vc _ _
10 slenteren slenter V V intrans|inf 9 vc _ _
11 in in Prep Prep voor 10 mod _ _
12 de de Art Art bep|zijdofmv|neut 13 det _ _
13 buurt buurt N N soort|ev|neut 11 obj1 _ _
14 van van Prep Prep voor 13 mod _ _
15 Trafalgar_Square Trafalgar_Square MWU N_N eigen|ev|neut_eigen|ev|neut 14 obj1 _ _
16 . . Punc Punc punt 15 punct _ _
"""
if __name__ == '__main__':
demo()
|
|
"""Starts a service to scan in intervals for new devices."""
from datetime import timedelta
import json
import logging
from netdisco.discovery import NetworkDiscovery
import voluptuous as vol
from homeassistant import config_entries
from homeassistant.components import zeroconf
from homeassistant.const import EVENT_HOMEASSISTANT_STARTED
from homeassistant.core import callback
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.discovery import async_discover, async_load_platform
from homeassistant.helpers.event import async_track_point_in_utc_time
import homeassistant.util.dt as dt_util
DOMAIN = "discovery"
SCAN_INTERVAL = timedelta(seconds=300)
SERVICE_APPLE_TV = "apple_tv"
SERVICE_DAIKIN = "daikin"
SERVICE_DLNA_DMR = "dlna_dmr"
SERVICE_ENIGMA2 = "enigma2"
SERVICE_HASS_IOS_APP = "hass_ios"
SERVICE_HASSIO = "hassio"
SERVICE_HEOS = "heos"
SERVICE_KONNECTED = "konnected"
SERVICE_MOBILE_APP = "hass_mobile_app"
SERVICE_NETGEAR = "netgear_router"
SERVICE_OCTOPRINT = "octoprint"
SERVICE_SABNZBD = "sabnzbd"
SERVICE_SAMSUNG_PRINTER = "samsung_printer"
SERVICE_TELLDUSLIVE = "tellstick"
SERVICE_YEELIGHT = "yeelight"
SERVICE_WEMO = "belkin_wemo"
SERVICE_WINK = "wink"
SERVICE_XIAOMI_GW = "xiaomi_gw"
# These have custom protocols
CONFIG_ENTRY_HANDLERS = {
SERVICE_TELLDUSLIVE: "tellduslive",
"logitech_mediaserver": "squeezebox",
}
# These have no config flows
SERVICE_HANDLERS = {
SERVICE_NETGEAR: ("device_tracker", None),
SERVICE_ENIGMA2: ("media_player", "enigma2"),
SERVICE_SABNZBD: ("sabnzbd", None),
"yamaha": ("media_player", "yamaha"),
"frontier_silicon": ("media_player", "frontier_silicon"),
"openhome": ("media_player", "openhome"),
"bose_soundtouch": ("media_player", "soundtouch"),
"bluesound": ("media_player", "bluesound"),
"lg_smart_device": ("media_player", "lg_soundbar"),
"nanoleaf_aurora": ("light", "nanoleaf"),
}
OPTIONAL_SERVICE_HANDLERS = {SERVICE_DLNA_DMR: ("media_player", "dlna_dmr")}
MIGRATED_SERVICE_HANDLERS = [
SERVICE_APPLE_TV,
"axis",
"deconz",
SERVICE_DAIKIN,
"denonavr",
"esphome",
"google_cast",
SERVICE_HASS_IOS_APP,
SERVICE_HASSIO,
SERVICE_HEOS,
"harmony",
"homekit",
"ikea_tradfri",
"kodi",
SERVICE_KONNECTED,
SERVICE_MOBILE_APP,
SERVICE_OCTOPRINT,
"philips_hue",
SERVICE_SAMSUNG_PRINTER,
"sonos",
"songpal",
SERVICE_WEMO,
SERVICE_WINK,
SERVICE_XIAOMI_GW,
"volumio",
SERVICE_YEELIGHT,
]
DEFAULT_ENABLED = (
list(CONFIG_ENTRY_HANDLERS) + list(SERVICE_HANDLERS) + MIGRATED_SERVICE_HANDLERS
)
DEFAULT_DISABLED = list(OPTIONAL_SERVICE_HANDLERS) + MIGRATED_SERVICE_HANDLERS
CONF_IGNORE = "ignore"
CONF_ENABLE = "enable"
CONFIG_SCHEMA = vol.Schema(
{
vol.Optional(DOMAIN): vol.Schema(
{
vol.Optional(CONF_IGNORE, default=[]): vol.All(
cv.ensure_list, [vol.In(DEFAULT_ENABLED)]
),
vol.Optional(CONF_ENABLE, default=[]): vol.All(
cv.ensure_list, [vol.In(DEFAULT_DISABLED + DEFAULT_ENABLED)]
),
}
)
},
extra=vol.ALLOW_EXTRA,
)
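# Illustrative configuration.yaml snippet accepted by CONFIG_SCHEMA above;
# the platform names are examples only:
#
#   discovery:
#     ignore:
#       - sabnzbd
#     enable:
#       - dlna_dmr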
async def async_setup(hass, config):
"""Start a discovery service."""
logger = logging.getLogger(__name__)
netdisco = NetworkDiscovery()
already_discovered = set()
if DOMAIN in config:
        # Platforms ignored by config
ignored_platforms = config[DOMAIN][CONF_IGNORE]
# Optional platforms enabled by config
enabled_platforms = config[DOMAIN][CONF_ENABLE]
else:
ignored_platforms = []
enabled_platforms = []
for platform in enabled_platforms:
if platform in DEFAULT_ENABLED:
logger.warning(
"Please remove %s from your discovery.enable configuration "
"as it is now enabled by default",
platform,
)
zeroconf_instance = await zeroconf.async_get_instance(hass)
async def new_service_found(service, info):
"""Handle a new service if one is found."""
if service in MIGRATED_SERVICE_HANDLERS:
return
if service in ignored_platforms:
logger.info("Ignoring service: %s %s", service, info)
return
discovery_hash = json.dumps([service, info], sort_keys=True)
if discovery_hash in already_discovered:
logger.debug("Already discovered service %s %s.", service, info)
return
already_discovered.add(discovery_hash)
if service in CONFIG_ENTRY_HANDLERS:
await hass.config_entries.flow.async_init(
CONFIG_ENTRY_HANDLERS[service],
context={"source": config_entries.SOURCE_DISCOVERY},
data=info,
)
return
comp_plat = SERVICE_HANDLERS.get(service)
if not comp_plat and service in enabled_platforms:
comp_plat = OPTIONAL_SERVICE_HANDLERS[service]
# We do not know how to handle this service.
if not comp_plat:
logger.debug("Unknown service discovered: %s %s", service, info)
return
logger.info("Found new service: %s %s", service, info)
component, platform = comp_plat
if platform is None:
await async_discover(hass, service, info, component, config)
else:
await async_load_platform(hass, component, platform, info, config)
async def scan_devices(now):
"""Scan for devices."""
try:
results = await hass.async_add_executor_job(
_discover, netdisco, zeroconf_instance
)
for result in results:
hass.async_create_task(new_service_found(*result))
except OSError:
logger.error("Network is unreachable")
async_track_point_in_utc_time(
hass, scan_devices, dt_util.utcnow() + SCAN_INTERVAL
)
@callback
def schedule_first(event):
"""Schedule the first discovery when Home Assistant starts up."""
async_track_point_in_utc_time(hass, scan_devices, dt_util.utcnow())
hass.bus.async_listen_once(EVENT_HOMEASSISTANT_STARTED, schedule_first)
return True
def _discover(netdisco, zeroconf_instance):
"""Discover devices."""
results = []
try:
netdisco.scan(zeroconf_instance=zeroconf_instance)
for disc in netdisco.discover():
for service in netdisco.get_info(disc):
results.append((disc, service))
finally:
netdisco.stop()
return results
|
|
# Copyright (C) 2012 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from lxml import etree
from oslo.utils import units
from nova.compute import arch
from nova import test
from nova.tests import matchers
from nova.virt.libvirt import config
class LibvirtConfigBaseTest(test.NoDBTestCase):
def assertXmlEqual(self, expectedXmlstr, actualXmlstr):
self.assertThat(actualXmlstr, matchers.XMLMatches(expectedXmlstr))
class LibvirtConfigTest(LibvirtConfigBaseTest):
def test_config_plain(self):
obj = config.LibvirtConfigObject(root_name="demo")
xml = obj.to_xml()
self.assertXmlEqual(xml, "<demo/>")
def test_config_ns(self):
obj = config.LibvirtConfigObject(root_name="demo", ns_prefix="foo",
ns_uri="http://example.com/foo")
xml = obj.to_xml()
self.assertXmlEqual(xml, """
<foo:demo xmlns:foo="http://example.com/foo"/>""")
def test_config_text(self):
obj = config.LibvirtConfigObject(root_name="demo")
root = obj.format_dom()
root.append(obj._text_node("foo", "bar"))
xml = etree.tostring(root)
self.assertXmlEqual(xml, "<demo><foo>bar</foo></demo>")
def test_config_parse(self):
inxml = "<demo><foo/></demo>"
obj = config.LibvirtConfigObject(root_name="demo")
obj.parse_str(inxml)
class LibvirtConfigCapsTest(LibvirtConfigBaseTest):
def test_config_host(self):
xmlin = """
<capabilities>
<host>
<uuid>c7a5fdbd-edaf-9455-926a-d65c16db1809</uuid>
<cpu>
<arch>x86_64</arch>
<model>Opteron_G3</model>
<vendor>AMD</vendor>
<topology sockets='1' cores='4' threads='1'/>
<feature name='ibs'/>
<feature name='osvw'/>
</cpu>
<topology>
<cells num='2'>
<cell id='0'>
<memory unit='KiB'>4048280</memory>
<cpus num='4'>
<cpu id='0' socket_id='0' core_id='0' siblings='0'/>
<cpu id='1' socket_id='0' core_id='1' siblings='1'/>
<cpu id='2' socket_id='0' core_id='2' siblings='2'/>
<cpu id='3' socket_id='0' core_id='3' siblings='3'/>
</cpus>
</cell>
<cell id='1'>
<memory unit='KiB'>4127684</memory>
<cpus num='4'>
<cpu id='4' socket_id='1' core_id='0' siblings='4'/>
<cpu id='5' socket_id='1' core_id='1' siblings='5'/>
<cpu id='6' socket_id='1' core_id='2' siblings='6'/>
<cpu id='7' socket_id='1' core_id='3' siblings='7'/>
</cpus>
</cell>
</cells>
</topology>
</host>
<guest>
<os_type>hvm</os_type>
<arch name='x86_64'/>
</guest>
<guest>
<os_type>hvm</os_type>
<arch name='i686'/>
</guest>
</capabilities>"""
obj = config.LibvirtConfigCaps()
obj.parse_str(xmlin)
self.assertIsInstance(obj.host, config.LibvirtConfigCapsHost)
self.assertEqual(obj.host.uuid, "c7a5fdbd-edaf-9455-926a-d65c16db1809")
xmlout = obj.to_xml()
self.assertXmlEqual(xmlin, xmlout)
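# NOTE: the test above exercises the core pattern used throughout this module:
# build or parse a config object, serialise it with to_xml(), and compare the
# result against a reference document via assertXmlEqual().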
class LibvirtConfigGuestTimerTest(LibvirtConfigBaseTest):
def test_config_platform(self):
obj = config.LibvirtConfigGuestTimer()
obj.track = "host"
xml = obj.to_xml()
self.assertXmlEqual(xml, """
<timer name="platform" track="host"/>
""")
def test_config_pit(self):
obj = config.LibvirtConfigGuestTimer()
obj.name = "pit"
obj.tickpolicy = "discard"
xml = obj.to_xml()
self.assertXmlEqual(xml, """
<timer name="pit" tickpolicy="discard"/>
""")
def test_config_hpet(self):
obj = config.LibvirtConfigGuestTimer()
obj.name = "hpet"
obj.present = False
xml = obj.to_xml()
self.assertXmlEqual(xml, """
<timer name="hpet" present="no"/>
""")
class LibvirtConfigGuestClockTest(LibvirtConfigBaseTest):
def test_config_utc(self):
obj = config.LibvirtConfigGuestClock()
xml = obj.to_xml()
self.assertXmlEqual(xml, """
<clock offset="utc"/>
""")
def test_config_localtime(self):
obj = config.LibvirtConfigGuestClock()
obj.offset = "localtime"
xml = obj.to_xml()
self.assertXmlEqual(xml, """
<clock offset="localtime"/>
""")
def test_config_timezone(self):
obj = config.LibvirtConfigGuestClock()
obj.offset = "timezone"
obj.timezone = "EDT"
xml = obj.to_xml()
self.assertXmlEqual(xml, """
<clock offset="timezone" timezone="EDT"/>
""")
def test_config_variable(self):
obj = config.LibvirtConfigGuestClock()
obj.offset = "variable"
obj.adjustment = "123456"
xml = obj.to_xml()
self.assertXmlEqual(xml, """
<clock offset="variable" adjustment="123456"/>
""")
def test_config_timers(self):
obj = config.LibvirtConfigGuestClock()
tmpit = config.LibvirtConfigGuestTimer()
tmpit.name = "pit"
tmpit.tickpolicy = "discard"
tmrtc = config.LibvirtConfigGuestTimer()
tmrtc.name = "rtc"
tmrtc.tickpolicy = "merge"
obj.add_timer(tmpit)
obj.add_timer(tmrtc)
xml = obj.to_xml()
self.assertXmlEqual(xml, """
<clock offset="utc">
<timer name="pit" tickpolicy="discard"/>
<timer name="rtc" tickpolicy="merge"/>
</clock>
""")
class LibvirtConfigCPUFeatureTest(LibvirtConfigBaseTest):
def test_config_simple(self):
obj = config.LibvirtConfigCPUFeature("mtrr")
xml = obj.to_xml()
self.assertXmlEqual(xml, """
<feature name="mtrr"/>
""")
class LibvirtConfigGuestCPUFeatureTest(LibvirtConfigBaseTest):
def test_config_simple(self):
obj = config.LibvirtConfigGuestCPUFeature("mtrr")
obj.policy = "force"
xml = obj.to_xml()
self.assertXmlEqual(xml, """
<feature name="mtrr" policy="force"/>
""")
class LibvirtConfigGuestCPUNUMATest(LibvirtConfigBaseTest):
def test_parse_dom(self):
xml = """
<numa>
<cell id="0" cpus="0-1" memory="1000000"/>
<cell id="1" cpus="2-3" memory="1500000"/>
</numa>
"""
xmldoc = etree.fromstring(xml)
obj = config.LibvirtConfigGuestCPUNUMA()
obj.parse_dom(xmldoc)
self.assertEqual(2, len(obj.cells))
def test_config_simple(self):
obj = config.LibvirtConfigGuestCPUNUMA()
cell = config.LibvirtConfigGuestCPUNUMACell()
cell.id = 0
cell.cpus = set([0, 1])
cell.memory = 1000000
obj.cells.append(cell)
cell = config.LibvirtConfigGuestCPUNUMACell()
cell.id = 1
cell.cpus = set([2, 3])
cell.memory = 1500000
obj.cells.append(cell)
xml = obj.to_xml()
self.assertXmlEqual(xml, """
<numa>
<cell id="0" cpus="0-1" memory="1000000"/>
<cell id="1" cpus="2-3" memory="1500000"/>
</numa>
""")
class LibvirtConfigCPUTest(LibvirtConfigBaseTest):
def test_config_simple(self):
obj = config.LibvirtConfigCPU()
obj.model = "Penryn"
xml = obj.to_xml()
self.assertXmlEqual(xml, """
<cpu>
<model>Penryn</model>
</cpu>
""")
def test_config_complex(self):
obj = config.LibvirtConfigCPU()
obj.model = "Penryn"
obj.vendor = "Intel"
obj.arch = arch.X86_64
obj.add_feature(config.LibvirtConfigCPUFeature("mtrr"))
obj.add_feature(config.LibvirtConfigCPUFeature("apic"))
xml = obj.to_xml()
self.assertXmlEqual(xml, """
<cpu>
<arch>x86_64</arch>
<model>Penryn</model>
<vendor>Intel</vendor>
<feature name="apic"/>
<feature name="mtrr"/>
</cpu>
""")
    def test_only_uniq_cpu_features(self):
obj = config.LibvirtConfigCPU()
obj.model = "Penryn"
obj.vendor = "Intel"
obj.arch = arch.X86_64
obj.add_feature(config.LibvirtConfigCPUFeature("mtrr"))
obj.add_feature(config.LibvirtConfigCPUFeature("apic"))
obj.add_feature(config.LibvirtConfigCPUFeature("apic"))
obj.add_feature(config.LibvirtConfigCPUFeature("mtrr"))
xml = obj.to_xml()
self.assertXmlEqual(xml, """
<cpu>
<arch>x86_64</arch>
<model>Penryn</model>
<vendor>Intel</vendor>
<feature name="apic"/>
<feature name="mtrr"/>
</cpu>
""")
def test_config_topology(self):
obj = config.LibvirtConfigCPU()
obj.model = "Penryn"
obj.sockets = 4
obj.cores = 4
obj.threads = 2
xml = obj.to_xml()
self.assertXmlEqual(xml, """
<cpu>
<model>Penryn</model>
<topology sockets="4" cores="4" threads="2"/>
</cpu>
""")
class LibvirtConfigGuestCPUTest(LibvirtConfigBaseTest):
def test_config_simple(self):
obj = config.LibvirtConfigGuestCPU()
obj.model = "Penryn"
xml = obj.to_xml()
self.assertXmlEqual(xml, """
<cpu match="exact">
<model>Penryn</model>
</cpu>
""")
def test_config_complex(self):
obj = config.LibvirtConfigGuestCPU()
obj.model = "Penryn"
obj.vendor = "Intel"
obj.arch = arch.X86_64
obj.mode = "custom"
obj.add_feature(config.LibvirtConfigGuestCPUFeature("mtrr"))
obj.add_feature(config.LibvirtConfigGuestCPUFeature("apic"))
xml = obj.to_xml()
self.assertXmlEqual(xml, """
<cpu mode="custom" match="exact">
<arch>x86_64</arch>
<model>Penryn</model>
<vendor>Intel</vendor>
<feature name="apic" policy="require"/>
<feature name="mtrr" policy="require"/>
</cpu>
""")
def test_config_host(self):
obj = config.LibvirtConfigGuestCPU()
obj.mode = "host-model"
obj.match = "exact"
xml = obj.to_xml()
self.assertXmlEqual(xml, """
<cpu mode="host-model" match="exact"/>
""")
def test_config_host_with_numa(self):
obj = config.LibvirtConfigGuestCPU()
obj.mode = "host-model"
obj.match = "exact"
numa = config.LibvirtConfigGuestCPUNUMA()
cell = config.LibvirtConfigGuestCPUNUMACell()
cell.id = 0
cell.cpus = set([0, 1])
cell.memory = 1000000
numa.cells.append(cell)
cell = config.LibvirtConfigGuestCPUNUMACell()
cell.id = 1
cell.cpus = set([2, 3])
cell.memory = 1500000
numa.cells.append(cell)
obj.numa = numa
xml = obj.to_xml()
self.assertXmlEqual(xml, """
<cpu mode="host-model" match="exact">
<numa>
<cell id="0" cpus="0-1" memory="1000000"/>
<cell id="1" cpus="2-3" memory="1500000"/>
</numa>
</cpu>
""")
class LibvirtConfigGuestSMBIOSTest(LibvirtConfigBaseTest):
def test_config_simple(self):
obj = config.LibvirtConfigGuestSMBIOS()
xml = obj.to_xml()
self.assertXmlEqual(xml, """
<smbios mode="sysinfo"/>
""")
class LibvirtConfigGuestSysinfoTest(LibvirtConfigBaseTest):
def test_config_simple(self):
obj = config.LibvirtConfigGuestSysinfo()
xml = obj.to_xml()
self.assertXmlEqual(xml, """
<sysinfo type="smbios"/>
""")
def test_config_bios(self):
obj = config.LibvirtConfigGuestSysinfo()
obj.bios_vendor = "Acme"
obj.bios_version = "6.6.6"
xml = obj.to_xml()
self.assertXmlEqual(xml, """
<sysinfo type="smbios">
<bios>
<entry name="vendor">Acme</entry>
<entry name="version">6.6.6</entry>
</bios>
</sysinfo>
""")
def test_config_system(self):
obj = config.LibvirtConfigGuestSysinfo()
obj.system_manufacturer = "Acme"
obj.system_product = "Wile Coyote"
obj.system_version = "6.6.6"
obj.system_serial = "123456"
obj.system_uuid = "c7a5fdbd-edaf-9455-926a-d65c16db1809"
xml = obj.to_xml()
self.assertXmlEqual(xml, """
<sysinfo type="smbios">
<system>
<entry name="manufacturer">Acme</entry>
<entry name="product">Wile Coyote</entry>
<entry name="version">6.6.6</entry>
<entry name="serial">123456</entry>
<entry name="uuid">c7a5fdbd-edaf-9455-926a-d65c16db1809</entry>
</system>
</sysinfo>
""")
def test_config_mixed(self):
obj = config.LibvirtConfigGuestSysinfo()
obj.bios_vendor = "Acme"
obj.system_manufacturer = "Acme"
obj.system_product = "Wile Coyote"
obj.system_uuid = "c7a5fdbd-edaf-9455-926a-d65c16db1809"
xml = obj.to_xml()
self.assertXmlEqual(xml, """
<sysinfo type="smbios">
<bios>
<entry name="vendor">Acme</entry>
</bios>
<system>
<entry name="manufacturer">Acme</entry>
<entry name="product">Wile Coyote</entry>
<entry name="uuid">c7a5fdbd-edaf-9455-926a-d65c16db1809</entry>
</system>
</sysinfo>
""")
class LibvirtConfigGuestDiskTest(LibvirtConfigBaseTest):
def test_config_file(self):
obj = config.LibvirtConfigGuestDisk()
obj.source_type = "file"
obj.source_path = "/tmp/hello"
obj.target_dev = "/dev/hda"
obj.target_bus = "ide"
xml = obj.to_xml()
self.assertXmlEqual(xml, """
<disk type="file" device="disk">
<source file="/tmp/hello"/>
<target bus="ide" dev="/dev/hda"/>
</disk>""")
def test_config_file_parse(self):
xml = """<disk type="file" device="disk">
<source file="/tmp/hello"/>
<target bus="ide" dev="/dev/hda"/>
</disk>"""
xmldoc = etree.fromstring(xml)
obj = config.LibvirtConfigGuestDisk()
obj.parse_dom(xmldoc)
self.assertEqual(obj.source_type, 'file')
self.assertEqual(obj.source_path, '/tmp/hello')
self.assertEqual(obj.target_dev, '/dev/hda')
self.assertEqual(obj.target_bus, 'ide')
def test_config_file_serial(self):
obj = config.LibvirtConfigGuestDisk()
obj.source_type = "file"
obj.source_path = "/tmp/hello"
obj.target_dev = "/dev/hda"
obj.target_bus = "ide"
obj.serial = "7a97c4a3-6f59-41d4-bf47-191d7f97f8e9"
xml = obj.to_xml()
self.assertXmlEqual(xml, """
<disk type="file" device="disk">
<source file="/tmp/hello"/>
<target bus="ide" dev="/dev/hda"/>
<serial>7a97c4a3-6f59-41d4-bf47-191d7f97f8e9</serial>
</disk>""")
def test_config_file_serial_parse(self):
xml = """<disk type="file" device="disk">
<source file="/tmp/hello"/>
<target bus="ide" dev="/dev/hda"/>
<serial>7a97c4a3-6f59-41d4-bf47-191d7f97f8e9</serial>
</disk>"""
xmldoc = etree.fromstring(xml)
obj = config.LibvirtConfigGuestDisk()
obj.parse_dom(xmldoc)
self.assertEqual(obj.source_type, 'file')
self.assertEqual(obj.serial, '7a97c4a3-6f59-41d4-bf47-191d7f97f8e9')
def test_config_file_discard(self):
obj = config.LibvirtConfigGuestDisk()
obj.driver_name = "qemu"
obj.driver_format = "qcow2"
obj.driver_cache = "none"
obj.driver_discard = "unmap"
obj.source_type = "file"
obj.source_path = "/tmp/hello.qcow2"
obj.target_dev = "/dev/hda"
obj.target_bus = "ide"
obj.serial = "7a97c4a3-6f59-41d4-bf47-191d7f97f8e9"
xml = obj.to_xml()
self.assertXmlEqual("""
<disk type="file" device="disk">
<driver name="qemu" type="qcow2" cache="none" discard="unmap"/>
<source file="/tmp/hello.qcow2"/>
<target bus="ide" dev="/dev/hda"/>
<serial>7a97c4a3-6f59-41d4-bf47-191d7f97f8e9</serial>
</disk>""", xml)
def test_config_file_discard_parse(self):
xml = """
<disk type="file" device="disk">
<driver name="qemu" type="qcow2" cache="none" discard="unmap"/>
<source file="/tmp/hello.qcow2"/>
<target bus="ide" dev="/dev/hda"/>
<serial>7a97c4a3-6f59-41d4-bf47-191d7f97f8e9</serial>
</disk>"""
xmldoc = etree.fromstring(xml)
obj = config.LibvirtConfigGuestDisk()
obj.parse_dom(xmldoc)
self.assertEqual('unmap', obj.driver_discard)
def test_config_block(self):
obj = config.LibvirtConfigGuestDisk()
obj.source_type = "block"
obj.source_path = "/tmp/hello"
obj.source_device = "cdrom"
obj.driver_name = "qemu"
obj.target_dev = "/dev/hdc"
obj.target_bus = "ide"
xml = obj.to_xml()
self.assertXmlEqual(xml, """
<disk type="block" device="cdrom">
<driver name="qemu"/>
<source dev="/tmp/hello"/>
<target bus="ide" dev="/dev/hdc"/>
</disk>""")
def test_config_block_parse(self):
xml = """<disk type="block" device="cdrom">
<driver name="qemu"/>
<source dev="/tmp/hello"/>
<target bus="ide" dev="/dev/hdc"/>
</disk>"""
xmldoc = etree.fromstring(xml)
obj = config.LibvirtConfigGuestDisk()
obj.parse_dom(xmldoc)
self.assertEqual(obj.source_type, 'block')
self.assertEqual(obj.source_path, '/tmp/hello')
self.assertEqual(obj.target_dev, '/dev/hdc')
self.assertEqual(obj.target_bus, 'ide')
def test_config_network(self):
obj = config.LibvirtConfigGuestDisk()
obj.source_type = "network"
obj.source_protocol = "iscsi"
obj.source_name = "foo.bar.com"
obj.driver_name = "qemu"
obj.driver_format = "qcow2"
obj.target_dev = "/dev/hda"
obj.target_bus = "ide"
xml = obj.to_xml()
self.assertXmlEqual(xml, """
<disk type="network" device="disk">
<driver name="qemu" type="qcow2"/>
<source name="foo.bar.com" protocol="iscsi"/>
<target bus="ide" dev="/dev/hda"/>
</disk>""")
def test_config_network_parse(self):
xml = """<disk type="network" device="disk">
<driver name="qemu" type="qcow2"/>
<source name="foo.bar.com" protocol="iscsi"/>
<target bus="ide" dev="/dev/hda"/>
</disk>"""
xmldoc = etree.fromstring(xml)
obj = config.LibvirtConfigGuestDisk()
obj.parse_dom(xmldoc)
self.assertEqual(obj.source_type, 'network')
self.assertEqual(obj.source_protocol, 'iscsi')
self.assertEqual(obj.source_name, 'foo.bar.com')
self.assertEqual(obj.driver_name, 'qemu')
self.assertEqual(obj.driver_format, 'qcow2')
self.assertEqual(obj.target_dev, '/dev/hda')
self.assertEqual(obj.target_bus, 'ide')
def test_config_network_no_name(self):
obj = config.LibvirtConfigGuestDisk()
obj.source_type = 'network'
obj.source_protocol = 'nbd'
obj.source_hosts = ['foo.bar.com']
obj.source_ports = [None]
obj.driver_name = 'qemu'
obj.driver_format = 'raw'
obj.target_dev = '/dev/vda'
obj.target_bus = 'virtio'
xml = obj.to_xml()
self.assertXmlEqual(xml, """
<disk type="network" device="disk">
<driver name="qemu" type="raw"/>
<source protocol="nbd">
<host name="foo.bar.com"/>
</source>
<target bus="virtio" dev="/dev/vda"/>
</disk>""")
def test_config_network_multihost(self):
obj = config.LibvirtConfigGuestDisk()
obj.source_type = 'network'
obj.source_protocol = 'rbd'
obj.source_name = 'pool/image'
obj.source_hosts = ['foo.bar.com', '::1', '1.2.3.4']
obj.source_ports = [None, '123', '456']
obj.driver_name = 'qemu'
obj.driver_format = 'raw'
obj.target_dev = '/dev/vda'
obj.target_bus = 'virtio'
xml = obj.to_xml()
self.assertXmlEqual(xml, """
<disk type="network" device="disk">
<driver name="qemu" type="raw"/>
<source name="pool/image" protocol="rbd">
<host name="foo.bar.com"/>
<host name="::1" port="123"/>
<host name="1.2.3.4" port="456"/>
</source>
<target bus="virtio" dev="/dev/vda"/>
</disk>""")
def test_config_network_auth(self):
obj = config.LibvirtConfigGuestDisk()
obj.source_type = "network"
obj.source_protocol = "rbd"
obj.source_name = "pool/image"
obj.driver_name = "qemu"
obj.driver_format = "raw"
obj.target_dev = "/dev/vda"
obj.target_bus = "virtio"
obj.auth_username = "foo"
obj.auth_secret_type = "ceph"
obj.auth_secret_uuid = "b38a3f43-4be2-4046-897f-b67c2f5e0147"
xml = obj.to_xml()
self.assertXmlEqual(xml, """
<disk type="network" device="disk">
<driver name="qemu" type="raw"/>
<source name="pool/image" protocol="rbd"/>
<auth username="foo">
<secret type="ceph"
uuid="b38a3f43-4be2-4046-897f-b67c2f5e0147"/>
</auth>
<target bus="virtio" dev="/dev/vda"/>
</disk>""")
def test_config_iotune(self):
obj = config.LibvirtConfigGuestDisk()
obj.source_type = "file"
obj.source_path = "/tmp/hello"
obj.target_dev = "/dev/hda"
obj.target_bus = "ide"
obj.disk_read_bytes_sec = 1024000
obj.disk_read_iops_sec = 1000
obj.disk_total_bytes_sec = 2048000
obj.disk_write_bytes_sec = 1024000
obj.disk_write_iops_sec = 1000
obj.disk_total_iops_sec = 2000
xml = obj.to_xml()
self.assertXmlEqual(xml, """
<disk type="file" device="disk">
<source file="/tmp/hello"/>
<target bus="ide" dev="/dev/hda"/>
<iotune>
<read_bytes_sec>1024000</read_bytes_sec>
<read_iops_sec>1000</read_iops_sec>
<write_bytes_sec>1024000</write_bytes_sec>
<write_iops_sec>1000</write_iops_sec>
<total_bytes_sec>2048000</total_bytes_sec>
<total_iops_sec>2000</total_iops_sec>
</iotune>
</disk>""")
def test_config_blockio(self):
obj = config.LibvirtConfigGuestDisk()
obj.source_type = "file"
obj.source_path = "/tmp/hello"
obj.target_dev = "/dev/hda"
obj.target_bus = "ide"
obj.logical_block_size = "4096"
obj.physical_block_size = "4096"
xml = obj.to_xml()
self.assertXmlEqual("""
<disk type="file" device="disk">
<source file="/tmp/hello"/>
<target bus="ide" dev="/dev/hda"/>
<blockio logical_block_size="4096" physical_block_size="4096"/>
</disk>""", xml)
class LibvirtConfigGuestSnapshotDiskTest(LibvirtConfigBaseTest):
def test_config_file(self):
obj = config.LibvirtConfigGuestDisk()
obj.source_type = "file"
obj.source_path = "/tmp/hello"
obj.target_dev = "/dev/hda"
obj.target_bus = "ide"
xml = obj.to_xml()
self.assertXmlEqual(xml, """
<disk type="file" device="disk">
<source file="/tmp/hello"/>
<target bus="ide" dev="/dev/hda"/>
</disk>""")
def test_config_file_parse(self):
xml = """<disk type="file" device="disk">
<source file="/tmp/hello"/>
<target bus="ide" dev="/dev/hda"/>
</disk>"""
xmldoc = etree.fromstring(xml)
obj = config.LibvirtConfigGuestDisk()
obj.parse_dom(xmldoc)
self.assertEqual(obj.source_type, 'file')
self.assertEqual(obj.source_path, '/tmp/hello')
self.assertEqual(obj.target_dev, '/dev/hda')
self.assertEqual(obj.target_bus, 'ide')
class LibvirtConfigGuestDiskBackingStoreTest(LibvirtConfigBaseTest):
def test_config_file_parse(self):
xml = """<backingStore type='file'>
<driver name='qemu' type='qcow2'/>
<source file='/var/lib/libvirt/images/mid.qcow2'/>
<backingStore type='file'>
<driver name='qemu' type='qcow2'/>
<source file='/var/lib/libvirt/images/base.qcow2'/>
<backingStore/>
</backingStore>
</backingStore>
"""
xmldoc = etree.fromstring(xml)
obj = config.LibvirtConfigGuestDiskBackingStore()
obj.parse_dom(xmldoc)
self.assertEqual(obj.driver_name, 'qemu')
self.assertEqual(obj.driver_format, 'qcow2')
self.assertEqual(obj.source_type, 'file')
self.assertEqual(obj.source_file, '/var/lib/libvirt/images/mid.qcow2')
self.assertEqual(obj.backing_store.driver_name, 'qemu')
self.assertEqual(obj.backing_store.source_type, 'file')
self.assertEqual(obj.backing_store.source_file,
'/var/lib/libvirt/images/base.qcow2')
self.assertIsNone(obj.backing_store.backing_store)
def test_config_network_parse(self):
xml = """<backingStore type='network' index='1'>
<format type='qcow2'/>
<source protocol='gluster' name='volume1/img1'>
<host name='host1' port='24007'/>
</source>
<backingStore type='network' index='2'>
<format type='qcow2'/>
<source protocol='gluster' name='volume1/img2'>
<host name='host1' port='24007'/>
</source>
<backingStore/>
</backingStore>
</backingStore>
"""
xmldoc = etree.fromstring(xml)
obj = config.LibvirtConfigGuestDiskBackingStore()
obj.parse_dom(xmldoc)
self.assertEqual(obj.source_type, 'network')
self.assertEqual(obj.source_protocol, 'gluster')
self.assertEqual(obj.source_name, 'volume1/img1')
self.assertEqual(obj.source_hosts[0], 'host1')
self.assertEqual(obj.source_ports[0], '24007')
self.assertEqual(obj.index, '1')
self.assertEqual(obj.backing_store.source_name, 'volume1/img2')
self.assertEqual(obj.backing_store.index, '2')
self.assertEqual(obj.backing_store.source_hosts[0], 'host1')
self.assertEqual(obj.backing_store.source_ports[0], '24007')
self.assertIsNone(obj.backing_store.backing_store)
class LibvirtConfigGuestFilesysTest(LibvirtConfigBaseTest):
def test_config_mount(self):
obj = config.LibvirtConfigGuestFilesys()
obj.source_type = "mount"
obj.source_dir = "/tmp/hello"
obj.target_dir = "/mnt"
xml = obj.to_xml()
self.assertXmlEqual(xml, """
<filesystem type="mount">
<source dir="/tmp/hello"/>
<target dir="/mnt"/>
</filesystem>""")
class LibvirtConfigGuestInputTest(LibvirtConfigBaseTest):
def test_config_tablet(self):
obj = config.LibvirtConfigGuestInput()
xml = obj.to_xml()
self.assertXmlEqual(xml, """
<input type="tablet" bus="usb"/>""")
class LibvirtConfigGuestGraphicsTest(LibvirtConfigBaseTest):
def test_config_graphics(self):
obj = config.LibvirtConfigGuestGraphics()
obj.type = "vnc"
obj.autoport = True
obj.keymap = "en_US"
obj.listen = "127.0.0.1"
xml = obj.to_xml()
self.assertXmlEqual(xml, """
<graphics type="vnc" autoport="yes" keymap="en_US" listen="127.0.0.1"/>
""")
class LibvirtConfigGuestHostdev(LibvirtConfigBaseTest):
def test_config_pci_guest_host_dev(self):
obj = config.LibvirtConfigGuestHostdev(mode='subsystem', type='pci')
xml = obj.to_xml()
expected = """
<hostdev mode="subsystem" type="pci" managed="yes"/>
"""
self.assertXmlEqual(xml, expected)
def test_parse_GuestHostdev(self):
xmldoc = """<hostdev mode="subsystem" type="pci" managed="yes"/>"""
obj = config.LibvirtConfigGuestHostdev()
obj.parse_str(xmldoc)
self.assertEqual(obj.mode, 'subsystem')
self.assertEqual(obj.type, 'pci')
self.assertEqual(obj.managed, 'yes')
def test_parse_GuestHostdev_non_pci(self):
xmldoc = """<hostdev mode="subsystem" type="usb" managed="no"/>"""
obj = config.LibvirtConfigGuestHostdev()
obj.parse_str(xmldoc)
self.assertEqual(obj.mode, 'subsystem')
self.assertEqual(obj.type, 'usb')
self.assertEqual(obj.managed, 'no')
class LibvirtConfigGuestHostdevPCI(LibvirtConfigBaseTest):
expected = """
<hostdev mode="subsystem" type="pci" managed="yes">
<source>
<address bus="0x11" domain="0x1234" function="0x3"
slot="0x22" />
</source>
</hostdev>
"""
    def test_config_guest_hostdev_pci(self):
hostdev = config.LibvirtConfigGuestHostdevPCI()
hostdev.domain = "1234"
hostdev.bus = "11"
hostdev.slot = "22"
hostdev.function = "3"
xml = hostdev.to_xml()
self.assertXmlEqual(self.expected, xml)
    def test_parse_guest_hostdev_pci(self):
xmldoc = self.expected
obj = config.LibvirtConfigGuestHostdevPCI()
obj.parse_str(xmldoc)
self.assertEqual(obj.mode, 'subsystem')
self.assertEqual(obj.type, 'pci')
self.assertEqual(obj.managed, 'yes')
self.assertEqual(obj.domain, '0x1234')
self.assertEqual(obj.bus, '0x11')
self.assertEqual(obj.slot, '0x22')
self.assertEqual(obj.function, '0x3')
    def test_parse_guest_hostdev_usb(self):
xmldoc = """<hostdev mode='subsystem' type='usb'>
<source startupPolicy='optional'>
<vendor id='0x1234'/>
<product id='0xbeef'/>
</source>
<boot order='2'/>
</hostdev>"""
obj = config.LibvirtConfigGuestHostdevPCI()
obj.parse_str(xmldoc)
self.assertEqual(obj.mode, 'subsystem')
self.assertEqual(obj.type, 'usb')
class LibvirtConfigGuestSerialTest(LibvirtConfigBaseTest):
def test_config_file(self):
obj = config.LibvirtConfigGuestSerial()
obj.type = "file"
obj.source_path = "/tmp/vm.log"
xml = obj.to_xml()
self.assertXmlEqual(xml, """
<serial type="file">
<source path="/tmp/vm.log"/>
</serial>""")
def test_config_serial_port(self):
obj = config.LibvirtConfigGuestSerial()
obj.type = "tcp"
obj.listen_port = 11111
obj.listen_host = "0.0.0.0"
xml = obj.to_xml()
self.assertXmlEqual(xml, """
<serial type="tcp">
<source host="0.0.0.0" service="11111" mode="bind"/>
</serial>""")
class LibvirtConfigGuestConsoleTest(LibvirtConfigBaseTest):
def test_config_pty(self):
obj = config.LibvirtConfigGuestConsole()
obj.type = "pty"
xml = obj.to_xml()
self.assertXmlEqual(xml, """
<console type="pty"/>""")
class LibvirtConfigGuestChannelTest(LibvirtConfigBaseTest):
def test_config_spice_minimal(self):
obj = config.LibvirtConfigGuestChannel()
obj.type = "spicevmc"
xml = obj.to_xml()
self.assertXmlEqual(xml, """
<channel type="spicevmc">
<target type='virtio'/>
</channel>""")
def test_config_spice_full(self):
obj = config.LibvirtConfigGuestChannel()
obj.type = "spicevmc"
obj.target_name = "com.redhat.spice.0"
xml = obj.to_xml()
self.assertXmlEqual(xml, """
<channel type="spicevmc">
<target type='virtio' name='com.redhat.spice.0'/>
</channel>""")
def test_config_qga_full(self):
obj = config.LibvirtConfigGuestChannel()
obj.type = "unix"
obj.target_name = "org.qemu.guest_agent.0"
obj.source_path = "/var/lib/libvirt/qemu/%s.%s.sock" % (
obj.target_name, "instance-name")
xml = obj.to_xml()
self.assertXmlEqual(xml, """
<channel type="unix">
<source path="%s" mode="bind"/>
<target type="virtio" name="org.qemu.guest_agent.0"/>
</channel>""" % obj.source_path)
class LibvirtConfigGuestInterfaceTest(LibvirtConfigBaseTest):
def test_config_ethernet(self):
obj = config.LibvirtConfigGuestInterface()
obj.net_type = "ethernet"
obj.mac_addr = "DE:AD:BE:EF:CA:FE"
obj.model = "virtio"
obj.target_dev = "vnet0"
obj.driver_name = "vhost"
obj.vif_inbound_average = 1024000
obj.vif_inbound_peak = 10240000
obj.vif_inbound_burst = 1024000
obj.vif_outbound_average = 1024000
obj.vif_outbound_peak = 10240000
obj.vif_outbound_burst = 1024000
xml = obj.to_xml()
self.assertXmlEqual(xml, """
<interface type="ethernet">
<mac address="DE:AD:BE:EF:CA:FE"/>
<model type="virtio"/>
<driver name="vhost"/>
<target dev="vnet0"/>
<bandwidth>
<inbound average="1024000" peak="10240000" burst="1024000"/>
<outbound average="1024000" peak="10240000" burst="1024000"/>
</bandwidth>
</interface>""")
def test_config_bridge(self):
obj = config.LibvirtConfigGuestInterface()
obj.net_type = "bridge"
obj.source_dev = "br0"
obj.mac_addr = "DE:AD:BE:EF:CA:FE"
obj.model = "virtio"
obj.target_dev = "tap12345678"
obj.filtername = "clean-traffic"
obj.filterparams.append({"key": "IP", "value": "192.168.122.1"})
obj.vif_inbound_average = 1024000
obj.vif_inbound_peak = 10240000
obj.vif_inbound_burst = 1024000
obj.vif_outbound_average = 1024000
obj.vif_outbound_peak = 10240000
obj.vif_outbound_burst = 1024000
xml = obj.to_xml()
self.assertXmlEqual(xml, """
<interface type="bridge">
<mac address="DE:AD:BE:EF:CA:FE"/>
<model type="virtio"/>
<source bridge="br0"/>
<target dev="tap12345678"/>
<filterref filter="clean-traffic">
<parameter name="IP" value="192.168.122.1"/>
</filterref>
<bandwidth>
<inbound average="1024000" peak="10240000" burst="1024000"/>
<outbound average="1024000" peak="10240000" burst="1024000"/>
</bandwidth>
</interface>""")
def test_config_bridge_ovs(self):
obj = config.LibvirtConfigGuestInterface()
obj.net_type = "bridge"
obj.source_dev = "br0"
obj.mac_addr = "DE:AD:BE:EF:CA:FE"
obj.model = "virtio"
obj.target_dev = "tap12345678"
obj.vporttype = "openvswitch"
obj.vportparams.append({"key": "instanceid", "value": "foobar"})
xml = obj.to_xml()
self.assertXmlEqual(xml, """
<interface type="bridge">
<mac address="DE:AD:BE:EF:CA:FE"/>
<model type="virtio"/>
<source bridge="br0"/>
<target dev="tap12345678"/>
<virtualport type="openvswitch">
<parameters instanceid="foobar"/>
</virtualport>
</interface>""")
def test_config_8021Qbh(self):
obj = config.LibvirtConfigGuestInterface()
obj.net_type = "direct"
obj.mac_addr = "DE:AD:BE:EF:CA:FE"
obj.model = "virtio"
obj.target_dev = "tap12345678"
obj.source_dev = "eth0"
obj.vporttype = "802.1Qbh"
xml = obj.to_xml()
self.assertXmlEqual(xml, """
<interface type="direct">
<mac address="DE:AD:BE:EF:CA:FE"/>
<model type="virtio"/>
<source dev="eth0" mode="private"/>
<target dev="tap12345678"/>
<virtualport type="802.1Qbh"/>
</interface>""")
def test_config_direct(self):
obj = config.LibvirtConfigGuestInterface()
obj.net_type = "direct"
obj.mac_addr = "DE:AD:BE:EF:CA:FE"
obj.model = "virtio"
obj.source_dev = "eth0"
obj.source_mode = "passthrough"
xml = obj.to_xml()
self.assertXmlEqual(xml, """
<interface type="direct">
<mac address="DE:AD:BE:EF:CA:FE"/>
<model type="virtio"/>
<source dev="eth0" mode="passthrough"/>
</interface>""")
def test_config_8021Qbh_hostdev(self):
obj = config.LibvirtConfigGuestInterface()
obj.net_type = "hostdev"
obj.mac_addr = "DE:AD:BE:EF:CA:FE"
obj.source_dev = "0000:0a:00.1"
obj.vporttype = "802.1Qbh"
obj.add_vport_param("profileid", "MyPortProfile")
xml = obj.to_xml()
self.assertXmlEqual(xml, """
<interface type="hostdev" managed="yes">
<mac address="DE:AD:BE:EF:CA:FE"/>
<source>
<address type="pci" domain="0x0000"
bus="0x0a" slot="0x00" function="0x1"/>
</source>
<virtualport type="802.1Qbh">
<parameters profileid="MyPortProfile"/>
</virtualport>
</interface>""")
def test_config_hw_veb_hostdev(self):
obj = config.LibvirtConfigGuestInterface()
obj.net_type = "hostdev"
obj.mac_addr = "DE:AD:BE:EF:CA:FE"
obj.source_dev = "0000:0a:00.1"
obj.vlan = "100"
xml = obj.to_xml()
self.assertXmlEqual(xml, """
<interface type="hostdev" managed="yes">
<mac address="DE:AD:BE:EF:CA:FE"/>
<source>
<address type="pci" domain="0x0000"
bus="0x0a" slot="0x00" function="0x1"/>
</source>
<vlan>
<tag id="100"/>
</vlan>
</interface>""")
class LibvirtConfigGuestTest(LibvirtConfigBaseTest):
def test_config_lxc(self):
obj = config.LibvirtConfigGuest()
obj.virt_type = "lxc"
obj.memory = 100 * units.Mi
obj.vcpus = 2
obj.cpuset = set([0, 1, 3, 4, 5])
obj.name = "demo"
obj.uuid = "b38a3f43-4be2-4046-897f-b67c2f5e0147"
obj.os_type = "exe"
obj.os_init_path = "/sbin/init"
fs = config.LibvirtConfigGuestFilesys()
fs.source_dir = "/root/lxc"
fs.target_dir = "/"
obj.add_device(fs)
xml = obj.to_xml()
self.assertXmlEqual(xml, """
<domain type="lxc">
<uuid>b38a3f43-4be2-4046-897f-b67c2f5e0147</uuid>
<name>demo</name>
<memory>104857600</memory>
<vcpu cpuset="0-1,3-5">2</vcpu>
<os>
<type>exe</type>
<init>/sbin/init</init>
</os>
<devices>
<filesystem type="mount">
<source dir="/root/lxc"/>
<target dir="/"/>
</filesystem>
</devices>
</domain>""")
def test_config_lxc_with_idmap(self):
obj = config.LibvirtConfigGuest()
obj.virt_type = "lxc"
obj.memory = 100 * units.Mi
obj.vcpus = 2
obj.cpuset = set([0, 1, 3, 4, 5])
obj.name = "demo"
obj.uuid = "b38a3f43-4be2-4046-897f-b67c2f5e0147"
obj.os_type = "exe"
obj.os_init_path = "/sbin/init"
uidmap = config.LibvirtConfigGuestUIDMap()
uidmap.target = "10000"
uidmap.count = "1"
obj.idmaps.append(uidmap)
gidmap = config.LibvirtConfigGuestGIDMap()
gidmap.target = "10000"
gidmap.count = "1"
obj.idmaps.append(gidmap)
fs = config.LibvirtConfigGuestFilesys()
fs.source_dir = "/root/lxc"
fs.target_dir = "/"
obj.add_device(fs)
xml = obj.to_xml()
self.assertXmlEqual("""
<domain type="lxc">
<uuid>b38a3f43-4be2-4046-897f-b67c2f5e0147</uuid>
<name>demo</name>
<memory>104857600</memory>
<vcpu cpuset="0-1,3-5">2</vcpu>
<os>
<type>exe</type>
<init>/sbin/init</init>
</os>
<devices>
<filesystem type="mount">
<source dir="/root/lxc"/>
<target dir="/"/>
</filesystem>
</devices>
<idmap>
<uid start="0" target="10000" count="1"/>
<gid start="0" target="10000" count="1"/>
</idmap>
</domain>""", xml)
def test_config_xen_pv(self):
obj = config.LibvirtConfigGuest()
obj.virt_type = "xen"
obj.memory = 100 * units.Mi
obj.vcpus = 2
obj.cpuset = set([0, 1, 3, 4, 5])
obj.name = "demo"
obj.uuid = "b38a3f43-4be2-4046-897f-b67c2f5e0147"
obj.os_type = "linux"
obj.os_kernel = "/tmp/vmlinuz"
obj.os_initrd = "/tmp/ramdisk"
obj.os_cmdline = "console=xvc0"
disk = config.LibvirtConfigGuestDisk()
disk.source_type = "file"
disk.source_path = "/tmp/img"
disk.target_dev = "/dev/xvda"
disk.target_bus = "xen"
obj.add_device(disk)
xml = obj.to_xml()
self.assertXmlEqual(xml, """
<domain type="xen">
<uuid>b38a3f43-4be2-4046-897f-b67c2f5e0147</uuid>
<name>demo</name>
<memory>104857600</memory>
<vcpu cpuset="0-1,3-5">2</vcpu>
<os>
<type>linux</type>
<kernel>/tmp/vmlinuz</kernel>
<initrd>/tmp/ramdisk</initrd>
<cmdline>console=xvc0</cmdline>
</os>
<devices>
<disk type="file" device="disk">
<source file="/tmp/img"/>
<target bus="xen" dev="/dev/xvda"/>
</disk>
</devices>
</domain>""")
def test_config_xen_hvm(self):
obj = config.LibvirtConfigGuest()
obj.virt_type = "xen"
obj.memory = 100 * units.Mi
obj.vcpus = 2
obj.cpuset = set([0, 1, 3, 4, 5])
obj.name = "demo"
obj.uuid = "b38a3f43-4be2-4046-897f-b67c2f5e0147"
obj.os_type = "hvm"
obj.os_loader = '/usr/lib/xen/boot/hvmloader'
obj.os_root = "root=xvda"
obj.os_cmdline = "console=xvc0"
obj.pae = True
obj.acpi = True
obj.apic = True
disk = config.LibvirtConfigGuestDisk()
disk.source_type = "file"
disk.source_path = "/tmp/img"
disk.target_dev = "/dev/xvda"
disk.target_bus = "xen"
obj.add_device(disk)
xml = obj.to_xml()
self.assertXmlEqual(xml, """
<domain type="xen">
<uuid>b38a3f43-4be2-4046-897f-b67c2f5e0147</uuid>
<name>demo</name>
<memory>104857600</memory>
<vcpu cpuset="0-1,3-5">2</vcpu>
<os>
<type>hvm</type>
<loader>/usr/lib/xen/boot/hvmloader</loader>
<cmdline>console=xvc0</cmdline>
<root>root=xvda</root>
</os>
<features>
<acpi/>
<apic/>
<pae/>
</features>
<devices>
<disk type="file" device="disk">
<source file="/tmp/img"/>
<target bus="xen" dev="/dev/xvda"/>
</disk>
</devices>
</domain>""")
def test_config_kvm(self):
obj = config.LibvirtConfigGuest()
obj.virt_type = "kvm"
obj.memory = 100 * units.Mi
obj.vcpus = 2
obj.cpuset = set([0, 1, 3, 4, 5])
obj.cputune = config.LibvirtConfigGuestCPUTune()
obj.cputune.shares = 100
obj.cputune.quota = 50000
obj.cputune.period = 25000
obj.membacking = config.LibvirtConfigGuestMemoryBacking()
obj.membacking.hugepages = True
obj.memtune = config.LibvirtConfigGuestMemoryTune()
obj.memtune.hard_limit = 496
obj.memtune.soft_limit = 672
obj.memtune.swap_hard_limit = 1638
obj.memtune.min_guarantee = 2970
obj.name = "demo"
obj.uuid = "b38a3f43-4be2-4046-897f-b67c2f5e0147"
obj.os_type = "linux"
obj.os_boot_dev = ["hd", "cdrom", "fd"]
obj.os_smbios = config.LibvirtConfigGuestSMBIOS()
obj.pae = True
obj.acpi = True
obj.apic = True
obj.sysinfo = config.LibvirtConfigGuestSysinfo()
obj.sysinfo.bios_vendor = "Acme"
obj.sysinfo.system_version = "1.0.0"
disk = config.LibvirtConfigGuestDisk()
disk.source_type = "file"
disk.source_path = "/tmp/img"
disk.target_dev = "/dev/vda"
disk.target_bus = "virtio"
obj.add_device(disk)
xml = obj.to_xml()
self.assertXmlEqual(xml, """
<domain type="kvm">
<uuid>b38a3f43-4be2-4046-897f-b67c2f5e0147</uuid>
<name>demo</name>
<memory>104857600</memory>
<memoryBacking>
<hugepages/>
</memoryBacking>
<memtune>
<hard_limit units="K">496</hard_limit>
<soft_limit units="K">672</soft_limit>
<swap_hard_limit units="K">1638</swap_hard_limit>
<min_guarantee units="K">2970</min_guarantee>
</memtune>
<vcpu cpuset="0-1,3-5">2</vcpu>
<sysinfo type='smbios'>
<bios>
<entry name="vendor">Acme</entry>
</bios>
<system>
<entry name="version">1.0.0</entry>
</system>
</sysinfo>
<os>
<type>linux</type>
<boot dev="hd"/>
<boot dev="cdrom"/>
<boot dev="fd"/>
<smbios mode="sysinfo"/>
</os>
<features>
<acpi/>
<apic/>
<pae/>
</features>
<cputune>
<shares>100</shares>
<quota>50000</quota>
<period>25000</period>
</cputune>
<devices>
<disk type="file" device="disk">
<source file="/tmp/img"/>
<target bus="virtio" dev="/dev/vda"/>
</disk>
</devices>
</domain>""")
def test_config_machine_type(self):
obj = config.LibvirtConfigGuest()
obj.virt_type = "kvm"
obj.memory = 100 * units.Mi
obj.vcpus = 2
obj.name = "demo"
obj.uuid = "b38a3f43-4be2-4046-897f-b67c2f5e0147"
obj.os_type = "hvm"
obj.os_mach_type = "fake_machine_type"
xml = obj.to_xml()
self.assertXmlEqual(xml, """
<domain type="kvm">
<uuid>b38a3f43-4be2-4046-897f-b67c2f5e0147</uuid>
<name>demo</name>
<memory>104857600</memory>
<vcpu>2</vcpu>
<os>
<type machine="fake_machine_type">hvm</type>
</os>
</domain>""")
def test_ConfigGuest_parse_devices(self):
xmldoc = """ <domain type="kvm">
<devices>
<hostdev mode="subsystem" type="pci" managed="no">
</hostdev>
</devices>
</domain>
"""
obj = config.LibvirtConfigGuest()
obj.parse_str(xmldoc)
self.assertEqual(len(obj.devices), 1)
self.assertIsInstance(obj.devices[0],
config.LibvirtConfigGuestHostdevPCI)
self.assertEqual(obj.devices[0].mode, 'subsystem')
self.assertEqual(obj.devices[0].managed, 'no')
def test_ConfigGuest_parse_devices_wrong_type(self):
xmldoc = """ <domain type="kvm">
<devices>
<hostdev mode="subsystem" type="xxxx" managed="no">
</hostdev>
</devices>
</domain>
"""
obj = config.LibvirtConfigGuest()
obj.parse_str(xmldoc)
self.assertEqual(len(obj.devices), 0)
    def test_ConfigGuest_parse_cpu(self):
xmldoc = """ <domain>
<cpu mode='custom' match='exact'>
<model>kvm64</model>
</cpu>
</domain>
"""
obj = config.LibvirtConfigGuest()
obj.parse_str(xmldoc)
self.assertEqual(obj.cpu.mode, 'custom')
self.assertEqual(obj.cpu.match, 'exact')
self.assertEqual(obj.cpu.model, 'kvm64')
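# Hedged illustration of how the objects exercised above compose in practice;
# the attribute values are examples taken from the tests, not requirements:
#
#     guest = config.LibvirtConfigGuest()
#     guest.virt_type = "kvm"
#     guest.name = "demo"
#     guest.memory = 100 * units.Mi
#     guest.vcpus = 2
#     disk = config.LibvirtConfigGuestDisk()
#     disk.source_type = "file"
#     disk.source_path = "/tmp/img"
#     disk.target_dev = "/dev/vda"
#     disk.target_bus = "virtio"
#     guest.add_device(disk)
#     xml = guest.to_xml()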
class LibvirtConfigGuestSnapshotTest(LibvirtConfigBaseTest):
def test_config_snapshot(self):
obj = config.LibvirtConfigGuestSnapshot()
obj.name = "Demo"
xml = obj.to_xml()
self.assertXmlEqual(xml, """
<domainsnapshot>
<name>Demo</name>
<disks/>
</domainsnapshot>""")
def test_config_snapshot_with_disks(self):
obj = config.LibvirtConfigGuestSnapshot()
obj.name = "Demo"
disk = config.LibvirtConfigGuestSnapshotDisk()
disk.name = 'vda'
disk.source_path = 'source-path'
disk.source_type = 'file'
disk.snapshot = 'external'
disk.driver_name = 'qcow2'
obj.add_disk(disk)
disk2 = config.LibvirtConfigGuestSnapshotDisk()
disk2.name = 'vdb'
disk2.snapshot = 'no'
obj.add_disk(disk2)
xml = obj.to_xml()
self.assertXmlEqual(xml, """
<domainsnapshot>
<name>Demo</name>
<disks>
<disk name='vda' snapshot='external' type='file'>
<source file='source-path'/>
</disk>
<disk name='vdb' snapshot='no'/>
</disks>
</domainsnapshot>""")
def test_config_snapshot_with_network_disks(self):
obj = config.LibvirtConfigGuestSnapshot()
obj.name = "Demo"
disk = config.LibvirtConfigGuestSnapshotDisk()
disk.name = 'vda'
disk.source_name = 'source-file'
disk.source_type = 'network'
disk.source_hosts = ['host1']
disk.source_ports = ['12345']
disk.source_protocol = 'glusterfs'
disk.snapshot = 'external'
disk.driver_name = 'qcow2'
obj.add_disk(disk)
disk2 = config.LibvirtConfigGuestSnapshotDisk()
disk2.name = 'vdb'
disk2.snapshot = 'no'
obj.add_disk(disk2)
xml = obj.to_xml()
self.assertXmlEqual(xml, """
<domainsnapshot>
<name>Demo</name>
<disks>
<disk name='vda' snapshot='external' type='network'>
<source protocol='glusterfs' name='source-file'>
<host name='host1' port='12345'/>
</source>
</disk>
<disk name='vdb' snapshot='no'/>
</disks>
</domainsnapshot>""")
class LibvirtConfigNodeDeviceTest(LibvirtConfigBaseTest):
def test_config_virt_usb_device(self):
xmlin = """
<device>
<name>usb_0000_09_00_0</name>
<parent>pci_0000_00_1c_0</parent>
<driver>
<name>vxge</name>
</driver>
<capability type="usb">
<domain>0</domain>
<capability type="fake_usb">
<address fake_usb="fake"/>
</capability>
</capability>
</device>"""
obj = config.LibvirtConfigNodeDevice()
obj.parse_str(xmlin)
self.assertIsNone(obj.pci_capability)
def test_config_virt_device(self):
xmlin = """
<device>
<name>pci_0000_09_00_0</name>
<parent>pci_0000_00_1c_0</parent>
<driver>
<name>vxge</name>
</driver>
<capability type="pci">
<domain>0</domain>
<bus>9</bus>
<slot>0</slot>
<function>0</function>
<product id="0x5833">X3100 Series 10 Gigabit Ethernet PCIe</product>
<vendor id="0x17d5">Neterion Inc.</vendor>
<capability type="virt_functions">
<address domain="0x0000" bus="0x0a" slot="0x00" function="0x1"/>
<address domain="0x0000" bus="0x0a" slot="0x00" function="0x2"/>
<address domain="0x0000" bus="0x0a" slot="0x00" function="0x3"/>
</capability>
</capability>
</device>"""
obj = config.LibvirtConfigNodeDevice()
obj.parse_str(xmlin)
self.assertIsInstance(obj.pci_capability,
config.LibvirtConfigNodeDevicePciCap)
self.assertIsInstance(obj.pci_capability.fun_capability[0],
config.LibvirtConfigNodeDevicePciSubFunctionCap)
self.assertEqual(obj.pci_capability.fun_capability[0].type,
"virt_functions")
self.assertEqual(len(obj.pci_capability.fun_capability[0].
device_addrs),
3)
self.assertEqual(obj.pci_capability.bus, 9)
def test_config_phy_device(self):
xmlin = """
<device>
<name>pci_0000_33_00_0</name>
<parent>pci_0000_22_1c_0</parent>
<driver>
<name>vxx</name>
</driver>
<capability type="pci">
<domain>0</domain>
<bus>9</bus>
<slot>0</slot>
<function>0</function>
<product id="0x5833">X3100 Series 10 Gigabit Ethernet PCIe</product>
<vendor id="0x17d5">Neterion Inc.</vendor>
<capability type="phys_function">
<address domain='0x0000' bus='0x09' slot='0x00' function='0x0'/>
</capability>
</capability>
</device>"""
obj = config.LibvirtConfigNodeDevice()
obj.parse_str(xmlin)
self.assertIsInstance(obj.pci_capability,
config.LibvirtConfigNodeDevicePciCap)
self.assertIsInstance(obj.pci_capability.fun_capability[0],
config.LibvirtConfigNodeDevicePciSubFunctionCap)
self.assertEqual(obj.pci_capability.fun_capability[0].type,
"phys_function")
self.assertEqual(len(obj.pci_capability.fun_capability[0].
device_addrs),
1)
def test_config_non_device(self):
xmlin = """
<device>
<name>pci_0000_33_00_0</name>
<parent>pci_0000_22_1c_0</parent>
<driver>
<name>vxx</name>
</driver>
<capability type="pci">
<domain>0</domain>
<bus>9</bus>
<slot>0</slot>
<function>0</function>
<product id="0x5833">X3100 Series 10 Gigabit Ethernet PCIe</product>
<vendor id="0x17d5">Neterion Inc.</vendor>
<capability type="virt_functions"/>
</capability>
</device>"""
obj = config.LibvirtConfigNodeDevice()
obj.parse_str(xmlin)
self.assertIsInstance(obj.pci_capability,
config.LibvirtConfigNodeDevicePciCap)
self.assertIsInstance(obj.pci_capability.fun_capability[0],
config.LibvirtConfigNodeDevicePciSubFunctionCap)
self.assertEqual(obj.pci_capability.fun_capability[0].type,
"virt_functions")
def test_config_fail_device(self):
xmlin = """
<device>
<name>pci_0000_33_00_0</name>
<parent>pci_0000_22_1c_0</parent>
<driver>
<name>vxx</name>
</driver>
<capability type="pci">
<domain>0</domain>
<bus>9</bus>
<slot>0</slot>
<function>0</function>
<product id="0x5833">X3100 Series 10 Gigabit Ethernet PCIe</product>
<vendor id="0x17d5">Neterion Inc.</vendor>
<capability type="virt_functions">
</capability>
</capability>
</device>"""
obj = config.LibvirtConfigNodeDevice()
obj.parse_str(xmlin)
self.assertIsInstance(obj.pci_capability,
config.LibvirtConfigNodeDevicePciCap)
self.assertIsInstance(obj.pci_capability.fun_capability[0],
config.LibvirtConfigNodeDevicePciSubFunctionCap)
self.assertEqual(obj.pci_capability.fun_capability[0].type,
"virt_functions")
def test_config_2cap_device(self):
xmlin = """
<device>
<name>pci_0000_04_10_7</name>
<parent>pci_0000_00_01_1</parent>
<driver>
<name>igbvf</name>
</driver>
<capability type='pci'>
<domain>0</domain>
<bus>4</bus>
<slot>16</slot>
<function>7</function>
<product id='0x1520'>I350 Ethernet Controller Virtual</product>
<vendor id='0x8086'>Intel Corporation</vendor>
<capability type='phys_function'>
<address domain='0x0000' bus='0x04' slot='0x00' function='0x3'/>
</capability>
<capability type='virt_functions'>
<address domain='0x0000' bus='0x04' slot='0x00' function='0x3'/>
</capability>
</capability>
</device>"""
obj = config.LibvirtConfigNodeDevice()
obj.parse_str(xmlin)
self.assertIsInstance(obj.pci_capability,
config.LibvirtConfigNodeDevicePciCap)
self.assertIsInstance(obj.pci_capability.fun_capability[0],
config.LibvirtConfigNodeDevicePciSubFunctionCap)
self.assertEqual(obj.pci_capability.fun_capability[0].type,
"phys_function")
self.assertEqual(obj.pci_capability.fun_capability[1].type,
"virt_functions")
class LibvirtConfigNodeDevicePciCapTest(LibvirtConfigBaseTest):
def test_config_device_pci_cap(self):
xmlin = """
<capability type="pci">
<domain>0</domain>
<bus>10</bus>
<slot>1</slot>
<function>5</function>
<product id="0x8086-3">Intel 10 Gigabit Ethernet</product>
<vendor id="0x8086">Intel Inc.</vendor>
<capability type="virt_functions">
<address domain="0000" bus="0x0a" slot="0x1" function="0x1"/>
<address domain="0001" bus="0x0a" slot="0x02" function="0x03"/>
</capability>
</capability>"""
obj = config.LibvirtConfigNodeDevicePciCap()
obj.parse_str(xmlin)
self.assertEqual(obj.domain, 0)
self.assertEqual(obj.bus, 10)
self.assertEqual(obj.slot, 1)
self.assertEqual(obj.function, 5)
self.assertEqual(obj.product, "Intel 10 Gigabit Ethernet")
self.assertEqual(obj.product_id, '0x8086-3')
self.assertEqual(obj.vendor, "Intel Inc.")
self.assertEqual(obj.vendor_id, "0x8086")
self.assertIsInstance(obj.fun_capability[0],
config.LibvirtConfigNodeDevicePciSubFunctionCap)
self.assertEqual(obj.fun_capability[0].type, 'virt_functions')
self.assertEqual(obj.fun_capability[0].device_addrs,
[("0000", "0x0a", "0x1", "0x1"),
("0001", "0x0a", "0x02", "0x03"), ])
def test_config_device_pci_2cap(self):
xmlin = """
<capability type="pci">
<domain>0</domain>
<bus>10</bus>
<slot>1</slot>
<function>5</function>
<product id="0x8086-3">Intel 10 Gigabit Ethernet</product>
<vendor id="0x8086">Intel Inc.</vendor>
<capability type="virt_functions">
<address domain="0000" bus="0x0a" slot="0x1" function="0x1"/>
<address domain="0001" bus="0x0a" slot="0x02" function="0x03"/>
</capability>
<capability type="phys_function">
<address domain="0000" bus="0x0a" slot="0x1" function="0x1"/>
</capability>
</capability>"""
obj = config.LibvirtConfigNodeDevicePciCap()
obj.parse_str(xmlin)
self.assertEqual(obj.domain, 0)
self.assertEqual(obj.bus, 10)
self.assertEqual(obj.slot, 1)
self.assertEqual(obj.function, 5)
self.assertEqual(obj.product, "Intel 10 Gigabit Ethernet")
self.assertEqual(obj.product_id, '0x8086-3')
self.assertEqual(obj.vendor, "Intel Inc.")
self.assertEqual(obj.vendor_id, "0x8086")
self.assertIsInstance(obj.fun_capability[0],
config.LibvirtConfigNodeDevicePciSubFunctionCap)
self.assertEqual(obj.fun_capability[0].type, 'virt_functions')
self.assertEqual(obj.fun_capability[0].device_addrs,
[("0000", '0x0a', '0x1', "0x1"),
("0001", "0x0a", "0x02", "0x03"), ])
self.assertEqual(obj.fun_capability[1].type, 'phys_function')
self.assertEqual(obj.fun_capability[1].device_addrs,
[("0000", '0x0a', '0x1', "0x1"), ])
def test_config_read_only_disk(self):
obj = config.LibvirtConfigGuestDisk()
obj.source_type = "disk"
obj.source_device = "disk"
obj.driver_name = "kvm"
obj.target_dev = "/dev/hdc"
obj.target_bus = "virtio"
obj.readonly = True
xml = obj.to_xml()
self.assertXmlEqual(xml, """
<disk type="disk" device="disk">
<driver name="kvm"/>
<target bus="virtio" dev="/dev/hdc"/>
<readonly/>
</disk>""")
obj.readonly = False
xml = obj.to_xml()
self.assertXmlEqual(xml, """
<disk type="disk" device="disk">
<driver name="kvm"/>
<target bus="virtio" dev="/dev/hdc"/>
</disk>""")
class LibvirtConfigNodeDevicePciSubFunctionCap(LibvirtConfigBaseTest):
def test_config_device_pci_subfunction(self):
xmlin = """
<capability type="virt_functions">
<address domain="0000" bus="0x0a" slot="0x1" function="0x1"/>
<address domain="0001" bus="0x0a" slot="0x02" function="0x03"/>
</capability>"""
fun_capability = config.LibvirtConfigNodeDevicePciSubFunctionCap()
fun_capability.parse_str(xmlin)
self.assertEqual('virt_functions', fun_capability.type)
self.assertEqual([("0000", "0x0a", "0x1", "0x1"),
("0001", "0x0a", "0x02", "0x03"), ],
fun_capability.device_addrs)
class LibvirtConfigGuestVideoTest(LibvirtConfigBaseTest):
def test_config_video_driver(self):
obj = config.LibvirtConfigGuestVideo()
obj.type = 'qxl'
xml = obj.to_xml()
self.assertXmlEqual(xml, """
<video>
<model type='qxl'/>
</video>""")
def test_config_video_driver_vram_heads(self):
obj = config.LibvirtConfigGuestVideo()
obj.type = 'qxl'
obj.vram = '9216'
obj.heads = '1'
xml = obj.to_xml()
self.assertXmlEqual(xml, """
<video>
<model type='qxl' vram='9216' heads='1'/>
</video>""")
class LibvirtConfigGuestSeclabel(LibvirtConfigBaseTest):
def test_config_seclabel_config(self):
obj = config.LibvirtConfigSeclabel()
xml = obj.to_xml()
self.assertXmlEqual(xml, """
<seclabel type='dynamic'/>""")
def test_config_seclabel_baselabel(self):
obj = config.LibvirtConfigSeclabel()
obj.type = 'dynamic'
obj.baselabel = 'system_u:system_r:my_svirt_t:s0'
xml = obj.to_xml()
self.assertXmlEqual(xml, """
<seclabel type='dynamic'>
<baselabel>system_u:system_r:my_svirt_t:s0</baselabel>
</seclabel>""")
class LibvirtConfigGuestRngTest(LibvirtConfigBaseTest):
def test_config_rng_driver(self):
obj = config.LibvirtConfigGuestRng()
xml = obj.to_xml()
self.assertXmlEqual(xml, """
<rng model='virtio'>
<backend model='random'/>
</rng>""")
def test_config_rng_driver_with_rate(self):
obj = config.LibvirtConfigGuestRng()
obj.backend = '/dev/random'
obj.rate_period = '12'
obj.rate_bytes = '34'
xml = obj.to_xml()
self.assertXmlEqual(xml, """
<rng model='virtio'>
<rate period='12' bytes='34'/>
<backend model='random'>/dev/random</backend>
</rng>""")
class LibvirtConfigGuestControllerTest(LibvirtConfigBaseTest):
def test_config_guest_contoller(self):
obj = config.LibvirtConfigGuestController()
obj.type = 'scsi'
obj.index = 0
obj.model = 'virtio-scsi'
xml = obj.to_xml()
self.assertXmlEqual(xml, """
<controller type='scsi' index='0' model='virtio-scsi'/>""")
class LibvirtConfigGuestWatchdogTest(LibvirtConfigBaseTest):
def test_config_watchdog(self):
obj = config.LibvirtConfigGuestWatchdog()
obj.action = 'none'
xml = obj.to_xml()
self.assertXmlEqual(xml, "<watchdog model='i6300esb' action='none'/>")
def test_config_watchdog_default_action(self):
obj = config.LibvirtConfigGuestWatchdog()
xml = obj.to_xml()
self.assertXmlEqual(xml, "<watchdog model='i6300esb' action='reset'/>")
class LibvirtConfigGuestCPUTuneTest(LibvirtConfigBaseTest):
def test_config_cputune_timeslice(self):
cputune = config.LibvirtConfigGuestCPUTune()
cputune.shares = 100
cputune.quota = 50000
cputune.period = 25000
xml = cputune.to_xml()
self.assertXmlEqual(xml, """
<cputune>
<shares>100</shares>
<quota>50000</quota>
<period>25000</period>
</cputune>""")
def test_config_cputune_vcpus(self):
cputune = config.LibvirtConfigGuestCPUTune()
vcpu0 = config.LibvirtConfigGuestCPUTuneVCPUPin()
vcpu0.id = 0
vcpu0.cpuset = set([0, 1])
vcpu1 = config.LibvirtConfigGuestCPUTuneVCPUPin()
vcpu1.id = 1
vcpu1.cpuset = set([2, 3])
vcpu2 = config.LibvirtConfigGuestCPUTuneVCPUPin()
vcpu2.id = 2
vcpu2.cpuset = set([4, 5])
vcpu3 = config.LibvirtConfigGuestCPUTuneVCPUPin()
vcpu3.id = 3
vcpu3.cpuset = set([6, 7])
cputune.vcpupin.extend([vcpu0, vcpu1, vcpu2, vcpu3])
xml = cputune.to_xml()
self.assertXmlEqual(xml, """
<cputune>
<vcpupin vcpu="0" cpuset="0-1"/>
<vcpupin vcpu="1" cpuset="2-3"/>
<vcpupin vcpu="2" cpuset="4-5"/>
<vcpupin vcpu="3" cpuset="6-7"/>
</cputune>""")
class LibvirtConfigGuestMemoryBackingTest(LibvirtConfigBaseTest):
def test_config_memory_backing_none(self):
obj = config.LibvirtConfigGuestMemoryBacking()
xml = obj.to_xml()
self.assertXmlEqual(xml, "<memoryBacking/>")
def test_config_memory_backing_all(self):
obj = config.LibvirtConfigGuestMemoryBacking()
obj.locked = True
obj.sharedpages = False
obj.hugepages = True
xml = obj.to_xml()
self.assertXmlEqual(xml, """
<memoryBacking>
<hugepages/>
<nosharedpages/>
<locked/>
</memoryBacking>""")
class LibvirtConfigGuestMemoryTuneTest(LibvirtConfigBaseTest):
def test_config_memory_backing_none(self):
obj = config.LibvirtConfigGuestMemoryTune()
xml = obj.to_xml()
self.assertXmlEqual(xml, "<memtune/>")
def test_config_memory_backing_all(self):
obj = config.LibvirtConfigGuestMemoryTune()
obj.soft_limit = 6
obj.hard_limit = 28
obj.swap_hard_limit = 140
obj.min_guarantee = 270
xml = obj.to_xml()
self.assertXmlEqual(xml, """
<memtune>
<hard_limit units="K">28</hard_limit>
<soft_limit units="K">6</soft_limit>
<swap_hard_limit units="K">140</swap_hard_limit>
<min_guarantee units="K">270</min_guarantee>
</memtune>""")
class LibvirtConfigGuestMetadataNovaTest(LibvirtConfigBaseTest):
def test_config_metadata(self):
meta = config.LibvirtConfigGuestMetaNovaInstance()
meta.package = "2014.2.3"
meta.name = "moonbuggy"
meta.creationTime = 1234567890
meta.roottype = "image"
meta.rootid = "fe55c69a-8b2e-4bbc-811a-9ad2023a0426"
owner = config.LibvirtConfigGuestMetaNovaOwner()
owner.userid = "3472c2a6-de91-4fb5-b618-42bc781ef670"
owner.username = "buzz"
owner.projectid = "f241e906-010e-4917-ae81-53f4fb8aa021"
owner.projectname = "moonshot"
meta.owner = owner
flavor = config.LibvirtConfigGuestMetaNovaFlavor()
flavor.name = "m1.lowgravity"
flavor.vcpus = 8
flavor.memory = 2048
flavor.swap = 10
flavor.disk = 50
flavor.ephemeral = 10
meta.flavor = flavor
xml = meta.to_xml()
self.assertXmlEqual(xml, """
<nova:instance xmlns:nova='http://openstack.org/xmlns/libvirt/nova/1.0'>
<nova:package version="2014.2.3"/>
<nova:name>moonbuggy</nova:name>
<nova:creationTime>2009-02-13 23:31:30</nova:creationTime>
<nova:flavor name="m1.lowgravity">
<nova:memory>2048</nova:memory>
<nova:disk>50</nova:disk>
<nova:swap>10</nova:swap>
<nova:ephemeral>10</nova:ephemeral>
<nova:vcpus>8</nova:vcpus>
</nova:flavor>
<nova:owner>
<nova:user
uuid="3472c2a6-de91-4fb5-b618-42bc781ef670">buzz</nova:user>
<nova:project
uuid="f241e906-010e-4917-ae81-53f4fb8aa021">moonshot</nova:project>
</nova:owner>
<nova:root type="image" uuid="fe55c69a-8b2e-4bbc-811a-9ad2023a0426"/>
</nova:instance>
""")
class LibvirtConfigGuestIDMap(LibvirtConfigBaseTest):
def test_config_id_map_parse_start_not_int(self):
xmlin = "<uid start='a' target='20000' count='5'/>"
obj = config.LibvirtConfigGuestIDMap()
self.assertRaises(ValueError, obj.parse_str, xmlin)
def test_config_id_map_parse_target_not_int(self):
xmlin = "<uid start='2' target='a' count='5'/>"
obj = config.LibvirtConfigGuestIDMap()
self.assertRaises(ValueError, obj.parse_str, xmlin)
def test_config_id_map_parse_count_not_int(self):
xmlin = "<uid start='2' target='20000' count='a'/>"
obj = config.LibvirtConfigGuestIDMap()
self.assertRaises(ValueError, obj.parse_str, xmlin)
def test_config_uid_map(self):
obj = config.LibvirtConfigGuestUIDMap()
obj.start = 1
obj.target = 10000
obj.count = 2
xml = obj.to_xml()
self.assertXmlEqual("<uid start='1' target='10000' count='2'/>", xml)
def test_config_uid_map_parse(self):
xmlin = "<uid start='2' target='20000' count='5'/>"
obj = config.LibvirtConfigGuestUIDMap()
obj.parse_str(xmlin)
self.assertEqual(2, obj.start)
self.assertEqual(20000, obj.target)
self.assertEqual(5, obj.count)
def test_config_gid_map(self):
obj = config.LibvirtConfigGuestGIDMap()
obj.start = 1
obj.target = 10000
obj.count = 2
xml = obj.to_xml()
self.assertXmlEqual("<gid start='1' target='10000' count='2'/>", xml)
def test_config_gid_map_parse(self):
xmlin = "<gid start='2' target='20000' count='5'/>"
obj = config.LibvirtConfigGuestGIDMap()
obj.parse_str(xmlin)
self.assertEqual(2, obj.start)
self.assertEqual(20000, obj.target)
self.assertEqual(5, obj.count)
class LibvirtConfigMemoryBalloonTest(LibvirtConfigBaseTest):
def test_config_memory_balloon_period(self):
balloon = config.LibvirtConfigMemoryBalloon()
balloon.model = 'fake_virtio'
balloon.period = 11
xml = balloon.to_xml()
expected_xml = """
<memballoon model='fake_virtio'>
<stats period='11'/>
</memballoon>"""
self.assertXmlEqual(expected_xml, xml)
|
|
"""
This file is part of the TheLMA (THe Laboratory Management Application) project.
See LICENSE.txt for licensing, CONTRIBUTORS.txt for contributor information.
Base classes and constants involved in pool stock sample creation tasks.
AAB
"""
from thelma.tools.semiconstants import PIPETTING_SPECS_NAMES
from thelma.tools.semiconstants import RACK_SHAPE_NAMES
from thelma.tools.semiconstants import get_min_transfer_volume
from thelma.tools.iso.base import StockRackLayout
from thelma.tools.iso.base import StockRackLayoutConverter
from thelma.tools.iso.base import StockRackParameters
from thelma.tools.iso.base import StockRackPosition
from thelma.tools.iso.base import _ISO_LABELS_BASE
from thelma.tools.stock.base import \
get_default_stock_concentration
from thelma.tools.utils.base import CONCENTRATION_CONVERSION_FACTOR
from thelma.tools.utils.base import VOLUME_CONVERSION_FACTOR
from thelma.tools.utils.base import get_trimmed_string
from thelma.tools.utils.base import is_larger_than
from thelma.tools.utils.base import round_up
from thelma.tools.utils.converters import \
MoleculeDesignPoolLayoutConverter
from thelma.tools.utils.layouts import FIXED_POSITION_TYPE
from thelma.tools.utils.layouts import MoleculeDesignPoolLayout
from thelma.tools.utils.layouts import MoleculeDesignPoolParameters
from thelma.tools.utils.layouts import MoleculeDesignPoolPosition
from thelma.entities.moleculedesign import MoleculeDesignPoolSet
from thelma.entities.tagging import Tag
__docformat__ = 'reStructuredText en'
__all__ = ['LABELS',
'DILUENT_INFO',
'VolumeCalculator',
'StockSampleCreationParameters',
'StockSampleCreationPosition',
'StockSampleCreationLayout',
'StockSampleCreationLayoutConverter',
'PoolCreationParameters',
'PoolCreationStockRackPosition',
'PoolCreationStockRackLayoutConverter',
'SingleDesignStockRackLayout',
'SingleDesignStockRackLayoutConverter']
#: Default preparation plate volume in ul.
DEFAULT_PREPARATION_PLATE_VOLUME = 43.3
class LABELS(_ISO_LABELS_BASE):
"""
    Generates and parses worklist and rack labels involved in pool stock
    sample creation ISO processing.
"""
#: Marker for stock racks that will contain the new pools.
ROLE_POOL_STOCK = 'ps'
#: Marker for stock racks that contain single designs that will be used
#: to generate the new pools.
ROLE_SINGLE_DESIGN_STOCK = 'sds'
#: Marker for ISO labels.
MARKER_ISO_LABEL = 'iso_label'
#: Marker for ISO request labels.
MARKER_ISO_REQUEST_LABEL = 'iso_request_label'
#: Marker for the layout number.
MARKER_LAYOUT_NUMBER = 'layout_number'
#: Is part of stock transfer worklist labels.
_FILL_WORKLIST_STOCK_TRANSFER = 'stock_transfer'
@classmethod
def create_iso_label(cls, iso_request_label, layout_number):
"""
Creates a label for a future ISO. The label contains the ISO request
label and the layout number.
"""
layout_num_str = '%02i' % (layout_number)
value_parts = [iso_request_label, layout_num_str]
return cls._create_label(value_parts)
@classmethod
def create_job_label(cls, iso_request_label, job_number):
"""
The job label contains the ISO request label and a running number
        as job number (you can get a new job number with
:func:`get_new_job_number`).
"""
job_num_str = '%02i' % (job_number)
value_parts = [iso_request_label, cls._FILL_ISO_JOB, job_num_str]
return cls._create_label(value_parts)
@classmethod
def create_stock_transfer_worklist_label(cls, iso_label):
"""
The stock transfer worklist label contains the ISO label and a
filler.
"""
value_parts = [cls._FILL_WORKLIST_STOCK_TRANSFER, iso_label]
return cls._create_label(value_parts)
@classmethod
def create_buffer_worklist_label(cls, iso_request_label):
"""
        The buffer dilution worklist label contains the ISO request label and a
filler.
"""
value_parts = [iso_request_label, cls._FILL_WORKLIST_DILUTION]
return cls._create_label(value_parts)
@classmethod
def create_stock_rack_label(cls, iso_label, rack_marker):
"""
The stock rack label contains the ISO label and the rack marker
(rack role and (optionally) rack_number).
"""
value_parts = [iso_label, rack_marker]
return cls._create_label(value_parts)
@classmethod
def parse_stock_rack_label(cls, stock_rack_label):
"""
The stock rack label contains the ISO label and the rack marker
(rack role and (optionally) rack_number).
e.g. ssgen_test_01_sds#3 --> rack marker: sds#3
--> layout number: 1
--> ISO request label = ssgen_test
--> ISO label = ssgen_test_01
"""
value_parts = cls._get_value_parts(stock_rack_label)
rack_marker = value_parts[-1]
values = cls.parse_rack_marker(rack_marker)
values[cls.MARKER_RACK_MARKER] = rack_marker
ir_label = cls.SEPARATING_CHAR.join(value_parts[:-2])
layout_num = cls._parse_int_str(value_parts[-2])
values[cls.MARKER_ISO_REQUEST_LABEL] = ir_label
values[cls.MARKER_LAYOUT_NUMBER] = layout_num
iso_label = cls.create_iso_label(ir_label, layout_num)
values[cls.MARKER_ISO_LABEL] = iso_label
return values
#: The diluent info for the planned container dilutions (always buffer).
DILUENT_INFO = 'annealing buffer'
class VolumeCalculator(object):
"""
Calculates the volume that has to be transferred from a single design
stock tube to a future pool stock tube (for the given volume, concentration,
and number of designs).
"""
def __init__(self, target_volume, target_concentration, number_designs,
stock_concentration):
"""
Constructor:
:param target_volume: The requested volume for the new pool stock sample
*in ul*.
:type target_volume: positive number, unit ul
:param target_concentration: The requested pool concentration for the
new pool stock sample *in nM*.
:type target_concentration: positive number
:param number_designs: The number of designs per pool must be the same
for all pools to be created.
:type number_designs: positive integer
:param stock_concentration: The stock concentration for single designs
*in nM*.
:type stock_concentration: positive number, unit nM
"""
self.__target_volume = target_volume
self.__target_concentration = target_concentration
self.__number_designs = number_designs
self.__stock_concentration = stock_concentration
self.__adjusted_target_vol = None
self.__stock_transfer_vol = None
self.__buffer_volume = None
self.__min_cybio_transfer_vol = get_min_transfer_volume(
PIPETTING_SPECS_NAMES.CYBIO)
@classmethod
def from_iso_request(cls, iso_request):
"""
        Factory method generating a :class:`VolumeCalculator` for pool
        :class:`StockSampleCreationIsoRequest` objects.
The calculator determines the stock transfer volume for each single
molecule design, the buffer volume and checks whether the target
volume of the ISO request needs to be adjusted.
:param iso_request: Contains all required numbers.
:type iso_request:
:class:`thelma.entities.iso.StockSampleCreationIsoRequest`
"""
pool_set = iso_request.molecule_design_pool_set
single_design_stock_concentration = \
get_default_stock_concentration(pool_set.molecule_type,
number_designs=1)
kw = dict(
target_volume=iso_request.stock_volume * VOLUME_CONVERSION_FACTOR,
target_concentration=iso_request.stock_concentration \
* CONCENTRATION_CONVERSION_FACTOR,
number_designs=iso_request.number_designs,
stock_concentration=single_design_stock_concentration)
return cls(**kw)
def calculate(self):
"""
Determines the volumes for the annealing buffer and also the
single design stock transfers and adjusts the target volume, if
necessary.
        :raises ValueError: if the values are not compatible
"""
self.__calculate_single_stock_transfer_volume()
self.__calculate_buffer_volume()
def __calculate_single_stock_transfer_volume(self):
# Determines the volume that has to be transferred from a single design
# stock tube to a future pool stock tube (for the given volume,
# concentration, and number of designs). The target volume might
# be increased if the resulting single design transfer volume has
# more than 1 decimal place.
        # :raises ValueError: if the values are not compatible
target_single_conc = float(self.__target_concentration) \
/ self.__number_designs
if target_single_conc > self.__stock_concentration:
msg = 'The requested target concentration (%i nM) cannot be ' \
'achieved since it would require a concentration of %s nM ' \
'for each single design in the pool. However, the stock ' \
'concentration for this design type is only %s nM.' \
% (self.__target_concentration,
get_trimmed_string(target_single_conc),
get_trimmed_string(self.__stock_concentration))
raise ValueError(msg)
dil_factor = self.__stock_concentration / target_single_conc
min_target_volume = round_up(dil_factor * self.__min_cybio_transfer_vol)
if (min_target_volume > self.__target_volume):
msg = 'The target volume you have requested (%i ul) is too low ' \
'for the required dilution (1:%s) since the CyBio cannot ' \
'pipet less than %.1f ul per transfer. The volume that has ' \
'to be taken from the stock for each single molecule ' \
                  'design would be lower than that. Increase the target ' \
'volume to %.1f ul or increase the target concentration.' \
% (self.__target_volume, get_trimmed_string(dil_factor),
self.__min_cybio_transfer_vol,
round_up(min_target_volume, 0))
raise ValueError(msg)
self.__stock_transfer_vol = round_up(self.__target_volume / dil_factor)
self.__adjusted_target_vol = round(
self.__stock_transfer_vol * dil_factor, 1)
# must be at least 1 ul according to the last check
total_transfer_vol = self.__stock_transfer_vol * self.__number_designs
if total_transfer_vol > self.__target_volume:
msg = 'The target volume you have requested (%i ul) is too low ' \
                  'for the concentration you have ordered (%i nM) since it ' \
'would require already %s ul per molecule design (%s ul in ' \
'total) to achieve the requested concentration. Increase ' \
'the volume or lower the concentration, please.' \
% (self.__target_volume, self.__target_concentration,
get_trimmed_string(self.__stock_transfer_vol),
get_trimmed_string(total_transfer_vol))
raise ValueError(msg)
def __calculate_buffer_volume(self):
# Calculates the volume of the annealing buffer (*in ul*) required to
        # generate the desired concentration and volume. Also adjusts the
        # target volume if necessary (e.g. if the resulting buffer volume
        # would be below the minimum CyBio transfer volume).
buffer_volume = self.__adjusted_target_vol \
- (self.__stock_transfer_vol * self.__number_designs)
if (buffer_volume < 0.01 and buffer_volume >= 0):
buffer_volume = None
elif buffer_volume < self.__min_cybio_transfer_vol:
corr_factor = self.__min_cybio_transfer_vol / buffer_volume
target_single_conc = \
float(self.__target_concentration) / self.__number_designs
dil_factor = self.__stock_concentration / target_single_conc
self.__stock_transfer_vol = \
self.__stock_transfer_vol * corr_factor
self.__adjusted_target_vol = \
self.__number_designs * self.__stock_transfer_vol * dil_factor
self.__buffer_volume = buffer_volume
def get_single_design_stock_transfer_volume(self):
"""
Returns the volume that has to be transferred from a single design
stock tube to a future pool stock tube (for the given volume,
concentration, and number of designs)
"""
return self.__stock_transfer_vol
def get_adjusted_target_volume(self):
"""
The target volume for the ISO request might need to be increased
        in order to maintain an accurate target concentration since the
minimum step size for all pipetting methods is 0.1 ul.
An increase of the target volume can be triggered by both odd
single design stock transfer volumes and the buffer volume.
        Example: if a volume and concentration combination would result in a
        stock transfer volume of e.g. 1.333 ul, the single design transfer
        volume is increased to 1.4 ul and the target volume is adjusted
        accordingly.
        The adjusted target volume is determined during :func:`calculate`.
        If no adjustment has taken place, this method returns *None*.
"""
if is_larger_than(self.__adjusted_target_vol, self.__target_volume):
return self.__adjusted_target_vol
else:
return None
def get_buffer_volume(self):
"""
Returns the volume of the annealing buffer required to generate
the desired concentration and volume *in ul*.
"""
return self.__buffer_volume
def __str__(self):
return self.__class__.__name__
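# A minimal worked example of the calculation above (hypothetical numbers, not
# part of the original module; it assumes the CyBio minimum transfer volume is
# around 1 ul so that no volume adjustment is triggered).
def _example_volume_calculation():
    # 2-design pools at 10,000 nM in 30 ul from 50,000 nM single design stocks:
    # each design needs 5,000 nM, i.e. a 1:10 dilution, so 3 ul per design and
    # 30 - 2 * 3 = 24 ul annealing buffer; the 30 ul target needs no adjustment.
    calc = VolumeCalculator(target_volume=30,
                            target_concentration=10000,
                            number_designs=2,
                            stock_concentration=50000)
    calc.calculate()
    return (calc.get_single_design_stock_transfer_volume(),   # expected: 3.0
            calc.get_buffer_volume(),                         # expected: 24.0
            calc.get_adjusted_target_volume())                # expected: None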
class StockSampleCreationParameters(MoleculeDesignPoolParameters):
"""
Deals with the pools to be generated and the involved tubes.
"""
DOMAIN = 'stock_sample_generation'
ALLOWED_POSITION_TYPES = [FIXED_POSITION_TYPE]
#: A molecule design pool ID.
MOLECULE_DESIGN_POOL = MoleculeDesignPoolParameters.MOLECULE_DESIGN_POOL
#: A shortcut for :attr:`MOLECULE_DESIGN_POOL`.
POOL = MOLECULE_DESIGN_POOL
#: The molecule design IDs the pool consists of.
MOLECULE_DESIGNS = 'molecule_designs'
#: The barcodes for the single design stock tubes to be used (determined
#: via an optimizer).
STOCK_TUBE_BARCODES = 'stock_tube_barcodes'
REQUIRED = [POOL, MOLECULE_DESIGNS, STOCK_TUBE_BARCODES]
ALL = [POOL, MOLECULE_DESIGNS, STOCK_TUBE_BARCODES]
ALIAS_MAP = dict(MoleculeDesignPoolParameters.ALIAS_MAP,
**{MOLECULE_DESIGNS : ['molecule design IDs'],
STOCK_TUBE_BARCODES : []})
DOMAIN_MAP = dict(MoleculeDesignPoolParameters.DOMAIN_MAP, **{
MOLECULE_DESIGNS : DOMAIN, STOCK_TUBE_BARCODES : DOMAIN})
class StockSampleCreationPosition(MoleculeDesignPoolPosition):
"""
The pool ID, single molecule design IDs and stock tubes for a particular
position.
    **Equality condition**: equal :attr:`rack_position`, :attr:`pool` and
    :attr:`stock_tube_barcodes`.
"""
PARAMETER_SET = StockSampleCreationParameters
DELIMITER = '-'
EXPOSE_POSITION_TYPE = False
def __init__(self, rack_position, molecule_design_pool,
stock_tube_barcodes):
"""
:param rack_position: The source rack position in the source rack.
:type rack_position: :class:`thelma.entities.rack.RackPosition`
:param molecule_design_pool: A molecule design pool to generate.
:type molecule_design_pool:
:class:`thelma.entities.moleculedesign.MoleculeDesignPool`
:param stock_tube_barcodes: The stock tube barcodes for the single
molecule design tubes used to generate this pool.
:type stock_tube_barcodes: :class:`list`
"""
MoleculeDesignPoolPosition.__init__(self, rack_position=rack_position,
molecule_design_pool=molecule_design_pool,
position_type=FIXED_POSITION_TYPE)
if not isinstance(stock_tube_barcodes, list):
msg = 'The stock tube barcodes must be a list (obtained: %s).' \
% (stock_tube_barcodes.__class__.__name__)
raise TypeError(msg)
#: A list of molecules contained in the pool (ordered by ID).
self.molecule_designs = []
for md in molecule_design_pool.molecule_designs:
self.molecule_designs.append(md)
self.molecule_designs.sort()
#: The stock tube barcodes for the single molecule design tubes
#: used to generate this pool
self.stock_tube_barcodes = sorted(stock_tube_barcodes)
@property
def pool(self):
"""
Shortcut to the :attr:`molecule_design_pool`.
"""
return self.molecule_design_pool
def get_parameter_tag(self, parameter):
"""
        This method needs to be overridden because the value for the molecule
        designs tag is a concatenated string. Position types are not important
        here.
"""
if parameter == self.PARAMETER_SET.MOLECULE_DESIGNS:
return self.__get_molecule_designs_tag()
elif parameter == self.PARAMETER_SET.STOCK_TUBE_BARCODES:
return self.__get_stock_barcodes_tag()
else:
return MoleculeDesignPoolPosition.get_parameter_tag(self, parameter)
@classmethod
def __get_molecule_designs_tag_value(cls, molecule_designs):
"""
        The tag value contains the molecule designs as a concatenated string.
"""
return cls.DELIMITER.join([str(md.id) for md in molecule_designs])
def get_molecule_designs_tag_value(self):
"""
This parameter requires a special method because the value for the
molecule designs tag is a concatenated string.
"""
return self.__get_molecule_designs_tag_value(self.molecule_designs)
def __get_molecule_designs_tag(self):
"""
This parameter requires a special method because the value for the
molecule designs tag is a concatenated string.
"""
return Tag(self.PARAMETER_SET.DOMAIN,
self.PARAMETER_SET.MOLECULE_DESIGNS,
self.get_molecule_designs_tag_value())
@classmethod
def validate_molecule_designs(cls, pool, md_tag_value):
"""
        Compares the molecule designs of the pool to a molecule design tag
        value. Used by the layout converter for validation.
"""
pool_str = cls.__get_molecule_designs_tag_value(
sorted(pool.molecule_designs))
return pool_str == md_tag_value
def get_stock_barcodes_tag_value(self):
"""
This parameter requires a special method because the value for the
stock barcodes tag is a concatenated string.
Reverse method: :func:`get_tube_barcodes_from_tag_value`.
"""
return self.DELIMITER.join(self.stock_tube_barcodes)
def __get_stock_barcodes_tag(self):
"""
This parameter requires a special method because the value for the
stock barcodes tag is a concatenated string.
"""
return Tag(self.PARAMETER_SET.DOMAIN,
self.PARAMETER_SET.STOCK_TUBE_BARCODES,
self.get_stock_barcodes_tag_value())
@classmethod
def get_tube_barcodes_from_tag_value(cls, tube_barcode_tag_value):
"""
Converts a tag value for the stock tubes into a list of stock tube
barcodes (reverse method: :func:`get_stock_barcodes_tag_value`).
"""
return tube_barcode_tag_value.split(cls.DELIMITER)
def _get_parameter_values_map(self):
"""
The position type is not included.
"""
return {self.PARAMETER_SET.POOL : self.pool,
self.PARAMETER_SET.MOLECULE_DESIGNS : self.molecule_designs,
self.PARAMETER_SET.STOCK_TUBE_BARCODES : \
self.stock_tube_barcodes}
def __eq__(self, other):
if not MoleculeDesignPoolPosition.__eq__(self, other):
result = False
else:
result = self.stock_tube_barcodes == other.stock_tube_barcodes
return result
def __repr__(self):
str_format = '<%s rack position: %s, pool ID: %s, molecule ' \
'designs: %s, stock tubes: %s>'
params = (self.__class__.__name__, self.rack_position, self.pool.id,
self.get_molecule_designs_tag_value(),
self.get_stock_barcodes_tag_value())
return str_format % params
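# A small illustration of the tag value format used above: stock tube barcodes
# (and molecule design IDs) are concatenated with the '-' delimiter, and
# get_tube_barcodes_from_tag_value() splits such a value again.  Hypothetical
# barcodes, not part of the original module.
def _example_tube_barcode_tag_value():
    tag_value = '1000000001-1000000002'
    barcodes = StockSampleCreationPosition.get_tube_barcodes_from_tag_value(
                                                                    tag_value)
    return barcodes   # expected: ['1000000001', '1000000002']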
class StockSampleCreationLayout(MoleculeDesignPoolLayout):
"""
Defines the molecule design pool data for a stock tube rack or a
library plate.
"""
POSITION_CLS = StockSampleCreationPosition
__DEFAULT_SHAPE_NAME = RACK_SHAPE_NAMES.SHAPE_96
def __init__(self, shape=None):
"""
Constructor:
:param shape: The rack shape - usually a 96-well plate, but you can
            override that.
:type shape: :class:`thelma.entities.rack.RackShape`
:default shape: *None* (96-well)
"""
if shape is None:
shape = RACK_SHAPE_NAMES.from_name(self.__DEFAULT_SHAPE_NAME)
MoleculeDesignPoolLayout.__init__(self, shape)
def get_pool_set(self, molecule_type):
"""
Returns a pool set containing all pools from the layout.
:param molecule_type: The type of the pools in the set is derived
from the molecule type of the stock sample creation ISO request.
:type molecule_type: :class:`thelma.entities.moleculetype.MoleculeType`
"""
pools = set([lp.pool for lp in self._position_map.values()])
return MoleculeDesignPoolSet(molecule_type=molecule_type,
molecule_design_pools=pools)
class StockSampleCreationLayoutConverter(MoleculeDesignPoolLayoutConverter):
"""
Converts a :class:`thelma.entities.racklayout.RackLayout` into a
:class:`StockSampleCreationLayout`.
"""
NAME = 'Stock Sample Creation Layout Converter'
PARAMETER_SET = StockSampleCreationParameters
LAYOUT_CLS = StockSampleCreationLayout
POSITION_CLS = StockSampleCreationPosition
def __init__(self, rack_layout, parent=None):
MoleculeDesignPoolLayoutConverter.__init__(self, rack_layout,
parent=parent)
# intermediate storage of invalid rack positions
self.__mismatching_mds = None
self.__missing_tubes = None
self.__mismatching_tube_num = None
def reset(self):
MoleculeDesignPoolLayoutConverter.reset(self)
self.__mismatching_mds = []
self.__missing_tubes = []
self.__mismatching_tube_num = []
def _get_position_init_values(self, parameter_map, rack_pos):
kw = MoleculeDesignPoolLayoutConverter._get_position_init_values(
self,
parameter_map, rack_pos)
result = None
if not kw is None:
invalid = False
pos_label = rack_pos.label
pool = kw['molecule_design_pool']
md_str = parameter_map[self.PARAMETER_SET.MOLECULE_DESIGNS]
if not self.POSITION_CLS.validate_molecule_designs(pool, md_str):
exp_mds = [md.id for md in pool]
info = '%s (pool %s, found: %s, expected: %s)' \
% (pos_label, pool.id, md_str,
self._get_joined_str(
exp_mds, is_strs=False, separator='-'))
self.__mismatching_mds.append(info)
invalid = True
tube_str = parameter_map[self.PARAMETER_SET.STOCK_TUBE_BARCODES]
if tube_str is None:
self.__missing_tubes.append(pos_label)
result = None
else:
tubes = self.POSITION_CLS.get_tube_barcodes_from_tag_value(
tube_str)
if not len(tubes) == len(pool):
info = '%s (%s, number mds: %i)' \
% (pos_label, tube_str, len(pool))
self.__mismatching_tube_num.append(info)
invalid = True
if invalid:
result = None
else:
kw['stock_tube_barcodes'] = tubes
# Success!
result = kw
return result
def _record_errors(self):
MoleculeDesignPoolLayoutConverter._record_errors(self)
if len(self.__mismatching_mds) > 0:
            msg = 'The molecule design IDs for some pools do not match: %s.' \
% (self._get_joined_str(self.__mismatching_mds))
self.add_error(msg)
if len(self.__missing_tubes) > 0:
msg = 'The following rack positions do not contain stock tube ' \
'barcodes: %s.' % (self._get_joined_str(self.__missing_tubes))
self.add_error(msg)
if len(self.__mismatching_tube_num) > 0:
msg = 'For some positions the number of tubes does not match ' \
'the number of molecule designs: %s.' \
% (self._get_joined_str(self.__mismatching_tube_num))
self.add_error(msg)
class PoolCreationParameters(StockRackParameters):
"""
Deals with the pools to be generated in pure pool generation ISOs that
do not involve further processing after the pools have been
generated. Unlike normal :class:`StockRackParameters` the
positions do not need to have transfer targets.
"""
MUST_HAVE_TRANSFER_TARGETS = \
{StockRackParameters.TRANSFER_TARGETS : False}
class PoolCreationStockRackPosition(StockRackPosition):
"""
    Represents a position in an ISO stock rack for a stock sample creation
ISO that does not involve further processing after the pools have been
generated. Unlike normal :class:`StockRackPosition` objects they
do not need to have transfer targets.
"""
PARAMETER_SET = PoolCreationParameters
def __init__(self, rack_position, molecule_design_pool, tube_barcode,
transfer_targets=None):
StockRackPosition.__init__(self, rack_position, molecule_design_pool,
tube_barcode, transfer_targets)
class PoolCreationStockRackLayoutConverter(StockRackLayoutConverter):
"""
Converts a rack layout into a :class:`StockRackLayout`.
    Unlike with normal stock racks, these positions do not need to have
transfer targets.
"""
NAME = 'Pool Creation Stock Rack Layout Converter'
PARAMETER_SET = PoolCreationParameters
POSITION_CLS = PoolCreationStockRackPosition
class SingleDesignStockRackLayout(StockRackLayout):
"""
    Represents an ISO single design stock rack for stock sample creation.
    Unlike in a normal :class:`StockRackLayout`, positions within this
    layout may share the same transfer targets.
"""
ALLOW_DUPLICATE_TARGET_WELLS = \
{StockRackLayout.POSITION_CLS.PARAMETER_SET.TRANSFER_TARGETS : True}
class SingleDesignStockRackLayoutConverter(StockRackLayoutConverter):
"""
Converts a rack layout into a :class:`SingleDesignStockRackLayout`.
    Unlike with normal stock racks, several transfers might share the
    same target well.
"""
NAME = 'Single Design Stock Rack Layout Converter'
LAYOUT_CLS = SingleDesignStockRackLayout
|
|
# Author: Xavier Paredes-Fortuny ([email protected])
# License: MIT, see LICENSE.md
import numpy as np
import os
import matplotlib.pyplot as plt
from matplotlib.colors import LogNorm
from matplotlib.ticker import MultipleLocator, FormatStrFormatter
from mpl_toolkits.axes_grid1 import make_axes_locatable
from matplotlib import cm
from numpy import ma
def autocrop_img(filename):
"""Call epstools from bash to autocrop image"""
import subprocess
import os
try:
cwd, img_name = os.path.split(filename)
bashcmd = 'epstool --copy --bbox %s %s' % (img_name, 'tmp_'+img_name)
process = subprocess.Popen(bashcmd.split(), stdout=subprocess.PIPE, cwd=cwd)
process.wait()
bashcmd2 = 'mv %s %s' % ('tmp_'+img_name, img_name)
        process2 = subprocess.Popen(bashcmd2.split(), stdout=subprocess.PIPE, cwd=cwd)
        # wait for the rename as well, so the cropped file is in place on return
        process2.wait()
except:
raise RuntimeError('Unable to tight layout. Increase pad_inches?')
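# A small illustration of the shell commands autocrop_img() issues, for a
# hypothetical file path (nothing is executed here; not part of the original
# script).
def _example_autocrop_commands(filename='/tmp/plots/density_map.eps'):
    cwd, img_name = os.path.split(filename)
    return ['epstool --copy --bbox %s %s' % (img_name, 'tmp_' + img_name),
            'mv %s %s' % ('tmp_' + img_name, img_name)]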
def tracer_plot(x, y, z, vx, vy, plots_path, CGS_units, all_lines):
"""Plot the tracer map of the RHD simulation"""
# Set-up
from setup import params
input_file = params['input_file']
numbering = 1
vel_vectors = 0
cax_z = [0.855, 0.510, 0.03, 0.390]
cax_vel = [0.855, 0.100, 0.03, 0.390]
nmask = 20
qscale = 0.1
width0 = 0.005
counti0 = 11
countj0 = 16
#
print 'Tracer map'
np.seterr(divide='ignore', invalid='ignore')
fig = plt.figure()
ax = fig.add_subplot(111, aspect='equal')
if CGS_units == 0:
ax.set_xlabel(r'$r~[a]$')
ax.set_ylabel(r'$z~[a]$')
else:
ax.set_xlabel(r'$r~{\rm [cm]}$')
ax.set_ylabel(r'$z~{\rm [cm]}$')
xv, yv = np.meshgrid(x, y, sparse=False, indexing='ij')
im = ax.pcolormesh(xv, yv, z, cmap=cm.binary, vmin=0.01, vmax=0.99,
rasterized=True)
ax.set_xlim((x.min(), x.max()))
ax.set_ylim((y.min(), y.max()))
divider = make_axes_locatable(ax)
cax = divider.append_axes("right", size=0.1, pad=0.07)
cb0 = fig.colorbar(im, cax=cax)
if CGS_units == 0:
cb0.set_label(r'${\rm Tracer}$', labelpad=5)
else:
cb0.set_label(r'${\rm Tracer}$', labelpad=5)
if CGS_units == 0:
minorLocator = MultipleLocator(5)
else:
minorLocator = MultipleLocator(0.1e15)
ax.xaxis.set_minor_locator(minorLocator)
ax.yaxis.set_minor_locator(minorLocator)
from matplotlib.ticker import ScalarFormatter, FormatStrFormatter
class FixedOrderFormatter(ScalarFormatter):
"""Formats axis ticks using scientific notation with a constant order of
magnitude"""
def __init__(self, order_of_mag=0, useOffset=True, useMathText=False):
self._order_of_mag = order_of_mag
ScalarFormatter.__init__(self, useOffset=useOffset,
useMathText=useMathText)
def _set_orderOfMagnitude(self, range):
"""Over-riding this to avoid having orderOfMagnitude reset elsewhere"""
self.orderOfMagnitude = self._order_of_mag
numbering_mod = 5
offset_y = 0.0
if input_file == 'JET':
ax.xaxis.set_major_formatter(FixedOrderFormatter(15))
numbering_mod = 10
offset_y = 0.01e15
if CGS_units == 0:
minorLocator = MultipleLocator(5)
else:
minorLocator = MultipleLocator(0.1e15)
ax.xaxis.set_minor_locator(minorLocator)
ax.yaxis.set_minor_locator(minorLocator)
elif input_file == 'PULSAR':
ax.xaxis.set_major_formatter(FixedOrderFormatter(12))
ax.yaxis.set_major_formatter(FixedOrderFormatter(12))
numbering = 1
numbering_mod = 10
offset_y = 0.0
if CGS_units == 0:
minorLocator = MultipleLocator(5)
else:
minorLocator = MultipleLocator(0.1e12)
ax.xaxis.set_minor_locator(minorLocator)
ax.yaxis.set_minor_locator(minorLocator)
from matplotlib.ticker import AutoMinorLocator
ax.xaxis.set_minor_locator(AutoMinorLocator())
fig.savefig(plots_path+'tracer_map.eps',
bbox_inches='tight', pad_inches=0.02, dpi=300)
autocrop_img(plots_path+'tracer_map.eps')
if vel_vectors == 0:
# c = ['r','b','c','y','k']
c = ['0.1','0.25','0.4','0.55','0.7']
c += 500*c
for ll, line in enumerate(all_lines):
x, y, i, j, dens, eps, vx, vy, div, tracer, time = zip(*line)
x = np.array(x)
y = np.array(y)
nonzero_sel = np.ones_like(time, dtype=bool)
for l, t in enumerate(time):
if l != 0 and t==0:
nonzero_sel[l] = 0
if numbering == 1:
if (ll+1)%numbering_mod == 0 or ll == 0:
ax.annotate(str(ll+1), xy=(x[0], y[0]+offset_y), xycoords='data', size=6, color='b')
ax.plot(x[nonzero_sel], y[nonzero_sel], lw=0.5, color=c[ll])
fig.savefig(plots_path+'tracer_map_with_lines.eps',
bbox_inches='tight', pad_inches=0.07, dpi=300)
autocrop_img(plots_path+'tracer_map_with_lines.eps')
plt.close(fig)
return
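# The per-line trimming loops above keep only the points that were actually
# integrated: a time of 0 marks unused trailing entries, except for the seed
# point at index 0.  A vectorized equivalent, shown here as a hypothetical
# helper (not part of the original script):
def _integrated_points(time):
    sel = np.asarray(time) != 0
    sel[0] = True   # the starting point legitimately has time == 0
    return sel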
def pressure_plot(x, y, z, vx, vy, plots_path, CGS_units, all_lines):
"""Plot the pressure map of the RHD simulation"""
print 'Pressure map'
np.seterr(divide='ignore', invalid='ignore')
fig = plt.figure()
ax = fig.add_subplot(111, aspect='equal')
if CGS_units == 0:
ax.set_xlabel(r'$r~[a]$')
ax.set_ylabel(r'$z~[a]$')
else:
ax.set_xlabel(r'$r~[cm]$')
ax.set_ylabel(r'$z~[cm]$')
xv, yv = np.meshgrid(x, y, sparse=False, indexing='ij')
im = ax.pcolor(xv, yv, z, norm=LogNorm(vmin=z[z!=0].min(), vmax=z.max()),
cmap=cm.jet, rasterized=True)
ax.set_xlim((x.min(), x.max()))
ax.set_ylim((y.min(), y.max()))
cb0 = fig.colorbar(im)
if CGS_units == 0:
cb0.set_label(r'${\rm Pressure}~[\rho_0 c^2]$', labelpad=1)
else:
cb0.set_label(r'${\rm Pressure}~[erg/cm^3]$', labelpad=1)
from matplotlib.ticker import AutoMinorLocator
ax.xaxis.set_minor_locator(AutoMinorLocator())
fig.savefig(plots_path+'pressure_map.eps',
bbox_inches='tight', pad_inches=0.07, dpi=300)
for ll, line in enumerate(all_lines):
x, y, i, j, dens, eps, vx, vy, div, tracer, time = zip(*line)
x = np.array(x)
y = np.array(y)
nonzero_sel = np.ones_like(time, dtype=bool)
for l, t in enumerate(time):
if l != 0 and t==0:
nonzero_sel[l] = 0
if (ll+1)%5 == 0 or ll == 0:
ax.annotate(str(ll+1), xy=(x[0], y[0]), xycoords='data', size=6)
ax.plot(x[nonzero_sel], y[nonzero_sel])
fig.savefig(plots_path+'pressure_map_with_lines.eps',
bbox_inches='tight', pad_inches=0.02, dpi=300)
plt.close(fig)
return
def beta_plot(x, y, z, vx, vy, plots_path, CGS_units, all_lines):
"""Plot the beta map of the RHD simulation"""
print 'Beta map'
np.seterr(divide='ignore', invalid='ignore')
fig = plt.figure()
ax = fig.add_subplot(111, aspect='equal')
if CGS_units == 0:
ax.set_xlabel(r'$r~[a]$')
ax.set_ylabel(r'$z~[a]$')
else:
ax.set_xlabel(r'$r~[cm]$')
ax.set_ylabel(r'$z~[cm]$')
xv, yv = np.meshgrid(x, y, sparse=False, indexing='ij')
im = ax.pcolor(xv, yv, z, norm=LogNorm(vmin=z[z!=0].min(), vmax=z.max()),
cmap=cm.jet, rasterized=True)
ax.set_xlim((x.min(), x.max()))
ax.set_ylim((y.min(), y.max()))
cb0 = fig.colorbar(im)
if CGS_units == 0:
cb0.set_label(r'${\rm \beta}$', labelpad=1)
else:
cb0.set_label(r'${\rm \beta}$', labelpad=1)
from matplotlib.ticker import AutoMinorLocator
ax.xaxis.set_minor_locator(AutoMinorLocator())
fig.savefig(plots_path+'beta_map.eps',
bbox_inches='tight', pad_inches=0.07, dpi=300)
for ll, line in enumerate(all_lines):
x, y, i, j, dens, eps, vx, vy, div, tracer, time = zip(*line)
x = np.array(x)
y = np.array(y)
nonzero_sel = np.ones_like(time, dtype=bool)
for l, t in enumerate(time):
if l != 0 and t==0:
nonzero_sel[l] = 0
if (ll+1)%5 == 0 or ll == 0:
ax.annotate(str(ll+1), xy=(x[0], y[0]), xycoords='data', size=6)
ax.plot(x[nonzero_sel], y[nonzero_sel])
fig.savefig(plots_path+'beta_map_with_lines.eps',
bbox_inches='tight', pad_inches=0.07, dpi=300)
plt.close(fig)
return
def eps_plot(x, y, z, vx, vy, plots_path, CGS_units, all_lines):
"""Plot the internal energy map of the RHD simulation"""
print 'Specific internal energy map'
np.seterr(divide='ignore', invalid='ignore')
fig = plt.figure()
ax = fig.add_subplot(111, aspect='equal')
if CGS_units == 0:
ax.set_xlabel(r'$r~[a]$')
ax.set_ylabel(r'$z~[a]$')
else:
ax.set_xlabel(r'$r~[cm]$')
ax.set_ylabel(r'$z~[cm]$')
xv, yv = np.meshgrid(x, y, sparse=False, indexing='ij')
im = ax.pcolor(xv, yv, z, norm=LogNorm(vmin=z[z!=0].min(), vmax=z.max()),
cmap=cm.jet, rasterized=True)
ax.set_xlim((x.min(), x.max()))
ax.set_ylim((y.min(), y.max()))
cb0 = fig.colorbar(im)
if CGS_units == 0:
cb0.set_label(r'${\rm Specific~internal~energy}~[c^2]$', labelpad=1)
else:
cb0.set_label(r'${\rm Specific~internal~energy}~[erg/g]$', labelpad=1)
from matplotlib.ticker import AutoMinorLocator
ax.xaxis.set_minor_locator(AutoMinorLocator())
fig.savefig(plots_path+'eps_map.eps',
bbox_inches='tight', pad_inches=0.07, dpi=300)
for ll, line in enumerate(all_lines):
x, y, i, j, dens, eps, vx, vy, div, tracer, time = zip(*line)
x = np.array(x)
y = np.array(y)
nonzero_sel = np.ones_like(time, dtype=bool)
for l, t in enumerate(time):
if l != 0 and t==0:
nonzero_sel[l] = 0
if (ll+1)%5 == 0 or ll == 0:
ax.annotate(str(ll+1), xy=(x[0], y[0]), xycoords='data', size=6)
ax.plot(x[nonzero_sel], y[nonzero_sel])
fig.savefig(plots_path+'eps_map_with_lines.eps',
bbox_inches='tight', pad_inches=0.07, dpi=300)
plt.close(fig)
return
def density_plot(x, y, z, vx, vy, plots_path, CGS_units, all_lines):
"""Plot the density map of the RHD simulation"""
# Set-up
from setup import params
input_file = params['input_file']
numbering = 1
vel_vectors = 0
cax_z = [0.855, 0.510, 0.03, 0.390]
cax_vel = [0.855, 0.100, 0.03, 0.390]
nmask = 20
qscale = 0.1
width0 = 0.005
counti0 = 11
countj0 = 16
#
print 'Density map'
np.seterr(divide='ignore', invalid='ignore')
fig = plt.figure()
ax = fig.add_subplot(111, aspect='equal')
if CGS_units == 0:
ax.set_xlabel(r'$r~[a]$')
ax.set_ylabel(r'$z~[a]$')
else:
ax.set_xlabel(r'$r~{\rm [cm]}$')
ax.set_ylabel(r'$z~{\rm [cm]}$')
xv, yv = np.meshgrid(x, y, sparse=False, indexing='ij')
im = ax.pcolor(xv, yv, z, norm=LogNorm(vmin=z[z!=0].min(), vmax=z.max()),
cmap=cm.jet, rasterized=True)
ax.set_xlim((x.min(), x.max()))
ax.set_ylim((y.min(), y.max()))
if vel_vectors == 0:
divider = make_axes_locatable(ax)
cax = divider.append_axes("right", size=0.1, pad=0.07)
cb0 = plt.colorbar(im, cax=cax)
if CGS_units == 0:
cb0.set_label(r'$\rho~{\rm[\rho_0]}$', labelpad=5)
else:
cb0.set_label(r'$\rho~{\rm[g/cm^3]}$', labelpad=5)
else:
from numpy import ma
M = np.ones(vx.shape, dtype='bool')
counti = counti0
for i in range(len(vx[:,0])):
if counti == nmask:
countj = countj0
for j in range(len(vx[0,:])):
if countj == nmask:
M[i,j] = False
countj = 0
countj += 1
counti = 0
counti += 1
vxc = ma.masked_array(vx, mask=M)
vyc = ma.masked_array(vy, mask=M)
color_scale = np.sqrt(vxc**2.+vyc**2.)/3e10
vxc_norm = vxc/3e10/color_scale # equal length for all vectors
vyc_norm = vyc/3e10/color_scale
Q = ax.quiver(xv,yv,vxc_norm,vyc_norm,color_scale,angles='xy',
scale_units='dots', scale=qscale)#, width=width0)
cax = fig.add_axes(cax_z)
cb0 = fig.colorbar(im,cax = cax)
cax2 = fig.add_axes(cax_vel)
cb = fig.colorbar(Q,cax=cax2,ticks=[0.1,0.2,0.3,0.4,0.5,0.6,0.7,0.8,0.9])
cb.set_label(r'$\beta$',labelpad=9.5)
if CGS_units == 0:
cb0.set_label(r'$\rho~{\rm[\rho_0]}$', labelpad=6)
else:
cb0.set_label(r'$\rho~{\rm[g/cm^3]}$', labelpad=6)
if CGS_units == 0:
minorLocator = MultipleLocator(5)
else:
minorLocator = MultipleLocator(0.1e15)
ax.xaxis.set_minor_locator(minorLocator)
ax.yaxis.set_minor_locator(minorLocator)
from matplotlib.ticker import ScalarFormatter, FormatStrFormatter
class FixedOrderFormatter(ScalarFormatter):
"""Formats axis ticks using scientific notation with a constant order of
magnitude"""
def __init__(self, order_of_mag=0, useOffset=True, useMathText=False):
self._order_of_mag = order_of_mag
ScalarFormatter.__init__(self, useOffset=useOffset,
useMathText=useMathText)
def _set_orderOfMagnitude(self, range):
"""Over-riding this to avoid having orderOfMagnitude reset elsewhere"""
self.orderOfMagnitude = self._order_of_mag
numbering_mod = 5
offset_y = 0.0
if input_file == 'JET':
ax.xaxis.set_major_formatter(FixedOrderFormatter(15))
numbering_mod = 10
offset_y = 0.01e15
if CGS_units == 0:
minorLocator = MultipleLocator(5)
else:
minorLocator = MultipleLocator(0.1e15)
ax.xaxis.set_minor_locator(minorLocator)
ax.yaxis.set_minor_locator(minorLocator)
elif input_file == 'PULSAR':
ax.xaxis.set_major_formatter(FixedOrderFormatter(12))
ax.yaxis.set_major_formatter(FixedOrderFormatter(12))
numbering = 1
numbering_mod = 10
offset_y = 0.0
if CGS_units == 0:
minorLocator = MultipleLocator(5)
else:
minorLocator = MultipleLocator(0.1e12)
ax.xaxis.set_minor_locator(minorLocator)
ax.yaxis.set_minor_locator(minorLocator)
if x.max() >= 1.5e15:
from matplotlib.ticker import AutoMinorLocator
ax.xaxis.set_minor_locator(AutoMinorLocator())
fig.savefig(plots_path+'density_map.eps',
bbox_inches='tight', pad_inches=0.02, dpi=300)
autocrop_img(plots_path+'density_map.eps')
if vel_vectors == 0:
# c = ['r','b','c','y','k']
c = ['0.1','0.25','0.4','0.55','0.7']
c += 500*c
for ll, line in enumerate(all_lines):
x, y, i, j, dens, eps, vx, vy, div, tracer, time = zip(*line)
x = np.array(x)
y = np.array(y)
nonzero_sel = np.ones_like(time, dtype=bool)
for l, t in enumerate(time):
if l != 0 and t==0:
nonzero_sel[l] = 0
if numbering == 1:
if (ll+1)%numbering_mod == 0 or ll == 0:
ax.annotate(str(ll+1), xy=(x[0], y[0]+offset_y), xycoords='data', size=6, color='w')
ax.plot(x[nonzero_sel], y[nonzero_sel], lw=0.5, color=c[ll])
fig.savefig(plots_path+'density_map_with_lines.eps',
bbox_inches='tight', pad_inches=0.07, dpi=300)
autocrop_img(plots_path+'density_map_with_lines.eps')
plt.close(fig)
return
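# A more compact equivalent of the quiver-subsampling mask built with the
# nested counter loops above: keep one velocity vector every `nmask` cells in
# each direction, using the same row/column offsets (valid for offsets smaller
# than nmask).  Hypothetical helper, not part of the original script.
def _example_quiver_mask(vx, nmask=20, counti0=11, countj0=16):
    M = np.ones(vx.shape, dtype=bool)                  # True = arrow hidden
    M[nmask - counti0::nmask, nmask - countj0::nmask] = False
    return M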
def doppler_plot(x, y, z, vx, vy, plots_path, CGS_units, all_lines, fsuff):
"""Plot the Doppler Boosting map of the RHD simulation"""
# Set-up
from setup import params
input_file = params['input_file']
numbering = 1
vel_vectors = 0
cax_z = [0.855, 0.510, 0.03, 0.390]
cax_vel = [0.855, 0.100, 0.03, 0.390]
nmask = 20
qscale = 0.1
width0 = 0.005
counti0 = 11
countj0 = 16
#
print 'Doppler Boosting map'
np.seterr(divide='ignore', invalid='ignore')
fig = plt.figure()
ax = fig.add_subplot(111, aspect='equal')
if CGS_units == 0:
ax.set_xlabel(r'$r~[a]$')
ax.set_ylabel(r'$z~[a]$')
else:
ax.set_xlabel(r'$r~{\rm [cm]}$')
ax.set_ylabel(r'$z~{\rm [cm]}$')
xv, yv = np.meshgrid(x, y, sparse=False, indexing='ij')
from setup import params
try:
nick_name = params['nick_name']
except:
nick_name = []
pass
if params['input_file'] == 'JET':
if x.max() >= 1.5e15:
if nick_name == 'brac':
if fsuff == '_gamma_times_beta_':
im = ax.pcolor(xv, yv, z, norm=LogNorm(vmin=1e-1, vmax=2.5e0),
cmap=cm.jet, rasterized=True)
elif fsuff == '_factor_':
im = ax.pcolor(xv, yv, z, norm=LogNorm(vmin=1e0, vmax=4e2),
cmap=cm.jet, rasterized=True)
else:
if fsuff == '_gamma_times_beta_':
im = ax.pcolor(xv, yv, z, norm=LogNorm(vmin=7e-2, vmax=3e0),
cmap=cm.jet, rasterized=True)
elif fsuff == '_factor_':
im = ax.pcolor(xv, yv, z, norm=LogNorm(vmin=2e0, vmax=5e2),
cmap=cm.jet, rasterized=True)
else:
if fsuff == '_gamma_times_beta_':
im = ax.pcolor(xv, yv, z, norm=LogNorm(vmin=1e-1, vmax=4e0),
cmap=cm.jet, rasterized=True)
elif fsuff == '_factor_':
im = ax.pcolor(xv, yv, z, norm=LogNorm(vmin=0.95e0, vmax=1.5e3),#5e3
cmap=cm.jet, rasterized=True)
else:
if nick_name == 'steady':
if fsuff == '_gamma_times_beta_':
#im = ax.pcolor(xv, yv, z, norm=LogNorm(vmin=z[z!=0].min(), vmax=z.max()),
im = ax.pcolor(xv, yv, z, norm=LogNorm(vmin=z[z!=0].min(), vmax=z.max()),
cmap=cm.jet, rasterized=True)
elif fsuff == '_factor_':
im = ax.pcolor(xv, yv, z, norm=LogNorm(vmin=z[z!=0].min(), vmax=z.max()),
cmap=cm.jet, rasterized=True)
elif nick_name == 'clump1':
if fsuff == '_gamma_times_beta_':
im = ax.pcolor(xv, yv, z, norm=LogNorm(vmin=z[z!=0].min(), vmax=z.max()),
cmap=cm.jet, rasterized=True)
elif fsuff == '_factor_':
im = ax.pcolor(xv, yv, z, norm=LogNorm(vmin=z[z!=0].min(), vmax=z.max()),
cmap=cm.jet, rasterized=True)
elif nick_name == 'clump5':
if fsuff == '_gamma_times_beta_':
im = ax.pcolor(xv, yv, z, norm=LogNorm(vmin=z[z!=0].min(), vmax=z.max()),
cmap=cm.jet, rasterized=True)
elif fsuff == '_factor_':
im = ax.pcolor(xv, yv, z, norm=LogNorm(vmin=z[z!=0].min(), vmax=z.max()),
cmap=cm.jet, rasterized=True)
ax.set_xlim((x.min(), x.max()))
ax.set_ylim((y.min(), y.max()))
if vel_vectors == 0:
divider = make_axes_locatable(ax)
cax = divider.append_axes("right", size=0.1, pad=0.07)
cb0 = plt.colorbar(im, cax=cax)
if fsuff == '_gamma_times_beta_':
cb0.set_label(r'$\Gamma~\beta$', labelpad=5)
elif fsuff == '_factor_':
cb0.set_label(r'$\delta^4$', labelpad=5)
else:
from numpy import ma
M = np.ones(vx.shape, dtype='bool')
counti = counti0
for i in range(len(vx[:,0])):
if counti == nmask:
countj = countj0
for j in range(len(vx[0,:])):
if countj == nmask:
M[i,j] = False
countj = 0
countj += 1
counti = 0
counti += 1
vxc = ma.masked_array(vx, mask=M)
vyc = ma.masked_array(vy, mask=M)
color_scale = np.sqrt(vxc**2.+vyc**2.)/3e10
vxc_norm = vxc/3e10/color_scale # equal length for all vectors
vyc_norm = vyc/3e10/color_scale
Q = ax.quiver(xv,yv,vxc_norm,vyc_norm,color_scale,angles='xy',
scale_units='dots', scale=qscale)#, width=width0)
cax = fig.add_axes(cax_z)
cb0 = fig.colorbar(im,cax = cax)
cax2 = fig.add_axes(cax_vel)
cb = fig.colorbar(Q,cax=cax2,ticks=[0.1,0.2,0.3,0.4,0.5,0.6,0.7,0.8,0.9])
cb.set_label(r'$\beta$',labelpad=9.5)
if CGS_units == 0:
cb0.set_label(r'$\rho~{\rm[\rho_0]}$', labelpad=6)
else:
cb0.set_label(r'$\rho~{\rm[g/cm^3]}$', labelpad=6)
if CGS_units == 0:
minorLocator = MultipleLocator(5)
else:
minorLocator = MultipleLocator(0.1e15)
ax.xaxis.set_minor_locator(minorLocator)
ax.yaxis.set_minor_locator(minorLocator)
from matplotlib.ticker import ScalarFormatter, FormatStrFormatter
class FixedOrderFormatter(ScalarFormatter):
"""Formats axis ticks using scientific notation with a constant order of
magnitude"""
def __init__(self, order_of_mag=0, useOffset=True, useMathText=False):
self._order_of_mag = order_of_mag
ScalarFormatter.__init__(self, useOffset=useOffset,
useMathText=useMathText)
def _set_orderOfMagnitude(self, range):
"""Over-riding this to avoid having orderOfMagnitude reset elsewhere"""
self.orderOfMagnitude = self._order_of_mag
numbering_mod = 5
offset_y = 0.0
if input_file == 'JET':
ax.xaxis.set_major_formatter(FixedOrderFormatter(15))
numbering_mod = 10
offset_y = 0.01e15
if CGS_units == 0:
minorLocator = MultipleLocator(5)
else:
minorLocator = MultipleLocator(0.1e15)
ax.xaxis.set_minor_locator(minorLocator)
ax.yaxis.set_minor_locator(minorLocator)
elif input_file == 'PULSAR':
ax.xaxis.set_major_formatter(FixedOrderFormatter(12))
ax.yaxis.set_major_formatter(FixedOrderFormatter(12))
numbering = 1
numbering_mod = 10
offset_y = 0.0
if CGS_units == 0:
minorLocator = MultipleLocator(5)
else:
minorLocator = MultipleLocator(0.1e12)
ax.xaxis.set_minor_locator(minorLocator)
ax.yaxis.set_minor_locator(minorLocator)
if x.max() >= 1.5e15:
from matplotlib.ticker import AutoMinorLocator
ax.xaxis.set_minor_locator(AutoMinorLocator())
fig.savefig(plots_path+'doppler_'+fsuff+'map.eps',
bbox_inches='tight', pad_inches=0.02, dpi=300)
autocrop_img(plots_path+'doppler_'+fsuff+'map.eps')
if vel_vectors == 0:
# c = ['r','b','c','y','k']
c = ['0.1','0.25','0.4','0.55','0.7']
c += 500*c
for ll, line in enumerate(all_lines):
x, y, i, j, dens, eps, vx, vy, div, tracer, time = zip(*line)
x = np.array(x)
y = np.array(y)
nonzero_sel = np.ones_like(time, dtype=bool)
for l, t in enumerate(time):
if l != 0 and t==0:
nonzero_sel[l] = 0
if numbering == 1:
if (ll+1)%numbering_mod == 0 or ll == 0:
ax.annotate(str(ll+1), xy=(x[0], y[0]+offset_y), xycoords='data', size=6, color='w')
ax.plot(x[nonzero_sel], y[nonzero_sel], lw=0.5, color=c[ll])
fig.savefig(plots_path+'doppler_'+fsuff+'map_with_lines.eps',
bbox_inches='tight', pad_inches=0.07, dpi=300)
autocrop_img(plots_path+'doppler_'+fsuff+'map_with_lines.eps')
plt.close(fig)
return
def lines_plot(plots_path, all_lines, CGS_units, lx, ly, a):
"""Plot the computed current lines"""
print '\nPlotting lines...'
fig = plt.figure()
ax = fig.add_subplot(111, aspect='equal')
if CGS_units == 0:
ax.set_xlabel(r'$r~[a]$')
ax.set_ylabel(r'$z~[a]$')
ax.set_xlim((0, lx))
ax.set_ylim((0, ly))
else:
ax.set_xlabel(r'$r~[cm]$')
ax.set_ylabel(r'$z~[cm]$')
ax.set_xlim((0, lx*a))
ax.set_ylim((0, ly*a))
fig2 = plt.figure()
ax2 = fig2.add_subplot(111, aspect='equal')
ax2.set_xlabel(r'$i$')
ax2.set_ylabel(r'$j$')
for ll, line in enumerate(all_lines):
x, y, i, j, dens, eps, vx, vy, div, tracer, time = zip(*line)
x = np.array(x)
y = np.array(y)
i = np.array(i)
j = np.array(j)
dens = np.array(dens)
eps = np.array(eps)
vx = np.array(vx)
vy = np.array(vy)
time = np.array(time)
nonzero_sel = np.ones_like(time, dtype=bool)
for l, t in enumerate(time):
if l != 0 and t==0:
nonzero_sel[l] = 0
ax.plot(x[nonzero_sel], y[nonzero_sel])
ax2.plot(i[nonzero_sel], j[nonzero_sel])
if (ll+1)%5 == 0 or ll == 0:
ax.annotate(str(ll+1), xy=(x[0], y[0]), xycoords='data', size=6)
ax2.annotate(str(ll+1), xy=(i[0], j[0]), xycoords='data', size=6)
fig.savefig(plots_path+'lines_xy.eps', bbox_inches='tight',
pad_inches=0.07)
plt.close(fig)
ax2.set_xlim(left=-1)
fig2.savefig(plots_path+'lines_ij.eps', bbox_inches='tight',
pad_inches=0.07)
plt.close(fig2)
print "Done"
return
def profile_plot(plots_path, all_lines, CGS_units):
"""Plot the phyisical quantities a long the line"""
print '\nPlotting profiles along the lines...'
fig = plt.figure()
ax = fig.add_subplot(111)
if CGS_units == 0:
ax.set_xlabel(r'$t~[t_0]$')
ax.set_ylabel(r'${\rm Density}~[\rho_0]$')
else:
ax.set_xlabel(r'$t~[s]$')
ax.set_ylabel(r'${\rm Density}~[g/cm^3]$')
ax.set_yscale('log')
for line in all_lines:
x, y, i, j, dens, eps, vx, vy, div, tracer, time = zip(*line)
x = np.array(x)
y = np.array(y)
i = np.array(i)
j = np.array(j)
dens = np.array(dens)
eps = np.array(eps)
vx = np.array(vx)
vy = np.array(vy)
time = np.array(time)
nonzero_sel = np.ones_like(time, dtype=bool)
for l, t in enumerate(time):
if t==0:
nonzero_sel[l] = 0
ax.plot(time, dens)
# ax.plot(time, div)
# ax.set_ylim(1e-22,1e-3)
fig.savefig(plots_path+'density_profile.eps',
bbox_inches='tight', pad_inches=0.07)
plt.close(fig)
print "Done"
return
def plots(x, y, dens, eps, vx, vy, div, plots_path, all_lines,
plot_maps, plot_lines, plot_profiles, CGS_units, lx, ly,
a, rho0, c, gammaad, tracer):
"""Perform the selected plots"""
# GENERAL PLOT PARAMETERS
fig_width_pt = 0.5*512.1496 # From Latex \showthe\columnwidth
inches_per_pt = 1.0/72.27 # Convert pt to inch
golden_mean = (np.sqrt(5)-1.0)/2.0 # Aesthetic ratio
fig_width = fig_width_pt*inches_per_pt # width in inches
fig_height = fig_width*golden_mean # height in inches
if x.max() > y.max():
fig_size = [fig_width, fig_height]
else:
fig_size = [fig_height, fig_width]
params = {'backend': 'ps',
'font.family': 'serif',
'axes.labelsize': 9,
'axes.linewidth': 0.5,
'ytick.major.width': 0.5,
'ytick.minor.width': 0.5,
'font.size': 9,
'legend.fontsize': 9,
'xtick.labelsize': 8,
'ytick.labelsize': 8,
'text.usetex': True,
'text.latex.preamble': [r'\usepackage{txfonts}'],
'ps.usedistiller': 'xpdf',
'figure.figsize': fig_size}
plt.rcdefaults()
plt.rcParams.update(params)
if not os.path.exists(plots_path):
os.makedirs(plots_path)
beta = np.sqrt(vx**2.+vy**2.)
P = (gammaad-1.)*dens*eps
if CGS_units == 1:
x = np.array(x)*a
y = np.array(y)*a
dens = np.array(dens)*rho0
eps = np.array(eps)*c**2.
P = (gammaad-1.)*dens*eps
beta = np.sqrt(vx**2.+vy**2.)
vx = np.array(vx)*c
vy = np.array(vy)*c
div = np.array(div)*c/a
#time = np.array(time)*a/c
if plot_maps == 1:
print '\nPlotting maps...'
### pressure_plot(x, y, P, vx, vy, plots_path, CGS_units, all_lines)
### beta_plot(x, y, beta, vx, vy, plots_path, CGS_units, all_lines)
### eps_plot(x, y, eps, vx, vy, plots_path, CGS_units, all_lines)
tracer_plot(x, y, tracer, vx, vy, plots_path, CGS_units, all_lines)
density_plot(x, y, dens, vx, vy, plots_path, CGS_units, all_lines)
b = np.sqrt(vx**2.+vy**2.)/c
bx = vx/c
by = vy/c
g = 1./np.sqrt(1.-b**2.)
D4 = (1./g/(1.-by))**4.
doppler_plot(x, y, D4, vx, vy, plots_path, CGS_units, all_lines, '_factor_')
doppler_plot(x, y, g*b, vx, vy, plots_path, CGS_units, all_lines, '_gamma_times_beta_')
if plot_lines == 1:
lines_plot(plots_path, all_lines, CGS_units, lx, ly, a)
if plot_profiles == 1:
profile_plot(plots_path, all_lines, CGS_units)
return
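# Illustrative sketch (not part of the original pipeline): the Doppler maps
# produced above use the relativistic beaming factor
# D = 1 / (Gamma * (1 - beta_y)), raised to the 4th power for an observer
# located towards +y. A minimal standalone version of that calculation,
# assuming velocities are given in the same units as the speed of light c:
def doppler_boost_example(vx, vy, c=1.0):
    """Return the D**4 beaming factor for velocity components vx, vy."""
    b = np.sqrt(vx**2 + vy**2) / c            # total speed in units of c
    gamma = 1.0 / np.sqrt(1.0 - b**2)         # Lorentz factor
    doppler = 1.0 / (gamma * (1.0 - vy / c))  # observer along the +y axis
    return doppler**4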
|
|
# ------------------------------------------------------------------------
# ------------------------------------------------------------------------
from functools import partial
from threading import local
from django.conf import settings as django_settings
from django.contrib import admin, messages
from django.contrib.contenttypes.models import ContentType
from django.core.exceptions import PermissionDenied
from django.http import HttpResponseRedirect
from django.templatetags.static import static
from django.urls import reverse
from django.utils.translation import gettext_lazy as _
from feincms import settings
from feincms.admin import item_editor, tree_editor
# ------------------------------------------------------------------------
from .forms import PageAdminForm
_local = local()
# ------------------------------------------------------------------------
class PageAdmin(item_editor.ItemEditor, tree_editor.TreeEditor):
class Media:
css = {}
js = []
form = PageAdminForm
fieldset_insertion_index = 2
fieldsets = [
(None, {"fields": [("title", "slug"), ("active", "in_navigation")]}),
(
_("Other options"),
{
"classes": ["collapse"],
"fields": ["template_key", "parent", "override_url", "redirect_to"],
},
),
        # <-- insertion point, extensions appear here, see
        # fieldset_insertion_index above
item_editor.FEINCMS_CONTENT_FIELDSET,
]
readonly_fields = []
list_display = [
"short_title",
"is_visible_admin",
"in_navigation_toggle",
"template",
]
list_filter = ["active", "in_navigation", "template_key", "parent"]
search_fields = ["title", "slug"]
prepopulated_fields = {"slug": ("title",)}
raw_id_fields = ["parent"]
radio_fields = {"template_key": admin.HORIZONTAL}
@classmethod
def add_extension_options(cls, *f):
if isinstance(f[-1], dict): # called with a fieldset
cls.fieldsets.insert(cls.fieldset_insertion_index, f)
f[1]["classes"] = list(f[1].get("classes", []))
f[1]["classes"].append("collapse")
else: # assume called with "other" fields
cls.fieldsets[1][1]["fields"].extend(f)
def __init__(self, model, admin_site):
if len(model._feincms_templates) > 4 and "template_key" in self.radio_fields:
del self.radio_fields["template_key"]
super().__init__(model, admin_site)
in_navigation_toggle = tree_editor.ajax_editable_boolean(
"in_navigation", _("in navigation")
)
def get_readonly_fields(self, request, obj=None):
readonly = super().get_readonly_fields(request, obj=obj)
if not settings.FEINCMS_SINGLETON_TEMPLATE_CHANGE_ALLOWED:
if obj and obj.template and obj.template.singleton:
return tuple(readonly) + ("template_key",)
return readonly
def get_form(self, *args, **kwargs):
form = super().get_form(*args, **kwargs)
return partial(form, modeladmin=self)
def _actions_column(self, page):
addable = getattr(page, "feincms_addable", True)
preview_url = "../../r/{}/{}/".format(
ContentType.objects.get_for_model(self.model).id,
page.id,
)
actions = super()._actions_column(page)
if addable:
if not page.template.enforce_leaf:
actions.insert(
0,
'<a href="add/?parent=%s" title="%s">'
'<img src="%s" alt="%s" />'
"</a>"
% (
page.pk,
_("Add child page"),
static("feincms/img/icon_addlink.gif"),
_("Add child page"),
),
)
actions.insert(
0,
'<a href="%s" title="%s">'
'<img src="%s" alt="%s" />'
"</a>"
% (
preview_url,
_("View on site"),
static("feincms/img/selector-search.gif"),
_("View on site"),
),
)
return actions
def add_view(self, request, **kwargs):
kwargs["form_url"] = request.get_full_path() # Preserve GET parameters
if "translation_of" in request.GET and "language" in request.GET:
try:
original = self.model._tree_manager.get(
pk=request.GET.get("translation_of")
)
except (AttributeError, self.model.DoesNotExist):
pass
else:
language_code = request.GET["language"]
language = dict(django_settings.LANGUAGES).get(language_code, "")
kwargs["extra_context"] = {
"adding_translation": True,
"title": _('Add %(language)s translation of "%(page)s"')
% {"language": language, "page": original},
"language_name": language,
"translation_of": original,
}
return super().add_view(request, **kwargs)
def response_add(self, request, obj, *args, **kwargs):
response = super().response_add(request, obj, *args, **kwargs)
if (
"parent" in request.GET
and "_addanother" in request.POST
and response.status_code in (301, 302)
):
# Preserve GET parameters if we are about to add another page
response["Location"] += "?parent=%s" % request.GET["parent"]
if (
"translation_of" in request.GET
and "_copy_content_from_original" in request.POST
):
# Copy all contents
for content_type in obj._feincms_content_types:
if content_type.objects.filter(parent=obj).exists():
# Short-circuit processing -- don't copy any contents if
# newly added object already has some
return response
try:
original = self.model._tree_manager.get(
pk=request.GET.get("translation_of")
)
original = original.original_translation
obj.copy_content_from(original)
obj.save()
self.message_user(
request,
_(
"The content from the original translation has been copied"
" to the newly created page."
),
)
except (AttributeError, self.model.DoesNotExist):
pass
return response
def change_view(self, request, object_id, **kwargs):
try:
return super().change_view(request, object_id, **kwargs)
except PermissionDenied:
messages.add_message(
request,
messages.ERROR,
_("You don't have the necessary permissions to edit this" " object"),
)
return HttpResponseRedirect(reverse("admin:page_page_changelist"))
def has_delete_permission(self, request, obj=None):
if not settings.FEINCMS_SINGLETON_TEMPLATE_DELETION_ALLOWED:
if obj and obj.template.singleton:
return False
return super().has_delete_permission(request, obj=obj)
def changelist_view(self, request, *args, **kwargs):
_local.visible_pages = list(
self.model.objects.active().values_list("id", flat=True)
)
return super().changelist_view(request, *args, **kwargs)
def is_visible_admin(self, page):
"""
Instead of just showing an on/off boolean, also indicate whether this
page is not visible because of publishing dates or inherited status.
"""
if page.parent_id and page.parent_id not in _local.visible_pages:
# parent page's invisibility is inherited
if page.id in _local.visible_pages:
_local.visible_pages.remove(page.id)
return tree_editor.ajax_editable_boolean_cell(
page, "active", override=False, text=_("inherited")
)
if page.active and page.id not in _local.visible_pages:
# is active but should not be shown, so visibility limited by
# extension: show a "not active"
return tree_editor.ajax_editable_boolean_cell(
page, "active", override=False, text=_("extensions")
)
return tree_editor.ajax_editable_boolean_cell(page, "active")
is_visible_admin.short_description = _("is active")
is_visible_admin.editable_boolean_field = "active"
# active toggle needs more sophisticated result function
def is_visible_recursive(self, page):
# Have to refresh visible_pages here, because TreeEditor.toggle_boolean
# will have changed the value when inside this code path.
_local.visible_pages = list(
self.model.objects.active().values_list("id", flat=True)
)
retval = []
for c in page.get_descendants(include_self=True):
retval.append(self.is_visible_admin(c))
return retval
is_visible_admin.editable_boolean_result = is_visible_recursive
# ------------------------------------------------------------------------
# ------------------------------------------------------------------------
|
|
#!/usr/bin/env python2
# Copyright (c) 2014 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
# Exercise the wallet. Ported from wallet.sh.
# Does the following:
# a) creates 3 nodes, with an empty chain (no blocks).
# b) node0 mines a block
# c) node1 mines 101 blocks, so now nodes 0 and 1 have 50btc, node2 has none.
# d) node0 sends 21 btc to node2, in two transactions (11 btc, then 10 btc).
# e) node0 mines a block, collects the fee on the second transaction
# f) node1 mines 100 blocks, to mature node0's just-mined block
# g) check that node0 has 100-21, node2 has 21
# h) node0 should now have 2 unspent outputs; send these to node2 via raw tx broadcast by node1
# i) have node1 mine a block
# j) check balances - node0 should have 0, node2 should have 100
# k) test ResendWalletTransactions - create transactions, startup fourth node, make sure it syncs
#
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import *
class WalletTest (BitcoinTestFramework):
def check_fee_amount(self, curr_balance, balance_with_fee, fee_per_byte, tx_size):
"""Return curr_balance after asserting the fee was in range"""
fee = balance_with_fee - curr_balance
target_fee = fee_per_byte * tx_size
if fee < target_fee:
raise AssertionError("Fee of %s BTC too low! (Should be %s BTC)"%(str(fee), str(target_fee)))
# allow the node's estimation to be at most 2 bytes off
if fee > fee_per_byte * (tx_size + 2):
raise AssertionError("Fee of %s BTC too high! (Should be %s BTC)"%(str(fee), str(target_fee)))
return curr_balance
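    # Illustrative numbers (hypothetical): with fee_per_byte=Decimal('0.00001')
    # and tx_size=200, any fee between 0.00200 and 0.00202 BTC passes the check.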
def setup_chain(self):
print("Initializing test directory "+self.options.tmpdir)
initialize_chain_clean(self.options.tmpdir, 4)
def setup_network(self, split=False):
self.nodes = start_nodes(3, self.options.tmpdir)
connect_nodes_bi(self.nodes,0,1)
connect_nodes_bi(self.nodes,1,2)
connect_nodes_bi(self.nodes,0,2)
self.is_network_split=False
self.sync_all()
def run_test (self):
print "Mining blocks..."
self.nodes[0].generate(1)
walletinfo = self.nodes[0].getwalletinfo()
assert_equal(walletinfo['immature_balance'], 50)
assert_equal(walletinfo['balance'], 0)
self.sync_all()
self.nodes[1].generate(101)
self.sync_all()
assert_equal(self.nodes[0].getbalance(), 50)
assert_equal(self.nodes[1].getbalance(), 50)
assert_equal(self.nodes[2].getbalance(), 0)
# Send 21 BTC from 0 to 2 using sendtoaddress call.
# Second transaction will be child of first, and will require a fee
self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(), 11)
self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(), 10)
walletinfo = self.nodes[0].getwalletinfo()
assert_equal(walletinfo['immature_balance'], 0)
# Have node0 mine a block, thus it will collect its own fee.
self.nodes[0].generate(1)
self.sync_all()
# Have node1 generate 100 blocks (so node0 can recover the fee)
self.nodes[1].generate(100)
self.sync_all()
# node0 should end up with 100 btc in block rewards plus fees, but
# minus the 21 plus fees sent to node2
assert_equal(self.nodes[0].getbalance(), 100-21)
assert_equal(self.nodes[2].getbalance(), 21)
# Node0 should have two unspent outputs.
# Create a couple of transactions to send them to node2, submit them through
# node1, and make sure both node0 and node2 pick them up properly:
node0utxos = self.nodes[0].listunspent(1)
assert_equal(len(node0utxos), 2)
# create both transactions
txns_to_send = []
for utxo in node0utxos:
inputs = []
outputs = {}
inputs.append({ "txid" : utxo["txid"], "vout" : utxo["vout"]})
outputs[self.nodes[2].getnewaddress("from1")] = utxo["amount"]
raw_tx = self.nodes[0].createrawtransaction(inputs, outputs)
txns_to_send.append(self.nodes[0].signrawtransaction(raw_tx))
# Have node 1 (miner) send the transactions
self.nodes[1].sendrawtransaction(txns_to_send[0]["hex"], True)
self.nodes[1].sendrawtransaction(txns_to_send[1]["hex"], True)
# Have node1 mine a block to confirm transactions:
self.nodes[1].generate(1)
self.sync_all()
assert_equal(self.nodes[0].getbalance(), 0)
assert_equal(self.nodes[2].getbalance(), 100)
assert_equal(self.nodes[2].getbalance("from1"), 100-21)
# Send 10 BTC normal
address = self.nodes[0].getnewaddress("test")
fee_per_byte = Decimal('0.001') / 1000
self.nodes[2].settxfee(fee_per_byte * 1000)
txid = self.nodes[2].sendtoaddress(address, 10, "", "", False)
self.nodes[2].generate(1)
self.sync_all()
node_2_bal = self.check_fee_amount(self.nodes[2].getbalance(), Decimal('90'), fee_per_byte, count_bytes(self.nodes[2].getrawtransaction(txid)))
assert_equal(self.nodes[0].getbalance(), Decimal('10'))
# Send 10 BTC with subtract fee from amount
txid = self.nodes[2].sendtoaddress(address, 10, "", "", True)
self.nodes[2].generate(1)
self.sync_all()
node_2_bal -= Decimal('10')
assert_equal(self.nodes[2].getbalance(), node_2_bal)
node_0_bal = self.check_fee_amount(self.nodes[0].getbalance(), Decimal('20'), fee_per_byte, count_bytes(self.nodes[2].getrawtransaction(txid)))
# Sendmany 10 BTC
txid = self.nodes[2].sendmany('from1', {address: 10}, 0, "", [])
self.nodes[2].generate(1)
self.sync_all()
node_0_bal += Decimal('10')
node_2_bal = self.check_fee_amount(self.nodes[2].getbalance(), node_2_bal - Decimal('10'), fee_per_byte, count_bytes(self.nodes[2].getrawtransaction(txid)))
assert_equal(self.nodes[0].getbalance(), node_0_bal)
# Sendmany 10 BTC with subtract fee from amount
txid = self.nodes[2].sendmany('from1', {address: 10}, 0, "", [address])
self.nodes[2].generate(1)
self.sync_all()
node_2_bal -= Decimal('10')
assert_equal(self.nodes[2].getbalance(), node_2_bal)
node_0_bal = self.check_fee_amount(self.nodes[0].getbalance(), node_0_bal + Decimal('10'), fee_per_byte, count_bytes(self.nodes[2].getrawtransaction(txid)))
# Test ResendWalletTransactions:
# Create a couple of transactions, then start up a fourth
# node (nodes[3]) and ask nodes[0] to rebroadcast.
# EXPECT: nodes[3] should have those transactions in its mempool.
txid1 = self.nodes[0].sendtoaddress(self.nodes[1].getnewaddress(), 1)
txid2 = self.nodes[1].sendtoaddress(self.nodes[0].getnewaddress(), 1)
sync_mempools(self.nodes)
self.nodes.append(start_node(3, self.options.tmpdir))
connect_nodes_bi(self.nodes, 0, 3)
sync_blocks(self.nodes)
relayed = self.nodes[0].resendwallettransactions()
assert_equal(set(relayed), set([txid1, txid2]))
sync_mempools(self.nodes)
assert(txid1 in self.nodes[3].getrawmempool())
#check if we can list zero value tx as available coins
#1. create rawtx
#2. hex-changed one output to 0.0
#3. sign and send
#4. check if recipient (node0) can list the zero value tx
usp = self.nodes[1].listunspent()
inputs = [{"txid":usp[0]['txid'], "vout":usp[0]['vout']}]
outputs = {self.nodes[1].getnewaddress(): 49.998, self.nodes[0].getnewaddress(): 11.11}
rawTx = self.nodes[1].createrawtransaction(inputs, outputs).replace("c0833842", "00000000") #replace 11.11 with 0.0 (int32)
decRawTx = self.nodes[1].decoderawtransaction(rawTx)
signedRawTx = self.nodes[1].signrawtransaction(rawTx)
decRawTx = self.nodes[1].decoderawtransaction(signedRawTx['hex'])
zeroValueTxid= decRawTx['txid']
sendResp = self.nodes[1].sendrawtransaction(signedRawTx['hex'])
self.sync_all()
self.nodes[1].generate(1) #mine a block
self.sync_all()
unspentTxs = self.nodes[0].listunspent() #zero value tx must be in listunspents output
found = False
for uTx in unspentTxs:
if uTx['txid'] == zeroValueTxid:
found = True
assert_equal(uTx['amount'], Decimal('0.00000000'));
assert(found)
#do some -walletbroadcast tests
stop_nodes(self.nodes)
wait_bitcoinds()
self.nodes = start_nodes(3, self.options.tmpdir, [["-walletbroadcast=0"],["-walletbroadcast=0"],["-walletbroadcast=0"]])
connect_nodes_bi(self.nodes,0,1)
connect_nodes_bi(self.nodes,1,2)
connect_nodes_bi(self.nodes,0,2)
self.sync_all()
txIdNotBroadcasted = self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(), 2);
txObjNotBroadcasted = self.nodes[0].gettransaction(txIdNotBroadcasted)
self.nodes[1].generate(1) #mine a block, tx should not be in there
self.sync_all()
        assert_equal(self.nodes[2].getbalance(), node_2_bal); #should not have changed because the tx was not broadcast
#now broadcast from another node, mine a block, sync, and check the balance
self.nodes[1].sendrawtransaction(txObjNotBroadcasted['hex'])
self.nodes[1].generate(1)
self.sync_all()
txObjNotBroadcasted = self.nodes[0].gettransaction(txIdNotBroadcasted)
        assert_equal(self.nodes[2].getbalance(), node_2_bal + Decimal('2')); #balance now includes the 2 BTC since the tx was broadcast and mined
#create another tx
txIdNotBroadcasted = self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(), 2);
#restart the nodes with -walletbroadcast=1
stop_nodes(self.nodes)
wait_bitcoinds()
self.nodes = start_nodes(3, self.options.tmpdir)
connect_nodes_bi(self.nodes,0,1)
connect_nodes_bi(self.nodes,1,2)
connect_nodes_bi(self.nodes,0,2)
sync_blocks(self.nodes)
self.nodes[0].generate(1)
sync_blocks(self.nodes)
        #tx should be added to the balance because after restarting the nodes the tx should be broadcast
        assert_equal(self.nodes[2].getbalance(), node_2_bal + Decimal('4')); #balance now includes both 2 BTC transactions
#send a tx with value in a string (PR#6380 +)
txId = self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(), "2")
txObj = self.nodes[0].gettransaction(txId)
assert_equal(txObj['amount'], Decimal('-2.00000000'))
txId = self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(), "0.0001")
txObj = self.nodes[0].gettransaction(txId)
assert_equal(txObj['amount'], Decimal('-0.00010000'))
#check if JSON parser can handle scientific notation in strings
txId = self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(), "1e-4")
txObj = self.nodes[0].gettransaction(txId)
assert_equal(txObj['amount'], Decimal('-0.00010000'))
#this should fail
errorString = ""
try:
txId = self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(), "1f-4")
except JSONRPCException,e:
errorString = e.error['message']
assert_equal("Invalid amount" in errorString, True);
errorString = ""
try:
self.nodes[0].generate("2") #use a string to as block amount parameter must fail because it's not interpreted as amount
except JSONRPCException,e:
errorString = e.error['message']
assert_equal("not an integer" in errorString, True);
if __name__ == '__main__':
WalletTest ().main ()
|
|
"""Base classes for creating GUI objects to create manually selected points.
The definition of X,Y axis is the following:
xmin,ymin o---------o xmax,ymin
| |
| |
| |
| |
xmin,ymax o---------o xmax,ymax
"""
from collections import namedtuple
import logging
import sys
import numpy as np
from PyQt5 import QtCore, QtGui, QtWidgets
logger = logging.getLogger(__name__)
Position = namedtuple('Position', ('x', 'y', 'z'))
class AnatomicalParams(object):
"""The base parameter object for GUI configuration"""
def __init__(self,
cmap='gray',
interp='nearest',
perc_min=5.,
perc_max=95.,
vmode='percentile',
alpha=1.0):
"""
Parameters
----------
cmap : str
interp : str
perc_min : float: low percentile threshold for intensity adjustment
perc_max : float: high percentile threshold for intensity adjustment
vmode : str: "percentile": intensity adjustment based on vmin/vmax percentile,
"mean-std": intensity adjustment based on
"clahe: CLAHE (not implemented yet)
alpha : float
"""
self.cmap = cmap
self.interp = interp
self.perc_min = perc_min
self.perc_max = perc_max
self.vmode = vmode
self.alpha = alpha
self.start_vertebrae = 50
self.end_vertebrae = -1
self.num_points = 0
self._title = '' # figure title
self.subtitle = '' # subplot title (will be displayed above the image)
self._vertebraes = []
self.input_file_name = ""
self.starting_slice = 'top' # used in centerline.py canvas and corresponds to the location of
# the first axial slice for labeling. Possible values are: 'top': top slice; 'midfovminusinterval': mid-FOV
# minus the interval.
self.interval_in_mm = 15 # superior-inferior distance between two consecutive labels in AUTO mode
@property
def dialog_title(self):
if not self._title:
self._title = '{}: manual labeling'.format(self.input_file_name)
return self._title
@property
def vertebraes(self):
return self._vertebraes
@vertebraes.setter
def vertebraes(self, values):
if not values:
return
self._vertebraes = values
self.start_vertebrae = values[0]
self.end_vertebrae = values[-1]
class BaseDialog(QtWidgets.QWidget):
"""Abstract base class to a Anatomical GUI.
Attributes
----------
update_canvas_signal : QtCore.Signal
Signal emits when dialog has a point to add to the
"""
lb_status = None
lb_warning = None
btn_ok = None
btn_undo = None
def __init__(self, controller):
"""Initialize the UI parameters
Parameters
----------
controller : BaseController
The logical object that controls the state of the UI
"""
super(BaseDialog, self).__init__()
self.params = controller.params
self._controller = controller
self.image = controller.image
self._controller._dialog = self
self._init_ui()
def _init_ui(self):
self.resize(1200, 800)
self.setAttribute(QtCore.Qt.WA_DeleteOnClose)
layout = QtWidgets.QVBoxLayout(self)
self._init_header(layout)
self._init_canvas(layout)
self._init_controls(layout)
self._init_footer(layout)
events = (
(QtGui.QKeySequence.Undo, self.on_undo),
(QtGui.QKeySequence.Save, self.on_save_quit),
(QtGui.QKeySequence.Quit, self.close),
(QtGui.QKeySequence.MoveToNextChar, self.increment_vertical_nav),
(QtGui.QKeySequence.MoveToPreviousChar, self.decrement_vertical_nav),
(QtGui.QKeySequence.MoveToNextLine, self.increment_horizontal_nav),
(QtGui.QKeySequence.MoveToPreviousLine, self.decrement_horizontal_nav)
)
for event, action in events:
QtWidgets.QShortcut(event, self, action)
self.setWindowTitle(self.params.dialog_title)
def increment_vertical_nav(self):
"""Action to increment the anatonical viewing position.
The common case is when the right arrow key is pressed. Ignore implementing
this function if no navigation functionality is required
"""
pass
def decrement_vertical_nav(self):
"""Action to decrement the anatonical viewing position.
The common case is when the left arrow key is pressed. Ignore implementing
this function if no navigation functionality is required
"""
pass
def increment_horizontal_nav(self):
"""Action to increment the anatonical viewing position.
The common case is when the down arrow key is pressed. Ignore implementing
this function if no navigation functionality is required
"""
pass
def decrement_horizontal_nav(self):
"""Action to decrement the anatonical viewing position.
The common case is when the up arrow key is pressed. Ignore implementing
this function if no navigation functionality is required
"""
pass
def _init_canvas(self, parent):
"""
Parameters
----------
parent : QtGui.QWidget
The widget / dialog that will host the canvas layout
"""
raise NotImplementedError('Include _init_canvas in your class declaration')
def _init_controls(self, parent):
"""
Parameters
----------
parent : QtGui.QWidget
The widget / dialog that will host the control layout
"""
raise NotImplementedError('Include _init_controls in your class declaration')
def _init_header(self, parent):
self.lb_status = QtWidgets.QLabel('Label Status')
self.lb_status.setStyleSheet("color:black")
self.lb_status.setAlignment(QtCore.Qt.AlignCenter)
self.lb_warning = QtWidgets.QLabel()
self.lb_warning.setStyleSheet('color:red')
self.lb_warning.setAlignment(QtCore.Qt.AlignCenter)
message_label = getattr(self.params, 'message_warn', '')
self.Label = QtWidgets.QLabel(message_label)
self.Label.setAlignment(QtCore.Qt.AlignLeft)
parent.addWidget(self.lb_status)
parent.addWidget(self.lb_warning)
parent.addWidget(self.Label)
parent.addStretch()
message = getattr(self.params, 'init_message', '')
self.update_status(message)
def _init_footer(self, parent):
"""
Parameters
----------
parent : QtGui.QWidget
The widget / dialog that will host the footer layout
Returns
-------
The footer layout created
"""
ctrl_layout = QtWidgets.QHBoxLayout()
if sys.platform.lower() == 'darwin':
cmd_key = 'Cmd'
else:
cmd_key = 'Ctrl'
self.btn_ok = QtWidgets.QPushButton('Save and Quit [%s+S]' % cmd_key)
self.btn_undo = QtWidgets.QPushButton('Undo [%s+Z]' % cmd_key)
ctrl_layout.addStretch()
ctrl_layout.addWidget(self.btn_undo)
ctrl_layout.addWidget(self.btn_ok)
self.btn_undo.clicked.connect(self.on_undo)
self.btn_ok.clicked.connect(self.on_save_quit)
parent.addLayout(ctrl_layout)
return ctrl_layout
def on_save_quit(self):
self._controller.save()
self.close()
def on_undo(self):
try:
self._controller.undo()
except InvalidActionWarning as err:
self.update_warning(str(err))
def show(self):
"""Override the base class show to fix a bug found in MAC"""
super(BaseDialog, self).show()
self.activateWindow()
self.raise_()
def update_status(self, msg):
"""Print the message into the dialog's status widget and clear the warning widget
Parameters
----------
msg : str The message to display in the header of dialog
"""
self.lb_status.setText(msg)
self.lb_warning.setText('')
def update_warning(self, msg):
"""Print the message into the dialog's warning widget and clear the status widget
Parameters
----------
msg : str The message to display in the header of dialog
"""
self.lb_warning.setText(msg)
self.lb_status.setText('')
class BaseController(object):
orientation = None
_overlay_image = None
_dialog = None
default_position = ()
position = ()
saved = False
def __init__(self, image, params, init_values=None):
self.image = image
self.params = params
self.points = []
self._overlay_image = init_values
self.setup_intensity()
def setup_intensity(self):
if self.params.vmode == 'percentile':
self.params.vmin, self.params.vmax = np.percentile(self.image.data,
(self.params.perc_min, self.params.perc_max))
elif self.params.vmode == 'mean-std':
# TODO: update this
self.mean_intensity = (self.params.vmax + self.params.vmin) / 2.0
self.std_intensity = (self.params.vmax - self.params.vmin) / 2.0
elif self.params.vmode == 'clahe':
# TODO: implement
logger.warning("CLAHE is not implemented yet.")
def reformat_image(self):
"""Set the camera position and increase contrast.
The image orientation is set to SAL. And set the default contrast, and
axes position for all canvases. Need to run before displaying the GUI
with the image.
"""
logger.debug('Image orientation {}'.format(self.image.orientation))
self.orientation = self.image.orientation
self.image.change_orientation('SAL')
if self._overlay_image:
self._overlay_image.change_orientation('SAL')
x, y, z, t, dx, dy, dz, dt = self.image.dim
self.params.aspect = dx / dy
self.params.offset = x * dx
self.default_position = Position(x // 2, y // 2, z // 2)
self.setup_intensity()
self.reset_position()
def reset_position(self):
"""Set the canvas position to the center of the image"""
self.position = self.default_position
def valid_point(self, x, y, z):
dim = self.image.dim
if -1 < x < dim[0] and -1 < y < dim[1] and -1 < z < dim[2]:
return True
return False
def save(self):
logger.debug('Overlay shape {}'.format(self._overlay_image.data.shape))
for point in self.points:
x, y, z, label = [int(i) for i in point]
self._overlay_image.data[x, y, z] = label
if self.orientation != self._overlay_image.orientation:
self._overlay_image.change_orientation(self.orientation)
self.saved = True
def undo(self):
"""Remove the last point selected and refresh the UI"""
if self.points:
x, y, z, label = self.points[-1]
self.position = Position(x, y, z)
self.points = self.points[:-1]
self.label = label
logger.debug('Point removed {}'.format(self.position))
else:
            raise InvalidActionWarning('There are no points selected to undo')
def as_string(self):
if self._overlay_image is None:
logger.warning('There is no information to save')
return ''
output = []
data = self._overlay_image.data
xs, ys, zs = np.where(data)
for x, y, z in zip(xs, ys, zs):
output.append('{},{},{},{}'.format(x, y, z, int(data[x, y, z])))
return ':'.join(output)
def as_niftii(self, file_name=None):
if not self._overlay_image:
logger.warning('There is no information to save')
raise IOError('There is no information to save')
if file_name:
self._overlay_image.absolutepath = file_name
if self._overlay_image.absolutepath == self.image.absolutepath:
raise IOError('Aborting: the original file and the labeled file are the same', self._overlay_image.absolutepath)
logger.debug('Data: {}'.format(np.where(self._overlay_image.data)))
self._overlay_image.save()
class TooManyPointsWarning(StopIteration):
message = 'Reached the maximum number of points'
class InvalidActionWarning(ValueError):
pass
class MissingLabelWarning(ValueError):
pass
def launch_dialog(controller, dialog_class):
app = QtWidgets.QApplication([])
dialog = dialog_class(controller)
dialog.show()
app.exec_()
return controller
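# Illustrative wiring of these base classes (hypothetical subclass names,
# shown only as a sketch):
#   params = AnatomicalParams(vmode='percentile', perc_min=5., perc_max=95.)
#   controller = MyController(image, params)  # MyController subclasses BaseController
#   controller.reformat_image()               # reorient to SAL, set contrast and position
#   launch_dialog(controller, MyDialog)       # MyDialog subclasses BaseDialog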
|
|
from __future__ import absolute_import
import responses
import six
from six.moves.urllib.parse import parse_qs, urlencode, urlparse
from sentry.utils.compat.mock import patch, Mock
from sentry.integrations.gitlab import GitlabIntegrationProvider
from sentry.models import (
Identity,
IdentityProvider,
IdentityStatus,
Integration,
OrganizationIntegration,
Repository,
)
from sentry.testutils import IntegrationTestCase
class GitlabIntegrationTest(IntegrationTestCase):
provider = GitlabIntegrationProvider
config = {
# Trailing slash is intentional to ensure that valid
# URLs are generated even if the user inputs a trailing /
"url": "https://gitlab.example.com/",
"name": "Test App",
"group": "cool-group",
"verify_ssl": True,
"client_id": "client_id",
"client_secret": "client_secret",
"include_subgroups": True,
}
default_group_id = 4
def setUp(self):
super(GitlabIntegrationTest, self).setUp()
self.init_path_without_guide = "%s%s" % (self.init_path, "?completed_installation_guide")
def assert_setup_flow(self, user_id="user_id_1"):
resp = self.client.get(self.init_path)
assert resp.status_code == 200
self.assertContains(resp, "you will need to create a Sentry app in your GitLab instance")
resp = self.client.get(self.init_path_without_guide)
assert resp.status_code == 200
resp = self.client.post(self.init_path_without_guide, data=self.config)
assert resp.status_code == 302
redirect = urlparse(resp["Location"])
assert redirect.scheme == "https"
assert redirect.netloc == "gitlab.example.com"
assert redirect.path == "/oauth/authorize"
params = parse_qs(redirect.query)
assert params["state"]
assert params["redirect_uri"] == ["http://testserver/extensions/gitlab/setup/"]
assert params["response_type"] == ["code"]
assert params["client_id"] == ["client_id"]
        # once we've asserted on it, switch to singular values to make life
        # easier
authorize_params = {k: v[0] for k, v in six.iteritems(params)}
access_token = "xxxxx-xxxxxxxxx-xxxxxxxxxx-xxxxxxxxxxxx"
responses.add(
responses.POST,
"https://gitlab.example.com/oauth/token",
json={"access_token": access_token},
)
responses.add(responses.GET, "https://gitlab.example.com/api/v4/user", json={"id": user_id})
responses.add(
responses.GET,
"https://gitlab.example.com/api/v4/groups/cool-group",
json={
"id": self.default_group_id,
"full_name": "Cool",
"full_path": "cool-group",
"web_url": "https://gitlab.example.com/groups/cool-group",
"avatar_url": "https://gitlab.example.com/uploads/group/avatar/4/foo.jpg",
},
)
responses.add(
responses.POST, "https://gitlab.example.com/api/v4/hooks", json={"id": "webhook-id-1"}
)
resp = self.client.get(
u"{}?{}".format(
self.setup_path,
urlencode({"code": "oauth-code", "state": authorize_params["state"]}),
)
)
mock_access_token_request = responses.calls[0].request
req_params = parse_qs(mock_access_token_request.body)
assert req_params["grant_type"] == ["authorization_code"]
assert req_params["code"] == ["oauth-code"]
assert req_params["redirect_uri"] == ["http://testserver/extensions/gitlab/setup/"]
assert req_params["client_id"] == ["client_id"]
assert req_params["client_secret"] == ["client_secret"]
assert resp.status_code == 200
self.assertDialogSuccess(resp)
@responses.activate
@patch("sentry.integrations.gitlab.integration.sha1_text")
def test_basic_flow(self, mock_sha):
sha = Mock()
sha.hexdigest.return_value = "secret-token"
mock_sha.return_value = sha
self.assert_setup_flow()
integration = Integration.objects.get(provider=self.provider.key)
assert integration.external_id == "gitlab.example.com:4"
assert integration.name == "Cool"
assert integration.metadata == {
"instance": "gitlab.example.com",
"scopes": ["api"],
"icon": u"https://gitlab.example.com/uploads/group/avatar/4/foo.jpg",
"domain_name": u"gitlab.example.com/cool-group",
"verify_ssl": True,
"base_url": "https://gitlab.example.com",
"webhook_secret": "secret-token",
"group_id": self.default_group_id,
"include_subgroups": True,
}
oi = OrganizationIntegration.objects.get(
integration=integration, organization=self.organization
)
assert oi.config == {}
idp = IdentityProvider.objects.get(type="gitlab")
identity = Identity.objects.get(
idp=idp, user=self.user, external_id="gitlab.example.com:user_id_1"
)
assert identity.status == IdentityStatus.VALID
assert identity.data == {"access_token": "xxxxx-xxxxxxxxx-xxxxxxxxxx-xxxxxxxxxxxx"}
def test_goback_to_instructions(self):
# Go to instructions
resp = self.client.get(self.init_path)
assert resp.status_code == 200
self.assertContains(resp, "Step 1")
# Go to setup form
resp = self.client.get(self.init_path_without_guide)
assert resp.status_code == 200
self.assertContains(resp, "Step 2")
# Go to back to instructions
resp = self.client.get(self.init_path + "?goback=1")
assert resp.status_code == 200
self.assertContains(resp, "Step 1")
@responses.activate
def test_setup_missing_group(self):
resp = self.client.get(self.init_path_without_guide)
assert resp.status_code == 200
resp = self.client.post(self.init_path_without_guide, data=self.config)
assert resp.status_code == 302
redirect = urlparse(resp["Location"])
assert redirect.scheme == "https"
assert redirect.netloc == "gitlab.example.com"
assert redirect.path == "/oauth/authorize"
params = parse_qs(redirect.query)
authorize_params = {k: v[0] for k, v in six.iteritems(params)}
responses.add(
responses.POST,
"https://gitlab.example.com/oauth/token",
json={"access_token": "access-token-value"},
)
responses.add(responses.GET, "https://gitlab.example.com/api/v4/user", json={"id": 9})
responses.add(
responses.GET, "https://gitlab.example.com/api/v4/groups/cool-group", status=404
)
resp = self.client.get(
u"{}?{}".format(
self.setup_path,
urlencode({"code": "oauth-code", "state": authorize_params["state"]}),
)
)
assert resp.status_code == 200
self.assertContains(resp, "GitLab group could not be found")
@responses.activate
def test_get_group_id(self):
self.assert_setup_flow()
integration = Integration.objects.get(provider=self.provider.key)
installation = integration.get_installation(self.organization.id)
assert self.default_group_id == installation.get_group_id()
@responses.activate
def test_get_stacktrace_link(self):
self.assert_setup_flow()
external_id = 4
integration = Integration.objects.get(provider=self.provider.key)
instance = integration.metadata["instance"]
repo = Repository.objects.create(
organization_id=self.organization.id,
name="Get Sentry / Example Repo",
external_id=u"{}:{}".format(instance, external_id),
url="https://gitlab.example.com/getsentry/projects/example-repo",
config={"project_id": external_id, "path": "getsentry/example-repo"},
provider="integrations:gitlab",
integration_id=integration.id,
)
installation = integration.get_installation(self.organization.id)
filepath = "README.md"
ref = "master"
version = "12345678"
responses.add(
responses.HEAD,
u"https://gitlab.example.com/api/v4/projects/{}/repository/files/{}?ref={}".format(
external_id, filepath, version
),
)
source_url = installation.get_stacktrace_link(repo, "README.md", ref, version)
assert (
source_url
== "https://gitlab.example.com/getsentry/example-repo/blob/12345678/README.md"
)
@responses.activate
def test_get_stacktrace_link_file_doesnt_exist(self):
self.assert_setup_flow()
external_id = 4
integration = Integration.objects.get(provider=self.provider.key)
instance = integration.metadata["instance"]
repo = Repository.objects.create(
organization_id=self.organization.id,
name="Get Sentry / Example Repo",
external_id=u"{}:{}".format(instance, external_id),
url="https://gitlab.example.com/getsentry/projects/example-repo",
config={"project_id": external_id, "path": "getsentry/example-repo"},
provider="integrations:gitlab",
integration_id=integration.id,
)
installation = integration.get_installation(self.organization.id)
filepath = "README.md"
ref = "master"
version = None
responses.add(
responses.HEAD,
u"https://gitlab.example.com/api/v4/projects/{}/repository/files/{}?ref={}".format(
external_id, filepath, ref
),
status=404,
)
source_url = installation.get_stacktrace_link(repo, "README.md", ref, version)
assert not source_url
@responses.activate
def test_get_stacktrace_link_use_default_if_version_404(self):
self.assert_setup_flow()
external_id = 4
integration = Integration.objects.get(provider=self.provider.key)
instance = integration.metadata["instance"]
repo = Repository.objects.create(
organization_id=self.organization.id,
name="Get Sentry / Example Repo",
external_id=u"{}:{}".format(instance, external_id),
url="https://gitlab.example.com/getsentry/projects/example-repo",
config={"project_id": external_id, "path": "getsentry/example-repo"},
provider="integrations:gitlab",
integration_id=integration.id,
)
installation = integration.get_installation(self.organization.id)
filepath = "README.md"
ref = "master"
version = "12345678"
responses.add(
responses.HEAD,
u"https://gitlab.example.com/api/v4/projects/{}/repository/files/{}?ref={}".format(
external_id, filepath, version
),
status=404,
)
responses.add(
responses.HEAD,
u"https://gitlab.example.com/api/v4/projects/{}/repository/files/{}?ref={}".format(
external_id, filepath, ref
),
)
source_url = installation.get_stacktrace_link(repo, "README.md", ref, version)
assert (
source_url == "https://gitlab.example.com/getsentry/example-repo/blob/master/README.md"
)
|
|
"""
PEP386-version comparison algorithm.
(c) Tarek Ziade and others
extracted unmodified from https://bitbucket.org/tarek/distutilsversion
licensed under the PSF license (i guess)
"""
import sys
import re
class IrrationalVersionError(Exception):
"""This is an irrational version."""
pass
class HugeMajorVersionNumError(IrrationalVersionError):
"""An irrational version because the major version number is huge
(often because a year or date was used).
See `error_on_huge_major_num` option in `NormalizedVersion` for details.
This guard can be disabled by setting that option False.
"""
pass
# A marker used in the second and third parts of the `parts` tuple, for
# versions that don't have those segments, to sort properly. An example
# of versions in sort order ('highest' last):
# 1.0b1 ((1,0), ('b',1), ('f',))
# 1.0.dev345 ((1,0), ('f',), ('dev', 345))
# 1.0 ((1,0), ('f',), ('f',))
# 1.0.post256.dev345 ((1,0), ('f',), ('f', 'post', 256, 'dev', 345))
# 1.0.post345 ((1,0), ('f',), ('f', 'post', 345, 'f'))
# ^ ^ ^
# 'b' < 'f' ---------------------/ | |
# | |
# 'dev' < 'f' < 'post' -------------------/ |
# |
# 'dev' < 'f' ----------------------------------------------/
# Other letters would do, but 'f' for 'final' is kind of nice.
FINAL_MARKER = ('f',)
VERSION_RE = re.compile(r'''
^
(?P<version>\d+\.\d+) # minimum 'N.N'
(?P<extraversion>(?:\.\d+)*) # any number of extra '.N' segments
(?:
(?P<prerel>[abc]|rc) # 'a'=alpha, 'b'=beta, 'c'=release candidate
# 'rc'= alias for release candidate
(?P<prerelversion>\d+(?:\.\d+)*)
)?
(?P<postdev>(\.post(?P<post>\d+))?(\.dev(?P<dev>\d+))?)?
$''', re.VERBOSE)
class NormalizedVersion(object):
"""A rational version.
Good:
1.2 # equivalent to "1.2.0"
1.2.0
1.2a1
1.2.3a2
1.2.3b1
1.2.3c1
1.2.3.4
TODO: fill this out
Bad:
      1 # minimum two numbers
1.2a # release level must have a release serial
1.2.3b
"""
def __init__(self, s, error_on_huge_major_num=True):
"""Create a NormalizedVersion instance from a version string.
@param s {str} The version string.
@param error_on_huge_major_num {bool} Whether to consider an
apparent use of a year or full date as the major version number
an error. Default True. One of the observed patterns on PyPI before
the introduction of `NormalizedVersion` was version numbers like this:
2009.01.03
20040603
2005.01
This guard is here to strongly encourage the package author to
use an alternate version, because a release deployed into PyPI
and, e.g. downstream Linux package managers, will forever remove
the possibility of using a version number like "1.0" (i.e.
where the major number is less than that huge major number).
"""
self._parse(s, error_on_huge_major_num)
@classmethod
def from_parts(cls, version, prerelease=FINAL_MARKER,
devpost=FINAL_MARKER):
return cls(cls.parts_to_str((version, prerelease, devpost)))
def _parse(self, s, error_on_huge_major_num=True):
"""Parses a string version into parts."""
match = VERSION_RE.search(s)
if not match:
raise IrrationalVersionError(s)
groups = match.groupdict()
parts = []
# main version
block = self._parse_numdots(groups['version'], s, False, 2)
extraversion = groups.get('extraversion')
if extraversion not in ('', None):
block += self._parse_numdots(extraversion[1:], s)
parts.append(tuple(block))
# prerelease
prerel = groups.get('prerel')
if prerel is not None:
block = [prerel]
block += self._parse_numdots(groups.get('prerelversion'), s,
pad_zeros_length=1)
parts.append(tuple(block))
else:
parts.append(FINAL_MARKER)
# postdev
if groups.get('postdev'):
post = groups.get('post')
dev = groups.get('dev')
postdev = []
if post is not None:
postdev.extend([FINAL_MARKER[0], 'post', int(post)])
if dev is None:
postdev.append(FINAL_MARKER[0])
if dev is not None:
postdev.extend(['dev', int(dev)])
parts.append(tuple(postdev))
else:
parts.append(FINAL_MARKER)
self.parts = tuple(parts)
if error_on_huge_major_num and self.parts[0][0] > 1980:
raise HugeMajorVersionNumError("huge major version number, %r, "
"which might cause future problems: %r" % (self.parts[0][0], s))
def _parse_numdots(self, s, full_ver_str, drop_trailing_zeros=True,
pad_zeros_length=0):
"""Parse 'N.N.N' sequences, return a list of ints.
@param s {str} 'N.N.N..." sequence to be parsed
@param full_ver_str {str} The full version string from which this
comes. Used for error strings.
@param drop_trailing_zeros {bool} Whether to drop trailing zeros
from the returned list. Default True.
@param pad_zeros_length {int} The length to which to pad the
returned list with zeros, if necessary. Default 0.
"""
nums = []
for n in s.split("."):
if len(n) > 1 and n[0] == '0':
raise IrrationalVersionError("cannot have leading zero in "
"version number segment: '%s' in %r" % (n, full_ver_str))
nums.append(int(n))
if drop_trailing_zeros:
while nums and nums[-1] == 0:
nums.pop()
while len(nums) < pad_zeros_length:
nums.append(0)
return nums
def __str__(self):
return self.parts_to_str(self.parts)
@classmethod
def parts_to_str(cls, parts):
"""Transforms a version expressed in tuple into its string
representation."""
# XXX This doesn't check for invalid tuples
main, prerel, postdev = parts
s = '.'.join(str(v) for v in main)
if prerel is not FINAL_MARKER:
s += prerel[0]
s += '.'.join(str(v) for v in prerel[1:])
if postdev and postdev is not FINAL_MARKER:
if postdev[0] == 'f':
postdev = postdev[1:]
i = 0
while i < len(postdev):
if i % 2 == 0:
s += '.'
s += str(postdev[i])
i += 1
return s
def __repr__(self):
return "%s('%s')" % (self.__class__.__name__, self)
def _cannot_compare(self, other):
raise TypeError("cannot compare %s and %s"
% (type(self).__name__, type(other).__name__))
def __eq__(self, other):
if not isinstance(other, NormalizedVersion):
self._cannot_compare(other)
return self.parts == other.parts
def __lt__(self, other):
if not isinstance(other, NormalizedVersion):
self._cannot_compare(other)
return self.parts < other.parts
def __ne__(self, other):
return not self.__eq__(other)
def __gt__(self, other):
return not (self.__lt__(other) or self.__eq__(other))
def __le__(self, other):
return self.__eq__(other) or self.__lt__(other)
def __ge__(self, other):
return self.__eq__(other) or self.__gt__(other)
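# Illustrative comparisons (a sketch; the ordering follows the `parts` tuples
# documented above FINAL_MARKER):
#   NormalizedVersion('1.0a1')      < NormalizedVersion('1.0')         -> True
#   NormalizedVersion('1.0.dev345') < NormalizedVersion('1.0')         -> True
#   NormalizedVersion('1.0')        < NormalizedVersion('1.0.post345') -> True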
def suggest_normalized_version(s):
"""Suggest a normalized version close to the given version string.
If you have a version string that isn't rational (i.e. NormalizedVersion
doesn't like it) then you might be able to get an equivalent (or close)
rational version from this function.
This does a number of simple normalizations to the given string, based
on observation of versions currently in use on PyPI. Given a dump of
    those versions during PyCon 2009, 4287 of them:
    - 2312 (53.93%) match NormalizedVersion without change
    - 3474 (81.04%) match when using this suggestion method
@param s {str} An irrational version string.
@returns A rational version string, or None, if couldn't determine one.
"""
try:
NormalizedVersion(s)
return s # already rational
except IrrationalVersionError:
pass
rs = s.lower()
# part of this could use maketrans
for orig, repl in (('-alpha', 'a'), ('-beta', 'b'), ('alpha', 'a'),
('beta', 'b'), ('rc', 'c'), ('-final', ''),
('-pre', 'c'),
('-release', ''), ('.release', ''), ('-stable', ''),
('+', '.'), ('_', '.'), (' ', ''), ('.final', ''),
('final', '')):
rs = rs.replace(orig, repl)
# if something ends with dev or pre, we add a 0
rs = re.sub(r"pre$", r"pre0", rs)
rs = re.sub(r"dev$", r"dev0", rs)
# if we have something like "b-2" or "a.2" at the end of the
    # version, that is probably beta, alpha, etc
# let's remove the dash or dot
rs = re.sub(r"([abc|rc])[\-\.](\d+)$", r"\1\2", rs)
# 1.0-dev-r371 -> 1.0.dev371
# 0.1-dev-r79 -> 0.1.dev79
rs = re.sub(r"[\-\.](dev)[\-\.]?r?(\d+)$", r".\1\2", rs)
# Clean: 2.0.a.3, 2.0.b1, 0.9.0~c1
rs = re.sub(r"[.~]?([abc])\.?", r"\1", rs)
# Clean: v0.3, v1.0
if rs.startswith('v'):
rs = rs[1:]
# Clean leading '0's on numbers.
#TODO: unintended side-effect on, e.g., "2003.05.09"
# PyPI stats: 77 (~2%) better
rs = re.sub(r"\b0+(\d+)(?!\d)", r"\1", rs)
# Clean a/b/c with no version. E.g. "1.0a" -> "1.0a0". Setuptools infers
# zero.
# PyPI stats: 245 (7.56%) better
rs = re.sub(r"(\d+[abc])$", r"\g<1>0", rs)
# the 'dev-rNNN' tag is a dev tag
rs = re.sub(r"\.?(dev-r|dev\.r)\.?(\d+)$", r".dev\2", rs)
# clean the - when used as a pre delimiter
rs = re.sub(r"-(a|b|c)(\d+)$", r"\1\2", rs)
# a terminal "dev" or "devel" can be changed into ".dev0"
rs = re.sub(r"[\.\-](dev|devel)$", r".dev0", rs)
# a terminal "dev" can be changed into ".dev0"
rs = re.sub(r"(?![\.\-])dev$", r".dev0", rs)
# a terminal "final" or "stable" can be removed
rs = re.sub(r"(final|stable)$", "", rs)
# The 'r' and the '-' tags are post release tags
# 0.4a1.r10 -> 0.4a1.post10
    # 0.9.33-17222 -> 0.9.33.post17222
    # 0.9.33-r17222 -> 0.9.33.post17222
rs = re.sub(r"\.?(r|-|-r)\.?(\d+)$", r".post\2", rs)
# Clean 'r' instead of 'dev' usage:
# 0.9.33+r17222 -> 0.9.3.dev17222
# 1.0dev123 -> 1.0.dev123
# 1.0.git123 -> 1.0.dev123
# 1.0.bzr123 -> 1.0.dev123
# 0.1a0dev.123 -> 0.1a0.dev123
# PyPI stats: ~150 (~4%) better
rs = re.sub(r"\.?(dev|git|bzr)\.?(\d+)$", r".dev\2", rs)
# Clean '.pre' (normalized from '-pre' above) instead of 'c' usage:
# 0.2.pre1 -> 0.2c1
# 0.2-c1 -> 0.2c1
# 1.0preview123 -> 1.0c123
# PyPI stats: ~21 (0.62%) better
rs = re.sub(r"\.?(pre|preview|-c)(\d+)$", r"c\g<2>", rs)
# Tcl/Tk uses "px" for their post release markers
rs = re.sub(r"p(\d+)$", r".post\1", rs)
try:
NormalizedVersion(rs)
return rs # already rational
except IrrationalVersionError:
pass
return None
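# Illustrative suggestions (a sketch, not an exhaustive specification of the
# normalizations above):
#   suggest_normalized_version('1.0-alpha1')   -> '1.0a1'
#   suggest_normalized_version('v0.3')         -> '0.3'
#   suggest_normalized_version('1.0-dev-r371') -> '1.0.dev371'
#   suggest_normalized_version('foo bar')      -> None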
|
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright: (c) 2017, Nathan Davison <[email protected]>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = r'''
---
module: listen_ports_facts
author:
- Nathan Davison (@ndavison)
version_added: "2.9"
description:
- Gather facts on processes listening on TCP and UDP ports.
short_description: Gather facts on processes listening on TCP and UDP ports.
'''
EXAMPLES = r'''
- name: Gather facts on listening ports
listen_ports_facts:
- name: TCP whitelist violation
debug:
msg: TCP port {{ item.port }} by pid {{ item.pid }} violates the whitelist
vars:
    tcp_listen_violations: "{{ ansible_facts.tcp_listen | rejectattr('port', 'in', tcp_whitelist) | list }}"
tcp_whitelist:
- 22
- 25
loop: "{{ tcp_listen_violations }}"
- name: List TCP ports
debug:
msg: "{{ ansible_facts.tcp_listen | map(attribute='port') | sort | list }}"
- name: List UDP ports
debug:
msg: "{{ ansible_facts.udp_listen | map(attribute='port') | sort | list }}"
- name: List all ports
debug:
msg: "{{ (ansible_facts.tcp_listen + ansible_facts.udp_listen) | map(attribute='port') | unique | sort | list }}"
'''
RETURN = r'''
ansible_facts:
description: Dictionary containing details of TCP and UDP ports with listening servers
returned: always
type: complex
contains:
tcp_listen:
description: A list of processes that are listening on a TCP port.
returned: if TCP servers were found
type: list
contains:
address:
description: The address the server is listening on.
returned: always
type: str
sample: "0.0.0.0"
name:
description: The name of the listening process.
returned: if user permissions allow
type: str
sample: "mysqld"
pid:
description: The pid of the listening process.
returned: always
type: int
sample: 1223
port:
description: The port the server is listening on.
returned: always
type: int
sample: 3306
protocol:
description: The network protocol of the server.
returned: always
type: str
sample: "tcp"
stime:
description: The start time of the listening process.
returned: always
type: str
sample: "Thu Feb 2 13:29:45 2017"
user:
description: The user who is running the listening process.
returned: always
type: str
sample: "mysql"
udp_listen:
description: A list of processes that are listening on a UDP port.
returned: if UDP servers were found
type: list
contains:
address:
description: The address the server is listening on.
returned: always
type: str
sample: "0.0.0.0"
name:
description: The name of the listening process.
returned: if user permissions allow
type: str
sample: "rsyslogd"
pid:
description: The pid of the listening process.
returned: always
type: int
sample: 609
port:
description: The port the server is listening on.
returned: always
type: int
sample: 514
protocol:
description: The network protocol of the server.
returned: always
type: str
sample: "udp"
stime:
description: The start time of the listening process.
returned: always
type: str
sample: "Thu Feb 2 13:29:45 2017"
user:
description: The user who is running the listening process.
returned: always
type: str
sample: "root"
'''
import re
import platform
from ansible.module_utils._text import to_native
from ansible.module_utils.basic import AnsibleModule
def netStatParse(raw):
results = list()
for line in raw.splitlines():
listening_search = re.search('[^ ]+:[0-9]+', line)
if listening_search:
splitted = line.split()
conns = re.search('([^ ]+):([0-9]+)', splitted[3])
pidstr = ''
if 'tcp' in splitted[0]:
protocol = 'tcp'
pidstr = splitted[6]
elif 'udp' in splitted[0]:
protocol = 'udp'
pidstr = splitted[5]
pids = re.search(r'(([0-9]+)/(.*)|-)', pidstr)
if conns and pids:
address = conns.group(1)
port = conns.group(2)
if (pids.group(2)):
pid = pids.group(2)
else:
pid = 0
if (pids.group(3)):
name = pids.group(3)
else:
name = ''
result = {
'pid': int(pid),
'address': address,
'port': int(port),
'protocol': protocol,
'name': name,
}
if result not in results:
results.append(result)
else:
raise EnvironmentError('Could not get process information for the listening ports.')
return results
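# Illustrative input/output for netStatParse (values hypothetical, formatting
# as produced by `netstat -plunt`):
#   "tcp   0   0 0.0.0.0:3306   0.0.0.0:*   LISTEN   1223/mysqld"
#   parses to {'pid': 1223, 'address': '0.0.0.0', 'port': 3306,
#              'protocol': 'tcp', 'name': 'mysqld'}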
def main():
module = AnsibleModule(
argument_spec={},
supports_check_mode=True,
)
if platform.system() != 'Linux':
module.fail_json(msg='This module requires Linux.')
def getPidSTime(pid):
ps_cmd = module.get_bin_path('ps', True)
rc, ps_output, stderr = module.run_command([ps_cmd, '-o', 'lstart', '-p', str(pid)])
stime = ''
if rc == 0:
for line in ps_output.splitlines():
if 'started' not in line:
stime = line
return stime
def getPidUser(pid):
ps_cmd = module.get_bin_path('ps', True)
rc, ps_output, stderr = module.run_command([ps_cmd, '-o', 'user', '-p', str(pid)])
user = ''
if rc == 0:
for line in ps_output.splitlines():
if line != 'USER':
user = line
return user
result = {
'changed': False,
'ansible_facts': {
'tcp_listen': [],
'udp_listen': [],
},
}
try:
netstat_cmd = module.get_bin_path('netstat', True)
# which ports are listening for connections?
rc, stdout, stderr = module.run_command([netstat_cmd, '-plunt'])
if rc == 0:
netstatOut = netStatParse(stdout)
for p in netstatOut:
p['stime'] = getPidSTime(p['pid'])
p['user'] = getPidUser(p['pid'])
if p['protocol'] == 'tcp':
result['ansible_facts']['tcp_listen'].append(p)
elif p['protocol'] == 'udp':
result['ansible_facts']['udp_listen'].append(p)
except (KeyError, EnvironmentError) as e:
module.fail_json(msg=to_native(e))
module.exit_json(**result)
if __name__ == '__main__':
main()
|
|
# coding=utf-8
# Copyright 2020 The TF-Agents Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A mixture agent that updates the mixture distribution based on EXP3.
For a reference on EXP3, see `Bandit Algorithms` by Tor Lattimore and Csaba
Szepesvari (https://tor-lattimore.com/downloads/book/book.pdf).
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from typing import List, Optional, Text
import gin
import tensorflow as tf
import tensorflow_probability as tfp
from tf_agents.agents import tf_agent
from tf_agents.bandits.agents import mixture_agent
from tf_agents.bandits.policies import mixture_policy
from tf_agents.utils import nest_utils
tfd = tfp.distributions
@gin.configurable
class Exp3MixtureVariableCollection(tf.Module):
"""A collection of variables used by subclasses of `MixtureAgent`.
Note that this variable collection only contains the mixture weights. The
variables of the sub-agents that the mixture agent mixes are in variable
collections of the respective sub-agents.
"""
def __init__(self,
num_agents: int,
reward_aggregates: Optional[List[float]] = None,
inverse_temperature: float = 0.0):
"""Initializes an instace of 'Exp3MixtureVariableCollection'.
Args:
num_agents: (int) the number of agents mixed by the mixture agent.
reward_aggregates: A list of floats containing the reward aggregates for
each agent. If not set, the initial values will be 0.
inverse_temperature: The initial value for the inverse temperature
variable used by the mixture agent.
"""
if reward_aggregates is None:
reward_aggregates = [0.0] * num_agents
else:
if num_agents != len(reward_aggregates):
raise ValueError('`reward_aggregates` must have `num_agents` elements.')
self._reward_aggregates = tf.Variable(
reward_aggregates, name='reward_aggregates', dtype=tf.float32)
self._inverse_temperature = tf.Variable(
inverse_temperature, dtype=tf.float32)
@property
def reward_aggregates(self):
return self._reward_aggregates
@property
def inverse_temperature(self):
return self._inverse_temperature
@gin.configurable
class Exp3MixtureAgent(mixture_agent.MixtureAgent):
"""An agent that mixes a set of agents and updates the weights with Exp3.
For a reference on EXP3, see `Bandit Algorithms` by Tor Lattimore and Csaba
Szepesvari (https://tor-lattimore.com/downloads/book/book.pdf).
The update uses a slightly modified version of EXP3 to make sure that the
weights do not collapse onto one seemingly good agent early on. To smooth
the weights, two extra measures are taken:
1. A forgetting factor makes sure that the aggregated reward estimates do not
grow indefinitely.
2. The `inverse temperature` has a maximum parameter that prevents it from
growing indefinitely.
It is generally a good idea to set
```
forgetting = 1 - (1 / max_inverse_temperature)
```
so that the two smoothing factors work together nicely.
For every data sample, the agent updates the sub-agent that was used to make
the action choice in that sample. For this update to happen, the mixture agent
needs to have the information on which sub-agent is "responsible" for the
action. This information is in a policy info field `mixture_choice_info`.
"""
def __init__(
self,
agents: List[tf_agent.TFAgent],
variable_collection: Optional[Exp3MixtureVariableCollection] = None,
forgetting: float = 0.999,
max_inverse_temperature: float = 1000.0,
name: Optional[Text] = None):
"""Initializes an instance of `Exp3MixtureAgent`.
Args:
agents: List of TF-Agents agents that this mixture agent trains.
variable_collection: An instance of `Exp3MixtureVariableCollection`. If not
set, a default one will be created. It contains all the variables that are
needed to restore the mixture agent, excluding the variables of the
sub-agents.
forgetting: A float value in (0, 1]. The factor by which the estimated
reward aggregates are shrunk in every training step.
max_inverse_temperature: This value caps the inverse temperature that
would otherwise grow as the square root of the number of samples seen.
name: Name of this instance of `Exp3MixtureAgent`.
"""
self._num_agents = len(agents)
self._forgetting = forgetting
self._max_inverse_temperature = max_inverse_temperature
if variable_collection is None:
variable_collection = Exp3MixtureVariableCollection(
self._num_agents)
elif not isinstance(variable_collection,
Exp3MixtureVariableCollection):
raise TypeError('Parameter `variable_collection` should be '
'of type `Exp3MixtureVariableCollection`.')
elif variable_collection.reward_aggregates.shape != self._num_agents:
raise ValueError('`variable_collection.reward_aggregates` should have '
'shape `[len(agents)]`.')
self._variable_collection = variable_collection
# The `_mixture_weights` value is reassigned in every training step and only
# depends on reward aggregates and inverse temperature. This variable is not
# part of the variable collection because it is not needed to restore an
# agent. It is a `tf.Variable` only so that the categorical distribution
# can be parameterized dynamically.
self._mixture_weights = tf.Variable(
tf.zeros_like(variable_collection.reward_aggregates))
mixture_distribution = tfd.Categorical(
logits=self._mixture_weights)
super(Exp3MixtureAgent, self).__init__(
mixture_distribution, agents, name=name)
def _update_mixture_distribution(self, experience):
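# EXP3-style importance-weighted update: for each agent k, `update_term[k]` is
# 1 minus the sum of (1 - reward) / p(k) over the batch samples in which k was
# chosen, so agents that were never chosen in this batch simply receive 1.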
reward, _ = nest_utils.flatten_multi_batched_nested_tensors(
experience.reward, self._time_step_spec.reward)
policy_choice, _ = nest_utils.flatten_multi_batched_nested_tensors(
experience.policy_info[mixture_policy.MIXTURE_AGENT_ID],
self._time_step_spec.reward)
batch_size = tf.compat.dimension_value(
reward.shape[0]) or tf.shape(reward)[0]
unnormalized_probabilities = tf.exp(self._mixture_weights)
normalizer = tf.reduce_sum(unnormalized_probabilities)
probabilities = unnormalized_probabilities / normalizer
self._summarize_probabilities(probabilities)
repeated_probs = tf.tile(
tf.expand_dims(probabilities, axis=0), [batch_size, 1])
probs_per_step = tf.gather(
repeated_probs, policy_choice, batch_dims=1)
per_step_update_term = tf.expand_dims((1 - reward) / probs_per_step, axis=0)
one_hot_policy_choice = tf.one_hot(
policy_choice, depth=self._num_agents)
update_term = 1 - tf.squeeze(
tf.matmul(per_step_update_term, one_hot_policy_choice))
self._update_aggregates(update_term)
self._update_inverse_temperature(batch_size)
return self._mixture_weights.assign(
self._variable_collection.reward_aggregates /
self._variable_collection.inverse_temperature)
def _summarize_probabilities(self, probabilities):
for k in range(self._num_agents):
tf.compat.v2.summary.scalar(
name='policy_{}_prob'.format(k),
data=probabilities[k],
step=self.train_step_counter)
def _update_aggregates(self, update_term):
self._variable_collection.reward_aggregates.assign(
self._forgetting *
(self._variable_collection.reward_aggregates + update_term))
def _update_inverse_temperature(self, batch_size):
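# The inverse temperature grows roughly as the square root of the total
# number of samples seen (new^2 = old^2 + batch_size) and is capped at
# `max_inverse_temperature`.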
self._variable_collection.inverse_temperature.assign(
tf.minimum(
self._max_inverse_temperature,
tf.sqrt(
tf.square(self._variable_collection.inverse_temperature) +
tf.cast(batch_size, dtype=tf.float32))))
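# A minimal, hypothetical usage sketch (not part of the library): it only
# illustrates wiring the two smoothing factors together as suggested in the
# class docstring, i.e. forgetting = 1 - 1 / max_inverse_temperature.
# `sub_agents` stands in for a list of already-constructed bandit sub-agents.
def _example_exp3_mixture(sub_agents: List[tf_agent.TFAgent],
                          max_inverse_temperature: float = 1000.0
                         ) -> 'Exp3MixtureAgent':
  """Builds an `Exp3MixtureAgent` whose smoothing factors are consistent."""
  forgetting = 1.0 - 1.0 / max_inverse_temperature  # 0.999 when the cap is 1000
  return Exp3MixtureAgent(
      agents=sub_agents,
      forgetting=forgetting,
      max_inverse_temperature=max_inverse_temperature)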
|
|
"""passlib.tests.test_handlers_django - tests for passlib hash algorithms"""
#=============================================================================
# imports
#=============================================================================
from __future__ import with_statement
# core
import logging; log = logging.getLogger(__name__)
import warnings
# site
# pkg
from passlib import hash
from passlib.utils import repeat_string
from passlib.utils.compat import u
from passlib.tests.utils import TestCase, HandlerCase, skipUnless, SkipTest
from passlib.tests.test_handlers import UPASS_USD, UPASS_TABLE
from passlib.tests.test_ext_django import DJANGO_VERSION, MIN_DJANGO_VERSION
# module
#=============================================================================
# django
#=============================================================================
# standard string django uses
UPASS_LETMEIN = u('l\xe8tmein')
def vstr(version):
return ".".join(str(e) for e in version)
class _DjangoHelper(TestCase):
__unittest_skip = True
#: minimum django version where hash alg is present / that we support testing against
min_django_version = MIN_DJANGO_VERSION
#: max django version where hash alg is present
max_django_version = None
def _require_django_support(self):
if DJANGO_VERSION < self.min_django_version:
raise self.skipTest("Django >= %s not installed" % vstr(self.min_django_version))
if self.max_django_version and DJANGO_VERSION > self.max_django_version:
raise self.skipTest("Django <= %s not installed" % vstr(self.max_django_version))
return True
extra_fuzz_verifiers = HandlerCase.fuzz_verifiers + (
"fuzz_verifier_django",
)
def fuzz_verifier_django(self):
try:
self._require_django_support()
except SkipTest:
return None
from django.contrib.auth.hashers import check_password
def verify_django(secret, hash):
"""django/check_password"""
if self.handler.name == "django_bcrypt" and hash.startswith("bcrypt$$2y$"):
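# "$2y$" and "$2a$" label the same bcrypt variant; normalize to "$2a$" since
# some bcrypt backends reached through django's hasher don't accept "$2y$".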
hash = hash.replace("$$2y$", "$$2a$")
if self.django_has_encoding_glitch and isinstance(secret, bytes):
# e.g. unsalted_md5 on 1.5 and higher try to combine
# salt + password before encoding to bytes, leading to ascii error.
# this works around that issue.
secret = secret.decode("utf-8")
return check_password(secret, hash)
return verify_django
def test_90_django_reference(self):
"""run known correct hashes through Django's check_password()"""
self._require_django_support()
# XXX: esp. when it's no longer supported by django,
# should verify it's *NOT* recognized
from django.contrib.auth.hashers import check_password
assert self.known_correct_hashes
for secret, hash in self.iter_known_hashes():
self.assertTrue(check_password(secret, hash),
"secret=%r hash=%r failed to verify" %
(secret, hash))
self.assertFalse(check_password('x' + secret, hash),
"mangled secret=%r hash=%r incorrect verified" %
(secret, hash))
django_has_encoding_glitch = False
def test_91_django_generation(self):
"""test against output of Django's make_password()"""
self._require_django_support()
# XXX: esp. when it's no longer supported by django,
# should verify it's *NOT* recognized
from passlib.utils import tick
from django.contrib.auth.hashers import make_password
name = self.handler.django_name # set for all the django_* handlers
end = tick() + self.max_fuzz_time/2
generator = self.FuzzHashGenerator(self, self.getRandom())
while tick() < end:
secret, other = generator.random_password_pair()
if not secret: # django rejects empty passwords.
continue
if self.django_has_encoding_glitch and isinstance(secret, bytes):
# e.g. unsalted_md5 tried to combine salt + password before encoding to bytes,
# leading to ascii error. this works around that issue.
secret = secret.decode("utf-8")
hash = make_password(secret, hasher=name)
self.assertTrue(self.do_identify(hash))
self.assertTrue(self.do_verify(secret, hash))
self.assertFalse(self.do_verify(other, hash))
class django_disabled_test(HandlerCase):
"""test django_disabled"""
handler = hash.django_disabled
disabled_contains_salt = True
known_correct_hashes = [
# *everything* should hash to "!", and nothing should verify
("password", "!"),
("", "!"),
(UPASS_TABLE, "!"),
]
known_alternate_hashes = [
# django 1.6 appends a random alphanumeric string
("!9wa845vn7098ythaehasldkfj", "password", "!"),
]
class django_des_crypt_test(HandlerCase, _DjangoHelper):
"""test django_des_crypt"""
handler = hash.django_des_crypt
max_django_version = (1,9)
known_correct_hashes = [
# ensures only first two digits of salt count.
("password", 'crypt$c2$c2M87q...WWcU'),
("password", 'crypt$c2e86$c2M87q...WWcU'),
("passwordignoreme", 'crypt$c2.AZ$c2M87q...WWcU'),
# ensures utf-8 used for unicode
(UPASS_USD, 'crypt$c2e86$c2hN1Bxd6ZiWs'),
(UPASS_TABLE, 'crypt$0.aQs$0.wB.TT0Czvlo'),
(u("hell\u00D6"), "crypt$sa$saykDgk3BPZ9E"),
# prevent regression of issue 22
("foo", 'crypt$MNVY.9ajgdvDQ$MNVY.9ajgdvDQ'),
]
known_alternate_hashes = [
# ensure django 1.4 empty salt field is accepted;
# but that salt field is re-filled (for django 1.0 compatibility)
('crypt$$c2M87q...WWcU', "password", 'crypt$c2$c2M87q...WWcU'),
]
known_unidentified_hashes = [
'sha1$aa$bb',
]
known_malformed_hashes = [
# checksum too short
'crypt$c2$c2M87q',
# salt must be >2
'crypt$f$c2M87q...WWcU',
# make sure first 2 chars of salt & chk field agree.
'crypt$ffe86$c2M87q...WWcU',
]
class django_salted_md5_test(HandlerCase, _DjangoHelper):
"""test django_salted_md5"""
handler = hash.django_salted_md5
max_django_version = (1,9)
django_has_encoding_glitch = True
known_correct_hashes = [
# test extra large salt
("password", 'md5$123abcdef$c8272612932975ee80e8a35995708e80'),
# test django 1.4 alphanumeric salt
("test", 'md5$3OpqnFAHW5CT$54b29300675271049a1ebae07b395e20'),
# ensures utf-8 used for unicode
(UPASS_USD, 'md5$c2e86$92105508419a81a6babfaecf876a2fa0'),
(UPASS_TABLE, 'md5$d9eb8$01495b32852bffb27cf5d4394fe7a54c'),
]
known_unidentified_hashes = [
'sha1$aa$bb',
]
known_malformed_hashes = [
# checksum too short
'md5$aa$bb',
]
class FuzzHashGenerator(HandlerCase.FuzzHashGenerator):
def random_salt_size(self):
# workaround for django14 regression --
# 1.4 won't accept hashes with empty salt strings, unlike 1.3 and earlier.
# looks to be fixed in a future release -- https://code.djangoproject.com/ticket/18144
# for now, we avoid salt_size==0 under 1.4
handler = self.handler
default = handler.default_salt_size
assert handler.min_salt_size == 0
lower = 1
upper = handler.max_salt_size or default*4
return self.randintgauss(lower, upper, default, default*.5)
class django_salted_sha1_test(HandlerCase, _DjangoHelper):
"""test django_salted_sha1"""
handler = hash.django_salted_sha1
max_django_version = (1,9)
django_has_encoding_glitch = True
known_correct_hashes = [
# test extra large salt
("password",'sha1$123abcdef$e4a1877b0e35c47329e7ed7e58014276168a37ba'),
# test django 1.4 alphanumeric salt
("test", 'sha1$bcwHF9Hy8lxS$6b4cfa0651b43161c6f1471ce9523acf1f751ba3'),
# ensures utf-8 used for unicode
(UPASS_USD, 'sha1$c2e86$0f75c5d7fbd100d587c127ef0b693cde611b4ada'),
(UPASS_TABLE, 'sha1$6d853$ef13a4d8fb57aed0cb573fe9c82e28dc7fd372d4'),
# generic password
("MyPassword", 'sha1$54123$893cf12e134c3c215f3a76bd50d13f92404a54d3'),
]
known_unidentified_hashes = [
'md5$aa$bb',
]
known_malformed_hashes = [
# checksum too short
'sha1$c2e86$0f75',
]
# reuse custom random_salt_size() helper...
FuzzHashGenerator = django_salted_md5_test.FuzzHashGenerator
class django_pbkdf2_sha256_test(HandlerCase, _DjangoHelper):
"""test django_pbkdf2_sha256"""
handler = hash.django_pbkdf2_sha256
known_correct_hashes = [
#
# custom - generated via django 1.4 hasher
#
('not a password',
'pbkdf2_sha256$10000$kjVJaVz6qsnJ$5yPHw3rwJGECpUf70daLGhOrQ5+AMxIJdz1c3bqK1Rs='),
(UPASS_TABLE,
'pbkdf2_sha256$10000$bEwAfNrH1TlQ$OgYUblFNUX1B8GfMqaCYUK/iHyO0pa7STTDdaEJBuY0='),
]
class django_pbkdf2_sha1_test(HandlerCase, _DjangoHelper):
"""test django_pbkdf2_sha1"""
handler = hash.django_pbkdf2_sha1
known_correct_hashes = [
#
# custom - generated via django 1.4 hashers
#
('not a password',
'pbkdf2_sha1$10000$wz5B6WkasRoF$atJmJ1o+XfJxKq1+Nu1f1i57Z5I='),
(UPASS_TABLE,
'pbkdf2_sha1$10000$KZKWwvqb8BfL$rw5pWsxJEU4JrZAQhHTCO+u0f5Y='),
]
@skipUnless(hash.bcrypt.has_backend(), "no bcrypt backends available")
class django_bcrypt_test(HandlerCase, _DjangoHelper):
"""test django_bcrypt"""
handler = hash.django_bcrypt
fuzz_salts_need_bcrypt_repair = True
known_correct_hashes = [
#
# just copied and adapted a few test vectors from bcrypt (above),
# since django_bcrypt is just a wrapper for the real bcrypt class.
#
('', 'bcrypt$$2a$06$DCq7YPn5Rq63x1Lad4cll.TV4S6ytwfsfvkgY8jIucDrjc8deX1s.'),
('abcdefghijklmnopqrstuvwxyz',
'bcrypt$$2a$10$fVH8e28OQRj9tqiDXs1e1uxpsjN0c7II7YPKXua2NAKYvM6iQk7dq'),
(UPASS_TABLE,
'bcrypt$$2a$05$Z17AXnnlpzddNUvnC6cZNOSwMA/8oNiKnHTHTwLlBijfucQQlHjaG'),
]
# NOTE: the following have been cloned from _bcrypt_test()
def populate_settings(self, kwds):
# speed up test w/ lower rounds
kwds.setdefault("rounds", 4)
super(django_bcrypt_test, self).populate_settings(kwds)
class FuzzHashGenerator(HandlerCase.FuzzHashGenerator):
def random_rounds(self):
# decrease default rounds for fuzz testing to speed up volume.
return self.randintgauss(5, 8, 6, 1)
def random_ident(self):
# omit multi-ident tests, only $2a$ counts for this class
# XXX: enable this to check 2a / 2b?
return None
@skipUnless(hash.bcrypt.has_backend(), "no bcrypt backends available")
class django_bcrypt_sha256_test(HandlerCase, _DjangoHelper):
"""test django_bcrypt_sha256"""
handler = hash.django_bcrypt_sha256
forbidden_characters = None
fuzz_salts_need_bcrypt_repair = True
known_correct_hashes = [
#
# custom - generated via django 1.6 hasher
#
('',
'bcrypt_sha256$$2a$06$/3OeRpbOf8/l6nPPRdZPp.nRiyYqPobEZGdNRBWihQhiFDh1ws1tu'),
(UPASS_LETMEIN,
'bcrypt_sha256$$2a$08$NDjSAIcas.EcoxCRiArvT.MkNiPYVhrsrnJsRkLueZOoV1bsQqlmC'),
(UPASS_TABLE,
'bcrypt_sha256$$2a$06$kCXUnRFQptGg491siDKNTu8RxjBGSjALHRuvhPYNFsa4Ea5d9M48u'),
# test >72 chars is hashed correctly -- under bcrypt these hash the same.
(repeat_string("abc123",72),
'bcrypt_sha256$$2a$06$Tg/oYyZTyAf.Nb3qSgN61OySmyXA8FoY4PjGizjE1QSDfuL5MXNni'),
(repeat_string("abc123",72)+"qwr",
'bcrypt_sha256$$2a$06$Tg/oYyZTyAf.Nb3qSgN61Ocy0BEz1RK6xslSNi8PlaLX2pe7x/KQG'),
(repeat_string("abc123",72)+"xyz",
'bcrypt_sha256$$2a$06$Tg/oYyZTyAf.Nb3qSgN61OvY2zoRVUa2Pugv2ExVOUT2YmhvxUFUa'),
]
known_malformed_hashes = [
# data in django salt field
'bcrypt_sha256$xyz$2a$06$/3OeRpbOf8/l6nPPRdZPp.nRiyYqPobEZGdNRBWihQhiFDh1ws1tu',
]
# NOTE: the following have been cloned from _bcrypt_test()
def populate_settings(self, kwds):
# speed up test w/ lower rounds
kwds.setdefault("rounds", 4)
super(django_bcrypt_sha256_test, self).populate_settings(kwds)
class FuzzHashGenerator(HandlerCase.FuzzHashGenerator):
def random_rounds(self):
# decrease default rounds for fuzz testing to speed up volume.
return self.randintgauss(5, 8, 6, 1)
def random_ident(self):
# omit multi-ident tests, only $2a$ counts for this class
# XXX: enable this to check 2a / 2b?
return None
from passlib.tests.test_handlers_argon2 import _base_argon2_test
@skipUnless(hash.argon2.has_backend(), "no argon2 backends available")
class django_argon2_test(HandlerCase, _DjangoHelper):
"""test django_bcrypt"""
handler = hash.django_argon2
# NOTE: most of this adapted from _base_argon2_test & argon2pure test
known_correct_hashes = [
# sample test
("password", 'argon2$argon2i$v=19$m=256,t=1,p=1$c29tZXNhbHQ$AJFIsNZTMKTAewB4+ETN1A'),
# sample w/ all parameters different
("password", 'argon2$argon2i$v=19$m=380,t=2,p=2$c29tZXNhbHQ$SrssP8n7m/12VWPM8dvNrw'),
# generated from django 1.10.3
(UPASS_LETMEIN, 'argon2$argon2i$v=19$m=512,t=2,p=2$V25jN1l4UUJZWkR1$MxpA1BD2Gh7+D79gaAw6sQ'),
]
def setUpWarnings(self):
super(django_argon2_test, self).setUpWarnings()
warnings.filterwarnings("ignore", ".*Using argon2pure backend.*")
def do_stub_encrypt(self, handler=None, **settings):
# overriding default since no way to get stub config from argon2._calc_hash()
# (otherwise test_21b_max_rounds blocks trying to do max rounds)
handler = (handler or self.handler).using(**settings)
self = handler.wrapped(use_defaults=True)
self.checksum = self._stub_checksum
assert self.checksum
return handler._wrap_hash(self.to_string())
def test_03_legacy_hash_workflow(self):
# override base method
raise self.skipTest("legacy 1.6 workflow not supported")
class FuzzHashGenerator(_base_argon2_test.FuzzHashGenerator):
def random_rounds(self):
# decrease default rounds for fuzz testing to speed up volume.
return self.randintgauss(1, 3, 2, 1)
#=============================================================================
# eof
#=============================================================================
|
|
# Copyright (c) 2012-2022, Mark Peek <[email protected]>
# All rights reserved.
#
# See LICENSE file for full license.
#
# *** Do not modify - this file is autogenerated ***
from . import AWSObject, AWSProperty, PropsDictType, Tags
from .validators import boolean, integer
class ArtifactDetails(AWSProperty):
"""
`ArtifactDetails <http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-codepipeline-customactiontype-artifactdetails.html>`__
"""
props: PropsDictType = {
"MaximumCount": (integer, True),
"MinimumCount": (integer, True),
}
class ConfigurationProperties(AWSProperty):
"""
`ConfigurationProperties <http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-codepipeline-customactiontype-configurationproperties.html>`__
"""
props: PropsDictType = {
"Description": (str, False),
"Key": (boolean, True),
"Name": (str, True),
"Queryable": (boolean, False),
"Required": (boolean, True),
"Secret": (boolean, True),
"Type": (str, False),
}
class Settings(AWSProperty):
"""
`Settings <http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-codepipeline-customactiontype-settings.html>`__
"""
props: PropsDictType = {
"EntityUrlTemplate": (str, False),
"ExecutionUrlTemplate": (str, False),
"RevisionUrlTemplate": (str, False),
"ThirdPartyConfigurationUrl": (str, False),
}
class CustomActionType(AWSObject):
"""
`CustomActionType <http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-codepipeline-customactiontype.html>`__
"""
resource_type = "AWS::CodePipeline::CustomActionType"
props: PropsDictType = {
"Category": (str, True),
"ConfigurationProperties": ([ConfigurationProperties], False),
"InputArtifactDetails": (ArtifactDetails, True),
"OutputArtifactDetails": (ArtifactDetails, True),
"Provider": (str, True),
"Settings": (Settings, False),
"Tags": (Tags, False),
"Version": (str, True),
}
class EncryptionKey(AWSProperty):
"""
`EncryptionKey <http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-codepipeline-pipeline-artifactstore-encryptionkey.html>`__
"""
props: PropsDictType = {
"Id": (str, True),
"Type": (str, True),
}
class ArtifactStore(AWSProperty):
"""
`ArtifactStore <http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-codepipeline-pipeline-artifactstore.html>`__
"""
props: PropsDictType = {
"EncryptionKey": (EncryptionKey, False),
"Location": (str, True),
"Type": (str, True),
}
class ArtifactStoreMap(AWSProperty):
"""
`ArtifactStoreMap <http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-codepipeline-pipeline-artifactstoremap.html>`__
"""
props: PropsDictType = {
"ArtifactStore": (ArtifactStore, True),
"Region": (str, True),
}
class DisableInboundStageTransitions(AWSProperty):
"""
`DisableInboundStageTransitions <http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-codepipeline-pipeline-disableinboundstagetransitions.html>`__
"""
props: PropsDictType = {
"Reason": (str, True),
"StageName": (str, True),
}
class ActionTypeId(AWSProperty):
"""
`ActionTypeId <http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-codepipeline-pipeline-stages-actions-actiontypeid.html>`__
"""
props: PropsDictType = {
"Category": (str, True),
"Owner": (str, True),
"Provider": (str, True),
"Version": (str, True),
}
class InputArtifacts(AWSProperty):
"""
`InputArtifacts <http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-codepipeline-pipeline-stages-actions-inputartifacts.html>`__
"""
props: PropsDictType = {
"Name": (str, True),
}
class OutputArtifacts(AWSProperty):
"""
`OutputArtifacts <http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-codepipeline-pipeline-stages-actions-outputartifacts.html>`__
"""
props: PropsDictType = {
"Name": (str, True),
}
class Actions(AWSProperty):
"""
`Actions <http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-codepipeline-pipeline-stages-actions.html>`__
"""
props: PropsDictType = {
"ActionTypeId": (ActionTypeId, True),
"Configuration": (dict, False),
"InputArtifacts": ([InputArtifacts], False),
"Name": (str, True),
"Namespace": (str, False),
"OutputArtifacts": ([OutputArtifacts], False),
"Region": (str, False),
"RoleArn": (str, False),
"RunOrder": (integer, False),
}
class Blockers(AWSProperty):
"""
`Blockers <http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-codepipeline-pipeline-stages-blockers.html>`__
"""
props: PropsDictType = {
"Name": (str, True),
"Type": (str, True),
}
class Stages(AWSProperty):
"""
`Stages <http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-codepipeline-pipeline-stages.html>`__
"""
props: PropsDictType = {
"Actions": ([Actions], True),
"Blockers": ([Blockers], False),
"Name": (str, True),
}
class Pipeline(AWSObject):
"""
`Pipeline <http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-codepipeline-pipeline.html>`__
"""
resource_type = "AWS::CodePipeline::Pipeline"
props: PropsDictType = {
"ArtifactStore": (ArtifactStore, False),
"ArtifactStores": ([ArtifactStoreMap], False),
"DisableInboundStageTransitions": ([DisableInboundStageTransitions], False),
"Name": (str, False),
"RestartExecutionOnUpdate": (boolean, False),
"RoleArn": (str, True),
"Stages": ([Stages], True),
"Tags": (Tags, False),
}
class WebhookAuthConfiguration(AWSProperty):
"""
`WebhookAuthConfiguration <http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-codepipeline-webhook-webhookauthconfiguration.html>`__
"""
props: PropsDictType = {
"AllowedIPRange": (str, False),
"SecretToken": (str, False),
}
class WebhookFilterRule(AWSProperty):
"""
`WebhookFilterRule <http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-codepipeline-webhook-webhookfilterrule.html>`__
"""
props: PropsDictType = {
"JsonPath": (str, True),
"MatchEquals": (str, False),
}
class Webhook(AWSObject):
"""
`Webhook <http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-codepipeline-webhook.html>`__
"""
resource_type = "AWS::CodePipeline::Webhook"
props: PropsDictType = {
"Authentication": (str, True),
"AuthenticationConfiguration": (WebhookAuthConfiguration, True),
"Filters": ([WebhookFilterRule], True),
"Name": (str, False),
"RegisterWithThirdParty": (boolean, False),
"TargetAction": (str, True),
"TargetPipeline": (str, True),
"TargetPipelineVersion": (integer, True),
}
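# A minimal, hypothetical usage sketch (not part of the autogenerated module):
# it wires the classes above into a one-stage pipeline. The bucket names, role
# ARN and S3 source configuration values are illustrative placeholders.
def _example_pipeline() -> Pipeline:
    return Pipeline(
        "ExamplePipeline",
        RoleArn="arn:aws:iam::123456789012:role/ExamplePipelineRole",
        ArtifactStore=ArtifactStore(Type="S3", Location="example-artifact-bucket"),
        Stages=[
            Stages(
                Name="Source",
                Actions=[
                    Actions(
                        Name="SourceAction",
                        ActionTypeId=ActionTypeId(
                            Category="Source",
                            Owner="AWS",
                            Provider="S3",
                            Version="1",
                        ),
                        Configuration={
                            "S3Bucket": "example-source-bucket",
                            "S3ObjectKey": "source.zip",
                        },
                        OutputArtifacts=[OutputArtifacts(Name="SourceOutput")],
                        RunOrder=1,
                    )
                ],
            )
        ],
    )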
|
|
import unittest
import pickle
import sys
from test import support
py_operator = support.import_fresh_module('operator', blocked=['_operator'])
c_operator = support.import_fresh_module('operator', fresh=['_operator'])
class Seq1:
def __init__(self, lst):
self.lst = lst
def __len__(self):
return len(self.lst)
def __getitem__(self, i):
return self.lst[i]
def __add__(self, other):
return self.lst + other.lst
def __mul__(self, other):
return self.lst * other
def __rmul__(self, other):
return other * self.lst
class Seq2(object):
def __init__(self, lst):
self.lst = lst
def __len__(self):
return len(self.lst)
def __getitem__(self, i):
return self.lst[i]
def __add__(self, other):
return self.lst + other.lst
def __mul__(self, other):
return self.lst * other
def __rmul__(self, other):
return other * self.lst
class OperatorTestCase:
def test_lt(self):
operator = self.module
self.assertRaises(TypeError, operator.lt)
self.assertRaises(TypeError, operator.lt, 1j, 2j)
self.assertFalse(operator.lt(1, 0))
self.assertFalse(operator.lt(1, 0.0))
self.assertFalse(operator.lt(1, 1))
self.assertFalse(operator.lt(1, 1.0))
self.assertTrue(operator.lt(1, 2))
self.assertTrue(operator.lt(1, 2.0))
def test_le(self):
operator = self.module
self.assertRaises(TypeError, operator.le)
self.assertRaises(TypeError, operator.le, 1j, 2j)
self.assertFalse(operator.le(1, 0))
self.assertFalse(operator.le(1, 0.0))
self.assertTrue(operator.le(1, 1))
self.assertTrue(operator.le(1, 1.0))
self.assertTrue(operator.le(1, 2))
self.assertTrue(operator.le(1, 2.0))
def test_eq(self):
operator = self.module
class C(object):
def __eq__(self, other):
raise SyntaxError
self.assertRaises(TypeError, operator.eq)
self.assertRaises(SyntaxError, operator.eq, C(), C())
self.assertFalse(operator.eq(1, 0))
self.assertFalse(operator.eq(1, 0.0))
self.assertTrue(operator.eq(1, 1))
self.assertTrue(operator.eq(1, 1.0))
self.assertFalse(operator.eq(1, 2))
self.assertFalse(operator.eq(1, 2.0))
def test_ne(self):
operator = self.module
class C(object):
def __ne__(self, other):
raise SyntaxError
self.assertRaises(TypeError, operator.ne)
self.assertRaises(SyntaxError, operator.ne, C(), C())
self.assertTrue(operator.ne(1, 0))
self.assertTrue(operator.ne(1, 0.0))
self.assertFalse(operator.ne(1, 1))
self.assertFalse(operator.ne(1, 1.0))
self.assertTrue(operator.ne(1, 2))
self.assertTrue(operator.ne(1, 2.0))
def test_ge(self):
operator = self.module
self.assertRaises(TypeError, operator.ge)
self.assertRaises(TypeError, operator.ge, 1j, 2j)
self.assertTrue(operator.ge(1, 0))
self.assertTrue(operator.ge(1, 0.0))
self.assertTrue(operator.ge(1, 1))
self.assertTrue(operator.ge(1, 1.0))
self.assertFalse(operator.ge(1, 2))
self.assertFalse(operator.ge(1, 2.0))
def test_gt(self):
operator = self.module
self.assertRaises(TypeError, operator.gt)
self.assertRaises(TypeError, operator.gt, 1j, 2j)
self.assertTrue(operator.gt(1, 0))
self.assertTrue(operator.gt(1, 0.0))
self.assertFalse(operator.gt(1, 1))
self.assertFalse(operator.gt(1, 1.0))
self.assertFalse(operator.gt(1, 2))
self.assertFalse(operator.gt(1, 2.0))
def test_abs(self):
operator = self.module
self.assertRaises(TypeError, operator.abs)
self.assertRaises(TypeError, operator.abs, None)
self.assertEqual(operator.abs(-1), 1)
self.assertEqual(operator.abs(1), 1)
def test_add(self):
operator = self.module
self.assertRaises(TypeError, operator.add)
self.assertRaises(TypeError, operator.add, None, None)
self.assertTrue(operator.add(3, 4) == 7)
def test_bitwise_and(self):
operator = self.module
self.assertRaises(TypeError, operator.and_)
self.assertRaises(TypeError, operator.and_, None, None)
self.assertTrue(operator.and_(0xf, 0xa) == 0xa)
def test_concat(self):
operator = self.module
self.assertRaises(TypeError, operator.concat)
self.assertRaises(TypeError, operator.concat, None, None)
self.assertTrue(operator.concat('py', 'thon') == 'python')
self.assertTrue(operator.concat([1, 2], [3, 4]) == [1, 2, 3, 4])
self.assertTrue(operator.concat(Seq1([5, 6]), Seq1([7])) == [5, 6, 7])
self.assertTrue(operator.concat(Seq2([5, 6]), Seq2([7])) == [5, 6, 7])
self.assertRaises(TypeError, operator.concat, 13, 29)
def test_countOf(self):
operator = self.module
self.assertRaises(TypeError, operator.countOf)
self.assertRaises(TypeError, operator.countOf, None, None)
self.assertTrue(operator.countOf([1, 2, 1, 3, 1, 4], 3) == 1)
self.assertTrue(operator.countOf([1, 2, 1, 3, 1, 4], 5) == 0)
def test_delitem(self):
operator = self.module
a = [4, 3, 2, 1]
self.assertRaises(TypeError, operator.delitem, a)
self.assertRaises(TypeError, operator.delitem, a, None)
self.assertTrue(operator.delitem(a, 1) is None)
self.assertTrue(a == [4, 2, 1])
def test_floordiv(self):
operator = self.module
self.assertRaises(TypeError, operator.floordiv, 5)
self.assertRaises(TypeError, operator.floordiv, None, None)
self.assertTrue(operator.floordiv(5, 2) == 2)
def test_truediv(self):
operator = self.module
self.assertRaises(TypeError, operator.truediv, 5)
self.assertRaises(TypeError, operator.truediv, None, None)
self.assertTrue(operator.truediv(5, 2) == 2.5)
def test_getitem(self):
operator = self.module
a = range(10)
self.assertRaises(TypeError, operator.getitem)
self.assertRaises(TypeError, operator.getitem, a, None)
self.assertTrue(operator.getitem(a, 2) == 2)
def test_indexOf(self):
operator = self.module
self.assertRaises(TypeError, operator.indexOf)
self.assertRaises(TypeError, operator.indexOf, None, None)
self.assertTrue(operator.indexOf([4, 3, 2, 1], 3) == 1)
self.assertRaises(ValueError, operator.indexOf, [4, 3, 2, 1], 0)
def test_invert(self):
operator = self.module
self.assertRaises(TypeError, operator.invert)
self.assertRaises(TypeError, operator.invert, None)
self.assertEqual(operator.inv(4), -5)
def test_lshift(self):
operator = self.module
self.assertRaises(TypeError, operator.lshift)
self.assertRaises(TypeError, operator.lshift, None, 42)
self.assertTrue(operator.lshift(5, 1) == 10)
self.assertTrue(operator.lshift(5, 0) == 5)
self.assertRaises(ValueError, operator.lshift, 2, -1)
def test_mod(self):
operator = self.module
self.assertRaises(TypeError, operator.mod)
self.assertRaises(TypeError, operator.mod, None, 42)
self.assertTrue(operator.mod(5, 2) == 1)
def test_mul(self):
operator = self.module
self.assertRaises(TypeError, operator.mul)
self.assertRaises(TypeError, operator.mul, None, None)
self.assertTrue(operator.mul(5, 2) == 10)
def test_matmul(self):
operator = self.module
self.assertRaises(TypeError, operator.matmul)
self.assertRaises(TypeError, operator.matmul, 42, 42)
class M:
def __matmul__(self, other):
return other - 1
self.assertEqual(M() @ 42, 41)
def test_neg(self):
operator = self.module
self.assertRaises(TypeError, operator.neg)
self.assertRaises(TypeError, operator.neg, None)
self.assertEqual(operator.neg(5), -5)
self.assertEqual(operator.neg(-5), 5)
self.assertEqual(operator.neg(0), 0)
self.assertEqual(operator.neg(-0), 0)
def test_bitwise_or(self):
operator = self.module
self.assertRaises(TypeError, operator.or_)
self.assertRaises(TypeError, operator.or_, None, None)
self.assertTrue(operator.or_(0xa, 0x5) == 0xf)
def test_pos(self):
operator = self.module
self.assertRaises(TypeError, operator.pos)
self.assertRaises(TypeError, operator.pos, None)
self.assertEqual(operator.pos(5), 5)
self.assertEqual(operator.pos(-5), -5)
self.assertEqual(operator.pos(0), 0)
self.assertEqual(operator.pos(-0), 0)
def test_pow(self):
operator = self.module
self.assertRaises(TypeError, operator.pow)
self.assertRaises(TypeError, operator.pow, None, None)
self.assertEqual(operator.pow(3,5), 3**5)
self.assertRaises(TypeError, operator.pow, 1)
self.assertRaises(TypeError, operator.pow, 1, 2, 3)
def test_rshift(self):
operator = self.module
self.assertRaises(TypeError, operator.rshift)
self.assertRaises(TypeError, operator.rshift, None, 42)
self.assertTrue(operator.rshift(5, 1) == 2)
self.assertTrue(operator.rshift(5, 0) == 5)
self.assertRaises(ValueError, operator.rshift, 2, -1)
def test_contains(self):
operator = self.module
self.assertRaises(TypeError, operator.contains)
self.assertRaises(TypeError, operator.contains, None, None)
self.assertTrue(operator.contains(range(4), 2))
self.assertFalse(operator.contains(range(4), 5))
def test_setitem(self):
operator = self.module
a = list(range(3))
self.assertRaises(TypeError, operator.setitem, a)
self.assertRaises(TypeError, operator.setitem, a, None, None)
self.assertTrue(operator.setitem(a, 0, 2) is None)
self.assertTrue(a == [2, 1, 2])
self.assertRaises(IndexError, operator.setitem, a, 4, 2)
def test_sub(self):
operator = self.module
self.assertRaises(TypeError, operator.sub)
self.assertRaises(TypeError, operator.sub, None, None)
self.assertTrue(operator.sub(5, 2) == 3)
def test_truth(self):
operator = self.module
class C(object):
def __bool__(self):
raise SyntaxError
self.assertRaises(TypeError, operator.truth)
self.assertRaises(SyntaxError, operator.truth, C())
self.assertTrue(operator.truth(5))
self.assertTrue(operator.truth([0]))
self.assertFalse(operator.truth(0))
self.assertFalse(operator.truth([]))
def test_bitwise_xor(self):
operator = self.module
self.assertRaises(TypeError, operator.xor)
self.assertRaises(TypeError, operator.xor, None, None)
self.assertTrue(operator.xor(0xb, 0xc) == 0x7)
def test_is(self):
operator = self.module
a = b = 'xyzpdq'
c = a[:3] + b[3:]
self.assertRaises(TypeError, operator.is_)
self.assertTrue(operator.is_(a, b))
self.assertFalse(operator.is_(a,c))
def test_is_not(self):
operator = self.module
a = b = 'xyzpdq'
c = a[:3] + b[3:]
self.assertRaises(TypeError, operator.is_not)
self.assertFalse(operator.is_not(a, b))
self.assertTrue(operator.is_not(a,c))
def test_attrgetter(self):
operator = self.module
class A:
pass
a = A()
a.name = 'arthur'
f = operator.attrgetter('name')
self.assertEqual(f(a), 'arthur')
self.assertRaises(TypeError, f)
self.assertRaises(TypeError, f, a, 'dent')
self.assertRaises(TypeError, f, a, surname='dent')
f = operator.attrgetter('rank')
self.assertRaises(AttributeError, f, a)
self.assertRaises(TypeError, operator.attrgetter, 2)
self.assertRaises(TypeError, operator.attrgetter)
# multiple gets
record = A()
record.x = 'X'
record.y = 'Y'
record.z = 'Z'
self.assertEqual(operator.attrgetter('x','z','y')(record), ('X', 'Z', 'Y'))
self.assertRaises(TypeError, operator.attrgetter, ('x', (), 'y'))
class C(object):
def __getattr__(self, name):
raise SyntaxError
self.assertRaises(SyntaxError, operator.attrgetter('foo'), C())
# recursive gets
a = A()
a.name = 'arthur'
a.child = A()
a.child.name = 'thomas'
f = operator.attrgetter('child.name')
self.assertEqual(f(a), 'thomas')
self.assertRaises(AttributeError, f, a.child)
f = operator.attrgetter('name', 'child.name')
self.assertEqual(f(a), ('arthur', 'thomas'))
f = operator.attrgetter('name', 'child.name', 'child.child.name')
self.assertRaises(AttributeError, f, a)
f = operator.attrgetter('child.')
self.assertRaises(AttributeError, f, a)
f = operator.attrgetter('.child')
self.assertRaises(AttributeError, f, a)
a.child.child = A()
a.child.child.name = 'johnson'
f = operator.attrgetter('child.child.name')
self.assertEqual(f(a), 'johnson')
f = operator.attrgetter('name', 'child.name', 'child.child.name')
self.assertEqual(f(a), ('arthur', 'thomas', 'johnson'))
def test_itemgetter(self):
operator = self.module
a = 'ABCDE'
f = operator.itemgetter(2)
self.assertEqual(f(a), 'C')
self.assertRaises(TypeError, f)
self.assertRaises(TypeError, f, a, 3)
self.assertRaises(TypeError, f, a, size=3)
f = operator.itemgetter(10)
self.assertRaises(IndexError, f, a)
class C(object):
def __getitem__(self, name):
raise SyntaxError
self.assertRaises(SyntaxError, operator.itemgetter(42), C())
f = operator.itemgetter('name')
self.assertRaises(TypeError, f, a)
self.assertRaises(TypeError, operator.itemgetter)
d = dict(key='val')
f = operator.itemgetter('key')
self.assertEqual(f(d), 'val')
f = operator.itemgetter('nonkey')
self.assertRaises(KeyError, f, d)
# example used in the docs
inventory = [('apple', 3), ('banana', 2), ('pear', 5), ('orange', 1)]
getcount = operator.itemgetter(1)
self.assertEqual(list(map(getcount, inventory)), [3, 2, 5, 1])
self.assertEqual(sorted(inventory, key=getcount),
[('orange', 1), ('banana', 2), ('apple', 3), ('pear', 5)])
# multiple gets
data = list(map(str, range(20)))
self.assertEqual(operator.itemgetter(2,10,5)(data), ('2', '10', '5'))
self.assertRaises(TypeError, operator.itemgetter(2, 'x', 5), data)
def test_methodcaller(self):
operator = self.module
self.assertRaises(TypeError, operator.methodcaller)
self.assertRaises(TypeError, operator.methodcaller, 12)
class A:
def foo(self, *args, **kwds):
return args[0] + args[1]
def bar(self, f=42):
return f
def baz(*args, **kwds):
return kwds['name'], kwds['self']
a = A()
f = operator.methodcaller('foo')
self.assertRaises(IndexError, f, a)
f = operator.methodcaller('foo', 1, 2)
self.assertEqual(f(a), 3)
self.assertRaises(TypeError, f)
self.assertRaises(TypeError, f, a, 3)
self.assertRaises(TypeError, f, a, spam=3)
f = operator.methodcaller('bar')
self.assertEqual(f(a), 42)
self.assertRaises(TypeError, f, a, a)
f = operator.methodcaller('bar', f=5)
self.assertEqual(f(a), 5)
f = operator.methodcaller('baz', name='spam', self='eggs')
self.assertEqual(f(a), ('spam', 'eggs'))
def test_inplace(self):
operator = self.module
class C(object):
def __iadd__ (self, other): return "iadd"
def __iand__ (self, other): return "iand"
def __ifloordiv__(self, other): return "ifloordiv"
def __ilshift__ (self, other): return "ilshift"
def __imod__ (self, other): return "imod"
def __imul__ (self, other): return "imul"
def __imatmul__ (self, other): return "imatmul"
def __ior__ (self, other): return "ior"
def __ipow__ (self, other): return "ipow"
def __irshift__ (self, other): return "irshift"
def __isub__ (self, other): return "isub"
def __itruediv__ (self, other): return "itruediv"
def __ixor__ (self, other): return "ixor"
def __getitem__(self, other): return 5 # so that C is a sequence
c = C()
self.assertEqual(operator.iadd (c, 5), "iadd")
self.assertEqual(operator.iand (c, 5), "iand")
self.assertEqual(operator.ifloordiv(c, 5), "ifloordiv")
self.assertEqual(operator.ilshift (c, 5), "ilshift")
self.assertEqual(operator.imod (c, 5), "imod")
self.assertEqual(operator.imul (c, 5), "imul")
self.assertEqual(operator.imatmul (c, 5), "imatmul")
self.assertEqual(operator.ior (c, 5), "ior")
self.assertEqual(operator.ipow (c, 5), "ipow")
self.assertEqual(operator.irshift (c, 5), "irshift")
self.assertEqual(operator.isub (c, 5), "isub")
self.assertEqual(operator.itruediv (c, 5), "itruediv")
self.assertEqual(operator.ixor (c, 5), "ixor")
self.assertEqual(operator.iconcat (c, c), "iadd")
def test_length_hint(self):
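# length_hint(obj, default=0) tries len(obj) first, then obj.__length_hint__();
# a hint of NotImplemented (or a TypeError raised by the hint) falls back to
# the default, a non-integer hint raises TypeError, and a negative hint raises
# ValueError. The cases below exercise each of these paths.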
operator = self.module
class X(object):
def __init__(self, value):
self.value = value
def __length_hint__(self):
if type(self.value) is type:
raise self.value
else:
return self.value
self.assertEqual(operator.length_hint([], 2), 0)
self.assertEqual(operator.length_hint(iter([1, 2, 3])), 3)
self.assertEqual(operator.length_hint(X(2)), 2)
self.assertEqual(operator.length_hint(X(NotImplemented), 4), 4)
self.assertEqual(operator.length_hint(X(TypeError), 12), 12)
with self.assertRaises(TypeError):
operator.length_hint(X("abc"))
with self.assertRaises(ValueError):
operator.length_hint(X(-2))
with self.assertRaises(LookupError):
operator.length_hint(X(LookupError))
def test_dunder_is_original(self):
operator = self.module
names = [name for name in dir(operator) if not name.startswith('_')]
for name in names:
orig = getattr(operator, name)
dunder = getattr(operator, '__' + name.strip('_') + '__', None)
if dunder:
self.assertIs(dunder, orig)
class PyOperatorTestCase(OperatorTestCase, unittest.TestCase):
module = py_operator
@unittest.skipUnless(c_operator, 'requires _operator')
class COperatorTestCase(OperatorTestCase, unittest.TestCase):
module = c_operator
class OperatorPickleTestCase:
def copy(self, obj, proto):
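# pickle resolves "operator" through sys.modules both when dumping and when
# loading, so swapping in self.module / self.module2 lets each test serialize
# with one implementation (pure Python or C) and deserialize with the other.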
with support.swap_item(sys.modules, 'operator', self.module):
pickled = pickle.dumps(obj, proto)
with support.swap_item(sys.modules, 'operator', self.module2):
return pickle.loads(pickled)
def test_attrgetter(self):
attrgetter = self.module.attrgetter
class A:
pass
a = A()
a.x = 'X'
a.y = 'Y'
a.z = 'Z'
a.t = A()
a.t.u = A()
a.t.u.v = 'V'
for proto in range(pickle.HIGHEST_PROTOCOL + 1):
with self.subTest(proto=proto):
f = attrgetter('x')
f2 = self.copy(f, proto)
self.assertEqual(repr(f2), repr(f))
self.assertEqual(f2(a), f(a))
# multiple gets
f = attrgetter('x', 'y', 'z')
f2 = self.copy(f, proto)
self.assertEqual(repr(f2), repr(f))
self.assertEqual(f2(a), f(a))
# recursive gets
f = attrgetter('t.u.v')
f2 = self.copy(f, proto)
self.assertEqual(repr(f2), repr(f))
self.assertEqual(f2(a), f(a))
def test_itemgetter(self):
itemgetter = self.module.itemgetter
a = 'ABCDE'
for proto in range(pickle.HIGHEST_PROTOCOL + 1):
with self.subTest(proto=proto):
f = itemgetter(2)
f2 = self.copy(f, proto)
self.assertEqual(repr(f2), repr(f))
self.assertEqual(f2(a), f(a))
# multiple gets
f = itemgetter(2, 0, 4)
f2 = self.copy(f, proto)
self.assertEqual(repr(f2), repr(f))
self.assertEqual(f2(a), f(a))
def test_methodcaller(self):
methodcaller = self.module.methodcaller
class A:
def foo(self, *args, **kwds):
return args[0] + args[1]
def bar(self, f=42):
return f
def baz(*args, **kwds):
return kwds['name'], kwds['self']
a = A()
for proto in range(pickle.HIGHEST_PROTOCOL + 1):
with self.subTest(proto=proto):
f = methodcaller('bar')
f2 = self.copy(f, proto)
self.assertEqual(repr(f2), repr(f))
self.assertEqual(f2(a), f(a))
# positional args
f = methodcaller('foo', 1, 2)
f2 = self.copy(f, proto)
self.assertEqual(repr(f2), repr(f))
self.assertEqual(f2(a), f(a))
# keyword args
f = methodcaller('bar', f=5)
f2 = self.copy(f, proto)
self.assertEqual(repr(f2), repr(f))
self.assertEqual(f2(a), f(a))
f = methodcaller('baz', self='eggs', name='spam')
f2 = self.copy(f, proto)
# Can't test repr consistently with multiple keyword args
self.assertEqual(f2(a), f(a))
class PyPyOperatorPickleTestCase(OperatorPickleTestCase, unittest.TestCase):
module = py_operator
module2 = py_operator
@unittest.skipUnless(c_operator, 'requires _operator')
class PyCOperatorPickleTestCase(OperatorPickleTestCase, unittest.TestCase):
module = py_operator
module2 = c_operator
@unittest.skipUnless(c_operator, 'requires _operator')
class CPyOperatorPickleTestCase(OperatorPickleTestCase, unittest.TestCase):
module = c_operator
module2 = py_operator
@unittest.skipUnless(c_operator, 'requires _operator')
class CCOperatorPickleTestCase(OperatorPickleTestCase, unittest.TestCase):
module = c_operator
module2 = c_operator
if __name__ == "__main__":
unittest.main()
|
|
# Copyright (C) 2010 Google Inc. All rights reserved.
# Copyright (C) 2010 Gabor Rapcsanyi ([email protected]), University of Szeged
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import logging
import signal
import time
from webkitpy.layout_tests.models import test_expectations
from webkitpy.layout_tests.models import test_failures
_log = logging.getLogger(__name__)
OK_EXIT_STATUS = 0
# This matches what the shell does on POSIX.
INTERRUPTED_EXIT_STATUS = signal.SIGINT + 128
# POSIX limits status codes to 0-255. Normally run-webkit-tests returns the number
# of tests that failed. These indicate exceptional conditions triggered by the
# script itself, so we count backwards from 255 (aka -1) to enumerate them.
#
# FIXME: crbug.com/357866. We really shouldn't return the number of failures
# in the exit code at all.
EARLY_EXIT_STATUS = 251
SYS_DEPS_EXIT_STATUS = 252
NO_TESTS_EXIT_STATUS = 253
NO_DEVICES_EXIT_STATUS = 254
UNEXPECTED_ERROR_EXIT_STATUS = 255
ERROR_CODES = (
INTERRUPTED_EXIT_STATUS,
EARLY_EXIT_STATUS,
SYS_DEPS_EXIT_STATUS,
NO_TESTS_EXIT_STATUS,
NO_DEVICES_EXIT_STATUS,
UNEXPECTED_ERROR_EXIT_STATUS,
)
# In order to avoid colliding with the above codes, we put a ceiling on
# the value returned by num_regressions
MAX_FAILURES_EXIT_STATUS = 101
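# A caller turning results into a process exit code would clamp the failure
# count so it cannot collide with the error codes above; a hypothetical sketch
# (not necessarily the exact logic used elsewhere in webkitpy):
#   exit_code = min(num_regressions, MAX_FAILURES_EXIT_STATUS)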
class TestRunException(Exception):
def __init__(self, code, msg):
self.code = code
self.msg = msg
class TestRunResults(object):
def __init__(self, expectations, num_tests):
self.total = num_tests
self.remaining = self.total
self.expectations = expectations
self.expected = 0
self.expected_failures = 0
self.unexpected = 0
self.unexpected_failures = 0
self.unexpected_crashes = 0
self.unexpected_timeouts = 0
self.tests_by_expectation = {}
self.tests_by_timeline = {}
self.results_by_name = {} # Map of test name to the last result for the test.
self.all_results = [] # All results from a run, including every iteration of every test.
self.unexpected_results_by_name = {}
self.failures_by_name = {}
self.total_failures = 0
self.expected_skips = 0
for expectation in test_expectations.TestExpectations.EXPECTATIONS.values():
self.tests_by_expectation[expectation] = set()
for timeline in test_expectations.TestExpectations.TIMELINES.values():
self.tests_by_timeline[timeline] = expectations.get_tests_with_timeline(timeline)
self.slow_tests = set()
self.interrupted = False
self.keyboard_interrupted = False
self.run_time = 0 # The wall clock time spent running the tests (layout_test_runner.run()).
def add(self, test_result, expected, test_is_slow):
result_type_for_stats = test_result.type
if test_expectations.WONTFIX in self.expectations.model().get_expectations(test_result.test_name):
result_type_for_stats = test_expectations.WONTFIX
self.tests_by_expectation[result_type_for_stats].add(test_result.test_name)
self.results_by_name[test_result.test_name] = test_result
if test_result.type != test_expectations.SKIP:
self.all_results.append(test_result)
self.remaining -= 1
if len(test_result.failures):
self.total_failures += 1
self.failures_by_name[test_result.test_name] = test_result.failures
if expected:
self.expected += 1
if test_result.type == test_expectations.SKIP:
self.expected_skips += 1
elif test_result.type != test_expectations.PASS:
self.expected_failures += 1
else:
self.unexpected_results_by_name[test_result.test_name] = test_result
self.unexpected += 1
if len(test_result.failures):
self.unexpected_failures += 1
if test_result.type == test_expectations.CRASH:
self.unexpected_crashes += 1
elif test_result.type == test_expectations.TIMEOUT:
self.unexpected_timeouts += 1
if test_is_slow:
self.slow_tests.add(test_result.test_name)
class RunDetails(object):
def __init__(self, exit_code, summarized_full_results=None,
summarized_failing_results=None, initial_results=None,
all_retry_results=None, enabled_pixel_tests_in_retry=False):
self.exit_code = exit_code
self.summarized_full_results = summarized_full_results
self.summarized_failing_results = summarized_failing_results
self.initial_results = initial_results
self.all_retry_results = all_retry_results or []
self.enabled_pixel_tests_in_retry = enabled_pixel_tests_in_retry
def _interpret_test_failures(failures):
test_dict = {}
failure_types = [type(failure) for failure in failures]
# FIXME: get rid of all this is_* values once there is a 1:1 map between
# TestFailure type and test_expectations.EXPECTATION.
if test_failures.FailureMissingAudio in failure_types:
test_dict['is_missing_audio'] = True
if test_failures.FailureMissingResult in failure_types:
test_dict['is_missing_text'] = True
if test_failures.FailureMissingImage in failure_types or test_failures.FailureMissingImageHash in failure_types:
test_dict['is_missing_image'] = True
if test_failures.FailureTestHarnessAssertion in failure_types:
test_dict['is_testharness_test'] = True
return test_dict
def summarize_results(port_obj, expectations, initial_results,
all_retry_results, enabled_pixel_tests_in_retry,
only_include_failing=False):
"""Returns a dictionary containing a summary of the test runs, with the following fields:
'version': a version indicator
'fixable': The number of fixable tests (NOW - PASS)
'skipped': The number of skipped tests (NOW & SKIPPED)
'num_regressions': The number of non-flaky failures
'num_flaky': The number of flaky failures
'num_passes': The number of expected and unexpected passes
'tests': a dict of tests -> {'expected': '...', 'actual': '...'}
"""
results = {}
results['version'] = 3
all_retry_results = all_retry_results or []
tbe = initial_results.tests_by_expectation
tbt = initial_results.tests_by_timeline
results['fixable'] = len(tbt[test_expectations.NOW] - tbe[test_expectations.PASS])
# FIXME: Remove this. It is redundant with results['num_failures_by_type'].
results['skipped'] = len(tbt[test_expectations.NOW] & tbe[test_expectations.SKIP])
num_passes = 0
num_flaky = 0
num_regressions = 0
keywords = {}
for expectation_string, expectation_enum in test_expectations.TestExpectations.EXPECTATIONS.iteritems():
keywords[expectation_enum] = expectation_string.upper()
num_failures_by_type = {}
for expectation in initial_results.tests_by_expectation:
tests = initial_results.tests_by_expectation[expectation]
if expectation != test_expectations.WONTFIX:
tests &= tbt[test_expectations.NOW]
num_failures_by_type[keywords[expectation]] = len(tests)
# The number of failures by type.
results['num_failures_by_type'] = num_failures_by_type
tests = {}
for test_name, result in initial_results.results_by_name.iteritems():
expected = expectations.get_expectations_string(test_name)
actual = [keywords[result.type]]
actual_types = [result.type]
if only_include_failing and result.type == test_expectations.SKIP:
continue
if result.type == test_expectations.PASS:
num_passes += 1
if not result.has_stderr and only_include_failing:
continue
elif (result.type != test_expectations.SKIP and
test_name in initial_results.unexpected_results_by_name):
# Loop through retry results to collate results and determine
# whether this is a regression, unexpected pass, or flaky test.
is_flaky = False
has_unexpected_pass = False
for retry_attempt_results in all_retry_results:
# If a test passes on one of the retries, it won't be in the subsequent retries.
if test_name not in retry_attempt_results.results_by_name:
break
retry_result_type = retry_attempt_results.results_by_name[test_name].type
actual.append(keywords[retry_result_type])
actual_types.append(retry_result_type)
if test_name in retry_attempt_results.unexpected_results_by_name:
if retry_result_type == test_expectations.PASS:
# The test failed unexpectedly at first, then passed
# unexpectedly on a subsequent run -> unexpected pass.
has_unexpected_pass = True
else:
# The test failed unexpectedly at first but then ran as
# expected on a subsequent run -> flaky.
is_flaky = True
if len(set(actual)) == 1:
actual = [actual[0]]
actual_types = [actual_types[0]]
if is_flaky:
num_flaky += 1
elif has_unexpected_pass:
num_passes += 1
if not result.has_stderr and only_include_failing:
continue
else:
# Either no retries or all retries failed unexpectedly.
num_regressions += 1
test_dict = {}
rounded_run_time = round(result.test_run_time, 1)
if rounded_run_time:
test_dict['time'] = rounded_run_time
if result.has_stderr:
test_dict['has_stderr'] = True
bugs = expectations.model().get_expectation_line(test_name).bugs
if bugs:
test_dict['bugs'] = bugs
if result.reftest_type:
test_dict.update(reftest_type=list(result.reftest_type))
test_dict['expected'] = expected
test_dict['actual'] = ' '.join(actual)
def is_expected(actual_result):
return expectations.matches_an_expected_result(test_name, actual_result,
port_obj.get_option('pixel_tests') or result.reftest_type,
port_obj.get_option('enable_sanitizer'))
# To avoid bloating the output results json too much, only add an entry for whether the failure is unexpected.
if not any(is_expected(actual_result) for actual_result in actual_types):
test_dict['is_unexpected'] = True
test_dict.update(_interpret_test_failures(result.failures))
for retry_attempt_results in all_retry_results:
retry_result = retry_attempt_results.unexpected_results_by_name.get(test_name)
if retry_result:
test_dict.update(_interpret_test_failures(retry_result.failures))
if result.has_repaint_overlay:
test_dict['has_repaint_overlay'] = True
# Store test hierarchically by directory. e.g.
# foo/bar/baz.html: test_dict
# foo/bar/baz1.html: test_dict
#
# becomes
# foo: {
# bar: {
# baz.html: test_dict,
# baz1.html: test_dict
# }
# }
parts = test_name.split('/')
current_map = tests
for i, part in enumerate(parts):
if i == (len(parts) - 1):
current_map[part] = test_dict
break
if part not in current_map:
current_map[part] = {}
current_map = current_map[part]
results['tests'] = tests
# FIXME: Remove this. It is redundant with results['num_failures_by_type'].
results['num_passes'] = num_passes
results['num_flaky'] = num_flaky
# FIXME: Remove this. It is redundant with results['num_failures_by_type'].
results['num_regressions'] = num_regressions
# Does results.html have enough information to compute this itself? (by
# checking total number of results vs. total number of tests?)
results['interrupted'] = initial_results.interrupted
results['layout_tests_dir'] = port_obj.layout_tests_dir()
results['pixel_tests_enabled'] = port_obj.get_option('pixel_tests')
results['seconds_since_epoch'] = int(time.time())
results['build_number'] = port_obj.get_option('build_number')
results['builder_name'] = port_obj.get_option('builder_name')
if port_obj.get_option('order') == 'random':
results['random_order_seed'] = port_obj.get_option('seed')
results['path_delimiter'] = '/'
# Don't do this by default since it takes >100ms.
# It's only used for rebaselining and uploading data to the flakiness dashboard.
results['chromium_revision'] = ''
if port_obj.get_option('builder_name'):
path = port_obj.repository_path()
git = port_obj.host.git(path=path)
if git:
results['chromium_revision'] = str(git.commit_position(path))
else:
_log.warning('Failed to determine chromium commit position for %s, '
'leaving "chromium_revision" key blank in full_results.json.',
path)
return results
|
|
import itertools
from sqlalchemy import Boolean
from sqlalchemy import delete
from sqlalchemy import exc as sa_exc
from sqlalchemy import func
from sqlalchemy import insert
from sqlalchemy import Integer
from sqlalchemy import MetaData
from sqlalchemy import select
from sqlalchemy import Sequence
from sqlalchemy import String
from sqlalchemy import testing
from sqlalchemy import type_coerce
from sqlalchemy import update
from sqlalchemy.testing import assert_raises_message
from sqlalchemy.testing import AssertsCompiledSQL
from sqlalchemy.testing import AssertsExecutionResults
from sqlalchemy.testing import eq_
from sqlalchemy.testing import fixtures
from sqlalchemy.testing.schema import Column
from sqlalchemy.testing.schema import Table
from sqlalchemy.types import TypeDecorator
class ReturnCombinationTests(fixtures.TestBase, AssertsCompiledSQL):
__dialect__ = "postgresql"
@testing.fixture
def table_fixture(self):
return Table(
"foo",
MetaData(),
Column("id", Integer, primary_key=True),
Column("q", Integer, server_default="5"),
Column("x", Integer),
Column("y", Integer),
)
@testing.combinations(
(
insert,
"INSERT INTO foo (id, q, x, y) "
"VALUES (%(id)s, %(q)s, %(x)s, %(y)s)",
),
(update, "UPDATE foo SET id=%(id)s, q=%(q)s, x=%(x)s, y=%(y)s"),
(delete, "DELETE FROM foo"),
argnames="dml_fn, sql_frag",
id_="na",
)
def test_return_combinations(self, table_fixture, dml_fn, sql_frag):
t = table_fixture
stmt = dml_fn(t)
stmt = stmt.returning(t.c.x)
stmt = stmt.returning(t.c.y)
self.assert_compile(
stmt,
"%s RETURNING foo.x, foo.y" % (sql_frag),
)
def test_return_no_return_defaults(self, table_fixture):
t = table_fixture
stmt = t.insert()
stmt = stmt.returning(t.c.x)
assert_raises_message(
sa_exc.InvalidRequestError,
"RETURNING is already configured on this statement",
stmt.return_defaults,
)
def test_return_defaults_no_returning(self, table_fixture):
t = table_fixture
stmt = t.insert()
stmt = stmt.return_defaults()
assert_raises_message(
sa_exc.InvalidRequestError,
r"return_defaults\(\) is already configured on this statement",
stmt.returning,
t.c.x,
)
class ReturningTest(fixtures.TablesTest, AssertsExecutionResults):
__requires__ = ("returning",)
__backend__ = True
run_create_tables = "each"
@classmethod
def define_tables(cls, metadata):
class GoofyType(TypeDecorator):
impl = String
cache_ok = True
def process_bind_param(self, value, dialect):
if value is None:
return None
return "FOO" + value
def process_result_value(self, value, dialect):
if value is None:
return None
return value + "BAR"
cls.GoofyType = GoofyType
Table(
"tables",
metadata,
Column(
"id", Integer, primary_key=True, test_needs_autoincrement=True
),
Column("persons", Integer),
Column("full", Boolean),
Column("goofy", GoofyType(50)),
Column("strval", String(50)),
)
def test_column_targeting(self, connection):
table = self.tables.tables
result = connection.execute(
table.insert().returning(table.c.id, table.c.full),
{"persons": 1, "full": False},
)
row = result.first()._mapping
assert row[table.c.id] == row["id"] == 1
assert row[table.c.full] == row["full"]
assert row["full"] is False
result = connection.execute(
table.insert()
.values(persons=5, full=True, goofy="somegoofy")
.returning(table.c.persons, table.c.full, table.c.goofy)
)
row = result.first()._mapping
assert row[table.c.persons] == row["persons"] == 5
assert row[table.c.full] == row["full"]
eq_(row[table.c.goofy], row["goofy"])
eq_(row["goofy"], "FOOsomegoofyBAR")
def test_labeling(self, connection):
table = self.tables.tables
result = connection.execute(
table.insert()
.values(persons=6)
.returning(table.c.persons.label("lala"))
)
row = result.first()._mapping
assert row["lala"] == 6
def test_anon_expressions(self, connection):
table = self.tables.tables
GoofyType = self.GoofyType
result = connection.execute(
table.insert()
.values(goofy="someOTHERgoofy")
.returning(func.lower(table.c.goofy, type_=GoofyType))
)
row = result.first()
eq_(row[0], "foosomeothergoofyBAR")
result = connection.execute(
table.insert().values(persons=12).returning(table.c.persons + 18)
)
row = result.first()
eq_(row[0], 30)
def test_update_returning(self, connection):
table = self.tables.tables
connection.execute(
table.insert(),
[{"persons": 5, "full": False}, {"persons": 3, "full": False}],
)
result = connection.execute(
table.update()
.values(dict(full=True))
.where(table.c.persons > 4)
.returning(table.c.id)
)
eq_(result.fetchall(), [(1,)])
result2 = connection.execute(
select(table.c.id, table.c.full).order_by(table.c.id)
)
eq_(result2.fetchall(), [(1, True), (2, False)])
@testing.fails_on(
"mssql",
"driver has unknown issue with string concatenation "
"in INSERT RETURNING",
)
def test_insert_returning_w_expression_one(self, connection):
table = self.tables.tables
result = connection.execute(
table.insert().returning(table.c.strval + "hi"),
{"persons": 5, "full": False, "strval": "str1"},
)
eq_(result.fetchall(), [("str1hi",)])
result2 = connection.execute(
select(table.c.id, table.c.strval).order_by(table.c.id)
)
eq_(result2.fetchall(), [(1, "str1")])
def test_insert_returning_w_type_coerce_expression(self, connection):
table = self.tables.tables
result = connection.execute(
table.insert().returning(type_coerce(table.c.goofy, String)),
{"persons": 5, "goofy": "somegoofy"},
)
eq_(result.fetchall(), [("FOOsomegoofy",)])
result2 = connection.execute(
select(table.c.id, table.c.goofy).order_by(table.c.id)
)
eq_(result2.fetchall(), [(1, "FOOsomegoofyBAR")])
def test_update_returning_w_expression_one(self, connection):
table = self.tables.tables
connection.execute(
table.insert(),
[
{"persons": 5, "full": False, "strval": "str1"},
{"persons": 3, "full": False, "strval": "str2"},
],
)
result = connection.execute(
table.update()
.where(table.c.persons > 4)
.values(full=True)
.returning(table.c.strval + "hi")
)
eq_(result.fetchall(), [("str1hi",)])
result2 = connection.execute(
select(table.c.id, table.c.strval).order_by(table.c.id)
)
eq_(result2.fetchall(), [(1, "str1"), (2, "str2")])
def test_update_returning_w_type_coerce_expression(self, connection):
table = self.tables.tables
connection.execute(
table.insert(),
[
{"persons": 5, "goofy": "somegoofy1"},
{"persons": 3, "goofy": "somegoofy2"},
],
)
result = connection.execute(
table.update()
.where(table.c.persons > 4)
.values(goofy="newgoofy")
.returning(type_coerce(table.c.goofy, String))
)
eq_(result.fetchall(), [("FOOnewgoofy",)])
result2 = connection.execute(
select(table.c.id, table.c.goofy).order_by(table.c.id)
)
eq_(
result2.fetchall(),
[(1, "FOOnewgoofyBAR"), (2, "FOOsomegoofy2BAR")],
)
@testing.requires.full_returning
def test_update_full_returning(self, connection):
table = self.tables.tables
connection.execute(
table.insert(),
[{"persons": 5, "full": False}, {"persons": 3, "full": False}],
)
result = connection.execute(
table.update()
.where(table.c.persons > 2)
.values(full=True)
.returning(table.c.id, table.c.full)
)
eq_(result.fetchall(), [(1, True), (2, True)])
@testing.requires.full_returning
def test_delete_full_returning(self, connection):
table = self.tables.tables
connection.execute(
table.insert(),
[{"persons": 5, "full": False}, {"persons": 3, "full": False}],
)
result = connection.execute(
table.delete().returning(table.c.id, table.c.full)
)
eq_(result.fetchall(), [(1, False), (2, False)])
def test_insert_returning(self, connection):
table = self.tables.tables
result = connection.execute(
table.insert().returning(table.c.id), {"persons": 1, "full": False}
)
eq_(result.fetchall(), [(1,)])
@testing.requires.multivalues_inserts
def test_multirow_returning(self, connection):
table = self.tables.tables
ins = (
table.insert()
.returning(table.c.id, table.c.persons)
.values(
[
{"persons": 1, "full": False},
{"persons": 2, "full": True},
{"persons": 3, "full": False},
]
)
)
result = connection.execute(ins)
eq_(result.fetchall(), [(1, 1), (2, 2), (3, 3)])
def test_no_ipk_on_returning(self, connection):
table = self.tables.tables
result = connection.execute(
table.insert().returning(table.c.id), {"persons": 1, "full": False}
)
assert_raises_message(
sa_exc.InvalidRequestError,
r"Can't call inserted_primary_key when returning\(\) is used.",
getattr,
result,
"inserted_primary_key",
)
@testing.fails_on_everything_except("postgresql")
def test_literal_returning(self, connection):
if testing.against("postgresql"):
literal_true = "true"
else:
literal_true = "1"
result4 = connection.exec_driver_sql(
'insert into tables (id, persons, "full") '
"values (5, 10, %s) returning persons" % literal_true
)
eq_([dict(row._mapping) for row in result4], [{"persons": 10}])
def test_delete_returning(self, connection):
table = self.tables.tables
connection.execute(
table.insert(),
[{"persons": 5, "full": False}, {"persons": 3, "full": False}],
)
result = connection.execute(
table.delete().where(table.c.persons > 4).returning(table.c.id)
)
eq_(result.fetchall(), [(1,)])
result2 = connection.execute(
select(table.c.id, table.c.full).order_by(table.c.id)
)
eq_(result2.fetchall(), [(2, False)])
class CompositeStatementTest(fixtures.TestBase):
__requires__ = ("returning",)
__backend__ = True
@testing.provide_metadata
def test_select_doesnt_pollute_result(self, connection):
class MyType(TypeDecorator):
impl = Integer
cache_ok = True
def process_result_value(self, value, dialect):
raise Exception("I have not been selected")
t1 = Table("t1", self.metadata, Column("x", MyType()))
t2 = Table("t2", self.metadata, Column("x", Integer))
self.metadata.create_all(connection)
connection.execute(t1.insert().values(x=5))
stmt = (
t2.insert()
.values(x=select(t1.c.x).scalar_subquery())
.returning(t2.c.x)
)
result = connection.execute(stmt)
eq_(result.scalar(), 5)
class SequenceReturningTest(fixtures.TablesTest):
__requires__ = "returning", "sequences"
__backend__ = True
@classmethod
def define_tables(cls, metadata):
seq = Sequence("tid_seq")
Table(
"tables",
metadata,
Column(
"id",
Integer,
seq,
primary_key=True,
),
Column("data", String(50)),
)
cls.sequences.tid_seq = seq
def test_insert(self, connection):
table = self.tables.tables
r = connection.execute(
table.insert().values(data="hi").returning(table.c.id)
)
eq_(r.first(), tuple([testing.db.dialect.default_sequence_base]))
eq_(
connection.execute(self.sequences.tid_seq),
testing.db.dialect.default_sequence_base + 1,
)
class KeyReturningTest(fixtures.TablesTest, AssertsExecutionResults):
"""test returning() works with columns that define 'key'."""
__requires__ = ("returning",)
__backend__ = True
@classmethod
def define_tables(cls, metadata):
Table(
"tables",
metadata,
Column(
"id",
Integer,
primary_key=True,
key="foo_id",
test_needs_autoincrement=True,
),
Column("data", String(20)),
)
@testing.exclude("postgresql", "<", (8, 2), "8.2+ feature")
def test_insert(self, connection):
table = self.tables.tables
result = connection.execute(
table.insert().returning(table.c.foo_id), dict(data="somedata")
)
row = result.first()._mapping
assert row[table.c.foo_id] == row["id"] == 1
result = connection.execute(table.select()).first()._mapping
assert row[table.c.foo_id] == row["id"] == 1
class ReturnDefaultsTest(fixtures.TablesTest):
__requires__ = ("returning",)
run_define_tables = "each"
__backend__ = True
@classmethod
def define_tables(cls, metadata):
from sqlalchemy.sql import ColumnElement
from sqlalchemy.ext.compiler import compiles
counter = itertools.count()
class IncDefault(ColumnElement):
pass
@compiles(IncDefault)
def compile_(element, compiler, **kw):
return str(next(counter))
Table(
"t1",
metadata,
Column(
"id", Integer, primary_key=True, test_needs_autoincrement=True
),
Column("data", String(50)),
Column("insdef", Integer, default=IncDefault()),
Column("upddef", Integer, onupdate=IncDefault()),
)
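# Note (illustrative, not part of the test): IncDefault is a custom ColumnElement whose
# SQL is generated by the @compiles hook above; every compilation emits the next value
# of `counter`, starting at 0, which is why the tests below expect insdef == 0 on the
# first INSERT of each test (run_define_tables = "each" rebuilds the counter per test).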
def test_chained_insert_pk(self, connection):
t1 = self.tables.t1
result = connection.execute(
t1.insert().values(upddef=1).return_defaults(t1.c.insdef)
)
eq_(
[
result.returned_defaults._mapping[k]
for k in (t1.c.id, t1.c.insdef)
],
[1, 0],
)
def test_arg_insert_pk(self, connection):
t1 = self.tables.t1
result = connection.execute(
t1.insert().return_defaults(t1.c.insdef).values(upddef=1)
)
eq_(
[
result.returned_defaults._mapping[k]
for k in (t1.c.id, t1.c.insdef)
],
[1, 0],
)
def test_chained_update_pk(self, connection):
t1 = self.tables.t1
connection.execute(t1.insert().values(upddef=1))
result = connection.execute(
t1.update().values(data="d1").return_defaults(t1.c.upddef)
)
eq_(
[result.returned_defaults._mapping[k] for k in (t1.c.upddef,)], [1]
)
def test_arg_update_pk(self, connection):
t1 = self.tables.t1
connection.execute(t1.insert().values(upddef=1))
result = connection.execute(
t1.update().return_defaults(t1.c.upddef).values(data="d1")
)
eq_(
[result.returned_defaults._mapping[k] for k in (t1.c.upddef,)], [1]
)
def test_insert_non_default(self, connection):
"""test that a column not marked at all as a
default works with this feature."""
t1 = self.tables.t1
result = connection.execute(
t1.insert().values(upddef=1).return_defaults(t1.c.data)
)
eq_(
[
result.returned_defaults._mapping[k]
for k in (t1.c.id, t1.c.data)
],
[1, None],
)
def test_update_non_default(self, connection):
"""test that a column not marked at all as a
default works with this feature."""
t1 = self.tables.t1
connection.execute(t1.insert().values(upddef=1))
result = connection.execute(
t1.update().values(upddef=2).return_defaults(t1.c.data)
)
eq_(
[result.returned_defaults._mapping[k] for k in (t1.c.data,)],
[None],
)
def test_insert_sql_expr(self, connection):
from sqlalchemy import literal
t1 = self.tables.t1
result = connection.execute(
t1.insert().return_defaults().values(insdef=literal(10) + 5)
)
eq_(
result.returned_defaults._mapping,
{"id": 1, "data": None, "insdef": 15, "upddef": None},
)
def test_update_sql_expr(self, connection):
from sqlalchemy import literal
t1 = self.tables.t1
connection.execute(t1.insert().values(upddef=1))
result = connection.execute(
t1.update().values(upddef=literal(10) + 5).return_defaults()
)
eq_(result.returned_defaults._mapping, {"upddef": 15})
def test_insert_non_default_plus_default(self, connection):
t1 = self.tables.t1
result = connection.execute(
t1.insert()
.values(upddef=1)
.return_defaults(t1.c.data, t1.c.insdef)
)
eq_(
dict(result.returned_defaults._mapping),
{"id": 1, "data": None, "insdef": 0},
)
eq_(result.inserted_primary_key, (1,))
def test_update_non_default_plus_default(self, connection):
t1 = self.tables.t1
connection.execute(t1.insert().values(upddef=1))
result = connection.execute(
t1.update()
.values(insdef=2)
.return_defaults(t1.c.data, t1.c.upddef)
)
eq_(
dict(result.returned_defaults._mapping),
{"data": None, "upddef": 1},
)
def test_insert_all(self, connection):
t1 = self.tables.t1
result = connection.execute(
t1.insert().values(upddef=1).return_defaults()
)
eq_(
dict(result.returned_defaults._mapping),
{"id": 1, "data": None, "insdef": 0},
)
eq_(result.inserted_primary_key, (1,))
def test_update_all(self, connection):
t1 = self.tables.t1
connection.execute(t1.insert().values(upddef=1))
result = connection.execute(
t1.update().values(insdef=2).return_defaults()
)
eq_(dict(result.returned_defaults._mapping), {"upddef": 1})
@testing.requires.insert_executemany_returning
def test_insert_executemany_no_defaults_passed(self, connection):
t1 = self.tables.t1
result = connection.execute(
t1.insert().return_defaults(),
[
{"data": "d1"},
{"data": "d2"},
{"data": "d3"},
{"data": "d4"},
{"data": "d5"},
{"data": "d6"},
],
)
eq_(
[row._mapping for row in result.returned_defaults_rows],
[
{"id": 1, "insdef": 0, "upddef": None},
{"id": 2, "insdef": 0, "upddef": None},
{"id": 3, "insdef": 0, "upddef": None},
{"id": 4, "insdef": 0, "upddef": None},
{"id": 5, "insdef": 0, "upddef": None},
{"id": 6, "insdef": 0, "upddef": None},
],
)
eq_(
result.inserted_primary_key_rows,
[(1,), (2,), (3,), (4,), (5,), (6,)],
)
assert_raises_message(
sa_exc.InvalidRequestError,
"This statement was an executemany call; "
"if return defaults is supported",
lambda: result.returned_defaults,
)
assert_raises_message(
sa_exc.InvalidRequestError,
"This statement was an executemany call; "
"if primary key returning is supported",
lambda: result.inserted_primary_key,
)
@testing.requires.insert_executemany_returning
def test_insert_executemany_insdefault_passed(self, connection):
t1 = self.tables.t1
result = connection.execute(
t1.insert().return_defaults(),
[
{"data": "d1", "insdef": 11},
{"data": "d2", "insdef": 12},
{"data": "d3", "insdef": 13},
{"data": "d4", "insdef": 14},
{"data": "d5", "insdef": 15},
{"data": "d6", "insdef": 16},
],
)
eq_(
[row._mapping for row in result.returned_defaults_rows],
[
{"id": 1, "upddef": None},
{"id": 2, "upddef": None},
{"id": 3, "upddef": None},
{"id": 4, "upddef": None},
{"id": 5, "upddef": None},
{"id": 6, "upddef": None},
],
)
eq_(
result.inserted_primary_key_rows,
[(1,), (2,), (3,), (4,), (5,), (6,)],
)
assert_raises_message(
sa_exc.InvalidRequestError,
"This statement was an executemany call; "
"if return defaults is supported",
lambda: result.returned_defaults,
)
assert_raises_message(
sa_exc.InvalidRequestError,
"This statement was an executemany call; "
"if primary key returning is supported",
lambda: result.inserted_primary_key,
)
@testing.requires.insert_executemany_returning
def test_insert_executemany_only_pk_passed(self, connection):
t1 = self.tables.t1
result = connection.execute(
t1.insert().return_defaults(),
[
{"id": 10, "data": "d1"},
{"id": 11, "data": "d2"},
{"id": 12, "data": "d3"},
{"id": 13, "data": "d4"},
{"id": 14, "data": "d5"},
{"id": 15, "data": "d6"},
],
)
eq_(
[row._mapping for row in result.returned_defaults_rows],
[
{"insdef": 0, "upddef": None},
{"insdef": 0, "upddef": None},
{"insdef": 0, "upddef": None},
{"insdef": 0, "upddef": None},
{"insdef": 0, "upddef": None},
{"insdef": 0, "upddef": None},
],
)
eq_(
result.inserted_primary_key_rows,
[(10,), (11,), (12,), (13,), (14,), (15,)],
)
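# Minimal usage sketch (not part of the test suite) of the two APIs exercised above,
# assuming a RETURNING-capable backend (e.g. PostgreSQL) and a hypothetical
# `engine` / `some_table` with an autoincrementing `id` column:
#
#   with engine.begin() as conn:
#       # Explicit RETURNING: chained .returning() calls accumulate columns.
#       result = conn.execute(
#           some_table.insert().values(data="x").returning(some_table.c.id)
#       )
#       new_id = result.scalar_one()
#
#       # return_defaults(): the dialect fetches server-generated values implicitly.
#       # As the tests above show, it is mutually exclusive with .returning().
#       result = conn.execute(some_table.insert().values(data="y").return_defaults())
#       defaults = result.returned_defaults  # Row of server-generated column values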
|
|
# --------------------------------------------------------------------------
#
# Copyright (c) Microsoft Corporation. All rights reserved.
#
# The MIT License (MIT)
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the ""Software""), to
# deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
# sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED *AS IS*, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
#
# --------------------------------------------------------------------------
import collections.abc
import asyncio
from itertools import groupby
from typing import AsyncIterator
from multidict import CIMultiDict
from ._http_response_impl_async import AsyncHttpResponseImpl, AsyncHttpResponseBackcompatMixin
from ..pipeline.transport._aiohttp import AioHttpStreamDownloadGenerator
from ..utils._pipeline_transport_rest_shared import _pad_attr_name, _aiohttp_body_helper
from ..exceptions import ResponseNotReadError
class _ItemsView(collections.abc.ItemsView):
def __init__(self, ref):
super().__init__(ref)
self._ref = ref
def __iter__(self):
for key, groups in groupby(self._ref.__iter__(), lambda x: x[0]):
yield tuple([key, ", ".join(group[1] for group in groups)])
def __contains__(self, item):
if not (isinstance(item, (list, tuple)) and len(item) == 2):
return False
for k, v in self.__iter__():
if item[0].lower() == k.lower() and item[1] == v:
return True
return False
def __repr__(self):
return f"dict_items({list(self.__iter__())})"
class _KeysView(collections.abc.KeysView):
def __init__(self, items):
super().__init__(items)
self._items = items
def __iter__(self):
for key, _ in self._items:
yield key
def __contains__(self, key):
for k in self.__iter__():
if key.lower() == k.lower():
return True
return False
def __repr__(self):
return f"dict_keys({list(self.__iter__())})"
class _ValuesView(collections.abc.ValuesView):
def __init__(self, items):
super().__init__(items)
self._items = items
def __iter__(self):
for _, value in self._items:
yield value
def __contains__(self, value):
for v in self.__iter__():
if value == v:
return True
return False
def __repr__(self):
return f"dict_values({list(self.__iter__())})"
class _CIMultiDict(CIMultiDict):
"""Dictionary with the support for duplicate case-insensitive keys."""
def __iter__(self):
return iter(self.keys())
def keys(self):
"""Return a new view of the dictionary's keys."""
return _KeysView(self.items())
def items(self):
"""Return a new view of the dictionary's items."""
return _ItemsView(super().items())
def values(self):
"""Return a new view of the dictionary's values."""
return _ValuesView(self.items())
def __getitem__(self, key: str) -> str:
return ", ".join(self.getall(key, []))
def get(self, key, default=None):
values = self.getall(key, None)
if values:
values = ", ".join(values)
return values or default
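# Illustrative sketch (not part of the module) of how _CIMultiDict folds duplicate,
# case-insensitively equal header names into one comma-joined value:
#
#   h = _CIMultiDict()
#   h.add("Accept", "text/html")
#   h.add("Accept", "application/json")
#   h["accept"]  # -> "text/html, application/json" (lookup is case-insensitive)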
class _RestAioHttpTransportResponseBackcompatMixin(AsyncHttpResponseBackcompatMixin):
"""Backcompat mixin for aiohttp responses.
Needs its own mixin because it has the load_body method, which other
transport responses don't have, and also because we need to synchronously
decompress the body if users call .body()
"""
def body(self) -> bytes:
"""Return the whole body as bytes in memory.
Have to modify the default behavior here. In AioHttp, we do decompression
when accessing the body method. The behavior here is the same as if the
caller did an async read of the response first. But for backcompat reasons,
we need to support this decompression within the synchronous body method.
"""
return _aiohttp_body_helper(self)
async def _load_body(self) -> None:
"""Load in memory the body, so it could be accessible from sync methods."""
self._content = await self.read() # type: ignore
def __getattr__(self, attr):
backcompat_attrs = ["load_body"]
attr = _pad_attr_name(attr, backcompat_attrs)
return super().__getattr__(attr)
class RestAioHttpTransportResponse(AsyncHttpResponseImpl, _RestAioHttpTransportResponseBackcompatMixin):
def __init__(
self,
*,
internal_response,
decompress: bool = True,
**kwargs
):
headers = _CIMultiDict(internal_response.headers)
super().__init__(
internal_response=internal_response,
status_code=internal_response.status,
headers=headers,
content_type=headers.get('content-type'),
reason=internal_response.reason,
stream_download_generator=AioHttpStreamDownloadGenerator,
content=None,
**kwargs
)
self._decompress = decompress
self._decompressed_content = False
def __getstate__(self):
state = self.__dict__.copy()
# Remove the unpicklable entries.
state['_internal_response'] = None # aiohttp responses are not picklable (see headers comments)
state['headers'] = CIMultiDict(self.headers) # MultiDictProxy is not picklable
return state
@property
def content(self):
# type: (...) -> bytes
"""Return the response's content in bytes."""
if self._content is None:
raise ResponseNotReadError(self)
return _aiohttp_body_helper(self)
async def read(self) -> bytes:
"""Read the response's bytes into memory.
:return: The response's bytes
:rtype: bytes
"""
if not self._content:
self._stream_download_check()
self._content = await self._internal_response.read()
await self._set_read_checks()
return _aiohttp_body_helper(self)
async def close(self) -> None:
"""Close the response.
:return: None
:rtype: None
"""
if not self.is_closed:
self._is_closed = True
self._internal_response.close()
await asyncio.sleep(0)
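# Illustrative sketch (not part of the module): instances of this class are created by
# the async aiohttp transport, not by user code. Once you have one (here a hypothetical
# variable `response`), the read/content contract implemented above looks roughly like:
#
#   data = await response.read()   # loads (and, if enabled, decompresses) the body
#   same = response.content        # valid now; raises ResponseNotReadError before read()
#   await response.close()         # safe to call repeatedly; only closes once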
|
|
"""
Internal package providing a Python CRUD interface to MLflow experiments and runs.
This is a lower level API than the :py:mod:`mlflow.tracking.fluent` module, and is
exposed in the :py:mod:`mlflow.tracking` module.
"""
import time
import os
from mlflow.store.tracking import SEARCH_MAX_RESULTS_DEFAULT
from mlflow.tracking._tracking_service import utils
from mlflow.utils.validation import (
_validate_param_name,
_validate_tag_name,
_validate_run_id,
_validate_experiment_artifact_location,
_validate_experiment_name,
_validate_metric,
_validate_param_keys_unique,
PARAM_VALIDATION_MSG,
)
from mlflow.entities import Param, Metric, RunStatus, RunTag, ViewType, ExperimentTag
from mlflow.exceptions import MlflowException
from mlflow.protos.databricks_pb2 import INVALID_PARAMETER_VALUE, ErrorCode
from mlflow.store.artifact.artifact_repository_registry import get_artifact_repository
from mlflow.utils.mlflow_tags import MLFLOW_USER
from mlflow.utils.string_utils import is_string_type
from mlflow.utils.uri import add_databricks_profile_info_to_artifact_uri
from collections import OrderedDict
class TrackingServiceClient:
"""
Client of an MLflow Tracking Server that creates and manages experiments and runs.
"""
_artifact_repos_cache = OrderedDict()
def __init__(self, tracking_uri):
"""
:param tracking_uri: Address of local or remote tracking server.
"""
self.tracking_uri = tracking_uri
# NB: Fetch the tracking store (`self.store`) upon client initialization to ensure that
# the tracking URI is valid and the store can be properly resolved. We define `store` as a
# property method to ensure that the client is serializable, even if the store is not
self.store  # pylint: disable=pointless-statement
@property
def store(self):
return utils._get_store(self.tracking_uri)
def get_run(self, run_id):
"""
Fetch the run from backend store. The resulting :py:class:`Run <mlflow.entities.Run>`
contains a collection of run metadata -- :py:class:`RunInfo <mlflow.entities.RunInfo>`,
as well as a collection of run parameters, tags, and metrics --
:py:class:`RunData <mlflow.entities.RunData>`. In the case where multiple metrics with the
same key are logged for the run, the :py:class:`RunData <mlflow.entities.RunData>` contains
the most recently logged value at the largest step for each metric.
:param run_id: Unique identifier for the run.
:return: A single :py:class:`mlflow.entities.Run` object, if the run exists. Otherwise,
raises an exception.
"""
_validate_run_id(run_id)
return self.store.get_run(run_id)
def get_metric_history(self, run_id, key):
"""
Return a list of metric objects corresponding to all values logged for a given metric.
:param run_id: Unique identifier for run
:param key: Metric name within the run
:return: A list of :py:class:`mlflow.entities.Metric` entities if logged, else empty list
"""
return self.store.get_metric_history(run_id=run_id, metric_key=key)
def create_run(self, experiment_id, start_time=None, tags=None):
"""
Create a :py:class:`mlflow.entities.Run` object that can be associated with
metrics, parameters, artifacts, etc.
Unlike :py:func:`mlflow.projects.run`, creates objects but does not run code.
Unlike :py:func:`mlflow.start_run`, does not change the "active run" used by
:py:func:`mlflow.log_param`.
:param experiment_id: The ID of the experiment to create a run in.
:param start_time: If not provided, use the current timestamp.
:param tags: A dictionary of key-value pairs that are converted into
:py:class:`mlflow.entities.RunTag` objects.
:return: :py:class:`mlflow.entities.Run` that was created.
"""
tags = tags if tags else {}
# Extract user from tags
# This logic is temporary; the user_id attribute of runs is deprecated and will be removed
# in a later release.
user_id = tags.get(MLFLOW_USER, "unknown")
return self.store.create_run(
experiment_id=experiment_id,
user_id=user_id,
start_time=start_time or int(time.time() * 1000),
tags=[RunTag(key, value) for (key, value) in tags.items()],
)
def list_run_infos(
self,
experiment_id,
run_view_type=ViewType.ACTIVE_ONLY,
max_results=SEARCH_MAX_RESULTS_DEFAULT,
order_by=None,
page_token=None,
):
"""
Return run information for runs which belong to the experiment_id.
:param experiment_id: The experiment ID in which to search
:param run_view_type: ACTIVE_ONLY, DELETED_ONLY, or ALL runs
:param max_results: Maximum number of results desired.
:param order_by: List of order_by clauses. Currently supported values are
``metric.key``, ``parameter.key``, ``tag.key``, ``attribute.key``.
For example, ``order_by=["tag.release ASC", "metric.click_rate DESC"]``.
:return: A :py:class:`PagedList <mlflow.store.entities.PagedList>` of
:py:class:`RunInfo <mlflow.entities.RunInfo>` objects that satisfy the search
expressions. If the underlying tracking store supports pagination, the token for the
next page may be obtained via the ``token`` attribute of the returned object.
"""
return self.store.list_run_infos(
experiment_id, run_view_type, max_results, order_by, page_token
)
def list_experiments(self, view_type=ViewType.ACTIVE_ONLY, max_results=None, page_token=None):
"""
:param view_type: Qualify requested type of experiments.
:param max_results: If passed, specifies the maximum number of experiments desired.
If not passed, all experiments will be returned for the File and
SQLAlchemy backends. For the REST backend, the server will determine
an appropriate number of experiments to return.
:param page_token: Token specifying the next page of results. It should be obtained from
a ``list_experiments`` call.
:return: A :py:class:`PagedList <mlflow.store.entities.PagedList>` of
:py:class:`Experiment <mlflow.entities.Experiment>` objects. The pagination token
for the next page can be obtained via the ``token`` attribute of the object.
"""
return self.store.list_experiments(
view_type=view_type, max_results=max_results, page_token=page_token
)
def get_experiment(self, experiment_id):
"""
:param experiment_id: The experiment ID returned from ``create_experiment``.
:return: :py:class:`mlflow.entities.Experiment`
"""
return self.store.get_experiment(experiment_id)
def get_experiment_by_name(self, name):
"""
:param name: The experiment name.
:return: :py:class:`mlflow.entities.Experiment`
"""
return self.store.get_experiment_by_name(name)
def create_experiment(self, name, artifact_location=None, tags=None):
"""Create an experiment.
:param name: The experiment name. Must be unique.
:param artifact_location: The location to store run artifacts.
If not provided, the server picks an appropriate default.
:param tags: A dictionary of key-value pairs that are converted into
:py:class:`mlflow.entities.ExperimentTag` objects.
:return: Integer ID of the created experiment.
"""
_validate_experiment_name(name)
_validate_experiment_artifact_location(artifact_location)
return self.store.create_experiment(
name=name,
artifact_location=artifact_location,
tags=[ExperimentTag(key, value) for (key, value) in tags.items()] if tags else [],
)
def delete_experiment(self, experiment_id):
"""
Delete an experiment from the backend store.
:param experiment_id: The experiment ID returned from ``create_experiment``.
"""
self.store.delete_experiment(experiment_id)
def restore_experiment(self, experiment_id):
"""
Restore a deleted experiment unless permanently deleted.
:param experiment_id: The experiment ID returned from ``create_experiment``.
"""
self.store.restore_experiment(experiment_id)
def rename_experiment(self, experiment_id, new_name):
"""
Update an experiment's name. The new name must be unique.
:param experiment_id: The experiment ID returned from ``create_experiment``.
"""
self.store.rename_experiment(experiment_id, new_name)
def log_metric(self, run_id, key, value, timestamp=None, step=None):
"""
Log a metric against the run ID.
:param run_id: The run id to which the metric should be logged.
:param key: Metric name (string). This string may only contain alphanumerics,
underscores (_), dashes (-), periods (.), spaces ( ), and slashes (/).
All backend stores will support keys up to length 250, but some may
support larger keys.
:param value: Metric value (float). Note that some special values such
as +/- Infinity may be replaced by other values depending on the store. For
example, the SQLAlchemy store replaces +/- Inf with max / min float values.
All backend stores will support values up to length 5000, but some
may support larger values.
:param timestamp: Time when this metric was calculated. Defaults to the current system time.
:param step: Training step (iteration) at which the metric was calculated. Defaults to 0.
"""
timestamp = timestamp if timestamp is not None else int(time.time() * 1000)
step = step if step is not None else 0
_validate_metric(key, value, timestamp, step)
metric = Metric(key, value, timestamp, step)
self.store.log_metric(run_id, metric)
def log_param(self, run_id, key, value):
"""
Log a parameter against the run ID. Value is converted to a string.
"""
_validate_param_name(key)
param = Param(key, str(value))
try:
self.store.log_param(run_id, param)
except MlflowException as e:
if e.error_code == ErrorCode.Name(INVALID_PARAMETER_VALUE):
msg = f"{e.message}{PARAM_VALIDATION_MSG}'"
raise MlflowException(msg, INVALID_PARAMETER_VALUE)
else:
raise e
def set_experiment_tag(self, experiment_id, key, value):
"""
Set a tag on the experiment with the specified ID. Value is converted to a string.
:param experiment_id: String ID of the experiment.
:param key: Name of the tag.
:param value: Tag value (converted to a string).
"""
_validate_tag_name(key)
tag = ExperimentTag(key, str(value))
self.store.set_experiment_tag(experiment_id, tag)
def set_tag(self, run_id, key, value):
"""
Set a tag on the run with the specified ID. Value is converted to a string.
:param run_id: String ID of the run.
:param key: Tag name (string). This string may only contain alphanumerics, underscores
(_), dashes (-), periods (.), spaces ( ), and slashes (/).
All backend stores will support keys up to length 250, but some may
support larger keys.
:param value: Tag value (string, but will be string-ified if not).
All backend stores will support values up to length 5000, but some
may support larger values.
"""
_validate_tag_name(key)
tag = RunTag(key, str(value))
self.store.set_tag(run_id, tag)
def delete_tag(self, run_id, key):
"""
Delete a tag from a run. This is irreversible.
:param run_id: String ID of the run
:param key: Name of the tag
"""
self.store.delete_tag(run_id, key)
def log_batch(self, run_id, metrics=(), params=(), tags=()):
"""
Log multiple metrics, params, and/or tags.
:param run_id: String ID of the run
:param metrics: If provided, List of Metric(key, value, timestamp) instances.
:param params: If provided, List of Param(key, value) instances.
:param tags: If provided, List of RunTag(key, value) instances.
Raises an MlflowException if any errors occur.
:return: None
"""
if len(metrics) == 0 and len(params) == 0 and len(tags) == 0:
return
if len(params) > 1:
_validate_param_keys_unique(params)
for metric in metrics:
_validate_metric(metric.key, metric.value, metric.timestamp, metric.step)
for param in params:
_validate_param_name(param.key)
for tag in tags:
_validate_tag_name(tag.key)
self.store.log_batch(run_id=run_id, metrics=metrics, params=params, tags=tags)
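# Illustrative sketch (not part of the client) of a typical log_batch call, building
# the entity objects imported above; `client` and `run_id` are assumed to exist:
#
#   now_ms = int(time.time() * 1000)
#   client.log_batch(
#       run_id,
#       metrics=[Metric("rmse", 0.25, now_ms, step=0)],
#       params=[Param("alpha", "0.5")],
#       tags=[RunTag("stage", "dev")],
#   )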
def _record_logged_model(self, run_id, mlflow_model):
from mlflow.models import Model
if not isinstance(mlflow_model, Model):
raise TypeError(
"Argument 'mlflow_model' should be of type mlflow.models.Model but was "
"{}".format(type(mlflow_model))
)
self.store.record_logged_model(run_id, mlflow_model)
def _get_artifact_repo(self, run_id):
# Attempt to fetch the artifact repo from a local cache
cached_repo = TrackingServiceClient._artifact_repos_cache.get(run_id)
if cached_repo is not None:
return cached_repo
else:
run = self.get_run(run_id)
artifact_uri = add_databricks_profile_info_to_artifact_uri(
run.info.artifact_uri, self.tracking_uri
)
artifact_repo = get_artifact_repository(artifact_uri)
# Cache the artifact repo to avoid a future network call, removing the oldest
# entry in the cache if there are too many elements
if len(TrackingServiceClient._artifact_repos_cache) > 1024:
TrackingServiceClient._artifact_repos_cache.popitem(last=False)
TrackingServiceClient._artifact_repos_cache[run_id] = artifact_repo
return artifact_repo
def log_artifact(self, run_id, local_path, artifact_path=None):
"""
Write a local file or directory to the remote ``artifact_uri``.
:param local_path: Path to the file or directory to write.
:param artifact_path: If provided, the directory in ``artifact_uri`` to write to.
"""
artifact_repo = self._get_artifact_repo(run_id)
if os.path.isdir(local_path):
dir_name = os.path.basename(os.path.normpath(local_path))
path_name = (
os.path.join(artifact_path, dir_name) if artifact_path is not None else dir_name
)
artifact_repo.log_artifacts(local_path, path_name)
else:
artifact_repo.log_artifact(local_path, artifact_path)
def log_artifacts(self, run_id, local_dir, artifact_path=None):
"""
Write a directory of files to the remote ``artifact_uri``.
:param local_dir: Path to the directory of files to write.
:param artifact_path: If provided, the directory in ``artifact_uri`` to write to.
"""
self._get_artifact_repo(run_id).log_artifacts(local_dir, artifact_path)
def list_artifacts(self, run_id, path=None):
"""
List the artifacts for a run.
:param run_id: The run to list artifacts from.
:param path: The run's relative artifact path to list from. If unspecified, listing
    starts from the root artifact path.
:return: List of :py:class:`mlflow.entities.FileInfo`
"""
return self._get_artifact_repo(run_id).list_artifacts(path)
def download_artifacts(self, run_id, path, dst_path=None):
"""
Download an artifact file or directory from a run to a local directory if applicable,
and return a local path for it.
:param run_id: The run to download artifacts from.
:param path: Relative source path to the desired artifact.
:param dst_path: Absolute path of the local filesystem destination directory to which to
download the specified artifacts. This directory must already exist.
If unspecified, the artifacts will either be downloaded to a new
uniquely-named directory on the local filesystem or will be returned
directly in the case of the LocalArtifactRepository.
:return: Local path of desired artifact.
"""
return self._get_artifact_repo(run_id).download_artifacts(path, dst_path)
def set_terminated(self, run_id, status=None, end_time=None):
"""Set a run's status to terminated.
:param status: A string value of :py:class:`mlflow.entities.RunStatus`.
Defaults to "FINISHED".
:param end_time: If not provided, defaults to the current time."""
end_time = end_time if end_time else int(time.time() * 1000)
status = status if status else RunStatus.to_string(RunStatus.FINISHED)
self.store.update_run_info(
run_id, run_status=RunStatus.from_string(status), end_time=end_time
)
def delete_run(self, run_id):
"""
Deletes a run with the given ID.
"""
self.store.delete_run(run_id)
def restore_run(self, run_id):
"""
Restores a deleted run with the given ID.
"""
self.store.restore_run(run_id)
def search_runs(
self,
experiment_ids,
filter_string="",
run_view_type=ViewType.ACTIVE_ONLY,
max_results=SEARCH_MAX_RESULTS_DEFAULT,
order_by=None,
page_token=None,
):
"""
Search for runs that fit the search criteria.
:param experiment_ids: List of experiment IDs, or a single int or string id.
:param filter_string: Filter query string, defaults to searching all runs.
:param run_view_type: one of enum values ACTIVE_ONLY, DELETED_ONLY, or ALL runs
defined in :py:class:`mlflow.entities.ViewType`.
:param max_results: Maximum number of runs desired.
:param order_by: List of columns to order by (e.g., "metrics.rmse"). The ``order_by`` column
can contain an optional ``DESC`` or ``ASC`` value. The default is ``ASC``.
The default ordering is to sort by ``start_time DESC``, then ``run_id``.
:param page_token: Token specifying the next page of results. It should be obtained from
a ``search_runs`` call.
:return: A :py:class:`PagedList <mlflow.store.entities.PagedList>` of
:py:class:`Run <mlflow.entities.Run>` objects that satisfy the search expressions.
If the underlying tracking store supports pagination, the token for the next page may
be obtained via the ``token`` attribute of the returned object.
"""
if isinstance(experiment_ids, int) or is_string_type(experiment_ids):
experiment_ids = [experiment_ids]
return self.store.search_runs(
experiment_ids=experiment_ids,
filter_string=filter_string,
run_view_type=run_view_type,
max_results=max_results,
order_by=order_by,
page_token=page_token,
)
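# Illustrative end-to-end sketch (not part of the module) using only the methods
# defined above, assuming a reachable tracking server URI:
#
#   client = TrackingServiceClient("http://localhost:5000")
#   exp_id = client.create_experiment("demo")
#   run = client.create_run(exp_id)
#   client.log_param(run.info.run_id, "alpha", 0.5)
#   client.log_metric(run.info.run_id, "rmse", 0.25)
#   client.set_terminated(run.info.run_id)
#   runs = client.search_runs([exp_id], filter_string="metrics.rmse < 1")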
|
|
# -*- coding: utf-8 -*-
import logging
import cgi
import re
from . import widgets
from . import convs
from ..utils import cached_property
from collections import OrderedDict
from .perms import FieldPerm
logger = logging.getLogger(__name__)
__all__ = ['BaseField', 'Field', 'FieldBlock', 'FieldSet', 'FieldList', 'FileField']
class BaseField(object):
'''
Simple container class whose subclasses represent various parts of a Form.
Encapsulates a converter, various field attributes, and methods for data
access control.
'''
# obsolete parameters from previous versions
_obsolete = frozenset(['default', 'get_default', 'template', 'media',
'render_type', 'render', 'required'])
#: :class:`FieldPerm` instance determining field's access permissions.
#: Can be set by field inheritance or through the constructor.
perm_getter = FieldPerm()
# defaults
#: :class:`Converter` instance determining the field's conversion method
conv = convs.Char()
#: :class:`Widget` instance determining field's render method
widget = widgets.TextInput
#: Unicode label of the field
label = None
#: Short description of the field
hint = None
def __init__(self, name, conv=None, parent=None, permissions=None, **kwargs):
if self._obsolete & set(kwargs):
raise TypeError(
'Obsolete parameters are used: {}'.format(
list(self._obsolete & set(kwargs))))
kwargs.update(
parent=parent,
name=name,
conv=(conv or self.conv)(field=self),
widget=(kwargs.get('widget') or self.widget)(field=self),
)
if permissions is not None:
kwargs['perm_getter'] = FieldPerm(permissions)
self._init_kwargs = kwargs
self.__dict__.update(kwargs)
def __call__(self, **kwargs):
'''
Creates a copy of the current object with extra constructor arguments passed.
'''
params = dict(self._init_kwargs, **kwargs)
return self.__class__(**params)
@property
def multiple(self):
return self.conv.multiple
@property
def env(self):
return self.parent.env
@property
def form(self):
return self.parent.form
@property
def input_name(self):
'''
Name of the field's input element, generated taking into account possible
nesting of fields. The input name is to be used in templates as the value
of the Input (Select, etc.) element's name attribute and the Label element's
for attribute.
'''
return self.parent.prefix + self.name
@property
def error(self):
'''
String description of the validation error in this field during the last accept.
`None` if there is no error.
'''
return self.form.errors.get(self.input_name)
@cached_property
def clean_value(self):
'''
Current field's converted value from form's python_data.
'''
# XXX cached_property is used only to set the initial state;
# this property should be re-set every time the field data
# changes, for instance, in the accept method
python_data = self.parent.python_data
if self.name in python_data:
return python_data[self.name]
return self.get_initial()
@property
def id(self):
if self.form.id:
# We use template names in lists for replacement, so we must use them here to
# ensure unique IDs.
return '{}-{}'.format(self.form.id, self.input_name)
return self.input_name
def from_python(self, value):
return self.conv.from_python(value)
@cached_property
def permissions(self):
'''
Field's access permissions. By default, it is filled from perm_getter.
'''
return self.perm_getter.get_perms(self)
@cached_property
def writable(self):
return 'w' in self.permissions
@cached_property
def readable(self):
return 'r' in self.permissions
@cached_property
def field_names(self):
return [self.name]
def load_initial(self, initial, raw_data):
value = initial.get(self.name, self.get_initial())
self.set_raw_value(raw_data,
self.from_python(value))
return {self.name: value}
def __repr__(self):
args = ', '.join([k+'='+repr(v)
for k, v in self._init_kwargs.items()
if k not in ['widget', 'conv', 'parent']])
return '{}({})'.format(self.__class__.__name__, args)
class Field(BaseField):
'''
Atomic field
'''
conv = convs.Char()
_null_value = ''
def get_initial(self):
if hasattr(self, 'initial'):
return self.initial
if self.multiple:
return []
return None
@property
def raw_value(self):
if self.multiple:
return self.form.raw_data.getall(self.input_name)
else:
return self.form.raw_data.get(self.input_name, '')
def set_raw_value(self, raw_data, value):
if self.multiple:
try:
del raw_data[self.input_name]
except KeyError:
pass
for v in value:
raw_data.add(self.input_name, v)
else:
raw_data[self.input_name] = value
def _check_value_type(self, values):
if not self.multiple:
values = [values]
for value in values:
if not isinstance(value, basestring):
self.form.errors[self.input_name] = 'Given value has incompatible type'
return False
return True
def accept(self):
'''Extracts raw value from form's raw data and passes it to converter'''
value = self.raw_value
if not self._check_value_type(value):
# XXX should this be silent or TypeError?
value = [] if self.multiple else self._null_value
self.clean_value = self.conv.accept(value)
return {self.name: self.clean_value}
class AggregateField(BaseField):
@property
def python_data(self):
'''Representation of aggregate value as dictionary.'''
try:
value = self.clean_value
except LookupError:
# XXX is this necessary?
value = self.get_initial()
return self.from_python(value)
class FieldSet(AggregateField):
'''
Container field aggregating a group of other, possibly different, fields
'''
conv = convs.Converter()
widget = widgets.FieldSetWidget()
fields = []
def __init__(self, name, conv=None, fields=None, **kwargs):
fields = fields if fields is not None else self.fields
if kwargs.get('parent'):
conv = (conv or self.conv)(field=self)
fields = [field(parent=self) for field in fields]
kwargs.update(
name=name,
conv=conv,
fields=fields,
)
BaseField.__init__(self, **kwargs)
@property
def prefix(self):
return self.input_name+'.'
def get_field(self, name):
names = name.split('.', 1)
for field in self.fields:
if isinstance(field, FieldBlock):
result = field.get_field(name)
if result is not None:
return result
if field.name == names[0]:
if len(names) > 1:
return field.get_field(names[1])
return field
return None
def get_initial(self):
field_names = sum([x.field_names for x in self.fields], [])
result = dict((name, self.get_field(name).get_initial())
for name in field_names)
return self.conv.accept(result, silent=True)
def set_raw_value(self, raw_data, value):
# fills in raw_data multidict, resulting keys are field's absolute names
assert isinstance(value, dict), \
'To set raw value on {!r} need dict, got {!r}'\
.format(self.input_name, value)
if not value:
# Field set can be optional
return
field_names = sum([x.field_names for x in self.fields], [])
for field_name in field_names:
subvalue = value[field_name]
field = self.get_field(field_name)
field.set_raw_value(raw_data, field.from_python(subvalue))
def accept(self):
'''
Accepts all child fields, collects the resulting values into a dict and
passes that dict to the converter.
Returns the converter's result as a separate value in the parent's `python_data`.
'''
result = dict(self.python_data)
for field in self.fields:
if field.writable:
result.update(field.accept())
else:
# readonly field
field.set_raw_value(self.form.raw_data,
field.from_python(result[field.name]))
self.clean_value = self.conv.accept(result)
return {self.name: self.clean_value}
class FieldBlock(FieldSet):
'''
Anonymous FieldSet whose values are accepted as if they were children
of the FieldBlock's parent.
FieldBlock is used to logically organize fields and to validate a group
of fields without naming that group and without dedicating the result
of accept to a separate object.
'''
conv = convs.FieldBlockConv()
widget = widgets.FieldBlockWidget()
prefix = ''
def __init__(self, title, fields=[], **kwargs):
kwargs.update(
title=title,
fields=fields,
)
kwargs.setdefault('name', '') # XXX generate unique name
FieldSet.__init__(self, **kwargs)
@cached_property
def prefix(self):
return self.parent.prefix
def accept(self):
'''
Acts like `FieldSet.accept` but returns the result of every child field
as a value in the parent's `python_data`.
'''
result = FieldSet.accept(self)
self.clean_value = result[self.name]
return self.clean_value
def load_initial(self, initial, raw_data):
result = {}
for field in self.fields:
result.update(field.load_initial(initial, raw_data))
return result
@cached_property
def field_names(self):
result = []
for field in self.fields:
result += field.field_names
return result
@property
def python_data(self):
# we need only subfield values in python data
result = {}
for field_name in self.field_names:
if field_name in self.parent.python_data:
result[field_name] = self.parent.python_data[field_name]
return result
class FieldList(AggregateField):
'''
Container aggregating an ordered set of similar fields
'''
order = True
conv = convs.List()
widget = widgets.FieldListWidget()
_digit_re = re.compile(r'\d+$')
def __init__(self, name, conv=None, field=Field(None),
parent=None, **kwargs):
if parent:
conv = (conv or self.conv)(field=self)
field = field(parent=self)
kwargs.update(
parent=parent,
name=name,
conv=conv,
field=field,
)
BaseField.__init__(self, **kwargs)
@property
def prefix(self):
# NOTE: There was '-' instead of '.' and get_field('list-1') was broken
return self.input_name+'.'
def get_initial(self):
return []
def get_field(self, name):
names = name.split('.', 1)
if not self._digit_re.match(names[0]):
# XXX is this needed?
return None
field = self.field(name=names[0])
if len(names) > 1:
return field.get_field(names[1])
return field
@property
def indices_input_name(self):
return self.input_name+'-indices'
def accept(self):
old = self.python_data
result = OrderedDict()
for index in self.form.raw_data.getall(self.indices_input_name):
try:
# XXX: we do not convert index to int, just check it.
# Is that a good idea?
int(index)
except ValueError:
logger.warning('Got incorrect index from form: %r', index)
continue
#TODO: describe this
field = self.field(name=str(index))
if not field.writable:
# readonly field
if index in old:
result[field.name] = old[field.name]
else:
result.update(field.accept())
self.clean_value = self.conv.accept(result)
return {self.name: self.clean_value}
def set_raw_value(self, raw_data, value):
indices = []
for index in range(1, len(value)+1):
index = str(index)
subvalue = value[index]
subfield = self.field(name=index)
subfield.set_raw_value(raw_data, subfield.from_python(subvalue))
indices.append(index)
try:
del raw_data[self.indices_input_name]
except KeyError:
pass
for index in indices:
raw_data.add(self.indices_input_name, index)
class FileField(Field):
'''
The simplest file field
'''
_null_value = None
conv = convs.SimpleFile()
def set_raw_value(self, raw_data, value):
pass
def _check_value_type(self, values):
if not self.multiple:
values = [values]
for value in values:
if value and \
not isinstance(value, cgi.FieldStorage) and \
not hasattr(value, 'read'): # XXX is this right?
self.form.errors[self.input_name] = 'Given value is not file'
return False
return True
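# Illustrative declaration sketch (not part of the module), using only the constructors
# defined above; extra keyword arguments such as `label` are passed through **kwargs
# onto the field instance:
#
#   fields = [
#       Field('title', conv=convs.Char(), label=u'Title'),
#       FieldSet('author', fields=[
#           Field('name', conv=convs.Char()),
#       ]),
#       FieldList('tags', field=Field(None, conv=convs.Char())),
#   ]
#
# Fields declared this way are later copied with a `parent` set (see __call__ above),
# presumably when the enclosing form binds them.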
|
|
# -*- coding: utf-8 -*-
"""JMdict support."""
# This could be a bit cleaner if I used something like SQLAlchemy
# perhaps... The create/insert/index bits were done decently enough,
# but lookups are done in straight SQL due to the potential
# complexity, and this sadly does break the abstraction of the table
# objects...
from __future__ import print_function
from __future__ import with_statement
import os, re, sqlite3
from cStringIO import StringIO
from xml.etree.cElementTree import ElementTree
from helpers import gzread, get_encoding, convert_query_to_unicode
from db import Database as BaseDatabase
from table import Table, ChildTable, KeyValueTable
import gettext
#t = gettext.translation("jblite")
#_ = t.ugettext
gettext.install("jblite")
# Full expansion of xml:lang
XML_LANG = "{http://www.w3.org/XML/1998/namespace}lang"
# FORMAT OF TABLE MAP:
# dictionary entry: table: (children | None)
# table: table_name | (table_name, table_type, *args, **kwargs)
#
# Ideas:
# Value = dict: take keys as child tables, lookup all rows, and take values as grandchildren.
# Value = list: take items as child tables, lookup all rows, assume no children.
#
#
# entry:
# data = tables["entry"].lookup()
# children_map = TABLE_MAP["entry"]
# children = get_data(children_map["k_ele"])
# result = TableData(data, children)
#
#
# {"k_ele": {"data": [...],
# "children": {...}}}
# Table data object:
# obj.data: {}, # single db row
# obj.children: {"key": table_object}
# breadth first creation? depth?
# Map of tables to their children maps. Empty {} means no children.
class Entry(object):
def __init__(self, record):
self._record = record
def __unicode__(self):
"""Basic string representation of the entry."""
rec = self._record
lines = []
k_eles = rec.find_children("k_ele")
if len(k_eles) > 0:
lines.append(_(u"Kanji readings:"))
for k_ele_index, k_ele in enumerate(k_eles):
k_ele_index += 1
lines.append(_(u" Reading %d:") % k_ele_index)
lines.append(_(u" Blob: %s") % k_ele.data['value'])
r_eles = rec.find_children("r_ele")
if len(r_eles) > 0:
lines.append(_(u"Kana readings:"))
for r_ele_index, r_ele in enumerate(r_eles):
r_ele_index += 1
lines.append(_(u" Reading %d:") % r_ele_index)
lines.append(_(u" Blob: %s") % r_ele.data['value'])
senses = rec.find_children("sense")
if len(senses) > 0:
lines.append(_(u"Glosses:"))
for sense_index, sense in enumerate(senses):
sense_index += 1
lines.append(_(u" Sense %d:") % sense_index)
glosses = sense.find_children("gloss")
gloss_d = {}
for gloss in glosses:
gloss_d.setdefault(gloss.data["lang"], []).append(gloss)
# Output glosses by language
for lang in sorted(gloss_d.keys()):
gloss_recs = gloss_d[lang]
lines.append(_(u" Lang: %s") % lang)
for gloss_index, gloss in enumerate(gloss_recs):
gloss_index += 1
val = gloss.data['value']
lines.append(_(u" Gloss %d: %s") % (gloss_index, val))
return u"\n".join(lines)
def __repr__(self):
return repr(self._record)
class Database(BaseDatabase):
"""Top level object for SQLite 3-based JMdict database."""
entry_class = Entry
table_map = {
u"entry": {
u"k_ele": {
u"ke_inf": {},
u"ke_pri": {},
},
u"r_ele": {
u"re_restr": {},
u"re_inf": {},
u"re_pri": {},
},
u"links": {},
u"bibl": {},
u"etym": {},
u"audit": {},
u"sense": {
u"pos": {},
u"field": {},
u"misc": {},
u"dial": {},
u"stagk": {},
u"stagr": {},
u"xref": {},
u"ant": {},
u"s_inf": {},
u"example": {},
u"lsource": {},
u"gloss": {
u"pri": {},
}
}
}
}
def __init__(self, filename, init_from_file=None):
self.conn = sqlite3.connect(filename)
self.conn.row_factory = sqlite3.Row # keyword accessors for rows
self.cursor = self.conn.cursor()
self.tables = self._create_table_objects()
if init_from_file is not None:
raw_data = gzread(init_from_file)
entities = self._get_entities(raw_data)
infile = StringIO(raw_data)
etree = ElementTree(file=infile)
infile.close()
self._create_new_tables()
self._populate_database(etree, entities)
self.conn.commit()
def search(self, query, lang=None):
# Search
# Two main methods: to and from Japanese.
# 1. Guess which direction we're searching.
# 2. Search preferred method.
# 3. Search remaining method.
query = convert_query_to_unicode(query)
query = "%%%s%%" % query # Wrap in wildcards
entries_from = self._search_from_japanese(query)
entries_to = self._search_to_japanese(query, lang=lang)
entry_ids = entries_from + entries_to
results = [self.lookup(entry_id) for entry_id in entry_ids]
return results
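# Illustrative usage sketch (not part of the module); both search directions are
# attempted and merged, so a caller only needs something like:
#
#   db = Database("jmdict.sqlite3")       # hypothetical, already-populated DB file
#   entries = db.search("dictionary", lang="eng")
#   for entry in entries:
#       print(unicode(entry))             # Entry.__unicode__ formats the record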
def _search_from_japanese(self, query):
# Japanese search locations:
# 1. Kanji elements
# 2. Reading elements
# 3. Any indices (none yet)
#
# Preferred orderings
# 1. Location of query in result
# 1. Exact match
# 2. Begins with
# 3. Anywhere
# 2. Ranking of usage (the (P) option in EDICT, for example)
#
# FOR NOW: just get the searching working.
# This puts us on roughly the same level as J-Ben 1.2.x.
entries_by_keb = self._search_keb(query)
entries_by_reb = self._search_reb(query)
#entries_by_indices = self._search_indices_from_ja(unicode_query)
# Merge results into one list and return.
results = []
for lst in (entries_by_keb, entries_by_reb):
for o in lst:
if o not in results:
results.append(o)
return results
def _search_keb(self, unicode_query):
"""Searches kanji elements (Japanese readings with kanji).
Returns a list of entry IDs.
"""
# keb: entry.id -> k_ele.fk, k_ele.value
query = "SELECT fk FROM k_ele WHERE value LIKE ?"
args = (unicode_query,)
self.cursor.execute(query, args)
rows = self.cursor.fetchall()
return [row[0] for row in rows]
def _search_reb(self, unicode_query):
"""Searches reading elements (Japanese readings without kanji).
Returns a list of entry IDs.
"""
# reb: entry.id -> r_ele.fk, r_ele.value
query = "SELECT fk FROM r_ele WHERE value LIKE ?"
args = (unicode_query,)
self.cursor.execute(query, args)
rows = self.cursor.fetchall()
return [row[0] for row in rows]
def _search_indices_from_ja(self, unicode_query):
raise NotImplementedError
def _search_to_japanese(self, query, lang):
# Foreign language search locations:
# 1. Glosses
# 2. Any indices (none yet)
#
# For other considerations, see search_from_japanese().
entries_by_glosses = self._search_glosses(query, lang)
#entries_by_indices = self._search_indices_to_ja(unicode_query, lang)
# Merge results into one list and return.
results = []
for lst in (entries_by_glosses,):
for o in lst:
if o not in results:
results.append(o)
return results
def _search_glosses(self, unicode_query, lang):
"""Searches foreign language glosses.
If lang is not None, only entries which match the lang
parameter are returned.
Returns a list of entry IDs.
"""
# entry.id -> sense.fk, sense.id -> gloss.fk
# FORMAT: SELECT e.id FROM gloss g, sense s, entry e
# WHERE (g.lang = ? AND) g.value LIKE ?
# AND g.fk = s.id AND s.fk = e.id
select_clause = "SELECT e.id"
from_clause = "FROM gloss g, sense s, entry e"
where_conditions = []
args = []
if lang is not None:
where_conditions.append("g.lang = ?")
args.append(lang)
where_conditions.append("g.value LIKE ?")
args.append(unicode_query)
where_conditions.append("g.fk = s.id")
where_conditions.append("s.fk = e.id")
where_clause = "WHERE %s" % " AND ".join(where_conditions)
query = " ".join([select_clause, from_clause, where_clause])
self.cursor.execute(query, args)
rows = self.cursor.fetchall()
return [row[0] for row in rows]
def _search_indices_to_ja(self, unicode_query, lang):
raise NotImplementedError
def lookup(self, id):
return BaseDatabase.lookup(self, "entry", id)
def query_db(self, *args, **kwargs):
"""Helper. Wraps the execute/fetchall idiom on the DB cursor."""
self.cursor.execute(*args, **kwargs)
return self.cursor.fetchall()
def _convert_entities(self, entities):
"""Expands a list of entities.
Returns a list of the entity expansions. The order of the
returned expansions matches the order of the input entities.
"""
args = list(sorted(set(entities)))
template = ", ".join(["?"] * len(args))
query = "SELECT entity, expansion " \
"FROM entity WHERE entity IN (%s)" % template
rows = self.query_db(query, args)
d = {}
for entity, expansion in rows:
d[entity] = expansion
result = [d[entity] for entity in entities]
return result
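    # For example (hypothetical entity names), _convert_entities(["n", "adj-i", "n"])
    # returns the three expansions in the same order as the input, repeating
    # the expansion for the duplicated "n" entity.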
def _create_table_objects(self):
"""Creates table objects.
Returns a dictionary of table name to table object.
"""
class_mappings = {
"entry": EntryTable, # key->int ID
"r_ele": REleTable, # key-value plus nokanji flag
"sense": SenseTable, # one-many group mapping for sense info
"audit": AuditTable, # key->(update_date, update_details)
"lsource": LSourceTable, # key -> lang, type=full/part, wasei=t/f
"gloss": GlossTable, # key -> lang, g_gend, value, pri flag
"links": LinksTable, # key -> tag, desc, uri
"bibl": BiblTable, # key -> tag, txt
"entity": EntityTable, # Info from JMdict XML entities
}
# Set up key/value and key/entity tables
kv_tables = [ # key-value tables (id -> text blob)
"k_ele",
"ke_pri",
"re_restr",
"re_pri",
"etym",
"stagk",
"stagr",
"xref", # (#PCDATA)* - why the *?
"ant", # (#PCDATA)* - why the *?
"s_inf",
"example",
"pri",
]
kv_entity_tables = [ # key-value tables where val == entity
"ke_inf",
"re_inf",
"dial",
"field",
"misc",
"pos",
]
for tbl in kv_tables:
class_mappings[tbl] = KeyValueTable
for tbl in kv_entity_tables:
class_mappings[tbl] = KeyEntityTable
# Create all table objects
table_mappings = {}
for tbl, cls in class_mappings.iteritems():
table_mappings[tbl] = cls(self.cursor, tbl)
return table_mappings
def _create_new_tables(self):
"""(Re)creates the database tables."""
for tbl, tbl_obj in self.tables.iteritems():
self.cursor.execute("DROP TABLE IF EXISTS %s" % tbl)
tbl_obj.create()
    def _populate_database(self, etree, entities):
        """Imports XML data into the SQLite database.
        etree: ElementTree object for JMdict
        entities: entity name to expansion dictionary
        """
# NOTE: this is waaay too long. Should be broken up somehow.
# For now this will work though...
# Populate entities table and get integer keys
# NOTE: we'll be mapping from *expanded* entities to ints.
entity_int_d = {}
tbl = self.tables['entity']
for entity, expansion in entities.iteritems():
i = tbl.insert(entity, expansion)
entity_int_d[expansion] = i
# Iterate through each entry
for entry in etree.findall("entry"):
# entry table
ent_seq = entry.find("ent_seq")
entry_id = self.tables["entry"].insert(int(ent_seq.text))
for k_ele in entry.findall("k_ele"):
# k_ele
value = k_ele.find("keb").text
k_ele_id = self.tables["k_ele"].insert(entry_id, value)
# ke_inf
for ke_inf in k_ele.findall("ke_inf"):
value = ke_inf.text.strip()
entity_id = entity_int_d[value]
self.tables["ke_inf"].insert(k_ele_id, entity_id)
# ke_pri
for ke_pri in k_ele.findall("ke_pri"):
value = ke_pri.text
self.tables["ke_pri"].insert(k_ele_id, value)
for r_ele in entry.findall("r_ele"):
# r_ele
value = r_ele.find("reb").text
# For nokanji: currently it's an empty tag, so
# treating it as true/false.
nokanji = 1 if r_ele.find("nokanji") is not None else 0
r_ele_id = self.tables["r_ele"].insert(entry_id, value, nokanji)
# re_restr
for re_restr in r_ele.findall("re_restr"):
value = re_restr.text
self.tables["re_restr"].insert(r_ele_id, value)
# re_inf
for re_inf in r_ele.findall("re_inf"):
value = re_inf.text.strip()
entity_id = entity_int_d[value]
self.tables["re_inf"].insert(r_ele_id, entity_id)
# re_pri
for re_pri in r_ele.findall("re_pri"):
value = re_pri.text
self.tables["re_pri"].insert(r_ele_id, value)
# info
# (Although children of an info node, since there's only
# one per entry, let's connect directly to the entry.)
info = entry.find("info")
if info is not None:
for links in info.findall("links"):
link_tag = links.find("link_tag").text
link_desc = links.find("link_desc").text
link_uri = links.find("link_uri").text
self.tables["links"].insert(entry_id, link_tag, link_desc,
link_uri)
for bibl in info.findall("bibl"):
                    bib_tag = bibl.find("bib_tag")
                    bib_txt = bibl.find("bib_txt")
bib_tag = bib_tag.text if bib_tag is not None else None
bib_txt = bib_txt.text if bib_txt is not None else None
self.tables["bibl"].insert(entry_id, bib_tag, bib_txt)
for etym in info.findall("etym"):
self.tables["etym"].insert(entry_id, etym.text)
for audit in info.findall("audit"):
upd_date = audit.find("upd_date").text
upd_detl = audit.find("upd_detl").text
self.tables["audit"].insert(entry_id, upd_date, upd_detl)
# sense
key_entity_tables = ["pos", "field", "misc", "dial"]
key_value_tables = ["stagk", "stagr", "xref", "ant", "s_inf", "example"]
for sense in entry.findall("sense"):
# Each sense gets its own ID, for grouping purposes
sense_id = self.tables["sense"].insert(entry_id)
for elem_name in key_value_tables:
for element in sense.findall(elem_name):
self.tables[elem_name].insert(sense_id, element.text)
for elem_name in key_entity_tables:
for element in sense.findall(elem_name):
entity_id = entity_int_d[element.text.strip()]
self.tables[elem_name].insert(sense_id, entity_id)
for lsource in sense.findall("lsource"):
lang = lsource.get(XML_LANG, "eng")
ls_type = lsource.get("ls_type") # implied "full" if absent, "part" otherwise
ls_wasei = lsource.get("ls_wasei") # usually "y"... just a flag.
partial = 1 if ls_type is not None else 0
if ls_wasei is None:
wasei = 0
elif ls_wasei == "y":
wasei = 1
else:
raise ValueError(
'Only known valid ls_wasei attribute value '
'is "y", found:', ls_wasei.text)
self.tables["lsource"].insert(sense_id,
lang, partial, wasei)
for gloss in sense.findall("gloss"):
lang = gloss.get(XML_LANG, "eng")
g_gend = gloss.get("g_gend")
                    pri_list = gloss.getchildren()
                    # Any <pri> child marks the gloss as primary.
                    if len(pri_list) > 0:
gloss_id = self.tables['gloss'].insert(
sense_id, lang, g_gend, gloss.text, 1)
for pri in pri_list:
self.tables['pri'].insert(gloss_id, pri.text)
else:
self.tables['gloss'].insert(sense_id, lang, g_gend,
gloss.text, 0)
def _get_entities(self, xml_data):
"""Gets the ENTITY definitions from JMdict.
Finds the built-in DTD and extracts all ENTITY definitions.
"""
        dtd = self._get_dtd(xml_data)
        # Extract each <!ENTITY name "expansion"> definition from the DTD.
        entities = {}
        regex = '<!ENTITY[ ]+([a-zA-Z0-9-]+)[ ]+"(.*?)">'
        for match in re.finditer(regex, dtd):
key, value = match.groups()[0:2]
entities[key] = value
return entities
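    # A JMdict DTD entity definition looks roughly like:
    #
    #     <!ENTITY adj-i "adjective (keiyoushi)">
    #
    # from which the regex above captures ("adj-i", "adjective (keiyoushi)").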
def _get_dtd(self, xml_data):
"""Gets the DTD from JMdict."""
# This works for JMdict (as it is at the time of writing), but is
# not a general solution.
start_index = xml_data.find("<!DOCTYPE")
if start_index == -1:
raise Exception("Could not find start of internal DTD")
end_index = xml_data.find("]>")
if end_index == -1:
raise Exception("Could not find end ofinternal DTD")
end_index += 2
dtd = xml_data[start_index:end_index]
return dtd
class EntryTable(Table):
create_query = ("CREATE TABLE %s "
"(id INTEGER PRIMARY KEY, ent_seq INTEGER)")
insert_query = "INSERT INTO %s VALUES (NULL, ?)"
index_queries = [
"CREATE INDEX %s_seq ON %s (ent_seq)",
]
class KeyEntityTable(KeyValueTable):
"""Just like a KeyValueTable, but with 'entity' instead of 'value'."""
create_query = ("CREATE TABLE %s "
"(id INTEGER PRIMARY KEY, fk INTEGER, entity INTEGER)")
class REleTable(ChildTable):
create_query = ("CREATE TABLE %s "
"(id INTEGER PRIMARY KEY, fk INTEGER,"
" value TEXT, nokanji INTEGER)")
insert_query = "INSERT INTO %s VALUES (NULL, ?, ?, ?)"
index_queries = [
"CREATE INDEX %s_fk ON %s (fk)",
]
class SenseTable(ChildTable):
"""Corresponds to <sense> tag. Functions as group for glosses, etc."""
create_query = ("CREATE TABLE %s (id INTEGER PRIMARY KEY, fk INTEGER)")
insert_query = "INSERT INTO %s VALUES (NULL, ?)"
index_queries = [
"CREATE INDEX %s_fk ON %s (fk)",
]
class AuditTable(ChildTable):
create_query = ("CREATE TABLE %s "
"(id INTEGER PRIMARY KEY, fk INTEGER,"
" update_date TEXT, update_details TEXT)")
insert_query = "INSERT INTO %s VALUES (NULL, ?, ?, ?)"
index_queries = [
"CREATE INDEX %s_fk ON %s (fk)",
]
class LSourceTable(ChildTable):
"""Represents the <lsource> element from JMdict.
Important changes:
ls_type=full/part => partial=1/0
ls_wasei=y/null => wasei=1/0
"""
create_query = ("CREATE TABLE %s "
"(id INTEGER PRIMARY KEY, fk INTEGER,"
" lang TEXT, partial INTEGER, wasei INTEGER)")
insert_query = "INSERT INTO %s VALUES (NULL, ?, ?, ?, ?)"
index_queries = [
"CREATE INDEX %s_fk ON %s (fk)",
]
class GlossTable(ChildTable):
create_query = ("CREATE TABLE %s "
"(id INTEGER PRIMARY KEY, fk INTEGER,"
" lang TEXT, g_gend TEXT, value TEXT, pri INTEGER)")
insert_query = "INSERT INTO %s VALUES (NULL, ?, ?, ?, ?, ?)"
index_queries = [
"CREATE INDEX %s_fk ON %s (fk)",
"CREATE INDEX %s_lang ON %s (lang)",
"CREATE INDEX %s_value ON %s (value)",
]
class LinksTable(ChildTable):
create_query = ("CREATE TABLE %s "
"(id INTEGER PRIMARY KEY, fk INTEGER,"
" tag TEXT, desc TEXT, uri TEXT)")
insert_query = "INSERT INTO %s VALUES (NULL, ?, ?, ?, ?)"
index_queries = [
"CREATE INDEX %s_fk ON %s (fk)",
]
class BiblTable(ChildTable):
create_query = ("CREATE TABLE %s "
"(id INTEGER PRIMARY KEY, fk INTEGER,"
" tag TEXT, txt TEXT)")
insert_query = "INSERT INTO %s VALUES (NULL, ?, ?, ?, ?)"
index_queries = [
"CREATE INDEX %s_fk ON %s (fk)",
]
class EntityTable(Table):
create_query = ("CREATE TABLE %s "
"(id INTEGER PRIMARY KEY, entity TEXT, expansion TEXT)")
insert_query = "INSERT INTO %s VALUES (NULL, ?, ?)"
######################################################################
def parse_args():
from optparse import OptionParser
op = OptionParser(usage="%prog [options] <db_filename> [search_query]")
op.add_option("-i", "--initialize",
dest="init_fname", metavar="XML_SOURCE",
help=_("Initialize database from file."))
op.add_option("-L", "--lang",
help=_("Specify preferred language for searching."))
options, args = op.parse_args()
if len(args) < 1:
op.print_help()
exit(-1)
return (options, args)
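# Example invocations (the script name is illustrative, not defined here):
#
#     python jmdict_sqlite.py -i JMdict.gz jmdict.sqlite     # build the database
#     python jmdict_sqlite.py jmdict.sqlite dictionary       # search it
#     python jmdict_sqlite.py -L ger jmdict.sqlite Buch      # restrict gloss language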
def main():
# Copied *almost* verbatim from kd2.py.
options, args = parse_args()
db_fname = args[0]
if options.init_fname is not None:
db = Database(db_fname, init_from_file=options.init_fname)
else:
db = Database(db_fname)
results = []
if len(args) > 1:
# Do search
# To be nice, we'll join all remaining args with spaces.
search_query = " ".join(args[1:])
if options.lang is not None:
results = db.search(search_query, lang=options.lang)
else:
results = db.search(search_query)
if len(results) > 0:
encoding = get_encoding()
for index, result in enumerate(results):
index += 1
print(_("[Entry %d]") % index)
print(unicode(result).encode(encoding))
print()
else:
print(_("No results found."))
if __name__ == "__main__":
main()
|
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
import functools
from typing import Any, AsyncIterable, Callable, Dict, Generic, Optional, TypeVar, Union
import warnings
from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse
from azure.core.polling import AsyncLROPoller, AsyncNoPolling, AsyncPollingMethod
from azure.core.rest import HttpRequest
from azure.core.tracing.decorator import distributed_trace
from azure.core.tracing.decorator_async import distributed_trace_async
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.async_arm_polling import AsyncARMPolling
from ... import models as _models
from ..._vendor import _convert_request
from ...operations._workspace_managed_sql_server_security_alert_policy_operations import build_create_or_update_request_initial, build_get_request, build_list_request
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class WorkspaceManagedSqlServerSecurityAlertPolicyOperations:
"""WorkspaceManagedSqlServerSecurityAlertPolicyOperations async operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.synapse.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer) -> None:
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
@distributed_trace_async
async def get(
self,
resource_group_name: str,
workspace_name: str,
security_alert_policy_name: Union[str, "_models.SecurityAlertPolicyNameAutoGenerated"],
**kwargs: Any
) -> "_models.ServerSecurityAlertPolicy":
"""Get server's security alert policy.
Get a workspace managed sql server's security alert policy.
:param resource_group_name: The name of the resource group. The name is case insensitive.
:type resource_group_name: str
:param workspace_name: The name of the workspace.
:type workspace_name: str
:param security_alert_policy_name: The name of the security alert policy.
:type security_alert_policy_name: str or
~azure.mgmt.synapse.models.SecurityAlertPolicyNameAutoGenerated
:keyword callable cls: A custom type or function that will be passed the direct response
:return: ServerSecurityAlertPolicy, or the result of cls(response)
:rtype: ~azure.mgmt.synapse.models.ServerSecurityAlertPolicy
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.ServerSecurityAlertPolicy"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
request = build_get_request(
subscription_id=self._config.subscription_id,
resource_group_name=resource_group_name,
workspace_name=workspace_name,
security_alert_policy_name=security_alert_policy_name,
template_url=self.get.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('ServerSecurityAlertPolicy', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Synapse/workspaces/{workspaceName}/securityAlertPolicies/{securityAlertPolicyName}'} # type: ignore
async def _create_or_update_initial(
self,
resource_group_name: str,
workspace_name: str,
security_alert_policy_name: Union[str, "_models.SecurityAlertPolicyNameAutoGenerated"],
parameters: "_models.ServerSecurityAlertPolicy",
**kwargs: Any
) -> Optional["_models.ServerSecurityAlertPolicy"]:
cls = kwargs.pop('cls', None) # type: ClsType[Optional["_models.ServerSecurityAlertPolicy"]]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]
_json = self._serialize.body(parameters, 'ServerSecurityAlertPolicy')
request = build_create_or_update_request_initial(
subscription_id=self._config.subscription_id,
resource_group_name=resource_group_name,
workspace_name=workspace_name,
security_alert_policy_name=security_alert_policy_name,
content_type=content_type,
json=_json,
template_url=self._create_or_update_initial.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('ServerSecurityAlertPolicy', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_create_or_update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Synapse/workspaces/{workspaceName}/securityAlertPolicies/{securityAlertPolicyName}'} # type: ignore
@distributed_trace_async
async def begin_create_or_update(
self,
resource_group_name: str,
workspace_name: str,
security_alert_policy_name: Union[str, "_models.SecurityAlertPolicyNameAutoGenerated"],
parameters: "_models.ServerSecurityAlertPolicy",
**kwargs: Any
) -> AsyncLROPoller["_models.ServerSecurityAlertPolicy"]:
"""Create or Update server's threat detection policy.
Create or Update a workspace managed sql server's threat detection policy.
:param resource_group_name: The name of the resource group. The name is case insensitive.
:type resource_group_name: str
:param workspace_name: The name of the workspace.
:type workspace_name: str
:param security_alert_policy_name: The name of the security alert policy.
:type security_alert_policy_name: str or
~azure.mgmt.synapse.models.SecurityAlertPolicyNameAutoGenerated
:param parameters: The workspace managed sql server security alert policy.
:type parameters: ~azure.mgmt.synapse.models.ServerSecurityAlertPolicy
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
this operation to not poll, or pass in your own initialized polling object for a personal
polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either ServerSecurityAlertPolicy or the
result of cls(response)
:rtype:
~azure.core.polling.AsyncLROPoller[~azure.mgmt.synapse.models.ServerSecurityAlertPolicy]
:raises: ~azure.core.exceptions.HttpResponseError
"""
content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]
polling = kwargs.pop('polling', True) # type: Union[bool, azure.core.polling.AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.ServerSecurityAlertPolicy"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._create_or_update_initial(
resource_group_name=resource_group_name,
workspace_name=workspace_name,
security_alert_policy_name=security_alert_policy_name,
parameters=parameters,
content_type=content_type,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
def get_long_running_output(pipeline_response):
response = pipeline_response.http_response
deserialized = self._deserialize('ServerSecurityAlertPolicy', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
if polling is True: polling_method = AsyncARMPolling(lro_delay, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Synapse/workspaces/{workspaceName}/securityAlertPolicies/{securityAlertPolicyName}'} # type: ignore
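    # A minimal calling sketch (the client construction and the operation-group
    # attribute name are assumptions, not defined in this file; ``policy`` is a
    # ServerSecurityAlertPolicy model instance):
    #
    #     poller = await client.workspace_managed_sql_server_security_alert_policy.begin_create_or_update(
    #         resource_group_name="rg", workspace_name="ws",
    #         security_alert_policy_name="Default", parameters=policy)
    #     result = await poller.result()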
@distributed_trace
def list(
self,
resource_group_name: str,
workspace_name: str,
**kwargs: Any
) -> AsyncIterable["_models.ServerSecurityAlertPolicyListResult"]:
"""Get server's threat detection policies.
Get workspace managed sql server's threat detection policies.
:param resource_group_name: The name of the resource group. The name is case insensitive.
:type resource_group_name: str
:param workspace_name: The name of the workspace.
:type workspace_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either ServerSecurityAlertPolicyListResult or the result
of cls(response)
:rtype:
~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.synapse.models.ServerSecurityAlertPolicyListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.ServerSecurityAlertPolicyListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
def prepare_request(next_link=None):
if not next_link:
request = build_list_request(
subscription_id=self._config.subscription_id,
resource_group_name=resource_group_name,
workspace_name=workspace_name,
template_url=self.list.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
else:
request = build_list_request(
subscription_id=self._config.subscription_id,
resource_group_name=resource_group_name,
workspace_name=workspace_name,
template_url=next_link,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
request.method = "GET"
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize("ServerSecurityAlertPolicyListResult", pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Synapse/workspaces/{workspaceName}/securityAlertPolicies'} # type: ignore
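    # Consuming the pager returned by list() (client and operation-group names
    # are assumptions; only the paging behaviour is defined here):
    #
    #     async for policy in client.workspace_managed_sql_server_security_alert_policy.list(
    #             resource_group_name="rg", workspace_name="ws"):
    #         print(policy.name)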
|
|
# Copyright 2014 Cisco Systems, Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import copy
import mock
from neutron.common import test_lib
from neutron.extensions import providernet as pr_net
from neutron.tests.unit.extensions import test_l3
from networking_cisco import backwards_compatibility as bc
from networking_cisco.plugins.cisco.device_manager.plugging_drivers.\
n1kv_ml2_trunking_driver import N1kvML2TrunkingPlugDriver
from networking_cisco.plugins.cisco.device_manager.plugging_drivers.\
n1kv_ml2_trunking_driver import MIN_LL_VLAN_TAG
from networking_cisco.plugins.ml2.drivers.cisco.n1kv import (
config as ml2_n1kv_config)
from networking_cisco.plugins.ml2.drivers.cisco.n1kv import constants
from networking_cisco.tests.unit.cisco.l3 import (
test_l3_router_appliance_plugin)
L3_PLUGIN_KLASS = (
'networking_cisco.tests.unit.cisco.l3.test_l3_router_appliance_plugin.'
'TestApplianceL3RouterServicePlugin')
POLICY_PROFILE_PLUGIN = ('networking_cisco.plugins.ml2.drivers.cisco.n1kv.'
'policy_profile_service.PolicyProfilePlugin')
NETWORK_PROFILE_PLUGIN = ('networking_cisco.plugins.ml2.drivers.cisco.n1kv.'
'network_profile_service.NetworkProfilePlugin')
DEFAULT_PP = 'm1'
class TestN1kvTrunkingPluggingDriver(
test_l3_router_appliance_plugin.L3RouterApplianceTestCaseBase,
test_l3.L3NatTestCaseMixin):
# we use router types defined in .ini file.
configure_routertypes = False
router_type = 'CSR1kv_Neutron_router'
test_driver = N1kvML2TrunkingPlugDriver
def setUp(self, service_plugins=None):
service_plugins = {
constants.CISCO_N1KV: POLICY_PROFILE_PLUGIN,
constants.CISCO_N1KV_NET_PROFILE: NETWORK_PROFILE_PLUGIN,
bc.constants.L3: L3_PLUGIN_KLASS}
ml2_cisco_opts = {
'n1kv_vsm_ips': ['127.0.0.1'],
'username': 'admin',
'password': 'Sfish123',
'default_policy_profile': DEFAULT_PP
}
for opt, val in ml2_cisco_opts.items():
ml2_n1kv_config.cfg.CONF.set_override(opt, val, 'ml2_cisco_n1kv')
super(TestN1kvTrunkingPluggingDriver, self).setUp(
service_plugins=service_plugins)
# save possible test_lib.test_config 'config_files' dict entry so we
# can restore it after tests since we will change its value
self._old_config_files = copy.copy(test_lib.test_config.get(
'config_files'))
# include config files for device manager service plugin and router
# service plugin since we define a number of hosting device templates,
# hosting devices and routertypes there
self._add_device_manager_plugin_ini_file()
self._add_router_plugin_ini_file()
#TODO(bobmel): Fix bug in test_extensions.py and we can remove the
# below call to setup_config()
self.setup_config()
self.net_plugin = bc.get_plugin(constants.CISCO_N1KV_NET_PROFILE)
self.policy_plugin = bc.get_plugin(constants.CISCO_N1KV)
def tearDown(self):
if self._old_config_files is None:
test_lib.test_config.pop('config_files', None)
else:
test_lib.test_config['config_files'] = self._old_config_files
super(TestN1kvTrunkingPluggingDriver, self).tearDown()
def test__get_profile_id(self):
m1 = mock.MagicMock(return_value=[{'id': 'profile_uuid1'}])
self.policy_plugin.get_policy_profiles = m1
plugging_driver = self.test_driver()
p_id = plugging_driver._get_profile_id('port_profile', 'N1kv port '
'profile', 'the_profile')
self.assertEqual(p_id, 'profile_uuid1')
def test__get_profile_id_multiple_match(self):
m1 = mock.MagicMock(return_value=[{'id': 'profile_uuid1'},
{'id': 'profile_uuid2'}])
self.policy_plugin.get_policy_profiles = m1
plugging_driver = self.test_driver()
p_id = plugging_driver._get_profile_id('port_profile', 'N1kv port '
'profile', 'the_profile')
self.assertEqual(p_id, None)
def test__get_profile_id_no_match(self):
m1 = mock.MagicMock(return_value=[])
self.policy_plugin.get_policy_profiles = m1
plugging_driver = self.test_driver()
p_id = plugging_driver._get_profile_id('port_profile', 'N1kv port '
'profile', 'the_profile')
self.assertEqual(p_id, None)
def test__get_network_profile_id(self):
m1 = mock.MagicMock(return_value=[{'id': 'profile_uuid1'}])
self.net_plugin.get_network_profiles = m1
plugging_driver = self.test_driver()
p_id = plugging_driver._get_profile_id('net_profile', 'net profile',
'the_profile')
self.assertEqual(p_id, 'profile_uuid1')
def test__get_network_profile_id_multiple_match(self):
m1 = mock.MagicMock(return_value=[{'id': 'profile_uuid1'},
{'id': 'profile_uuid2'}])
self.net_plugin.get_network_profiles = m1
plugging_driver = self.test_driver()
p_id = plugging_driver._get_profile_id('net_profile', 'net profile',
'the_profile')
self.assertEqual(p_id, None)
def test__get_network_profile_id_no_match(self):
m1 = mock.MagicMock(return_value=[])
self.net_plugin.get_network_profiles = m1
plugging_driver = self.test_driver()
p_id = plugging_driver._get_profile_id('net_profile', 'net profile',
'the_profile')
self.assertEqual(p_id, None)
def test_create_hosting_device_resources(self):
def _verify_resource_name(res_list, resource_prefix, num):
valid_names = set()
for i in range(num):
index = str(i + 1)
valid_names.add('t1_' + resource_prefix + index)
valid_names.add('t2_' + resource_prefix + index)
for r in res_list:
# assert by trying to remove item
valid_names.remove(r['name'])
self.assertEqual(len(valid_names), 0)
m1 = mock.MagicMock(return_value=[{'id': 'profile_uuid1'}])
self.policy_plugin.get_policy_profiles = m1
m2 = mock.MagicMock(return_value=[{'id': 'profile_uuid2'}])
self.net_plugin.get_network_profiles = m2
osn_subnet = self._list('subnets')['subnets'][0]
tenant_id = osn_subnet['tenant_id']
ctx = bc.context.Context('', tenant_id, is_admin=True)
plugging_driver = self.test_driver()
mgmt_context = {'mgmt_nw_id': osn_subnet['network_id']}
res = plugging_driver.create_hosting_device_resources(
ctx, "some_id", tenant_id, mgmt_context, 2)
self.assertIsNotNone(plugging_driver._t1_net_id)
self.assertIsNotNone(plugging_driver._t2_net_id)
self.assertIsNotNone(res['mgmt_port'])
self.assertEqual(len(res), 2)
self.assertEqual(len(res['ports']), 4)
_verify_resource_name(res['ports'], 'p:', 2)
def test_create_hosting_device_resources_no_mgmt_context(self):
m1 = mock.MagicMock(return_value=[{'id': 'profile_uuid1'}])
self.policy_plugin.get_policy_profiles = m1
m2 = mock.MagicMock(return_value=[{'id': 'profile_uuid2'}])
self.net_plugin.get_network_profiles = m2
osn_subnet = self._list('subnets')['subnets'][0]
tenant_id = osn_subnet['tenant_id']
ctx = bc.context.Context('', tenant_id, is_admin=True)
plugging_driver = self.test_driver()
res = plugging_driver.create_hosting_device_resources(
ctx, "some_id", tenant_id, None, 2)
self.assertIsNone(plugging_driver._t1_net_id)
self.assertIsNone(plugging_driver._t2_net_id)
self.assertIsNone(res['mgmt_port'], res)
self.assertEqual(len(res), 2)
self.assertEqual(len(res['ports']), 0)
def test_get_hosting_device_resources_by_complementary_id(self):
m1 = mock.MagicMock(return_value=[{'id': 'profile_uuid1'}])
self.policy_plugin.get_policy_profiles = m1
m2 = mock.MagicMock(return_value=[{'id': 'profile_uuid2'}])
self.net_plugin.get_network_profiles = m2
osn_subnet = self._list('subnets')['subnets'][0]
tenant_id = osn_subnet['tenant_id']
ctx = bc.context.Context('', tenant_id, is_admin=True)
plugging_driver = self.test_driver()
mgmt_context = {'mgmt_nw_id': osn_subnet['network_id']}
res = plugging_driver.create_hosting_device_resources(
ctx, "some_id", tenant_id, mgmt_context, 1)
# ports that should not be returned
with self.port(), self.port(device_id='uuid2'), self.port(
tenant_id=tenant_id):
res_get = plugging_driver.get_hosting_device_resources(
ctx, '', 'some_id', tenant_id, osn_subnet['network_id'])
self.assertEqual(res_get['mgmt_port']['id'],
res['mgmt_port']['id'])
self.assertEqual({i['id'] for i in res['ports']},
{i['id'] for i in res_get['ports']})
def test_get_hosting_device_resources_by_device_id(self):
m1 = mock.MagicMock(return_value=[{'id': 'profile_uuid1'}])
self.policy_plugin.get_policy_profiles = m1
m2 = mock.MagicMock(return_value=[{'id': 'profile_uuid2'}])
self.net_plugin.get_network_profiles = m2
osn_subnet = self._list('subnets')['subnets'][0]
tenant_id = osn_subnet['tenant_id']
ctx = bc.context.Context('', tenant_id, is_admin=True)
plugging_driver = self.test_driver()
mgmt_context = {'mgmt_nw_id': osn_subnet['network_id']}
res = plugging_driver.create_hosting_device_resources(
ctx, "some_id", tenant_id, mgmt_context, 1)
# update attributes of created ports to fake what Nova updates
hd_uuid = 'hd_uuid1'
update_spec = {'port': {'device_id': hd_uuid,
'device_owner': 'nova'}}
for hd_port in self._list('ports')['ports']:
self._update('ports', hd_port['id'], update_spec)
# ports that should not be returned
with self.port(), self.port(device_id='uuid2'), self.port(
tenant_id=tenant_id), self.port(tenant_id=tenant_id,
device_owner='other_uuid'):
res_get = plugging_driver.get_hosting_device_resources(
ctx, hd_uuid, 'some_id', tenant_id, osn_subnet['network_id'])
self.assertEqual(res_get['mgmt_port']['id'],
res['mgmt_port']['id'])
self.assertEqual({i['id'] for i in res['ports']},
{i['id'] for i in res_get['ports']})
def test_delete_hosting_device_resources(self):
m1 = mock.MagicMock(return_value=[{'id': 'profile_uuid1'}])
self.policy_plugin.get_policy_profiles = m1
m2 = mock.MagicMock(return_value=[{'id': 'profile_uuid2'}])
self.net_plugin.get_network_profiles = m2
osn_subnet = self._list('subnets')['subnets'][0]
tenant_id = osn_subnet['tenant_id']
ctx = bc.context.Context('', tenant_id, is_admin=True)
mgmt_context = {'mgmt_nw_id': osn_subnet['network_id']}
plugging_driver = self.test_driver()
res = plugging_driver.create_hosting_device_resources(
ctx, "some_id", tenant_id, mgmt_context, 2)
nets = self._list('networks')
self.assertEqual(len(nets['networks']), 3)
subnets = self._list('subnets')
self.assertEqual(len(subnets['subnets']), 3)
ports = self._list('ports')
self.assertEqual(len(ports['ports']), 5)
# avoid passing the mgmt port twice in argument list
mgmt_port = res['mgmt_port']
del res['mgmt_port']
plugging_driver.delete_hosting_device_resources(
ctx, tenant_id, mgmt_port, **res)
nets = self._list('networks')['networks']
# mgmt network and subnet should remain
self.assertEqual(len(nets), 1)
self.assertEqual(nets[0]['id'], osn_subnet['network_id'])
subnets = self._list('subnets')['subnets']
self.assertEqual(len(subnets), 1)
self.assertEqual(subnets[0]['id'], osn_subnet['id'])
ports = self._list('ports')
self.assertEqual(len(ports['ports']), 0)
def test_delete_hosting_device_resources_retry_success(self):
def _fake_delete_resources(context, name, deleter,
exception_type, resource_ids):
if counters['attempts'] < counters['max_attempts']:
if name == "trunk port":
counters['attempts'] += 1
return
real_delete_resources(context, name, deleter,
exception_type, resource_ids)
m1 = mock.MagicMock(return_value=[{'id': 'profile_uuid1'}])
self.policy_plugin.get_policy_profiles = m1
m2 = mock.MagicMock(return_value=[{'id': 'profile_uuid2'}])
self.net_plugin.get_network_profiles = m2
osn_subnet = self._list('subnets')['subnets'][0]
tenant_id = osn_subnet['tenant_id']
ctx = bc.context.Context('', tenant_id, is_admin=True)
mgmt_context = {'mgmt_nw_id': osn_subnet['network_id']}
plugging_driver = self.test_driver()
real_delete_resources = plugging_driver._delete_resources
res = plugging_driver.create_hosting_device_resources(
ctx, "some_id", tenant_id, mgmt_context, 2)
nets = self._list('networks')
self.assertEqual(len(nets['networks']), 3)
subnets = self._list('subnets')
self.assertEqual(len(subnets['subnets']), 3)
ports = self._list('ports')
self.assertEqual(len(ports['ports']), 5)
# avoid passing the mgmt port twice in argument list
mgmt_port = res['mgmt_port']
del res['mgmt_port']
with mock.patch.object(plugging_driver, '_delete_resources') as (
delete_mock):
with mock.patch(
'networking_cisco.plugins.cisco.device_manager.'
'plugging_drivers.n1kv_ml2_trunking_driver.'
'eventlet.sleep'):
delete_mock.side_effect = _fake_delete_resources
counters = {'attempts': 0, 'max_attempts': 2}
plugging_driver.delete_hosting_device_resources(
ctx, tenant_id, mgmt_port, **res)
# three retry iterations with two calls per iteration
self.assertEqual(delete_mock.call_count, 6)
nets = self._list('networks')['networks']
self.assertEqual(len(nets), 1)
subnets = self._list('subnets')['subnets']
self.assertEqual(len(subnets), 1)
ports = self._list('ports')
self.assertEqual(len(ports['ports']), 0)
def test_delete_hosting_device_resources_finite_attempts(self):
m1 = mock.MagicMock(return_value=[{'id': 'profile_uuid1'}])
self.policy_plugin.get_policy_profiles = m1
m2 = mock.MagicMock(return_value=[{'id': 'profile_uuid2'}])
self.net_plugin.get_network_profiles = m2
osn_subnet = self._list('subnets')['subnets'][0]
tenant_id = osn_subnet['tenant_id']
ctx = bc.context.Context('', tenant_id, is_admin=True)
mgmt_context = {'mgmt_nw_id': osn_subnet['network_id']}
plugging_driver = self.test_driver()
res = plugging_driver.create_hosting_device_resources(
ctx, "some_id", tenant_id, mgmt_context, 2)
nets = self._list('networks')
self.assertEqual(len(nets['networks']), 3)
subnets = self._list('subnets')
self.assertEqual(len(subnets['subnets']), 3)
ports = self._list('ports')
self.assertEqual(len(ports['ports']), 5)
# avoid passing the mgmt port twice in argument list
mgmt_port = res['mgmt_port']
del res['mgmt_port']
with mock.patch.object(plugging_driver, '_delete_resources') as (
delete_mock):
with mock.patch(
'networking_cisco.plugins.cisco.device_manager.'
'plugging_drivers.n1kv_ml2_trunking_driver.eventlet'
'.sleep'):
plugging_driver.delete_hosting_device_resources(
ctx, tenant_id, mgmt_port, **res)
# four retry iterations with two calls per iteration
self.assertEqual(delete_mock.call_count, 8)
nets = self._list('networks')['networks']
self.assertEqual(len(nets), 3)
subnets = self._list('subnets')['subnets']
self.assertEqual(len(subnets), 3)
ports = self._list('ports')
self.assertEqual(len(ports['ports']), 5)
def test_extend_hosting_port_info_adds_segmentation_id(self):
hosting_info = {}
fake_port_db_obj = mock.MagicMock()
fake_port_db_obj.hosting_info = mock.MagicMock()
fake_port_db_obj.hosting_info.segmentation_id = 50
hosting_device = mock.MagicMock()
tenant_id = 'tenant_uuid1'
ctx = bc.context.Context('', tenant_id, is_admin=True)
plugging_driver = self.test_driver()
plugging_driver.extend_hosting_port_info(ctx, fake_port_db_obj,
hosting_device, hosting_info)
self.assertEqual(hosting_info['segmentation_id'], 50)
def _update_provider_net_info(self, res_list, fields):
for res in res_list:
pv_info = self._pv_info['vlan'].get(res['id'])
if pv_info is None:
pv_info = self._pv_info['vxlan'].get(res['id'])
if pv_info is None:
nw_type = self._network_type
if not self._pv_info[nw_type]:
tag = {'vlan': 50, 'vxlan': 7000}[nw_type]
pv_info = {'nw_type': nw_type, 'tag': tag}
self._pv_info[nw_type][res['id']] = pv_info
if pv_info is None:
tag = max([i['tag']
for i in self._pv_info[nw_type].values()]) + 1
pv_info = {'nw_type': nw_type, 'tag': tag}
self._pv_info[nw_type][res['id']] = pv_info
res[pr_net.NETWORK_TYPE] = pv_info['nw_type']
res[pr_net.SEGMENTATION_ID] = pv_info['tag']
if fields is not None:
for attr in list(res.keys()):
if attr not in fields:
del res[attr]
def _mocked_get_network(self, context, id, fields=None):
res = self.real_get_network(context, id)
self._update_provider_net_info([res], fields)
return res
def _mocked_get_networks(self, *args, **kwargs):
if len(args) >= 3:
fields = args[2]
list_args = [i for i in args]
list_args[2] = None
args = list_args
else:
fields = kwargs.pop('fields', None)
res_list = self.real_get_networks(*args, **kwargs)
self._update_provider_net_info(res_list, fields)
return res_list
def _test_allocate_hosting_port(self, test_info1, test_info2):
def _validate_allocation(plugin, ctx, r, port_db, test_info,
i, hd, trunk_ports, plugging_driver):
binding_db = plugin._allocate_hosting_port(
ctx, r['id'], port_db, hd['id'], plugging_driver)
selected_port = trunk_ports.get(binding_db.hosting_port_id)
self.assertIsNotNone(selected_port)
self.assertEqual(selected_port['name'],
test_info['port_names'][i])
self.assertEqual(binding_db.segmentation_id,
test_info['vlan_tags'][i])
m1 = mock.MagicMock(return_value=[{'id': 'profile_uuid1'}])
self.policy_plugin.get_policy_profiles = m1
m2 = mock.MagicMock(return_value=[{'id': 'profile_uuid2'}])
self.net_plugin.get_network_profiles = m2
osn_subnet = self._list('subnets')['subnets'][0]
tenant_id = osn_subnet['tenant_id']
ctx = bc.context.Context('', tenant_id, is_admin=True)
mgmt_context = {'mgmt_nw_id': osn_subnet['network_id']}
plugging_driver = self.test_driver()
res = plugging_driver.create_hosting_device_resources(
ctx, "some_id", tenant_id, mgmt_context, 2)
plugging_driver.setup_logical_port_connectivity = mock.MagicMock()
plugging_driver.teardown_logical_port_connectivity = mock.MagicMock()
with self.subnet() as subnet1:
sn1 = subnet1['subnet']
ext_net_id = sn1['network_id']
self._set_net_external(ext_net_id)
gw_info = {'network_id': ext_net_id}
with self.router(external_gateway_info=gw_info,
tenant_id=sn1['tenant_id']) as router1:
r1 = router1['router']
hds = self._list('hosting_devices')['hosting_devices']
hd = hds[0]
# update attributes of created ports to fake what Nova updates
hd_uuid = hd['id']
update_spec = {'port': {'device_id': hd_uuid,
'device_owner': 'nova'}}
self._update('ports', res['mgmt_port']['id'], update_spec)
trunk_ports = {}
for hd_port in res['ports']:
self._update('ports', hd_port['id'], update_spec)
trunk_ports[hd_port['id']] = hd_port
self._pv_info = {'vlan': {}, 'vxlan': {}}
self._network_type = test_info1['network_types'][0]
self.real_get_network = self.core_plugin.get_network
self.real_get_networks = self.core_plugin.get_networks
with mock.patch.object(self.core_plugin, 'get_network') as m1,\
mock.patch.object(self.core_plugin,
'get_networks') as m2:
m1.side_effect = self._mocked_get_network
m2.side_effect = self._mocked_get_networks
u1_ctx = bc.context.Context('', r1['tenant_id'],
is_admin=True)
gw_port_db = self.core_plugin._get_ports_query(
u1_ctx, filters={'network_id': [ext_net_id]}).one()
_validate_allocation(
self.l3_plugin, u1_ctx, r1, gw_port_db,
test_info1, 0, hd, trunk_ports, plugging_driver)
for i in range(1, len(test_info1['network_types'])):
cidr = '1.0.' + str(i) + '.0/24'
with self.subnet(cidr=cidr) as subnet2:
sn2 = subnet2['subnet']
itfc_info = self._router_interface_action(
'add', r1['id'], sn2['id'], None)
self._network_type = test_info1['network_types'][i]
port_db = self.core_plugin._get_port(
u1_ctx, itfc_info['port_id'])
_validate_allocation(
self.l3_plugin, u1_ctx, r1, port_db,
test_info1,
i, hd, trunk_ports, plugging_driver)
self._network_type = test_info2['network_types'][0]
with self.router(external_gateway_info=gw_info,
tenant_id=sn1['tenant_id']) as router2:
r2 = router2['router']
u2_ctx = bc.context.Context('', r2['tenant_id'],
is_admin=True)
gw_port_db = self.core_plugin._get_ports_query(
u2_ctx, filters={'network_id': [ext_net_id],
'device_id': [r2['id']]}).one()
_validate_allocation(
self.l3_plugin, u2_ctx, r2, gw_port_db,
test_info2, 0, hd, trunk_ports, plugging_driver)
for i in range(1, len(test_info2['network_types'])):
cidr = '2.0.' + str(i) + '.0/24'
with self.subnet(cidr=cidr) as subnet3:
sn3 = subnet3['subnet']
itfc_info = self._router_interface_action(
'add', r2['id'], sn3['id'], None)
self._network_type = test_info2[
'network_types'][i]
port_db = self.core_plugin._get_port(
u2_ctx, itfc_info['port_id'])
_validate_allocation(
self.l3_plugin, u2_ctx, r2,
port_db, test_info2, i, hd, trunk_ports,
plugging_driver)
def test_allocate_hosting_port_vlan_network_all_unused(self):
self._test_allocate_hosting_port({'network_types': ['vlan'],
'port_names': ['t2_p:1'],
'vlan_tags': [50]},
{'network_types': ['vlan'],
'port_names': ['t2_p:2'],
'vlan_tags': [50]})
def test_allocate_hosting_port_vlan_network_vlan_already_allocated(self):
self._test_allocate_hosting_port({'network_types': ['vlan', 'vlan'],
'port_names': ['t2_p:1', 't2_p:1'],
'vlan_tags': [50, 51]},
{'network_types': ['vlan', 'vlan'],
'port_names': ['t2_p:2', 't2_p:2'],
'vlan_tags': [50, 52]})
def test_allocate_hosting_port_vlan_network_vxlan_already_allocated(self):
self._test_allocate_hosting_port({'network_types': ['vxlan', 'vlan'],
'port_names': ['t1_p:1', 't2_p:1'],
'vlan_tags': [MIN_LL_VLAN_TAG, 50]},
{'network_types': ['vxlan', 'vlan'],
'port_names': ['t1_p:2', 't2_p:2'],
'vlan_tags': [MIN_LL_VLAN_TAG, 51]})
def test_allocate_hosting_port_vxlan_network_all_unused(self):
self._test_allocate_hosting_port({'network_types': ['vxlan'],
'port_names': ['t1_p:1'],
'vlan_tags': [MIN_LL_VLAN_TAG]},
{'network_types': ['vxlan'],
'port_names': ['t1_p:2'],
'vlan_tags': [MIN_LL_VLAN_TAG]})
def test_allocate_hosting_port_vxlan_network_vlan_already_allocated(self):
self._test_allocate_hosting_port({'network_types': ['vlan', 'vxlan'],
'port_names': ['t2_p:1', 't1_p:1'],
'vlan_tags': [50, MIN_LL_VLAN_TAG]},
{'network_types': ['vlan', 'vxlan'],
'port_names': ['t2_p:2', 't1_p:2'],
'vlan_tags': [50,
MIN_LL_VLAN_TAG]})
def test_allocate_hosting_port_vxlan_network_vxlan_already_allocated(self):
self._test_allocate_hosting_port({'network_types': ['vxlan', 'vxlan'],
'port_names': ['t1_p:1', 't1_p:1'],
'vlan_tags': [10, 11]},
{'network_types': ['vxlan', 'vxlan'],
'port_names': ['t1_p:2', 't1_p:2'],
'vlan_tags': [10, 11]})
def _test_allocate_hosting_port_no_port_found_failure(self, nw_type):
with self.subnet() as subnet1:
sn1 = subnet1['subnet']
ext_net_id = sn1['network_id']
self._set_net_external(ext_net_id)
gw_info = {'network_id': ext_net_id}
with self.router(external_gateway_info=gw_info,
tenant_id=sn1['tenant_id']) as router1:
r1 = router1['router']
plugging_driver = self.test_driver()
u_ctx = bc.context.Context('', r1['tenant_id'], is_admin=True)
gw_port_db = self.core_plugin._get_ports_query(
u_ctx, filters={'network_id': [ext_net_id]}).one()
with mock.patch(
'networking_cisco.plugins.cisco.device_manager.'
'plugging_drivers.n1kv_ml2_trunking_driver.eventlet.'
'sleep') as m1:
allocations = plugging_driver.allocate_hosting_port(
u_ctx, r1['id'], gw_port_db, nw_type,
'non_existant_uuid')
self.assertIsNone(allocations)
self.assertEqual(10, m1.call_count)
def test_allocate_hosting_port_vlan_network_no_port_found_failure(self):
self._test_allocate_hosting_port_no_port_found_failure('vlan')
def test_allocate_hosting_port_vxlan_network_no_port_found_failure(self):
self._test_allocate_hosting_port_no_port_found_failure('vxlan')
|
|
#!/usr/bin/env python3
"""
Usage: make_lite.py <wrapped_routines_file> <lapack_dir>
Typical invocation:
make_lite.py wrapped_routines /tmp/lapack-3.x.x
Requires the following to be on the path:
* f2c
* patch
"""
import sys
import os
import re
import subprocess
import shutil
import fortran
import clapack_scrub
from shutil import which
# Arguments to pass to f2c. You'll always want -A for ANSI C prototypes
# Others of interest: -a to not make variables static by default
# -C to check array subscripts
F2C_ARGS = ['-A', '-Nx800']
# The header to add to the top of the f2c_*.c file. Note that dlamch_() calls
# will be replaced by the macros below by clapack_scrub.scrub_source()
HEADER_BLURB = '''\
/*
* NOTE: This is generated code. Look in numpy/linalg/lapack_lite for
* information on remaking this file.
*/
'''
HEADER = HEADER_BLURB + '''\
#include "f2c.h"
#ifdef HAVE_CONFIG
#include "config.h"
#else
extern doublereal dlamch_(char *);
#define EPSILON dlamch_("Epsilon")
#define SAFEMINIMUM dlamch_("Safe minimum")
#define PRECISION dlamch_("Precision")
#define BASE dlamch_("Base")
#endif
extern doublereal dlapy2_(doublereal *x, doublereal *y);
/*
f2c knows the exact rules for precedence, and so omits parentheses where not
strictly necessary. Since this is generated code, we don't really care if
it's readable, and we know what is written is correct. So don't warn about
them.
*/
#if defined(__GNUC__)
#pragma GCC diagnostic ignored "-Wparentheses"
#endif
'''
class FortranRoutine:
"""Wrapper for a Fortran routine in a file.
"""
type = 'generic'
def __init__(self, name=None, filename=None):
self.filename = filename
if name is None:
root, ext = os.path.splitext(filename)
name = root
self.name = name
self._dependencies = None
def dependencies(self):
if self._dependencies is None:
deps = fortran.getDependencies(self.filename)
self._dependencies = [d.lower() for d in deps]
return self._dependencies
def __repr__(self):
return f'FortranRoutine({self.name!r}, filename={self.filename!r})'
class UnknownFortranRoutine(FortranRoutine):
"""Wrapper for a Fortran routine for which the corresponding file
is not known.
"""
type = 'unknown'
def __init__(self, name):
FortranRoutine.__init__(self, name=name, filename='<unknown>')
def dependencies(self):
return []
class FortranLibrary:
"""Container for a bunch of Fortran routines.
"""
def __init__(self, src_dirs):
self._src_dirs = src_dirs
self.names_to_routines = {}
def _findRoutine(self, rname):
rname = rname.lower()
for s in self._src_dirs:
ffilename = os.path.join(s, rname + '.f')
if os.path.exists(ffilename):
return self._newFortranRoutine(rname, ffilename)
return UnknownFortranRoutine(rname)
def _newFortranRoutine(self, rname, filename):
return FortranRoutine(rname, filename)
def addIgnorableRoutine(self, rname):
"""Add a routine that we don't want to consider when looking at
dependencies.
"""
rname = rname.lower()
routine = UnknownFortranRoutine(rname)
self.names_to_routines[rname] = routine
def addRoutine(self, rname):
"""Add a routine to the library.
"""
self.getRoutine(rname)
def getRoutine(self, rname):
"""Get a routine from the library. Will add if it's not found.
"""
unique = []
rname = rname.lower()
routine = self.names_to_routines.get(rname, unique)
if routine is unique:
routine = self._findRoutine(rname)
self.names_to_routines[rname] = routine
return routine
def allRoutineNames(self):
"""Return the names of all the routines.
"""
return list(self.names_to_routines.keys())
def allRoutines(self):
"""Return all the routines.
"""
return list(self.names_to_routines.values())
def resolveAllDependencies(self):
"""Try to add routines to the library to satisfy all the dependencies
for each routine in the library.
Returns a set of routine names that have the dependencies unresolved.
"""
done_this = set()
last_todo = set()
while True:
todo = set(self.allRoutineNames()) - done_this
if todo == last_todo:
break
for rn in todo:
r = self.getRoutine(rn)
deps = r.dependencies()
for d in deps:
self.addRoutine(d)
done_this.add(rn)
last_todo = todo
return todo
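    # Rough usage sketch (directory and routine names are illustrative):
    #
    #     lib = FortranLibrary(['/tmp/lapack-3.x.x/SRC'])
    #     lib.addRoutine('dgesv')
    #     lib.resolveAllDependencies()
    #     print(lib.allRoutineNames())
    #
    # The worklist loop above keeps pulling in dependencies of dependencies
    # until a pass adds no new routine names.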
class LapackLibrary(FortranLibrary):
def _newFortranRoutine(self, rname, filename):
routine = FortranLibrary._newFortranRoutine(self, rname, filename)
if 'blas' in filename.lower():
routine.type = 'blas'
elif 'install' in filename.lower():
routine.type = 'config'
elif rname.startswith('z'):
routine.type = 'z_lapack'
elif rname.startswith('c'):
routine.type = 'c_lapack'
elif rname.startswith('s'):
routine.type = 's_lapack'
elif rname.startswith('d'):
routine.type = 'd_lapack'
else:
routine.type = 'lapack'
return routine
def allRoutinesByType(self, typename):
routines = sorted((r.name, r) for r in self.allRoutines() if r.type == typename)
return [a[1] for a in routines]
def printRoutineNames(desc, routines):
print(desc)
for r in routines:
print(f'\t{r.name}')
def getLapackRoutines(wrapped_routines, ignores, lapack_dir):
blas_src_dir = os.path.join(lapack_dir, 'BLAS', 'SRC')
if not os.path.exists(blas_src_dir):
blas_src_dir = os.path.join(lapack_dir, 'blas', 'src')
lapack_src_dir = os.path.join(lapack_dir, 'SRC')
if not os.path.exists(lapack_src_dir):
lapack_src_dir = os.path.join(lapack_dir, 'src')
install_src_dir = os.path.join(lapack_dir, 'INSTALL')
if not os.path.exists(install_src_dir):
install_src_dir = os.path.join(lapack_dir, 'install')
library = LapackLibrary([install_src_dir, blas_src_dir, lapack_src_dir])
for r in ignores:
library.addIgnorableRoutine(r)
for w in wrapped_routines:
library.addRoutine(w)
library.resolveAllDependencies()
return library
def getWrappedRoutineNames(wrapped_routines_file):
routines = []
ignores = []
with open(wrapped_routines_file) as fo:
for line in fo:
line = line.strip()
if not line or line.startswith('#'):
continue
if line.startswith('IGNORE:'):
line = line[7:].strip()
ig = line.split()
ignores.extend(ig)
else:
routines.append(line)
return routines, ignores
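# The wrapped_routines file parsed above is plain text: one routine name per
# line, '#' for comments, and optional lines of the form
#
#     IGNORE: xerbla lsame
#
# listing routines to exclude from dependency resolution (the names shown are
# illustrative).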
types = {'blas', 'lapack', 'd_lapack', 's_lapack', 'z_lapack', 'c_lapack', 'config'}
def dumpRoutineNames(library, output_dir):
for typename in {'unknown'} | types:
routines = library.allRoutinesByType(typename)
filename = os.path.join(output_dir, typename + '_routines.lst')
with open(filename, 'w') as fo:
for r in routines:
deps = r.dependencies()
fo.write(f"{r.name}: {' '.join(deps)}\n")
def concatenateRoutines(routines, output_file):
with open(output_file, 'w') as output_fo:
for r in routines:
with open(r.filename, 'r') as fo:
source = fo.read()
output_fo.write(source)
class F2CError(Exception):
pass
def runF2C(fortran_filename, output_dir):
fortran_filename = fortran_filename.replace('\\', '/')
try:
subprocess.check_call(
["f2c"] + F2C_ARGS + ['-d', output_dir, fortran_filename]
)
except subprocess.CalledProcessError as e:
raise F2CError from e
def scrubF2CSource(c_file):
with open(c_file) as fo:
source = fo.read()
source = clapack_scrub.scrubSource(source, verbose=True)
with open(c_file, 'w') as fo:
fo.write(HEADER)
fo.write(source)
def ensure_executable(name):
    # shutil.which returns None instead of raising when the tool is missing,
    # so check the return value explicitly.
    if which(name) is None:
        raise SystemExit(name + ' not found')
def create_name_header(output_dir):
routine_re = re.compile(r'^ (subroutine|.* function)\s+(\w+)\(.*$',
re.I)
extern_re = re.compile(r'^extern [a-z]+ ([a-z0-9_]+)\(.*$')
# BLAS/LAPACK symbols
symbols = set(['xerbla'])
for fn in os.listdir(output_dir):
fn = os.path.join(output_dir, fn)
if not fn.endswith('.f'):
continue
with open(fn, 'r') as f:
for line in f:
m = routine_re.match(line)
if m:
symbols.add(m.group(2).lower())
# f2c symbols
f2c_symbols = set()
with open('f2c.h', 'r') as f:
for line in f:
m = extern_re.match(line)
if m:
f2c_symbols.add(m.group(1))
with open(os.path.join(output_dir, 'lapack_lite_names.h'), 'w') as f:
f.write(HEADER_BLURB)
f.write(
"/*\n"
" * This file renames all BLAS/LAPACK and f2c symbols to avoid\n"
" * dynamic symbol name conflicts, in cases where e.g.\n"
" * integer sizes do not match with 'standard' ABI.\n"
" */\n")
# Rename BLAS/LAPACK symbols
for name in sorted(symbols):
f.write(f'#define {name}_ BLAS_FUNC({name})\n')
# Rename also symbols that f2c exports itself
f.write("\n"
"/* Symbols exported by f2c.c */\n")
for name in sorted(f2c_symbols):
f.write(f'#define {name} numpy_lapack_lite_{name}\n')
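# For a routine named dgetrf (chosen for illustration), the generated header
# contains a line like:
#
#     #define dgetrf_ BLAS_FUNC(dgetrf)
#
# plus one "#define <sym> numpy_lapack_lite_<sym>" line for every symbol that
# f2c.h itself exports.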
def main():
if len(sys.argv) != 3:
print(__doc__)
return
# Make sure that patch and f2c are found on path
ensure_executable('f2c')
ensure_executable('patch')
wrapped_routines_file = sys.argv[1]
lapack_src_dir = sys.argv[2]
output_dir = os.path.join(os.path.dirname(__file__), 'build')
try:
shutil.rmtree(output_dir)
    except OSError:
pass
os.makedirs(output_dir)
wrapped_routines, ignores = getWrappedRoutineNames(wrapped_routines_file)
library = getLapackRoutines(wrapped_routines, ignores, lapack_src_dir)
dumpRoutineNames(library, output_dir)
for typename in types:
fortran_file = os.path.join(output_dir, f'f2c_{typename}.f')
c_file = fortran_file[:-2] + '.c'
print(f'creating {c_file} ...')
routines = library.allRoutinesByType(typename)
concatenateRoutines(routines, fortran_file)
        # apply the Fortran-level patch, if one exists
patch_file = os.path.basename(fortran_file) + '.patch'
if os.path.exists(patch_file):
subprocess.check_call(['patch', '-u', fortran_file, patch_file])
print(f'Patched {fortran_file}')
try:
runF2C(fortran_file, output_dir)
except F2CError:
print(f'f2c failed on {fortran_file}')
break
scrubF2CSource(c_file)
# patch any changes needed to the C file
c_patch_file = c_file + '.patch'
if os.path.exists(c_patch_file):
subprocess.check_call(['patch', '-u', c_file, c_patch_file])
print()
create_name_header(output_dir)
for fname in os.listdir(output_dir):
if fname.endswith('.c') or fname == 'lapack_lite_names.h':
print('Copying ' + fname)
shutil.copy(
os.path.join(output_dir, fname),
os.path.abspath(os.path.dirname(__file__)),
)
if __name__ == '__main__':
main()
|
|
# Copyright 2012 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Handles all requests to the conductor service."""
from oslo.config import cfg
from nova import baserpc
from nova.conductor import manager
from nova.conductor import rpcapi
from nova.openstack.common import log as logging
from nova.openstack.common.rpc import common as rpc_common
from nova import utils
conductor_opts = [
cfg.BoolOpt('use_local',
default=False,
help='Perform nova-conductor operations locally'),
cfg.StrOpt('topic',
default='conductor',
help='the topic conductor nodes listen on'),
cfg.StrOpt('manager',
default='nova.conductor.manager.ConductorManager',
help='full class name for the Manager for conductor'),
]
conductor_group = cfg.OptGroup(name='conductor',
title='Conductor Options')
CONF = cfg.CONF
CONF.register_group(conductor_group)
CONF.register_opts(conductor_opts, conductor_group)
LOG = logging.getLogger(__name__)
class LocalAPI(object):
"""A local version of the conductor API that does database updates
locally instead of via RPC.
"""
def __init__(self):
# TODO(danms): This needs to be something more generic for
# other/future users of this sort of functionality.
self._manager = utils.ExceptionHelper(manager.ConductorManager())
def wait_until_ready(self, context, *args, **kwargs):
# nothing to wait for in the local case.
pass
def instance_update(self, context, instance_uuid, **updates):
"""Perform an instance update in the database."""
return self._manager.instance_update(context, instance_uuid,
updates, 'compute')
def instance_get(self, context, instance_id):
return self._manager.instance_get(context, instance_id)
def instance_get_by_uuid(self, context, instance_uuid,
columns_to_join=None):
return self._manager.instance_get_by_uuid(context, instance_uuid,
columns_to_join)
def instance_destroy(self, context, instance):
return self._manager.instance_destroy(context, instance)
def instance_get_all_by_host(self, context, host, columns_to_join=None):
return self._manager.instance_get_all_by_host(
context, host, columns_to_join=columns_to_join)
def instance_get_all_by_host_and_node(self, context, host, node):
return self._manager.instance_get_all_by_host(context, host, node)
def instance_get_all_by_filters(self, context, filters,
sort_key='created_at',
sort_dir='desc',
columns_to_join=None):
return self._manager.instance_get_all_by_filters(context,
filters,
sort_key,
sort_dir,
columns_to_join)
def instance_get_active_by_window_joined(self, context, begin, end=None,
project_id=None, host=None):
return self._manager.instance_get_active_by_window_joined(
context, begin, end, project_id, host)
def instance_info_cache_update(self, context, instance, values):
return self._manager.instance_info_cache_update(context,
instance,
values)
def instance_info_cache_delete(self, context, instance):
return self._manager.instance_info_cache_delete(context, instance)
def instance_type_get(self, context, instance_type_id):
return self._manager.instance_type_get(context, instance_type_id)
def instance_fault_create(self, context, values):
return self._manager.instance_fault_create(context, values)
def migration_get(self, context, migration_id):
return self._manager.migration_get(context, migration_id)
def migration_get_unconfirmed_by_dest_compute(self, context,
confirm_window,
dest_compute):
return self._manager.migration_get_unconfirmed_by_dest_compute(
context, confirm_window, dest_compute)
def migration_get_in_progress_by_host_and_node(self, context, host, node):
return self._manager.migration_get_in_progress_by_host_and_node(
context, host, node)
def migration_create(self, context, instance, values):
return self._manager.migration_create(context, instance, values)
def migration_update(self, context, migration, status):
return self._manager.migration_update(context, migration, status)
def aggregate_host_add(self, context, aggregate, host):
return self._manager.aggregate_host_add(context, aggregate, host)
def aggregate_host_delete(self, context, aggregate, host):
return self._manager.aggregate_host_delete(context, aggregate, host)
def aggregate_get(self, context, aggregate_id):
return self._manager.aggregate_get(context, aggregate_id)
def aggregate_get_by_host(self, context, host, key=None):
return self._manager.aggregate_get_by_host(context, host, key)
def aggregate_metadata_add(self, context, aggregate, metadata,
set_delete=False):
return self._manager.aggregate_metadata_add(context, aggregate,
metadata,
set_delete)
def aggregate_metadata_delete(self, context, aggregate, key):
return self._manager.aggregate_metadata_delete(context,
aggregate,
key)
def aggregate_metadata_get_by_host(self, context, host,
key='availability_zone'):
return self._manager.aggregate_metadata_get_by_host(context,
host,
key)
def bw_usage_get(self, context, uuid, start_period, mac):
return self._manager.bw_usage_update(context, uuid, mac, start_period)
def bw_usage_update(self, context, uuid, mac, start_period,
bw_in, bw_out, last_ctr_in, last_ctr_out,
last_refreshed=None, update_cells=True):
return self._manager.bw_usage_update(context, uuid, mac, start_period,
bw_in, bw_out,
last_ctr_in, last_ctr_out,
last_refreshed,
update_cells=update_cells)
def security_group_get_by_instance(self, context, instance):
return self._manager.security_group_get_by_instance(context, instance)
def security_group_rule_get_by_security_group(self, context, secgroup):
return self._manager.security_group_rule_get_by_security_group(
context, secgroup)
def provider_fw_rule_get_all(self, context):
return self._manager.provider_fw_rule_get_all(context)
def agent_build_get_by_triple(self, context, hypervisor, os, architecture):
return self._manager.agent_build_get_by_triple(context, hypervisor,
os, architecture)
def block_device_mapping_create(self, context, values):
return self._manager.block_device_mapping_update_or_create(context,
values,
create=True)
def block_device_mapping_update(self, context, bdm_id, values):
values = dict(values)
values['id'] = bdm_id
return self._manager.block_device_mapping_update_or_create(
context, values, create=False)
def block_device_mapping_update_or_create(self, context, values):
return self._manager.block_device_mapping_update_or_create(context,
values)
def block_device_mapping_get_all_by_instance(self, context, instance,
legacy=True):
return self._manager.block_device_mapping_get_all_by_instance(
context, instance, legacy)
def block_device_mapping_destroy(self, context, bdms):
return self._manager.block_device_mapping_destroy(context, bdms=bdms)
def block_device_mapping_destroy_by_instance_and_device(self, context,
instance,
device_name):
return self._manager.block_device_mapping_destroy(
context, instance=instance, device_name=device_name)
def block_device_mapping_destroy_by_instance_and_volume(self, context,
instance,
volume_id):
return self._manager.block_device_mapping_destroy(
context, instance=instance, volume_id=volume_id)
def vol_get_usage_by_time(self, context, start_time):
return self._manager.vol_get_usage_by_time(context, start_time)
def vol_usage_update(self, context, vol_id, rd_req, rd_bytes, wr_req,
wr_bytes, instance, last_refreshed=None,
update_totals=False):
return self._manager.vol_usage_update(context, vol_id,
rd_req, rd_bytes,
wr_req, wr_bytes,
instance, last_refreshed,
update_totals)
def service_get_all(self, context):
return self._manager.service_get_all_by(context)
def service_get_all_by_topic(self, context, topic):
return self._manager.service_get_all_by(context, topic=topic)
def service_get_all_by_host(self, context, host):
return self._manager.service_get_all_by(context, host=host)
def service_get_by_host_and_topic(self, context, host, topic):
return self._manager.service_get_all_by(context, topic, host)
def service_get_by_compute_host(self, context, host):
result = self._manager.service_get_all_by(context, 'compute', host)
# FIXME(comstud): A major revision bump to 2.0 should return a
# single entry, so we should just return 'result' at that point.
return result[0]
def service_get_by_args(self, context, host, binary):
return self._manager.service_get_all_by(context, host=host,
binary=binary)
def action_event_start(self, context, values):
return self._manager.action_event_start(context, values)
def action_event_finish(self, context, values):
return self._manager.action_event_finish(context, values)
def service_create(self, context, values):
return self._manager.service_create(context, values)
def service_destroy(self, context, service_id):
return self._manager.service_destroy(context, service_id)
def compute_node_create(self, context, values):
return self._manager.compute_node_create(context, values)
def compute_node_update(self, context, node, values, prune_stats=False):
return self._manager.compute_node_update(context, node, values,
prune_stats)
def compute_node_delete(self, context, node):
return self._manager.compute_node_delete(context, node)
def service_update(self, context, service, values):
return self._manager.service_update(context, service, values)
def task_log_get(self, context, task_name, begin, end, host, state=None):
return self._manager.task_log_get(context, task_name, begin, end,
host, state)
def task_log_begin_task(self, context, task_name, begin, end, host,
task_items=None, message=None):
return self._manager.task_log_begin_task(context, task_name,
begin, end, host,
task_items, message)
def task_log_end_task(self, context, task_name, begin, end, host,
errors, message=None):
return self._manager.task_log_end_task(context, task_name,
begin, end, host,
errors, message)
def notify_usage_exists(self, context, instance, current_period=False,
ignore_missing_network_data=True,
system_metadata=None, extra_usage_info=None):
return self._manager.notify_usage_exists(
context, instance, current_period, ignore_missing_network_data,
system_metadata, extra_usage_info)
def security_groups_trigger_handler(self, context, event, *args):
return self._manager.security_groups_trigger_handler(context,
event, args)
def security_groups_trigger_members_refresh(self, context, group_ids):
return self._manager.security_groups_trigger_members_refresh(context,
group_ids)
def network_migrate_instance_start(self, context, instance, migration):
return self._manager.network_migrate_instance_start(context,
instance,
migration)
def network_migrate_instance_finish(self, context, instance, migration):
return self._manager.network_migrate_instance_finish(context,
instance,
migration)
def quota_commit(self, context, reservations, project_id=None):
return self._manager.quota_commit(context, reservations,
project_id=project_id)
def quota_rollback(self, context, reservations, project_id=None):
return self._manager.quota_rollback(context, reservations,
project_id=project_id)
def get_ec2_ids(self, context, instance):
return self._manager.get_ec2_ids(context, instance)
def compute_stop(self, context, instance, do_cast=True):
return self._manager.compute_stop(context, instance, do_cast)
def compute_confirm_resize(self, context, instance, migration_ref):
return self._manager.compute_confirm_resize(context, instance,
migration_ref)
def compute_unrescue(self, context, instance):
return self._manager.compute_unrescue(context, instance)
def compute_reboot(self, context, instance, reboot_type):
return self._manager.compute_reboot(context, instance, reboot_type)
class LocalComputeTaskAPI(object):
def __init__(self):
# TODO(danms): This needs to be something more generic for
# other/future users of this sort of functionality.
self._manager = utils.ExceptionHelper(
manager.ComputeTaskManager())
def migrate_server(self, context, instance, scheduler_hint, live, rebuild,
flavor, block_migration, disk_over_commit):
return self._manager.migrate_server(context, instance, scheduler_hint,
live, rebuild, flavor, block_migration, disk_over_commit)
def build_instances(self, context, instances, image,
filter_properties, admin_password, injected_files,
requested_networks, security_groups, block_device_mapping):
utils.spawn_n(self._manager.build_instances, context,
instances=instances, image=image,
filter_properties=filter_properties,
admin_password=admin_password, injected_files=injected_files,
requested_networks=requested_networks,
security_groups=security_groups,
block_device_mapping=block_device_mapping)
def unshelve_instance(self, context, instance):
utils.spawn_n(self._manager.unshelve_instance, context,
instance=instance)
class API(LocalAPI):
"""Conductor API that does updates via RPC to the ConductorManager."""
def __init__(self):
self._manager = rpcapi.ConductorAPI()
self.base_rpcapi = baserpc.BaseAPI(topic=CONF.conductor.topic)
def wait_until_ready(self, context, early_timeout=10, early_attempts=10):
'''Wait until a conductor service is up and running.
This method calls the remote ping() method on the conductor topic until
it gets a response. It starts with a shorter timeout in the loop
(early_timeout) up to early_attempts number of tries. It then drops
back to the globally configured timeout for rpc calls for each retry.
'''
attempt = 0
timeout = early_timeout
while True:
# NOTE(danms): Try ten times with a short timeout, and then punt
# to the configured RPC timeout after that
if attempt == early_attempts:
timeout = None
attempt += 1
# NOTE(russellb): This is running during service startup. If we
# allow an exception to be raised, the service will shut down.
# This may fail the first time around if nova-conductor wasn't
# running when this service started.
try:
self.base_rpcapi.ping(context, '1.21 GigaWatts',
timeout=timeout)
break
except rpc_common.Timeout:
LOG.warning(_('Timed out waiting for nova-conductor. '
'Is it running? Or did this service start '
'before nova-conductor?'))
def instance_update(self, context, instance_uuid, **updates):
"""Perform an instance update in the database."""
return self._manager.instance_update(context, instance_uuid,
updates, 'conductor')
class ComputeTaskAPI(object):
"""ComputeTask API that queues up compute tasks for nova-conductor."""
def __init__(self):
self.conductor_compute_rpcapi = rpcapi.ComputeTaskAPI()
def migrate_server(self, context, instance, scheduler_hint, live, rebuild,
flavor, block_migration, disk_over_commit):
return self.conductor_compute_rpcapi.migrate_server(context, instance,
scheduler_hint, live, rebuild, flavor, block_migration,
disk_over_commit)
def build_instances(self, context, instances, image, filter_properties,
admin_password, injected_files, requested_networks,
security_groups, block_device_mapping):
self.conductor_compute_rpcapi.build_instances(context,
instances=instances, image=image,
filter_properties=filter_properties,
admin_password=admin_password, injected_files=injected_files,
requested_networks=requested_networks,
security_groups=security_groups,
block_device_mapping=block_device_mapping)
def unshelve_instance(self, context, instance):
self.conductor_compute_rpcapi.unshelve_instance(context,
instance=instance)
|
|
# The following tests are purposely limited to the exposed interface by iorw.py
import os.path
import pytest
import boto3
import moto
from moto import mock_s3
from ..s3 import Bucket, Prefix, Key, S3
@pytest.fixture
def bucket_no_service():
"""Returns a bucket instance with no services"""
return Bucket('my_test_bucket')
@pytest.fixture
def bucket_with_service():
"""Returns a bucket instance with a service"""
return Bucket('my_sqs_bucket', ['sqs'])
@pytest.fixture
def bucket_sqs():
"""Returns a bucket instance with a sqs service"""
return Bucket('my_sqs_bucket', ['sqs'])
@pytest.fixture
def bucket_ec2():
"""Returns a bucket instance with a ec2 service"""
return Bucket('my_sqs_bucket', ['ec2'])
@pytest.fixture
def bucket_multiservice():
"""Returns a bucket instance with a ec2 service"""
return Bucket('my_sqs_bucket', ['ec2', 'sqs'])
def test_bucket_init():
assert Bucket('my_test_bucket')
assert Bucket('my_sqs_bucket', 'sqs')
def test_bucket_defaults():
name = 'a bucket'
b1 = Bucket(name)
b2 = Bucket(name, None)
assert b1.name == b2.name
assert b1.service == b2.service
def test_bucket_missing_params():
with pytest.raises(TypeError):
Bucket(service=None)
with pytest.raises(TypeError):
Bucket()
def test_bucket_list(bucket_sqs):
# prefix_test = ''
# assert bucket_sqs.list(prefix_test)
#
# prefix_test = 'abc'
# assert bucket_sqs.list(prefix_test) is None
#
# prefix_test = 'ec2'
# assert bucket_sqs.list(prefix_test) is None
#
# prefix_test = 'sqs'
# assert bucket_sqs.list(prefix_test)
pass
def test_prefix_init():
with pytest.raises(TypeError):
Prefix()
with pytest.raises(TypeError):
Prefix(service=None)
with pytest.raises(TypeError):
Prefix('my_test_prefix')
b1 = Bucket('my_test_bucket')
p1 = Prefix(b1, 'sqs_test', service='sqs')
assert Prefix(b1, 'test_bucket')
assert Prefix(b1, 'test_bucket', service=None)
assert Prefix(b1, 'test_bucket', None)
assert p1.bucket.service == p1.service
def test_prefix_defaults():
bucket = Bucket('my data pool')
name = 'bigdata bucket'
p1 = Prefix(bucket, name)
p2 = Prefix(bucket, name, None)
assert p1.name == p2.name
assert p1.service == p2.service
def test_prefix_str(bucket_sqs):
p1 = Prefix(bucket_sqs, 'sqs_prefix_test', 'sqs')
assert str(p1) == 's3://' + str(bucket_sqs) + '/sqs_prefix_test'
def test_prefix_repr(bucket_sqs):
p1 = Prefix(bucket_sqs, 'sqs_prefix_test', 'sqs')
assert repr(p1) == 's3://' + str(bucket_sqs) + '/sqs_prefix_test'
def test_key_init():
pass
def test_key_repr():
k = Key("foo", "bar")
assert repr(k) == "s3://foo/bar"
def test_key_defaults():
bucket = Bucket('my data pool')
name = 'bigdata bucket'
k1 = Key(bucket, name)
k2 = Key(bucket, name, None, None, None, None, None)
assert k1.size == k2.size
assert k1.etag == k2.etag
assert k1.storage_class == k2.storage_class
assert k1.service == k2.service
assert k1.is_prefix is False
@mock_s3
def test_s3_defaults():
s1 = S3()
s2 = S3()
assert s1.session == s2.session
assert s1.client == s2.client
assert s1.s3 == s2.s3
local_dir = os.path.dirname(os.path.abspath(__file__))
test_bucket_name = 'test-pm-bucket'
test_string = 'Hello'
test_file_path = 'notebooks/s3/s3_in/s3-simple_notebook.ipynb'
test_empty_file_path = 'notebooks/s3/s3_in/s3-empty.ipynb'
with open(os.path.join(local_dir, test_file_path)) as f:
test_nb_content = f.read()
no_empty_lines = lambda s: "\n".join([l for l in s.split('\n') if len(l) > 0])
test_clean_nb_content = no_empty_lines(test_nb_content)
read_from_gen = lambda g: "\n".join(g)
@pytest.fixture(scope="function")
def s3_client():
mock_s3 = moto.mock_s3()
mock_s3.start()
client = boto3.client('s3')
client.create_bucket(
Bucket=test_bucket_name, CreateBucketConfiguration={'LocationConstraint': 'us-west-2'}
)
client.put_object(Bucket=test_bucket_name, Key=test_file_path, Body=test_nb_content)
client.put_object(Bucket=test_bucket_name, Key=test_empty_file_path, Body='')
yield S3()
try:
client.delete_object(Bucket=test_bucket_name, Key=test_file_path)
client.delete_object(Bucket=test_bucket_name, Key=test_file_path + '.txt')
client.delete_object(Bucket=test_bucket_name, Key=test_empty_file_path)
except Exception:
pass
mock_s3.stop()
def test_s3_read(s3_client):
s3_path = "s3://{}/{}".format(test_bucket_name, test_file_path)
data = read_from_gen(s3_client.read(s3_path))
assert data == test_clean_nb_content
def test_s3_read_empty(s3_client):
s3_path = "s3://{}/{}".format(test_bucket_name, test_empty_file_path)
data = read_from_gen(s3_client.read(s3_path))
assert data == ''
def test_s3_write(s3_client):
s3_path = "s3://{}/{}.txt".format(test_bucket_name, test_file_path)
s3_client.cp_string(test_string, s3_path)
data = read_from_gen(s3_client.read(s3_path))
assert data == test_string
def test_s3_overwrite(s3_client):
s3_path = "s3://{}/{}".format(test_bucket_name, test_file_path)
s3_client.cp_string(test_string, s3_path)
data = read_from_gen(s3_client.read(s3_path))
assert data == test_string
def test_s3_listdir(s3_client):
dir_name = os.path.dirname(test_file_path)
s3_dir = "s3://{}/{}".format(test_bucket_name, dir_name)
s3_path = "s3://{}/{}".format(test_bucket_name, test_file_path)
dir_listings = s3_client.listdir(s3_dir)
assert len(dir_listings) == 2
assert s3_path in dir_listings
|
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import re
import sys
import codecs
try:
from setuptools import setup, Command
except ImportError:
from ez_setup import use_setuptools
use_setuptools()
from setuptools import setup, Command # noqa
from distutils.command.install import INSTALL_SCHEMES
extra = {}
# -*- Python 3 -*-
is_py3k = sys.version_info[0] == 3
# -*- Distribution Meta -*-
NAME = 'django-celery'
re_meta = re.compile(r'__(\w+?)__\s*=\s*(.*)')
re_vers = re.compile(r'VERSION\s*=\s*\((.*?)\)')
re_doc = re.compile(r'^"""(.+?)"""')
def rq(s):
return s.strip("\"'")
def add_default(m):
attr_name, attr_value = m.groups()
return ((attr_name, rq(attr_value)), )
def add_version(m):
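# e.g. a matched ``VERSION = (3, 1, 16, 'a1')`` becomes '3.1.16a1'.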
v = list(map(rq, m.groups()[0].split(', ')))
return (('VERSION', '.'.join(v[0:3]) + ''.join(v[3:])), )
def add_doc(m):
return (('doc', m.groups()[0]), )
pats = {re_meta: add_default,
re_vers: add_version,
re_doc: add_doc}
here = os.path.abspath(os.path.dirname(__file__))
meta_fh = open(os.path.join(here, 'djcelery/__init__.py'))
try:
meta = {}
for line in meta_fh:
if line.strip() == '# -eof meta-':
break
for pattern, handler in pats.items():
m = pattern.match(line.strip())
if m:
meta.update(handler(m))
finally:
meta_fh.close()
packages, data_files = [], []
root_dir = os.path.dirname(__file__)
if root_dir != '':
os.chdir(root_dir)
src_dir = 'djcelery'
def fullsplit(path, result=None):
if result is None:
result = []
head, tail = os.path.split(path)
if head == '':
return [tail] + result
if head == path:
return result
return fullsplit(head, [tail] + result)
for scheme in INSTALL_SCHEMES.values():
scheme['data'] = scheme['purelib']
SKIP_EXTENSIONS = ['.pyc', '.pyo', '.swp', '.swo']
def is_unwanted_file(filename):
for skip_ext in SKIP_EXTENSIONS:
if filename.endswith(skip_ext):
return True
return False
for dirpath, dirnames, filenames in os.walk(src_dir):
# Prune directory names that start with '.' in place,
# so os.walk does not descend into them.
dirnames[:] = [d for d in dirnames if not d.startswith('.')]
for filename in filenames:
if filename.endswith('.py'):
packages.append('.'.join(fullsplit(dirpath)))
elif is_unwanted_file(filename):
pass
else:
data_files.append(
[dirpath, [os.path.join(dirpath, f) for f in filenames]],
)
class RunTests(Command):
description = 'Run the django test suite from the tests dir.'
user_options = []
extra_env = {}
extra_args = []
def run(self):
for env_name, env_value in self.extra_env.items():
os.environ[env_name] = str(env_value)
this_dir = os.getcwd()
testproj_dir = os.path.join(this_dir, 'tests')
os.chdir(testproj_dir)
sys.path.append(testproj_dir)
from django.core.management import execute_from_command_line
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'settings')
prev_argv = list(sys.argv)
try:
sys.argv = [__file__, 'test'] + self.extra_args
execute_from_command_line(argv=sys.argv)
finally:
sys.argv = prev_argv
def initialize_options(self):
pass
def finalize_options(self):
pass
class QuickRunTests(RunTests):
extra_env = dict(SKIP_RLIMITS=1, QUICKTEST=1)
class CIRunTests(RunTests):
@property
def extra_args(self):
toxinidir = os.environ.get('TOXINIDIR', '')
return [
'--with-coverage3',
'--cover3-xml',
'--cover3-xml-file=%s' % (
os.path.join(toxinidir, 'coverage.xml'), ),
'--with-xunit',
'--xunit-file=%s' % (
os.path.join(toxinidir, 'nosetests.xml'), ),
'--cover3-html',
'--cover3-html-dir=%s' % (
os.path.join(toxinidir, 'cover'), ),
]
if os.path.exists('README.rst'):
long_description = codecs.open('README.rst', 'r', 'utf-8').read()
else:
long_description = 'See http://github.com/celery/django-celery'
setup(
name=NAME,
version=meta['VERSION'],
description=meta['doc'],
author=meta['author'],
author_email=meta['contact'],
url=meta['homepage'],
platforms=['any'],
license='BSD',
packages=packages,
data_files=data_files,
zip_safe=False,
install_requires=[
'celery>=3.1.15',
],
cmdclass={'test': RunTests,
'quicktest': QuickRunTests,
'citest': CIRunTests},
classifiers=[
'Development Status :: 5 - Production/Stable',
'Framework :: Django',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Operating System :: POSIX',
'Topic :: Communications',
'Topic :: System :: Distributed Computing',
'Topic :: Software Development :: Libraries :: Python Modules',
'Programming Language :: Python',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: Implementation :: CPython',
'Programming Language :: Python :: Implementation :: PyPy',
'Programming Language :: Python :: Implementation :: Jython',
],
long_description=long_description,
**extra
)
|
|
from __future__ import absolute_import
from __future__ import print_function
import os
import csv
import numpy as np
import time
import json
import warnings
from collections import deque
from collections import OrderedDict
try:
    from collections.abc import Iterable
except ImportError:  # Python 2 fallback
    from collections import Iterable
from .utils.generic_utils import Progbar
from . import backend as K
try:
import requests
except ImportError:
requests = None
if K.backend() == 'tensorflow':
import tensorflow as tf
from tensorflow.contrib.tensorboard.plugins import projector
class CallbackList(object):
"""Container abstracting a list of callbacks.
# Arguments
callbacks: List of `Callback` instances.
queue_length: Queue length for keeping
running statistics over callback execution time.
"""
def __init__(self, callbacks=None, queue_length=10):
callbacks = callbacks or []
self.callbacks = [c for c in callbacks]
self.queue_length = queue_length
def append(self, callback):
self.callbacks.append(callback)
def set_params(self, params):
for callback in self.callbacks:
callback.set_params(params)
def set_model(self, model):
for callback in self.callbacks:
callback.set_model(model)
def on_epoch_begin(self, epoch, logs=None):
"""Called at the start of an epoch.
# Arguments
epoch: integer, index of epoch.
logs: dictionary of logs.
"""
logs = logs or {}
for callback in self.callbacks:
callback.on_epoch_begin(epoch, logs)
self._delta_t_batch = 0.
self._delta_ts_batch_begin = deque([], maxlen=self.queue_length)
self._delta_ts_batch_end = deque([], maxlen=self.queue_length)
def on_epoch_end(self, epoch, logs=None):
"""Called at the end of an epoch.
# Arguments
epoch: integer, index of epoch.
logs: dictionary of logs.
"""
logs = logs or {}
for callback in self.callbacks:
callback.on_epoch_end(epoch, logs)
def on_batch_begin(self, batch, logs=None):
"""Called right before processing a batch.
# Arguments
batch: integer, index of batch within the current epoch.
logs: dictionary of logs.
"""
logs = logs or {}
t_before_callbacks = time.time()
for callback in self.callbacks:
callback.on_batch_begin(batch, logs)
self._delta_ts_batch_begin.append(time.time() - t_before_callbacks)
delta_t_median = np.median(self._delta_ts_batch_begin)
if (self._delta_t_batch > 0. and
delta_t_median > 0.95 * self._delta_t_batch and
delta_t_median > 0.1):
warnings.warn('Method on_batch_begin() is slow compared '
'to the batch update (%f). Check your callbacks.'
% delta_t_median)
self._t_enter_batch = time.time()
def on_batch_end(self, batch, logs=None):
"""Called at the end of a batch.
# Arguments
batch: integer, index of batch within the current epoch.
logs: dictionary of logs.
"""
logs = logs or {}
if not hasattr(self, '_t_enter_batch'):
self._t_enter_batch = time.time()
self._delta_t_batch = time.time() - self._t_enter_batch
t_before_callbacks = time.time()
for callback in self.callbacks:
callback.on_batch_end(batch, logs)
self._delta_ts_batch_end.append(time.time() - t_before_callbacks)
delta_t_median = np.median(self._delta_ts_batch_end)
if (self._delta_t_batch > 0. and
(delta_t_median > 0.95 * self._delta_t_batch and delta_t_median > 0.1)):
warnings.warn('Method on_batch_end() is slow compared '
'to the batch update (%f). Check your callbacks.'
% delta_t_median)
def on_train_begin(self, logs=None):
"""Called at the beginning of training.
# Arguments
logs: dictionary of logs.
"""
logs = logs or {}
for callback in self.callbacks:
callback.on_train_begin(logs)
def on_train_end(self, logs=None):
"""Called at the end of training.
# Arguments
logs: dictionary of logs.
"""
logs = logs or {}
for callback in self.callbacks:
callback.on_train_end(logs)
def __iter__(self):
return iter(self.callbacks)
class Callback(object):
"""Abstract base class used to build new callbacks.
# Properties
params: dict. Training parameters
(eg. verbosity, batch size, number of epochs...).
model: instance of `keras.models.Model`.
Reference of the model being trained.
The `logs` dictionary that callback methods
take as argument will contain keys for quantities relevant to
the current batch or epoch.
Currently, the `.fit()` method of the `Sequential` model class
will include the following quantities in the `logs` that
it passes to its callbacks:
on_epoch_end: logs include `acc` and `loss`, and
optionally include `val_loss`
(if validation is enabled in `fit`), and `val_acc`
(if validation and accuracy monitoring are enabled).
on_batch_begin: logs include `size`,
the number of samples in the current batch.
on_batch_end: logs include `loss`, and optionally `acc`
(if accuracy monitoring is enabled).
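# Example
A minimal sketch of a custom callback (the `LossHistory` name is only
illustrative; the instance is assumed to be passed to
`model.fit(..., callbacks=[...])`):
```python
class LossHistory(Callback):
    def on_train_begin(self, logs=None):
        self.losses = []

    def on_batch_end(self, batch, logs=None):
        self.losses.append((logs or {}).get('loss'))
```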
"""
def __init__(self):
self.validation_data = None
def set_params(self, params):
self.params = params
def set_model(self, model):
self.model = model
def on_epoch_begin(self, epoch, logs=None):
pass
def on_epoch_end(self, epoch, logs=None):
pass
def on_batch_begin(self, batch, logs=None):
pass
def on_batch_end(self, batch, logs=None):
pass
def on_train_begin(self, logs=None):
pass
def on_train_end(self, logs=None):
pass
class BaseLogger(Callback):
"""Callback that accumulates epoch averages of metrics.
This callback is automatically applied to every Keras model.
"""
def on_epoch_begin(self, epoch, logs=None):
self.seen = 0
self.totals = {}
def on_batch_end(self, batch, logs=None):
logs = logs or {}
batch_size = logs.get('size', 0)
self.seen += batch_size
for k, v in logs.items():
if k in self.totals:
self.totals[k] += v * batch_size
else:
self.totals[k] = v * batch_size
def on_epoch_end(self, epoch, logs=None):
if logs is not None:
for k in self.params['metrics']:
if k in self.totals:
# Make value available to next callbacks.
logs[k] = self.totals[k] / self.seen
class ProgbarLogger(Callback):
"""Callback that prints metrics to stdout.
# Arguments
count_mode: One of "steps" or "samples".
Whether the progress bar should
count samples seen or steps (batches) seen.
# Raises
ValueError: In case of invalid `count_mode`.
"""
def __init__(self, count_mode='samples'):
super(ProgbarLogger, self).__init__()
if count_mode == 'samples':
self.use_steps = False
elif count_mode == 'steps':
self.use_steps = True
else:
raise ValueError('Unknown `count_mode`: ' + str(count_mode))
def on_train_begin(self, logs=None):
self.verbose = self.params['verbose']
self.epochs = self.params['epochs']
def on_epoch_begin(self, epoch, logs=None):
if self.verbose:
print('Epoch %d/%d' % (epoch + 1, self.epochs))
if self.use_steps:
target = self.params['steps']
else:
target = self.params['samples']
self.target = target
self.progbar = Progbar(target=self.target,
verbose=self.verbose)
self.seen = 0
def on_batch_begin(self, batch, logs=None):
if self.seen < self.target:
self.log_values = []
def on_batch_end(self, batch, logs=None):
logs = logs or {}
batch_size = logs.get('size', 0)
if self.use_steps:
self.seen += 1
else:
self.seen += batch_size
for k in self.params['metrics']:
if k in logs:
self.log_values.append((k, logs[k]))
# Skip progbar update for the last batch;
# will be handled by on_epoch_end.
if self.verbose and self.seen < self.target:
self.progbar.update(self.seen, self.log_values)
def on_epoch_end(self, epoch, logs=None):
logs = logs or {}
for k in self.params['metrics']:
if k in logs:
self.log_values.append((k, logs[k]))
if self.verbose:
self.progbar.update(self.seen, self.log_values, force=True)
class History(Callback):
"""Callback that records events into a `History` object.
This callback is automatically applied to
every Keras model. The `History` object
gets returned by the `fit` method of models.
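# Example
A minimal sketch (assumes `model`, `x_train` and `y_train` already exist):
```python
history = model.fit(x_train, y_train, epochs=10)
print(history.epoch)
print(history.history['loss'])
```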
"""
def on_train_begin(self, logs=None):
self.epoch = []
self.history = {}
def on_epoch_end(self, epoch, logs=None):
logs = logs or {}
self.epoch.append(epoch)
for k, v in logs.items():
self.history.setdefault(k, []).append(v)
class ModelCheckpoint(Callback):
"""Save the model after every epoch.
`filepath` can contain named formatting options,
which will be filled with the value of `epoch` and
keys in `logs` (passed in `on_epoch_end`).
For example: if `filepath` is `weights.{epoch:02d}-{val_loss:.2f}.hdf5`,
then the model checkpoints will be saved with the epoch number and
the validation loss in the filename.
# Arguments
filepath: string, path to save the model file.
monitor: quantity to monitor.
verbose: verbosity mode, 0 or 1.
save_best_only: if `save_best_only=True`,
the latest best model according to
the quantity monitored will not be overwritten.
mode: one of {auto, min, max}.
If `save_best_only=True`, the decision
to overwrite the current save file is made
based on either the maximization or the
minimization of the monitored quantity. For `val_acc`,
this should be `max`, for `val_loss` this should
be `min`, etc. In `auto` mode, the direction is
automatically inferred from the name of the monitored quantity.
save_weights_only: if True, then only the model's weights will be
saved (`model.save_weights(filepath)`), else the full model
is saved (`model.save(filepath)`).
period: Interval (number of epochs) between checkpoints.
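# Example
A minimal sketch (assumes `model` and the training/validation arrays
already exist):
```python
checkpoint = ModelCheckpoint('weights.{epoch:02d}-{val_loss:.2f}.hdf5',
                             monitor='val_loss', save_best_only=True,
                             verbose=1)
model.fit(x_train, y_train, validation_data=(x_val, y_val),
          callbacks=[checkpoint])
```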
"""
def __init__(self, filepath, monitor='val_loss', verbose=0,
save_best_only=False, save_weights_only=False,
mode='auto', period=1):
super(ModelCheckpoint, self).__init__()
self.monitor = monitor
self.verbose = verbose
self.filepath = filepath
self.save_best_only = save_best_only
self.save_weights_only = save_weights_only
self.period = period
self.epochs_since_last_save = 0
if mode not in ['auto', 'min', 'max']:
warnings.warn('ModelCheckpoint mode %s is unknown, '
'fallback to auto mode.' % (mode),
RuntimeWarning)
mode = 'auto'
if mode == 'min':
self.monitor_op = np.less
self.best = np.Inf
elif mode == 'max':
self.monitor_op = np.greater
self.best = -np.Inf
else:
if 'acc' in self.monitor or self.monitor.startswith('fmeasure'):
self.monitor_op = np.greater
self.best = -np.Inf
else:
self.monitor_op = np.less
self.best = np.Inf
def on_epoch_end(self, epoch, logs=None):
logs = logs or {}
self.epochs_since_last_save += 1
if self.epochs_since_last_save >= self.period:
self.epochs_since_last_save = 0
filepath = self.filepath.format(epoch=epoch, **logs)
if self.save_best_only:
current = logs.get(self.monitor)
if current is None:
warnings.warn('Can save best model only with %s available, '
'skipping.' % (self.monitor), RuntimeWarning)
else:
if self.monitor_op(current, self.best):
if self.verbose > 0:
print('Epoch %05d: %s improved from %0.5f to %0.5f,'
' saving model to %s'
% (epoch, self.monitor, self.best,
current, filepath))
self.best = current
if self.save_weights_only:
self.model.save_weights(filepath, overwrite=True)
else:
self.model.save(filepath, overwrite=True)
else:
if self.verbose > 0:
print('Epoch %05d: %s did not improve' %
(epoch, self.monitor))
else:
if self.verbose > 0:
print('Epoch %05d: saving model to %s' % (epoch, filepath))
if self.save_weights_only:
self.model.save_weights(filepath, overwrite=True)
else:
self.model.save(filepath, overwrite=True)
class EarlyStopping(Callback):
"""Stop training when a monitored quantity has stopped improving.
# Arguments
monitor: quantity to be monitored.
min_delta: minimum change in the monitored quantity
to qualify as an improvement, i.e. an absolute
change of less than min_delta will count as no
improvement.
patience: number of epochs with no improvement
after which training will be stopped.
verbose: verbosity mode.
mode: one of {auto, min, max}. In `min` mode,
training will stop when the quantity
monitored has stopped decreasing; in `max`
mode it will stop when the quantity
monitored has stopped increasing; in `auto`
mode, the direction is automatically inferred
from the name of the monitored quantity.
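# Example
A minimal sketch (assumes `model` and the training/validation arrays
already exist):
```python
early_stopping = EarlyStopping(monitor='val_loss', min_delta=0.001,
                               patience=3, verbose=1)
model.fit(x_train, y_train, validation_data=(x_val, y_val),
          callbacks=[early_stopping])
```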
"""
def __init__(self, monitor='val_loss',
min_delta=0, patience=0, verbose=0, mode='auto'):
super(EarlyStopping, self).__init__()
self.monitor = monitor
self.patience = patience
self.verbose = verbose
self.min_delta = min_delta
self.wait = 0
self.stopped_epoch = 0
if mode not in ['auto', 'min', 'max']:
warnings.warn('EarlyStopping mode %s is unknown, '
'fallback to auto mode.' % (mode),
RuntimeWarning)
mode = 'auto'
if mode == 'min':
self.monitor_op = np.less
elif mode == 'max':
self.monitor_op = np.greater
else:
if 'acc' in self.monitor or self.monitor.startswith('fmeasure'):
self.monitor_op = np.greater
else:
self.monitor_op = np.less
if self.monitor_op == np.greater:
self.min_delta *= 1
else:
self.min_delta *= -1
def on_train_begin(self, logs=None):
self.wait = 0 # Allow instances to be re-used
self.best = np.Inf if self.monitor_op == np.less else -np.Inf
def on_epoch_end(self, epoch, logs=None):
current = logs.get(self.monitor)
if current is None:
warnings.warn('Early stopping requires %s available!' %
(self.monitor), RuntimeWarning)
return
if self.monitor_op(current - self.min_delta, self.best):
self.best = current
self.wait = 0
else:
if self.wait >= self.patience:
self.stopped_epoch = epoch
self.model.stop_training = True
self.wait += 1
def on_train_end(self, logs=None):
if self.stopped_epoch > 0 and self.verbose > 0:
print('Epoch %05d: early stopping' % (self.stopped_epoch))
class RemoteMonitor(Callback):
"""Callback used to stream events to a server.
Requires the `requests` library.
Events are sent to `root + '/publish/epoch/end/'` by default. Calls are
HTTP POST, with a `data` argument which is a
JSON-encoded dictionary of event data.
# Arguments
root: String; root url of the target server.
path: String; path relative to `root` to which the events will be sent.
field: String; JSON field under which the data will be stored.
headers: Dictionary; optional custom HTTP headers.
Defaults to:
`{'Accept': 'application/json', 'Content-Type': 'application/json'}`
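# Example
A minimal sketch (assumes `model` and the training data already exist,
and that a listener is running at the default root URL):
```python
monitor = RemoteMonitor(root='http://localhost:9000')
model.fit(x_train, y_train, callbacks=[monitor])
```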
"""
def __init__(self,
root='http://localhost:9000',
path='/publish/epoch/end/',
field='data',
headers=None):
super(RemoteMonitor, self).__init__()
if headers is None:
headers = {'Accept': 'application/json',
'Content-Type': 'application/json'}
self.root = root
self.path = path
self.field = field
self.headers = headers
def on_epoch_end(self, epoch, logs=None):
if requests is None:
raise ImportError('RemoteMonitor requires '
'the `requests` library.')
logs = logs or {}
send = {}
send['epoch'] = epoch
for k, v in logs.items():
send[k] = v
try:
requests.post(self.root + self.path,
{self.field: json.dumps(send)},
headers=self.headers)
except requests.exceptions.RequestException:
warnings.warn('Warning: could not reach RemoteMonitor '
'root server at ' + str(self.root))
class LearningRateScheduler(Callback):
"""Learning rate scheduler.
# Arguments
schedule: a function that takes an epoch index as input
(integer, indexed from 0) and returns a new
learning rate as output (float).
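# Example
A minimal sketch (assumes `model` and the training data already exist);
the schedule below halves the learning rate every 10 epochs:
```python
def schedule(epoch):
    return 0.001 * (0.5 ** (epoch // 10))

lr_scheduler = LearningRateScheduler(schedule)
model.fit(x_train, y_train, callbacks=[lr_scheduler])
```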
"""
def __init__(self, schedule):
super(LearningRateScheduler, self).__init__()
self.schedule = schedule
def on_epoch_begin(self, epoch, logs=None):
if not hasattr(self.model.optimizer, 'lr'):
raise ValueError('Optimizer must have a "lr" attribute.')
lr = self.schedule(epoch)
if not isinstance(lr, (float, np.float32, np.float64)):
raise ValueError('The output of the "schedule" function '
'should be float.')
K.set_value(self.model.optimizer.lr, lr)
class TensorBoard(Callback):
"""Tensorboard basic visualizations.
This callback writes a log for TensorBoard, which allows
you to visualize dynamic graphs of your training and test
metrics, as well as activation histograms for the different
layers in your model.
TensorBoard is a visualization tool provided with TensorFlow.
If you have installed TensorFlow with pip, you should be able
to launch TensorBoard from the command line:
```
tensorboard --logdir=/full_path_to_your_logs
```
You can find more information about TensorBoard
[here](https://www.tensorflow.org/get_started/summaries_and_tensorboard).
# Arguments
log_dir: the path of the directory where to save the log
files to be parsed by Tensorboard.
histogram_freq: frequency (in epochs) at which to compute activation
histograms for the layers of the model. If set to 0,
histograms won't be computed.
write_graph: whether to visualize the graph in Tensorboard.
The log file can become quite large when
write_graph is set to True.
write_images: whether to write model weights to visualize as
image in Tensorboard.
embeddings_freq: frequency (in epochs) at which selected embedding
layers will be saved.
embeddings_layer_names: a list of names of layers to keep an eye on. If
None or an empty list, all the embedding layers will be watched.
embeddings_metadata: a dictionary which maps layer name to a file name
in which metadata for this embedding layer is saved. See the
[details](https://www.tensorflow.org/how_tos/embedding_viz/#metadata_optional)
about the metadata file format. If the same metadata file is
used for all embedding layers, a single string can be passed.
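# Example
A minimal sketch (assumes `model` and the training/validation arrays
already exist; activation histograms require validation data):
```python
tensorboard = TensorBoard(log_dir='./logs', histogram_freq=1,
                          write_graph=True)
model.fit(x_train, y_train, validation_data=(x_val, y_val),
          callbacks=[tensorboard])
```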
"""
def __init__(self, log_dir='./logs',
histogram_freq=0,
write_graph=True,
write_images=False,
embeddings_freq=0,
embeddings_layer_names=None,
embeddings_metadata=None):
super(TensorBoard, self).__init__()
if K.backend() != 'tensorflow':
raise RuntimeError('TensorBoard callback only works '
'with the TensorFlow backend.')
self.log_dir = log_dir
self.histogram_freq = histogram_freq
self.merged = None
self.write_graph = write_graph
self.write_images = write_images
self.embeddings_freq = embeddings_freq
self.embeddings_layer_names = embeddings_layer_names
self.embeddings_metadata = embeddings_metadata or {}
def set_model(self, model):
self.model = model
self.sess = K.get_session()
if self.histogram_freq and self.merged is None:
for layer in self.model.layers:
for weight in layer.weights:
tf.summary.histogram(weight.name, weight)
if self.write_images:
w_img = tf.squeeze(weight)
shape = w_img.get_shape()
if len(shape) > 1 and shape[0] > shape[1]:
w_img = tf.transpose(w_img)
if len(shape) == 1:
w_img = tf.expand_dims(w_img, 0)
w_img = tf.expand_dims(tf.expand_dims(w_img, 0), -1)
tf.summary.image(weight.name, w_img)
if hasattr(layer, 'output'):
tf.summary.histogram('{}_out'.format(layer.name),
layer.output)
self.merged = tf.summary.merge_all()
if self.write_graph:
self.writer = tf.summary.FileWriter(self.log_dir,
self.sess.graph)
else:
self.writer = tf.summary.FileWriter(self.log_dir)
if self.embeddings_freq:
self.saver = tf.train.Saver()
embeddings_layer_names = self.embeddings_layer_names
if not embeddings_layer_names:
embeddings_layer_names = [layer.name for layer in self.model.layers
if type(layer).__name__ == 'Embedding']
embeddings = {layer.name: layer.weights[0]
for layer in self.model.layers
if layer.name in embeddings_layer_names}
embeddings_metadata = {}
if not isinstance(self.embeddings_metadata, str):
embeddings_metadata = self.embeddings_metadata
else:
embeddings_metadata = {layer_name: self.embeddings_metadata
for layer_name in embeddings.keys()}
config = projector.ProjectorConfig()
self.embeddings_logs = []
for layer_name, tensor in embeddings.items():
embedding = config.embeddings.add()
embedding.tensor_name = tensor.name
self.embeddings_logs.append(os.path.join(self.log_dir,
layer_name + '.ckpt'))
if layer_name in embeddings_metadata:
embedding.metadata_path = embeddings_metadata[layer_name]
projector.visualize_embeddings(self.writer, config)
def on_epoch_end(self, epoch, logs=None):
logs = logs or {}
if self.validation_data and self.histogram_freq:
if epoch % self.histogram_freq == 0:
# TODO: implement batched calls to sess.run
# (current call will likely go OOM on GPU)
if self.model.uses_learning_phase:
cut_v_data = len(self.model.inputs)
val_data = self.validation_data[:cut_v_data] + [0]
tensors = self.model.inputs + [K.learning_phase()]
else:
val_data = self.validation_data
tensors = self.model.inputs
feed_dict = dict(zip(tensors, val_data))
result = self.sess.run([self.merged], feed_dict=feed_dict)
summary_str = result[0]
self.writer.add_summary(summary_str, epoch)
if self.embeddings_freq and self.embeddings_logs:
if epoch % self.embeddings_freq == 0:
for log in self.embeddings_logs:
self.saver.save(self.sess, log, epoch)
for name, value in logs.items():
if name in ['batch', 'size']:
continue
summary = tf.Summary()
summary_value = summary.value.add()
summary_value.simple_value = value.item()
summary_value.tag = name
self.writer.add_summary(summary, epoch)
self.writer.flush()
def on_train_end(self, _):
self.writer.close()
class ReduceLROnPlateau(Callback):
"""Reduce learning rate when a metric has stopped improving.
Models often benefit from reducing the learning rate by a factor
of 2-10 once learning stagnates. This callback monitors a
quantity and if no improvement is seen for a 'patience' number
of epochs, the learning rate is reduced.
# Example
```python
reduce_lr = ReduceLROnPlateau(monitor='val_loss', factor=0.2,
patience=5, min_lr=0.001)
model.fit(X_train, Y_train, callbacks=[reduce_lr])
```
# Arguments
monitor: quantity to be monitored.
factor: factor by which the learning rate will
be reduced. new_lr = lr * factor
patience: number of epochs with no improvement
after which learning rate will be reduced.
verbose: int. 0: quiet, 1: update messages.
mode: one of {auto, min, max}. In `min` mode,
lr will be reduced when the quantity
monitored has stopped decreasing; in `max`
mode it will be reduced when the quantity
monitored has stopped increasing; in `auto`
mode, the direction is automatically inferred
from the name of the monitored quantity.
epsilon: threshold for measuring the new optimum,
to only focus on significant changes.
cooldown: number of epochs to wait before resuming
normal operation after lr has been reduced.
min_lr: lower bound on the learning rate.
"""
def __init__(self, monitor='val_loss', factor=0.1, patience=10,
verbose=0, mode='auto', epsilon=1e-4, cooldown=0, min_lr=0):
super(ReduceLROnPlateau, self).__init__()
self.monitor = monitor
if factor >= 1.0:
raise ValueError('ReduceLROnPlateau '
'does not support a factor >= 1.0.')
self.factor = factor
self.min_lr = min_lr
self.epsilon = epsilon
self.patience = patience
self.verbose = verbose
self.cooldown = cooldown
self.cooldown_counter = 0 # Cooldown counter.
self.wait = 0
self.best = 0
self.mode = mode
self.monitor_op = None
self._reset()
def _reset(self):
"""Resets wait counter and cooldown counter.
"""
if self.mode not in ['auto', 'min', 'max']:
warnings.warn('Learning Rate Plateau Reducing mode %s is unknown, '
'fallback to auto mode.' % (self.mode),
RuntimeWarning)
self.mode = 'auto'
if (self.mode == 'min' or
(self.mode == 'auto' and 'acc' not in self.monitor)):
self.monitor_op = lambda a, b: np.less(a, b - self.epsilon)
self.best = np.Inf
else:
self.monitor_op = lambda a, b: np.greater(a, b + self.epsilon)
self.best = -np.Inf
self.cooldown_counter = 0
self.wait = 0
self.lr_epsilon = self.min_lr * 1e-4
def on_train_begin(self, logs=None):
self._reset()
def on_epoch_end(self, epoch, logs=None):
logs = logs or {}
logs['lr'] = K.get_value(self.model.optimizer.lr)
current = logs.get(self.monitor)
if current is None:
warnings.warn('Learning Rate Plateau Reducing requires %s available!' %
self.monitor, RuntimeWarning)
else:
if self.in_cooldown():
self.cooldown_counter -= 1
self.wait = 0
if self.monitor_op(current, self.best):
self.best = current
self.wait = 0
elif not self.in_cooldown():
if self.wait >= self.patience:
old_lr = float(K.get_value(self.model.optimizer.lr))
if old_lr > self.min_lr + self.lr_epsilon:
new_lr = old_lr * self.factor
new_lr = max(new_lr, self.min_lr)
K.set_value(self.model.optimizer.lr, new_lr)
if self.verbose > 0:
print('\nEpoch %05d: reducing learning rate to %s.' % (epoch, new_lr))
self.cooldown_counter = self.cooldown
self.wait = 0
self.wait += 1
def in_cooldown(self):
return self.cooldown_counter > 0
class CSVLogger(Callback):
"""Callback that streams epoch results to a csv file.
Supports all values that can be represented as a string,
including 1D iterables such as np.ndarray.
# Example
```python
csv_logger = CSVLogger('training.log')
model.fit(X_train, Y_train, callbacks=[csv_logger])
```
# Arguments
filename: filename of the csv file, e.g. 'run/log.csv'.
separator: string used to separate elements in the csv file.
append: True: append if file exists (useful for continuing
training). False: overwrite existing file.
"""
def __init__(self, filename, separator=',', append=False):
self.sep = separator
self.filename = filename
self.append = append
self.writer = None
self.keys = None
self.append_header = True
super(CSVLogger, self).__init__()
def on_train_begin(self, logs=None):
if self.append:
if os.path.exists(self.filename):
with open(self.filename) as f:
self.append_header = not bool(len(f.readline()))
self.csv_file = open(self.filename, 'a')
else:
self.csv_file = open(self.filename, 'w')
def on_epoch_end(self, epoch, logs=None):
logs = logs or {}
def handle_value(k):
is_zero_dim_ndarray = isinstance(k, np.ndarray) and k.ndim == 0
if isinstance(k, Iterable) and not is_zero_dim_ndarray:
return '"[%s]"' % (', '.join(map(str, k)))
else:
return k
if not self.writer:
self.keys = sorted(logs.keys())
class CustomDialect(csv.excel):
delimiter = self.sep
self.writer = csv.DictWriter(self.csv_file,
fieldnames=['epoch'] + self.keys, dialect=CustomDialect)
if self.append_header:
self.writer.writeheader()
row_dict = OrderedDict({'epoch': epoch})
row_dict.update((key, handle_value(logs[key])) for key in self.keys)
self.writer.writerow(row_dict)
self.csv_file.flush()
def on_train_end(self, logs=None):
self.csv_file.close()
self.writer = None
class LambdaCallback(Callback):
"""Callback for creating simple, custom callbacks on-the-fly.
This callback is constructed with anonymous functions that will be called
at the appropriate time. Note that the callbacks expect positional
arguments, as:
- `on_epoch_begin` and `on_epoch_end` expect two positional arguments:
`epoch`, `logs`
- `on_batch_begin` and `on_batch_end` expect two positional arguments:
`batch`, `logs`
- `on_train_begin` and `on_train_end` expect one positional argument:
`logs`
# Arguments
on_epoch_begin: called at the beginning of every epoch.
on_epoch_end: called at the end of every epoch.
on_batch_begin: called at the beginning of every batch.
on_batch_end: called at the end of every batch.
on_train_begin: called at the beginning of model training.
on_train_end: called at the end of model training.
# Example
```python
# Print the batch number at the beginning of every batch.
batch_print_callback = LambdaCallback(
on_batch_begin=lambda batch,logs: print(batch))
# Plot the loss after every epoch.
import numpy as np
import matplotlib.pyplot as plt
plot_loss_callback = LambdaCallback(
on_epoch_end=lambda epoch, logs: plt.plot(np.arange(epoch),
logs['loss']))
# Terminate some processes after having finished model training.
processes = ...
cleanup_callback = LambdaCallback(
on_train_end=lambda logs: [
p.terminate() for p in processes if p.is_alive()])
model.fit(...,
callbacks=[batch_print_callback,
plot_loss_callback,
cleanup_callback])
```
"""
def __init__(self,
on_epoch_begin=None,
on_epoch_end=None,
on_batch_begin=None,
on_batch_end=None,
on_train_begin=None,
on_train_end=None,
**kwargs):
super(LambdaCallback, self).__init__()
self.__dict__.update(kwargs)
if on_epoch_begin is not None:
self.on_epoch_begin = on_epoch_begin
else:
self.on_epoch_begin = lambda epoch, logs: None
if on_epoch_end is not None:
self.on_epoch_end = on_epoch_end
else:
self.on_epoch_end = lambda epoch, logs: None
if on_batch_begin is not None:
self.on_batch_begin = on_batch_begin
else:
self.on_batch_begin = lambda batch, logs: None
if on_batch_end is not None:
self.on_batch_end = on_batch_end
else:
self.on_batch_end = lambda batch, logs: None
if on_train_begin is not None:
self.on_train_begin = on_train_begin
else:
self.on_train_begin = lambda logs: None
if on_train_end is not None:
self.on_train_end = on_train_end
else:
self.on_train_end = lambda logs: None
|
|
#!/usr/bin/env python
# Copyright (c) 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import multiprocessing
import optparse
import os
import sys
import buildbot_common
import build_version
import generate_make
import parse_dsc
from build_paths import NACL_DIR, SDK_SRC_DIR, OUT_DIR, SDK_EXAMPLE_DIR
from build_paths import GSTORE
from generate_index import LandingPage
sys.path.append(os.path.join(SDK_SRC_DIR, 'tools'))
sys.path.append(os.path.join(NACL_DIR, 'build'))
import getos
import http_download
MAKE = 'nacl_sdk/make_3.99.90-26-gf80222c/make.exe'
LIB_DICT = {
'linux': [],
'mac': [],
'win': ['x86_32']
}
VALID_TOOLCHAINS = ['newlib', 'glibc', 'pnacl', 'win', 'linux', 'mac']
# Global verbosity setting.
# If set to True (normally via a command line arg) then build_projects will
# add V=1 to all calls to 'make'
verbose = False
def CopyFilesFromTo(filelist, srcdir, dstdir):
for filename in filelist:
srcpath = os.path.join(srcdir, filename)
dstpath = os.path.join(dstdir, filename)
buildbot_common.CopyFile(srcpath, dstpath)
def UpdateHelpers(pepperdir, clobber=False):
tools_dir = os.path.join(pepperdir, 'tools')
if not os.path.exists(tools_dir):
buildbot_common.ErrorExit('SDK tools dir is missing: %s' % tools_dir)
exampledir = os.path.join(pepperdir, 'examples')
if clobber:
buildbot_common.RemoveDir(exampledir)
buildbot_common.MakeDir(exampledir)
# Copy files for individual build and landing page
files = ['favicon.ico', 'httpd.cmd']
CopyFilesFromTo(files, SDK_EXAMPLE_DIR, exampledir)
resourcesdir = os.path.join(SDK_EXAMPLE_DIR, 'resources')
files = ['index.css', 'index.js', 'button_close.png',
'button_close_hover.png']
CopyFilesFromTo(files, resourcesdir, exampledir)
# Copy tools scripts and make includes
buildbot_common.CopyDir(os.path.join(SDK_SRC_DIR, 'tools', '*.py'),
tools_dir)
buildbot_common.CopyDir(os.path.join(SDK_SRC_DIR, 'tools', '*.mk'),
tools_dir)
# On Windows add a prebuilt make
if getos.GetPlatform() == 'win':
buildbot_common.BuildStep('Add MAKE')
http_download.HttpDownload(GSTORE + MAKE,
os.path.join(tools_dir, 'make.exe'))
def ValidateToolchains(toolchains):
invalid_toolchains = set(toolchains) - set(VALID_TOOLCHAINS)
if invalid_toolchains:
buildbot_common.ErrorExit('Invalid toolchain(s): %s' % (
', '.join(invalid_toolchains)))
def UpdateProjects(pepperdir, project_tree, toolchains,
clobber=False, configs=None, first_toolchain=False):
if configs is None:
configs = ['Debug', 'Release']
if not os.path.exists(os.path.join(pepperdir, 'tools')):
buildbot_common.ErrorExit('Examples depend on missing tools.')
if not os.path.exists(os.path.join(pepperdir, 'toolchain')):
buildbot_common.ErrorExit('Examples depend on missing toolchains.')
ValidateToolchains(toolchains)
# Create the library output directories
libdir = os.path.join(pepperdir, 'lib')
platform = getos.GetPlatform()
for config in configs:
for arch in LIB_DICT[platform]:
dirpath = os.path.join(libdir, '%s_%s_host' % (platform, arch), config)
if clobber:
buildbot_common.RemoveDir(dirpath)
buildbot_common.MakeDir(dirpath)
landing_page = None
for branch, projects in project_tree.iteritems():
dirpath = os.path.join(pepperdir, branch)
if clobber:
buildbot_common.RemoveDir(dirpath)
buildbot_common.MakeDir(dirpath)
targets = [desc['NAME'] for desc in projects]
# Generate master make for this branch of projects
generate_make.GenerateMasterMakefile(pepperdir,
os.path.join(pepperdir, branch),
targets)
if branch.startswith('examples') and not landing_page:
landing_page = LandingPage()
# Generate individual projects
for desc in projects:
srcroot = os.path.dirname(desc['FILEPATH'])
generate_make.ProcessProject(pepperdir, srcroot, pepperdir, desc,
toolchains, configs=configs,
first_toolchain=first_toolchain)
if branch.startswith('examples'):
landing_page.AddDesc(desc)
if landing_page:
    # Generate the landing page HTML file.
index_html = os.path.join(pepperdir, 'examples', 'index.html')
example_resources_dir = os.path.join(SDK_EXAMPLE_DIR, 'resources')
index_template = os.path.join(example_resources_dir,
'index.html.template')
with open(index_html, 'w') as fh:
out = landing_page.GeneratePage(index_template)
fh.write(out)
# Generate top Make for examples
targets = ['api', 'demo', 'getting_started', 'tutorial']
targets = [x for x in targets if 'examples/'+x in project_tree]
branch_name = 'examples'
generate_make.GenerateMasterMakefile(pepperdir,
os.path.join(pepperdir, branch_name),
targets)
def BuildProjectsBranch(pepperdir, branch, deps, clean, config, args=None):
make_dir = os.path.join(pepperdir, branch)
print "\nMake: " + make_dir
if getos.GetPlatform() == 'win':
# We need to modify the environment to build host on Windows.
make = os.path.join(make_dir, 'make.bat')
else:
make = 'make'
env = None
if os.environ.get('USE_GOMA') == '1':
env = dict(os.environ)
env['NACL_COMPILER_PREFIX'] = 'gomacc'
# Add -m32 to the CFLAGS when building using i686-nacl-gcc
# otherwise goma won't recognise it as different to the x86_64
# build.
env['X86_32_CFLAGS'] = '-m32'
env['X86_32_CXXFLAGS'] = '-m32'
jobs = '50'
else:
jobs = str(multiprocessing.cpu_count())
make_cmd = [make, '-j', jobs]
make_cmd.append('CONFIG='+config)
if not deps:
make_cmd.append('IGNORE_DEPS=1')
if verbose:
make_cmd.append('V=1')
if args:
make_cmd += args
else:
make_cmd.append('TOOLCHAIN=all')
buildbot_common.Run(make_cmd, cwd=make_dir, env=env)
if clean:
    # Clean to remove temporary files but keep the build output.
buildbot_common.Run(make_cmd + ['clean'], cwd=make_dir, env=env)
def BuildProjects(pepperdir, project_tree, deps=True,
clean=False, config='Debug'):
  # Make sure we build libraries (which live in 'src') before
  # any of the examples.
  build_first = [p for p in project_tree if p == 'src']
  build_second = [p for p in project_tree if p != 'src']
for branch in build_first + build_second:
BuildProjectsBranch(pepperdir, branch, deps, clean, config)
def main(args):
parser = optparse.OptionParser()
parser.add_option('-c', '--clobber',
help='Clobber project directories before copying new files',
action='store_true', default=False)
parser.add_option('-b', '--build',
help='Build the projects.', action='store_true')
parser.add_option('--config',
help='Choose configuration to build (Debug or Release). Builds both '
'by default')
parser.add_option('-x', '--experimental',
help='Build experimental projects', action='store_true')
parser.add_option('-t', '--toolchain',
help='Build using toolchain. Can be passed more than once.',
action='append', default=[])
parser.add_option('-d', '--dest',
help='Select which build destinations (project types) are valid.',
action='append')
parser.add_option('-p', '--project',
help='Select which projects are valid.',
action='append')
parser.add_option('-v', '--verbose', action='store_true')
options, args = parser.parse_args(args[1:])
if args:
parser.error('Not expecting any arguments.')
if 'NACL_SDK_ROOT' in os.environ:
# We don't want the currently configured NACL_SDK_ROOT to have any effect
# on the build.
del os.environ['NACL_SDK_ROOT']
pepper_ver = str(int(build_version.ChromeMajorVersion()))
pepperdir = os.path.join(OUT_DIR, 'pepper_' + pepper_ver)
if not options.toolchain:
options.toolchain = ['newlib', 'glibc', 'pnacl', 'host']
if 'host' in options.toolchain:
options.toolchain.remove('host')
options.toolchain.append(getos.GetPlatform())
print 'Adding platform: ' + getos.GetPlatform()
ValidateToolchains(options.toolchain)
filters = {}
if options.toolchain:
filters['TOOLS'] = options.toolchain
print 'Filter by toolchain: ' + str(options.toolchain)
if not options.experimental:
filters['EXPERIMENTAL'] = False
if options.dest:
filters['DEST'] = options.dest
print 'Filter by type: ' + str(options.dest)
if options.project:
filters['NAME'] = options.project
print 'Filter by name: ' + str(options.project)
try:
project_tree = parse_dsc.LoadProjectTree(SDK_SRC_DIR, include=filters)
except parse_dsc.ValidationError as e:
buildbot_common.ErrorExit(str(e))
parse_dsc.PrintProjectTree(project_tree)
UpdateHelpers(pepperdir, clobber=options.clobber)
UpdateProjects(pepperdir, project_tree, options.toolchain,
clobber=options.clobber)
if options.verbose:
global verbose
verbose = True
if options.build:
if options.config:
configs = [options.config]
else:
configs = ['Debug', 'Release']
for config in configs:
BuildProjects(pepperdir, project_tree, config=config)
return 0
if __name__ == '__main__':
script_name = os.path.basename(sys.argv[0])
try:
sys.exit(main(sys.argv))
except parse_dsc.ValidationError as e:
buildbot_common.ErrorExit('%s: %s' % (script_name, e))
except KeyboardInterrupt:
buildbot_common.ErrorExit('%s: interrupted' % script_name)
|
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Session Handling for SQLAlchemy backend.
Initializing:
* Call set_defaults with, at minimum, the following kwargs:
sql_connection, sqlite_db
Example:
session.set_defaults(
sql_connection="sqlite:///var/lib/nailgun/sqlite.db",
sqlite_db="/var/lib/nailgun/sqlite.db")
Recommended ways to use sessions within this framework:
* Don't use them explicitly; this is like running with AUTOCOMMIT=1.
model_query() will implicitly use a session when called without one
supplied. This is the ideal situation because it will allow queries
to be automatically retried if the database connection is interrupted.
Note: Automatic retry will be enabled in a future patch.
It is generally fine to issue several queries in a row like this. Even though
they may be run in separate transactions and/or separate sessions, each one
will see the data from the prior calls. If needed, undo- or rollback-like
functionality should be handled at a logical level. For an example, look at
the code around quotas and reservation_rollback().
Examples:
def get_foo(context, foo):
return model_query(context, models.Foo).\
filter_by(foo=foo).\
first()
def update_foo(context, id, newfoo):
model_query(context, models.Foo).\
filter_by(id=id).\
update({'foo': newfoo})
def create_foo(context, values):
foo_ref = models.Foo()
foo_ref.update(values)
foo_ref.save()
return foo_ref
* Within the scope of a single method, keeping all the reads and writes within
the context managed by a single session. In this way, the session's __exit__
handler will take care of calling flush() and commit() for you.
If using this approach, you should not explicitly call flush() or commit().
Any error within the context of the session will cause the session to emit
a ROLLBACK. If the connection is dropped before this is possible, the
database will implicitly rollback the transaction.
Note: statements in the session scope will not be automatically retried.
If you create models within the session, they need to be added, but you
do not need to call model.save()
def create_many_foo(context, foos):
session = get_session()
with session.begin():
for foo in foos:
foo_ref = models.Foo()
foo_ref.update(foo)
session.add(foo_ref)
def update_bar(context, foo_id, newbar):
session = get_session()
with session.begin():
foo_ref = model_query(context, models.Foo, session).\
filter_by(id=foo_id).\
first()
model_query(context, models.Bar, session).\
filter_by(id=foo_ref['bar_id']).\
update({'bar': newbar})
Note: update_bar is a trivially simple example of using "with session.begin".
Whereas create_many_foo is a good example of when a transaction is needed,
it is always best to use as few queries as possible. The two queries in
update_bar can be better expressed using a single query which avoids
the need for an explicit transaction. It can be expressed like so:
def update_bar(context, foo_id, newbar):
        subq = model_query(context, models.Foo.bar_id).\
filter_by(id=foo_id).\
limit(1).\
subquery()
model_query(context, models.Bar).\
filter_by(id=subq.as_scalar()).\
update({'bar': newbar})
  For reference, this emits approximately the following SQL statement:
UPDATE bar SET bar = ${newbar}
WHERE id=(SELECT bar_id FROM foo WHERE id = ${foo_id} LIMIT 1);
* Passing an active session between methods. Sessions should only be passed
to private methods. The private method must use a subtransaction; otherwise
SQLAlchemy will throw an error when you call session.begin() on an existing
transaction. Public methods should not accept a session parameter and should
not be involved in sessions within the caller's scope.
Note that this incurs more overhead in SQLAlchemy than the above means
due to nesting transactions, and it is not possible to implicitly retry
failed database operations when using this approach.
This also makes code somewhat more difficult to read and debug, because a
single database transaction spans more than one method. Error handling
becomes less clear in this situation. When this is needed for code clarity,
it should be clearly documented.
def myfunc(foo):
session = get_session()
with session.begin():
# do some database things
bar = _private_func(foo, session)
return bar
def _private_func(foo, session=None):
if not session:
session = get_session()
with session.begin(subtransaction=True):
# do some other database things
return bar
There are some things which it is best to avoid:
* Don't keep a transaction open any longer than necessary.
This means that your "with session.begin()" block should be as short
as possible, while still containing all the related calls for that
transaction.
* Avoid "with_lockmode('UPDATE')" when possible.
In MySQL/InnoDB, when a "SELECT ... FOR UPDATE" query does not match
any rows, it will take a gap-lock. This is a form of write-lock on the
"gap" where no rows exist, and prevents any other writes to that space.
This can effectively prevent any INSERT into a table by locking the gap
at the end of the index. Similar problems will occur if the SELECT FOR UPDATE
has an overly broad WHERE clause, or doesn't properly use an index.
One idea proposed at ODS Fall '12 was to use a normal SELECT to test the
number of rows matching a query, and if only one row is returned,
then issue the SELECT FOR UPDATE.
The better long-term solution is to use INSERT .. ON DUPLICATE KEY UPDATE.
However, this can not be done until the "deleted" columns are removed and
proper UNIQUE constraints are added to the tables.
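  A rough, illustrative sketch of that count-then-lock idea (BarModel and
  some_condition are placeholder names, as in the soft-delete examples below,
  and the preliminary count is only a heuristic, not a guarantee):
    def careful_update_bar(newbar):
        session = get_session()
        with session.begin():
            query = model_query(BarModel, session=session).\
                find(some_condition)
            if query.count() == 1:
                # Take the row lock only once we know a single row matches.
                bar_ref = query.with_lockmode('update').first()
                bar_ref.bar = newbar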
Enabling soft deletes:
* To use/enable soft-deletes, the SoftDeleteMixin must be added
to your model class. For example:
class NovaBase(models.SoftDeleteMixin, models.ModelBase):
pass
Efficient use of soft deletes:
* There are two possible ways to mark a record as deleted:
model.soft_delete() and query.soft_delete().
  The model.soft_delete() method works with a single, already-fetched entry.
  query.soft_delete() makes only one db request for all entries that
  correspond to the query.
* In almost all cases you should use query.soft_delete(). Some examples:
def soft_delete_bar():
count = model_query(BarModel).find(some_condition).soft_delete()
if count == 0:
raise Exception("0 entries were soft deleted")
def complex_soft_delete_with_synchronization_bar(session=None):
if session is None:
session = get_session()
with session.begin(subtransactions=True):
count = model_query(BarModel).\
find(some_condition).\
soft_delete(synchronize_session=True)
# Here synchronize_session is required, because we
# don't know what is going on in outer session.
if count == 0:
raise Exception("0 entries were soft deleted")
* There is only one situation where model.soft_delete() is appropriate: when
you fetch a single record, work with it, and mark it as deleted in the same
transaction.
def soft_delete_bar_model():
session = get_session()
with session.begin():
bar_ref = model_query(BarModel).find(some_condition).first()
# Work with bar_ref
bar_ref.soft_delete(session=session)
  However, if you need to work with all entries that correspond to the query
  and then soft delete them, you should use the query.soft_delete() method:
def soft_delete_multi_models():
session = get_session()
with session.begin():
query = model_query(BarModel, session=session).\
find(some_condition)
model_refs = query.all()
# Work with model_refs
query.soft_delete(synchronize_session=False)
# synchronize_session=False should be set if there is no outer
# session and these entries are not used after this.
When working with many rows, it is very important to use query.soft_delete,
which issues a single query. Using model.soft_delete(), as in the following
example, is very inefficient.
for bar_ref in bar_refs:
bar_ref.soft_delete(session=session)
# This will produce count(bar_refs) db requests.
"""
import functools
import os.path
import re
import time
from oslo_config import cfg
import six
from sqlalchemy import exc as sqla_exc
import sqlalchemy.interfaces
from sqlalchemy.interfaces import PoolListener
import sqlalchemy.orm
from sqlalchemy.pool import NullPool, StaticPool
from sqlalchemy.sql.expression import literal_column
from nailgun.openstack.common.db import exception
from nailgun.openstack.common.gettextutils import _ # noqa
from nailgun.openstack.common import log as logging
from nailgun.openstack.common import timeutils
sqlite_db_opts = [
cfg.StrOpt('sqlite_db',
default='nailgun.sqlite',
help='the filename to use with sqlite'),
cfg.BoolOpt('sqlite_synchronous',
default=True,
help='If true, use synchronous mode for sqlite'),
]
database_opts = [
cfg.StrOpt('connection',
default='sqlite:///' +
os.path.abspath(os.path.join(os.path.dirname(__file__),
'../', '$sqlite_db')),
help='The SQLAlchemy connection string used to connect to the '
'database',
deprecated_opts=[cfg.DeprecatedOpt('sql_connection',
group='DEFAULT'),
cfg.DeprecatedOpt('sql_connection',
group='DATABASE'),
cfg.DeprecatedOpt('connection',
group='sql'), ]),
cfg.StrOpt('slave_connection',
default='',
help='The SQLAlchemy connection string used to connect to the '
'slave database'),
cfg.IntOpt('idle_timeout',
default=3600,
deprecated_opts=[cfg.DeprecatedOpt('sql_idle_timeout',
group='DEFAULT'),
cfg.DeprecatedOpt('sql_idle_timeout',
group='DATABASE')],
help='timeout before idle sql connections are reaped'),
cfg.IntOpt('min_pool_size',
default=1,
deprecated_opts=[cfg.DeprecatedOpt('sql_min_pool_size',
group='DEFAULT'),
cfg.DeprecatedOpt('sql_min_pool_size',
group='DATABASE')],
help='Minimum number of SQL connections to keep open in a '
'pool'),
cfg.IntOpt('max_pool_size',
default=None,
deprecated_opts=[cfg.DeprecatedOpt('sql_max_pool_size',
group='DEFAULT'),
cfg.DeprecatedOpt('sql_max_pool_size',
group='DATABASE')],
help='Maximum number of SQL connections to keep open in a '
'pool'),
cfg.IntOpt('max_retries',
default=10,
deprecated_opts=[cfg.DeprecatedOpt('sql_max_retries',
group='DEFAULT'),
cfg.DeprecatedOpt('sql_max_retries',
group='DATABASE')],
help='maximum db connection retries during startup. '
'(setting -1 implies an infinite retry count)'),
cfg.IntOpt('retry_interval',
default=10,
deprecated_opts=[cfg.DeprecatedOpt('sql_retry_interval',
group='DEFAULT'),
cfg.DeprecatedOpt('reconnect_interval',
group='DATABASE')],
help='interval between retries of opening a sql connection'),
cfg.IntOpt('max_overflow',
default=None,
deprecated_opts=[cfg.DeprecatedOpt('sql_max_overflow',
group='DEFAULT'),
cfg.DeprecatedOpt('sqlalchemy_max_overflow',
group='DATABASE')],
help='If set, use this value for max_overflow with sqlalchemy'),
cfg.IntOpt('connection_debug',
default=0,
deprecated_opts=[cfg.DeprecatedOpt('sql_connection_debug',
group='DEFAULT')],
help='Verbosity of SQL debugging information. 0=None, '
'100=Everything'),
cfg.BoolOpt('connection_trace',
default=False,
deprecated_opts=[cfg.DeprecatedOpt('sql_connection_trace',
group='DEFAULT')],
help='Add python stack traces to SQL as comment strings'),
cfg.IntOpt('pool_timeout',
default=None,
deprecated_opts=[cfg.DeprecatedOpt('sqlalchemy_pool_timeout',
group='DATABASE')],
help='If set, use this value for pool_timeout with sqlalchemy'),
]
CONF = cfg.CONF
CONF.register_opts(sqlite_db_opts)
CONF.register_opts(database_opts, 'database')
LOG = logging.getLogger(__name__)
_ENGINE = None
_MAKER = None
_SLAVE_ENGINE = None
_SLAVE_MAKER = None
def set_defaults(sql_connection, sqlite_db, max_pool_size=None,
max_overflow=None, pool_timeout=None):
"""Set defaults for configuration variables."""
cfg.set_defaults(database_opts,
connection=sql_connection)
cfg.set_defaults(sqlite_db_opts,
sqlite_db=sqlite_db)
# Update the QueuePool defaults
if max_pool_size is not None:
cfg.set_defaults(database_opts,
max_pool_size=max_pool_size)
if max_overflow is not None:
cfg.set_defaults(database_opts,
max_overflow=max_overflow)
if pool_timeout is not None:
cfg.set_defaults(database_opts,
pool_timeout=pool_timeout)
def cleanup():
global _ENGINE, _MAKER
global _SLAVE_ENGINE, _SLAVE_MAKER
if _MAKER:
_MAKER.close_all()
_MAKER = None
if _ENGINE:
_ENGINE.dispose()
_ENGINE = None
if _SLAVE_MAKER:
_SLAVE_MAKER.close_all()
_SLAVE_MAKER = None
if _SLAVE_ENGINE:
_SLAVE_ENGINE.dispose()
_SLAVE_ENGINE = None
class SqliteForeignKeysListener(PoolListener):
"""Ensures that the foreign key constraints are enforced in SQLite.
The foreign key constraints are disabled by default in SQLite,
so the foreign key constraints will be enabled here for every
database connection
"""
def connect(self, dbapi_con, con_record):
dbapi_con.execute('pragma foreign_keys=ON')
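# Note: this listener takes effect only when create_engine() below is called
# with sqlite_fk=True; see the sqlite branch of create_engine().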
def get_session(autocommit=True, expire_on_commit=False,
sqlite_fk=False, slave_session=False):
"""Return a SQLAlchemy session."""
global _MAKER
global _SLAVE_MAKER
maker = _MAKER
if slave_session:
maker = _SLAVE_MAKER
if maker is None:
engine = get_engine(sqlite_fk=sqlite_fk, slave_engine=slave_session)
maker = get_maker(engine, autocommit, expire_on_commit)
if slave_session:
_SLAVE_MAKER = maker
else:
_MAKER = maker
session = maker()
return session
# note(boris-42): In current versions of DB backends unique constraint
# violation messages follow the structure:
#
# sqlite:
# 1 column - (IntegrityError) column c1 is not unique
# N columns - (IntegrityError) column c1, c2, ..., N are not unique
#
# postgres:
# 1 column - (IntegrityError) duplicate key value violates unique
# constraint "users_c1_key"
# N columns - (IntegrityError) duplicate key value violates unique
# constraint "name_of_our_constraint"
#
# mysql:
# 1 column - (IntegrityError) (1062, "Duplicate entry 'value_of_c1' for key
# 'c1'")
# N columns - (IntegrityError) (1062, "Duplicate entry 'values joined
# with -' for key 'name_of_our_constraint'")
_DUP_KEY_RE_DB = {
"sqlite": re.compile(r"^.*columns?([^)]+)(is|are)\s+not\s+unique$"),
"postgresql": re.compile(r"^.*duplicate\s+key.*\"([^\"]+)\"\s*\n.*$"),
"mysql": re.compile(r"^.*\(1062,.*'([^\']+)'\"\)$")
}
def _raise_if_duplicate_entry_error(integrity_error, engine_name):
"""Raise exception if two entries are duplicated.
In this function will be raised DBDuplicateEntry exception if integrity
error wrap unique constraint violation.
"""
def get_columns_from_uniq_cons_or_name(columns):
        # note(vsergeyev): UniqueConstraint name convention: "uniq_t0c10c2"
        # where `t` is the table name and columns `c1`, `c2`
        # are in the UniqueConstraint.
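        # For example, "uniq_user0name0email" yields ["name", "email"]
        # (the leading table-name token is dropped by the [1:] slice below).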
uniqbase = "uniq_"
if not columns.startswith(uniqbase):
if engine_name == "postgresql":
return [columns[columns.index("_") + 1:columns.rindex("_")]]
return [columns]
return columns[len(uniqbase):].split("0")[1:]
if engine_name not in ["mysql", "sqlite", "postgresql"]:
return
# FIXME(johannes): The usage of the .message attribute has been
# deprecated since Python 2.6. However, the exceptions raised by
# SQLAlchemy can differ when using unicode() and accessing .message.
# An audit across all three supported engines will be necessary to
# ensure there are no regressions.
m = _DUP_KEY_RE_DB[engine_name].match(integrity_error.message)
if not m:
return
columns = m.group(1)
if engine_name == "sqlite":
columns = columns.strip().split(", ")
else:
columns = get_columns_from_uniq_cons_or_name(columns)
raise exception.DBDuplicateEntry(columns, integrity_error)
# NOTE(comstud): In current versions of DB backends, Deadlock violation
# messages follow the structure:
#
# mysql:
# (OperationalError) (1213, 'Deadlock found when trying to get lock; try '
# 'restarting transaction') <query_str> <query_args>
_DEADLOCK_RE_DB = {
"mysql": re.compile(r"^.*\(1213, 'Deadlock.*")
}
def _raise_if_deadlock_error(operational_error, engine_name):
"""Raise exception on deadlock condition.
Raise DBDeadlock exception if OperationalError contains a Deadlock
condition.
"""
re = _DEADLOCK_RE_DB.get(engine_name)
if re is None:
return
# FIXME(johannes): The usage of the .message attribute has been
# deprecated since Python 2.6. However, the exceptions raised by
# SQLAlchemy can differ when using unicode() and accessing .message.
# An audit across all three supported engines will be necessary to
# ensure there are no regressions.
m = re.match(operational_error.message)
if not m:
return
raise exception.DBDeadlock(operational_error)
def _wrap_db_error(f):
@functools.wraps(f)
def _wrap(*args, **kwargs):
try:
return f(*args, **kwargs)
except UnicodeEncodeError:
raise exception.DBInvalidUnicodeParameter()
# note(boris-42): We should catch unique constraint violation and
# wrap it by our own DBDuplicateEntry exception. Unique constraint
# violation is wrapped by IntegrityError.
except sqla_exc.OperationalError as e:
_raise_if_deadlock_error(e, get_engine().name)
# NOTE(comstud): A lot of code is checking for OperationalError
# so let's not wrap it for now.
raise
except sqla_exc.IntegrityError as e:
# note(boris-42): SqlAlchemy doesn't unify errors from different
# DBs so we must do this. Also in some tables (for example
# instance_types) there are more than one unique constraint. This
# means we should get names of columns, which values violate
# unique constraint, from error message.
_raise_if_duplicate_entry_error(e, get_engine().name)
raise exception.DBError(e)
except Exception as e:
LOG.exception(_('DB exception wrapped.'))
raise exception.DBError(e)
return _wrap
def get_engine(sqlite_fk=False, slave_engine=False):
"""Return a SQLAlchemy engine."""
global _ENGINE
global _SLAVE_ENGINE
engine = _ENGINE
db_uri = CONF.database.connection
if slave_engine:
engine = _SLAVE_ENGINE
db_uri = CONF.database.slave_connection
if engine is None:
engine = create_engine(db_uri,
sqlite_fk=sqlite_fk)
if slave_engine:
_SLAVE_ENGINE = engine
else:
_ENGINE = engine
return engine
def _synchronous_switch_listener(dbapi_conn, connection_rec):
"""Switch sqlite connections to non-synchronous mode."""
dbapi_conn.execute("PRAGMA synchronous = OFF")
def _add_regexp_listener(dbapi_con, con_record):
"""Add REGEXP function to sqlite connections."""
def regexp(expr, item):
reg = re.compile(expr)
return reg.search(six.text_type(item)) is not None
dbapi_con.create_function('regexp', 2, regexp)
def _thread_yield(dbapi_con, con_record):
"""Ensure other greenthreads get a chance to be executed.
If we use eventlet.monkey_patch(), eventlet.greenthread.sleep(0) will
execute instead of time.sleep(0).
Force a context switch. With common database backends (eg MySQLdb and
sqlite), there is no implicit yield caused by network I/O since they are
implemented by C libraries that eventlet cannot monkey patch.
"""
time.sleep(0)
def _ping_listener(dbapi_conn, connection_rec, connection_proxy):
"""Ensures that MySQL connections checked out of the pool are alive.
Borrowed from:
http://groups.google.com/group/sqlalchemy/msg/a4ce563d802c929f
"""
try:
dbapi_conn.cursor().execute('select 1')
except dbapi_conn.OperationalError as ex:
if ex.args[0] in (2006, 2013, 2014, 2045, 2055):
LOG.warn(_('Got mysql server has gone away: %s'), ex)
raise sqla_exc.DisconnectionError("Database server went away")
else:
raise
def _is_db_connection_error(args):
"""Return True if error in connecting to db."""
# NOTE(adam_g): This is currently MySQL specific and needs to be extended
# to support Postgres and others.
# For the db2, the error code is -30081 since the db2 is still not ready
conn_err_codes = ('2002', '2003', '2006', '-30081')
for err_code in conn_err_codes:
if args.find(err_code) != -1:
return True
return False
def create_engine(sql_connection, sqlite_fk=False):
"""Return a new SQLAlchemy engine."""
# NOTE(geekinutah): At this point we could be connecting to the normal
# db handle or the slave db handle. Things like
# _wrap_db_error aren't going to work well if their
# backends don't match. Let's check.
_assert_matching_drivers()
connection_dict = sqlalchemy.engine.url.make_url(sql_connection)
engine_args = {
"pool_recycle": CONF.database.idle_timeout,
"echo": False,
'convert_unicode': True,
}
# Map our SQL debug level to SQLAlchemy's options
if CONF.database.connection_debug >= 100:
engine_args['echo'] = 'debug'
elif CONF.database.connection_debug >= 50:
engine_args['echo'] = True
if "sqlite" in connection_dict.drivername:
if sqlite_fk:
engine_args["listeners"] = [SqliteForeignKeysListener()]
engine_args["poolclass"] = NullPool
if CONF.database.connection == "sqlite://":
engine_args["poolclass"] = StaticPool
engine_args["connect_args"] = {'check_same_thread': False}
else:
if CONF.database.max_pool_size is not None:
engine_args['pool_size'] = CONF.database.max_pool_size
if CONF.database.max_overflow is not None:
engine_args['max_overflow'] = CONF.database.max_overflow
if CONF.database.pool_timeout is not None:
engine_args['pool_timeout'] = CONF.database.pool_timeout
engine = sqlalchemy.create_engine(sql_connection, **engine_args)
sqlalchemy.event.listen(engine, 'checkin', _thread_yield)
if 'mysql' in connection_dict.drivername:
sqlalchemy.event.listen(engine, 'checkout', _ping_listener)
elif 'sqlite' in connection_dict.drivername:
if not CONF.sqlite_synchronous:
sqlalchemy.event.listen(engine, 'connect',
_synchronous_switch_listener)
sqlalchemy.event.listen(engine, 'connect', _add_regexp_listener)
if (CONF.database.connection_trace and
engine.dialect.dbapi.__name__ == 'MySQLdb'):
_patch_mysqldb_with_stacktrace_comments()
try:
engine.connect()
except sqla_exc.OperationalError as e:
if not _is_db_connection_error(e.args[0]):
raise
remaining = CONF.database.max_retries
if remaining == -1:
remaining = 'infinite'
while True:
msg = _('SQL connection failed. %s attempts left.')
LOG.warn(msg % remaining)
if remaining != 'infinite':
remaining -= 1
time.sleep(CONF.database.retry_interval)
try:
engine.connect()
break
except sqla_exc.OperationalError as e:
if (remaining != 'infinite' and remaining == 0) or \
not _is_db_connection_error(e.args[0]):
raise
return engine
class Query(sqlalchemy.orm.query.Query):
"""Subclass of sqlalchemy.query with soft_delete() method."""
def soft_delete(self, synchronize_session='evaluate'):
return self.update({'deleted': literal_column('id'),
'updated_at': literal_column('updated_at'),
'deleted_at': timeutils.utcnow()},
synchronize_session=synchronize_session)
class Session(sqlalchemy.orm.session.Session):
"""Custom Session class to avoid SqlAlchemy Session monkey patching."""
@_wrap_db_error
def query(self, *args, **kwargs):
return super(Session, self).query(*args, **kwargs)
@_wrap_db_error
def flush(self, *args, **kwargs):
return super(Session, self).flush(*args, **kwargs)
@_wrap_db_error
def execute(self, *args, **kwargs):
return super(Session, self).execute(*args, **kwargs)
def get_maker(engine, autocommit=True, expire_on_commit=False):
"""Return a SQLAlchemy sessionmaker using the given engine."""
return sqlalchemy.orm.sessionmaker(bind=engine,
class_=Session,
autocommit=autocommit,
expire_on_commit=expire_on_commit,
query_cls=Query)
def _patch_mysqldb_with_stacktrace_comments():
"""Adds current stack trace as a comment in queries.
Patches MySQLdb.cursors.BaseCursor._do_query.
"""
import MySQLdb.cursors
import traceback
old_mysql_do_query = MySQLdb.cursors.BaseCursor._do_query
def _do_query(self, q):
stack = ''
for filename, line, method, function in traceback.extract_stack():
# exclude various common things from trace
if filename.endswith('session.py') and method == '_do_query':
continue
if filename.endswith('api.py') and method == 'wrapper':
continue
if filename.endswith('utils.py') and method == '_inner':
continue
if filename.endswith('exception.py') and method == '_wrap':
continue
# db/api is just a wrapper around db/sqlalchemy/api
if filename.endswith('db/api.py'):
continue
# only trace inside nailgun
index = filename.rfind('nailgun')
if index == -1:
continue
stack += "File:%s:%s Method:%s() Line:%s | " \
% (filename[index:], line, method, function)
# strip trailing " | " from stack
if stack:
stack = stack[:-3]
qq = "%s /* %s */" % (q, stack)
else:
qq = q
old_mysql_do_query(self, qq)
setattr(MySQLdb.cursors.BaseCursor, '_do_query', _do_query)
def _assert_matching_drivers():
"""Make sure slave handle and normal handle have the same driver."""
# NOTE(geekinutah): There's no use case for writing to one backend and
# reading from another. Who knows what the future holds?
if CONF.database.slave_connection == '':
return
normal = sqlalchemy.engine.url.make_url(CONF.database.connection)
slave = sqlalchemy.engine.url.make_url(CONF.database.slave_connection)
assert normal.drivername == slave.drivername
|
|
# -*- coding: utf-8 -*-
"""
Created on Sun Aug 29 00:09:26 2010
@author: Charles Law
"""
import math
def fft(fin, inverse):
nfft = len(fin)
twiddles, factors = fft_alloc(nfft, inverse)
fout = []
for i in xrange(nfft):
fout.append((0, 0))
fout_ind_start = 0
fin_ind_start = 0
in_stride = 1
fft_work(fout, fout_ind_start, fin, fin_ind_start, 1, in_stride, factors,
twiddles, nfft)
return fout
def fft_work(fout, fout_ind, f, f_ind, fstride, in_stride, factors,
twiddles, nfft_orig):
p = factors[0][0] # the radix
m = factors[0][1] # stage's fft length/p
factors = factors[1:]
fout_beg = fout_ind
fout_end = fout_ind + p*m
if m == 1 :
dowhile_if = 1
while ( dowhile_if ):
fout[fout_ind] = f[f_ind]
f_ind = f_ind + fstride*in_stride
fout_ind = fout_ind + 1
if fout_ind == fout_end:
dowhile_if = 0
else:
dowhile_if = 1
while ( dowhile_if ):
# recursive call:
# DFT of size m*p performed by doing
# p instances of smaller DFTs of size m,
# each one takes a decimated version of the input
fft_work(fout, fout_ind , f, f_ind, fstride*p, in_stride,
factors, twiddles, nfft_orig)
f_ind = f_ind + fstride*in_stride
#}while( (fout += m) != fout_end )
fout_ind = fout_ind + m
if ( fout_ind == fout_end ):
dowhile_if = 0
fout_ind = fout_beg
# recombine the p smaller DFTs
if p == 2:
fft_bfly2(fout, fout_ind, fstride, twiddles, m)
elif p == 3:
fft_bfly3(fout, fout_ind, fstride, twiddles, m)
else:
fft_bfly_generic(fout, fout_ind, fstride, twiddles, m, p, nfft_orig)
return fout
def fft_bfly2(fout, fout_ind, fstride, twiddles, m):
tw1_ind = 0
fout2_ind = fout_ind + m
dowhile_if = 1
while(dowhile_if):
t = _mult ( fout[fout2_ind], twiddles[tw1_ind] )
tw1_ind = tw1_ind + fstride
fout[fout2_ind] = _sub( fout[fout_ind], t )
fout[fout_ind] = _addto( fout[fout_ind], t )
fout2_ind = fout2_ind + 1
fout_ind = fout_ind + 1
m -= 1
if not(m):
dowhile_if = 0
return fout
def fft_bfly3(fout, fout_ind, fstride, twiddles, m):
k = m
m2 = 2*m
scratch = [(0, 0), (0, 0), (0, 0), (0, 0)]
epi3_i = twiddles[fstride*m][1]
tw1_ind = 0
tw2_ind = tw1_ind
dowhile_if = 1
while (dowhile_if):
scratch[1] = _mult( fout[fout_ind+m], twiddles[tw1_ind] )
scratch[2] = _mult( fout[fout_ind+m2], twiddles[tw2_ind] )
scratch[3] = _add( scratch[1], scratch[2] )
scratch[0] = _sub( scratch[1], scratch[2] )
tw1_ind = tw1_ind + fstride
tw2_ind = tw2_ind + fstride*2
fout[fout_ind+m] = ( fout[fout_ind][0] - (scratch[3][0])/2, \
fout[fout_ind][1] - (scratch[3][1])/2 )
scratch[0] = _mult_by_scalar( scratch[0], epi3_i )
fout[fout_ind] = _addto( fout[fout_ind], scratch[3] )
fout[fout_ind+m2] = ( fout[fout_ind+m][0] + scratch[0][1], \
fout[fout_ind+m][1] - scratch[0][0] )
fout[fout_ind+m] = ( fout[fout_ind+m][0] - scratch[0][1], \
fout[fout_ind+m][1] + scratch[0][0] )
fout_ind = fout_ind + 1
k -= 1
if not(k):
dowhile_if = 0
return fout
def fft_bfly_generic(fout, fout_ind, fstride, twiddles, m, p, nfft_orig):
n_orig = nfft_orig
# initialize scratch
scratch = []
for q1 in xrange(p): #( q1=0 ; q1<p ; ++q1 )
scratch.append(0)
for u in xrange(m): #( u=0; u<m; ++u )
k = u
for q1 in xrange(p): #( q1=0 ; q1<p ; ++q1 )
scratch[q1] = fout[fout_ind+k]
k = k + m
k = u
for q1 in xrange(p):
twidx = 0
fout[fout_ind+k] = scratch[0]
for q in xrange(1, p):
twidx = twidx + fstride * k
if (twidx >= n_orig):
twidx = twidx - nfft_orig
t = _mult( scratch[q], twiddles[twidx] )
fout[fout_ind+k] = _addto( fout[fout_ind+k], t )
k = k + m
return fout
def fft_alloc(nfft, inverse):
twiddles = []
for i in xrange(nfft):
phase = -2*math.pi*float(i) / float(nfft)
if (inverse):
phase = phase * float(-1)
twiddles.append(fft_cexp(phase))
factors = fft_factor(nfft)
return twiddles, factors
def fft_cexp(phase):
x = (math.cos(phase), math.sin(phase))
return x
def fft_factor(n):
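    # Illustrative example: fft_factor(6) returns [(2, 3), (3, 1)], i.e. a
    # list of (radix, remaining_length) pairs, one per mixed-radix stage.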
facbuf = []
p = 4
floor_sqrt = math.floor( math.sqrt( float(n) ) )
# factor out powers of 4, powers of 2, then any remaining primes
dowhile_test = 1
while (dowhile_test):
while n % p:
if p == 4:
p = 2
elif p == 2:
p = 3
else:
p = p + 2
if (p > floor_sqrt):
p = n # no more factors, skip to end
n = n / p
facbuf.append((p, n))
if not(n > 1):
dowhile_test = 0
return facbuf
def _mult( a, b ):
return ( a[0]*b[0] - a[1]*b[1], a[0]*b[1] + a[1]*b[0] )
def _sub( a, b ):
return ( a[0]-b[0], a[1]-b[1] )
def _add( a, b ):
return ( a[0] + b[0], a[1] + b[1] )
def _addto( res , a):
return ( res[0] + a[0], res[1] + a[1] )
def _mult_by_scalar( c, s ):
return ( c[0] * s, c[1] * s)
def main():
fin = [(0, 0), (1, 0), (1, 0), (1, 0), (1, 0), (0, 0)]
inverse = 0
print fft(fin, inverse)
if __name__ == '__main__':
main()
|
|
#------------------------------------------------------------------------------
# Copyright (c) 2005, Enthought, Inc.
# All rights reserved.
#
# This software is provided without warranty under the terms of the BSD
# license included in enthought/LICENSE.txt and may be redistributed only
# under the conditions described in the aforementioned license. The license
# is also available online at http://www.enthought.com/licenses/BSD.txt
# Thanks for using Enthought open source!
#
# Author: Enthought, Inc.
# Description: <Enthought pyface package component>
#------------------------------------------------------------------------------
""" A file explorer example. """
# Standard library imports.
import os, sys
# Put the Enthought library on the Python path.
sys.path.append(os.path.abspath(r'..\..\..'))
# Enthought library imports.
from pyface.api import ApplicationWindow, GUI, PythonShell, SplashScreen
from pyface.api import SplitApplicationWindow, SplitPanel
from pyface.action.api import Action, Group, MenuBarManager, MenuManager
from pyface.action.api import Separator, StatusBarManager, ToolBarManager
from traits.api import Float, Str
# Local imports.
from file_filters import AllowOnlyFolders
from file_sorters import FileSorter
from file_table_viewer import FileTableViewer
from file_tree_viewer import FileTreeViewer
class ExampleAction(Action):
""" An example action. """
accelerator = Str('Ctrl-K')
def perform(self):
""" Performs the action. """
print 'Performing', self.name
return
class MainWindow(SplitApplicationWindow):
""" The main application window. """
#### 'SplitApplicationWindow' interface ###################################
# The ratio of the size of the left/top pane to the right/bottom pane.
ratio = Float(0.3)
# The direction in which the panel is split.
direction = Str('vertical')
###########################################################################
# 'object' interface.
###########################################################################
def __init__(self, **traits):
""" Creates a new window. """
# Base class constructor.
super(MainWindow, self).__init__(**traits)
# Create the window's menu, tool and status bars.
self._create_action_bars()
return
###########################################################################
# Protected 'SplitApplicationWindow' interface.
###########################################################################
def _create_lhs(self, parent):
""" Creates the left hand side or top depending on the style. """
return self._create_file_tree(parent, os.path.abspath(os.curdir))
def _create_rhs(self, parent):
""" Creates the panel containing the selected preference page. """
self._rhs = SplitPanel(
parent = parent,
lhs = self._create_file_table,
rhs = self._create_python_shell,
direction = 'horizontal'
)
return self._rhs.control
###########################################################################
# Private interface.
###########################################################################
def _create_action_bars(self):
""" Creates the window's menu, tool and status bars. """
# Common actions.
highest = Action(name='Highest', style='radio')
higher = Action(name='Higher', style='radio', checked=True)
lower = Action(name='Lower', style='radio')
lowest = Action(name='Lowest', style='radio')
self._actions = [highest, higher, lower, lowest]
# Menu bar.
self.menu_bar_manager = MenuBarManager(
MenuManager(
ExampleAction(name='Foogle'),
Separator(),
highest,
higher,
lower,
lowest,
Separator(),
Action(name='E&xit', on_perform=self.close),
name = '&File',
)
)
# Tool bar.
self.tool_bar_manager = ToolBarManager(
ExampleAction(name='Foo'),
Separator(),
ExampleAction(name='Bar'),
Separator(),
ExampleAction(name='Baz'),
Separator(),
highest,
higher,
lower,
lowest
)
# Status bar.
self.status_bar_manager = StatusBarManager()
return
def _create_file_tree(self, parent, dirname):
""" Creates the file tree. """
self._tree_viewer = tree_viewer = FileTreeViewer(
parent,
input = os.path.abspath(os.curdir),
filters = [AllowOnlyFolders()]
)
tree_viewer.on_trait_change(self._on_selection_changed, 'selection')
return tree_viewer.control
def _create_file_table(self, parent):
""" Creates the file table. """
self._table_viewer = table_viewer = FileTableViewer(
parent,
sorter = FileSorter(),
odd_row_background = "white"
)
return table_viewer.control
def _create_python_shell(self, parent):
""" Creates the Python shell. """
self._python_shell = python_shell = PythonShell(parent)
python_shell.bind('widget', self._tree_viewer)
python_shell.bind('w', self._tree_viewer)
python_shell.bind('window', self)
python_shell.bind('actions', self._actions)
return python_shell.control
#### Trait event handlers #################################################
def _on_selection_changed(self, selection):
""" Called when the selection in the tree is changed. """
if len(selection) > 0:
self._table_viewer.input = selection[0]
return
# Application entry point.
if __name__ == '__main__':
# Create the GUI and put up a splash screen (this does NOT start the GUI
# event loop).
gui = GUI(splash_screen=SplashScreen())
# Create and open the main window.
window = MainWindow()
window.open()
# Start the GUI event loop.
gui.start_event_loop()
##### EOF #####################################################################
|
|
import mock
import pytest
from paasta_tools import long_running_service_tools
from paasta_tools import native_mesos_scheduler
def test_main():
with mock.patch(
'paasta_tools.native_mesos_scheduler.get_paasta_native_jobs_for_cluster',
return_value=[('service1', 'instance1'), ('service2', 'instance2')], autospec=True,
), mock.patch(
'paasta_tools.native_mesos_scheduler.create_driver', autospec=True,
), mock.patch(
'paasta_tools.native_mesos_scheduler.sleep', autospec=True,
), mock.patch(
'paasta_tools.native_mesos_scheduler.load_system_paasta_config', autospec=True,
), mock.patch(
'paasta_tools.native_mesos_scheduler.compose_job_id', autospec=True,
), mock.patch(
'paasta_tools.native_mesos_scheduler.NativeScheduler', autospec=True,
):
native_mesos_scheduler.main(["--stay-alive-seconds=0"])
@mock.patch('paasta_tools.mesos_tools.get_local_slave_state', autospec=True)
def test_paasta_native_services_running_here(mock_get_local_slave_state):
id_1 = 'klingon.ships.detected.249qwiomelht4jioewglkemr.someuuid'
id_2 = 'fire.photon.torpedos.jtgriemot5yhtwe94.someuuid'
id_3 = 'dota.axe.cleave.482u9jyoi4wed.someuuid'
id_4 = 'mesos.deployment.is.hard.someuuid'
id_5 = 'how.to.fake.data.someuuid'
ports_1 = '[111-111]'
ports_2 = '[222-222]'
ports_3 = '[333-333]'
ports_4 = '[444-444]'
ports_5 = '[555-555]'
mock_get_local_slave_state.return_value = {
'frameworks': [
{
'executors': [
{
'id': id_1, 'resources': {'ports': ports_1},
'tasks': [{'state': 'TASK_RUNNING'}],
},
{'id': id_2, 'resources': {'ports': ports_2}, 'tasks': [{'state': 'TASK_RUNNING'}]},
],
'name': 'paasta_native service.instance-1111111',
},
{
'executors': [
{'id': id_3, 'resources': {'ports': ports_3}, 'tasks': [{'state': 'TASK_RUNNING'}]},
{'id': id_4, 'resources': {'ports': ports_4}, 'tasks': [{'state': 'TASK_RUNNING'}]},
],
'name': 'paasta_native service.instance-3145jgreoifd',
},
{
'executors': [
{'id': id_5, 'resources': {'ports': ports_5}, 'tasks': [{'state': 'TASK_STAGED'}]},
],
'name': 'paasta_native service.instance-754rchoeurcho',
},
{
'executors': [
{'id': 'bunk', 'resources': {'ports': '[65-65]'}, 'tasks': [{'state': 'TASK_RUNNING'}]},
],
'name': 'super_bunk',
},
],
}
expected = [
('klingon', 'ships', 111),
('fire', 'photon', 222),
('dota', 'axe', 333),
('mesos', 'deployment', 444),
]
actual = native_mesos_scheduler.paasta_native_services_running_here()
mock_get_local_slave_state.assert_called_once_with(hostname=None)
assert expected == actual
def test_get_paasta_native_services_running_here_for_nerve():
cluster = 'edelweiss'
soa_dir = 'the_sound_of_music'
fake_marathon_services = [
('no_test', 'left_behind', 1111),
('no_docstrings', 'forever_abandoned', 2222),
]
registrations = [
['no_docstrings.dos'],
['no_test.uno'],
]
nerve_dicts = [
long_running_service_tools.ServiceNamespaceConfig({'binary': 1, 'proxy_port': 6666}),
long_running_service_tools.ServiceNamespaceConfig({'clock': 0, 'proxy_port': 6666}),
]
expected = [
('no_test.uno', {'clock': 0, 'port': 1111, 'proxy_port': 6666}),
('no_docstrings.dos', {'binary': 1, 'port': 2222, 'proxy_port': 6666}),
]
with mock.patch(
'paasta_tools.native_mesos_scheduler.paasta_native_services_running_here',
autospec=True,
return_value=fake_marathon_services,
) as pnsrh_patch, mock.patch(
'paasta_tools.native_mesos_scheduler.read_all_registrations_for_service_instance',
autospec=True,
side_effect=lambda *args, **kwargs: registrations.pop(),
) as get_namespace_patch, mock.patch(
'paasta_tools.native_mesos_scheduler.load_service_namespace_config',
autospec=True,
side_effect=lambda *args, **kwargs: nerve_dicts.pop(),
) as read_ns_config_patch:
actual = native_mesos_scheduler.get_paasta_native_services_running_here_for_nerve(cluster, soa_dir)
assert expected == actual
pnsrh_patch.assert_called_once_with(hostname=None)
get_namespace_patch.assert_any_call('no_test', 'left_behind', cluster, soa_dir)
get_namespace_patch.assert_any_call('no_docstrings', 'forever_abandoned', cluster, soa_dir)
assert get_namespace_patch.call_count == 2
read_ns_config_patch.assert_any_call('no_test', 'uno', soa_dir)
read_ns_config_patch.assert_any_call('no_docstrings', 'dos', soa_dir)
assert read_ns_config_patch.call_count == 2
def test_get_paasta_native_services_running_here_for_nerve_multiple_namespaces():
cluster = 'edelweiss'
soa_dir = 'the_sound_of_music'
fake_marathon_services = [
('no_test', 'left_behind', 1111),
('no_docstrings', 'forever_abandoned', 2222),
]
namespaces = [
['no_docstrings.quatro'],
['no_test.uno', 'no_test.dos', 'no_test.tres'],
]
nerve_dicts = {
('no_test', 'uno'): long_running_service_tools.ServiceNamespaceConfig({'proxy_port': 6666}),
('no_test', 'dos'): long_running_service_tools.ServiceNamespaceConfig({'proxy_port': 6667}),
('no_test', 'tres'): long_running_service_tools.ServiceNamespaceConfig({'proxy_port': 6668}),
('no_docstrings', 'quatro'): long_running_service_tools.ServiceNamespaceConfig({'proxy_port': 6669}),
}
expected = [
('no_test.uno', {'port': 1111, 'proxy_port': 6666}),
('no_test.dos', {'port': 1111, 'proxy_port': 6667}),
('no_test.tres', {'port': 1111, 'proxy_port': 6668}),
('no_docstrings.quatro', {'port': 2222, 'proxy_port': 6669}),
]
with mock.patch(
'paasta_tools.native_mesos_scheduler.paasta_native_services_running_here',
autospec=True,
return_value=fake_marathon_services,
) as pnsrh_patch, mock.patch(
'paasta_tools.native_mesos_scheduler.read_all_registrations_for_service_instance',
autospec=True,
side_effect=lambda *args, **kwargs: namespaces.pop(),
) as get_namespace_patch, mock.patch(
'paasta_tools.native_mesos_scheduler.load_service_namespace_config',
autospec=True,
side_effect=lambda service, namespace, soa_dir: nerve_dicts.pop((service, namespace)),
) as read_ns_config_patch:
actual = native_mesos_scheduler.get_paasta_native_services_running_here_for_nerve(cluster, soa_dir)
assert expected == actual
pnsrh_patch.assert_called_once_with(hostname=None)
get_namespace_patch.assert_any_call('no_test', 'left_behind', cluster, soa_dir)
get_namespace_patch.assert_any_call('no_docstrings', 'forever_abandoned', cluster, soa_dir)
assert get_namespace_patch.call_count == 2
read_ns_config_patch.assert_any_call('no_test', 'uno', soa_dir)
read_ns_config_patch.assert_any_call('no_test', 'dos', soa_dir)
read_ns_config_patch.assert_any_call('no_test', 'tres', soa_dir)
read_ns_config_patch.assert_any_call('no_docstrings', 'quatro', soa_dir)
assert read_ns_config_patch.call_count == 4
def test_get_paasta_native_services_running_here_for_nerve_when_not_in_smartstack():
cluster = 'edelweiss'
soa_dir = 'the_sound_of_music'
fake_marathon_services = [
('no_test', 'left_behind', 1111),
('no_docstrings', 'forever_abandoned', 2222),
]
registrations = [
['no_docstrings.dos'],
['no_test.uno'],
]
nerve_dicts = [
long_running_service_tools.ServiceNamespaceConfig({'binary': 1}),
long_running_service_tools.ServiceNamespaceConfig({'clock': 0, 'proxy_port': 6666}),
]
expected = [('no_test.uno', {'clock': 0, 'port': 1111, 'proxy_port': 6666})]
with mock.patch(
'paasta_tools.native_mesos_scheduler.paasta_native_services_running_here',
autospec=True,
return_value=fake_marathon_services,
) as pnsrh_patch, mock.patch(
'paasta_tools.native_mesos_scheduler.read_all_registrations_for_service_instance',
autospec=True,
side_effect=lambda *args, **kwargs: registrations.pop(),
) as get_namespace_patch, mock.patch(
'paasta_tools.native_mesos_scheduler.load_service_namespace_config',
autospec=True,
side_effect=lambda *args, **kwargs: nerve_dicts.pop(),
) as read_ns_config_patch:
actual = native_mesos_scheduler.get_paasta_native_services_running_here_for_nerve(cluster, soa_dir)
assert expected == actual
pnsrh_patch.assert_called_once_with(hostname=None)
get_namespace_patch.assert_any_call('no_test', 'left_behind', cluster, soa_dir)
get_namespace_patch.assert_any_call('no_docstrings', 'forever_abandoned', cluster, soa_dir)
assert get_namespace_patch.call_count == 2
read_ns_config_patch.assert_any_call('no_test', 'uno', soa_dir)
read_ns_config_patch.assert_any_call('no_docstrings', 'dos', soa_dir)
assert read_ns_config_patch.call_count == 2
def test_get_paasta_native_services_running_here_for_nerve_when_get_cluster_raises_custom_exception():
cluster = None
soa_dir = 'the_sound_of_music'
with mock.patch(
'paasta_tools.native_mesos_scheduler.load_system_paasta_config',
autospec=True,
) as load_system_paasta_config_patch, mock.patch(
'paasta_tools.native_mesos_scheduler.paasta_native_services_running_here',
autospec=True,
return_value=[],
):
load_system_paasta_config_patch.return_value.get_cluster \
= mock.Mock(side_effect=native_mesos_scheduler.PaastaNotConfiguredError)
actual = native_mesos_scheduler.get_paasta_native_services_running_here_for_nerve(cluster, soa_dir)
assert actual == []
def test_get_paasta_native_services_running_here_for_nerve_when_paasta_not_configured():
cluster = None
soa_dir = 'the_sound_of_music'
with mock.patch(
'paasta_tools.native_mesos_scheduler.load_system_paasta_config',
autospec=True,
) as load_system_paasta_config_patch, mock.patch(
'paasta_tools.native_mesos_scheduler.paasta_native_services_running_here',
autospec=True,
return_value=[],
):
load_system_paasta_config_patch.return_value.get_cluster \
= mock.Mock(side_effect=native_mesos_scheduler.PaastaNotConfiguredError)
actual = native_mesos_scheduler.get_paasta_native_services_running_here_for_nerve(cluster, soa_dir)
assert actual == []
def test_get_paasta_native_services_running_here_for_nerve_when_get_cluster_raises_other_exception():
cluster = None
soa_dir = 'the_sound_of_music'
with mock.patch(
'paasta_tools.native_mesos_scheduler.load_system_paasta_config',
autospec=True,
) as load_system_paasta_config_patch, mock.patch(
'paasta_tools.native_mesos_scheduler.paasta_native_services_running_here',
autospec=True,
return_value=[],
):
load_system_paasta_config_patch.return_value.get_cluster = mock.Mock(side_effect=Exception)
with pytest.raises(Exception):
native_mesos_scheduler.get_paasta_native_services_running_here_for_nerve(cluster, soa_dir)
|
|
# Copyright (C) 2015 Google Inc., authors, and contributors <see AUTHORS file>
# Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file>
# Created By: [email protected]
# Maintained By: [email protected]
"""
Test Program Creator role
"""
import unittest
from integration.ggrc import TestCase
from ggrc.models import get_model
from ggrc.models import all_models
from integration.ggrc.api_helper import Api
from integration.ggrc.generator import Generator
from integration.ggrc.generator import ObjectGenerator
class TestCreator(TestCase):
""" TestCreator """
def setUp(self):
TestCase.setUp(self)
self.generator = Generator()
self.api = Api()
self.object_generator = ObjectGenerator()
self.init_users()
def init_users(self):
""" Init users needed by the test cases """
users = [("creator", "Creator"), ("admin", "gGRC Admin")]
self.users = {}
for (name, role) in users:
_, user = self.object_generator.generate_person(
data={"name": name}, user_role=role)
self.users[name] = user
def test_admin_page_access(self):
for role, code in (("creator", 403), ("admin", 200)):
self.api.set_user(self.users[role])
self.assertEqual(self.api.tc.get("/admin").status_code, code)
def test_creator_can_crud(self):
""" Test Basic create/read,update/delete operations """
self.api.set_user(self.users["creator"])
all_errors = []
base_models = set([
"Control", "Assessment", "DataAsset", "Contract",
"Policy", "Regulation", "Standard", "Document", "Facility",
"Market", "Objective", "OrgGroup", "Vendor", "Product",
"Clause", "System", "Process", "Issue", "Project", "AccessGroup"
])
for model_singular in base_models:
try:
model = get_model(model_singular)
table_singular = model._inflector.table_singular
table_plural = model._inflector.table_plural
# Test POST creation
response = self.api.post(model, {
table_singular: {
"title": model_singular,
"context": None,
"reference_url": "ref",
"contact": {
"type": "Person",
"id": self.users["creator"].id,
},
},
})
if response.status_code != 201:
all_errors.append("{} post creation failed {} {}".format(
model_singular, response.status, response.data))
continue
# Test GET when not owner
obj_id = response.json.get(table_singular).get("id")
response = self.api.get(model, obj_id)
        if response.status_code != 403:  # we are not owners yet
all_errors.append(
"{} can retrieve object if not owner".format(model_singular))
continue
response = self.api.get_collection(model, obj_id)
collection = response.json.get(
"{}_collection".format(table_plural)).get(table_plural)
if len(collection) != 0:
all_errors.append(
"{} can retrieve object if not owner (collection)".format(model_singular))
continue
# Become an owner
response = self.api.post(all_models.ObjectOwner, {"object_owner": {
"person": {
"id": self.users['creator'].id,
"type": "Person",
}, "ownable": {
"type": model_singular,
"id": obj_id
}, "context": None}})
if response.status_code != 201:
all_errors.append("{} can't create owner {}.".format(
model_singular, response.status))
continue
# Test GET when owner
response = self.api.get(model, obj_id)
if response.status_code != 200:
all_errors.append("{} can't GET object {}".format(
model_singular, response.status))
continue
# Test GET collection when owner
response = self.api.get_collection(model, obj_id)
collection = response.json.get(
"{}_collection".format(table_plural)).get(table_plural)
if len(collection) == 0:
all_errors.append(
"{} cannot retrieve object even if owner (collection)".format(model_singular))
continue
except:
all_errors.append("{} exception thrown".format(model_singular))
raise
self.assertEqual(all_errors, [])
def test_creator_search(self):
""" Test if creator can see the correct object while using the search api """
self.api.set_user(self.users['admin'])
self.api.post(all_models.Regulation, {
"regulation": {"title": "Admin regulation", "context": None},
})
self.api.set_user(self.users['creator'])
response = self.api.post(all_models.Policy, {
"policy": {"title": "Creator Policy", "context": None},
})
obj_id = response.json.get("policy").get("id")
self.api.post(all_models.ObjectOwner, {"object_owner": {
"person": {
"id": self.users['creator'].id,
"type": "Person",
}, "ownable": {
"type": "Policy",
"id": obj_id,
}, "context": None}})
response, _ = self.api.search("Regulation,Policy")
entries = response.json["results"]["entries"]
self.assertEqual(len(entries), 1)
self.assertEqual(entries[0]["type"], "Policy")
response, _ = self.api.search("Regulation,Policy", counts=True)
self.assertEqual(response.json["results"]["counts"]["Policy"], 1)
self.assertEqual(
response.json["results"]["counts"].get("Regulation"), None)
def _get_count(self, obj):
""" Return the number of counts for the given object from search """
response, _ = self.api.search(obj, counts=True)
return response.json["results"]["counts"].get(obj)
def test_creator_should_see_users(self):
""" Test if creater can see all the users in the system """
self.api.set_user(self.users['admin'])
admin_count = self._get_count("Person")
self.api.set_user(self.users['creator'])
creator_count = self._get_count("Person")
self.assertEqual(admin_count, creator_count)
def test_creator_cannot_be_owner(self):
""" Test if creater cannot become owner of the object he has not created """
self.api.set_user(self.users['admin'])
_, obj = self.generator.generate(all_models.Regulation, "regulation", {
"regulation": {"title": "Test regulation", "context": None},
})
self.api.set_user(self.users['creator'])
response = self.api.post(all_models.ObjectOwner, {"object_owner": {
"person": {
"id": self.users['creator'].id,
"type": "Person",
}, "ownable": {
"type": "Regulation",
"id": obj.id,
}, "context": None}})
self.assertEqual(response.status_code, 403)
def test_relationships_access(self):
"""Check if creator cannot access relationship objects"""
self.api.set_user(self.users['admin'])
_, obj_0 = self.generator.generate(all_models.Regulation, "regulation", {
"regulation": {"title": "Test regulation", "context": None},
})
_, obj_1 = self.generator.generate(all_models.Regulation, "regulation", {
"regulation": {"title": "Test regulation 2", "context": None},
})
response, rel = self.generator.generate(all_models.Relationship, "relationship", {
"relationship": {"source": {
"id": obj_0.id,
"type": "Regulation"
}, "destination": {
"id": obj_1.id,
"type": "Regulation"
}, "context": None},
})
relationship_id = rel.id
self.assertEqual(response.status_code, 201)
self.api.set_user(self.users['creator'])
response = self.api.get_collection(all_models.Relationship, relationship_id)
self.assertEqual(response.status_code, 200)
num = len(response.json["relationships_collection"]["relationships"])
self.assertEqual(num, 0)
|
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Implements the graph generation for computation of gradients."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import contextlib
import warnings
import numpy as np
import six
from six.moves import xrange # pylint: disable=redefined-builtin
from tensorflow.core.framework import attr_value_pb2
from tensorflow.python.eager import context
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import tensor_util
from tensorflow.python.ops import array_grad # pylint: disable=unused-import
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import check_ops # pylint: disable=unused-import
from tensorflow.python.ops import control_flow_grad # pylint: disable=unused-import
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import control_flow_util
from tensorflow.python.ops import functional_ops
from tensorflow.python.ops import image_grad # pylint: disable=unused-import
from tensorflow.python.ops import linalg_grad # pylint: disable=unused-import
from tensorflow.python.ops import linalg_ops # pylint: disable=unused-import
from tensorflow.python.ops import logging_ops # pylint: disable=unused-import
from tensorflow.python.ops import manip_grad # pylint: disable=unused-import
from tensorflow.python.ops import math_grad # pylint: disable=unused-import
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.ops import spectral_grad # pylint: disable=unused-import
from tensorflow.python.ops import tensor_array_ops
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.util.tf_export import tf_export
# Warn the user if we convert a sparse representation to dense with at
# least this number of elements.
_LARGE_SPARSE_NUM_ELEMENTS = 100000000
def _IndexedSlicesToTensor(value, dtype=None, name=None, as_ref=False):
"""Converts an IndexedSlices object `value` to a Tensor.
NOTE(mrry): This function is potentially expensive.
Args:
value: An ops.IndexedSlices object.
dtype: The dtype of the Tensor to be returned.
name: Optional name to use for the returned Tensor.
as_ref: True if a ref is requested.
Returns:
A dense Tensor representing the values in the given IndexedSlices.
Raises:
    ValueError: If the requested dtype is incompatible with the dtype of the
      IndexedSlices.
"""
_ = as_ref
if dtype and not dtype.is_compatible_with(value.dtype):
raise ValueError(
"Tensor conversion requested dtype %s for IndexedSlices with dtype %s" %
(dtype.name, value.dtype.name))
if value.dense_shape is None:
raise ValueError(
"Tensor conversion requested for IndexedSlices without dense_shape: %s"
% str(value))
# TODO(mrry): Consider adding static shape information to
# IndexedSlices, to avoid using numpy here.
if not context.executing_eagerly():
dense_shape_value = tensor_util.constant_value(value.dense_shape)
if dense_shape_value is not None:
num_elements = np.prod(dense_shape_value)
if num_elements >= _LARGE_SPARSE_NUM_ELEMENTS:
warnings.warn(
"Converting sparse IndexedSlices to a dense Tensor with %d "
"elements. This may consume a large amount of memory." %
num_elements)
else:
warnings.warn(
"Converting sparse IndexedSlices to a dense Tensor of unknown shape. "
"This may consume a large amount of memory.")
return math_ops.unsorted_segment_sum(
value.values, value.indices, value.dense_shape[0], name=name)
ops.register_tensor_conversion_function(ops.IndexedSlices,
_IndexedSlicesToTensor)
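# Editor's note: the block below is a minimal illustrative sketch and is not
# part of the original module. It models, in plain NumPy, the densification
# performed by unsorted_segment_sum above: each row of `values` is added into
# the output row named by `indices`, and untouched rows stay zero. The
# function name and the sample numbers are hypothetical, and nothing calls it.
def _example_indexed_slices_to_dense_sketch():
  """Illustrative sketch only; see the note above."""
  import numpy as np
  indices = np.array([0, 2, 2])           # two updates target row 2
  values = np.array([[1., 1., 1.],
                     [2., 2., 2.],
                     [3., 3., 3.]])
  dense_shape = (4, 3)
  dense = np.zeros(dense_shape)
  for row_index, row in zip(indices, values):
    dense[row_index] += row               # same accumulation rule as above
  return dense                            # rows 0 and 2 filled, rest zero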
def _MarkReachedOps(from_ops, reached_ops):
"""Mark all ops reached from "from_ops".
Args:
from_ops: list of Operations.
reached_ops: list of booleans, indexed by operation id.
"""
queue = collections.deque()
queue.extend(from_ops)
while queue:
op = queue.popleft()
if not reached_ops[op._id]:
reached_ops[op._id] = True
for output in op.outputs:
queue.extend(output.consumers())
def _GatherInputs(to_ops, reached_ops):
"""List all inputs of to_ops that are in reached_ops.
Args:
to_ops: list of Operations.
reached_ops: list of booleans, indexed by operation id.
Returns:
The list of all inputs of to_ops that are in reached_ops.
That list includes all elements of to_ops.
"""
inputs = []
queue = collections.deque()
queue.extend(to_ops)
while queue:
op = queue.popleft()
# We are interested in this op.
if reached_ops[op._id]:
inputs.append(op)
# Clear the boolean so we won't add the inputs again.
reached_ops[op._id] = False
for inp in op.inputs:
queue.append(inp.op)
return inputs
def _PendingCount(graph, to_ops, from_ops, colocate_gradients_with_ops):
"""Initialize the pending count for ops between two lists of Operations.
'pending_count[op._id]' indicates the number of backprop inputs
to this operation.
Args:
graph: a Graph.
to_ops: list of Operations.
from_ops: list of Operations.
colocate_gradients_with_ops: Python bool. See docstring of gradients().
Returns:
A tuple containing: (1) a list of integers indexed by operation id,
indicating the number of backprop inputs to this operation, and (2)
a ControlFlowState object which is not None if the ops between from_ops
and to_ops contain control flow loops.
"""
# Mark reachable ops from from_ops.
reached_ops = [False] * (graph._last_id + 1)
for op in to_ops:
reached_ops[op._id] = True
_MarkReachedOps(from_ops, reached_ops)
# Mark between ops.
between_ops = [False] * (graph._last_id + 1)
between_op_list = []
queue = collections.deque()
queue.extend(to_ops)
while queue:
op = queue.popleft()
# We are interested in this op.
if reached_ops[op._id]:
between_ops[op._id] = True
between_op_list.append(op)
# Clear the boolean so we won't add the inputs again.
reached_ops[op._id] = False
for inp in op.inputs:
queue.append(inp.op)
# 'loop_state' is None if there are no while loops.
loop_state = control_flow_ops.MaybeCreateControlFlowState(
between_op_list, between_ops, colocate_gradients_with_ops)
# Initialize pending count for between ops.
pending_count = [0] * (graph._last_id + 1)
for op in between_op_list:
for x in op.inputs:
if between_ops[x.op._id]:
pending_count[x.op._id] += 1
return pending_count, loop_state
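# Editor's note: a minimal illustrative sketch, not part of the original
# module, of the reachability marking and pending-count bookkeeping above on a
# toy graph y = f(a, b) with a = g(x) and b = h(x). Ops are plain strings and
# the function name is hypothetical; nothing calls it.
def _example_pending_count_sketch():
  """Illustrative sketch only; see the note above."""
  inputs = {"x": [], "a": ["x"], "b": ["x"], "y": ["a", "b"]}
  consumers = {op: [] for op in inputs}
  for op, ins in inputs.items():
    for inp in ins:
      consumers[inp].append(op)
  # Forward sweep from the "from" op x, mirroring _MarkReachedOps.
  reached, queue = set(), collections.deque(["x"])
  while queue:
    op = queue.popleft()
    if op not in reached:
      reached.add(op)
      queue.extend(consumers[op])
  # Backward sweep from the "to" op y collects the between-ops; each
  # between-op then contributes one pending unit to every input it consumes.
  between, queue = set(), collections.deque(["y"])
  while queue:
    op = queue.popleft()
    if op in reached:
      reached.discard(op)
      between.add(op)
      queue.extend(inputs[op])
  pending = dict((op, 0) for op in between)
  for op in between:
    for inp in inputs[op]:
      if inp in between:
        pending[inp] += 1
  return pending  # {"y": 0, "a": 1, "b": 1, "x": 2}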
def _AsList(x):
return x if isinstance(x, (list, tuple)) else [x]
def _DefaultGradYs(grad_ys, ys, colocate_gradients_with_ops):
"""Fill in default values for grad_ys.
Args:
grad_ys: List of gradients, can contain None.
ys: List of tensors.
colocate_gradients_with_ops: If True, try colocating gradients with
the corresponding op.
Returns:
A list of gradients to use, without None.
Raises:
ValueError: If sizes of gradients and inputs don't match
TypeError: If type of any gradient is not valid for its input.
"""
if len(grad_ys) != len(ys):
raise ValueError("Passed %d grad_ys for %d ys" % (len(grad_ys), len(ys)))
grad_ys = ops.convert_n_to_tensor_or_indexed_slices(grad_ys, name="grad_y")
new_grad_ys = []
for i in xrange(len(grad_ys)):
grad_y = grad_ys[i]
y = ys[i]
with _maybe_colocate_with(y.op, colocate_gradients_with_ops):
if grad_y is None:
if y.dtype.is_complex:
raise TypeError(
"Gradients of complex tensors must set grad_ys (y.dtype = %r)" %
y.dtype)
new_grad_ys.append(
array_ops.fill(
array_ops.shape(y),
constant_op.constant(1, dtype=y.dtype, name="grad_ys_%d" % i)))
continue
if y.dtype.is_floating or y.dtype.is_integer:
if not grad_y.dtype.is_floating and not grad_y.dtype.is_integer:
raise TypeError("Gradient type %s generated for real or "
"integer-valued tensor %s with type %s must be "
"real or integer" %
(dtypes.as_dtype(grad_y.dtype).name, y,
dtypes.as_dtype(y.dtype).name))
elif y.dtype.is_complex:
if not grad_y.dtype.is_complex:
raise TypeError("Gradient type %s generated for complex-valued "
"tensor %s with type %s must be real" %
(dtypes.as_dtype(grad_y.dtype).name, y,
dtypes.as_dtype(y.dtype).name))
else:
raise TypeError("Tensor %s with type %s must be numeric "
"to obtain a default gradient" %
(y, dtypes.as_dtype(y.dtype).name))
# Create a grad_y tensor in the name scope of the gradient.
# Required for TensorArrays to identify which gradient call a
# grad_y value is coming from.
if isinstance(grad_y, ops.IndexedSlices):
new_grad_ys.append(
ops.IndexedSlices(
indices=(array_ops.identity(
grad_y.indices, name="grad_ys_%d_indices" % i)
if isinstance(grad_y.indices, ops.Tensor) else
grad_y.indices),
values=(array_ops.identity(
grad_y.values, name="grad_ys_%d_values" % i) if isinstance(
grad_y.values, ops.Tensor) else grad_y.values),
dense_shape=(array_ops.identity(
grad_y.dense_shape, name="grad_ys_%d_shape" % i)
if isinstance(grad_y.dense_shape, ops.Tensor) else
grad_y.dense_shape)))
else:
new_grad_ys.append(array_ops.identity(grad_y, name="grad_ys_%d" % i))
return new_grad_ys
def _IsTrainable(tensor):
dtype = dtypes.as_dtype(tensor.dtype)
return dtype.base_dtype in (dtypes.float16, dtypes.float32, dtypes.float64,
dtypes.complex64, dtypes.complex128)
def _VerifyGeneratedGradients(grads, op):
"""Verify that gradients are valid in number and type.
Args:
grads: List of generated gradients.
    op: Operation for which the gradients were generated.
Raises:
ValueError: if sizes of gradients and inputs don't match.
TypeError: if type of any gradient is not valid for its input.
"""
if len(grads) != len(op.inputs):
raise ValueError("Num gradients %d generated for op %s do not match num "
"inputs %d" % (len(grads), op.node_def, len(op.inputs)))
def _StopOps(from_ops, stop_gradient_ops, pending_count):
"""The set of ops that terminate the gradient computation.
This computes the frontier of the forward graph *before* which backprop
should stop. Operations in the returned set will not be differentiated.
This set is defined as the subset of `from_ops` containing ops that have
no predecessor in `from_ops`. `pending_count` is the result of
  `_PendingCount(graph, to_ops, from_ops)`. An 'op' has predecessors in `from_ops`
iff pending_count[op._id] > 0.
In addition, none of `stop_gradient_ops` will be differentiated.
Args:
from_ops: list of Operations.
stop_gradient_ops: list of Operations never to backprop through.
pending_count: List of integers, indexed by operation id.
Returns:
The set of operations.
"""
stop_ops = set()
for op in from_ops:
is_stop_op = True
for inp in op.inputs:
if pending_count[inp.op._id] > 0:
is_stop_op = False
break
if is_stop_op:
stop_ops.add(op._id)
stop_ops.update(op._id for op in stop_gradient_ops) # pylint: disable=protected-access
return stop_ops
@contextlib.contextmanager
def _maybe_colocate_with(op, colocate_gradients_with_ops):
"""Context to colocate with `op` if `colocate_gradients_with_ops`."""
if colocate_gradients_with_ops:
with ops.colocate_with(op):
yield
else:
yield
def _SymGrad(op, out_grads):
"""Backprop through a function call node op given its outputs' gradients."""
f_in = [x for x in op.inputs] + out_grads
f_types = [x.dtype for x in op.inputs]
f = attr_value_pb2.NameAttrList()
f.name = op.type
for k in op.node_def.attr:
f.attr[k].CopyFrom(op.node_def.attr[k])
# pylint: disable=protected-access
in_grads = functional_ops.symbolic_gradient(input=f_in, Tout=f_types, f=f)
# pylint: enable=protected-access
return in_grads
def _MaybeCompile(scope, op, func, grad_fn):
"""Compile the calculation in grad_fn if op was marked as compiled."""
scope = scope.rstrip("/").replace("/", "_")
if func is not None:
xla_compile = func.definition.attr["_XlaCompile"].b
xla_separate_compiled_gradients = func.definition.attr[
"_XlaSeparateCompiledGradients"].b
xla_scope = func.definition.attr["_XlaScope"].s.decode()
else:
try:
xla_compile = op.get_attr("_XlaCompile")
xla_separate_compiled_gradients = op.get_attr(
"_XlaSeparateCompiledGradients")
xla_scope = op.get_attr("_XlaScope").decode()
except ValueError:
return grad_fn() # Exit early
if not xla_compile:
return grad_fn() # Exit early
# If the gradients are supposed to be compiled separately, we give them a
# _XlaScope name that is based on the name_scope of the gradients. Otherwise
# they just inherit the existing _XlaScope name, which lets them be merged
# together with the non-gradient computation.
if xla_separate_compiled_gradients:
xla_grad_scope = "%s_grad_%s" % (xla_scope, scope)
else:
xla_grad_scope = xla_scope
attrs = {
"_XlaCompile": attr_value_pb2.AttrValue(b=xla_compile),
"_XlaScope": attr_value_pb2.AttrValue(s=xla_grad_scope.encode())
}
with ops.get_default_graph()._attr_scope(attrs): # pylint: disable=protected-access
return grad_fn()
@tf_export("gradients")
def gradients(ys,
xs,
grad_ys=None,
name="gradients",
colocate_gradients_with_ops=False,
gate_gradients=False,
aggregation_method=None,
stop_gradients=None):
"""Constructs symbolic derivatives of sum of `ys` w.r.t. x in `xs`.
`ys` and `xs` are each a `Tensor` or a list of tensors. `grad_ys`
is a list of `Tensor`, holding the gradients received by the
`ys`. The list must be the same length as `ys`.
`gradients()` adds ops to the graph to output the derivatives of `ys` with
respect to `xs`. It returns a list of `Tensor` of length `len(xs)` where
each tensor is the `sum(dy/dx)` for y in `ys`.
`grad_ys` is a list of tensors of the same length as `ys` that holds
the initial gradients for each y in `ys`. When `grad_ys` is None,
we fill in a tensor of '1's of the shape of y for each y in `ys`. A
user can provide their own initial `grad_ys` to compute the
derivatives using a different initial gradient for each y (e.g., if
one wanted to weight the gradient differently for each value in
each y).
`stop_gradients` is a `Tensor` or a list of tensors to be considered constant
with respect to all `xs`. These tensors will not be backpropagated through,
as though they had been explicitly disconnected using `stop_gradient`. Among
other things, this allows computation of partial derivatives as opposed to
total derivatives. For example:
```python
a = tf.constant(0.)
b = 2 * a
g = tf.gradients(a + b, [a, b], stop_gradients=[a, b])
```
Here the partial derivatives `g` evaluate to `[1.0, 1.0]`, compared to the
total derivatives `tf.gradients(a + b, [a, b])`, which take into account the
influence of `a` on `b` and evaluate to `[3.0, 1.0]`. Note that the above is
equivalent to:
```python
a = tf.stop_gradient(tf.constant(0.))
b = tf.stop_gradient(2 * a)
g = tf.gradients(a + b, [a, b])
```
`stop_gradients` provides a way of stopping gradient after the graph has
already been constructed, as compared to `tf.stop_gradient` which is used
during graph construction. When the two approaches are combined,
backpropagation stops at both `tf.stop_gradient` nodes and nodes in
`stop_gradients`, whichever is encountered first.
Args:
ys: A `Tensor` or list of tensors to be differentiated.
xs: A `Tensor` or list of tensors to be used for differentiation.
grad_ys: Optional. A `Tensor` or list of tensors the same size as
`ys` and holding the gradients computed for each y in `ys`.
name: Optional name to use for grouping all the gradient ops together.
      Defaults to 'gradients'.
colocate_gradients_with_ops: If True, try colocating gradients with
the corresponding op.
gate_gradients: If True, add a tuple around the gradients returned
      for an operation. This avoids some race conditions.
aggregation_method: Specifies the method used to combine gradient terms.
Accepted values are constants defined in the class `AggregationMethod`.
stop_gradients: Optional. A `Tensor` or list of tensors not to differentiate
through.
Returns:
A list of `sum(dy/dx)` for each x in `xs`.
Raises:
LookupError: if one of the operations between `x` and `y` does not
have a registered gradient function.
ValueError: if the arguments are invalid.
RuntimeError: if called in Eager mode.
"""
# Creating the gradient graph for control flow mutates Operations. _lock
# ensures a Session.run call cannot occur between creating and mutating new
# ops.
with ops.get_default_graph()._lock: # pylint: disable=protected-access
return _GradientsHelper(ys, xs, grad_ys, name, colocate_gradients_with_ops,
gate_gradients, aggregation_method, stop_gradients)
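# Editor's note: a minimal illustrative sketch, not part of the original
# module. It re-checks the stop_gradients example from the docstring above
# with central finite differences on plain Python floats, so no graph is
# built. The function name is hypothetical and nothing calls it.
def _example_stop_gradients_sketch():
  """Illustrative sketch only; see the note above."""
  eps = 1e-6
  def f_total(a):        # b follows a, so f(a) = a + 2*a = 3*a
    b = 2 * a
    return a + b
  b_frozen = 2 * 0.      # b held constant, as stop_gradients=[b] would do
  def f_partial(a):
    return a + b_frozen
  d_total = (f_total(eps) - f_total(-eps)) / (2 * eps)        # ~3.0
  d_partial = (f_partial(eps) - f_partial(-eps)) / (2 * eps)  # ~1.0
  return d_total, d_partial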
def _GradientsHelper(ys, xs, grad_ys, name, colocate_gradients_with_ops,
gate_gradients, aggregation_method, stop_gradients):
"""Implementation of gradients()."""
if context.executing_eagerly():
raise RuntimeError("tf.gradients not supported when eager execution "
"is enabled. Use tf.contrib.eager.GradientTape "
"instead.")
ys = _AsList(ys)
xs = _AsList(xs)
stop_gradients = [] if stop_gradients is None else _AsList(stop_gradients)
if grad_ys is None:
grad_ys = [None] * len(ys)
else:
grad_ys = _AsList(grad_ys)
with ops.name_scope(
name, "gradients",
list(ys) + list(xs) + list(stop_gradients) + list(grad_ys)) as grad_scope:
ys = ops.convert_n_to_tensor_or_indexed_slices(ys, name="y")
xs = [
x.handle if resource_variable_ops.is_resource_variable(x) else x
for x in xs
]
xs = ops.internal_convert_n_to_tensor_or_indexed_slices(
xs, name="x", as_ref=True)
grad_ys = _DefaultGradYs(grad_ys, ys, colocate_gradients_with_ops)
# The approach we take here is as follows: Create a list of all ops in the
# subgraph between the ys and xs. Visit these ops in reverse order of ids
# to ensure that when we visit an op the gradients w.r.t its outputs have
# been collected. Then aggregate these gradients if needed, call the op's
# gradient function, and add the generated gradients to the gradients for
# its input.
# Initialize the pending count for ops in the connected subgraph from ys
# to the xs.
if len(ys) > 1:
ys = [array_ops.identity(y) if y.consumers() else y for y in ys]
to_ops = [t.op for t in ys]
from_ops = [t.op for t in xs]
stop_gradient_ops = [t.op for t in stop_gradients]
pending_count, loop_state = _PendingCount(
ops.get_default_graph(), to_ops, from_ops, colocate_gradients_with_ops)
# Iterate over the collected ops.
#
# grads: op => list of gradients received on each output endpoint of the
# op. The gradients for each endpoint are initially collected as a list.
# When it is time to call the op's gradient function, for each endpoint we
# aggregate the list of received gradients into a Add() Operation if there
# is more than one.
grads = {}
# Add the initial gradients for the ys.
for y, grad_y in zip(ys, grad_ys):
_SetGrad(grads, y, grad_y)
# Initialize queue with to_ops.
queue = collections.deque()
# Add the ops in 'to_ops' into the queue.
to_ops_set = set()
for op in to_ops:
# 'ready' handles the case where one output gradient relies on
# another output's gradient.
# pylint: disable=protected-access
ready = (pending_count[op._id] == 0)
if ready and op._id not in to_ops_set:
to_ops_set.add(op._id)
queue.append(op)
# pylint: enable=protected-access
if loop_state:
loop_exits = loop_state.ProcessUnusedLoopExits(pending_count, to_ops_set)
for y in loop_exits:
if _IsTrainable(y):
_SetGrad(grads, y, loop_state.ZerosLikeForExit(y))
queue.append(y.op)
stop_ops = _StopOps(from_ops, stop_gradient_ops, pending_count)
while queue:
# generate gradient subgraph for op.
op = queue.popleft()
with _maybe_colocate_with(op, colocate_gradients_with_ops):
if loop_state:
loop_state.EnterGradWhileContext(op, before=True)
out_grads = _AggregatedGrads(grads, op, loop_state, aggregation_method)
if loop_state:
loop_state.ExitGradWhileContext(op, before=True)
grad_fn = None
# pylint: disable=protected-access
func_call = None
is_func_call = ops.get_default_graph()._is_function(op.type)
has_out_grads = any(isinstance(g, ops.Tensor) or g for g in out_grads)
if has_out_grads and (op._id not in stop_ops):
if is_func_call:
func_call = ops.get_default_graph()._get_function(op.type)
grad_fn = func_call.python_grad_func
# pylint: enable=protected-access
else:
# A grad_fn must be defined, either as a function or as None
# for ops that do not have gradients.
try:
grad_fn = ops.get_gradient_function(op)
except LookupError:
raise LookupError(
"No gradient defined for operation '%s' (op type: %s)" %
(op.name, op.type))
if loop_state:
loop_state.EnterGradWhileContext(op, before=False)
if (grad_fn or is_func_call) and has_out_grads:
# NOTE: If _AggregatedGrads didn't compute a value for the i'th
# output, it means that the cost does not depend on output[i],
# therefore dC/doutput[i] is 0.
for i, out_grad in enumerate(out_grads):
if (not isinstance(out_grad, ops.Tensor) and not out_grad) and (
(not grad_fn and is_func_call) or _IsTrainable(op.outputs[i])):
# Only trainable outputs or outputs for a function call that
# will use SymbolicGradient get a zero gradient. Gradient
# functions should ignore the gradient for other outputs.
# TODO(apassos) gradients of resource handles might be an
# issue here because of zeros.
if loop_state:
out_grads[i] = loop_state.ZerosLike(op, i)
else:
out_grads[i] = control_flow_ops.ZerosLikeOutsideLoop(op, i)
with ops.name_scope(op.name + "_grad"):
# pylint: disable=protected-access
with ops.get_default_graph()._original_op(op):
# pylint: enable=protected-access
if grad_fn:
# If grad_fn was found, do not use SymbolicGradient even for
# functions.
in_grads = _MaybeCompile(grad_scope, op, func_call,
lambda: grad_fn(op, *out_grads))
else:
# For function call ops, we add a 'SymbolicGradient'
# node to the graph to compute gradients.
in_grads = _MaybeCompile(grad_scope, op, func_call,
lambda: _SymGrad(op, out_grads))
in_grads = _AsList(in_grads)
_VerifyGeneratedGradients(in_grads, op)
if gate_gradients and len([x for x in in_grads
if x is not None]) > 1:
with ops.device(None):
with ops.colocate_with(None, ignore_existing=True):
in_grads = control_flow_ops.tuple(in_grads)
_LogOpGradients(op, out_grads, in_grads)
else:
# If no grad_fn is defined or none of out_grads is available,
# just propagate a list of None backwards.
in_grads = [None] * len(op.inputs)
for i, (t_in, in_grad) in enumerate(zip(op.inputs, in_grads)):
if in_grad is not None:
if (isinstance(in_grad, ops.Tensor) and
t_in.dtype != dtypes.resource):
try:
in_grad.set_shape(t_in.get_shape())
except ValueError:
raise ValueError(
"Incompatible shapes between op input and calculated "
"input gradient. Forward operation: %s. Input index: %d. "
"Original input shape: %s. "
"Calculated input gradient shape: %s" %
(op.name, i, t_in.shape, in_grad.shape))
_SetGrad(grads, t_in, in_grad)
if loop_state:
loop_state.ExitGradWhileContext(op, before=False)
# Update pending count for the inputs of op and enqueue ready ops.
_UpdatePendingAndEnqueueReady(grads, op, queue, pending_count, loop_state)
if loop_state:
loop_state.PostProcessing()
return [_GetGrad(grads, x) for x in xs]
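# Editor's note: a minimal illustrative sketch, not part of the original
# module, of the reverse-order walk described in the comments of
# _GradientsHelper: seed the output gradient with ones, visit ops from the
# outputs back to the inputs, and map each output gradient through that op's
# gradient function. The names are hypothetical and nothing calls this.
def _example_reverse_mode_sketch():
  """Illustrative sketch only; see the note above."""
  x = 4.0
  t = x * 3.0                    # forward op 1: multiply by a constant
  y = t + 2.0                    # forward op 2: add a constant
  grad_fns = {                   # the role of ops.get_gradient_function
      "add": lambda dy: dy,      # d(t + 2)/dt = 1
      "mul": lambda dt: dt * 3.  # d(x * 3)/dx = 3
  }
  dy = 1.0                       # the default seed _DefaultGradYs fills in
  dt = grad_fns["add"](dy)       # backprop through op 2
  dx = grad_fns["mul"](dt)       # backprop through op 1
  return y, dx                   # (14.0, 3.0)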
def _HasAnyNotNoneGrads(grads, op):
"""Return true iff op has real gradient."""
out_grads = _GetGrads(grads, op)
for out_grad in out_grads:
if isinstance(out_grad, (ops.Tensor, ops.IndexedSlices)):
return True
if out_grad and isinstance(out_grad, collections.Sequence):
if any([g is not None for g in out_grad]):
return True
return False
def _UpdatePendingAndEnqueueReady(grads, op, queue, pending_count, loop_state):
"""Update pending count for the inputs of op and enqueue ready ops."""
for x in op.inputs:
# pylint: disable=protected-access
pending_count[x.op._id] -= 1
ready = (pending_count[x.op._id] == 0)
if loop_state and not ready:
ready = (
pending_count[x.op._id] > 0 and control_flow_util.IsLoopSwitch(x.op))
# pylint: enable=protected-access
if ready:
if control_flow_util.IsLoopExit(x.op):
        # If x is an exit without a real gradient, defer processing it.
grad_state = loop_state.GetGradState(x.op, before=False)
grad_state.deferred_exits.append(x)
grad_state.pending_exits_count -= 1
if grad_state.pending_exits_count == 0:
# We now have all the exits so process them.
has_not_none_grad = False
for y in grad_state.deferred_exits:
if _HasAnyNotNoneGrads(grads, y.op):
has_not_none_grad = True
queue.append(y.op)
else:
grad_state.unused_exits.append(y)
if has_not_none_grad:
# For an unused exit, if it has trainable outputs, backprop
# a zero gradient. Otherwise, just ignore it.
for y in grad_state.unused_exits:
if _IsTrainable(y):
_SetGrad(grads, y, loop_state.ZerosLikeForExit(y))
queue.append(y.op)
else:
# All exits are "unused" so use None as gradient.
for y in grad_state.unused_exits:
queue.append(y.op)
else:
queue.append(x.op)
def _SetGrad(grads, t, grad):
"""Sets gradient "grad" in "grads" for tensor "t"."""
op = t.op
op_grads = grads.get(op)
if not op_grads:
op_grads = [[] for _ in xrange(len(op.outputs))]
grads[op] = op_grads
t_grads = op_grads[t.value_index]
if isinstance(t_grads, list):
t_grads.append(grad)
else:
assert control_flow_util.IsLoopSwitch(op)
op_grads[t.value_index] = grad
def _GetGrad(grads, t):
"""Gets gradient for tensor "t"."""
op = t.op
op_grads = grads.get(op)
if not op_grads:
return None
t_grad = op_grads[t.value_index]
assert not isinstance(
t_grad, list), ("gradients list should have been aggregated by now.")
return t_grad
def _GetGrads(grads, op):
"""Gets all gradients for op."""
if op in grads:
return grads[op]
else:
return [[] for _ in xrange(len(op.outputs))]
def _HandleNestedIndexedSlices(grad):
assert isinstance(grad, ops.IndexedSlices)
if isinstance(grad.values, ops.Tensor):
return grad
else:
assert isinstance(grad.values, ops.IndexedSlices)
g = _HandleNestedIndexedSlices(grad.values)
return ops.IndexedSlices(g.values, array_ops.gather(
grad.indices, g.indices), g.dense_shape)
def _AccumulatorShape(inputs):
shape = tensor_shape.unknown_shape()
for i in inputs:
if isinstance(i, ops.Tensor):
shape = shape.merge_with(i.get_shape())
return shape
def _LogOpGradients(op, out_grads, in_grads):
"""Log the in and out grads of an op."""
logging.vlog(1, "Gradient for '" + op.name + "'")
def _FilterGrad(x):
if x is None:
return False
if isinstance(x, (list, tuple)):
return bool(x)
else:
return True
logging.vlog(1, " in --> %s",
", ".join([x.name for x in out_grads if _FilterGrad(x)]))
logging.vlog(1, " out --> %s",
", ".join([x.name for x in in_grads if _FilterGrad(x)]))
def _MultiDeviceAddN(tensor_list):
"""Adds tensors from potentially multiple devices."""
# Basic function structure comes from control_flow_ops.group().
# Sort tensors according to their devices.
tensors_on_device = collections.defaultdict(lambda: [])
for tensor in tensor_list:
tensors_on_device[tensor.device].append(tensor)
# For each device, add the tensors on that device first.
# Then gather the partial sums from multiple devices.
# TODO(sjhwang): Create hierarchical aggregation tree as pbar's suggestion.
# E.g., aggregate per GPU, then per task, and so on.
summands = []
def DeviceKey(dev):
return "" if dev is None else dev
for dev in sorted(six.iterkeys(tensors_on_device), key=DeviceKey):
tensors = tensors_on_device[dev]
with ops.colocate_with(tensors[0].op, ignore_existing=True):
summands.append(math_ops.add_n(tensors))
return math_ops.add_n(summands)
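# Editor's note: a minimal illustrative sketch, not part of the original
# module, of the per-device grouping performed by _MultiDeviceAddN, using
# (device, value) pairs and plain floats. The names are hypothetical and
# nothing calls this.
def _example_multi_device_add_sketch():
  """Illustrative sketch only; see the note above."""
  tensors = [("/gpu:0", 1.0), ("/gpu:1", 2.0), ("/gpu:0", 3.0), ("", 4.0)]
  by_device = collections.defaultdict(list)
  for device, value in tensors:
    by_device[device].append(value)
  # Partial sum per device first, then one final sum across devices.
  summands = [sum(values) for _, values in sorted(by_device.items())]
  return sum(summands)  # 10.0, the same total a single add_n would produce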
@tf_export("AggregationMethod")
class AggregationMethod(object):
"""A class listing aggregation methods used to combine gradients.
Computing partial derivatives can require aggregating gradient
contributions. This class lists the various methods that can
be used to combine gradients in the graph:
* `ADD_N`: All of the gradient terms are summed as part of one
operation using the "AddN" op. It has the property that all
gradients must be ready before any aggregation is performed.
* `DEFAULT`: The system-chosen default aggregation method.
"""
ADD_N = 0
DEFAULT = ADD_N
# The following are experimental and may not be supported in future releases.
EXPERIMENTAL_TREE = 1
EXPERIMENTAL_ACCUMULATE_N = 2
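# Editor's note: a minimal illustrative sketch, not part of the original
# module, contrasting ADD_N-style one-shot summation with the
# EXPERIMENTAL_TREE-style pairwise running sum used by _AggregatedGrads. The
# function name is hypothetical and nothing calls it.
def _example_aggregation_method_sketch():
  """Illustrative sketch only; see the note above."""
  import numpy as np
  grads = [np.full(3, float(i)) for i in range(1, 5)]  # four gradient terms
  one_shot = np.sum(grads, axis=0)   # ADD_N: every term live at once
  running = grads[0]                 # pairwise: terms can be released
  for grad in grads[1:]:             # as soon as they are folded in
    running = running + grad
  assert np.allclose(one_shot, running)
  return one_shot                    # array([10., 10., 10.])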
def _AggregatedGrads(grads, op, loop_state, aggregation_method=None):
"""Get the aggregated gradients for op.
Args:
grads: The map of memoized gradients.
op: The op to get gradients for.
loop_state: An object for maintaining the state of the while loops in the
graph. It is of type ControlFlowState. None if the graph
contains no while loops.
aggregation_method: Specifies the method used to combine gradient terms.
Accepted values are constants defined in the class `AggregationMethod`.
Returns:
    A list of gradients, one per output of `op`. If the gradient
    for a particular output is a list, this function aggregates it
    before returning.
Raises:
TypeError: if the incoming grads are not Tensors or IndexedSlices.
ValueError: if the arguments are invalid.
"""
if aggregation_method is None:
aggregation_method = AggregationMethod.DEFAULT
if aggregation_method not in [
AggregationMethod.ADD_N, AggregationMethod.EXPERIMENTAL_TREE,
AggregationMethod.EXPERIMENTAL_ACCUMULATE_N
]:
raise ValueError(
"Invalid aggregation_method specified %s." % aggregation_method)
out_grads = _GetGrads(grads, op)
for i, out_grad in enumerate(out_grads):
if loop_state:
if isinstance(out_grad, (ops.Tensor, ops.IndexedSlices)):
assert control_flow_util.IsLoopSwitch(op)
continue
# Grads have to be Tensors or IndexedSlices
if (isinstance(out_grad, collections.Sequence) and not all([
isinstance(g, (ops.Tensor, ops.IndexedSlices))
for g in out_grad
if g is not None
])):
raise TypeError("gradients have to be either all Tensors "
"or all IndexedSlices")
# Aggregate multiple gradients, and convert [] to None.
if out_grad:
if len(out_grad) < 2:
used = "nop"
out_grads[i] = out_grad[0]
elif all([isinstance(g, ops.Tensor) for g in out_grad if g is not None]):
tensor_shape = _AccumulatorShape(out_grad)
if (aggregation_method == AggregationMethod.EXPERIMENTAL_ACCUMULATE_N
and len(out_grad) > 2 and tensor_shape.is_fully_defined()):
# The benefit of using AccumulateN is that its inputs can be combined
# in any order and this can allow the expression to be evaluated with
# a smaller memory footprint. When used with gpu_allocator_retry,
# it is possible to compute a sum of terms which are much larger than
# total GPU memory.
# AccumulateN can currently only be used if we know the shape for
# an accumulator variable. If this is not known, or if we only have
# 2 grads then we fall through to the "tree" case below.
used = "accumulate_n"
out_grads[i] = math_ops.accumulate_n(out_grad)
elif aggregation_method in [
AggregationMethod.EXPERIMENTAL_TREE,
AggregationMethod.EXPERIMENTAL_ACCUMULATE_N
]:
# Aggregate all gradients by doing pairwise sums: this may
# reduce performance, but it can improve memory because the
# gradients can be released earlier.
#
# TODO(vrv): Consider replacing this with a version of
# tf.AddN() that eagerly frees its inputs as soon as they are
# ready, so the order of this tree does not become a problem.
used = "tree"
with ops.name_scope(op.name + "_gradient_sum"):
running_sum = out_grad[0]
for grad in out_grad[1:]:
running_sum = math_ops.add_n([running_sum, grad])
out_grads[i] = running_sum
else:
used = "add_n"
out_grads[i] = _MultiDeviceAddN(out_grad)
logging.vlog(2, " _AggregatedGrads %d x %s using %s", len(out_grad),
tensor_shape, used)
else:
out_grad = math_ops._as_indexed_slices_list(
[g for g in out_grad if g is not None])
out_grad = [_HandleNestedIndexedSlices(x) for x in out_grad]
# Form IndexedSlices out of the concatenated values and
# indices.
out_grads[i] = ops.IndexedSlices(
array_ops.concat([x.values for x in out_grad], 0),
array_ops.concat([x.indices for x in out_grad], 0),
out_grad[0].dense_shape)
else: # not out_grad
# out_grads[i] is [], thus its aggregation is simply None.
out_grads[i] = None
return out_grads
# TODO(vrv): Make this available when we want to make it public.
def _hessian_vector_product(ys, xs, v):
"""Multiply the Hessian of `ys` wrt `xs` by `v`.
This is an efficient construction that uses a backprop-like approach
to compute the product between the Hessian and another vector. The
Hessian is usually too large to be explicitly computed or even
represented, but this method allows us to at least multiply by it
for the same big-O cost as backprop.
Implicit Hessian-vector products are the main practical, scalable way
of using second derivatives with neural networks. They allow us to
do things like construct Krylov subspaces and approximate conjugate
gradient descent.
Example: if `y` = 1/2 `x`^T A `x`, then `hessian_vector_product(y,
x, v)` will return an expression that evaluates to the same values
  as 1/2 (A + A.T) `v`.
Args:
ys: A scalar value, or a tensor or list of tensors to be summed to
yield a scalar.
xs: A list of tensors that we should construct the Hessian over.
v: A list of tensors, with the same shapes as xs, that we want to
multiply by the Hessian.
Returns:
A list of tensors (or if the list would be length 1, a single tensor)
containing the product between the Hessian and `v`.
Raises:
ValueError: `xs` and `v` have different length.
"""
# Validate the input
length = len(xs)
if len(v) != length:
raise ValueError("xs and v must have the same length.")
# First backprop
grads = gradients(ys, xs)
assert len(grads) == length
elemwise_products = [
math_ops.multiply(grad_elem, array_ops.stop_gradient(v_elem))
for grad_elem, v_elem in zip(grads, v)
if grad_elem is not None
]
# Second backprop
return gradients(elemwise_products, xs)
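# Editor's note: a minimal illustrative sketch, not part of the original
# module. It checks the quadratic-form example from the docstring above with
# NumPy: for y = 1/2 x^T A x the Hessian-vector product equals
# 1/2 (A + A.T) v, which is also what differencing the analytic gradient
# along v recovers. The function name is hypothetical and nothing calls it.
def _example_hessian_vector_product_sketch():
  """Illustrative sketch only; see the note above."""
  import numpy as np
  rng = np.random.RandomState(0)
  a = rng.randn(3, 3)
  x = rng.randn(3)
  v = rng.randn(3)
  def grad(z):                              # gradient of 1/2 z^T A z
    return 0.5 * (a + a.T).dot(z)
  eps = 1e-6
  hvp_numeric = (grad(x + eps * v) - grad(x - eps * v)) / (2 * eps)
  hvp_closed_form = 0.5 * (a + a.T).dot(v)
  assert np.allclose(hvp_numeric, hvp_closed_form)
  return hvp_closed_form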
@tf_export("hessians")
def hessians(ys,
xs,
name="hessians",
colocate_gradients_with_ops=False,
gate_gradients=False,
aggregation_method=None):
"""Constructs the Hessian of sum of `ys` with respect to `x` in `xs`.
`hessians()` adds ops to the graph to output the Hessian matrix of `ys`
with respect to `xs`. It returns a list of `Tensor` of length `len(xs)`
where each tensor is the Hessian of `sum(ys)`.
The Hessian is a matrix of second-order partial derivatives of a scalar
tensor (see https://en.wikipedia.org/wiki/Hessian_matrix for more details).
Args:
ys: A `Tensor` or list of tensors to be differentiated.
xs: A `Tensor` or list of tensors to be used for differentiation.
name: Optional name to use for grouping all the gradient ops together.
      Defaults to 'hessians'.
colocate_gradients_with_ops: See `gradients()` documentation for details.
gate_gradients: See `gradients()` documentation for details.
aggregation_method: See `gradients()` documentation for details.
Returns:
A list of Hessian matrices of `sum(ys)` for each `x` in `xs`.
Raises:
LookupError: if one of the operations between `xs` and `ys` does not
have a registered gradient function.
"""
xs = _AsList(xs)
kwargs = {
"colocate_gradients_with_ops": colocate_gradients_with_ops,
"gate_gradients": gate_gradients,
"aggregation_method": aggregation_method
}
# Compute first-order derivatives and iterate for each x in xs.
hessians = []
_gradients = gradients(ys, xs, **kwargs)
for gradient, x in zip(_gradients, xs):
# change shape to one-dimension without graph branching
gradient = array_ops.reshape(gradient, [-1])
# Declare an iterator and tensor array loop variables for the gradients.
n = array_ops.size(x)
loop_vars = [
array_ops.constant(0, dtypes.int32),
tensor_array_ops.TensorArray(x.dtype, n)
]
# Iterate over all elements of the gradient and compute second order
# derivatives.
_, hessian = control_flow_ops.while_loop(
lambda j, _: j < n,
lambda j, result: (j + 1,
result.write(j, gradients(gradient[j], x)[0])),
loop_vars
)
_shape = array_ops.shape(x)
_reshaped_hessian = array_ops.reshape(hessian.stack(),
array_ops.concat((_shape, _shape), 0))
hessians.append(_reshaped_hessian)
return hessians
|
|
# Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""core.util.time timezone data and maps."""
# Abbreviations/synonyms to IANA timezone name map from
# https://en.wikipedia.org/wiki/List_of_time_zone_abbreviations.
#
# Time zone abbreviations are not unique. We pick the ones most relevant to the
# Cloud SDK. For example, CST is used for US/Central, China standard time and
# Cuba Standard Time. Most service date/times will be UTC or have an explicit
# numeric +/-HH:MM timezone offset, so duplicates will not be a big problem.
ABBREVIATION_TO_IANA = {
'ACST': 'Australia/Adelaide',
'AEST': 'Australia/Brisbane',
'AFT': 'Asia/Kabul',
'AKST': 'America/Anchorage',
'AMST': 'UTC-03',
'ART': 'America/Argentina/La_Rioja',
'AST': 'Atlantic/Bermuda',
'AWST': 'Australia/Perth',
'AZOST': 'Atlantic/Azores',
'AZT': 'Asia/Baku',
'BDT': 'Asia/Brunei',
'BIOT': 'UTC+06',
'BIT': 'UTC-12',
'BOT': 'UTC-04',
'BRST': 'America/Sao_Paulo',
'BRT': 'America/Sao_Paulo',
'BST': 'UTC+01',
'BTT': 'UTC+06',
'CAT': 'UTC+02',
'CCT': 'UTC+06:30',
'CDT': 'America/Chicago',
'CEST': 'Europe/Belgrade',
'CET': 'Europe/Belgrade',
'CHAST': 'UTC+12:45',
'CHOT': 'Asia/Choibalsan',
'ChST': 'UTC+10',
'CHUT': 'UTC+10',
'CIST': 'UTC-08',
'CIT': 'UTC+08',
'CKT': 'UTC-10',
'CLST': 'UTC-03',
'CLT': 'UTC-04',
'COST': 'UTC-04',
'COT': 'UTC-05',
'CST': 'America/Chicago',
'CT': 'Asia/Hong_Kong',
'CVT': 'Atlantic/Cape_Verde',
'CWST': 'UTC+08:45',
'CXT': 'Indian/Christmas',
'DAVT': 'Antarctica/Davis',
'DDUT': 'Antarctica/DumontDUrville',
'DFT': 'UTC+01',
'EASST': 'UTC-05',
'EAST': 'UTC-06',
'EAT': 'UTC+03',
'ECT': 'UTC-05',
'EDT': 'America/New_York',
'EEST': 'Europe/Chisinau',
'EET': 'Europe/Chisinau',
'EGST': 'UTC+00',
'EGT': 'UTC-01',
'EIT': 'UTC+09',
'EST': 'America/New_York',
'FET': 'Europe/Minsk',
'FJT': 'Pacific/Fiji',
'FKST': 'UTC-03',
'FKT': 'UTC-04',
'FNT': 'UTC-02',
'GALT': 'Pacific/Galapagos',
'GAMT': 'UTC-09',
'GET': 'Asia/Tbilisi',
'GFT': 'UTC-03',
'GILT': 'UTC+12',
'GIT': 'UTC-09',
'GMT': 'UTC',
'GST': 'Atlantic/South_Georgia',
'GYT': 'America/Guyana',
'HAEC': 'UTC+02',
'HAST': 'Pacific/Honolulu',
'HKT': 'Asia/Hong_Kong',
'HMT': 'UTC+05',
'HOVT': 'UTC+07',
'HST': 'Pacific/Honolulu',
'IBST': 'UTC',
'ICT': 'UTC+07',
'IOT': 'UTC+03',
'IRKT': 'Asia/Irkutsk',
'IRST': 'Asia/Tehran',
'IST': 'Asia/Jerusalem',
'JST': 'Asia/Tokyo',
'KGT': 'UTC+06',
'KOST': 'Pacific/Kosrae',
'KRAT': 'Asia/Krasnoyarsk',
'KST': 'Asia/Seoul',
'LHST': 'UTC+11',
'LINT': 'Pacific/Kiritimati',
'MAGT': 'Asia/Magadan',
'MART': 'UTC-09:30',
'MAWT': 'Antarctica/Mawson',
'MDT': 'America/Phoenix',
'MEST': 'Europe/Belgrade',
'MET': 'Europe/Belgrade',
'MHT': 'UTC+12',
'MIST': 'Antarctica/Macquarie',
'MIT': 'UTC-09:30',
'MMT': 'Asia/Rangoon',
'MSK': 'Europe/Moscow',
'MST': 'America/Phoenix',
'MUT': 'Indian/Mahe',
'MVT': 'Indian/Maldives',
'MYT': 'UTC+08',
'NCT': 'Pacific/Noumea',
'NFT': 'Pacific/Norfolk',
'NPT': 'Asia/Katmandu',
'NST': 'America/St_Johns',
'NT': 'America/St_Johns',
'NUT': 'Pacific/Niue',
'NZST': 'Pacific/Auckland',
'OMST': 'Asia/Omsk',
'ORAT': 'Asia/Oral',
'PDT': 'America/Los_Angeles',
'PETT': 'Asia/Kamchatka',
'PET': 'UTC-05',
'PGT': 'UTC+10',
'PHOT': 'UTC+13',
'PKT': 'Asia/Karachi',
'PMST': 'UTC-03',
'PONT': 'UTC+11',
'PST': 'America/Los_Angeles',
'PYST': 'America/Asuncion',
'PYT': 'America/Asuncion',
'RET': 'Indian/Reunion',
'ROTT': 'Antarctica/Rothera',
'SAKT': 'Asia/Sakhalin',
'SAMT': 'Europe/Samara',
'SAST': 'Africa/Johannesburg',
'SBT': 'Pacific/Norfolk',
'SGT': 'Asia/Kuala_Lumpur',
'SLST': 'Asia/Colombo',
'SRET': 'Asia/Srednekolymsk',
'SST': 'Asia/Kuala_Lumpur',
'SYOT': 'UTC+03',
'TAHT': 'Pacific/Tahiti',
'TFT': 'Indian/Kerguelen',
'THA': 'UTC+07',
'TJT': 'UTC+05',
'TKT': 'UTC+13',
'TLT': 'UTC+09',
'TMT': 'UTC+05',
'TOT': 'Pacific/Tongatapu',
'TVT': 'UTC+12',
'UCT': 'UTC',
'ULAT': 'Asia/Ulaanbaatar',
'US/Central': 'America/Chicago',
'US/Eastern': 'America/New_York',
'US/Mountain': 'America/Phoenix',
'US/Pacific': 'America/Los_Angeles',
'USZ1': 'Europe/Kaliningrad',
'UYST': 'UTC-02',
'UYT': 'UTC-03',
'UZT': 'UTC+05',
'VET': 'America/Caracas',
'VLAT': 'Asia/Vladivostok',
'VOLT': 'Europe/Volgograd',
'VOST': 'Antarctica/Vostok',
'VUT': 'UTC+11',
'WAKT': 'Pacific/Wake',
'WAST': 'Africa/Algiers',
'WAT': 'Africa/Algiers',
'WEST': 'Europe/Amsterdam',
'WET': 'Europe/Amsterdam',
'WIT': 'UTC+07',
'WST': 'UTC+08',
'YAKT': 'Asia/Yakutsk',
'YEKT': 'Asia/Yekaterinburg',
'Z': 'UTC',
}
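# Editor's note: the helper below is a minimal illustrative sketch, not part
# of the original module, showing one way a caller might consume
# ABBREVIATION_TO_IANA: values are either IANA names or fixed offsets written
# as 'UTC', 'UTC+HH', 'UTC-HH', or 'UTC+HH:MM'. The function name is
# hypothetical and nothing in this module calls it.
def _abbreviation_to_zone_sketch(abbreviation):
  """Return ('iana', name) or ('offset', minutes) for a known abbreviation."""
  value = ABBREVIATION_TO_IANA.get(abbreviation)
  if value is None:
    return None
  if value == 'UTC':
    return ('offset', 0)
  if not value.startswith('UTC'):
    return ('iana', value)
  sign = 1 if value[3] == '+' else -1
  hours_minutes = value[4:].split(':')
  minutes = int(hours_minutes[0]) * 60
  if len(hours_minutes) > 1:
    minutes += int(hours_minutes[1])
  return ('offset', sign * minutes)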
# IANA timezone name to Windows timezone name map. Generated from
# http://unicode.org/repos/cldr/trunk/common/supplemental/windowsZones.xml.
# The mapping is imperfect (many-to-one), but it's the best Windows can do.
IANA_TO_WINDOWS = {
# (UTC-12:00) International Date Line West
'Etc/GMT+12': 'Dateline Standard Time',
# (UTC-11:00) Coordinated Universal Time-11
'Etc/GMT+11': 'UTC-11',
'Pacific/Midway': 'UTC-11',
'Pacific/Niue': 'UTC-11',
'Pacific/Pago_Pago': 'UTC-11',
# (UTC-10:00) Hawaii
'Etc/GMT+10': 'Hawaiian Standard Time',
'Pacific/Honolulu': 'Hawaiian Standard Time',
'Pacific/Johnston': 'Hawaiian Standard Time',
'Pacific/Rarotonga': 'Hawaiian Standard Time',
'Pacific/Tahiti': 'Hawaiian Standard Time',
# (UTC-09:00) Alaska
'America/Anchorage': 'Alaskan Standard Time',
'America/Juneau': 'Alaskan Standard Time',
'America/Metlakatla': 'Alaskan Standard Time',
'America/Nome': 'Alaskan Standard Time',
'America/Sitka': 'Alaskan Standard Time',
'America/Yakutat': 'Alaskan Standard Time',
# (UTC-08:00) Baja California
# Unmappable
# (UTC-08:00) Pacific Time (US & Canada)
'America/Dawson': 'Pacific Standard Time',
'America/Los_Angeles': 'Pacific Standard Time',
'America/Santa_Isabel': 'Pacific Standard Time',
'America/Tijuana': 'Pacific Standard Time',
'America/Vancouver': 'Pacific Standard Time',
'America/Whitehorse': 'Pacific Standard Time',
'PST8PDT': 'Pacific Standard Time',
# (UTC-07:00) Arizona
'America/Creston': 'US Mountain Standard Time',
'America/Dawson_Creek': 'US Mountain Standard Time',
'America/Fort_Nelson': 'US Mountain Standard Time',
'America/Hermosillo': 'US Mountain Standard Time',
'America/Phoenix': 'US Mountain Standard Time',
'Etc/GMT+7': 'US Mountain Standard Time',
# (UTC-07:00) Chihuahua, La Paz, Mazatlan
'America/Chihuahua': 'Mountain Standard Time (Mexico)',
'America/Mazatlan': 'Mountain Standard Time (Mexico)',
# (UTC-07:00) Mountain Time (US & Canada)
'America/Boise': 'Mountain Standard Time',
'America/Cambridge_Bay': 'Mountain Standard Time',
'America/Denver': 'Mountain Standard Time',
'America/Edmonton': 'Mountain Standard Time',
'America/Inuvik': 'Mountain Standard Time',
'America/Ojinaga': 'Mountain Standard Time',
'America/Yellowknife': 'Mountain Standard Time',
'MST7MDT': 'Mountain Standard Time',
# (UTC-06:00) Central America
'America/Belize': 'Central America Standard Time',
'America/Costa_Rica': 'Central America Standard Time',
'America/El_Salvador': 'Central America Standard Time',
'America/Guatemala': 'Central America Standard Time',
'America/Managua': 'Central America Standard Time',
'America/Tegucigalpa': 'Central America Standard Time',
'Etc/GMT+6': 'Central America Standard Time',
'Pacific/Galapagos': 'Central America Standard Time',
# (UTC-06:00) Central Time (US & Canada)
'America/Chicago': 'Central Standard Time',
'America/Indiana/Knox': 'Central Standard Time',
'America/Indiana/Tell_City': 'Central Standard Time',
'America/Matamoros': 'Central Standard Time',
'America/Menominee': 'Central Standard Time',
'America/North_Dakota/Beulah': 'Central Standard Time',
'America/North_Dakota/Center': 'Central Standard Time',
'America/North_Dakota/New_Salem': 'Central Standard Time',
'America/Rainy_River': 'Central Standard Time',
'America/Rankin_Inlet': 'Central Standard Time',
'America/Resolute': 'Central Standard Time',
'America/Winnipeg': 'Central Standard Time',
'CST6CDT': 'Central Standard Time',
# (UTC-06:00) Guadalajara, Mexico City, Monterrey
'America/Bahia_Banderas': 'Central Standard Time (Mexico)',
'America/Merida': 'Central Standard Time (Mexico)',
'America/Mexico_City': 'Central Standard Time (Mexico)',
'America/Monterrey': 'Central Standard Time (Mexico)',
# (UTC-06:00) Saskatchewan
'America/Regina': 'Canada Central Standard Time',
'America/Swift_Current': 'Canada Central Standard Time',
# (UTC-05:00) Bogota, Lima, Quito, Rio Branco
'America/Bogota': 'SA Pacific Standard Time',
'America/Cayman': 'SA Pacific Standard Time',
'America/Coral_Harbour': 'SA Pacific Standard Time',
'America/Eirunepe': 'SA Pacific Standard Time',
'America/Guayaquil': 'SA Pacific Standard Time',
'America/Jamaica': 'SA Pacific Standard Time',
'America/Lima': 'SA Pacific Standard Time',
'America/Panama': 'SA Pacific Standard Time',
'America/Rio_Branco': 'SA Pacific Standard Time',
'Etc/GMT+5': 'SA Pacific Standard Time',
'Pacific/Easter': 'SA Pacific Standard Time',
# (UTC-05:00) Chetumal
'America/Cancun': 'Eastern Standard Time (Mexico)',
# (UTC-05:00) Eastern Time (US & Canada)
'America/Detroit': 'Eastern Standard Time',
'America/Havana': 'Eastern Standard Time',
'America/Indiana/Petersburg': 'Eastern Standard Time',
'America/Indiana/Vincennes': 'Eastern Standard Time',
'America/Indiana/Winamac': 'Eastern Standard Time',
'America/Iqaluit': 'Eastern Standard Time',
'America/Kentucky/Monticello': 'Eastern Standard Time',
'America/Louisville': 'Eastern Standard Time',
'America/Montreal': 'Eastern Standard Time',
'America/Nassau': 'Eastern Standard Time',
'America/New_York': 'Eastern Standard Time',
'America/Nipigon': 'Eastern Standard Time',
'America/Pangnirtung': 'Eastern Standard Time',
'America/Port-au-Prince': 'Eastern Standard Time',
'America/Thunder_Bay': 'Eastern Standard Time',
'America/Toronto': 'Eastern Standard Time',
'EST5EDT': 'Eastern Standard Time',
# (UTC-05:00) Indiana (East)
'America/Indiana/Marengo': 'US Eastern Standard Time',
'America/Indianapolis': 'US Eastern Standard Time',
'America/Indiana/Vevay': 'US Eastern Standard Time',
# (UTC-04:30) Caracas
'America/Caracas': 'Venezuela Standard Time',
# (UTC-04:00) Asuncion
'America/Asuncion': 'Paraguay Standard Time',
# (UTC-04:00) Atlantic Time (Canada)
'America/Glace_Bay': 'Atlantic Standard Time',
'America/Goose_Bay': 'Atlantic Standard Time',
'America/Halifax': 'Atlantic Standard Time',
'America/Moncton': 'Atlantic Standard Time',
'America/Thule': 'Atlantic Standard Time',
'Atlantic/Bermuda': 'Atlantic Standard Time',
# (UTC-04:00) Cuiaba
'America/Campo_Grande': 'Central Brazilian Standard Time',
'America/Cuiaba': 'Central Brazilian Standard Time',
# (UTC-04:00) Georgetown, La Paz, Manaus, San Juan
'America/Anguilla': 'SA Western Standard Time',
'America/Antigua': 'SA Western Standard Time',
'America/Aruba': 'SA Western Standard Time',
'America/Barbados': 'SA Western Standard Time',
'America/Blanc-Sablon': 'SA Western Standard Time',
'America/Boa_Vista': 'SA Western Standard Time',
'America/Curacao': 'SA Western Standard Time',
'America/Dominica': 'SA Western Standard Time',
'America/Grand_Turk': 'SA Western Standard Time',
'America/Grenada': 'SA Western Standard Time',
'America/Guadeloupe': 'SA Western Standard Time',
'America/Guyana': 'SA Western Standard Time',
'America/Kralendijk': 'SA Western Standard Time',
'America/La_Paz': 'SA Western Standard Time',
'America/Lower_Princes': 'SA Western Standard Time',
'America/Manaus': 'SA Western Standard Time',
'America/Marigot': 'SA Western Standard Time',
'America/Martinique': 'SA Western Standard Time',
'America/Montserrat': 'SA Western Standard Time',
'America/Port_of_Spain': 'SA Western Standard Time',
'America/Porto_Velho': 'SA Western Standard Time',
'America/Puerto_Rico': 'SA Western Standard Time',
'America/Santo_Domingo': 'SA Western Standard Time',
'America/St_Barthelemy': 'SA Western Standard Time',
'America/St_Kitts': 'SA Western Standard Time',
'America/St_Lucia': 'SA Western Standard Time',
'America/St_Thomas': 'SA Western Standard Time',
'America/St_Vincent': 'SA Western Standard Time',
'America/Tortola': 'SA Western Standard Time',
'Etc/GMT+4': 'SA Western Standard Time',
# (UTC-03:30) Newfoundland
'America/St_Johns': 'Newfoundland Standard Time',
# (UTC-03:00) Brasilia
'America/Sao_Paulo': 'E. South America Standard Time',
# (UTC-03:00) Cayenne, Fortaleza
'America/Araguaina': 'SA Eastern Standard Time',
'America/Belem': 'SA Eastern Standard Time',
'America/Cayenne': 'SA Eastern Standard Time',
'America/Fortaleza': 'SA Eastern Standard Time',
'America/Maceio': 'SA Eastern Standard Time',
'America/Paramaribo': 'SA Eastern Standard Time',
'America/Recife': 'SA Eastern Standard Time',
'America/Santarem': 'SA Eastern Standard Time',
'Antarctica/Rothera': 'SA Eastern Standard Time',
'Atlantic/Stanley': 'SA Eastern Standard Time',
'Etc/GMT+3': 'SA Eastern Standard Time',
# (UTC-03:00) City of Buenos Aires
'America/Argentina/La_Rioja': 'Argentina Standard Time',
'America/Argentina/Rio_Gallegos': 'Argentina Standard Time',
'America/Argentina/Salta': 'Argentina Standard Time',
'America/Argentina/San_Juan': 'Argentina Standard Time',
'America/Argentina/San_Luis': 'Argentina Standard Time',
'America/Argentina/Tucuman': 'Argentina Standard Time',
'America/Argentina/Ushuaia': 'Argentina Standard Time',
'America/Buenos_Aires': 'Argentina Standard Time',
'America/Catamarca': 'Argentina Standard Time',
'America/Cordoba': 'Argentina Standard Time',
'America/Jujuy': 'Argentina Standard Time',
'America/Mendoza': 'Argentina Standard Time',
# (UTC-03:00) Greenland
'America/Godthab': 'Greenland Standard Time',
# (UTC-03:00) Montevideo
'America/Montevideo': 'Montevideo Standard Time',
# (UTC-03:00) Salvador
'America/Bahia': 'Bahia Standard Time',
# (UTC-03:00) Santiago
'America/Santiago': 'Pacific SA Standard Time',
'Antarctica/Palmer': 'Pacific SA Standard Time',
# (UTC-02:00) Coordinated Universal Time-02
'America/Noronha': 'UTC-02',
'Atlantic/South_Georgia': 'UTC-02',
'Etc/GMT+2': 'UTC-02',
# (UTC-01:00) Azores
'America/Scoresbysund': 'Azores Standard Time',
'Atlantic/Azores': 'Azores Standard Time',
# (UTC-01:00) Cabo Verde Is.
'Atlantic/Cape_Verde': 'Cape Verde Standard Time',
'Etc/GMT+1': 'Cape Verde Standard Time',
# (UTC) Casablanca
'Africa/Casablanca': 'Morocco Standard Time',
'Africa/El_Aaiun': 'Morocco Standard Time',
# (UTC) Coordinated Universal Time
'America/Danmarkshavn': 'UTC',
'Etc/GMT': 'UTC',
# (UTC) Dublin, Edinburgh, Lisbon, London
'Atlantic/Canary': 'GMT Standard Time',
'Atlantic/Faeroe': 'GMT Standard Time',
'Atlantic/Madeira': 'GMT Standard Time',
'Europe/Dublin': 'GMT Standard Time',
'Europe/Guernsey': 'GMT Standard Time',
'Europe/Isle_of_Man': 'GMT Standard Time',
'Europe/Jersey': 'GMT Standard Time',
'Europe/Lisbon': 'GMT Standard Time',
'Europe/London': 'GMT Standard Time',
# (UTC) Monrovia, Reykjavik
'Africa/Abidjan': 'Greenwich Standard Time',
'Africa/Accra': 'Greenwich Standard Time',
'Africa/Bamako': 'Greenwich Standard Time',
'Africa/Banjul': 'Greenwich Standard Time',
'Africa/Bissau': 'Greenwich Standard Time',
'Africa/Conakry': 'Greenwich Standard Time',
'Africa/Dakar': 'Greenwich Standard Time',
'Africa/Freetown': 'Greenwich Standard Time',
'Africa/Lome': 'Greenwich Standard Time',
'Africa/Monrovia': 'Greenwich Standard Time',
'Africa/Nouakchott': 'Greenwich Standard Time',
'Africa/Ouagadougou': 'Greenwich Standard Time',
'Africa/Sao_Tome': 'Greenwich Standard Time',
'Atlantic/Reykjavik': 'Greenwich Standard Time',
'Atlantic/St_Helena': 'Greenwich Standard Time',
# (UTC+01:00) Amsterdam, Berlin, Bern, Rome, Stockholm, Vienna
'Arctic/Longyearbyen': 'W. Europe Standard Time',
'Europe/Amsterdam': 'W. Europe Standard Time',
'Europe/Andorra': 'W. Europe Standard Time',
'Europe/Berlin': 'W. Europe Standard Time',
'Europe/Busingen': 'W. Europe Standard Time',
'Europe/Gibraltar': 'W. Europe Standard Time',
'Europe/Luxembourg': 'W. Europe Standard Time',
'Europe/Malta': 'W. Europe Standard Time',
'Europe/Monaco': 'W. Europe Standard Time',
'Europe/Oslo': 'W. Europe Standard Time',
'Europe/Rome': 'W. Europe Standard Time',
'Europe/San_Marino': 'W. Europe Standard Time',
'Europe/Stockholm': 'W. Europe Standard Time',
'Europe/Vaduz': 'W. Europe Standard Time',
'Europe/Vatican': 'W. Europe Standard Time',
'Europe/Vienna': 'W. Europe Standard Time',
'Europe/Zurich': 'W. Europe Standard Time',
# (UTC+01:00) Belgrade, Bratislava, Budapest, Ljubljana, Prague
'Europe/Belgrade': 'Central Europe Standard Time',
'Europe/Bratislava': 'Central Europe Standard Time',
'Europe/Budapest': 'Central Europe Standard Time',
'Europe/Ljubljana': 'Central Europe Standard Time',
'Europe/Podgorica': 'Central Europe Standard Time',
'Europe/Prague': 'Central Europe Standard Time',
'Europe/Tirane': 'Central Europe Standard Time',
# (UTC+01:00) Brussels, Copenhagen, Madrid, Paris
'Africa/Ceuta': 'Romance Standard Time',
'Europe/Brussels': 'Romance Standard Time',
'Europe/Copenhagen': 'Romance Standard Time',
'Europe/Madrid': 'Romance Standard Time',
'Europe/Paris': 'Romance Standard Time',
# (UTC+01:00) Sarajevo, Skopje, Warsaw, Zagreb
'Europe/Sarajevo': 'Central European Standard Time',
'Europe/Skopje': 'Central European Standard Time',
'Europe/Warsaw': 'Central European Standard Time',
'Europe/Zagreb': 'Central European Standard Time',
# (UTC+01:00) West Central Africa
'Africa/Algiers': 'W. Central Africa Standard Time',
'Africa/Bangui': 'W. Central Africa Standard Time',
'Africa/Brazzaville': 'W. Central Africa Standard Time',
'Africa/Douala': 'W. Central Africa Standard Time',
'Africa/Kinshasa': 'W. Central Africa Standard Time',
'Africa/Lagos': 'W. Central Africa Standard Time',
'Africa/Libreville': 'W. Central Africa Standard Time',
'Africa/Luanda': 'W. Central Africa Standard Time',
'Africa/Malabo': 'W. Central Africa Standard Time',
'Africa/Ndjamena': 'W. Central Africa Standard Time',
'Africa/Niamey': 'W. Central Africa Standard Time',
'Africa/Porto-Novo': 'W. Central Africa Standard Time',
'Africa/Tunis': 'W. Central Africa Standard Time',
'Etc/GMT-1': 'W. Central Africa Standard Time',
# (UTC+01:00) Windhoek
'Africa/Windhoek': 'Namibia Standard Time',
# (UTC+02:00) Amman
'Asia/Amman': 'Jordan Standard Time',
# (UTC+02:00) Athens, Bucharest
'Asia/Nicosia': 'GTB Standard Time',
'Europe/Athens': 'GTB Standard Time',
'Europe/Bucharest': 'GTB Standard Time',
# (UTC+02:00) Beirut
'Asia/Beirut': 'Middle East Standard Time',
# (UTC+02:00) Cairo
'Africa/Cairo': 'Egypt Standard Time',
# (UTC+02:00) Damascus
'Asia/Damascus': 'Syria Standard Time',
# (UTC+02:00) E. Europe
'Europe/Chisinau': 'E. Europe Standard Time',
# (UTC+02:00) Harare, Pretoria
'Africa/Blantyre': 'South Africa Standard Time',
'Africa/Bujumbura': 'South Africa Standard Time',
'Africa/Gaborone': 'South Africa Standard Time',
'Africa/Harare': 'South Africa Standard Time',
'Africa/Johannesburg': 'South Africa Standard Time',
'Africa/Kigali': 'South Africa Standard Time',
'Africa/Lubumbashi': 'South Africa Standard Time',
'Africa/Lusaka': 'South Africa Standard Time',
'Africa/Maputo': 'South Africa Standard Time',
'Africa/Maseru': 'South Africa Standard Time',
'Africa/Mbabane': 'South Africa Standard Time',
'Etc/GMT-2': 'South Africa Standard Time',
# (UTC+02:00) Helsinki, Kyiv, Riga, Sofia, Tallinn, Vilnius
'Europe/Helsinki': 'FLE Standard Time',
'Europe/Kiev': 'FLE Standard Time',
'Europe/Mariehamn': 'FLE Standard Time',
'Europe/Riga': 'FLE Standard Time',
'Europe/Sofia': 'FLE Standard Time',
'Europe/Tallinn': 'FLE Standard Time',
'Europe/Uzhgorod': 'FLE Standard Time',
'Europe/Vilnius': 'FLE Standard Time',
'Europe/Zaporozhye': 'FLE Standard Time',
# (UTC+02:00) Istanbul
'Europe/Istanbul': 'Turkey Standard Time',
# (UTC+02:00) Jerusalem
'Asia/Jerusalem': 'Israel Standard Time',
# (UTC+02:00) Kaliningrad (RTZ 1)
'Europe/Kaliningrad': 'Kaliningrad Standard Time',
# (UTC+02:00) Tripoli
'Africa/Tripoli': 'Libya Standard Time',
# (UTC+03:00) Baghdad
'Asia/Baghdad': 'Arabic Standard Time',
# (UTC+03:00) Kuwait, Riyadh
'Asia/Aden': 'Arab Standard Time',
'Asia/Bahrain': 'Arab Standard Time',
'Asia/Kuwait': 'Arab Standard Time',
'Asia/Qatar': 'Arab Standard Time',
'Asia/Riyadh': 'Arab Standard Time',
# (UTC+03:00) Minsk
'Europe/Minsk': 'Belarus Standard Time',
# (UTC+03:00) Moscow, St. Petersburg, Volgograd (RTZ 2)
'Europe/Moscow': 'Russian Standard Time',
'Europe/Simferopol': 'Russian Standard Time',
'Europe/Volgograd': 'Russian Standard Time',
# (UTC+03:00) Nairobi
'Africa/Addis_Ababa': 'E. Africa Standard Time',
'Africa/Asmera': 'E. Africa Standard Time',
'Africa/Dar_es_Salaam': 'E. Africa Standard Time',
'Africa/Djibouti': 'E. Africa Standard Time',
'Africa/Juba': 'E. Africa Standard Time',
'Africa/Kampala': 'E. Africa Standard Time',
'Africa/Khartoum': 'E. Africa Standard Time',
'Africa/Mogadishu': 'E. Africa Standard Time',
'Africa/Nairobi': 'E. Africa Standard Time',
'Antarctica/Syowa': 'E. Africa Standard Time',
'Etc/GMT-3': 'E. Africa Standard Time',
'Indian/Antananarivo': 'E. Africa Standard Time',
'Indian/Comoro': 'E. Africa Standard Time',
'Indian/Mayotte': 'E. Africa Standard Time',
# (UTC+03:30) Tehran
'Asia/Tehran': 'Iran Standard Time',
# (UTC+04:00) Abu Dhabi, Muscat
'Asia/Dubai': 'Arabian Standard Time',
'Asia/Muscat': 'Arabian Standard Time',
'Etc/GMT-4': 'Arabian Standard Time',
# (UTC+04:00) Baku
'Asia/Baku': 'Azerbaijan Standard Time',
# (UTC+04:00) Izhevsk, Samara (RTZ 3)
'Europe/Samara': 'Russia Time Zone 3',
# (UTC+04:00) Port Louis
'Indian/Mahe': 'Mauritius Standard Time',
'Indian/Mauritius': 'Mauritius Standard Time',
'Indian/Reunion': 'Mauritius Standard Time',
# (UTC+04:00) Tbilisi
'Asia/Tbilisi': 'Georgian Standard Time',
# (UTC+04:00) Yerevan
'Asia/Yerevan': 'Caucasus Standard Time',
# (UTC+04:30) Kabul
'Asia/Kabul': 'Afghanistan Standard Time',
# (UTC+05:00) Ashgabat, Tashkent
'Antarctica/Mawson': 'West Asia Standard Time',
'Asia/Aqtau': 'West Asia Standard Time',
'Asia/Aqtobe': 'West Asia Standard Time',
'Asia/Ashgabat': 'West Asia Standard Time',
'Asia/Dushanbe': 'West Asia Standard Time',
'Asia/Oral': 'West Asia Standard Time',
'Asia/Samarkand': 'West Asia Standard Time',
'Asia/Tashkent': 'West Asia Standard Time',
'Etc/GMT-5': 'West Asia Standard Time',
'Indian/Kerguelen': 'West Asia Standard Time',
'Indian/Maldives': 'West Asia Standard Time',
# (UTC+05:00) Ekaterinburg (RTZ 4)
'Asia/Yekaterinburg': 'Ekaterinburg Standard Time',
# (UTC+05:00) Islamabad, Karachi
'Asia/Karachi': 'Pakistan Standard Time',
# (UTC+05:30) Chennai, Kolkata, Mumbai, New Delhi
'Asia/Calcutta': 'India Standard Time',
# (UTC+05:30) Sri Jayawardenepura
'Asia/Colombo': 'Sri Lanka Standard Time',
# (UTC+05:45) Kathmandu
'Asia/Katmandu': 'Nepal Standard Time',
# (UTC+06:00) Astana
'Antarctica/Vostok': 'Central Asia Standard Time',
'Asia/Almaty': 'Central Asia Standard Time',
'Asia/Bishkek': 'Central Asia Standard Time',
'Asia/Qyzylorda': 'Central Asia Standard Time',
'Asia/Urumqi': 'Central Asia Standard Time',
'Etc/GMT-6': 'Central Asia Standard Time',
'Indian/Chagos': 'Central Asia Standard Time',
# (UTC+06:00) Dhaka
'Asia/Dhaka': 'Bangladesh Standard Time',
'Asia/Thimphu': 'Bangladesh Standard Time',
# (UTC+06:00) Novosibirsk (RTZ 5)
'Asia/Novosibirsk': 'N. Central Asia Standard Time',
'Asia/Omsk': 'N. Central Asia Standard Time',
# (UTC+06:30) Yangon (Rangoon)
'Asia/Rangoon': 'Myanmar Standard Time',
'Indian/Cocos': 'Myanmar Standard Time',
# (UTC+07:00) Bangkok, Hanoi, Jakarta
'Antarctica/Davis': 'SE Asia Standard Time',
'Asia/Bangkok': 'SE Asia Standard Time',
'Asia/Jakarta': 'SE Asia Standard Time',
'Asia/Phnom_Penh': 'SE Asia Standard Time',
'Asia/Pontianak': 'SE Asia Standard Time',
'Asia/Saigon': 'SE Asia Standard Time',
'Asia/Vientiane': 'SE Asia Standard Time',
'Etc/GMT-7': 'SE Asia Standard Time',
'Indian/Christmas': 'SE Asia Standard Time',
# (UTC+07:00) Krasnoyarsk (RTZ 6)
'Asia/Krasnoyarsk': 'North Asia Standard Time',
'Asia/Novokuznetsk': 'North Asia Standard Time',
# (UTC+08:00) Beijing, Chongqing, Hong Kong, Urumqi
'Asia/Hong_Kong': 'China Standard Time',
'Asia/Macau': 'China Standard Time',
'Asia/Shanghai': 'China Standard Time',
# (UTC+08:00) Irkutsk (RTZ 7)
'Asia/Irkutsk': 'North Asia East Standard Time',
# (UTC+08:00) Kuala Lumpur, Singapore
'Asia/Brunei': 'Singapore Standard Time',
'Asia/Kuala_Lumpur': 'Singapore Standard Time',
'Asia/Kuching': 'Singapore Standard Time',
'Asia/Makassar': 'Singapore Standard Time',
'Asia/Manila': 'Singapore Standard Time',
'Asia/Singapore': 'Singapore Standard Time',
'Etc/GMT-8': 'Singapore Standard Time',
# (UTC+08:00) Perth
'Antarctica/Casey': 'W. Australia Standard Time',
'Australia/Perth': 'W. Australia Standard Time',
# (UTC+08:00) Taipei
'Asia/Taipei': 'Taipei Standard Time',
# (UTC+08:00) Ulaanbaatar
'Asia/Choibalsan': 'Ulaanbaatar Standard Time',
'Asia/Ulaanbaatar': 'Ulaanbaatar Standard Time',
# (UTC+08:30) Pyongyang
'Asia/Pyongyang': 'North Korea Standard Time',
# (UTC+09:00) Osaka, Sapporo, Tokyo
'Asia/Dili': 'Tokyo Standard Time',
'Asia/Jayapura': 'Tokyo Standard Time',
'Asia/Tokyo': 'Tokyo Standard Time',
'Etc/GMT-9': 'Tokyo Standard Time',
'Pacific/Palau': 'Tokyo Standard Time',
# (UTC+09:00) Seoul
'Asia/Seoul': 'Korea Standard Time',
# (UTC+09:00) Yakutsk (RTZ 8)
'Asia/Chita': 'Yakutsk Standard Time',
'Asia/Khandyga': 'Yakutsk Standard Time',
'Asia/Yakutsk': 'Yakutsk Standard Time',
# (UTC+09:30) Adelaide
'Australia/Adelaide': 'Cen. Australia Standard Time',
'Australia/Broken_Hill': 'Cen. Australia Standard Time',
# (UTC+09:30) Darwin
'Australia/Darwin': 'AUS Central Standard Time',
# (UTC+10:00) Brisbane
'Australia/Brisbane': 'E. Australia Standard Time',
'Australia/Lindeman': 'E. Australia Standard Time',
# (UTC+10:00) Canberra, Melbourne, Sydney
'Australia/Melbourne': 'AUS Eastern Standard Time',
'Australia/Sydney': 'AUS Eastern Standard Time',
# (UTC+10:00) Guam, Port Moresby
'Antarctica/DumontDUrville': 'West Pacific Standard Time',
'Etc/GMT-10': 'West Pacific Standard Time',
'Pacific/Guam': 'West Pacific Standard Time',
'Pacific/Port_Moresby': 'West Pacific Standard Time',
'Pacific/Saipan': 'West Pacific Standard Time',
'Pacific/Truk': 'West Pacific Standard Time',
# (UTC+10:00) Hobart
'Australia/Currie': 'Tasmania Standard Time',
'Australia/Hobart': 'Tasmania Standard Time',
# (UTC+10:00) Magadan
'Asia/Magadan': 'Magadan Standard Time',
# (UTC+10:00) Vladivostok, Magadan (RTZ 9)
'Asia/Sakhalin': 'Vladivostok Standard Time',
'Asia/Ust-Nera': 'Vladivostok Standard Time',
'Asia/Vladivostok': 'Vladivostok Standard Time',
# (UTC+11:00) Chokurdakh (RTZ 10)
'Asia/Srednekolymsk': 'Russia Time Zone 10',
# (UTC+11:00) Solomon Is., New Caledonia
'Antarctica/Macquarie': 'Central Pacific Standard Time',
'Etc/GMT-11': 'Central Pacific Standard Time',
'Pacific/Bougainville': 'Central Pacific Standard Time',
'Pacific/Efate': 'Central Pacific Standard Time',
'Pacific/Guadalcanal': 'Central Pacific Standard Time',
'Pacific/Kosrae': 'Central Pacific Standard Time',
'Pacific/Norfolk': 'Central Pacific Standard Time',
'Pacific/Noumea': 'Central Pacific Standard Time',
'Pacific/Ponape': 'Central Pacific Standard Time',
# (UTC+12:00) Anadyr, Petropavlovsk-Kamchatsky (RTZ 11)
'Asia/Anadyr': 'Russia Time Zone 11',
'Asia/Kamchatka': 'Russia Time Zone 11',
# (UTC+12:00) Auckland, Wellington
'Antarctica/McMurdo': 'New Zealand Standard Time',
'Pacific/Auckland': 'New Zealand Standard Time',
# (UTC+12:00) Coordinated Universal Time+12
'Etc/GMT-12': 'UTC+12',
'Pacific/Funafuti': 'UTC+12',
'Pacific/Kwajalein': 'UTC+12',
'Pacific/Majuro': 'UTC+12',
'Pacific/Nauru': 'UTC+12',
'Pacific/Tarawa': 'UTC+12',
'Pacific/Wake': 'UTC+12',
'Pacific/Wallis': 'UTC+12',
# (UTC+12:00) Fiji
'Pacific/Fiji': 'Fiji Standard Time',
# (UTC+13:00) Nuku'alofa
'Etc/GMT-13': 'Tonga Standard Time',
'Pacific/Enderbury': 'Tonga Standard Time',
'Pacific/Fakaofo': 'Tonga Standard Time',
'Pacific/Tongatapu': 'Tonga Standard Time',
# (UTC+13:00) Samoa
'Pacific/Apia': 'Samoa Standard Time',
# (UTC+14:00) Kiritimati Island
'Etc/GMT-14': 'Line Islands Standard Time',
'Pacific/Kiritimati': 'Line Islands Standard Time',
}
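# Minimal usage sketch (hedged): the mapping above is keyed by IANA/Olson zone names and
# yields Windows time zone IDs. Its actual variable name is assigned earlier in this file,
# so the mapping is passed in explicitly here rather than guessed.
def windows_zone_for(iana_name, mapping):
    """Return the Windows time zone name for an IANA zone, or None if it is unmapped."""
    return mapping.get(iana_name)
# e.g. windows_zone_for('Europe/Moscow', <the dict above>) -> 'Russian Standard Time'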
|
|
# Copyright 2014 MongoDB, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Dynamic class-creation for Motor."""
import functools
import inspect
_class_cache = {}
def asynchronize(framework, sync_method, doc=None, wrap_class=None, unwrap_class=None):
"""Decorate `sync_method` so it returns a Future.
The method runs on a thread and resolves the Future when it completes.
    :Parameters:
- `framework`: An asynchronous framework
- `sync_method`: Unbound method of pymongo Collection, Database,
MongoClient, etc.
- `doc`: Optionally override sync_method's docstring
- `wrap_class`: Optional PyMongo class, wrap a returned object of
this PyMongo class in the equivalent Motor class
     - `unwrap_class`: Optional Motor class name, unwrap an argument with
this Motor class name and pass the wrapped PyMongo
object instead
"""
@functools.wraps(sync_method)
def method(self, *args, **kwargs):
if unwrap_class is not None:
# Don't call isinstance(), not checking subclasses.
unwrapped_args = [
obj.delegate
if obj.__class__.__name__.endswith((unwrap_class, "MotorClientSession"))
else obj
for obj in args
]
unwrapped_kwargs = {
key: (
obj.delegate
if obj.__class__.__name__.endswith((unwrap_class, "MotorClientSession"))
else obj
)
for key, obj in kwargs.items()
}
else:
# For speed, don't call unwrap_args_session/unwrap_kwargs_session.
unwrapped_args = [
obj.delegate if obj.__class__.__name__.endswith("MotorClientSession") else obj
for obj in args
]
unwrapped_kwargs = {
key: (
obj.delegate if obj.__class__.__name__.endswith("MotorClientSession") else obj
)
for key, obj in kwargs.items()
}
loop = self.get_io_loop()
return framework.run_on_executor(
loop, sync_method, self.delegate, *unwrapped_args, **unwrapped_kwargs
)
if wrap_class is not None:
method = framework.pymongo_class_wrapper(method, wrap_class)
method.is_wrap_method = True # For Synchro.
# This is for the benefit of motor_extensions.py, which needs this info to
# generate documentation with Sphinx.
method.is_async_method = True
name = sync_method.__name__
method.pymongo_method_name = name
if doc is not None:
method.__doc__ = doc
return method
def unwrap_args_session(args):
return (
obj.delegate if obj.__class__.__name__.endswith("MotorClientSession") else obj
for obj in args
)
def unwrap_kwargs_session(kwargs):
return {
key: (obj.delegate if obj.__class__.__name__.endswith("MotorClientSession") else obj)
for key, obj in kwargs.items()
}
_coro_token = object()
def coroutine_annotation(f):
"""In docs, annotate a function that returns a Future with 'coroutine'.
This doesn't affect behavior.
"""
# Like:
# @coroutine_annotation
# def method(self):
#
f.coroutine_annotation = True
return f
class MotorAttributeFactory(object):
"""Used by Motor classes to mark attributes that delegate in some way to
PyMongo. At module import time, create_class_with_framework calls
create_attribute() for each attr to create the final class attribute.
"""
def __init__(self, doc=None):
self.doc = doc
def create_attribute(self, cls, attr_name):
raise NotImplementedError
class Async(MotorAttributeFactory):
def __init__(self, attr_name, doc=None):
"""A descriptor that wraps a PyMongo method, such as insert_one,
and returns an asynchronous version of the method that returns a Future.
:Parameters:
- `attr_name`: The name of the attribute on the PyMongo class, if
different from attribute on the Motor class
"""
super().__init__(doc)
self.attr_name = attr_name
self.wrap_class = None
self.unwrap_class = None
def create_attribute(self, cls, attr_name):
name = self.attr_name or attr_name
method = getattr(cls.__delegate_class__, name)
return asynchronize(
framework=cls._framework,
sync_method=method,
doc=self.doc,
wrap_class=self.wrap_class,
unwrap_class=self.unwrap_class,
)
def wrap(self, original_class):
self.wrap_class = original_class
return self
def unwrap(self, class_name):
self.unwrap_class = class_name
return self
class AsyncRead(Async):
def __init__(self, attr_name=None, doc=None):
"""A descriptor that wraps a PyMongo read method like find_one() that
returns a Future.
"""
Async.__init__(self, attr_name=attr_name, doc=doc)
class AsyncWrite(Async):
def __init__(self, attr_name=None, doc=None):
"""A descriptor that wraps a PyMongo write method like update_one() that
accepts getLastError options and returns a Future.
"""
Async.__init__(self, attr_name=attr_name, doc=doc)
class AsyncCommand(Async):
def __init__(self, attr_name=None, doc=None):
"""A descriptor that wraps a PyMongo command like copy_database() that
returns a Future and does not accept getLastError options.
"""
Async.__init__(self, attr_name=attr_name, doc=doc)
class ReadOnlyProperty(MotorAttributeFactory):
"""Creates a readonly attribute on the wrapped PyMongo object."""
def create_attribute(self, cls, attr_name):
def fget(obj):
return getattr(obj.delegate, attr_name)
if self.doc:
doc = self.doc
else:
doc = getattr(cls.__delegate_class__, attr_name).__doc__
if doc:
return property(fget=fget, doc=doc)
else:
return property(fget=fget)
class DelegateMethod(ReadOnlyProperty):
"""A method on the wrapped PyMongo object that does no I/O and can be called
synchronously"""
def __init__(self, doc=None):
ReadOnlyProperty.__init__(self, doc)
self.wrap_class = None
def wrap(self, original_class):
self.wrap_class = original_class
return self
def create_attribute(self, cls, attr_name):
if self.wrap_class is None:
return ReadOnlyProperty.create_attribute(self, cls, attr_name)
method = getattr(cls.__delegate_class__, attr_name)
original_class = self.wrap_class
@functools.wraps(method)
def wrapper(self_, *args, **kwargs):
result = method(self_.delegate, *args, **kwargs)
# Don't call isinstance(), not checking subclasses.
if result.__class__ == original_class:
# Delegate to the current object to wrap the result.
return self_.wrap(result)
else:
return result
if self.doc:
wrapper.__doc__ = self.doc
wrapper.is_wrap_method = True # For Synchro.
return wrapper
class MotorCursorChainingMethod(MotorAttributeFactory):
def create_attribute(self, cls, attr_name):
cursor_method = getattr(cls.__delegate_class__, attr_name)
@functools.wraps(cursor_method)
def return_clone(self, *args, **kwargs):
cursor_method(self.delegate, *args, **kwargs)
return self
# This is for the benefit of Synchro, and motor_extensions.py
return_clone.is_motorcursor_chaining_method = True
return_clone.pymongo_method_name = attr_name
if self.doc:
return_clone.__doc__ = self.doc
return return_clone
def create_class_with_framework(cls, framework, module_name):
motor_class_name = framework.CLASS_PREFIX + cls.__motor_class_name__
cache_key = (cls, motor_class_name, framework)
cached_class = _class_cache.get(cache_key)
if cached_class:
return cached_class
new_class = type(str(motor_class_name), (cls,), {})
new_class.__module__ = module_name
new_class._framework = framework
assert hasattr(new_class, "__delegate_class__")
# If we're constructing MotorClient from AgnosticClient, for example,
# the method resolution order is (AgnosticClient, AgnosticBase, object).
# Iterate over bases looking for attributes and coroutines that must be
# replaced with framework-specific ones.
for base in reversed(inspect.getmro(cls)):
# Turn attribute factories into real methods or descriptors.
for name, attr in base.__dict__.items():
if isinstance(attr, MotorAttributeFactory):
new_class_attr = attr.create_attribute(new_class, name)
setattr(new_class, name, new_class_attr)
_class_cache[cache_key] = new_class
return new_class
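# Illustrative sketch (added, not part of Motor itself): how the factories above are meant
# to be used. `FakeDelegate`, `AgnosticThing`, and the framework module are hypothetical
# stand-ins; a real framework module (one providing CLASS_PREFIX, run_on_executor and
# pymongo_class_wrapper) is required before create_class_with_framework() can be called.
#
#   class FakeDelegate(object):
#       def ping(self):
#           return "pong"
#
#   class AgnosticThing(object):
#       __motor_class_name__ = "MotorThing"
#       __delegate_class__ = FakeDelegate
#       ping = Async(attr_name="ping")
#
#   MotorThing = create_class_with_framework(AgnosticThing, framework, "example_module")
#   # MotorThing().ping() then returns a Future resolved on the framework's executor.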
|
|
from PIL import Image
from worldengine.drawing_functions import *
from worldengine.common import *
# -------------
# Helper values
# -------------
_biome_colors = {
'ocean': (23, 94, 145),
'ice': (255, 255, 255),
'subpolar dry tundra': (128, 128, 128),
'subpolar moist tundra': (96, 128, 128),
'subpolar wet tundra': (64, 128, 128),
'subpolar rain tundra': (32, 128, 192),
'polar desert': (192, 192, 192),
'boreal desert': (160, 160, 128),
'cool temperate desert': (192, 192, 128),
'warm temperate desert': (224, 224, 128),
'subtropical desert': (240, 240, 128),
'tropical desert': (255, 255, 128),
'boreal rain forest': (32, 160, 192),
'cool temperate rain forest': (32, 192, 192),
'warm temperate rain forest': (32, 224, 192),
'subtropical rain forest': (32, 240, 176),
'tropical rain forest': (32, 255, 160),
'boreal wet forest': (64, 160, 144),
'cool temperate wet forest': (64, 192, 144),
'warm temperate wet forest': (64, 224, 144),
'subtropical wet forest': (64, 240, 144),
'tropical wet forest': (64, 255, 144),
'boreal moist forest': (96, 160, 128),
'cool temperate moist forest': (96, 192, 128),
'warm temperate moist forest': (96, 224, 128),
'subtropical moist forest': (96, 240, 128),
'tropical moist forest': (96, 255, 128),
'warm temperate dry forest': (128, 224, 128),
'subtropical dry forest': (128, 240, 128),
'tropical dry forest': (128, 255, 128),
'boreal dry scrub': (128, 160, 128),
'cool temperate desert scrub': (160, 192, 128),
'warm temperate desert scrub': (192, 224, 128),
'subtropical desert scrub': (208, 240, 128),
'tropical desert scrub': (224, 255, 128),
'cool temperate steppe': (128, 192, 128),
'warm temperate thorn scrub': (160, 224, 128),
'subtropical thorn woodland': (176, 240, 128),
'tropical thorn woodland': (192, 255, 128),
'tropical very dry forest': (160, 255, 128),
}
# ----------------
# Helper functions
# ----------------
def _elevation_color(elevation, sea_level=1.0):
"""
Calculate color based on elevation
:param elevation:
:return:
"""
color_step = 1.5
if elevation < sea_level/2:
elevation /= sea_level
return 0.0, 0.0, 0.75 + 0.5 * elevation
elif elevation < sea_level:
elevation /= sea_level
return 0.0, 2 * (elevation - 0.5), 1.0
else:
elevation -= sea_level
if elevation < 1.0 * color_step:
return (0.0, 0.5 +
0.5 * elevation / color_step, 0.0)
elif elevation < 1.5 * color_step:
return 2 * (elevation - 1.0 * color_step) / color_step, 1.0, 0.0
elif elevation < 2.0 * color_step:
return 1.0, 1.0 - (elevation - 1.5 * color_step) / color_step, 0
elif elevation < 3.0 * color_step:
return (1.0 - 0.5 * (elevation - 2.0 *
color_step) / color_step,
0.5 - 0.25 * (elevation - 2.0 *
color_step) / color_step, 0)
elif elevation < 5.0 * color_step:
return (0.5 - 0.125 * (elevation - 3.0 *
color_step) / (2 * color_step),
0.25 + 0.125 * (elevation - 3.0 *
color_step) / (2 * color_step),
0.375 * (elevation - 3.0 *
color_step) / (2 * color_step))
elif elevation < 8.0 * color_step:
return (0.375 + 0.625 * (elevation - 5.0 *
color_step) / (3 * color_step),
0.375 + 0.625 * (elevation - 5.0 *
color_step) / (3 * color_step),
0.375 + 0.625 * (elevation - 5.0 *
color_step) / (3 * color_step))
else:
elevation -= 8.0 * color_step
while elevation > 2.0 * color_step:
elevation -= 2.0 * color_step
return 1, 1 - elevation / 4.0, 1
def _sature_color(color):
r, g, b = color
if r < 0:
r = 0.0
if r > 1.0:
r = 1.0
if g < 0:
g = 0.0
if g > 1.0:
g = 1.0
if b < 0:
b = 0.0
if b > 1.0:
b = 1.0
return r, g, b
def elevation_color(elevation, sea_level=1.0):
return _sature_color(_elevation_color(elevation, sea_level))
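# Quick sanity check for the gradient above (illustrative values, default sea_level=1.0):
#   elevation_color(0.25) -> (0.0, 0.0, 0.875)   deep water, dark blue
#   elevation_color(0.75) -> (0.0, 0.5, 1.0)     shallow water
#   elevation_color(1.5)  -> (0.0, ~0.667, 0.0)  low-lying land, green
# Higher elevations continue through yellow/brown toward white, and extreme values wrap
# via the final while-loop in _elevation_color().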
# ----------------------
# Draw on generic target
# ----------------------
class ImagePixelSetter(object):
def __init__(self, width, height, filename):
self.img = Image.new('RGBA', (width, height))
self.pixels = self.img.load()
self.filename = filename
def set_pixel(self, x, y, color):
self.pixels[x, y] = color
def complete(self):
self.img.save(self.filename)
def __getitem__(self, item):
return self.pixels[item]
def __setitem__(self, item, value):
self.pixels[item] = value
def draw_simple_elevation(data, width, height, sea_level, target):
""" This function can be used on a generic canvas (either an image to save
on disk or a canvas part of a GUI)
"""
for y in range(height):
for x in range(width):
e = data[y][x]
r, g, b = elevation_color(e, sea_level)
target.set_pixel(x, y, (int(r * 255), int(g * 255),
int(b * 255), 255))
def draw_riversmap(world, target):
sea_color = (255, 255, 255, 255)
land_color = (0, 0, 0, 255)
for y in range(world.height):
for x in range(world.width):
if world.ocean[y][x]:
target.set_pixel(x, y, sea_color)
else:
target.set_pixel(x, y, land_color)
draw_rivers_on_image(world, target, factor=1)
def draw_grayscale_heightmap(world, target):
min_elev_sea = None
max_elev_sea = None
min_elev_land = None
max_elev_land = None
for y in range(world.height):
for x in range(world.width):
e = world.elevation['data'][y][x]
if world.is_land((x, y)):
if min_elev_land is None or e < min_elev_land:
min_elev_land = e
if max_elev_land is None or e > max_elev_land:
max_elev_land = e
else:
if min_elev_sea is None or e < min_elev_sea:
min_elev_sea = e
if max_elev_sea is None or e > max_elev_sea:
max_elev_sea = e
elev_delta_land = max_elev_land - min_elev_land
elev_delta_sea = max_elev_sea - min_elev_sea
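    # Sea cells are scaled into the 0..127 gray band and land cells into 128..255,
    # so the coastline shows up as a sharp brightness step in the heightmap.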
for y in range(world.height):
for x in range(world.width):
e = world.elevation['data'][y][x]
if world.is_land((x, y)):
c = int(((e - min_elev_land) * 127) / elev_delta_land)+128
else:
c = int(((e - min_elev_sea) * 127) / elev_delta_sea)
target.set_pixel(x, y, (c, c, c, 255))
def draw_elevation(world, shadow, target):
width = world.width
height = world.height
data = world.elevation['data']
ocean = world.ocean
min_elev = None
max_elev = None
for y in range(height):
for x in range(width):
if not ocean[y][x]:
e = data[y][x]
if min_elev is None or e < min_elev:
min_elev = e
if max_elev is None or e > max_elev:
max_elev = e
elev_delta = max_elev - min_elev
for y in range(height):
for x in range(width):
if ocean[y][x]:
target.set_pixel(x, y, (0, 0, 255, 255))
else:
e = data[y][x]
c = 255 - int(((e - min_elev) * 255) / elev_delta)
if shadow and y > 2 and x > 2:
if data[y - 1][x - 1] > e:
c -= 15
if data[y - 2][x - 2] > e \
and data[y - 2][x - 2] > data[y - 1][x - 1]:
c -= 10
if data[y - 3][x - 3] > e \
and data[y - 3][x - 3] > data[y - 1][x - 1] \
and data[y - 3][x - 3] > data[y - 2][x - 2]:
c -= 5
if c < 0:
c = 0
target.set_pixel(x, y, (c, c, c, 255))
def draw_ocean(ocean, target):
width = len(ocean[0])
height = len(ocean)
for y in range(height):
for x in range(width):
if ocean[y][x]:
target.set_pixel(x, y, (0, 0, 255, 255))
else:
target.set_pixel(x, y, (0, 255, 255, 255))
def draw_precipitation(world, target):
    # FIXME we are drawing humidity, not precipitation
width = world.width
height = world.height
for y in range(height):
for x in range(width):
if world.is_humidity_superarid((x, y)):
target.set_pixel(x, y, (0, 32, 32, 255))
elif world.is_humidity_perarid((x, y)):
target.set_pixel(x, y, (0, 64, 64, 255))
elif world.is_humidity_arid((x, y)):
target.set_pixel(x, y, (0, 96, 96, 255))
elif world.is_humidity_semiarid((x, y)):
target.set_pixel(x, y, (0, 128, 128, 255))
elif world.is_humidity_subhumid((x, y)):
target.set_pixel(x, y, (0, 160, 160, 255))
elif world.is_humidity_humid((x, y)):
target.set_pixel(x, y, (0, 192, 192, 255))
elif world.is_humidity_perhumid((x, y)):
target.set_pixel(x, y, (0, 224, 224, 255))
elif world.is_humidity_superhumid((x, y)):
target.set_pixel(x, y, (0, 255, 255, 255))
def draw_world(world, target):
width = world.width
height = world.height
for y in range(height):
for x in range(width):
if world.is_land((x, y)):
biome = world.biome_at((x, y))
target.set_pixel(x, y, _biome_colors[biome.name()])
else:
c = int(world.sea_depth[y][x] * 200 + 50)
target.set_pixel(x, y, (0, 0, 255 - c, 255))
def draw_temperature_levels(world, target):
width = world.width
height = world.height
for y in range(height):
for x in range(width):
if world.is_temperature_polar((x, y)):
target.set_pixel(x, y, (0, 0, 255, 255))
elif world.is_temperature_alpine((x, y)):
target.set_pixel(x, y, (42, 0, 213, 255))
elif world.is_temperature_boreal((x, y)):
target.set_pixel(x, y, (85, 0, 170, 255))
elif world.is_temperature_cool((x, y)):
target.set_pixel(x, y, (128, 0, 128, 255))
elif world.is_temperature_warm((x, y)):
target.set_pixel(x, y, (170, 0, 85, 255))
elif world.is_temperature_subtropical((x, y)):
target.set_pixel(x, y, (213, 0, 42, 255))
elif world.is_temperature_tropical((x, y)):
target.set_pixel(x, y, (255, 0, 0, 255))
def draw_biome(world, target):
width = world.width
height = world.height
biome = world.biome
for y in range(height):
for x in range(width):
v = biome[y][x]
target.set_pixel(x, y, _biome_colors[v])
# -------------
# Draw on files
# -------------
def draw_simple_elevation_on_file(data, filename, width, height, sea_level):
img = ImagePixelSetter(width, height, filename)
draw_simple_elevation(data, width, height, sea_level, img)
img.complete()
def draw_riversmap_on_file(world, filename):
img = ImagePixelSetter(world.width, world.height, filename)
draw_riversmap(world, img)
img.complete()
def draw_grayscale_heightmap_on_file(world, filename):
img = ImagePixelSetter(world.width, world.height, filename)
draw_grayscale_heightmap(world, img)
img.complete()
def draw_elevation_on_file(world, filename, shadow=True):
img = ImagePixelSetter(world.width, world.height, filename)
draw_elevation(world, shadow, img)
img.complete()
def draw_ocean_on_file(ocean, filename):
width = len(ocean[0])
height = len(ocean)
img = ImagePixelSetter(width, height, filename)
draw_ocean(ocean, img)
img.complete()
def draw_precipitation_on_file(world, filename):
img = ImagePixelSetter(world.width, world.height, filename)
draw_precipitation(world, img)
img.complete()
def draw_world_on_file(world, filename):
img = ImagePixelSetter(world.width, world.height, filename)
draw_world(world, img)
img.complete()
def draw_temperature_levels_on_file(world, filename):
img = ImagePixelSetter(world.width, world.height, filename)
draw_temperature_levels(world, img)
img.complete()
def draw_biome_on_file(world, filename):
img = ImagePixelSetter(world.width, world.height, filename)
draw_biome(world, img)
img.complete()
def draw_ancientmap_on_file(world, filename, resize_factor=1,
sea_color=(212, 198, 169, 255), verbose=False):
img = ImagePixelSetter(world.width * resize_factor,
world.height * resize_factor, filename)
draw_ancientmap(world, img, resize_factor, sea_color, verbose)
img.complete()
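# Minimal usage sketch (hedged): given a `world` object produced by worldengine's
# generation step (not shown in this module), the helpers above render maps straight
# to image files, e.g.:
#   draw_world_on_file(world, 'biomes.png')
#   draw_elevation_on_file(world, 'elevation.png', shadow=True)
#   draw_grayscale_heightmap_on_file(world, 'heightmap.png')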
|
|
#!/usr/bin/env python3
# Copyright (c) 2014-2016 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""
Run Regression Test Suite
This module calls down into individual test cases via subprocess. It will
forward all unrecognized arguments onto the individual test scripts, other
than:
- `-extended`: run the "extended" test suite in addition to the basic one.
- `-win`: signal that this is running in a Windows environment, and we
should run the tests.
- `--coverage`: this generates a basic coverage report for the RPC
interface.
For a description of arguments recognized by test scripts, see
`qa/pull-tester/test_framework/test_framework.py:BitcoinTestFramework.main`.
"""
import os
import time
import shutil
import sys
import subprocess
import tempfile
import re
sys.path.append("qa/pull-tester/")
from tests_config import *
BOLD = ("","")
if os.name == 'posix':
# primitive formatting on supported
# terminal via ANSI escape sequences:
BOLD = ('\033[0m', '\033[1m')
RPC_TESTS_DIR = SRCDIR + '/qa/rpc-tests/'
#If imported values are not defined then set to zero (or disabled)
if 'ENABLE_WALLET' not in vars():
ENABLE_WALLET=0
if 'ENABLE_BITCOIND' not in vars():
ENABLE_BITCOIND=0
if 'ENABLE_UTILS' not in vars():
ENABLE_UTILS=0
if 'ENABLE_ZMQ' not in vars():
ENABLE_ZMQ=0
ENABLE_COVERAGE=0
#Create a set to store arguments and create the passon string
opts = set()
passon_args = []
PASSON_REGEX = re.compile("^--")
PARALLEL_REGEX = re.compile('^-parallel=')
print_help = False
run_parallel = 4
for arg in sys.argv[1:]:
if arg == "--help" or arg == "-h" or arg == "-?":
print_help = True
break
if arg == '--coverage':
ENABLE_COVERAGE = 1
elif PASSON_REGEX.match(arg):
passon_args.append(arg)
elif PARALLEL_REGEX.match(arg):
run_parallel = int(arg.split(sep='=', maxsplit=1)[1])
else:
opts.add(arg)
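# Example (illustrative): an invocation such as
#   rpc-tests.py -parallel=8 --coverage -extended --tracerpc wallet.py
# ends up with run_parallel=8, ENABLE_COVERAGE=1, passon_args=['--tracerpc'] (any other
# unrecognized --flag is forwarded the same way), and opts={'-extended', 'wallet.py'}.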
#Set env vars
if "PRECIOUSD" not in os.environ:
os.environ["PRECIOUSD"] = BUILDDIR + '/src/preciousd' + EXEEXT
if "PRECIOUSCLI" not in os.environ:
os.environ["PRECIOUSCLI"] = BUILDDIR + '/src/precious-cli' + EXEEXT
if EXEEXT == ".exe" and "-win" not in opts:
# https://github.com/bitcoin/bitcoin/commit/d52802551752140cf41f0d9a225a43e84404d3e9
# https://github.com/bitcoin/bitcoin/pull/5677#issuecomment-136646964
print("Win tests currently disabled by default. Use -win option to enable")
sys.exit(0)
if not (ENABLE_WALLET == 1 and ENABLE_UTILS == 1 and ENABLE_BITCOIND == 1):
print("No rpc tests to run. Wallet, utils, and bitcoind must all be enabled")
sys.exit(0)
# python3-zmq may not be installed. Handle this gracefully and with some helpful info
if ENABLE_ZMQ:
try:
import zmq
except ImportError:
print("ERROR: \"import zmq\" failed. Set ENABLE_ZMQ=0 or "
"to run zmq tests, see dependency info in /qa/README.md.")
# ENABLE_ZMQ=0
raise
testScripts = [
# longest test should go first, to favor running tests in parallel
'p2p-fullblocktest.py',
'walletbackup.py',
'bip68-112-113-p2p.py',
'wallet.py',
'wallet-hd.py',
'wallet-dump.py',
'listtransactions.py',
'receivedby.py',
'mempool_resurrect_test.py',
'txn_doublespend.py --mineblock',
'txn_clone.py',
'getchaintips.py',
'rawtransactions.py',
'rest.py',
'mempool_spendcoinbase.py',
'mempool_reorg.py',
'mempool_limit.py',
'httpbasics.py',
'multi_rpc.py',
'zapwallettxes.py',
'proxy_test.py',
'merkle_blocks.py',
'fundrawtransaction.py',
'signrawtransactions.py',
'nodehandling.py',
'reindex.py',
'decodescript.py',
'blockchain.py',
'disablewallet.py',
'sendheaders.py',
'keypool.py',
'prioritise_transaction.py',
'invalidblockrequest.py',
'invalidtxrequest.py',
'abandonconflict.py',
'p2p-versionbits-warning.py',
'p2p-segwit.py',
'segwit.py',
'importprunedfunds.py',
'signmessages.py',
'p2p-compactblocks.py',
'nulldummy.py',
'test_script_address2.py'
]
if ENABLE_ZMQ:
testScripts.append('zmq_test.py')
testScriptsExt = [
'bip9-softforks.py',
'bip65-cltv.py',
'bip65-cltv-p2p.py',
'bip68-sequence.py',
'bipdersig-p2p.py',
'bipdersig.py',
'getblocktemplate_longpoll.py',
'getblocktemplate_proposals.py',
'txn_doublespend.py',
'txn_clone.py --mineblock',
'forknotify.py',
'invalidateblock.py',
'rpcbind_test.py',
'smartfees.py',
'maxblocksinflight.py',
'p2p-acceptblock.py',
'mempool_packages.py',
'maxuploadtarget.py',
'replace-by-fee.py',
'p2p-feefilter.py',
'pruning.py', # leave pruning last as it takes a REALLY long time
]
def runtests():
test_list = []
if '-extended' in opts:
test_list = testScripts + testScriptsExt
elif len(opts) == 0 or (len(opts) == 1 and "-win" in opts):
test_list = testScripts
else:
for t in testScripts + testScriptsExt:
if t in opts or re.sub(".py$", "", t) in opts:
test_list.append(t)
if print_help:
# Only print help of the first script and exit
subprocess.check_call((RPC_TESTS_DIR + test_list[0]).split() + ['-h'])
sys.exit(0)
coverage = None
if ENABLE_COVERAGE:
coverage = RPCCoverage()
print("Initializing coverage directory at %s\n" % coverage.dir)
flags = ["--srcdir=%s/src" % BUILDDIR] + passon_args
if coverage:
flags.append(coverage.flag)
if len(test_list) > 1 and run_parallel > 1:
# Populate cache
subprocess.check_output([RPC_TESTS_DIR + 'create_cache.py'] + flags)
#Run Tests
max_len_name = len(max(test_list, key=len))
time_sum = 0
time0 = time.time()
job_queue = RPCTestHandler(run_parallel, test_list, flags)
results = BOLD[1] + "%s | %s | %s\n\n" % ("TEST".ljust(max_len_name), "PASSED", "DURATION") + BOLD[0]
all_passed = True
for _ in range(len(test_list)):
(name, stdout, stderr, passed, duration) = job_queue.get_next()
all_passed = all_passed and passed
time_sum += duration
print('\n' + BOLD[1] + name + BOLD[0] + ":")
print(stdout)
print('stderr:\n' if not stderr == '' else '', stderr)
results += "%s | %s | %s s\n" % (name.ljust(max_len_name), str(passed).ljust(6), duration)
print("Pass: %s%s%s, Duration: %s s\n" % (BOLD[1], passed, BOLD[0], duration))
results += BOLD[1] + "\n%s | %s | %s s (accumulated)" % ("ALL".ljust(max_len_name), str(all_passed).ljust(6), time_sum) + BOLD[0]
print(results)
print("\nRuntime: %s s" % (int(time.time() - time0)))
if coverage:
coverage.report_rpc_coverage()
print("Cleaning up coverage data")
coverage.cleanup()
sys.exit(not all_passed)
class RPCTestHandler:
"""
    Trigger the test scripts passed in via the list.
"""
def __init__(self, num_tests_parallel, test_list=None, flags=None):
assert(num_tests_parallel >= 1)
self.num_jobs = num_tests_parallel
self.test_list = test_list
self.flags = flags
self.num_running = 0
# In case there is a graveyard of zombie bitcoinds, we can apply a
# pseudorandom offset to hopefully jump over them.
# (625 is PORT_RANGE/MAX_NODES)
self.portseed_offset = int(time.time() * 1000) % 625
self.jobs = []
def get_next(self):
while self.num_running < self.num_jobs and self.test_list:
# Add tests
self.num_running += 1
t = self.test_list.pop(0)
port_seed = ["--portseed={}".format(len(self.test_list) + self.portseed_offset)]
log_stdout = tempfile.SpooledTemporaryFile(max_size=2**16)
log_stderr = tempfile.SpooledTemporaryFile(max_size=2**16)
self.jobs.append((t,
time.time(),
subprocess.Popen((RPC_TESTS_DIR + t).split() + self.flags + port_seed,
universal_newlines=True,
stdout=log_stdout,
stderr=log_stderr),
log_stdout,
log_stderr))
if not self.jobs:
raise IndexError('pop from empty list')
while True:
# Return first proc that finishes
time.sleep(.5)
for j in self.jobs:
(name, time0, proc, log_out, log_err) = j
if proc.poll() is not None:
log_out.seek(0), log_err.seek(0)
[stdout, stderr] = [l.read().decode('utf-8') for l in (log_out, log_err)]
log_out.close(), log_err.close()
passed = stderr == "" and proc.returncode == 0
self.num_running -= 1
self.jobs.remove(j)
return name, stdout, stderr, passed, int(time.time() - time0)
print('.', end='', flush=True)
class RPCCoverage(object):
"""
Coverage reporting utilities for pull-tester.
Coverage calculation works by having each test script subprocess write
coverage files into a particular directory. These files contain the RPC
commands invoked during testing, as well as a complete listing of RPC
commands per `bitcoin-cli help` (`rpc_interface.txt`).
After all tests complete, the commands run are combined and diff'd against
the complete list to calculate uncovered RPC commands.
See also: qa/rpc-tests/test_framework/coverage.py
"""
def __init__(self):
self.dir = tempfile.mkdtemp(prefix="coverage")
self.flag = '--coveragedir=%s' % self.dir
def report_rpc_coverage(self):
"""
Print out RPC commands that were unexercised by tests.
"""
uncovered = self._get_uncovered_rpc_commands()
if uncovered:
print("Uncovered RPC commands:")
print("".join((" - %s\n" % i) for i in sorted(uncovered)))
else:
print("All RPC commands covered.")
def cleanup(self):
return shutil.rmtree(self.dir)
def _get_uncovered_rpc_commands(self):
"""
Return a set of currently untested RPC commands.
"""
        # This is shared from `qa/rpc-tests/test_framework/coverage.py`
REFERENCE_FILENAME = 'rpc_interface.txt'
COVERAGE_FILE_PREFIX = 'coverage.'
coverage_ref_filename = os.path.join(self.dir, REFERENCE_FILENAME)
coverage_filenames = set()
all_cmds = set()
covered_cmds = set()
if not os.path.isfile(coverage_ref_filename):
raise RuntimeError("No coverage reference found")
with open(coverage_ref_filename, 'r') as f:
all_cmds.update([i.strip() for i in f.readlines()])
for root, dirs, files in os.walk(self.dir):
for filename in files:
if filename.startswith(COVERAGE_FILE_PREFIX):
coverage_filenames.add(os.path.join(root, filename))
for filename in coverage_filenames:
with open(filename, 'r') as f:
covered_cmds.update([i.strip() for i in f.readlines()])
return all_cmds - covered_cmds
if __name__ == '__main__':
runtests()
|
|
"""Support for Coinbase sensors."""
from __future__ import annotations
import logging
from homeassistant.components.sensor import SensorEntity, SensorStateClass
from homeassistant.config_entries import ConfigEntry
from homeassistant.const import ATTR_ATTRIBUTION
from homeassistant.core import HomeAssistant
from homeassistant.helpers.device_registry import DeviceEntryType
from homeassistant.helpers.entity import DeviceInfo
from homeassistant.helpers.entity_platform import AddEntitiesCallback
from .const import (
API_ACCOUNT_AMOUNT,
API_ACCOUNT_BALANCE,
API_ACCOUNT_CURRENCY,
API_ACCOUNT_ID,
API_ACCOUNT_NAME,
API_ACCOUNT_NATIVE_BALANCE,
API_RATES,
API_RESOURCE_TYPE,
API_TYPE_VAULT,
CONF_CURRENCIES,
CONF_EXCHANGE_RATES,
DOMAIN,
)
_LOGGER = logging.getLogger(__name__)
ATTR_NATIVE_BALANCE = "Balance in native currency"
CURRENCY_ICONS = {
"BTC": "mdi:currency-btc",
"ETH": "mdi:currency-eth",
"EUR": "mdi:currency-eur",
"LTC": "mdi:litecoin",
"USD": "mdi:currency-usd",
}
DEFAULT_COIN_ICON = "mdi:cash"
ATTRIBUTION = "Data provided by coinbase.com"
async def async_setup_entry(
hass: HomeAssistant,
config_entry: ConfigEntry,
async_add_entities: AddEntitiesCallback,
) -> None:
"""Set up Coinbase sensor platform."""
instance = hass.data[DOMAIN][config_entry.entry_id]
entities: list[SensorEntity] = []
provided_currencies = [
account[API_ACCOUNT_CURRENCY]
for account in instance.accounts
if account[API_RESOURCE_TYPE] != API_TYPE_VAULT
]
desired_currencies = []
if CONF_CURRENCIES in config_entry.options:
desired_currencies = config_entry.options[CONF_CURRENCIES]
exchange_base_currency = instance.exchange_rates[API_ACCOUNT_CURRENCY]
for currency in desired_currencies:
if currency not in provided_currencies:
_LOGGER.warning(
"The currency %s is no longer provided by your account, please check "
"your settings in Coinbase's developer tools",
currency,
)
continue
entities.append(AccountSensor(instance, currency))
if CONF_EXCHANGE_RATES in config_entry.options:
for rate in config_entry.options[CONF_EXCHANGE_RATES]:
entities.append(
ExchangeRateSensor(
instance,
rate,
exchange_base_currency,
)
)
async_add_entities(entities)
class AccountSensor(SensorEntity):
"""Representation of a Coinbase.com sensor."""
def __init__(self, coinbase_data, currency):
"""Initialize the sensor."""
self._coinbase_data = coinbase_data
self._currency = currency
for account in coinbase_data.accounts:
if (
account[API_ACCOUNT_CURRENCY] == currency
and account[API_RESOURCE_TYPE] != API_TYPE_VAULT
):
self._name = f"Coinbase {account[API_ACCOUNT_NAME]}"
self._id = (
f"coinbase-{account[API_ACCOUNT_ID]}-wallet-"
f"{account[API_ACCOUNT_CURRENCY]}"
)
self._state = account[API_ACCOUNT_BALANCE][API_ACCOUNT_AMOUNT]
self._unit_of_measurement = account[API_ACCOUNT_CURRENCY]
self._native_balance = account[API_ACCOUNT_NATIVE_BALANCE][
API_ACCOUNT_AMOUNT
]
self._native_currency = account[API_ACCOUNT_NATIVE_BALANCE][
API_ACCOUNT_CURRENCY
]
break
self._attr_state_class = SensorStateClass.TOTAL
self._attr_device_info = DeviceInfo(
configuration_url="https://www.coinbase.com/settings/api",
entry_type=DeviceEntryType.SERVICE,
identifiers={(DOMAIN, self._coinbase_data.user_id)},
manufacturer="Coinbase.com",
name=f"Coinbase {self._coinbase_data.user_id[-4:]}",
)
@property
def name(self):
"""Return the name of the sensor."""
return self._name
@property
def unique_id(self):
"""Return the Unique ID of the sensor."""
return self._id
@property
def native_value(self):
"""Return the state of the sensor."""
return self._state
@property
def native_unit_of_measurement(self):
"""Return the unit of measurement this sensor expresses itself in."""
return self._unit_of_measurement
@property
def icon(self):
"""Return the icon to use in the frontend, if any."""
return CURRENCY_ICONS.get(self._unit_of_measurement, DEFAULT_COIN_ICON)
@property
def extra_state_attributes(self):
"""Return the state attributes of the sensor."""
return {
ATTR_ATTRIBUTION: ATTRIBUTION,
ATTR_NATIVE_BALANCE: f"{self._native_balance} {self._native_currency}",
}
def update(self):
"""Get the latest state of the sensor."""
self._coinbase_data.update()
for account in self._coinbase_data.accounts:
if (
account[API_ACCOUNT_CURRENCY] == self._currency
and account[API_RESOURCE_TYPE] != API_TYPE_VAULT
):
self._state = account[API_ACCOUNT_BALANCE][API_ACCOUNT_AMOUNT]
self._native_balance = account[API_ACCOUNT_NATIVE_BALANCE][
API_ACCOUNT_AMOUNT
]
self._native_currency = account[API_ACCOUNT_NATIVE_BALANCE][
API_ACCOUNT_CURRENCY
]
break
class ExchangeRateSensor(SensorEntity):
"""Representation of a Coinbase.com sensor."""
def __init__(self, coinbase_data, exchange_currency, exchange_base):
"""Initialize the sensor."""
self._coinbase_data = coinbase_data
self.currency = exchange_currency
self._name = f"{exchange_currency} Exchange Rate"
self._id = f"coinbase-{coinbase_data.user_id}-xe-{exchange_currency}"
self._state = round(
1 / float(self._coinbase_data.exchange_rates[API_RATES][self.currency]), 2
)
self._unit_of_measurement = exchange_base
self._attr_state_class = SensorStateClass.MEASUREMENT
self._attr_device_info = DeviceInfo(
configuration_url="https://www.coinbase.com/settings/api",
entry_type=DeviceEntryType.SERVICE,
identifiers={(DOMAIN, self._coinbase_data.user_id)},
manufacturer="Coinbase.com",
name=f"Coinbase {self._coinbase_data.user_id[-4:]}",
)
@property
def name(self):
"""Return the name of the sensor."""
return self._name
@property
def unique_id(self):
"""Return the unique ID of the sensor."""
return self._id
@property
def native_value(self):
"""Return the state of the sensor."""
return self._state
@property
def native_unit_of_measurement(self):
"""Return the unit of measurement this sensor expresses itself in."""
return self._unit_of_measurement
@property
def icon(self):
"""Return the icon to use in the frontend, if any."""
return CURRENCY_ICONS.get(self.currency, DEFAULT_COIN_ICON)
@property
def extra_state_attributes(self):
"""Return the state attributes of the sensor."""
return {ATTR_ATTRIBUTION: ATTRIBUTION}
def update(self):
"""Get the latest state of the sensor."""
self._coinbase_data.update()
        self._state = round(
            1 / float(self._coinbase_data.exchange_rates[API_RATES][self.currency]), 2
        )
|
|
#!/usr/bin/env python
"""
@package mi.dataset.parser
@file mi/dataset/parser/flort_dj_sio.py
@author Emily Hahn
@brief An flort-dj SIO specific dataset agent parser
"""
__author__ = 'Emily Hahn'
__license__ = 'Apache 2.0'
import re
from mi.core.log import get_logger
log = get_logger()
from mi.core.common import BaseEnum
from mi.core.instrument.data_particle import DataParticle, DataParticleKey, DataParticleValue
from mi.dataset.parser.sio_mule_common import SioParser, SIO_HEADER_MATCHER
from mi.core.exceptions import RecoverableSampleException, UnexpectedDataException
class DataParticleType(BaseEnum):
SAMPLE = 'flort_dj_sio_instrument'
SAMPLE_RECOVERED = 'flort_dj_sio_instrument_recovered'
class FlortdParserDataParticleKey(BaseEnum):
CONTROLLER_TIMESTAMP = 'sio_controller_timestamp'
DATE_STRING = 'date_string'
TIME_STRING = 'time_string'
MEASUREMENT_WAVELENGTH_BETA = 'measurement_wavelength_beta'
RAW_SIGNAL_BETA = 'raw_signal_beta'
MEASUREMENT_WAVELENTH_CHL = 'measurement_wavelength_chl'
RAW_SIGNAL_CHL = 'raw_signal_chl'
MEASUREMENT_WAVELENGTH_CDOM = 'measurement_wavelength_cdom'
RAW_SIGNAL_CDOM = 'raw_signal_cdom'
RAW_INTERNAL_TEMP = 'raw_internal_temp'
# the first two groups make up the sample timestamp (date, time),
# followed by 7 integer data values, which may be marked as not present by '--'
DATA_REGEX = r'(\d\d/\d\d/\d\d)\t(\d\d:\d\d:\d\d)\t(\d+|--)\t(\d+|--)\t(\d+|--)\t(\d+|--)\t(\d+|--)\t(\d+|--)\t(\d+|--)'
DATA_MATCHER = re.compile(DATA_REGEX)
# match the timestamp from the sio mule header
TIMESTAMP_REGEX = b'[0-9A-Fa-f]{8}'
TIMESTAMP_MATCHER = re.compile(TIMESTAMP_REGEX)
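# Illustrative example (hedged, not taken from a real deployment): a record such as
#   '01/31/14\t23:59:59\t700\t4130\t695\t928\t460\t4130\t547'
# matches DATA_REGEX (date, time, then 7 integer channels), and missing channels may
# appear as '--'. The 8 hex characters prepended from the SIO header (e.g. '51EC763C')
# match TIMESTAMP_REGEX and decode to seconds since 1970 for the internal timestamp.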
class FlortdCommonParserDataParticle(DataParticle):
"""
Class for parsing data from the FLORT-D instrument
"""
def __init__(self, raw_data,
port_timestamp=None,
internal_timestamp=None,
preferred_timestamp=DataParticleKey.PORT_TIMESTAMP,
quality_flag=DataParticleValue.OK,
new_sequence=None):
super(FlortdCommonParserDataParticle, self).__init__(raw_data,
port_timestamp,
internal_timestamp,
preferred_timestamp,
quality_flag,
new_sequence)
# the raw data has the timestamp from the sio header pre-pended to it, match the first 8 bytes
timestamp_match = TIMESTAMP_MATCHER.match(self.raw_data[:8])
if not timestamp_match:
raise RecoverableSampleException(
"FlortdParserDataParticle: No regex match of timestamp [%s]" % self.raw_data[:8])
# now match the flort data, excluding the sio header timestamp in the first 8 bytes
self._data_match = DATA_MATCHER.match(self.raw_data[8:])
if not self._data_match:
raise RecoverableSampleException("FlortdParserDataParticle: No regex match of \
parsed sample data [%s]", self.raw_data[8:])
# use the timestamp from the sio header as internal timestamp
sec_since_1970 = int(self.raw_data[:8], 16)
self.set_internal_timestamp(unix_time=sec_since_1970)
def _build_parsed_values(self):
"""
Take something in the binary data values and turn it into a
particle with the appropriate tag.
throws SampleException If there is a problem with sample creation
"""
# match the data inside the wrapper
result = []
if self._data_match:
result = [self._encode_value(FlortdParserDataParticleKey.CONTROLLER_TIMESTAMP, self.raw_data[:8],
FlortdParserDataParticle.encode_int_16),
self._encode_value(FlortdParserDataParticleKey.DATE_STRING, self._data_match.group(1), str),
self._encode_value(FlortdParserDataParticleKey.TIME_STRING, self._data_match.group(2), str),
self._encode_value(FlortdParserDataParticleKey.MEASUREMENT_WAVELENGTH_BETA,
self._data_match.group(3),
FlortdParserDataParticle.encode_int_or_dash),
self._encode_value(FlortdParserDataParticleKey.RAW_SIGNAL_BETA, self._data_match.group(4),
FlortdParserDataParticle.encode_int_or_dash),
self._encode_value(FlortdParserDataParticleKey.MEASUREMENT_WAVELENTH_CHL,
self._data_match.group(5),
FlortdParserDataParticle.encode_int_or_dash),
self._encode_value(FlortdParserDataParticleKey.RAW_SIGNAL_CHL, self._data_match.group(6),
FlortdParserDataParticle.encode_int_or_dash),
self._encode_value(FlortdParserDataParticleKey.MEASUREMENT_WAVELENGTH_CDOM,
self._data_match.group(7),
FlortdParserDataParticle.encode_int_or_dash),
self._encode_value(FlortdParserDataParticleKey.RAW_SIGNAL_CDOM, self._data_match.group(8),
FlortdParserDataParticle.encode_int_or_dash),
self._encode_value(FlortdParserDataParticleKey.RAW_INTERNAL_TEMP, self._data_match.group(9),
FlortdParserDataParticle.encode_int_or_dash)]
return result
@staticmethod
def encode_int_16(hex_str):
return int(hex_str, 16)
@staticmethod
def encode_int_or_dash(val_str):
"""
Need to handle missing values as "--" instead of an int
"""
if val_str == "--":
return None
return int(val_str)
class FlortdParserDataParticle(FlortdCommonParserDataParticle):
"""
Class for parsing telemetered FLORT-D data
"""
_data_particle_type = DataParticleType.SAMPLE
class FlortdRecoveredParserDataParticle(FlortdCommonParserDataParticle):
"""
Class for parsing recovered FLORT-D data
"""
_data_particle_type = DataParticleType.SAMPLE_RECOVERED
class FlortDjSioParser(SioParser):
def parse_chunks(self):
"""
Parse out any pending data chunks in the chunker. If
it is a valid data piece, build a particle, update the position and
timestamp. Go until the chunker has no more valid data.
@retval a list of tuples with sample particles encountered in this
        parsing, plus the state. An empty list if nothing was parsed.
"""
result_particles = []
(nd_timestamp, non_data, non_start, non_end) = self._chunker.get_next_non_data_with_index(clean=False)
(timestamp, chunk, start, end) = self._chunker.get_next_data_with_index()
self.handle_non_data(non_data, non_end, start)
while chunk is not None:
header_match = SIO_HEADER_MATCHER.match(chunk)
if header_match.group(1) == 'FL':
data_match = DATA_MATCHER.search(chunk)
if data_match:
log.debug('Found data match in chunk %s', chunk[1:32])
# particle-ize the data block received, return the record
# prepend the timestamp from sio mule header to the flort raw data,
# which is stored in header_match.group(3)
sample = self._extract_sample(self._particle_class, None,
header_match.group(3) + data_match.group(0),
None)
if sample:
# create particle
result_particles.append(sample)
else:
# We found a line in the file that was unexpected. Since we are continuing,
# just log a warning.
warn_str = "Unexpected data in beginning of header: "
log.warn(warn_str + "%s", header_match.group(1))
message = warn_str + header_match.group(1)
self._exception_callback(UnexpectedDataException(message))
(nd_timestamp, non_data, non_start, non_end) = self._chunker.get_next_non_data_with_index(clean=False)
(timestamp, chunk, start, end) = self._chunker.get_next_data_with_index()
self.handle_non_data(non_data, non_end, start)
return result_particles
def handle_non_data(self, non_data, non_end, start):
"""
handle data in the non_data chunker queue
@param non_data data in the non data chunker queue
@param non_end ending index of the non_data chunk
@param start start index of the next data chunk
"""
# we can get non_data after our current chunk, check that this chunk is before that chunk
if non_data is not None and non_end <= start:
log.error("Found %d bytes of unexpected non-data:%s", len(non_data), non_data)
self._exception_callback(UnexpectedDataException("Found %d bytes of un-expected non-data:%s" %
(len(non_data), non_data)))
|
|
#!/usr/bin/env python
# This document is part of Pelagos Data
# https://github.com/skytruth/pelagos-data
# =========================================================================== #
#
# The MIT License (MIT)
#
# Copyright (c) 2014 SkyTruth
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
# =========================================================================== #
# TODO: Populate docstring
"""
Clip arbitrary regions to quad tree levels
"""
import multiprocessing
import os
from os.path import *
import sys
try:
from osgeo import ogr
from osgeo import osr
except ImportError:
import ogr
import osr
ogr.UseExceptions()
osr.UseExceptions()
#/* ======================================================================= */#
#/* Build information
#/* ======================================================================= */#
__version__ = '0.1-dev'
__release__ = 'August 28, 2014'
__author__ = 'Kevin D. Wurster'
__source__ = 'https://github.com/SkyTruth/pelagos-data'
__docname__ = basename(__file__)
__license__ = '''
The MIT License (MIT)
Copyright (c) 2014 SkyTruth
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
'''
#/* ======================================================================= */#
#/* Define print_usage() function
#/* ======================================================================= */#
def print_usage():
"""
Command line usage information
:return: 1 for exit code purposes
:rtype: int
"""
# TODO: Populate usage
print("""
Usage:
{0} [-of ogr_driver] [-lco option=value] [-dsco option=value] [-cfn]
{1} [-gl layer_name|layer1,layer2,...] [-rl layer_name|layer1,layer2]
{1} -g grid_file.ext -r region_file.ext -o output_file.ext
""".format(__docname__, " " * len(__docname__)))
return 1
#/* ======================================================================= */#
#/* Define print_license() function
#/* ======================================================================= */#
def print_license():
"""
Print out license information
:return: 1 for exit code purposes
:rtype: int
"""
print(__license__)
return 1
#/* ======================================================================= */#
#/* Define print_help() function
#/* ======================================================================= */#
def print_help():
"""
Detailed help information
:return: 1 for exit code purposes
:rtype: int
"""
# TODO: Populate help
print("""
Help: {0}
------{1}
{2}
""".format(__docname__, '-' * len(__docname__), main.__doc__))
return 1
#/* ======================================================================= */#
#/* Define print_help_info() function
#/* ======================================================================= */#
def print_help_info():
"""
Print a list of help related flags
:return: 1 for exit code purposes
:rtype: int
"""
print("""
Help flags:
--help More detailed description of this utility
--usage Arguments, parameters, flags, options, etc.
--version Version and ownership information
--license License information
""")
return 1
#/* ======================================================================= */#
#/* Define print_version() function
#/* ======================================================================= */#
def print_version():
"""
Print script version information
:return: 1 for exit code purposes
:rtype: int
"""
print("""
%s version %s - released %s
""" % (__docname__, __version__, __release__))
return 1
#/* ======================================================================= */#
#/* Define ring2poly() function
#/* ======================================================================= */#
def ring2poly(ring, add_z=True):
"""
Convert a linearring to a ogr.wkbPolygon
"""
    if ring.GetGeometryType() != ogr.wkbLinearRing:
        raise ValueError("ERROR: Wrong type - should be '%s' not '%s'" % (ogr.wkbLinearRing, ring.GetGeometryType()))
    ring = ring.Clone()
    if add_z:
        # use the Z-aware polygon type when a Z dimension is requested
        geom_type = ogr.wkbPolygon25D
    else:
        geom_type = ogr.wkbPolygon
temp_geom = ogr.Geometry(geom_type)
temp_geom.AddGeometry(ring)
return temp_geom.Clone()
#/* ======================================================================= */#
#/* Define is_ring_poly() function
#/* ======================================================================= */#
def is_ring_poly(ring):
"""
Determine whether or not a ogr.wkbLinearRing is closed, which means it could
be a polygon
"""
    if ring.GetGeometryType() != ogr.wkbLinearRing:
        raise ValueError("ERROR: Wrong type - should be '%s' not '%s'" % (ogr.wkbLinearRing, ring.GetGeometryType()))
p_indexes = range(ring.GetPointCount())
if ring.GetPoint(min(p_indexes)) == ring.GetPoint(max(p_indexes)):
return True
else:
return False
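#/* ======================================================================= */#
#/* Example usage (illustrative sketch)
#/* ======================================================================= */#
# Hedged example, not called anywhere in this script: build a closed ring and run it
# through the helpers above.
#
#   ring = ogr.Geometry(ogr.wkbLinearRing)
#   for x, y in [(0, 0), (0, 1), (1, 1), (1, 0), (0, 0)]:
#       ring.AddPoint(x, y)
#   is_ring_poly(ring)      # -> True, first and last points coincide
#   poly = ring2poly(ring)  # -> polygon geometry wrapping the ring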
#/* ======================================================================= */#
#/* Define main() function
#/* ======================================================================= */#
def main(args):
#/* ----------------------------------------------------------------------- */#
#/* Defaults
#/* ----------------------------------------------------------------------- */#
output_driver_name = "ESRI Shapefile"
processing_algorithm = 'combined'
check_field_names = False
#/* ----------------------------------------------------------------------- */#
#/* Containers
#/* ----------------------------------------------------------------------- */#
# options_processing_algorithm = ('region', 'grid', 'combined', 'regionnohull')
    options_processing_algorithm = ('combined',)
grid_file = None
grid_layer_name = None
region_file = None
region_layer_name = None
output_file = None
output_dsco = []
output_lco = []
#/* ----------------------------------------------------------------------- */#
#/* Parse arguments
#/* ----------------------------------------------------------------------- */#
i = 0
arg = None
arg_error = False
while i < len(args):
try:
arg = args[i]
# Help arguments
            if arg in ('--help-info', '-help-info', '--helpinfo', '-helpinfo', '--h', '-h'):
return print_help_info()
elif arg in ('--help', '-help'):
return print_help()
elif arg in ('--usage', '-usage'):
return print_usage()
elif arg in ('--version', '-version'):
return print_version()
            elif arg in ('--license', '-license'):
return print_license()
# Specify input files
elif arg in ('-g', '-grid'):
i += 2
grid_file = abspath(normpath(expanduser(args[i - 1])))
elif arg in ('-gl', '-grid-layer'):
i += 2
grid_layer_name = args[i - 1]
elif arg in ('-r', '-region'):
i += 2
region_file = abspath(normpath(expanduser(args[i - 1])))
elif arg in ('-rl', '-region-layer'):
i += 2
region_layer_name = args[i - 1]
# Output file
elif arg in ('-o', '-output'):
i += 2
output_file = abspath(normpath(expanduser(args[i - 1])))
# OGR output options
elif arg in ('-of', '-output-format'):
i += 2
output_driver_name = args[i - 1]
elif arg in ('-lco', '-lyr-creation-option'):
i += 2
output_lco.append(args[i - 1])
elif arg in ('-dsco', '-ds-creation-option'):
i += 2
output_dsco.append(args[i - 1])
# Additional options
elif arg in ('-cfn', '-check-field-names'):
i += 1
check_field_names = True
# Positional arguments and errors
else:
# Invalid argument
i += 1
arg_error = True
print("ERROR: Invalid argument: %s" % arg)
# This catches several conditions:
# 1. The last argument is a flag that requires parameters but the user did not supply the parameter
# 2. The arg parser did not properly consume all parameters for an argument
# 3. The arg parser did not properly iterate the 'i' variable
# 4. An argument split on '=' doesn't have anything after '=' - e.g. '--output-file='
except (IndexError, ValueError):
i += 1
arg_error = True
print("ERROR: An argument has invalid parameters: %s" % arg)
#/* ----------------------------------------------------------------------- */#
#/* Validate parameters
#/* ----------------------------------------------------------------------- */#
bail = False
# Check arguments
if arg_error:
bail = True
print("ERROR: Did not successfully parse arguments")
    # Check output driver name
if not isinstance(output_driver_name, str):
bail = True
print("ERROR: Invalid output driver name: %s" % output_driver_name)
# Check input grid file
if not isinstance(grid_file, str):
bail = True
print("ERROR: Invalid input grid file: %s" % grid_file)
    elif not os.access(grid_file, os.R_OK):
        bail = True
        print("ERROR: Can't access input grid file: %s" % grid_file)
# Check input region file
if not isinstance(region_file, str):
bail = True
print("ERROR: Invalid input region file: %s" % region_file)
elif not os.access(region_file, os.R_OK):
bail = True
print("ERROR: Can't access input region file: %s" % region_file)
# Check output file
if not isinstance(output_file, str):
bail = True
print("ERROR: Invalid output file: %s" % output_file)
elif os.path.exists(output_file):
bail = True
print("ERROR: Output file exists: %s" % output_file)
# elif not os.access(dirname(output_file), os.W_OK):
# bail = True
# print("ERROR: Need write access: %s" % dirname(output_file))
# Check additional options
if processing_algorithm.lower() not in options_processing_algorithm:
bail = True
print("ERROR: Invalid processing algorithm: %s" % processing_algorithm)
print(" Options: %s" % ', '.join(options_processing_algorithm))
# Exit if something did not pass validation
if bail:
return 1
#/* ----------------------------------------------------------------------- */#
#/* Prep input and output OGR objects
#/* ----------------------------------------------------------------------- */#
# Open grid file
bail = False
grid_ds = ogr.Open(grid_file)
if grid_ds is None:
bail = True
print("ERROR: Could not open grid file: %s" % grid_file)
# Open region file
region_ds = ogr.Open(region_file)
if region_ds is None:
bail = True
print("ERROR: Could not open region file: %s" % region_file)
# Get output driver and output datasource
output_ds = None
output_driver = ogr.GetDriverByName(output_driver_name)
if output_driver is None:
bail = True
print("ERROR: Invalid output driver: %s" % output_driver_name)
elif not output_driver.TestCapability('CreateDataSource'):
bail = True
print("ERROR: Output driver does not support datasource creation: %s" % output_driver_name)
    else:
        output_ds = output_driver.CreateDataSource(output_file, options=output_dsco)
        if output_ds is None:
            bail = True
            print("ERROR: Couldn't create output datasource: %s" % output_file)
        # Make sure output datasource can create layers
        elif not output_ds.TestCapability('CreateLayer'):
            bail = True
            print("ERROR: Output driver does not support layer creation: %s" % output_driver_name)
# Get grid layers to process
all_grid_layers = None
try:
if grid_layer_name is None:
all_grid_layers = [grid_ds.GetLayer(i) for i in range(grid_ds.GetLayerCount())]
else:
all_grid_layers = [grid_ds.GetLayerByName(i) for i in grid_layer_name.split(',')]
except RuntimeError:
bail = True
print("ERROR: Invalid grid layer(s): %s" % grid_layer_name)
# Get region layers to process
all_region_layers = None
try:
if region_layer_name is None:
all_region_layers = [region_ds.GetLayerByIndex(0)]
else:
all_region_layers = [region_ds.GetLayerByName(i) for i in region_layer_name.split(',')]
except RuntimeError:
bail = True
print("ERROR: Invalid region layer(s): %s" % region_layer_name)
# Make sure all region layers are polygon or multipolygon (or 25D variants)
    for region_layer in (all_region_layers or []):
if region_layer.GetGeomType() not in (ogr.wkbPolygon, ogr.wkbPolygon25D,
ogr.wkbMultiPolygon, ogr.wkbMultiPolygon25D):
bail = True
print("ERROR: Region layer '%s' is not a multi/polygon/25D" % region_layer.GetName())
# Make sure all grid layers are polygon or multipolygon (or 25D variants)
    for grid_layer in (all_grid_layers or []):
if grid_layer.GetGeomType() not in (ogr.wkbPolygon, ogr.wkbPolygon25D,
ogr.wkbMultiPolygon, ogr.wkbMultiPolygon25D):
bail = True
print("ERROR: Grid layer '%s' is not a multi/polygon/25D" % grid_layer.GetName())
# Encountered an error - exit but close everything first
if bail:
region_layer = None
grid_layer = None
output_driver = None
output_ds = None
all_region_layers = None
all_grid_layers = None
grid_ds = None
region_ds = None
return 1
#/* ----------------------------------------------------------------------- */#
#/* Process data
#/* ----------------------------------------------------------------------- */#
print("Output datasource: %s" % output_file)
# Process every combination of region layer and grid layer
region_layer_counter = 0
grid_layer_counter = 0
for region_layer in all_region_layers:
# Update user
region_layer_counter += 1
print("Processing region layer %s/%s: %s" % (region_layer_counter, len(all_region_layers),
region_layer.GetName()))
for grid_layer in all_grid_layers:
# Get field definitions for processing
# Make sure there are no duplicate fields
# Get feature definitions
region_feature_def = region_layer.GetLayerDefn()
grid_feature_def = grid_layer.GetLayerDefn()
# Get field definitions
region_field_definitions = [region_feature_def.GetFieldDefn(i) for i in range(region_feature_def.GetFieldCount())]
grid_field_definitions = [grid_feature_def.GetFieldDefn(i) for i in range(grid_feature_def.GetFieldCount())]
# Get list of fields - used to check for duplicate fields and used to populate output features
region_layer_fields = [i.GetName() for i in region_field_definitions]
grid_layer_fields = [i.GetName() for i in grid_field_definitions]
# Cache SRS objects
            grid_layer_srs = grid_layer.GetSpatialRef()
region_layer_srs = region_layer.GetSpatialRef()
# Check for duplicate fields
if check_field_names:
bail = False
for r_field in region_layer_fields:
if r_field in grid_layer_fields:
bail = True
print("ERROR: Duplicate field: %s" % r_field)
# TODO: Implement check for homogeneous SRS
# if grid_layer_srs.IsSameGeogCS(region_layer_srs) is 0 or grid_layer_srs.IsSameVertCS(region_layer_srs) is 0:
# bail = True
# print("ERROR: Non-matching CRS for region layer '%s' and grid layer '%s'"
# % (region_layer.GetName(), grid_layer.GetName()))
if bail:
grid_srs = None
grid_layer_srs = None
region_layer_srs = None
r_field = None
region_feature_def = None
grid_feature_def = None
region_field_definitions = None
grid_field_definitions = None
grid_layer = None
region_layer = None
return 1
# Update user
grid_layer_counter += 1
print(" Processing grid layer %s/%s: %s" % (grid_layer_counter, len(all_grid_layers), grid_layer.GetName()))
# Create output layer
output_layer_name = region_layer.GetName() + '-' + grid_layer.GetName()
output_layer = output_ds.CreateLayer(output_layer_name, srs=grid_layer.GetSpatialRef(),
geom_type=ogr.wkbMultiPolygon, options=output_lco)
for field_def in region_field_definitions + grid_field_definitions:
output_layer.CreateField(field_def)
# Cleanup
region_field_definitions = None
grid_field_definitions = None
region_feature_def = None
grid_feature_def = None
field_def = None
# Create a coordinate transformation object if the region_layer and grid_layer are in a different SRS
            if grid_layer_srs.IsSame(region_layer_srs) != 1:
coord_transform = osr.CoordinateTransformation(region_layer_srs, grid_layer_srs)
else:
coord_transform = None
# TODO: Determine which algorithm to use - grid centric or region polygon centric
#/* ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ */#
#/* Region polygon centric algorithm
#/* ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ */#
print(" Using algorithm: %s" % processing_algorithm)
# TODO: Remove other algorithms - combined is the only one that handles the 'gridify problem'
# if processing_algorithm.lower() == 'region':
#
# # No upfront data prep
# # Uses the convex hull of the region feature being processed as a spatial index on every pass
# # Limits the number of grid cells being examined
#
# # # Create an output chunk for every intersecting geometry for grid cell
# region_feature_counter = 0
# num_region_features = len(region_layer)
# region_layer.ResetReading()
# fid_counter = -1
# for region_feature in region_layer:
#
# # Cache region geometry
# region_geom = region_feature.GetGeometryRef().Clone()
# if coord_transform is not None:
# region_geom.Transform(coord_transform)
#
# # Apply a spatial filter to the grid
# spat_filter_geom = region_geom.ConvexHull()
# grid_layer.SetSpatialFilter(spat_filter_geom)
# grid_layer.ResetReading()
#
# # Update user
# region_feature_counter += 1
# sys.stdout.write("\r\x1b[K" + " %s/%s" % (region_feature_counter, num_region_features))
# sys.stdout.flush()
#
# for grid_feature in grid_layer:
#
# grid_geom = grid_feature.GetGeometryRef()
#
# intersecting_geom = grid_geom.Intersection(region_geom)
# if not intersecting_geom.IsEmpty():
# if intersecting_geom.GetGeometryType() is ogr.wkbGeometryCollection:
# temp_geom = ogr.Geometry(ogr.wkbMultiPolygon)
# for i in range(intersecting_geom.GetGeometryCount()):
# sub_geom = intersecting_geom.GetGeometryRef(i)
# if sub_geom.GetPointCount() > 2:
# temp_geom.AddGeometry(sub_geom)
# intersecting_geom = temp_geom
# output_feature = region_feature.Clone()
# output_feature.SetGeometry(intersecting_geom)
# fid_counter += 1
# output_feature.SetFID(fid_counter)
# output_layer.CreateFeature(output_feature)
#
# # Update user - done processing a grid layer
# print(" - Done")
# output_layer.SyncToDisk()
#
# #/* ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ */#
# #/* Region polygon centric algorithm - NO CONVEX HULL
# #/* ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ */#
#
# elif processing_algorithm.lower() == 'regionnohull':
#
# # Same as above but uses the more complex region geometry as the spatial filter instead of a convex hull
#
# # # Create an output chunk for every intersecting geometry for grid cell
# region_feature_counter = 0
# num_region_features = len(region_layer)
# region_layer.ResetReading()
# fid_counter = -1
# for region_feature in region_layer:
#
# # Cache region geometry
# region_geom = region_feature.GetGeometryRef().Clone()
# if coord_transform is not None:
# region_geom.Transform(coord_transform)
#
# # Spatial filter
# grid_layer.SetSpatialFilter(region_geom)
# grid_layer.ResetReading()
#
# # Update user
# region_feature_counter += 1
# sys.stdout.write("\r\x1b[K" + " %s/%s" % (region_feature_counter, num_region_features))
# sys.stdout.flush()
#
# progress_max = len(grid_layer)
# for grid_feature in grid_layer:
#
# grid_geom = grid_feature.GetGeometryRef()
#
# intersecting_geom = grid_geom.Intersection(region_geom)
# if not intersecting_geom.IsEmpty():
# if intersecting_geom.GetGeometryType() is ogr.wkbGeometryCollection:
# temp_geom = ogr.Geometry(ogr.wkbMultiPolygon)
# for i in range(intersecting_geom.GetGeometryCount()):
# sub_geom = intersecting_geom.GetGeometryRef(i)
# if sub_geom.GetPointCount() > 2:
# temp_geom.AddGeometry(sub_geom)
# intersecting_geom = temp_geom
# output_feature = region_feature.Clone()
# output_feature.SetGeometry(intersecting_geom)
# fid_counter += 1
# output_feature.SetFID(fid_counter)
# output_layer.CreateFeature(output_feature)
#
# # Update user - done processing a grid layer
# print(" - Done")
# output_layer.SyncToDisk()
#
# #/* ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ */#
# #/* Grid centric algorithms
# #/* ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ */#
#
# elif processing_algorithm.lower() == 'grid':
#
# # Limit the number of features in the grid_layer that will be processed
# # Create a geometry collection containing one convex hull per region geometry
# # Use this geometry as a spatial filter on the grid layer
# # Be sure to reset reading afterwards
# # Get one grid feature and stamp out all intersecting region polygons
# # Repeat ...
#
# print(" Progress units are grid features")
# print(" Prepping data ...")
# limit_geom = ogr.Geometry(ogr.wkbGeometryCollection)x
# limit_geom.AssignSpatialReference(region_layer.GetSpatialRef())
# for region_feature in region_layer:
# region_geom = region_feature.GetGeometryRef()
# limit_geom.AddGeometry(region_geom.ConvexHull())
# if limit_geom.GetSpatialReference().IsSame(grid_layer_srs) is not 1:
# limit_geom.Transform(coord_transform)
# region_layer.ResetReading()
# grid_layer.SetSpatialFilter(limit_geom)
# region_feature = None
# region_geom = None
#
# # Create an output chunk for every intersecting geometry for grid cell
# grid_feature_counter = 0
# num_grid_features = len(grid_layer)
# fid_counter = -1
# for grid_feature in grid_layer:
#
# # Prep region layer
# grid_geom = grid_feature.GetGeometryRef()
# region_layer.ResetReading()
# region_layer.SetSpatialFilter(grid_geom)
#
# # Update user
# grid_feature_counter += 1
# sys.stdout.write("\r\x1b[K" + " %s/%s" % (grid_feature_counter, num_grid_features))
# sys.stdout.flush()
# for region_feature in region_layer:
#
# # Compute the intersection
# region_geom = region_feature.GetGeometryRef().Clone()
# if coord_transform is not None:
# region_geom.Transform(coord_transform)
# intersecting_geom = grid_geom.Intersection(region_geom)
#
# # Add new feature to the output layer
# if not intersecting_geom.IsEmpty():
# if intersecting_geom.GetGeometryType() is ogr.wkbGeometryCollection:
# temp_geom = ogr.Geometry(ogr.wkbMultiPolygon)
# for i in range(intersecting_geom.GetGeometryCount()):
# sub_geom = intersecting_geom.GetGeometryRef(i)
# if sub_geom.GetGeometryType() in (ogr.wkbPolygon, ogr.wkbMultiPolygon):
# temp_geom.AddGeometry(sub_geom)
# intersecting_geom = temp_geom
# output_feature = region_feature.Clone()
# output_feature.SetGeometry(intersecting_geom)
# fid_counter += 1
# output_feature.SetFID(fid_counter)
# output_layer.CreateFeature(output_feature)
#
# # Update user - done processing a grid layer
# print(" - Done")
# output_layer.SyncToDisk()
#/* ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ */#
#/* Combined
#/* ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ */#
if processing_algorithm == 'combined':
                # Create an initial spatial filter consisting of one convex hull for every input region
                # This yields a much smaller set of grid tiles that need to be examined
                # Loop through all regions, setting a spatial filter on the grid layer from each region's convex hull
                # Stamp out all intersecting grid cells for each region
print(" Progress units are region features")
print(" Prepping data ...")
# Create an initial spatial filter from all input geometries
# Create a single geometry containing one convex hull for every input feature
limit_geom = ogr.Geometry(ogr.wkbGeometryCollection)
limit_geom.AssignSpatialReference(region_layer.GetSpatialRef())
for region_feature in region_layer:
region_geom = region_feature.GetGeometryRef()
limit_geom.AddGeometry(region_geom.ConvexHull())
                if limit_geom.GetSpatialReference().IsSame(grid_layer_srs) != 1:
limit_geom.Transform(coord_transform)
region_layer.ResetReading()
grid_layer.SetSpatialFilter(limit_geom)
region_feature = None
region_geom = None
# Loop through the region polygons, set a spatial filter
region_feature_counter = 0
num_region_features = len(region_layer)
fid_counter = -1
print(" Processing data ...")
for region_feature in region_layer:
# Update user
region_feature_counter += 1
sys.stdout.write("\r\x1b[K" + " %s/%s" % (region_feature_counter, num_region_features))
sys.stdout.flush()
region_geom = region_feature.GetGeometryRef().Clone()
if coord_transform is not None:
region_geom.Transform(coord_transform)
grid_layer.ResetReading()
grid_layer.SetSpatialFilter(region_geom.ConvexHull())
# Stamp out all intersecting grids and add to output layer
for grid_feature in grid_layer:
grid_geom = grid_feature.GetGeometryRef()
intersecting_geom = grid_geom.Intersection(region_geom)
output_geom = ogr.Geometry(ogr.wkbMultiPolygon)
# # Stash all the found grid cells into an in memory layer
# mem_driver = ogr.GetDriverByName('Memory')
# mem_ds = mem_driver.CreateDataSource('mem_grids')
# mem_layer = mem_ds.CreateLayer('mem_grids', grid_layer.GetSpatialRef(), grid_layer.GetGeomType())
# print("")
# mi = 0
# for grid_feature in grid_layer:
# mi += 1
# print("Mem I: %s" % mi)
# mem_layer.CreateFeature(grid_feature)
# grid_layer.ResetReading()
#
# # Loop through the region polygons, set a spatial filter
# region_feature_counter = 0
# num_region_features = len(region_layer)
# fid_counter = -1
# print(" Processing data ...")
# for region_feature in region_layer:
#
# # Update user
# region_feature_counter += 1
# sys.stdout.write("\r\x1b[K" + " %s/%s" % (region_feature_counter, num_region_features))
# sys.stdout.flush()
#
# region_geom = region_feature.GetGeometryRef().Clone()
# if coord_transform is not None:
# region_geom.Transform(coord_transform)
# mem_layer.ResetReading()
# mem_layer.SetSpatialFilter(region_geom.ConvexHull())
#
# # Stamp out all intersecting grids and add to output layer
# for m_grid_feature in mem_layer:
#
# m_grid_geom = m_grid_feature.GetGeometryRef()
# intersecting_geom = m_grid_geom.Intersection(region_geom)
# output_geom = ogr.Geometry(ogr.wkbMultiPolygon)
# NOTE: This logic ONLY allows multi/polygons to pass through - although first IF statement might let others through
# Logic in the validate section still only allows polygons
# POLYGON - Add
if intersecting_geom.GetGeometryType() in (ogr.wkbPolygon, ogr.wkbPolygon25D):
output_geom.AddGeometry(intersecting_geom)
intersecting_geom = None
# LINEARRING - Actually a closed polygon - convert to polygon and add
                        elif intersecting_geom.GetGeometryType() == ogr.wkbLinearRing and is_ring_poly(intersecting_geom):
output_geom.AddGeometry(ring2poly(intersecting_geom))
intersecting_geom = None
# MULTIPOLYGON - Split into and add individual polygons
elif intersecting_geom.GetGeometryType() in (ogr.wkbMultiPolygon, ogr.wkbMultiPolygon25D):
for add_poly in [intersecting_geom.GetGeometryRef(i) for i in range(intersecting_geom.GetGeometryCount())]:
output_geom.AddGeometry(add_poly)
add_poly = None
intersecting_geom = None
# POINT or MULTIPOINT - Discard
elif intersecting_geom.GetGeometryType() in (ogr.wkbPoint, ogr.wkbPoint25D,
ogr.wkbMultiPoint, ogr.wkbMultiPoint25D):
intersecting_geom = None
# LINESTRING or MULTILINESTRING
elif intersecting_geom.GetGeometryType() in (ogr.wkbLineString, ogr.wkbLineString25D,
ogr.wkbMultiLineString, ogr.wkbMultiLineString25D):
intersecting_geom = None
# The edge cases of edge cases - the "gridify problem"
# Geometry collection could contain any combination of points, multipoints, lines, multilines,
# linearrings, polygons, and multipolygons. All must be dealt with.
                        elif intersecting_geom.GetGeometryType() == ogr.wkbGeometryCollection:
                            for sub_geom_i in range(intersecting_geom.GetGeometryCount()):
sub_geom = intersecting_geom.GetGeometryRef(sub_geom_i)
# Sub geometry is a polygon - add to output
if sub_geom.GetGeometryType() in (ogr.wkbPolygon, ogr.wkbPolygon25D):
output_geom.AddGeometry(sub_geom)
# Sub geometry is a linearring that is actually a closed and should be a polygon
                                elif sub_geom.GetGeometryType() == ogr.wkbLinearRing and is_ring_poly(sub_geom):
output_geom.AddGeometry(ring2poly(sub_geom))
# Sub geometry is a multipolygon - explode and add individually
elif sub_geom.GetGeometryType() in (ogr.wkbMultiPolygon, ogr.wkbMultiPolygon25D):
for add_poly in [sub_geom.GetGeometryRef(i) for i in range(sub_geom.GetGeometryCount())]:
output_geom.AddGeometry(add_poly)
add_poly = None
sub_geom = None
intersecting_geom = None
# Unrecognized geometry type
else:
print("")
print("")
print("ERROR: Unrecognized geometry")
print(" Intersections can sometimes yield strange geometry")
print("")
print(" Input file information:")
print(" Region FID: %s" % region_feature.GetFID())
print(" Grid FID: %s" % m_grid_feature.GetFID())
print("")
print(" Problem geometry information:")
print(" Type: %s" % intersecting_geom.GetGeometryType())
print(" Name: %s" % intersecting_geom.GetGeometryName())
print(" Geometry count: %s" % intersecting_geom.GetGeometryCount())
print(" Point count: %s" % intersecting_geom.GetPointCount())
print("")
intersecting_geom = None
output_layer = None
region_layer = None
grid_layer = None
return 1
# Add output feature
if not output_geom.IsEmpty():
output_feature = region_feature.Clone()
fid_counter += 1
output_feature.SetFID(fid_counter)
output_feature.SetGeometry(output_geom)
output_layer.CreateFeature(output_feature)
# Update user - done processing a grid layer
print(" - Done")
output_layer.SyncToDisk()
#/* ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ */#
#/* Bad algorithm
#/* ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ */#
else:
print("ERROR: Invalid processing algorithm: %s" % processing_algorithm)
return 1
# Cleanup
sub_geom = None
all_geoms = None
output_geom = None
bottom_geom = None
temp_geom = None
sub_geom = None
m_grid_feature = None
m_grid_geom = None
mem_layer = None
mem_ds = None
mem_driver = None
limit_geom = None
coord_transform = None
region_geom = None
grid_geom = None
intersecting_geom = None
output_feature = None
output_layer = None
region_layer_srs = None
grid_layer_srs = None
region_layer = None
grid_layer = None
#/* ----------------------------------------------------------------------- */#
#/* Cleanup and final return
#/* ----------------------------------------------------------------------- */#
# Close OGR objects
output_driver = None
output_ds = None
    all_region_layers = None
    all_grid_layers = None
grid_ds = None
region_ds = None
return 0
#/* ======================================================================= */#
#/* Command Line Execution
#/* ======================================================================= */#
if __name__ == '__main__':
# Didn't get enough arguments - print usage and exit
    if len(sys.argv) == 1:
sys.exit(print_usage())
# Got enough arguments - give sys.argv[1:] to main()
else:
sys.exit(main(sys.argv[1:]))
from .._abstract.abstract import BaseParameters
from ..common.geometry import SpatialReference, Envelope
import os
import json
########################################################################
class AnalyzeParameters(BaseParameters):
"""
The analyzeParameters JSON object used to analyze a CSV file are
described below.
Inputs:
sourcelocale - The locale used for the geocoding service source.
geocodeServiceUrl - The URL of the geocoding service that supports
batch geocoding.
Note: ArcGIS for Portal 10.3 supports
configuring multiple geocoding services. If the
client application requires a specific locator,
the URL of this service should be specified in
this parameter.
sourcecountry - The two character country code associated with the
geocoding service, default is "world".
        sourcecountryhint - If analyzing for the first time, the hint is
                            used. If the source country is already
                            specified, then sourcecountry is used.
"""
_sourcelocale = None
_geocodeServiceUrl = None
_sourcecountry = None
_sourcecountryhint = None
#----------------------------------------------------------------------
def __init__(self,
sourcelocale="en",
geocodeServiceUrl=None,
sourcecountry="world",
sourcecountryhint=None):
"""Constructor"""
self._sourcelocale = sourcelocale
self._geocodeServiceUrl = geocodeServiceUrl
self._sourcecountry = sourcecountry
self._sourcecountryhint = sourcecountryhint
#----------------------------------------------------------------------
def __str__(self):
"""returns the object as a string"""
return json.dumps(self.value)
#----------------------------------------------------------------------
@property
def value(self):
"""returns object as a dictionary"""
val = {}
if self.sourcelocale is not None:
val['sourcelocale'] = self.sourcelocale
if self.geocodeServiceUrl is not None:
val['geocodeServiceUrl'] = self.geocodeServiceUrl
if self.sourcecountry is not None:
val['sourcecountry'] = self.sourcecountry
if self.sourcecountryhint is not None:
val['sourcecountryhint'] = self.sourcecountryhint
return val
#----------------------------------------------------------------------
@property
def sourcelocale(self):
"""gets/sets the locale for geocoding serouce source"""
return self._sourcelocale
#----------------------------------------------------------------------
@sourcelocale.setter
def sourcelocale(self, value):
"""gets/sets the locale for geocoding serouce source"""
if self._sourcelocale != value:
self._sourcelocale = value
#----------------------------------------------------------------------
@property
def geocodeServiceUrl(self):
"""gets/sets the geocodeServiceUrl"""
return self._geocodeServiceUrl
#----------------------------------------------------------------------
@geocodeServiceUrl.setter
def geocodeServiceUrl(self, value):
"""gets/sets the geocodeServiceUrl"""
if self._geocodeServiceUrl != value:
self._geocodeServiceUrl = value
#----------------------------------------------------------------------
@property
def sourcecountry(self):
"""gets/sets the sourcecountry"""
return self._sourcecountry
#----------------------------------------------------------------------
@sourcecountry.setter
def sourcecountry(self, value):
"""gets/sets the sourcecountry"""
if self._sourcecountry != value:
self._sourcecountry = value
#----------------------------------------------------------------------
@property
def sourcecountryhint(self):
"""gets/sets the sourcecountryhint"""
return self._sourcecountryhint
#----------------------------------------------------------------------
@sourcecountryhint.setter
def sourcecountryhint(self, value):
"""gets/sets the sourcecountryhint"""
if self._sourcecountryhint != value:
self._sourcecountryhint = value
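# Illustrative usage sketch (not part of the original module); the geocoding
# service URL below is a placeholder:
#
#   analyze_params = AnalyzeParameters(sourcelocale="en",
#                                      geocodeServiceUrl="https://example.com/arcgis/rest/services/World/GeocodeServer",
#                                      sourcecountry="US")
#   analyze_params.value   # -> {'sourcelocale': 'en', 'geocodeServiceUrl': '...', 'sourcecountry': 'US'}
#   str(analyze_params)    # -> the same dictionary serialized with json.dumps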
########################################################################
class CreateServiceParameters(BaseParameters):
"""
The createParameters JSON object
name (Required) Name of the service to be created. This name must be
unique. If the name already exists, the operation will
fail.
serviceDescription - Description given to the service.
hasStaticData - Boolean value indicating whether the data changes.
maxRecordCount - A double value indicating any constraints enforced on
query operations.
supportedQueryFormats - The formats in which query results are returned
capabilities - Specify feature service editing capabilities for Create,
Delete, Query, Update, and Sync.
description - A user-friendly description for the published dataset.
copyrightText - Copyright information associated with the dataset.
spatialReference - All layers added to a hosted feature service need to
have the same spatial reference defined for the
feature service. When creating a new empty service
without specifying its spatial reference, the
spatial reference of the hosted feature service is
set to the first layer added to that feature service
initialExtent - The initial extent set for the service.
allowGeometryUpdates - Boolean value indicating if updating the
geometry of the service is permitted.
units - Units used by the feature service
xssPreventionEnabled - Boolean value indicating whether cross-site
scripting prevention is enabled.
xssPreventionRule - Either InputOnly | InputOutput
xssInputRule - Either rejectInvalid | sanitizeInvalid
"""
_name = None
_spatialReference = None
_serviceDescription = None
_hasStaticData = None
_maxRecordCount=None
_supportedQueryFormats=None
_capabilities=None
_description=None
_copyrightText=None
_initialExtent=None
_allowGeometryUpdates=None
_units=None
_xssPreventionEnabled=None
_xssPreventionRule=None
_xssInputRule=None
_currentVersion = 10.3
    _enableEditorTracking = False
    _enableOwnershipAccessControl = False
    _allowOthersToUpdate = True
    _allowOthersToDelete = True
    _supportsAsync = True
    _supportsRegisteringExistingData = True
    _supportsSyncDirectionControl = True
    _supportsPerLayerSync = True
    _supportsPerReplicaSync = True
    _supportsRollbackOnFailure = True
    _hasVersionedData = False
    _supportsDisconnectedEditing = False
    _size = 49152
    _syncEnabled = True
#----------------------------------------------------------------------
def __init__(self,
name,
spatialReference,
serviceDescription="",
hasStaticData=False,
maxRecordCount=1000,
supportedQueryFormats="JSON",
capabilities="Query",
description="",
copyrightText="",
initialExtent=None,
allowGeometryUpdates=False,
units="esriDecimalDegrees",
xssPreventionEnabled=False,
xssPreventionRule="InputOnly",
xssInputRule="sanitizeInvalid",
currentVersion="10.3",
enableEditorTracking = False,
enableOwnershipAccessControl = False,
allowOthersToUpdate = True,
allowOthersToDelete = True,
supportsAsync = True,
supportsRegisteringExistingData = True,
supportsSyncDirectionControl = True,
supportsPerLayerSync = True,
supportsPerReplicaSync = True,
supportsRollbackOnFailure = True,
hasVersionedData = False,
supportsDisconnectedEditing = False,
size =49152,
syncEnabled =True
):
"""Constructor"""
self._name = name
if isinstance(spatialReference, SpatialReference):
self._spatialReference = spatialReference.value
else:
raise AttributeError('spatialReference must be of type geometry.SpatialReference')
self._serviceDescription = serviceDescription
self._hasStaticData = hasStaticData
self._maxRecordCount=maxRecordCount
self._supportedQueryFormats= supportedQueryFormats
self._capabilities= capabilities
self._description= description
self._copyrightText= copyrightText
if initialExtent is not None:
if isinstance(initialExtent, Envelope):
self._initialExtent= initialExtent.value
else:
raise AttributeError('initialExtent must be of type geometry.Envelope')
self._allowGeometryUpdates = allowGeometryUpdates
self._units = units
self._xssPreventionEnabled = xssPreventionEnabled
self._xssPreventionRule = xssPreventionRule
        self._xssInputRule = xssInputRule
        self._enableEditorTracking = enableEditorTracking
        self._enableOwnershipAccessControl = enableOwnershipAccessControl
        self._allowOthersToUpdate = allowOthersToUpdate
        self._allowOthersToDelete = allowOthersToDelete
        self._supportsAsync = supportsAsync
        self._supportsRegisteringExistingData = supportsRegisteringExistingData
        self._supportsSyncDirectionControl = supportsSyncDirectionControl
        self._supportsPerLayerSync = supportsPerLayerSync
        self._supportsPerReplicaSync = supportsPerReplicaSync
        self._supportsRollbackOnFailure = supportsRollbackOnFailure
        self._hasVersionedData = hasVersionedData
        self._supportsDisconnectedEditing = supportsDisconnectedEditing
        self._size = size
        self._syncEnabled = syncEnabled
#----------------------------------------------------------------------
@property
def value(self):
""""""
val = {
"name" : self._name,
"spatialReference" : self._spatialReference,
'maxRecordCount' : self._maxRecordCount,
'serviceDescription' : self._serviceDescription,
'description' : self._description,
'hasStaticData' : self._hasStaticData,
'units' : self._units,
'allowGeometryUpdates' : self._allowGeometryUpdates,
'capabilities' : self._capabilities,
"currentVersion" : self._currentVersion,
"hasVersionedData" : self._hasStaticData,
"supportsDisconnectedEditing": self._supportsDisconnectedEditing,
"size":self._size,
"syncEnabled":self._syncEnabled,
"syncCapabilities":{"supportsAsync":self._supportsAsync,
"supportsRegisteringExistingData":self._supportsRegisteringExistingData,
"supportsSyncDirectionControl":self._supportsSyncDirectionControl,
"supportsPerLayerSync":self._supportsPerLayerSync,
"supportsPerReplicaSync":self._supportsPerReplicaSync,
"supportsRollbackOnFailure":self._supportsRollbackOnFailure},
"editorTrackingInfo":{"enableEditorTracking":self._enableEditorTracking,
"enableOwnershipAccessControl":self._enableOwnershipAccessControl,
"allowOthersToUpdate":self._allowOthersToUpdate,
"allowOthersToDelete":self._allowOthersToDelete},
"tables":[],
"_ssl":False
}
if self._initialExtent is not None:
val['initialExtent'] = self._initialExtent
if self._supportedQueryFormats is not None:
val['supportedQueryFormats'] = self._supportedQueryFormats
if self._xssPreventionEnabled:
val['xssPreventionInfo'] = {}
val['xssPreventionInfo']['xssPreventionEnabled'] = self._xssPreventionEnabled
val['xssPreventionInfo']['xssPreventionRule'] = self._xssPreventionRule
val['xssPreventionInfo']['xssInputRule'] = self._xssInputRule
return val
#----------------------------------------------------------------------
def __str__(self):
"""returns object as string"""
return json.dumps(self.value)
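# Illustrative usage sketch (not part of the original module). It assumes the
# SpatialReference class imported from ..common.geometry accepts a well-known
# ID (wkid); 102100 is Web Mercator.
#
#   sr = SpatialReference(wkid=102100)
#   create_params = CreateServiceParameters(name="MyHostedService",
#                                           spatialReference=sr,
#                                           capabilities="Query,Create,Update,Delete",
#                                           maxRecordCount=2000)
#   create_params.value    # dictionary passed to the createService operation
#   str(create_params)     # the same payload as a JSON string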
########################################################################
class PortalParameters(BaseParameters):
"""
The following parameters represent the properties of a portal
"""
_name = None
_access = None
_description = None
_canSharePublic = None
_canSearchPublic = None
_thumbnail = None
_urlKey = None
_urlHostName = None
_culture = None
__allowed_keys = ['name', 'access', "description",
"canSharePublic", "canSearchPublic",
"thumbnail", "urlKey", "urlHostName",
"culture"]
#----------------------------------------------------------------------
def __init__(self, **kwargv):
"""Constructor"""
        for key, value in kwargv.items():
if key in self.__allowed_keys:
setattr(self, "_"+ key, value)
#----------------------------------------------------------------------
@property
def value(self):
""" returns the class as a dictionary """
        val = {}
        for k in self.__allowed_keys:
            value = getattr(self, "_" + k)
            if value is not None:
                val[k] = value
return val
#----------------------------------------------------------------------
@property
def name(self):
""" The name of the organization/portal. The character limit is 250 """
return self._name
#----------------------------------------------------------------------
@name.setter
def name(self, value):
"""The name of the organization/portal. The character limit is 250"""
if self._name != value:
self._name = value
#----------------------------------------------------------------------
@property
def access(self):
"""
Determines who can view your organization as an anonymous user.
Setting to public allows anonymous users to access your
organization's custom URL. Setting to private restricts access to
only members of your organization. public is the default.
Values: private | public
"""
return self._access
#----------------------------------------------------------------------
@access.setter
def access(self, value):
"""
Determines who can view your organization as an anonymous user.
Setting to public allows anonymous users to access your
organization's custom URL. Setting to private restricts access to
only members of your organization. public is the default.
Values: private | public
"""
if self._access != value:
self._access = value
#----------------------------------------------------------------------
@property
def description(self):
"""
A description of the organization/portal and can be of any length
"""
return self._description
#----------------------------------------------------------------------
@description.setter
def description(self, value):
"""
A description of the organization/portal and can be of any length
"""
if self._description != value:
self._description = value
#----------------------------------------------------------------------
########################################################################
class ItemParameter(BaseParameters):
"""
Item parameters correspond to properties of an item that are available
to update on the Add Item and Update Item operations.
Allowed Parameters:
title - The title of the item. This is the only name that users and
applications use for the item. There is no concept of
display names or aliases in the ArcGIS Portal API.
thumbnail - Enter the pathname to the thumbnail image to be used for
the item. The recommended image size is 200 pixels wide
by 133 pixels high. Acceptable image formats are PNG,
GIF, and JPEG. The maximum file size for an image is 1
MB. This is not a reference to the file but the file
itself, which will be stored on the sharing servers.
thumbnailurl - Enter the URL to the thumbnail image to be used for
the item. The recommended image size is 200 pixels
wide by 133 pixels high. Acceptable image formats are
PNG, GIF, and JPEG. The maximum file size for an
image is 1 MB.
metadata - The file that stores the metadata information on an item.
It's stored in the metadata folder under esriinfo, e.g.,
/sharing/content/items/<itemid>/info/metadata/metadata.xml.
type - The type of the item. Must be drawn from the list of
supported types. See Items and item types for a list of the
supported types.
typeKeywords - Type keywords describe the type and should logically
apply to all items of that type. See Items and item
types for a list of the different predefined type
keywords that will be automatically added based on
the supplied type. Use this parameter only if you
want to add additional type keywords. typeKeywords
can be applied to any type of item, in addition to
hosted feature services.
description - An item description can be of any length.
tags - Tags are words or short phrases that describe the specific
item. Separate with commas.
snippet - Snippet or summary for the item. Limit this brief
descriptive text to 250 characters.
    extent - bounding box. Syntax: extent=<xmin>, <ymin>, <xmax>, <ymax>
spatialReference - coordinate system of the item
accessInformation - credits the source of the item
licenseInfo - includes any license information or restrictions
    culture - The item locale (language and country) information.
serviceUsername - Set the username on a secure on-premise ArcGIS
Server service. It is valid on Map Services,
Feature Services and Image Services only.
servicePassword - Set the password on a secure on-premise ArcGIS
Server service. It is valid on Map Services,
Feature Services, and Image Services only.
fileName - name of the file updating (optional)
"""
_title = None
_thumbnail = None
_thumbnailurl = None
_metadata = None
_type = None
_typeKeywords = None
_description = None
_tags = None
_snippet = None
_overwrite = False
_extent = None
_spatialReference = None
_accessInformation = None
_licenseInfo = None
_culture = None
_serviceUsername = None
_servicePassword = None
_filename = None
#----------------------------------------------------------------------
def __init__(self):
"""Constructor"""
pass
#----------------------------------------------------------------------
@property
def value(self):
""" returns the class as a dictionary """
r = {}
attributes = [attr for attr in dir(self)
if not attr.startswith('__') and \
not attr.startswith('_')]
for a in attributes:
if a != "value":
val = getattr(self, a)
if val is not None:
r[a] = val
return r
#----------------------------------------------------------------------
def __str__(self):
"""returns object as string"""
return json.dumps(self.value)
#----------------------------------------------------------------------
@property
def title(self):
"""gets/set the title"""
return self._title
#----------------------------------------------------------------------
@title.setter
def title(self, value):
""" gets/sets the title """
if self._title != value:
self._title = value
#----------------------------------------------------------------------
@property
def thumbnail(self):
"""
gets/sets the thumbnail
Enter the pathname to the thumbnail image to be used for the item.
The recommended image size is 200 pixels wide by 133 pixels high.
Acceptable image formats are PNG, GIF, and JPEG. The maximum file
size for an image is 1 MB. This is not a reference to the file but
the file itself, which will be stored on the sharing servers.
"""
return self._thumbnail
#----------------------------------------------------------------------
@thumbnail.setter
def thumbnail(self, value):
"""
gets/sets the thumbnail
Enter the pathname to the thumbnail image to be used for the item.
The recommended image size is 200 pixels wide by 133 pixels high.
Acceptable image formats are PNG, GIF, and JPEG. The maximum file
size for an image is 1 MB. This is not a reference to the file but
the file itself, which will be stored on the sharing servers.
"""
if os.path.isfile(value) and \
self._thumbnail != value:
self._thumbnail = value
elif value is None:
self._thumbnail = None
#----------------------------------------------------------------------
@property
def thumbnailurl(self):
"""
gets/sets the thumbnail url
Enter the URL to the thumbnail image to be used for the item. The
recommended image size is 200 pixels wide by 133 pixels high.
Acceptable image formats are PNG, GIF, and JPEG. The maximum file
size for an image is 1 MB
"""
return self._thumbnailurl
#----------------------------------------------------------------------
@thumbnailurl.setter
def thumbnailurl(self, value):
"""
gets/sets the thumbnail url
Enter the URL to the thumbnail image to be used for the item. The
recommended image size is 200 pixels wide by 133 pixels high.
Acceptable image formats are PNG, GIF, and JPEG. The maximum file
size for an image is 1 MB
"""
if self._thumbnailurl != value:
self._thumbnailurl = value
#----------------------------------------------------------------------
@property
def metadata(self):
"""
gets/sets the metadata file
The file that stores the metadata information on an item. It's
stored in the metadata folder under esriinfo, e.g.,
/sharing/content/items/<itemid>/info/metadata/metadata.xml
"""
return self._metadata
#----------------------------------------------------------------------
@metadata.setter
def metadata(self, value):
"""
gets/sets the metadata file
The file that stores the metadata information on an item. It's
stored in the metadata folder under esriinfo, e.g.,
/sharing/content/items/<itemid>/info/metadata/metadata.xml
"""
if self._metadata != value:
self._metadata = value
#----------------------------------------------------------------------
@property
def type(self):
"""
gets/sets the type
The type of the item. Must be drawn from the list of supported types.
"""
return self._type
#----------------------------------------------------------------------
@type.setter
def type(self, value):
"""
gets/sets the type
The type of the item. Must be drawn from the list of supported types.
"""
if self._type != value:
self._type = value
#----------------------------------------------------------------------
@property
def typeKeywords(self):
"""
gets/sets the typeKeywords
Type keywords describe the type and should logically apply to all
items of that type. See Items and item types for a list of the
different predefined type keywords that will be automatically added
based on the supplied type. Use this parameter only if you want to
add additional type keywords. typeKeywords can be applied to any
type of item, in addition to hosted feature services.
"""
return self._typeKeywords
#----------------------------------------------------------------------
@typeKeywords.setter
def typeKeywords(self, value):
"""
gets/sets the typeKeywords
Type keywords describe the type and should logically apply to all
items of that type. See Items and item types for a list of the
different predefined type keywords that will be automatically added
based on the supplied type. Use this parameter only if you want to
add additional type keywords. typeKeywords can be applied to any
type of item, in addition to hosted feature services.
"""
if self._typeKeywords != value:
self._typeKeywords = value
#----------------------------------------------------------------------
@property
def description(self):
"""gets/sets an item description of any length"""
return self._description
#----------------------------------------------------------------------
@description.setter
def description(self, value):
"""gets/sets an item description of any length"""
if self._description != value:
self._description = value
#----------------------------------------------------------------------
@property
def tags(self):
"""gets/sets the tags that describe the item"""
return self._tags
#----------------------------------------------------------------------
@tags.setter
def tags(self, value):
"""gets/sets the tags that describe the item"""
if self._tags != value:
self._tags = value
#----------------------------------------------------------------------
@property
def snippet(self):
"""
Snippet or summary for the item. Limit this brief descriptive text
to 250 characters.
"""
return self._snippet
#----------------------------------------------------------------------
@snippet.setter
def snippet(self, value):
"""
Snippet or summary for the item. Limit this brief descriptive text
to 250 characters.
"""
if self._snippet != value:
self._snippet = value
#----------------------------------------------------------------------
@property
def overwrite(self):
"""
overwrites an existing item.
"""
return self._overwrite
#----------------------------------------------------------------------
@overwrite.setter
def overwrite(self, value):
"""
        overwrites an existing item (deprecated)
"""
if self._overwrite != value:
self._overwrite = value
#----------------------------------------------------------------------
@property
def extent(self):
"""gets/sets the bounding rectangle of the item"""
return self._extent
#----------------------------------------------------------------------
@extent.setter
def extent(self, value):
"""gets/sets the bounding rectangle of the item"""
if self._extent != value:
self._extent = value
#----------------------------------------------------------------------
@property
def spatialReference(self):
"""gets/sets the coordinate system of the item """
return self._spatialReference
#----------------------------------------------------------------------
@spatialReference.setter
def spatialReference(self, value):
"""gets/sets the coordinate system of the item """
if self._spatialReference != value:
self._spatialReference = value
#----------------------------------------------------------------------
@property
def accessInformation(self):
"""gets/sets for the credits of the source of the item."""
return self._accessInformation
#----------------------------------------------------------------------
@accessInformation.setter
def accessInformation(self, value):
"""gets/sets for the credits of the source of the item."""
if self._accessInformation != value:
self._accessInformation = value
#----------------------------------------------------------------------
@property
def licenseInfo(self):
"""gets/sets the license information or restrictions"""
return self._licenseInfo
#----------------------------------------------------------------------
@licenseInfo.setter
def licenseInfo(self, value):
"""gets/sets the license information or restrictions"""
if self._licenseInfo != value:
self._licenseInfo = value
#----------------------------------------------------------------------
@property
def culture(self):
"""gets/sets the item locale"""
return self._culture
#----------------------------------------------------------------------
@culture.setter
def culture(self, value):
"""gets/sets the item locale"""
if self._culture != value:
self._culture = value
#----------------------------------------------------------------------
@property
def serviceUsername(self):
"""
gets/Set the username on a secure on-premise ArcGIS Server service.
It is valid on Map Services, Feature Services and Image Services
only
"""
return self._serviceUsername
#----------------------------------------------------------------------
@serviceUsername.setter
def serviceUsername(self, value):
"""
gets/Set the username on a secure on-premise ArcGIS Server service.
It is valid on Map Services, Feature Services and Image Services
only
"""
if self._serviceUsername != value:
self._serviceUsername = value
#----------------------------------------------------------------------
@property
def servicePassword(self):
"""
gets/sets - Set the password on a secure on-premise ArcGIS Server
service. It is valid on Map Services, Feature Services, and Image
Services only.
"""
return self._servicePassword
#----------------------------------------------------------------------
@servicePassword.setter
def servicePassword(self, value):
"""
gets/sets - Set the password on a secure on-premise ArcGIS Server
service. It is valid on Map Services, Feature Services, and Image
Services only.
"""
if self._servicePassword != value:
self._servicePassword = value
#----------------------------------------------------------------------
@property
def filename(self):
"""gets/sets the file name"""
return self._filename
#----------------------------------------------------------------------
@filename.setter
def filename(self, value):
""""""
if value != self._filename:
self._filename = value
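# Illustrative usage sketch (not part of the original module); the values are
# made up. Properties left as None are omitted from the value dictionary:
#
#   item_params = ItemParameter()
#   item_params.title = "Parcels 2015"
#   item_params.type = "Shapefile"
#   item_params.tags = "parcels, cadastre"
#   item_params.snippet = "Sample parcel data"
#   item_params.value    # -> includes title, type, tags, snippet (and overwrite, which defaults to False)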
########################################################################
class PublishCSVParameters(BaseParameters):
"""
The publishParameters JSON object used to publish a CSV file
name - (Required) - Name of the service to be created. The same name is
reused as the name for the single layer within the service if
the layerInfo parameter is not provided.
locationType - (Required) - coordinates | address | lookup | none
-When locationType == coordinates, the CSV data contains
X,Y information.
-When locationType == address, the CSV data contains
address fields that will be geocoded to a single point.
-When locationType == lookup, the CSV data contains
fields that can be mapped to well-known sets of
geographies.
-When locationType == none, the CSV data contains no
spatial content and data will be loaded and subsequently
queried as tabular data.
Based on this parameter, additional parameters will be
required, e.g., when specifying locationType ==
coordinates, the latitude and longitude field names must
be specified.
latitudeFieldName - (Required if locationType = coordinates) - If
locationType = coordinates, the name of the field
that contains the Y coordinate.
longitudeFieldName - (Required if locationType = coordinates) - If
locationType = coordinates, the name of the field
that contains the X coordinate.
addressTemplate - (Required if locationType = address) - A string value
that defines the address to find based on CSV field
values.
Example: "{Address} {City}, {State} {Zip}"
lookupType - (Required if locationType == lookup) - The type of place
to look up. To be supported in a follow on phase.
lookupFields (Required if locationType == lookup) - A JSON object with
name value pairs that define the fields used to look up
the location.
layerInfo - (Required) - A JSON object that provides additional
information about the dataset. The JSON format resembles
the layerDescriptor used in publishing feature services to
an on-premise spatial data server or ArcGIS Server. All
parameters except fields are optional.
description - A user-friendly description for the published dataset.
maxRecordCount - A double value indicating any constraints enforced on
query operations.
Default is -1 or int.MaxValue indicating no constraint.
copyrightText - Copyright information associated with the dataset.
    columnNames - [<array of column names>], overridden if layerInfo fields
is specified.
If columnNames are omitted, the field names are inferred
from the first line in source CSV.
columnDelimiter - A delimiter between data fields in each row of text.
Default is the comma character.
sourceSR - Spatial reference of the input coordinates.
Default is WKID 4326.
targetSR - Target spatial reference of the coordinates as stored in the
database.
Default is WKID 102100.
"""
_name = None
_locationType = None
_latitudeFieldName = None
_longitudeFieldName = None
_addressTemplate = None
_lookupType = None
    _lookupFields = None
_layerInfo = None
_description = None
_maxRecordCount = None
_copyrightText = None
_columnNames = None
_columnDelimiter = None
_sourceSR = 4326
_targetSR = 102100
_allowed_locationType = ["coordinates", "address", "lookup", "none"]
__allowed_keys = ['name', "locationType", "layerInfo",
"latitudeFieldName", "longitudeFieldName",
"addressTemplate", "lookupType", "lookupFields",
"description", "maxRecordCount", "copyrightText",
"columnNames", "columnDelimiter", "sourceSR",
"targetSR"]
#----------------------------------------------------------------------
def __init__(self,
name,
locationType,
layerInfo,
latitudeFieldName=None,
longitudeFieldName=None,
addressTemplate=None,
lookupType=None,
lookupFields=None,
description="",
maxRecordCount=-1,
copyrightText="",
columnNames=None,
columnDelimiter=",",
sourceSR=4326,
targetSR=102100):
"""Constructor"""
self._name = name
self._layerInfo = layerInfo
if locationType.lower() in self._allowed_locationType:
self._locationType = locationType
else:
raise AttributeError("Invalid locationType %s." % locationType)
#["coordinates", "address", "lookup", "none"]
if locationType.lower() == "none":
pass
elif locationType.lower() == "address":
if addressTemplate is None:
raise AttributeError("addressTemplate must be provide for this location type")
elif locationType.lower() == "coordinates":
if latitudeFieldName is None or \
longitudeFieldName is None:
raise AttributeError("Latitude and Longitude fields must be provided with this location type.")
elif locationType.lower() == "lookup":
if lookupFields is None or \
lookupType is None:
raise AttributeError("lookupFields and lookupType must be provide with this location type.")
self._latitudeFieldName = latitudeFieldName
self._longitudeFieldName = longitudeFieldName
self._addressTemplate = addressTemplate
self._lookupType = lookupType
self._lookupFields = lookupFields
self._description = description
self._maxRecordCount = maxRecordCount
self._copyrightText = copyrightText
self._columnNames = columnNames
self._columnDelimiter = columnDelimiter
self._sourceSR = sourceSR
self._targetSR = targetSR
#----------------------------------------------------------------------
@property
def value(self):
"""returns the values as a dictionary"""
val = {}
for k in self.__allowed_keys:
value = getattr(self, "_" + k)
if value is not None:
val[k] = value
return val
#----------------------------------------------------------------------
def __str__(self):
"""returns object as string"""
return json.dumps(self.value)
#----------------------------------------------------------------------
@property
def name(self):
"""
gets/sets the name property
"""
return self._name
#----------------------------------------------------------------------
@name.setter
def name(self, value):
""" gets/sets the name property """
if self._name != value and value is not None:
self._name = value
#----------------------------------------------------------------------
@property
def locationType(self):
"""
gets/sets the location type
"""
return self._locationType
#----------------------------------------------------------------------
@locationType.setter
def locationType(self, value):
"""
gets/sets the location type
"""
if value.lower() in self._allowed_locationType and \
self._locationType.lower() != value.lower():
self._locationType = value.lower()
#----------------------------------------------------------------------
@property
def latitudeFieldName(self):
""" gets/sets the latitude field name """
return self._latitudeFieldName
#----------------------------------------------------------------------
@latitudeFieldName.setter
def latitudeFieldName(self, value):
""" gets/sets the latitude field name """
if self._latitudeFieldName != value:
self._latitudeFieldName = value
#----------------------------------------------------------------------
@property
def longitudeFieldName(self):
""" gets/sets the longitude field name """
return self._longitudeFieldName
#----------------------------------------------------------------------
@longitudeFieldName.setter
def longitudeFieldName(self, value):
""" gets/sets the longitude field name """
if self._longitudeFieldName != value:
self._longitudeFieldName = value
#----------------------------------------------------------------------
@property
def addressTemplate(self):
""" gets/sets the address tempalte value """
return self._addressTemplate
#----------------------------------------------------------------------
@addressTemplate.setter
def addressTemplate(self, value):
""" gets/sets the address template value """
if self._addressTemplate != value:
self._addressTemplate = value
#----------------------------------------------------------------------
    @property
    def lookupFields(self):
        """ gets/sets the lookup fields """
        return self._lookupFields
    #----------------------------------------------------------------------
    @lookupFields.setter
    def lookupFields(self, value):
        """ gets/sets the lookup fields """
        if self._lookupFields != value:
            self._lookupFields = value
#----------------------------------------------------------------------
@property
def layerInfo(self):
""" gets/sets the layer info """
return self._layerInfo
#----------------------------------------------------------------------
@layerInfo.setter
def layerInfo(self, value):
""" gets/sets the layer info """
if self._layerInfo != value:
self._layerInfo = value
#----------------------------------------------------------------------
@property
def description(self):
"""gets/set the decription"""
return self._description
#----------------------------------------------------------------------
@description.setter
def description(self, value):
""" gets/sets the description """
if self._description != value:
self._description = value
#----------------------------------------------------------------------
@property
def maxRecordCount(self):
"""gets/set the max record count"""
return self._maxRecordCount
#----------------------------------------------------------------------
@maxRecordCount.setter
def maxRecordCount(self, value):
"""gets/sets the max record count"""
if self._maxRecordCount != value:
self._maxRecordCount = value
#----------------------------------------------------------------------
@property
def copyrightText(self):
"""gets/sets the copyright text"""
return self._copyrightText
#----------------------------------------------------------------------
@copyrightText.setter
def copyrightText(self, value):
"""gets/sets the copyright text"""
if self._copyrightText != value:
self._copyrightText = value
#----------------------------------------------------------------------
@property
def columnNames(self):
"""gets/sets the columnNames"""
return self._columnNames
#----------------------------------------------------------------------
@columnNames.setter
def columnNames(self, value):
"""gets/sets the columnNames"""
if self._columnNames != value:
self._columnNames = value
#----------------------------------------------------------------------
@property
def columnDelimiter(self):
"""gets/sets the columnDelimiter"""
return self._columnDelimiter
#----------------------------------------------------------------------
@columnDelimiter.setter
def columnDelimiter(self, value):
"""gets/sets the columnDelimiter"""
if self._columnDelimiter != value:
self._columnDelimiter = value
#----------------------------------------------------------------------
@property
def targetSR(self):
"""gets/sets the target spatial reference"""
return self._targetSR
#----------------------------------------------------------------------
@targetSR.setter
def targetSR(self, value):
"""gets/sets the target spatial reference"""
if self._targetSR != value:
self._targetSR = value
#----------------------------------------------------------------------
@property
def sourceSR(self):
"""gets/set the source spatialreference"""
return self._sourceSR
#----------------------------------------------------------------------
@sourceSR.setter
def sourceSR(self, value):
"""gets/sets the source spatial reference"""
if self._sourceSR != value:
self._sourceSR = value
########################################################################
class PublishShapefileParameter(BaseParameters):
"""
The publishParameters JSON object used to publish shapefiles
"""
_name = None
_layerInfo = None
_description = None
_maxRecordCount = None
_copyrightText = None
_targetSR = None
_hasStaticData = True
__allowed_keys = ['name', "description",
"maxRecordCount", "copyrightText",
"layerInfo", "targetSR", "hasStaticData"]
#----------------------------------------------------------------------
def __init__(self,
name,
layerInfo,
description="",
maxRecordCount=-1,
copyrightText="",
targetSR=102100
):
"""Constructor"""
self._name = name
self._layerInfo = layerInfo
self._description = description
self._maxRecordCount = maxRecordCount
self._copyrightText = copyrightText
self._targetSR = targetSR
#----------------------------------------------------------------------
@property
def name(self):
"""gets/sets the name"""
return self._name
#----------------------------------------------------------------------
@name.setter
def name(self, value):
"""gets/sets the name"""
if self._name != value:
self._name = value
#----------------------------------------------------------------------
@property
def layerInfo(self):
"""gets/sets the layer info"""
return self._layerInfo
#----------------------------------------------------------------------
@layerInfo.setter
def layerInfo(self, value):
"""gets/sets the layer info"""
if self._layerInfo != value:
self._layerInfo = value
#----------------------------------------------------------------------
@property
def description(self):
"""gets/sets the description"""
return self._description
#----------------------------------------------------------------------
@description.setter
def description(self, value):
"""gets/sets the description"""
if self._description != value:
self._description = value
#----------------------------------------------------------------------
@property
def hasStaticData(self):
"""gets/sets hasStaticData"""
return self._hasStaticData
#----------------------------------------------------------------------
@hasStaticData.setter
def hasStaticData(self, value):
"""gets/sets the hasStaticData"""
if self._hasStaticData != value and \
isinstance(value, bool):
self._hasStaticData = value
#----------------------------------------------------------------------
@property
def maxRecordCount(self):
"""gets/sets the max record count"""
return self._maxRecordCount
#----------------------------------------------------------------------
@maxRecordCount.setter
def maxRecordCount(self, value):
"""gets/sets the max record count"""
if self._maxRecordCount != value:
self._maxRecordCount = value
#----------------------------------------------------------------------
@property
def copyrightText(self):
"""gets/sets the copyrightText"""
return self._copyrightText
#----------------------------------------------------------------------
@copyrightText.setter
def copyrightText(self, value):
"""gets/sets the copyrightText"""
if self._copyrightText != value:
self._copyrightText = value
#----------------------------------------------------------------------
@property
def targetSR(self):
"""gets/sets the targetSR"""
return self._targetSR
#----------------------------------------------------------------------
@targetSR.setter
def targetSR(self, value):
"""gets/sets the targetSR"""
if self._targetSR != value:
self._targetSR = value
#----------------------------------------------------------------------
@property
def value(self):
"""returns the object as a dictionary"""
val = {}
for k in self.__allowed_keys:
value = getattr(self, "_" + k)
if value is not None:
val[k] = value
return val
#----------------------------------------------------------------------
def __str__(self):
"""returns the object as string"""
return json.dumps(self.value)
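def _example_publish_shapefile_params():
    """Hedged usage sketch (not part of the original module): shows how a
    PublishShapefileParameter is typically constructed and serialized. The
    name and layerInfo values below are illustrative placeholders only."""
    params = PublishShapefileParameter(
        name="Parcels",
        layerInfo={"capabilities": "Query"},
        description="Example parcel layer",
        maxRecordCount=1000,
        copyrightText="Example copyright",
        targetSR=102100)
    params.hasStaticData = False       # the setter only accepts booleans
    return params.value                # dict restricted to the allowed keys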
########################################################################
class PublishFeatureCollectionParameter(BaseParameters):
"""
The publishParameters JSON object used to publish feature collections
"""
_name = None
_layerInfo = None
_description = None
_maxRecordCount = None
_copyrightText = None
_targetSR = None
__allowed_keys = ['name', "description",
"maxRecordCount", "copyrightText",
"layerInfo", "targetSR"]
#----------------------------------------------------------------------
def __init__(self,
name,
layerInfo,
description="",
maxRecordCount=-1,
copyrightText="",
targetSR=102100
):
"""Constructor"""
self._name = name
self._layerInfo = layerInfo
self._description = description
self._maxRecordCount = maxRecordCount
self._copyrightText = copyrightText
self._targetSR = targetSR
#----------------------------------------------------------------------
@property
def name(self):
"""gets/sets the name"""
return self._name
#----------------------------------------------------------------------
@name.setter
def name(self, value):
"""gets/sets the name"""
if self._name != value:
self._name = value
#----------------------------------------------------------------------
@property
def layerInfo(self):
"""gets/sets the layer info"""
return self._layerInfo
#----------------------------------------------------------------------
@layerInfo.setter
def layerInfo(self, value):
"""gets/sets the layer info"""
if self._layerInfo != value:
self._layerInfo = value
#----------------------------------------------------------------------
@property
def description(self):
"""gets/sets the description"""
return self._description
#----------------------------------------------------------------------
@description.setter
def description(self, value):
"""gets/sets the description"""
if self._description != value:
self._description = value
#----------------------------------------------------------------------
@property
def maxRecordCount(self):
"""gets/sets the max record count"""
return self._maxRecordCount
#----------------------------------------------------------------------
@maxRecordCount.setter
def maxRecordCount(self, value):
"""gets/sets the max record count"""
if self._maxRecordCount != value:
self._maxRecordCount = value
#----------------------------------------------------------------------
@property
def copyrightText(self):
"""gets/sets the copyrightText"""
return self._copyrightText
#----------------------------------------------------------------------
@copyrightText.setter
def copyrightText(self, value):
"""gets/sets the copyrightText"""
if self._copyrightText != value:
self._copyrightText = value
#----------------------------------------------------------------------
@property
def targetSR(self):
"""gets/sets the targetSR"""
return self._targetSR
#----------------------------------------------------------------------
@targetSR.setter
def targetSR(self, value):
"""gets/sets the targetSR"""
if self._targetSR != value:
self._targetSR = value
#----------------------------------------------------------------------
@property
def value(self):
"""returns the object as a dictionary"""
val = {}
for k in self.__allowed_keys:
value = getattr(self, "_" + k)
if value is not None:
val[k] = value
return val
#----------------------------------------------------------------------
def __str__(self):
"""returns object as a string"""
return json.dumps(self.value)
########################################################################
class PublishFGDBParameter(BaseParameters):
"""
    The publishParameters JSON object used to publish a file geodatabase
"""
_name = None
_layerInfo = None
_description = None
_maxRecordCount = None
_copyrightText = None
_targetSR = None
_overwrite = False
__allowed_keys = ['name', "description",
"maxRecordCount", "copyrightText",
"layerInfo", "targetSR", "overwrite"]
#----------------------------------------------------------------------
def __init__(self,
name,
layerInfo,
description="",
maxRecordCount=-1,
copyrightText="",
targetSR=102100,
overwrite=False
):
"""Constructor"""
self._name = name
self._layerInfo = layerInfo
self._description = description
self._maxRecordCount = maxRecordCount
self._copyrightText = copyrightText
self._targetSR = targetSR
self._overwrite = overwrite
#----------------------------------------------------------------------
@property
def overwrite(self):
"""gets/sets the overwrite value"""
return self._overwrite
#----------------------------------------------------------------------
@overwrite.setter
def overwrite(self, value):
"""gets/sets the overwrite value"""
if isinstance(value, bool):
self._overwrite = value
#----------------------------------------------------------------------
@property
def name(self):
"""gets/sets the name"""
return self._name
#----------------------------------------------------------------------
@name.setter
def name(self, value):
"""gets/sets the name"""
if self._name != value:
self._name = value
#----------------------------------------------------------------------
@property
def layerInfo(self):
"""gets/sets the layer info"""
return self._layerInfo
#----------------------------------------------------------------------
@layerInfo.setter
def layerInfo(self, value):
"""gets/sets the layer info"""
if self._layerInfo != value:
self._layerInfo = value
#----------------------------------------------------------------------
@property
def description(self):
"""gets/sets the description"""
return self._description
#----------------------------------------------------------------------
@description.setter
def description(self, value):
"""gets/sets the description"""
if self._description != value:
self._description = value
#----------------------------------------------------------------------
@property
def maxRecordCount(self):
"""gets/sets the max record count"""
return self._maxRecordCount
#----------------------------------------------------------------------
@maxRecordCount.setter
def maxRecordCount(self, value):
"""gets/sets the max record count"""
if self._maxRecordCount != value:
self._maxRecordCount = value
#----------------------------------------------------------------------
@property
def copyrightText(self):
"""gets/sets the copyrightText"""
return self._copyrightText
#----------------------------------------------------------------------
@copyrightText.setter
def copyrightText(self, value):
"""gets/sets the copyrightText"""
if self._copyrightText != value:
self._copyrightText = value
#----------------------------------------------------------------------
@property
def targetSR(self):
"""gets/sets the targetSR"""
return self._targetSR
#----------------------------------------------------------------------
@targetSR.setter
def targetSR(self, value):
"""gets/sets the targetSR"""
if self._targetSR != value:
self._targetSR = value
#----------------------------------------------------------------------
@property
def value(self):
"""returns the object as a dictionary"""
val = {}
for k in self.__allowed_keys:
value = getattr(self, "_" + k)
if value is not None:
val[k] = value
return val
#----------------------------------------------------------------------
def __str__(self):
"""returns object as string"""
return json.dumps(self.value)
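def _example_publish_fgdb_params():
    """Hedged usage sketch (not part of the original module): the pattern is
    the same as for PublishShapefileParameter and
    PublishFeatureCollectionParameter; PublishFGDBParameter adds the boolean
    overwrite flag. The values below are illustrative placeholders only."""
    params = PublishFGDBParameter(
        name="Roads",
        layerInfo={"capabilities": "Query"},
        overwrite=True)                # republish over an existing service
    return str(params)                 # JSON string of the allowed-key dict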
########################################################################
class PublishSDParmaeters(BaseParameters):
"""
    Required parameters to publish a service definition (SD) file
"""
_tags = None
_overwrite = False
#----------------------------------------------------------------------
def __init__(self, tags,overwrite=False):
"""Constructor"""
self._tags = tags
self._overwrite = overwrite
#----------------------------------------------------------------------
@property
def tags(self):
"""gets/sets the tags value"""
return self._tags
#----------------------------------------------------------------------
@tags.setter
def tags(self, value):
"""gets/sets the tags value"""
if self._tags != value:
self._tags = value
#----------------------------------------------------------------------
@property
def value(self):
""" returns the parameter value """
r = {}
attributes = [attr for attr in dir(self)
if not attr.startswith('__') and \
not attr.startswith('_')]
for a in attributes:
if a != "value":
val = getattr(self, a)
if val is not None:
r[a] = val
return r
#----------------------------------------------------------------------
def __str__(self):
"""returns the object as string"""
return json.dumps(self.value)
#----------------------------------------------------------------------
@property
def overwrite(self):
"""
        gets/sets whether an existing item should be overwritten
"""
return self._overwrite
#----------------------------------------------------------------------
@overwrite.setter
def overwrite(self, value):
"""
        gets/sets whether an existing item should be overwritten
"""
if self._overwrite != value:
self._overwrite = value
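def _example_publish_sd_params():
    """Hedged usage sketch (not part of the original module): unlike the other
    parameter classes, PublishSDParmaeters.value reflects over its public
    attributes via dir(), so the tags and overwrite properties end up in the
    returned dict. The tag values below are illustrative only."""
    params = PublishSDParmaeters(tags="roads, example", overwrite=True)
    return params.value                # e.g. includes 'tags' and 'overwrite'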
|
|
# coding=utf-8
# Copyright 2014 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
import os
from textwrap import dedent
import pytest
from pants.backend.jvm.artifact import Artifact
from pants.backend.jvm.repository import Repository
from pants.backend.jvm.targets.jar_dependency import JarDependency
from pants.backend.jvm.targets.jar_library import JarLibrary
from pants.backend.jvm.targets.java_library import JavaLibrary
from pants.backend.jvm.targets.scala_library import ScalaLibrary
from pants.base.address import BuildFileAddress
from pants.base.build_file import FilesystemBuildFile
from pants.base.build_file_aliases import BuildFileAliases
from pants.base.build_file_parser import BuildFileParser
from pants.base.target import Target
from pants_test.base_test import BaseTest
# TODO(Eric Ayers) Explicit unit tests are missing for registered_aliases, parse_spec,
# parse_build_file_family
class ErrorTarget(Target):
def __init__(self, *args, **kwargs):
assert False, "This fake target should never be initialized in this test!"
class BuildFileParserBasicsTest(BaseTest):
def test_addressable_exceptions(self):
self.add_to_build_file('a/BUILD', 'target()')
build_file_a = FilesystemBuildFile(self.build_root, 'a/BUILD')
with pytest.raises(BuildFileParser.ExecuteError):
self.build_file_parser.parse_build_file(build_file_a)
self.add_to_build_file('b/BUILD', 'target(name="foo", "bad_arg")')
build_file_b = FilesystemBuildFile(self.build_root, 'b/BUILD')
with pytest.raises(BuildFileParser.BuildFileParserError):
self.build_file_parser.parse_build_file(build_file_b)
self.add_to_build_file('d/BUILD', dedent(
'''
target(
name="foo",
dependencies=[
object(),
]
)
'''
))
build_file_d = FilesystemBuildFile(self.build_root, 'd/BUILD')
with pytest.raises(BuildFileParser.BuildFileParserError):
self.build_file_parser.parse_build_file(build_file_d)
def test_noop_parse(self):
self.add_to_build_file('BUILD', '')
build_file = FilesystemBuildFile(self.build_root, '')
address_map = set(self.build_file_parser.parse_build_file(build_file))
self.assertEqual(len(address_map), 0)
class BuildFileParserTargetTest(BaseTest):
@property
def alias_groups(self):
return BuildFileAliases.create(targets={'fake': ErrorTarget})
def test_trivial_target(self):
self.add_to_build_file('BUILD', '''fake(name='foozle')''')
build_file = FilesystemBuildFile(self.build_root, 'BUILD')
address_map = self.build_file_parser.parse_build_file(build_file)
self.assertEqual(len(address_map), 1)
address, proxy = address_map.popitem()
self.assertEqual(address, BuildFileAddress(build_file, 'foozle'))
self.assertEqual(proxy.name, 'foozle')
self.assertEqual(proxy.target_type, ErrorTarget)
  def test_trivial_target_addressable(self):
self.add_to_build_file('BUILD', '''fake(name='foozle')''')
build_file = FilesystemBuildFile(self.build_root, 'BUILD')
address_map = self.build_file_parser.parse_build_file(build_file)
self.assertEqual(len(address_map), 1)
address, addressable = address_map.popitem()
self.assertEqual(address, BuildFileAddress(build_file, 'foozle'))
self.assertEqual(addressable.name, 'foozle')
self.assertEqual(addressable.target_type, ErrorTarget)
def test_sibling_build_files(self):
self.add_to_build_file('BUILD', dedent(
'''
fake(name="base",
dependencies=[
':foo',
])
'''))
self.add_to_build_file('BUILD.foo', dedent(
'''
fake(name="foo",
dependencies=[
':bat',
])
'''))
self.add_to_build_file('./BUILD.bar', dedent(
'''
fake(name="bat")
'''))
bar_build_file = FilesystemBuildFile(self.build_root, 'BUILD.bar')
base_build_file = FilesystemBuildFile(self.build_root, 'BUILD')
foo_build_file = FilesystemBuildFile(self.build_root, 'BUILD.foo')
address_map = self.build_file_parser.address_map_from_build_file(bar_build_file)
addresses = address_map.keys()
self.assertEqual(set([bar_build_file, base_build_file, foo_build_file]),
set([address.build_file for address in addresses]))
self.assertEqual(set([':base', ':foo', ':bat']),
set([address.spec for address in addresses]))
def test_build_file_duplicates(self):
# This workspace has two targets in the same file with the same name.
self.add_to_build_file('BUILD', 'fake(name="foo")\n')
self.add_to_build_file('BUILD', 'fake(name="foo")\n')
with pytest.raises(BuildFileParser.AddressableConflictException):
base_build_file = FilesystemBuildFile(self.build_root, 'BUILD')
self.build_file_parser.parse_build_file(base_build_file)
def test_sibling_build_files_duplicates(self):
# This workspace is malformed, you can't shadow a name in a sibling BUILD file
self.add_to_build_file('BUILD', dedent(
'''
fake(name="base",
dependencies=[
':foo',
])
'''))
self.add_to_build_file('BUILD.foo', dedent(
'''
fake(name="foo",
dependencies=[
':bat',
])
'''))
self.add_to_build_file('./BUILD.bar', dedent(
'''
fake(name="base")
'''))
with pytest.raises(BuildFileParser.SiblingConflictException):
base_build_file = FilesystemBuildFile(self.build_root, 'BUILD')
bf_address = BuildFileAddress(base_build_file, 'base')
self.build_file_parser.address_map_from_build_file(base_build_file)
class BuildFileParserExposedObjectTest(BaseTest):
@property
def alias_groups(self):
return BuildFileAliases.create(objects={'fake_object': object()})
def test_exposed_object(self):
self.add_to_build_file('BUILD', '''fake_object''')
build_file = FilesystemBuildFile(self.build_root, 'BUILD')
address_map = self.build_file_parser.parse_build_file(build_file)
self.assertEqual(len(address_map), 0)
class BuildFileParserExposedContextAwareObjectFactoryTest(BaseTest):
@staticmethod
def make_lib(parse_context):
def real_make_lib(org, name, rev):
dep = parse_context.create_object('jar', org=org, name=name, rev=rev)
parse_context.create_object('jar_library', name=name, jars=[dep])
return real_make_lib
@staticmethod
def create_java_libraries(parse_context):
def real_create_java_libraries(base_name,
org='com.twitter',
provides_java_name=None,
provides_scala_name=None):
def provides_artifact(provides_name):
if provides_name is None:
return None
jvm_repo = Repository(
name='maven-central',
url='http://maven.example.com',
push_db_basedir=os.path.join('build-support', 'ivy', 'pushdb'),
)
return parse_context.create_object('artifact',
org=org,
name=provides_name,
repo=jvm_repo)
parse_context.create_object('java_library',
name='%s-java' % base_name,
sources=[],
dependencies=[],
provides=provides_artifact(provides_java_name))
parse_context.create_object('scala_library',
name='%s-scala' % base_name,
sources=[],
dependencies=[],
provides=provides_artifact(provides_scala_name))
return real_create_java_libraries
def setUp(self):
super(BuildFileParserExposedContextAwareObjectFactoryTest, self).setUp()
self._paths = set()
def path_relative_util(self, parse_context):
def real_path_relative_util(path):
self._paths.add(os.path.join(parse_context.rel_path, path))
return real_path_relative_util
@property
def alias_groups(self):
return BuildFileAliases.create(
targets={
'jar_library': JarLibrary,
'java_library': JavaLibrary,
'scala_library': ScalaLibrary,
},
context_aware_object_factories={
'make_lib': self.make_lib,
'create_java_libraries': self.create_java_libraries,
'path_util': self.path_relative_util,
},
objects={
'artifact': Artifact,
'jar': JarDependency,
}
)
def test_context_aware_object_factories(self):
contents = dedent('''
create_java_libraries(base_name="create-java-libraries",
provides_java_name="test-java",
provides_scala_name="test-scala")
make_lib("com.foo.test", "does_not_exists", "1.0")
path_util("baz")
''')
self.create_file('3rdparty/BUILD', contents)
build_file = FilesystemBuildFile(self.build_root, '3rdparty/BUILD')
address_map = self.build_file_parser.parse_build_file(build_file)
registered_proxies = set(address_map.values())
self.assertEqual(len(registered_proxies), 3)
targets_created = {}
for target_proxy in registered_proxies:
targets_created[target_proxy.name] = target_proxy.target_type
self.assertEqual(set(['does_not_exists',
'create-java-libraries-scala',
'create-java-libraries-java']),
set(targets_created.keys()))
self.assertEqual(targets_created['does_not_exists'], JarLibrary)
self.assertEqual(targets_created['create-java-libraries-java'], JavaLibrary)
self.assertEqual(targets_created['create-java-libraries-scala'], ScalaLibrary)
self.assertEqual(set(['3rdparty/baz']), self._paths)
def test_raises_parse_error(self):
self.add_to_build_file('BUILD', 'foo(name = = "baz")')
build_file = FilesystemBuildFile(self.build_root, 'BUILD')
with self.assertRaises(BuildFileParser.ParseError):
self.build_file_parser.parse_build_file(build_file)
# Test some corner cases for the context printing
# Error at beginning of BUILD file
build_file = self.add_to_build_file('begin/BUILD', dedent('''
*?&INVALID! = 'foo'
target(
name='bar',
dependencies= [
':baz',
],
)
'''))
with self.assertRaises(BuildFileParser.ParseError):
self.build_file_parser.parse_build_file(build_file)
# Error at end of BUILD file
build_file = self.add_to_build_file('end/BUILD', dedent('''
target(
name='bar',
dependencies= [
':baz',
],
)
*?&INVALID! = 'foo'
'''))
with self.assertRaises(BuildFileParser.ParseError):
self.build_file_parser.parse_build_file(build_file)
# Error in the middle of BUILD file > 6 lines
build_file = self.add_to_build_file('middle/BUILD', dedent('''
target(
name='bar',
*?&INVALID! = 'foo'
dependencies = [
':baz',
],
)
'''))
with self.assertRaises(BuildFileParser.ParseError):
self.build_file_parser.parse_build_file(build_file)
# Error in very short build file.
build_file = self.add_to_build_file('short/BUILD', dedent('''
target(name='bar', dependencies = [':baz'],) *?&INVALID! = 'foo'
'''))
with self.assertRaises(BuildFileParser.ParseError):
self.build_file_parser.parse_build_file(build_file)
def test_raises_execute_error(self):
self.add_to_build_file('BUILD', 'undefined_alias(name="baz")')
build_file = FilesystemBuildFile(self.build_root, 'BUILD')
with self.assertRaises(BuildFileParser.ExecuteError):
self.build_file_parser.parse_build_file(build_file)
  def test_build_file_parser_error_hierarchy(self):
"""Exception handling code depends on the fact that all explicit exceptions from BuildFileParser are
subclassed from the BuildFileParserError base class.
"""
self.assertIsInstance(BuildFileParser.BuildFileScanError(), BuildFileParser.BuildFileParserError)
self.assertIsInstance(BuildFileParser.AddressableConflictException(), BuildFileParser.BuildFileParserError)
self.assertIsInstance(BuildFileParser.SiblingConflictException(), BuildFileParser.BuildFileParserError)
self.assertIsInstance(BuildFileParser.ParseError(), BuildFileParser.BuildFileParserError)
self.assertIsInstance(BuildFileParser.ExecuteError(), BuildFileParser.BuildFileParserError)
|
|
import os
import logging
import importlib
import json
from billy.scrape.validator import DatetimeValidator
from billy.core import settings
from billy.utils import JSONEncoderPlus
import scrapelib
class ScrapeError(Exception):
"""
Base class for scrape errors.
"""
def __init__(self, msg, orig_exception=None):
self.msg = msg
self.orig_exception = orig_exception
def __str__(self):
if self.orig_exception:
return '%s\nOriginal Exception: %s' % (self.msg,
self.orig_exception)
else:
return self.msg
class NoDataForPeriod(ScrapeError):
"""
Exception to be raised when no data exists for a given period
"""
def __init__(self, period):
self.period = period
def __str__(self):
return 'No data exists for %s' % self.period
class Scraper(scrapelib.Scraper):
""" Base class for all Scrapers
Provides several useful methods for retrieving URLs and checking
arguments against metadata.
"""
latest_only = False
def __init__(self, metadata, output_dir=None, strict_validation=None,
fastmode=False):
"""
Create a new Scraper instance.
:param metadata: metadata for this scraper
:param output_dir: the data directory to use
:param strict_validation: exit immediately if validation fails
"""
super(Scraper, self).__init__()
# scrapelib overrides
self.timeout = settings.SCRAPELIB_TIMEOUT
self.cache_storage = scrapelib.FileCache(settings.BILLY_CACHE_DIR)
self.requests_per_minute = settings.SCRAPELIB_RPM
self.retry_attempts = settings.SCRAPELIB_RETRY_ATTEMPTS
self.retry_wait_seconds = settings.SCRAPELIB_RETRY_WAIT_SECONDS
if fastmode:
self.requests_per_minute = 0
self.cache_write_only = False
self.metadata = metadata
self.output_dir = output_dir
self.output_names = set()
# make output_dir
        if not os.path.isdir(self.output_dir):
            os.makedirs(self.output_dir)
# validation
self.strict_validation = strict_validation
self.validator = DatetimeValidator()
self._schema = {}
self._load_schemas()
# logging convenience methods
self.logger = logging.getLogger("billy")
self.log = self.logger.info
self.info = self.logger.info
self.debug = self.logger.debug
self.warning = self.logger.warning
self.error = self.logger.error
self.critical = self.logger.critical
def _load_schemas(self):
""" load all schemas into schema dict """
types = ('bill', 'committee', 'person', 'vote', 'event', 'speech')
for type in types:
schema_path = os.path.join(os.path.split(__file__)[0],
'../schemas/%s.json' % type)
self._schema[type] = json.load(open(schema_path))
self._schema[type]['properties'][settings.LEVEL_FIELD] = {
'minLength': 2, 'type': 'string'}
# bills & votes
self._schema['bill']['properties']['session']['enum'] = \
self.all_sessions()
self._schema['vote']['properties']['session']['enum'] = \
self.all_sessions()
# legislators
terms = [t['name'] for t in self.metadata['terms']]
# ugly break here b/c this line is nearly impossible to split
self._schema['person']['properties']['roles'][
'items']['properties']['term']['enum'] = terms
@property
def object_count(self):
# number of distinct output filenames
return len(self.output_names)
def validate_json(self, obj):
try:
self.validator.validate(obj, self._schema[obj['_type']])
except ValueError as ve:
self.warning(str(ve))
if self.strict_validation:
raise ve
def all_sessions(self):
sessions = []
for t in self.metadata['terms']:
sessions.extend(t['sessions'])
return sessions
def validate_session(self, session, latest_only=False):
""" Check that a session is present in the metadata dictionary.
raises :exc:`~billy.scrape.NoDataForPeriod` if session is invalid
:param session: string representing session to check
"""
if latest_only:
if session != self.metadata['terms'][-1]['sessions'][-1]:
raise NoDataForPeriod(session)
for t in self.metadata['terms']:
if session in t['sessions']:
return True
raise NoDataForPeriod(session)
def validate_term(self, term, latest_only=False):
""" Check that a term is present in the metadata dictionary.
raises :exc:`~billy.scrape.NoDataForPeriod` if term is invalid
:param term: string representing term to check
:param latest_only: if True, will raise exception if term is not
the current term (default: False)
"""
if latest_only:
if term == self.metadata['terms'][-1]['name']:
return True
else:
raise NoDataForPeriod(term)
for t in self.metadata['terms']:
if term == t['name']:
return True
raise NoDataForPeriod(term)
def save_object(self, obj):
self.log('Save %s %s', obj['_type'], unicode(obj))
# copy jurisdiction to LEVEL_FIELD
obj[settings.LEVEL_FIELD] = getattr(self, 'jurisdiction')
filename = obj.get_filename().encode('utf-8')
self.output_names.add(filename) # keep tally of all output names
# pluralize type
if obj['_type'] == 'speech':
data_dir = 'speeches'
elif obj['_type'] == 'person':
data_dir = 'legislators'
else:
data_dir = obj['_type'] + 's'
with open(os.path.join(self.output_dir, data_dir, filename),
'w') as f:
json.dump(obj, f, cls=JSONEncoderPlus)
# validate after writing, allows for inspection
self.validate_json(obj)
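# Hedged illustration (not from the original module): the metadata dict that
# validate_session(), validate_term(), all_sessions() and check_sessions()
# consume is assumed to look roughly like this; the term/session names below
# are made up for the example.
_EXAMPLE_METADATA = {
    'terms': [
        {'name': '2011-2012', 'sessions': ['2011', '2012']},
    ],
    'session_details': {
        '2011': {'_scraped_name': '2011 Regular Session'},
        '2012': {'_scraped_name': '2012 Regular Session'},
    },
}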
class SourcedObject(dict):
""" Base object used for data storage.
Base class for :class:`~billy.scrape.bills.Bill`,
:class:`~billy.scrape.legislators.Legislator`,
:class:`~billy.scrape.votes.Vote`,
and :class:`~billy.scrape.committees.Committee`.
SourcedObjects work like a dictionary. It is possible
to add extra data beyond the required fields by assigning to the
`SourcedObject` instance like a dictionary.
"""
def __init__(self, _type, **kwargs):
super(SourcedObject, self).__init__()
self['_type'] = _type
self['sources'] = []
self.update(kwargs)
def add_source(self, url, **kwargs):
"""
Add a source URL from which data related to this object was scraped.
:param url: the location of the source
"""
self['sources'].append(dict(url=url, **kwargs))
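def _example_sourced_object():
    """Hedged usage sketch (not billy documentation): SourcedObject behaves
    like a dict, so required fields, sources and arbitrary extra data can all
    be set through normal item assignment. Field names are illustrative."""
    obj = SourcedObject('bill', session='2011', bill_id='HB 1')
    obj.add_source('http://example.com/hb1', note='introduced version')
    obj['extra_field'] = 'anything'    # extra data beyond the required fields
    return obj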
def get_scraper(mod_path, scraper_type):
""" import a scraper from the scraper registry """
# act of importing puts it into the registry
try:
module = importlib.import_module(mod_path)
except ImportError as e:
raise ScrapeError("could not import %s" % mod_path, e)
# now find the class within the module
ScraperClass = None
for k, v in module.__dict__.iteritems():
if k.startswith('_'):
continue
if getattr(v, 'scraper_type', None) == scraper_type:
if ScraperClass:
raise ScrapeError("two %s scrapers found in module %s: %s %s" %
(scraper_type, mod_path, ScraperClass, k))
ScraperClass = v
if not ScraperClass:
raise ScrapeError("no %s scraper found in module %s" % (
scraper_type, mod_path))
return ScraperClass
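def _example_get_scraper():
    """Hedged usage sketch: the module path and scraper_type below are
    hypothetical. get_scraper() imports the module and returns the single
    class whose scraper_type attribute matches, raising ScrapeError if zero
    or more than one such class is found."""
    return get_scraper('openstates.ex.bills', 'bills')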
def check_sessions(metadata, sessions):
all_sessions_in_terms = list(reduce(lambda x, y: x + y,
[x['sessions'] for x in metadata['terms']]))
# copy the list to avoid modifying it
metadata_session_details = list(metadata.get('_ignored_scraped_sessions',
[]))
for k, v in metadata['session_details'].iteritems():
try:
all_sessions_in_terms.remove(k)
except ValueError:
raise ScrapeError('session %s exists in session_details but not '
'in a term' % k)
metadata_session_details.append(v.get('_scraped_name'))
if not sessions:
raise ScrapeError('no sessions from session_list()')
if all_sessions_in_terms:
raise ScrapeError('no session_details for session(s): %r' %
all_sessions_in_terms)
unaccounted_sessions = []
for s in sessions:
if s not in metadata_session_details:
unaccounted_sessions.append(s)
if unaccounted_sessions:
raise ScrapeError('session(s) unaccounted for: %r' %
unaccounted_sessions)
|
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tf.subscribe."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.framework import subscribe
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import script_ops
from tensorflow.python.ops import sparse_ops
from tensorflow.python.ops import tensor_array_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import googletest
class SubscribeTest(test_util.TensorFlowTestCase):
def _ExpectSubscribedIdentities(self, container):
"""Convenience function to test a container of subscribed identities."""
self.assertTrue(
all(subscribe._is_subscribed_identity(x) for x in container))
def testSideEffect(self):
a = constant_op.constant(1)
b = constant_op.constant(1)
c = math_ops.add(a, b)
with ops.control_dependencies([c]):
d = constant_op.constant(42)
n = math_ops.negative(c)
shared = []
def sub(t):
shared.append(t)
return t
c0 = c
self.assertTrue(c0.op in d.op.control_inputs)
c = subscribe.subscribe(c,
lambda t: script_ops.py_func(sub, [t], [t.dtype]))
# Verify that control dependencies are correctly moved to the subscription.
self.assertFalse(c0.op in d.op.control_inputs)
self.assertTrue(c.op in d.op.control_inputs)
with self.test_session() as sess:
c_out = sess.run([c])
n_out = sess.run([n])
d_out = sess.run([d])
self.assertEqual(n_out, [-2])
self.assertEqual(c_out, [2])
self.assertEqual(d_out, [42])
self.assertEqual(shared, [2, 2, 2])
def testSupportedTypes(self):
"""Confirm that supported types are correctly detected and handled."""
a = constant_op.constant(1)
b = constant_op.constant(1)
c = math_ops.add(a, b)
def sub(t):
return t
# Tuples.
subscribed = subscribe.subscribe(
(a, b), lambda t: script_ops.py_func(sub, [t], [t.dtype]))
self.assertIsInstance(subscribed, tuple)
self._ExpectSubscribedIdentities(subscribed)
# Lists.
subscribed = subscribe.subscribe(
[a, b], lambda t: script_ops.py_func(sub, [t], [t.dtype]))
self.assertIsInstance(subscribed, list)
self._ExpectSubscribedIdentities(subscribed)
# Dictionaries.
subscribed = subscribe.subscribe({
'first': a,
'second': b
}, lambda t: script_ops.py_func(sub, [t], [t.dtype]))
self.assertIsInstance(subscribed, dict)
self._ExpectSubscribedIdentities(subscribed.values())
# Namedtuples.
# pylint: disable=invalid-name
TensorPair = collections.namedtuple('TensorPair', ['first', 'second'])
# pylint: enable=invalid-name
pair = TensorPair(a, b)
subscribed = subscribe.subscribe(
pair, lambda t: script_ops.py_func(sub, [t], [t.dtype]))
self.assertIsInstance(subscribed, TensorPair)
self._ExpectSubscribedIdentities(subscribed)
# Expect an exception to be raised for unsupported types.
with self.assertRaisesRegexp(TypeError, 'has invalid type'):
subscribe.subscribe(c.name,
lambda t: script_ops.py_func(sub, [t], [t.dtype]))
def testCaching(self):
"""Confirm caching of control output is recalculated between calls."""
a = constant_op.constant(1)
b = constant_op.constant(2)
with ops.control_dependencies([a]):
c = constant_op.constant(42)
shared = {}
def sub(t):
shared[t] = shared.get(t, 0) + 1
return t
a = subscribe.subscribe(a,
lambda t: script_ops.py_func(sub, [t], [t.dtype]))
with ops.control_dependencies([b]):
d = constant_op.constant(11)
# If it was using outdated cached control_outputs then
# evaling would not trigger the new subscription.
b = subscribe.subscribe(b,
lambda t: script_ops.py_func(sub, [t], [t.dtype]))
with self.test_session() as sess:
c_out = sess.run([c])
d_out = sess.run([d])
self.assertEqual(c_out, [42])
self.assertEqual(d_out, [11])
self.assertEqual(shared, {2: 1, 1: 1})
def testIsSubscribedIdentity(self):
"""Confirm subscribed identity ops are correctly detected."""
a = constant_op.constant(1)
b = constant_op.constant(2)
c = math_ops.add(a, b)
idop = array_ops.identity(c)
c_sub = subscribe.subscribe(c, [])
self.assertFalse(subscribe._is_subscribed_identity(a))
self.assertFalse(subscribe._is_subscribed_identity(c))
self.assertFalse(subscribe._is_subscribed_identity(idop))
self.assertTrue(subscribe._is_subscribed_identity(c_sub))
def testSubscribeExtend(self):
"""Confirm side effect are correctly added for different input types."""
a = constant_op.constant(1)
b = constant_op.constant(2)
c = math_ops.add(a, b)
shared = {}
def sub(t, name):
shared[name] = shared.get(name, 0) + 1
return t
# Subscribe with a first side effect graph, passing an unsubscribed tensor.
sub_graph1 = lambda t: sub(t, 'graph1')
c_sub = subscribe.subscribe(
c, lambda t: script_ops.py_func(sub_graph1, [t], [t.dtype]))
# Add a second side effect graph, passing the tensor returned by the
# previous call to subscribe().
sub_graph2 = lambda t: sub(t, 'graph2')
c_sub2 = subscribe.subscribe(
c_sub, lambda t: script_ops.py_func(sub_graph2, [t], [t.dtype]))
# Add a third side effect graph, passing the original tensor.
sub_graph3 = lambda t: sub(t, 'graph3')
c_sub3 = subscribe.subscribe(
c, lambda t: script_ops.py_func(sub_graph3, [t], [t.dtype]))
# Make sure there's only one identity op matching the source tensor's name.
graph_ops = ops.get_default_graph().get_operations()
name_prefix = c.op.name + '/subscription/Identity'
identity_ops = [op for op in graph_ops if op.name.startswith(name_prefix)]
self.assertEqual(1, len(identity_ops))
# Expect the objects returned by subscribe() to reference the same tensor.
self.assertIs(c_sub, c_sub2)
self.assertIs(c_sub, c_sub3)
# Expect the three side effect graphs to have been evaluated.
with self.test_session() as sess:
sess.run([c_sub])
self.assertIn('graph1', shared)
self.assertIn('graph2', shared)
self.assertIn('graph3', shared)
def testSubscribeVariable(self):
"""Confirm that variables can be subscribed."""
v1 = variables.Variable(0.0)
v2 = variables.Variable(4.0)
add = math_ops.add(v1, v2)
assign_v1 = v1.assign(3.0)
shared = []
def sub(t):
shared.append(t)
return t
v1_sub = subscribe.subscribe(
v1, lambda t: script_ops.py_func(sub, [t], [t.dtype]))
self.assertTrue(subscribe._is_subscribed_identity(v1_sub))
with self.test_session() as sess:
# Initialize the variables first.
sess.run([v1.initializer])
sess.run([v2.initializer])
# Expect the side effects to be triggered when evaluating the add op as
# it will read the value of the variable.
sess.run([add])
self.assertEqual(1, len(shared))
# Expect the side effect not to be triggered when evaluating the assign
# op as it will not access the 'read' output of the variable.
sess.run([assign_v1])
self.assertEqual(1, len(shared))
sess.run([add])
self.assertEqual(2, len(shared))
# Make sure the values read from the variable match the expected ones.
self.assertEqual([0.0, 3.0], shared)
def testResourceType(self):
"""Confirm that subscribe correctly handles tensors with 'resource' type."""
tensor_array = tensor_array_ops.TensorArray(
dtype=dtypes.float32,
tensor_array_name='test',
size=3,
infer_shape=False)
writer = tensor_array.write(0, [[4.0, 5.0]])
reader = writer.read(0)
shared = []
def sub(t):
shared.append(t)
return t
# TensorArray's handle output tensor has a 'resource' type and cannot be
# subscribed as it's not 'numpy compatible' (see dtypes.py).
# Expect that the original tensor is returned when subscribing to it.
tensor_array_sub = subscribe.subscribe(
tensor_array.handle, lambda t: script_ops.py_func(sub, [t], [t.dtype]))
self.assertIs(tensor_array_sub, tensor_array.handle)
self.assertFalse(subscribe._is_subscribed_identity(tensor_array.handle))
with self.test_session() as sess:
sess.run([reader])
self.assertEqual(0, len(shared))
def testMultipleOutputs(self):
"""Handle subscriptions to multiple outputs from the same op."""
sparse_tensor_1 = sparse_tensor.SparseTensor(
indices=[[0, 0], [1, 2]], values=[1, 2], dense_shape=[3, 4])
sparse_tensor_2 = sparse_tensor.SparseTensor(
indices=[[0, 0], [1, 2]], values=[2, 3], dense_shape=[3, 4])
# This op has three outputs.
sparse_add = sparse_ops.sparse_add(sparse_tensor_1, sparse_tensor_2)
self.assertEqual(3, len(sparse_add.op.outputs))
c1 = constant_op.constant(1)
with ops.control_dependencies(sparse_add.op.outputs):
# This op depends on all the three outputs.
neg = -c1
shared = []
def sub(t):
shared.append(t)
return t
# Subscribe the three outputs at once.
subscribe.subscribe(sparse_add.op.outputs,
lambda t: script_ops.py_func(sub, [t], [t.dtype]))
with self.test_session() as sess:
sess.run([neg])
# All three ops have been processed.
self.assertEqual(3, len(shared))
def test_subscribe_tensors_on_different_devices(self):
"""Side effect ops are added with the same device of the subscribed op."""
c1 = constant_op.constant(10)
c2 = constant_op.constant(20)
with ops.device('cpu:0'):
add = math_ops.add(c1, c2)
with ops.device('cpu:1'):
mul = math_ops.multiply(c1, c2)
def sub(t):
return t
add_sub = subscribe.subscribe(
add, lambda t: script_ops.py_func(sub, [t], [t.dtype]))
mul_sub = subscribe.subscribe(
mul, lambda t: script_ops.py_func(sub, [t], [t.dtype]))
# Expect the identity tensors injected by subscribe to have been created
# on the same device as their original tensors.
self.assertNotEqual(add_sub.device, mul_sub.device)
self.assertEqual(add.device, add_sub.device)
self.assertEqual(mul.device, mul_sub.device)
def test_subscribe_tensors_within_control_flow_context(self):
"""Side effect ops are added with the same control flow context."""
c1 = constant_op.constant(10)
c2 = constant_op.constant(20)
x1 = math_ops.add(c1, c2)
x2 = math_ops.multiply(c1, c2)
cond = control_flow_ops.cond(
x1 < x2,
lambda: math_ops.add(c1, c2, name='then'),
lambda: math_ops.subtract(c1, c2, name='else'),
name='cond')
branch = ops.get_default_graph().get_tensor_by_name('cond/then:0')
def context(tensor):
return tensor.op._get_control_flow_context()
self.assertIs(context(x1), context(x2))
self.assertIsNot(context(x1), context(branch))
results = []
def sub(tensor):
results.append(tensor)
return tensor
tensors = [x1, branch, x2]
subscriptions = subscribe.subscribe(
tensors, lambda t: script_ops.py_func(sub, [t], [t.dtype]))
for tensor, subscription in zip(tensors, subscriptions):
self.assertIs(context(tensor), context(subscription))
# Verify that sub(x1) and sub(x2) are in the same context.
self.assertIs(context(subscriptions[0]), context(subscriptions[2]))
# Verify that sub(x1) and sub(branch) are not.
self.assertIsNot(context(subscriptions[0]), context(subscriptions[1]))
with self.test_session() as sess:
sess.run(cond)
self.assertEqual(3, len(results))
if __name__ == '__main__':
googletest.main()
|
|
from collections import OrderedDict
from django import forms
from django.contrib import admin
from django.core.paginator import Paginator
from django.db.models import Count, Q, Prefetch
from django.template import loader
from django.utils.translation import ugettext
from rangefilter.filter import (
DateRangeFilter as DateRangeFilterBase,
)
from olympia import amo
from olympia.access import acl
from olympia.addons.models import Addon, AddonApprovalsCounter
from olympia.amo.admin import CommaSearchInAdminMixin
from olympia.ratings.models import Rating
from olympia.translations.utils import truncate_text
from .models import AbuseReport
class AbuseReportTypeFilter(admin.SimpleListFilter):
# Human-readable title which will be displayed in the
# right admin sidebar just above the filter options.
title = ugettext('type')
# Parameter for the filter that will be used in the URL query.
parameter_name = 'type'
def lookups(self, request, model_admin):
"""
Returns a list of tuples. The first element in each
tuple is the coded value for the option that will
appear in the URL query. The second element is the
human-readable name for the option that will appear
in the right sidebar.
"""
return (
('user', ugettext('Users')),
('addon', ugettext('Addons')),
)
def queryset(self, request, queryset):
"""
Returns the filtered queryset based on the value
provided in the query string and retrievable via
`self.value()`.
"""
if self.value() == 'user':
return queryset.filter(user__isnull=False)
elif self.value() == 'addon':
return queryset.filter(Q(addon__isnull=False) |
Q(guid__isnull=False))
return queryset
class FakeChoicesMixin(object):
def choices(self, changelist):
"""
Fake choices method (we don't need one, we don't really have choices
for this filter, it's an input widget) that fetches the params and the
current values for other filters, so that we can feed that into
the form that our template displays.
(We don't control the data passed down to the template, so re-using
this one is our only option)
"""
# Grab search query parts and filter query parts as tuples of tuples.
search_query_parts = (
((admin.views.main.SEARCH_VAR, changelist.query),)
) if changelist.query else ()
filters_query_parts = tuple(
(k, v)
for k, v in changelist.get_filters_params().items()
if k not in self.expected_parameters()
)
# Assemble them into a `query_parts` property on a unique fake choice.
all_choice = next(super().choices(changelist))
all_choice['query_parts'] = search_query_parts + filters_query_parts
yield all_choice
class MinimumReportsCountFilter(FakeChoicesMixin, admin.SimpleListFilter):
"""
Custom filter for minimum reports count param.
Does *not* do the actual filtering of the queryset, as it needs to be done
with an aggregate query after all filters have been applied. That part is
implemented in the model admin, see AbuseReportAdmin.get_search_results().
Needs FakeChoicesMixin for the fake choices the template will be using.
Original idea:
https://hakibenita.com/how-to-add-a-text-filter-to-django-admin
"""
template = 'admin/abuse/abusereport/minimum_reports_count_filter.html'
title = ugettext('minimum reports count (grouped by guid)')
parameter_name = 'minimum_reports_count'
def lookups(self, request, model_admin):
"""
Fake lookups() method required to show the filter.
"""
return ((),)
def queryset(self, request, queryset):
return queryset
class HTML5DateInput(forms.DateInput):
format_key = 'DATE_INPUT_FORMATS'
input_type = 'date'
class DateRangeFilter(FakeChoicesMixin, DateRangeFilterBase):
"""
Custom rangefilter.filters.DateTimeRangeFilter class that uses HTML5
widgets and a template without the need for inline CSS/JavaScript.
Needs FakeChoicesMixin for the fake choices the template will be using (the
upstream implementation depends on JavaScript for this).
"""
template = 'admin/abuse/abusereport/date_range_filter.html'
title = ugettext('creation date')
def _get_form_fields(self):
return OrderedDict((
(self.lookup_kwarg_gte, forms.DateField(
label='From',
widget=HTML5DateInput(),
localize=True,
required=False
)),
(self.lookup_kwarg_lte, forms.DateField(
label='To',
widget=HTML5DateInput(),
localize=True,
required=False
)),
))
def choices(self, changelist):
# We want a fake 'All' choice as per FakeChoicesMixin, but as of 0.3.15
# rangefilter's implementation doesn't bother setting the selected
# property, and our mixin calls super(), so we have to do it here.
all_choice = next(super().choices(changelist))
all_choice['selected'] = not any(self.used_parameters)
yield all_choice
class AbuseReportAdmin(CommaSearchInAdminMixin, admin.ModelAdmin):
class Media:
css = {
'all': ('css/admin/abuse_reports.css',)
}
actions = ('delete_selected', 'mark_as_valid', 'mark_as_suspicious')
date_hierarchy = 'modified'
list_display = ('target_name', 'guid', 'type', 'state', 'distribution',
'reason', 'message_excerpt', 'created')
list_filter = (
AbuseReportTypeFilter,
'state',
'reason',
('created', DateRangeFilter),
MinimumReportsCountFilter,
)
list_select_related = ('user',) # For `addon` see get_queryset() below.
# Shouldn't be needed because those fields should all be readonly, but just
# in case we change our mind, FKs should be raw id fields as usual in our
# admin tools.
raw_id_fields = ('addon', 'user', 'reporter')
# All fields except state must be readonly - the submitted data should
# not be changed, only the state for triage.
readonly_fields = (
'created',
'modified',
'reporter',
'country_code',
'addon',
'guid',
'user',
'message',
'client_id',
'addon_name',
'addon_summary',
'addon_version',
'addon_signature',
'application',
'application_version',
'application_locale',
'operating_system',
'operating_system_version',
'install_date',
'addon_install_origin',
'addon_install_method',
'addon_install_source',
'report_entry_point',
'addon_card',
)
ADDON_METADATA_FIELDSET = 'Add-on metadata'
fieldsets = (
(None, {'fields': ('state', 'reason', 'message')}),
(None, {'fields': (
'created',
'modified',
'reporter',
'country_code',
'client_id',
'addon_signature',
'application',
'application_version',
'application_locale',
'operating_system',
'operating_system_version',
'install_date',
'addon_install_origin',
'addon_install_method',
'addon_install_source',
'report_entry_point'
)})
)
# The first fieldset is going to be dynamically added through
# get_fieldsets() depending on the target (add-on, user or unknown add-on),
# using the fields below:
dynamic_fieldset_fields = {
# Known add-on in database
'addon': ('addon_card',),
# User
'user': ('user',),
# Unknown add-on, we only have the guid and maybe some extra addon_*
# fields that were submitted with the report.
'guid': ('addon_name', 'addon_version', 'guid', 'addon_summary'),
}
view_on_site = False # Abuse reports have no public page to link to.
def has_add_permission(self, request):
# Adding new abuse reports through the admin is useless, so we prevent
# it.
return False
def change_view(self, request, object_id, form_url='', extra_context=None):
extra_context = extra_context or {}
extra_context['show_save_and_continue'] = False # Don't need this.
return super().change_view(
request, object_id, form_url, extra_context=extra_context,
)
def delete_queryset(self, request, queryset):
"""Given a queryset, soft-delete it from the database."""
queryset.update(state=AbuseReport.STATES.DELETED)
def get_actions(self, request):
actions = super().get_actions(request)
if not acl.action_allowed(request, amo.permissions.ABUSEREPORTS_EDIT):
# You need AbuseReports:Edit for the extra actions.
actions.pop('mark_as_valid')
actions.pop('mark_as_suspicious')
return actions
def get_search_fields(self, request):
"""
Return search fields according to the type filter.
"""
type_ = request.GET.get('type')
if type_ == 'addon':
search_fields = (
'addon__name__localized_string', 'addon__slug', 'addon_name',
'=guid', 'message', '=addon__id',
)
elif type_ == 'user':
search_fields = (
'message', '=user__id', '^user__username', '^user__email',
)
else:
search_fields = ()
return search_fields
def get_search_id_field(self, request):
"""
Return the field to use when all search terms are numeric, according to
the type filter.
"""
type_ = request.GET.get('type')
if type_ == 'addon':
search_field = 'addon_id'
elif type_ == 'user':
search_field = 'user_id'
else:
search_field = super().get_search_id_field(request)
return search_field
def get_search_results(self, request, qs, search_term):
"""
Custom get_search_results() method that handles minimum_reports_count.
"""
minimum_reports_count = request.GET.get('minimum_reports_count')
if minimum_reports_count:
# minimum_reports_count has its own custom filter class but the
# filtering is actually done here, because it needs to happen after
# all other filters have been applied in order for the aggregate
# queryset to be correct.
guids = (qs.values_list('guid', flat=True)
.filter(guid__isnull=False)
.annotate(Count('guid'))
.filter(guid__count__gte=minimum_reports_count)
.order_by())
qs = qs.filter(guid__in=list(guids))
qs, use_distinct = super().get_search_results(request, qs, search_term)
return qs, use_distinct
def get_queryset(self, request):
qs = super().get_queryset(request)
# Minimize number of queries : for users linked to abuse reports, we
# don't have transformers, so we can directly make a JOIN, and that's
# taken care of by list_select_related. For addons, we want the
# translations transformer, so the most efficient way to load them is
# through prefetch_related() + only_translations() (we don't care about
# the other transforms).
return qs.prefetch_related(
Prefetch(
'addon', queryset=Addon.objects.all().only_translations()),
)
def get_fieldsets(self, request, obj=None):
if obj.addon:
target = 'addon'
elif obj.user:
target = 'user'
else:
target = 'guid'
dynamic_fieldset = (
(None, {'fields': self.dynamic_fieldset_fields[target]}),
)
return dynamic_fieldset + self.fieldsets
def target_name(self, obj):
name = obj.target.name if obj.target else obj.addon_name
return '%s %s' % (name, obj.addon_version or '')
target_name.short_description = ugettext('User / Add-on')
def addon_card(self, obj):
template = loader.get_template('reviewers/addon_details_box.html')
addon = obj.addon
try:
approvals_info = addon.addonapprovalscounter
except AddonApprovalsCounter.DoesNotExist:
approvals_info = None
developers = addon.listed_authors
# Provide all the necessary context addon_details_box.html needs. Note
# the use of Paginator() to match what the template expects.
context = {
'addon': addon,
'addon_name': addon.name,
'approvals_info': approvals_info,
'reports': Paginator(
(AbuseReport.objects
.filter(Q(addon=addon) | Q(user__in=developers))
.order_by('-created')), 5).page(1),
'user_ratings': Paginator(
(Rating.without_replies
.filter(addon=addon, rating__lte=3, body__isnull=False)
.order_by('-created')), 5).page(1),
'version': addon.current_version,
}
return template.render(context)
addon_card.short_description = ''
def distribution(self, obj):
return obj.get_addon_signature_display() if obj.addon_signature else ''
distribution.short_description = ugettext('Distribution')
def reporter_country(self, obj):
return obj.country_code
reporter_country.short_description = ugettext("Reporter's country")
def message_excerpt(self, obj):
return truncate_text(obj.message, 140)[0] if obj.message else ''
message_excerpt.short_description = ugettext('Message excerpt')
def mark_as_valid(self, request, qs):
for obj in qs:
obj.update(state=AbuseReport.STATES.VALID)
self.message_user(
request,
ugettext(
'The %d selected reports have been marked as valid.' % (
qs.count())))
mark_as_valid.short_description = 'Mark selected abuse reports as valid'
def mark_as_suspicious(self, request, qs):
for obj in qs:
obj.update(state=AbuseReport.STATES.SUSPICIOUS)
self.message_user(
request,
ugettext(
'The %d selected reports have been marked as suspicious.' % (
qs.count())))
mark_as_suspicious.short_description = (
ugettext('Mark selected abuse reports as suspicious'))
admin.site.register(AbuseReport, AbuseReportAdmin)
|
|
# Copyright 2011 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Module dedicated functions/classes dealing with rate limiting requests.
"""
import collections
import copy
import httplib
import math
import re
import time
import webob.dec
import webob.exc
from cinder.api.openstack import wsgi
from cinder.api.views import limits as limits_views
from cinder.api import xmlutil
from cinder.openstack.common.gettextutils import _
from cinder.openstack.common import importutils
from cinder.openstack.common import jsonutils
from cinder import quota
from cinder import wsgi as base_wsgi
QUOTAS = quota.QUOTAS
LIMITS_PREFIX = "limits."
# Convenience constants for the limits dictionary passed to Limiter().
PER_SECOND = 1
PER_MINUTE = 60
PER_HOUR = 60 * 60
PER_DAY = 60 * 60 * 24
limits_nsmap = {None: xmlutil.XMLNS_COMMON_V10, 'atom': xmlutil.XMLNS_ATOM}
class LimitsTemplate(xmlutil.TemplateBuilder):
def construct(self):
root = xmlutil.TemplateElement('limits', selector='limits')
rates = xmlutil.SubTemplateElement(root, 'rates')
rate = xmlutil.SubTemplateElement(rates, 'rate', selector='rate')
rate.set('uri', 'uri')
rate.set('regex', 'regex')
limit = xmlutil.SubTemplateElement(rate, 'limit', selector='limit')
limit.set('value', 'value')
limit.set('verb', 'verb')
limit.set('remaining', 'remaining')
limit.set('unit', 'unit')
limit.set('next-available', 'next-available')
absolute = xmlutil.SubTemplateElement(root, 'absolute',
selector='absolute')
limit = xmlutil.SubTemplateElement(absolute, 'limit',
selector=xmlutil.get_items)
limit.set('name', 0)
limit.set('value', 1)
return xmlutil.MasterTemplate(root, 1, nsmap=limits_nsmap)
class LimitsController(object):
"""Controller for accessing limits in the OpenStack API."""
@wsgi.serializers(xml=LimitsTemplate)
def index(self, req):
"""Return all global and rate limit information."""
context = req.environ['cinder.context']
quotas = QUOTAS.get_project_quotas(context, context.project_id,
usages=False)
abs_limits = dict((k, v['limit']) for k, v in quotas.items())
rate_limits = req.environ.get("cinder.limits", [])
builder = self._get_view_builder(req)
return builder.build(rate_limits, abs_limits)
def _get_view_builder(self, req):
return limits_views.ViewBuilder()
def create_resource():
return wsgi.Resource(LimitsController())
class Limit(object):
"""Stores information about a limit for HTTP requests."""
UNITS = {
1: "SECOND",
60: "MINUTE",
60 * 60: "HOUR",
60 * 60 * 24: "DAY",
}
UNIT_MAP = dict([(v, k) for k, v in UNITS.items()])
def __init__(self, verb, uri, regex, value, unit):
"""Initialize a new `Limit`.
@param verb: HTTP verb (POST, PUT, etc.)
@param uri: Human-readable URI
@param regex: Regular expression format for this limit
@param value: Integer number of requests which can be made
@param unit: Unit of measure for the value parameter
"""
self.verb = verb
self.uri = uri
self.regex = regex
self.value = int(value)
self.unit = unit
self.unit_string = self.display_unit().lower()
self.remaining = int(value)
if value <= 0:
raise ValueError("Limit value must be > 0")
self.last_request = None
self.next_request = None
self.water_level = 0
self.capacity = self.unit
self.request_value = float(self.capacity) / float(self.value)
msg = _("Only %(value)s %(verb)s request(s) can be "
"made to %(uri)s every %(unit_string)s.")
self.error_message = msg % self.__dict__
def __call__(self, verb, url):
"""Represent a call to this limit from a relevant request.
@param verb: string http verb (POST, GET, etc.)
@param url: string URL
"""
if self.verb != verb or not re.match(self.regex, url):
return
now = self._get_time()
if self.last_request is None:
self.last_request = now
leak_value = now - self.last_request
self.water_level -= leak_value
self.water_level = max(self.water_level, 0)
self.water_level += self.request_value
difference = self.water_level - self.capacity
self.last_request = now
if difference > 0:
self.water_level -= self.request_value
self.next_request = now + difference
return difference
cap = self.capacity
water = self.water_level
val = self.value
self.remaining = math.floor(((cap - water) / cap) * val)
self.next_request = now
def _get_time(self):
"""Retrieve the current time. Broken out for testability."""
return time.time()
def display_unit(self):
"""Display the string name of the unit."""
return self.UNITS.get(self.unit, "UNKNOWN")
def display(self):
"""Return a useful representation of this class."""
return {
"verb": self.verb,
"URI": self.uri,
"regex": self.regex,
"value": self.value,
"remaining": int(self.remaining),
"unit": self.display_unit(),
"resetTime": int(self.next_request or self._get_time()),
}
# "Limit" format is a dictionary with the HTTP verb, human-readable URI,
# a regular-expression to match, value and unit of measure (PER_DAY, etc.)
DEFAULT_LIMITS = [
Limit("POST", "*", ".*", 10, PER_MINUTE),
Limit("POST", "*/servers", "^/servers", 50, PER_DAY),
Limit("PUT", "*", ".*", 10, PER_MINUTE),
Limit("GET", "*changes-since*", ".*changes-since.*", 3, PER_MINUTE),
Limit("DELETE", "*", ".*", 100, PER_MINUTE),
]
class RateLimitingMiddleware(base_wsgi.Middleware):
"""Rate-limits requests passing through this middleware.
All limit information is stored in memory for this implementation.
"""
def __init__(self, application, limits=None, limiter=None, **kwargs):
"""Initialize new `RateLimitingMiddleware`
This wraps the given WSGI application and sets up the given limits.
@param application: WSGI application to wrap
@param limits: String describing limits
@param limiter: String identifying class for representing limits
Other parameters are passed to the constructor for the limiter.
"""
base_wsgi.Middleware.__init__(self, application)
# Select the limiter class
if limiter is None:
limiter = Limiter
else:
limiter = importutils.import_class(limiter)
# Parse the limits, if any are provided
if limits is not None:
limits = limiter.parse_limits(limits)
self._limiter = limiter(limits or DEFAULT_LIMITS, **kwargs)
@webob.dec.wsgify(RequestClass=wsgi.Request)
def __call__(self, req):
"""Represent a single call through this middleware.
We should record the request if we have a limit relevant to it.
If no limit is relevant to the request, ignore it.
If the request should be rate limited, return a fault telling the user
they are over the limit and need to retry later.
"""
verb = req.method
url = req.url
context = req.environ.get("cinder.context")
if context:
username = context.user_id
else:
username = None
delay, error = self._limiter.check_for_delay(verb, url, username)
if delay:
msg = _("This request was rate-limited.")
retry = time.time() + delay
return wsgi.OverLimitFault(msg, error, retry)
req.environ["cinder.limits"] = self._limiter.get_limits(username)
return self.application
class Limiter(object):
"""Rate-limit checking class which handles limits in memory."""
def __init__(self, limits, **kwargs):
"""Initialize the new `Limiter`.
@param limits: List of `Limit` objects
"""
self.limits = copy.deepcopy(limits)
self.levels = collections.defaultdict(lambda: copy.deepcopy(limits))
# Pick up any per-user limit information
for key, value in kwargs.items():
if key.startswith(LIMITS_PREFIX):
username = key[len(LIMITS_PREFIX):]
self.levels[username] = self.parse_limits(value)
def get_limits(self, username=None):
"""Return the limits for a given user."""
return [limit.display() for limit in self.levels[username]]
def check_for_delay(self, verb, url, username=None):
"""Check the given verb/user/user triplet for limit.
@return: Tuple of delay (in seconds) and error message (or None, None)
"""
delays = []
for limit in self.levels[username]:
delay = limit(verb, url)
if delay:
delays.append((delay, limit.error_message))
if delays:
delays.sort()
return delays[0]
return None, None
# Note: This method gets called before the class is instantiated,
# so this must be either a static method or a class method. It is
# used to develop a list of limits to feed to the constructor. We
# put this in the class so that subclasses can override the
# default limit parsing.
@staticmethod
def parse_limits(limits):
"""Convert a string into a list of Limit instances.
This implementation expects a semicolon-separated sequence of
parenthesized groups, where each group contains a
comma-separated sequence consisting of HTTP method,
user-readable URI, a URI reg-exp, an integer number of
requests which can be made, and a unit of measure. Valid
values for the latter are "SECOND", "MINUTE", "HOUR", and
"DAY".
@return: List of Limit instances.
"""
# Handle empty limit strings
limits = limits.strip()
if not limits:
return []
# Split up the limits by semicolon
result = []
for group in limits.split(';'):
group = group.strip()
if group[:1] != '(' or group[-1:] != ')':
raise ValueError("Limit rules must be surrounded by "
"parentheses")
group = group[1:-1]
# Extract the Limit arguments
args = [a.strip() for a in group.split(',')]
if len(args) != 5:
raise ValueError("Limit rules must contain the following "
"arguments: verb, uri, regex, value, unit")
# Pull out the arguments
verb, uri, regex, value, unit = args
# Upper-case the verb
verb = verb.upper()
# Convert value--raises ValueError if it's not integer
value = int(value)
# Convert unit
unit = unit.upper()
if unit not in Limit.UNIT_MAP:
raise ValueError("Invalid units specified")
unit = Limit.UNIT_MAP[unit]
# Build a limit
result.append(Limit(verb, uri, regex, value, unit))
return result
class WsgiLimiter(object):
"""Rate-limit checking from a WSGI application.
Uses an in-memory `Limiter`.
To use, POST ``/<username>`` with JSON data such as::
{
"verb" : GET,
"path" : "/servers"
}
and receive a 204 No Content, or a 403 Forbidden with an X-Wait-Seconds
header containing the number of seconds to wait before the action would
succeed.
"""
def __init__(self, limits=None):
"""Initialize the new `WsgiLimiter`.
@param limits: List of `Limit` objects
"""
self._limiter = Limiter(limits or DEFAULT_LIMITS)
@webob.dec.wsgify(RequestClass=wsgi.Request)
def __call__(self, request):
"""Handles a call to this application.
Returns 204 if the request is acceptable to the limiter, else a 403
is returned with a relevant header indicating when the request
*will* succeed.
"""
if request.method != "POST":
raise webob.exc.HTTPMethodNotAllowed()
try:
info = dict(jsonutils.loads(request.body))
except ValueError:
raise webob.exc.HTTPBadRequest()
username = request.path_info_pop()
verb = info.get("verb")
path = info.get("path")
delay, error = self._limiter.check_for_delay(verb, path, username)
if delay:
headers = {"X-Wait-Seconds": "%.2f" % delay}
return webob.exc.HTTPForbidden(headers=headers, explanation=error)
else:
return webob.exc.HTTPNoContent()
class WsgiLimiterProxy(object):
"""Rate-limit requests based on answers from a remote source."""
def __init__(self, limiter_address):
"""Initialize the new `WsgiLimiterProxy`.
@param limiter_address: IP/port combination of where to request limit
"""
self.limiter_address = limiter_address
def check_for_delay(self, verb, path, username=None):
body = jsonutils.dumps({"verb": verb, "path": path})
headers = {"Content-Type": "application/json"}
conn = httplib.HTTPConnection(self.limiter_address)
if username:
conn.request("POST", "/%s" % (username), body, headers)
else:
conn.request("POST", "/", body, headers)
resp = conn.getresponse()
if 200 <= resp.status < 300:
return None, None
return resp.getheader("X-Wait-Seconds"), resp.read() or None
# Note: This method gets called before the class is instantiated,
# so this must be either a static method or a class method. It is
# used to develop a list of limits to feed to the constructor.
# This implementation returns an empty list, since all limit
# decisions are made by a remote server.
@staticmethod
def parse_limits(limits):
"""Ignore a limits string--simply doesn't apply for the limit proxy.
@return: Empty list.
"""
return []
|
|
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import DataMigration
from django.db import models
class Migration(DataMigration):
def forwards(self, orm):
from sentry.constants import RESERVED_TEAM_SLUGS
from sentry.models import slugify_instance
for team in orm['sentry.Team'].objects.filter(models.Q(slug='') | models.Q(slug__isnull=True)):
slugify_instance(team, team.name, reserved=RESERVED_TEAM_SLUGS)
team.save()
for project in orm['sentry.Project'].objects.filter(models.Q(slug='') | models.Q(slug__isnull=True)):
slugify_instance(project, project.name, reserved=RESERVED_TEAM_SLUGS)
project.save()
def backwards(self, orm):
"Write your backwards methods here."
models = {
u'auth.group': {
'Meta': {'object_name': 'Group'},
u'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
u'auth.permission': {
'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
u'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'sentry.user': {
'Meta': {'object_name': 'User', 'db_table': "'auth_user'"},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
u'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
u'sentry.accessgroup': {
'Meta': {'unique_together': "(('team', 'name'),)", 'object_name': 'AccessGroup'},
'data': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'managed': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'members': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['sentry.User']", 'symmetrical': 'False'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'projects': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['sentry.Project']", 'symmetrical': 'False'}),
'team': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': u"orm['sentry.Team']"}),
'type': ('django.db.models.fields.IntegerField', [], {'default': '50'})
},
u'sentry.activity': {
'Meta': {'object_name': 'Activity'},
'data': ('django.db.models.fields.TextField', [], {'null': 'True'}),
'datetime': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'event': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': u"orm['sentry.Event']", 'null': 'True'}),
'group': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': u"orm['sentry.Group']", 'null': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'ident': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True'}),
'project': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': u"orm['sentry.Project']"}),
'type': ('django.db.models.fields.PositiveIntegerField', [], {}),
'user': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': u"orm['sentry.User']", 'null': 'True'})
},
u'sentry.alert': {
'Meta': {'object_name': 'Alert'},
'data': ('django.db.models.fields.TextField', [], {'null': 'True'}),
'datetime': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'group': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': u"orm['sentry.Group']", 'null': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'message': ('django.db.models.fields.TextField', [], {}),
'project': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': u"orm['sentry.Project']"}),
'related_groups': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'related_alerts'", 'symmetrical': 'False', 'through': u"orm['sentry.AlertRelatedGroup']", 'to': u"orm['sentry.Group']"}),
'status': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0', 'db_index': 'True'})
},
u'sentry.alertrelatedgroup': {
'Meta': {'unique_together': "(('group', 'alert'),)", 'object_name': 'AlertRelatedGroup'},
'alert': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': u"orm['sentry.Alert']"}),
'data': ('django.db.models.fields.TextField', [], {'null': 'True'}),
'group': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': u"orm['sentry.Group']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'})
},
u'sentry.event': {
'Meta': {'unique_together': "(('project', 'event_id'),)", 'object_name': 'Event', 'db_table': "'sentry_message'"},
'checksum': ('django.db.models.fields.CharField', [], {'max_length': '32', 'db_index': 'True'}),
'culprit': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'db_column': "'view'", 'blank': 'True'}),
'data': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'datetime': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
'event_id': ('django.db.models.fields.CharField', [], {'max_length': '32', 'null': 'True', 'db_column': "'message_id'"}),
'group': ('sentry.db.models.fields.FlexibleForeignKey', [], {'blank': 'True', 'related_name': "'event_set'", 'null': 'True', 'to': u"orm['sentry.Group']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'level': ('django.db.models.fields.PositiveIntegerField', [], {'default': '40', 'db_index': 'True', 'blank': 'True'}),
'logger': ('django.db.models.fields.CharField', [], {'default': "'root'", 'max_length': '64', 'db_index': 'True', 'blank': 'True'}),
'message': ('django.db.models.fields.TextField', [], {}),
'num_comments': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0', 'null': 'True'}),
'platform': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True'}),
'project': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': u"orm['sentry.Project']", 'null': 'True'}),
'server_name': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True', 'db_index': 'True'}),
'site': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True', 'db_index': 'True'}),
'time_spent': ('django.db.models.fields.FloatField', [], {'null': 'True'})
},
u'sentry.eventmapping': {
'Meta': {'unique_together': "(('project', 'event_id'),)", 'object_name': 'EventMapping'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'event_id': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'group': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': u"orm['sentry.Group']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'project': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': u"orm['sentry.Project']"})
},
u'sentry.group': {
'Meta': {'unique_together': "(('project', 'checksum'),)", 'object_name': 'Group', 'db_table': "'sentry_groupedmessage'"},
'active_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'db_index': 'True'}),
'checksum': ('django.db.models.fields.CharField', [], {'max_length': '32', 'db_index': 'True'}),
'culprit': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'db_column': "'view'", 'blank': 'True'}),
'data': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'first_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'is_public': ('django.db.models.fields.NullBooleanField', [], {'default': 'False', 'null': 'True', 'blank': 'True'}),
'last_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
'level': ('django.db.models.fields.PositiveIntegerField', [], {'default': '40', 'db_index': 'True', 'blank': 'True'}),
'logger': ('django.db.models.fields.CharField', [], {'default': "'root'", 'max_length': '64', 'db_index': 'True', 'blank': 'True'}),
'message': ('django.db.models.fields.TextField', [], {}),
'num_comments': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0', 'null': 'True'}),
'platform': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True'}),
'project': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': u"orm['sentry.Project']", 'null': 'True'}),
'resolved_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'db_index': 'True'}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'status': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0', 'db_index': 'True'}),
'time_spent_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'time_spent_total': ('django.db.models.fields.FloatField', [], {'default': '0'}),
'times_seen': ('django.db.models.fields.PositiveIntegerField', [], {'default': '1', 'db_index': 'True'})
},
u'sentry.groupbookmark': {
'Meta': {'unique_together': "(('project', 'user', 'group'),)", 'object_name': 'GroupBookmark'},
'group': ('sentry.db.models.fields.FlexibleForeignKey', [], {'related_name': "'bookmark_set'", 'to': u"orm['sentry.Group']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'project': ('sentry.db.models.fields.FlexibleForeignKey', [], {'related_name': "'bookmark_set'", 'to': u"orm['sentry.Project']"}),
'user': ('sentry.db.models.fields.FlexibleForeignKey', [], {'related_name': "'sentry_bookmark_set'", 'to': u"orm['sentry.User']"})
},
u'sentry.groupcountbyminute': {
'Meta': {'unique_together': "(('project', 'group', 'date'),)", 'object_name': 'GroupCountByMinute', 'db_table': "'sentry_messagecountbyminute'"},
'date': ('django.db.models.fields.DateTimeField', [], {'db_index': 'True'}),
'group': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': u"orm['sentry.Group']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'project': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': u"orm['sentry.Project']", 'null': 'True'}),
'time_spent_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'time_spent_total': ('django.db.models.fields.FloatField', [], {'default': '0'}),
'times_seen': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'})
},
u'sentry.groupmeta': {
'Meta': {'unique_together': "(('group', 'key'),)", 'object_name': 'GroupMeta'},
'group': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': u"orm['sentry.Group']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'value': ('django.db.models.fields.TextField', [], {})
},
u'sentry.grouptag': {
'Meta': {'unique_together': "(('project', 'key', 'value', 'group'),)", 'object_name': 'GroupTag', 'db_table': "'sentry_messagefiltervalue'"},
'first_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True', 'db_index': 'True'}),
'group': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': u"orm['sentry.Group']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'last_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True', 'db_index': 'True'}),
'project': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': u"orm['sentry.Project']", 'null': 'True'}),
'times_seen': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'value': ('django.db.models.fields.CharField', [], {'max_length': '200'})
},
u'sentry.grouptagkey': {
'Meta': {'unique_together': "(('project', 'group', 'key'),)", 'object_name': 'GroupTagKey'},
'group': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': u"orm['sentry.Group']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'project': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': u"orm['sentry.Project']", 'null': 'True'}),
'values_seen': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'})
},
u'sentry.lostpasswordhash': {
'Meta': {'object_name': 'LostPasswordHash'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'hash': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'user': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': u"orm['sentry.User']", 'unique': 'True'})
},
u'sentry.option': {
'Meta': {'object_name': 'Option'},
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '64'}),
'value': ('picklefield.fields.PickledObjectField', [], {})
},
u'sentry.pendingteammember': {
'Meta': {'unique_together': "(('team', 'email'),)", 'object_name': 'PendingTeamMember'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'team': ('sentry.db.models.fields.FlexibleForeignKey', [], {'related_name': "'pending_member_set'", 'to': u"orm['sentry.Team']"}),
'type': ('django.db.models.fields.IntegerField', [], {'default': '50'})
},
u'sentry.project': {
'Meta': {'unique_together': "(('team', 'slug'),)", 'object_name': 'Project'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'owner': ('sentry.db.models.fields.FlexibleForeignKey', [], {'related_name': "'sentry_owned_project_set'", 'null': 'True', 'to': u"orm['sentry.User']"}),
'platform': ('django.db.models.fields.CharField', [], {'max_length': '32', 'null': 'True'}),
'public': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '50', 'null': 'True'}),
'status': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0', 'db_index': 'True'}),
'team': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': u"orm['sentry.Team']", 'null': 'True'})
},
u'sentry.projectcountbyminute': {
'Meta': {'unique_together': "(('project', 'date'),)", 'object_name': 'ProjectCountByMinute'},
'date': ('django.db.models.fields.DateTimeField', [], {}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'project': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': u"orm['sentry.Project']", 'null': 'True'}),
'time_spent_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'time_spent_total': ('django.db.models.fields.FloatField', [], {'default': '0'}),
'times_seen': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'})
},
u'sentry.projectkey': {
'Meta': {'object_name': 'ProjectKey'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'project': ('sentry.db.models.fields.FlexibleForeignKey', [], {'related_name': "'key_set'", 'to': u"orm['sentry.Project']"}),
'public_key': ('django.db.models.fields.CharField', [], {'max_length': '32', 'unique': 'True', 'null': 'True'}),
'secret_key': ('django.db.models.fields.CharField', [], {'max_length': '32', 'unique': 'True', 'null': 'True'}),
'user': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': u"orm['sentry.User']", 'null': 'True'}),
'user_added': ('sentry.db.models.fields.FlexibleForeignKey', [], {'related_name': "'keys_added_set'", 'null': 'True', 'to': u"orm['sentry.User']"})
},
u'sentry.projectoption': {
'Meta': {'unique_together': "(('project', 'key'),)", 'object_name': 'ProjectOption', 'db_table': "'sentry_projectoptions'"},
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'project': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': u"orm['sentry.Project']"}),
'value': ('picklefield.fields.PickledObjectField', [], {})
},
u'sentry.searchdocument': {
'Meta': {'unique_together': "(('project', 'group'),)", 'object_name': 'SearchDocument'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'date_changed': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'group': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': u"orm['sentry.Group']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'project': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': u"orm['sentry.Project']"}),
'status': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'total_events': ('django.db.models.fields.PositiveIntegerField', [], {'default': '1'})
},
u'sentry.searchtoken': {
'Meta': {'unique_together': "(('document', 'field', 'token'),)", 'object_name': 'SearchToken'},
'document': ('sentry.db.models.fields.FlexibleForeignKey', [], {'related_name': "'token_set'", 'to': u"orm['sentry.SearchDocument']"}),
'field': ('django.db.models.fields.CharField', [], {'default': "'text'", 'max_length': '64'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'times_seen': ('django.db.models.fields.PositiveIntegerField', [], {'default': '1'}),
'token': ('django.db.models.fields.CharField', [], {'max_length': '128'})
},
u'sentry.tagkey': {
'Meta': {'unique_together': "(('project', 'key'),)", 'object_name': 'TagKey', 'db_table': "'sentry_filterkey'"},
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'label': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True'}),
'project': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': u"orm['sentry.Project']"}),
'values_seen': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'})
},
u'sentry.tagvalue': {
'Meta': {'unique_together': "(('project', 'key', 'value'),)", 'object_name': 'TagValue', 'db_table': "'sentry_filtervalue'"},
'data': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'first_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True', 'db_index': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'last_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True', 'db_index': 'True'}),
'project': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': u"orm['sentry.Project']", 'null': 'True'}),
'times_seen': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'value': ('django.db.models.fields.CharField', [], {'max_length': '200'})
},
u'sentry.team': {
'Meta': {'object_name': 'Team'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'members': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'team_memberships'", 'symmetrical': 'False', 'through': u"orm['sentry.TeamMember']", 'to': u"orm['sentry.User']"}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'owner': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': u"orm['sentry.User']"}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '50'})
},
u'sentry.teammember': {
'Meta': {'unique_together': "(('team', 'user'),)", 'object_name': 'TeamMember'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'team': ('sentry.db.models.fields.FlexibleForeignKey', [], {'related_name': "'member_set'", 'to': u"orm['sentry.Team']"}),
'type': ('django.db.models.fields.IntegerField', [], {'default': '50'}),
'user': ('sentry.db.models.fields.FlexibleForeignKey', [], {'related_name': "'sentry_teammember_set'", 'to': u"orm['sentry.User']"})
},
u'sentry.useroption': {
'Meta': {'unique_together': "(('user', 'project', 'key'),)", 'object_name': 'UserOption'},
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'project': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': u"orm['sentry.Project']", 'null': 'True'}),
'user': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': u"orm['sentry.User']"}),
'value': ('picklefield.fields.PickledObjectField', [], {})
}
}
complete_apps = ['sentry']
symmetrical = True
|
|
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
# Copyright (c) 2010 Citrix Systems, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import uuid
from lxml import etree
from oslo_config import cfg
from oslo_log import log as logging
from oslo_utils import importutils
from nova.cloudpipe import pipelib
from nova.i18n import _LI
from nova.i18n import _LW
import nova.virt.firewall as base_firewall
from nova.virt import netutils
LOG = logging.getLogger(__name__)
CONF = cfg.CONF
CONF.import_opt('use_ipv6', 'nova.netconf')
libvirt = None
class NWFilterFirewall(base_firewall.FirewallDriver):
"""This class implements a network filtering mechanism by using
libvirt's nwfilter.
All instances get a filter ("nova-base") applied. This filter
provides some basic security such as protection against MAC
spoofing, IP spoofing, and ARP spoofing.
"""
def __init__(self, virtapi, host, **kwargs):
"""Create an NWFilter firewall driver
:param virtapi: nova.virt.virtapi.VirtAPI instance
:param host: nova.virt.libvirt.host.Host instance
:param kwargs: currently unused
"""
super(NWFilterFirewall, self).__init__(virtapi)
global libvirt
if libvirt is None:
try:
libvirt = importutils.import_module('libvirt')
except ImportError:
LOG.warn(_LW("Libvirt module could not be loaded. "
"NWFilterFirewall will not work correctly."))
self._host = host
self.static_filters_configured = False
self.handle_security_groups = False
def apply_instance_filter(self, instance, network_info):
"""No-op. Everything is done in prepare_instance_filter."""
pass
def _get_connection(self):
return self._host.get_connection()
_conn = property(_get_connection)
def nova_no_nd_reflection_filter(self):
"""This filter protects false positives on IPv6 Duplicate Address
Detection(DAD).
"""
uuid = self._get_filter_uuid('nova-no-nd-reflection')
return '''<filter name='nova-no-nd-reflection' chain='ipv6'>
<!-- no nd reflection -->
<!-- drop if destination mac is v6 mcast mac addr and
we sent it. -->
<uuid>%s</uuid>
<rule action='drop' direction='in'>
<mac dstmacaddr='33:33:00:00:00:00'
dstmacmask='ff:ff:00:00:00:00' srcmacaddr='$MAC'/>
</rule>
</filter>''' % uuid
def nova_dhcp_filter(self):
"""The standard allow-dhcp-server filter is an <ip> one, so it uses
ebtables to allow traffic through. Without a corresponding rule in
iptables, it'll get blocked anyway.
"""
uuid = self._get_filter_uuid('nova-allow-dhcp-server')
return '''<filter name='nova-allow-dhcp-server' chain='ipv4'>
<uuid>%s</uuid>
<rule action='accept' direction='out'
priority='100'>
<udp srcipaddr='0.0.0.0'
dstipaddr='255.255.255.255'
srcportstart='68'
dstportstart='67'/>
</rule>
<rule action='accept' direction='in'
priority='100'>
<udp srcipaddr='$DHCPSERVER'
srcportstart='67'
dstportstart='68'/>
</rule>
</filter>''' % uuid
def setup_basic_filtering(self, instance, network_info):
"""Set up basic filtering (MAC, IP, and ARP spoofing protection)."""
LOG.info(_LI('Called setup_basic_filtering in nwfilter'),
instance=instance)
if self.handle_security_groups:
# No point in setting up a filter set that we'll be overriding
# anyway.
return
LOG.info(_LI('Ensuring static filters'), instance=instance)
self._ensure_static_filters()
nodhcp_base_filter = self.get_base_filter_list(instance, False)
dhcp_base_filter = self.get_base_filter_list(instance, True)
for vif in network_info:
_base_filter = nodhcp_base_filter
for subnet in vif['network']['subnets']:
if subnet.get_meta('dhcp_server'):
_base_filter = dhcp_base_filter
break
self._define_filter(self._get_instance_filter_xml(instance,
_base_filter,
vif))
def _get_instance_filter_parameters(self, vif):
parameters = []
def format_parameter(parameter, value):
return ("<parameter name='%s' value='%s'/>" % (parameter, value))
network = vif['network']
if not vif['network'] or not vif['network']['subnets']:
return parameters
v4_subnets = [s for s in network['subnets'] if s['version'] == 4]
v6_subnets = [s for s in network['subnets'] if s['version'] == 6]
for subnet in v4_subnets:
for ip in subnet['ips']:
parameters.append(format_parameter('IP', ip['address']))
dhcp_server = subnet.get_meta('dhcp_server')
if dhcp_server:
parameters.append(format_parameter('DHCPSERVER', dhcp_server))
if CONF.use_ipv6:
for subnet in v6_subnets:
gateway = subnet.get('gateway')
if gateway:
ra_server = gateway['address'] + "/128"
parameters.append(format_parameter('RASERVER', ra_server))
if CONF.allow_same_net_traffic:
for subnet in v4_subnets:
ipv4_cidr = subnet['cidr']
net, mask = netutils.get_net_and_mask(ipv4_cidr)
parameters.append(format_parameter('PROJNET', net))
parameters.append(format_parameter('PROJMASK', mask))
if CONF.use_ipv6:
for subnet in v6_subnets:
ipv6_cidr = subnet['cidr']
net, prefix = netutils.get_net_and_prefixlen(ipv6_cidr)
parameters.append(format_parameter('PROJNET6', net))
parameters.append(format_parameter('PROJMASK6', prefix))
return parameters
def _get_instance_filter_xml(self, instance, filters, vif):
nic_id = vif['address'].replace(':', '')
instance_filter_name = self._instance_filter_name(instance, nic_id)
parameters = self._get_instance_filter_parameters(vif)
uuid = self._get_filter_uuid(instance_filter_name)
xml = '''<filter name='%s' chain='root'>''' % instance_filter_name
xml += '<uuid>%s</uuid>' % uuid
for f in filters:
xml += '''<filterref filter='%s'>''' % f
xml += ''.join(parameters)
xml += '</filterref>'
xml += '</filter>'
return xml
def get_base_filter_list(self, instance, allow_dhcp):
"""Obtain a list of base filters to apply to an instance.
The return value should be a list of strings, each
specifying a filter name. Subclasses can override this
function to add additional filters as needed. Additional
filters added to the list must also be correctly defined
within the subclass.
"""
if pipelib.is_vpn_image(instance.image_ref):
base_filter = 'nova-vpn'
elif allow_dhcp:
base_filter = 'nova-base'
else:
base_filter = 'nova-nodhcp'
return [base_filter]
def _ensure_static_filters(self):
"""Static filters are filters that have no need to be IP aware.
There is no configuration or tuneability of these filters, so they
can be set up once and forgotten about.
"""
if self.static_filters_configured:
return
filter_set = ['no-mac-spoofing',
'no-ip-spoofing',
'no-arp-spoofing']
self._define_filter(self.nova_no_nd_reflection_filter())
filter_set.append('nova-no-nd-reflection')
self._define_filter(self._filter_container('nova-nodhcp', filter_set))
filter_set.append('allow-dhcp-server')
self._define_filter(self._filter_container('nova-base', filter_set))
self._define_filter(self._filter_container('nova-vpn',
['allow-dhcp-server']))
self._define_filter(self.nova_dhcp_filter())
self.static_filters_configured = True
def _filter_container(self, name, filters):
uuid = self._get_filter_uuid(name)
xml = '''<filter name='%s' chain='root'>
<uuid>%s</uuid>
%s
</filter>''' % (name, uuid,
''.join(["<filterref filter='%s'/>" % (f,) for f in filters]))
return xml
def _get_filter_uuid(self, name):
try:
flt = self._conn.nwfilterLookupByName(name)
xml = flt.XMLDesc(0)
doc = etree.fromstring(xml)
u = doc.find("./uuid").text
except Exception as e:
LOG.debug(u"Cannot find UUID for filter '%(name)s': '%(e)s'",
{'name': name, 'e': e})
u = uuid.uuid4().hex
LOG.debug("UUID for filter '%s' is '%s'" % (name, u))
return u
def _define_filter(self, xml):
if callable(xml):
xml = xml()
self._conn.nwfilterDefineXML(xml)
def unfilter_instance(self, instance, network_info):
"""Clear out the nwfilter rules."""
for vif in network_info:
nic_id = vif['address'].replace(':', '')
instance_filter_name = self._instance_filter_name(instance, nic_id)
try:
_nw = self._conn.nwfilterLookupByName(instance_filter_name)
_nw.undefine()
except libvirt.libvirtError as e:
errcode = e.get_error_code()
if errcode == libvirt.VIR_ERR_OPERATION_INVALID:
# This happens when the instance filter is still in
# use (ie. when the instance has not terminated properly)
raise
LOG.debug('The nwfilter(%s) is not found.',
instance_filter_name, instance=instance)
@staticmethod
def _instance_filter_name(instance, nic_id=None):
if not nic_id:
return 'nova-instance-%s' % (instance.name)
return 'nova-instance-%s-%s' % (instance.name, nic_id)
def instance_filter_exists(self, instance, network_info):
"""Check nova-instance-instance-xxx exists."""
for vif in network_info:
nic_id = vif['address'].replace(':', '')
instance_filter_name = self._instance_filter_name(instance, nic_id)
try:
self._conn.nwfilterLookupByName(instance_filter_name)
except libvirt.libvirtError:
name = instance.name
LOG.debug('The nwfilter(%(instance_filter_name)s) for '
'%(name)s is not found.',
{'instance_filter_name': instance_filter_name,
'name': name},
instance=instance)
return False
return True
class IptablesFirewallDriver(base_firewall.IptablesFirewallDriver):
def __init__(self, virtapi, execute=None, **kwargs):
"""Create an IP tables firewall driver instance
:param virtapi: nova.virt.virtapi.VirtAPI instance
:param execute: unused, pass None
:param kwargs: extra arguments
The @kwargs parameter must contain a key 'host' that
maps to an instance of the nova.virt.libvirt.host.Host
class.
"""
super(IptablesFirewallDriver, self).__init__(virtapi, **kwargs)
self.nwfilter = NWFilterFirewall(virtapi, kwargs['host'])
def setup_basic_filtering(self, instance, network_info):
"""Set up provider rules and basic NWFilter."""
self.nwfilter.setup_basic_filtering(instance, network_info)
if not self.basically_filtered:
LOG.debug('iptables firewall: Setup Basic Filtering',
instance=instance)
self.refresh_provider_fw_rules()
self.basically_filtered = True
def apply_instance_filter(self, instance, network_info):
"""No-op. Everything is done in prepare_instance_filter."""
pass
def unfilter_instance(self, instance, network_info):
# NOTE(salvatore-orlando):
# Overriding base class method for applying nwfilter operation
if self.instance_info.pop(instance.id, None):
self.remove_filters_for_instance(instance)
self.iptables.apply()
self.nwfilter.unfilter_instance(instance, network_info)
else:
LOG.info(_LI('Attempted to unfilter instance which is not '
'filtered'), instance=instance)
def instance_filter_exists(self, instance, network_info):
"""Check nova-instance-instance-xxx exists."""
return self.nwfilter.instance_filter_exists(instance, network_info)
|
|
import itertools
import unittest
from collections import OrderedDict
import numpy
import theano
from numpy.testing import assert_allclose, assert_raises
from theano import tensor
from theano.gof.graph import is_same_graph
from blocks.utils import is_shared_variable
from blocks.bricks.base import application
from blocks.bricks import Tanh
from blocks.bricks.recurrent import (
recurrent, BaseRecurrent, GatedRecurrent,
SimpleRecurrent, Bidirectional, LSTM,
RecurrentStack, RECURRENTSTACK_SEPARATOR)
from blocks.initialization import (
Constant, IsotropicGaussian, Orthogonal, Identity)
from blocks.filter import get_application_call, VariableFilter
from blocks.graph import ComputationGraph
from blocks.roles import INITIAL_STATE
class RecurrentWrapperTestClass(BaseRecurrent):
def __init__(self, dim, **kwargs):
super(RecurrentWrapperTestClass, self).__init__(**kwargs)
self.dim = dim
def get_dim(self, name):
if name in ['inputs', 'states', 'outputs', 'states_2', 'outputs_2']:
return self.dim
if name == 'mask':
return 0
return super(RecurrentWrapperTestClass, self).get_dim(name)
@recurrent(sequences=['inputs', 'mask'], states=['states', 'states_2'],
outputs=['outputs', 'states_2', 'outputs_2', 'states'],
contexts=[])
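# Note: the tuple returned by apply() below follows the order declared in
# `outputs` above: (outputs, states_2, outputs_2, states).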
def apply(self, inputs=None, states=None, states_2=None, mask=None):
next_states = states + inputs
next_states_2 = states_2 + .5
if mask:
next_states = (mask[:, None] * next_states +
(1 - mask[:, None]) * states)
outputs = 10 * next_states
outputs_2 = 10 * next_states_2
return outputs, next_states_2, outputs_2, next_states
class TestRecurrentWrapper(unittest.TestCase):
def setUp(self):
self.recurrent_example = RecurrentWrapperTestClass(dim=1)
def test(self):
X = tensor.tensor3('X')
out, H2, out_2, H = self.recurrent_example.apply(
inputs=X, mask=None)
x_val = numpy.ones((5, 1, 1), dtype=theano.config.floatX)
h = H.eval({X: x_val})
h2 = H2.eval({X: x_val})
out_eval = out.eval({X: x_val})
out_2_eval = out_2.eval({X: x_val})
# This also implicitly tests that the initial states are zeros
assert_allclose(h, x_val.cumsum(axis=0))
assert_allclose(h2, .5 * (numpy.arange(5).reshape((5, 1, 1)) + 1))
assert_allclose(h * 10, out_eval)
assert_allclose(h2 * 10, out_2_eval)
class RecurrentBrickWithBugInInitialStates(BaseRecurrent):
@recurrent(sequences=[], contexts=[],
states=['states'], outputs=['states'])
def apply(self, states):
return states
@recurrent(sequences=[], contexts=[],
states=['states2'], outputs=['states2'])
def apply2(self, states):
return states
def get_dim(self, name):
return 100
def test_bug_in_initial_states():
def do():
brick = RecurrentBrickWithBugInInitialStates()
brick.apply2(n_steps=3, batch_size=5)
assert_raises(KeyError, do)
class TestSimpleRecurrent(unittest.TestCase):
def setUp(self):
self.simple = SimpleRecurrent(dim=3, weights_init=Constant(2),
activation=Tanh())
self.simple.initialize()
def test_one_step(self):
h0 = tensor.matrix('h0')
x = tensor.matrix('x')
mask = tensor.vector('mask')
h1 = self.simple.apply(x, h0, mask=mask, iterate=False)
next_h = theano.function(inputs=[h0, x, mask], outputs=[h1])
h0_val = 0.1 * numpy.array([[1, 1, 0], [0, 1, 1]],
dtype=theano.config.floatX)
x_val = 0.1 * numpy.array([[1, 2, 3], [4, 5, 6]],
dtype=theano.config.floatX)
mask_val = numpy.array([1, 0]).astype(theano.config.floatX)
h1_val = numpy.tanh(h0_val.dot(2 * numpy.ones((3, 3))) + x_val)
h1_val = mask_val[:, None] * h1_val + (1 - mask_val[:, None]) * h0_val
assert_allclose(h1_val, next_h(h0_val, x_val, mask_val)[0])
def test_many_steps(self):
x = tensor.tensor3('x')
mask = tensor.matrix('mask')
h = self.simple.apply(x, mask=mask, iterate=True)
calc_h = theano.function(inputs=[x, mask], outputs=[h])
x_val = 0.1 * numpy.asarray(list(itertools.permutations(range(4))),
dtype=theano.config.floatX)
x_val = numpy.ones((24, 4, 3),
dtype=theano.config.floatX) * x_val[..., None]
mask_val = numpy.ones((24, 4), dtype=theano.config.floatX)
mask_val[12:24, 3] = 0
h_val = numpy.zeros((25, 4, 3), dtype=theano.config.floatX)
for i in range(1, 25):
h_val[i] = numpy.tanh(h_val[i - 1].dot(
2 * numpy.ones((3, 3))) + x_val[i - 1])
h_val[i] = (mask_val[i - 1, :, None] * h_val[i] +
(1 - mask_val[i - 1, :, None]) * h_val[i - 1])
h_val = h_val[1:]
assert_allclose(h_val, calc_h(x_val, mask_val)[0], rtol=1e-04)
# Also test that initial state is a parameter
initial_state, = VariableFilter(roles=[INITIAL_STATE])(
ComputationGraph(h))
assert is_shared_variable(initial_state)
assert initial_state.name == 'initial_state'
class TestLSTM(unittest.TestCase):
def setUp(self):
self.lstm = LSTM(dim=3, weights_init=Constant(2),
biases_init=Constant(0))
self.lstm.initialize()
def test_one_step(self):
h0 = tensor.matrix('h0')
c0 = tensor.matrix('c0')
x = tensor.matrix('x')
h1, c1 = self.lstm.apply(x, h0, c0, iterate=False)
next_h = theano.function(inputs=[x, h0, c0], outputs=[h1])
h0_val = 0.1 * numpy.array([[1, 1, 0], [0, 1, 1]],
dtype=theano.config.floatX)
c0_val = 0.1 * numpy.array([[1, 1, 0], [0, 1, 1]],
dtype=theano.config.floatX)
x_val = 0.1 * numpy.array([range(12), range(12, 24)],
dtype=theano.config.floatX)
W_state_val = 2 * numpy.ones((3, 12), dtype=theano.config.floatX)
W_cell_to_in = 2 * numpy.ones((3,), dtype=theano.config.floatX)
W_cell_to_out = 2 * numpy.ones((3,), dtype=theano.config.floatX)
W_cell_to_forget = 2 * numpy.ones((3,), dtype=theano.config.floatX)
# omitting biases because they are zero
activation = numpy.dot(h0_val, W_state_val) + x_val
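# The 12-wide pre-activation is sliced into four dim-3 gate blocks below:
# [:, :3] input gate, [:, 3:6] forget gate, [:, 6:9] cell candidate,
# [:, 9:12] output gate.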
def sigmoid(x):
return 1. / (1. + numpy.exp(-x))
i_t = sigmoid(activation[:, :3] + c0_val * W_cell_to_in)
f_t = sigmoid(activation[:, 3:6] + c0_val * W_cell_to_forget)
next_cells = f_t * c0_val + i_t * numpy.tanh(activation[:, 6:9])
o_t = sigmoid(activation[:, 9:12] +
next_cells * W_cell_to_out)
h1_val = o_t * numpy.tanh(next_cells)
assert_allclose(h1_val, next_h(x_val, h0_val, c0_val)[0],
rtol=1e-6)
def test_many_steps(self):
x = tensor.tensor3('x')
mask = tensor.matrix('mask')
h, c = self.lstm.apply(x, mask=mask, iterate=True)
calc_h = theano.function(inputs=[x, mask], outputs=[h])
x_val = (0.1 * numpy.asarray(
list(itertools.islice(itertools.permutations(range(12)), 0, 24)),
dtype=theano.config.floatX))
x_val = numpy.ones((24, 4, 12),
dtype=theano.config.floatX) * x_val[:, None, :]
mask_val = numpy.ones((24, 4), dtype=theano.config.floatX)
mask_val[12:24, 3] = 0
h_val = numpy.zeros((25, 4, 3), dtype=theano.config.floatX)
c_val = numpy.zeros((25, 4, 3), dtype=theano.config.floatX)
W_state_val = 2 * numpy.ones((3, 12), dtype=theano.config.floatX)
W_cell_to_in = 2 * numpy.ones((3,), dtype=theano.config.floatX)
W_cell_to_out = 2 * numpy.ones((3,), dtype=theano.config.floatX)
W_cell_to_forget = 2 * numpy.ones((3,), dtype=theano.config.floatX)
def sigmoid(x):
return 1. / (1. + numpy.exp(-x))
for i in range(1, 25):
activation = numpy.dot(h_val[i-1], W_state_val) + x_val[i-1]
i_t = sigmoid(activation[:, :3] + c_val[i-1] * W_cell_to_in)
f_t = sigmoid(activation[:, 3:6] + c_val[i-1] * W_cell_to_forget)
c_val[i] = f_t * c_val[i-1] + i_t * numpy.tanh(activation[:, 6:9])
o_t = sigmoid(activation[:, 9:12] +
c_val[i] * W_cell_to_out)
h_val[i] = o_t * numpy.tanh(c_val[i])
h_val[i] = (mask_val[i - 1, :, None] * h_val[i] +
(1 - mask_val[i - 1, :, None]) * h_val[i - 1])
c_val[i] = (mask_val[i - 1, :, None] * c_val[i] +
(1 - mask_val[i - 1, :, None]) * c_val[i - 1])
h_val = h_val[1:]
assert_allclose(h_val, calc_h(x_val, mask_val)[0], rtol=1e-04)
# Also test that initial state is a parameter
initial1, initial2 = VariableFilter(roles=[INITIAL_STATE])(
ComputationGraph(h))
assert is_shared_variable(initial1)
assert is_shared_variable(initial2)
assert {initial1.name, initial2.name} == {
'initial_state', 'initial_cells'}
class TestRecurrentStack(unittest.TestCase):
def setUp(self):
depth = 4
self.depth = depth
dim = 3 # don't change, hardwired in the code
transitions = [LSTM(dim=dim) for _ in range(depth)]
self.stack0 = RecurrentStack(transitions,
weights_init=Constant(2),
biases_init=Constant(0))
self.stack0.initialize()
self.stack2 = RecurrentStack(transitions,
weights_init=Constant(2),
biases_init=Constant(0),
skip_connections=True)
self.stack2.initialize()
def do_one_step(self, stack, skip_connections=False, low_memory=False):
depth = self.depth
# batch=2
h0_val = 0.1 * numpy.array([[[1, 1, 0], [0, 1, 1]]] * depth,
dtype=theano.config.floatX)
c0_val = 0.1 * numpy.array([[[1, 1, 0], [0, 1, 1]]] * depth,
dtype=theano.config.floatX)
x_val = 0.1 * numpy.array([range(12), range(12, 24)],
dtype=theano.config.floatX)
# we will use same weights on all layers
W_state2x_val = 2 * numpy.ones((3, 12), dtype=theano.config.floatX)
W_state_val = 2 * numpy.ones((3, 12), dtype=theano.config.floatX)
W_cell_to_in = 2 * numpy.ones((3,), dtype=theano.config.floatX)
W_cell_to_out = 2 * numpy.ones((3,), dtype=theano.config.floatX)
W_cell_to_forget = 2 * numpy.ones((3,), dtype=theano.config.floatX)
kwargs = OrderedDict()
for d in range(depth):
if d > 0:
suffix = RECURRENTSTACK_SEPARATOR + str(d)
else:
suffix = ''
if d == 0 or skip_connections:
kwargs['inputs' + suffix] = tensor.matrix('inputs' + suffix)
kwargs['inputs' + suffix].tag.test_value = x_val
kwargs['states' + suffix] = tensor.matrix('states' + suffix)
kwargs['states' + suffix].tag.test_value = h0_val[d]
kwargs['cells' + suffix] = tensor.matrix('cells' + suffix)
kwargs['cells' + suffix].tag.test_value = c0_val[d]
results = stack.apply(iterate=False, low_memory=low_memory, **kwargs)
next_h = theano.function(inputs=list(kwargs.values()),
outputs=results)
def sigmoid(x):
return 1. / (1. + numpy.exp(-x))
h1_val = []
x_v = x_val
args_val = []
for d in range(depth):
if d == 0 or skip_connections:
args_val.append(x_val)
h0_v = h0_val[d]
args_val.append(h0_v)
c0_v = c0_val[d]
args_val.append(c0_v)
# omitting biases because they are zero
activation = numpy.dot(h0_v, W_state_val) + x_v
if skip_connections and d > 0:
activation += x_val
i_t = sigmoid(activation[:, :3] + c0_v * W_cell_to_in)
f_t = sigmoid(activation[:, 3:6] + c0_v * W_cell_to_forget)
next_cells = f_t * c0_v + i_t * numpy.tanh(activation[:, 6:9])
o_t = sigmoid(activation[:, 9:12] +
next_cells * W_cell_to_out)
h1_v = o_t * numpy.tanh(next_cells)
# current layer output state transformed to input of next
x_v = numpy.dot(h1_v, W_state2x_val)
h1_val.append(h1_v)
res = next_h(*args_val)
for d in range(depth):
assert_allclose(h1_val[d], res[d * 2], rtol=1e-6)
def test_one_step(self):
self.do_one_step(self.stack0)
self.do_one_step(self.stack0, low_memory=True)
self.do_one_step(self.stack2, skip_connections=True)
self.do_one_step(self.stack2, skip_connections=True, low_memory=True)
def do_many_steps(self, stack, skip_connections=False, low_memory=False):
depth = self.depth
# 24 steps
# 4 batch examples
# 12 dimensions per step
x_val = (0.1 * numpy.asarray(
list(itertools.islice(itertools.permutations(range(12)), 0, 24)),
dtype=theano.config.floatX))
x_val = numpy.ones((24, 4, 12),
dtype=theano.config.floatX) * x_val[:, None, :]
# mask the last third of steps
mask_val = numpy.ones((24, 4), dtype=theano.config.floatX)
mask_val[12:24, 3] = 0
# unroll all states and cells for all steps and also initial value
h_val = numpy.zeros((depth, 25, 4, 3), dtype=theano.config.floatX)
c_val = numpy.zeros((depth, 25, 4, 3), dtype=theano.config.floatX)
# we will use the same weights on all layers
W_state2x_val = 2 * numpy.ones((3, 12), dtype=theano.config.floatX)
W_state_val = 2 * numpy.ones((3, 12), dtype=theano.config.floatX)
W_cell_to_in = 2 * numpy.ones((3,), dtype=theano.config.floatX)
W_cell_to_out = 2 * numpy.ones((3,), dtype=theano.config.floatX)
W_cell_to_forget = 2 * numpy.ones((3,), dtype=theano.config.floatX)
kwargs = OrderedDict()
for d in range(depth):
if d > 0:
suffix = RECURRENTSTACK_SEPARATOR + str(d)
else:
suffix = ''
if d == 0 or skip_connections:
kwargs['inputs' + suffix] = tensor.tensor3('inputs' + suffix)
kwargs['inputs' + suffix].tag.test_value = x_val
kwargs['mask'] = tensor.matrix('mask')
kwargs['mask'].tag.test_value = mask_val
results = stack.apply(iterate=True, low_memory=low_memory, **kwargs)
calc_h = theano.function(inputs=list(kwargs.values()),
outputs=results)
def sigmoid(x):
return 1. / (1. + numpy.exp(-x))
for i in range(1, 25):
x_v = x_val[i - 1]
h_vs = []
c_vs = []
for d in range(depth):
h_v = h_val[d][i - 1, :, :]
c_v = c_val[d][i - 1, :, :]
activation = numpy.dot(h_v, W_state_val) + x_v
if skip_connections and d > 0:
activation += x_val[i - 1]
i_t = sigmoid(activation[:, :3] + c_v * W_cell_to_in)
f_t = sigmoid(activation[:, 3:6] + c_v * W_cell_to_forget)
c_v1 = f_t * c_v + i_t * numpy.tanh(activation[:, 6:9])
o_t = sigmoid(activation[:, 9:12] +
c_v1 * W_cell_to_out)
h_v1 = o_t * numpy.tanh(c_v1)
h_v = (mask_val[i - 1, :, None] * h_v1 +
(1 - mask_val[i - 1, :, None]) * h_v)
c_v = (mask_val[i - 1, :, None] * c_v1 +
(1 - mask_val[i - 1, :, None]) * c_v)
# current layer output state transformed to input of next
x_v = numpy.dot(h_v, W_state2x_val)
h_vs.append(h_v)
c_vs.append(c_v)
for d in range(depth):
h_val[d][i, :, :] = h_vs[d]
c_val[d][i, :, :] = c_vs[d]
args_val = [x_val]*(depth if skip_connections else 1) + [mask_val]
res = calc_h(*args_val)
for d in range(depth):
assert_allclose(h_val[d][1:], res[d * 2], rtol=1e-4)
assert_allclose(c_val[d][1:], res[d * 2 + 1], rtol=1e-4)
# Also test that initial state is a parameter
for h in results:
initial_states = VariableFilter(roles=[INITIAL_STATE])(
ComputationGraph(h))
assert all(is_shared_variable(initial_state)
for initial_state in initial_states)
def test_many_steps(self):
self.do_many_steps(self.stack0)
self.do_many_steps(self.stack0, low_memory=True)
self.do_many_steps(self.stack2, skip_connections=True)
self.do_many_steps(self.stack2, skip_connections=True, low_memory=True)
class TestGatedRecurrent(unittest.TestCase):
def setUp(self):
self.gated = GatedRecurrent(
dim=3, activation=Tanh(),
gate_activation=Tanh(), weights_init=Constant(2))
self.gated.initialize()
self.reset_only = GatedRecurrent(
dim=3, activation=Tanh(),
gate_activation=Tanh(),
weights_init=IsotropicGaussian(), seed=1)
self.reset_only.initialize()
def test_one_step(self):
h0 = tensor.matrix('h0')
x = tensor.matrix('x')
gi = tensor.matrix('gi')
h1 = self.gated.apply(x, gi, h0, iterate=False)
next_h = theano.function(inputs=[h0, x, gi], outputs=[h1])
h0_val = 0.1 * numpy.array([[1, 1, 0], [0, 1, 1]],
dtype=theano.config.floatX)
x_val = 0.1 * numpy.array([[1, 2, 3], [4, 5, 6]],
dtype=theano.config.floatX)
zi_val = (h0_val + x_val) / 2
ri_val = -x_val
W_val = 2 * numpy.ones((3, 3), dtype=theano.config.floatX)
z_val = numpy.tanh(h0_val.dot(W_val) + zi_val)
r_val = numpy.tanh(h0_val.dot(W_val) + ri_val)
h1_val = (z_val * numpy.tanh((r_val * h0_val).dot(W_val) + x_val) +
(1 - z_val) * h0_val)
assert_allclose(
h1_val, next_h(h0_val, x_val, numpy.hstack([zi_val, ri_val]))[0],
rtol=1e-6)
def test_many_steps(self):
x = tensor.tensor3('x')
gi = tensor.tensor3('gi')
mask = tensor.matrix('mask')
h = self.reset_only.apply(x, gi, mask=mask)
calc_h = theano.function(inputs=[x, gi, mask], outputs=[h])
x_val = 0.1 * numpy.asarray(list(itertools.permutations(range(4))),
dtype=theano.config.floatX)
x_val = numpy.ones((24, 4, 3),
dtype=theano.config.floatX) * x_val[..., None]
ri_val = 0.3 - x_val
zi_val = 2 * ri_val
mask_val = numpy.ones((24, 4), dtype=theano.config.floatX)
mask_val[12:24, 3] = 0
h_val = numpy.zeros((25, 4, 3), dtype=theano.config.floatX)
W = self.reset_only.state_to_state.get_value()
Wz = self.reset_only.state_to_gates.get_value()[:, :3]
Wr = self.reset_only.state_to_gates.get_value()[:, 3:]
for i in range(1, 25):
z_val = numpy.tanh(h_val[i - 1].dot(Wz) + zi_val[i - 1])
r_val = numpy.tanh(h_val[i - 1].dot(Wr) + ri_val[i - 1])
h_val[i] = numpy.tanh((r_val * h_val[i - 1]).dot(W) +
x_val[i - 1])
h_val[i] = z_val * h_val[i] + (1 - z_val) * h_val[i - 1]
h_val[i] = (mask_val[i - 1, :, None] * h_val[i] +
(1 - mask_val[i - 1, :, None]) * h_val[i - 1])
h_val = h_val[1:]
# TODO Figure out why this tolerance needs to be so big
assert_allclose(
h_val,
calc_h(x_val, numpy.concatenate(
[zi_val, ri_val], axis=2), mask_val)[0],
1e-04)
# Also test that initial state is a parameter
initial_state, = VariableFilter(roles=[INITIAL_STATE])(
ComputationGraph(h))
assert is_shared_variable(initial_state)
assert initial_state.name == 'initial_state'
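# A minimal sketch (not part of the test suite) of the gated-recurrent update
# that the checks above recompute by hand.  Note that these tests configure
# gate_activation=Tanh(), so the update (z) and reset (r) gates use tanh rather
# than the more common sigmoid; Wz, Wr and W are assumed to follow the shapes
# extracted from state_to_gates and state_to_state above.
def _reference_gated_step(x_t, zi_t, ri_t, h_prev, W, Wz, Wr):
    """Compute one mask-free gated-recurrent step with numpy."""
    z_t = numpy.tanh(numpy.dot(h_prev, Wz) + zi_t)
    r_t = numpy.tanh(numpy.dot(h_prev, Wr) + ri_t)
    h_candidate = numpy.tanh(numpy.dot(r_t * h_prev, W) + x_t)
    return z_t * h_candidate + (1 - z_t) * h_prev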
class TestBidirectional(unittest.TestCase):
def setUp(self):
self.bidir = Bidirectional(weights_init=Orthogonal(),
prototype=SimpleRecurrent(
dim=3, activation=Tanh()))
self.simple = SimpleRecurrent(dim=3, weights_init=Orthogonal(),
activation=Tanh(), seed=1)
self.bidir.allocate()
self.simple.initialize()
self.bidir.children[0].parameters[0].set_value(
self.simple.parameters[0].get_value())
self.bidir.children[1].parameters[0].set_value(
self.simple.parameters[0].get_value())
self.x_val = 0.1 * numpy.asarray(
list(itertools.permutations(range(4))),
dtype=theano.config.floatX)
self.x_val = (numpy.ones((24, 4, 3), dtype=theano.config.floatX) *
self.x_val[..., None])
self.mask_val = numpy.ones((24, 4), dtype=theano.config.floatX)
self.mask_val[12:24, 3] = 0
def test(self):
x = tensor.tensor3('x')
mask = tensor.matrix('mask')
calc_bidir = theano.function([x, mask],
[self.bidir.apply(x, mask=mask)])
calc_simple = theano.function([x, mask],
[self.simple.apply(x, mask=mask)])
h_bidir = calc_bidir(self.x_val, self.mask_val)[0]
h_simple = calc_simple(self.x_val, self.mask_val)[0]
h_simple_rev = calc_simple(self.x_val[::-1], self.mask_val[::-1])[0]
output_names = self.bidir.apply.outputs
assert output_names == ['states']
assert_allclose(h_simple, h_bidir[..., :3], rtol=1e-04)
assert_allclose(h_simple_rev, h_bidir[::-1, ..., 3:], rtol=1e-04)
class TestBidirectionalStack(unittest.TestCase):
def setUp(self):
prototype = SimpleRecurrent(dim=3, activation=Tanh())
self.layers = [
Bidirectional(weights_init=Orthogonal(), prototype=prototype)
for _ in range(3)]
self.stack = RecurrentStack(self.layers)
for fork in self.stack.forks:
fork.weights_init = Identity(1)
fork.biases_init = Constant(0)
self.stack.initialize()
self.x_val = 0.1 * numpy.asarray(
list(itertools.permutations(range(4))),
dtype=theano.config.floatX)
self.x_val = (numpy.ones((24, 4, 3), dtype=theano.config.floatX) *
self.x_val[..., None])
self.mask_val = numpy.ones((24, 4), dtype=theano.config.floatX)
self.mask_val[12:24, 3] = 0
def test_steps(self):
x = tensor.tensor3('x')
mask = tensor.matrix('mask')
calc_stack_layers = [
theano.function([x, mask], self.stack.apply(x, mask=mask)[i])
for i in range(len(self.layers))]
stack_layers = [
f(self.x_val, self.mask_val) for f in calc_stack_layers]
h_val = self.x_val
for stack_layer_value, bidir_net in zip(stack_layers, self.layers):
calc = theano.function([x, mask], bidir_net.apply(x, mask=mask))
simple_layer_value = calc(h_val, self.mask_val)
assert_allclose(stack_layer_value, simple_layer_value, rtol=1e-04)
h_val = simple_layer_value[..., :3]
def test_dims(self):
self.assertEqual(self.stack.get_dim("inputs"), 3)
for i in range(len(self.layers)):
state_name = self.stack.suffix("states", i)
self.assertEqual(self.stack.get_dim(state_name), 6)
def test_saved_inner_graph():
"""Make sure that the original inner graph is saved."""
x = tensor.tensor3()
recurrent = SimpleRecurrent(dim=3, activation=Tanh())
y = recurrent.apply(x)
application_call = get_application_call(y)
assert application_call.inner_inputs
assert application_call.inner_outputs
cg = ComputationGraph(application_call.inner_outputs)
# Check that the inner scan graph is annotated
# with `recurrent.apply`
assert len(VariableFilter(applications=[recurrent.apply])(cg)) == 3
# Check that the inner graph is equivalent to the one
# produced by a stand-alone of `recurrent.apply`
assert is_same_graph(application_call.inner_outputs[0],
recurrent.apply(*application_call.inner_inputs,
iterate=False))
def test_super_in_recurrent_overrider():
# A regression test for issue #475
class SimpleRecurrentWithContext(SimpleRecurrent):
@application(contexts=['context'])
def apply(self, context, *args, **kwargs):
kwargs['inputs'] += context
return super(SimpleRecurrentWithContext, self).apply(*args,
**kwargs)
@apply.delegate
def apply_delegate(self):
return super(SimpleRecurrentWithContext, self).apply
brick = SimpleRecurrentWithContext(100, Tanh())
inputs = tensor.tensor3('inputs')
context = tensor.matrix('context').dimshuffle('x', 0, 1)
brick.apply(context, inputs=inputs)
|
|
# -*- coding: utf-8 -*-
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import proto # type: ignore
from google.cloud.aiplatform_v1beta1.types import explanation_metadata
from google.cloud.aiplatform_v1beta1.types import io
from google.protobuf import struct_pb2 # type: ignore
__protobuf__ = proto.module(
package="google.cloud.aiplatform.v1beta1",
manifest={
"Explanation",
"ModelExplanation",
"Attribution",
"ExplanationSpec",
"ExplanationParameters",
"SampledShapleyAttribution",
"IntegratedGradientsAttribution",
"XraiAttribution",
"SmoothGradConfig",
"FeatureNoiseSigma",
"BlurBaselineConfig",
"Similarity",
"ExplanationSpecOverride",
"ExplanationMetadataOverride",
},
)
class Explanation(proto.Message):
r"""Explanation of a prediction (provided in
[PredictResponse.predictions][google.cloud.aiplatform.v1beta1.PredictResponse.predictions])
produced by the Model on a given
[instance][google.cloud.aiplatform.v1beta1.ExplainRequest.instances].
Attributes:
attributions (Sequence[google.cloud.aiplatform_v1beta1.types.Attribution]):
Output only. Feature attributions grouped by predicted
outputs.
For Models that predict only one output, such as regression
Models that predict only one score, there is only one
attribution that explains the predicted output. For Models
that predict multiple outputs, such as multiclass Models
that predict multiple classes, each element explains one
specific item.
[Attribution.output_index][google.cloud.aiplatform.v1beta1.Attribution.output_index]
can be used to identify which output this attribution is
explaining.
If users set
[ExplanationParameters.top_k][google.cloud.aiplatform.v1beta1.ExplanationParameters.top_k],
the attributions are sorted by
[instance_output_value][Attributions.instance_output_value]
in descending order. If
[ExplanationParameters.output_indices][google.cloud.aiplatform.v1beta1.ExplanationParameters.output_indices]
is specified, the attributions are stored by
[Attribution.output_index][google.cloud.aiplatform.v1beta1.Attribution.output_index]
in the same order as they appear in the output_indices.
"""
attributions = proto.RepeatedField(proto.MESSAGE, number=1, message="Attribution",)
class ModelExplanation(proto.Message):
r"""Aggregated explanation metrics for a Model over a set of
instances.
Attributes:
mean_attributions (Sequence[google.cloud.aiplatform_v1beta1.types.Attribution]):
Output only. Aggregated attributions explaining the Model's
prediction outputs over the set of instances. The
attributions are grouped by outputs.
For Models that predict only one output, such as regression
Models that predict only one score, there is only one
attribution that explains the predicted output. For Models
that predict multiple outputs, such as multiclass Models
that predict multiple classes, each element explains one
specific item.
[Attribution.output_index][google.cloud.aiplatform.v1beta1.Attribution.output_index]
can be used to identify which output this attribution is
explaining.
The
[baselineOutputValue][google.cloud.aiplatform.v1beta1.Attribution.baseline_output_value],
[instanceOutputValue][google.cloud.aiplatform.v1beta1.Attribution.instance_output_value]
and
[featureAttributions][google.cloud.aiplatform.v1beta1.Attribution.feature_attributions]
fields are averaged over the test data.
NOTE: Currently AutoML tabular classification Models produce
only one attribution, which averages attributions over all
the classes it predicts.
[Attribution.approximation_error][google.cloud.aiplatform.v1beta1.Attribution.approximation_error]
is not populated.
"""
mean_attributions = proto.RepeatedField(
proto.MESSAGE, number=1, message="Attribution",
)
class Attribution(proto.Message):
r"""Attribution that explains a particular prediction output.
Attributes:
baseline_output_value (float):
Output only. Model predicted output if the input instance is
constructed from the baselines of all the features defined
in
[ExplanationMetadata.inputs][google.cloud.aiplatform.v1beta1.ExplanationMetadata.inputs].
The field name of the output is determined by the key in
[ExplanationMetadata.outputs][google.cloud.aiplatform.v1beta1.ExplanationMetadata.outputs].
If the Model's predicted output has multiple dimensions
(rank > 1), this is the value in the output located by
[output_index][google.cloud.aiplatform.v1beta1.Attribution.output_index].
If there are multiple baselines, their output values are
averaged.
instance_output_value (float):
Output only. Model predicted output on the corresponding
[explanation instance][ExplainRequest.instances]. The field
name of the output is determined by the key in
[ExplanationMetadata.outputs][google.cloud.aiplatform.v1beta1.ExplanationMetadata.outputs].
If the Model predicted output has multiple dimensions, this
is the value in the output located by
[output_index][google.cloud.aiplatform.v1beta1.Attribution.output_index].
feature_attributions (google.protobuf.struct_pb2.Value):
Output only. Attributions of each explained feature.
Features are extracted from the [prediction
instances][google.cloud.aiplatform.v1beta1.ExplainRequest.instances]
according to [explanation metadata for
inputs][google.cloud.aiplatform.v1beta1.ExplanationMetadata.inputs].
The value is a struct, whose keys are the name of the
feature. The values are how much the feature in the
[instance][google.cloud.aiplatform.v1beta1.ExplainRequest.instances]
contributed to the predicted result.
The format of the value is determined by the feature's input
format:
- If the feature is a scalar value, the attribution value
is a [floating
number][google.protobuf.Value.number_value].
- If the feature is an array of scalar values, the
attribution value is an
[array][google.protobuf.Value.list_value].
- If the feature is a struct, the attribution value is a
[struct][google.protobuf.Value.struct_value]. The keys in
the attribution value struct are the same as the keys in
the feature struct. The formats of the values in the
attribution struct are determined by the formats of the
values in the feature struct.
The
[ExplanationMetadata.feature_attributions_schema_uri][google.cloud.aiplatform.v1beta1.ExplanationMetadata.feature_attributions_schema_uri]
field, pointed to by the
[ExplanationSpec][google.cloud.aiplatform.v1beta1.ExplanationSpec]
field of the
[Endpoint.deployed_models][google.cloud.aiplatform.v1beta1.Endpoint.deployed_models]
object, points to the schema file that describes the
features and their attribution values (if it is populated).
output_index (Sequence[int]):
Output only. The index that locates the explained prediction
output.
If the prediction output is a scalar value, output_index is
not populated. If the prediction output has multiple
dimensions, the length of the output_index list is the same
as the number of dimensions of the output. The i-th element
in output_index is the element index of the i-th dimension
of the output vector. Indices start from 0.
output_display_name (str):
Output only. The display name of the output identified by
[output_index][google.cloud.aiplatform.v1beta1.Attribution.output_index].
For example, the predicted class name by a
multi-classification Model.
This field is only populated iff the Model predicts display
names as a separate field along with the explained output.
The predicted display name must have the same shape as the
explained output, and can be located using output_index.
approximation_error (float):
Output only. Error of
[feature_attributions][google.cloud.aiplatform.v1beta1.Attribution.feature_attributions]
caused by approximation used in the explanation method.
Lower value means more precise attributions.
- For Sampled Shapley
[attribution][google.cloud.aiplatform.v1beta1.ExplanationParameters.sampled_shapley_attribution],
increasing
[path_count][google.cloud.aiplatform.v1beta1.SampledShapleyAttribution.path_count]
might reduce the error.
- For Integrated Gradients
[attribution][google.cloud.aiplatform.v1beta1.ExplanationParameters.integrated_gradients_attribution],
increasing
[step_count][google.cloud.aiplatform.v1beta1.IntegratedGradientsAttribution.step_count]
might reduce the error.
- For [XRAI
attribution][google.cloud.aiplatform.v1beta1.ExplanationParameters.xrai_attribution],
increasing
[step_count][google.cloud.aiplatform.v1beta1.XraiAttribution.step_count]
might reduce the error.
See `this
introduction </vertex-ai/docs/explainable-ai/overview>`__
for more information.
output_name (str):
Output only. Name of the explain output. Specified as the
key in
[ExplanationMetadata.outputs][google.cloud.aiplatform.v1beta1.ExplanationMetadata.outputs].
"""
baseline_output_value = proto.Field(proto.DOUBLE, number=1,)
instance_output_value = proto.Field(proto.DOUBLE, number=2,)
feature_attributions = proto.Field(
proto.MESSAGE, number=3, message=struct_pb2.Value,
)
output_index = proto.RepeatedField(proto.INT32, number=4,)
output_display_name = proto.Field(proto.STRING, number=5,)
approximation_error = proto.Field(proto.DOUBLE, number=6,)
output_name = proto.Field(proto.STRING, number=7,)
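# Illustrative only (not part of the generated API surface): a sketch of the
# shape of data the service populates in an Attribution, showing the struct
# format of ``feature_attributions`` described above.  The feature names
# ("age", "embedding") and all numeric values are hypothetical.
def _example_attribution():
    from google.protobuf import json_format
    feature_attributions = struct_pb2.Value()
    json_format.ParseDict({"age": 0.42, "embedding": [0.1, -0.3]},
                          feature_attributions)
    return Attribution(
        baseline_output_value=0.0,
        instance_output_value=0.9,
        feature_attributions=feature_attributions,
        output_index=[2],
    )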
class ExplanationSpec(proto.Message):
r"""Specification of Model explanation.
Attributes:
parameters (google.cloud.aiplatform_v1beta1.types.ExplanationParameters):
Required. Parameters that configure
explaining of the Model's predictions.
metadata (google.cloud.aiplatform_v1beta1.types.ExplanationMetadata):
Required. Metadata describing the Model's
input and output for explanation.
"""
parameters = proto.Field(proto.MESSAGE, number=1, message="ExplanationParameters",)
metadata = proto.Field(
proto.MESSAGE, number=2, message=explanation_metadata.ExplanationMetadata,
)
class ExplanationParameters(proto.Message):
r"""Parameters to configure explaining for Model's predictions.
This message has `oneof`_ fields (mutually exclusive fields).
For each oneof, at most one member field can be set at the same time.
Setting any member of the oneof automatically clears all other
members.
.. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields
Attributes:
sampled_shapley_attribution (google.cloud.aiplatform_v1beta1.types.SampledShapleyAttribution):
An attribution method that approximates
Shapley values for features that contribute to
the label being predicted. A sampling strategy
is used to approximate the value rather than
considering all subsets of features. Refer to
this paper for more details:
https://arxiv.org/abs/1306.4265.
This field is a member of `oneof`_ ``method``.
integrated_gradients_attribution (google.cloud.aiplatform_v1beta1.types.IntegratedGradientsAttribution):
An attribution method that computes
Aumann-Shapley values taking advantage of the
model's fully differentiable structure. Refer to
this paper for more details:
https://arxiv.org/abs/1703.01365
This field is a member of `oneof`_ ``method``.
xrai_attribution (google.cloud.aiplatform_v1beta1.types.XraiAttribution):
An attribution method that redistributes
Integrated Gradients attribution to segmented
regions, taking advantage of the model's fully
differentiable structure. Refer to this paper
for more details:
https://arxiv.org/abs/1906.02825
XRAI currently performs better on natural
images, like a picture of a house or an animal.
If the images are taken in artificial
environments, like a lab or manufacturing line,
or from diagnostic equipment, like x-rays or
quality-control cameras, use Integrated
Gradients instead.
This field is a member of `oneof`_ ``method``.
similarity (google.cloud.aiplatform_v1beta1.types.Similarity):
Similarity explainability that returns the
nearest neighbors from the provided dataset.
This field is a member of `oneof`_ ``method``.
top_k (int):
If populated, returns attributions for top K
indices of outputs (defaults to 1). Only applies
to Models that predict more than one output
(e.g., multi-class Models). When set to -1,
returns explanations for all outputs.
output_indices (google.protobuf.struct_pb2.ListValue):
If populated, only returns attributions that have
[output_index][google.cloud.aiplatform.v1beta1.Attribution.output_index]
contained in output_indices. It must be an ndarray of
integers, with the same shape of the output it's explaining.
If not populated, returns attributions for
[top_k][google.cloud.aiplatform.v1beta1.ExplanationParameters.top_k]
indices of outputs. If neither top_k nor output_indices is
populated, returns the argmax index of the outputs.
Only applicable to Models that predict multiple outputs
(e.g., multi-class Models that predict multiple classes).
"""
sampled_shapley_attribution = proto.Field(
proto.MESSAGE, number=1, oneof="method", message="SampledShapleyAttribution",
)
integrated_gradients_attribution = proto.Field(
proto.MESSAGE,
number=2,
oneof="method",
message="IntegratedGradientsAttribution",
)
xrai_attribution = proto.Field(
proto.MESSAGE, number=3, oneof="method", message="XraiAttribution",
)
similarity = proto.Field(
proto.MESSAGE, number=7, oneof="method", message="Similarity",
)
top_k = proto.Field(proto.INT32, number=4,)
output_indices = proto.Field(proto.MESSAGE, number=5, message=struct_pb2.ListValue,)
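# Illustrative only (not part of the generated API surface): a sketch of how
# the ``method`` oneof above might be populated.  Exactly one of the method
# fields can be set at a time; assigning a different member clears the others.
# The numeric values are example choices, not recommendations.
def _example_explanation_parameters():
    return ExplanationParameters(
        sampled_shapley_attribution=SampledShapleyAttribution(path_count=10),
        top_k=1,
    )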
class SampledShapleyAttribution(proto.Message):
r"""An attribution method that approximates Shapley values for
features that contribute to the label being predicted. A
sampling strategy is used to approximate the value rather than
considering all subsets of features.
Attributes:
path_count (int):
Required. The number of feature permutations to consider
when approximating the Shapley values.
Valid range of its value is [1, 50], inclusive.
"""
path_count = proto.Field(proto.INT32, number=1,)
class IntegratedGradientsAttribution(proto.Message):
r"""An attribution method that computes the Aumann-Shapley value
taking advantage of the model's fully differentiable structure.
Refer to this paper for more details:
https://arxiv.org/abs/1703.01365
Attributes:
step_count (int):
Required. The number of steps for approximating the path
integral. A good value to start is 50 and gradually increase
until the sum to diff property is within the desired error
range.
Valid range of its value is [1, 100], inclusive.
smooth_grad_config (google.cloud.aiplatform_v1beta1.types.SmoothGradConfig):
Config for SmoothGrad approximation of
gradients.
When enabled, the gradients are approximated by
averaging the gradients from noisy samples in
the vicinity of the inputs. Adding noise can
help improve the computed gradients. Refer to
this paper for more details:
https://arxiv.org/pdf/1706.03825.pdf
blur_baseline_config (google.cloud.aiplatform_v1beta1.types.BlurBaselineConfig):
Config for IG with blur baseline.
When enabled, a linear path from the maximally
blurred image to the input image is created.
Using a blurred baseline instead of zero (black
image) is motivated by the BlurIG approach
explained here: https://arxiv.org/abs/2004.03383
"""
step_count = proto.Field(proto.INT32, number=1,)
smooth_grad_config = proto.Field(
proto.MESSAGE, number=2, message="SmoothGradConfig",
)
blur_baseline_config = proto.Field(
proto.MESSAGE, number=3, message="BlurBaselineConfig",
)
class XraiAttribution(proto.Message):
r"""An explanation method that redistributes Integrated Gradients
attributions to segmented regions, taking advantage of the
model's fully differentiable structure. Refer to this paper for
more details: https://arxiv.org/abs/1906.02825
Supported only by image Models.
Attributes:
step_count (int):
Required. The number of steps for approximating the path
integral. A good value to start is 50 and gradually increase
until the sum to diff property is met within the desired
error range.
Valid range of its value is [1, 100], inclusive.
smooth_grad_config (google.cloud.aiplatform_v1beta1.types.SmoothGradConfig):
Config for SmoothGrad approximation of
gradients.
When enabled, the gradients are approximated by
averaging the gradients from noisy samples in
the vicinity of the inputs. Adding noise can
help improve the computed gradients. Refer to
this paper for more details:
https://arxiv.org/pdf/1706.03825.pdf
blur_baseline_config (google.cloud.aiplatform_v1beta1.types.BlurBaselineConfig):
Config for XRAI with blur baseline.
When enabled, a linear path from the maximally
blurred image to the input image is created.
Using a blurred baseline instead of zero (black
image) is motivated by the BlurIG approach
explained here: https://arxiv.org/abs/2004.03383
"""
step_count = proto.Field(proto.INT32, number=1,)
smooth_grad_config = proto.Field(
proto.MESSAGE, number=2, message="SmoothGradConfig",
)
blur_baseline_config = proto.Field(
proto.MESSAGE, number=3, message="BlurBaselineConfig",
)
class SmoothGradConfig(proto.Message):
r"""Config for SmoothGrad approximation of gradients.
When enabled, the gradients are approximated by averaging the
gradients from noisy samples in the vicinity of the inputs.
Adding noise can help improve the computed gradients. Refer to
this paper for more details:
https://arxiv.org/pdf/1706.03825.pdf
This message has `oneof`_ fields (mutually exclusive fields).
For each oneof, at most one member field can be set at the same time.
Setting any member of the oneof automatically clears all other
members.
.. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields
Attributes:
noise_sigma (float):
This is a single float value and will be used to add noise
to all the features. Use this field when all features are
normalized to have the same distribution: scale to range [0,
1], [-1, 1] or z-scoring, where features are normalized to
have 0-mean and 1-variance. Learn more about
`normalization <https://developers.google.com/machine-learning/data-prep/transform/normalization>`__.
For best results the recommended value is about 10% - 20% of
the standard deviation of the input feature. Refer to
section 3.2 of the SmoothGrad paper:
https://arxiv.org/pdf/1706.03825.pdf. Defaults to 0.1.
If the distribution is different per feature, set
[feature_noise_sigma][google.cloud.aiplatform.v1beta1.SmoothGradConfig.feature_noise_sigma]
instead for each feature.
This field is a member of `oneof`_ ``GradientNoiseSigma``.
feature_noise_sigma (google.cloud.aiplatform_v1beta1.types.FeatureNoiseSigma):
This is similar to
[noise_sigma][google.cloud.aiplatform.v1beta1.SmoothGradConfig.noise_sigma],
but provides additional flexibility. A separate noise sigma
can be provided for each feature, which is useful if their
distributions are different. No noise is added to features
that are not set. If this field is unset,
[noise_sigma][google.cloud.aiplatform.v1beta1.SmoothGradConfig.noise_sigma]
will be used for all features.
This field is a member of `oneof`_ ``GradientNoiseSigma``.
noisy_sample_count (int):
The number of gradient samples to use for approximation. The
higher this number, the more accurate the gradient is, but
the runtime complexity increases by this factor as well.
Valid range of its value is [1, 50]. Defaults to 3.
"""
noise_sigma = proto.Field(proto.FLOAT, number=1, oneof="GradientNoiseSigma",)
feature_noise_sigma = proto.Field(
proto.MESSAGE,
number=2,
oneof="GradientNoiseSigma",
message="FeatureNoiseSigma",
)
noisy_sample_count = proto.Field(proto.INT32, number=3,)
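# Illustrative only: two ways the ``GradientNoiseSigma`` oneof above might be
# used.  Setting ``noise_sigma`` applies a single sigma to every feature, while
# ``feature_noise_sigma`` (defined below) supplies a per-feature sigma instead.
# The feature name "my_feature" and all numeric values are hypothetical.
def _example_smooth_grad_configs():
    uniform = SmoothGradConfig(noise_sigma=0.1, noisy_sample_count=3)
    per_feature = SmoothGradConfig(
        feature_noise_sigma=FeatureNoiseSigma(
            noise_sigma=[
                FeatureNoiseSigma.NoiseSigmaForFeature(
                    name="my_feature", sigma=0.2),
            ],
        ),
        noisy_sample_count=3,
    )
    return uniform, per_feature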
class FeatureNoiseSigma(proto.Message):
r"""Noise sigma by features. Noise sigma represents the standard
deviation of the gaussian kernel that will be used to add noise
to interpolated inputs prior to computing gradients.
Attributes:
noise_sigma (Sequence[google.cloud.aiplatform_v1beta1.types.FeatureNoiseSigma.NoiseSigmaForFeature]):
Noise sigma per feature. No noise is added to
features that are not set.
"""
class NoiseSigmaForFeature(proto.Message):
r"""Noise sigma for a single feature.
Attributes:
name (str):
The name of the input feature for which noise sigma is
provided. The features are defined in [explanation metadata
inputs][google.cloud.aiplatform.v1beta1.ExplanationMetadata.inputs].
sigma (float):
This represents the standard deviation of the Gaussian
kernel that will be used to add noise to the feature prior
to computing gradients. Similar to
[noise_sigma][google.cloud.aiplatform.v1beta1.SmoothGradConfig.noise_sigma]
but represents the noise added to the current feature.
Defaults to 0.1.
"""
name = proto.Field(proto.STRING, number=1,)
sigma = proto.Field(proto.FLOAT, number=2,)
noise_sigma = proto.RepeatedField(
proto.MESSAGE, number=1, message=NoiseSigmaForFeature,
)
class BlurBaselineConfig(proto.Message):
r"""Config for blur baseline.
When enabled, a linear path from the maximally blurred image to
the input image is created. Using a blurred baseline instead of
zero (black image) is motivated by the BlurIG approach explained
here:
https://arxiv.org/abs/2004.03383
Attributes:
max_blur_sigma (float):
The standard deviation of the blur kernel for
the blurred baseline. The same blurring
parameter is used for both the height and the
width dimension. If not set, the method defaults
to the zero (i.e. black for images) baseline.
"""
max_blur_sigma = proto.Field(proto.FLOAT, number=1,)
class Similarity(proto.Message):
r"""Similarity explainability that returns the nearest neighbors
from the provided dataset.
Attributes:
gcs_source (google.cloud.aiplatform_v1beta1.types.GcsSource):
The Cloud Storage location for the input
instances.
nearest_neighbor_search_config (google.protobuf.struct_pb2.Value):
The configuration for the generated index, the semantics are
the same as
[metadata][google.cloud.aiplatform.v1beta1.Index.metadata]
and should match NearestNeighborSearchConfig.
"""
gcs_source = proto.Field(proto.MESSAGE, number=1, message=io.GcsSource,)
nearest_neighbor_search_config = proto.Field(
proto.MESSAGE, number=2, message=struct_pb2.Value,
)
class ExplanationSpecOverride(proto.Message):
r"""The
[ExplanationSpec][google.cloud.aiplatform.v1beta1.ExplanationSpec]
entries that can be overridden at [online
explanation][google.cloud.aiplatform.v1beta1.PredictionService.Explain]
time.
Attributes:
parameters (google.cloud.aiplatform_v1beta1.types.ExplanationParameters):
The parameters to be overridden. Note that the
[method][google.cloud.aiplatform.v1beta1.ExplanationParameters.method]
cannot be changed. If not specified, no parameter is
overridden.
metadata (google.cloud.aiplatform_v1beta1.types.ExplanationMetadataOverride):
The metadata to be overridden. If not
specified, no metadata is overridden.
"""
parameters = proto.Field(proto.MESSAGE, number=1, message="ExplanationParameters",)
metadata = proto.Field(
proto.MESSAGE, number=2, message="ExplanationMetadataOverride",
)
class ExplanationMetadataOverride(proto.Message):
r"""The
[ExplanationMetadata][google.cloud.aiplatform.v1beta1.ExplanationMetadata]
entries that can be overridden at [online
explanation][google.cloud.aiplatform.v1beta1.PredictionService.Explain]
time.
Attributes:
inputs (Sequence[google.cloud.aiplatform_v1beta1.types.ExplanationMetadataOverride.InputsEntry]):
Required. Overrides the [input
metadata][google.cloud.aiplatform.v1beta1.ExplanationMetadata.inputs]
of the features. The key is the name of the feature to be
overridden. The keys specified here must exist in the input
metadata to be overridden. If a feature is not specified
here, the corresponding feature's input metadata is not
overridden.
"""
class InputMetadataOverride(proto.Message):
r"""The [input
metadata][google.cloud.aiplatform.v1beta1.ExplanationMetadata.InputMetadata]
entries to be overridden.
Attributes:
input_baselines (Sequence[google.protobuf.struct_pb2.Value]):
Baseline inputs for this feature.
This overrides the ``input_baseline`` field of the
[ExplanationMetadata.InputMetadata][google.cloud.aiplatform.v1beta1.ExplanationMetadata.InputMetadata]
object of the corresponding feature's input metadata. If
it's not specified, the original baselines are not
overridden.
"""
input_baselines = proto.RepeatedField(
proto.MESSAGE, number=1, message=struct_pb2.Value,
)
inputs = proto.MapField(
proto.STRING, proto.MESSAGE, number=1, message=InputMetadataOverride,
)
__all__ = tuple(sorted(__protobuf__.manifest))
|
|
from django.contrib import messages
from django.contrib.auth.decorators import login_required
from django.contrib.sites.shortcuts import get_current_site
from django.http import (
Http404,
HttpResponsePermanentRedirect,
HttpResponseRedirect,
)
from django.shortcuts import redirect
from django.utils.decorators import method_decorator
from django.views.decorators.debug import sensitive_post_parameters
from django.views.generic.base import TemplateResponseMixin, TemplateView, View
from django.views.generic.edit import FormView
from . import app_settings, signals
from ..compat import is_anonymous, is_authenticated, reverse, reverse_lazy
from ..exceptions import ImmediateHttpResponse
from ..utils import get_form_class, get_request_param
from .adapter import get_adapter
from .forms import (
AddEmailForm,
ChangePasswordForm,
LoginForm,
ResetPasswordForm,
ResetPasswordKeyForm,
SetPasswordForm,
SignupForm,
UserTokenForm,
)
from .models import EmailAddress, EmailConfirmation, EmailConfirmationHMAC
from .utils import (
complete_signup,
get_login_redirect_url,
get_next_redirect_url,
logout_on_password_change,
passthrough_next_redirect_url,
perform_login,
sync_user_email_addresses,
url_str_to_user_pk,
)
sensitive_post_parameters_m = method_decorator(
sensitive_post_parameters('password', 'password1', 'password2'))
def _ajax_response(request, response, form=None, data=None):
if request.is_ajax():
if (isinstance(response, HttpResponseRedirect) or isinstance(
response, HttpResponsePermanentRedirect)):
redirect_to = response['Location']
else:
redirect_to = None
response = get_adapter(request).ajax_response(
request,
response,
form=form,
data=data,
redirect_to=redirect_to)
return response
class RedirectAuthenticatedUserMixin(object):
def dispatch(self, request, *args, **kwargs):
if is_authenticated(request.user) and \
app_settings.AUTHENTICATED_LOGIN_REDIRECTS:
redirect_to = self.get_authenticated_redirect_url()
response = HttpResponseRedirect(redirect_to)
return _ajax_response(request, response)
else:
response = super(RedirectAuthenticatedUserMixin,
self).dispatch(request,
*args,
**kwargs)
return response
def get_authenticated_redirect_url(self):
redirect_field_name = self.redirect_field_name
return get_login_redirect_url(self.request,
url=self.get_success_url(),
redirect_field_name=redirect_field_name)
class AjaxCapableProcessFormViewMixin(object):
def get(self, request, *args, **kwargs):
response = super(AjaxCapableProcessFormViewMixin, self).get(
request, *args, **kwargs)
form = self.get_form()
return _ajax_response(
self.request, response, form=form, data=self._get_ajax_data_if())
def post(self, request, *args, **kwargs):
form_class = self.get_form_class()
form = self.get_form(form_class)
if form.is_valid():
response = self.form_valid(form)
else:
response = self.form_invalid(form)
return _ajax_response(
self.request, response, form=form, data=self._get_ajax_data_if())
def get_form(self, form_class=None):
form = getattr(self, '_cached_form', None)
if form is None:
form = super(AjaxCapableProcessFormViewMixin, self).get_form(
form_class)
self._cached_form = form
return form
def _get_ajax_data_if(self):
return self.get_ajax_data() if self.request.is_ajax() else None
def get_ajax_data(self):
return None
class LoginView(RedirectAuthenticatedUserMixin,
AjaxCapableProcessFormViewMixin,
FormView):
form_class = LoginForm
template_name = "account/login." + app_settings.TEMPLATE_EXTENSION
success_url = None
redirect_field_name = "next"
@sensitive_post_parameters_m
def dispatch(self, request, *args, **kwargs):
return super(LoginView, self).dispatch(request, *args, **kwargs)
def get_form_kwargs(self):
kwargs = super(LoginView, self).get_form_kwargs()
kwargs['request'] = self.request
return kwargs
def get_form_class(self):
return get_form_class(app_settings.FORMS, 'login', self.form_class)
def form_valid(self, form):
success_url = self.get_success_url()
try:
return form.login(self.request, redirect_url=success_url)
except ImmediateHttpResponse as e:
return e.response
def get_success_url(self):
# Explicitly passed ?next= URL takes precedence
ret = (get_next_redirect_url(
self.request,
self.redirect_field_name) or self.success_url)
return ret
def get_context_data(self, **kwargs):
ret = super(LoginView, self).get_context_data(**kwargs)
signup_url = passthrough_next_redirect_url(self.request,
reverse("account_signup"),
self.redirect_field_name)
redirect_field_value = get_request_param(self.request,
self.redirect_field_name)
site = get_current_site(self.request)
ret.update({"signup_url": signup_url,
"site": site,
"redirect_field_name": self.redirect_field_name,
"redirect_field_value": redirect_field_value})
return ret
login = LoginView.as_view()
class CloseableSignupMixin(object):
template_name_signup_closed = (
"account/signup_closed." + app_settings.TEMPLATE_EXTENSION)
def dispatch(self, request, *args, **kwargs):
try:
if not self.is_open():
return self.closed()
except ImmediateHttpResponse as e:
return e.response
return super(CloseableSignupMixin, self).dispatch(request,
*args,
**kwargs)
def is_open(self):
return get_adapter(self.request).is_open_for_signup(self.request)
def closed(self):
response_kwargs = {
"request": self.request,
"template": self.template_name_signup_closed,
}
return self.response_class(**response_kwargs)
class SignupView(RedirectAuthenticatedUserMixin, CloseableSignupMixin,
AjaxCapableProcessFormViewMixin, FormView):
template_name = "account/signup." + app_settings.TEMPLATE_EXTENSION
form_class = SignupForm
redirect_field_name = "next"
success_url = None
@sensitive_post_parameters_m
def dispatch(self, request, *args, **kwargs):
return super(SignupView, self).dispatch(request, *args, **kwargs)
def get_form_class(self):
return get_form_class(app_settings.FORMS, 'signup', self.form_class)
def get_success_url(self):
# Explicitly passed ?next= URL takes precedence
ret = (
get_next_redirect_url(
self.request,
self.redirect_field_name) or self.success_url)
return ret
def form_valid(self, form):
# By assigning the User to a property on the view, we allow subclasses
# of SignupView to access the newly created User instance
self.user = form.save(self.request)
try:
return complete_signup(
self.request, self.user,
app_settings.EMAIL_VERIFICATION,
self.get_success_url())
except ImmediateHttpResponse as e:
return e.response
def get_context_data(self, **kwargs):
ret = super(SignupView, self).get_context_data(**kwargs)
form = ret['form']
email = self.request.session.get('account_verified_email')
email_keys = ['email']
if app_settings.SIGNUP_EMAIL_ENTER_TWICE:
email_keys.append('email2')
for email_key in email_keys:
form.fields[email_key].initial = email
login_url = passthrough_next_redirect_url(self.request,
reverse("account_login"),
self.redirect_field_name)
redirect_field_name = self.redirect_field_name
redirect_field_value = get_request_param(self.request,
redirect_field_name)
ret.update({"login_url": login_url,
"redirect_field_name": redirect_field_name,
"redirect_field_value": redirect_field_value})
return ret
signup = SignupView.as_view()
class ConfirmEmailView(TemplateResponseMixin, View):
template_name = "account/email_confirm." + app_settings.TEMPLATE_EXTENSION
def get(self, *args, **kwargs):
try:
self.object = self.get_object()
if app_settings.CONFIRM_EMAIL_ON_GET:
return self.post(*args, **kwargs)
except Http404:
self.object = None
ctx = self.get_context_data()
return self.render_to_response(ctx)
def post(self, *args, **kwargs):
self.object = confirmation = self.get_object()
confirmation.confirm(self.request)
get_adapter(self.request).add_message(
self.request,
messages.SUCCESS,
'account/messages/email_confirmed.txt',
{'email': confirmation.email_address.email})
if app_settings.LOGIN_ON_EMAIL_CONFIRMATION:
resp = self.login_on_confirm(confirmation)
if resp is not None:
return resp
# Don't -- allauth doesn't touch is_active, so that a sysadmin can
# use it to block users and the like.
#
# user = confirmation.email_address.user
# user.is_active = True
# user.save()
redirect_url = self.get_redirect_url()
if not redirect_url:
ctx = self.get_context_data()
return self.render_to_response(ctx)
return redirect(redirect_url)
def login_on_confirm(self, confirmation):
"""
Simply logging in the user can become a security issue. If used
email confirmations are not purged, a malicious person who gets
hold of the link can log in over and over again, and the user is
unable to do anything about it. Even restoring the security of
their own mailbox will not help, as the links keep working.
Password reset is different: that mechanism only works as long as
the attacker has access to the mailbox. Once they no longer have
access, they cannot request a password reset and intercept it.
Furthermore, every place where the confirmation links end up
(log files, or even Google Analytics) would suddenly need to be
secured. Purging the email confirmation once it is confirmed would
change the behavior -- users would no longer be able to confirm
repeatedly (in case they forgot that they already clicked the
link in the mail).
All in all, we opted for storing the user that is in the process
of signing up in the session, to avoid all of the above. This may
not work 100% of the time (e.g. if the user closes the browser and
the session gets lost), but at least we're secure.
"""
user_pk = None
user_pk_str = get_adapter(self.request).unstash_user(self.request)
if user_pk_str:
user_pk = url_str_to_user_pk(user_pk_str)
user = confirmation.email_address.user
if user_pk == user.pk and is_anonymous(self.request.user):
return perform_login(self.request,
user,
app_settings.EmailVerificationMethod.NONE,
# passed as callable, as this method
# depends on the authenticated state
redirect_url=self.get_redirect_url)
return None
def get_object(self, queryset=None):
key = self.kwargs['key']
emailconfirmation = EmailConfirmationHMAC.from_key(key)
if not emailconfirmation:
if queryset is None:
queryset = self.get_queryset()
try:
emailconfirmation = queryset.get(key=key.lower())
except EmailConfirmation.DoesNotExist:
raise Http404()
return emailconfirmation
def get_queryset(self):
qs = EmailConfirmation.objects.all_valid()
qs = qs.select_related("email_address__user")
return qs
def get_context_data(self, **kwargs):
ctx = kwargs
ctx["confirmation"] = self.object
site = get_current_site(self.request)
ctx.update({'site': site})
return ctx
def get_redirect_url(self):
return get_adapter(self.request).get_email_confirmation_redirect_url(
self.request)
confirm_email = ConfirmEmailView.as_view()
class EmailView(AjaxCapableProcessFormViewMixin, FormView):
template_name = "account/email." + app_settings.TEMPLATE_EXTENSION
form_class = AddEmailForm
success_url = reverse_lazy('account_email')
def get_form_class(self):
return get_form_class(app_settings.FORMS, 'add_email', self.form_class)
def dispatch(self, request, *args, **kwargs):
sync_user_email_addresses(request.user)
return super(EmailView, self).dispatch(request, *args, **kwargs)
def get_form_kwargs(self):
kwargs = super(EmailView, self).get_form_kwargs()
kwargs["user"] = self.request.user
return kwargs
def form_valid(self, form):
email_address = form.save(self.request)
get_adapter(self.request).add_message(
self.request,
messages.INFO,
'account/messages/'
'email_confirmation_sent.txt',
{'email': form.cleaned_data["email"]})
signals.email_added.send(sender=self.request.user.__class__,
request=self.request,
user=self.request.user,
email_address=email_address)
return super(EmailView, self).form_valid(form)
def post(self, request, *args, **kwargs):
res = None
if "action_add" in request.POST:
res = super(EmailView, self).post(request, *args, **kwargs)
elif request.POST.get("email"):
if "action_send" in request.POST:
res = self._action_send(request)
elif "action_remove" in request.POST:
res = self._action_remove(request)
elif "action_primary" in request.POST:
res = self._action_primary(request)
res = res or HttpResponseRedirect(self.success_url)
# Given that we bypassed AjaxCapableProcessFormViewMixin,
# we'll have to invoke it manually...
res = _ajax_response(request, res, data=self._get_ajax_data_if())
else:
# No email address selected
res = HttpResponseRedirect(self.success_url)
res = _ajax_response(request, res, data=self._get_ajax_data_if())
return res
def _action_send(self, request, *args, **kwargs):
email = request.POST["email"]
try:
email_address = EmailAddress.objects.get(
user=request.user,
email=email,
)
get_adapter(request).add_message(
request,
messages.INFO,
'account/messages/'
'email_confirmation_sent.txt',
{'email': email})
email_address.send_confirmation(request)
return HttpResponseRedirect(self.get_success_url())
except EmailAddress.DoesNotExist:
pass
def _action_remove(self, request, *args, **kwargs):
email = request.POST["email"]
try:
email_address = EmailAddress.objects.get(
user=request.user,
email=email
)
if email_address.primary:
get_adapter(request).add_message(
request,
messages.ERROR,
'account/messages/'
'cannot_delete_primary_email.txt',
{"email": email})
else:
email_address.delete()
signals.email_removed.send(sender=request.user.__class__,
request=request,
user=request.user,
email_address=email_address)
get_adapter(request).add_message(
request,
messages.SUCCESS,
'account/messages/email_deleted.txt',
{"email": email})
return HttpResponseRedirect(self.get_success_url())
except EmailAddress.DoesNotExist:
pass
def _action_primary(self, request, *args, **kwargs):
email = request.POST["email"]
try:
email_address = EmailAddress.objects.get_for_user(
user=request.user,
email=email
)
# Not primary=True -- Slightly different variation, don't
# require verified unless moving from a verified
# address. Ignore constraint if previous primary email
# address is not verified.
if not email_address.verified and \
EmailAddress.objects.filter(user=request.user,
verified=True).exists():
get_adapter(request).add_message(
request,
messages.ERROR,
'account/messages/'
'unverified_primary_email.txt')
else:
# Sending the old primary address to the signal
# adds a db query.
try:
from_email_address = EmailAddress.objects \
.get(user=request.user, primary=True)
except EmailAddress.DoesNotExist:
from_email_address = None
email_address.set_as_primary()
get_adapter(request).add_message(
request,
messages.SUCCESS,
'account/messages/primary_email_set.txt')
signals.email_changed \
.send(sender=request.user.__class__,
request=request,
user=request.user,
from_email_address=from_email_address,
to_email_address=email_address)
return HttpResponseRedirect(self.get_success_url())
except EmailAddress.DoesNotExist:
pass
def get_context_data(self, **kwargs):
ret = super(EmailView, self).get_context_data(**kwargs)
# NOTE: For backwards compatibility
ret['add_email_form'] = ret.get('form')
# (end NOTE)
return ret
def get_ajax_data(self):
data = []
for emailaddress in self.request.user.emailaddress_set.all():
data.append({
'email': emailaddress.email,
'verified': emailaddress.verified,
'primary': emailaddress.primary,
})
return data
email = login_required(EmailView.as_view())
class PasswordChangeView(AjaxCapableProcessFormViewMixin, FormView):
template_name = (
"account/password_change." + app_settings.TEMPLATE_EXTENSION)
form_class = ChangePasswordForm
success_url = reverse_lazy("account_change_password")
def get_form_class(self):
return get_form_class(app_settings.FORMS,
'change_password',
self.form_class)
@sensitive_post_parameters_m
def dispatch(self, request, *args, **kwargs):
return super(PasswordChangeView, self).dispatch(
request, *args, **kwargs)
def render_to_response(self, context, **response_kwargs):
if not self.request.user.has_usable_password():
return HttpResponseRedirect(reverse('account_set_password'))
return super(PasswordChangeView, self).render_to_response(
context, **response_kwargs)
def get_form_kwargs(self):
kwargs = super(PasswordChangeView, self).get_form_kwargs()
kwargs["user"] = self.request.user
return kwargs
def form_valid(self, form):
form.save()
logout_on_password_change(self.request, form.user)
get_adapter(self.request).add_message(
self.request,
messages.SUCCESS,
'account/messages/password_changed.txt')
signals.password_changed.send(sender=self.request.user.__class__,
request=self.request,
user=self.request.user)
return super(PasswordChangeView, self).form_valid(form)
def get_context_data(self, **kwargs):
ret = super(PasswordChangeView, self).get_context_data(**kwargs)
# NOTE: For backwards compatibility
ret['password_change_form'] = ret.get('form')
# (end NOTE)
return ret
password_change = login_required(PasswordChangeView.as_view())
class PasswordSetView(AjaxCapableProcessFormViewMixin, FormView):
template_name = "account/password_set." + app_settings.TEMPLATE_EXTENSION
form_class = SetPasswordForm
success_url = reverse_lazy("account_set_password")
def get_form_class(self):
return get_form_class(app_settings.FORMS,
'set_password',
self.form_class)
@sensitive_post_parameters_m
def dispatch(self, request, *args, **kwargs):
return super(PasswordSetView, self).dispatch(request, *args, **kwargs)
def render_to_response(self, context, **response_kwargs):
if self.request.user.has_usable_password():
return HttpResponseRedirect(reverse('account_change_password'))
return super(PasswordSetView, self).render_to_response(
context, **response_kwargs)
def get_form_kwargs(self):
kwargs = super(PasswordSetView, self).get_form_kwargs()
kwargs["user"] = self.request.user
return kwargs
def form_valid(self, form):
form.save()
logout_on_password_change(self.request, form.user)
get_adapter(self.request).add_message(
self.request,
messages.SUCCESS,
'account/messages/password_set.txt')
signals.password_set.send(sender=self.request.user.__class__,
request=self.request, user=self.request.user)
return super(PasswordSetView, self).form_valid(form)
def get_context_data(self, **kwargs):
ret = super(PasswordSetView, self).get_context_data(**kwargs)
# NOTE: For backwards compatibility
ret['password_set_form'] = ret.get('form')
# (end NOTE)
return ret
password_set = login_required(PasswordSetView.as_view())
class PasswordResetView(AjaxCapableProcessFormViewMixin, FormView):
template_name = "account/password_reset." + app_settings.TEMPLATE_EXTENSION
form_class = ResetPasswordForm
success_url = reverse_lazy("account_reset_password_done")
redirect_field_name = "next"
def get_form_class(self):
return get_form_class(app_settings.FORMS,
'reset_password',
self.form_class)
def form_valid(self, form):
form.save(self.request)
return super(PasswordResetView, self).form_valid(form)
def get_context_data(self, **kwargs):
ret = super(PasswordResetView, self).get_context_data(**kwargs)
login_url = passthrough_next_redirect_url(self.request,
reverse("account_login"),
self.redirect_field_name)
# NOTE: For backwards compatibility
ret['password_reset_form'] = ret.get('form')
# (end NOTE)
ret.update({"login_url": login_url})
return ret
password_reset = PasswordResetView.as_view()
class PasswordResetDoneView(TemplateView):
template_name = (
"account/password_reset_done." + app_settings.TEMPLATE_EXTENSION)
password_reset_done = PasswordResetDoneView.as_view()
class PasswordResetFromKeyView(AjaxCapableProcessFormViewMixin, FormView):
template_name = (
"account/password_reset_from_key." + app_settings.TEMPLATE_EXTENSION)
form_class = ResetPasswordKeyForm
success_url = reverse_lazy("account_reset_password_from_key_done")
def get_form_class(self):
return get_form_class(app_settings.FORMS,
'reset_password_from_key',
self.form_class)
def dispatch(self, request, uidb36, key, **kwargs):
self.request = request
self.key = key
# (Ab)using forms here to be able to handle errors in XHR #890
token_form = UserTokenForm(data={'uidb36': uidb36, 'key': key})
if not token_form.is_valid():
self.reset_user = None
response = self.render_to_response(
self.get_context_data(token_fail=True)
)
return _ajax_response(self.request, response, form=token_form)
else:
self.reset_user = token_form.reset_user
return super(PasswordResetFromKeyView, self).dispatch(request,
uidb36,
key,
**kwargs)
def get_context_data(self, **kwargs):
ret = super(PasswordResetFromKeyView, self).get_context_data(**kwargs)
ret['action_url'] = reverse(
'account_reset_password_from_key',
kwargs={'uidb36': self.kwargs['uidb36'],
'key': self.kwargs['key']})
return ret
def get_form_kwargs(self):
kwargs = super(PasswordResetFromKeyView, self).get_form_kwargs()
kwargs["user"] = self.reset_user
kwargs["temp_key"] = self.key
return kwargs
def form_valid(self, form):
form.save()
get_adapter(self.request).add_message(
self.request,
messages.SUCCESS,
'account/messages/password_changed.txt')
signals.password_reset.send(sender=self.reset_user.__class__,
request=self.request,
user=self.reset_user)
if app_settings.LOGIN_ON_PASSWORD_RESET:
return perform_login(
self.request, self.reset_user,
email_verification=app_settings.EMAIL_VERIFICATION)
return super(PasswordResetFromKeyView, self).form_valid(form)
password_reset_from_key = PasswordResetFromKeyView.as_view()
class PasswordResetFromKeyDoneView(TemplateView):
template_name = (
"account/password_reset_from_key_done." +
app_settings.TEMPLATE_EXTENSION)
password_reset_from_key_done = PasswordResetFromKeyDoneView.as_view()
class LogoutView(TemplateResponseMixin, View):
template_name = "account/logout." + app_settings.TEMPLATE_EXTENSION
redirect_field_name = "next"
def get(self, *args, **kwargs):
if app_settings.LOGOUT_ON_GET:
return self.post(*args, **kwargs)
if not is_authenticated(self.request.user):
return redirect(self.get_redirect_url())
ctx = self.get_context_data()
return self.render_to_response(ctx)
def post(self, *args, **kwargs):
url = self.get_redirect_url()
if is_authenticated(self.request.user):
self.logout()
return redirect(url)
def logout(self):
adapter = get_adapter(self.request)
adapter.add_message(
self.request,
messages.SUCCESS,
'account/messages/logged_out.txt')
adapter.logout(self.request)
def get_context_data(self, **kwargs):
ctx = kwargs
redirect_field_value = get_request_param(self.request,
self.redirect_field_name)
ctx.update({
"redirect_field_name": self.redirect_field_name,
"redirect_field_value": redirect_field_value})
return ctx
def get_redirect_url(self):
return (
get_next_redirect_url(
self.request,
self.redirect_field_name) or get_adapter(
self.request).get_logout_redirect_url(
self.request))
logout = LogoutView.as_view()
class AccountInactiveView(TemplateView):
template_name = (
'account/account_inactive.' + app_settings.TEMPLATE_EXTENSION)
account_inactive = AccountInactiveView.as_view()
class EmailVerificationSentView(TemplateView):
template_name = (
'account/verification_sent.' + app_settings.TEMPLATE_EXTENSION)
email_verification_sent = EmailVerificationSentView.as_view()
|
|
"""Test inter-conversion of different polynomial classes.
This tests the convert and cast methods of all the polynomial classes.
"""
from __future__ import division, absolute_import, print_function
import operator as op
from numbers import Number
import numpy as np
from numpy.compat import long
from numpy.polynomial import (
Polynomial, Legendre, Chebyshev, Laguerre, Hermite, HermiteE)
from numpy.testing import (
assert_almost_equal, assert_raises, assert_equal, assert_,
run_module_suite)
classes = (
Polynomial, Legendre, Chebyshev, Laguerre,
Hermite, HermiteE)
def test_class_methods():
for Poly1 in classes:
for Poly2 in classes:
yield check_conversion, Poly1, Poly2
yield check_cast, Poly1, Poly2
for Poly in classes:
yield check_call, Poly
yield check_identity, Poly
yield check_basis, Poly
yield check_fromroots, Poly
yield check_fit, Poly
yield check_equal, Poly
yield check_not_equal, Poly
yield check_add, Poly
yield check_sub, Poly
yield check_mul, Poly
yield check_floordiv, Poly
yield check_truediv, Poly
yield check_mod, Poly
yield check_divmod, Poly
yield check_pow, Poly
yield check_integ, Poly
yield check_deriv, Poly
yield check_roots, Poly
yield check_linspace, Poly
yield check_mapparms, Poly
yield check_degree, Poly
yield check_copy, Poly
yield check_cutdeg, Poly
yield check_truncate, Poly
yield check_trim, Poly
#
# helper functions
#
random = np.random.random
def assert_poly_almost_equal(p1, p2, msg=""):
try:
assert_(np.all(p1.domain == p2.domain))
assert_(np.all(p1.window == p2.window))
assert_almost_equal(p1.coef, p2.coef)
except AssertionError:
msg = "Result: %s\nTarget: %s", (p1, p2)
raise AssertionError(msg)
#
# conversion methods that depend on two classes
#
def check_conversion(Poly1, Poly2):
x = np.linspace(0, 1, 10)
coef = random((3,))
d1 = Poly1.domain + random((2,)) * .25
w1 = Poly1.window + random((2,)) * .25
p1 = Poly1(coef, domain=d1, window=w1)
d2 = Poly2.domain + random((2,)) * .25
w2 = Poly2.window + random((2,)) * .25
p2 = p1.convert(kind=Poly2, domain=d2, window=w2)
assert_almost_equal(p2.domain, d2)
assert_almost_equal(p2.window, w2)
assert_almost_equal(p2(x), p1(x))
def check_cast(Poly1, Poly2):
x = np.linspace(0, 1, 10)
coef = random((3,))
d1 = Poly1.domain + random((2,)) * .25
w1 = Poly1.window + random((2,)) * .25
p1 = Poly1(coef, domain=d1, window=w1)
d2 = Poly2.domain + random((2,)) * .25
w2 = Poly2.window + random((2,)) * .25
p2 = Poly2.cast(p1, domain=d2, window=w2)
assert_almost_equal(p2.domain, d2)
assert_almost_equal(p2.window, w2)
assert_almost_equal(p2(x), p1(x))
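#
# Illustrative sketch (not part of the checks above): `convert` and `cast` are
# two spellings of the same operation -- `p.convert(kind=K)` is the instance
# form, `K.cast(p)` the classmethod form. The helper name below is made up
# purely for demonstration.
def _example_convert_vs_cast():
    p = Polynomial([1, 2, 3])          # 1 + 2*x + 3*x**2 in the power basis
    c1 = p.convert(kind=Chebyshev)     # instance-method conversion
    c2 = Chebyshev.cast(p)             # classmethod cast, same series
    assert_almost_equal(c1.coef, c2.coef)
    return c1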
#
# methods that depend on one class
#
def check_identity(Poly):
d = Poly.domain + random((2,)) * .25
w = Poly.window + random((2,)) * .25
x = np.linspace(d[0], d[1], 11)
p = Poly.identity(domain=d, window=w)
assert_equal(p.domain, d)
assert_equal(p.window, w)
assert_almost_equal(p(x), x)
def check_basis(Poly):
d = Poly.domain + random((2,)) * .25
w = Poly.window + random((2,)) * .25
p = Poly.basis(5, domain=d, window=w)
assert_equal(p.domain, d)
assert_equal(p.window, w)
assert_equal(p.coef, [0] * 5 + [1])
def check_fromroots(Poly):
# check that requested roots are zeros of a polynomial
# of correct degree, domain, and window.
d = Poly.domain + random((2,)) * .25
w = Poly.window + random((2,)) * .25
r = random((5,))
p1 = Poly.fromroots(r, domain=d, window=w)
assert_equal(p1.degree(), len(r))
assert_equal(p1.domain, d)
assert_equal(p1.window, w)
assert_almost_equal(p1(r), 0)
# check that polynomial is monic
pdom = Polynomial.domain
pwin = Polynomial.window
p2 = Polynomial.cast(p1, domain=pdom, window=pwin)
assert_almost_equal(p2.coef[-1], 1)
def check_fit(Poly):
def f(x):
return x * (x - 1) * (x - 2)
x = np.linspace(0, 3)
y = f(x)
# check default value of domain and window
p = Poly.fit(x, y, 3)
assert_almost_equal(p.domain, [0, 3])
assert_almost_equal(p(x), y)
assert_equal(p.degree(), 3)
# check with given domain and window
d = Poly.domain + random((2,)) * .25
w = Poly.window + random((2,)) * .25
p = Poly.fit(x, y, 3, domain=d, window=w)
assert_almost_equal(p(x), y)
assert_almost_equal(p.domain, d)
assert_almost_equal(p.window, w)
p = Poly.fit(x, y, [0, 1, 2, 3], domain=d, window=w)
assert_almost_equal(p(x), y)
assert_almost_equal(p.domain, d)
assert_almost_equal(p.window, w)
# check with class domain default
p = Poly.fit(x, y, 3, [])
assert_equal(p.domain, Poly.domain)
assert_equal(p.window, Poly.window)
p = Poly.fit(x, y, [0, 1, 2, 3], [])
assert_equal(p.domain, Poly.domain)
assert_equal(p.window, Poly.window)
# check that fit accepts weights.
w = np.zeros_like(x)
z = y + random(y.shape) * .25
w[::2] = 1
p1 = Poly.fit(x[::2], z[::2], 3)
p2 = Poly.fit(x, z, 3, w=w)
p3 = Poly.fit(x, z, [0, 1, 2, 3], w=w)
assert_almost_equal(p1(x), p2(x))
assert_almost_equal(p2(x), p3(x))
def check_equal(Poly):
p1 = Poly([1, 2, 3], domain=[0, 1], window=[2, 3])
p2 = Poly([1, 1, 1], domain=[0, 1], window=[2, 3])
p3 = Poly([1, 2, 3], domain=[1, 2], window=[2, 3])
p4 = Poly([1, 2, 3], domain=[0, 1], window=[1, 2])
assert_(p1 == p1)
assert_(not p1 == p2)
assert_(not p1 == p3)
assert_(not p1 == p4)
def check_not_equal(Poly):
p1 = Poly([1, 2, 3], domain=[0, 1], window=[2, 3])
p2 = Poly([1, 1, 1], domain=[0, 1], window=[2, 3])
p3 = Poly([1, 2, 3], domain=[1, 2], window=[2, 3])
p4 = Poly([1, 2, 3], domain=[0, 1], window=[1, 2])
assert_(not p1 != p1)
assert_(p1 != p2)
assert_(p1 != p3)
assert_(p1 != p4)
def check_add(Poly):
# This checks commutation, not numerical correctness
c1 = list(random((4,)) + .5)
c2 = list(random((3,)) + .5)
p1 = Poly(c1)
p2 = Poly(c2)
p3 = p1 + p2
assert_poly_almost_equal(p2 + p1, p3)
assert_poly_almost_equal(p1 + c2, p3)
assert_poly_almost_equal(c2 + p1, p3)
assert_poly_almost_equal(p1 + tuple(c2), p3)
assert_poly_almost_equal(tuple(c2) + p1, p3)
assert_poly_almost_equal(p1 + np.array(c2), p3)
assert_poly_almost_equal(np.array(c2) + p1, p3)
assert_raises(TypeError, op.add, p1, Poly([0], domain=Poly.domain + 1))
assert_raises(TypeError, op.add, p1, Poly([0], window=Poly.window + 1))
if Poly is Polynomial:
assert_raises(TypeError, op.add, p1, Chebyshev([0]))
else:
assert_raises(TypeError, op.add, p1, Polynomial([0]))
def check_sub(Poly):
# This checks commutation, not numerical correctness
c1 = list(random((4,)) + .5)
c2 = list(random((3,)) + .5)
p1 = Poly(c1)
p2 = Poly(c2)
p3 = p1 - p2
assert_poly_almost_equal(p2 - p1, -p3)
assert_poly_almost_equal(p1 - c2, p3)
assert_poly_almost_equal(c2 - p1, -p3)
assert_poly_almost_equal(p1 - tuple(c2), p3)
assert_poly_almost_equal(tuple(c2) - p1, -p3)
assert_poly_almost_equal(p1 - np.array(c2), p3)
assert_poly_almost_equal(np.array(c2) - p1, -p3)
assert_raises(TypeError, op.sub, p1, Poly([0], domain=Poly.domain + 1))
assert_raises(TypeError, op.sub, p1, Poly([0], window=Poly.window + 1))
if Poly is Polynomial:
assert_raises(TypeError, op.sub, p1, Chebyshev([0]))
else:
assert_raises(TypeError, op.sub, p1, Polynomial([0]))
def check_mul(Poly):
c1 = list(random((4,)) + .5)
c2 = list(random((3,)) + .5)
p1 = Poly(c1)
p2 = Poly(c2)
p3 = p1 * p2
assert_poly_almost_equal(p2 * p1, p3)
assert_poly_almost_equal(p1 * c2, p3)
assert_poly_almost_equal(c2 * p1, p3)
assert_poly_almost_equal(p1 * tuple(c2), p3)
assert_poly_almost_equal(tuple(c2) * p1, p3)
assert_poly_almost_equal(p1 * np.array(c2), p3)
assert_poly_almost_equal(np.array(c2) * p1, p3)
assert_poly_almost_equal(p1 * 2, p1 * Poly([2]))
assert_poly_almost_equal(2 * p1, p1 * Poly([2]))
assert_raises(TypeError, op.mul, p1, Poly([0], domain=Poly.domain + 1))
assert_raises(TypeError, op.mul, p1, Poly([0], window=Poly.window + 1))
if Poly is Polynomial:
assert_raises(TypeError, op.mul, p1, Chebyshev([0]))
else:
assert_raises(TypeError, op.mul, p1, Polynomial([0]))
def check_floordiv(Poly):
c1 = list(random((4,)) + .5)
c2 = list(random((3,)) + .5)
c3 = list(random((2,)) + .5)
p1 = Poly(c1)
p2 = Poly(c2)
p3 = Poly(c3)
p4 = p1 * p2 + p3
c4 = list(p4.coef)
assert_poly_almost_equal(p4 // p2, p1)
assert_poly_almost_equal(p4 // c2, p1)
assert_poly_almost_equal(c4 // p2, p1)
assert_poly_almost_equal(p4 // tuple(c2), p1)
assert_poly_almost_equal(tuple(c4) // p2, p1)
assert_poly_almost_equal(p4 // np.array(c2), p1)
assert_poly_almost_equal(np.array(c4) // p2, p1)
assert_poly_almost_equal(2 // p2, Poly([0]))
assert_poly_almost_equal(p2 // 2, 0.5 * p2)
assert_raises(
TypeError, op.floordiv, p1, Poly([0], domain=Poly.domain + 1))
assert_raises(
TypeError, op.floordiv, p1, Poly([0], window=Poly.window + 1))
if Poly is Polynomial:
assert_raises(TypeError, op.floordiv, p1, Chebyshev([0]))
else:
assert_raises(TypeError, op.floordiv, p1, Polynomial([0]))
def check_truediv(Poly):
# true division is valid only if the denominator is a Number and
# not a python bool.
p1 = Poly([1, 2, 3])
p2 = p1 * 5
for stype in np.ScalarType:
if not issubclass(stype, Number) or issubclass(stype, bool):
continue
s = stype(5)
assert_poly_almost_equal(op.truediv(p2, s), p1)
assert_raises(TypeError, op.truediv, s, p2)
for stype in (int, long, float):
s = stype(5)
assert_poly_almost_equal(op.truediv(p2, s), p1)
assert_raises(TypeError, op.truediv, s, p2)
for stype in [complex]:
s = stype(5, 0)
assert_poly_almost_equal(op.truediv(p2, s), p1)
assert_raises(TypeError, op.truediv, s, p2)
for s in [tuple(), list(), dict(), bool(), np.array([1])]:
assert_raises(TypeError, op.truediv, p2, s)
assert_raises(TypeError, op.truediv, s, p2)
for ptype in classes:
assert_raises(TypeError, op.truediv, p2, ptype(1))
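# A small sketch of the rule stated above (the helper name is illustrative
# only): dividing by a plain Number scales the coefficients, while dividing
# one polynomial instance by another raises TypeError.
def _example_truediv_rule():
    p = Polynomial([2, 4, 6])
    assert_almost_equal((p / 2).coef, [1, 2, 3])                  # Number: fine
    assert_raises(TypeError, op.truediv, p, Polynomial([1, 1]))   # poly/poly: no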
def check_mod(Poly):
# This checks commutation, not numerical correctness
c1 = list(random((4,)) + .5)
c2 = list(random((3,)) + .5)
c3 = list(random((2,)) + .5)
p1 = Poly(c1)
p2 = Poly(c2)
p3 = Poly(c3)
p4 = p1 * p2 + p3
c4 = list(p4.coef)
assert_poly_almost_equal(p4 % p2, p3)
assert_poly_almost_equal(p4 % c2, p3)
assert_poly_almost_equal(c4 % p2, p3)
assert_poly_almost_equal(p4 % tuple(c2), p3)
assert_poly_almost_equal(tuple(c4) % p2, p3)
assert_poly_almost_equal(p4 % np.array(c2), p3)
assert_poly_almost_equal(np.array(c4) % p2, p3)
assert_poly_almost_equal(2 % p2, Poly([2]))
assert_poly_almost_equal(p2 % 2, Poly([0]))
assert_raises(TypeError, op.mod, p1, Poly([0], domain=Poly.domain + 1))
assert_raises(TypeError, op.mod, p1, Poly([0], window=Poly.window + 1))
if Poly is Polynomial:
assert_raises(TypeError, op.mod, p1, Chebyshev([0]))
else:
assert_raises(TypeError, op.mod, p1, Polynomial([0]))
def check_divmod(Poly):
# This checks commutation, not numerical correctness
c1 = list(random((4,)) + .5)
c2 = list(random((3,)) + .5)
c3 = list(random((2,)) + .5)
p1 = Poly(c1)
p2 = Poly(c2)
p3 = Poly(c3)
p4 = p1 * p2 + p3
c4 = list(p4.coef)
quo, rem = divmod(p4, p2)
assert_poly_almost_equal(quo, p1)
assert_poly_almost_equal(rem, p3)
quo, rem = divmod(p4, c2)
assert_poly_almost_equal(quo, p1)
assert_poly_almost_equal(rem, p3)
quo, rem = divmod(c4, p2)
assert_poly_almost_equal(quo, p1)
assert_poly_almost_equal(rem, p3)
quo, rem = divmod(p4, tuple(c2))
assert_poly_almost_equal(quo, p1)
assert_poly_almost_equal(rem, p3)
quo, rem = divmod(tuple(c4), p2)
assert_poly_almost_equal(quo, p1)
assert_poly_almost_equal(rem, p3)
quo, rem = divmod(p4, np.array(c2))
assert_poly_almost_equal(quo, p1)
assert_poly_almost_equal(rem, p3)
quo, rem = divmod(np.array(c4), p2)
assert_poly_almost_equal(quo, p1)
assert_poly_almost_equal(rem, p3)
quo, rem = divmod(p2, 2)
assert_poly_almost_equal(quo, 0.5 * p2)
assert_poly_almost_equal(rem, Poly([0]))
quo, rem = divmod(2, p2)
assert_poly_almost_equal(quo, Poly([0]))
assert_poly_almost_equal(rem, Poly([2]))
assert_raises(TypeError, divmod, p1, Poly([0], domain=Poly.domain + 1))
assert_raises(TypeError, divmod, p1, Poly([0], window=Poly.window + 1))
if Poly is Polynomial:
assert_raises(TypeError, divmod, p1, Chebyshev([0]))
else:
assert_raises(TypeError, divmod, p1, Polynomial([0]))
def check_roots(Poly):
d = Poly.domain + random((2,)) * .25
w = Poly.window + random((2,)) * .25
tgt = np.sort(random((5,)))
res = np.sort(Poly.fromroots(tgt, domain=d, window=w).roots())
assert_almost_equal(res, tgt)
# default domain and window
res = np.sort(Poly.fromroots(tgt).roots())
assert_almost_equal(res, tgt)
def check_degree(Poly):
p = Poly.basis(5)
assert_equal(p.degree(), 5)
def check_copy(Poly):
p1 = Poly.basis(5)
p2 = p1.copy()
assert_(p1 == p2)
assert_(p1 is not p2)
assert_(p1.coef is not p2.coef)
assert_(p1.domain is not p2.domain)
assert_(p1.window is not p2.window)
def check_integ(Poly):
P = Polynomial
# Check defaults
p0 = Poly.cast(P([1 * 2, 2 * 3, 3 * 4]))
p1 = P.cast(p0.integ())
p2 = P.cast(p0.integ(2))
assert_poly_almost_equal(p1, P([0, 2, 3, 4]))
assert_poly_almost_equal(p2, P([0, 0, 1, 1, 1]))
# Check with k
p0 = Poly.cast(P([1 * 2, 2 * 3, 3 * 4]))
p1 = P.cast(p0.integ(k=1))
p2 = P.cast(p0.integ(2, k=[1, 1]))
assert_poly_almost_equal(p1, P([1, 2, 3, 4]))
assert_poly_almost_equal(p2, P([1, 1, 1, 1, 1]))
# Check with lbnd
p0 = Poly.cast(P([1 * 2, 2 * 3, 3 * 4]))
p1 = P.cast(p0.integ(lbnd=1))
p2 = P.cast(p0.integ(2, lbnd=1))
assert_poly_almost_equal(p1, P([-9, 2, 3, 4]))
assert_poly_almost_equal(p2, P([6, -9, 1, 1, 1]))
# Check scaling
d = 2 * Poly.domain
p0 = Poly.cast(P([1 * 2, 2 * 3, 3 * 4]), domain=d)
p1 = P.cast(p0.integ())
p2 = P.cast(p0.integ(2))
assert_poly_almost_equal(p1, P([0, 2, 3, 4]))
assert_poly_almost_equal(p2, P([0, 0, 1, 1, 1]))
def check_deriv(Poly):
# Check that the derivative is the inverse of integration. It is
# assumed that the integration has been checked elsewhere.
d = Poly.domain + random((2,)) * .25
w = Poly.window + random((2,)) * .25
p1 = Poly([1, 2, 3], domain=d, window=w)
p2 = p1.integ(2, k=[1, 2])
p3 = p1.integ(1, k=[1])
assert_almost_equal(p2.deriv(1).coef, p3.coef)
assert_almost_equal(p2.deriv(2).coef, p1.coef)
# default domain and window
p1 = Poly([1, 2, 3])
p2 = p1.integ(2, k=[1, 2])
p3 = p1.integ(1, k=[1])
assert_almost_equal(p2.deriv(1).coef, p3.coef)
assert_almost_equal(p2.deriv(2).coef, p1.coef)
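# Tiny round-trip sketch of the inverse relationship exercised above
# (the helper name is illustrative only):
def _example_integ_deriv_roundtrip():
    p = Polynomial([1, 2, 3])
    assert_almost_equal(p.integ(1, k=[0]).deriv(1).coef, p.coef)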
def check_linspace(Poly):
d = Poly.domain + random((2,)) * .25
w = Poly.window + random((2,)) * .25
p = Poly([1, 2, 3], domain=d, window=w)
# check default domain
xtgt = np.linspace(d[0], d[1], 20)
ytgt = p(xtgt)
xres, yres = p.linspace(20)
assert_almost_equal(xres, xtgt)
assert_almost_equal(yres, ytgt)
# check specified domain
xtgt = np.linspace(0, 2, 20)
ytgt = p(xtgt)
xres, yres = p.linspace(20, domain=[0, 2])
assert_almost_equal(xres, xtgt)
assert_almost_equal(yres, ytgt)
def check_pow(Poly):
d = Poly.domain + random((2,)) * .25
w = Poly.window + random((2,)) * .25
tgt = Poly([1], domain=d, window=w)
tst = Poly([1, 2, 3], domain=d, window=w)
for i in range(5):
assert_poly_almost_equal(tst ** i, tgt)
tgt = tgt * tst
# default domain and window
tgt = Poly([1])
tst = Poly([1, 2, 3])
for i in range(5):
assert_poly_almost_equal(tst ** i, tgt)
tgt = tgt * tst
# check error for invalid powers
assert_raises(ValueError, op.pow, tgt, 1.5)
assert_raises(ValueError, op.pow, tgt, -1)
def check_call(Poly):
P = Polynomial
d = Poly.domain
x = np.linspace(d[0], d[1], 11)
# Check defaults
p = Poly.cast(P([1, 2, 3]))
tgt = 1 + x * (2 + 3 * x)
res = p(x)
assert_almost_equal(res, tgt)
def check_cutdeg(Poly):
p = Poly([1, 2, 3])
assert_raises(ValueError, p.cutdeg, .5)
assert_raises(ValueError, p.cutdeg, -1)
assert_equal(len(p.cutdeg(3)), 3)
assert_equal(len(p.cutdeg(2)), 3)
assert_equal(len(p.cutdeg(1)), 2)
assert_equal(len(p.cutdeg(0)), 1)
def check_truncate(Poly):
p = Poly([1, 2, 3])
assert_raises(ValueError, p.truncate, .5)
assert_raises(ValueError, p.truncate, 0)
assert_equal(len(p.truncate(4)), 3)
assert_equal(len(p.truncate(3)), 3)
assert_equal(len(p.truncate(2)), 2)
assert_equal(len(p.truncate(1)), 1)
def check_trim(Poly):
c = [1, 1e-6, 1e-12, 0]
p = Poly(c)
assert_equal(p.trim().coef, c[:3])
assert_equal(p.trim(1e-10).coef, c[:2])
assert_equal(p.trim(1e-5).coef, c[:1])
def check_mapparms(Poly):
# check with defaults. Should be identity.
d = Poly.domain
w = Poly.window
p = Poly([1], domain=d, window=w)
assert_almost_equal([0, 1], p.mapparms())
#
w = 2 * d + 1
p = Poly([1], domain=d, window=w)
assert_almost_equal([1, 2], p.mapparms())
if __name__ == "__main__":
run_module_suite()
|
|
# pylint: disable=C0103, I1101
"""
File Converter Widget
"""
import os
import logging
import numpy as np
from PyQt5 import QtWidgets, QtCore, QtGui
from sas.sascalc.file_converter.ascii2d_loader import ASCII2DLoader
from sas.sascalc.file_converter.nxcansas_writer import NXcanSASWriter
from sas.sascalc.dataloader.data_info import Data1D
from sas.sascalc.dataloader.data_info import Detector
from sas.sascalc.dataloader.data_info import Sample
from sas.sascalc.dataloader.data_info import Source
from sas.sascalc.dataloader.data_info import Vector
import sas.sascalc.file_converter.FileConverterUtilities as Utilities
import sas.qtgui.Utilities.GuiUtils as GuiUtils
from sas.qtgui.Utilities.FrameSelect import FrameSelect
from sas.sasview import __version__ as SASVIEW_VERSION
from .UI.FileConverterUI import Ui_FileConverterUI
class FileConverterWidget(QtWidgets.QDialog, Ui_FileConverterUI):
"""
Class to describe the behaviour of the File Converter widget
"""
def __init__(self, parent=None):
"""
Parent here is the GUI Manager. Required for access to
the help location and to the file loader.
"""
super(FileConverterWidget, self).__init__()
self.parent = parent
self.setupUi(self)
# disable the context help icon
self.setWindowFlags(self.windowFlags() & ~QtCore.Qt.WindowContextHelpButtonHint)
self.setWindowTitle("File Converter")
# i,q file fields are not editable
self.txtIFile.setEnabled(False)
self.txtQFile.setEnabled(False)
self.cmdConvert.setEnabled(False)
# globals
self.is1D = True
self.isBSL = False
self.ifile = ""
self.qfile = ""
self.ofile = ""
self.metadata = {}
self.setValidators()
self.addSlots()
def setValidators(self):
"""
Apply validators for double precision numbers to numerical fields
"""
#self.txtMG_RunNumber.setValidator(QtGui.QIntValidator())
self.txtMD_Distance.setValidator(GuiUtils.DoubleValidator())
self.txtMD_OffsetX.setValidator(GuiUtils.DoubleValidator())
self.txtMD_OffsetY.setValidator(GuiUtils.DoubleValidator())
self.txtMD_OrientRoll.setValidator(GuiUtils.DoubleValidator())
self.txtMD_OrientPitch.setValidator(GuiUtils.DoubleValidator())
self.txtMD_OrientYaw.setValidator(GuiUtils.DoubleValidator())
self.txtMD_PixelX.setValidator(GuiUtils.DoubleValidator())
self.txtMD_PixelY.setValidator(GuiUtils.DoubleValidator())
self.txtMD_BeamX.setValidator(GuiUtils.DoubleValidator())
self.txtMD_BeamY.setValidator(GuiUtils.DoubleValidator())
self.txtMD_SlitLength.setValidator(GuiUtils.DoubleValidator())
self.txtMSa_Thickness.setValidator(GuiUtils.DoubleValidator())
self.txtMSa_Transmission.setValidator(GuiUtils.DoubleValidator())
self.txtMSa_Temperature.setValidator(GuiUtils.DoubleValidator())
self.txtMSa_PositionX.setValidator(GuiUtils.DoubleValidator())
self.txtMSa_PositionY.setValidator(GuiUtils.DoubleValidator())
self.txtMSa_OrientR.setValidator(GuiUtils.DoubleValidator())
self.txtMSa_OrientY.setValidator(GuiUtils.DoubleValidator())
self.txtMSa_OrientP.setValidator(GuiUtils.DoubleValidator())
self.txtMSo_BeamSizeX.setValidator(GuiUtils.DoubleValidator())
self.txtMSo_BeamSizeY.setValidator(GuiUtils.DoubleValidator())
self.txtMSo_BeamWavelength.setValidator(GuiUtils.DoubleValidator())
self.txtMSo_MinWavelength.setValidator(GuiUtils.DoubleValidator())
self.txtMSo_MaxWavelength.setValidator(GuiUtils.DoubleValidator())
self.txtMSo_Spread.setValidator(GuiUtils.DoubleValidator())
def addSlots(self):
"""
Create callbacks for UI elements and outside signals
"""
self.cmdConvert.clicked.connect(self.onConvert)
self.cmdClose.clicked.connect(self.accept)
self.cmdHelp.clicked.connect(self.onHelp)
self.btnQFile.clicked.connect(self.onQFileOpen)
self.btnIFile.clicked.connect(self.onIFileOpen)
self.btnOutputFile.clicked.connect(self.onNewFile)
self.txtOutputFile.editingFinished.connect(self.onNewFileEdited)
self.cbInputFormat.currentIndexChanged.connect(self.onInputFormat)
def onConvert(self):
"""
Call the conversion method and, if requested, update the DataExplorer
with the converted data.
"""
self.readMetadata()
try:
if not self.isBSL and self.is1D:
qdata = Utilities.extract_ascii_data(self.qfile)
iqdata = np.array([Utilities.extract_ascii_data(self.ifile)])
self.convert1Ddata(qdata, iqdata, self.ofile, self.getMetadata())
elif self.isBSL and self.is1D:
qdata, iqdata = Utilities.extract_otoko_data(self.qfile, self.ifile)
self.convert1Ddata(qdata, iqdata, self.ofile, self.getMetadata())
elif not self.isBSL and not self.is1D:
loader = ASCII2DLoader(self.ifile)
data = loader.load()
dataset = [data] # ASCII 2D only ever contains 1 frame
Utilities.convert_2d_data(dataset, self.ofile, self.getMetadata())
else: # self.data_type == 'bsl'
#dataset = Utilities.extract_bsl_data(self.ifile)
dataset = self.extractBSLdata(self.ifile)
if dataset is None:
return
Utilities.convert_2d_data(dataset, self.ofile, self.getMetadata())
except (ValueError, IOError) as ex:
msg = str(ex)
logging.error(msg)
return
# everything converted, notify the user
logging.info("File successfully converted.")
self.parent.communicate.statusBarUpdateSignal.emit("File converted successfully.")
# Optionally, load the freshly converted file into Data Explorer
if self.chkLoadFile.isChecked():
# awful climbing up the hierarchy... don't do that. please.
self.parent.filesWidget.loadFromURL([self.ofile])
def onHelp(self):
"""
Display online help related to the file converter
"""
location = "/user/qtgui/Calculators/file_converter_help.html"
self.parent.showHelp(location)
def onIFileOpen(self):
"""
Show the path chooser for the file with intensity (I) data
"""
file_candidate = self.openFile()
if not file_candidate:
return
self.ifile = file_candidate
self.txtIFile.setText(os.path.basename(str(file_candidate)))
self.updateConvertState()
def onQFileOpen(self):
"""
Show the path chooser for the file with Q axis data
"""
file_candidate = self.openFile()
if not file_candidate:
return
self.qfile = file_candidate
self.txtQFile.setText(os.path.basename(str(file_candidate)))
self.updateConvertState()
def openFile(self):
"""
Show the path chooser for an existing file
"""
datafile = None
try:
datafile = QtWidgets.QFileDialog.getOpenFileName(
self, "Choose a file", "", "All files (*.*)")[0]
except (RuntimeError, IOError) as ex:
log_msg = "File Converter failed with: {}".format(ex)
logging.error(log_msg)
raise
return datafile
def getDetectorMetadata(self):
"""
Read the detector metadata fields and return them as a Detector object
"""
detector = Detector()
detector.name = self.txtMD_Name.text()
detector.distance = Utilities.toFloat(self.txtMD_Distance.text())
detector.offset = Vector(x=Utilities.toFloat(self.txtMD_OffsetX.text()),
y=Utilities.toFloat(self.txtMD_OffsetY.text()))
detector.orientation = Vector(x=Utilities.toFloat(self.txtMD_OrientRoll.text()),
y=Utilities.toFloat(self.txtMD_OrientPitch.text()),
z=Utilities.toFloat(self.txtMD_OrientYaw.text()))
detector.beam_center = Vector(x=Utilities.toFloat(self.txtMD_BeamX.text()),
y=Utilities.toFloat(self.txtMD_BeamY.text()))
detector.pixel_size = Vector(x=Utilities.toFloat(self.txtMD_PixelX.text()),
y=Utilities.toFloat(self.txtMD_PixelY.text()))
detector.slit_length = Utilities.toFloat(self.txtMD_SlitLength.text())
return detector
def getSourceMetadata(self):
"""
Read the source metadata fields and return them as a Source object
"""
source = Source()
# radiation is on the front panel
source.radiation = self.cbRadiation.currentText().lower()
# the rest is in the 'Source' tab of the Metadata tab
source.name = self.txtMSo_Name.text()
source.beam_size = Vector(x=Utilities.toFloat(self.txtMSo_BeamSizeX.text()),
y=Utilities.toFloat(self.txtMSo_BeamSizeY.text()))
source.beam_shape = self.txtMSo_BeamShape.text()
source.wavelength = Utilities.toFloat(self.txtMSo_BeamWavelength.text())
source.wavelength_min = Utilities.toFloat(self.txtMSo_MinWavelength.text())
source.wavelength_max = Utilities.toFloat(self.txtMSo_MaxWavelength.text())
source.wavelength_spread = Utilities.toFloat(self.txtMSo_Spread.text())
return source
def getSampleMetadata(self):
"""
Read the sample metadata fields and return them as a Sample object
"""
sample = Sample()
sample.name = self.txtMSa_Name.text()
sample.thickness = Utilities.toFloat(self.txtMSa_Thickness.text())
sample.transmission = Utilities.toFloat(self.txtMSa_Transmission.text())
sample.temperature = Utilities.toFloat(self.txtMSa_Temperature.text())
sample.temperature_unit = self.txtMSa_TempUnit.text()
sample.position = Vector(x=Utilities.toFloat(self.txtMSa_PositionX.text()),
y=Utilities.toFloat(self.txtMSa_PositionY.text()))
sample.orientation = Vector(x=Utilities.toFloat(self.txtMSa_OrientR.text()),
y=Utilities.toFloat(self.txtMSa_OrientP.text()),
z=Utilities.toFloat(self.txtMSa_OrientY.text()))
details = self.txtMSa_Details.toPlainText()
sample.details = [details] if details else []
return sample
def getMetadata(self):
''' metadata getter '''
return self.metadata
def readMetadata(self):
"""
Read the metadata fields and put them in the dictionary
This reads the UI elements directly, but we don't
have a clear MVP distinction in this widget, so there.
"""
run_title = self.txtMG_RunName.text()
run = self.txtMG_RunNumber.text()
run = run.split(",")
run_name = None
if run:
run_number = run[0]
run_name = { run_number: run_title }
metadata = {
'title': self.txtMG_Title.text(),
'run': run,
'run_name': run_name, # if run_name != "" else "None" ,
'instrument': self.txtMG_Instrument.text(),
'detector': [self.getDetectorMetadata()],
'sample': self.getSampleMetadata(),
'source': self.getSourceMetadata(),
'notes': [f'Data file generated by SasView v{SASVIEW_VERSION}'],
}
self.metadata = metadata
def onNewFile(self):
"""
show the save new file widget
"""
wildcard1d = "CanSAS 1D files(*.xml);;" if self.is1D else ""
wildcard = wildcard1d + "NXcanSAS files (*.h5)"
kwargs = {
'caption' : 'Save As',
'filter' : wildcard,
'parent' : None,
'options' : QtWidgets.QFileDialog.DontUseNativeDialog
}
# Query user for filename.
filename_tuple = QtWidgets.QFileDialog.getSaveFileName(**kwargs)
filename = filename_tuple[0]
# User cancelled.
if not filename:
return
# Check/add extension
if not os.path.splitext(filename)[1]:
ext = filename_tuple[1]
if 'CanSAS' in ext:
filename += '.xml'
elif 'NXcanSAS' in ext:
filename += '.h5'
else:
filename += '.h5' # default for user entered filenames
self.ofile = filename
self.txtOutputFile.setText(filename)
self.updateConvertState()
def onNewFileEdited(self):
"""
Update the output file state on direct field edit
"""
text = self.txtOutputFile.text()
if not text:
return
# Check/add extension
filename_tuple = os.path.splitext(text)
ext = filename_tuple[1]
if not ext.lower() in ('.xml', '.h5'):
text += '.h5'
if not self.is1D and not '.h5' in ext.lower():
# quietly add .h5 as extension
text += '.h5'
self.ofile = text
self.updateConvertState()
def updateConvertState(self):
"""
Asserts presence of files for conversion.
If all present -> enable the Convert button.
"""
enabled = self.ifile != "" and os.path.exists(self.ifile) and self.ofile != ""
if self.is1D:
enabled = enabled and self.qfile != "" and os.path.exists(self.qfile)
self.cmdConvert.setEnabled(enabled)
def onInputFormat(self):
"""
Enable/disable UI items based on input format spec
"""
# ASCII 2D allows for one file only
self.is1D = not '2D' in self.cbInputFormat.currentText()
self.label_7.setVisible(self.is1D)
self.txtQFile.setVisible(self.is1D)
self.btnQFile.setVisible(self.is1D)
self.isBSL = 'BSL' in self.cbInputFormat.currentText()
# clear out filename fields
self.txtQFile.setText("")
self.txtIFile.setText("")
# No need to clear the output field.
def extractBSLdata(self, filename):
"""
Extracts data from a 2D BSL file
:param filename: The header file to extract the data from
:return: The data for the selected frames, as returned by
BSLLoader.load_frames, or None if no frames were selected, the file
could not be handled, or the user cancelled the conversion
"""
loader = Utilities.BSLLoader(filename)
frames = [0]
should_continue = True
if loader.n_frames > 1:
params = self.askFrameRange(loader.n_frames)
frames = params['frames']
if len(frames) == 0:
should_continue = False
elif loader.n_rasters == 1 and loader.n_frames == 1:
message = ("The selected file is an OTOKO file. Please select the "
"'OTOKO 1D' option if you wish to convert it.")
msgbox = QtWidgets.QMessageBox(self)
msgbox.setIcon(QtWidgets.QMessageBox.Warning)
msgbox.setText(message)
msgbox.setWindowTitle("File Conversion")
msgbox.exec_()
return
else:
msg = ("The selected data file only has 1 frame, it might be"
" a multi-frame OTOKO file.\nContinue conversion?")
msgbox = QtWidgets.QMessageBox(self)
msgbox.setIcon(QtWidgets.QMessageBox.Warning)
msgbox.setText(msg)
msgbox.setWindowTitle("File Conversion")
# custom buttons
button_yes = QtWidgets.QPushButton("Yes")
msgbox.addButton(button_yes, QtWidgets.QMessageBox.YesRole)
button_no = QtWidgets.QPushButton("No")
msgbox.addButton(button_no, QtWidgets.QMessageBox.RejectRole)
retval = msgbox.exec_()
if retval == QtWidgets.QMessageBox.RejectRole:
# cancel fit
return
if not should_continue:
return None
frame_data = loader.load_frames(frames)
return frame_data
def convert1Ddata(self, qdata, iqdata, ofile, metadata):
"""
Formats a 1D array of q_axis data and a 2D array of I axis data (where
each row of iqdata is a separate frame) into a collection of Data1D
objects (see the shape sketch after this method)
"""
frames = []
increment = 1
single_file = True
n_frames = iqdata.shape[0]
# Standard file has 3 frames: SAS, calibration and WAS
if n_frames > 3:
# File has multiple frames - ask the user which ones they want to
# export
params = self.askFrameRange(n_frames)
frames = params['frames']
increment = params['inc']
single_file = params['file']
if frames == []:
return
else: # Only interested in SAS data
frames = [0]
output_path = ofile
frame_data = {}
for i in frames:
data = Data1D(x=qdata, y=iqdata[i])
frame_data[i] = data
if single_file:
# Only need to set metadata on first Data1D object
frame_data = list(frame_data.values()) # Don't need to know frame numbers
frame_data[0].filename = output_path.split('\\')[-1]
for key, value in metadata.items():
setattr(frame_data[0], key, value)
else:
# Need to set metadata for all Data1D objects
for datainfo in list(frame_data.values()):
datainfo.filename = output_path.split('\\')[-1]
for key, value in metadata.items():
setattr(datainfo, key, value)
_, ext = os.path.splitext(output_path)
if ext == '.xml':
run_name = metadata['title']
Utilities.convert_to_cansas(frame_data, output_path, run_name, single_file)
else: # ext == '.h5'
w = NXcanSASWriter()
w.write(frame_data, output_path)
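# Shape sketch for the conversion above (values are illustrative only):
# qdata is 1D with the q axis, iqdata is 2D with one row per frame, e.g.
#   qdata  = np.array([0.01, 0.02, 0.03])
#   iqdata = np.array([[10.0, 9.5, 9.1]])   # single-frame ASCII data
# so iqdata[i] above always lines up element-wise with qdata.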
def askFrameRange(self, n_frames=1):
"""
Display a dialog asking the user to input the range of frames they
would like to export
:param n_frames: How many frames the loaded data file has
:return: A dictionary containing the parameters input by the user
"""
valid_input = False
output_path = self.txtOutputFile.text()
if not output_path:
return
_, ext = os.path.splitext(output_path)
show_single_btn = (ext == '.h5')
frames = None
increment = None
single_file = True
dlg = FrameSelect(self, n_frames, show_single_btn)
if dlg.exec_() != QtWidgets.QDialog.Accepted:
return
(first_frame, last_frame, increment) = dlg.getFrames()
frames = list(range(first_frame, last_frame + 1, increment))
return { 'frames': frames, 'inc': increment, 'file': single_file }
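# Hedged usage sketch for askFrameRange() above (names and values are
# illustrative only):
#
#   params = widget.askFrameRange(n_frames=10)
#   if params and params['frames']:
#       frames = params['frames']      # e.g. [0, 2, 4, 6, 8]
#       increment = params['inc']      # e.g. 2
#       single_file = params['file']   # True: write all frames to one file
#
# which mirrors how onConvert()/convert1Ddata() consume the returned dict.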
|
|
"""Config flow to configure Philips Hue."""
import asyncio
import json
import os
from aiohue.discovery import discover_nupnp
import async_timeout
import voluptuous as vol
from homeassistant import config_entries
from homeassistant.core import callback
from homeassistant.helpers import aiohttp_client
from .bridge import get_bridge
from .const import DOMAIN, LOGGER
from .errors import AuthenticationRequired, CannotConnect
@callback
def configured_hosts(hass):
"""Return a set of the configured hosts."""
return set(entry.data['host'] for entry
in hass.config_entries.async_entries(DOMAIN))
def _find_username_from_config(hass, filename):
"""Load username from config.
This was a legacy way of configuring Hue until Home Assistant 0.67.
"""
path = hass.config.path(filename)
if not os.path.isfile(path):
return None
with open(path) as inp:
try:
return list(json.load(inp).values())[0]['username']
except ValueError:
# If we get invalid JSON
return None
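# For reference, the legacy file parsed above maps a bridge host to its
# credentials, which is why the first value's 'username' key is returned.
# An illustrative (made-up) example of its contents:
#
#   {"192.168.1.2": {"username": "longapplicationkey"}}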
@config_entries.HANDLERS.register(DOMAIN)
class HueFlowHandler(config_entries.ConfigFlow):
"""Handle a Hue config flow."""
VERSION = 1
CONNECTION_CLASS = config_entries.CONN_CLASS_LOCAL_POLL
def __init__(self):
"""Initialize the Hue flow."""
self.host = None
async def async_step_user(self, user_input=None):
"""Handle a flow initialized by the user."""
return await self.async_step_init(user_input)
async def async_step_init(self, user_input=None):
"""Handle a flow start."""
if user_input is not None:
self.host = user_input['host']
return await self.async_step_link()
websession = aiohttp_client.async_get_clientsession(self.hass)
try:
with async_timeout.timeout(5):
bridges = await discover_nupnp(websession=websession)
except asyncio.TimeoutError:
return self.async_abort(
reason='discover_timeout'
)
if not bridges:
return self.async_abort(
reason='no_bridges'
)
# Find already configured hosts
configured = configured_hosts(self.hass)
hosts = [bridge.host for bridge in bridges
if bridge.host not in configured]
if not hosts:
return self.async_abort(
reason='all_configured'
)
if len(hosts) == 1:
self.host = hosts[0]
return await self.async_step_link()
return self.async_show_form(
step_id='init',
data_schema=vol.Schema({
vol.Required('host'): vol.In(hosts)
})
)
async def async_step_link(self, user_input=None):
"""Attempt to link with the Hue bridge.
Given a configured host, will ask the user to press the link button
to connect to the bridge.
"""
errors = {}
# We will always try linking in case the user has already pressed
# the link button.
try:
bridge = await get_bridge(
self.hass, self.host, username=None
)
return await self._entry_from_bridge(bridge)
except AuthenticationRequired:
errors['base'] = 'register_failed'
except CannotConnect:
LOGGER.error("Error connecting to the Hue bridge at %s", self.host)
errors['base'] = 'linking'
except Exception: # pylint: disable=broad-except
LOGGER.exception(
'Unknown error connecting with Hue bridge at %s',
self.host)
errors['base'] = 'linking'
# If there was no user input, do not show the errors.
if user_input is None:
errors = {}
return self.async_show_form(
step_id='link',
errors=errors,
)
async def async_step_ssdp(self, discovery_info):
"""Handle a discovered Hue bridge.
This flow is triggered by the SSDP component. It will check if the
host is already configured and delegate to the import step if not.
"""
# Filter out emulated Hue
if "HASS Bridge" in discovery_info.get('name', ''):
return self.async_abort(reason='already_configured')
# pylint: disable=unsupported-assignment-operation
host = self.context['host'] = discovery_info.get('host')
if any(host == flow['context']['host']
for flow in self._async_in_progress()):
return self.async_abort(reason='already_in_progress')
if host in configured_hosts(self.hass):
return self.async_abort(reason='already_configured')
# This value is based off host/description.xml and is, weirdly, missing
# 4 characters in the middle of the serial compared to results returned
# from the NUPNP API or when querying the bridge API for bridgeid.
# (on first gen Hue hub)
serial = discovery_info.get('serial')
return await self.async_step_import({
'host': host,
# This format is the legacy format that Hue used for discovery
'path': 'phue-{}.conf'.format(serial)
})
async def async_step_import(self, import_info):
"""Import a new bridge as a config entry.
Will read authentication from Phue config file if available.
This flow is triggered by `async_setup` for both configured and
discovered bridges. Triggered for any bridge that does not have a
config entry yet (based on host).
This flow is also triggered by `async_step_discovery`.
If an existing config file is found, we will validate the credentials
and create an entry. Otherwise we will delegate to `link` step which
will ask user to link the bridge.
"""
host = import_info['host']
path = import_info.get('path')
if path is not None:
username = await self.hass.async_add_job(
_find_username_from_config, self.hass,
self.hass.config.path(path))
else:
username = None
try:
bridge = await get_bridge(
self.hass, host, username
)
LOGGER.info('Imported authentication for %s from %s', host, path)
return await self._entry_from_bridge(bridge)
except AuthenticationRequired:
self.host = host
LOGGER.info('Invalid authentication for %s, requesting link.',
host)
return await self.async_step_link()
except CannotConnect:
LOGGER.error("Error connecting to the Hue bridge at %s", host)
return self.async_abort(reason='cannot_connect')
except Exception: # pylint: disable=broad-except
LOGGER.exception('Unknown error connecting with Hue bridge at %s',
host)
return self.async_abort(reason='unknown')
async def _entry_from_bridge(self, bridge):
"""Return a config entry from an initialized bridge."""
# Remove all other entries of hubs with same ID or host
host = bridge.host
bridge_id = bridge.config.bridgeid
same_hub_entries = [entry.entry_id for entry
in self.hass.config_entries.async_entries(DOMAIN)
if entry.data['bridge_id'] == bridge_id or
entry.data['host'] == host]
if same_hub_entries:
await asyncio.wait([self.hass.config_entries.async_remove(entry_id)
for entry_id in same_hub_entries])
return self.async_create_entry(
title=bridge.config.name,
data={
'host': host,
'bridge_id': bridge_id,
'username': bridge.username,
}
)
|
|
#!/usr/bin/env python
"""
Tests for the backpacks.
Copyright (c) 2005 Dustin Sallings <[email protected]>
"""
# arch-tag: 0BCECE3E-2629-498A-A897-C66F6DC41EB4
import os
import sys
import time
import unittest
import exceptions
import xml.dom.minidom
import backpack
# These tests all assume you're in California.
os.environ['TZ']='America/Los_Angeles'
time.tzset()
class BaseCase(unittest.TestCase):
"""Base case for all test cases."""
def getFileData(self, p):
f=open(p)
r=f.read()
f.close()
return r
class UtilTest(unittest.TestCase):
"""Utility function tests."""
def testRelativeTime(self):
"""Test relative time calculations"""
# the time at which I started writing this test
# (9 is today in the future)
earlyMorning=1121844562.8812749
# Later in the afternoon
afterNoon=1121887792.692405
# Evening
evening=1121909413.8556659
# Alias
relTime=backpack.getRelativeTime
self.assertEquals(time.ctime(relTime("fifteen", earlyMorning)),
"Wed Jul 20 00:44:22 2005")
self.assertEquals(time.ctime(relTime("nexthour", earlyMorning)),
"Wed Jul 20 01:05:00 2005")
self.assertEquals(time.ctime(relTime("later", earlyMorning)),
"Wed Jul 20 02:29:22 2005")
self.assertEquals(time.ctime(relTime("morning", earlyMorning)),
"Wed Jul 20 10:00:00 2005")
self.assertEquals(time.ctime(relTime("afternoon", earlyMorning)),
"Wed Jul 20 14:00:00 2005")
self.assertEquals(time.ctime(relTime("evening", earlyMorning)),
"Wed Jul 20 19:00:00 2005")
self.assertEquals(time.ctime(relTime("coupledays", earlyMorning)),
"Fri Jul 22 00:29:22 2005")
self.assertEquals(time.ctime(relTime("nextweek", earlyMorning)),
"Wed Jul 27 00:29:22 2005")
# Later in the day...
self.assertEquals(time.ctime(relTime("morning", afterNoon)),
"Thu Jul 21 10:00:00 2005")
self.assertEquals(time.ctime(relTime("afternoon", afterNoon)),
"Wed Jul 20 14:00:00 2005")
# Still yet later
self.assertEquals(time.ctime(relTime("afternoon", evening)),
"Thu Jul 21 14:00:00 2005")
def testRelativeTimeDefault(self):
"""Test a default relative time."""
# This test is not as predictable, so we can only ensure they're in the
# future.
now=time.time()
for rel in ["later", "morning", "afternoon", "coupledays", "nextweek"]:
self.failUnless(backpack.getRelativeTime(rel) > now, rel)
def testTimeParsing(self):
"""Test the time parser"""
ts=backpack.parseTime("2005-02-02 13:35:35")
self.assertEquals(time.ctime(ts), "Wed Feb 2 13:35:35 2005")
def testTimeFormatting(self):
"""Test the time formatter"""
# When I wrote this test
then=1121847564.8214879
s=backpack.formatTime(then)
self.assertEquals(s, "2005-07-20 01:19:24")
class BackpackAPITest(BaseCase):
"""Test the base backpack functionality."""
def setUp(self):
self.bp=backpack.Backpack("x", "y")
def testConstructors(self):
"""Test the constructors and data work the way I think they do"""
bp1=backpack.BackpackAPI("x", "y")
self.failIf(bp1.debug, "first debug is set")
bp2=backpack.BackpackAPI("x", "y", True)
self.failUnless(bp2.debug, "second debug is not set")
self.failIf(bp1.debug, "first debug is set after second")
bp3=backpack.BackpackAPI("x", "y")
self.failIf(bp3.debug, "third debug is set")
def testException(self):
"""Validate exception parsing"""
try:
bpapi=backpack.BackpackAPI("x", "y")
data=bpapi._parseDocument(self.getFileData("data/error404.xml"))
self.fail("Parsed 404 error into " + data.toprettyxml())
except backpack.BackpackError, e:
self.assertEquals(e.code, 404)
self.assertEquals(e.msg, "Record not found")
class ReminderTest(BaseCase):
"""Test reminder-specific stuff."""
def testReminderParser(self):
"""Validate reminder parsing."""
reminder=backpack.ReminderAPI("x", "y")
data=reminder._parseDocument(self.getFileData("data/reminders.xml"))
rv=reminder._parseReminders(data)
expected=[
(1121755020.0, 52373, 'Get API working.'),
(1121763600.0, 52372, 'Be asleep.')]
self.assertEquals(rv, expected)
class PageTest(BaseCase):
"""Test the page code."""
def testPageListParser(self):
"""Test the page list parser."""
page=backpack.PageAPI("x", "y")
data=page._parseDocument(self.getFileData("data/pages.xml"))
rv=page._parsePageList(data)
def testPageParser(self):
"""Test the individual page parser."""
page=backpack.PageAPI("x", "y")
data=page._parseDocument(self.getFileData("data/page.xml"))
rv=page._parsePage(data)
self.assertEquals(rv.title, 'Ajax Summit')
self.assertEquals(rv.id, 1133)
self.assertEquals(rv.emailAddress, '[email protected]')
self.assertEquals(rv.notes,
[(1019, '', 1116113942.0, u"With O'Reilly and Adaptive Path"),
(1020, u'Hotel', 1116114071.0, u"Staying at the Savoy")])
self.assertEquals(rv.lists, [(937,'Trip to SF')])
self.assertEquals(rv.tags, [(4, 'Technology'),
(5, 'Travel')])
def testSearchResultParser(self):
"""Test the search result parser"""
page = backpack.PageAPI("x", "y")
data = page._parseDocument(self.getFileData("data/search.xml"))
rv = page._parseSearchResult(data)
self.assertEquals(len(rv), 2)
self.assertEquals(rv[0].pageId, 1134)
self.assertEquals(rv[0].pageTitle, "Haystack")
self.assertEquals(rv[0].type, "note")
self.assertEquals(rv[0].containerId, 33469)
self.assertEquals(rv[1].pageId, 2482)
self.assertEquals(rv[1].pageTitle, "Sewing")
self.assertEquals(rv[1].type, "list")
self.assertEquals(rv[1].containerId, 34263)
class ExportTest(BaseCase):
"""Test the backup code."""
def testExportParser(self):
"""Test the export parser doesn't break."""
exp=backpack.ExportAPI("x", "y")
data=exp._parseDocument(self.getFileData("data/export.xml"))
pages, reminders=exp._parseBackup(data)
expectedPageIds=[173034, 166626, 201574, 200381, 198053, 202561]
expectedPageIds.sort()
gotPageIds=[x[0] for x in pages]
gotPageIds.sort()
self.assertEquals(gotPageIds, expectedPageIds)
expectedReminderIds=[51604, 51613, 52079, 52373, 52403]
gotReminderIds=[x[1] for x in reminders]
self.assertEquals(gotReminderIds, expectedReminderIds)
class ListItemTest(BaseCase):
"""Test the list item code"""
def testListItemParser(self):
"""Test the list item parser"""
li=backpack.ListItemAPI("x", "y")
data = li._parseDocument(self.getFileData("data/listitem.xml"))
actual = li._parseListItems(data)
expected = [(1, False, "Hello world!"),
(2, False, "More world!"),
(3, True, "Done world!")]
self.assertEquals(actual, expected)
class ListTest(BaseCase):
"""Test the list code."""
def testListListParser(self):
"""Test parsing the List list"""
l=backpack.ListAPI("x", "y")
data=l._parseDocument(self.getFileData("data/list.xml"))
gotLists=l._parseLists(data)
expectedLists = [(1, "greetings"), (2, "goodbyes")]
self.assertEquals(gotLists, expectedLists)
class NotesTest(BaseCase):
"""Test the notes code."""
def testNoteListParser(self):
"""Test the notes list parser."""
n=backpack.NoteAPI("x", "y")
data=n._parseDocument(self.getFileData("data/notelist.xml"))
notes=n._parseNotes(data)
expected=[(263366, 'Test Note', 1124528874.0, 'This is a test note.')]
self.assertEquals(notes, expected)
class EmailTest(BaseCase):
"""Test the email code."""
def testAllEmails(self):
"""Test parsing the email list."""
e=backpack.EmailAPI("x", "y")
data=e._parseDocument(self.getFileData("data/emaillist.xml"))
emails=e._parseEmails(data)
expected=[(17507, 'test backpack email 2', 1124529799.0),
(17506, 'test backpack email 1', 1124529776.0)]
nobodies=[x[0:-1] for x in emails]
self.assertEquals(nobodies, expected)
def testIndividualEmail(self):
"""Test parsing an individual email."""
e=backpack.EmailAPI("x", "y")
data=e._parseDocument(self.getFileData("data/email.xml"))
email=e._parseEmails(data)[0]
expected=(17507, 'test backpack email 2', 1124529799.0)
self.assertEquals(email[0:-1], expected)
class TagTest(BaseCase):
"""Test the tagging code."""
def testCleaning(self):
"""Test the tag cleaner code."""
t=backpack.TagAPI("x", "y")
cleaned=t._cleanTags(["a", "abc", "abc def"])
expected=["a", "abc", '"abc def"']
self.assertEquals(cleaned, expected)
def testBadCleaning(self):
"""Test the tag cleaner with invalid input."""
t=backpack.TagAPI("x", "y")
try:
cleaned=t._cleanTags(["a", '"bc d"'])
self.fail("Cleaned tags that shouldn't be cleaned: " + `cleaned`)
except exceptions.ValueError, e:
self.assertEquals("Tags can't have quotes.", str(e))
def testPagesForTagParse(self):
"""Test parsing pages for tag response."""
t=backpack.TagAPI("x", "y")
data=t._parseDocument(self.getFileData("data/pagesfortag.xml"))
results=t._parseTaggedPageList(data)
expected=[(173034, 'Backpack API'), (18852, 'Nonsense')]
self.assertEquals(results, expected)
if __name__ == '__main__':
unittest.main()
|
|
# Copyright (c) 2011 The Chromium OS Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Library for interacting with gdata (i.e. Google Docs, Tracker, etc)."""
from __future__ import print_function
import functools
import getpass
import os
import pickle
import re
import urllib
import xml.dom.minidom
import gdata.projecthosting.client
import gdata.service
import gdata.spreadsheet
import gdata.spreadsheet.service
from chromite.lib import operation
# pylint: disable=attribute-defined-outside-init,access-member-before-definition
TOKEN_FILE = os.path.join(os.environ['HOME'], '.gdata_token')
CRED_FILE = os.path.join(os.environ['HOME'], '.gdata_cred.txt')
oper = operation.Operation('gdata_lib')
_BAD_COL_CHARS_REGEX = re.compile(r'[ /_]')
def PrepColNameForSS(col):
"""Translate a column name for spreadsheet interface."""
# Spreadsheet interface requires column names to be
# all lowercase and with no spaces or other special characters.
return _BAD_COL_CHARS_REGEX.sub('', col.lower())
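# Worked example of the translation above (input is illustrative):
#   PrepColNameForSS('Package / Version_Info')  ->  'packageversioninfo'
# i.e. lower-case the header and drop spaces, slashes and underscores.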
# TODO(mtennant): Rename PrepRowValuesForSS
def PrepRowForSS(row):
"""Make sure spreadsheet handles all values in row as strings."""
return dict((key, PrepValForSS(val)) for key, val in row.items())
# Regex to detect values that the spreadsheet will auto-format as numbers.
_NUM_REGEX = re.compile(r'^[\d\.]+$')
def PrepValForSS(val):
"""Make sure spreadsheet handles this value as a string."""
# The main reason for this is version strings (e.g. for portage packages),
# which Sheets automatically interprets as numbers and mangles.
if val and _NUM_REGEX.match(val):
return "'" + val
return val
def ScrubValFromSS(val):
"""Remove string indicator prefix if found."""
if val and val[0] == "'":
return val[1:]
return val
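# Worked example for the pair of helpers above (values illustrative):
#   PrepValForSS('3.0.14')    ->  "'3.0.14"   (kept as text by Sheets)
#   ScrubValFromSS("'3.0.14") ->  '3.0.14'    (prefix stripped on the way back)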
class Creds(object):
"""Class to manage user/password credentials."""
__slots__ = (
'docs_auth_token', # Docs Client auth token string
'creds_dirty', # True if user/password set and not, yet, saved
'password', # User password
'token_dirty', # True if auth token(s) set and not, yet, saved
'tracker_auth_token', # Tracker Client auth token string
'user', # User account ([email protected])
)
SAVED_TOKEN_ATTRS = ('docs_auth_token', 'tracker_auth_token', 'user')
def __init__(self):
self.user = None
self.password = None
self.docs_auth_token = None
self.tracker_auth_token = None
self.token_dirty = False
self.creds_dirty = False
def SetDocsAuthToken(self, auth_token):
"""Set the Docs auth_token string."""
self.docs_auth_token = auth_token
self.token_dirty = True
def SetTrackerAuthToken(self, auth_token):
"""Set the Tracker auth_token string."""
self.tracker_auth_token = auth_token
self.token_dirty = True
def LoadAuthToken(self, filepath):
"""Load previously saved auth token(s) from |filepath|.
This first clears both docs_auth_token and tracker_auth_token.
"""
self.docs_auth_token = None
self.tracker_auth_token = None
try:
f = open(filepath, 'r')
obj = pickle.load(f)
f.close()
if obj.has_key('auth_token'):
# Backwards compatibility. Default 'auth_token' is what
# docs_auth_token used to be saved as.
self.docs_auth_token = obj['auth_token']
self.token_dirty = True
for attr in self.SAVED_TOKEN_ATTRS:
if obj.has_key(attr):
setattr(self, attr, obj[attr])
oper.Notice('Loaded Docs/Tracker auth token(s) from "%s"' % filepath)
except IOError:
oper.Error('Unable to load auth token file at "%s"' % filepath)
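# For reference, the token file handled here is a pickled dict keyed by
# SAVED_TOKEN_ATTRS, e.g. (illustrative values only):
#   {'user': '...', 'docs_auth_token': '...', 'tracker_auth_token': '...'}
# Older files may instead carry the legacy key 'auth_token', handled above.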
def StoreAuthTokenIfNeeded(self, filepath):
"""Store auth token(s) to |filepath| if anything changed."""
if self.token_dirty:
self.StoreAuthToken(filepath)
def StoreAuthToken(self, filepath):
"""Store auth token(s) to |filepath|."""
obj = {}
for attr in self.SAVED_TOKEN_ATTRS:
val = getattr(self, attr)
if val:
obj[attr] = val
try:
oper.Notice('Storing Docs and/or Tracker auth token to "%s"' % filepath)
f = open(filepath, 'w')
pickle.dump(obj, f)
f.close()
self.token_dirty = False
except IOError:
oper.Error('Unable to store auth token to file at "%s"' % filepath)
def SetCreds(self, user, password=None):
if not '@' in user:
user = '%s@chromium.org' % user
if not password:
password = getpass.getpass('Docs password for %s:' % user)
self.user = user
self.password = password
self.creds_dirty = True
def LoadCreds(self, filepath):
"""Load email/password credentials from |filepath|."""
# Read email from first line and password from second.
with open(filepath, 'r') as f:
(self.user, self.password) = (l.strip() for l in f.readlines())
oper.Notice('Loaded Docs/Tracker login credentials from "%s"' % filepath)
def StoreCredsIfNeeded(self, filepath):
"""Store email/password credentials to |filepath| if anything changed."""
if self.creds_dirty:
self.StoreCreds(filepath)
def StoreCreds(self, filepath):
"""Store email/password credentials to |filepath|."""
oper.Notice('Storing Docs/Tracker login credentials to "%s"' % filepath)
# Simply write email on first line and password on second.
with open(filepath, 'w') as f:
f.write(self.user + '\n')
f.write(self.password + '\n')
self.creds_dirty = False
class IssueComment(object):
"""Represent a Tracker issue comment."""
__slots__ = ['title', 'text']
def __init__(self, title, text):
self.title = title
self.text = text
def __str__(self):
text = '<no comment>'
if self.text:
text = '\n '.join(self.text.split('\n'))
return '%s:\n %s' % (self.title, text)
class Issue(object):
"""Represents one Tracker Issue."""
SlotDefaults = {
'comments': [], # List of IssueComment objects
'id': 0, # Issue id number (int)
'labels': [], # List of text labels
'owner': None, # Current owner (text, chromium.org account)
'status': None, # Current issue status (text) (e.g. Assigned)
'summary': None,# Issue summary (first comment)
'title': None, # Title text
'ccs': [], # Cc list
}
__slots__ = SlotDefaults.keys()
def __init__(self, **kwargs):
"""Init for one Issue object.
|kwargs| - key/value arguments to give initial values to
any additional attributes on |self|.
"""
# Use SlotDefaults overwritten by kwargs for starting slot values.
slotvals = self.SlotDefaults.copy()
slotvals.update(kwargs)
for slot in self.__slots__:
setattr(self, slot, slotvals.pop(slot))
if slotvals:
raise ValueError('I do not know what to do with %r' % slotvals)
def __str__(self):
"""Pretty print of issue."""
lines = [
'Issue %d - %s' % (self.id, self.title),
'Status: %s, Owner: %s' % (self.status, self.owner),
'Labels: %s' % ', '.join(self.labels),
]
if self.summary:
lines.append('Summary: %s' % self.summary)
if self.comments:
lines.extend(self.comments)
return '\n'.join(lines)
def InitFromTracker(self, t_issue, project_name):
"""Initialize |self| from tracker issue |t_issue|"""
# The __slots__ logic above confuses pylint.
# https://bitbucket.org/logilab/pylint/issue/380/
# pylint: disable=assigning-non-slot
self.id = int(t_issue.id.text.split('/')[-1])
self.labels = [label.text for label in t_issue.label]
if t_issue.owner:
self.owner = t_issue.owner.username.text
self.status = t_issue.status.text
self.summary = t_issue.content.text
self.title = t_issue.title.text
self.comments = self.GetTrackerIssueComments(self.id, project_name)
def GetTrackerIssueComments(self, issue_id, project_name):
"""Retrieve comments for |issue_id| from comments URL"""
comments = []
feeds = 'http://code.google.com/feeds'
url = '%s/issues/p/%s/issues/%d/comments/full' % (feeds, project_name,
issue_id)
doc = xml.dom.minidom.parse(urllib.urlopen(url))
entries = doc.getElementsByTagName('entry')
for entry in entries:
title_text_list = []
for key in ('title', 'content'):
child = entry.getElementsByTagName(key)[0].firstChild
title_text_list.append(child.nodeValue if child else None)
comments.append(IssueComment(*title_text_list))
return comments
def __eq__(self, other):
return (self.id == other.id and self.labels == other.labels and
self.owner == other.owner and self.status == other.status and
self.summary == other.summary and self.title == other.title)
def __ne__(self, other):
return not self == other
class TrackerError(RuntimeError):
"""Error class for tracker communication errors."""
class TrackerInvalidUserError(TrackerError):
"""Error class for when user not recognized by Tracker."""
class TrackerComm(object):
"""Class to manage communication with Tracker."""
__slots__ = (
'author', # Author when creating/editing Tracker issues
'it_client', # Issue Tracker client
'project_name', # Tracker project name
)
def __init__(self):
self.author = None
self.it_client = None
self.project_name = None
def Connect(self, creds, project_name, source='chromiumos'):
self.project_name = project_name
it_client = gdata.projecthosting.client.ProjectHostingClient()
it_client.source = source
if creds.tracker_auth_token:
oper.Notice('Logging into Tracker using previous auth token.')
it_client.auth_token = gdata.gauth.ClientLoginToken(
creds.tracker_auth_token)
else:
oper.Notice('Logging into Tracker as "%s".' % creds.user)
it_client.ClientLogin(creds.user, creds.password,
source=source, service='code',
account_type='GOOGLE')
creds.SetTrackerAuthToken(it_client.auth_token.token_string)
self.author = creds.user
self.it_client = it_client
def _QueryTracker(self, query):
"""Query the tracker for a list of issues. Return |None| on failure."""
try:
return self.it_client.get_issues(self.project_name, query=query)
except gdata.client.RequestError:
return None
def _CreateIssue(self, t_issue):
"""Create an Issue from a Tracker Issue."""
issue = Issue()
issue.InitFromTracker(t_issue, self.project_name)
return issue
# TODO(mtennant): This method works today, but is not being actively used.
# Leaving it in, because a logical use of the method is to verify
# that a Tracker issue in the package spreadsheet is open, and to add
# comments to it when new upstream versions become available.
def GetTrackerIssueById(self, tid):
"""Get tracker issue given |tid| number. Return Issue object if found."""
query = gdata.projecthosting.client.Query(issue_id=str(tid))
feed = self._QueryTracker(query)
if feed.entry:
return self._CreateIssue(feed.entry[0])
return None
def GetTrackerIssuesByText(self, search_text, full_text=True,
only_open=True):
"""Find all Tracker Issues that contain the text search_text."""
if not full_text:
search_text = 'summary:"%s"' % search_text
if only_open:
search_text += ' is:open'
query = gdata.projecthosting.client.Query(text_query=search_text)
feed = self._QueryTracker(query)
if feed:
return [self._CreateIssue(tissue) for tissue in feed.entry]
else:
return []
def CreateTrackerIssue(self, issue):
"""Create a new issue in Tracker according to |issue|."""
try:
created = self.it_client.add_issue(project_name=self.project_name,
title=issue.title,
content=issue.summary,
author=self.author,
status=issue.status,
owner=issue.owner,
labels=issue.labels,
ccs=issue.ccs)
issue.id = int(created.id.text.split('/')[-1])
return issue.id
except gdata.client.RequestError as ex:
if ex.body and ex.body.lower() == 'user not found':
raise TrackerInvalidUserError('Tracker user %s not found' % issue.owner)
if ex.body and ex.body.lower() == 'issue owner must be a member':
raise TrackerInvalidUserError('Tracker user %s not a member' %
issue.owner)
raise
def AppendTrackerIssueById(self, issue_id, comment, owner=None):
"""Append |comment| to issue |issue_id| in Tracker"""
self.it_client.update_issue(project_name=self.project_name,
issue_id=issue_id,
author=self.author,
comment=comment,
owner=owner)
return issue_id
class SpreadsheetRow(dict):
"""Minor semi-immutable extension of dict to hold spreadsheet data.
This lets us keep the original spreadsheet row object and spreadsheet row
number as attributes.
No changes are made to equality checking or anything else, so client code
that wishes to handle this as a pure dict can.
"""
def __init__(self, ss_row_obj, ss_row_num, mapping=None):
if mapping:
dict.__init__(self, mapping)
self.ss_row_obj = ss_row_obj
self.ss_row_num = ss_row_num
def __setitem__(self, key, val):
raise TypeError('setting item in SpreadsheetRow not supported')
def __delitem__(self, key):
raise TypeError('deleting item in SpreadsheetRow not supported')
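# Minimal usage sketch (illustrative, not part of the original module): a
# SpreadsheetRow behaves like a read-only dict while also remembering the
# originating gdata row object and its spreadsheet row number.
#
#   row = SpreadsheetRow(ss_row_obj=None, ss_row_num=2,
#                        mapping={'package': 'sys-apps/portage'})
#   row['package']        # -> 'sys-apps/portage'
#   row.ss_row_num        # -> 2
#   row['package'] = 'x'  # raises TypeError: setting item not supported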
class SpreadsheetError(RuntimeError):
"""Error class for spreadsheet communication errors."""
def ReadWriteDecorator(func):
"""Raise SpreadsheetError if appropriate."""
def f(self, *args, **kwargs):
try:
return func(self, *args, **kwargs)
except gdata.service.RequestError as ex:
raise SpreadsheetError(str(ex))
f.__name__ = func.__name__
return f
class SpreadsheetComm(object):
"""Class to manage communication with one Google Spreadsheet worksheet."""
# Row numbering in spreadsheets effectively starts at 2 because row 1
# has the column headers.
ROW_NUMBER_OFFSET = 2
# Spreadsheet column numbers start at 1.
COLUMN_NUMBER_OFFSET = 1
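  # Illustrative mapping (comment added for clarity, not in the original):
  # zero-based Python indices map to spreadsheet coordinates via the offsets
  # above, e.g. feed.entry[0] is spreadsheet row 0 + ROW_NUMBER_OFFSET == 2,
  # and self.columns[0] is spreadsheet column 0 + COLUMN_NUMBER_OFFSET == 1.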
__slots__ = (
'_columns', # Tuple of translated column names, filled in as needed
'_rows', # Tuple of Row dicts in order, filled in as needed
'gd_client', # Google Data client
'ss_key', # Spreadsheet key
'ws_name', # Worksheet name
'ws_key', # Worksheet key
)
@property
def columns(self):
"""The columns property is filled in on demand.
It is a tuple of column names, each run through PrepColNameForSS.
"""
if self._columns is None:
query = gdata.spreadsheet.service.CellQuery()
query['max-row'] = '1'
feed = self.gd_client.GetCellsFeed(self.ss_key, self.ws_key, query=query)
# The use of PrepColNameForSS here looks weird, but the values
# in row 1 are the unaltered column names, rather than the restricted
# column names used for interface purposes. In other words, if the
# spreadsheet looks like it has a column called "Foo Bar", then the
# first row will have a value "Foo Bar" but all interaction with that
# column for other rows will use column key "foobar". Translate to
# restricted names now with PrepColNameForSS.
cols = [PrepColNameForSS(entry.content.text) for entry in feed.entry]
self._columns = tuple(cols)
return self._columns
@property
def rows(self):
"""The rows property is filled in on demand.
It is a tuple of SpreadsheetRow objects.
"""
if self._rows is None:
rows = []
feed = self.gd_client.GetListFeed(self.ss_key, self.ws_key)
for rowIx, rowObj in enumerate(feed.entry, start=self.ROW_NUMBER_OFFSET):
row_dict = dict((key, ScrubValFromSS(val.text))
for key, val in rowObj.custom.iteritems())
rows.append(SpreadsheetRow(rowObj, rowIx, row_dict))
self._rows = tuple(rows)
return self._rows
def __init__(self):
for slot in self.__slots__:
setattr(self, slot, None)
def Connect(self, creds, ss_key, ws_name, source='chromiumos'):
"""Login to spreadsheet service and set current worksheet.
|creds| Credentials object for Google Docs
|ss_key| Spreadsheet key
|ws_name| Worksheet name
|source| Name to associate with connecting service
"""
self._Login(creds, source)
self.SetCurrentWorksheet(ws_name, ss_key=ss_key)
def SetCurrentWorksheet(self, ws_name, ss_key=None):
"""Change the current worksheet. This clears all caches."""
if ss_key and ss_key != self.ss_key:
self.ss_key = ss_key
self._ClearCache()
self.ws_name = ws_name
ws_key = self._GetWorksheetKey(self.ss_key, self.ws_name)
if ws_key != self.ws_key:
self.ws_key = ws_key
self._ClearCache()
def _ClearCache(self, keep_columns=False):
"""Called whenever column/row data might be stale."""
self._rows = None
if not keep_columns:
self._columns = None
def _Login(self, creds, source):
"""Login to Google doc client using given |creds|."""
gd_client = RetrySpreadsheetsService()
gd_client.source = source
# Login using previous auth token if available, otherwise
# use email/password from creds.
if creds.docs_auth_token:
oper.Notice('Logging into Docs using previous auth token.')
gd_client.SetClientLoginToken(creds.docs_auth_token)
else:
oper.Notice('Logging into Docs as "%s".' % creds.user)
gd_client.email = creds.user
gd_client.password = creds.password
gd_client.ProgrammaticLogin()
creds.SetDocsAuthToken(gd_client.GetClientLoginToken())
self.gd_client = gd_client
def _GetWorksheetKey(self, ss_key, ws_name):
"""Get the worksheet key with name |ws_name| in spreadsheet |ss_key|."""
feed = self.gd_client.GetWorksheetsFeed(ss_key)
# The worksheet key is the last component in the URL (after last '/')
for entry in feed.entry:
if ws_name == entry.title.text:
return entry.id.text.split('/')[-1]
oper.Die('Unable to find worksheet "%s" in spreadsheet "%s"' %
(ws_name, ss_key))
@ReadWriteDecorator
def GetColumns(self):
"""Return tuple of column names in worksheet.
Note that each returned name has been run through PrepColNameForSS.
"""
return self.columns
@ReadWriteDecorator
def GetColumnIndex(self, colName):
"""Get the column index (starting at 1) for column |colName|"""
try:
# Spreadsheet column indices start at 1, so +1.
return self.columns.index(colName) + self.COLUMN_NUMBER_OFFSET
except ValueError:
return None
@ReadWriteDecorator
def GetRows(self):
"""Return tuple of SpreadsheetRow objects in order."""
return self.rows
@ReadWriteDecorator
def GetRowCacheByCol(self, column):
"""Return a dict for looking up rows by value in |column|.
Each row value is a SpreadsheetRow object.
If more than one row has the same value for |column|, then the
row objects will be in a list in the returned dict.
"""
row_cache = {}
for row in self.GetRows():
col_val = row[column]
current_entry = row_cache.get(col_val, None)
if current_entry and type(current_entry) is list:
current_entry.append(row)
elif current_entry:
current_entry = [current_entry, row]
else:
current_entry = row
row_cache[col_val] = current_entry
return row_cache
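  # Illustrative use of GetRowCacheByCol above (not part of the original
  # module); the column and value names are hypothetical:
  #
  #   cache = ss_comm.GetRowCacheByCol('package')
  #   hit = cache.get('sys-apps/portage')
  #   rows = hit if isinstance(hit, list) else ([hit] if hit else [])
  #   for row in rows:
  #     print row.ss_row_num, row['state']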
@ReadWriteDecorator
def InsertRow(self, row):
"""Insert |row| at end of spreadsheet."""
self.gd_client.InsertRow(row, self.ss_key, self.ws_key)
self._ClearCache(keep_columns=True)
@ReadWriteDecorator
def UpdateRowCellByCell(self, rowIx, row):
"""Replace cell values in row at |rowIx| with those in |row| dict."""
for colName in row:
colIx = self.GetColumnIndex(colName)
if colIx is not None:
self.ReplaceCellValue(rowIx, colIx, row[colName])
self._ClearCache(keep_columns=True)
@ReadWriteDecorator
def DeleteRow(self, ss_row):
"""Delete the given |ss_row| (must be original spreadsheet row object."""
self.gd_client.DeleteRow(ss_row)
self._ClearCache(keep_columns=True)
@ReadWriteDecorator
def ReplaceCellValue(self, rowIx, colIx, val):
"""Replace cell value at |rowIx| and |colIx| with |val|"""
self.gd_client.UpdateCell(rowIx, colIx, val, self.ss_key, self.ws_key)
self._ClearCache(keep_columns=True)
@ReadWriteDecorator
def ClearCellValue(self, rowIx, colIx):
"""Clear cell value at |rowIx| and |colIx|"""
self.ReplaceCellValue(rowIx, colIx, None)
@ReadWriteDecorator
def ClearColumnWorksheet(self, colIx):
"""Clear column with index |colIX| from current worksheet."""
query = gdata.spreadsheet.service.CellQuery()
query.min_col = str(colIx)
query.max_col = str(colIx)
cells = self.gd_client.GetCellsFeed(self.ss_key, wksht_id=self.ws_key,
query=query)
batchRequest = gdata.spreadsheet.SpreadsheetsCellsFeed()
for entry in cells.entry:
entry.cell.inputValue = None
batchRequest.AddUpdate(entry)
self.gd_client.ExecuteBatch(batchRequest, cells.GetBatchLink().href)
@ReadWriteDecorator
def WriteColumnToWorksheet(self, colIx, data):
"""Clear column index |colIx| from worksheet and write |data| to it."""
self.ClearColumnWorksheet(colIx)
query = gdata.spreadsheet.service.CellQuery()
query.min_col = str(colIx)
query.max_col = str(colIx)
query.min_row = '1'
query.max_row = str(len(data))
query.return_empty = 'true'
cells = self.gd_client.GetCellsFeed(self.ss_key, wksht_id=self.ws_key,
query=query)
batchRequest = gdata.spreadsheet.SpreadsheetsCellsFeed()
for entry, value in zip(cells.entry, data):
entry.cell.inputValue = str(value)
batchRequest.AddUpdate(entry)
self.gd_client.ExecuteBatch(batchRequest, cells.GetBatchLink().href)
class RetrySpreadsheetsService(gdata.spreadsheet.service.SpreadsheetsService):
"""Extend SpreadsheetsService to put retry logic around http request method.
The entire purpose of this class is to remove some flakiness from
interactions with Google Drive spreadsheet service, in the form of
certain 40* and 50* http error responses to http requests. This is
documented in https://code.google.com/p/chromium/issues/detail?id=206798.
There are two "request" methods that need to be wrapped in retry logic.
1) The request method on self. Original implementation is in
base class atom.service.AtomService.
2) The request method on self.http_client. The class of self.http_client
can actually vary, so the original implementation of the request
method can also vary.
"""
TRY_MAX = 5
RETRYABLE_STATUSES = (
403, # Forbidden (but retries still seem to help).
500, # Internal server error.
)
def __init__(self, *args, **kwargs):
gdata.spreadsheet.service.SpreadsheetsService.__init__(self, *args,
**kwargs)
# Wrap self.http_client.request with retry wrapper. This request method
# is used by ProgrammaticLogin(), at least.
if hasattr(self, 'http_client'):
self.http_client.request = functools.partial(self._RetryRequest,
self.http_client.request)
self.request = functools.partial(self._RetryRequest, self.request)
def _RetryRequest(self, func, *args, **kwargs):
"""Retry wrapper for bound |func|, passing |args| and |kwargs|.
This retry wrapper can be used for any http request |func| that provides
an http status code via the .status attribute of the returned value.
Retry when the status value on the return object is in RETRYABLE_STATUSES,
    and run up to TRY_MAX times. If successful (whether or not retries were
    necessary), return the last value returned by the wrapped method; if
    unsuccessful, return the first value returned by the wrapped method.
"""
first_retval = None
for try_ix in xrange(1, self.TRY_MAX + 1):
retval = func(*args, **kwargs)
if retval.status not in self.RETRYABLE_STATUSES:
return retval
else:
oper.Warning('Retry-able HTTP request failure (status=%d), try %d/%d' %
(retval.status, try_ix, self.TRY_MAX))
if not first_retval:
first_retval = retval
oper.Warning('Giving up on HTTP request after %d tries' % self.TRY_MAX)
return first_retval
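# Rough illustration (not part of the original module) of how the retry
# wrapper above composes with an arbitrary bound request method; FakeResult
# and fake_request are hypothetical stand-ins for gdata return values:
#
#   class FakeResult(object):
#     def __init__(self, status):
#       self.status = status
#
#   def fake_request(*args, **kwargs):
#     return FakeResult(500)   # always retryable, so all TRY_MAX attempts run
#
#   service = RetrySpreadsheetsService()
#   wrapped = functools.partial(service._RetryRequest, fake_request)
#   result = wrapped()         # retries TRY_MAX times, returns first result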
|
|
# Copyright 2016 - StackStorm, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import base64
import copy
import mock
from six.moves.urllib import parse as urlparse
from oslo_utils import uuidutils
from osprofiler import _utils as osprofiler_utils
import osprofiler.profiler
from mistralclient.api import httpclient
from mistralclient.tests.unit import base
API_BASE_URL = 'http://localhost:8989/v2'
API_URL = '/executions'
EXPECTED_URL = API_BASE_URL + API_URL
AUTH_TOKEN = uuidutils.generate_uuid()
PROJECT_ID = uuidutils.generate_uuid()
USER_ID = uuidutils.generate_uuid()
REGION_NAME = 'fake_region'
PROFILER_HMAC_KEY = 'SECRET_HMAC_KEY'
PROFILER_TRACE_ID = uuidutils.generate_uuid()
EXPECTED_AUTH_HEADERS = {
'X-Auth-Token': AUTH_TOKEN,
'X-Project-Id': PROJECT_ID,
'X-User-Id': USER_ID,
'X-Region-Name': REGION_NAME
}
EXPECTED_REQ_OPTIONS = {
'headers': EXPECTED_AUTH_HEADERS
}
EXPECTED_BODY = {
'k1': 'abc',
'k2': 123,
'k3': True
}
class HTTPClientTest(base.BaseClientTest):
def setUp(self):
super(HTTPClientTest, self).setUp()
osprofiler.profiler.init(None)
self.client = httpclient.HTTPClient(
API_BASE_URL,
auth_token=AUTH_TOKEN,
project_id=PROJECT_ID,
user_id=USER_ID,
region_name=REGION_NAME
)
def assertExpectedAuthHeaders(self):
headers = self.requests_mock.last_request.headers
self.assertEqual(AUTH_TOKEN, headers['X-Auth-Token'])
self.assertEqual(PROJECT_ID, headers['X-Project-Id'])
self.assertEqual(USER_ID, headers['X-User-Id'])
return headers
def assertExpectedBody(self):
text = self.requests_mock.last_request.text
form = urlparse.parse_qs(text, strict_parsing=True)
self.assertEqual(len(EXPECTED_BODY), len(form))
for k, v in EXPECTED_BODY.items():
self.assertEqual([str(v)], form[k])
return form
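    # Worked example (comment only, not from the original test): for
    # EXPECTED_BODY, urlparse.parse_qs('k1=abc&k2=123&k3=True',
    # strict_parsing=True) returns {'k1': ['abc'], 'k2': ['123'],
    # 'k3': ['True']}, which is why the loop above compares each form value
    # against [str(v)].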
def test_get_request_options(self):
m = self.requests_mock.get(EXPECTED_URL, text='text')
self.client.get(API_URL)
self.assertTrue(m.called_once)
self.assertExpectedAuthHeaders()
@mock.patch.object(
osprofiler.profiler._Profiler,
'get_base_id',
mock.MagicMock(return_value=PROFILER_TRACE_ID)
)
@mock.patch.object(
osprofiler.profiler._Profiler,
'get_id',
mock.MagicMock(return_value=PROFILER_TRACE_ID)
)
def test_get_request_options_with_profile_enabled(self):
m = self.requests_mock.get(EXPECTED_URL, text='text')
osprofiler.profiler.init(PROFILER_HMAC_KEY)
data = {'base_id': PROFILER_TRACE_ID, 'parent_id': PROFILER_TRACE_ID}
signed_data = osprofiler_utils.signed_pack(data, PROFILER_HMAC_KEY)
headers = {
'X-Trace-Info': signed_data[0],
'X-Trace-HMAC': signed_data[1]
}
self.client.get(API_URL)
self.assertTrue(m.called_once)
headers = self.assertExpectedAuthHeaders()
self.assertEqual(signed_data[0], headers['X-Trace-Info'])
self.assertEqual(signed_data[1], headers['X-Trace-HMAC'])
def test_get_request_options_with_headers_for_get(self):
m = self.requests_mock.get(EXPECTED_URL, text='text')
target_auth_url = uuidutils.generate_uuid()
target_auth_token = uuidutils.generate_uuid()
target_user_id = 'target_user'
target_project_id = 'target_project'
target_service_catalog = 'this should be there'
target_insecure = 'target insecure'
target_region = 'target region name'
target_user_domain_name = 'target user domain name'
target_project_domain_name = 'target project domain name'
target_client = httpclient.HTTPClient(
API_BASE_URL,
auth_token=AUTH_TOKEN,
project_id=PROJECT_ID,
user_id=USER_ID,
region_name=REGION_NAME,
target_auth_url=target_auth_url,
target_auth_token=target_auth_token,
target_project_id=target_project_id,
target_user_id=target_user_id,
target_service_catalog=target_service_catalog,
target_region_name=target_region,
target_user_domain_name=target_user_domain_name,
target_project_domain_name=target_project_domain_name,
target_insecure=target_insecure
)
target_client.get(API_URL)
self.assertTrue(m.called_once)
headers = self.assertExpectedAuthHeaders()
self.assertEqual(target_auth_url, headers['X-Target-Auth-Uri'])
self.assertEqual(target_auth_token, headers['X-Target-Auth-Token'])
self.assertEqual(target_user_id, headers['X-Target-User-Id'])
self.assertEqual(target_project_id, headers['X-Target-Project-Id'])
self.assertEqual(str(target_insecure), headers['X-Target-Insecure'])
self.assertEqual(target_region, headers['X-Target-Region-Name'])
self.assertEqual(target_user_domain_name,
headers['X-Target-User-Domain-Name'])
self.assertEqual(target_project_domain_name,
headers['X-Target-Project-Domain-Name'])
catalog = base64.b64encode(target_service_catalog.encode('utf-8'))
self.assertEqual(catalog, headers['X-Target-Service-Catalog'])
def test_get_request_options_with_headers_for_post(self):
m = self.requests_mock.post(EXPECTED_URL, text='text')
headers = {'foo': 'bar'}
self.client.post(API_URL, EXPECTED_BODY, headers=headers)
self.assertTrue(m.called_once)
headers = self.assertExpectedAuthHeaders()
self.assertEqual('application/json', headers['Content-Type'])
self.assertEqual('bar', headers['foo'])
self.assertExpectedBody()
def test_get_request_options_with_headers_for_put(self):
m = self.requests_mock.put(EXPECTED_URL, text='text')
headers = {'foo': 'bar'}
self.client.put(API_URL, EXPECTED_BODY, headers=headers)
self.assertTrue(m.called_once)
headers = self.assertExpectedAuthHeaders()
self.assertEqual('application/json', headers['Content-Type'])
self.assertEqual('bar', headers['foo'])
self.assertExpectedBody()
def test_get_request_options_with_headers_for_delete(self):
m = self.requests_mock.delete(EXPECTED_URL, text='text')
headers = {'foo': 'bar'}
self.client.delete(API_URL, headers=headers)
self.assertTrue(m.called_once)
headers = self.assertExpectedAuthHeaders()
self.assertEqual('bar', headers['foo'])
@mock.patch.object(
httpclient.HTTPClient,
'_get_request_options',
mock.MagicMock(return_value=copy.deepcopy(EXPECTED_REQ_OPTIONS))
)
def test_http_get(self):
m = self.requests_mock.get(EXPECTED_URL, text='text')
self.client.get(API_URL)
httpclient.HTTPClient._get_request_options.assert_called_with(
'get',
None
)
self.assertTrue(m.called_once)
self.assertExpectedAuthHeaders()
@mock.patch.object(
httpclient.HTTPClient,
'_get_request_options',
mock.MagicMock(return_value=copy.deepcopy(EXPECTED_REQ_OPTIONS))
)
def test_http_post(self):
m = self.requests_mock.post(EXPECTED_URL, status_code=201, text='text')
self.client.post(API_URL, EXPECTED_BODY)
httpclient.HTTPClient._get_request_options.assert_called_with(
'post',
None
)
self.assertTrue(m.called_once)
self.assertExpectedAuthHeaders()
self.assertExpectedBody()
@mock.patch.object(
httpclient.HTTPClient,
'_get_request_options',
mock.MagicMock(return_value=copy.deepcopy(EXPECTED_REQ_OPTIONS))
)
def test_http_put(self):
m = self.requests_mock.put(EXPECTED_URL, json={})
self.client.put(API_URL, EXPECTED_BODY)
httpclient.HTTPClient._get_request_options.assert_called_with(
'put',
None
)
self.assertTrue(m.called_once)
self.assertExpectedAuthHeaders()
self.assertExpectedBody()
@mock.patch.object(
httpclient.HTTPClient,
'_get_request_options',
mock.MagicMock(return_value=copy.deepcopy(EXPECTED_REQ_OPTIONS))
)
def test_http_delete(self):
m = self.requests_mock.delete(EXPECTED_URL, text='text')
self.client.delete(API_URL)
httpclient.HTTPClient._get_request_options.assert_called_with(
'delete',
None
)
self.assertTrue(m.called_once)
self.assertExpectedAuthHeaders()
|
|
from __future__ import unicode_literals
from .common import InfoExtractor
from ..utils import (
fix_xml_ampersands,
parse_duration,
qualities,
strip_jsonp,
unified_strdate,
url_basename,
)
class NPOBaseIE(InfoExtractor):
def _get_token(self, video_id):
token_page = self._download_webpage(
'http://ida.omroep.nl/npoplayer/i.js',
video_id, note='Downloading token')
token = self._search_regex(
r'npoplayer\.token = "(.+?)"', token_page, 'token')
# Decryption algorithm extracted from http://npoplayer.omroep.nl/csjs/npoplayer-min.js
token_l = list(token)
first = second = None
for i in range(5, len(token_l) - 4):
if token_l[i].isdigit():
if first is None:
first = i
elif second is None:
second = i
if first is None or second is None:
first = 12
second = 13
token_l[first], token_l[second] = token_l[second], token_l[first]
return ''.join(token_l)
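# Worked example of NPOBaseIE._get_token above (illustrative comment, not part
# of the original extractor): for a hypothetical token 'aaaaa1b2cccc', the scan
# over indices 5 through len(token)-5 finds digits at indices 5 and 7, which
# are swapped, producing 'aaaaa2b1cccc'. If fewer than two digits are found in
# that range, indices 12 and 13 are swapped instead.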
class NPOIE(NPOBaseIE):
IE_NAME = 'npo.nl'
_VALID_URL = r'https?://(?:www\.)?npo\.nl/(?!live|radio)[^/]+/[^/]+/(?P<id>[^/?]+)'
_TESTS = [
{
'url': 'http://www.npo.nl/nieuwsuur/22-06-2014/VPWON_1220719',
'md5': '4b3f9c429157ec4775f2c9cb7b911016',
'info_dict': {
'id': 'VPWON_1220719',
'ext': 'm4v',
'title': 'Nieuwsuur',
'description': 'Dagelijks tussen tien en elf: nieuws, sport en achtergronden.',
'upload_date': '20140622',
},
},
{
'url': 'http://www.npo.nl/de-mega-mike-mega-thomas-show/27-02-2009/VARA_101191800',
'md5': 'da50a5787dbfc1603c4ad80f31c5120b',
'info_dict': {
'id': 'VARA_101191800',
'ext': 'm4v',
'title': 'De Mega Mike & Mega Thomas show',
'description': 'md5:3b74c97fc9d6901d5a665aac0e5400f4',
'upload_date': '20090227',
'duration': 2400,
},
},
{
'url': 'http://www.npo.nl/tegenlicht/25-02-2013/VPWON_1169289',
'md5': 'f8065e4e5a7824068ed3c7e783178f2c',
'info_dict': {
'id': 'VPWON_1169289',
'ext': 'm4v',
'title': 'Tegenlicht',
'description': 'md5:d6476bceb17a8c103c76c3b708f05dd1',
'upload_date': '20130225',
'duration': 3000,
},
},
{
'url': 'http://www.npo.nl/de-nieuwe-mens-deel-1/21-07-2010/WO_VPRO_043706',
'info_dict': {
'id': 'WO_VPRO_043706',
'ext': 'wmv',
'title': 'De nieuwe mens - Deel 1',
'description': 'md5:518ae51ba1293ffb80d8d8ce90b74e4b',
'duration': 4680,
},
'params': {
# mplayer mms download
'skip_download': True,
}
},
        # non-ASF URL in the 'streams' metadata
{
'url': 'http://www.npo.nl/hoe-gaat-europa-verder-na-parijs/10-01-2015/WO_NOS_762771',
'md5': 'b3da13de374cbe2d5332a7e910bef97f',
'info_dict': {
'id': 'WO_NOS_762771',
'ext': 'mp4',
'title': 'Hoe gaat Europa verder na Parijs?',
},
},
]
def _real_extract(self, url):
video_id = self._match_id(url)
return self._get_info(video_id)
def _get_info(self, video_id):
metadata = self._download_json(
'http://e.omroep.nl/metadata/%s' % video_id,
video_id,
# We have to remove the javascript callback
transform_source=strip_jsonp,
)
token = self._get_token(video_id)
formats = []
pubopties = metadata.get('pubopties')
if pubopties:
quality = qualities(['adaptive', 'wmv_sb', 'h264_sb', 'wmv_bb', 'h264_bb', 'wvc1_std', 'h264_std'])
for format_id in pubopties:
format_info = self._download_json(
'http://ida.omroep.nl/odi/?prid=%s&puboptions=%s&adaptive=yes&token=%s'
% (video_id, format_id, token),
video_id, 'Downloading %s JSON' % format_id)
if format_info.get('error_code', 0) or format_info.get('errorcode', 0):
continue
streams = format_info.get('streams')
if streams:
video_info = self._download_json(
streams[0] + '&type=json',
video_id, 'Downloading %s stream JSON' % format_id)
else:
video_info = format_info
video_url = video_info.get('url')
if not video_url:
continue
if format_id == 'adaptive':
formats.extend(self._extract_m3u8_formats(video_url, video_id))
else:
formats.append({
'url': video_url,
'format_id': format_id,
'quality': quality(format_id),
})
streams = metadata.get('streams')
if streams:
for i, stream in enumerate(streams):
stream_url = stream.get('url')
if not stream_url:
continue
if '.asf' not in stream_url:
formats.append({
'url': stream_url,
'quality': stream.get('kwaliteit'),
})
continue
asx = self._download_xml(
stream_url, video_id,
'Downloading stream %d ASX playlist' % i,
transform_source=fix_xml_ampersands)
ref = asx.find('./ENTRY/Ref')
if ref is None:
continue
video_url = ref.get('href')
if not video_url:
continue
formats.append({
'url': video_url,
'ext': stream.get('formaat', 'asf'),
'quality': stream.get('kwaliteit'),
})
self._sort_formats(formats)
subtitles = {}
if metadata.get('tt888') == 'ja':
subtitles['nl'] = [{
'ext': 'vtt',
'url': 'http://e.omroep.nl/tt888/%s' % video_id,
}]
return {
'id': video_id,
'title': metadata['titel'],
'description': metadata['info'],
'thumbnail': metadata.get('images', [{'url': None}])[-1]['url'],
'upload_date': unified_strdate(metadata.get('gidsdatum')),
'duration': parse_duration(metadata.get('tijdsduur')),
'formats': formats,
'subtitles': subtitles,
}
class NPOLiveIE(NPOBaseIE):
IE_NAME = 'npo.nl:live'
_VALID_URL = r'https?://(?:www\.)?npo\.nl/live/(?P<id>.+)'
_TEST = {
'url': 'http://www.npo.nl/live/npo-1',
'info_dict': {
'id': 'LI_NEDERLAND1_136692',
'display_id': 'npo-1',
'ext': 'mp4',
'title': 're:^Nederland 1 [0-9]{4}-[0-9]{2}-[0-9]{2} [0-9]{2}:[0-9]{2}$',
'description': 'Livestream',
'is_live': True,
},
'params': {
'skip_download': True,
}
}
def _real_extract(self, url):
display_id = self._match_id(url)
webpage = self._download_webpage(url, display_id)
live_id = self._search_regex(
r'data-prid="([^"]+)"', webpage, 'live id')
metadata = self._download_json(
'http://e.omroep.nl/metadata/%s' % live_id,
display_id, transform_source=strip_jsonp)
token = self._get_token(display_id)
formats = []
streams = metadata.get('streams')
if streams:
for stream in streams:
stream_type = stream.get('type').lower()
# smooth streaming is not supported
if stream_type in ['ss', 'ms']:
continue
stream_info = self._download_json(
'http://ida.omroep.nl/aapi/?stream=%s&token=%s&type=jsonp'
% (stream.get('url'), token),
display_id, 'Downloading %s JSON' % stream_type)
if stream_info.get('error_code', 0) or stream_info.get('errorcode', 0):
continue
stream_url = self._download_json(
stream_info['stream'], display_id,
'Downloading %s URL' % stream_type,
'Unable to download %s URL' % stream_type,
transform_source=strip_jsonp, fatal=False)
if not stream_url:
continue
if stream_type == 'hds':
f4m_formats = self._extract_f4m_formats(stream_url, display_id)
                    # the f4m downloader only downloads a piece of the live stream
for f4m_format in f4m_formats:
f4m_format['preference'] = -1
formats.extend(f4m_formats)
elif stream_type == 'hls':
formats.extend(self._extract_m3u8_formats(stream_url, display_id, 'mp4'))
else:
formats.append({
'url': stream_url,
'preference': -10,
})
self._sort_formats(formats)
return {
'id': live_id,
'display_id': display_id,
'title': self._live_title(metadata['titel']),
'description': metadata['info'],
'thumbnail': metadata.get('images', [{'url': None}])[-1]['url'],
'formats': formats,
'is_live': True,
}
class NPORadioIE(InfoExtractor):
IE_NAME = 'npo.nl:radio'
_VALID_URL = r'https?://(?:www\.)?npo\.nl/radio/(?P<id>[^/]+)/?$'
_TEST = {
'url': 'http://www.npo.nl/radio/radio-1',
'info_dict': {
'id': 'radio-1',
'ext': 'mp3',
'title': 're:^NPO Radio 1 [0-9]{4}-[0-9]{2}-[0-9]{2} [0-9]{2}:[0-9]{2}$',
'is_live': True,
},
'params': {
'skip_download': True,
}
}
@staticmethod
def _html_get_attribute_regex(attribute):
return r'{0}\s*=\s*\'([^\']+)\''.format(attribute)
def _real_extract(self, url):
video_id = self._match_id(url)
webpage = self._download_webpage(url, video_id)
title = self._html_search_regex(
self._html_get_attribute_regex('data-channel'), webpage, 'title')
stream = self._parse_json(
self._html_search_regex(self._html_get_attribute_regex('data-streams'), webpage, 'data-streams'),
video_id)
codec = stream.get('codec')
return {
'id': video_id,
'url': stream['url'],
'title': self._live_title(title),
'acodec': codec,
'ext': codec,
'is_live': True,
}
class NPORadioFragmentIE(InfoExtractor):
IE_NAME = 'npo.nl:radio:fragment'
_VALID_URL = r'https?://(?:www\.)?npo\.nl/radio/[^/]+/fragment/(?P<id>\d+)'
_TEST = {
'url': 'http://www.npo.nl/radio/radio-5/fragment/174356',
'md5': 'dd8cc470dad764d0fdc70a9a1e2d18c2',
'info_dict': {
'id': '174356',
'ext': 'mp3',
'title': 'Jubileumconcert Willeke Alberti',
},
}
def _real_extract(self, url):
audio_id = self._match_id(url)
webpage = self._download_webpage(url, audio_id)
title = self._html_search_regex(
r'href="/radio/[^/]+/fragment/%s" title="([^"]+)"' % audio_id,
webpage, 'title')
audio_url = self._search_regex(
r"data-streams='([^']+)'", webpage, 'audio url')
return {
'id': audio_id,
'url': audio_url,
'title': title,
}
class TegenlichtVproIE(NPOIE):
IE_NAME = 'tegenlicht.vpro.nl'
_VALID_URL = r'https?://tegenlicht\.vpro\.nl/afleveringen/.*?'
_TESTS = [
{
'url': 'http://tegenlicht.vpro.nl/afleveringen/2012-2013/de-toekomst-komt-uit-afrika.html',
'md5': 'f8065e4e5a7824068ed3c7e783178f2c',
'info_dict': {
'id': 'VPWON_1169289',
'ext': 'm4v',
'title': 'Tegenlicht',
'description': 'md5:d6476bceb17a8c103c76c3b708f05dd1',
'upload_date': '20130225',
},
},
]
def _real_extract(self, url):
name = url_basename(url)
webpage = self._download_webpage(url, name)
urn = self._html_search_meta('mediaurn', webpage)
info_page = self._download_json(
'http://rs.vpro.nl/v2/api/media/%s.json' % urn, name)
return self._get_info(info_page['mid'])
|