#!/usr/bin/python
#
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from concurrent import futures
import argparse
import os
import sys
import time
import grpc
import traceback
from jinja2 import Environment, FileSystemLoader, select_autoescape, TemplateError
from google.api_core.exceptions import GoogleAPICallError
from google.auth.exceptions import DefaultCredentialsError
import demo_pb2
import demo_pb2_grpc
from grpc_health.v1 import health_pb2
from grpc_health.v1 import health_pb2_grpc
from opencensus.ext.stackdriver import trace_exporter as stackdriver_exporter
from opencensus.ext.grpc import server_interceptor
from opencensus.common.transports.async_ import AsyncTransport
from opencensus.trace import samplers
# import googleclouddebugger
import googlecloudprofiler
from logger import getJSONLogger
logger = getJSONLogger('emailservice-server')
# try:
# googleclouddebugger.enable(
# module='emailserver',
# version='1.0.0'
# )
# except:
# pass
# Loads confirmation email template from file
env = Environment(
loader=FileSystemLoader('templates'),
autoescape=select_autoescape(['html', 'xml'])
)
template = env.get_template('confirmation.html')
class BaseEmailService(demo_pb2_grpc.EmailServiceServicer):
def Check(self, request, context):
return health_pb2.HealthCheckResponse(
status=health_pb2.HealthCheckResponse.SERVING)
def Watch(self, request, context):
return health_pb2.HealthCheckResponse(
status=health_pb2.HealthCheckResponse.UNIMPLEMENTED)
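# Note: BaseEmailService doubles as both the EmailService servicer and the gRPC
# health-check servicer; start() below registers the same instance for both.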
class EmailService(BaseEmailService):
def __init__(self):
super().__init__()
# Cloud Mail integration is not implemented yet, so instantiating this class fails fast.
raise Exception('cloud mail client not implemented')
@staticmethod
def send_email(client, email_address, content):
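# NOTE: project_id, region, sender_id and from_address are not defined in this
# module; they are assumed to be supplied by deployment configuration. This code
# path is effectively unreachable because EmailService.__init__ raises before a
# client can be created.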
response = client.send_message(
sender = client.sender_path(project_id, region, sender_id),
envelope_from_authority = '',
header_from_authority = '',
envelope_from_address = from_address,
simple_message = {
"from": {
"address_spec": from_address,
},
"to": [{
"address_spec": email_address
}],
"subject": "Your Confirmation Email",
"html_body": content
}
)
logger.info("Message sent: {}".format(response.rfc822_message_id))
def SendOrderConfirmation(self, request, context):
email = request.email
order = request.order
try:
confirmation = template.render(order = order)
except TemplateError as err:
context.set_details("An error occurred when preparing the confirmation mail.")
logger.error(err.message)
context.set_code(grpc.StatusCode.INTERNAL)
return demo_pb2.Empty()
try:
EmailService.send_email(self.client, email, confirmation)
except GoogleAPICallError as err:
context.set_details("An error occurred when sending the email.")
logger.error(err.message)
context.set_code(grpc.StatusCode.INTERNAL)
return demo_pb2.Empty()
return demo_pb2.Empty()
class DummyEmailService(BaseEmailService):
def SendOrderConfirmation(self, request, context):
logger.info('A request to send order confirmation email to {} has been received.'.format(request.email))
return demo_pb2.Empty()
class HealthCheck():
def Check(self, request, context):
return health_pb2.HealthCheckResponse(
status=health_pb2.HealthCheckResponse.SERVING)
def start(dummy_mode):
server = grpc.server(futures.ThreadPoolExecutor(max_workers=10),
interceptors=(tracer_interceptor,))
service = None
if dummy_mode:
service = DummyEmailService()
else:
raise Exception('non-dummy mode not implemented yet')
demo_pb2_grpc.add_EmailServiceServicer_to_server(service, server)
health_pb2_grpc.add_HealthServicer_to_server(service, server)
port = os.environ.get('PORT', "8080")
logger.info("listening on port: "+port)
server.add_insecure_port('[::]:'+port)
server.start()
try:
while True:
time.sleep(3600)
except KeyboardInterrupt:
server.stop(0)
def initStackdriverProfiling():
project_id = None
try:
project_id = os.environ["GCP_PROJECT_ID"]
except KeyError:
# Environment variable not set
pass
for retry in range(1,4):
try:
if project_id:
googlecloudprofiler.start(service='email_server', service_version='1.0.0', verbose=0, project_id=project_id)
else:
googlecloudprofiler.start(service='email_server', service_version='1.0.0', verbose=0)
logger.info("Successfully started Stackdriver Profiler.")
return
except BaseException as exc:
logger.info("Unable to start Stackdriver Profiler Python agent. " + str(exc))
if retry < 3:
logger.info("Sleeping %d seconds to retry initializing Stackdriver Profiler"%(retry*10))
time.sleep(retry*10)
else:
logger.warning("Could not initialize Stackdriver Profiler after retrying, giving up")
return
if __name__ == '__main__':
logger.info('starting the email service in dummy mode.')
# Profiler
try:
if "DISABLE_PROFILER" in os.environ:
raise KeyError()
else:
logger.info("Profiler enabled.")
initStackdriverProfiling()
except KeyError:
logger.info("Profiler disabled.")
# Tracing
try:
if "DISABLE_TRACING" in os.environ:
raise KeyError()
else:
logger.info("Tracing enabled.")
sampler = samplers.AlwaysOnSampler()
exporter = stackdriver_exporter.StackdriverExporter(
project_id=os.environ.get('GCP_PROJECT_ID'),
transport=AsyncTransport)
tracer_interceptor = server_interceptor.OpenCensusServerInterceptor(sampler, exporter)
except (KeyError, DefaultCredentialsError):
logger.info("Tracing disabled.")
tracer_interceptor = server_interceptor.OpenCensusServerInterceptor()
except Exception as e:
logger.warn(f"Exception on Cloud Trace setup: {traceback.format_exc()}, tracing disabled.")
tracer_interceptor = server_interceptor.OpenCensusServerInterceptor()
start(dummy_mode = True)
|
|
#!/usr/bin/env python3
# Blackjack
# Uses object oriented programming methodology
# NOTES
# Deck is fully simulated, cards will not come up twice but deck needs shuffling after each turn (use shuffle(deck))
import random
import time
from random import shuffle
def pause():
"""Defines a pause to help the game flow"""
time.sleep(0.5)
def makeDeck():
"""Creates and shufles a deck of cards"""
deck = []
#Creates full deck of cards
for suit in 'sdhc':
for value in ["A",2,3,4,5,6,7,8,9,10,"J","Q","K"]:
deck.append(str(value)+suit)
#shuffles deck using imported shuffle
shuffle(deck)
return deck
def makePlayerHand():
"""Returns 2 cards from the deck"""
hand = []
for i in range(0, 2):
hand.append(deck.pop()) #Deals one card from the end of the shuffled deck (loop runs twice)
return hand
def makeDealerHand():
"""Returns ? placeholder for dealer and 2 cards from the deck"""
hand = []
hand.append("?") #Adds ? Placeholder to be replaced when player finishes hand
for i in range(0, 2):
hand.append(deck.pop()) #Deals one card from the end of the shuffled deck (loop runs twice)
return hand
def Board():
"""Prints the hands of all players."""
# ".join" function formats the list so it looks correct
playerBoard = playerName + "'s Hand: " + ' '.join(playerHand) + " = " + str(addCardValues(playerHand))
if dealerHand[0] != "?": #if Dealer has not had a turn dealer's second hand it hidden? placeholder
dealerBoard = "Dealer's Hand: " + ' '.join(dealerHand) + " = " + str(addCardValues(dealerHand))
else:
dealerBoard = "Dealer's Hand: " + str(dealerHand[1]) + ' ' + str(dealerHand[0])
pause()#A short pause to help game flow
print(dealerBoard)
print(playerBoard)
def playerBoard():
"""Prints the hand of the player."""
# ".join" function formats the list so it looks correct
playerBoard = playerName + "'s Hand: " + ' '.join(playerHand) + " = " + str(addCardValues(playerHand))
print(playerBoard)
def dealerBoard():
"""Prints the hands of the dealer."""
# ".join" function formats the list so it looks correct
if dealerHand[0] != "?": #if Dealer has not had a turn dealer's second hand it hidden? placeholder
dealerBoard = "Dealer's Hand: " + ' '.join(dealerHand) + " = " + str(addCardValues(dealerHand))
else:
dealerBoard = "Dealer's Hand: " + str(dealerHand[1]) + ' ' + str(dealerHand[0])
pause()#A short pause to help game flow
print(dealerBoard)
def addCardValues(player):
"""Returns the values of all cards in any hand added together"""
additionHand = player #the selected player's hand (a list of card strings)
totalValue = 0
currentValue = 0
aceNumber = 0
for i in range(0, len(additionHand)): # for every card in the selected players hand
if additionHand[i] == "?":#if the entry is the dealers ? then skip the loop
currentValue = 0
continue
else:
currentCard = additionHand[i]#selects individual card
currentValue = currentCard[:-1]#gets rid of suit for selected card, just card value
if currentValue in ("J", "Q", "K"): #if picutre card (excluding ace)
currentValue = 10 #set value to 10
elif currentValue == "A": #if card is ace
currentValue = 1 #set ace to equal 1 (logic applied at end of hand)
aceNumber += 1
totalValue += int(currentValue)
#ENDFOR
#Ace logic
if aceNumber > 0: #if any card is an ace
for i in range(0, aceNumber): # for all aces
if totalValue < 12: #if adding value of ace does not bust
totalValue += 10 #add 10 (the 1 is already added)
return totalValue
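#Worked example for addCardValues (hypothetical hands): ["As", "Ks"] scores 1 + 10 = 11,
#then the ace logic adds 10 because 11 < 12, giving 21. ["As", "9h", "8d"] stays at
#1 + 9 + 8 = 18, since adding 10 for the ace would bust.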
def playerTurn():
""""Asks the player if they want to hit or stand and calls the appropiate function"""
userAction = input("Would you like to hit or stand? ").lower() #User input (.lower() function used so input not case sensitive)
if userAction.startswith("h"):
hit(playerHand)
elif userAction.startswith("s"):
return "break"#turn ends at break
else:
print("Input not valid please enter a string starting with h to hit or s to stand")
def hit(hand):
"""adds a crad from the deck to 'hand'"""
hand.append(deck.pop())
def isBust(hand):
"""Uses addCardValues to determine if a player has gone bust"""
if addCardValues(hand) > 21:
return True
else:
return False
def dealerTurn():
"""Dealer takes their turn"""
if addCardValues(dealerHand) < 17:
hit(dealerHand)
else:
return False
def playAgain():
userAgainInput = input(playerName + " would you like to play again? (Y/N)").lower() #user input (.lower() function used so input not case sensitive)
if userAgainInput.startswith("n"):
return False
def whoWins():
"""Determines who wins, adds a point to their score and prints the winner, or prints draw"""
#Tell Python dealerScore and playerScore refer to global variables
global dealerScore
global playerScore
if (len(playerHand) > 4) and (isBust(playerHand) == False): #if the player has 5 or more cards
playerScore += 1 #Adds a point to the player
print("You got a 5 card trick! Well done you get a point.")
if isBust(playerHand) == True:#if player is bust
dealerScore += 1 #Adds a point to the dealer
print("You went bust! The dealer gets a point.")
elif isBust(dealerHand) == True:#if dealer is bust
playerScore += 1 #Adds a point to the player
print("The dealer went bust! You get a point.")
elif addCardValues(playerHand) > addCardValues(dealerHand): #if the players cards add up to more than dealers
playerScore += 1 #Adds a point to the player
print("Well done " + playerName + "you win!")
elif addCardValues(dealerHand) > addCardValues(playerHand): #if the dealers cards add up to more than players
dealerScore += 1 #Adds a point to the dealer
print("Unlucky, the dealer wins.")
else:
print("The game is tied! Nobody gets a point this time...")
def scoreBoard():
print(playerName + ":" , str(playerScore), "Dealer:", str(dealerScore))
"""START OF MAIN GAME"""
#Sets scores for the game
playerScore = 0
dealerScore = 0
#Asks for Player's name
playerName = input("Please enter your name: ")
pause()
print("Blackjack:")
pause()
print("The aim of the game is for the value of your cards to equal 21")
pause()
while True:
#set up deck and hands
deck = makeDeck()
playerHand = makePlayerHand()
dealerHand = makeDealerHand()
Board()#show starting hands
pause()#A short pause to help game flow
while isBust(playerHand) == False:#if player busts turn ends else player gets more turns
pause()
if playerTurn() == "break":#player takes turn, if statement evaluates to true if player chooses to stand
break #breaks if player stands
playerBoard()#shows the players hand after each turn
#END PLAYER HAND
pause()
print("Your turn is finished")
pause()
time.sleep(1)#Extended pause
if isBust(playerHand) == True:#if player is bust
pass
#Skips to whoWins() as dealer does not take turn
else:
print("The dealer will now take his turn")
pause()
dealerHand.remove("?") #delete the placeholder card
dealerBoard() #print the board with dealer's card revealed
while dealerTurn() != False:#Performs the dealer's turn; dealerTurn() returns False once the total reaches 17 or more
time.sleep(2)#2 second gap between each turn for game flow
dealerBoard()#Prints board after each dealer turn
#END DEALER HAND
time.sleep(2)#Extended pause
whoWins()#Determines and prints who wins.
pause()
scoreBoard()#prints scores
pause()
if playAgain() == False:
break
|
|
"""
This module provides the Template class for creating data-bound
templates.
"""
from browser import timer
try:
from ..utils.events import EventMixin
except ImportError:
from circular.utils.events import EventMixin
class PrefixLookupDict(dict):
"""
Helper class for looking up data allowing a single data item
to have variant keys. The implementation works by first
converting the key to a canonical form and only then doing
the lookup. The canonical form is derived as follows:
-- strip any prefix (set via the :func: ``set_prefix`` method)
-- remove any '-' and '_'
-- convert the key to upper case
"""
def __init__(self, init=None):
super().__init__()
self._prefix = ''
if isinstance(init, list):
for item in init:
self[item] = item
elif isinstance(init, dict):
for (key, val) in init.items():
self[key] = val
def _canonical(self, key):
canonical_key = key.upper().replace('-', '').replace('_', '')
if canonical_key.startswith(self._prefix):
return canonical_key[len(self._prefix):]
else:
return canonical_key
def remove(self, key):
try:
del self[key]
except KeyError:
pass
def set_prefix(self, prefix):
self._prefix = prefix.upper().replace('-', '').replace('_', '')
def update(self, other):
for (key, val) in other.items():
self[key] = val
def __delitem__(self, key):
return super().__delitem__(self._canonical(key))
def __getitem__(self, key):
return super().__getitem__(self._canonical(key))
def __setitem__(self, key, value):
return super().__setitem__(self._canonical(key), value)
def __contains__(self, key):
return super().__contains__(self._canonical(key))
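# A small sketch of the canonical-form lookup (hypothetical keys): with the prefix
# set to 'tpl-', the keys 'tpl-my-attr', 'MY_ATTR' and 'myattr' all canonicalise to
# 'MYATTR' and therefore address the same entry.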
PLUGINS = PrefixLookupDict()
def _build_kwargs(element, plugin):
"""
Helper function which removes all attributes of element
which are consumed by the plugin as parameters. It
returns them (with their values) as a dict.
"""
lookup_table = PrefixLookupDict(plugin['args'])
kwargs = {}
for attr in element.attributes:
if attr.name in lookup_table:
kwargs[lookup_table[attr.name]] = attr.value
element.removeAttribute(attr.name)
return kwargs
def _compile(tpl_element):
"""
A function used internally by the Template class and plugins
to recursively parse a dom-tree into a template. The argument
is a tpl_element. It returns an instance of the ``TagPlugin``
class representing the root of the template at ``tpl_element``.
The function works as follows:
1. If the element is a text node, initialize the ``TextPlugin``
(which handles ``{{ name }}`` type constructs)
2. Otherwise, if this is the first time the element is
seen, build a list of all attribute plugins which need to be applied
and save it to the element as a private attribute ``_plugins``
3. Next order the attribute plugins by priority and initialize
the first one (it is expected, that the plugin will recursively
call _compile on the element thus allowing the other plugins to
be initialized)
4. Next handle attributes with interpolated values (e.g. ``id="{{ dom_id }}"``)
via the ``InterpolatedAttrsPlugin``
5. Finally, initialize the ``GenericTag`` plugin which takes care
of calling the ``_compile`` function recursively on the child elements.
(Note that plugins may choose to ignore the children (or do something else with them)
by not calling the _compile function)
"""
if tpl_element.nodeName == '#text':
# Interpolated text node plugin
return TextPlugin(tpl_element)
if not hasattr(tpl_element, '_plugins'):
# This is the first pass over tpl_element,
# we need to find out what the plugins are
# and remove their params from the element
# and save them for later
plugin_metas = []
for attr in tpl_element.attributes:
if attr.name in PLUGINS:
plugin_metas.append((attr.value, PLUGINS[attr.name]))
tpl_element.removeAttribute(attr.name)
# Order the plugins by priority
plugin_metas.sort(key=lambda x: x[1]['priority'])
plugins = []
for (arg, plugin) in plugin_metas:
plugins.append((plugin, [arg], _build_kwargs(tpl_element, plugin)))
if tpl_element.nodeName in PLUGINS:
tplug = PLUGINS[tpl_element.nodeName]
plugins.append((tplug, [], _build_kwargs(tpl_element, tplug)))
setattr(tpl_element, '_plugins', plugins)
plugins = getattr(tpl_element, '_plugins')
# Now we initialize the first plugin, if any
if len(plugins) > 0:
plug_meta, args, kwargs = plugins.pop()
return plug_meta['class'](tpl_element, *args, **kwargs)
# If there are any attributes left, we initialize the
# InterpolatedAttrsPlugin
if len(tpl_element.attributes) > 0:
return InterpolatedAttrsPlugin(tpl_element)
# Finally, since no other plugin is found, return the GenericTag plugin
return GenericTagPlugin(tpl_element)
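# register_plugin records a plugin class in the global PLUGINS table, keyed by its
# NAME (or class name), together with its constructor arguments (minus 'self' and
# 'tpl_element') and its PRIORITY. A hedged sketch of a hypothetical plugin:
#
#   class ShoutPlugin(TagPlugin):        # hypothetical, not part of this module
#       NAME = 'shout'
#       PRIORITY = 10
#       def __init__(self, tpl_element, volume=None):
#           ...
#
#   register_plugin(ShoutPlugin)
#
# With the prefix set to 'tpl-', _compile would then pick the plugin up from an
# attribute such as tpl-shout="..." and pass any volume="..." attribute through
# as a keyword argument.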
def register_plugin(plugin_class):
plugin_name = getattr(plugin_class, 'NAME', None) or plugin_class.__name__
meta = {
'class': plugin_class,
'args': PrefixLookupDict(list(plugin_class.__init__.__code__.co_varnames)),
'name': plugin_name,
'priority': getattr(plugin_class, 'PRIORITY', 0)
}
meta['args'].remove('self')
meta['args'].remove('tpl_element')
PLUGINS[plugin_name] = meta
def set_prefix(prefix):
"""
Sets the prefix which should be prepended to tag names. E.g. if the prefix is set to `tpl-`
then the `for` plugin must be written as `tpl-for`:
```
<li tpl-for="[1,2,3]" ...>...</li>
```
"""
PLUGINS.set_prefix(prefix)
class Template(EventMixin):
"""
The template class is the basic class used for data-binding functionality.
Its constructor takes a :class:`DOMNode` element (e.g. ``doc['element_id']``) and
parses it and its children into an internal structure. One can then
use the :func:`Template.bind_ctx` instance method to bind the template to
a context (an instance of the :class:`Context` class) containing
the data. Once the data-binding is setup in this way any change to the context
will trigger an update of the document tree.
.. note::
For performance reasons, the updates are only processed once every 100 msecs.
Assuming we have a template::
```
<div id='app'>
Greetings from the {{ country }}
</div>
```
We could write::
```
from browser import document as doc
from circular.template import Template, Context
ctx = Context()
ctx.country = 'Czech republic'
tpl = Template(doc['app'])
tpl.bind_ctx(ctx) # The page shows "Greetings from the Czech republic"
ctx.country = 'United Kingdom' # About 100 msecs later the page shows "Greetings from the United Kingdom"
```
"""
def __init__(self, elem):
super().__init__()
self.root = _compile(elem)
self.elem = elem
self.update_timer = None
def bind_ctx(self, ctx):
elem = self.root.bind_ctx(ctx)
self.elem.parent.replaceChild(elem, self.elem)
self.elem = elem
self.root.bind('change', self._start_timer)
def _start_timer(self, _event):
if self.update_timer is None:
self.update_timer = timer.set_interval(self.update, 50)
def update(self):
""" FIXME: We need handle the case when the root node
returns a new element(s) on update
"""
elems = self.root.update()
if self.update_timer is not None:
timer.clear_interval(self.update_timer)
self.update_timer = None
from .tags import *
|
|
import gzip
import json
import logging
import os
import tempfile
import zipfile
import pytest
import retrying
import test_helpers
from dcos_test_utils.diagnostics import Diagnostics
from dcos_test_utils.helpers import check_json
__maintainer__ = 'mnaboka'
__contact__ = '[email protected]'
# Expected latency for all dcos-diagnostics units to refresh after postflight plus
# another minute to allow for check-time to settle. See: DCOS_OSS-988
LATENCY = 120
@pytest.mark.supportedwindows
@retrying.retry(wait_fixed=2000, stop_max_delay=LATENCY * 1000)
def test_dcos_diagnostics_health(dcos_api_session):
"""
test health endpoint /system/health/v1
"""
required_fields = ['units', 'hostname', 'ip', 'dcos_version', 'node_role', 'mesos_id', 'dcos_diagnostics_version']
required_fields_unit = ['id', 'health', 'output', 'description', 'help', 'name']
# Check all masters dcos-diagnostics instances on base port since this is extra-cluster request (outside localhost)
for host in dcos_api_session.masters:
response = check_json(dcos_api_session.health.get('/', node=host))
assert len(response) == len(required_fields), 'response must have the following fields: {}'.format(
', '.join(required_fields)
)
# validate units
assert 'units' in response, 'units field not found'
assert isinstance(response['units'], list), 'units field must be a list'
assert len(response['units']) > 0, 'units field cannot be empty'
for unit in response['units']:
assert len(unit) == len(required_fields_unit), 'unit must have the following fields: {}'.format(
', '.join(required_fields_unit)
)
for required_field_unit in required_fields_unit:
assert required_field_unit in unit, '{} must be in a unit response'.format(required_field_unit)
# id, health and description cannot be empty
assert unit['id'], 'id field cannot be empty'
assert unit['health'] in [0, 1], 'health field must be 0 or 1'
assert unit['description'], 'description field cannot be empty'
# check all required fields but units
for required_field in required_fields[1:]:
assert required_field in response, '{} field not found'.format(required_field)
assert response[required_field], '{} cannot be empty'.format(required_field)
# Check all agents running dcos-diagnostics behind agent-adminrouter on 61001
for host in dcos_api_session.slaves:
response = check_json(dcos_api_session.health.get('/', node=host))
assert len(response) == len(required_fields), 'response must have the following fields: {}'.format(
', '.join(required_fields)
)
# validate units
assert 'units' in response, 'units field not found'
assert isinstance(response['units'], list), 'units field must be a list'
assert len(response['units']) > 0, 'units field cannot be empty'
for unit in response['units']:
assert len(unit) == len(required_fields_unit), 'unit must have the following fields: {}'.format(
', '.join(required_fields_unit)
)
for required_field_unit in required_fields_unit:
assert required_field_unit in unit, '{} must be in a unit response'.format(required_field_unit)
# id, health and description cannot be empty
assert unit['id'], 'id field cannot be empty'
assert unit['health'] in [0, 1], 'health field must be 0 or 1'
assert unit['description'], 'description field cannot be empty'
# check all required fields but units
for required_field in required_fields[1:]:
assert required_field in response, '{} field not found'.format(required_field)
assert response[required_field], '{} cannot be empty'.format(required_field)
@pytest.mark.supportedwindows
@retrying.retry(wait_fixed=2000, stop_max_delay=LATENCY * 1000)
def test_dcos_diagnostics_nodes(dcos_api_session):
"""
test a list of nodes with statuses endpoint /system/health/v1/nodes
"""
for master in dcos_api_session.masters:
response = check_json(dcos_api_session.health.get('/nodes', node=master))
assert len(response) == 1, 'nodes response must have only one field: nodes'
assert 'nodes' in response
assert isinstance(response['nodes'], list)
assert len(response['nodes']) == len(dcos_api_session.masters + dcos_api_session.all_slaves), \
('the number of nodes in the response must be {}'.
format(len(dcos_api_session.masters + dcos_api_session.all_slaves)))
# test nodes
validate_node(response['nodes'])
@pytest.mark.supportedwindows
def test_dcos_diagnostics_nodes_node(dcos_api_session):
"""
test a specific node endpoint /system/health/v1/nodes/<node>
"""
for master in dcos_api_session.masters:
# get a list of nodes
response = check_json(dcos_api_session.health.get('/nodes', node=master))
nodes = list(map(lambda node: node['host_ip'], response['nodes']))
for node in nodes:
node_response = check_json(dcos_api_session.health.get('/nodes/{}'.format(node), node=master))
validate_node([node_response])
@pytest.mark.supportedwindows
def test_dcos_diagnostics_nodes_node_units(dcos_api_session):
"""
test a list of units from a specific node, endpoint /system/health/v1/nodes/<node>/units
"""
for master in dcos_api_session.masters:
# get a list of nodes
response = check_json(dcos_api_session.health.get('/nodes', node=master))
nodes = list(map(lambda node: node['host_ip'], response['nodes']))
for node in nodes:
units_response = check_json(dcos_api_session.health.get('/nodes/{}/units'.format(node), node=master))
assert len(units_response) == 1, 'unit response should have only 1 field `units`'
assert 'units' in units_response
validate_units(units_response['units'])
@pytest.mark.supportedwindows
def test_dcos_diagnostics_nodes_node_units_unit(dcos_api_session):
"""
test a specific unit for a specific node, endpoint /system/health/v1/nodes/<node>/units/<unit>
"""
for master in dcos_api_session.masters:
response = check_json(dcos_api_session.health.get('/nodes', node=master))
nodes = list(map(lambda node: node['host_ip'], response['nodes']))
for node in nodes:
units_response = check_json(dcos_api_session.health.get('/nodes/{}/units'.format(node), node=master))
unit_ids = list(map(lambda unit: unit['id'], units_response['units']))
for unit_id in unit_ids:
validate_unit(
check_json(dcos_api_session.health.get('/nodes/{}/units/{}'.format(node, unit_id), node=master)))
@pytest.mark.supportedwindows
@retrying.retry(wait_fixed=2000, stop_max_delay=LATENCY * 1000)
def test_dcos_diagnostics_units(dcos_api_session):
"""
test a list of collected units, endpoint /system/health/v1/units
"""
# get all unique unit names
all_units = set()
for node in dcos_api_session.masters:
node_response = check_json(dcos_api_session.health.get('/', node=node))
for unit in node_response['units']:
all_units.add(unit['id'])
for node in dcos_api_session.all_slaves:
node_response = check_json(dcos_api_session.health.get('/', node=node))
for unit in node_response['units']:
all_units.add(unit['id'])
# test against masters
for master in dcos_api_session.masters:
units_response = check_json(dcos_api_session.health.get('/units', node=master))
validate_units(units_response['units'])
pulled_units = list(map(lambda unit: unit['id'], units_response['units']))
logging.info('collected units: {}'.format(pulled_units))
diff = set(pulled_units).symmetric_difference(all_units)
assert set(pulled_units) == all_units, ('not all units have been collected by dcos-diagnostics '
'puller, missing: {}'.format(diff))
@pytest.mark.supportedwindows
@retrying.retry(wait_fixed=2000, stop_max_delay=LATENCY * 1000)
def test_systemd_units_health(dcos_api_session):
"""
test all units and make sure the units are healthy. This test will fail if any systemd unit is unhealthy,
meaning it focuses on making sure the dcos_api_session is healthy, rather than testing dcos-diagnostics itself.
"""
unhealthy_output = []
assert dcos_api_session.masters, "Must have at least 1 master node"
report_response = check_json(dcos_api_session.health.get('/report', node=dcos_api_session.masters[0]))
assert 'Units' in report_response, "Missing `Units` field in response"
for unit_name, unit_props in report_response['Units'].items():
assert 'Health' in unit_props, "Unit {} missing `Health` field".format(unit_name)
if unit_props['Health'] != 0:
assert 'Nodes' in unit_props, "Unit {} missing `Nodes` field".format(unit_name)
assert isinstance(unit_props['Nodes'], list), 'Field `Nodes` must be a list'
for node in unit_props['Nodes']:
assert 'Health' in node, 'Field `Health` is expected to be in nodes properties, got {}'.format(node)
if node['Health'] != 0:
assert 'Output' in node, 'Field `Output` is expected to be in nodes properties, got {}'.format(node)
assert isinstance(node['Output'], dict), 'Field `Output` must be a dict'
assert unit_name in node['Output'], 'unit {} must be in node Output, got {}'.format(unit_name,
node['Output'])
assert 'IP' in node, 'Field `IP` is expected to be in nodes properties, got {}'.format(node)
unhealthy_output.append(
'Unhealthy unit {} has been found on node {}, health status {}. journalctl output {}'.format(
unit_name, node['IP'], unit_props['Health'], node['Output'][unit_name]))
if unhealthy_output:
raise AssertionError('\n'.join(unhealthy_output))
@pytest.mark.supportedwindows
def test_dcos_diagnostics_units_unit(dcos_api_session):
"""
test a unit response in a right format, endpoint: /system/health/v1/units/<unit>
"""
for master in dcos_api_session.masters:
units_response = check_json(dcos_api_session.health.get('/units', node=master))
pulled_units = list(map(lambda unit: unit['id'], units_response['units']))
for unit in pulled_units:
unit_response = check_json(dcos_api_session.health.get('/units/{}'.format(unit), node=master))
validate_units([unit_response])
@retrying.retry(wait_fixed=2000, stop_max_delay=LATENCY * 1000)
def test_dcos_diagnostics_units_unit_nodes(dcos_api_session):
"""
test a list of nodes for a specific unit, endpoint /system/health/v1/units/<unit>/nodes
"""
def get_nodes_from_response(response):
assert 'nodes' in response, 'response must have field `nodes`. Got {}'.format(response)
nodes_ip_map = make_nodes_ip_map(dcos_api_session)
nodes = []
for node in response['nodes']:
assert 'host_ip' in node, 'node response must have `host_ip` field. Got {}'.format(node)
assert node['host_ip'] in nodes_ip_map, 'nodes_ip_map must have node {}. Got {}'.format(node['host_ip'],
nodes_ip_map)
nodes.append(nodes_ip_map.get(node['host_ip']))
return nodes
for master in dcos_api_session.masters:
units_response = check_json(dcos_api_session.health.get('/units', node=master))
pulled_units = list(map(lambda unit: unit['id'], units_response['units']))
for unit in pulled_units:
nodes_response = check_json(dcos_api_session.health.get('/units/{}/nodes'.format(unit), node=master))
validate_node(nodes_response['nodes'])
# make sure dcos-mesos-master.service has master nodes and dcos-mesos-slave.service has agent nodes
master_nodes_response = check_json(
dcos_api_session.health.get('/units/dcos-mesos-master.service/nodes', node=master))
master_nodes = get_nodes_from_response(master_nodes_response)
assert len(master_nodes) == len(dcos_api_session.masters), \
'{} != {}'.format(master_nodes, dcos_api_session.masters)
assert set(master_nodes) == set(dcos_api_session.masters), 'a list of difference: {}'.format(
set(master_nodes).symmetric_difference(set(dcos_api_session.masters))
)
agent_nodes_response = check_json(
dcos_api_session.health.get('/units/dcos-mesos-slave.service/nodes', node=master))
agent_nodes = get_nodes_from_response(agent_nodes_response)
assert len(agent_nodes) == len(dcos_api_session.slaves), '{} != {}'.format(agent_nodes, dcos_api_session.slaves)
@pytest.mark.supportedwindows
def test_dcos_diagnostics_units_unit_nodes_node(dcos_api_session):
"""
test a specific node for a specific unit, endpoint /system/health/v1/units/<unit>/nodes/<node>
"""
required_node_fields = ['host_ip', 'health', 'role', 'output', 'help']
for master in dcos_api_session.masters:
units_response = check_json(dcos_api_session.health.get('/units', node=master))
pulled_units = list(map(lambda unit: unit['id'], units_response['units']))
for unit in pulled_units:
nodes_response = check_json(dcos_api_session.health.get('/units/{}/nodes'.format(unit), node=master))
pulled_nodes = list(map(lambda node: node['host_ip'], nodes_response['nodes']))
logging.info('pulled nodes: {}'.format(pulled_nodes))
for node in pulled_nodes:
node_response = check_json(
dcos_api_session.health.get('/units/{}/nodes/{}'.format(unit, node), node=master))
assert len(node_response) == len(required_node_fields), 'required fields: {}'.format(
', '.join(required_node_fields)
)
for required_node_field in required_node_fields:
assert required_node_field in node_response, 'field {} must be set'.format(required_node_field)
# host_ip, health, role, help cannot be empty
assert node_response['host_ip'], 'host_ip field cannot be empty'
assert node_response['health'] in [0, 1], 'health must be 0 or 1'
assert node_response['role'], 'role field cannot be empty'
assert node_response['help'], 'help field cannot be empty'
@pytest.mark.supportedwindows
def test_dcos_diagnostics_report(dcos_api_session):
"""
test dcos-diagnostics report endpoint /system/health/v1/report
"""
for master in dcos_api_session.masters:
report_response = check_json(dcos_api_session.health.get('/report', node=master))
assert 'Units' in report_response
assert len(report_response['Units']) > 0
assert 'Nodes' in report_response
assert len(report_response['Nodes']) > 0
@pytest.mark.xfailflake(
jira='DCOS-52191',
reason='test_dcos_diagnostics_bundle_create_download_delete is flaky.',
since='2019-04-26'
)
def test_dcos_diagnostics_bundle_create_download_delete(dcos_api_session):
"""
test bundle create, read, delete workflow
"""
app, test_uuid = test_helpers.marathon_test_app()
with dcos_api_session.marathon.deploy_and_cleanup(app):
bundle = _create_bundle(dcos_api_session)
_check_diagnostics_bundle_status(dcos_api_session)
_download_and_extract_bundle(dcos_api_session, bundle)
_download_and_extract_bundle_from_another_master(dcos_api_session, bundle)
_delete_bundle(dcos_api_session, bundle)
def _check_diagnostics_bundle_status(dcos_api_session):
# validate diagnostics job status response
diagnostics_bundle_status = check_json(dcos_api_session.health.get('/report/diagnostics/status/all'))
required_status_fields = ['is_running', 'status', 'errors', 'last_bundle_dir', 'job_started', 'job_ended',
'job_duration', 'diagnostics_bundle_dir', 'diagnostics_job_timeout_min',
'journald_logs_since_hours', 'diagnostics_job_get_since_url_timeout_min',
'command_exec_timeout_sec', 'diagnostics_partition_disk_usage_percent',
'job_progress_percentage']
for _, properties in diagnostics_bundle_status.items():
assert len(properties) == len(required_status_fields), 'response must have the following fields: {}'.format(
required_status_fields
)
for required_status_field in required_status_fields:
assert required_status_field in properties, 'property {} not found'.format(required_status_field)
def _create_bundle(dcos_api_session):
last_datapoint = {
'time': None,
'value': 0
}
health_url = dcos_api_session.default_url.copy(
query='cache=0',
path='system/health/v1',
)
diagnostics = Diagnostics(
default_url=health_url,
masters=dcos_api_session.masters,
all_slaves=dcos_api_session.all_slaves,
session=dcos_api_session.copy().session,
)
create_response = diagnostics.start_diagnostics_job().json()
diagnostics.wait_for_diagnostics_job(last_datapoint=last_datapoint)
diagnostics.wait_for_diagnostics_reports()
bundles = diagnostics.get_diagnostics_reports()
assert len(bundles) == 1, 'bundle file not found'
assert bundles[0] == create_response['extra']['bundle_name']
return create_response['extra']['bundle_name']
def _delete_bundle(dcos_api_session, bundle):
health_url = dcos_api_session.default_url.copy(
query='cache=0',
path='system/health/v1',
)
diagnostics = Diagnostics(
default_url=health_url,
masters=dcos_api_session.masters,
all_slaves=dcos_api_session.all_slaves,
session=dcos_api_session.copy().session,
)
bundles = diagnostics.get_diagnostics_reports()
assert bundle in bundles, 'not found {} in {}'.format(bundle, bundles)
dcos_api_session.health.post(os.path.join('/report/diagnostics/delete', bundle))
bundles = diagnostics.get_diagnostics_reports()
assert bundle not in bundles, 'found {} in {}'.format(bundle, bundles)
@retrying.retry(wait_fixed=2000, stop_max_delay=LATENCY * 1000)
def _download_and_extract_bundle(dcos_api_session, bundle):
_download_bundle_from_master(dcos_api_session, 0, bundle)
@retrying.retry(wait_fixed=2000, stop_max_delay=LATENCY * 1000)
def _download_and_extract_bundle_from_another_master(dcos_api_session, bundle):
if len(dcos_api_session.masters) > 1:
_download_bundle_from_master(dcos_api_session, 1, bundle)
def _download_bundle_from_master(dcos_api_session, master_index, bundle):
""" Download DC/OS diagnostics bundle from a master
:param dcos_api_session: dcos_api_session fixture
:param master_index: master index from dcos_api_session.masters array
:param bundle: bundle name to download from master
"""
assert len(dcos_api_session.masters) >= master_index + 1, '{} masters required. Got {}'.format(
master_index + 1, len(dcos_api_session.masters))
health_url = dcos_api_session.default_url.copy(
query='cache=0',
path='system/health/v1',
)
diagnostics = Diagnostics(
default_url=health_url,
masters=dcos_api_session.masters,
all_slaves=dcos_api_session.all_slaves,
session=dcos_api_session.copy().session,
)
bundles = diagnostics.get_diagnostics_reports()
assert bundle in bundles, 'not found {} in {}'.format(bundle, bundles)
expected_common_files = ['dmesg_-T.output.gz',
'ip_addr.output.gz',
'ip_route.output.gz',
'ps_aux_ww_Z.output.gz',
'optmesospherebincurl_-s_-S_http:localhost:62080v1vips.output.gz',
'optmesospherebincurl_-s_-S_http:localhost:62080v1records.output.gz',
'optmesospherebincurl_-s_-S_http:localhost:62080v1metricsdefault.output.gz',
'optmesospherebincurl_-s_-S_http:localhost:62080v1metricsdns.output.gz',
'optmesospherebincurl_-s_-S_http:localhost:62080v1metricsmesos_listener.output.gz',
'optmesospherebincurl_-s_-S_http:localhost:62080v1metricslashup.output.gz',
'timedatectl.output.gz',
'binsh_-c_cat etc*-release.output.gz',
'systemctl_list-units_dcos*.output.gz',
'sestatus.output.gz',
'iptables-save.output.gz',
'ip6tables-save.output.gz',
'ipset_list.output.gz',
'opt/mesosphere/active.buildinfo.full.json.gz',
'opt/mesosphere/etc/dcos-version.json.gz',
'opt/mesosphere/etc/expanded.config.json.gz',
'opt/mesosphere/etc/user.config.yaml.gz',
'dcos-diagnostics-health.json',
'var/lib/dcos/cluster-id.gz',
'proc/cmdline.gz',
'proc/cpuinfo.gz',
'proc/meminfo.gz',
'proc/self/mountinfo.gz',
'optmesospherebindetect_ip.output.gz',
'sysctl_-a.output.gz',
]
# these files are expected to be in archive for a master host
expected_master_files = [
'binsh_-c_cat proc`systemctl show dcos-mesos-master.service -p MainPID| cut -d\'=\' -f2`environ.output.gz',
'5050-quota.json',
'5050-overlay-master_state.json.gz',
'dcos-mesos-master.service.gz',
'var/lib/dcos/exhibitor/zookeeper/snapshot/myid.gz',
'var/lib/dcos/exhibitor/conf/zoo.cfg.gz',
'var/lib/dcos/mesos/log/mesos-master.log.gz',
'var/lib/dcos/mesos/log/mesos-master.log.1.gz',
'var/lib/dcos/mesos/log/mesos-master.log.2.gz.gz',
'var/lib/dcos/mesos/log/mesos-master.log.3.gz.gz',
] + expected_common_files
expected_agent_common_files = [
'5051-containers.json',
'5051-overlay-agent_overlay.json',
'var/log/mesos/mesos-agent.log.gz',
'docker_--version.output.gz',
'docker_ps.output.gz',
]
# for agent host
expected_agent_files = [
'dcos-mesos-slave.service.gz',
'binsh_-c_cat proc`systemctl show dcos-mesos-slave.service -p MainPID| cut -d\'=\' -f2`environ.output.gz'
] + expected_agent_common_files + expected_common_files
# for public agent host
expected_public_agent_files = [
'dcos-mesos-slave-public.service.gz',
'binsh_-c_cat proc`systemctl show dcos-mesos-slave-public.service -p MainPID| cut -d\'=\' -f2`environ.output.gz'
] + expected_agent_common_files + expected_common_files
def _read_from_zip(z: zipfile.ZipFile, item: str, to_json=True):
# raises KeyError if item is not in zipfile.
item_content = z.read(item).decode()
if to_json:
# raises ValueError if cannot deserialize item_content.
return json.loads(item_content)
return item_content
def _get_dcos_diagnostics_health(z: zipfile.ZipFile, item: str):
# try to load dcos-diagnostics health report and validate the report is for this host
try:
_health_report = _read_from_zip(z, item)
except KeyError:
# we did not find a key in archive, let's take a look at items in archive and try to read
# diagnostics logs.
# namelist() gets a list of all items in a zip archive.
logging.info(z.namelist())
# summaryErrorsReport.txt and summaryReport.txt are diagnostic job log files.
for log in ('summaryErrorsReport.txt', 'summaryReport.txt'):
try:
log_data = _read_from_zip(z, log, to_json=False)
logging.info("{}:\n{}".format(log, log_data))
except KeyError:
logging.info("Could not read {}".format(log))
raise
except ValueError:
logging.info("Could not deserialize dcos-diagnostics-health")
raise
return _health_report
with tempfile.TemporaryDirectory() as tmp_dir:
bundle_full_location = os.path.join(tmp_dir, bundle)
with open(bundle_full_location, 'wb') as f:
r = dcos_api_session.health.get(os.path.join('/report/diagnostics/serve', bundle), stream=True,
node=dcos_api_session.masters[master_index])
for chunk in r.iter_content(1024):
f.write(chunk)
# validate bundle zip file.
assert zipfile.is_zipfile(bundle_full_location)
z = zipfile.ZipFile(bundle_full_location)
# get a list of all files in a zip archive.
archived_items = z.namelist()
# validate that no error log was produced (summaryErrorsReport.txt should be absent)
if 'summaryErrorsReport.txt' in archived_items:
log_data = _read_from_zip(z, 'summaryErrorsReport.txt', to_json=False)
raise AssertionError('summaryErrorsReport.txt must be empty. Got {}'.format(log_data))
# validate all files in zip archive are not empty
for item in archived_items:
assert z.getinfo(item).file_size, 'item {} is empty'.format(item)
# make sure all required log files for master node are in place.
for master_ip in dcos_api_session.masters:
master_folder = master_ip + '_master/'
# try to load dcos-diagnostics health report and validate the report is for this host
health_report = _get_dcos_diagnostics_health(z, master_folder + 'dcos-diagnostics-health.json')
assert 'ip' in health_report
assert health_report['ip'] == master_ip
# make sure systemd unit output is correct and does not contain error message
gzipped_unit_output = z.open(master_folder + 'dcos-mesos-master.service.gz')
verify_unit_response(gzipped_unit_output, 100)
verify_archived_items(master_folder, archived_items, expected_master_files)
gzipped_state_output = z.open(master_folder + '5050-master_state.json.gz')
validate_state(gzipped_state_output)
# make sure all required log files for agent node are in place.
for slave_ip in dcos_api_session.slaves:
agent_folder = slave_ip + '_agent/'
# try to load dcos-diagnostics health report and validate the report is for this host
health_report = _get_dcos_diagnostics_health(z, agent_folder + 'dcos-diagnostics-health.json')
assert 'ip' in health_report
assert health_report['ip'] == slave_ip
# make sure systemd unit output is correct and does not contain error message
gzipped_unit_output = z.open(agent_folder + 'dcos-mesos-slave.service.gz')
verify_unit_response(gzipped_unit_output, 100)
verify_archived_items(agent_folder, archived_items, expected_agent_files)
# make sure all required log files for public agent node are in place.
for public_slave_ip in dcos_api_session.public_slaves:
agent_public_folder = public_slave_ip + '_agent_public/'
# try to load dcos-diagnostics health report and validate the report is for this host
health_report = _get_dcos_diagnostics_health(z, agent_public_folder + 'dcos-diagnostics-health.json')
assert 'ip' in health_report
assert health_report['ip'] == public_slave_ip
# make sure systemd unit output is correct and does not contain error message
gzipped_unit_output = z.open(agent_public_folder + 'dcos-mesos-slave-public.service.gz')
verify_unit_response(gzipped_unit_output, 100)
verify_archived_items(agent_public_folder, archived_items, expected_public_agent_files)
def make_nodes_ip_map(dcos_api_session):
"""
a helper function to make a map detected_ip -> external_ip
"""
node_private_public_ip_map = {}
for node in dcos_api_session.masters:
detected_ip = check_json(dcos_api_session.health.get('/', node=node))['ip']
node_private_public_ip_map[detected_ip] = node
for node in dcos_api_session.all_slaves:
detected_ip = check_json(dcos_api_session.health.get('/', node=node))['ip']
node_private_public_ip_map[detected_ip] = node
return node_private_public_ip_map
def validate_node(nodes):
assert isinstance(nodes, list), 'input argument must be a list'
assert len(nodes) > 0, 'input argument cannot be empty'
required_fields = ['host_ip', 'health', 'role']
for node in nodes:
assert len(node) == len(required_fields), 'node should have the following fields: {}. Actual: {}'.format(
', '.join(required_fields), node)
for required_field in required_fields:
assert required_field in node, '{} must be in node. Actual: {}'.format(required_field, node)
# host_ip, health, role fields cannot be empty
assert node['health'] in [0, 1], 'health must be 0 or 1'
assert node['host_ip'], 'host_ip cannot be empty'
assert node['role'], 'role cannot be empty'
def validate_units(units):
assert isinstance(units, list), 'input argument must be list'
assert len(units) > 0, 'input argument cannot be empty'
required_fields = ['id', 'name', 'health', 'description']
for unit in units:
assert len(unit) == len(required_fields), 'a unit must have the following fields: {}. Actual: {}'.format(
', '.join(required_fields), unit)
for required_field in required_fields:
assert required_field in unit, 'unit response must have field: {}. Actual: {}'.format(required_field, unit)
# a unit must have all 3 fields not empty
assert unit['id'], 'id field cannot be empty'
assert unit['name'], 'name field cannot be empty'
assert unit['health'] in [0, 1], 'health must be 0 or 1'
assert unit['description'], 'description field cannot be empty'
def validate_unit(unit):
assert isinstance(unit, dict), 'input argument must be a dict'
required_fields = ['id', 'health', 'output', 'description', 'help', 'name']
assert len(unit) == len(required_fields), 'unit must have the following fields: {}. Actual: {}'.format(
', '.join(required_fields), unit)
for required_field in required_fields:
assert required_field in unit, '{} must be in a unit. Actual: {}'.format(required_field, unit)
# id, name, health, description, help should not be empty
assert unit['id'], 'id field cannot be empty'
assert unit['name'], 'name field cannot be empty'
assert unit['health'] in [0, 1], 'health must be 0 or 1'
assert unit['description'], 'description field cannot be empty'
assert unit['help'], 'help field cannot be empty'
def validate_state(zip_state):
assert isinstance(zip_state, zipfile.ZipExtFile)
state_output = gzip.decompress(zip_state.read())
state = json.loads(state_output)
assert len(state["frameworks"]) == 2, "bundle must contains information about frameworks"
task_count = len(state["frameworks"][1]["tasks"]) + len(state["frameworks"][0]["tasks"])
assert task_count == 1, "bundle must contains information about tasks"
def verify_archived_items(folder, archived_items, expected_files):
for expected_file in expected_files:
expected_file = folder + expected_file
# We don't know in advance whether the file will be gzipped or not,
# because that depends on the size of the diagnostics file, which can
# be influenced by multiple factors that are not under our control
# here.
# Since we only want to check whether the file _exists_ and don't care
# about whether it's gzipped or not, we check for an optional `.gz`
# file type in case it wasn't explicitly specified in the assertion.
# For more context, see: https://jira.mesosphere.com/browse/DCOS_OSS-4531
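# For example, an expected entry such as 'proc/cmdline.gz' is matched exactly,
# while a hypothetical entry 'proc/cmdline' would be accepted as either
# 'proc/cmdline' or 'proc/cmdline.gz'.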
if expected_file.endswith('.gz'):
assert expected_file in archived_items, ('expecting {} in {}'.format(expected_file, archived_items))
else:
expected_gzipped_file = (expected_file + '.gz')
unzipped_exists = expected_file in archived_items
gzipped_exists = expected_gzipped_file in archived_items
message = ('expecting {} or {} in {}'.format(expected_file, expected_gzipped_file, archived_items))
assert (unzipped_exists or gzipped_exists), message
def verify_unit_response(zip_ext_file, min_lines):
assert isinstance(zip_ext_file, zipfile.ZipExtFile)
unit_output = gzip.decompress(zip_ext_file.read())
assert len(unit_output.decode().split('\n')) >= min_lines, 'Expect at least {} lines. Full unit output {}'.format(
min_lines, unit_output)
|
|
import datetime
import copy
import unicode_helpers
import json
import logging
from util import cached_property
from util import dict_from_dir
from totalimpactwebapp import db
logger = logging.getLogger("ti.aliases")
def clean_id(nid):
try:
nid = nid.strip(' "').strip()
nid = unicode_helpers.remove_nonprinting_characters(nid)
except (TypeError, AttributeError):
#isn't a string. That's ok, might be biblio
pass
return nid
def normalize_alias_tuple(ns, nid):
ns = clean_id(ns)
ns = ns.lower()
if ns == "biblio":
return (ns, nid)
nid = clean_id(nid)
from totalimpact.providers import crossref
from totalimpact.providers import pubmed
from totalimpact.providers import arxiv
from totalimpact.providers import webpage
from totalimpact import importer
clean_nid = None
if ns=="doi" or importer.is_doi(nid):
ns = "doi"
clean_nid = crossref.clean_doi(nid)
elif ns=="pmid" or importer.is_pmid(nid):
ns = "pmid"
clean_nid = pubmed.clean_pmid(nid)
elif ns=="arxiv" or importer.is_arxiv(nid):
ns = "arxiv"
clean_nid = arxiv.clean_arxiv_id(nid)
elif ns=="url" or importer.is_url(nid):
ns = "url"
clean_nid = webpage.clean_url(nid)
elif ns not in ["doi", "pmid", "arxiv", "url"]:
clean_nid = nid
if not clean_nid:
return None
return (ns, clean_nid)
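# Rough illustration (hypothetical values): a "DOI" namespace is lowercased to
# "doi" and the id is cleaned by crossref.clean_doi; a namespace outside
# doi/pmid/arxiv/url keeps its id after whitespace/non-printing-character cleanup,
# and "biblio" aliases are returned untouched.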
def clean_alias_tuple_for_comparing(ns, nid):
alias_tuple = normalize_alias_tuple(ns, nid)
if not alias_tuple:
return None
try:
(ns, nid) = alias_tuple
cleaned_alias = (ns.lower(), nid.lower())
except AttributeError:
logger.debug(u"problem cleaning {ns} {nid}".format(
ns=ns, nid=nid))
cleaned_alias = (ns, nid)
return cleaned_alias
def alias_tuples_from_dict(aliases_dict):
"""
Convert from aliases dict we use in items, to a list of alias tuples.
The providers need the tuples list, which look like this:
[(doi, 10.123), (doi, 10.345), (pmid, 1234567)]
"""
alias_tuples = []
for ns, ids in aliases_dict.iteritems():
if isinstance(ids, basestring): # it's a date, not a list of ids
alias_tuples.append((ns, ids))
else:
for id in ids:
alias_tuples.append((ns, id))
return alias_tuples
def alias_dict_from_tuples(aliases_tuples):
alias_dict = {}
for (ns, ids) in aliases_tuples:
if ns in alias_dict:
alias_dict[ns] += [ids]
else:
alias_dict[ns] = [ids]
return alias_dict
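# e.g. [("doi", "10.123"), ("doi", "10.345"), ("pmid", "1234567")] becomes
# {"doi": ["10.123", "10.345"], "pmid": ["1234567"]}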
def canonical_aliases(orig_aliases_dict):
# only put lowercase namespaces in items, and lowercase dois
lowercase_aliases_dict = {}
for orig_namespace in orig_aliases_dict:
lowercase_namespace = clean_id(orig_namespace.lower())
if lowercase_namespace == "doi":
lowercase_aliases_dict[lowercase_namespace] = [clean_id(doi.lower()) for doi in orig_aliases_dict[orig_namespace]]
else:
lowercase_aliases_dict[lowercase_namespace] = [clean_id(nid) for nid in orig_aliases_dict[orig_namespace]]
return lowercase_aliases_dict
def merge_alias_dicts(aliases1, aliases2):
#logger.debug(u"in MERGE ALIAS DICTS with %s and %s" %(aliases1, aliases2))
merged_aliases = copy.deepcopy(aliases1)
for ns, nid_list in aliases2.iteritems():
for nid in nid_list:
try:
if not nid in merged_aliases[ns]:
merged_aliases[ns].append(nid)
except KeyError: # no ids for that namespace yet. make it.
merged_aliases[ns] = [nid]
return merged_aliases
def matches_alias(product1, product2, exclude=[]):
alias_tuple_list1 = [alias_row.my_alias_tuple_for_comparing for alias_row in product1.alias_rows]
alias_tuple_list2 = [alias_row.my_alias_tuple_for_comparing for alias_row in product2.alias_rows]
has_matches = False
for alias_tuple1 in alias_tuple_list1:
if alias_tuple1:
(ns, nid) = alias_tuple1
if alias_tuple1 in alias_tuple_list2 and ns not in exclude:
has_matches = True
return has_matches
class AliasRow(db.Model):
__tablename__ = 'alias'
tiid = db.Column(db.Text, db.ForeignKey('item.tiid'), primary_key=True)
namespace = db.Column(db.Text, primary_key=True)
nid = db.Column(db.Text, primary_key=True)
collected_date = db.Column(db.DateTime())
def __init__(self, **kwargs):
if "collected_date" not in kwargs:
self.collected_date = datetime.datetime.utcnow()
super(AliasRow, self).__init__(**kwargs)
@cached_property
def alias_tuple(self):
return (self.namespace, self.nid)
@cached_property
def my_alias_tuple_for_comparing(self):
return clean_alias_tuple_for_comparing(self.namespace, self.nid)
def is_equivalent_alias(self, given_namespace, given_nid):
if not given_nid:
return False
given_clean_alias = clean_alias_tuple_for_comparing(given_namespace, given_nid)
if not given_clean_alias:
return False
return given_clean_alias==self.my_alias_tuple_for_comparing
class Aliases(object):
def __init__(self, alias_rows):
ignore_namepaces = ["biblio"]
self.tiid = None
for alias_row in alias_rows:
if alias_row.namespace not in ignore_namepaces:
self.tiid = alias_row.tiid
# each namespace has a list of various IDs. We can at some point
# be smart about picking which one is best. For now we just
# use the first one.
try:
getattr(self, alias_row.namespace).append(alias_row.nid)
except AttributeError:
setattr(self, alias_row.namespace, [alias_row.nid])
@cached_property
def best_url(self):
# try these first, in this order
if self.display_doi:
return u"http://doi.org/" + self.display_doi
if self.display_pmid:
return u"http://www.ncbi.nlm.nih.gov/pubmed/" + self.display_pmid
if self.display_pmc:
return u"http://www.ncbi.nlm.nih.gov/pmc/articles/" + self.display_pmc
if self.resolved_url:
return self.resolved_url
try:
return self.url[0]
except AttributeError:
return None
@cached_property
def display_best_url(self): # for consistency
return self.best_url
@cached_property
def display_pmid(self):
try:
return self.pmid[0]
except AttributeError:
return None
@cached_property
def display_pmc(self):
try:
return self.pmc[0]
except AttributeError:
return None
@cached_property
def display_doi(self):
try:
return self.doi[0]
except AttributeError:
return None
@cached_property
def display_arxiv(self):
try:
return self.arxiv[0]
except AttributeError:
return None
@cached_property
def has_formal_alias(self):
# has something other than urls and mendeley uuids etc
if self.display_arxiv or self.display_doi or self.display_pmid or self.display_pmc:
return True
else:
return False
@cached_property
def resolved_url(self):
try:
for url in self.url:
if "doi.org" in url:
continue
elif "ncbi.nlm.nih.gov/" in url:
continue
elif "europepmc.org" in url:
continue
elif "mendeley.com" in url:
continue
elif "scopus.com" in url:
continue
else:
return url
# only had those, so return one of those
return self.url[0]
except AttributeError:
return None
def get_genre(self):
return self._guess_genre_and_host_from_aliases()[0]
def get_host(self):
return self._guess_genre_and_host_from_aliases()[1]
def _guess_genre_and_host_from_aliases(self):
"""Uses available aliases to decide the item's genre"""
# logger.debug(u"in decide_genre with {alias_dict}".format(
# alias_dict=alias_dict))
genre = "unknown"
host = "unknown"
if hasattr(self, "doi"):
joined_doi_string = "".join(self.doi).lower()
if "10.5061/dryad." in joined_doi_string:
genre = "dataset"
host = "dryad"
elif ".figshare." in joined_doi_string:
# if was already set to something, wouldn't be here
host = "figshare"
genre = "dataset"
else:
genre = "article"
elif hasattr(self, "pmid"):
genre = "article"
elif hasattr(self, "arxiv"):
genre = "article"
host = "arxiv"
elif hasattr(self, "blog"):
genre = "blog"
host = "wordpresscom"
elif hasattr(self, "blog_post"):
genre = "blog"
host = "blog_post"
elif hasattr(self, "url"):
joined_url_string = "".join(self.url).lower()
if "slideshare.net" in joined_url_string:
genre = "slides"
host = "slideshare"
elif "github.com" in joined_url_string:
genre = "software"
host = "github"
elif ("youtube.com" in joined_url_string) or ("youtu.be" in joined_url_string):
genre = "video"
host = "youtube"
elif "vimeo.com" in joined_url_string:
genre = "video"
host = "vimeo"
else:
genre = "webpage"
return genre, host
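# For instance, a DOI containing "10.5061/dryad." yields ("dataset", "dryad"),
# a GitHub URL yields ("software", "github"), and an unrecognised URL falls back
# to ("webpage", "unknown").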
def to_dict(self):
ret = dict_from_dir(self)
return ret
|
|
import cinder.compute.nova as nova
from cinder.image import image_utils
from oslo.config import cfg
from cinder import exception
from cinder.i18n import _
from cinder.openstack.common import excutils
from cinder.openstack.common import fileutils
from cinder.openstack.common import log as logging
from cinder.openstack.common import units
from cinder.openstack.common import uuidutils
from cinder.volume import driver
from cinder.volume import volume_types
# from libcloud.compute.types import Provider
#from libcloud.compute.providers import get_driver
#from libcloud.compute.base import Node
from adapter import Ec2Adapter as Ec2Adapter
from libcloud.compute.types import StorageVolumeState,NodeState
import exception_ex
import os
import cinder.context
import pdb
import requests
import time
import string
import rpyc
ec2api_opts = [
cfg.StrOpt('access_key_id',
default='',
help='the access key id for connection to EC2 '),
cfg.StrOpt('secret_key',
default='',
help='the secret key for connection to EC2 '),
cfg.StrOpt('region',
default='ap-southeast-1',
help='the region for connection to EC2 '),
cfg.StrOpt('driver_type',
default='ec2_ap_southeast',
help='the type for driver '),
cfg.StrOpt('provider_image_conversion_dir',
default='/tmp/ec2/',
help='volume convert to image dir'),
cfg.StrOpt('provider_instance_id',
default='',
help='aws instance id'),
cfg.StrOpt('cgw_host_id',
default='',
help='compute gateway id in provider cloud'),
cfg.StrOpt('cgw_host_ip',
default='',
help='compute gateway ip'),
cfg.StrOpt('cgw_username',
default='',
help='compute gateway user name'),
cfg.StrOpt('cgw_certificate',
default='',
help='full name of compute gateway public key'),
cfg.StrOpt('storage_tmp_dir',
default='wfbucketse',
help='a cloud storage temp directory'),
cfg.StrOpt('availability_zone',
default='ap-southeast-1a',
help='the availability_zone for connection to EC2 ')
]
vgw_opts = [
cfg.DictOpt('vgw_url',
default={
'fs_vgw_url': 'http://162.3.114.107:8090/',
'vcloud_vgw_url': 'http://162.3.114.108:8090/',
'aws_vgw_url': 'http://172.27.12.245:8090/'
},
help="These values will be used for upload/download image "
"from vgw host."),
cfg.StrOpt('store_file_dir',
default='/home/upload',
help='Directory used for temporary storage '
'during migrate volume'),
cfg.StrOpt('rpc_service_port',
default='9999',
help='port of rpc service')
]
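# Illustrative cinder.conf fragment for the options above (all values are
# placeholders, not defaults shipped with this driver). ec2api_opts are
# registered below without a group, so they live in [DEFAULT]; vgw_opts are
# registered into the [vgw] group. oslo.config DictOpt values such as vgw_url
# are usually written as comma-separated key:value pairs.
#
#   [DEFAULT]
#   access_key_id = AKIAEXAMPLE
#   secret_key = example-secret
#   region = ap-southeast-1
#   availability_zone = ap-southeast-1a
#   cgw_host_id = i-0123456789abcdef0
#   cgw_host_ip = 10.0.0.10
#
#   [vgw]
#   vgw_url = aws_vgw_url:http://172.27.12.245:8090/
#   store_file_dir = /home/upload
#   rpc_service_port = 9999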
LOG = logging.getLogger(__name__)
CONF = cfg.CONF
CONF.register_opts(ec2api_opts)
CONF.register_opts(vgw_opts,'vgw')
# EC2 = get_driver(CONF.ec2.driver_type)
class AwsEc2VolumeDriver(driver.VolumeDriver):
VERSION = "1.0"
def __init__(self, *args, **kwargs):
super(AwsEc2VolumeDriver, self).__init__(*args, **kwargs)
self.configuration.append_config_values(ec2api_opts)
#self.configuration.append_config_values(vgw_opts)
LOG.info("access_key_id = %s,secret_key = %s" % (self.configuration.access_key_id,
self.configuration.secret_key))
        if (not self.configuration.access_key_id or
                not self.configuration.secret_key):
raise Exception(_("Must specify access_key_id and "
"secret_key to use aws ec2"))
self.adpter = Ec2Adapter(self.configuration.access_key_id, secret=self.configuration.secret_key,
region=self.configuration.region, secure=False)
def do_setup(self, context):
"""Instantiate common class and log in storage system."""
pass
def check_for_setup_error(self):
"""Check configuration file."""
pass
def create_volume(self, volume):
"""Create a volume."""
size = volume['size']
name = volume['name']
location = self.adpter.get_location(self.configuration.availability_zone)
if not location:
raise exception_ex.ProviderLocationError
provider_location = self.adpter.create_volume(size, name, location)
if not provider_location:
raise exception_ex.ProviderCreateVolumeError(volume_id=volume['id'])
LOG.info("create volume: %s; provider_volume: %s " % (volume['id'], provider_location.id))
        create_tags_func = getattr(self.adpter, 'ex_create_tags', None)
if create_tags_func:
create_tags_func(provider_location, {'hybrid_cloud_volume_id': volume['id']})
ctx = cinder.context.get_admin_context()
if ctx:
self.db.volume_metadata_update(ctx, volume['id'], {'provider_volume_id': provider_location.id}, False)
model_update = {'provider_location': provider_location.id}
return model_update
def create_volume_from_snapshot(self, volume, snapshot):
"""Create a volume from a snapshot."""
pass
def create_cloned_volume(self, volume, src_vref):
"""Create a clone of the specified volume."""
pass
def extend_volume(self, volume, new_size):
"""Extend a volume."""
pass
def _get_provider_volumeid_from_volume(self, volume):
if not volume.get('provider_location',None):
ctx = cinder.context.get_admin_context()
metadata = self.db.volume_metadata_get(ctx, volume['id'])
return metadata.get('provider_volume_id',None)
else:
return volume.get('provider_location',None)
def delete_volume(self, volume):
"""Delete a volume."""
provider_volume_id = self._get_provider_volumeid_from_volume(volume)
if not provider_volume_id:
LOG.error('NO Mapping between cinder volume and provider volume')
return
provider_volumes = self.adpter.list_volumes(ex_volume_ids=[provider_volume_id])
if not provider_volumes:
LOG.error('provider_volume is not found')
return
#raise exception.VolumeNotFound(volume_id=volume['id'])
elif len(provider_volumes) > 1:
LOG.error('volume %s has more than one provider_volume' % volume['id'])
raise exception_ex.ProviderMultiVolumeError(volume_id=volume['id'])
delete_ret = self.adpter.destroy_volume(provider_volumes[0])
LOG.info("deleted volume return%d" % delete_ret)
def _get_provider_volumeID_from_snapshot(self, snapshot):
provider_volume_id = self._get_provider_volumeid_from_volume(snapshot['volume'])
return provider_volume_id
def _get_provider_volume(self, volume_id):
provider_volume = None
try:
#if not provider_volume_id:
provider_volumes = self.adpter.list_volumes(ex_volume_ids=[volume_id])
if provider_volumes is None:
LOG.warning('Can not get volume through tag:hybrid_cloud_volume_id %s' % volume_id)
return provider_volumes
if len(provider_volumes) == 1:
provider_volume = provider_volumes[0]
elif len(provider_volumes) >1:
LOG.warning('More than one volumes are found through tag:hybrid_cloud_volume_id %s' % volume_id)
else:
LOG.warning('Volume %s NOT Found at provider cloud' % volume_id)
except Exception as e:
LOG.error('Can NOT get volume %s from provider cloud tag' % volume_id)
LOG.error(e.message)
return provider_volume
def _get_provider_node(self,provider_node_id):
provider_node=None
try:
nodes = self.adpter.list_nodes(ex_node_ids=[provider_node_id])
if nodes is None:
LOG.error('Can NOT get node %s from provider cloud tag' % provider_node_id)
return nodes
if len(nodes) == 0:
LOG.debug('node %s NOT exist at provider cloud' % provider_node_id)
return []
else:
provider_node=nodes[0]
except Exception as e:
LOG.error('Can NOT get node %s from provider cloud tag' % provider_node_id)
LOG.error(e.message)
return provider_node
def create_snapshot(self, snapshot):
"""Create a snapshot."""
provider_volume_id = self._get_provider_volumeID_from_snapshot(snapshot)
provider_volumes = self.adpter.list_volumes(ex_volume_ids=[provider_volume_id])
if not provider_volumes:
LOG.error('provider_volume %s is not found' % provider_volume_id)
raise exception.VolumeNotFound(volume_id=snapshot['volume_id'])
elif len(provider_volumes) > 1:
LOG.error('volume %s has more than one provider_volume' % snapshot['volume_id'])
raise exception_ex.ProviderMultiVolumeError(volume_id=snapshot['volume_id'])
provider_snapshot = self.adpter.create_volume_snapshot(provider_volumes[0], snapshot['name'])
if not provider_snapshot:
raise exception_ex.ProviderCreateSnapshotError(snapshot_id=snapshot['id'])
        create_tags_func = getattr(self.adpter, 'ex_create_tags', None)
if create_tags_func:
create_tags_func(provider_snapshot, {'hybrid_cloud_snapshot_id': snapshot['id']})
ctx = cinder.context.get_admin_context()
if ctx:
self.db.snapshot_metadata_update(ctx, snapshot['id'], {'provider_snapshot_id': provider_snapshot.id}, False)
model_update = {'provider_location': provider_snapshot.id}
return model_update
def delete_snapshot(self, snapshot):
"""Delete a snapshot."""
provider_snapshot_id = snapshot.get('provider_location',None)
if not provider_snapshot_id:
LOG.warning('snapshot has no provider_location')
return
provider_snapshots = self.adpter.list_snapshots(snapshot_ids=[provider_snapshot_id])
if not provider_snapshots:
LOG.warning('provider_snapshot %s is not found' % provider_snapshot_id)
return
provider_snapshot = provider_snapshots[0]
delete_ret = self.adpter.destroy_volume_snapshot(provider_snapshot)
LOG.info("deleted snapshot return%d" % delete_ret)
def get_volume_stats(self, refresh=False):
"""Get volume stats."""
#volume_backend_name = self.adpter.get_volume_backend_name()
data = {'volume_backend_name': 'AMAZONEC2',
'storage_protocol': 'LSI Logic SCSI',
'driver_version': self.VERSION,
'vendor_name': 'Huawei',
'total_capacity_gb': 1024,
'free_capacity_gb': 1024,
'reserved_percentage': 0}
return data
def create_export(self, context, volume):
"""Export the volume."""
pass
def ensure_export(self, context, volume):
"""Synchronously recreate an export for a volume."""
pass
def remove_export(self, context, volume):
"""Remove an export for a volume."""
pass
def initialize_connection(self, volume, connector):
"""Map a volume to a host."""
LOG.info("attach volume: %s; provider_location: %s " % (volume['id'],
volume['provider_location']))
properties = {'volume_id': volume['id'],
'provider_location': volume['provider_location']}
LOG.info("initialize_connection success. Return data: %s."
% properties)
return {'driver_volume_type': 'provider_volume', 'data': properties}
def terminate_connection(self, volume, connector, **kwargs):
pass
def _get_next_device_name(self,node):
provider_bdm_list = node.extra.get('block_device_mapping')
used_device_letter=set()
all_letters=set(string.ascii_lowercase)
for bdm in provider_bdm_list:
used_device_letter.add(bdm.get('device_name')[-1])
        unused_device_letter = sorted(all_letters - used_device_letter)
device_name='/dev/xvd'+unused_device_letter[0]
return device_name
def copy_volume_to_image(self, context, volume, image_service, image_meta):
LOG.error('begin time of copy_volume_to_image is %s' %(time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())))
container_format=image_meta.get('container_format')
file_name=image_meta.get('id')
if container_format in ['fs_vgw_url','vcloud_vgw_url','aws_vgw_url']:
LOG.debug('get the vgw url')
vgw_url = CONF.vgw.vgw_url.get(container_format)
#vgw_url = 'http://162.3.125.52:9999/'
volume_id = volume['id']
#1.get the provider_volume at provider cloud
provider_volume_id = self._get_provider_volumeid_from_volume(volume)
if not provider_volume_id:
LOG.error('get provider_volume_id of volume %s error' % volume_id)
raise exception_ex.ProviderVolumeNotFound(volume_id=volume_id)
provider_volume=self._get_provider_volume(provider_volume_id)
if not provider_volume:
LOG.error('get provider_volume of volume %s at provider cloud error' % volume_id)
raise exception_ex.ProviderVolumeNotFound(volume_id=volume_id)
origin_provider_volume_state= provider_volume.extra.get('attachment_status')
origin_attach_node_id = None
origin_device_name=None
#2.judge if the volume is available
if origin_provider_volume_state is not None:
origin_attach_node_id = provider_volume.extra['instance_id']
origin_device_name = provider_volume.extra['device']
self.adpter.detach_volume(provider_volume)
time.sleep(1)
retry_time = 50
provider_volume=self._get_provider_volume(provider_volume_id)
while retry_time > 0:
if provider_volume and provider_volume.extra.get('attachment_status') is None:
break
else:
time.sleep(1)
provider_volume=self._get_provider_volume(provider_volume_id)
retry_time = retry_time-1
#3.attach the volume to vgw host
try:
#3.1 get the vgw host
vgw_host= self._get_provider_node(self.configuration.cgw_host_id)
if not vgw_host:
raise exception_ex.VgwHostNotFound(Vgw_id=self.configuration.cgw_host_id)
device_name=self._get_next_device_name(vgw_host)
LOG.error('**********************************************')
LOG.error('the volume status %s' %provider_volume.state)
self.adpter.attach_volume(vgw_host, provider_volume,
device_name)
#query volume status
time.sleep(1)
retry_time = 120
provider_volume=self._get_provider_volume(provider_volume_id)
while retry_time > 0:
if provider_volume and provider_volume.extra.get('attachment_status') =='attached':
break
else:
time.sleep(1)
provider_volume=self._get_provider_volume(provider_volume_id)
retry_time = retry_time-1
except Exception as e:
raise e
time.sleep(5)
conn=rpyc.connect(self.configuration.cgw_host_ip,int(CONF.vgw.rpc_service_port))
LOG.error('begin time of copy_volume_to_file is %s' %(time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())))
full_file_path = conn.root.copy_volume_to_file(device_name,file_name,CONF.vgw.store_file_dir)
LOG.error('end time of copy_volume_to_image is %s' %(time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())))
#todo exception occured clean env
if not full_file_path:
self.adpter.detach_volume(provider_volume)
conn.close()
raise exception_ex.ProviderExportVolumeError(volume_id=volume_id)
LOG.error('begin time of push_file_to_vgw is %s' %(time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())))
push_file_result =conn.root.exposed_push_file_to_vgw(full_file_path,vgw_url)
LOG.error('end time of push_file_to_vgw is %s' %(time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())))
if not push_file_result:
                LOG.error('push file %s to %s failed' % (full_file_path, vgw_url))
self.adpter.detach_volume(provider_volume)
conn.close()
raise exception_ex.ProviderExportVolumeError(volume_id=volume_id)
conn.close()
            # upload an empty placeholder file to glance
with image_utils.temporary_file() as tmp:
image_utils.upload_volume(context,
image_service,
image_meta,
tmp)
fileutils.delete_if_exists(tmp)
#4.detach form vgw
self.adpter.detach_volume(provider_volume)
time.sleep(1)
retry_time = 120
provider_volume=self._get_provider_volume(provider_volume_id)
while retry_time > 0:
if provider_volume and provider_volume.extra.get('attachment_status') is None:
break
else:
time.sleep(1)
provider_volume=self._get_provider_volume(provider_volume_id)
retry_time = retry_time-1
LOG.error('**********************************************')
LOG.error('the volume status %s' %provider_volume.state)
#attach the volume back
if origin_provider_volume_state is not None:
origin_attach_node = self._get_provider_node(origin_attach_node_id)
self.adpter.attach_volume(origin_attach_node, provider_volume,
origin_device_name)
else:
if not os.path.exists(self.configuration.provider_image_conversion_dir):
fileutils.ensure_tree(self.configuration.provider_image_conversion_dir)
provider_volume_id = self._get_provider_volumeid_from_volume(volume)
task_ret = self.adpter.export_volume(provider_volume_id,
self.configuration.provider_image_conversion_dir,
str(image_meta['id']),
cgw_host_id=self.configuration.cgw_host_id,
cgw_host_ip=self.configuration.cgw_host_ip,
cgw_username=self.configuration.cgw_username,
cgw_certificate=self.configuration.cgw_certificate,
transfer_station=self.configuration.storage_tmp_dir)
if not task_ret:
raise exception_ex.ProviderExportVolumeError
temp_path = os.path.join(self.configuration.provider_image_conversion_dir, str(image_meta['id']))
upload_image = temp_path
try:
image_utils.upload_volume(context, image_service, image_meta,
upload_image)
finally:
fileutils.delete_if_exists(upload_image)
LOG.error('end time of copy_volume_to_image is %s' %(time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())))
def copy_image_to_volume(self, context, volume, image_service, image_id):
LOG.error('begin time of copy_image_to_volume is %s' %(time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())))
image_meta = image_service.show(context, image_id)
container_format=image_meta.get('container_format')
if container_format in ['fs_vgw_url','vcloud_vgw_url','aws_vgw_url']:
#1.get the provider_volume at provider cloud
provider_volume_id = self._get_provider_volumeid_from_volume(volume)
retry_time = 10
provider_volume=self._get_provider_volume(provider_volume_id)
while retry_time > 0:
if provider_volume and \
provider_volume.state == StorageVolumeState.AVAILABLE and \
provider_volume.extra.get('attachment_status') is None:
break
else:
time.sleep(1)
provider_volume=self._get_provider_volume(provider_volume_id)
retry_time = retry_time-1
try:
#3.1 get the vgw host
vgw_host= self._get_provider_node(self.configuration.cgw_host_id)
if not vgw_host:
raise exception_ex.VgwHostNotFound(Vgw_id=self.configuration.cgw_host_id)
device_name=self._get_next_device_name(vgw_host)
self.adpter.attach_volume(vgw_host, provider_volume,
device_name)
#query volume status
time.sleep(1)
retry_time = 10
provider_volume=self._get_provider_volume(provider_volume_id)
while retry_time > 0:
if provider_volume and provider_volume.extra.get('attachment_status') =='attached':
break
else:
time.sleep(1)
provider_volume=self._get_provider_volume(provider_volume_id)
retry_time = retry_time-1
LOG.error('**********************************************')
LOG.error('the volume status %s' %provider_volume.state)
conn=rpyc.connect(self.configuration.cgw_host_ip,int(CONF.vgw.rpc_service_port))
copy_file_to_device_result = conn.root.copy_file_to_volume(image_id,CONF.vgw.store_file_dir,device_name)
if not copy_file_to_device_result:
LOG.error("qemu-img convert %s %s failed" %(image_id,device_name))
self.adpter.detach_volume(provider_volume)
conn.close()
raise exception.ImageUnacceptable(
reason= ("copy image %s file to volume %s failed " %(image_id,volume['id'])))
conn.close()
                self.adpter.detach_volume(provider_volume)
                time.sleep(1)
                retry_time = 10
                provider_volume=self._get_provider_volume(provider_volume_id)
                while retry_time > 0:
if provider_volume and provider_volume.extra.get('attachment_status') is None:
break
else:
time.sleep(1)
provider_volume=self._get_provider_volume(provider_volume_id)
retry_time = retry_time-1
LOG.error('**********************************************')
LOG.error('the volume status %s' %provider_volume.state)
except Exception as e:
raise e
else:
pass
LOG.error('end time of copy_image_to_volume is %s' %(time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())))
def validate_connector(self, connector):
"""Fail if connector doesn't contain all the data needed by driver."""
pass
def clone_image(self, volume, image_location, image_id, image_meta):
"""Create a volume efficiently from an existing image.
image_location is a string whose format depends on the
image service backend in use. The driver should use it
to determine whether cloning is possible.
image_id is a string which represents id of the image.
It can be used by the driver to introspect internal
stores or registry to do an efficient image clone.
image_meta is a dictionary that includes 'disk_format' (e.g.
raw, qcow2) and other image attributes that allow drivers to
decide whether they can clone the image without first requiring
conversion.
Returns a dict of volume properties eg. provider_location,
boolean indicating whether cloning occurred
"""
container_format=image_meta.get('container_format')
if container_format in ['fs_vgw_url','vcloud_vgw_url','aws_vgw_url']:
return {'provider_location': None}, False
else:
return {'provider_location': None}, True
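if __name__ == '__main__':
    # Standalone usage sketch (not exercised by Cinder): mirrors the
    # free-device-letter selection that _get_next_device_name() applies to a
    # node's block_device_mapping, here with a fake mapping instead of a live
    # libcloud node. The device names below are made up.
    fake_bdm = [{'device_name': '/dev/xvda'}, {'device_name': '/dev/xvdf'}]
    used_letters = set(bdm['device_name'][-1] for bdm in fake_bdm)
    free_letters = sorted(set(string.ascii_lowercase) - used_letters)
    print('next free device name would be /dev/xvd%s' % free_letters[0])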
|
|
# Copyright (c) 2012 Eliot Eshelman
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
###############################################################################
"""2D, 3D and 4D Simplex Noise functions return 'random' values in (-1, 1).
This algorithm was originally designed by Ken Perlin, but my code has been
adapted from the implementation written by Stefan Gustavson ([email protected])
Raw Simplex noise functions return the value generated by Ken's algorithm.
Scaled Raw Simplex noise functions adjust the range of values returned from the
traditional (-1, 1) to whichever bounds are passed to the function.
Multi-Octave Simplex noise functions combine multiple noise values to create a
more complex result. Each successive layer of noise is adjusted and scaled.
Scaled Multi-Octave Simplex noise functions scale the values returned from the
traditional (-1,1) range to whichever range is passed to the function.
In many cases, you may think you only need a 1D noise function, but in practice
2D is almost always better. For instance, if you're using the current frame
number as the parameter for the noise, all objects will end up with the same
noise value at each frame. By adding a second parameter on the second
dimension, you can ensure that each gets a unique noise value and they don't
all look identical.
"""
import math
def octave_noise_2d(octaves, persistence, scale, x, y):
"""2D Multi-Octave Simplex noise.
For each octave, a higher frequency/lower amplitude function will be added
to the original. The higher the persistence [0-1], the more of each
succeeding octave will be added.
"""
total = 0.0
frequency = scale
amplitude = 1.0
# We have to keep track of the largest possible amplitude,
# because each octave adds more, and we need a value in [-1, 1].
    maxAmplitude = 0.0
for i in range(octaves):
total += raw_noise_2d(x * frequency, y * frequency) * amplitude
frequency *= 2.0
        maxAmplitude += amplitude
amplitude *= persistence
return total / maxAmplitude
def octave_noise_3d(octaves, persistence, scale, x, y, z):
"""3D Multi-Octave Simplex noise.
For each octave, a higher frequency/lower amplitude function will be added
to the original. The higher the persistence [0-1], the more of each
succeeding octave will be added.
"""
total = 0.0
frequency = scale
amplitude = 1.0
# We have to keep track of the largest possible amplitude,
# because each octave adds more, and we need a value in [-1, 1].
    maxAmplitude = 0.0
for i in range(octaves):
total += raw_noise_3d( x * frequency,
y * frequency,
z * frequency) * amplitude
frequency *= 2.0
        maxAmplitude += amplitude
amplitude *= persistence
return total / maxAmplitude
def octave_noise_4d(octaves, persistence, scale, x, y, z, w):
"""4D Multi-Octave Simplex noise.
For each octave, a higher frequency/lower amplitude function will be added
to the original. The higher the persistence [0-1], the more of each
succeeding octave will be added.
"""
total = 0.0
frequency = scale
amplitude = 1.0
# We have to keep track of the largest possible amplitude,
# because each octave adds more, and we need a value in [-1, 1].
    maxAmplitude = 0.0
for i in range(octaves):
total += raw_noise_4d( x * frequency,
y * frequency,
z * frequency,
w * frequency) * amplitude
frequency *= 2.0
        maxAmplitude += amplitude
amplitude *= persistence
return total / maxAmplitude
def scaled_octave_noise_2d(octaves, persistence, scale, loBound, hiBound, x, y):
"""2D Scaled Multi-Octave Simplex noise.
Returned value will be between loBound and hiBound.
"""
return (octave_noise_2d(octaves, persistence, scale, x, y) *
(hiBound - loBound) / 2 +
(hiBound + loBound) / 2)
def scaled_octave_noise_3d(octaves, persistence, scale, loBound, hiBound, x, y, z):
"""3D Scaled Multi-Octave Simplex noise.
Returned value will be between loBound and hiBound.
"""
return (octave_noise_3d(octaves, persistence, scale, x, y, z) *
(hiBound - loBound) / 2 +
(hiBound + loBound) / 2)
def scaled_octave_noise_4d(octaves, persistence, scale, loBound, hiBound, x, y, z, w):
"""4D Scaled Multi-Octave Simplex noise.
Returned value will be between loBound and hiBound.
"""
return (octave_noise_4d(octaves, persistence, scale, x, y, z, w) *
(hiBound - loBound) / 2 +
(hiBound + loBound) / 2)
def scaled_raw_noise_2d(loBound, hiBound, x, y):
"""2D Scaled Raw Simplex noise.
Returned value will be between loBound and hiBound.
"""
return (raw_noise_2d(x, y) *
(hiBound - loBound) / 2+
(hiBound + loBound) / 2)
def scaled_raw_noise_3d(loBound, hiBound, x, y, z):
"""3D Scaled Raw Simplex noise.
Returned value will be between loBound and hiBound.
"""
return (raw_noise_3d(x, y, z) *
(hiBound - loBound) / 2+
(hiBound + loBound) / 2)
def scaled_raw_noise_4d(loBound, hiBound, x, y, z, w):
"""4D Scaled Raw Simplex noise.
Returned value will be between loBound and hiBound.
"""
return (raw_noise_4d(x, y, z, w) *
(hiBound - loBound) / 2+
(hiBound + loBound) / 2)
def raw_noise_2d(x, y):
"""2D Raw Simplex noise."""
# Noise contributions from the three corners
n0, n1, n2 = 0.0, 0.0, 0.0
# Skew the input space to determine which simplex cell we're in
F2 = 0.5 * (math.sqrt(3.0) - 1.0)
# Hairy skew factor for 2D
s = (x + y) * F2
i = int(x + s)
j = int(y + s)
G2 = (3.0 - math.sqrt(3.0)) / 6.0
t = float(i + j) * G2
# Unskew the cell origin back to (x,y) space
X0 = i - t
Y0 = j - t
# The x,y distances from the cell origin
x0 = x - X0
y0 = y - Y0
# For the 2D case, the simplex shape is an equilateral triangle.
# Determine which simplex we are in.
i1, j1 = 0, 0 # Offsets for second (middle) corner of simplex in (i,j) coords
if x0 > y0: # lower triangle, XY order: (0,0)->(1,0)->(1,1)
i1 = 1
j1 = 0
else: # upper triangle, YX order: (0,0)->(0,1)->(1,1)
i1 = 0
j1 = 1
# A step of (1,0) in (i,j) means a step of (1-c,-c) in (x,y), and
# a step of (0,1) in (i,j) means a step of (-c,1-c) in (x,y), where
# c = (3-sqrt(3))/6
x1 = x0 - i1 + G2 # Offsets for middle corner in (x,y) unskewed coords
y1 = y0 - j1 + G2
x2 = x0 - 1.0 + 2.0 * G2 # Offsets for last corner in (x,y) unskewed coords
y2 = y0 - 1.0 + 2.0 * G2
# Work out the hashed gradient indices of the three simplex corners
ii = int(i) & 255
jj = int(j) & 255
gi0 = _perm[ii+_perm[jj]] % 12
gi1 = _perm[ii+i1+_perm[jj+j1]] % 12
gi2 = _perm[ii+1+_perm[jj+1]] % 12
# Calculate the contribution from the three corners
t0 = 0.5 - x0*x0 - y0*y0
if t0 < 0:
n0 = 0.0
else:
t0 *= t0
n0 = t0 * t0 * dot2d(_grad3[gi0], x0, y0)
t1 = 0.5 - x1*x1 - y1*y1
if t1 < 0:
n1 = 0.0
else:
t1 *= t1
n1 = t1 * t1 * dot2d(_grad3[gi1], x1, y1)
t2 = 0.5 - x2*x2-y2*y2
if t2 < 0:
n2 = 0.0
else:
t2 *= t2
n2 = t2 * t2 * dot2d(_grad3[gi2], x2, y2)
# Add contributions from each corner to get the final noise value.
# The result is scaled to return values in the interval [-1,1].
return 70.0 * (n0 + n1 + n2)
def raw_noise_3d(x, y, z):
"""3D Raw Simplex noise."""
# Noise contributions from the four corners
n0, n1, n2, n3 = 0.0, 0.0, 0.0, 0.0
# Skew the input space to determine which simplex cell we're in
F3 = 1.0/3.0
# Very nice and simple skew factor for 3D
s = (x+y+z) * F3
i = int(x + s)
j = int(y + s)
k = int(z + s)
G3 = 1.0 / 6.0
t = float(i+j+k) * G3
# Unskew the cell origin back to (x,y,z) space
X0 = i - t
Y0 = j - t
Z0 = k - t
# The x,y,z distances from the cell origin
x0 = x - X0
y0 = y - Y0
z0 = z - Z0
# For the 3D case, the simplex shape is a slightly irregular tetrahedron.
# Determine which simplex we are in.
i1, j1, k1 = 0,0,0 # Offsets for second corner of simplex in (i,j,k) coords
i2, j2, k2 = 0,0,0 # Offsets for third corner of simplex in (i,j,k) coords
if x0 >= y0:
if y0 >= z0: # X Y Z order
i1 = 1
j1 = 0
k1 = 0
i2 = 1
j2 = 1
k2 = 0
elif x0 >= z0: # X Z Y order
i1 = 1
j1 = 0
k1 = 0
i2 = 1
j2 = 0
k2 = 1
else: # Z X Y order
i1 = 0
j1 = 0
k1 = 1
i2 = 1
j2 = 0
k2 = 1
else:
if y0 < z0: # Z Y X order
i1 = 0
j1 = 0
k1 = 1
i2 = 0
j2 = 1
k2 = 1
elif x0 < z0: # Y Z X order
i1 = 0
j1 = 1
k1 = 0
i2 = 0
j2 = 1
k2 = 1
else: # Y X Z order
i1 = 0
j1 = 1
k1 = 0
i2 = 1
j2 = 1
k2 = 0
# A step of (1,0,0) in (i,j,k) means a step of (1-c,-c,-c) in (x,y,z),
# a step of (0,1,0) in (i,j,k) means a step of (-c,1-c,-c) in (x,y,z), and
# a step of (0,0,1) in (i,j,k) means a step of (-c,-c,1-c) in (x,y,z), where
# c = 1/6.
x1 = x0 - i1 + G3 # Offsets for second corner in (x,y,z) coords
y1 = y0 - j1 + G3
z1 = z0 - k1 + G3
x2 = x0 - i2 + 2.0*G3 # Offsets for third corner in (x,y,z) coords
y2 = y0 - j2 + 2.0*G3
z2 = z0 - k2 + 2.0*G3
x3 = x0 - 1.0 + 3.0*G3 # Offsets for last corner in (x,y,z) coords
y3 = y0 - 1.0 + 3.0*G3
z3 = z0 - 1.0 + 3.0*G3
# Work out the hashed gradient indices of the four simplex corners
ii = int(i) & 255
jj = int(j) & 255
kk = int(k) & 255
gi0 = _perm[ii+_perm[jj+_perm[kk]]] % 12
gi1 = _perm[ii+i1+_perm[jj+j1+_perm[kk+k1]]] % 12
gi2 = _perm[ii+i2+_perm[jj+j2+_perm[kk+k2]]] % 12
gi3 = _perm[ii+1+_perm[jj+1+_perm[kk+1]]] % 12
# Calculate the contribution from the four corners
t0 = 0.6 - x0*x0 - y0*y0 - z0*z0
if t0 < 0:
n0 = 0.0
else:
t0 *= t0
n0 = t0 * t0 * dot3d(_grad3[gi0], x0, y0, z0)
t1 = 0.6 - x1*x1 - y1*y1 - z1*z1
if t1 < 0:
n1 = 0.0
else:
t1 *= t1
n1 = t1 * t1 * dot3d(_grad3[gi1], x1, y1, z1)
t2 = 0.6 - x2*x2 - y2*y2 - z2*z2
if t2 < 0:
n2 = 0.0
else:
t2 *= t2
n2 = t2 * t2 * dot3d(_grad3[gi2], x2, y2, z2)
t3 = 0.6 - x3*x3 - y3*y3 - z3*z3
if t3 < 0:
n3 = 0.0
else:
t3 *= t3
n3 = t3 * t3 * dot3d(_grad3[gi3], x3, y3, z3)
# Add contributions from each corner to get the final noise value.
# The result is scaled to stay just inside [-1,1]
return 32.0 * (n0 + n1 + n2 + n3)
def raw_noise_4d(x, y, z, w):
"""4D Raw Simplex noise."""
# Noise contributions from the five corners
n0, n1, n2, n3, n4 = 0.0, 0.0, 0.0, 0.0, 0.0
# The skewing and unskewing factors are hairy again for the 4D case
F4 = (math.sqrt(5.0)-1.0) / 4.0
# Skew the (x,y,z,w) space to determine which cell of 24 simplices we're in
s = (x + y + z + w) * F4
i = int(x + s)
j = int(y + s)
k = int(z + s)
l = int(w + s)
G4 = (5.0-math.sqrt(5.0)) / 20.0
t = (i + j + k + l) * G4
# Unskew the cell origin back to (x,y,z,w) space
X0 = i - t
Y0 = j - t
Z0 = k - t
W0 = l - t
# The x,y,z,w distances from the cell origin
x0 = x - X0
y0 = y - Y0
z0 = z - Z0
w0 = w - W0
# For the 4D case, the simplex is a 4D shape I won't even try to describe.
# To find out which of the 24 possible simplices we're in, we need to
# determine the magnitude ordering of x0, y0, z0 and w0.
# The method below is a good way of finding the ordering of x,y,z,w and
# then find the correct traversal order for the simplex we're in.
# First, six pair-wise comparisons are performed between each possible pair
# of the four coordinates, and the results are used to add up binary bits
# for an integer index.
c1 = 32 if x0 > y0 else 0
c2 = 16 if x0 > z0 else 0
c3 = 8 if y0 > z0 else 0
c4 = 4 if x0 > w0 else 0
c5 = 2 if y0 > w0 else 0
c6 = 1 if z0 > w0 else 0
c = c1 + c2 + c3 + c4 + c5 + c6
i1, j1, k1, l1 = 0,0,0,0 # The integer offsets for the second simplex corner
i2, j2, k2, l2 = 0,0,0,0 # The integer offsets for the third simplex corner
i3, j3, k3, l3 = 0,0,0,0 # The integer offsets for the fourth simplex corner
# simplex[c] is a 4-vector with the numbers 0, 1, 2 and 3 in some order.
# Many values of c will never occur, since e.g. x>y>z>w makes x<z, y<w and x<w
# impossible. Only the 24 indices which have non-zero entries make any sense.
# We use a thresholding to set the coordinates in turn from the largest magnitude.
# The number 3 in the "simplex" array is at the position of the largest coordinate.
i1 = 1 if _simplex[c][0] >= 3 else 0
j1 = 1 if _simplex[c][1] >= 3 else 0
k1 = 1 if _simplex[c][2] >= 3 else 0
l1 = 1 if _simplex[c][3] >= 3 else 0
# The number 2 in the "simplex" array is at the second largest coordinate.
i2 = 1 if _simplex[c][0] >= 2 else 0
j2 = 1 if _simplex[c][1] >= 2 else 0
k2 = 1 if _simplex[c][2] >= 2 else 0
l2 = 1 if _simplex[c][3] >= 2 else 0
# The number 1 in the "simplex" array is at the second smallest coordinate.
i3 = 1 if _simplex[c][0] >= 1 else 0
j3 = 1 if _simplex[c][1] >= 1 else 0
k3 = 1 if _simplex[c][2] >= 1 else 0
l3 = 1 if _simplex[c][3] >= 1 else 0
# The fifth corner has all coordinate offsets = 1, so no need to look that up.
x1 = x0 - i1 + G4 # Offsets for second corner in (x,y,z,w) coords
y1 = y0 - j1 + G4
z1 = z0 - k1 + G4
w1 = w0 - l1 + G4
x2 = x0 - i2 + 2.0*G4 # Offsets for third corner in (x,y,z,w) coords
y2 = y0 - j2 + 2.0*G4
z2 = z0 - k2 + 2.0*G4
w2 = w0 - l2 + 2.0*G4
x3 = x0 - i3 + 3.0*G4 # Offsets for fourth corner in (x,y,z,w) coords
y3 = y0 - j3 + 3.0*G4
z3 = z0 - k3 + 3.0*G4
w3 = w0 - l3 + 3.0*G4
x4 = x0 - 1.0 + 4.0*G4 # Offsets for last corner in (x,y,z,w) coords
y4 = y0 - 1.0 + 4.0*G4
z4 = z0 - 1.0 + 4.0*G4
w4 = w0 - 1.0 + 4.0*G4
# Work out the hashed gradient indices of the five simplex corners
ii = int(i) & 255
jj = int(j) & 255
kk = int(k) & 255
ll = int(l) & 255
gi0 = _perm[ii+_perm[jj+_perm[kk+_perm[ll]]]] % 32
gi1 = _perm[ii+i1+_perm[jj+j1+_perm[kk+k1+_perm[ll+l1]]]] % 32
gi2 = _perm[ii+i2+_perm[jj+j2+_perm[kk+k2+_perm[ll+l2]]]] % 32
gi3 = _perm[ii+i3+_perm[jj+j3+_perm[kk+k3+_perm[ll+l3]]]] % 32
gi4 = _perm[ii+1+_perm[jj+1+_perm[kk+1+_perm[ll+1]]]] % 32
# Calculate the contribution from the five corners
t0 = 0.6 - x0*x0 - y0*y0 - z0*z0 - w0*w0
if t0 < 0:
n0 = 0.0
else:
t0 *= t0
n0 = t0 * t0 * dot4d(_grad4[gi0], x0, y0, z0, w0)
t1 = 0.6 - x1*x1 - y1*y1 - z1*z1 - w1*w1
if t1 < 0:
n1 = 0.0
else:
t1 *= t1
n1 = t1 * t1 * dot4d(_grad4[gi1], x1, y1, z1, w1)
t2 = 0.6 - x2*x2 - y2*y2 - z2*z2 - w2*w2
if t2 < 0:
n2 = 0.0
else:
t2 *= t2
n2 = t2 * t2 * dot4d(_grad4[gi2], x2, y2, z2, w2)
t3 = 0.6 - x3*x3 - y3*y3 - z3*z3 - w3*w3
if t3 < 0:
n3 = 0.0
else:
t3 *= t3
n3 = t3 * t3 * dot4d(_grad4[gi3], x3, y3, z3, w3)
t4 = 0.6 - x4*x4 - y4*y4 - z4*z4 - w4*w4
if t4 < 0:
n4 = 0.0
else:
t4 *= t4
n4 = t4 * t4 * dot4d(_grad4[gi4], x4, y4, z4, w4)
# Sum up and scale the result to cover the range [-1,1]
return 27.0 * (n0 + n1 + n2 + n3 + n4)
def dot2d(g, x, y):
return g[0]*x + g[1]*y
def dot3d(g, x, y, z):
return g[0]*x + g[1]*y + g[2]*z
def dot4d(g, x, y, z, w):
return g[0]*x + g[1]*y + g[2]*z + g[3]*w
"""The gradients are the midpoints of the vertices of a cube."""
_grad3 = [
[1,1,0], [-1,1,0], [1,-1,0], [-1,-1,0],
[1,0,1], [-1,0,1], [1,0,-1], [-1,0,-1],
[0,1,1], [0,-1,1], [0,1,-1], [0,-1,-1]
]
"""The gradients are the midpoints of the vertices of a cube."""
_grad4 = [
[0,1,1,1], [0,1,1,-1], [0,1,-1,1], [0,1,-1,-1],
[0,-1,1,1], [0,-1,1,-1], [0,-1,-1,1], [0,-1,-1,-1],
[1,0,1,1], [1,0,1,-1], [1,0,-1,1], [1,0,-1,-1],
[-1,0,1,1], [-1,0,1,-1], [-1,0,-1,1], [-1,0,-1,-1],
[1,1,0,1], [1,1,0,-1], [1,-1,0,1], [1,-1,0,-1],
[-1,1,0,1], [-1,1,0,-1], [-1,-1,0,1], [-1,-1,0,-1],
[1,1,1,0], [1,1,-1,0], [1,-1,1,0], [1,-1,-1,0],
[-1,1,1,0], [-1,1,-1,0], [-1,-1,1,0], [-1,-1,-1,0]
]
"""Permutation table. The same list is repeated twice."""
_perm = [
151,160,137,91,90,15,131,13,201,95,96,53,194,233,7,225,140,36,103,30,69,142,
8,99,37,240,21,10,23,190,6,148,247,120,234,75,0,26,197,62,94,252,219,203,117,
35,11,32,57,177,33,88,237,149,56,87,174,20,125,136,171,168,68,175,74,165,71,
134,139,48,27,166,77,146,158,231,83,111,229,122,60,211,133,230,220,105,92,41,
55,46,245,40,244,102,143,54,65,25,63,161,1,216,80,73,209,76,132,187,208,89,
18,169,200,196,135,130,116,188,159,86,164,100,109,198,173,186,3,64,52,217,226,
250,124,123,5,202,38,147,118,126,255,82,85,212,207,206,59,227,47,16,58,17,182,
189,28,42,223,183,170,213,119,248,152,2,44,154,163,70,221,153,101,155,167,43,
172,9,129,22,39,253,19,98,108,110,79,113,224,232,178,185,112,104,218,246,97,
228,251,34,242,193,238,210,144,12,191,179,162,241,81,51,145,235,249,14,239,
107,49,192,214,31,181,199,106,157,184,84,204,176,115,121,50,45,127,4,150,254,
138,236,205,93,222,114,67,29,24,72,243,141,128,195,78,66,215,61,156,180,
151,160,137,91,90,15,131,13,201,95,96,53,194,233,7,225,140,36,103,30,69,142,
8,99,37,240,21,10,23,190,6,148,247,120,234,75,0,26,197,62,94,252,219,203,117,
35,11,32,57,177,33,88,237,149,56,87,174,20,125,136,171,168,68,175,74,165,71,
134,139,48,27,166,77,146,158,231,83,111,229,122,60,211,133,230,220,105,92,41,
55,46,245,40,244,102,143,54,65,25,63,161,1,216,80,73,209,76,132,187,208,89,
18,169,200,196,135,130,116,188,159,86,164,100,109,198,173,186,3,64,52,217,226,
250,124,123,5,202,38,147,118,126,255,82,85,212,207,206,59,227,47,16,58,17,182,
189,28,42,223,183,170,213,119,248,152,2,44,154,163,70,221,153,101,155,167,43,
172,9,129,22,39,253,19,98,108,110,79,113,224,232,178,185,112,104,218,246,97,
228,251,34,242,193,238,210,144,12,191,179,162,241,81,51,145,235,249,14,239,
107,49,192,214,31,181,199,106,157,184,84,204,176,115,121,50,45,127,4,150,254,
138,236,205,93,222,114,67,29,24,72,243,141,128,195,78,66,215,61,156,180
]
"""A lookup table to traverse the simplex around a given point in 4D."""
_simplex = [
[0,1,2,3],[0,1,3,2],[0,0,0,0],[0,2,3,1],[0,0,0,0],[0,0,0,0],[0,0,0,0],[1,2,3,0],
[0,2,1,3],[0,0,0,0],[0,3,1,2],[0,3,2,1],[0,0,0,0],[0,0,0,0],[0,0,0,0],[1,3,2,0],
[0,0,0,0],[0,0,0,0],[0,0,0,0],[0,0,0,0],[0,0,0,0],[0,0,0,0],[0,0,0,0],[0,0,0,0],
[1,2,0,3],[0,0,0,0],[1,3,0,2],[0,0,0,0],[0,0,0,0],[0,0,0,0],[2,3,0,1],[2,3,1,0],
[1,0,2,3],[1,0,3,2],[0,0,0,0],[0,0,0,0],[0,0,0,0],[2,0,3,1],[0,0,0,0],[2,1,3,0],
[0,0,0,0],[0,0,0,0],[0,0,0,0],[0,0,0,0],[0,0,0,0],[0,0,0,0],[0,0,0,0],[0,0,0,0],
[2,0,1,3],[0,0,0,0],[0,0,0,0],[0,0,0,0],[3,0,1,2],[3,0,2,1],[0,0,0,0],[3,1,2,0],
[2,1,0,3],[0,0,0,0],[0,0,0,0],[0,0,0,0],[3,1,0,2],[0,0,0,0],[3,2,0,1],[3,2,1,0]
]
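if __name__ == '__main__':
    # Usage sketch: sample the 2D noise on a small grid. raw_noise_2d stays
    # within (-1, 1); the scaled multi-octave call remaps its output to the
    # illustrative range [0, 255].
    for gy in range(4):
        row = []
        for gx in range(4):
            raw = raw_noise_2d(gx * 0.1, gy * 0.1)
            scaled = scaled_octave_noise_2d(4, 0.5, 0.1, 0.0, 255.0, gx, gy)
            row.append('%+.3f/%6.1f' % (raw, scaled))
        print(' '.join(row))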
|
|
# Licensed under a 3-clause BSD style license - see LICENSE.rst
from __future__ import absolute_import, division, print_function
import os
import numpy as np
from astropy.wcs import WCS
from astropy.io import fits
from astropy import units as u
from astropy.coordinates import SkyCoord
try:
from astropy.extern import six
except ImportError:
import six
class WCSProj(object):
"""Class that encapsulates both a WCS object and the definition of
the image extent (number of pixels). Also provides a number of
helper methods for accessing the properties of the WCS object."""
def __init__(self, wcs, npix):
self._wcs = wcs
self._npix = np.array(npix, ndmin=1)
self._coordsys = get_coordsys(wcs)
cdelt0 = np.abs(self.wcs.wcs.cdelt[0])
cdelt1 = np.abs(self.wcs.wcs.cdelt[1])
xindex = 0
yindex = 1
self._width = np.array([cdelt0 * self._npix[xindex],
cdelt1 * self._npix[yindex]])
self._pix_center = np.array([(self._npix[xindex] - 1.0) / 2.,
(self._npix[yindex] - 1.0) / 2.])
self._pix_size = np.array([cdelt0, cdelt1])
self._skydir = SkyCoord.from_pixel(self._pix_center[0],
self._pix_center[1],
self.wcs)
@property
def wcs(self):
return self._wcs
@property
def coordsys(self):
return self._coordsys
@property
def skydir(self):
"""Return the sky coordinate of the image center."""
return self._skydir
@property
def width(self):
"""Return the dimensions of the image."""
return self._width
@property
def npix(self):
return self._npix
@classmethod
def create(cls, skydir, cdelt, npix, coordsys='CEL', projection='AIT'):
npix = np.array(npix, ndmin=1)
crpix = npix / 2. + 0.5
wcs = create_wcs(skydir, coordsys, projection,
cdelt, crpix)
return cls(wcs, npix)
def distance_to_edge(self, skydir):
"""Return the angular distance from the given direction and
the edge of the projection."""
xpix, ypix = skydir.to_pixel(self.wcs, origin=0)
deltax = np.array((xpix - self._pix_center[0]) * self._pix_size[0],
ndmin=1)
deltay = np.array((ypix - self._pix_center[1]) * self._pix_size[1],
ndmin=1)
deltax = np.abs(deltax) - 0.5 * self._width[0].value
deltay = np.abs(deltay) - 0.5 * self._width[1].value
m0 = (deltax < 0) & (deltay < 0)
m1 = (deltax > 0) & (deltay < 0)
m2 = (deltax < 0) & (deltay > 0)
m3 = (deltax > 0) & (deltay > 0)
mx = np.abs(deltax) <= np.abs(deltay)
my = np.abs(deltay) < np.abs(deltax)
delta = np.zeros(len(deltax))
delta[(m0 & mx) | (m3 & my) | m1] = deltax[(m0 & mx) | (m3 & my) | m1]
delta[(m0 & my) | (m3 & mx) | m2] = deltay[(m0 & my) | (m3 & mx) | m2]
return delta
def distance_to_edge(geom, skydir):
"""Return the angular distance from the given direction and
the edge of the projection."""
# FIXME: We should add a pixel_size property in gammapy.maps
# FIXME: We should make this into a MapGeom method
xpix, ypix = skydir.to_pixel(geom.wcs, origin=0)
deltax = np.array((xpix - geom.center_pix[0]) * geom._cdelt[0],
ndmin=1)
deltay = np.array((ypix - geom.center_pix[1]) * geom._cdelt[1],
ndmin=1)
deltax = np.abs(deltax) - 0.5 * geom.width[0].value
deltay = np.abs(deltay) - 0.5 * geom.width[1].value
m0 = (deltax < 0) & (deltay < 0)
m1 = (deltax > 0) & (deltay < 0)
m2 = (deltax < 0) & (deltay > 0)
m3 = (deltax > 0) & (deltay > 0)
mx = np.abs(deltax) <= np.abs(deltay)
my = np.abs(deltay) < np.abs(deltax)
delta = np.zeros(len(deltax))
delta[(m0 & mx) | (m3 & my) | m1] = deltax[(m0 & mx) | (m3 & my) | m1]
delta[(m0 & my) | (m3 & mx) | m2] = deltay[(m0 & my) | (m3 & mx) | m2]
return delta
def create_wcs(skydir, coordsys='CEL', projection='AIT',
cdelt=1.0, crpix=1., naxis=2, energies=None):
"""Create a WCS object.
Parameters
----------
skydir : `~astropy.coordinates.SkyCoord`
Sky coordinate of the WCS reference point.
coordsys : str
projection : str
cdelt : float or (float,float)
In the first case the same value is used for x and y axes
crpix : float or (float,float)
In the first case the same value is used for x and y axes
naxis : {2, 3}
Number of dimensions of the projection.
energies : array-like
Array of energies that defines the third dimension if naxis=3.
"""
w = WCS(naxis=naxis)
if coordsys == 'CEL':
w.wcs.ctype[0] = 'RA---%s' % (projection)
w.wcs.ctype[1] = 'DEC--%s' % (projection)
w.wcs.crval[0] = skydir.icrs.ra.deg
w.wcs.crval[1] = skydir.icrs.dec.deg
elif coordsys == 'GAL':
w.wcs.ctype[0] = 'GLON-%s' % (projection)
w.wcs.ctype[1] = 'GLAT-%s' % (projection)
w.wcs.crval[0] = skydir.galactic.l.deg
w.wcs.crval[1] = skydir.galactic.b.deg
else:
raise Exception('Unrecognized coordinate system.')
try:
w.wcs.crpix[0] = crpix[0]
w.wcs.crpix[1] = crpix[1]
    except (TypeError, IndexError):
w.wcs.crpix[0] = crpix
w.wcs.crpix[1] = crpix
try:
w.wcs.cdelt[0] = cdelt[0]
w.wcs.cdelt[1] = cdelt[1]
    except (TypeError, IndexError):
w.wcs.cdelt[0] = -cdelt
w.wcs.cdelt[1] = cdelt
w = WCS(w.to_header())
if naxis == 3 and energies is not None:
w.wcs.crpix[2] = 1
w.wcs.crval[2] = energies[0]
w.wcs.cdelt[2] = energies[1] - energies[0]
w.wcs.ctype[2] = 'Energy'
w.wcs.cunit[2] = 'MeV'
return w
def wcs_add_energy_axis(wcs, energies):
"""Copy a WCS object, and add on the energy axis.
Parameters
----------
wcs : `~astropy.wcs.WCS`
WCS
energies : array-like
Array of energies.
"""
if wcs.naxis != 2:
raise Exception(
'wcs_add_energy_axis, input WCS naxis != 2 %i' % wcs.naxis)
w = WCS(naxis=3)
w.wcs.crpix[0] = wcs.wcs.crpix[0]
w.wcs.crpix[1] = wcs.wcs.crpix[1]
w.wcs.ctype[0] = wcs.wcs.ctype[0]
w.wcs.ctype[1] = wcs.wcs.ctype[1]
w.wcs.crval[0] = wcs.wcs.crval[0]
w.wcs.crval[1] = wcs.wcs.crval[1]
w.wcs.cdelt[0] = wcs.wcs.cdelt[0]
w.wcs.cdelt[1] = wcs.wcs.cdelt[1]
w = WCS(w.to_header())
w.wcs.crpix[2] = 1
w.wcs.crval[2] = energies[0]
w.wcs.cdelt[2] = energies[1] - energies[0]
w.wcs.ctype[2] = 'Energy'
return w
def offset_to_sky(skydir, offset_lon, offset_lat,
coordsys='CEL', projection='AIT'):
"""Convert a cartesian offset (X,Y) in the given projection into
a pair of spherical coordinates."""
offset_lon = np.array(offset_lon, ndmin=1)
offset_lat = np.array(offset_lat, ndmin=1)
w = create_wcs(skydir, coordsys, projection)
pixcrd = np.vstack((offset_lon, offset_lat)).T
return w.wcs_pix2world(pixcrd, 0)
def sky_to_offset(skydir, lon, lat, coordsys='CEL', projection='AIT'):
"""Convert sky coordinates to a projected offset. This function
is the inverse of offset_to_sky."""
w = create_wcs(skydir, coordsys, projection)
skycrd = np.vstack((lon, lat)).T
if len(skycrd) == 0:
return skycrd
return w.wcs_world2pix(skycrd, 0)
def offset_to_skydir(skydir, offset_lon, offset_lat,
coordsys='CEL', projection='AIT'):
"""Convert a cartesian offset (X,Y) in the given projection into
a SkyCoord."""
offset_lon = np.array(offset_lon, ndmin=1)
offset_lat = np.array(offset_lat, ndmin=1)
w = create_wcs(skydir, coordsys, projection)
return SkyCoord.from_pixel(offset_lon, offset_lat, w, 0)
def skydir_to_pix(skydir, wcs):
"""Convert skydir object to pixel coordinates.
Gracefully handles 0-d coordinate arrays.
Parameters
----------
skydir : `~astropy.coordinates.SkyCoord`
wcs : `~astropy.wcs.WCS`
Returns
-------
xp, yp : `numpy.ndarray`
The pixel coordinates
"""
if len(skydir.shape) > 0 and len(skydir) == 0:
return [np.empty(0), np.empty(0)]
return skydir.to_pixel(wcs, origin=0)
def pix_to_skydir(xpix, ypix, wcs):
"""Convert pixel coordinates to a skydir object.
Gracefully handles 0-d coordinate arrays.
Always returns a celestial coordinate.
Parameters
----------
xpix : `numpy.ndarray`
ypix : `numpy.ndarray`
wcs : `~astropy.wcs.WCS`
"""
xpix = np.array(xpix)
ypix = np.array(ypix)
if xpix.ndim > 0 and len(xpix) == 0:
return SkyCoord(np.empty(0), np.empty(0), unit='deg',
frame='icrs')
return SkyCoord.from_pixel(xpix, ypix, wcs,
origin=0).transform_to('icrs')
def get_coordsys(wcs):
if 'RA' in wcs.wcs.ctype[0]:
return 'CEL'
elif 'GLON' in wcs.wcs.ctype[0]:
return 'GAL'
raise ValueError('Unrecognized WCS coordinate system.')
def get_coordsys_from_geom(geom):
if geom.frame == 'galactic':
return 'GAL'
elif geom.frame == 'icrs':
return 'CEL'
raise ValueError('Unrecognized WCS coordinate system %s.' % geom.frame)
def coordsys_to_frame(coordsys):
if coordsys == 'GAL':
return 'galactic'
elif coordsys == 'CEL':
return 'icrs'
raise ValueError('Unrecognized WCS coordinate system %s.' % coordsys)
def get_target_skydir(config, ref_skydir=None):
if ref_skydir is None:
ref_skydir = SkyCoord(0.0, 0.0, unit=u.deg)
radec = config.get('radec', None)
if isinstance(radec, six.string_types):
return SkyCoord(radec, unit=u.deg)
elif isinstance(radec, list):
return SkyCoord(radec[0], radec[1], unit=u.deg)
ra = config.get('ra', None)
dec = config.get('dec', None)
if ra is not None and dec is not None:
return SkyCoord(ra, dec, unit=u.deg, frame='icrs')
glon = config.get('glon', None)
glat = config.get('glat', None)
if glon is not None and glat is not None:
return SkyCoord(glon, glat, unit=u.deg,
frame='galactic').transform_to('icrs')
offset_ra = config.get('offset_ra', None)
offset_dec = config.get('offset_dec', None)
if offset_ra is not None and offset_dec is not None:
return offset_to_skydir(ref_skydir, offset_ra, offset_dec,
coordsys='CEL')[0]
offset_glon = config.get('offset_glon', None)
offset_glat = config.get('offset_glat', None)
if offset_glon is not None and offset_glat is not None:
return offset_to_skydir(ref_skydir, offset_glon, offset_glat,
coordsys='GAL')[0].transform_to('icrs')
return ref_skydir
def wcs_to_axes(w, npix):
"""Generate a sequence of bin edge vectors corresponding to the
axes of a WCS object."""
npix = npix[::-1]
x = np.linspace(-(npix[0]) / 2., (npix[0]) / 2.,
npix[0] + 1) * np.abs(w.wcs.cdelt[0])
y = np.linspace(-(npix[1]) / 2., (npix[1]) / 2.,
npix[1] + 1) * np.abs(w.wcs.cdelt[1])
if w.wcs.naxis == 2:
return x, y
cdelt2 = np.log10((w.wcs.cdelt[2] + w.wcs.crval[2]) / w.wcs.crval[2])
z = (np.linspace(0, npix[2], npix[2] + 1)) * cdelt2
z += np.log10(w.wcs.crval[2])
return x, y, z
def wcs_to_coords(w, shape):
"""Generate an N x D list of pixel center coordinates where N is
the number of pixels and D is the dimensionality of the map."""
if w.naxis == 2:
y, x = wcs_to_axes(w, shape)
elif w.naxis == 3:
z, y, x = wcs_to_axes(w, shape)
else:
raise Exception("Wrong number of WCS axes %i" % w.naxis)
x = 0.5 * (x[1:] + x[:-1])
y = 0.5 * (y[1:] + y[:-1])
if w.naxis == 2:
x = np.ravel(np.ones(shape) * x[:, np.newaxis])
y = np.ravel(np.ones(shape) * y[np.newaxis, :])
return np.vstack((x, y))
z = 0.5 * (z[1:] + z[:-1])
x = np.ravel(np.ones(shape) * x[:, np.newaxis, np.newaxis])
y = np.ravel(np.ones(shape) * y[np.newaxis, :, np.newaxis])
z = np.ravel(np.ones(shape) * z[np.newaxis, np.newaxis, :])
return np.vstack((x, y, z))
def get_map_skydir(filename, maphdu=0):
hdulist = fits.open(filename)
wcs = WCS(hdulist[maphdu].header)
return wcs_to_skydir(wcs)
def get_cel_to_gal_angle(skydir):
"""Calculate the rotation angle in radians between the longitude
axes of a local projection in celestial and galactic coordinates.
Parameters
----------
skydir : `~astropy.coordinates.SkyCoord`
Direction of projection center.
Returns
-------
angle : float
Rotation angle in radians.
"""
wcs0 = create_wcs(skydir, coordsys='CEL')
wcs1 = create_wcs(skydir, coordsys='GAL')
x, y = SkyCoord.to_pixel(SkyCoord.from_pixel(1.0, 0.0, wcs0), wcs1)
return np.arctan2(y, x)
def wcs_to_skydir(wcs):
lon = wcs.wcs.crval[0]
lat = wcs.wcs.crval[1]
coordsys = get_coordsys(wcs)
if coordsys == 'GAL':
return SkyCoord(lon, lat, unit='deg',
frame='galactic').transform_to('icrs')
else:
return SkyCoord(lon, lat, unit='deg', frame='icrs')
def is_galactic(wcs):
coordsys = get_coordsys(wcs)
if coordsys == 'GAL':
return True
elif coordsys == 'CEL':
return False
else:
raise Exception('Unsupported coordinate system: %s' % coordsys)
def extract_mapcube_region(infile, skydir, outfile, maphdu=0):
"""Extract a region out of an all-sky mapcube file.
Parameters
----------
infile : str
Path to mapcube file.
skydir : `~astropy.coordinates.SkyCoord`
"""
h = fits.open(os.path.expandvars(infile))
npix = 200
shape = list(h[maphdu].data.shape)
shape[1] = 200
shape[2] = 200
wcs = WCS(h[maphdu].header)
skywcs = WCS(h[maphdu].header, naxis=[1, 2])
coordsys = get_coordsys(skywcs)
region_wcs = wcs.deepcopy()
if coordsys == 'CEL':
region_wcs.wcs.crval[0] = skydir.ra.deg
region_wcs.wcs.crval[1] = skydir.dec.deg
elif coordsys == 'GAL':
region_wcs.wcs.crval[0] = skydir.galactic.l.deg
region_wcs.wcs.crval[1] = skydir.galactic.b.deg
else:
raise Exception('Unrecognized coordinate system.')
region_wcs.wcs.crpix[0] = npix // 2 + 0.5
region_wcs.wcs.crpix[1] = npix // 2 + 0.5
from reproject import reproject_interp
data, footprint = reproject_interp(h, region_wcs.to_header(),
hdu_in=maphdu,
shape_out=shape)
hdu_image = fits.PrimaryHDU(data, header=region_wcs.to_header())
hdulist = fits.HDUList([hdu_image, h['ENERGIES']])
hdulist.writeto(outfile, clobber=True)
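if __name__ == '__main__':
    # Usage sketch (illustrative sky position and binning, not tied to any
    # particular analysis): build a local AIT projection, add an energy axis,
    # and query a WCSProj for its size.
    center = SkyCoord(83.63, 22.01, unit='deg', frame='icrs')
    wcs2d = create_wcs(center, coordsys='CEL', projection='AIT',
                       cdelt=0.1, crpix=50.5)
    wcs3d = wcs_add_energy_axis(wcs2d, np.array([100., 1000., 10000.]))
    proj = WCSProj.create(center, cdelt=0.1, npix=(100, 100))
    print('center pixel: ', skydir_to_pix(center, wcs2d))
    print('projection width (deg): ', proj.width)
    print('cube WCS naxis: ', wcs3d.naxis)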
|
|
# Copyright (C) 2013-2015 MetaMorph Software, Inc
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this data, including any software or models in source or binary
# form, as well as any drawings, specifications, and documentation
# (collectively "the Data"), to deal in the Data without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Data, and to
# permit persons to whom the Data is furnished to do so, subject to the
# following conditions:
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Data.
# THE DATA IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS, SPONSORS, DEVELOPERS, CONTRIBUTORS, OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE DATA OR THE USE OR OTHER DEALINGS IN THE DATA.
# =======================
# This version of the META tools is a fork of an original version produced
# by Vanderbilt University's Institute for Software Integrated Systems (ISIS).
# Their license statement:
# Copyright (C) 2011-2014 Vanderbilt University
# Developed with the sponsorship of the Defense Advanced Research Projects
# Agency (DARPA) and delivered to the U.S. Government with Unlimited Rights
# as defined in DFARS 252.227-7013.
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this data, including any software or models in source or binary
# form, as well as any drawings, specifications, and documentation
# (collectively "the Data"), to deal in the Data without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Data, and to
# permit persons to whom the Data is furnished to do so, subject to the
# following conditions:
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Data.
# THE DATA IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS, SPONSORS, DEVELOPERS, CONTRIBUTORS, OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE DATA OR THE USE OR OTHER DEALINGS IN THE DATA.
#!/bin/python
from __future__ import with_statement
import re
import sys
import os.path
import win32com.client
import StringIO
import platform
import runpy
import subprocess
import itertools
import posixpath
import urllib
from urlparse import urlparse
# Disable early binding: full of race conditions writing the cache files,
# and changes the semantics since inheritance isn't handled correctly
import win32com.client.gencache
_savedGetClassForCLSID = win32com.client.gencache.GetClassForCLSID
win32com.client.gencache.GetClassForCLSID = lambda x: None
def get_last_modified(filename):
local_file = urllib.urlopen(filename)
last_modified = local_file.info()['Last-Modified']
local_file.close()
return last_modified
def urlretrieve(url, file=None):
if file is None:
file = posixpath.basename(urlparse(url).path)
class NotModifiedException(Exception):
pass
class MyURLopener(urllib.FancyURLopener):
def http_error_default(self, url, fp, errcode, errmsg, headers):
void = fp.read()
fp.close()
if errcode == 304:
raise NotModifiedException()
else:
raise Exception("Error downloading '" + url + "': " + str(errcode) + " " + errmsg)
opener = MyURLopener()
if os.path.isfile(file):
opener.addheader('If-Modified-Since', get_last_modified(file))
try:
(file, msg) = opener.retrieve(url, file)
except NotModifiedException:
pass
# Elevation helpers
def execute_elevated(*args):
# FIXME: support **kwargs
from win32com.shell.shell import ShellExecuteEx
from win32com.shell import shellcon
import win32process, win32event
import winxpgui
import win32api
import win32con
try:
hwnd = winxpgui.GetConsoleWindow()
except winxpgui.error:
hwnd = 0
parameters = ""
if not hasattr(sys, "frozen"):
# Not running under py2exe exe
parameters += "\"" + sys.argv[0] + "\" "
parameters += " ".join(map(lambda x: "\"" + str(x) + "\"", args))
print "Executing elevated with parameters " + parameters
# TODO: capture output (maybe via named pipe)
rc = ShellExecuteEx(hwnd=hwnd, fMask=shellcon.SEE_MASK_NOCLOSEPROCESS, lpVerb="runas", lpFile=sys.executable,
lpParameters=parameters, nShow=win32con.SW_SHOW)
hproc = rc['hProcess']
win32event.WaitForSingleObject(hproc, win32event.INFINITE)
exit_code = win32process.GetExitCodeProcess(hproc)
if exit_code:
raise Exception("Error: subprocess failed (exit code %s)." % exit_code)
def is_elevated():
import win32security
import win32api
hToken = win32security.OpenProcessToken(win32api.GetCurrentProcess(), win32security.TOKEN_QUERY)
return win32security.GetTokenInformation(hToken, win32security.TokenElevation) != 0
def maybe_elevate():
def decorator(func):
if not hasattr(sys, "getwindowsversion") or sys.getwindowsversion()[0] < 6:
wrap = func
else:
def wrap(*args, **kwargs):
if not is_elevated():
execute_elevated(func.__name__, *args, **kwargs)
else:
func(*args, **kwargs)
setattr(sys.modules[__name__], func.__name__, wrap)
return wrap
return decorator
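# Note on maybe_elevate: on Windows Vista and later the decorator replaces the
# decorated function with a wrapper that, if the current process is not
# elevated, re-launches this script elevated via ShellExecuteEx("runas") and
# passes the function name plus its arguments on the command line; on XP or on
# interpreters without sys.getwindowsversion the function is left unchanged.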
@maybe_elevate()
def elevated_check_call(*args):
return subprocess.check_call(args)
# GME functions
def create_project(project, connection, paradigm):
return project.Create(connection, paradigm)
# aka CreateMga.vbs
def xme2mga(xmefile, mgafile=None):
# use abspath, since on GME x64-only, parser will be called out-of-proc (which won't share the same cwd)
xmefile = os.path.abspath(xmefile)
if not mgafile:
mgafile = os.path.splitext(xmefile)[0] + ".mga"
with Project.open(xmefile, mgafile) as project:
project.save(project.mgafile)
return project.mgafile
def run_interpreter(interpreter, file, focusobj=None, selectedobj=None, param=None, mga_to_save=None, save=True):
if param is None:
param = 128
with Project.open(file, mga_to_save=mga_to_save) as project:
project.run_interpreter(interpreter, focusobj, selectedobj, param)
if not save:
project.project.Close(True)
del(project)
import gc
gc.collect()
def run_interpreter_with_focusobj(interpreter, file, focusobj=None, selectedobj=None, param=None, mga_to_save=None, save=True):
if param is None:
param = 128
with Project.open(file, mga_to_save=mga_to_save) as project:
if focusobj:
focusobj = project.project.GetFCOByID(focusobj)
project.run_interpreter(interpreter, focusobj, selectedobj, param)
if not save:
project.project.Close(True)
# MGAUTILLib.regaccessmode_enum.REGACCESS_BOTH = 3
def get_paradigm_file(paradigm, regaccess=3):
"Returns the .mta file for a given registered paradigm"
registrar = win32com.client.DispatchEx("Mga.MgaRegistrar")
guid = registrar.GetParadigmGUIDStringDisp(regaccess, paradigm)
import uuid
if platform.system() != 'Java':
buf = buffer(uuid.UUID(guid).bytes_le, 0, 16)
else:
buf = str(uuid.UUID(guid).bytes_le[0:16])
(connstr, guid) = registrar.QueryParadigm(paradigm, None, buf, regaccess)
    # >>> connstr
# "MGA=....mta"
return connstr[4:]
def _associate(progid, paradigm, regaccess):
registrar = win32com.client.DispatchEx("Mga.MgaRegistrar")
registrar.Associate(progid, paradigm, regaccess)
@maybe_elevate()
def _associate_elevated(*args):
_associate(*args)
def associate(progid, paradigm, regaccess=1):
"Associate a component with a paradigm"
regaccess = int(regaccess)
if regaccess != 1:
_associate_elevated(progid, paradigm, regaccess)
else:
_associate(progid, paradigm, regaccess)
def is_registered(paradigm):
registrar = win32com.client.DispatchEx("Mga.MgaRegistrar")
paradigms = []
# REGACCESS_USER = 1
paradigms.extend(registrar.GetParadigmsDisp(1))
# REGACCESS_SYSTEM = 2
paradigms.extend(registrar.GetParadigmsDisp(2))
return filter(lambda p: p == paradigm, paradigms)
REGISTER = 128
DONT_REGISTER = 0
def register_if_not_registered(file):
"Register an xme or mga if it has not already been registered"
if os.path.splitext(file)[1] == ".xmp":
if not is_registered(os.path.basename(os.path.splitext(file)[0])) or (platform.system() != 'Java' and not os.path.isfile(get_paradigm_file(os.path.splitext(os.path.basename(file))[0]))):
regxmp(file)
return
# if we don't give GME an absolute path, it registers the mta with a relative path (which is bad)
with Project.open(os.path.abspath(file), mga_to_save=True) as project:
# KMS FIXME: build systems need to run this before MetaInterpreter. a new build_customization is needed
# project.run_interpreter("MGA.Interpreter.MetaGME2Uml", mgafile, None, None, 128)
paradigm = project.project.RootFolder.Name
if not is_registered(paradigm):
project.run_interpreter("MGA.Interpreter.MetaInterpreter", param=REGISTER)
print "Paradigm '%s' is now registered" % paradigm
elif not os.path.isfile(get_paradigm_file(paradigm)):
#FIXME: should we look for the .xmp?
project.run_interpreter("MGA.Interpreter.MetaInterpreter", param=REGISTER)
print "Paradigm '%s' had nonexistant .mta; it is now reregistered" % paradigm
# TODO: can we check if it is up-to-date?
# or os.path.getmtime(get_paradigm_file(paradigm)) < os.path.getmtime(file):
else:
print "Paradigm '%s' is already registered" % paradigm
def mga2xmp(mgafile, register=REGISTER):
# if we don't give GME an absolute path, it registers the mta with a relative path (which is bad)
run_interpreter("MGA.Interpreter.MetaInterpreter", os.path.abspath(mgafile), param=register, mga_to_save=True, save=False)
def xme2xmp(xmefile, register=REGISTER):
mgafile = xme2mga(xmefile)
mga2xmp(mgafile, register)
return mgafile
def regmta(mtafile, regaccess=1):
regaccess = int(regaccess)
if regaccess != 1:
_regxmp_elevated(mtafile, regaccess)
else:
_regxmp(mtafile, regaccess)
def _regxmp(xmpfile, regaccess):
REG_USER = 1
REG_SYSTEM = 2
REG_BOTH = 3
registrar = win32com.client.DispatchEx("Mga.MgaRegistrar")
if os.path.splitext(xmpfile)[1].lower() == ".xmp":
registrar.RegisterParadigmFromData("XML=" + os.path.abspath(xmpfile), "", regaccess)
else:
registrar.RegisterParadigmFromData("MGA=" + os.path.abspath(xmpfile), "", regaccess)
@maybe_elevate()
def _regxmp_elevated(xmpfile, regaccess):
_regxmp(xmpfile, regaccess)
def regxmp(xmpfile, regaccess=1):
regaccess = int(regaccess)
if regaccess != 1:
_regxmp_elevated(xmpfile, regaccess)
else:
_regxmp(xmpfile, regaccess)
def _reggmexmps(regaccess):
regaccess = int(regaccess)
for file in [ 'HFSM/HFSM.xmp', 'MetaGME/MetaGME.xmp', 'SF/SF.xmp', 'UML/UML.xmp' ]:
regxmp(os.path.join(os.path.join(os.environ['GME_ROOT'], 'Paradigms'), file), regaccess)
@maybe_elevate()
def _reggmexmps_elevated(regaccess):
_reggmexmps(regaccess)
def reggmexmps(regaccess=1):
regaccess = int(regaccess)
if regaccess != 1:
_reggmexmps_elevated(regaccess)
else:
_reggmexmps(regaccess)
def mga2xme(mgafile, xmefile=None):
if not xmefile:
xmefile = os.path.splitext(mgafile)[0] + ".xme"
with Project.open(mgafile) as project:
project.save(xmefile)
return xmefile
def register_component(file, warn_on_tlb_error=None):
'''Register a GME component .dll'''
# TODO: on Vista or 7 we need to start an elevated registrar
registrar = win32com.client.DispatchEx("Mga.MgaRegistrar")
# REGACCESS_BOTH = 3,
registrar.RegisterComponentLibrary(file, 3)
# UDM functions
def meta2uml(mgafile, umlfile=None):
if not os.path.isfile(mgafile):
raise Exception("'" + mgafile + "' not found")
# n.b. this uses the SxS config in gmepy-setup.py under gmepy.exe (but not gme.py)
with Project.open(mgafile) as project:
project.run_interpreter("MGA.Interpreter.MetaGME2Uml", None, None, 128)
output_path = os.path.join(os.path.dirname(mgafile), project.project.RootFolder.Name + "_uml.mga")
# project.project.Save("MGA=" + os.path.splitext(mgafile)[0] + "_after_MetaGME2Uml.mga")
project.project.Close(True)
if umlfile and os.path.normcase(os.path.abspath(umlfile)) != os.path.normcase(os.path.abspath(output_path)):
import shutil
try:
os.remove(umlfile)
except OSError:
pass
shutil.move(output_path, umlfile)
#subprocess.check_call(["MetaGME2UML.exe", mgafile, umlfile])
# aka CreateUdmXml.vbs
def mga2udmxml(mgafile):
run_interpreter("MGA.Interpreter.UML2XML", mgafile, param=128, save=False)
# GReAT functions
def RunGreatMasterInt(file):
file = os.path.abspath(file)
mtime = os.stat(file).st_mtime
# The interpreter updates the GReAT config path, so we need to save the .mga
run_interpreter("MGA.Interpreter.GReAT Master Interpreter", file, param=128, save=True)
# Let's lie and modify the timestamp so incremental build behaves better
os.utime(file, (mtime, mtime))
# Explorer context menu
def context_menu_reg():
"""Register explorer context menu options"""
import _winreg
if hasattr(sys, "frozen"):
# Running under py2exe exe
gmepydir = os.path.dirname(unicode(sys.executable, sys.getfilesystemencoding( )))
else:
gmepydir = os.path.dirname(__file__)
# Windows won't let us start gme.py from the context menu, so use the exe
gmepy = gmepydir + "\\gmepy.exe"
mga = "mga"
xme = "xme"
xmp = "xmp"
menus = [ (mga, "mga2xme"),
(mga, "mga2xmp"),
(mga, "mga2udmxml"),
(xme, "xme2mga"),
(xmp, "regxmp"),
]
regname = gmepydir + "\\gmepy_context_menu.reg"
with open(regname, "w") as reg:
reg.write("Windows Registry Editor Version 5.00\n")
for p in menus:
try:
key = _winreg.OpenKey(_winreg.ConnectRegistry(None, _winreg.HKEY_CLASSES_ROOT), "."+p[0])
n,v,t = _winreg.EnumValue(key, 0)
ext = v
_winreg.CloseKey(key)
except WindowsError:
ext = "."+p[0]
str = """[HKEY_CLASSES_ROOT\{ext}\shell]
[HKEY_CLASSES_ROOT\{ext}\shell\{name}]
[HKEY_CLASSES_ROOT\{ext}\shell\{name}\command]
@="\\"{gmepy}\\" {name} \\\"%1\\\""
""".format(ext=ext, name=p[1], gmepy=gmepy.replace("\\", "\\\\"))
reg.write(str)
elevated_check_call("regedit", regname)
OBJTYPE_MODEL = 1
OBJTYPE_ATOM = 2
OBJTYPE_REFERENCE = 3
OBJTYPE_CONNECTION = 4
OBJTYPE_SET = 5
OBJTYPE_FOLDER = 6
def is_container(fco):
return fco.ObjType == OBJTYPE_MODEL or fco.ObjType == OBJTYPE_FOLDER
import tempfile
class Project():
def __init__(self, com_project):
self.project = com_project
def __enter__(self):
self.begin_transaction()
return self
def __exit__(self, exc_type, exc_value, traceback):
if self.project.ProjectStatus == 3 or self.project.ProjectStatus == 4:
if exc_type:
self.project.AbortTransaction()
else:
self.project.CommitTransaction()
if self.territory:
self.territory.Destroy()
self.territory = None
if self.project.ProjectStatus != 0:
if exc_type:
self.project.Close(True)
else:
self.project.Close()
def get_fco(self, path):
def get_children(mga_object):
import itertools
children = mga_object.ChildFCOs
            if mga_object.ObjType == OBJTYPE_FOLDER:
                children = itertools.chain(children, mga_object.ChildFolders)
return children
path_a = path.split("/")
current = self.project.RootFolder
for name in path_a[0:-1]:
containers = filter(is_container, get_children(current))
matches = list(filter(lambda x: x.Name == name, containers))
if matches:
current = matches[0]
else:
raise Exception("Cant find %s in path %s" % (name, path))
matches = [child for child in get_children(current) if child.Name == path_a[-1]]
if matches:
return matches[0]
else:
raise Exception("Cant find %s in path %s" % (path_a[-1], path))
def save(self, filename=None):
if not filename:
            filename = self.filename
filename = os.path.abspath(filename)
self.project.CommitTransaction()
if self.territory:
self.territory.Destroy()
self.territory = None
extension = os.path.splitext(filename)[1]
if extension == ".mga":
self.project.Save("MGA=" + filename)
elif extension == ".xme":
dumper = win32com.client.DispatchEx("Mga.MgaDumper")
dumper.DumpProject(self.project, filename)
else:
raise Exception("Don't know how to save '%s'" % filename)
self.territory = self.project.BeginTransactionInNewTerr()
def begin_transaction(self):
self.territory = self.project.BeginTransactionInNewTerr()
def commit_transaction(self):
self.project.CommitTransaction()
if self.territory:
self.territory.Destroy()
self.territory = None
def abort_transaction(self):
self.project.AbortTransaction()
if self.territory:
self.territory.Destroy()
self.territory = None
def run_interpreter(self, interpreter, focusobj=None, selectedobj=None, param=0):
if not selectedobj:
selectedobj=win32com.client.DispatchEx("Mga.MgaFCOs")
self.commit_transaction()
try:
launcher = win32com.client.DispatchEx("Mga.MgaLauncher")
launcher.RunComponent(interpreter, self.project, focusobj, selectedobj, param)
finally:
self.begin_transaction()
@staticmethod
def create(mgafile, paradigm):
project = win32com.client.DispatchEx("Mga.MgaProject")
create_project(project, "MGA=" + mgafile, paradigm)
p = Project(project)
p.filename = mgafile
p.mgafile = mgafile
return p
@staticmethod
def open(file, mga_to_save=None):
if not os.path.isfile(file):
raise Exception("'" + file + "' not found")
extension = os.path.splitext(file)[1]
mga = None
if extension == ".mga":
mga = win32com.client.DispatchEx("Mga.MgaProject")
mga.Open("MGA=" + file)
mga_to_save = file
elif extension == ".xme":
xme = win32com.client.DispatchEx("Mga.MgaParser")
(paradigm, parversion, parguid, basename, ver) = xme.GetXMLInfo(file)
mga = win32com.client.DispatchEx("Mga.MgaProject")
xme = win32com.client.DispatchEx("Mga.MgaParser")
resolver = win32com.client.DispatchEx("Mga.MgaResolver")
try:
resolver.IsInteractive = False
xme.Resolver = resolver
except AttributeError:
# older GME
pass
if mga_to_save == True:
mga_to_save = os.path.splitext(file)[0] + ".mga"
elif not mga_to_save:
                mga_to_save = os.path.join(tempfile.gettempdir(), "gmepy-%s.mga" % os.getpid())
create_project(mga, "MGA=" + mga_to_save, paradigm)
try:
xme.ParseProject(mga, file)
except:
mga.Close(True)
raise
else:
raise Exception("Don't know how to open '%s'" % file)
p = Project(mga)
p.filename = file
p.mgafile = mga_to_save
return p
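# Hedged usage sketch (not part of the original module): Project.open() wraps the
# model in a transaction via __enter__/__exit__, so FCOs can be read directly.
# The file name and object path below are hypothetical and this helper is never called.
def _example_read_project():
    with Project.open("model.mga") as project:
        fco = project.get_fco("RootModel/Sub/SomeAtom")
        print fco.Name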
def print_paradigm(xmefile):
"Print the input file and paradigm of a given xme"
xme = win32com.client.DispatchEx("Mga.MgaParser")
(paradigm, parversion, parguid, basename, ver) = xme.GetXMLInfo(xmefile)
print xmefile
print paradigm
def run_module(name, *args):
sys.path.append('.')
sys.argv[1:] = args
runpy.run_module(name, run_name='__main__')
def usage():
gme_dict = sys.modules[__name__].__dict__
names = []
names.extend(gme_dict.keys())
for name in filter(lambda name: type(gme_dict[name]) == type(print_paradigm), names):
if gme_dict[name].__doc__ and name.find('_') != 0:
print name
print "\t" + gme_dict[name].__doc__
sys.exit(2)
import traceback
if __name__ == '__main__':
try:
if len(sys.argv) < 2 or sys.argv[1] not in dir():
usage()
else:
# TRY:
# sys.modules[__name__].__getattribute__(sys.argv[1]).__call__(*sys.argv[2:])
eval("%s(*sys.argv[2:])" % sys.argv[1])
except:
traceback.print_exc(file=sys.stderr)
sys.stdin.readline()
sys.exit(1)
|
|
"""
Testing for Multi-layer Perceptron module (sklearn.neural_network)
"""
# Author: Issam H. Laradji
# License: BSD 3 clause
import pytest
import sys
import warnings
import re
import numpy as np
import joblib
from numpy.testing import (
assert_almost_equal,
assert_array_equal,
assert_allclose,
)
from sklearn.datasets import load_digits, load_iris
from sklearn.datasets import make_regression, make_multilabel_classification
from sklearn.exceptions import ConvergenceWarning
from io import StringIO
from sklearn.metrics import roc_auc_score
from sklearn.neural_network import MLPClassifier
from sklearn.neural_network import MLPRegressor
from sklearn.preprocessing import LabelBinarizer
from sklearn.preprocessing import MinMaxScaler, scale
from scipy.sparse import csr_matrix
from sklearn.utils._testing import ignore_warnings
ACTIVATION_TYPES = ["identity", "logistic", "tanh", "relu"]
X_digits, y_digits = load_digits(n_class=3, return_X_y=True)
X_digits_multi = MinMaxScaler().fit_transform(X_digits[:200])
y_digits_multi = y_digits[:200]
X_digits, y_digits = load_digits(n_class=2, return_X_y=True)
X_digits_binary = MinMaxScaler().fit_transform(X_digits[:200])
y_digits_binary = y_digits[:200]
classification_datasets = [
(X_digits_multi, y_digits_multi),
(X_digits_binary, y_digits_binary),
]
X_reg, y_reg = make_regression(
n_samples=200, n_features=10, bias=20.0, noise=100.0, random_state=7
)
y_reg = scale(y_reg)
regression_datasets = [(X_reg, y_reg)]
iris = load_iris()
X_iris = iris.data
y_iris = iris.target
def test_alpha():
# Test that larger alpha yields weights closer to zero
X = X_digits_binary[:100]
y = y_digits_binary[:100]
alpha_vectors = []
alpha_values = np.arange(2)
absolute_sum = lambda x: np.sum(np.abs(x))
for alpha in alpha_values:
mlp = MLPClassifier(hidden_layer_sizes=10, alpha=alpha, random_state=1)
with ignore_warnings(category=ConvergenceWarning):
mlp.fit(X, y)
alpha_vectors.append(
np.array([absolute_sum(mlp.coefs_[0]), absolute_sum(mlp.coefs_[1])])
)
for i in range(len(alpha_values) - 1):
assert (alpha_vectors[i] > alpha_vectors[i + 1]).all()
def test_fit():
# Test that the algorithm solution is equal to a worked out example.
X = np.array([[0.6, 0.8, 0.7]])
y = np.array([0])
mlp = MLPClassifier(
solver="sgd",
learning_rate_init=0.1,
alpha=0.1,
activation="logistic",
random_state=1,
max_iter=1,
hidden_layer_sizes=2,
momentum=0,
)
# set weights
mlp.coefs_ = [0] * 2
mlp.intercepts_ = [0] * 2
mlp.n_outputs_ = 1
mlp.coefs_[0] = np.array([[0.1, 0.2], [0.3, 0.1], [0.5, 0]])
mlp.coefs_[1] = np.array([[0.1], [0.2]])
mlp.intercepts_[0] = np.array([0.1, 0.1])
mlp.intercepts_[1] = np.array([1.0])
mlp._coef_grads = [] * 2
mlp._intercept_grads = [] * 2
mlp.n_features_in_ = 3
# Initialize parameters
mlp.n_iter_ = 0
mlp.learning_rate_ = 0.1
# Compute the number of layers
mlp.n_layers_ = 3
# Pre-allocate gradient matrices
mlp._coef_grads = [0] * (mlp.n_layers_ - 1)
mlp._intercept_grads = [0] * (mlp.n_layers_ - 1)
mlp.out_activation_ = "logistic"
mlp.t_ = 0
mlp.best_loss_ = np.inf
mlp.loss_curve_ = []
mlp._no_improvement_count = 0
mlp._intercept_velocity = [
np.zeros_like(intercepts) for intercepts in mlp.intercepts_
]
mlp._coef_velocity = [np.zeros_like(coefs) for coefs in mlp.coefs_]
mlp.partial_fit(X, y, classes=[0, 1])
# Manually worked out example
# h1 = g(X1 * W_i1 + b11) = g(0.6 * 0.1 + 0.8 * 0.3 + 0.7 * 0.5 + 0.1)
# = 0.679178699175393
# h2 = g(X2 * W_i2 + b12) = g(0.6 * 0.2 + 0.8 * 0.1 + 0.7 * 0 + 0.1)
# = 0.574442516811659
# o1 = g(h * W2 + b21) = g(0.679 * 0.1 + 0.574 * 0.2 + 1)
# = 0.7654329236196236
# d21 = -(0 - 0.765) = 0.765
# d11 = (1 - 0.679) * 0.679 * 0.765 * 0.1 = 0.01667
# d12 = (1 - 0.574) * 0.574 * 0.765 * 0.2 = 0.0374
# W1grad11 = X1 * d11 + alpha * W11 = 0.6 * 0.01667 + 0.1 * 0.1 = 0.0200
    # W1grad12 = X1 * d12 + alpha * W12 = 0.6 * 0.0374 + 0.1 * 0.2 = 0.04244
# W1grad21 = X2 * d11 + alpha * W13 = 0.8 * 0.01667 + 0.1 * 0.3 = 0.043336
# W1grad22 = X2 * d12 + alpha * W14 = 0.8 * 0.0374 + 0.1 * 0.1 = 0.03992
# W1grad31 = X3 * d11 + alpha * W15 = 0.6 * 0.01667 + 0.1 * 0.5 = 0.060002
# W1grad32 = X3 * d12 + alpha * W16 = 0.6 * 0.0374 + 0.1 * 0 = 0.02244
# W2grad1 = h1 * d21 + alpha * W21 = 0.679 * 0.765 + 0.1 * 0.1 = 0.5294
# W2grad2 = h2 * d21 + alpha * W22 = 0.574 * 0.765 + 0.1 * 0.2 = 0.45911
# b1grad1 = d11 = 0.01667
# b1grad2 = d12 = 0.0374
# b2grad = d21 = 0.765
# W1 = W1 - eta * [W1grad11, .., W1grad32] = [[0.1, 0.2], [0.3, 0.1],
# [0.5, 0]] - 0.1 * [[0.0200, 0.04244], [0.043336, 0.03992],
# [0.060002, 0.02244]] = [[0.098, 0.195756], [0.2956664,
# 0.096008], [0.4939998, -0.002244]]
# W2 = W2 - eta * [W2grad1, W2grad2] = [[0.1], [0.2]] - 0.1 *
# [[0.5294], [0.45911]] = [[0.04706], [0.154089]]
# b1 = b1 - eta * [b1grad1, b1grad2] = 0.1 - 0.1 * [0.01667, 0.0374]
# = [0.098333, 0.09626]
# b2 = b2 - eta * b2grad = 1.0 - 0.1 * 0.765 = 0.9235
assert_almost_equal(
mlp.coefs_[0],
np.array([[0.098, 0.195756], [0.2956664, 0.096008], [0.4939998, -0.002244]]),
decimal=3,
)
assert_almost_equal(mlp.coefs_[1], np.array([[0.04706], [0.154089]]), decimal=3)
assert_almost_equal(mlp.intercepts_[0], np.array([0.098333, 0.09626]), decimal=3)
assert_almost_equal(mlp.intercepts_[1], np.array(0.9235), decimal=3)
# Testing output
# h1 = g(X1 * W_i1 + b11) = g(0.6 * 0.098 + 0.8 * 0.2956664 +
# 0.7 * 0.4939998 + 0.098333) = 0.677
# h2 = g(X2 * W_i2 + b12) = g(0.6 * 0.195756 + 0.8 * 0.096008 +
# 0.7 * -0.002244 + 0.09626) = 0.572
# o1 = h * W2 + b21 = 0.677 * 0.04706 +
# 0.572 * 0.154089 + 0.9235 = 1.043
# prob = sigmoid(o1) = 0.739
assert_almost_equal(mlp.predict_proba(X)[0, 1], 0.739, decimal=3)
def test_gradient():
# Test gradient.
# This makes sure that the activation functions and their derivatives
# are correct. The numerical and analytical computation of the gradient
# should be close.
for n_labels in [2, 3]:
n_samples = 5
n_features = 10
random_state = np.random.RandomState(seed=42)
X = random_state.rand(n_samples, n_features)
y = 1 + np.mod(np.arange(n_samples) + 1, n_labels)
Y = LabelBinarizer().fit_transform(y)
for activation in ACTIVATION_TYPES:
mlp = MLPClassifier(
activation=activation,
hidden_layer_sizes=10,
solver="lbfgs",
alpha=1e-5,
learning_rate_init=0.2,
max_iter=1,
random_state=1,
)
mlp.fit(X, y)
theta = np.hstack([l.ravel() for l in mlp.coefs_ + mlp.intercepts_])
layer_units = [X.shape[1]] + [mlp.hidden_layer_sizes] + [mlp.n_outputs_]
activations = []
deltas = []
coef_grads = []
intercept_grads = []
activations.append(X)
for i in range(mlp.n_layers_ - 1):
activations.append(np.empty((X.shape[0], layer_units[i + 1])))
deltas.append(np.empty((X.shape[0], layer_units[i + 1])))
fan_in = layer_units[i]
fan_out = layer_units[i + 1]
coef_grads.append(np.empty((fan_in, fan_out)))
intercept_grads.append(np.empty(fan_out))
# analytically compute the gradients
def loss_grad_fun(t):
return mlp._loss_grad_lbfgs(
t, X, Y, activations, deltas, coef_grads, intercept_grads
)
[value, grad] = loss_grad_fun(theta)
numgrad = np.zeros(np.size(theta))
n = np.size(theta, 0)
E = np.eye(n)
epsilon = 1e-5
# numerically compute the gradients
for i in range(n):
dtheta = E[:, i] * epsilon
numgrad[i] = (
loss_grad_fun(theta + dtheta)[0] - loss_grad_fun(theta - dtheta)[0]
) / (epsilon * 2.0)
assert_almost_equal(numgrad, grad)
@pytest.mark.parametrize("X,y", classification_datasets)
def test_lbfgs_classification(X, y):
# Test lbfgs on classification.
# It should achieve a score higher than 0.95 for the binary and multi-class
# versions of the digits dataset.
X_train = X[:150]
y_train = y[:150]
X_test = X[150:]
expected_shape_dtype = (X_test.shape[0], y_train.dtype.kind)
for activation in ACTIVATION_TYPES:
mlp = MLPClassifier(
solver="lbfgs",
hidden_layer_sizes=50,
max_iter=150,
shuffle=True,
random_state=1,
activation=activation,
)
mlp.fit(X_train, y_train)
y_predict = mlp.predict(X_test)
assert mlp.score(X_train, y_train) > 0.95
assert (y_predict.shape[0], y_predict.dtype.kind) == expected_shape_dtype
@pytest.mark.parametrize("X,y", regression_datasets)
def test_lbfgs_regression(X, y):
# Test lbfgs on the regression dataset.
for activation in ACTIVATION_TYPES:
mlp = MLPRegressor(
solver="lbfgs",
hidden_layer_sizes=50,
max_iter=150,
shuffle=True,
random_state=1,
activation=activation,
)
mlp.fit(X, y)
if activation == "identity":
assert mlp.score(X, y) > 0.80
else:
# Non linear models perform much better than linear bottleneck:
assert mlp.score(X, y) > 0.98
@pytest.mark.parametrize("X,y", classification_datasets)
def test_lbfgs_classification_maxfun(X, y):
# Test lbfgs parameter max_fun.
# It should independently limit the number of iterations for lbfgs.
max_fun = 10
# classification tests
for activation in ACTIVATION_TYPES:
mlp = MLPClassifier(
solver="lbfgs",
hidden_layer_sizes=50,
max_iter=150,
max_fun=max_fun,
shuffle=True,
random_state=1,
activation=activation,
)
with pytest.warns(ConvergenceWarning):
mlp.fit(X, y)
assert max_fun >= mlp.n_iter_
@pytest.mark.parametrize("X,y", regression_datasets)
def test_lbfgs_regression_maxfun(X, y):
# Test lbfgs parameter max_fun.
# It should independently limit the number of iterations for lbfgs.
max_fun = 10
# regression tests
for activation in ACTIVATION_TYPES:
mlp = MLPRegressor(
solver="lbfgs",
hidden_layer_sizes=50,
tol=0.0,
max_iter=150,
max_fun=max_fun,
shuffle=True,
random_state=1,
activation=activation,
)
with pytest.warns(ConvergenceWarning):
mlp.fit(X, y)
assert max_fun >= mlp.n_iter_
mlp.max_fun = -1
with pytest.raises(ValueError):
mlp.fit(X, y)
def test_learning_rate_warmstart():
    # Tests that warm_start reuses past solutions.
X = [[3, 2], [1, 6], [5, 6], [-2, -4]]
y = [1, 1, 1, 0]
for learning_rate in ["invscaling", "constant"]:
mlp = MLPClassifier(
solver="sgd",
hidden_layer_sizes=4,
learning_rate=learning_rate,
max_iter=1,
power_t=0.25,
warm_start=True,
)
with ignore_warnings(category=ConvergenceWarning):
mlp.fit(X, y)
prev_eta = mlp._optimizer.learning_rate
mlp.fit(X, y)
post_eta = mlp._optimizer.learning_rate
if learning_rate == "constant":
assert prev_eta == post_eta
elif learning_rate == "invscaling":
assert mlp.learning_rate_init / pow(8 + 1, mlp.power_t) == post_eta
def test_multilabel_classification():
# Test that multi-label classification works as expected.
# test fit method
X, y = make_multilabel_classification(
n_samples=50, random_state=0, return_indicator=True
)
mlp = MLPClassifier(
solver="lbfgs",
hidden_layer_sizes=50,
alpha=1e-5,
max_iter=150,
random_state=0,
activation="logistic",
learning_rate_init=0.2,
)
mlp.fit(X, y)
assert mlp.score(X, y) > 0.97
# test partial fit method
mlp = MLPClassifier(
solver="sgd",
hidden_layer_sizes=50,
max_iter=150,
random_state=0,
activation="logistic",
alpha=1e-5,
learning_rate_init=0.2,
)
for i in range(100):
mlp.partial_fit(X, y, classes=[0, 1, 2, 3, 4])
assert mlp.score(X, y) > 0.9
    # Make sure early stopping still works now that splitting is stratified by
    # default (it is disabled for multilabel classification)
mlp = MLPClassifier(early_stopping=True)
mlp.fit(X, y).predict(X)
def test_multioutput_regression():
# Test that multi-output regression works as expected
X, y = make_regression(n_samples=200, n_targets=5)
mlp = MLPRegressor(
solver="lbfgs", hidden_layer_sizes=50, max_iter=200, random_state=1
)
mlp.fit(X, y)
assert mlp.score(X, y) > 0.9
def test_partial_fit_classes_error():
# Tests that passing different classes to partial_fit raises an error
X = [[3, 2]]
y = [0]
clf = MLPClassifier(solver="sgd")
clf.partial_fit(X, y, classes=[0, 1])
with pytest.raises(ValueError):
clf.partial_fit(X, y, classes=[1, 2])
def test_partial_fit_classification():
# Test partial_fit on classification.
# `partial_fit` should yield the same results as 'fit' for binary and
# multi-class classification.
for X, y in classification_datasets:
mlp = MLPClassifier(
solver="sgd",
max_iter=100,
random_state=1,
tol=0,
alpha=1e-5,
learning_rate_init=0.2,
)
with ignore_warnings(category=ConvergenceWarning):
mlp.fit(X, y)
pred1 = mlp.predict(X)
mlp = MLPClassifier(
solver="sgd", random_state=1, alpha=1e-5, learning_rate_init=0.2
)
for i in range(100):
mlp.partial_fit(X, y, classes=np.unique(y))
pred2 = mlp.predict(X)
assert_array_equal(pred1, pred2)
assert mlp.score(X, y) > 0.95
def test_partial_fit_unseen_classes():
# Non regression test for bug 6994
# Tests for labeling errors in partial fit
clf = MLPClassifier(random_state=0)
clf.partial_fit([[1], [2], [3]], ["a", "b", "c"], classes=["a", "b", "c", "d"])
clf.partial_fit([[4]], ["d"])
assert clf.score([[1], [2], [3], [4]], ["a", "b", "c", "d"]) > 0
def test_partial_fit_regression():
# Test partial_fit on regression.
# `partial_fit` should yield the same results as 'fit' for regression.
X = X_reg
y = y_reg
for momentum in [0, 0.9]:
mlp = MLPRegressor(
solver="sgd",
max_iter=100,
activation="relu",
random_state=1,
learning_rate_init=0.01,
batch_size=X.shape[0],
momentum=momentum,
)
with warnings.catch_warnings(record=True):
# catch convergence warning
mlp.fit(X, y)
pred1 = mlp.predict(X)
mlp = MLPRegressor(
solver="sgd",
activation="relu",
learning_rate_init=0.01,
random_state=1,
batch_size=X.shape[0],
momentum=momentum,
)
for i in range(100):
mlp.partial_fit(X, y)
pred2 = mlp.predict(X)
assert_allclose(pred1, pred2)
score = mlp.score(X, y)
assert score > 0.65
def test_partial_fit_errors():
# Test partial_fit error handling.
X = [[3, 2], [1, 6]]
y = [1, 0]
# no classes passed
with pytest.raises(ValueError):
MLPClassifier(solver="sgd").partial_fit(X, y, classes=[2])
# lbfgs doesn't support partial_fit
assert not hasattr(MLPClassifier(solver="lbfgs"), "partial_fit")
@pytest.mark.parametrize(
"args",
[
{"hidden_layer_sizes": -1},
{"max_iter": -1},
{"shuffle": "true"},
{"alpha": -1},
{"learning_rate_init": -1},
{"momentum": 2},
{"momentum": -0.5},
{"nesterovs_momentum": "invalid"},
{"early_stopping": "invalid"},
{"validation_fraction": 1},
{"validation_fraction": -0.5},
{"beta_1": 1},
{"beta_1": -0.5},
{"beta_2": 1},
{"beta_2": -0.5},
{"epsilon": -0.5},
{"n_iter_no_change": -1},
{"solver": "hadoken"},
{"learning_rate": "converge"},
{"activation": "cloak"},
],
)
def test_params_errors(args):
# Test that invalid parameters raise value error
X = [[3, 2], [1, 6]]
y = [1, 0]
clf = MLPClassifier
with pytest.raises(ValueError):
clf(**args).fit(X, y)
def test_predict_proba_binary():
# Test that predict_proba works as expected for binary class.
X = X_digits_binary[:50]
y = y_digits_binary[:50]
clf = MLPClassifier(hidden_layer_sizes=5, activation="logistic", random_state=1)
with ignore_warnings(category=ConvergenceWarning):
clf.fit(X, y)
y_proba = clf.predict_proba(X)
y_log_proba = clf.predict_log_proba(X)
(n_samples, n_classes) = y.shape[0], 2
proba_max = y_proba.argmax(axis=1)
proba_log_max = y_log_proba.argmax(axis=1)
assert y_proba.shape == (n_samples, n_classes)
assert_array_equal(proba_max, proba_log_max)
assert_allclose(y_log_proba, np.log(y_proba))
assert roc_auc_score(y, y_proba[:, 1]) == 1.0
def test_predict_proba_multiclass():
# Test that predict_proba works as expected for multi class.
X = X_digits_multi[:10]
y = y_digits_multi[:10]
clf = MLPClassifier(hidden_layer_sizes=5)
with ignore_warnings(category=ConvergenceWarning):
clf.fit(X, y)
y_proba = clf.predict_proba(X)
y_log_proba = clf.predict_log_proba(X)
(n_samples, n_classes) = y.shape[0], np.unique(y).size
proba_max = y_proba.argmax(axis=1)
proba_log_max = y_log_proba.argmax(axis=1)
assert y_proba.shape == (n_samples, n_classes)
assert_array_equal(proba_max, proba_log_max)
assert_allclose(y_log_proba, np.log(y_proba))
def test_predict_proba_multilabel():
# Test that predict_proba works as expected for multilabel.
# Multilabel should not use softmax which makes probabilities sum to 1
X, Y = make_multilabel_classification(
n_samples=50, random_state=0, return_indicator=True
)
n_samples, n_classes = Y.shape
clf = MLPClassifier(solver="lbfgs", hidden_layer_sizes=30, random_state=0)
clf.fit(X, Y)
y_proba = clf.predict_proba(X)
assert y_proba.shape == (n_samples, n_classes)
assert_array_equal(y_proba > 0.5, Y)
y_log_proba = clf.predict_log_proba(X)
proba_max = y_proba.argmax(axis=1)
proba_log_max = y_log_proba.argmax(axis=1)
assert (y_proba.sum(1) - 1).dot(y_proba.sum(1) - 1) > 1e-10
assert_array_equal(proba_max, proba_log_max)
assert_allclose(y_log_proba, np.log(y_proba))
def test_shuffle():
# Test that the shuffle parameter affects the training process (it should)
X, y = make_regression(n_samples=50, n_features=5, n_targets=1, random_state=0)
# The coefficients will be identical if both do or do not shuffle
for shuffle in [True, False]:
mlp1 = MLPRegressor(
hidden_layer_sizes=1,
max_iter=1,
batch_size=1,
random_state=0,
shuffle=shuffle,
)
mlp2 = MLPRegressor(
hidden_layer_sizes=1,
max_iter=1,
batch_size=1,
random_state=0,
shuffle=shuffle,
)
mlp1.fit(X, y)
mlp2.fit(X, y)
assert np.array_equal(mlp1.coefs_[0], mlp2.coefs_[0])
    # The coefficients will differ when one model shuffles and the other does not
mlp1 = MLPRegressor(
hidden_layer_sizes=1, max_iter=1, batch_size=1, random_state=0, shuffle=True
)
mlp2 = MLPRegressor(
hidden_layer_sizes=1, max_iter=1, batch_size=1, random_state=0, shuffle=False
)
mlp1.fit(X, y)
mlp2.fit(X, y)
assert not np.array_equal(mlp1.coefs_[0], mlp2.coefs_[0])
def test_sparse_matrices():
# Test that sparse and dense input matrices output the same results.
X = X_digits_binary[:50]
y = y_digits_binary[:50]
X_sparse = csr_matrix(X)
mlp = MLPClassifier(solver="lbfgs", hidden_layer_sizes=15, random_state=1)
mlp.fit(X, y)
pred1 = mlp.predict(X)
mlp.fit(X_sparse, y)
pred2 = mlp.predict(X_sparse)
assert_almost_equal(pred1, pred2)
pred1 = mlp.predict(X)
pred2 = mlp.predict(X_sparse)
assert_array_equal(pred1, pred2)
def test_tolerance():
# Test tolerance.
# It should force the solver to exit the loop when it converges.
X = [[3, 2], [1, 6]]
y = [1, 0]
clf = MLPClassifier(tol=0.5, max_iter=3000, solver="sgd")
clf.fit(X, y)
assert clf.max_iter > clf.n_iter_
def test_verbose_sgd():
# Test verbose.
X = [[3, 2], [1, 6]]
y = [1, 0]
clf = MLPClassifier(solver="sgd", max_iter=2, verbose=10, hidden_layer_sizes=2)
old_stdout = sys.stdout
sys.stdout = output = StringIO()
with ignore_warnings(category=ConvergenceWarning):
clf.fit(X, y)
clf.partial_fit(X, y)
sys.stdout = old_stdout
assert "Iteration" in output.getvalue()
def test_early_stopping():
X = X_digits_binary[:100]
y = y_digits_binary[:100]
tol = 0.2
clf = MLPClassifier(tol=tol, max_iter=3000, solver="sgd", early_stopping=True)
clf.fit(X, y)
assert clf.max_iter > clf.n_iter_
valid_scores = clf.validation_scores_
best_valid_score = clf.best_validation_score_
assert max(valid_scores) == best_valid_score
assert best_valid_score + tol > valid_scores[-2]
assert best_valid_score + tol > valid_scores[-1]
def test_adaptive_learning_rate():
X = [[3, 2], [1, 6]]
y = [1, 0]
clf = MLPClassifier(tol=0.5, max_iter=3000, solver="sgd", learning_rate="adaptive")
clf.fit(X, y)
assert clf.max_iter > clf.n_iter_
assert 1e-6 > clf._optimizer.learning_rate
@ignore_warnings(category=RuntimeWarning)
def test_warm_start():
X = X_iris
y = y_iris
y_2classes = np.array([0] * 75 + [1] * 75)
y_3classes = np.array([0] * 40 + [1] * 40 + [2] * 70)
y_3classes_alt = np.array([0] * 50 + [1] * 50 + [3] * 50)
y_4classes = np.array([0] * 37 + [1] * 37 + [2] * 38 + [3] * 38)
y_5classes = np.array([0] * 30 + [1] * 30 + [2] * 30 + [3] * 30 + [4] * 30)
# No error raised
clf = MLPClassifier(hidden_layer_sizes=2, solver="lbfgs", warm_start=True).fit(X, y)
clf.fit(X, y)
clf.fit(X, y_3classes)
for y_i in (y_2classes, y_3classes_alt, y_4classes, y_5classes):
clf = MLPClassifier(hidden_layer_sizes=2, solver="lbfgs", warm_start=True).fit(
X, y
)
message = (
"warm_start can only be used where `y` has the same "
"classes as in the previous call to fit."
" Previously got [0 1 2], `y` has %s"
% np.unique(y_i)
)
with pytest.raises(ValueError, match=re.escape(message)):
clf.fit(X, y_i)
@pytest.mark.parametrize("MLPEstimator", [MLPClassifier, MLPRegressor])
def test_warm_start_full_iteration(MLPEstimator):
# Non-regression test for:
# https://github.com/scikit-learn/scikit-learn/issues/16812
    # Check that the MLP estimator completes `max_iter` iterations with a
    # warm-started estimator.
X, y = X_iris, y_iris
max_iter = 3
clf = MLPEstimator(
hidden_layer_sizes=2, solver="sgd", warm_start=True, max_iter=max_iter
)
clf.fit(X, y)
assert max_iter == clf.n_iter_
clf.fit(X, y)
assert 2 * max_iter == clf.n_iter_
def test_n_iter_no_change():
    # test n_iter_no_change using binary data set
    # the classification fitting process is not prone to loss-curve fluctuations
X = X_digits_binary[:100]
y = y_digits_binary[:100]
tol = 0.01
max_iter = 3000
# test multiple n_iter_no_change
for n_iter_no_change in [2, 5, 10, 50, 100]:
clf = MLPClassifier(
tol=tol, max_iter=max_iter, solver="sgd", n_iter_no_change=n_iter_no_change
)
clf.fit(X, y)
# validate n_iter_no_change
assert clf._no_improvement_count == n_iter_no_change + 1
assert max_iter > clf.n_iter_
@ignore_warnings(category=ConvergenceWarning)
def test_n_iter_no_change_inf():
# test n_iter_no_change using binary data set
# the fitting process should go to max_iter iterations
X = X_digits_binary[:100]
y = y_digits_binary[:100]
# set a ridiculous tolerance
# this should always trigger _update_no_improvement_count()
tol = 1e9
# fit
n_iter_no_change = np.inf
max_iter = 3000
clf = MLPClassifier(
tol=tol, max_iter=max_iter, solver="sgd", n_iter_no_change=n_iter_no_change
)
clf.fit(X, y)
# validate n_iter_no_change doesn't cause early stopping
assert clf.n_iter_ == max_iter
# validate _update_no_improvement_count() was always triggered
assert clf._no_improvement_count == clf.n_iter_ - 1
def test_early_stopping_stratified():
# Make sure data splitting for early stopping is stratified
X = [[1, 2], [2, 3], [3, 4], [4, 5]]
y = [0, 0, 0, 1]
mlp = MLPClassifier(early_stopping=True)
with pytest.raises(
ValueError, match="The least populated class in y has only 1 member"
):
mlp.fit(X, y)
def test_mlp_classifier_dtypes_casting():
# Compare predictions for different dtypes
mlp_64 = MLPClassifier(
alpha=1e-5, hidden_layer_sizes=(5, 3), random_state=1, max_iter=50
)
mlp_64.fit(X_digits[:300], y_digits[:300])
pred_64 = mlp_64.predict(X_digits[300:])
proba_64 = mlp_64.predict_proba(X_digits[300:])
mlp_32 = MLPClassifier(
alpha=1e-5, hidden_layer_sizes=(5, 3), random_state=1, max_iter=50
)
mlp_32.fit(X_digits[:300].astype(np.float32), y_digits[:300])
pred_32 = mlp_32.predict(X_digits[300:].astype(np.float32))
proba_32 = mlp_32.predict_proba(X_digits[300:].astype(np.float32))
assert_array_equal(pred_64, pred_32)
assert_allclose(proba_64, proba_32, rtol=1e-02)
def test_mlp_regressor_dtypes_casting():
mlp_64 = MLPRegressor(
alpha=1e-5, hidden_layer_sizes=(5, 3), random_state=1, max_iter=50
)
mlp_64.fit(X_digits[:300], y_digits[:300])
pred_64 = mlp_64.predict(X_digits[300:])
mlp_32 = MLPRegressor(
alpha=1e-5, hidden_layer_sizes=(5, 3), random_state=1, max_iter=50
)
mlp_32.fit(X_digits[:300].astype(np.float32), y_digits[:300])
pred_32 = mlp_32.predict(X_digits[300:].astype(np.float32))
assert_allclose(pred_64, pred_32, rtol=1e-04)
@pytest.mark.parametrize("dtype", [np.float32, np.float64])
@pytest.mark.parametrize("Estimator", [MLPClassifier, MLPRegressor])
def test_mlp_param_dtypes(dtype, Estimator):
# Checks if input dtype is used for network parameters
# and predictions
X, y = X_digits.astype(dtype), y_digits
mlp = Estimator(alpha=1e-5, hidden_layer_sizes=(5, 3), random_state=1, max_iter=50)
mlp.fit(X[:300], y[:300])
pred = mlp.predict(X[300:])
assert all([intercept.dtype == dtype for intercept in mlp.intercepts_])
assert all([coef.dtype == dtype for coef in mlp.coefs_])
if Estimator == MLPRegressor:
assert pred.dtype == dtype
def test_mlp_loading_from_joblib_partial_fit(tmp_path):
"""Loading from MLP and partial fitting updates weights. Non-regression
test for #19626."""
pre_trained_estimator = MLPRegressor(
hidden_layer_sizes=(42,), random_state=42, learning_rate_init=0.01, max_iter=200
)
features, target = [[2]], [4]
# Fit on x=2, y=4
pre_trained_estimator.fit(features, target)
# dump and load model
pickled_file = tmp_path / "mlp.pkl"
joblib.dump(pre_trained_estimator, pickled_file)
load_estimator = joblib.load(pickled_file)
    # Train for more epochs on point x=2, y=1
fine_tune_features, fine_tune_target = [[2]], [1]
for _ in range(200):
load_estimator.partial_fit(fine_tune_features, fine_tune_target)
# finetuned model learned the new target
predicted_value = load_estimator.predict(fine_tune_features)
assert_allclose(predicted_value, fine_tune_target, rtol=1e-4)
|
|
#!/usr/bin/python
import re
import sys
import os
import platform
is_windows = platform.system().lower().startswith("win")
import subprocess
import shutil
from markdown2 import *
from datetime import *
from multiprocessing import Process
from utils import *
from jira import *
from docbook import *
try:
from xml.etree.ElementTree import ElementTree
except:
prettyprint('''
Welcome to the ModeShape Release Script.
This release script requires that you use at least Python 2.5.0. It appears
that you do not have the ElementTree XML APIs available, which are available
by default in Python 2.5.0.
''', Levels.FATAL)
sys.exit(1)
modules = []
docbooks = []
uploader = None
git = None
jira = None
def get_modules(directory):
'''Analyses the pom.xml file and extracts declared modules'''
tree = ElementTree()
f = directory + "/pom.xml"
if settings['verbose']:
print "Parsing %s to get a list of modules in project" % f
tree.parse(f)
mods = tree.findall(".//{%s}module" % maven_pom_xml_namespace)
for m in mods:
modules.append(m.text)
def help_and_exit():
prettyprint('''
%s ModeShape Release Script%s
This script automates much of the work of releasing a new version of the ModeShape project, and includes
the following tasks:
- create a local branch for the new release;
- change the project-related versions in the POM files and documentation;
- commit those changes locally;
- create a tag for the release;
- generate the release notes in multiple formats;
- generate emails to all the people who have filed, commented on, or worked on issues
fixed in this release;
- run a full assembly build of the software to produce all artifacts and documentation;
- place a copy of all artifacts and documentation in the '../archive' folder;
- deploy all artifacts to the JBoss.org Maven repository in a staging area (authorization required)
- upload all artifacts and documentation to JBoss.org (authorization required); and
- push the commit and tag to the official Git repository (authorization required)
Note that the last three steps are not performed during a dry run.
Before this script is executed, be sure to update and commit the 'release_notes.md' file. This script also checks
that the local Git repository is a writable clone of the official ModeShape repository on GitHub.
%s Usage:%s
$ bin/release.py [options] <version> [<branch>]
where:
<version> The name of the new version (e.g., '2.4.0.Final' but without quotes), which must
comply with the format '<major>.<minor>.<patch>.<qualifier>', where the qualifier
must be one of 'Final', 'Alpha', 'Beta', or 'CR'.
branch The name of the existing branch from which the release should be made. This defaults
to 'master'.
and where the options include:
--verbose Show more detailed logging and messages
--dry-run Used for trial runs of the release process. This leaves a temporary branch in the local
git repository that contains the committed changes, but does NOT push to the official
Git repository and it does NOT publish artifacts to JBoss.org.
--skip-tests Do not run the unit or integration tests when building the software
--single-threaded Perform all operations sequentially without using multiple threads
--multi-threaded Perform some operations in parallel to reduce the overall run time.
--key-file=file A fully qualified path to a private key file that should be used when copying remote files via SCP or RSYNC.
If provided, is the equivalent of using the '-i' switch for 'scp' or 'rsync'. If not, these commands are
invoked without this switch.
This option is not available with '--dry-run'
--help|? Display this usage message
%s Examples:%s
$ bin/release.py 3.0.0.Final
This will release '3.0.0.Final' based off of 'master'
$ bin/release.py 2.8.1.Final 2.x
This will release '2.8.1.Final' based off of the existing '2.x' branch
''' % (Colors.yellow(), Colors.end_color(), Colors.yellow(), Colors.end_color(), Colors.yellow(), Colors.end_color()), Levels.INFO)
sys.exit(0)
def validate_version(version):
version_pattern = get_version_pattern()
if version_pattern.match(version):
return version.strip()
else:
prettyprint("Invalid version '"+version+"'!\n", Levels.FATAL)
help_and_exit()
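# Hedged illustration (the real pattern comes from utils.get_version_pattern(), which
# is not shown here): version strings follow '<major>.<minor>.<patch>.<qualifier>', so
#   validate_version('3.0.0.Final')  -> returns '3.0.0.Final'
#   validate_version('3.0')          -> logs a FATAL message and prints the usage text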
def tag_release(version, branch):
if git.remote_branch_exists():
git.switch_to_branch()
git.create_tag_branch()
else:
prettyprint("Branch %s cannot be found on upstream repository. Aborting!" % branch, Levels.FATAL)
sys.exit(100)
def get_project_version_tag(tree):
return tree.find("./{%s}version" % (maven_pom_xml_namespace))
def get_parent_version_tag(tree):
return tree.find("./{%s}parent/{%s}version" % (maven_pom_xml_namespace, maven_pom_xml_namespace))
def patch_poms(working_dir, version):
patched_poms = list()
walker = GlobDirectoryWalker(working_dir, "pom.xml")
for pom_file in walker:
tree = ElementTree()
tree.parse(pom_file)
# The current version of the POM is what we're looking for ...
current_version_elem = get_project_version_tag(tree)
        if current_version_elem is None:
# There is no version for the POM, so get it from the parent ...
current_version_elem = get_parent_version_tag(tree)
current_version = current_version_elem.text
if walker.replace_all_in(pom_file,"<version>%s</version>" % current_version,"<version>%s</version>" % version):
patched_poms.append(pom_file)
return patched_poms
def generate_release_notes(markdown_file,version,output_dir):
f = open(markdown_file)
readme_md = f.read()
# Replace the version entity with the actual version ...
readme_md = re.sub('&version;',version,readme_md)
# Append the JIRA-generated release notes
issues_md = jira.get_release_notes_in_markdown()
readme_md = readme_md + "\n\n" + issues_md
# Convert the lines to HTML using Markdown ...
    readme_html = Markdown().convert(readme_md)
# Convert the lines to text by removing the Markdown patterns ...
readme_text = unmarkdown(readme_md)
# Write out the two files in the desired location ...
if not os.path.exists(output_dir):
os.makedirs(output_dir)
path = os.path.join(output_dir,"release.html")
mdf = open(path,'w')
mdf.write(readme_html)
mdf.close()
path = os.path.join(output_dir,"release.txt")
mdt = open(path,'w')
mdt.write(readme_text)
mdt.close()
def generate_contribution_emails(output_dir,bcc_address):
'''Generates an HTML page in the output directory containing mailto links that can be used to create the contribution emails in your mail application'''
html_content = jira.get_contribution_html(bcc_address)
file_path = os.path.join(output_dir,"contributions.html")
f = open(file_path,'w')
f.write(html_content)
f.close()
def copy_artifacts_to_archive_location(archive_path,version):
try:
os.makedirs(archive_path)
except:
pass
# Copy the 'modeshape-distribution' artifacts ...
from_files = ['dist.zip', 'source.zip', 'jboss-wf9-dist.zip', 'javadoc.zip']
to_files = ['dist.zip', 'source.zip', 'jboss-wf9-dist.zip', 'javadoc.zip']
for fsuffix,tsuffix in zip(from_files,to_files):
shutil.copy("modeshape-distribution/target/modeshape-%s-%s" % (version,fsuffix), "%s/modeshape-%s-%s" % (archive_path,version,tsuffix))
# Make an area for the documentation ...
docs_path = os.path.join(archive_path,version)
if not os.path.exists(docs_path):
os.makedirs(docs_path)
# Copy the Full JavaDoc ...
from_path = os.path.join('modeshape-distribution','target','api')
copy_folder(from_path,os.path.join(docs_path,'api'))
## Copy the API JavaDoc ...
#from_path = os.path.join('modeshape-distribution','target','api')
#copy_folder(from_path,os.path.join(docs_path,'api'))
#
## Copy the XRef ...
#from_path = os.path.join('modeshape-distribution','target','xref')
#if os.path.exists(from_path):
# copy_folder(from_path,os.path.join(docs_path,'xref'))
# Copy the release notes into the archive area...
for readme in ['release.html','release.txt']:
from_path = os.path.join('target',readme)
shutil.copy(from_path,os.path.join(docs_path,readme))
shutil.copy(from_path,os.path.join(archive_path,readme))
def copy_release_notes_to_archive_location(archive_path,version):
try:
os.makedirs(archive_path)
except:
pass
# Copy the release notes into the archive area...
for readme in ['release.html','release.txt']:
from_path = os.path.join('target',readme)
shutil.copy(from_path,os.path.join(archive_path,readme))
def copy_folder( from_path, to_path ):
if os.path.exists(to_path):
shutil.rmtree(to_path)
shutil.copytree(from_path,to_path)
def update_versions(version):
modified_files = []
## Update versions in the POM files ...
for pom in patch_poms('.',version):
modified_files.append(pom)
# Now make sure this goes back into the repository.
git.commit(modified_files)
def get_module_name(pom_file):
tree = ElementTree()
tree.parse(pom_file)
return tree.findtext("./{%s}artifactId" % maven_pom_xml_namespace)
def upload_artifacts(base_dir, version):
"""Downloadable artifacts get rsync'ed to filemgmt.jboss.org, in the downloads_htdocs/modeshape directory"""
# Create an area under 'target' where we can move all the files/folders that we need to upload ...
os.chdir("%s/target/" % (base_dir))
os.makedirs("downloads/%s" % version)
# Copy the 'modeshape-distribution' artifacts ...
from_files = ['dist.zip', 'source.zip', 'jboss-wf9-dist.zip', 'javadoc.zip']
to_files = ['dist.zip', 'source.zip', 'jboss-wf9-dist.zip', 'javadoc.zip']
for fsuffix,tsuffix in zip(from_files,to_files):
shutil.copy("%s/modeshape-distribution/target/modeshape-%s-%s" % (base_dir,version,fsuffix), "downloads/%s/modeshape-%s-%s" % (version,version,tsuffix))
# Copy the readme files ...
for readme in ['release.html','release.txt']:
from_path = os.path.join(base_dir,'target',readme)
to_path = os.path.join('downloads',version,readme)
shutil.copy(from_path,to_path)
# rsync this stuff to filemgmt.jboss.org
os.chdir("%s/target/downloads" % (base_dir))
flags=[]
if ('key_file' in settings):
flags=['-i ' + settings['key_file']]
if is_windows:
uploader.upload_scp(version, "[email protected]:/downloads_htdocs/modeshape", flags)
else:
flags = flags + ['--protocol=28']
uploader.upload_rsync(version, "[email protected]:/downloads_htdocs/modeshape", flags)
# We're done, so go back to where we were ...
os.chdir(base_dir)
def upload_documentation(base_dir, version):
"""Javadocs get rsync'ed to filemgmt.jboss.org, in the docs_htdocs/modeshape directory"""
# Create an area under 'target' where we can move all the files/folders that we need to upload ...
os.chdir("%s/target/" % (base_dir))
os.makedirs("docs/%s" % version)
# Move the 'api' folder into the 'docs/<version>/' folder so we can rsync that '<version>' folder
os.rename("%s/modeshape-distribution/target/api" % base_dir, "docs/%s/api" % version)
# Copy the readme files ...
for readme in ['release.html','release.txt']:
from_path = os.path.join(base_dir,'target',readme)
to_path = os.path.join('docs',version,readme)
shutil.copy(from_path,to_path)
# rsync this stuff to filemgmt.jboss.org
os.chdir("%s/target/docs" % (base_dir))
flags=[]
if ('key_file' in settings):
flags=['-i ' + settings['key_file']]
if is_windows:
uploader.upload_scp(version, "[email protected]:/docs_htdocs/modeshape", flags)
else:
flags = flags + ['--protocol=28']
uploader.upload_rsync(version, "[email protected]:/docs_htdocs/modeshape", flags)
# We're done, so go back to where we were ...
os.chdir(base_dir)
def do_task(target, args, async_processes):
if settings['multi_threaded']:
async_processes.append(Process(target = target, args = args))
else:
target(*args)
### This is the starting place for this script.
def release():
global settings
global uploader
global git
global jira
assert_python_minimum_version(2, 5)
base_dir = os.getcwd()
# Process the arguments ...
version = None
branch = 'master'
if len(sys.argv) > 1:
for arg in sys.argv[1:len(sys.argv)]:
if arg == '--verbose':
settings['verbose'] = True
elif arg == '--dry-run':
settings['dry_run'] = True
elif arg == '--skip-tests':
settings['skip_tests'] = True
elif arg == '--multi-threaded':
settings['multi_threaded'] = True
elif arg == '--single-threaded':
settings['multi_threaded'] = False
elif arg.startswith('--key-file'):
pair = arg.split('=')
if (len(pair) < 2):
prettyprint("When using --key-file you must supply a file using --key-file=your_file")
sys.exit(0)
else:
settings['key_file'] = pair[1]
elif arg == '--help' or arg == '?':
help_and_exit()
else:
if version == None:
# The first non-option is the version
print "validating version '%s'" % arg
version = validate_version(arg)
else:
branch = arg
## Set up network interactive tools
if settings['dry_run']:
# Use stubs
prettyprint("***", Levels.DEBUG)
prettyprint("*** This is a DRY RUN. No changes will be committed and no files will be published. Used to test this release script only. ***", Levels.DEBUG)
prettyprint("***", Levels.DEBUG)
prettyprint("Your settings are %s" % settings, Levels.DEBUG)
uploader = DryRunUploader()
else:
uploader = Uploader()
# Make sure they want to continue ...
sure = input_with_default("\nDid you update and commit the 'release_notes.md' file?", "N")
if not sure.upper().startswith("Y"):
prettyprint("... Please do this now and rerun this script.", Levels.WARNING)
print ""
sys.exit(1)
prettyprint("", Levels.INFO)
prettyprint("Releasing ModeShape version %s from branch '%s'" % (version, branch), Levels.INFO)
sure = input_with_default("Are you sure you want to continue?", "N")
if not sure.upper().startswith("Y"):
prettyprint("... User Abort!", Levels.WARNING)
sys.exit(1)
prettyprint("OK, releasing! Please stand by ...", Levels.INFO)
tag_name = "modeshape-%s" % version
git = Git(branch, tag_name)
if not git.is_upstream_clone():
proceed = input_with_default('This is not a clone of an %supstream%s ModeShape repository! Are you sure you want to proceed?' % (Colors.UNDERLINE, Colors.END), 'N')
if not proceed.upper().startswith('Y'):
prettyprint("... User Abort!", Levels.WARNING)
sys.exit(1)
# Haven't yet done anything ...
## Release order:
# Step 1: Tag in Git
prettyprint("Step 1: Tagging %s in git as %s" % (branch, version), Levels.INFO)
tag_release(version, branch)
prettyprint("Step 1: Complete", Levels.INFO)
# Step 2: Update version in tagged files
prettyprint("Step 2: Updating version number in source files", Levels.INFO)
maven_clean()
update_versions(version)
prettyprint("Step 2: Complete", Levels.INFO)
    # Step 3: Build and test in Maven3
prettyprint("Step 3: Build and test in Maven3", Levels.INFO)
maven_build_distribution(version)
prettyprint("Step 3: Complete", Levels.INFO)
# Step 4: Generate release notes and place into the 'target' folder
jira_url = "https://issues.jboss.org/"
project_key = 'MODE'
project_name = 'ModeShape'
project_id = '12310930'
prettyprint("Step 4: Generating release notes using JIRA and placing in './target'", Levels.INFO)
jira = Jira(jira_url,project_key,project_id,project_name,version)
generate_release_notes('release_notes.md',version,"target")
prettyprint("Step 4: Complete", Levels.INFO)
    # Step 5: Copy files into archive
    archive_path = os.path.join("..","archive",version)
if not os.path.exists(archive_path):
os.makedirs(archive_path)
print "archive_path = '%s'" % archive_path
prettyprint("Step 5: Copying build artifacts and documentation to archive '%s'" % (archive_path), Levels.INFO)
copy_artifacts_to_archive_location(archive_path,version)
    copy_release_notes_to_archive_location(archive_path,version)
prettyprint("Step 5: Complete", Levels.INFO)
# Step 6: Generate contribution emails
prettyprint("Step 6: Generating contribution emails using JIRA and placing in '%s'" % (archive_path), Levels.INFO)
generate_contribution_emails(archive_path,'[email protected]')
prettyprint("Step 6: Complete", Levels.INFO)
# Nothing else should modify any files locally ...
## Clean up in git
prettyprint("Step 7: Committing changes to Git, creating release tag, and pushing to 'origin'", Levels.INFO)
git.tag_for_release()
if not settings['dry_run']:
git.push_to_origin()
git.cleanup()
else:
prettyprint("In dry-run mode. Not pushing tag to remote origin and not removing temp release branch '%s'." % git.working_branch, Levels.DEBUG)
prettyprint("Step 7: Complete", Levels.INFO)
if is_windows:
if settings['dry_run']:
prettyprint("\n\n\nWindows detected; In dry-run mode no file uploading will be performed, so all done.", Levels.INFO)
else:
prettyprint("\n\n\nWindows detected; Make sure you upload the files & documentation manually.", Levels.INFO)
return
async_processes = []
# Step 8: Upload javadocs to JBoss.org
prettyprint("Step 8: Uploading documentation to JBoss.org", Levels.INFO)
do_task(upload_documentation, [base_dir, version], async_processes)
prettyprint("Step 8: Complete", Levels.INFO)
# Step 9: Upload downloads to JBoss.org
prettyprint("Step 9: Uploading downloads to JBoss.org", Levels.INFO)
do_task(upload_artifacts, [base_dir, version], async_processes)
prettyprint("Step 9: Complete", Levels.INFO)
## Wait for processes to finish
for p in async_processes:
p.start()
for p in async_processes:
p.join()
prettyprint("\n\n\nDone! Now all you need to do is the remaining post-release tasks as outlined in https://docspace.corp.redhat.com/docs/DOC-28594", Levels.INFO)
if __name__ == "__main__":
release()
|
|
"""
Utility functions to check the availability of dependencies
and suggest how it may be installed
"""
import sys
# Only really old versions of Python don't have sys.hexversion. We don't
# support them. The logging module was introduced in Python 2.3
if not hasattr(sys, 'hexversion') or sys.hexversion < 0x20300F0:
sys.exit(
'Python version: %s\n'
'PyBitmessage requires Python 2.7.4 or greater (but not Python 3)'
% sys.version
)
import logging
import os
from importlib import import_module
# We can now use logging so set up a simple configuration
formatter = logging.Formatter('%(levelname)s: %(message)s')
handler = logging.StreamHandler(sys.stdout)
handler.setFormatter(formatter)
logger = logging.getLogger('both')
logger.addHandler(handler)
logger.setLevel(logging.ERROR)
OS_RELEASE = {
"Debian GNU/Linux".lower(): "Debian",
"fedora": "Fedora",
"opensuse": "openSUSE",
"ubuntu": "Ubuntu",
"gentoo": "Gentoo",
"calculate": "Gentoo"
}
PACKAGE_MANAGER = {
"OpenBSD": "pkg_add",
"FreeBSD": "pkg install",
"Debian": "apt-get install",
"Ubuntu": "apt-get install",
"Ubuntu 12": "apt-get install",
"openSUSE": "zypper install",
"Fedora": "dnf install",
"Guix": "guix package -i",
"Gentoo": "emerge"
}
PACKAGES = {
"PyQt4": {
"OpenBSD": "py-qt4",
"FreeBSD": "py27-qt4",
"Debian": "python-qt4",
"Ubuntu": "python-qt4",
"Ubuntu 12": "python-qt4",
"openSUSE": "python-qt",
"Fedora": "PyQt4",
"Guix": "[email protected]",
"Gentoo": "dev-python/PyQt4",
"optional": True,
"description":
"You only need PyQt if you want to use the GUI."
" When only running as a daemon, this can be skipped.\n"
"However, you would have to install it manually"
" because setuptools does not support PyQt."
},
"msgpack": {
"OpenBSD": "py-msgpack",
"FreeBSD": "py27-msgpack-python",
"Debian": "python-msgpack",
"Ubuntu": "python-msgpack",
"Ubuntu 12": "msgpack-python",
"openSUSE": "python-msgpack-python",
"Fedora": "python2-msgpack",
"Guix": "python2-msgpack",
"Gentoo": "dev-python/msgpack",
"optional": True,
"description":
"python-msgpack is recommended for improved performance of"
" message encoding/decoding"
},
"pyopencl": {
"FreeBSD": "py27-pyopencl",
"Debian": "python-pyopencl",
"Ubuntu": "python-pyopencl",
"Ubuntu 12": "python-pyopencl",
"Fedora": "python2-pyopencl",
"openSUSE": "",
"OpenBSD": "",
"Guix": "",
"Gentoo": "dev-python/pyopencl",
"optional": True,
"description":
"If you install pyopencl, you will be able to use"
" GPU acceleration for proof of work.\n"
"You also need a compatible GPU and drivers."
},
"setuptools": {
"OpenBSD": "py-setuptools",
"FreeBSD": "py27-setuptools",
"Debian": "python-setuptools",
"Ubuntu": "python-setuptools",
"Ubuntu 12": "python-setuptools",
"Fedora": "python2-setuptools",
"openSUSE": "python-setuptools",
"Guix": "python2-setuptools",
"Gentoo": "dev-python/setuptools",
"optional": False,
}
}
def detectOS():
"""Finding out what Operating System is running"""
if detectOS.result is not None:
return detectOS.result
if sys.platform.startswith('openbsd'):
detectOS.result = "OpenBSD"
elif sys.platform.startswith('freebsd'):
detectOS.result = "FreeBSD"
elif sys.platform.startswith('win'):
detectOS.result = "Windows"
elif os.path.isfile("/etc/os-release"):
detectOSRelease()
elif os.path.isfile("/etc/config.scm"):
detectOS.result = "Guix"
return detectOS.result
detectOS.result = None
def detectOSRelease():
"""Detecting the release of OS"""
with open("/etc/os-release", 'r') as osRelease:
version = None
for line in osRelease:
if line.startswith("NAME="):
detectOS.result = OS_RELEASE.get(
line.replace('"', '').split("=")[-1].strip().lower())
elif line.startswith("VERSION_ID="):
try:
version = float(line.split("=")[1].replace("\"", ""))
except ValueError:
pass
if detectOS.result == "Ubuntu" and version < 14:
detectOS.result = "Ubuntu 12"
def try_import(module, log_extra=False):
"""Try to import the non imported packages"""
try:
return import_module(module)
except ImportError:
module = module.split('.')[0]
logger.error('The %s module is not available.', module)
if log_extra:
logger.error(log_extra)
dist = detectOS()
logger.error(
'On %s, try running "%s %s" as root.',
dist, PACKAGE_MANAGER[dist], PACKAGES[module][dist])
return False
def check_ripemd160():
"""Check availability of the RIPEMD160 hash function"""
try:
from fallback import RIPEMD160Hash # pylint: disable=relative-import
except ImportError:
return False
return RIPEMD160Hash is not None
def check_sqlite():
"""Do sqlite check.
    Check that the sqlite3 module is available and that the SQLite
    library it links against is recent enough, using sys.hexversion
    to verify the Python version on this platform.
"""
if sys.hexversion < 0x020500F0:
logger.error(
'The sqlite3 module is not included in this version of Python.')
if sys.platform.startswith('freebsd'):
logger.error(
'On FreeBSD, try running "pkg install py27-sqlite3" as root.')
return False
sqlite3 = try_import('sqlite3')
if not sqlite3:
return False
logger.info('sqlite3 Module Version: %s', sqlite3.version)
logger.info('SQLite Library Version: %s', sqlite3.sqlite_version)
# sqlite_version_number formula: https://sqlite.org/c3ref/c_source_id.html
sqlite_version_number = (
sqlite3.sqlite_version_info[0] * 1000000 +
sqlite3.sqlite_version_info[1] * 1000 +
sqlite3.sqlite_version_info[2]
)
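    # e.g. SQLite 3.6.23 -> 3 * 1000000 + 6 * 1000 + 23 == 3006023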
conn = None
try:
try:
conn = sqlite3.connect(':memory:')
if sqlite_version_number >= 3006018:
sqlite_source_id = conn.execute(
'SELECT sqlite_source_id();'
).fetchone()[0]
logger.info('SQLite Library Source ID: %s', sqlite_source_id)
if sqlite_version_number >= 3006023:
compile_options = ', '.join(
[row[0] for row in conn.execute('PRAGMA compile_options;')])
logger.info(
'SQLite Library Compile Options: %s', compile_options)
# There is no specific version requirement as yet, so we just
# use the first version that was included with Python.
if sqlite_version_number < 3000008:
logger.error(
'This version of SQLite is too old.'
' PyBitmessage requires SQLite 3.0.8 or later')
return False
return True
except sqlite3.Error:
            logger.exception('An exception occurred while checking sqlite.')
return False
finally:
if conn:
conn.close()
def check_openssl():
"""Do openssl dependency check.
    Check for the OpenSSL library, the features it was compiled with,
    and its version.
"""
# pylint: disable=too-many-branches, too-many-return-statements
# pylint: disable=protected-access, redefined-outer-name
ctypes = try_import('ctypes')
if not ctypes:
logger.error('Unable to check OpenSSL.')
return False
# We need to emulate the way PyElliptic searches for OpenSSL.
if sys.platform == 'win32':
paths = ['libeay32.dll']
if getattr(sys, 'frozen', False):
import os.path
paths.insert(0, os.path.join(sys._MEIPASS, 'libeay32.dll'))
else:
paths = ['libcrypto.so', 'libcrypto.so.1.0.0']
if sys.platform == 'darwin':
paths.extend([
'libcrypto.dylib',
'/usr/local/opt/openssl/lib/libcrypto.dylib',
'./../Frameworks/libcrypto.dylib'
])
import re
if re.match(r'linux|darwin|freebsd', sys.platform):
try:
import ctypes.util
path = ctypes.util.find_library('ssl')
if path not in paths:
paths.append(path)
except:
pass
openssl_version = None
openssl_hexversion = None
openssl_cflags = None
cflags_regex = re.compile(r'(?:OPENSSL_NO_)(AES|EC|ECDH|ECDSA)(?!\w)')
import pyelliptic.openssl
for path in paths:
logger.info('Checking OpenSSL at %s', path)
try:
library = ctypes.CDLL(path)
except OSError:
continue
logger.info('OpenSSL Name: %s', library._name)
try:
openssl_version, openssl_hexversion, openssl_cflags = \
pyelliptic.openssl.get_version(library)
except AttributeError: # sphinx chokes
return True
if not openssl_version:
logger.error('Cannot determine version of this OpenSSL library.')
return False
logger.info('OpenSSL Version: %s', openssl_version)
logger.info('OpenSSL Compile Options: %s', openssl_cflags)
# PyElliptic uses EVP_CIPHER_CTX_new and EVP_CIPHER_CTX_free which were
# introduced in 0.9.8b.
if openssl_hexversion < 0x90802F:
logger.error(
'This OpenSSL library is too old. PyBitmessage requires'
' OpenSSL 0.9.8b or later with AES, Elliptic Curves (EC),'
' ECDH, and ECDSA enabled.')
return False
matches = cflags_regex.findall(openssl_cflags)
if matches:
logger.error(
'This OpenSSL library is missing the following required'
' features: %s. PyBitmessage requires OpenSSL 0.9.8b'
' or later with AES, Elliptic Curves (EC), ECDH,'
' and ECDSA enabled.', ', '.join(matches))
return False
return True
return False
# ..todo:: The minimum versions of pythondialog and dialog need to be determined
def check_curses():
"""Do curses dependency check.
    Check whether the curses module is available; the curses interface
    also requires the `pythondialog <https://pypi.org/project/pythondialog>`_
    package and the dialog utility.
"""
if sys.hexversion < 0x20600F0:
logger.error(
'The curses interface requires the pythondialog package and'
' the dialog utility.')
return False
curses = try_import('curses')
if not curses:
logger.error('The curses interface can not be used.')
return False
logger.info('curses Module Version: %s', curses.version)
dialog = try_import('dialog')
if not dialog:
logger.error('The curses interface can not be used.')
return False
import subprocess
try:
subprocess.check_call(['which', 'dialog'])
except subprocess.CalledProcessError:
logger.error(
'Curses requires the `dialog` command to be installed as well as'
' the python library.')
return False
logger.info('pythondialog Package Version: %s', dialog.__version__)
dialog_util_version = dialog.Dialog().cached_backend_version
# The pythondialog author does not like Python2 str, so we have to use
# unicode for just the version otherwise we get the repr form which
# includes the module and class names along with the actual version.
logger.info('dialog Utility Version %s', unicode(dialog_util_version))
return True
def check_pyqt():
"""Do pyqt dependency check.
    Check for PyQt4 and its version; PyQt 4.8 or later (with Qt 4.7 or
    later) is required.
"""
QtCore = try_import(
'PyQt4.QtCore', 'PyBitmessage requires PyQt 4.8 or later and Qt 4.7 or later.')
if not QtCore:
return False
logger.info('PyQt Version: %s', QtCore.PYQT_VERSION_STR)
logger.info('Qt Version: %s', QtCore.QT_VERSION_STR)
passed = True
if QtCore.PYQT_VERSION < 0x40800:
logger.error(
            'This version of PyQt is too old. PyBitmessage requires'
' PyQt 4.8 or later.')
passed = False
if QtCore.QT_VERSION < 0x40700:
logger.error(
            'This version of Qt is too old. PyBitmessage requires'
' Qt 4.7 or later.')
passed = False
return passed
def check_msgpack():
"""Do sgpack module check.
simply checking if msgpack package with all its dependency
is available or not as recommended for messages coding.
"""
return try_import(
'msgpack', 'It is highly recommended for messages coding.') is not False
def check_dependencies(verbose=False, optional=False):
"""Do dependency check.
    Check that the required dependencies (and, when `optional` is True,
    the optional ones as well) are installed and recent enough, logging
    installation hints for the detected operating system.
"""
if verbose:
logger.setLevel(logging.INFO)
has_all_dependencies = True
# Python 2.7.4 is the required minimum.
# (https://bitmessage.org/forum/index.php?topic=4081.0)
# Python 3+ is not supported, but it is still useful to provide
# information about our other requirements.
logger.info('Python version: %s', sys.version)
if sys.hexversion < 0x20704F0:
logger.error(
'PyBitmessage requires Python 2.7.4 or greater'
' (but not Python 3+)')
has_all_dependencies = False
if sys.hexversion >= 0x3000000:
logger.error(
'PyBitmessage does not support Python 3+. Python 2.7.4'
' or greater is required.')
has_all_dependencies = False
check_functions = [check_ripemd160, check_sqlite, check_openssl]
if optional:
check_functions.extend([check_msgpack, check_pyqt, check_curses])
# Unexpected exceptions are handled here
for check in check_functions:
try:
has_all_dependencies &= check()
except:
logger.exception('%s failed unexpectedly.', check.__name__)
has_all_dependencies = False
if not has_all_dependencies:
sys.exit(
'PyBitmessage cannot start. One or more dependencies are'
' unavailable.'
)
logger.setLevel(0)
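# Hedged usage sketch (not part of the original module): a launcher script
# could run the full check, including the optional GUI/curses dependencies,
# with
#     check_dependencies(verbose=True, optional=True)
# which logs what was found and calls sys.exit() if a hard dependency is
# missing.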
|
|
import numpy
from palm.util import n_choose_k
from palm.route_collection import RouteCollectionFactory
class Route(object):
'''
A generic route class for AggregatedKineticModels. Routes represent
transitions between states in such models.
Parameters
----------
id_str : string
Identifier string for this route.
start_state_id, end_state_id : string
The identifier strings for the states that are connected by this route.
rate_id : string
A string that denotes the rate law that governs this route.
multiplicity : int
A combinatoric factor for this route, which is determined by
the number of fluorophores in the initial microstate of
the transition.
'''
def __init__(self, id_str, start_state_id, end_state_id, rate_id,
multiplicity):
self.id = id_str
self.start_state_id = start_state_id
self.end_state_id = end_state_id
self.rate_id = rate_id
self.multiplicity = multiplicity
def __str__(self):
my_str = "%s %s %s %s %d" % (
self.id, self.start_state_id, self.end_state_id,
self.rate_id, self.multiplicity)
return my_str
def get_id(self):
return self.id
def get_start_state(self):
return self.start_state_id
def get_end_state(self):
return self.end_state_id
def get_multiplicity(self):
return self.multiplicity
def as_dict(self):
return {'start_state':self.start_state_id,
'end_state':self.end_state_id,
'rate_id':self.rate_id,
'multiplicity':self.multiplicity}
class SingleDarkRouteMapperFactory(object):
"""
This factory class creates a route mapper for
a blink model with one dark state.
Attributes
----------
transition_factory : class
A factory class for transitions, which are simply used as
helper objects when enumerating all of the routes.
Parameters
----------
parameter_set : ParameterSet
route_factory : class, optional
A class for making Route objects.
max_A : int, optional
Number of fluorophores that can be simultaneously active.
"""
def __init__(self, parameter_set, route_factory=Route, max_A=5):
super(SingleDarkRouteMapperFactory, self).__init__()
self.parameter_set = parameter_set
self.route_factory = route_factory
self.max_A = max_A
self.transition_factory = SingleDarkTransition
def create_route_mapper(self):
"""
Creates a method that builds a RouteCollection, made up of
all possible routes in the model.
Returns
-------
map_routes : callable f(state_collection)
A method that builds a RouteCollection.
"""
activation = self.transition_factory(
-1, 1, 0, 0, {'I':1}, 'ka')
blinking = self.transition_factory(
0, -1, 1, 0, {'A':1}, 'kd')
recovery = self.transition_factory(
0, 1, -1, 0, {'D':1}, 'kr')
bleaching = self.transition_factory(
0, -1, 0, 1, {'A':1}, 'kb')
allowed_transitions_list = [activation, blinking, recovery, bleaching]
def map_routes(state_collection):
"""
Build a route collection, based on the states in `state_collection`.
Parameters
----------
state_collection : StateCollection
States for a model with one dark state.
Returns
-------
route_collection : RouteCollection
"""
rc_factory = RouteCollectionFactory()
for start_id, start_state in state_collection.iter_states():
route_iterator = self._enumerate_allowed_transitions(
start_state, allowed_transitions_list)
for end_id, transition in route_iterator:
rate_id = transition.rate_id
multiplicity = transition.compute_multiplicity(start_state)
route_id = "%s__%s" % (start_id, end_id)
new_route = self.route_factory(route_id, start_id, end_id,
rate_id, multiplicity)
rc_factory.add_route(new_route)
route_collection = rc_factory.make_route_collection()
return route_collection
return map_routes
def _enumerate_allowed_transitions(self, start_state,
allowed_transitions_list):
"""
Iterate through possible transitions and filter out those
that lead to invalid states.
Parameters
----------
start_state : State
Enumerate transitions that lead from this state to other states.
allowed_transitions_list : list
Possible transitions from `start_state` to other states.
Returns
-------
end_id : string
Transition will lead from `start_state` to the state with this id.
transition : SingleDarkTransition
"""
for transition in allowed_transitions_list:
I2 = start_state['I'] + transition.get_dPop('I')
A2 = start_state['A'] + transition.get_dPop('A')
D2 = start_state['D'] + transition.get_dPop('D')
B2 = start_state['B'] + transition.get_dPop('B')
end_state_array = numpy.array([I2, A2, D2, B2])
no_negative_pop = len(numpy.where(end_state_array < 0)[0]) == 0
if A2 <= self.max_A and no_negative_pop and\
transition.is_allowed(start_state):
end_id = "%d_%d_%d_%d" % (I2, A2, D2, B2)
yield end_id, transition
class SingleDarkTransition(object):
"""
A helper class for SingleDarkRouteMapperFactory. Represents information
about a transition between two states.
Attributes
----------
dPop_dict : dict
The change in populations of microstates for this transition.
Parameters
----------
dI, dA, dD, dB : int
Changes in microstate populations. The little 'd' here means 'delta'.
reacting_species_dict : dict
The stoichiometry of the reactants for this transition.
        For example, if the transition takes I to A, then
        `reacting_species_dict` will be a dictionary like ``{'I': 1}``.
rate_id : string
A string that denotes the rate law that governs this transition.
"""
def __init__(self, dI, dA, dD, dB, reacting_species_dict, rate_id):
self.dPop_dict = {'I':dI, 'A':dA, 'D':dD, 'B':dB}
self.reacting_species_dict = reacting_species_dict
self.rate_id = rate_id
def __str__(self):
return "%s %d_%d_%d_%d" % (self.label,
self.dPop_dict['I'],
self.dPop_dict['A'],
self.dPop_dict['D'],
self.dPop_dict['B'])
def get_dPop(self, species_label):
return self.dPop_dict[species_label]
def is_allowed(self, state):
"""
Determine whether state can undergo this transition,
based on whether the state has the requisite reactants
to undergo the transition.
Parameters
----------
state : State
Starting state for the transition.
Returns
-------
is_transition_allowed : bool
"""
is_transition_allowed = True
for rs in self.reacting_species_dict.iterkeys():
num_reactants = self.reacting_species_dict[rs]
if rs == 'I':
species_starting_pop = state['I']
elif rs == 'A':
species_starting_pop = state['A']
elif rs == 'D':
species_starting_pop = state['D']
elif rs == 'B':
species_starting_pop = state['B']
if species_starting_pop < num_reactants:
# we need at least num_reactants for the transition
is_transition_allowed = False
break
return is_transition_allowed
def compute_multiplicity(self, start_state):
return 10**self.compute_log_combinatoric_factor(start_state)
def compute_log_combinatoric_factor(self, start_state):
"""
Compute combinatoric factor for this transition,
which is based on the population of the reactant
species (microstate) and the stoichiometry of
the transition.
Parameters
----------
        start_state : State
Returns
-------
log_combinatoric_factor : float
Log base 10 combinatoric factor.
"""
# reacting_species_id = I, A, D, or B
reacting_species_id = self.reacting_species_dict.keys()[0]
n = start_state[reacting_species_id]
k = abs(self.reacting_species_dict[reacting_species_id])
combinatoric_factor = n_choose_k(n,k)
log_combinatoric_factor = numpy.log10(combinatoric_factor)
return log_combinatoric_factor
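# Hedged example (assumes a State can be treated as a mapping from species
# labels to integer populations; the real State class lives in another palm
# module):
#     blinking = SingleDarkTransition(0, -1, 1, 0, {'A': 1}, 'kd')
#     start = {'I': 0, 'A': 3, 'D': 0, 'B': 0}
#     blinking.is_allowed(start)            # True, since start['A'] >= 1
#     blinking.compute_multiplicity(start)  # ~3.0, i.e. 10**log10(n_choose_k(3, 1))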
class DoubleDarkRouteMapperFactory(object):
"""
This factory class creates a route mapper for
a blink model with two dark states.
Attributes
----------
transition_factory : class
A factory class for transitions, which are simply used as
helper objects when enumerating all of the routes.
Parameters
----------
parameter_set : ParameterSet
route_factory : class, optional
A class for making Route objects.
max_A : int, optional
Number of fluorophores that can be simultaneously active.
"""
def __init__(self, parameter_set, route_factory=Route, max_A=5):
super(DoubleDarkRouteMapperFactory, self).__init__()
self.parameter_set = parameter_set
self.route_factory = route_factory
self.max_A = max_A
self.transition_factory = DoubleDarkTransition
def create_route_mapper(self):
"""
Creates a method that builds a RouteCollection, made up of
all possible routes in the model.
Returns
-------
map_routes : callable f(state_collection)
A method that builds a RouteCollection.
"""
activation = self.transition_factory(
-1, 1, 0, 0, 0, {'I':1}, 'ka')
blinking1 = self.transition_factory(
0, -1, 1, 0, 0, {'A':1}, 'kd1')
recovery1 = self.transition_factory(
0, 1, -1, 0, 0, {'D1':1}, 'kr1')
blinking2 = self.transition_factory(
0, -1, 0, 1, 0, {'A':1}, 'kd2')
recovery2 = self.transition_factory(
0, 1, 0, -1, 0, {'D2':1}, 'kr2')
bleaching = self.transition_factory(
0, -1, 0, 0, 1, {'A':1}, 'kb')
allowed_transitions_list = [activation, blinking1, recovery1,
blinking2, recovery2, bleaching]
def map_routes(state_collection):
"""
Build a route collection, based on the states in `state_collection`.
Parameters
----------
state_collection : StateCollection
States for a model with two dark states.
Returns
-------
route_collection : RouteCollection
"""
rc_factory = RouteCollectionFactory()
for start_id, start_state in state_collection.iter_states():
route_iterator = self._enumerate_allowed_transitions(
start_state, allowed_transitions_list)
for end_id, transition in route_iterator:
rate_id = transition.rate_id
multiplicity = transition.compute_multiplicity(start_state)
route_id = "%s__%s" % (start_id, end_id)
new_route = self.route_factory(route_id, start_id, end_id,
rate_id, multiplicity)
rc_factory.add_route(new_route)
route_collection = rc_factory.make_route_collection()
return route_collection
return map_routes
def _enumerate_allowed_transitions(self, start_state,
allowed_transitions_list):
"""
Iterate through possible transitions and filter out those
that lead to invalid states.
Parameters
----------
start_state : State
Enumerate transitions that lead from this state to other states.
allowed_transitions_list : list
Possible transitions from `start_state` to other states.
Returns
-------
end_id : string
Transition will lead from `start_state` to the state with this id.
        transition : DoubleDarkTransition
"""
for transition in allowed_transitions_list:
end_I = start_state['I'] + transition.get_dPop('I')
end_A = start_state['A'] + transition.get_dPop('A')
end_D1 = start_state['D1'] + transition.get_dPop('D1')
end_D2 = start_state['D2'] + transition.get_dPop('D2')
end_B = start_state['B'] + transition.get_dPop('B')
end_state_array = numpy.array([end_I, end_A, end_D1, end_D2, end_B])
no_negative_pop = len(numpy.where(end_state_array < 0)[0]) == 0
if end_A <= self.max_A and no_negative_pop and\
transition.is_allowed(start_state):
end_id = "%d_%d_%d_%d_%d" % (end_I, end_A, end_D1, end_D2, end_B)
yield end_id, transition
class DoubleDarkTransition(object):
"""
A helper class for DoubleDarkRouteMapperFactory. Represents information
about a transition between two states.
Attributes
----------
dPop_dict : dict
The change in populations of microstates for this transition.
Parameters
----------
dI, dA, dD1, dD2, dB : int
Changes in microstate populations. The little 'd' here means 'delta'.
reacting_species_dict : dict
The stoichiometry of the reactants for this transition.
        For example, if the transition takes I to A, then
        `reacting_species_dict` will be a dictionary like ``{'I': 1}``.
rate_id : string
A string that denotes the rate law that governs this route.
"""
def __init__(self, dI, dA, dD1, dD2, dB, reacting_species_dict, rate_id):
self.dPop_dict = {'I':dI, 'A':dA, 'D1':dD1, 'D2':dD2, 'B':dB}
self.reacting_species_dict = reacting_species_dict
self.rate_id = rate_id
def __str__(self):
return "%s %d_%d_%d_%d_%d" % (self.label,
self.dPop_dict['I'],
self.dPop_dict['A'],
self.dPop_dict['D1'],
self.dPop_dict['D2'],
self.dPop_dict['B'])
def get_dPop(self, species_label):
return self.dPop_dict[species_label]
def is_allowed(self, start_state):
"""
Determine whether state can undergo this transition,
based on whether the state has the requisite reactants
to undergo the transition.
Parameters
----------
state : State
Starting state for the transition.
Returns
-------
is_transition_allowed : bool
"""
return_value = True
for rs in self.reacting_species_dict.iterkeys():
num_reactants = self.reacting_species_dict[rs]
if rs == 'I':
species_starting_pop = start_state['I']
elif rs == 'A':
species_starting_pop = start_state['A']
elif rs == 'D1':
species_starting_pop = start_state['D1']
elif rs == 'D2':
species_starting_pop = start_state['D2']
elif rs == 'B':
species_starting_pop = start_state['B']
if species_starting_pop < num_reactants:
# we need at least num_reactants for the transition
return_value = False
break
return return_value
def compute_multiplicity(self, start_state):
return 10**self.compute_log_combinatoric_factor(start_state)
def compute_log_combinatoric_factor(self, start_state):
"""
Compute combinatoric factor for this transition,
which is based on the population of the reactant
species (microstate) and the stoichiometry of
the transition.
Parameters
----------
        start_state : State
Returns
-------
log_combinatoric_factor : float
Log base 10 combinatoric factor.
"""
# reacting_species_id = I, A, D1, D2, or B
reacting_species_id = self.reacting_species_dict.keys()[0]
n = start_state[reacting_species_id]
k = abs(self.reacting_species_dict[reacting_species_id])
combinatoric_factor = n_choose_k(n,k)
return numpy.log10(combinatoric_factor)
class ConnectedDarkRouteMapperFactory(object):
"""
This factory class creates a route mapper for
a blink model with two, connected dark states.
Attributes
----------
transition_factory : class
A factory class for transitions, which are simply used as
helper objects when enumerating all of the routes.
Parameters
----------
parameter_set : ParameterSet
route_factory : class, optional
A class for making Route objects.
max_A : int, optional
Number of fluorophores that can be simultaneously active.
"""
def __init__(self, parameter_set, route_factory=Route, max_A=5):
super(ConnectedDarkRouteMapperFactory, self).__init__()
self.parameter_set = parameter_set
self.route_factory = route_factory
self.max_A = max_A
self.transition_factory = DoubleDarkTransition
def create_route_mapper(self):
"""
Creates a method that builds a RouteCollection, made up of
all possible routes in the model.
Returns
-------
map_routes : callable f(state_collection)
A method that builds a RouteCollection.
"""
activation = self.transition_factory(
-1, 1, 0, 0, 0, {'I':1}, 'ka')
blinking1 = self.transition_factory(
0, -1, 1, 0, 0, {'A':1}, 'kd1')
recovery1 = self.transition_factory(
0, 1, -1, 0, 0, {'D1':1}, 'kr1')
blinking2 = self.transition_factory(
0, 0, -1, 1, 0, {'D1':1}, 'kd2')
recovery2 = self.transition_factory(
0, 0, 1, -1, 0, {'D2':1}, 'kr2b')
bleaching = self.transition_factory(
0, -1, 0, 0, 1, {'A':1}, 'kb')
allowed_transitions_list = [activation, blinking1, recovery1,
blinking2, recovery2, bleaching]
def map_routes(state_collection):
"""
Build a route collection, based on the states in `state_collection`.
Parameters
----------
state_collection : StateCollection
States for a model with two dark states.
Returns
-------
route_collection : RouteCollection
"""
rc_factory = RouteCollectionFactory()
for start_id, start_state in state_collection.iter_states():
route_iterator = self._enumerate_allowed_transitions(
start_state, allowed_transitions_list)
for end_id, transition in route_iterator:
rate_id = transition.rate_id
multiplicity = transition.compute_multiplicity(start_state)
route_id = "%s__%s" % (start_id, end_id)
new_route = self.route_factory(route_id, start_id, end_id,
rate_id, multiplicity)
rc_factory.add_route(new_route)
route_collection = rc_factory.make_route_collection()
return route_collection
return map_routes
def _enumerate_allowed_transitions(self, start_state,
allowed_transitions_list):
"""
Iterate through possible transitions and filter out those
that lead to invalid states.
Parameters
----------
start_state : State
Enumerate transitions that lead from this state to other states.
allowed_transitions_list : list
Possible transitions from `start_state` to other states.
Returns
-------
end_id : string
Transition will lead from `start_state` to the state with this id.
        transition : DoubleDarkTransition
"""
for transition in allowed_transitions_list:
end_I = start_state['I'] + transition.get_dPop('I')
end_A = start_state['A'] + transition.get_dPop('A')
end_D1 = start_state['D1'] + transition.get_dPop('D1')
end_D2 = start_state['D2'] + transition.get_dPop('D2')
end_B = start_state['B'] + transition.get_dPop('B')
end_state_array = numpy.array([end_I, end_A, end_D1, end_D2, end_B])
no_negative_pop = len(numpy.where(end_state_array < 0)[0]) == 0
if end_A <= self.max_A and no_negative_pop and\
transition.is_allowed(start_state):
end_id = "%d_%d_%d_%d_%d" % (end_I, end_A, end_D1, end_D2, end_B)
yield end_id, transition
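# Hedged usage sketch (parameter_set and state_collection come from other palm
# modules not shown here; the names are placeholders):
#     factory = SingleDarkRouteMapperFactory(parameter_set, max_A=5)
#     map_routes = factory.create_route_mapper()
#     route_collection = map_routes(state_collection)
# The resulting RouteCollection holds one Route per allowed transition between
# the enumerated states, with ids of the form "<start_id>__<end_id>".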
|
|
# Copyright (c) 2019 Manfred Moitzi
# License: MIT License
# created 2019-02-14
import pytest
import ezdxf
from ezdxf.lldxf.const import DXFAttributeError, DXF12, DXFValueError
from ezdxf.lldxf.tagwriter import TagCollector
from ezdxf.entities import DXFEntity, is_graphic_entity, Insert
from ezdxf.lldxf.extendedtags import DXFTag
from ezdxf.entities.line import Line
ENTITY = """0
DXFENTITY
5
FFFF
330
ABBA
"""
@pytest.fixture
def entity():
return DXFEntity.from_text(ENTITY)
def test_dxfentity_is_set_compatible():
assert len({DXFEntity(), DXFEntity(), DXFEntity()}) == 3
def test_is_not_graphic_entity(entity):
assert is_graphic_entity(entity) is False
def test_default_constructor():
entity = DXFEntity()
assert entity.dxftype() == "DXFENTITY"
assert entity.dxf.handle is None
assert entity.dxf.owner is None
assert entity == entity
assert entity != DXFEntity()
def test_init_with_tags(entity):
assert entity.dxftype() == "DXFENTITY"
assert entity.dxf.handle == "FFFF"
assert entity.dxf.owner == "ABBA"
assert str(entity) == "DXFENTITY(#FFFF)"
assert (
repr(entity)
== "<class 'ezdxf.entities.dxfentity.DXFEntity'> DXFENTITY(#FFFF)"
)
def test_invalid_dxf_attrib(entity):
with pytest.raises(DXFAttributeError):
_ = entity.dxf.color
def test_get_all_dxf_attribs(entity):
dxfattribs = entity.dxfattribs()
assert len(dxfattribs) == 2
assert dxfattribs["handle"] == "FFFF"
assert dxfattribs["owner"] == "ABBA"
def test_get_dxf_attribs_and_discard_some(entity):
dxfattribs = entity.dxfattribs(drop={"owner"})
assert len(dxfattribs) == 1
assert dxfattribs["handle"] == "FFFF"
assert "owner" not in dxfattribs
def test_write_r12_dxf(entity):
tagwriter = TagCollector(dxfversion=DXF12)
entity.export_dxf(tagwriter)
tag = tagwriter.tags
assert len(tag) == 2
assert tag[0] == (0, "DXFENTITY")
assert tag[1] == (5, "FFFF")
def test_write_latest_dxf(entity):
tagwriter = TagCollector()
entity.export_dxf(tagwriter)
tag = tagwriter.tags
assert len(tag) == 3
assert tag[0] == (0, "DXFENTITY")
assert tag[1] == (5, "FFFF")
assert tag[2] == (330, "ABBA")
def test_is_alive(entity):
assert entity.is_alive is True
entity.destroy()
assert entity.is_alive is False
def test_calling_destroy_multiple_times(entity):
entity.destroy()
entity.destroy(), "2nd call should do nothing"
entity.destroy(), "3rd call should do nothing"
assert entity.is_alive is False
def test_dont_write_handles_for_R12(entity):
from ezdxf.lldxf.tagwriter import TagWriter
from io import StringIO
s = StringIO()
t = TagWriter(s)
t.dxfversion = DXF12
t.write_handles = False
entity.export_dxf(t)
result = s.getvalue()
assert "5\nFFFF\n" not in result
def test_uuid():
e1 = DXFEntity()
e2 = DXFEntity()
assert e1.dxf.handle == e2.dxf.handle, "can't distinguish by handle"
assert e1.uuid != e2.uuid, "can distinguish by uuid"
def test_source_of_copy_is_none_for_a_new_entity():
e = DXFEntity()
assert e.source_of_copy is None
def test_set_source_of_copy():
e = DXFEntity()
e.set_source_of_copy(e)
assert e.source_of_copy is e
def test_delete_missing_source_of_copy_without_exception():
e = DXFEntity()
e.del_source_of_copy()
assert True is True
def test_source_block_reference_is_none_for_a_new_entity():
e = DXFEntity()
assert e.has_source_block_reference is False
assert e.source_block_reference is None
def test_set_source_block_reference():
e = DXFEntity()
insert = Insert()
e.set_source_block_reference(insert)
assert e.has_source_block_reference is True
assert e.source_block_reference is insert
def test_setting_source_block_reference_twice_without_exception():
e = DXFEntity()
e.set_source_block_reference(Insert())
e.set_source_block_reference(Insert())
assert True is True
def test_setting_source_block_reference_a_second_time_has_no_effect():
e = DXFEntity()
insert = Insert()
e.set_source_block_reference(insert)
e.set_source_block_reference(Insert())
assert (
e.source_block_reference is insert
), "source block reference should not change"
def test_do_not_copy_source_block_reference():
e = DXFEntity()
insert = Insert()
e.set_source_block_reference(insert)
copy = e.copy()
assert copy.has_source_block_reference is False
assert copy.source_block_reference is None
def test_delete_missing_source_block_reference_without_exception():
e = DXFEntity()
e.del_source_block_reference()
assert True is True
LINE_DATA = """ 0
LINE
5
0
330
0
100
AcDbEntity
8
0
100
AcDbLine
10
0.0
20
0.0
30
0.0
11
1.0
21
1.0
31
1.0
"""
@pytest.fixture
def line():
return Line.from_text(LINE_DATA)
def test_str(line):
assert str(line) == "LINE(#0)"
assert repr(line) == "<class 'ezdxf.entities.line.Line'> LINE(#0)"
def test_get_dxf_default(line):
# get_dxf_default returns the DXF default value for unset attributes
assert line.dxf.get_default("thickness") == 0
# get returns the given default for unset attributes, which is None by default :)
assert line.dxf.get("thickness") is None
def test_ocs(line):
ocs = line.ocs()
assert ocs.uz == (0, 0, 1)
assert ocs.transform is False
class TestAppData:
@pytest.fixture
def entity(self):
return Line.from_text(LINE_DATA)
def test_new_app_data(self, entity):
assert entity.has_app_data("{MOZMAN") is False
entity.set_app_data("{MOZMAN", tags=[DXFTag(330, "DEAD")])
assert entity.has_app_data("{MOZMAN") is True
def test_get_app_data(self, entity):
entity.set_app_data("{MOZMAN", tags=[DXFTag(330, "DEAD")])
app_data = entity.get_app_data("{MOZMAN")
assert len(app_data) == 1
assert DXFTag(330, "DEAD") == app_data[0]
def test_set_app_data(self, entity):
entity.set_app_data("{MOZMAN", tags=[DXFTag(330, "DEAD")])
app_data = entity.get_app_data("{MOZMAN")
assert 1 == len(app_data)
assert DXFTag(330, "DEAD") == app_data[0]
app_data.append(DXFTag(360, "DEAD2"))
entity.set_app_data("{MOZMAN", app_data)
app_data = entity.get_app_data("{MOZMAN")
assert 2 == len(app_data)
assert DXFTag(330, "DEAD") == app_data[0]
assert DXFTag(360, "DEAD2") == app_data[1]
def test_not_existing_appid(self, entity):
with pytest.raises(DXFValueError):
entity.get_app_data("XYZ")
class TestXData:
@pytest.fixture
def entity(self):
return Line.from_text(LINE_DATA)
def test_new_app_data(self, entity):
assert entity.has_xdata("MOZMAN") is False
entity.set_xdata("MOZMAN", tags=[DXFTag(1000, "Extended Data String")])
assert entity.has_xdata("MOZMAN") is True
def test_get_xdata(self, entity):
entity.set_xdata("MOZMAN", tags=[DXFTag(1000, "Extended Data String")])
xdata = entity.get_xdata("MOZMAN")
assert len(xdata) == 1
assert DXFTag(1000, "Extended Data String") == xdata[0]
def test_set_xdata(self, entity):
entity.set_xdata("MOZMAN", tags=[DXFTag(1000, "Extended Data String")])
xdata = entity.get_xdata("MOZMAN")
assert 1 == len(xdata)
assert DXFTag(1000, "Extended Data String") == xdata[0]
xdata.append(DXFTag(1000, "Extended Data String2"))
entity.set_xdata("MOZMAN", xdata)
xdata = entity.get_xdata("MOZMAN")
assert 2 == len(xdata)
assert DXFTag(1000, "Extended Data String") == xdata[0]
assert DXFTag(1000, "Extended Data String2") == xdata[1]
def test_not_existing_appid(self, entity):
with pytest.raises(DXFValueError):
entity.get_xdata("XYZ")
def test_get_xdata_list_exception(self, entity):
with pytest.raises(DXFValueError):
_ = entity.get_xdata_list("ACAD", "DSTYLE")
entity.set_xdata("ACAD", tags=[DXFTag(1000, "Extended Data String")])
with pytest.raises(DXFValueError):
_ = entity.get_xdata_list("ACAD", "DSTYLE")
def test_has_xdata_list(self, entity):
assert entity.has_xdata_list("ACAD", "DSTYLE") is False
entity.set_xdata("ACAD", tags=[DXFTag(1000, "Extended Data String")])
assert entity.has_xdata_list("ACAD", "DSTYLE") is False
def test_set_xdata_list(self, entity):
entity.set_xdata_list("ACAD", "DSTYLE", [(1070, 1), (1000, "String")])
xdata_list = entity.get_xdata_list("ACAD", "DSTYLE")
assert len(xdata_list) == 5
assert xdata_list == [
(1000, "DSTYLE"),
(1002, "{"),
(1070, 1),
(1000, "String"),
(1002, "}"),
]
# add another list to ACAD
entity.set_xdata_list("ACAD", "MOZMAN", [(1070, 2), (1000, "mozman")])
xdata = entity.get_xdata_list("ACAD", "MOZMAN")
assert len(xdata) == 5
assert xdata == [
(1000, "MOZMAN"),
(1002, "{"),
(1070, 2),
(1000, "mozman"),
(1002, "}"),
]
xdata = entity.get_xdata("ACAD")
assert len(xdata) == 10
def test_discard_xdata_list(self, entity):
entity.set_xdata_list("ACAD", "DSTYLE", [(1070, 1), (1000, "String")])
xdata_list = entity.get_xdata_list("ACAD", "DSTYLE")
assert len(xdata_list) == 5
entity.discard_xdata_list("ACAD", "DSTYLE")
with pytest.raises(DXFValueError):
_ = entity.get_xdata_list("ACAD", "DSTYLE")
entity.discard_xdata_list("ACAD", "DSTYLE")
def test_replace_xdata_list(self, entity):
entity.set_xdata_list("ACAD", "DSTYLE", [(1070, 1), (1000, "String")])
xdata_list = entity.get_xdata_list("ACAD", "DSTYLE")
assert len(xdata_list) == 5
assert xdata_list == [
(1000, "DSTYLE"),
(1002, "{"),
(1070, 1),
(1000, "String"),
(1002, "}"),
]
entity.set_xdata_list(
"ACAD", "DSTYLE", [(1070, 2), (1000, "mozman"), (1000, "data")]
)
xdata_list = entity.get_xdata_list("ACAD", "DSTYLE")
assert len(xdata_list) == 6
assert xdata_list == [
(1000, "DSTYLE"),
(1002, "{"),
(1070, 2),
(1000, "mozman"),
(1000, "data"),
(1002, "}"),
]
# replace not existing list -> append list
entity.replace_xdata_list("ACAD", "MOZMAN", [(1070, 3), (1000, "new")])
xdata_list = entity.get_xdata_list("ACAD", "MOZMAN")
assert len(xdata_list) == 5
assert xdata_list == [
(1000, "MOZMAN"),
(1002, "{"),
(1070, 3),
(1000, "new"),
(1002, "}"),
]
xdata = entity.get_xdata("ACAD")
assert len(xdata) == 6 + 5
class TestReactors:
@pytest.fixture
def entity(self):
return Line.from_text(LINE_DATA)
def test_has_no_reactors(self, entity):
assert entity.has_reactors() is False
def test_set_reactors(self, entity):
entity.set_reactors(["A000", "B000", "C000"])
assert entity.has_reactors() is True
handles = entity.get_reactors()
assert ["A000", "B000", "C000"] == handles
def test_append_handle(self, entity):
entity.set_reactors([])
assert 0 == len(entity.get_reactors())
entity.append_reactor_handle("A000")
assert "A000" in entity.get_reactors()
entity.append_reactor_handle("B000")
assert "B000" in entity.get_reactors()
assert 2 == len(entity.get_reactors())
entity.append_reactor_handle("B000") # same handle again
assert "B000" in entity.get_reactors()
assert 2 == len(
entity.get_reactors()
), "handle entries should be unique"
entity.append_reactor_handle(
"FF"
) # smallest handle, should be first handle in reactors
assert entity.get_reactors()[0] == "FF"
entity.append_reactor_handle(
"FFFF"
) # biggest handle, should be last handle in reactors
assert "FFFF" == entity.get_reactors()[-1]
def test_discard_handle(self, entity):
entity.set_reactors(["A000", "B000", "C000"])
entity.discard_reactor_handle("A000")
assert 2 == len(entity.get_reactors()), "Handle not deleted"
entity.discard_reactor_handle("FFFF") # ignore not existing handles
assert 2 == len(entity.get_reactors())
class TestGetLayout:
@pytest.fixture(scope="class")
def doc(self):
return ezdxf.new()
def test_get_layout_model_space(self, doc):
msp = doc.modelspace()
circle = msp.add_circle(center=(0, 0), radius=1)
layout = circle.get_layout()
assert msp.name == layout.name
def test_get_layout_paper_space(self, doc):
psp = doc.layout()
circle = psp.add_circle(center=(0, 0), radius=1)
layout = circle.get_layout()
assert psp.name == layout.name
def test_get_layout_block(self, doc):
block = doc.blocks.new("TEST")
circle = block.add_circle(center=(0, 0), radius=1)
layout = circle.get_layout()
assert block.name == layout.name
def test_get_layout_without_layout(self, doc):
msp = doc.modelspace()
circle = msp.add_circle(center=(0, 0), radius=1)
msp.unlink_entity(circle)
assert circle.get_layout() is None
|
|
# -*- coding: utf-8 -*-
#
# Copyright 2012-2015 Spotify AB
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
''' Parameters are one of the core concepts of Luigi.
All Parameters sit on :class:`~luigi.task.Task` classes.
See :ref:`Parameter` for more info on how to define parameters.
'''
import abc
import datetime
import warnings
try:
from ConfigParser import NoOptionError, NoSectionError
except ImportError:
from configparser import NoOptionError, NoSectionError
from gas import task_register
import six
from gas import configuration
from gas.cmdline_parser import CmdlineParser
_no_value = object()
class ParameterException(Exception):
"""
Base exception.
"""
pass
class MissingParameterException(ParameterException):
"""
Exception signifying that there was a missing Parameter.
"""
pass
class UnknownParameterException(ParameterException):
"""
Exception signifying that an unknown Parameter was supplied.
"""
pass
class DuplicateParameterException(ParameterException):
"""
Exception signifying that a Parameter was specified multiple times.
"""
pass
class Parameter(object):
"""
An untyped Parameter
Parameters are objects set on the Task class level to make it possible to parameterize tasks.
For instance:
.. code:: python
class MyTask(luigi.Task):
foo = luigi.Parameter()
class RequiringTask(luigi.Task):
def requires(self):
return MyTask(foo="hello")
def run(self):
print(self.requires().foo) # prints "hello"
This makes it possible to instantiate multiple tasks, eg ``MyTask(foo='bar')`` and
``MyTask(foo='baz')``. The task will then have the ``foo`` attribute set appropriately.
When a task is instantiated, it will first use any argument as the value of the parameter, eg.
if you instantiate ``a = TaskA(x=44)`` then ``a.x == 44``. When the value is not provided, the
value will be resolved in this order of falling priority:
* Any value provided on the command line:
- To the root task (eg. ``--param xyz``)
- Then to the class, using the qualified task name syntax (eg. ``--TaskA-param xyz``).
* With ``[TASK_NAME]>PARAM_NAME: <serialized value>`` syntax. See :ref:`ParamConfigIngestion`
* Any default value set using the ``default`` flag.
There are subclasses of ``Parameter`` that define what type the parameter has. This is not
enforced within Python, but are used for command line interaction.
Parameter objects may be reused, but you must then set the ``positional=False`` flag.
"""
_counter = 0 # non-atomically increasing counter used for ordering parameters.
def __init__(self, default=_no_value, is_global=False, significant=True, description=None,
config_path=None, positional=True, always_in_help=False):
"""
:param default: the default value for this parameter. This should match the type of the
Parameter, i.e. ``datetime.date`` for ``DateParameter`` or ``int`` for
``IntParameter``. By default, no default is stored and
the value must be specified at runtime.
:param bool significant: specify ``False`` if the parameter should not be treated as part of
the unique identifier for a Task. An insignificant Parameter might
also be used to specify a password or other sensitive information
that should not be made public via the scheduler. Default:
``True``.
:param str description: A human-readable string describing the purpose of this Parameter.
For command-line invocations, this will be used as the `help` string
shown to users. Default: ``None``.
:param dict config_path: a dictionary with entries ``section`` and ``name``
specifying a config file entry from which to read the
default value for this parameter. DEPRECATED.
Default: ``None``.
:param bool positional: If true, you can set the argument as a
positional argument. It's true by default but we recommend
``positional=False`` for abstract base classes and similar cases.
:param bool always_in_help: For the --help option in the command line
parsing. Set true to always show in --help.
"""
self._default = default
if is_global:
warnings.warn("is_global support is removed. Assuming positional=False",
DeprecationWarning,
stacklevel=2)
positional = False
self.significant = significant # Whether different values for this parameter will differentiate otherwise equal tasks
self.positional = positional
self.description = description
self.always_in_help = always_in_help
if config_path is not None and ('section' not in config_path or 'name' not in config_path):
raise ParameterException('config_path must be a hash containing entries for section and name')
self.__config = config_path
self._counter = Parameter._counter # We need to keep track of this to get the order right (see Task class)
Parameter._counter += 1
def _get_value_from_config(self, section, name):
"""Loads the default from the config. Returns _no_value if it doesn't exist"""
conf = configuration.get_config()
try:
value = conf.get(section, name)
except (NoSectionError, NoOptionError):
return _no_value
return self.parse(value)
def _get_value(self, task_name, param_name):
for value, warn in self._value_iterator(task_name, param_name):
if value != _no_value:
if warn:
warnings.warn(warn, DeprecationWarning)
return value
return _no_value
def _value_iterator(self, task_name, param_name):
"""
Yield the parameter values, with optional deprecation warning as second tuple value.
The parameter value will be whatever non-_no_value that is yielded first.
"""
cp_parser = CmdlineParser.get_instance()
if cp_parser:
dest = self._parser_global_dest(param_name, task_name)
found = getattr(cp_parser.known_args, dest, None)
yield (self._parse_or_no_value(found), None)
yield (self._get_value_from_config(task_name, param_name), None)
yield (self._get_value_from_config(task_name, param_name.replace('_', '-')),
'Configuration [{}] {} (with dashes) should be avoided. Please use underscores.'.format(
task_name, param_name))
if self.__config:
yield (self._get_value_from_config(self.__config['section'], self.__config['name']),
'The use of the configuration [{}] {} is deprecated. Please use [{}] {}'.format(
self.__config['section'], self.__config['name'], task_name, param_name))
yield (self._default, None)
def has_task_value(self, task_name, param_name):
return self._get_value(task_name, param_name) != _no_value
def task_value(self, task_name, param_name):
value = self._get_value(task_name, param_name)
if value == _no_value:
raise MissingParameterException("No default specified")
else:
return self.normalize(value)
def parse(self, x):
"""
Parse an individual value from the input.
The default implementation is the identity function, but subclasses should override
this method for specialized parsing.
:param str x: the value to parse.
:return: the parsed value.
"""
return x # default impl
def serialize(self, x):
"""
Opposite of :py:meth:`parse`.
Converts the value ``x`` to a string.
:param x: the value to serialize.
"""
return str(x)
def normalize(self, x):
"""
Given a parsed parameter value, normalizes it.
The value can either be the result of parse(), the default value or
arguments passed into the task's constructor by instantiation.
This is very implementation defined, but can be used to validate/clamp
valid values. For example, if you wanted to only accept even integers,
and "correct" odd values to the nearest integer, you can implement
normalize as ``x // 2 * 2``.
"""
return x # default impl
def next_in_enumeration(self, _value):
"""
        If your Parameter type has an enumerable ordering of values, you can
        choose to override this method. It is used by the
        :py:mod:`luigi.execution_summary` module for pretty printing
        purposes, enabling it to pretty print tasks like ``MyTask(num=1),
        MyTask(num=2), MyTask(num=3)`` as ``MyTask(num=1..3)``.
:param value: The value
:return: The next value, like "value + 1". Or ``None`` if there's no enumerable ordering.
"""
return None
def _parse_or_no_value(self, x):
if not x:
return _no_value
else:
return self.parse(x)
@staticmethod
def _parser_global_dest(param_name, task_name):
return task_name + '_' + param_name
@staticmethod
def _parser_action():
return "store"
_UNIX_EPOCH = datetime.datetime.utcfromtimestamp(0)
class _DateParameterBase(Parameter):
"""
Base class Parameter for date (not datetime).
"""
def __init__(self, interval=1, start=None, **kwargs):
super(_DateParameterBase, self).__init__(**kwargs)
self.interval = interval
self.start = start if start is not None else _UNIX_EPOCH.date()
@abc.abstractproperty
def date_format(self):
"""
Override me with a :py:meth:`~datetime.date.strftime` string.
"""
pass
def parse(self, s):
"""
Parses a date string formatted like ``YYYY-MM-DD``.
"""
return datetime.datetime.strptime(s, self.date_format).date()
def serialize(self, dt):
"""
Converts the date to a string using the :py:attr:`~_DateParameterBase.date_format`.
"""
if dt is None:
return str(dt)
return dt.strftime(self.date_format)
class DateParameter(_DateParameterBase):
"""
Parameter whose value is a :py:class:`~datetime.date`.
A DateParameter is a Date string formatted ``YYYY-MM-DD``. For example, ``2013-07-10`` specifies
July 10, 2013.
"""
date_format = '%Y-%m-%d'
def next_in_enumeration(self, value):
return value + datetime.timedelta(days=self.interval)
def normalize(self, value):
if value is None:
return None
if isinstance(value, datetime.datetime):
value = value.date()
delta = (value - self.start).days % self.interval
return value - datetime.timedelta(days=delta)
class MonthParameter(DateParameter):
"""
Parameter whose value is a :py:class:`~datetime.date`, specified to the month
(day of :py:class:`~datetime.date` is "rounded" to first of the month).
A MonthParameter is a Date string formatted ``YYYY-MM``. For example, ``2013-07`` specifies
July of 2013.
"""
date_format = '%Y-%m'
def _add_months(self, date, months):
"""
Add ``months`` months to ``date``.
Unfortunately we can't use timedeltas to add months because timedelta counts in days
and there's no foolproof way to add N months in days without counting the number of
days per month.
"""
year = date.year + (date.month + months - 1) // 12
month = (date.month + months - 1) % 12 + 1
return datetime.date(year=year, month=month, day=1)
def next_in_enumeration(self, value):
return self._add_months(value, self.interval)
def normalize(self, value):
if value is None:
return None
months_since_start = (value.year - self.start.year) * 12 + (value.month - self.start.month)
months_since_start -= months_since_start % self.interval
return self._add_months(self.start, months_since_start)
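# Illustrative check of the month arithmetic above (not part of the module):
#     MonthParameter()._add_months(datetime.date(2013, 7, 1), 6)
#     -> datetime.date(2014, 1, 1), since (7 + 6 - 1) // 12 rolls the year over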
class YearParameter(DateParameter):
"""
Parameter whose value is a :py:class:`~datetime.date`, specified to the year
(day and month of :py:class:`~datetime.date` is "rounded" to first day of the year).
A YearParameter is a Date string formatted ``YYYY``.
"""
date_format = '%Y'
def next_in_enumeration(self, value):
return value.replace(year=value.year + self.interval)
def normalize(self, value):
if value is None:
return None
delta = (value.year - self.start.year) % self.interval
return datetime.date(year=value.year - delta, month=1, day=1)
class _DatetimeParameterBase(Parameter):
"""
Base class Parameter for datetime
"""
def __init__(self, interval=1, start=None, **kwargs):
super(_DatetimeParameterBase, self).__init__(**kwargs)
self.interval = interval
self.start = start if start is not None else _UNIX_EPOCH
@abc.abstractproperty
def date_format(self):
"""
Override me with a :py:meth:`~datetime.date.strftime` string.
"""
pass
@abc.abstractproperty
def _timedelta(self):
"""
How to move one interval of this type forward (i.e. not counting self.interval).
"""
pass
def parse(self, s):
"""
Parses a string to a :py:class:`~datetime.datetime`.
"""
return datetime.datetime.strptime(s, self.date_format)
def serialize(self, dt):
"""
Converts the date to a string using the :py:attr:`~_DatetimeParameterBase.date_format`.
"""
if dt is None:
return str(dt)
return dt.strftime(self.date_format)
def normalize(self, dt):
"""
Clamp dt to every Nth :py:attr:`~_DatetimeParameterBase.interval` starting at
:py:attr:`~_DatetimeParameterBase.start`.
"""
if dt is None:
return None
dt = dt.replace(microsecond=0) # remove microseconds, to avoid float rounding issues.
delta = (dt - self.start).total_seconds()
granularity = (self._timedelta * self.interval).total_seconds()
return dt - datetime.timedelta(seconds=delta % granularity)
def next_in_enumeration(self, value):
return value + self._timedelta * self.interval
class DateHourParameter(_DatetimeParameterBase):
"""
Parameter whose value is a :py:class:`~datetime.datetime` specified to the hour.
A DateHourParameter is a `ISO 8601 <http://en.wikipedia.org/wiki/ISO_8601>`_ formatted
date and time specified to the hour. For example, ``2013-07-10T19`` specifies July 10, 2013 at
19:00.
"""
date_format = '%Y-%m-%dT%H' # ISO 8601 is to use 'T'
_timedelta = datetime.timedelta(hours=1)
class DateMinuteParameter(_DatetimeParameterBase):
"""
Parameter whose value is a :py:class:`~datetime.datetime` specified to the minute.
A DateMinuteParameter is a `ISO 8601 <http://en.wikipedia.org/wiki/ISO_8601>`_ formatted
date and time specified to the minute. For example, ``2013-07-10T1907`` specifies July 10, 2013 at
19:07.
The interval parameter can be used to clamp this parameter to every N minutes, instead of every minute.
"""
date_format = '%Y-%m-%dT%H%M'
_timedelta = datetime.timedelta(minutes=1)
deprecated_date_format = '%Y-%m-%dT%HH%M'
def parse(self, s):
try:
value = datetime.datetime.strptime(s, self.deprecated_date_format)
warnings.warn(
'Using "H" between hours and minutes is deprecated, omit it instead.',
DeprecationWarning,
stacklevel=2
)
return value
except ValueError:
return super(DateMinuteParameter, self).parse(s)
class IntParameter(Parameter):
"""
Parameter whose value is an ``int``.
"""
def parse(self, s):
"""
Parses an ``int`` from the string using ``int()``.
"""
return int(s)
def next_in_enumeration(self, value):
return value + 1
class FloatParameter(Parameter):
"""
Parameter whose value is a ``float``.
"""
def parse(self, s):
"""
Parses a ``float`` from the string using ``float()``.
"""
return float(s)
class BoolParameter(Parameter):
"""
    A Parameter whose value is a ``bool``. This parameter has an implicit
default value of ``False``.
"""
def __init__(self, *args, **kwargs):
super(BoolParameter, self).__init__(*args, **kwargs)
if self._default == _no_value:
self._default = False
def parse(self, s):
"""
Parses a ``bool`` from the string, matching 'true' or 'false' ignoring case.
"""
return {'true': True, 'false': False}[str(s).lower()]
def normalize(self, value):
# coerce anything truthy to True
return bool(value) if value is not None else None
@staticmethod
def _parser_action():
return 'store_true'
class BooleanParameter(BoolParameter):
"""
DEPRECATED. Use :py:class:`~BoolParameter`
"""
def __init__(self, *args, **kwargs):
warnings.warn(
'BooleanParameter is deprecated, use BoolParameter instead',
DeprecationWarning,
stacklevel=2
)
super(BooleanParameter, self).__init__(*args, **kwargs)
class DateIntervalParameter(Parameter):
"""
A Parameter whose value is a :py:class:`~luigi.date_interval.DateInterval`.
Date Intervals are specified using the ISO 8601 date notation for dates
(eg. "2015-11-04"), months (eg. "2015-05"), years (eg. "2015"), or weeks
(eg. "2015-W35"). In addition, it also supports arbitrary date intervals
provided as two dates separated with a dash (eg. "2015-11-04-2015-12-04").
"""
def parse(self, s):
"""
Parses a :py:class:`~luigi.date_interval.DateInterval` from the input.
see :py:mod:`luigi.date_interval`
for details on the parsing of DateIntervals.
"""
# TODO: can we use xml.utils.iso8601 or something similar?
from luigi import date_interval as d
for cls in [d.Year, d.Month, d.Week, d.Date, d.Custom]:
i = cls.parse(s)
if i:
return i
raise ValueError('Invalid date interval - could not be parsed')
class TimeDeltaParameter(Parameter):
"""
Class that maps to timedelta using strings in any of the following forms:
* ``n {w[eek[s]]|d[ay[s]]|h[our[s]]|m[inute[s]|s[second[s]]}`` (e.g. "1 week 2 days" or "1 h")
Note: multiple arguments must be supplied in longest to shortest unit order
* ISO 8601 duration ``PnDTnHnMnS`` (each field optional, years and months not supported)
* ISO 8601 duration ``PnW``
See https://en.wikipedia.org/wiki/ISO_8601#Durations
"""
def _apply_regex(self, regex, input):
import re
re_match = re.match(regex, input)
if re_match:
kwargs = {}
has_val = False
for k, v in six.iteritems(re_match.groupdict(default="0")):
val = int(v)
has_val = has_val or val != 0
kwargs[k] = val
if has_val:
return datetime.timedelta(**kwargs)
def _parseIso8601(self, input):
def field(key):
return r"(?P<%s>\d+)%s" % (key, key[0].upper())
def optional_field(key):
return "(%s)?" % field(key)
# A little loose: ISO 8601 does not allow weeks in combination with other fields, but this regex does (as does python timedelta)
regex = "P(%s|%s(T%s)?)" % (field("weeks"), optional_field("days"), "".join([optional_field(key) for key in ["hours", "minutes", "seconds"]]))
return self._apply_regex(regex, input)
def _parseSimple(self, input):
keys = ["weeks", "days", "hours", "minutes", "seconds"]
# Give the digits a regex group name from the keys, then look for text with the first letter of the key,
# optionally followed by the rest of the word, with final char (the "s") optional
regex = "".join([r"((?P<%s>\d+) ?%s(%s)?(%s)? ?)?" % (k, k[0], k[1:-1], k[-1]) for k in keys])
return self._apply_regex(regex, input)
def parse(self, input):
"""
Parses a time delta from the input.
See :py:class:`TimeDeltaParameter` for details on supported formats.
"""
result = self._parseIso8601(input)
if not result:
result = self._parseSimple(input)
if result:
return result
else:
raise ParameterException("Invalid time delta - could not parse %s" % input)
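# Hedged examples of the accepted formats (illustrative only):
#     TimeDeltaParameter().parse('1 w 2 d')  -> datetime.timedelta(days=9)
#     TimeDeltaParameter().parse('P1DT2H')   -> datetime.timedelta(days=1, hours=2)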
class TaskParameter(Parameter):
"""
A parameter that takes another luigi task class.
    When used programmatically, the parameter should be specified
directly with the :py:class:`luigi.task.Task` (sub) class. Like
``MyMetaTask(my_task_param=my_tasks.MyTask)``. On the command line,
you specify the :py:attr:`luigi.task.Task.task_family`. Like
.. code:: console
$ luigi --module my_tasks MyMetaTask --my_task_param my_namespace.MyTask
Where ``my_namespace.MyTask`` is defined in the ``my_tasks`` python module.
    When the :py:class:`luigi.task.Task` class is instantiated to an object,
    the value will always be a task class (and not a string).
"""
def parse(self, input):
"""
        Parse a task_family using the :class:`~luigi.task_register.Register`
"""
return task_register.Register.get_task_cls(input)
def serialize(self, cls):
"""
Converts the :py:class:`luigi.task.Task` (sub) class to its family name.
"""
return cls.task_family
class EnumParameter(Parameter):
"""
A parameter whose value is an :class:`~enum.Enum`.
In the task definition, use
.. code-block:: python
class Models(enum.IntEnum):
Honda = 1
class MyTask(luigi.Task):
my_param = luigi.EnumParameter(enum=Models)
At the command line, use,
.. code-block:: console
$ luigi --module my_tasks MyTask --my-param Honda
"""
def __init__(self, *args, **kwargs):
if 'enum' not in kwargs:
raise ParameterException('An enum class must be specified.')
self._enum = kwargs.pop('enum')
super(EnumParameter, self).__init__(*args, **kwargs)
def parse(self, s):
try:
return self._enum[s]
except KeyError:
raise ValueError('Invalid enum value - could not be parsed')
def serialize(self, e):
return e.name
|
|
from django.conf import settings
from django.test import Client, TestCase
from factory.django import DjangoModelFactory
from .models import Ingredient, Recipe, RecipeIngredientRelationship
from .forms import RecipeIngredientRelationshipFormSet, IngredientForm, RecipeForm
from .views import vary_recipe
from django.forms import formsets
from datetime import datetime
import factory
PASSWORD = 'this is the password'
class UserFactory(DjangoModelFactory):
"""Instantiate a user model instance for testing."""
class Meta:
model = settings.AUTH_USER_MODEL
username = factory.Faker('first_name')
class RecipeFactory(DjangoModelFactory):
"""Instantiate a recipe model instance for testing."""
class Meta:
model = Recipe
author = factory.SubFactory(UserFactory)
title = factory.Faker('name')
class IngredientFactory(DjangoModelFactory):
"""Instantiate an ingredient model instance for testing."""
class Meta:
model = Ingredient
name = factory.Faker('name')
class RecipeIngredientFactory(DjangoModelFactory):
"""Instantiate a recipe-ingredient model instance for testing."""
class Meta:
model = RecipeIngredientRelationship
recipe = factory.SubFactory(RecipeFactory)
ingredient = factory.SubFactory(IngredientFactory)
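# A quick sketch of how these factories chain together: a single RecipeIngredientFactory()
# call builds a Recipe (whose author comes from UserFactory) and an Ingredient via
# SubFactory, then links them through RecipeIngredientRelationship; .build() (used below)
# constructs the same chain without saving to the database.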
class RecipeModelTest(TestCase):
"""Test the Recipe model."""
fixtures = ['users.json', 'recipes.json']
def setUp(self):
"""Prepare for class test methods."""
self.no_work_bread = Recipe.objects.get(pk=1)
self.ants_on_a_log = Recipe.objects.get(pk=2)
self.michael = self.no_work_bread.author
def test_title(self):
"""Confirm basic (unrelated) model field is populated correctly."""
self.assertEqual(self.no_work_bread.title, 'No Work Bread')
def test_author_relationship(self):
"""Confirm that loaded data is relating properly."""
self.assertEqual(self.no_work_bread.author, self.michael)
def test_parent_not_required(self):
"""Confirm parent field may be none."""
self.assertFalse(self.no_work_bread.parent)
def test_variant_relationship(self):
"""Confirm a new recipe can be a variant."""
self.variant = RecipeFactory(title='Fuzzy Ants on a log',
description='A zesty take on an old classic',
directions='Throw it all together, carefully.',
parent=self.ants_on_a_log)
self.assertEqual(self.variant, self.ants_on_a_log.variations.first())
self.assertEqual(self.variant.parent, self.ants_on_a_log)
class IngredientModelTest(TestCase):
"""Test the Ingredient model."""
fixtures = ['ingredients.json']
def setUp(self):
self.yeast = Ingredient.objects.get(pk=4)
self.ingredient1 = IngredientFactory()
def test_factory(self):
"""Confirm factory is creating ingredient model instances."""
self.assertIsInstance(self.ingredient1, Ingredient)
def test_name(self):
"""Test basic field for model instance."""
self.assertEqual(self.yeast.name, 'yeast')
class RecipeIngredientModelTest(TestCase):
"""Test the RecipeIngredientRelationship model."""
fixtures = ['users.json',
'ingredients.json',
'recipes.json',
'recipeingredientrelationship']
def setUp(self):
self.recipe_ingredient1 = RecipeIngredientFactory.build()
self.no_work_bread = Recipe.objects.get(pk=1)
self.ants_on_a_log = Recipe.objects.get(pk=2)
self.salt = Ingredient.objects.get(name='salt')
self.flour = Ingredient.objects.get(name='flour, white')
def test_factory(self):
"""Confirm factory is creating recipe-ingredient model instances."""
self.assertIsInstance(self.recipe_ingredient1,
RecipeIngredientRelationship)
def test_recipe_to_ingredient(self):
"""Confirm a given recipe has a relationship to said ingredient."""
thru_table = RecipeIngredientRelationship.objects.filter(
recipe=self.no_work_bread
)
self.assertTrue(thru_table.filter(ingredient=self.salt))
self.assertEqual(self.no_work_bread.ingredients.first(), self.flour)
class TestView(TestCase):
"""Generic class for testing views."""
def setUp(self):
"""Provide user and client authenticated as user."""
self.user = UserFactory()
self.user.set_password(PASSWORD)
self.user.save()
self.client = Client()
self.client.login(username=self.user.username, password=PASSWORD)
class ViewMyFavorites(TestView):
def setUp(self):
super(ViewMyFavorites, self).setUp()
self.favorite_recipe = RecipeFactory()
self.favorite_recipe.favorite_of.add(self.user.profile)
self.unfavorited_recipe = RecipeFactory()
def test_including(self):
"""Confirm view lists user's favorite recipes."""
response = self.client.get('/recipe/view/favorites/')
self.assertIn(str(self.favorite_recipe), str(response.content))
def test_excluding(self):
"""Confirm view excludes unfavorited recipes."""
response = self.client.get('/recipe/view/favorites/')
self.assertNotIn(str(self.unfavorited_recipe), str(response.content))
class ViewMyRecipes(TestView):
def setUp(self):
super(ViewMyRecipes, self).setUp()
self.authored_recipe = RecipeFactory(author=self.user)
self.unauthored_recipe = RecipeFactory(author=UserFactory())
def test_including(self):
"""Confirm view lists user's recipes."""
response = self.client.get('/recipe/view/my_recipes/')
self.assertIn(str(self.authored_recipe), str(response.content))
def test_excluding(self):
"""Confirm view excludes non-authored recipes."""
response = self.client.get('/recipe/view/my_recipes/')
self.assertNotIn(str(self.unauthored_recipe), str(response.content))
class ViewRecipe(TestView):
def setUp(self):
"""Prepare for test methods."""
super(ViewRecipe, self).setUp()
non_author = UserFactory(username='non-author')
non_author.set_password(PASSWORD)
non_author.save()
self.non_author_client = Client()
self.non_author_client.login(username=non_author.username,
password=PASSWORD)
self.public_recipe = RecipeFactory(author=self.user, privacy='pu')
self.private_recipe = RecipeFactory(author=self.user, privacy='pr')
def test_authored_public(self):
"""Confirm author can view public authored recipe."""
url = ''.join(['/recipe/view/',
str(self.public_recipe.pk),
'/'])
response = self.client.get(url)
self.assertEqual(response.status_code, 200)
def test_authored_private(self):
"""Confirm author can view private authored recipe."""
url = ''.join(['/recipe/view/',
str(self.private_recipe.pk),
'/'])
response = self.client.get(url)
self.assertEqual(response.status_code, 200)
def test_unauthored_public(self):
"""Confirm non-author can view public recipe."""
url = ''.join(['/recipe/view/',
str(self.public_recipe.pk),
'/'])
response = self.non_author_client.get(url)
self.assertEqual(response.status_code, 200)
def test_unauthored_private(self):
"""Confirm non-author cannot view private recipe."""
url = ''.join(['/recipe/view/',
str(self.private_recipe.pk),
'/'])
response = self.non_author_client.get(url)
self.assertEqual(response.status_code, 404)
class Autocomplete(TestCase):
"""Test autocomplete functionality."""
# load fixtures into test database
fixtures = ['users.json',
'ingredients.json',
'recipes.json',
'recipeingredientrelationship']
def setUp(self):
"""Prepare for testing methods."""
self.auth_user = UserFactory()
username = self.auth_user.username
self.auth_user.set_password(PASSWORD)
self.auth_user.save()
self.auth_client = Client()
self.loggedin = self.auth_client.login(username=username,
password=PASSWORD)
self.unauth_client = Client()
def test_authenticated(self):
"""Confirm authenticated client is authenticated."""
self.assertTrue(self.loggedin)
def test_autocomplete_unauthenticated(self):
"""Confirm unauthenticated client can not see autocomplete view."""
url = '/recipe/ingredient-autocomplete/'
query = '?q=w'
response = self.unauth_client.get(''.join([url, query]))
self.assertEqual(response.status_code, 302)
def test_autocomplete_authenticated(self):
"""Confirm autocomplete returning json with completions."""
url = '/recipe/ingredient-autocomplete/'
query = '?q=w'
response = self.auth_client.get(''.join([url, query]))
expected = ['"text": "water"', '"id": 2', ]
for item in expected:
self.assertIn(str(item), str(response.content))
class CreateRecipe(TestCase):
"""Test create view Functionality."""
def setUp(self):
"""Prepare for test methods."""
author = UserFactory(username='author')
author.set_password(PASSWORD)
author.save()
non_author = UserFactory(username='non-author')
non_author.set_password(PASSWORD)
non_author.save()
self.author_client = Client()
self.loggedin = self.author_client.login(username=author.username,
password=PASSWORD)
self.loggedout_client = Client()
def test_auth_200(self):
"""Verify logged in user can get to create recipe page."""
response = self.author_client.get('/recipe/add/')
self.assertEquals(response.status_code, 200)
def test_unauth_302(self):
"""Verify logged out user is redirected to
the login page when attempting to create a recipe."""
response = self.loggedout_client.get('/recipe/add/')
self.assertEquals(response.status_code, 302)
def test_forms_available(self):
"""Verify some of the expected forms are on the page."""
response = self.author_client.get('/recipe/add/')
self.assertIn('id="id_title"', str(response.content))
self.assertIn('id="id_prep_time"', str(response.content))
self.assertIn('id="id_cook_time"', str(response.content))
self.assertIn('id="id_privacy"', str(response.content))
self.assertIn('id="id_description"', str(response.content))
def test_formset_autocomplete(self):
"""Verify the ingredient form is pointing the autocomplete url."""
response = self.author_client.get('/recipe/add/')
self.assertIn('data-autocomplete-light-url="/recipe/ingredient-autocomplete/"', str(response.content))
class EditRecipe(TestCase):
"""Test create view Functionality."""
def setUp(self):
"""Prepare for test methods."""
author = UserFactory(username='author')
author.set_password(PASSWORD)
author.save()
non_author = UserFactory(username='non-author')
non_author.set_password(PASSWORD)
non_author.save()
self.author_client = Client()
self.loggedin = self.author_client.login(username=author.username,
password=PASSWORD)
self.loggedout_client = Client()
self.public_recipe = RecipeFactory(author=author, privacy='pu')
self.public_recipe.save()
relationship1 = RecipeIngredientFactory()
relationship1.recipe = self.public_recipe
relationship2 = RecipeIngredientFactory()
relationship2.recipe = self.public_recipe
relationship1.save()
relationship2.save()
self.pk = self.public_recipe.pk
def test_auth_200(self):
"""Verify logged in user can get to create recipe page."""
response = self.author_client.get('/recipe/edit/{}/'.format(self.pk))
self.assertEquals(response.status_code, 200)
def test_unauth_302(self):
"""Verify logged out user is redirected to
the login page when attempting to edit a recipe."""
response = self.loggedout_client.get('/recipe/edit/{}/'.format(self.pk))
self.assertEquals(response.status_code, 302)
def test_forms_available(self):
"""Verify some of the expected forms are on the page."""
response = self.author_client.get('/recipe/edit/{}/'.format(self.pk))
self.assertIn('id="id_title"', str(response.content))
self.assertIn('id="id_prep_time"', str(response.content))
self.assertIn('id="id_cook_time"', str(response.content))
self.assertIn('id="id_privacy"', str(response.content))
self.assertIn('id="id_description"', str(response.content))
def test_formset_autocomplete(self):
"""Verify the ingredient form is pointing the autocomplete url."""
response = self.author_client.get('/recipe/edit/{}/'.format(self.pk))
self.assertIn('data-autocomplete-light-url="/recipe/ingredient-autocomplete/"', str(response.content))
class FormsTest(TestCase):
"""Test Form Creation."""
def setUp(self):
"""Prepare for test methods."""
self.formset = RecipeIngredientRelationshipFormSet(
queryset=RecipeIngredientRelationship.objects.none())
author = UserFactory(username='author')
author.set_password(PASSWORD)
author.save()
self.public_recipe = RecipeFactory(author=author, privacy='pu')
self.public_recipe.save()
relationship1 = RecipeIngredientFactory()
relationship1.recipe = self.public_recipe
relationship2 = RecipeIngredientFactory()
relationship2.recipe = self.public_recipe
relationship1.save()
relationship2.save()
self.formset_filled = RecipeIngredientRelationshipFormSet(
queryset=RecipeIngredientRelationship.objects.filter(recipe=self.public_recipe))
self.recipe_form = RecipeForm()
self.ingredient_form = IngredientForm()
def test_formset_0_initial(self):
"""Verify empty formset initializes with 0 forms."""
self.assertEquals(self.formset.initial_form_count(), 0)
def test_formset_1_extra(self):
"""Verify one additional form is added to the formset."""
self.assertEquals(len(self.formset.extra_forms), 1)
def test_formset_based_on_recipe_ingredient_count(self):
"""Verify populated formset has forms for each ingredient."""
self.assertEquals(len(self.public_recipe.ingredients.all()),
self.formset_filled.initial_form_count())
def test_recipe_form_fields(self):
"""Verify recipe form does not show specific fields."""
self.assertRaises(KeyError, lambda: self.recipe_form['author'])
self.assertRaises(KeyError, lambda: self.recipe_form['parent'])
self.assertRaises(KeyError, lambda: self.recipe_form['created'])
def test_ingredient_form_fields(self):
"""Verify recipe form shows all ingredients."""
count = 0
for option in self.ingredient_form.fields['ingredient'].choices:
count += 1
self.assertEquals(count, 3)
class ModelTests(TestCase):
"""Test recipe models and associated models."""
def setUp(self):
"""Prepare for test methods."""
self.author = UserFactory(username='author')
self.author.set_password(PASSWORD)
self.author.save()
self.public_recipe = RecipeFactory(author=self.author, privacy='pu')
self.public_recipe.save()
self.relationship1 = RecipeIngredientFactory()
self.relationship1.recipe = self.public_recipe
self.relationship1.quantity = '5 Cups'
relationship2 = RecipeIngredientFactory()
relationship2.recipe = self.public_recipe
self.relationship1.save()
relationship2.save()
def test_recipe_title(self):
"""Verify recipe title exists."""
self.assertTrue(self.public_recipe.title)
def test_recipe_create_date(self):
"""Verify recipe has a created date after it being saved."""
self.assertTrue(self.public_recipe.created)
def test_recipe_create_date_type(self):
"""Verify datetime object exists in the recipe model after being saved."""
recipe2 = Recipe(author=self.author, title='test', description='test')
self.assertFalse(recipe2.created)
recipe2.save()
self.assertTrue(recipe2.created)
def test_recipe_datetime_object(self):
"""Verify datetime property is the type datetime."""
self.assertIsInstance(self.public_recipe.created, datetime)
def test_recipe_privacy_default(self):
"""Verify that public is the default privacy setting."""
self.assertEquals(self.public_recipe.privacy, 'pu')
def test_recipe_has_ingredients(self):
"""Verify that the setup recipe has ingredients,
seen through the through table."""
self.assertTrue(self.public_recipe.ingredients)
def test_ingredient_has_name(self):
"""Test stringing an ingredient displays name."""
pasta = Ingredient(name='pasta')
self.assertEquals(str(pasta), 'pasta')
def test_relationship_exists(self):
"""Verify relationship points to ingredient and recipe."""
self.assertIsInstance(self.relationship1.ingredient, Ingredient)
self.assertIsInstance(self.relationship1.recipe, Recipe)
def test_relationship_quantity(self):
self.assertEquals(self.relationship1.quantity, '5 Cups')
class VaryRecipeView(TestView):
def setUp(self):
super(VaryRecipeView, self).setUp()
self.recipe = RecipeFactory(author=self.user)
self.url = ''.join(['/recipe/vary/', str(self.recipe.pk), '/'])
def test_get(self):
"""Confirm can get 200."""
response = self.client.get(self.url)
self.assertEqual(response.status_code, 200)
# def test_autocomplete_can_create(self):
# """Confirm autocomplete can confirm records."""
# # no tomatoes to begin with
# queryset = Ingredient.objects.filter(name='tomatoes')
# self.assertFalse(queryset)
#
# # get to help the csrf token pop
# url = '/recipe/ingredient-autocomplete/'
# query = '?q=tomatoes'
# get_response = self.auth_client.get(''.join([url, query]))
#
#
# # use client to post
# data = {'text': 'tomatoes', id: 200}
# url = '/recipe/ingredient-autocomplete/'
# post_response = self.auth_client.post(url, data)
#
# # assert tomatoes
# queryset = Ingredient.objects.filter(name='tomatoes')
# self.assertTrue(queryset)
|
|
# -*- coding: utf-8 -*-
import functools
import datetime
import mock
from factory import SubFactory
from factory.fuzzy import FuzzyDateTime, FuzzyAttribute, FuzzyChoice
from mock import patch, Mock
import factory
import pytz
from factory.django import DjangoModelFactory
from django.utils import timezone
from django.db.utils import IntegrityError
from faker import Factory
from modularodm.exceptions import NoResultsFound
from website.notifications.constants import NOTIFICATION_TYPES
from website.util import permissions
from website.project.licenses import ensure_licenses
from website.project.model import ensure_schemas
from website.archiver import ARCHIVER_SUCCESS
from framework.auth.core import Auth
from osf import models
from osf.models.sanctions import Sanction
from osf.utils.names import impute_names_model
from osf.modm_compat import Q
from addons.osfstorage.models import OsfStorageFile
fake = Factory.create()
ensure_licenses = functools.partial(ensure_licenses, warn=False)
def get_default_metaschema():
"""This needs to be a method so it gets called after the test database is set up"""
try:
return models.MetaSchema.find()[0]
except IndexError:
ensure_schemas()
return models.MetaSchema.find()[0]
def FakeList(provider, n, *args, **kwargs):
func = getattr(fake, provider)
return [func(*args, **kwargs) for _ in range(n)]
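# e.g. FakeList('url', n=3) -> three faker-generated URLs such as
# ['https://example.org/', ...] (actual values vary per run).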
class UserFactory(DjangoModelFactory):
# TODO: Change this to only generate long names and see what breaks
fullname = factory.Sequence(lambda n: 'Freddie Mercury{0}'.format(n))
username = factory.Faker('email')
password = factory.PostGenerationMethodCall('set_password',
'queenfan86')
is_registered = True
is_claimed = True
date_confirmed = factory.Faker('date_time_this_decade', tzinfo=pytz.utc)
merged_by = None
verification_key = None
class Meta:
model = models.OSFUser
@factory.post_generation
def set_names(self, create, extracted):
parsed = impute_names_model(self.fullname)
for key, value in parsed.items():
setattr(self, key, value)
if create:
self.save()
@factory.post_generation
def set_emails(self, create, extracted):
if self.username not in self.emails:
self.emails.append(str(self.username))
class AuthUserFactory(UserFactory):
"""A user that automatically has an api key, for quick authentication.
Example: ::
user = AuthUserFactory()
res = self.app.get(url, auth=user.auth) # user is "logged in"
"""
@factory.post_generation
def add_auth(self, create, extracted):
self.auth = (self.username, 'queenfan86')
class AuthFactory(factory.base.Factory):
class Meta:
model = Auth
user = factory.SubFactory(UserFactory)
class UnregUserFactory(DjangoModelFactory):
email = factory.Faker('email')
fullname = factory.Sequence(lambda n: 'Freddie Mercury{0}'.format(n))
date_registered = factory.Faker('date_time', tzinfo=pytz.utc)
class Meta:
model = models.OSFUser
@classmethod
def _build(cls, target_class, *args, **kwargs):
'''Build an object without saving it.'''
ret = target_class.create_unregistered(email=kwargs.pop('email'), fullname=kwargs.pop('fullname'))
for key, val in kwargs.items():
setattr(ret, key, val)
return ret
@classmethod
def _create(cls, target_class, *args, **kwargs):
ret = target_class.create_unregistered(email=kwargs.pop('email'), fullname=kwargs.pop('fullname'))
for key, val in kwargs.items():
setattr(ret, key, val)
ret.save()
return ret
class UnconfirmedUserFactory(DjangoModelFactory):
"""Factory for a user that has not yet confirmed their primary email
address (username).
"""
class Meta:
model = models.OSFUser
username = factory.Faker('email')
fullname = factory.Sequence(lambda n: 'Freddie Mercury{0}'.format(n))
password = 'lolomglgt'
@classmethod
def _build(cls, target_class, username, password, fullname):
'''Build an object without saving it.'''
instance = target_class.create_unconfirmed(
username=username, password=password, fullname=fullname
)
instance.date_registered = fake.date_time(tzinfo=pytz.utc)
return instance
@classmethod
def _create(cls, target_class, username, password, fullname):
instance = target_class.create_unconfirmed(
username=username, password=password, fullname=fullname
)
instance.date_registered = fake.date_time(tzinfo=pytz.utc)
instance.save()
return instance
class BaseNodeFactory(DjangoModelFactory):
title = factory.Faker('catch_phrase')
description = factory.Faker('sentence')
date_created = factory.LazyFunction(timezone.now)
creator = factory.SubFactory(AuthUserFactory)
class Meta:
model = models.Node
class ProjectFactory(BaseNodeFactory):
category = 'project'
class ProjectWithAddonFactory(ProjectFactory):
"""Factory for a project that has an addon. The addon will be added to
both the Node and the creator records. ::
p = ProjectWithAddonFactory(addon='github')
p.get_addon('github') # => github node settings object
p.creator.get_addon('github') # => github user settings object
"""
# TODO: Should use mock addon objects
@classmethod
def _build(cls, target_class, addon='s3', *args, **kwargs):
'''Build an object without saving it.'''
instance = ProjectFactory._build(target_class, *args, **kwargs)
auth = Auth(user=instance.creator)
instance.add_addon(addon, auth)
instance.creator.add_addon(addon)
return instance
@classmethod
def _create(cls, target_class, addon='s3', *args, **kwargs):
instance = ProjectFactory._create(target_class, *args, **kwargs)
auth = Auth(user=instance.creator)
instance.add_addon(addon, auth)
instance.creator.add_addon(addon)
instance.save()
return instance
class NodeFactory(BaseNodeFactory):
category = 'hypothesis'
parent = factory.SubFactory(ProjectFactory)
class InstitutionFactory(DjangoModelFactory):
name = factory.Faker('company')
login_url = factory.Faker('url')
logout_url = factory.Faker('url')
domains = FakeList('url', n=3)
email_domains = FakeList('domain_name', n=1)
logo_name = factory.Faker('file_name')
class Meta:
model = models.Institution
class NodeLicenseRecordFactory(DjangoModelFactory):
year = factory.Faker('year')
copyright_holders = FakeList('name', n=3)
class Meta:
model = models.NodeLicenseRecord
@classmethod
def _create(cls, *args, **kwargs):
try:
models.NodeLicense.find_one(
Q('name', 'eq', 'No license')
)
except NoResultsFound:
ensure_licenses()
kwargs['node_license'] = kwargs.get(
'node_license',
models.NodeLicense.find_one(
Q('name', 'eq', 'No license')
)
)
return super(NodeLicenseRecordFactory, cls)._create(*args, **kwargs)
class NodeLogFactory(DjangoModelFactory):
class Meta:
model = models.NodeLog
action = 'file_added'
user = SubFactory(UserFactory)
class PrivateLinkFactory(DjangoModelFactory):
class Meta:
model = models.PrivateLink
name = factory.Faker('word')
key = factory.Faker('md5')
anonymous = False
creator = factory.SubFactory(UserFactory)
class CollectionFactory(DjangoModelFactory):
class Meta:
model = models.Collection
is_bookmark_collection = False
title = factory.Faker('catch_phrase')
creator = factory.SubFactory(UserFactory)
class BookmarkCollectionFactory(CollectionFactory):
is_bookmark_collection = True
class RegistrationFactory(BaseNodeFactory):
creator = None
# Default project is created if not provided
category = 'project'
@classmethod
def _build(cls, target_class, *args, **kwargs):
raise Exception('Cannot build registration without saving.')
@classmethod
def _create(cls, target_class, project=None, is_public=False,
schema=None, data=None,
archive=False, embargo=None, registration_approval=None, retraction=None,
*args, **kwargs):
user = None
if project:
user = project.creator
user = kwargs.pop('user', None) or kwargs.get('creator') or user or UserFactory()
kwargs['creator'] = user
# Original project to be registered
project = project or target_class(*args, **kwargs)
if project.has_permission(user, 'admin'):
project.add_contributor(
contributor=user,
permissions=permissions.CREATOR_PERMISSIONS,
log=False,
save=False
)
project.save()
# Default registration parameters
schema = schema or get_default_metaschema()
data = data or {'some': 'data'}
auth = Auth(user=user)
register = lambda: project.register_node(
schema=schema,
auth=auth,
data=data
)
def add_approval_step(reg):
if embargo:
reg.embargo = embargo
elif registration_approval:
reg.registration_approval = registration_approval
elif retraction:
reg.retraction = retraction
else:
reg.require_approval(reg.creator)
reg.save()
reg.sanction.add_authorizer(reg.creator, reg)
reg.sanction.save()
with patch('framework.celery_tasks.handlers.enqueue_task'):
reg = register()
add_approval_step(reg)
if not archive:
with patch.object(reg.archive_job, 'archive_tree_finished', Mock(return_value=True)):
reg.archive_job.status = ARCHIVER_SUCCESS
reg.archive_job.save()
reg.sanction.state = Sanction.APPROVED
reg.sanction.save()
# models.ArchiveJob(
# src_node=project,
# dst_node=reg,
# initiator=user,
# )
if is_public:
reg.is_public = True
reg.save()
return reg
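# Illustrative usage (a sketch, not exercised here; `my_project` is a hypothetical
# existing Node):
#   reg = RegistrationFactory()                    # registers a freshly created project
#   reg = RegistrationFactory(project=my_project)  # registers an existing project
#   reg = RegistrationFactory(is_public=True)      # approved and made public, archiving mocked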
class WithdrawnRegistrationFactory(BaseNodeFactory):
@classmethod
def _create(cls, *args, **kwargs):
registration = kwargs.pop('registration', None)
registration.is_public = True
user = kwargs.pop('user', registration.creator)
registration.retract_registration(user)
withdrawal = registration.retraction
token = withdrawal.approval_state.values()[0]['approval_token']
with patch('osf.models.AbstractNode.update_search'):
withdrawal.approve_retraction(user, token)
withdrawal.save()
return withdrawal
class SanctionFactory(DjangoModelFactory):
class Meta:
abstract = True
@classmethod
def _create(cls, target_class, initiated_by=None, approve=False, *args, **kwargs):
user = kwargs.pop('user', None) or UserFactory()
kwargs['initiated_by'] = initiated_by or user
sanction = super(SanctionFactory, cls)._create(target_class, *args, **kwargs)
reg_kwargs = {
'creator': user,
'user': user,
sanction.SHORT_NAME: sanction
}
RegistrationFactory(**reg_kwargs)
if not approve:
sanction.state = Sanction.UNAPPROVED
sanction.save()
return sanction
class RetractionFactory(SanctionFactory):
class Meta:
model = models.Retraction
user = factory.SubFactory(UserFactory)
class EmbargoFactory(SanctionFactory):
class Meta:
model = models.Embargo
user = factory.SubFactory(UserFactory)
class RegistrationApprovalFactory(SanctionFactory):
class Meta:
model = models.RegistrationApproval
user = factory.SubFactory(UserFactory)
class EmbargoTerminationApprovalFactory(DjangoModelFactory):
FACTORY_STRATEGY = factory.base.CREATE_STRATEGY
@classmethod
def create(cls, registration=None, user=None, embargo=None, *args, **kwargs):
if registration:
if not user:
user = registration.creator
else:
user = user or UserFactory()
if not embargo:
embargo = EmbargoFactory(state=models.Sanction.APPROVED, approve=True)
registration = embargo._get_registration()
else:
registration = RegistrationFactory(creator=user, user=user, embargo=embargo)
with mock.patch('osf.models.sanctions.TokenApprovableSanction.ask', mock.Mock()):
approval = registration.request_embargo_termination(Auth(user))
return approval
class DraftRegistrationFactory(DjangoModelFactory):
class Meta:
model = models.DraftRegistration
@classmethod
def _create(cls, *args, **kwargs):
branched_from = kwargs.get('branched_from')
initiator = kwargs.get('initiator')
registration_schema = kwargs.get('registration_schema')
registration_metadata = kwargs.get('registration_metadata')
if not branched_from:
project_params = {}
if initiator:
project_params['creator'] = initiator
branched_from = ProjectFactory(**project_params)
initiator = branched_from.creator
try:
registration_schema = registration_schema or models.MetaSchema.find()[0]
except IndexError:
ensure_schemas()
registration_metadata = registration_metadata or {}
draft = models.DraftRegistration.create_from_node(
branched_from,
user=initiator,
schema=registration_schema,
data=registration_metadata,
)
return draft
class CommentFactory(DjangoModelFactory):
class Meta:
model = models.Comment
content = factory.Sequence(lambda n: 'Comment {0}'.format(n))
@classmethod
def _build(cls, target_class, *args, **kwargs):
node = kwargs.pop('node', None) or NodeFactory()
user = kwargs.pop('user', None) or node.creator
target = kwargs.pop('target', None) or models.Guid.load(node._id)
content = kwargs.pop('content', None) or 'Test comment.'
instance = target_class(
node=node,
user=user,
target=target,
content=content,
*args, **kwargs
)
if isinstance(target.referent, target_class):
instance.root_target = target.referent.root_target
else:
instance.root_target = target
return instance
@classmethod
def _create(cls, target_class, *args, **kwargs):
node = kwargs.pop('node', None) or NodeFactory()
user = kwargs.pop('user', None) or node.creator
target = kwargs.pop('target', None) or models.Guid.load(node._id)
content = kwargs.pop('content', None) or 'Test comment.'
instance = target_class(
node=node,
user=user,
target=target,
content=content,
*args, **kwargs
)
if isinstance(target.referent, target_class):
instance.root_target = target.referent.root_target
else:
instance.root_target = target
instance.save()
return instance
class SubjectFactory(DjangoModelFactory):
text = factory.Sequence(lambda n: 'Example Subject #{}'.format(n))
class Meta:
model = models.Subject
@classmethod
def _create(cls, target_class, parent=None, provider=None, bepress_subject=None, *args, **kwargs):
provider = provider or models.PreprintProvider.objects.first() or PreprintProviderFactory(_id='osf')
if provider._id != 'osf' and not bepress_subject:
osf = models.PreprintProvider.load('osf') or PreprintProviderFactory(_id='osf')
bepress_subject = SubjectFactory(provider=osf)
try:
ret = super(SubjectFactory, cls)._create(target_class, parent=parent, provider=provider, bepress_subject=bepress_subject, *args, **kwargs)
except IntegrityError:
ret = models.Subject.objects.get(text=kwargs['text'])
if parent:
ret.parent = parent
return ret
class PreprintProviderFactory(DjangoModelFactory):
name = factory.Faker('company')
description = factory.Faker('bs')
external_url = factory.Faker('url')
logo_name = factory.Faker('file_name', category='image')
banner_name = factory.Faker('file_name', category='image')
class Meta:
model = models.PreprintProvider
class PreprintFactory(DjangoModelFactory):
doi = factory.Sequence(lambda n: '10.123/{}'.format(n))
provider = factory.SubFactory(PreprintProviderFactory)
external_url = 'http://hello.org'
class Meta:
model = models.PreprintService
@classmethod
def _build(cls, target_class, project=None, filename='preprint_file.txt', provider=None,
doi=None, external_url=None, is_published=True, subjects=None, finish=True, *args, **kwargs):
user = None
if project:
user = project.creator
user = kwargs.get('user') or kwargs.get('creator') or user or UserFactory()
kwargs['creator'] = user
# Original project to be converted to a preprint
project = project or ProjectFactory(*args, **kwargs)
project.save()
if not project.is_contributor(user):
project.add_contributor(
contributor=user,
permissions=permissions.CREATOR_PERMISSIONS,
log=False,
save=True
)
file = OsfStorageFile.create(
node=project,
path='/{}'.format(filename),
name=filename,
materialized_path='/{}'.format(filename))
file.save()
from addons.osfstorage import settings as osfstorage_settings
file.create_version(user, {
'object': '06d80e',
'service': 'cloud',
osfstorage_settings.WATERBUTLER_RESOURCE: 'osf',
}, {
'size': 1337,
'contentType': 'img/png'
}).save()
preprint = target_class(node=project, provider=provider)
auth = Auth(project.creator)
if finish:
preprint.set_primary_file(file, auth=auth)
subjects = subjects or [[SubjectFactory()._id]]
preprint.save()
preprint.set_subjects(subjects, auth=auth)
preprint.set_published(is_published, auth=auth)
if not preprint.is_published:
project._has_abandoned_preprint = True
project.preprint_article_doi = doi
project.save()
return preprint
@classmethod
def _create(cls, target_class, project=None, filename='preprint_file.txt', provider=None,
doi=None, external_url=None, is_published=True, subjects=None, finish=True, *args, **kwargs):
instance = cls._build(
target_class=target_class,
project=project, filename=filename, provider=provider,
doi=doi, external_url=external_url, is_published=is_published, subjects=subjects,
finish=finish, *args, **kwargs
)
instance.save()
return instance
class TagFactory(DjangoModelFactory):
class Meta:
model = models.Tag
name = factory.Faker('word')
system = False
class ApiOAuth2PersonalTokenFactory(DjangoModelFactory):
class Meta:
model = models.ApiOAuth2PersonalToken
owner = factory.SubFactory(UserFactory)
scopes = 'osf.full_write osf.full_read'
name = factory.Sequence(lambda n: 'Example OAuth2 Personal Token #{}'.format(n))
class ApiOAuth2ApplicationFactory(DjangoModelFactory):
class Meta:
model = models.ApiOAuth2Application
owner = factory.SubFactory(UserFactory)
name = factory.Sequence(lambda n: 'Example OAuth2 Application #{}'.format(n))
home_url = 'ftp://ftp.ncbi.nlm.nimh.gov/'
callback_url = 'http://example.uk'
class AlternativeCitationFactory(DjangoModelFactory):
class Meta:
model = models.AlternativeCitation
@classmethod
def _create(cls, target_class, *args, **kwargs):
name = kwargs.get('name')
text = kwargs.get('text')
instance = target_class(
name=name,
text=text
)
instance.save()
return instance
class ForkFactory(DjangoModelFactory):
class Meta:
model = models.Node
@classmethod
def _create(cls, *args, **kwargs):
project = kwargs.pop('project', None)
user = kwargs.pop('user', project.creator)
title = kwargs.pop('title', None)
fork = project.fork_node(auth=Auth(user), title=title)
fork.save()
return fork
class IdentifierFactory(DjangoModelFactory):
class Meta:
model = models.Identifier
referent = factory.SubFactory(RegistrationFactory)
value = factory.Sequence(lambda n: 'carp:/2460{}'.format(n))
@classmethod
def _create(cls, *args, **kwargs):
kwargs['category'] = kwargs.get('category', 'carpid')
return super(IdentifierFactory, cls)._create(*args, **kwargs)
class NodeRelationFactory(DjangoModelFactory):
class Meta:
model = models.NodeRelation
child = factory.SubFactory(NodeFactory)
parent = factory.SubFactory(NodeFactory)
class ExternalAccountFactory(DjangoModelFactory):
class Meta:
model = models.ExternalAccount
oauth_key = 'some-silly-key'
oauth_secret = 'some-super-secret'
provider = 'mock2'
provider_id = factory.Sequence(lambda n: 'user-{0}'.format(n))
provider_name = 'Fake Provider'
display_name = factory.Sequence(lambda n: 'user-{0}'.format(n))
profile_url = 'http://wutwut.com/'
refresh_token = 'some-sillier-key'
class MockOAuth2Provider(models.ExternalProvider):
name = "Mock OAuth 2.0 Provider"
short_name = "mock2"
client_id = "mock2_client_id"
client_secret = "mock2_client_secret"
auth_url_base = "https://mock2.com/auth"
callback_url = "https://mock2.com/callback"
auto_refresh_url = "https://mock2.com/callback"
refresh_time = 300
expiry_time = 9001
def handle_callback(self, response):
return {
'provider_id': 'mock_provider_id'
}
class NotificationSubscriptionFactory(DjangoModelFactory):
class Meta:
model = models.NotificationSubscription
def make_node_lineage():
node1 = NodeFactory()
node2 = NodeFactory(parent=node1)
node3 = NodeFactory(parent=node2)
node4 = NodeFactory(parent=node3)
return [node1._id, node2._id, node3._id, node4._id]
class NotificationDigestFactory(DjangoModelFactory):
timestamp = FuzzyDateTime(datetime.datetime(1970, 1, 1, tzinfo=pytz.UTC))
node_lineage = FuzzyAttribute(fuzzer=make_node_lineage)
user = factory.SubFactory(UserFactory)
send_type = FuzzyChoice(choices=NOTIFICATION_TYPES.keys())
message = fake.text(max_nb_chars=2048)
event = fake.text(max_nb_chars=50)
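# Note: fake.text(...) above runs once when this class body is evaluated, so every digest
# built by the factory shares the same message/event text unless overridden per call,
# e.g. NotificationDigestFactory(message='custom text').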
class Meta:
model = models.NotificationDigest
class ConferenceFactory(DjangoModelFactory):
class Meta:
model = models.Conference
endpoint = factory.Sequence(lambda n: 'conference{0}'.format(n))
name = factory.Faker('catch_phrase')
active = True
is_meeting = True
@factory.post_generation
def admins(self, create, extracted, **kwargs):
self.admins = extracted or [UserFactory()]
class SessionFactory(DjangoModelFactory):
class Meta:
model = models.Session
@classmethod
def _build(cls, target_class, *args, **kwargs):
user = kwargs.pop('user', None)
instance = target_class(*args, **kwargs)
if user:
instance.data['auth_user_username'] = user.username
instance.data['auth_user_id'] = user._primary_key
instance.data['auth_user_fullname'] = user.fullname
return instance
@classmethod
def _create(cls, target_class, *args, **kwargs):
instance = cls._build(target_class, *args, **kwargs)
instance.save()
return instance
class ArchiveJobFactory(DjangoModelFactory):
class Meta:
model = models.ArchiveJob
|
|
import uuid
from gevent import monkey
from mock import MagicMock
monkey.patch_all()
from vnc_api.vnc_api import KeyValuePair, KeyValuePairs
from kube_manager.common.kube_config_db import NamespaceKM, PodKM, ServiceKM
from kube_manager.tests.vnc import test_case
from kube_manager.tests.vnc.db_mock import DBBaseKM
from kube_manager.vnc.vnc_kubernetes import VncKubernetes
from kube_manager.vnc.vnc_kubernetes_config import VncKubernetesConfig
TEST_NAMESPACE = 'test-namespace'
TEST_SERVICE_NAME = 'test-service'
TEST_SERVICE_SPEC = {
'type': 'ClusterIP',
'ports': [{
'name': 'http',
'protocol': 'TCP',
'port': 80,
}]
}
class VncEndpointsTestBase(test_case.KMTestCase):
@classmethod
def setUpClass(cls, *args, **kwargs):
super(VncEndpointsTestBase, cls).setUpClass(*args, **kwargs)
cls.kube_mock = MagicMock()
VncKubernetes._vnc_kubernetes.endpoints_mgr._kube = cls.kube_mock
@classmethod
def tearDownClass(cls):
for pod in list(PodKM):
PodKM.delete(pod)
for service in list(ServiceKM):
ServiceKM.delete(service)
for namespace in list(NamespaceKM):
NamespaceKM.delete(namespace)
super(VncEndpointsTestBase, cls).tearDownClass()
def setUp(self, *args, **kwargs):
super(VncEndpointsTestBase, self).setUp(*args, **kwargs)
self._add_namespace(namespace_name=TEST_NAMESPACE)
self.service_uid = self._add_service(
namespace=TEST_NAMESPACE,
srv_name=TEST_SERVICE_NAME,
srv_spec=TEST_SERVICE_SPEC)
self.wait_for_all_tasks_done()
self._check_service()
def _add_namespace(self, namespace_name, isolated=False):
ns_uid = str(uuid.uuid4())
ns_add_event = self.create_add_namespace_event(namespace_name, ns_uid)
if isolated:
ns_add_event['object']['metadata']['annotations'] = {
'opencontrail.org/isolation': 'true'}
NamespaceKM.locate(ns_uid, ns_add_event['object'])
self.enqueue_event(ns_add_event)
return namespace_name, ns_uid
def _add_service(self, namespace, srv_name, srv_spec):
srv_meta = {
'name': srv_name,
'uid': str(uuid.uuid4()),
'namespace': namespace
}
srv_add_event = self.create_event(
'Service', srv_spec, srv_meta, 'ADDED')
ServiceKM.locate(srv_meta['uid'], srv_add_event['object'])
self.enqueue_event(srv_add_event)
self.kube_mock.get_resource.return_value = {'metadata': srv_meta}
return srv_meta['uid']
def _delete_service(self, srv_uid, srv_name, namespace):
srv_meta = {
'uid': srv_uid,
'name': srv_name,
'namespace': namespace
}
srv_delete_event = self.create_event('Service', {}, srv_meta, 'DELETED')
ServiceKM.delete(srv_uid)
self.enqueue_event(srv_delete_event)
def _check_service(self):
# Assert proper creation of loadbalancer, listener, and pool
lb = self._vnc_lib.loadbalancer_read(
id=self.service_uid, fields=('loadbalancer_listener_back_refs',))
self.assertEqual(len(lb.loadbalancer_listener_back_refs), 1)
self.listener_uid = lb.loadbalancer_listener_back_refs[0]['uuid']
lb_listener = self._vnc_lib.loadbalancer_listener_read(
id=self.listener_uid, fields=('loadbalancer_pool_back_refs',))
self.assertEqual(len(lb_listener.loadbalancer_pool_back_refs), 1)
self.pool_uid = lb_listener.loadbalancer_pool_back_refs[0]['uuid']
lb_pool = self._vnc_lib.loadbalancer_pool_read(
id=self.pool_uid, fields=('loadbalancer-pool-loadbalancer-member',))
self.assertIsNone(lb_pool.get_loadbalancer_members())
def _add_pod(self, pod_name, pod_namespace, pod_status):
pod_uid = str(uuid.uuid4())
pod_spec = {'nodeName': 'test-node'}
pod_meta = {
'name': pod_name,
'uid': pod_uid,
'namespace': pod_namespace,
'labels': {}
}
pod_add_event = self.create_event('Pod', pod_spec, pod_meta, 'ADDED')
pod_add_event['object']['status'] = pod_status
PodKM.locate(pod_uid, pod_add_event['object'])
self.enqueue_event(pod_add_event)
return pod_uid
def _add_endpoints(self, name, namespace, pod_uids=(), host_ips=()):
endpoint_uid = str(uuid.uuid4())
event = self.create_event(
kind='Endpoints',
spec={},
meta={
'name': name,
'namespace': namespace,
'uid': endpoint_uid
},
event_type='ADDED'
)
if pod_uids:
addresses = [{
'targetRef': {
'kind': 'Pod',
'name': 'test-pod',
'namespace': namespace,
'uid': pod_uid
}
} for pod_uid in pod_uids]
else:
addresses = [{
'ip': ip
} for ip in host_ips]
event['object']['subsets'] = [{
'ports': [{
'name': 'http',
'port': 80,
'protocol': 'TCP'
}],
'addresses': addresses
}]
self.enqueue_event(event)
return event['object']
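# Shape of the returned dict (a sketch; the exact envelope depends on create_event): the
# event object gains a 'subsets' list with one entry carrying the 'ports' plus either Pod
# 'targetRef' addresses (when pod_uids is given) or plain host 'ip' addresses.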
def _add_pod_to_endpoints(self, endpoints, namespace, pod_uid=None,
host_ip=None):
event = {
'object': endpoints,
'type': 'MODIFIED'
}
if pod_uid is not None:
event['object']['subsets'][0]['addresses'].append({
'targetRef': {
'kind': 'Pod',
'name': 'test-pod',
'namespace': namespace,
'uid': pod_uid
}
})
else:
event['object']['subsets'][0]['addresses'].append({
'ip': host_ip
})
self.enqueue_event(event)
return event['object']
def _delete_pod_from_endpoints(self, endpoints, pod_uid=None, host_ip=None):
event = {
'object': endpoints,
'type': 'MODIFIED'
}
if pod_uid is not None:
event['object']['subsets'][0]['addresses'] = [
address for address in endpoints['subsets'][0]['addresses']
if address['targetRef']['uid'] != pod_uid]
else:
event['object']['subsets'][0]['addresses'] = [
address for address in endpoints['subsets'][0]['addresses']
if address['ip'] != host_ip]
self.enqueue_event(event)
return event['object']
def _replace_pod_in_endpoints(self, endpoints, old_pod_uid, new_pod_uid):
event = {
'object': endpoints,
'type': 'MODIFIED'
}
for address in event['object']['subsets'][0]['addresses']:
if address['targetRef']['uid'] == old_pod_uid:
address['targetRef']['uid'] = new_pod_uid
break
self.enqueue_event(event)
return event['object']
def _delete_endpoints(self, endpoints):
self.enqueue_event({
'object': endpoints,
'type': 'DELETED'
})
def _get_vmi_uid(self, pod_uid):
# Assert proper creation of pod and return id of vm interface
vm = self._vnc_lib.virtual_machine_read(
id=pod_uid, fields=('virtual_machine_interface_back_refs',))
self.assertEqual(len(vm.virtual_machine_interface_back_refs), 1)
vm_interface = self._vnc_lib.virtual_machine_interface_read(
id=vm.virtual_machine_interface_back_refs[0]['uuid'])
return vm_interface.uuid
def _check_lb_members(self, *members):
lb_pool = self._vnc_lib.loadbalancer_pool_read(
id=self.pool_uid, fields=('loadbalancer-pool-loadbalancer-member',))
lb_members = lb_pool.get_loadbalancer_members() or ()
self.assertEqual(len(lb_members), len(members))
lb_members = [
self._vnc_lib.loadbalancer_member_read(id=member['uuid'])
for member in lb_members]
member_annotations = [member.annotations for member in lb_members]
for vm_uid, vmi_uid in members:
self.assertIn(
KeyValuePairs([
KeyValuePair('vm', vm_uid),
KeyValuePair('vmi', vmi_uid)]),
member_annotations)
class VncEndpointsTest(VncEndpointsTestBase):
def test_endpoints_add(self):
pod_uid = self._add_pod(
pod_name='test-pod',
pod_namespace=TEST_NAMESPACE,
pod_status={
'hostIP': '192.168.0.1',
'phase': 'created'
})
self.wait_for_all_tasks_done()
vmi_uid = self._get_vmi_uid(pod_uid)
self._add_endpoints(
name=TEST_SERVICE_NAME,
namespace=TEST_NAMESPACE,
pod_uids=[pod_uid])
self.wait_for_all_tasks_done()
self._check_lb_members((pod_uid, vmi_uid))
def test_endpoints_modify_pod_added_to_service(self):
pod1_uid = self._add_pod(
pod_name='test-pod',
pod_namespace=TEST_NAMESPACE,
pod_status={
'hostIP': '192.168.0.1',
'phase': 'created'
})
pod2_uid = self._add_pod(
pod_name='test-pod2',
pod_namespace=TEST_NAMESPACE,
pod_status={
'hostIP': '192.168.0.2',
'phase': 'created'
})
self.wait_for_all_tasks_done()
vmi1_uid = self._get_vmi_uid(pod1_uid)
vmi2_uid = self._get_vmi_uid(pod2_uid)
endpoints = self._add_endpoints(
name=TEST_SERVICE_NAME,
namespace=TEST_NAMESPACE,
pod_uids=[pod1_uid])
self.wait_for_all_tasks_done()
self._check_lb_members((pod1_uid, vmi1_uid))
self._add_pod_to_endpoints(
endpoints=endpoints,
namespace=TEST_NAMESPACE,
pod_uid=pod2_uid)
self.wait_for_all_tasks_done()
self._check_lb_members(
(pod1_uid, vmi1_uid),
(pod2_uid, vmi2_uid))
def test_endpoints_modify_pod_deleted_from_service(self):
pod1_uid = self._add_pod(
pod_name='test-pod',
pod_namespace=TEST_NAMESPACE,
pod_status={
'hostIP': '192.168.0.1',
'phase': 'created'
})
pod2_uid = self._add_pod(
pod_name='test-pod2',
pod_namespace=TEST_NAMESPACE,
pod_status={
'hostIP': '192.168.0.2',
'phase': 'created'
})
self.wait_for_all_tasks_done()
vmi1_uid = self._get_vmi_uid(pod1_uid)
vmi2_uid = self._get_vmi_uid(pod2_uid)
endpoints = self._add_endpoints(
name=TEST_SERVICE_NAME,
namespace=TEST_NAMESPACE,
pod_uids=(pod1_uid, pod2_uid))
self.wait_for_all_tasks_done()
self._check_lb_members(
(pod1_uid, vmi1_uid),
(pod2_uid, vmi2_uid))
self._delete_pod_from_endpoints(
endpoints=endpoints,
pod_uid=pod2_uid)
self.wait_for_all_tasks_done()
self._check_lb_members((pod1_uid, vmi1_uid))
def test_endpoints_modify_pods_added_deleted_from_service(self):
pod1_uid = self._add_pod(
pod_name='test-pod',
pod_namespace=TEST_NAMESPACE,
pod_status={
'hostIP': '192.168.0.1',
'phase': 'created'
})
pod2_uid = self._add_pod(
pod_name='test-pod2',
pod_namespace=TEST_NAMESPACE,
pod_status={
'hostIP': '192.168.0.2',
'phase': 'created'
})
pod3_uid = self._add_pod(
pod_name='test-pod3',
pod_namespace=TEST_NAMESPACE,
pod_status={
'hostIP': '192.168.0.3',
'phase': 'created'
})
self.wait_for_all_tasks_done()
vmi1_uid = self._get_vmi_uid(pod1_uid)
vmi2_uid = self._get_vmi_uid(pod2_uid)
vmi3_uid = self._get_vmi_uid(pod3_uid)
endpoints = self._add_endpoints(
name=TEST_SERVICE_NAME,
namespace=TEST_NAMESPACE,
pod_uids=(pod1_uid, pod2_uid))
self.wait_for_all_tasks_done()
self._check_lb_members(
(pod1_uid, vmi1_uid),
(pod2_uid, vmi2_uid))
self._replace_pod_in_endpoints(
endpoints=endpoints,
old_pod_uid=pod2_uid,
new_pod_uid=pod3_uid)
self.wait_for_all_tasks_done()
self._check_lb_members(
(pod1_uid, vmi1_uid),
(pod3_uid, vmi3_uid))
def test_endpoints_delete(self):
pod_uid = self._add_pod(
pod_name='test-pod',
pod_namespace=TEST_NAMESPACE,
pod_status={
'hostIP': '192.168.0.1',
'phase': 'created'
})
self.wait_for_all_tasks_done()
vmi_uid = self._get_vmi_uid(pod_uid)
endpoints = self._add_endpoints(
name=TEST_SERVICE_NAME,
namespace=TEST_NAMESPACE,
pod_uids=[pod_uid])
self.wait_for_all_tasks_done()
self._check_lb_members((pod_uid, vmi_uid))
self._delete_endpoints(endpoints)
self.wait_for_all_tasks_done()
self._check_lb_members()
def test_endpoints_add_before_service_add(self):
pod_uid = self._add_pod(
pod_name='test-pod',
pod_namespace=TEST_NAMESPACE,
pod_status={
'hostIP': '192.168.0.1',
'phase': 'created'
})
self.wait_for_all_tasks_done()
lb_members = self._vnc_lib.loadbalancer_members_list()
self._add_endpoints(
name='some-unexisting-service',
namespace=TEST_NAMESPACE,
pod_uids=[pod_uid])
self.wait_for_all_tasks_done()
# Assert no new loadbalancer member was created
self.assertEqual(lb_members, self._vnc_lib.loadbalancer_members_list())
def test_endpoints_delete_after_service_delete(self):
pod_uid = self._add_pod(
pod_name='test-pod',
pod_namespace=TEST_NAMESPACE,
pod_status={
'hostIP': '192.168.0.1',
'phase': 'created'
})
self.wait_for_all_tasks_done()
vmi_uid = self._get_vmi_uid(pod_uid)
endpoints = self._add_endpoints(
name=TEST_SERVICE_NAME,
namespace=TEST_NAMESPACE,
pod_uids=[pod_uid])
self.wait_for_all_tasks_done()
self._check_lb_members((pod_uid, vmi_uid))
self._delete_service(
srv_uid=self.service_uid,
srv_name=TEST_SERVICE_NAME,
namespace=TEST_NAMESPACE)
self.wait_for_all_tasks_done()
self._delete_endpoints(endpoints)
# No assertion here. It should just pass without error.
class VncEndpointsNestedTest(VncEndpointsTestBase):
@classmethod
def setUpClass(cls, extra_config_knobs=None, kube_args=None):
super(VncEndpointsNestedTest, cls).setUpClass(
extra_config_knobs=extra_config_knobs,
kube_args=(('KUBERNETES', 'nested_mode', '1'),))
@classmethod
def tearDownClass(cls):
super(VncEndpointsNestedTest, cls).tearDownClass()
DBBaseKM.set_nested(False)
def setUp(self, *args, **kwargs):
super(VncEndpointsNestedTest, self).setUp(*args, **kwargs)
self.default_vn = self._vnc_lib.virtual_network_read(
fq_name=VncKubernetesConfig.cluster_default_network_fq_name())
def tearDown(self, *args, **kwargs):
for vm in self._vnc_lib.virtual_machines_list()['virtual-machines']:
self.delete_virtual_machine(vm_id=vm['uuid'])
super(VncEndpointsNestedTest, self).tearDown()
def test_endpoints_add(self):
vm, vmi, _ = self.create_virtual_machine(
'test-vm', self.default_vn, '10.32.0.1')
self._add_pod(
pod_name='test-pod',
pod_namespace=TEST_NAMESPACE,
pod_status={
'hostIP': '10.32.0.1',
'phase': 'created'
})
self.wait_for_all_tasks_done()
self._add_endpoints(
name=TEST_SERVICE_NAME,
namespace=TEST_NAMESPACE,
host_ips=['10.32.0.1'])
self.wait_for_all_tasks_done()
self._check_lb_members((vm.uuid, vmi.uuid))
def test_endpoints_modify_pod_added_to_service(self):
vm1, vmi1, _ = self.create_virtual_machine(
'test-vm1', self.default_vn, '10.32.0.1')
vm2, vmi2, _ = self.create_virtual_machine(
'test-vm2', self.default_vn, '10.32.0.2')
for i in ('1', '2'):
self._add_pod(
pod_name='test-pod' + i,
pod_namespace=TEST_NAMESPACE,
pod_status={
'hostIP': '10.32.0.' + i,
'phase': 'created'
})
self.wait_for_all_tasks_done()
endpoints = self._add_endpoints(
name=TEST_SERVICE_NAME,
namespace=TEST_NAMESPACE,
host_ips=['10.32.0.1'])
self.wait_for_all_tasks_done()
self._check_lb_members((vm1.uuid, vmi1.uuid))
self._add_pod_to_endpoints(
endpoints=endpoints,
namespace=TEST_NAMESPACE,
host_ip='10.32.0.2')
self.wait_for_all_tasks_done()
self._check_lb_members(
(vm1.uuid, vmi1.uuid),
(vm2.uuid, vmi2.uuid))
def test_endpoints_modify_pod_deleted_from_service(self):
vm1, vmi1, _ = self.create_virtual_machine(
'test-vm1', self.default_vn, '10.32.0.1')
vm2, vmi2, _ = self.create_virtual_machine(
'test-vm2', self.default_vn, '10.32.0.2')
for i in ('1', '2'):
self._add_pod(
pod_name='test-pod' + i,
pod_namespace=TEST_NAMESPACE,
pod_status={
'hostIP': '10.32.0.' + i,
'phase': 'created'
})
self.wait_for_all_tasks_done()
endpoints = self._add_endpoints(
name=TEST_SERVICE_NAME,
namespace=TEST_NAMESPACE,
host_ips=['10.32.0.1', '10.32.0.2'])
self.wait_for_all_tasks_done()
self._check_lb_members(
(vm1.uuid, vmi1.uuid),
(vm2.uuid, vmi2.uuid))
self._delete_pod_from_endpoints(
endpoints=endpoints,
host_ip='10.32.0.2')
self.wait_for_all_tasks_done()
self._check_lb_members((vm1.uuid, vmi1.uuid))
def test_endpoints_delete(self):
vm, vmi, _ = self.create_virtual_machine(
'test-vm', self.default_vn, '10.32.0.1')
self._add_pod(
pod_name='test-pod',
pod_namespace=TEST_NAMESPACE,
pod_status={
'hostIP': '10.32.0.1',
'phase': 'created'
})
self.wait_for_all_tasks_done()
endpoints = self._add_endpoints(
name=TEST_SERVICE_NAME,
namespace=TEST_NAMESPACE,
host_ips=['10.32.0.1'])
self.wait_for_all_tasks_done()
self._check_lb_members((vm.uuid, vmi.uuid))
self._delete_endpoints(endpoints)
self.wait_for_all_tasks_done()
self._check_lb_members()
class VncEndpointsTestScaling(VncEndpointsTestBase):
def test_endpoints_add_scaling(self):
scale = 50
endpoint_list = []
for i in xrange(scale):
pod_uid = self._add_pod(
pod_name='test-pod' + str(i),
pod_namespace=TEST_NAMESPACE,
pod_status={
'hostIP': '192.168.0.1',
'phase': 'created'
})
self.wait_for_all_tasks_done()
vmi_uid = self._get_vmi_uid(pod_uid)
endpoints = self._add_endpoints(
name=TEST_SERVICE_NAME,
namespace=TEST_NAMESPACE,
pod_uids=[pod_uid])
self.wait_for_all_tasks_done()
self._check_lb_members((pod_uid, vmi_uid))
endpoint_list.append(endpoints)
for i in xrange(scale):
self._delete_endpoints(endpoint_list[i])
self.wait_for_all_tasks_done()
self._check_lb_members()
|
|
# -*- coding: utf-8 -*-
'''
Simple returner for CouchDB. Optional configuration
settings are listed below, along with sane defaults.
couchdb.db: 'salt'
couchdb.url: 'http://salt:5984/'
'''
import logging
import time
import urllib2
import json
log = logging.getLogger(__name__)
# Define the module's virtual name
__virtualname__ = 'couchdb'
def __virtual__():
return __virtualname__
def _get_options():
'''
Get the couchdb options from salt. Apply defaults
if required.
'''
server_url = __salt__['config.option']('couchdb.url')
if not server_url:
log.debug("Using default url.")
server_url = "http://salt:5984/"
db_name = __salt__['config.option']('couchdb.db')
if not db_name:
log.debug("Using default database.")
db_name = "salt"
return {"url": server_url, "db": db_name}
def _generate_doc(ret, options):
'''
Create an object that will be saved into the database based on the
options.
'''
# Create a shallow copy of the object that we will return.
r = dict(ret)
# Set the ID of the document to be the JID.
r["_id"] = ret["jid"]
# Add a timestamp field to the document
r["timestamp"] = time.time()
return r
def _request(method, url, content_type=None, _data=None):
'''
Makes an HTTP request. Returns the parsed JSON, or a dict containing the error.
'''
opener = urllib2.build_opener(urllib2.HTTPHandler)
request = urllib2.Request(url, data=_data)
if content_type:
request.add_header('Content-Type', content_type)
request.get_method = lambda: method
try:
handler = opener.open(request)
except urllib2.HTTPError as e:
return {'error': '{0}'.format(e)}
return json.loads(handler.read())
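# Illustrative call (a sketch):
#   _request('GET', 'http://salt:5984/_all_dbs')
# returns the parsed JSON body (here, a list of database names), or a dict such as
# {'error': 'HTTP Error 404: Object Not Found'} if CouchDB answers with an HTTP error.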
def returner(ret):
'''
Take in the return and shove it into the couchdb database.
'''
options = _get_options()
# Check to see if the database exists.
_response = _request("GET", options['url'] + "_all_dbs")
if options['db'] not in _response:
# Make a PUT request to create the database.
_response = _request("PUT", options['url'] + options['db'])
# Confirm that the response back was a simple 'ok': true.
if 'ok' not in _response or _response['ok'] is not True:
return log.error('Unable to create database "{0}"'
.format(options['db']))
log.info('Created database "{0}"'.format(options['db']))
# Call _generate_doc to get a dict object of the document we're going to
# shove into the database.
doc = _generate_doc(ret, options)
# Make the actual HTTP PUT request to create the doc.
_response = _request("PUT",
options['url'] + options['db'] + "/" + doc['_id'],
'application/json',
json.dumps(doc))
# Sanity check regarding the response.
if 'ok' not in _response or _response['ok'] is not True:
log.error('Unable to create document: "{0}"'.format(_response))
def get_jid(jid):
'''
Get the document with a given JID.
'''
options = _get_options()
_response = _request("GET", options['url'] + options['db'] + '/' + jid)
if 'error' in _response:
log.error('Unable to get JID "{0}" : "{1}"'.format(jid, _response))
return {}
return {_response['id']: _response}
def get_jids():
'''
List all the jobs that we have.
'''
options = _get_options()
_response = _request("GET", options['url'] + options['db'] + "/_all_docs")
# Make sure 'total_rows' is returned; if not, error out.
if 'total_rows' not in _response:
log.error('Didn\'t get valid response from requesting all docs: {0}'
.format(_response))
return []
# Return the rows.
ret = []
for row in _response['rows']:
# Because this shows all the documents in the database, including the
# design documents, whitelist the matching salt JIDs, which are 20-digit
# integers.
# See if the identifier is an int.
try:
int(row['id'])
except Exception:
continue
# Check for the correct number of digits by simply casting to str and
# checking the length.
if len(str(row['id'])) == 20:
ret.append(row['id'])
return ret
def get_fun(fun):
'''
Return a dict with key being minion and value
being the job details of the last run of function 'fun'.
'''
# Get the options..
options = _get_options()
# Define a simple return object.
_ret = {}
# get_minions takes care of calling ensure_views for us.
# For each minion we know about
for minion in get_minions():
# Make a query of the by-minion-fun-timestamp view and limit the count
# to 1.
_response = _request("GET",
options['url'] +
options['db'] +
('/_design/salt/_view/by-minion-fun-times'
'tamp?descending=true&endkey=["{0}","{1}'
'",0]&startkey=["{2}","{3}",9999999999]&'
'limit=1').format(minion,
fun,
minion,
fun))
# Skip the minion if we got an error..
if 'error' in _response:
log.warning('Got an error when querying for last command by a '
'minion: {0}'.format(_response['error']))
continue
# Skip the minion if we didn't get any rows back (i.e. the function
# they're looking for has a typo in it or some such).
if len(_response['rows']) < 1:
continue
# Set the response.
_ret[minion] = _response['rows'][0]['value']
return _ret
def get_minions():
'''
Return a list of minion identifiers from a request of the view.
'''
options = _get_options()
# Make sure the views are valid, which includes the minions..
if not ensure_views():
return []
# Make the request for the view..
_response = _request("GET",
options['url'] +
options['db'] +
"/_design/salt/_view/minions?group=true")
# Verify that we got a response back.
if 'rows' not in _response:
log.error('Unable to get available minions: {0}'.format(_response))
return []
# Iterate over the rows to build up a list and return it.
_ret = []
for row in _response['rows']:
_ret.append(row['key'])
return _ret
def ensure_views():
'''
This function makes sure that all the views that should
exist in the design document do exist.
'''
# Get the options so we have the URL and DB..
options = _get_options()
# Make a request to check if the design document exists.
_response = _request("GET",
options['url'] + options['db'] + "/_design/salt")
# If the document doesn't exist, or for some reason there are no views,
# (re)create the salt design document.
if 'error' in _response:
return set_salt_view()
# Determine if any views are missing from the design doc stored on the
# server. If we come across one, simply set the salt view and return out.
# set_salt_view will set all the views, so we don't need to continue to
# check.
for view in get_valid_salt_views():
if not view in _response['views']:
return set_salt_view()
# Valid views, return true.
return True
def get_valid_salt_views():
'''
Returns a dict object of views that should be
part of the salt design document.
'''
ret = {}
ret['minions'] = {}
ret['minions']['map'] = "function( doc ){ emit( doc.id, null ); }"
    ret['minions']['reduce'] = \
        "function( keys,values,rereduce ){ return keys[0]; }"
ret['by-minion-fun-timestamp'] = {}
ret['by-minion-fun-timestamp']['map'] = \
"function( doc ){ emit( [doc.id,doc.fun,doc.timestamp], doc ); }"
return ret
def set_salt_view():
'''
Helper function that sets the salt design
document. Uses get_valid_salt_views and some hardcoded values.
'''
options = _get_options()
# Create the new object that we will shove in as the design doc.
new_doc = {}
new_doc['views'] = get_valid_salt_views()
new_doc['language'] = "javascript"
# Make the request to update the design doc.
_response = _request("PUT",
options['url'] + options['db'] + "/_design/salt",
"application/json", json.dumps(new_doc))
if 'error' in _response:
log.warning("Unable to set the salt design document: {0}"
.format(_response['error']))
return False
return True
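# Illustrative sketch (not part of the original returner): the design document that
# set_salt_view() PUTs to CouchDB is simply the valid views plus the language key.
# Assuming this module's existing json import, the snippet below would print roughly:
#
#   {"language": "javascript",
#    "views": {"minions": {"map": "...", "reduce": "..."},
#              "by-minion-fun-timestamp": {"map": "..."}}}
#
#   _design_doc = {'language': 'javascript', 'views': get_valid_salt_views()}
#   print(json.dumps(_design_doc, indent=2))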
|
|
"""
The ``mlflow.onnx`` module provides APIs for logging and loading ONNX models in the MLflow Model
format. This module exports MLflow Models with the following flavors:
ONNX (native) format
This is the main flavor that can be loaded back as an ONNX model object.
:py:mod:`mlflow.pyfunc`
Produced for use by generic pyfunc-based deployment tools and batch inference.
"""
import os
import yaml
import numpy as np
from pathlib import Path
import pandas as pd
from mlflow import pyfunc
from mlflow.models import Model
from mlflow.models.model import MLMODEL_FILE_NAME
import mlflow.tracking
from mlflow.exceptions import MlflowException
from mlflow.models.signature import ModelSignature
from mlflow.models.utils import ModelInputExample, _save_example
from mlflow.protos.databricks_pb2 import RESOURCE_ALREADY_EXISTS
from mlflow.tracking.artifact_utils import _download_artifact_from_uri
from mlflow.utils.annotations import experimental
from mlflow.utils.environment import (
_mlflow_conda_env,
_validate_env_arguments,
_process_pip_requirements,
_process_conda_env,
_CONDA_ENV_FILE_NAME,
_REQUIREMENTS_FILE_NAME,
_CONSTRAINTS_FILE_NAME,
)
from mlflow.utils.requirements_utils import _get_pinned_requirement
from mlflow.utils.file_utils import write_to
from mlflow.utils.docstring_utils import format_docstring, LOG_MODEL_PARAM_DOCS
from mlflow.utils.model_utils import (
_get_flavor_configuration,
_validate_and_copy_code_paths,
_add_code_from_conf_to_system_path,
)
from mlflow.tracking._model_registry import DEFAULT_AWAIT_MAX_SLEEP_SECONDS
FLAVOR_NAME = "onnx"
ONNX_EXECUTION_PROVIDERS = ["CUDAExecutionProvider", "CPUExecutionProvider"]
def get_default_pip_requirements():
"""
:return: A list of default pip requirements for MLflow Models produced by this flavor.
Calls to :func:`save_model()` and :func:`log_model()` produce a pip environment
that, at minimum, contains these requirements.
"""
return list(
map(
_get_pinned_requirement,
[
"onnx",
# The ONNX pyfunc representation requires the OnnxRuntime
# inference engine. Therefore, the conda environment must
# include OnnxRuntime
"onnxruntime",
],
)
)
@experimental
def get_default_conda_env():
"""
:return: The default Conda environment for MLflow Models produced by calls to
:func:`save_model()` and :func:`log_model()`.
"""
return _mlflow_conda_env(additional_pip_deps=get_default_pip_requirements())
@experimental
@format_docstring(LOG_MODEL_PARAM_DOCS.format(package_name=FLAVOR_NAME))
def save_model(
onnx_model,
path,
conda_env=None,
code_paths=None,
mlflow_model=None,
signature: ModelSignature = None,
input_example: ModelInputExample = None,
pip_requirements=None,
extra_pip_requirements=None,
onnx_execution_providers=None,
):
"""
Save an ONNX model to a path on the local file system.
:param onnx_model: ONNX model to be saved.
:param path: Local path where the model is to be saved.
:param conda_env: {{ conda_env }}
:param code_paths: A list of local filesystem paths to Python file dependencies (or directories
containing file dependencies). These files are *prepended* to the system
path when the model is loaded.
:param mlflow_model: :py:mod:`mlflow.models.Model` this flavor is being added to.
:param signature: :py:class:`ModelSignature <mlflow.models.ModelSignature>`
describes model input and output :py:class:`Schema <mlflow.types.Schema>`.
The model signature can be :py:func:`inferred <mlflow.models.infer_signature>`
from datasets with valid model input (e.g. the training dataset with target
column omitted) and valid model output (e.g. model predictions generated on
the training dataset), for example:
.. code-block:: python
from mlflow.models.signature import infer_signature
train = df.drop_column("target_label")
predictions = ... # compute model predictions
signature = infer_signature(train, predictions)
:param input_example: Input example provides one or several instances of valid
model input. The example can be used as a hint of what data to feed the
model. The given example can be a Pandas DataFrame where the given
example will be serialized to json using the Pandas split-oriented
format, or a numpy array where the example will be serialized to json
by converting it to a list. Bytes are base64-encoded.
:param pip_requirements: {{ pip_requirements }}
:param extra_pip_requirements: {{ extra_pip_requirements }}
:param onnx_execution_providers: List of strings defining onnxruntime execution providers.
                                     Defaults to
                                     ``['CUDAExecutionProvider', 'CPUExecutionProvider']``,
                                     which prefers GPU execution over CPU when available.
See onnxruntime API for further descriptions:
https://onnxruntime.ai/docs/execution-providers/
"""
import onnx
if onnx_execution_providers is None:
onnx_execution_providers = ONNX_EXECUTION_PROVIDERS
_validate_env_arguments(conda_env, pip_requirements, extra_pip_requirements)
path = os.path.abspath(path)
if os.path.exists(path):
raise MlflowException(
message="Path '{}' already exists".format(path), error_code=RESOURCE_ALREADY_EXISTS
)
os.makedirs(path)
code_dir_subpath = _validate_and_copy_code_paths(code_paths, path)
if mlflow_model is None:
mlflow_model = Model()
if signature is not None:
mlflow_model.signature = signature
if input_example is not None:
_save_example(mlflow_model, input_example, path)
model_data_subpath = "model.onnx"
model_data_path = os.path.join(path, model_data_subpath)
# Save onnx-model
onnx.save_model(onnx_model, model_data_path)
pyfunc.add_to_model(
mlflow_model,
loader_module="mlflow.onnx",
data=model_data_subpath,
env=_CONDA_ENV_FILE_NAME,
code=code_dir_subpath,
)
mlflow_model.add_flavor(
FLAVOR_NAME,
onnx_version=onnx.__version__,
data=model_data_subpath,
providers=onnx_execution_providers,
code=code_dir_subpath,
)
mlflow_model.save(os.path.join(path, MLMODEL_FILE_NAME))
if conda_env is None:
if pip_requirements is None:
default_reqs = get_default_pip_requirements()
# To ensure `_load_pyfunc` can successfully load the model during the dependency
# inference, `mlflow_model.save` must be called beforehand to save an MLmodel file.
inferred_reqs = mlflow.models.infer_pip_requirements(
path,
FLAVOR_NAME,
fallback=default_reqs,
)
default_reqs = sorted(set(inferred_reqs).union(default_reqs))
else:
default_reqs = None
conda_env, pip_requirements, pip_constraints = _process_pip_requirements(
default_reqs,
pip_requirements,
extra_pip_requirements,
)
else:
conda_env, pip_requirements, pip_constraints = _process_conda_env(conda_env)
with open(os.path.join(path, _CONDA_ENV_FILE_NAME), "w") as f:
yaml.safe_dump(conda_env, stream=f, default_flow_style=False)
# Save `constraints.txt` if necessary
if pip_constraints:
write_to(os.path.join(path, _CONSTRAINTS_FILE_NAME), "\n".join(pip_constraints))
# Save `requirements.txt`
write_to(os.path.join(path, _REQUIREMENTS_FILE_NAME), "\n".join(pip_requirements))
def _load_model(model_file):
import onnx
onnx_model = onnx.load(model_file)
    # Validate the model's structure and consistency
onnx.checker.check_model(onnx_model)
return onnx_model
class _OnnxModelWrapper:
def __init__(self, path, providers=None):
import onnxruntime
        # Get the model metadata from the MLmodel YAML file, which may contain the
        # providers specification.
        local_path = str(Path(path).parent)
        model_meta = Model.load(os.path.join(local_path, MLMODEL_FILE_NAME))
        # Check if the MLmodel config has the providers metadata
        if "providers" in model_meta.flavors.get(FLAVOR_NAME).keys():
            providers = model_meta.flavors.get(FLAVOR_NAME)["providers"]
# If not, then default to the predefined list.
else:
providers = ONNX_EXECUTION_PROVIDERS
        # NOTE: Some distributions of onnxruntime require the providers argument to be
        # specified when the session is created, e.g. onnxruntime-gpu. The import call does
        # not differentiate which architecture-specific version has been installed, as all
        # of them are imported as onnxruntime. The onnxruntime documentation states that
        # from v1.9.0 some distributions require the providers list to be given when an
        # InferenceSession is created. Therefore the try/except structure below first
        # attempts to create an inference session with just the model path, as in
        # pre-v1.9.0 releases. If that fails, it retries the call with the providers list.
        # At the moment this covers just CUDA and CPU, and probably should be expanded.
        # User customisation is provided by a variable added to the save_model() function,
        # which allows the list of execution providers to be passed via an optional
        # argument, e.g.
#
# mlflow.onnx.save_model(..., providers=['CUDAExecutionProvider'...])
#
# For details of the execution providers construct of onnxruntime, see:
# https://onnxruntime.ai/docs/execution-providers/
#
        # For information on how execution providers are used with the onnxruntime InferenceSession,
# see the API page below:
# https://onnxruntime.ai/docs/api/python/api_summary.html#id8
#
try:
self.rt = onnxruntime.InferenceSession(path)
except ValueError:
self.rt = onnxruntime.InferenceSession(path, providers=providers)
assert len(self.rt.get_inputs()) >= 1
self.inputs = [(inp.name, inp.type) for inp in self.rt.get_inputs()]
self.output_names = [outp.name for outp in self.rt.get_outputs()]
def _cast_float64_to_float32(self, feeds):
for input_name, input_type in self.inputs:
if input_type == "tensor(float)":
feed = feeds.get(input_name)
if feed is not None and feed.dtype == np.float64:
feeds[input_name] = feed.astype(np.float32)
return feeds
@experimental
def predict(self, data):
"""
:param data: Either a pandas DataFrame, numpy.ndarray or a dictionary.
Dictionary input is expected to be a valid ONNX model feed dictionary.
Numpy array input is supported iff the model has a single tensor input and is
converted into an ONNX feed dictionary with the appropriate key.
Pandas DataFrame is converted to ONNX inputs as follows:
- If the underlying ONNX model only defines a *single* input tensor, the
DataFrame's values are converted to a NumPy array representation using the
`DataFrame.values()
<https://pandas.pydata.org/pandas-docs/stable/reference/api/
pandas.DataFrame.values.html#pandas.DataFrame.values>`_ method.
- If the underlying ONNX model defines *multiple* input tensors, each column
of the DataFrame is converted to a NumPy array representation.
For more information about the ONNX Runtime, see
`<https://github.com/microsoft/onnxruntime>`_.
:return: Model predictions. If the input is a pandas.DataFrame, the predictions are returned
in a pandas.DataFrame. If the input is a numpy array or a dictionary the
predictions are returned in a dictionary.
"""
if isinstance(data, dict):
feed_dict = data
elif isinstance(data, np.ndarray):
# NB: We do allow scoring with a single tensor (ndarray) in order to be compatible with
# supported pyfunc inputs iff the model has a single input. The passed tensor is
# assumed to be the first input.
if len(self.inputs) != 1:
inputs = [x[0] for x in self.inputs]
raise MlflowException(
"Unable to map numpy array input to the expected model "
"input. "
"Numpy arrays can only be used as input for MLflow ONNX "
"models that have a single input. This model requires "
"{0} inputs. Please pass in data as either a "
"dictionary or a DataFrame with the following tensors"
": {1}.".format(len(self.inputs), inputs)
)
feed_dict = {self.inputs[0][0]: data}
elif isinstance(data, pd.DataFrame):
if len(self.inputs) > 1:
feed_dict = {name: data[name].values for (name, _) in self.inputs}
else:
feed_dict = {self.inputs[0][0]: data.values}
else:
raise TypeError(
"Input should be a dictionary or a numpy array or a pandas.DataFrame, "
"got '{}'".format(type(data))
)
# ONNXRuntime throws the following exception for some operators when the input
# contains float64 values. Unfortunately, even if the original user-supplied input
# did not contain float64 values, the serialization/deserialization between the
# client and the scoring server can introduce 64-bit floats. This is being tracked in
# https://github.com/mlflow/mlflow/issues/1286. Meanwhile, we explicitly cast the input to
# 32-bit floats when needed. TODO: Remove explicit casting when issue #1286 is fixed.
feed_dict = self._cast_float64_to_float32(feed_dict)
predicted = self.rt.run(self.output_names, feed_dict)
if isinstance(data, pd.DataFrame):
def format_output(data):
# Output can be list and it should be converted to a numpy array
# https://github.com/mlflow/mlflow/issues/2499
data = np.asarray(data)
return data.reshape(-1)
response = pd.DataFrame.from_dict(
{c: format_output(p) for (c, p) in zip(self.output_names, predicted)}
)
return response
else:
return dict(zip(self.output_names, predicted))
def _load_pyfunc(path):
"""
Load PyFunc implementation. Called by ``pyfunc.load_pyfunc``.
"""
return _OnnxModelWrapper(path)
@experimental
def load_model(model_uri, dst_path=None):
"""
Load an ONNX model from a local file or a run.
:param model_uri: The location, in URI format, of the MLflow model, for example:
- ``/Users/me/path/to/local/model``
- ``relative/path/to/local/model``
- ``s3://my_bucket/path/to/model``
- ``runs:/<mlflow_run_id>/run-relative/path/to/model``
- ``models:/<model_name>/<model_version>``
- ``models:/<model_name>/<stage>``
For more information about supported URI schemes, see the
`Artifacts Documentation <https://www.mlflow.org/docs/latest/
tracking.html#artifact-stores>`_.
:param dst_path: The local filesystem path to which to download the model artifact.
This directory must already exist. If unspecified, a local output
path will be created.
:return: An ONNX model instance.
"""
local_model_path = _download_artifact_from_uri(artifact_uri=model_uri, output_path=dst_path)
flavor_conf = _get_flavor_configuration(model_path=local_model_path, flavor_name=FLAVOR_NAME)
_add_code_from_conf_to_system_path(local_model_path, flavor_conf)
onnx_model_artifacts_path = os.path.join(local_model_path, flavor_conf["data"])
return _load_model(model_file=onnx_model_artifacts_path)
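# Example (illustrative sketch, not part of this module): loading a previously logged
# ONNX model either natively or through the generic pyfunc flavor. The run id below is
# a hypothetical placeholder.
#
#   import mlflow
#   import pandas as pd
#
#   native_model = mlflow.onnx.load_model("runs:/<run_id>/model")    # onnx.ModelProto
#   pyfunc_model = mlflow.pyfunc.load_model("runs:/<run_id>/model")  # pyfunc wrapper
#   preds = pyfunc_model.predict(pd.DataFrame({"input": [1.0, 2.0]}))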
@experimental
@format_docstring(LOG_MODEL_PARAM_DOCS.format(package_name=FLAVOR_NAME))
def log_model(
onnx_model,
artifact_path,
conda_env=None,
code_paths=None,
registered_model_name=None,
signature: ModelSignature = None,
input_example: ModelInputExample = None,
await_registration_for=DEFAULT_AWAIT_MAX_SLEEP_SECONDS,
pip_requirements=None,
extra_pip_requirements=None,
onnx_execution_providers=None,
):
"""
Log an ONNX model as an MLflow artifact for the current run.
:param onnx_model: ONNX model to be saved.
:param artifact_path: Run-relative artifact path.
:param conda_env: {{ conda_env }}
:param code_paths: A list of local filesystem paths to Python file dependencies (or directories
containing file dependencies). These files are *prepended* to the system
path when the model is loaded.
:param registered_model_name: If given, create a model version under
``registered_model_name``, also creating a registered model if one
with the given name does not exist.
:param signature: :py:class:`ModelSignature <mlflow.models.ModelSignature>`
describes model input and output :py:class:`Schema <mlflow.types.Schema>`.
The model signature can be :py:func:`inferred <mlflow.models.infer_signature>`
from datasets with valid model input (e.g. the training dataset with target
column omitted) and valid model output (e.g. model predictions generated on
the training dataset), for example:
.. code-block:: python
from mlflow.models.signature import infer_signature
train = df.drop_column("target_label")
predictions = ... # compute model predictions
signature = infer_signature(train, predictions)
:param input_example: Input example provides one or several instances of valid
model input. The example can be used as a hint of what data to feed the
model. The given example can be a Pandas DataFrame where the given
example will be serialized to json using the Pandas split-oriented
format, or a numpy array where the example will be serialized to json
by converting it to a list. Bytes are base64-encoded.
:param await_registration_for: Number of seconds to wait for the model version to finish
being created and is in ``READY`` status. By default, the function
waits for five minutes. Specify 0 or None to skip waiting.
:param pip_requirements: {{ pip_requirements }}
:param extra_pip_requirements: {{ extra_pip_requirements }}
:param onnx_execution_providers: List of strings defining onnxruntime execution providers.
                                     Defaults to
                                     ``['CUDAExecutionProvider', 'CPUExecutionProvider']``,
                                     which prefers GPU execution over CPU when available.
See onnxruntime API for further descriptions:
https://onnxruntime.ai/docs/execution-providers/
:return: A :py:class:`ModelInfo <mlflow.models.model.ModelInfo>` instance that contains the
metadata of the logged model.
"""
return Model.log(
artifact_path=artifact_path,
flavor=mlflow.onnx,
onnx_model=onnx_model,
conda_env=conda_env,
code_paths=code_paths,
registered_model_name=registered_model_name,
signature=signature,
input_example=input_example,
await_registration_for=await_registration_for,
pip_requirements=pip_requirements,
extra_pip_requirements=extra_pip_requirements,
onnx_execution_providers=onnx_execution_providers,
)
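# Example (illustrative sketch, not part of this module): logging an ONNX model within a
# run. The path "model.onnx" is a hypothetical placeholder for any onnx.ModelProto.
#
#   import mlflow
#   import onnx
#
#   onnx_model = onnx.load("model.onnx")
#   with mlflow.start_run():
#       model_info = mlflow.onnx.log_model(onnx_model, artifact_path="model")
#       print(model_info.model_uri)   # e.g. "runs:/<run_id>/model"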
|
|
#!/usr/bin/python
import MySQLdb
from stemming.porter2 import stem
import math
from textblob import TextBlob as tb
import string
import operator
from testingMajorSimilarity import majorRelations
idfcache = {}
def tf(word, blob):
return float(blob.words.count(word))/ float(len(blob.words))
def n_containing(word, bloblist):
return sum(1 for blob in bloblist if word in blob)
def idf(word, bloblist):
if(word in idfcache):
return idfcache[word]
answer = float(math.log(len(bloblist)) / float((1 + n_containing(word, bloblist))))
idfcache[word] = answer
return answer
def tfidf(word, blob, bloblist):
return tf(word, blob) * idf(word, bloblist)
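# Worked example (illustrative only; values follow the formulas above):
#
#   doc1 = tb("machine learning and data")
#   doc2 = tb("data structures and algorithms")
#   docs = [doc1, doc2]
#   tf("data", doc1)            # 1/4 = 0.25
#   n_containing("data", docs)  # 2 (substring match on both blobs)
#   idf("data", docs)           # log(2) / (1 + 2) ~= 0.231
#   tfidf("data", doc1, docs)   # ~= 0.0578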
class Database:
def __init__(self):
self.courses = {}
def addCourse(self,course):
        self.courses[course.cid] = course
def getCourse(self,cid):
return self.courses[cid]
class Course:
def __init__(self,cid_,name_,dep_,num_,desc_):
self.cid = cid_
self.name = name_
self.department = dep_
self.number = num_
self.description = desc_
self.blob = ""
self.similarities = []
self.scores = {}
self.stemming = True
self.courseLevels = True
self.majorRelations = True
self.stemmedTitle = ""
def setBlob(self,blob_):
paragraph = filter(lambda x: x in printable, blob_)
blob = tb(paragraph)
newBlob = ""
if(self.stemming):
for word in blob.words:
newBlob+=" "+(stem(word.lower()))
self.blob = tb(newBlob)
def getBlob(self):
return self.blob
def setWords(self,array):
self.similarities = array
def getWords(self):
return self.similarities
def setScore(self,course2,score):
thousands = self.number/1000
thousands2 = course2.number/1000
hundreds2 = course2.number/100
tens2 = course2.number/10
numRep1 = list(str(self.number))
numRep2 = list(str(course2.number))
#print thousands,thousands2, "=>",
##this is a mess
coeff = 1.0
if(self.courseLevels):
if(numRep2[1] == '9' and numRep2[2] == '9'):
coeff= 0.0;
elif(thousands == 2 and thousands2 == 2):
coeff = 2.0
elif(thousands == 4 and thousands2==4):
coeff = 4.0
elif(thousands == 1 and thousands2==1):
coeff = 1.5
elif(thousands == 4 and thousands2==1):
coeff = 0.25
elif(thousands == 2 and thousands2==1):
coeff = 0.5
elif(thousands == 1 and thousands2==2):
coeff = 1.2
elif(thousands == 1 and thousands2==4):
coeff = 0.8
elif(thousands == 1 and thousands2==6):
coeff = .05
elif(thousands == 2 and thousands2==6):
coeff = .15
elif(thousands == 4 and thousands2==6):
coeff = 2.0
elif(thousands == 6 and thousands2==6):
coeff = 6.0
else:
coeff = 1.0
else:
coeff *= 1.0
if(self.majorRelations):
if(self.department == ""):
coeff *=1
else:
coeff *= (majorRelations(self.department,course2.department)+0.02)
#print coeff
self.scores[course2] = score*coeff
def getScore(self,course2):
return self.scores[course2]
def similarity(array1,array2):
##name similarity too!
#print array1,array2
if(len(array1)==0 or len(array2)==0):
return -1
score1 = 0.0
score2 = 0.0
dict_ = {}
'''
for item in array1:
if(item[0] in dict_):
dict_[item[0]] += float(item[1])
else:
dict_[item[0]] = float(item[1])
for elem in array2:
if(elem[0] in dict_):
score1+= float(dict_[elem[0]])*float(elem[1])
#print
'''
#print "SIMILAR RELEVANT WORDS MULTIPLIER MACHINE"
for item in array1:
for elem in array2:
if(item[0] == elem[0]):
score1 += float(item[1]*elem[1])
#print item[0],item[1],":",elem[0],elem[1]
return score1
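# Example (illustrative): words shared by both lists multiply their tf-idf weights
# and the products are summed.
#
#   similarity([("data", 0.5), ("ai", 0.1)], [("data", 0.3)])   # -> 0.5 * 0.3 = 0.15
#   similarity([], [("data", 0.3)])                             # -> -1 (empty input)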
def twoCourses(course1str,course2str):
##PRINTING
print "Comparing ",course1str," to ",course2str
similarities = {}
##NUMBER OF RELEVANT WORDS TO CONSIDER
cutoff_limit = 30
##GENERATE COURSE1 from course1 (string name),
cur.execute("SELECT * FROM Courses WHERE name = '"+course1str+"'")
tup = cur.fetchall()[0]
course1 = Course(tup[0],tup[1],tup[2],tup[3],tup[4])
course1.setBlob(course1.description)
blob1 = course1.getBlob()
##GENERATE COURSE2 from course2 (string name),
cur.execute("SELECT * FROM Courses WHERE name = '"+course2str+"'")
tup = cur.fetchall()[0]
course2 = Course(tup[0],tup[1],tup[2],tup[3],tup[4])
course2.setBlob(course2.description)
blob2 = course2.getBlob()
print course1.name
##GET MOST RELEVANT WORDS IN COURSE1 DESCRIPTION
scores = {word: tfidf(word, blob1, bloblist) for word in blob1.words}
sorted_words = sorted(scores.items(), key=lambda x: x[1], reverse=True)
##LIMIT IS THE AMOUNT OF RELEVANT WORDS TO CONSIDER,
##or total amount of words, whichever is less
limit = min(cutoff_limit,len(sorted_words))
##PRINTING MOST RELEVANT WORDS JUST CALCULATED
for word, score in sorted_words[:limit]:
print("\tWord: {}, TF-IDF: {}".format(word, round(score, 5)))
##SET THOSE COMMON WORDS TO THE COURSE1 OBJ.
course1.setWords(sorted_words[:limit])
print course2.name
    ##GET MOST RELEVANT WORDS IN COURSE2 DESCRIPTION
scores = {word: tfidf(word, blob2, bloblist) for word in blob2.words}
sorted_words = sorted(scores.items(), key=lambda x: x[1], reverse=True)
##LIMIT IS THE AMOUNT OF RELEVANT WORDS TO CONSIDER,
##or total amount of words, whichever is less
limit = min(cutoff_limit,len(sorted_words))
##PRINTING MOST RELEVANT WORDS JUST CALCULATED
for word, score in sorted_words[:limit]:
print("\tWord: {}, TF-IDF: {}".format(word, round(score, 5)))
##SET THOSE COMMON WORDS TO THE COURSE2 OBJ.
course2.setWords(sorted_words[:limit])
##SETTING SIMILARITY SCORE OF COURSE1 OF COURSE2
course1.setScore(course2,similarity(course1.getWords(),course2.getWords()))
score = course1.getScore(course2)
##PRINTING THE SCORES SO YOU CAN SEE WHAT'S UP!
print course1.name," , ",course2.name," ->" ,course1.getScore(course2)
return score
def courseSimilarity(course1):
print "\n \n ",course1," \n"
similarities = {}
cutoff_limit = 30
cur.execute("SELECT * FROM Courses WHERE name = '"+course1+"'")
tup = cur.fetchall()[0]
course = Course(tup[0],tup[1],tup[2],tup[3],tup[4])
course.setBlob(course.description)
blob = course.getBlob()
scores = {word: tfidf(word, blob, bloblist) for word in blob.words}
sorted_words = sorted(scores.items(), key=lambda x: x[1], reverse=True)
    limit = min(cutoff_limit,len(sorted_words))
for word, score in sorted_words[:limit]:
print("\tWord: {}, TF-IDF: {}".format(word, round(score, 5)))
course.setWords(sorted_words[:limit])
for c in courseList:
i = c.cid
blob = c.getBlob()
scores = {word: tfidf(word, blob, bloblist) for word in blob.words}
sorted_words = sorted(scores.items(), key=lambda x: x[1], reverse=True)
c.setWords(sorted_words)
limit = min(cutoff_limit,len(sorted_words))
course.setScore(c,similarity(course.getWords(),sorted_words[:limit]))
sorted_courses = sorted(courseList, key=lambda x: course.getScore(x), reverse=True)
for cour in sorted_courses[0:8]:
#print cour.cid," ",cour.department," ",cour.name," ",course.getScore(cour)
print cour.department,cour.number,cour.name
score = 0
return score
def customCourseSimilarity(level,description,major):
similarities = {}
cutoff_limit = 30
if(major == ""):
major = "Computer Science"
cur.execute("SELECT id FROM Departments WHERE name = '"+major+"'")
tup = cur.fetchall()[0]
course = Course(1,"custom",tup[0],level,description)
course.setBlob(course.description)
blob = course.getBlob()
scores = {word: tfidf(word, blob, bloblist) for word in blob.words}
sorted_words = sorted(scores.items(), key=lambda x: x[1], reverse=True)
    limit = min(cutoff_limit,len(sorted_words))
course.setWords(sorted_words[:limit])
for c in courseList:
i = c.cid
blob = c.getBlob()
scores = {word: tfidf(word, blob, bloblist) for word in blob.words}
sorted_words = sorted(scores.items(), key=lambda x: x[1], reverse=True)
c.setWords(sorted_words)
limit = min(cutoff_limit,len(sorted_words))
course.setScore(c,similarity(course.getWords(),sorted_words[:limit]))
sorted_courses = sorted(courseList, key=lambda x: course.getScore(x), reverse=True)
for cour in sorted_courses[0:15]:
#print cour.cid," ",cour.department," ",cour.name," ",course.getScore(cour)
print cour.department,cour.number,cour.name
score = 0
return score
db = MySQLdb.connect(host="localhost",  # your host, usually localhost
                     user="root",       # your username
                     passwd="root",     # your password
                     port=8889,
                     unix_socket='/Applications/MAMP/tmp/mysql/mysql.sock',
                     db="suggestr")     # name of the database
cur = db.cursor()
coursesData = []
cur.execute("SELECT * FROM courses")
courses = cur.fetchall()
cur.execute("SELECT Description FROM Courses")
allCourses = cur.fetchall()
printable = set(string.printable)
bloblist = []
for item in allCourses:
if(item[0]!= ""):
line = filter(lambda x: x in printable, item[0])
blob = tb(line)
for item in blob.words:
item = item.lower()
bloblist.append(blob)
courseList = []
cur.execute("SELECT * FROM courses")
courses = cur.fetchall()
for item in courses:
tup = item
course = Course(tup[0],tup[1],tup[2],tup[3],tup[4])
course.setBlob(course.description)
courseList.append(course)
'''
for i, blob in enumerate(bloblist):
print("Top words in document {}".format(i + 1))
scores = {word: tfidf(word, blob, bloblist) for word in blob.words}
sorted_words = sorted(scores.items(), key=lambda x: x[1], reverse=True)
for word, score in sorted_words[:3]:
print("\tWord: {}, TF-IDF: {}".format(word, round(score, 5)))
'''
'''
crs = ["Calculus I","Calculus II","Computer Organization","Foundations of Computer Science","Computer Science I","Beginning Programming for Engineers","Machine and Computational Learning","Data Science",\
"Introduction to Algorithms","Data Structures",\
"Networking Laboratory I","Numerical Computing"]
crs = ["General Psychology","Programming For Cognitive Science And Artific","Sculpture I"]
crs = ["Introduction To Visual Communication","Research in Biochemistry/Biophysics"]
crs = ["Introduction to Materials Engineering","Molecular Biochemistry I","Energy Politics"\
,"Nature and Society","Options, Futures, and Derivatives Markets","A Passion for Physics"\
,"Computer Music","Deep Listening","Construction Systems","Experimental Physics","Minds and Machines","Introduction to Engineering Analysis"]
for item in crs:
try:
courseSimilarity(item)
except:
print "Fail"
'''
#twoCourses("Introduction To Visual Communication","Research in Biochemistry/Biophysics")
#courseSimilarity("")
#customCourseSimilarity(2000,"Networking server big data","Computer Science")
#customCourseSimilarity(2000,"Big data finance finance finance prediction","Computer Science")
#customCourseSimilarity(4000,"I want a course that talks about philosophy and metaphysics","")
|
|
import re, os
import json
import commands
from termcolor import colored
from sqlupdater.parser import HiveDatabaseParser
from sqlupdater.utils import open_file, FileLock
class Executor(object):
def update_lock(self, project):
current_commit = str(project.repo.head.commit)
working_dir = project.repo.working_dir
file_name = os.path.join(os.path.dirname(working_dir), '.commit_lock')
FileLock.save(file_name, current_commit)
def execute(self, project):
raise Exception("Method not implemented")
class DummyExecutor(Executor):
def execute(self, project):
modified_files = project.diff()
if modified_files:
            print '%d files have been modified' % len(modified_files)
for _file in modified_files:
if _file.change_type in ['D', 'M']:
word = 'New' if _file.change_type == 'D' else 'Modified'
print "- %s " % word + colored(_file.file_path, "green")
elif _file.change_type in ['A']:
print "- Deleted " + colored(_file.file_path, "red")
else:
print 'Nothing has changed'
class CacheInterface(object):
    def set_item(self, key, value):
raise Exception("Method not implemented")
def remove_item(self, key, item):
raise Exception("Method not implemented")
    def update_item(self, key, value):
raise Exception("Method not implemented")
def get_item(self, key):
raise Exception("Method not implemented")
def has_item(self, key):
raise Exception("Method not implemented")
def set_value_in_item(self, key, item):
raise Exception("Method not implemented")
def store(self, path):
raise Exception("Method not implemented")
class CacheData(CacheInterface):
def __init__(self, project):
self._data = {}
self._project = project
working_dir = project.repo.working_dir
file_path = os.path.join(os.path.dirname(working_dir), '.db_cache')
self._path = file_path
self._data = self._get_data()
def _get_data(self):
if not os.path.exists(self._path):
return {}
_cached_data = open_file(self._path)
if not _cached_data:
return {}
return json.loads(open_file(self._path))
def set_item(self, key, value):
self._data[key] = value
def set_value_in_item(self, key, item):
if self.has_item(key):
self._data[key] += [item]
def remove_item(self, key, item):
if self.has_item(key):
if item in self._data[key]:
self._data[key].remove(item)
    def update_item(self, key, value):
        # Replace any existing value for the key with the new one.
        if self.has_item(key):
            del self._data[key]
        self.set_item(key, value)
def get_item(self, key):
if key not in self._data:
return None
return self._data[key]
def has_item(self, key):
if key not in self._data:
return False
return True
def __del__(self):
data = json.dumps(self._data)
with open(self._path, 'w+') as _file:
_file.write(data)
class HiveClient(object):
PARSE_DATA_REGEX = "OK\\n([a-zA-Z0-9_\\n]+)\\nTime"
def __init__(self, cache):
self._cache = cache
def _execute_command(self, command, return_output=False):
output = commands.getstatusoutput(command)
if output[0] == 0:
if return_output:
return output[1]
return True
else:
raise Exception(output[1])
def _get_items_from_output(self, output):
items = re.findall(self.PARSE_DATA_REGEX, output)
if len(items) > 0:
return items[0].split("\n")
return []
def get_databases(self):
if self._cache.has_item("databases"):
return self._cache.get_item("databases")
output = self._execute_command("sudo -u mapr hive -e \"SHOW "
"DATABASES\"", True)
databases = self._get_items_from_output(output)
self._cache.set_item("databases", databases)
return databases
def create_database(self, database):
if self._execute_command("sudo -u mapr hive -e \"CREATE "
"DATABASE IF NOT EXISTS %s\""
% database):
self._cache.set_value_in_item("databases", database)
return True
return False
def get_tables(self, database):
key = "{}_tables".format(database)
if self._cache.has_item(key):
return self._cache.get_item(key)
output = self._execute_command("sudo -u mapr hive -e \"USE %s; "
"SHOW TABLES;\"" % database, True)
tables = self._get_items_from_output(output)
self._cache.set_item(key, tables)
return tables
def create_table_from_file(self, file_path, database, table):
if self._execute_command("sudo -u mapr hive -f %s" % file_path):
key = "{}_tables".format(database)
self._cache.set_value_in_item(key, table)
return True
return False
def drop_table(self, database, table):
if self._execute_command("sudo -u mapr hive -e \"USE %s; DROP "
"TABLE %s;\"" % (database, table)):
key = "{}_tables".format(database)
if self._cache.has_item(key):
self._cache.remove_item(key, table)
return True
return False
def drop_database(self, database):
return self._execute_command("sudo -u mapr hive -e \"DROP DATABASE "
"%s;\"" % database)
def repair_table(self, database, table):
return self._execute_command("sudo -u mapr hive -e \"USE %s; "
"MSCK REPAIR TABLE %s\"" % (
database, table))
class HiveExecutor(Executor):
def __init__(self, hive_client):
self._hive_client = hive_client
def _database_exists(self, database):
if database in self._hive_client.get_databases():
return True
else:
return False
def _create_tables(self, buffer, query_file):
parser = HiveDatabaseParser()
try:
(database, table) = parser.parse(buffer)
if not self._database_exists(database):
if self._hive_client.create_database(database):
print colored("Database %s created" % database, "green")
else:
print colored("Was an error creating database %s" %
database, "red")
else:
print "Database %s already exists" % database
if table in self._hive_client.get_tables(database):
if self._hive_client.drop_table(database, table):
print "Table %s.%s dropped successfully" % (database,
table)
if self._hive_client.create_table_from_file(query_file,
database, table):
print "Table %s.%s created successfully" % (database, table)
if self._hive_client.repair_table(database, table):
print "Table %s.%s repaired successfully" % (database, table)
except ValueError, e:
print colored("Content not valid", "red")
raise
except Exception, e:
raise e
def execute(self, project):
modified_files = project.diff()
if modified_files:
            print '%d files have been modified' % len(modified_files)
for _file in modified_files:
if _file.change_type in ['D', 'M']:
word = 'New' if _file.change_type == 'D' else 'Modified'
print "- %s " % word + colored(_file.file_path, "green")
buffer = open_file(_file.file_path)
try:
self._create_tables(buffer, _file.file_path)
# self.update_lock(project)
except ValueError, e:
raise e
else:
print 'Everything up to date'
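# Example wiring (illustrative sketch only; `project` is assumed to be the sqlupdater
# project/repository wrapper whose .diff() and .repo are used above):
#
#   cache = CacheData(project)
#   client = HiveClient(cache)
#   HiveExecutor(client).execute(project)   # create/repair tables for modified files
#
#   DummyExecutor().execute(project)        # preview changes without touching Hive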
|
|
from __future__ import unicode_literals
from __future__ import absolute_import
import logging
from pypuppetdb.utils import json_to_datetime
log = logging.getLogger(__name__)
class Event(object):
"""This object represents an event.
:param node: The hostname of the node this event fired on.
:param status: The status for the event.
    :param timestamp: A timestamp of when this event occurred.
:param hash\_: The hash of this event.
:param title: The resource title this event was fired for.
:param property\_: The property of the resource this event was fired for.
:param message: A message associated with this event.
:param new_value: The new value/state of the resource.
:param old_value: The old value/state of the resource.
:param type\_: The type of the resource this event fired for.
:param class_\_: The class responsible for running this event.
:param execution_path: The path used to reach this particular resource.
:param source_file: The puppet source code file containing the class.
:param line_number: The line number in the source file containing the
definition responsible for triggering this event.
:ivar status: A :obj:`string` of this event's status.
:ivar failed: The :obj:`bool` equivalent of `status`.
    :ivar timestamp: A :obj:`datetime.datetime` of when this event happened.
    :ivar node: The hostname of the machine this event\
        occurred on.
:ivar hash\_: The hash of this event.
:ivar item: :obj:`dict` with information about the item/resource this\
event was triggered for.
"""
def __init__(self, node, status, timestamp, hash_, title, property_,
message, new_value, old_value, type_, class_, execution_path,
source_file, line_number):
self.node = node
self.status = status
if self.status == 'failure':
self.failed = True
else:
self.failed = False
self.timestamp = json_to_datetime(timestamp)
self.hash_ = hash_
self.item = {}
self.item['title'] = title
self.item['type'] = type_
self.item['property'] = property_
self.item['message'] = message
self.item['old'] = old_value
self.item['new'] = new_value
self.item['class'] = class_
self.item['execution_path'] = execution_path
self.item['source_file'] = source_file
self.item['line_number'] = line_number
self.__string = '{0}[{1}]/{2}'.format(self.item['type'],
self.item['title'],
self.hash_)
def __repr__(self):
return str('Event: {0}'.format(self.__string))
def __str__(self):
return str('{0}').format(self.__string)
def __unicode__(self):
return self.__string
class Report(object):
"""This object represents a report.
:param node: The hostname of the node this report originated on.
:param hash\_: A string uniquely identifying this report.
:param start: The start time of the agent run.
:type start: :obj:`string` formatted as ``%Y-%m-%dT%H:%M:%S.%fZ``
:param end: The time the agent finished its run.
:type end: :obj:`string` formatted as ``%Y-%m-%dT%H:%M:%S.%fZ``
:param received: The time PuppetDB received the report.
:type received: :obj:`string` formatted as ``%Y-%m-%dT%H:%M:%S.%fZ``
:param version: The catalog / configuration version.
:type version: :obj:`string`
:param format\_: The catalog format version.
:type format\_: :obj:`int`
:param agent_version: The Puppet agent version.
:type agent_version: :obj:`string`
:param transaction: The UUID of this transaction.
:type transaction: :obj:`string`
:ivar node: The hostname this report originated from.
:ivar hash\_: Unique identifier of this report.
:ivar start: :obj:`datetime.datetime` when the Puppet agent run started.
:ivar end: :obj:`datetime.datetime` when the Puppet agent run ended.
:ivar received: :obj:`datetime.datetime` when the report finished\
uploading.
:ivar version: :obj:`string` catalog configuration version.
:ivar format\_: :obj:`int` catalog format version.
:ivar agent_version: :obj:`string` Puppet Agent version.
:ivar run_time: :obj:`datetime.timedelta` of **end** - **start**.
:ivar transaction: UUID identifying this transaction.
"""
def __init__(self, node, hash_, start, end, received, version,
format_, agent_version, transaction):
self.node = node
self.hash_ = hash_
self.start = json_to_datetime(start)
self.end = json_to_datetime(end)
self.received = json_to_datetime(received)
self.version = version
self.format_ = format_
self.agent_version = agent_version
self.run_time = self.end - self.start
self.transaction = transaction
self.__string = '{0}'.format(self.hash_)
def __repr__(self):
return str('Report: {0}'.format(self.__string))
def __str__(self):
return str('{0}').format(self.__string)
def __unicode__(self):
return self.__string
class Fact(object):
    """This object represents a fact.
:param node: The hostname this fact was collected from.
:param name: The fact's name, such as 'osfamily'
:param value: The fact's value, such as 'Debian'
:ivar node: :obj:`string` holding the hostname.
:ivar name: :obj:`string` holding the fact's name.
:ivar value: :obj:`string` holding the fact's value.
"""
def __init__(self, node, name, value):
self.node = node
self.name = name
self.value = value
self.__string = '{0}/{1}'.format(self.name, self.node)
def __repr__(self):
return str('Fact: {0}'.format(self.__string))
def __str__(self):
return str('{0}').format(self.__string)
def __unicode__(self):
return self.__string
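# Example (illustrative):
#
#   fact = Fact('host.example.com', 'osfamily', 'Debian')
#   str(fact)    # 'osfamily/host.example.com'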
class Resource(object):
"""This object represents a resource.
:param node: The hostname this resource is located on.
:param name: The name of the resource in the Puppet manifest.
:param type\_: Type of the Puppet resource.
:param tags: Tags associated with this resource.
:type tags: :obj:`list`
:param exported: If it's an exported resource.
:type exported: :obj:`bool`
:param sourcefile: The Puppet manifest this resource is declared in.
:param sourceline: The line this resource is declared at.
:param parameters: The parameters this resource has been declared with.
:type parameters: :obj:`dict`
:ivar node: The hostname this resources is located on.
:ivar name: The name of the resource in the Puppet manifest.
:ivar type\_: The type of Puppet resource.
:ivar exported: :obj:`bool` if the resource is exported.
:ivar sourcefile: The Puppet manifest this resource is declared in.
:ivar sourceline: The line this resource is declared at.
:ivar parameters: :obj:`dict` with key:value pairs of parameters.
:ivar relationships: :obj:`list` Contains all relationships to other\
resources
"""
def __init__(self, node, name, type_, tags, exported, sourcefile,
                 sourceline, parameters=None):
self.node = node
self.name = name
self.type_ = type_
self.tags = tags
self.exported = exported
self.sourcefile = sourcefile
self.sourceline = sourceline
        self.parameters = parameters if parameters is not None else {}
self.relationships = []
self.__string = '{0}[{1}]'.format(self.type_, self.name)
def __repr__(self):
return str('<Resource: {0}>').format(self.__string)
def __str__(self):
return str('{0}').format(self.__string)
def __unicode__(self):
return self.__string
class Node(object):
"""This object represents a node. It additionally has some helper methods
so that you can query for resources or facts directly from the node scope.
:param api: API object.
:param name: Hostname of this node.
:param deactivated: (default `None`) Time this node was deactivated at.
:type deactivated: :obj:`string` formatted as ``%Y-%m-%dT%H:%M:%S.%fZ``
:param report_timestamp: (default `None`) Time of the last report.
:type report_timestamp: :obj:`string` formatted as\
``%Y-%m-%dT%H:%M:%S.%fZ``
:param catalog_timestamp: (default `None`) Time the last time a catalog\
was compiled.
:type catalog_timestamp: :obj:`string` formatted as\
``%Y-%m-%dT%H:%M:%S.%fZ``
:param facts_timestamp: (default `None`) Time the last time facts were\
collected.
:type facts_timestamp: :obj:`string` formatted as\
``%Y-%m-%dT%H:%M:%S.%fZ``
:param status: (default `None`) Status of the node\
changed | unchanged | unreported | failed
:type status: :obj:`string`
:param events: (default `None`) Counted events from latest Report
:type events: :obj:`dict`
:param unreported_time: (default `None`) Time since last report
:type unreported_time: :obj:`string`
:ivar name: Hostname of this node.
:ivar deactivated: :obj:`datetime.datetime` when this host was\
deactivated or `False`.
:ivar report_timestamp: :obj:`datetime.datetime` when the last run\
        occurred or `None`.
:ivar catalog_timestamp: :obj:`datetime.datetime` last time a catalog was\
compiled or `None`.
:ivar facts_timestamp: :obj:`datetime.datetime` last time when facts were\
collected or `None`.
"""
def __init__(self, api, name, deactivated=None, report_timestamp=None,
catalog_timestamp=None, facts_timestamp=None,
status=None, events=None, unreported_time=None):
self.name = name
self.status = status
self.events = events
self.unreported_time = unreported_time
if deactivated is not None:
self.deactivated = json_to_datetime(deactivated)
else:
self.deactivated = False
if report_timestamp is not None:
self.report_timestamp = json_to_datetime(report_timestamp)
else:
self.report_timestamp = report_timestamp
if facts_timestamp is not None:
self.facts_timestamp = json_to_datetime(facts_timestamp)
else:
self.facts_timestamp = facts_timestamp
if catalog_timestamp is not None:
self.catalog_timestamp = json_to_datetime(catalog_timestamp)
else:
self.catalog_timestamp = catalog_timestamp
self.__api = api
self.__query_scope = '["=", "certname", "{0}"]'.format(self.name)
self.__string = self.name
def __repr__(self):
return str('<Node: {0}>').format(self.__string)
def __str__(self):
return str('{0}').format(self.__string)
def __unicode__(self):
return self.__string
def facts(self):
"""Get all facts of this node."""
return self.__api.facts(query=self.__query_scope)
def fact(self, name):
"""Get a single fact from this node."""
facts = self.__api.facts(name=name, query=self.__query_scope)
return next(fact for fact in facts)
def resources(self, type_=None, title=None):
"""Get all resources of this node or all resources of the specified
type."""
if type_ is None:
resources = self.__api.resources(query=self.__query_scope)
elif type_ is not None and title is None:
resources = self.__api.resources(type_=type_,
query=self.__query_scope)
else:
resources = self.__api.resources(type_=type_, title=title,
query=self.__query_scope)
return resources
def resource(self, type_, title):
"""Get a resource matching the supplied type and title."""
resources = self.__api.resources(type_=type_, title=title,
query=self.__query_scope)
return next(resource for resource in resources)
def reports(self):
"""Get all reports for this node."""
return self.__api.reports(self.__query_scope)
class Catalog(object):
"""
    This object represents a compiled catalog from puppet. It contains Resource
    and Edge objects that represent the dependency graph.
    :param node: Name of the host
    :type node: :obj:`string`
:param edges: Edges returned from Catalog data
:type edges: :obj:`list` containing :obj:`dict` with Edge information
:param resources: Resources returned from Catalog data
:type resources: :obj:`list` containing :obj:`dict` with Resources
:param version: Catalog version from Puppet (unique for each node)
:type version: :obj:`string`
:param transaction_uuid: A string used to match the catalog with the
corresponding report that was issued during
the same puppet run
:type transaction_uuid: :obj:`string`
:ivar node: :obj:`string` Name of the host
:ivar version: :obj:`string` Catalog version from Puppet
(unique for each node)
:ivar transaction_uuid: :obj:`string` used to match the catalog with
corresponding report
:ivar edges: :obj:`list` of :obj:`Edge` The source Resource object\
of the relationship
:ivar resources: :obj:`dict` of :obj:`Resource` The source Resource\
object of the relationship
"""
def __init__(self, node, edges, resources,
version, transaction_uuid):
self.node = node
self.version = version
self.transaction_uuid = transaction_uuid
self.resources = dict()
for resource in resources:
if 'file' not in resource:
resource['file'] = None
if 'line' not in resource:
resource['line'] = None
identifier = resource['type']+'['+resource['title']+']'
res = Resource(node, resource['title'],
resource['type'], resource['tags'],
resource['exported'], resource['file'],
resource['line'], resource['parameters'])
self.resources[identifier] = res
self.edges = []
for edge in edges:
identifier_source = edge['source']['type'] + \
'[' + edge['source']['title'] + ']'
identifier_target = edge['target']['type'] + \
'[' + edge['target']['title'] + ']'
e = Edge(self.resources[identifier_source],
self.resources[identifier_target],
edge['relationship'])
self.edges.append(e)
self.resources[identifier_source].relationships.append(e)
self.resources[identifier_target].relationships.append(e)
self.__string = '{0}/{1}'.format(self.node, self.transaction_uuid)
def __repr__(self):
return str('<Catalog: {0}>').format(self.__string)
def __str__(self):
return str('{0}').format(self.__string)
def __unicode__(self):
return self.__string
def get_resources(self):
return self.resources.itervalues()
def get_resource(self, resource_type, resource_title):
identifier = resource_type + \
'[' + resource_title + ']'
return self.resources[identifier]
def get_edges(self):
return iter(self.edges)
class Edge(object):
"""
This object represents the connection between two Resource objects
:param source: The source Resource object of the relationship
:type source: :obj:`Resource`
:param target: The target Resource object of the relationship
:type target: :obj:`Resource`
    :param relationship: Name of the Puppet Resource relationship
:type relationship: :obj:`string`
:ivar source: :obj:`Resource` The source Resource object
:ivar target: :obj:`Resource` The target Resource object
:ivar relationship: :obj:`string` Name of the Puppet Resource relationship
"""
def __init__(self, source, target, relationship):
self.source = source
self.target = target
self.relationship = relationship
self.__string = '{0} - {1} - {2}'.format(self.source,
self.relationship,
self.target)
def __repr__(self):
return str('<Edge: {0}>').format(self.__string)
def __str__(self):
return str('{0}').format(self.__string)
def __unicode__(self):
return self.__string
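# Example (illustrative sketch): building a tiny Catalog from raw PuppetDB-style dicts.
# The field values below are made up for demonstration.
#
#   resources = [
#       {'type': 'File', 'title': '/etc/motd', 'tags': ['file'],
#        'exported': False, 'parameters': {'ensure': 'present'}},
#       {'type': 'Class', 'title': 'Motd', 'tags': ['class'],
#        'exported': False, 'parameters': {}},
#   ]
#   edges = [{'source': {'type': 'Class', 'title': 'Motd'},
#             'target': {'type': 'File', 'title': '/etc/motd'},
#             'relationship': 'contains'}]
#   catalog = Catalog('host.example.com', edges, resources, '1419', 'uuid-1234')
#   catalog.get_resource('File', '/etc/motd')   # <Resource: File[/etc/motd]>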
|
|
#!/usr/bin/env python
import asyncio
import getopt
import logging
import os
import sys
from typing import Dict, List, Optional, Tuple, cast
import tomodachi
from tomodachi.config import parse_config_files
from tomodachi.launcher import ServiceLauncher
try:
if ModuleNotFoundError:
pass
except Exception:
class ModuleNotFoundError(ImportError):
pass
class CLI:
def help_command_usage(self) -> str:
return (
"Usage: tomodachi <command> [options] [arguments]\n"
"\n"
"Options:\n"
" -h, --help Show this help message and exit\n"
" -v, --version Print tomodachi version\n"
" --dependency-versions Print versions of dependencies\n"
"\n"
"Available commands:\n"
" ---\n"
" Command: run\n"
" Starts service(s) defined in the .py files specified as <service> argument(s)\n"
"\n"
" $ tomodachi run <service ...> [-c <config-file ...>] [--production]\n"
" | --loop [auto|asyncio|uvloop] Event loop implementation [asyncio]\n"
" | --production Disable restart on file changes\n"
" | -c, --config <files> Use configuration from JSON files\n"
" | -l, --log <level>, --log-level <level> Specify log level\n"
"\n"
">> Version: {}\n"
">> Full documentation at: https://tomodachi.dev/docs"
).format(tomodachi.__version__)
def help_command(self) -> None:
print(self.help_command_usage())
sys.exit(2)
def version_command(self) -> None:
print("tomodachi {}".format(tomodachi.__version__))
sys.exit(0)
def dependency_versions_command(self) -> None:
CLI.test_dependencies(fail_on_errors=False, output_versions=True, output_errors=True)
sys.exit(0)
@classmethod
def test_dependencies(
cls, fail_on_errors: bool = True, output_versions: bool = False, output_errors: bool = True
) -> Dict[str, Optional[str]]:
errors = False
aioamqp_version = ""
aiobotocore_version = ""
aiohttp_version = ""
botocore_version = ""
protobuf_version = ""
uvloop_version = ""
try:
import aioamqp # noqa # isort:skip
aioamqp_version = aioamqp.__version__
if output_versions:
print("aioamqp/{}".format(aioamqp_version))
except ModuleNotFoundError as e: # pragma: no cover
errors = True
if output_errors:
print("Dependency failure: 'aioamqp' failed to load (error: \"{}\")".format(str(e)))
except Exception as e: # pragma: no cover
errors = True
if output_errors:
print("Dependency failure: 'aioamqp' failed to load (error: \"{}\")".format(str(e)))
logging.exception("")
print("")
try:
import aiobotocore # noqa # isort:skip
aiobotocore_version = aiobotocore.__version__
if output_versions:
print("aiobotocore/{}".format(aiobotocore_version))
except ModuleNotFoundError as e: # pragma: no cover
errors = True
if output_errors:
print("Dependency failure: 'aiobotocore' failed to load (error: \"{}\")".format(str(e)))
except Exception as e: # pragma: no cover
errors = True
if output_errors:
print("Dependency failure: 'aiobotocore' failed to load (error: \"{}\")".format(str(e)))
logging.exception("")
print("")
try:
import aiohttp # noqa # isort:skip
aiohttp_version = aiohttp.__version__
if output_versions:
print("aiohttp/{}".format(aiohttp_version))
except ModuleNotFoundError as e: # pragma: no cover
errors = True
if output_errors:
print("Dependency failure: 'aiohttp' failed to load (error: \"{}\")".format(str(e)))
except Exception as e: # pragma: no cover
errors = True
if output_errors:
print("Dependency failure: 'aiohttp' failed to load (error: \"{}\")".format(str(e)))
logging.exception("")
print("")
try:
import botocore # noqa # isort:skip
botocore_version = botocore.__version__
if output_versions:
print("botocore/{}".format(botocore_version))
except ModuleNotFoundError as e: # pragma: no cover
errors = True
if output_errors:
print("Dependency failure: 'botocore' failed to load (error: \"{}\")".format(str(e)))
except Exception as e: # pragma: no cover
errors = True
if output_errors:
print("Dependency failure: 'botocore' failed to load (error: \"{}\")".format(str(e)))
logging.exception("")
print("")
try:
# Optional
import google.protobuf # noqa # isort:skip
protobuf_version_ = google.protobuf.__version__
if isinstance(protobuf_version_, bytes):
protobuf_version = cast(bytes, protobuf_version_).decode()
else:
protobuf_version = str(protobuf_version_)
if output_versions:
print("protobuf/{}".format(protobuf_version))
except ModuleNotFoundError: # pragma: no cover
pass
except Exception: # pragma: no cover
pass
try:
# Optional
import uvloop # noqa # isort:skip
uvloop_version = uvloop.__version__
if output_versions:
print("uvloop/{}".format(uvloop_version))
except ModuleNotFoundError: # pragma: no cover
pass
except Exception: # pragma: no cover
pass
if not errors:
try:
import tomodachi.helpers.logging # noqa # isort:skip
import tomodachi.invoker # noqa # isort:skip
import tomodachi.transport.amqp # noqa # isort:skip
import tomodachi.transport.aws_sns_sqs # noqa # isort:skip
import tomodachi.transport.http # noqa # isort:skip
import tomodachi.transport.schedule # noqa # isort:skip
except Exception as e: # pragma: no cover
errors = True
if output_errors:
print('Dependency failure: tomodachi essentials failed to load (error: "{}")'.format(str(e)))
logging.exception("")
print("")
if errors:
if fail_on_errors:
logging.getLogger("exception").warning("Unable to initialize dependencies")
logging.getLogger("exception").warning("Error: See above exceptions and traceback")
sys.exit(1)
elif output_errors:
print("There were errors - see above for exceptions and traceback")
return {
"aioamqp": aioamqp_version or None,
"aiobotocore": aiobotocore_version or None,
"aiohttp": aiohttp_version or None,
"botocore": botocore_version or None,
"protobuf": protobuf_version or None,
"uvloop": uvloop_version or None,
}
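    # Example (illustrative): calling the dependency check programmatically and
    # inspecting the collected versions.
    #
    #   versions = CLI.test_dependencies(fail_on_errors=False, output_versions=False,
    #                                    output_errors=False)
    #   versions.get("aiohttp")   # e.g. "3.8.1", or None if not installed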
def run_command_usage(self) -> str:
return "Usage: tomodachi run <service ...> [-c <config-file ...>] [--loop auto|asyncio|uvloop] [--production]"
def run_command(self, args: List[str]) -> None:
if len(args) == 0:
print(self.run_command_usage())
else:
configuration = None
log_level = logging.INFO
env_loop = str(os.getenv("TOMODACHI_LOOP", "")).lower() or None
if env_loop or "--loop" in args:
if "--loop" in args:
index = args.index("--loop")
args.pop(index)
value = args.pop(index).lower()
if env_loop and env_loop != value:
print("Invalid argument to --loop, '{}' differs from env TOMODACHI_LOOP".format(value))
sys.exit(2)
elif env_loop:
value = env_loop
else:
value = "auto"
if value in ("auto", "default"):
pass
elif value in ("asyncio", "aio", "async"):
asyncio.set_event_loop_policy(asyncio.DefaultEventLoopPolicy())
pass
elif value in ("uvloop", "libuv", "uv"):
try:
import uvloop # noqa # isort:skip
except Exception: # pragma: no cover
print("The 'uvloop' package needs to be installed to use uvloop event loop")
sys.exit(2)
asyncio.set_event_loop_policy(uvloop.EventLoopPolicy())
else:
print("Invalid argument to --loop, event loop '{}' not recognized".format(value))
sys.exit(2)
if "-c" in args or "--config" in args:
index = args.index("-c") if "-c" in args else args.index("--config")
args.pop(index)
config_files: List[str] = []
while len(args) > index and args[index][0] != "-":
value = args.pop(index)
if value not in config_files:
config_files.append(value)
if not len(config_files):
print("Missing config file on command line")
sys.exit(2)
try:
configuration = parse_config_files(config_files)
except FileNotFoundError as e:
print("Invalid config file: {}".format(str(e)))
sys.exit(2)
except ValueError as e:
print("Invalid config file, invalid JSON format: {}".format(str(e)))
sys.exit(2)
env_production = str(os.getenv("TOMODACHI_PRODUCTION", "")).lower() or None
if env_production and env_production in ("0", "no", "none", "false"):
env_production = None
if env_production or "--production" in args:
if "--production" in args:
index = args.index("--production")
args.pop(index)
watcher = None
else:
cwd = os.path.realpath(os.getcwd())
root_directories = [cwd]
for arg in set(args):
if not arg.startswith("/") and not arg.startswith("~"):
root_directories.append(os.path.realpath(os.path.dirname(os.path.join(cwd, arg))))
else:
root_directories.append(os.path.realpath(os.path.dirname(arg)))
for p in str(os.getenv("PYTHONPATH", "")).split(os.pathsep):
if not p:
continue
if not p.startswith("/") and not p.startswith("~"):
root_directories.append(os.path.realpath(os.path.join(cwd, p)))
else:
root_directories.append(os.path.realpath(p))
from tomodachi.watcher import Watcher # noqa # isort:skip
watcher = Watcher(root=root_directories, configuration=configuration)
if "-l" in args or "--log" in args or "--log-level" in args:
index = (
args.index("-l")
if "-l" in args
else args.index("--log")
if "--log" in args
else args.index("--log-level")
)
args.pop(index)
if len(args) > index:
log_level = getattr(logging, args.pop(index).upper(), None) or log_level
logging.basicConfig(format="%(asctime)s (%(name)s): %(message)s", level=log_level)
logging.Formatter(fmt="%(asctime)s.%(msecs).03d", datefmt="%Y-%m-%d %H:%M:%S")
ServiceLauncher.run_until_complete(set(args), configuration, watcher)
sys.exit(0)
def main(self, argv: List[str]) -> None:
opts: List[Tuple[str, str]] = []
args: List[str] = []
try:
opts, args = getopt.getopt(
argv, "hlvV ", ["help", "log", "version", "version", "dependency-versions", "dependencies", "deps"]
)
except getopt.GetoptError:
self.help_command()
for opt, _ in opts:
if opt in ("-h", "--help"):
self.help_command()
if opt in ("-v", "-V", "--version"):
self.version_command()
if opt in ("--dependency-versions", "--dependencies", "--deps"):
self.dependency_versions_command()
if len(args):
if args[0] in ("run", "start", "go"):
self.run_command(args[1:])
self.help_command()
def cli_entrypoint(argv: Optional[List[str]] = None) -> None:
if argv is None:
argv = sys.argv
if argv[0].endswith("pytest"):
argv = ["tomodachi"]
CLI().main(argv[1:])
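# Minimal usage sketch, assuming this module is wired up as tomodachi's console-script
# entry point and that a service file (here a hypothetical service.py) exists:
#
#     tomodachi run service.py --loop uvloop --production
#
# Executing the module directly defers to cli_entrypoint(), which parses sys.argv the
# same way the installed console script does.
if __name__ == "__main__":  # pragma: no cover
    cli_entrypoint()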
|
|
'''
HaloTools HOD Simulation
'''
import numpy as np
import pyfof
#haltools functions
from halotools.sim_manager import CachedHaloCatalog
from halotools.empirical_models import PrebuiltHodModelFactory
from halotools.mock_observables import tpcf
from halotools.empirical_models.factories.mock_helpers import three_dim_pos_bundle
from halotools.mock_observables import FoFGroups
#ccppabc functions
import util
from data import data_random
from data import data_RR
from data import data_gmf_bins
from data import xi_binedges
from group_richness import gmf
from group_richness import richness
class MCMC_HODsim(object):
def __init__(self, Mr=-21, b_normal=0.25):
        ''' Forward model used in the MCMC inference.
        Given a set of HOD parameters, the model populates a halo catalog with galaxies using HaloTools.
'''
self.Mr = Mr
self.b_normal = b_normal
        thr = -1. * float(Mr)
self.model = PrebuiltHodModelFactory('zheng07', threshold=thr)
self.halocat = CachedHaloCatalog(simname = 'multidark', redshift = 0, halo_finder = 'rockstar')
self.RR = data_RR(box='md_all')
self.randoms = data_random('md_all')
self.NR = len(self.randoms)
def __call__(self, theta, prior_range=None, observables=['nbar', 'gmf']):
return self._sum_stat(theta, prior_range=prior_range, observables=observables)
def _sum_stat(self, theta, prior_range=None, observables=['nbar', 'gmf']):
'''
Given theta, sum_stat calculates the observables from our forward model
Parameters
----------
        theta : array of HOD parameters, ordered as [logM0, ln(sigma_logM), logMmin, alpha, logM1].
        prior_range : if specified, theta is first checked against this (N_param, 2) array of
            lower/upper prior bounds before the mock is populated.
'''
self.model.param_dict['logM0'] = theta[0]
self.model.param_dict['sigma_logM'] = np.exp(theta[1])
self.model.param_dict['logMmin'] = theta[2]
self.model.param_dict['alpha'] = theta[3]
self.model.param_dict['logM1'] = theta[4]
rbins = xi_binedges()
rmax = rbins.max()
approx_cell1_size = [rmax , rmax , rmax]
approx_cellran_size = [rmax , rmax , rmax]
if prior_range is None:
self.model.populate_mock(self.halocat, enforce_PBC=False)
pos =three_dim_pos_bundle(self.model.mock.galaxy_table, 'x', 'y', 'z')
obvs = []
for obv in observables:
if obv == 'nbar':
obvs.append(len(pos) / 1000.**3.) # nbar of the galaxy catalog
elif obv == 'gmf':
nbar = len(pos) / 1000**3.
b = self.b_normal * (nbar)**(-1./3)
groups = pyfof.friends_of_friends(pos , b)
w = np.array([len(x) for x in groups])
gbins =data_gmf_bins()
gmf = np.histogram(w , gbins)[0] / (1000.**3.)
obvs.append(gmf)
elif obv == 'xi':
greek_xi = tpcf(
pos, rbins, pos,
                        randoms=self.randoms, period=None,
max_sample_size=int(3e5), estimator='Natural',
approx_cell1_size=approx_cell1_size,
approx_cellran_size=approx_cellran_size,
RR_precomputed = self.RR,
NR_precomputed = self.NR)
obvs.append(greek_xi)
else:
raise NotImplementedError('Only nbar 2pcf, gmf implemented so far')
return obvs
else:
if np.all((prior_range[:,0] < theta) & (theta < prior_range[:,1])):
# if all theta_i is within prior range ...
try:
self.model.populate_mock(self.halocat)
pos=three_dim_pos_bundle(self.model.mock.galaxy_table, 'x', 'y', 'z')
obvs = []
for obv in observables:
if obv == 'nbar':
obvs.append(len(pos) / 1000**3.) # nbar of the galaxy catalog
elif obv == 'gmf':
nbar = len(pos) / 1000**3.
b = self.b_normal * (nbar)**(-1./3)
groups = pyfof.friends_of_friends(pos , b)
w = np.array([len(x) for x in groups])
gbins =data_gmf_bins()
gmf = np.histogram(w , gbins)[0] / (1000.**3.)
obvs.append(gmf)
elif obv == 'xi':
greek_xi = tpcf(
pos, rbins, pos,
                                randoms=self.randoms, period=None,
max_sample_size=int(3e5), estimator='Natural',
approx_cell1_size=approx_cell1_size,
approx_cellran_size=approx_cellran_size,
RR_precomputed = self.RR,
NR_precomputed = self.NR)
obvs.append(greek_xi)
else:
raise NotImplementedError('Only nbar, tpcf, and gmf are implemented so far')
return obvs
except ValueError:
obvs = []
for obv in observables:
if obv == 'nbar':
obvs.append(10.)
elif obv == 'gmf':
bins = data_gmf_bins()
obvs.append(np.ones_like(bins)[:-1]*1000.)
elif obv == 'xi':
obvs.append(np.zeros(len(xi_binedges()[:-1])))
return obvs
else:
obvs = []
for obv in observables:
if obv == 'nbar':
obvs.append(10.)
elif obv == 'gmf':
bins = data_gmf_bins()
obvs.append(np.ones_like(bins)[:-1]*1000.)
elif obv == 'xi':
obvs.append(np.zeros(len(xi_binedges()[:-1])))
return obvs
class ABC_HODsim(object):
def __init__(self, Mr=-21, b_normal=0.25):
        ''' Forward model used in the ABC-PMC inference.
        Given a set of HOD parameters, the model populates a halo catalog with galaxies using HaloTools.
'''
self.Mr = Mr
self.b_normal = b_normal
        thr = -1. * float(Mr)
self.model = PrebuiltHodModelFactory('zheng07', threshold=thr)
#self.model.new_haloprop_func_dict = {'sim_subvol': util.mk_id_column}
self.halocat = CachedHaloCatalog(simname = 'multidark', redshift = 0, halo_finder = 'rockstar')
self.RR = data_RR(box='md_sub')
self.randoms = data_random(box='md_sub')
self.NR = len(self.randoms)
def __call__(self, theta, prior_range=None, observables=['nbar', 'gmf']):
return self._sum_stat(theta, prior_range=prior_range, observables=observables)
def _sum_stat(self, theta, prior_range=None, observables=['nbar', 'gmf']):
'''
Given theta, sum_stat calculates the observables from our forward model
Parameters
----------
        theta : array of HOD parameters, ordered as [logM0, ln(sigma_logM), logMmin, alpha, logM1].
        prior_range : if specified, theta is first checked against this (N_param, 2) array of
            lower/upper prior bounds before the mock is populated.
'''
self.model.param_dict['logM0'] = theta[0]
self.model.param_dict['sigma_logM'] = np.exp(theta[1])
self.model.param_dict['logMmin'] = theta[2]
self.model.param_dict['alpha'] = theta[3]
self.model.param_dict['logM1'] = theta[4]
rbins = xi_binedges()
rmax = rbins.max()
period = None
approx_cell1_size = [rmax , rmax , rmax]
approx_cellran_size = [rmax , rmax , rmax]
if prior_range is None:
rint = np.random.randint(1, 125)
####simsubvol = lambda x: util.mask_func(x, rint)
####self.model.populate_mock(self.halocat,
#### masking_function=simsubvol,
#### enforce_PBC=False)
self.model.populate_mock(self.halocat)
pos =three_dim_pos_bundle(self.model.mock.galaxy_table, 'x', 'y', 'z')
pos = util.mask_galaxy_table(pos , rint)
xi , yi , zi = util.random_shifter(rint)
temp_randoms = self.randoms.copy()
temp_randoms[:,0] += xi
temp_randoms[:,1] += yi
temp_randoms[:,2] += zi
obvs = []
for obv in observables:
if obv == 'nbar':
obvs.append(len(pos) / 200**3.) # nbar of the galaxy catalog
elif obv == 'gmf':
#compute group richness
nbar = len(pos) / 200**3.
b = self.b_normal * (nbar)**(-1./3)
groups = pyfof.friends_of_friends(pos , b)
w = np.array([len(x) for x in groups])
gbins = data_gmf_bins()
gmf = np.histogram(w , gbins)[0] / (200.**3.)
obvs.append(gmf)
elif obv == 'xi':
greek_xi = tpcf(
pos, rbins, pos,
randoms=temp_randoms, period = period,
max_sample_size=int(1e5), estimator='Natural',
approx_cell1_size=approx_cell1_size,
approx_cellran_size=approx_cellran_size,
RR_precomputed = self.RR,
NR_precomputed = self.NR)
obvs.append(greek_xi)
else:
raise NotImplementedError('Only nbar 2pcf, gmf implemented so far')
return obvs
else:
if np.all((prior_range[:,0] < theta) & (theta < prior_range[:,1])):
# if all theta_i is within prior range ...
try:
rint = np.random.randint(1, 125)
simsubvol = lambda x: util.mask_func(x, rint)
self.model.populate_mock(self.halocat,
masking_function=simsubvol,
enforce_PBC=False)
pos =three_dim_pos_bundle(self.model.mock.galaxy_table, 'x', 'y', 'z')
xi , yi , zi = util.random_shifter(rint)
temp_randoms = self.randoms.copy()
temp_randoms[:,0] += xi
temp_randoms[:,1] += yi
temp_randoms[:,2] += zi
obvs = []
for obv in observables:
if obv == 'nbar':
obvs.append(len(pos) / 200**3.) # nbar of the galaxy catalog
elif obv == 'gmf':
nbar = len(pos) / 200**3.
b = self.b_normal * (nbar)**(-1./3)
groups = pyfof.friends_of_friends(pos , b)
w = np.array([len(x) for x in groups])
gbins =data_gmf_bins()
gmf = np.histogram(w , gbins)[0] / (200.**3.)
obvs.append(gmf)
elif obv == 'xi':
greek_xi = tpcf(
pos, rbins, pos,
randoms=temp_randoms, period = period,
max_sample_size=int(2e5), estimator='Natural',
approx_cell1_size=approx_cell1_size,
approx_cellran_size=approx_cellran_size,
RR_precomputed = self.RR,
NR_precomputed = self.NR)
obvs.append(greek_xi)
else:
raise NotImplementedError('Only nbar, tpcf, and gmf are implemented so far')
return obvs
except ValueError:
obvs = []
for obv in observables:
if obv == 'nbar':
obvs.append(10.)
elif obv == 'gmf':
bins = data_gmf_bins()
obvs.append(np.ones_like(bins)[:-1]*1000.)
elif obv == 'xi':
obvs.append(np.zeros(len(xi_binedges()[:-1])))
return obvs
else:
obvs = []
for obv in observables:
if obv == 'nbar':
obvs.append(10.)
elif obv == 'gmf':
bins = data_gmf_bins()
obvs.append(np.ones_like(bins)[:-1]*1000.)
elif obv == 'xi':
obvs.append(np.zeros(len(xi_binedges()[:-1])))
return obvs
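# Minimal usage sketch: the theta values below are hypothetical Zheng+2007 HOD
# parameters (ordered as [logM0, ln(sigma_logM), logMmin, alpha, logM1]), and running
# this requires the cached 'multidark' halo catalog plus the ccppabc data files
# loaded by data_RR()/data_random() above.
if __name__ == "__main__":
    theta_test = [12.0, np.log(0.4), 12.5, 1.1, 13.5]
    forward_model = MCMC_HODsim(Mr=-21, b_normal=0.25)
    nbar_mock, gmf_mock = forward_model(theta_test, observables=['nbar', 'gmf'])
    print('mock nbar:', nbar_mock)
    print('mock gmf :', gmf_mock)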
|
|
%matplotlib inline
import sys, os, codecs, shutil, re, math, csv
import datetime as dt
import xlsxwriter
import pandas as pd
import numpy as np
import collections
import openpyxl
import seaborn as sns
from openpyxl.drawing.image import Image
import matplotlib.pyplot as plt
import matplotlib.dates as mdates
#Update the input file names below to the extract for the target date before each run (e.g. append the _X run number as in 'AllMeters_20181219_X.xlsx')
allmeter = 'ReadingPerf_20200420.csv'
crloc = 'Collector_Location20200224.xlsx'
full_smart_meters = pd.read_csv('C:/Users/Desktop/RF_Analysis/%s' % allmeter, sep=',',skiprows=1)
full_smart_meters.columns = ['meterno', 'serialnumber', 'endpointId', 'endpointtypeid', 'firmwareversion', 'endPointModelId', 'hwmodelid', 'date', 'initialDiscoveredDate', 'initialNormalDate', 'NoOfIntervals', 'name', 'abc_rank', 'DayEnd', 'meter_status', 'spuid', 'layer']
cr_list = pd.read_excel('C:/Users/Desktop/RF_Analysis/%s' % crloc, 'Sheet1', na_values=['NA'])
cr_list = cr_list.drop(cr_list.columns[[4,5,6]], axis=1)
full_smart_meters.set_index('name').join(cr_list.set_index('CollectorNo'))
cr_list = full_smart_meters.join(cr_list.set_index('CollectorNo'), on='name', how='outer')
cr_list = cr_list.fillna({'Estates / Villages': 'Unlocated Area', 'BuildingType': 'Unknown BuildingType' })
cr_list = cr_list[cr_list['meterno'].notnull()]
cr_village = cr_list[cr_list['BuildingType'].isin(['Village'])]
cr_highrise = cr_list[cr_list['BuildingType'].isin(['Highrise'])]
cr_unknownbuilding = cr_list[cr_list['BuildingType'].isin(['Unknown BuildingType'])]
cell_meter = cr_list[cr_list['endpointtypeid'] == 15]
onlycell_meter = cell_meter[~cell_meter['abc_rank'].str.startswith('Load_DA')]
LDA_meter = cell_meter[cell_meter['abc_rank'].str.startswith('Load_DA')]
cr_list = cr_list[~cr_list['abc_rank'].str.startswith('Load_DA')]
#FW version performance
fw_avg = cr_list.pivot_table(values = ['NoOfIntervals'], index = ['firmwareversion'], aggfunc = {'NoOfIntervals': np.mean})
fw_std = cr_list.pivot_table(values = ['NoOfIntervals'], index = ['firmwareversion'], aggfunc = {'NoOfIntervals': np.std})
fw_perf = pd.concat([fw_avg, fw_std], axis=1, join_axes=[fw_avg.index])
fw_perf.columns = ['LP Average', 'LP Std Deviation']
fw_perf = fw_perf.round()
class District:
    def __init__(self, cr_list, attr):
        self.name = "District {}".format(attr)
        self.district_meter = cr_list[cr_list['District'].str.contains(self.name, na=False)]
        self.district_meter_Count = self.district_meter['meterno'].count()
        self.district_meter_Full_48_LP_Interval = self.district_meter[self.district_meter['NoOfIntervals'] == 48]
        self.district_meter_Full_48_LP_Interval_Meter_Count = self.district_meter_Full_48_LP_Interval['meterno'].count()
        self.district_meter_Full_48_LP_Interval_Meter_Rate = round((self.district_meter_Full_48_LP_Interval_Meter_Count/self.district_meter_Count)*100,2)
        self.district_1468 = self.district_meter[self.district_meter['firmwareversion'].str.contains('-24.60', na=False)]
        self.district_1468_Count = self.district_1468['meterno'].count()
        self.district_1468_Rate = round((self.district_1468_Count/self.district_meter_Count)*100,2)
        self.district_meter_Normal_Meter = self.district_meter[self.district_meter['meter_status'] == 'Normal']
        self.district_meter_Normal_Meter_Count = self.district_meter_Normal_Meter['meterno'].count()
        self.district_meter_SecConfig_Meter = self.district_meter[self.district_meter['meter_status'] == 'SecConfig']
        self.district_meter_SecConfig_Meter_Count = self.district_meter_SecConfig_Meter['meterno'].count()
        self.district_meter_Discovered_Meter = self.district_meter[self.district_meter['meter_status'] == 'Discovered']
        self.district_meter_Discovered_Meter_Count = self.district_meter_Discovered_Meter['meterno'].count()
        self.district_meter_Config_Meter = self.district_meter[self.district_meter['meter_status'] == 'Configure']
        self.district_meter_Config_Meter_Count = self.district_meter_Config_Meter['meterno'].count()
        self.district_meter_Failed_Meter = self.district_meter[self.district_meter['meter_status'] == 'Failed']
        self.district_meter_Failed_Meter_Count = self.district_meter_Failed_Meter['meterno'].count()
        self.district_meter_Lost_Meter = self.district_meter[self.district_meter['meter_status'] == 'Lost']
        self.district_meter_Lost_Meter_Count = self.district_meter_Lost_Meter['meterno'].count()
        #LP-DayEnd-FULL_district Meter
        self.district_meter_LP_DayEnd_Full_Meter = self.district_meter[(self.district_meter['NoOfIntervals'] == 48) & (self.district_meter['DayEnd'] == 1)]
        self.district_meter_LP_DayEnd_Full_Meter_Count = self.district_meter_LP_DayEnd_Full_Meter['meterno'].count()
        self.district_meter_LP_DayEnd_Full_Meter_Rate = round((self.district_meter_LP_DayEnd_Full_Meter_Count/self.district_meter_Count)*100,2)
        self.district_meter_Missing_DayEnd_Reading = self.district_meter[self.district_meter['DayEnd'] != 1]
        self.district_meter_Missing_DayEnd_Reading_Meter_Count = self.district_meter_Missing_DayEnd_Reading['meterno'].count()
        self.Expected_district_meter_Total_LP_Count = ((self.district_meter_Count)*48)
        self.district_meter_Total_LP_Count = self.district_meter['NoOfIntervals'].sum()
        self.district_meter_Total_Dayend = self.district_meter[self.district_meter['DayEnd'] == 1]
        self.district_meter_Total_Dayend_Count = self.district_meter_Total_Dayend['meterno'].count()
        self.district_meter_LP_Success_Rate = round((self.district_meter_Total_LP_Count/self.Expected_district_meter_Total_LP_Count)*100,2)
        self.district_meter_Dayend_Success_Rate = round((self.district_meter_Total_Dayend_Count/self.district_meter_Count)*100,2)
        self.district_meter_Average_LP_Interval_Push_Count = self.district_meter['NoOfIntervals'].mean()
        self.district_meter_StdDev_LP_Interval_Push_Count = self.district_meter['NoOfIntervals'].std()
        #abc_rank
        self._CR_Rnk = self.district_meter.pivot_table(values = ['meter_status'], index = ['name'], columns = ['abc_rank'], aggfunc = 'count')
        self._CR_Rnk.columns = self._CR_Rnk.columns.droplevel()
        self._CR_Rnk = self._CR_Rnk.loc[:,['P','A','B','C','D','E','F']]
        self._CR_Rnk = self._CR_Rnk.fillna(0)
    def get_dict(self):
        return collections.OrderedDict({
            '[ {} METERS SUMMARY ]'.format(self.name):'',
            '{} Meter Count'.format(self.name):self.district_meter_Count,
            '{} FW24.60 Meter Count'.format(self.name):self.district_1468_Count,
            '{} FW24.60 Meter(%)'.format(self.name):self.district_1468_Rate,
            '{} Meter LP Success(%)'.format(self.name):self.district_meter_LP_Success_Rate,
            '{} Meter Dayend Success(%)'.format(self.name):self.district_meter_Dayend_Success_Rate,
            '{} Average LP Push Count'.format(self.name):round(self.district_meter_Average_LP_Interval_Push_Count,2),
            '{} Std Deviation LP Push Count'.format(self.name):round(self.district_meter_StdDev_LP_Interval_Push_Count,2),
            '{} Meter LP-DayEnd-FULL Meter Count'.format(self.name):self.district_meter_LP_DayEnd_Full_Meter_Count,
            '{} Meter LP-DayEnd-FULL Meter(%)'.format(self.name):self.district_meter_LP_DayEnd_Full_Meter_Rate,
            '{} Meter Full 48 LP Interval Meter Count'.format(self.name):self.district_meter_Full_48_LP_Interval_Meter_Count,
            '{} Meter Full 48 LP Interval Meter(%)'.format(self.name):self.district_meter_Full_48_LP_Interval_Meter_Rate,
            '{} Meter Missing DayEnd Reading Meter Count'.format(self.name):self.district_meter_Missing_DayEnd_Reading_Meter_Count,
            '{} Meter Normal Meter Count'.format(self.name):self.district_meter_Normal_Meter_Count,
            '{} Meter SecConfig Meter Count'.format(self.name):self.district_meter_SecConfig_Meter_Count,
            '{} Meter Config Meter Count'.format(self.name):self.district_meter_Config_Meter_Count,
            '{} Meter Discovered Meter Count'.format(self.name):self.district_meter_Discovered_Meter_Count,
            '{} Meter Failed Meter Count'.format(self.name):self.district_meter_Failed_Meter_Count,
            '{} Meter Lost Meter Count'.format(self.name):self.district_meter_Lost_Meter_Count,
        })
district_a = District(cr_list, 'A')
district_b = District(cr_list, 'B')
district_c = District(cr_list, 'C')
district_d = District(cr_list, 'D')
No_reading_meter = cr_list[cr_list['abc_rank'] == 'F']
hexed_serial = pd.DataFrame(No_reading_meter['serialnumber'].astype(int))
hexed_serial = hexed_serial.rename(columns={'serialnumber':'hex_serial'})
hexed_serial = hexed_serial['hex_serial'].apply(lambda x:format(x, 'x'))
No_reading_meter = pd.concat([No_reading_meter, hexed_serial], axis=1)
No_reading_meter = No_reading_meter.reset_index(drop=True)
#No Reading Meter per Status
No_Reading_RF_meter = No_reading_meter[No_reading_meter['endpointtypeid'] == 9]
No_Reading_cell_meter = No_reading_meter[No_reading_meter['endpointtypeid'] == 15]
No_Reading_Normal_Meter = No_reading_meter[No_reading_meter['meter_status'] == 'Normal']
No_Reading_SecConfig_Meter = No_reading_meter[No_reading_meter['meter_status'] == 'SecConfig']
No_Reading_Discovered_Meter = No_reading_meter[No_reading_meter['meter_status'] == 'Discovered']
No_Reading_Config_Meter = No_reading_meter[No_reading_meter['meter_status'] == 'Configure']
No_Reading_Failed_Meter = No_reading_meter[No_reading_meter['meter_status'] == 'Failed']
No_Reading_Lost_Meter = No_reading_meter[No_reading_meter['meter_status'] == 'Lost']
No_Reading_Meter_with_DayEnd = No_reading_meter[No_reading_meter['DayEnd'] == 1 ]
No_LPandDayEnd_Reading_Meter_with_DayEnd = No_reading_meter[No_reading_meter['DayEnd'] == 0 ]
No_reading_meter_Highrise = No_reading_meter[No_reading_meter['BuildingType'].isin(['Highrise'])]
No_reading_meter_Village = No_reading_meter[No_reading_meter['BuildingType'].isin(['Village'])]
No_reading_meter_Unlocated = No_reading_meter[No_reading_meter['BuildingType'].isin(['Unknown BuildingType'])]
Normal_Meter = cr_list[cr_list['meter_status'] == 'Normal']
SecConfig_Meter = cr_list[cr_list['meter_status'] == 'SecConfig']
Discovered_Meter = cr_list[cr_list['meter_status'] == 'Discovered']
Config_Meter = cr_list[cr_list['meter_status'] == 'Configure']
Failed_Meter = cr_list[cr_list['meter_status'] == 'Failed']
Lost_Meter = cr_list[cr_list['meter_status'] == 'Lost']
today_date = dt.date.today().strftime('%Y-%m-%d')
#Effective Meter Calculation(Only Normal meters (w/o LDA Meter) that passed more than 7 days from initial Normal date)
cr_list['initialNormalDate'] = pd.to_datetime(cr_list['initialNormalDate'], format='%Y-%m-%d')
cr_list['date'] = cr_list['date'].fillna(today_date)
cr_list['date'] = pd.to_datetime(cr_list['date'], format='%Y-%m-%d')
cr_list['7Days_After_Normal'] = (cr_list['initialNormalDate'] + dt.timedelta(days=7))
cr_list['initialNormalDate'] = cr_list['initialNormalDate'].values.astype('datetime64[D]')
cr_list['7Days_After_Normal'] = cr_list['7Days_After_Normal'].values.astype('datetime64[D]')
cr_list['initialDiscoveredDate'] = cr_list['initialDiscoveredDate'].values.astype('datetime64[D]')
cr_list['Difference'] = cr_list['date'] - cr_list['initialNormalDate']
cr_list['DaysAfterDis'] = cr_list['date'] - cr_list['initialDiscoveredDate']
cr_list['DisToNorm'] = cr_list['initialNormalDate'] - cr_list['initialDiscoveredDate']
#SLA-Meters
Effective_Meter = cr_list[cr_list['Difference'] >= '7 days']
Effective_Meter = Effective_Meter[Effective_Meter['meter_status'] == 'Normal']
Effective_Meter_Count = Effective_Meter['meterno'].count()
EffectiveMeters_Full_48_LP_Interval = Effective_Meter[Effective_Meter['NoOfIntervals'] == 48]
EffectiveMeters_Full_48_LP_Interval_Meter_Count = EffectiveMeters_Full_48_LP_Interval['meterno'].count()
EffectiveMeters_Full_48_LP_Interval_Meter_Rate = round((EffectiveMeters_Full_48_LP_Interval_Meter_Count/Effective_Meter_Count)*100,2)
#LP-DayEnd-FULL_SLA_Meter
LP_DayEnd_Full_Effective_Meter = Effective_Meter[(Effective_Meter['NoOfIntervals'] == 48)&(Effective_Meter['DayEnd'] == 1)]
LP_DayEnd_Full_Effective_Meter_Count = LP_DayEnd_Full_Effective_Meter['meterno'].count()
LP_DayEnd_Full_Effective_Meter_Rate = round((LP_DayEnd_Full_Effective_Meter_Count/Effective_Meter_Count)*100,2)
EffectiveMeters_Missing_DayEnd_Reading = Effective_Meter[Effective_Meter['DayEnd'] != 1]
EffectiveMeters_Missing_DayEnd_Reading_Meter_Count = EffectiveMeters_Missing_DayEnd_Reading['meterno'].count()
EffectiveMeters_Missing_DayEnd_Reading_Meter_Rate = round((EffectiveMeters_Missing_DayEnd_Reading_Meter_Count/Effective_Meter_Count)*100,2)
Expected_EffectiveMeter_Total_LP_Count = ((Effective_Meter_Count)*48)
EffectiveMeter_Total_LP_Count = Effective_Meter['NoOfIntervals'].sum()
EffectiveMeter_Total_Dayend = Effective_Meter[Effective_Meter['DayEnd'] == 1]
EffectiveMeter_Total_Dayend_Count = EffectiveMeter_Total_Dayend['meterno'].count()
Effective_Meter_LP_Success_Rate = round((EffectiveMeter_Total_LP_Count/Expected_EffectiveMeter_Total_LP_Count)*100,2)
Effective_Meter_Dayend_Success_Rate = round((EffectiveMeter_Total_Dayend_Count/Effective_Meter_Count)*100,2)
Effective_Meter_Average_LP_Interval_Push_Count = Effective_Meter['NoOfIntervals'].mean()
Effective_Meter_StdDev_LP_Interval_Push_Count = Effective_Meter['NoOfIntervals'].std()
#Latest-Meters(Registered in last 30days)
Latest_Meters = cr_list[cr_list['DaysAfterDis'] < '30 days']
Latest_Meters_Count = Latest_Meters['meterno'].count()
Latest_Meters_Full_48_LP_Interval = Latest_Meters[Latest_Meters['NoOfIntervals'] == 48]
Latest_Meters_Full_48_LP_Interval_Meter_Count = Latest_Meters_Full_48_LP_Interval['meterno'].count()
Latest_Meters_Full_48_LP_Interval_Meter_Rate = round((Latest_Meters_Full_48_LP_Interval_Meter_Count/Latest_Meters_Count)*100,2)
#LP-DayEnd-FULL_SLA_Meter
LP_DayEnd_Full_Latest_Meters = Latest_Meters[(Latest_Meters['NoOfIntervals'] == 48)&(Latest_Meters['DayEnd'] == 1)]
LP_DayEnd_Full_Latest_Meters_Count = LP_DayEnd_Full_Latest_Meters['meterno'].count()
LP_DayEnd_Full_Latest_Meters_Rate = round((LP_DayEnd_Full_Latest_Meters_Count/Latest_Meters_Count)*100,2)
Latest_Meters_Missing_DayEnd_Reading = Latest_Meters[Latest_Meters['DayEnd'] != 1]
Latest_Meters_Missing_DayEnd_Reading_Meter_Count = Latest_Meters_Missing_DayEnd_Reading['meterno'].count()
Latest_Meters_Missing_DayEnd_Reading_Meter_Rate = round((Latest_Meters_Missing_DayEnd_Reading_Meter_Count/Latest_Meters_Count)*100,2)
Expected_Latest_Meters_Count_Total_LP_Count = ((Latest_Meters_Count)*48)
Latest_Meters_Total_LP_Count = Latest_Meters['NoOfIntervals'].sum()
Latest_Meters_Total_Dayend = Latest_Meters[Latest_Meters['DayEnd'] == 1]
Latest_Meters_Total_Dayend_Count = Latest_Meters_Total_Dayend['meterno'].count()
Latest_Meters_LP_Success_Rate = round((Latest_Meters_Total_LP_Count/Expected_Latest_Meters_Count_Total_LP_Count)*100,2)
Latest_Meters_Dayend_Success_Rate = round((Latest_Meters_Total_Dayend_Count/Latest_Meters_Count)*100,2)
Latest_Meters_Average_LP_Interval_Push_Count = Latest_Meters['NoOfIntervals'].mean()
Latest_Meters_StdDev_LP_Interval_Push_Count = Latest_Meters['NoOfIntervals'].std()
unlocated_meter = cr_list[cr_list['Estates / Villages'] == 'Unlocated Area']
unlocated_meter = unlocated_meter[unlocated_meter['meterno'].notnull()]
#Number of Total Meters
Total_AllMeter_Count = cr_list['meterno'].count()
Total_HighRiseMeter_Count = cr_highrise['meterno'].count()
Total_VillageMeter_Count = cr_village['meterno'].count()
Total_ALLCellMeter_Count = cell_meter['meterno'].count()
Total_LDAMeter_Count = LDA_meter['meterno'].count()
Total_CellMeter_Count = Total_ALLCellMeter_Count - Total_LDAMeter_Count
unlocated_meter_Count = unlocated_meter['meterno'].count()
unknownbuilding_Count = cr_unknownbuilding['meterno'].count()
all_meter_1468 = cr_list[cr_list['firmwareversion'].str.contains('-24.60', na=False)]
all_meter_1468_Count = all_meter_1468['meterno'].count()
all_meter_1468_1468_Rate = round((all_meter_1468_Count/Total_AllMeter_Count )*100,2)
Missing_Full_48_LP_Interval_Meters = cr_list[cr_list['NoOfIntervals'] < 48]
Missing_Full_48_LP_Interval_Meters = Missing_Full_48_LP_Interval_Meters.reset_index(drop=True)
Missing_DayEnd_Reading_All_Meters = cr_list[cr_list['DayEnd'] != 1]
Missing_DayEnd_Reading_All_Meters = Missing_DayEnd_Reading_All_Meters.reset_index(drop=True)
Success_DayEnd_Reading_All_Meters = cr_list[cr_list['DayEnd'] == 1]
#Number of No Reading Meter Status Count
No_Reading_Meter_Total_Count = No_reading_meter['abc_rank'].count()
No_Reading_RF_meter_Count = No_Reading_RF_meter['meterno'].count()
No_Reading_Cell_meter_Count = No_Reading_cell_meter['meterno'].count()
No_reading_Normal_meter_count = No_Reading_Normal_Meter['meterno'].count()
No_reading_SecConfig_meter_count = No_Reading_SecConfig_Meter['meterno'].count()
No_reading_Discovered_meter_count = No_Reading_Discovered_Meter['meterno'].count()
No_reading_Config_meter_count = No_Reading_Config_Meter['meterno'].count()
No_reading_Failed_meter_count = No_Reading_Failed_Meter['meterno'].count()
No_reading_Lost_meter_count = No_Reading_Lost_Meter['meterno'].count()
No_Reading_Meter_with_DayEnd_count = No_Reading_Meter_with_DayEnd['meterno'].count()
No_LPandDayEnd_Reading_Meter_with_DayEnd_Count = No_LPandDayEnd_Reading_Meter_with_DayEnd['meterno'].count()
No_reading_meter_Highrise_count = No_reading_meter_Highrise['abc_rank'].count()
No_reading_meter_Village_count = No_reading_meter_Village['abc_rank'].count()
No_reading_meter_Unlocated_count = No_reading_meter_Unlocated['abc_rank'].count()
#Meter Status Count
Normal_Meter_Count = Normal_Meter['meterno'].count()
SecConfig_Meter_Count = SecConfig_Meter['meterno'].count()
Config_Meter_Count = Config_Meter['meterno'].count()
Discovered_Meter_Count = Discovered_Meter['meterno'].count()
Failed_Meter_Count = Failed_Meter['meterno'].count()
Lost_Meter_Count = Lost_Meter['meterno'].count()
RF_meter = cr_list[cr_list['endpointtypeid'] != 15]
Collector_Count = RF_meter['name'].nunique()
#Performance per Areas
area_perf = cr_list.pivot_table(values = ['meter_status'], index = ['Estates / Villages'], columns = ['abc_rank'], aggfunc = 'count')
area_perf.columns = area_perf.columns.droplevel()
area_perf = area_perf.loc[:,['P','A','B','C','D','E','F']]
area_perf = area_perf.fillna(0)
#Performance per Areas
region_perf = cr_list.groupby(['Estates / Villages'])['NoOfIntervals'].mean()
region_perf_std = cr_list.groupby(['Estates / Villages'])['NoOfIntervals'].std()
region_perf = pd.concat([region_perf, region_perf_std], axis=1, join_axes=[region_perf.index])
region_perf = region_perf.round()
region_perf.columns = ['Average LP Count','Std LP Count']
Expected_AllMeter_Total_DayEnd_Reading_Count = (Total_HighRiseMeter_Count + Total_VillageMeter_Count + unknownbuilding_Count)
AllMeter_Total_DayEnd_Reading_Count = cr_list['DayEnd'].sum()
Expected_HighRiseMeter_Total_DayEnd_Reading_Count = Total_HighRiseMeter_Count
HighRiseMeter_Total_DayEnd_Reading_Count = cr_highrise['DayEnd'].sum()
Expected_VillageMeter_Total_DayEnd_Reading_Count = Total_VillageMeter_Count
VillageMeter_Total_DayEnd_Reading_Count = cr_village['DayEnd'].sum()
Expected_AllCellMeter_Total_DayEnd_Reading_Count = (Total_CellMeter_Count+Total_LDAMeter_Count)
AllCellMeter_Total_DayEnd_Reading_Count = (onlycell_meter['DayEnd'].sum() + LDA_meter['DayEnd'].sum())
Expected_CellMeter_Total_DayEnd_Reading_Count = Total_CellMeter_Count
CellMeter_Total_DayEnd_Reading_Count = (onlycell_meter['DayEnd'].sum())
Expected_LDAMeter_Total_DayEnd_Reading_Count = (Total_LDAMeter_Count)
LDAMeter_Total_DayEnd_Reading_Count = (LDA_meter['DayEnd'].sum())
Missing_DayEnd_Reading_AllMeter_Count = Expected_AllMeter_Total_DayEnd_Reading_Count-AllMeter_Total_DayEnd_Reading_Count
Missing_DayEnd_Reading_HighRiseMeter_Count = Expected_HighRiseMeter_Total_DayEnd_Reading_Count-HighRiseMeter_Total_DayEnd_Reading_Count
Missing_DayEnd_Reading_VillageMeter_Count = Expected_VillageMeter_Total_DayEnd_Reading_Count-VillageMeter_Total_DayEnd_Reading_Count
Missing_DayEnd_Reading_AllCellMeter_Count = Expected_AllCellMeter_Total_DayEnd_Reading_Count-AllCellMeter_Total_DayEnd_Reading_Count
Missing_DayEnd_Reading_CellMeter_Count = Expected_CellMeter_Total_DayEnd_Reading_Count-CellMeter_Total_DayEnd_Reading_Count
Missing_DayEnd_Reading_LDAMeter_Count = Expected_LDAMeter_Total_DayEnd_Reading_Count-LDAMeter_Total_DayEnd_Reading_Count
NO_DayEnd_Reading_but_with_LP_Reading_Meter = full_smart_meters[full_smart_meters['DayEnd'] == 0]
NO_DayEnd_Reading_but_with_LP_Reading_Meter = NO_DayEnd_Reading_but_with_LP_Reading_Meter[NO_DayEnd_Reading_but_with_LP_Reading_Meter['NoOfIntervals'] != 0]
NO_DayEnd_Reading_but_with_LP_Reading_Meter_Count = NO_DayEnd_Reading_but_with_LP_Reading_Meter['NoOfIntervals'].count()
#DayEnd Reading Push % Performance
AllMeter_Total_DayEnd_Reading_SuccessRate = (AllMeter_Total_DayEnd_Reading_Count/Expected_AllMeter_Total_DayEnd_Reading_Count)*100
MissingDayEndReadingAllMeterRate = (Missing_DayEnd_Reading_AllMeter_Count/Expected_AllMeter_Total_DayEnd_Reading_Count)*100
HighRiseMeter_Total_DayEnd_Reading_SuccessRate = (HighRiseMeter_Total_DayEnd_Reading_Count/Expected_HighRiseMeter_Total_DayEnd_Reading_Count)*100
VillageMeter_Total_DayEnd_Reading_SuccessRate = (VillageMeter_Total_DayEnd_Reading_Count/Expected_VillageMeter_Total_DayEnd_Reading_Count)*100
AllCellMeter_Total_DayEnd_Reading_SuccessRate = (AllCellMeter_Total_DayEnd_Reading_Count/Expected_AllCellMeter_Total_DayEnd_Reading_Count)*100
CellMeter_Total_DayEnd_Reading_SuccessRate = (CellMeter_Total_DayEnd_Reading_Count/Expected_CellMeter_Total_DayEnd_Reading_Count)*100
LDAMeter_Total_DayEnd_Reading_SuccessRate = (LDAMeter_Total_DayEnd_Reading_Count/Expected_LDAMeter_Total_DayEnd_Reading_Count)*100
NO_LPReading_ButWithDayEnd_Reading_Rate = (No_Reading_Meter_with_DayEnd_count/Total_AllMeter_Count)*100
No_LPandDayEnd_Reading_Meter_with_DayEnd_Rate = (No_LPandDayEnd_Reading_Meter_with_DayEnd_Count/Total_AllMeter_Count)*100
NO_DayEnd_Reading_but_with_LP_Reading_Meter_Rate = (NO_DayEnd_Reading_but_with_LP_Reading_Meter_Count/Total_AllMeter_Count)*100
#MeterType Composition Rate
HighRiseMeter_Rate = (Total_HighRiseMeter_Count/Total_AllMeter_Count)*100
VillageMeter_Rate = (Total_VillageMeter_Count/Total_AllMeter_Count)*100
AllCellMeter_Rate = (Total_ALLCellMeter_Count/Total_AllMeter_Count)*100
CellMeter_Rate = (Total_CellMeter_Count/Total_AllMeter_Count)*100
LDAMeter_Rate = (Total_LDAMeter_Count/Total_AllMeter_Count)*100
UnlocatedMeter_Rate = (unlocated_meter_Count/Total_AllMeter_Count)*100
No_Reading_Meter_Rate = (No_Reading_Meter_Total_Count/Total_AllMeter_Count)*100
No_Reading_Meter_Highrise_Rate = (No_reading_meter_Highrise_count/No_Reading_Meter_Total_Count)*100
No_Reading_Meter_Village_Rate = (No_reading_meter_Village_count/No_Reading_Meter_Total_Count)*100
No_reading_meter_Unlocated_Rate = (No_reading_meter_Unlocated_count/No_Reading_Meter_Total_Count)*100
#MeterStatus Composition Rate
Normal_Meter_Rate = (Normal_Meter_Count/Total_AllMeter_Count)*100
SecConfig_Meter_Rate = (SecConfig_Meter_Count/Total_AllMeter_Count)*100
Config_Meter_Rate = (Config_Meter_Count/Total_AllMeter_Count)*100
Discovered_Meter_Rate = (Discovered_Meter_Count/Total_AllMeter_Count)*100
Failed_Meter_Rate = (Failed_Meter_Count/Total_AllMeter_Count)*100
Lost_Meter_Rate = (Lost_Meter_Count/Total_AllMeter_Count)*100
#No Reading MeterStatus Composition Rate
No_Reading_Normal_Meter_Rate = (No_reading_Normal_meter_count/No_Reading_Meter_Total_Count)*100
No_Reading_SecConfig_Meter_Rate = (No_reading_SecConfig_meter_count/No_Reading_Meter_Total_Count)*100
No_Reading_Config_Meter_Rate = (No_reading_Config_meter_count/No_Reading_Meter_Total_Count)*100
No_Reading_Discovered_Meter_Rate = (No_reading_Discovered_meter_count/No_Reading_Meter_Total_Count)*100
No_Reading_Failed_Meter_Rate = (No_reading_Failed_meter_count/No_Reading_Meter_Total_Count)*100
No_Reading_Lost_Meter_Rate = (No_reading_Lost_meter_count/No_Reading_Meter_Total_Count)*100
#Overall LP Push Count Peformance
Expected_AllMeter_Total_LP_Count = (((Total_HighRiseMeter_Count+Total_VillageMeter_Count+unknownbuilding_Count)-Total_LDAMeter_Count)*48)+(Total_LDAMeter_Count*144)
AllMeter_Total_LP_Count = cr_list['NoOfIntervals'].sum()
Expected_HighRiseMeter_Total_LP_Count = Total_HighRiseMeter_Count*48
HighRiseMeter_Total_LP_Count = cr_highrise['NoOfIntervals'].sum()
Expected_VillageMeter_Total_LP_Count = Total_VillageMeter_Count*48
VillageMeter_Total_LP_Count = cr_village['NoOfIntervals'].sum()
Expected_AllCellMeter_Total_LP_Count = (Total_CellMeter_Count*48+Total_LDAMeter_Count*144)
AllCellMeter_Total_LP_Count = (onlycell_meter['NoOfIntervals'].sum() + LDA_meter['NoOfIntervals'].sum())
Expected_CellMeter_Total_LP_Count = (Total_CellMeter_Count*48)
CellMeter_Total_LP_Count = (onlycell_meter['NoOfIntervals'].sum())
Expected_LDAMeter_Total_LP_Count = (Total_LDAMeter_Count*144)
LDAMeter_Total_LP_Count = (LDA_meter['NoOfIntervals'].sum())
Full48_LP_Interval_AllMeter_Count = cr_list['NoOfIntervals'] == 48
Average_LP_Interval_Push_Count = cr_list['NoOfIntervals'].mean()
StdDev_LP_Interval_Push_Count = cr_list['NoOfIntervals'].std()
Full48_LP_Interval_AllMeter_Count = Full48_LP_Interval_AllMeter_Count.sum()
Full48_LP_Interval_HighRiseMeter_Count = cr_highrise['NoOfIntervals'] == 48
Full48_LP_Interval_HighRiseMeter_Count = Full48_LP_Interval_HighRiseMeter_Count.sum()
Full48_LP_Interval_VillageMeter_Count = cr_village['NoOfIntervals'] == 48
Full48_LP_Interval_VillageMeter_Count = Full48_LP_Interval_VillageMeter_Count.sum()
Full144_LP_Interval_LDAMeter_Count = cr_list['NoOfIntervals'] == 144
Full144_LP_Interval_LDAMeter_Count = Full144_LP_Interval_LDAMeter_Count.sum()
Full48_LP_Interval_CellMeter_Count = onlycell_meter['NoOfIntervals'] == 48
Full48_LP_Interval_CellMeter_Count = Full48_LP_Interval_CellMeter_Count.sum()
Full48_LP_Interval_CellMeter_Count_Rate = round((Full48_LP_Interval_CellMeter_Count/Total_CellMeter_Count)*100,2)
Missing48_LP_unlocated_meter = unlocated_meter['NoOfIntervals'] != 48
Missing48_LP_unlocated_meter_Count = Missing48_LP_unlocated_meter.sum()
#LP-DayEnd-FULL_Meter
LP_DayEnd_Full_Meter = cr_list[(cr_list['NoOfIntervals'] == 48)&(cr_list['DayEnd'] == 1)]
LP_DayEnd_Full_Meter_Count = LP_DayEnd_Full_Meter['meterno'].count()
LP_DayEnd_Full_Meter_Rate = round((LP_DayEnd_Full_Meter_Count/Total_AllMeter_Count)*100,2)
#Overall LP Push % Peformance
AllMeter_Total_LP_SuccessRate = (AllMeter_Total_LP_Count/Expected_AllMeter_Total_LP_Count)*100
Full48_LP_Interval_AllMeter_Rate = (Full48_LP_Interval_AllMeter_Count/Total_AllMeter_Count)*100
HighRiseMeter_Total_LP_SuccessRate = (HighRiseMeter_Total_LP_Count/Expected_HighRiseMeter_Total_LP_Count)*100
VillageMeter_Total_LP_SuccessRate = (VillageMeter_Total_LP_Count/Expected_VillageMeter_Total_LP_Count)*100
AllCellMeter_Total_LP_SuccessRate = (AllCellMeter_Total_LP_Count/Expected_AllCellMeter_Total_LP_Count)*100
CellMeter_Total_LP_SuccessRate = (CellMeter_Total_LP_Count/Expected_CellMeter_Total_LP_Count)*100
LDAMeter_Total_LP_SuccessRate = (LDAMeter_Total_LP_Count/Expected_LDAMeter_Total_LP_Count)*100
target_date = cr_list.iloc[0,7].strftime('%Y-%m-%d')
Performance = collections.OrderedDict({
'Execution Date': today_date,
'Target Date': target_date,
'[ KEY PERFORMANCE INDICATOR ]':'',
'Total Meter Count':Total_AllMeter_Count,
'Total Collector Count':Collector_Count,
'Total Meter FW24.60 Meter Count':all_meter_1468_Count,
'Total Meter FW24.60 Meter(%)':all_meter_1468_1468_Rate,
'All Meter LP Interval Push Success(%)':round(AllMeter_Total_LP_SuccessRate,2),
'All Meter DayEnd Reading Push Success(%)':round(AllMeter_Total_DayEnd_Reading_SuccessRate,2),
'Average LP Push Count':round(Average_LP_Interval_Push_Count,2),
'Std Deviation LP Push Count':round(StdDev_LP_Interval_Push_Count,2),
'LP-DayEnd-FULL All Meter Count':LP_DayEnd_Full_Meter_Count,
'LP-DayEnd-FULL All Meter(%)':round(LP_DayEnd_Full_Meter_Rate,2),
'Full 48 LP Interval Meter Count':Full48_LP_Interval_AllMeter_Count,
'Full 48 LP Interval Meter(%)':round(Full48_LP_Interval_AllMeter_Rate,2),
'Full 48 LP Interval Cell Meter Count':Full48_LP_Interval_CellMeter_Count,
'Full 48 LP Interval Cell Meter(%)':Full48_LP_Interval_CellMeter_Count_Rate,
'NO DayEnd Reading All Meter Count':Missing_DayEnd_Reading_AllMeter_Count,
'NO DayEnd Reading Meter(%)':round(MissingDayEndReadingAllMeterRate,2),
'NO LP and DayEnd Reading Meter Count':No_LPandDayEnd_Reading_Meter_with_DayEnd_Count,
'NO LP and DayEnd Reading Meter(%)':round(No_LPandDayEnd_Reading_Meter_with_DayEnd_Rate,2),
'NO LP Reading Meter Count':No_Reading_Meter_Total_Count,
'NO LP Reading Meter Total(%)':round(No_Reading_Meter_Rate,2),
'NO LP Reading but with DayEnd Reading Meter Count':No_Reading_Meter_with_DayEnd_count,
'NO LP Reading but with DayEnd_Reading Meter(%)':round(NO_LPReading_ButWithDayEnd_Reading_Rate,2),
'NO DayEnd Reading but with LP Reading Meter Count':NO_DayEnd_Reading_but_with_LP_Reading_Meter_Count,
'NO DayEnd Reading but with LP Reading Meter(%)':round(NO_DayEnd_Reading_but_with_LP_Reading_Meter_Rate,2),
'[ SLA METERS PERFORMANCE (NORMAL FOR OVER 7DAYS) ]':'',
'SLA Meter Count':Effective_Meter_Count,
'SLA Meter LP Success(%)':Effective_Meter_LP_Success_Rate,
'SLA Meter Dayend Success(%)':Effective_Meter_Dayend_Success_Rate,
'SLA Meter Average LP Push Count':round(Effective_Meter_Average_LP_Interval_Push_Count,2),
'SLA Meter Std Deviation LP Push Count':round(Effective_Meter_StdDev_LP_Interval_Push_Count,2),
'LP-DayEnd-FULL SLA Meter Count':LP_DayEnd_Full_Effective_Meter_Count,
'LP-DayEnd-FULL SLA Meter(%)':LP_DayEnd_Full_Effective_Meter_Rate,
'SLA Meters Full 48 LP Interval Meter Count':EffectiveMeters_Full_48_LP_Interval_Meter_Count,
'SLA Meters Full 48 LP Interval Meter(%)':EffectiveMeters_Full_48_LP_Interval_Meter_Rate,
'SLA Meters Missing DayEnd Reading Meter Count':EffectiveMeters_Missing_DayEnd_Reading_Meter_Count,
'SLA Meters Missing DayEnd Reading Meter(%)':EffectiveMeters_Missing_DayEnd_Reading_Meter_Rate,
'[ LATEST METERS PERFORMANCE (REGISTERED IN LAST 30DAYS) ]':'',
'Latest Meter Count':Latest_Meters_Count,
'Latest Meter LP Success(%)':Latest_Meters_LP_Success_Rate,
'Latest Meter Dayend Success(%)':Latest_Meters_Dayend_Success_Rate,
'Latest Meter Average LP Push Count':round(Latest_Meters_Average_LP_Interval_Push_Count,2),
'Latest Meter Std Deviation LP Push Count':round(Latest_Meters_StdDev_LP_Interval_Push_Count,2),
'Latest Meters Full 48 LP Interval Meter Count':Latest_Meters_Full_48_LP_Interval_Meter_Count,
'Latest Meters Full 48 LP Interval Meter(%)':Latest_Meters_Full_48_LP_Interval_Meter_Rate,
'Latest Meters Missing DayEnd Reading Meter Count':Latest_Meters_Missing_DayEnd_Reading_Meter_Count,
'Latest Meters Missing DayEnd Reading Meter(%)':Latest_Meters_Missing_DayEnd_Reading_Meter_Rate,
    # Per-district summaries (Districts A-D) are appended to this OrderedDict
    # via District.get_dict() immediately after it is built (see below).
'[ OVERALL METERS SUMMARY ]':'',
'Total HighRise Meter Count':Total_HighRiseMeter_Count,
'Total Village Meter Count':Total_VillageMeter_Count,
'Total All Cell Type Meter Count':Total_ALLCellMeter_Count,
'Total LDA Meter Count':Total_LDAMeter_Count,
'Total Cell Meter Count':Total_CellMeter_Count,
'Unlocated Meter Count':unlocated_meter_Count,
'[ OVERALL LP INTERVAL PUSH SUCCESS % ]':'',
'HighRise Meter Total LP Interval Push Success(%)':round(HighRiseMeter_Total_LP_SuccessRate,2),
'Village Meter Total LP Interval Push Success(%)':round(VillageMeter_Total_LP_SuccessRate,2),
'All Cell Meter Total LP Interval Push Success(%)':round(AllCellMeter_Total_LP_SuccessRate,2),
'Cell Meter Total LP Interval Push Success(%)':round(CellMeter_Total_LP_SuccessRate,2),
'LDA Meter Total LP Interval Push Success(%)':round(LDAMeter_Total_LP_SuccessRate,2),
'[ OVERALL DAYEND READING PUSH SUCCESS % ]':'',
'HighRise Meter Total DayEnd Reading Push Success(%)':round(HighRiseMeter_Total_DayEnd_Reading_SuccessRate,2),
'Village Meter Total DayEnd Reading Push Success(%)':round(VillageMeter_Total_DayEnd_Reading_SuccessRate,2),
'All Cell Meter Total DayEnd Reading Push Success(%)':round(AllCellMeter_Total_DayEnd_Reading_SuccessRate,2),
'Cell Meter Total DayEnd Reading Push Success(%)':round(CellMeter_Total_DayEnd_Reading_SuccessRate,2),
'LDA Meter Total DayEnd Reading Push Success(%)':round(LDAMeter_Total_DayEnd_Reading_SuccessRate,2),
'[ OVERALL NO LP READING METERS SUMMARY ]':'',
'NO LP Reading Highrise Meter Count':No_reading_meter_Highrise_count,
'NO LP Reading Village Meter Count':No_reading_meter_Village_count,
'NO LP Reading Unlocated Meter Count':No_reading_meter_Unlocated_count,
'NO LP Reading RF Meter Count':No_Reading_RF_meter_Count,
'NO LP Reading Cell Meter Count':No_Reading_Cell_meter_Count,
'NO LP Reading Normal Meter Count':No_reading_Normal_meter_count,
'NO LP Reading SecConfig Meter Count':No_reading_SecConfig_meter_count,
'NO LP Reading Config Meter Count':No_reading_Config_meter_count,
'NO LP Reading Discovered Meter Count':No_reading_Discovered_meter_count,
'NO LP Reading Failed Meter Count':No_reading_Failed_meter_count,
'NO LP Reading Lost Meter Count':No_reading_Lost_meter_count,
'[ NO LP PUSH READING METER COMPOSITION RATE ]':'',
'NO LP Reading HighRise Meter(%)':round(No_Reading_Meter_Highrise_Rate,2),
'NO LP Reading Village Meter(%)':round(No_Reading_Meter_Village_Rate,2),
'NO LP Reading Unlocated Meter(%)':round(No_reading_meter_Unlocated_Rate,2),
'NO LP Reading Normal Meter(%)':round(No_Reading_Normal_Meter_Rate,2),
'NO LP Reading SecConfig Meter(%)':round(No_Reading_SecConfig_Meter_Rate,2),
'NO LP Reading Configure Meter(%)':round(No_Reading_Config_Meter_Rate,2),
'NO LP Reading Discovered Meter(%)':round(No_Reading_Discovered_Meter_Rate,2),
'NO LP Reading Failed Meter(%)':round(No_Reading_Failed_Meter_Rate,2),
'NO LP Reading Lost Meter(%)':round(No_Reading_Lost_Meter_Rate,2),
'[ METER STATUS COUNT WITH READINGS ]':'',
'Normal Status Meter Count':Normal_Meter_Count,
'SecConfig Status Meter Count':SecConfig_Meter_Count,
'Configure Status Meter Count':Config_Meter_Count,
'Discovered Status Meter Count':Discovered_Meter_Count,
'Failed Status Meter Count':Failed_Meter_Count,
'Lost Status Meter Count':Lost_Meter_Count,
'[ OVERALL LP PUSH COUNT PERFORMANCE ]':'',
'Expected All Meter Total LP Interval Push Count': Expected_AllMeter_Total_LP_Count,
'All Meter Total LP Interval Push Count': AllMeter_Total_LP_Count,
'Expected HighRise Meter Total LP Interval Push Count':Expected_HighRiseMeter_Total_LP_Count,
'HighRise Meter Total LP Interval Push Count':HighRiseMeter_Total_LP_Count,
'Expected Village Meter Total LP Interval Push Count':Expected_VillageMeter_Total_LP_Count,
'Village Meter Total LP Interval Push Count':VillageMeter_Total_LP_Count,
'Expected All Cell Meter Total LP Interval Push Count':Expected_AllCellMeter_Total_LP_Count,
'All Cell Meter Total LP Interval Push Count':AllCellMeter_Total_LP_Count,
'Expected Cell Meter Total LP Interval Push Count':Expected_CellMeter_Total_LP_Count,
'Cell Meter Total LP Interval Push Count':CellMeter_Total_LP_Count,
'Expected LDA Meter Total LP Interval Push Count':Expected_LDAMeter_Total_LP_Count,
'LDA Meter Total LP Interval Push Count':LDAMeter_Total_LP_Count,
    'Full 48 LP Interval HighRise Meter Count':Full48_LP_Interval_HighRiseMeter_Count,
'Full 48 LP Interval Village Meter Count':Full48_LP_Interval_VillageMeter_Count,
'Full 144 LP Interval LDA Meter Count':Full144_LP_Interval_LDAMeter_Count,
'[ OVERALL DAYEND READING PUSH COUNT PERFORMANCE ]':'',
'Expected All Meter Total DayEnd Reading Push Count': Expected_AllMeter_Total_DayEnd_Reading_Count,
'All Meter Total DayEnd Reading Push Count': AllMeter_Total_DayEnd_Reading_Count,
'Expected HighRise Meter Total DayEnd Reading Push Count':Expected_HighRiseMeter_Total_DayEnd_Reading_Count,
'HighRise Meter Total DayEnd Reading Push Count':HighRiseMeter_Total_DayEnd_Reading_Count,
'Expected Village Meter Total DayEnd Reading Push Count':Expected_VillageMeter_Total_DayEnd_Reading_Count,
'Village Meter Total DayEnd Reading Push Count':VillageMeter_Total_DayEnd_Reading_Count,
'Expected All Cell Meter Total DayEnd Reading Push Count':Expected_AllCellMeter_Total_DayEnd_Reading_Count,
'All Cell Meter Total DayEnd Reading Push Count':AllCellMeter_Total_DayEnd_Reading_Count,
'Expected Cell Meter Total DayEnd Reading Push Count':Expected_CellMeter_Total_DayEnd_Reading_Count,
'Cell Meter Total DayEnd Reading Push Count':CellMeter_Total_DayEnd_Reading_Count,
'Expected LDA Meter Total DayEnd Reading Push Count':Expected_LDAMeter_Total_DayEnd_Reading_Count,
'LDA Meter Total DayEnd Reading Push Count':LDAMeter_Total_DayEnd_Reading_Count,
'Missing DayEnd Reading HighRise Meter Count':Missing_DayEnd_Reading_HighRiseMeter_Count,
'Missing DayEnd Reading Village Meter Count':Missing_DayEnd_Reading_VillageMeter_Count,
    'Missing DayEnd Reading All Cell Meter Count':Missing_DayEnd_Reading_AllCellMeter_Count,
'Missing DayEnd Reading Cell Meter Count':Missing_DayEnd_Reading_CellMeter_Count,
'Missing DayEnd Reading LDA Meter Count':Missing_DayEnd_Reading_LDAMeter_Count,
'[ METER TYPE COMPOSITION RATE ]':'',
'HighRise Meter(%)':round(HighRiseMeter_Rate,2),
'Village Meter(%)':round(VillageMeter_Rate,2),
'All Cell Meter(%)':round(AllCellMeter_Rate,2),
'Cell Meter(%)':round(CellMeter_Rate,2),
'LDA Meter(%)':round(LDAMeter_Rate,2),
'Unlocated Meter(%)':round(UnlocatedMeter_Rate,2),
'Normal Status Meter(%)':round(Normal_Meter_Rate,2),
'SecConfig Status Meter(%)':round(SecConfig_Meter_Rate,3),
'Configure Status Meter(%)':round(Config_Meter_Rate,3),
'Discovered Status Meter(%)':round(Discovered_Meter_Rate,3),
'Failed Status Meter(%)':round(Failed_Meter_Rate,3),
'Lost Status Meter(%)':round(Lost_Meter_Rate,3)})
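# Append the per-district summaries (Districts A-D) computed by the District class above
# before the report dict is converted into a DataFrame.
for _district in (district_a, district_b, district_c, district_d):
    Performance.update(_district.get_dict())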
df_performance = pd.DataFrame(pd.io.json.json_normalize(Performance).T)
df_performance.columns = ['Performance Result']
CR_perf = cr_list[cr_list['name'].str.startswith('8020', na=False)]
CR_perf = CR_perf.groupby(['name'])['NoOfIntervals'].mean()
CR_perf_std = cr_list.groupby(['name'])['NoOfIntervals'].std()
CR_perf = pd.concat([CR_perf, CR_perf_std], axis=1, join_axes=[CR_perf.index])
CR_perf = CR_perf.round()
CR_perf.columns = ['Average LP Count','Std LP Count']
CR_perf_district = district_a.district_meter[district_a.district_meter['name'].str.startswith('8020', na=False)]
CR_perf_district = CR_perf_district.groupby(['name'])['NoOfIntervals'].mean()
CR_perf_district_std = district_a.district_meter.groupby(['name'])['NoOfIntervals'].std()
CR_perf_district = pd.concat([CR_perf_district, CR_perf_district_std], axis=1, join_axes=[CR_perf_district.index])
CR_perf_district = CR_perf_district.round()
CR_perf_district.columns = ['Average LP Count','Std LP Count']
CR_perf_district_b = district_b.district_meter[district_b.district_meter['name'].str.startswith('8020', na=False)]
CR_perf_district_b = CR_perf_district_b.groupby(['name'])['NoOfIntervals'].mean()
CR_perf_district_b_std = district_b.district_meter.groupby(['name'])['NoOfIntervals'].std()
CR_perf_district_b = pd.concat([CR_perf_district_b, CR_perf_district_b_std], axis=1, join_axes=[CR_perf_district_b.index])
CR_perf_district_b = CR_perf_district_b.round()
CR_perf_district_b.columns = ['Average LP Count','Std LP Count']
CR_perf_district_c = district_c.district_meter[district_c.district_meter['name'].str.startswith('8020', na=False)]
CR_perf_district_c = CR_perf_district_c.groupby(['name'])['NoOfIntervals'].mean()
CR_perf_district_c_std = district_c.district_meter.groupby(['name'])['NoOfIntervals'].std()
CR_perf_district_c = pd.concat([CR_perf_district_c, CR_perf_district_c_std], axis=1, join_axes=[CR_perf_district_c.index])
CR_perf_district_c = CR_perf_district_c.round()
CR_perf_district_c.columns = ['Average LP Count','Std LP Count']
CR_perf_district_d = district_d.district_meter[district_d.district_meter['name'].str.startswith('8020', na=False)]
CR_perf_district_d = CR_perf_district_d.groupby(['name'])['NoOfIntervals'].mean()
CR_perf_district_d_std = district_d.district_meter.groupby(['name'])['NoOfIntervals'].std()
CR_perf_district_d = pd.concat([CR_perf_district_d, CR_perf_district_d_std], axis=1, join_axes=[CR_perf_district_d.index])
CR_perf_district_d = CR_perf_district_d.round()
CR_perf_district_d.columns = ['Average LP Count','Std LP Count']
dir = 'C:/Users/Desktop/RF_Analysis/SSR/'
writer = pd.ExcelWriter('%sReading_Performance_Report%s_%s.xlsx' % (dir,target_date,today_date))
df_performance.to_excel(writer, "Performance Report")
cr_list.to_excel(writer,"Analyzed Individual Meters", index=False)
CR_perf.to_excel(writer,"CR Performance")
CR_perf_district.to_excel(writer,"{} CR Performance")
CC_CR_Rnk.to_excel(writer,"{} CR ABC Rank")
CR_perf_district_b.to_excel(writer,"District B CR Performance")
TO_CR_Rnk.to_excel(writer,"District B CR ABC Rank")
CR_perf_district_c.to_excel(writer,"District C CR Performance")
TM_CR_Rnk.to_excel(writer,"District C CR ABC Rank")
CR_perf_district_d.to_excel(writer,"District D CR Performance")
TC_CR_Rnk.to_excel(writer,"District D CR ABC Rank")
Latest_Meters.to_excel(writer, "LatestMeters")
fw_perf.to_excel(writer, "FW Performance")
region_perf.to_excel(writer, "Region Performance")
area_perf.to_excel(writer, "Area Performance")
workbook = writer.book
worksheet = writer.sheets['Performance Report']
cell_format = workbook.add_format()
cell_format.set_align('right')
worksheet.set_column(0,0,68, cell_format)
worksheet.set_column(1,10,17)
worksheet = writer.sheets['Region Performance']
worksheet.set_column(0,0,27)
writer.save()
df_performance
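# Optional visual sanity check (a sketch using the matplotlib import and frames above):
# histogram of LP interval push counts per analyzed meter for the target date.
fig, ax = plt.subplots(figsize=(10, 4))
ax.hist(cr_list['NoOfIntervals'].dropna(), bins=48)
ax.set_xlabel('LP intervals pushed per meter (expected 48 for non-LDA meters)')
ax.set_ylabel('Meter count')
ax.set_title('LP interval push distribution - {}'.format(target_date))
plt.show()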
|
|
from __future__ import unicode_literals
import datetime
import json
import sys
import time
from email.header import Header
try:
from urllib.parse import urlparse
except ImportError:
from urlparse import urlparse
from django.conf import settings
from django.core import signals
from django.core import signing
from django.core.exceptions import DisallowedRedirect
from django.core.serializers.json import DjangoJSONEncoder
from django.http.cookie import SimpleCookie
from django.utils import six, timezone
from django.utils.encoding import force_bytes, force_text, iri_to_uri
from django.utils.http import cookie_date
from django.utils.six.moves import map
# See http://www.iana.org/assignments/http-status-codes
REASON_PHRASES = {
100: 'CONTINUE',
101: 'SWITCHING PROTOCOLS',
102: 'PROCESSING',
200: 'OK',
201: 'CREATED',
202: 'ACCEPTED',
203: 'NON-AUTHORITATIVE INFORMATION',
204: 'NO CONTENT',
205: 'RESET CONTENT',
206: 'PARTIAL CONTENT',
207: 'MULTI-STATUS',
208: 'ALREADY REPORTED',
226: 'IM USED',
300: 'MULTIPLE CHOICES',
301: 'MOVED PERMANENTLY',
302: 'FOUND',
303: 'SEE OTHER',
304: 'NOT MODIFIED',
305: 'USE PROXY',
306: 'RESERVED',
307: 'TEMPORARY REDIRECT',
400: 'BAD REQUEST',
401: 'UNAUTHORIZED',
402: 'PAYMENT REQUIRED',
403: 'FORBIDDEN',
404: 'NOT FOUND',
405: 'METHOD NOT ALLOWED',
406: 'NOT ACCEPTABLE',
407: 'PROXY AUTHENTICATION REQUIRED',
408: 'REQUEST TIMEOUT',
409: 'CONFLICT',
410: 'GONE',
411: 'LENGTH REQUIRED',
412: 'PRECONDITION FAILED',
413: 'REQUEST ENTITY TOO LARGE',
414: 'REQUEST-URI TOO LONG',
415: 'UNSUPPORTED MEDIA TYPE',
416: 'REQUESTED RANGE NOT SATISFIABLE',
417: 'EXPECTATION FAILED',
418: "I'M A TEAPOT",
422: 'UNPROCESSABLE ENTITY',
423: 'LOCKED',
424: 'FAILED DEPENDENCY',
426: 'UPGRADE REQUIRED',
428: 'PRECONDITION REQUIRED',
429: 'TOO MANY REQUESTS',
431: 'REQUEST HEADER FIELDS TOO LARGE',
500: 'INTERNAL SERVER ERROR',
501: 'NOT IMPLEMENTED',
502: 'BAD GATEWAY',
503: 'SERVICE UNAVAILABLE',
504: 'GATEWAY TIMEOUT',
505: 'HTTP VERSION NOT SUPPORTED',
506: 'VARIANT ALSO NEGOTIATES',
507: 'INSUFFICIENT STORAGE',
508: 'LOOP DETECTED',
510: 'NOT EXTENDED',
511: 'NETWORK AUTHENTICATION REQUIRED',
}
class BadHeaderError(ValueError):
pass
class HttpResponseBase(six.Iterator):
"""
An HTTP response base class with dictionary-accessed headers.
This class doesn't handle content. It should not be used directly.
Use the HttpResponse and StreamingHttpResponse subclasses instead.
"""
status_code = 200
reason_phrase = None # Use default reason phrase for status code.
def __init__(self, content_type=None, status=None, reason=None):
# _headers is a mapping of the lower-case name to the original case of
# the header (required for working with legacy systems) and the header
# value. Both the name of the header and its value are ASCII strings.
self._headers = {}
self._charset = settings.DEFAULT_CHARSET
self._closable_objects = []
# This parameter is set by the handler. It's necessary to preserve the
# historical behavior of request_finished.
self._handler_class = None
if not content_type:
content_type = "%s; charset=%s" % (settings.DEFAULT_CONTENT_TYPE,
self._charset)
self.cookies = SimpleCookie()
if status is not None:
self.status_code = status
if reason is not None:
self.reason_phrase = reason
elif self.reason_phrase is None:
self.reason_phrase = REASON_PHRASES.get(self.status_code,
'UNKNOWN STATUS CODE')
self['Content-Type'] = content_type
def serialize_headers(self):
"""HTTP headers as a bytestring."""
def to_bytes(val, encoding):
return val if isinstance(val, bytes) else val.encode(encoding)
headers = [
(b': '.join([to_bytes(key, 'ascii'), to_bytes(value, 'latin-1')]))
for key, value in self._headers.values()
]
return b'\r\n'.join(headers)
if six.PY3:
__bytes__ = serialize_headers
else:
__str__ = serialize_headers
def _convert_to_charset(self, value, charset, mime_encode=False):
"""Converts headers key/value to ascii/latin-1 native strings.
`charset` must be 'ascii' or 'latin-1'. If `mime_encode` is True and
`value` can't be represented in the given charset, MIME-encoding
is applied.
"""
if not isinstance(value, (bytes, six.text_type)):
value = str(value)
try:
if six.PY3:
if isinstance(value, str):
# Ensure string is valid in given charset
value.encode(charset)
else:
# Convert bytestring using given charset
value = value.decode(charset)
else:
if isinstance(value, str):
# Ensure string is valid in given charset
value.decode(charset)
else:
# Convert unicode string to given charset
value = value.encode(charset)
except UnicodeError as e:
if mime_encode:
# Wrapping in str() is a workaround for #12422 under Python 2.
value = str(Header(value, 'utf-8', maxlinelen=sys.maxsize).encode())
else:
e.reason += ', HTTP response headers must be in %s format' % charset
raise
if str('\n') in value or str('\r') in value:
raise BadHeaderError("Header values can't contain newlines (got %r)" % value)
return value
def __setitem__(self, header, value):
header = self._convert_to_charset(header, 'ascii')
value = self._convert_to_charset(value, 'latin-1', mime_encode=True)
self._headers[header.lower()] = (header, value)
def __delitem__(self, header):
try:
del self._headers[header.lower()]
except KeyError:
pass
def __getitem__(self, header):
return self._headers[header.lower()][1]
def __getstate__(self):
# SimpleCookie is not pickleable with pickle.HIGHEST_PROTOCOL, so we
# serialize to a string instead
state = self.__dict__.copy()
state['cookies'] = str(state['cookies'])
return state
def __setstate__(self, state):
self.__dict__.update(state)
self.cookies = SimpleCookie(self.cookies)
def has_header(self, header):
"""Case-insensitive check for a header."""
return header.lower() in self._headers
__contains__ = has_header
def items(self):
return self._headers.values()
def get(self, header, alternate=None):
return self._headers.get(header.lower(), (None, alternate))[1]
def set_cookie(self, key, value='', max_age=None, expires=None, path='/',
domain=None, secure=False, httponly=False):
"""
Sets a cookie.
``expires`` can be:
- a string in the correct format,
- a naive ``datetime.datetime`` object in UTC,
- an aware ``datetime.datetime`` object in any time zone.
If it is a ``datetime.datetime`` object then ``max_age`` will be calculated.
"""
self.cookies[key] = value
if expires is not None:
if isinstance(expires, datetime.datetime):
if timezone.is_aware(expires):
expires = timezone.make_naive(expires, timezone.utc)
delta = expires - expires.utcnow()
# Add one second so the date matches exactly (a fraction of
# time gets lost between converting to a timedelta and
# then the date string).
delta = delta + datetime.timedelta(seconds=1)
# Just set max_age - the max_age logic will set expires.
expires = None
max_age = max(0, delta.days * 86400 + delta.seconds)
else:
self.cookies[key]['expires'] = expires
if max_age is not None:
self.cookies[key]['max-age'] = max_age
            # IE requires expires, so set it if it hasn't been set already.
if not expires:
self.cookies[key]['expires'] = cookie_date(time.time() +
max_age)
if path is not None:
self.cookies[key]['path'] = path
if domain is not None:
self.cookies[key]['domain'] = domain
if secure:
self.cookies[key]['secure'] = True
if httponly:
self.cookies[key]['httponly'] = True
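    # Illustrative usage sketch (an assumption for readers, not part of the original module):
    #   response = HttpResponse('ok')
    #   response.set_cookie('sessionid', 'abc123', max_age=3600, httponly=True)
    #   # later, to expire the cookie on the client:
    #   response.delete_cookie('sessionid')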
def set_signed_cookie(self, key, value, salt='', **kwargs):
value = signing.get_cookie_signer(salt=key + salt).sign(value)
return self.set_cookie(key, value, **kwargs)
def delete_cookie(self, key, path='/', domain=None):
self.set_cookie(key, max_age=0, path=path, domain=domain,
expires='Thu, 01-Jan-1970 00:00:00 GMT')
# Common methods used by subclasses
def make_bytes(self, value):
"""Turn a value into a bytestring encoded in the output charset."""
# Per PEP 3333, this response body must be bytes. To avoid returning
# an instance of a subclass, this function returns `bytes(value)`.
# This doesn't make a copy when `value` already contains bytes.
# If content is already encoded (eg. gzip), assume bytes.
if self.has_header('Content-Encoding'):
return bytes(value)
# Handle string types -- we can't rely on force_bytes here because:
# - under Python 3 it attempts str conversion first
# - when self._charset != 'utf-8' it re-encodes the content
if isinstance(value, bytes):
return bytes(value)
if isinstance(value, six.text_type):
return bytes(value.encode(self._charset))
# Handle non-string types (#16494)
return force_bytes(value, self._charset)
# These methods partially implement the file-like object interface.
# See http://docs.python.org/lib/bltin-file-objects.html
# The WSGI server must call this method upon completion of the request.
# See http://blog.dscpl.com.au/2012/10/obligations-for-calling-close-on.html
def close(self):
for closable in self._closable_objects:
try:
closable.close()
except Exception:
pass
signals.request_finished.send(sender=self._handler_class)
def write(self, content):
raise Exception("This %s instance is not writable" % self.__class__.__name__)
def flush(self):
pass
def tell(self):
raise Exception("This %s instance cannot tell its position" % self.__class__.__name__)
class HttpResponse(HttpResponseBase):
"""
An HTTP response class with a string as content.
    This content can be read, appended to, or replaced.
"""
streaming = False
def __init__(self, content=b'', *args, **kwargs):
super(HttpResponse, self).__init__(*args, **kwargs)
# Content is a bytestring. See the `content` property methods.
self.content = content
def serialize(self):
"""Full HTTP message, including headers, as a bytestring."""
return self.serialize_headers() + b'\r\n\r\n' + self.content
if six.PY3:
__bytes__ = serialize
else:
__str__ = serialize
@property
def content(self):
return b''.join(self._container)
@content.setter
def content(self, value):
# Consume iterators upon assignment to allow repeated iteration.
if hasattr(value, '__iter__') and not isinstance(value, (bytes, six.string_types)):
if hasattr(value, 'close'):
self._closable_objects.append(value)
value = b''.join(self.make_bytes(chunk) for chunk in value)
else:
value = self.make_bytes(value)
# Create a list of properly encoded bytestrings to support write().
self._container = [value]
def __iter__(self):
return iter(self._container)
def write(self, content):
self._container.append(self.make_bytes(content))
def tell(self):
return len(self.content)
class StreamingHttpResponse(HttpResponseBase):
"""
A streaming HTTP response class with an iterator as content.
This should only be iterated once, when the response is streamed to the
client. However, it can be appended to or replaced with a new iterator
that wraps the original content (or yields entirely new content).
"""
streaming = True
def __init__(self, streaming_content=(), *args, **kwargs):
super(StreamingHttpResponse, self).__init__(*args, **kwargs)
# `streaming_content` should be an iterable of bytestrings.
# See the `streaming_content` property methods.
self.streaming_content = streaming_content
@property
def content(self):
raise AttributeError("This %s instance has no `content` attribute. "
"Use `streaming_content` instead." % self.__class__.__name__)
@property
def streaming_content(self):
return map(self.make_bytes, self._iterator)
@streaming_content.setter
def streaming_content(self, value):
# Ensure we can never iterate on "value" more than once.
self._iterator = iter(value)
if hasattr(value, 'close'):
self._closable_objects.append(value)
def __iter__(self):
return self.streaming_content
class HttpResponseRedirectBase(HttpResponse):
allowed_schemes = ['http', 'https', 'ftp']
def __init__(self, redirect_to, *args, **kwargs):
parsed = urlparse(force_text(redirect_to))
if parsed.scheme and parsed.scheme not in self.allowed_schemes:
raise DisallowedRedirect("Unsafe redirect to URL with protocol '%s'" % parsed.scheme)
super(HttpResponseRedirectBase, self).__init__(*args, **kwargs)
self['Location'] = iri_to_uri(redirect_to)
url = property(lambda self: self['Location'])
class HttpResponseRedirect(HttpResponseRedirectBase):
status_code = 302
class HttpResponsePermanentRedirect(HttpResponseRedirectBase):
status_code = 301
class HttpResponseNotModified(HttpResponse):
status_code = 304
def __init__(self, *args, **kwargs):
super(HttpResponseNotModified, self).__init__(*args, **kwargs)
del self['content-type']
@HttpResponse.content.setter
def content(self, value):
if value:
raise AttributeError("You cannot set content to a 304 (Not Modified) response")
self._container = []
class HttpResponseBadRequest(HttpResponse):
status_code = 400
class HttpResponseNotFound(HttpResponse):
status_code = 404
class HttpResponseForbidden(HttpResponse):
status_code = 403
class HttpResponseNotAllowed(HttpResponse):
status_code = 405
def __init__(self, permitted_methods, *args, **kwargs):
super(HttpResponseNotAllowed, self).__init__(*args, **kwargs)
self['Allow'] = ', '.join(permitted_methods)
class HttpResponseGone(HttpResponse):
status_code = 410
class HttpResponseServerError(HttpResponse):
status_code = 500
class Http404(Exception):
pass
class JsonResponse(HttpResponse):
"""
An HTTP response class that consumes data to be serialized to JSON.
:param data: Data to be dumped into json. By default only ``dict`` objects
are allowed to be passed due to a security flaw before EcmaScript 5. See
the ``safe`` parameter for more information.
    :param encoder: Should be a JSON encoder class. Defaults to
``django.core.serializers.json.DjangoJSONEncoder``.
:param safe: Controls if only ``dict`` objects may be serialized. Defaults
to ``True``.
"""
def __init__(self, data, encoder=DjangoJSONEncoder, safe=True, **kwargs):
if safe and not isinstance(data, dict):
raise TypeError('In order to allow non-dict objects to be '
'serialized set the safe parameter to False')
kwargs.setdefault('content_type', 'application/json')
data = json.dumps(data, cls=encoder)
super(JsonResponse, self).__init__(content=data, **kwargs)
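
# Illustrative usage sketch (an assumption for readers, not part of this module):
#
#   response = JsonResponse({'status': 'ok', 'count': 3})
#   response['Cache-Control'] = 'no-cache'            # header access via __setitem__
#   response.set_cookie('seen_intro', '1', max_age=86400)
#
#   # Non-dict data requires safe=False, because only dicts are allowed by default:
#   JsonResponse([1, 2, 3], safe=False)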
|
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Yang Feng ([email protected]) modified this file, based on tensorflow r0.10,
# in the implementation of EmbeddingWrapper, to support sharing the word embedding
# between the forward and backward RNNs.
# ==============================================================================
"""Module for constructing RNN Cells.
## Base interface for all RNN Cells
@@RNNCell
## RNN Cells for use with TensorFlow's core RNN methods
@@BasicRNNCell
@@BasicLSTMCell
@@GRUCell
@@LSTMCell
## Classes storing split `RNNCell` state
@@LSTMStateTuple
## RNN Cell wrappers (RNNCells that wrap other RNNCells)
@@MultiRNNCell
@@DropoutWrapper
@@EmbeddingWrapper
@@InputProjectionWrapper
@@OutputProjectionWrapper
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import math
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import clip_ops
from tensorflow.python.ops import embedding_ops
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn_ops
from tensorflow.python.ops import variable_scope as vs
from tensorflow.python.ops.math_ops import sigmoid
from tensorflow.python.ops.math_ops import tanh
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.util import nest
def _state_size_with_prefix(state_size, prefix=None):
"""Helper function that enables int or TensorShape shape specification.
This function takes a size specification, which can be an integer or a
TensorShape, and converts it into a list of integers. One may specify any
additional dimensions that precede the final state size specification.
Args:
state_size: TensorShape or int that specifies the size of a tensor.
prefix: optional additional list of dimensions to prepend.
Returns:
result_state_size: list of dimensions the resulting tensor size.
"""
result_state_size = tensor_shape.as_shape(state_size).as_list()
if prefix is not None:
if not isinstance(prefix, list):
raise TypeError("prefix of _state_size_with_prefix should be a list.")
result_state_size = prefix + result_state_size
return result_state_size
class RNNCell(object):
"""Abstract object representing an RNN cell.
The definition of cell in this package differs from the definition used in the
literature. In the literature, cell refers to an object with a single scalar
output. The definition in this package refers to a horizontal array of such
units.
An RNN cell, in the most abstract setting, is anything that has
a state and performs some operation that takes a matrix of inputs.
This operation results in an output matrix with `self.output_size` columns.
If `self.state_size` is an integer, this operation also results in a new
state matrix with `self.state_size` columns. If `self.state_size` is a
tuple of integers, then it results in a tuple of `len(state_size)` state
  matrices, each with a column size corresponding to values in `state_size`.
This module provides a number of basic commonly used RNN cells, such as
LSTM (Long Short Term Memory) or GRU (Gated Recurrent Unit), and a number
  of operators that allow adding dropouts, projections, or embeddings for inputs.
Constructing multi-layer cells is supported by the class `MultiRNNCell`,
or by calling the `rnn` ops several times. Every `RNNCell` must have the
  properties below and implement `__call__` with the following signature.
"""
def __call__(self, inputs, state, scope=None):
"""Run this RNN cell on inputs, starting from the given state.
Args:
inputs: `2-D` tensor with shape `[batch_size x input_size]`.
state: if `self.state_size` is an integer, this should be a `2-D Tensor`
with shape `[batch_size x self.state_size]`. Otherwise, if
`self.state_size` is a tuple of integers, this should be a tuple
with shapes `[batch_size x s] for s in self.state_size`.
scope: VariableScope for the created subgraph; defaults to class name.
Returns:
A pair containing:
- Output: A `2-D` tensor with shape `[batch_size x self.output_size]`.
- New state: Either a single `2-D` tensor, or a tuple of tensors matching
the arity and shapes of `state`.
"""
raise NotImplementedError("Abstract method")
@property
def state_size(self):
"""size(s) of state(s) used by this cell.
It can be represented by an Integer, a TensorShape or a tuple of Integers
or TensorShapes.
"""
raise NotImplementedError("Abstract method")
@property
def output_size(self):
"""Integer or TensorShape: size of outputs produced by this cell."""
raise NotImplementedError("Abstract method")
def zero_state(self, batch_size, dtype):
"""Return zero-filled state tensor(s).
Args:
batch_size: int, float, or unit Tensor representing the batch size.
dtype: the data type to use for the state.
Returns:
If `state_size` is an int or TensorShape, then the return value is a
`N-D` tensor of shape `[batch_size x state_size]` filled with zeros.
If `state_size` is a nested list or tuple, then the return value is
a nested list or tuple (of the same structure) of `2-D` tensors with
the shapes `[batch_size x s]` for each s in `state_size`.
"""
state_size = self.state_size
if nest.is_sequence(state_size):
state_size_flat = nest.flatten(state_size)
zeros_flat = [
array_ops.zeros(
array_ops.pack(_state_size_with_prefix(s, prefix=[batch_size])),
dtype=dtype)
for s in state_size_flat]
for s, z in zip(state_size_flat, zeros_flat):
z.set_shape(_state_size_with_prefix(s, prefix=[None]))
zeros = nest.pack_sequence_as(structure=state_size,
flat_sequence=zeros_flat)
else:
zeros_size = _state_size_with_prefix(state_size, prefix=[batch_size])
zeros = array_ops.zeros(array_ops.pack(zeros_size), dtype=dtype)
zeros.set_shape(_state_size_with_prefix(state_size, prefix=[None]))
return zeros
class BasicRNNCell(RNNCell):
"""The most basic RNN cell."""
def __init__(self, num_units, input_size=None, activation=tanh):
if input_size is not None:
logging.warn("%s: The input_size parameter is deprecated.", self)
self._num_units = num_units
self._activation = activation
@property
def state_size(self):
return self._num_units
@property
def output_size(self):
return self._num_units
def __call__(self, inputs, state, scope=None):
"""Most basic RNN: output = new_state = activation(W * input + U * state + B)."""
with vs.variable_scope(scope or type(self).__name__): # "BasicRNNCell"
output = self._activation(_linear([inputs, state], self._num_units, True))
return output, output
class GRUCell(RNNCell):
"""Gated Recurrent Unit cell (cf. http://arxiv.org/abs/1406.1078)."""
def __init__(self, num_units, input_size=None, activation=tanh):
if input_size is not None:
logging.warn("%s: The input_size parameter is deprecated.", self)
self._num_units = num_units
self._activation = activation
@property
def state_size(self):
return self._num_units
@property
def output_size(self):
return self._num_units
def __call__(self, inputs, state, scope=None):
"""Gated recurrent unit (GRU) with nunits cells."""
with vs.variable_scope(scope or type(self).__name__): # "GRUCell"
with vs.variable_scope("Gates"): # Reset gate and update gate.
# We start with bias of 1.0 to not reset and not update.
r, u = array_ops.split(1, 2, _linear([inputs, state],
2 * self._num_units, True, 1.0))
r, u = sigmoid(r), sigmoid(u)
with vs.variable_scope("Candidate"):
c = self._activation(_linear([inputs, r * state],
self._num_units, True))
new_h = u * state + (1 - u) * c
return new_h, new_h
_LSTMStateTuple = collections.namedtuple("LSTMStateTuple", ("c", "h"))
class LSTMStateTuple(_LSTMStateTuple):
"""Tuple used by LSTM Cells for `state_size`, `zero_state`, and output state.
Stores two elements: `(c, h)`, in that order.
Only used when `state_is_tuple=True`.
"""
__slots__ = ()
@property
def dtype(self):
(c, h) = self
if not c.dtype == h.dtype:
raise TypeError("Inconsistent internal state: %s vs %s" %
(str(c.dtype), str(h.dtype)))
return c.dtype
class BasicLSTMCell(RNNCell):
"""Basic LSTM recurrent network cell.
The implementation is based on: http://arxiv.org/abs/1409.2329.
We add forget_bias (default: 1) to the biases of the forget gate in order to
reduce the scale of forgetting in the beginning of the training.
  It does not allow cell clipping or a projection layer, and it does not
  use peep-hole connections: it is the basic baseline.
For advanced models, please use the full LSTMCell that follows.
"""
def __init__(self, num_units, forget_bias=1.0, input_size=None,
state_is_tuple=False, activation=tanh):
"""Initialize the basic LSTM cell.
Args:
num_units: int, The number of units in the LSTM cell.
forget_bias: float, The bias added to forget gates (see above).
input_size: Deprecated and unused.
state_is_tuple: If True, accepted and returned states are 2-tuples of
the `c_state` and `m_state`. By default (False), they are concatenated
along the column axis. This default behavior will soon be deprecated.
activation: Activation function of the inner states.
"""
if not state_is_tuple:
logging.warn("%s: Using a concatenated state is slower and will soon be "
"deprecated. Use state_is_tuple=True.", self)
if input_size is not None:
logging.warn("%s: The input_size parameter is deprecated.", self)
self._num_units = num_units
self._forget_bias = forget_bias
self._state_is_tuple = state_is_tuple
self._activation = activation
@property
def state_size(self):
return (LSTMStateTuple(self._num_units, self._num_units)
if self._state_is_tuple else 2 * self._num_units)
@property
def output_size(self):
return self._num_units
def __call__(self, inputs, state, scope=None):
"""Long short-term memory cell (LSTM)."""
with vs.variable_scope(scope or type(self).__name__): # "BasicLSTMCell"
# Parameters of gates are concatenated into one multiply for efficiency.
if self._state_is_tuple:
c, h = state
else:
c, h = array_ops.split(1, 2, state)
concat = _linear([inputs, h], 4 * self._num_units, True)
# i = input_gate, j = new_input, f = forget_gate, o = output_gate
i, j, f, o = array_ops.split(1, 4, concat)
new_c = (c * sigmoid(f + self._forget_bias) + sigmoid(i) *
self._activation(j))
new_h = self._activation(new_c) * sigmoid(o)
if self._state_is_tuple:
new_state = LSTMStateTuple(new_c, new_h)
else:
new_state = array_ops.concat(1, [new_c, new_h])
return new_h, new_state
def _get_concat_variable(name, shape, dtype, num_shards):
"""Get a sharded variable concatenated into one tensor."""
sharded_variable = _get_sharded_variable(name, shape, dtype, num_shards)
if len(sharded_variable) == 1:
return sharded_variable[0]
concat_name = name + "/concat"
concat_full_name = vs.get_variable_scope().name + "/" + concat_name + ":0"
for value in ops.get_collection(ops.GraphKeys.CONCATENATED_VARIABLES):
if value.name == concat_full_name:
return value
concat_variable = array_ops.concat(0, sharded_variable, name=concat_name)
ops.add_to_collection(ops.GraphKeys.CONCATENATED_VARIABLES,
concat_variable)
return concat_variable
def _get_sharded_variable(name, shape, dtype, num_shards):
"""Get a list of sharded variables with the given dtype."""
if num_shards > shape[0]:
raise ValueError("Too many shards: shape=%s, num_shards=%d" %
(shape, num_shards))
unit_shard_size = int(math.floor(shape[0] / num_shards))
remaining_rows = shape[0] - unit_shard_size * num_shards
shards = []
for i in range(num_shards):
current_size = unit_shard_size
if i < remaining_rows:
current_size += 1
shards.append(vs.get_variable(name + "_%d" % i, [current_size] + shape[1:],
dtype=dtype))
return shards
class LSTMCell(RNNCell):
"""Long short-term memory unit (LSTM) recurrent network cell.
The default non-peephole implementation is based on:
http://deeplearning.cs.cmu.edu/pdfs/Hochreiter97_lstm.pdf
S. Hochreiter and J. Schmidhuber.
"Long Short-Term Memory". Neural Computation, 9(8):1735-1780, 1997.
The peephole implementation is based on:
https://research.google.com/pubs/archive/43905.pdf
Hasim Sak, Andrew Senior, and Francoise Beaufays.
"Long short-term memory recurrent neural network architectures for
large scale acoustic modeling." INTERSPEECH, 2014.
The class uses optional peep-hole connections, optional cell clipping, and
an optional projection layer.
"""
def __init__(self, num_units, input_size=None,
use_peepholes=False, cell_clip=None,
initializer=None, num_proj=None, proj_clip=None,
num_unit_shards=1, num_proj_shards=1,
forget_bias=1.0, state_is_tuple=False,
activation=tanh):
"""Initialize the parameters for an LSTM cell.
Args:
num_units: int, The number of units in the LSTM cell
input_size: Deprecated and unused.
use_peepholes: bool, set True to enable diagonal/peephole connections.
cell_clip: (optional) A float value, if provided the cell state is clipped
by this value prior to the cell output activation.
initializer: (optional) The initializer to use for the weight and
projection matrices.
num_proj: (optional) int, The output dimensionality for the projection
matrices. If None, no projection is performed.
proj_clip: (optional) A float value. If `num_proj > 0` and `proj_clip` is
provided, then the projected values are clipped elementwise to within
`[-proj_clip, proj_clip]`.
num_unit_shards: How to split the weight matrix. If >1, the weight
matrix is stored across num_unit_shards.
num_proj_shards: How to split the projection matrix. If >1, the
projection matrix is stored across num_proj_shards.
forget_bias: Biases of the forget gate are initialized by default to 1
in order to reduce the scale of forgetting at the beginning of
the training.
state_is_tuple: If True, accepted and returned states are 2-tuples of
the `c_state` and `m_state`. By default (False), they are concatenated
along the column axis. This default behavior will soon be deprecated.
activation: Activation function of the inner states.
"""
if not state_is_tuple:
logging.warn("%s: Using a concatenated state is slower and will soon be "
"deprecated. Use state_is_tuple=True.", self)
if input_size is not None:
logging.warn("%s: The input_size parameter is deprecated.", self)
self._num_units = num_units
self._use_peepholes = use_peepholes
self._cell_clip = cell_clip
self._initializer = initializer
self._num_proj = num_proj
self._proj_clip = proj_clip
self._num_unit_shards = num_unit_shards
self._num_proj_shards = num_proj_shards
self._forget_bias = forget_bias
self._state_is_tuple = state_is_tuple
self._activation = activation
if num_proj:
self._state_size = (
LSTMStateTuple(num_units, num_proj)
if state_is_tuple else num_units + num_proj)
self._output_size = num_proj
else:
self._state_size = (
LSTMStateTuple(num_units, num_units)
if state_is_tuple else 2 * num_units)
self._output_size = num_units
@property
def state_size(self):
return self._state_size
@property
def output_size(self):
return self._output_size
def __call__(self, inputs, state, scope=None):
"""Run one step of LSTM.
Args:
inputs: input Tensor, 2D, batch x num_units.
state: if `state_is_tuple` is False, this must be a state Tensor,
`2-D, batch x state_size`. If `state_is_tuple` is True, this must be a
tuple of state Tensors, both `2-D`, with column sizes `c_state` and
`m_state`.
scope: VariableScope for the created subgraph; defaults to "LSTMCell".
Returns:
A tuple containing:
- A `2-D, [batch x output_dim]`, Tensor representing the output of the
LSTM after reading `inputs` when previous state was `state`.
Here output_dim is:
num_proj if num_proj was set,
num_units otherwise.
- Tensor(s) representing the new state of LSTM after reading `inputs` when
the previous state was `state`. Same type and shape(s) as `state`.
Raises:
ValueError: If input size cannot be inferred from inputs via
static shape inference.
"""
num_proj = self._num_units if self._num_proj is None else self._num_proj
if self._state_is_tuple:
(c_prev, m_prev) = state
else:
c_prev = array_ops.slice(state, [0, 0], [-1, self._num_units])
m_prev = array_ops.slice(state, [0, self._num_units], [-1, num_proj])
dtype = inputs.dtype
input_size = inputs.get_shape().with_rank(2)[1]
if input_size.value is None:
raise ValueError("Could not infer input size from inputs.get_shape()[-1]")
with vs.variable_scope(scope or type(self).__name__,
initializer=self._initializer): # "LSTMCell"
concat_w = _get_concat_variable(
"W", [input_size.value + num_proj, 4 * self._num_units],
dtype, self._num_unit_shards)
b = vs.get_variable(
"B", shape=[4 * self._num_units],
initializer=array_ops.zeros_initializer, dtype=dtype)
# i = input_gate, j = new_input, f = forget_gate, o = output_gate
cell_inputs = array_ops.concat(1, [inputs, m_prev])
lstm_matrix = nn_ops.bias_add(math_ops.matmul(cell_inputs, concat_w), b)
i, j, f, o = array_ops.split(1, 4, lstm_matrix)
# Diagonal connections
if self._use_peepholes:
w_f_diag = vs.get_variable(
"W_F_diag", shape=[self._num_units], dtype=dtype)
w_i_diag = vs.get_variable(
"W_I_diag", shape=[self._num_units], dtype=dtype)
w_o_diag = vs.get_variable(
"W_O_diag", shape=[self._num_units], dtype=dtype)
if self._use_peepholes:
c = (sigmoid(f + self._forget_bias + w_f_diag * c_prev) * c_prev +
sigmoid(i + w_i_diag * c_prev) * self._activation(j))
else:
c = (sigmoid(f + self._forget_bias) * c_prev + sigmoid(i) *
self._activation(j))
if self._cell_clip is not None:
# pylint: disable=invalid-unary-operand-type
c = clip_ops.clip_by_value(c, -self._cell_clip, self._cell_clip)
# pylint: enable=invalid-unary-operand-type
if self._use_peepholes:
m = sigmoid(o + w_o_diag * c) * self._activation(c)
else:
m = sigmoid(o) * self._activation(c)
if self._num_proj is not None:
concat_w_proj = _get_concat_variable(
"W_P", [self._num_units, self._num_proj],
dtype, self._num_proj_shards)
m = math_ops.matmul(m, concat_w_proj)
if self._proj_clip is not None:
# pylint: disable=invalid-unary-operand-type
m = clip_ops.clip_by_value(m, -self._proj_clip, self._proj_clip)
# pylint: enable=invalid-unary-operand-type
new_state = (LSTMStateTuple(c, m) if self._state_is_tuple
else array_ops.concat(1, [c, m]))
return m, new_state
class OutputProjectionWrapper(RNNCell):
"""Operator adding an output projection to the given cell.
Note: in many cases it may be more efficient to not use this wrapper,
but instead concatenate the whole sequence of your outputs in time,
do the projection on this batch-concatenated sequence, then split it
if needed or directly feed into a softmax.
"""
def __init__(self, cell, output_size):
"""Create a cell with output projection.
Args:
cell: an RNNCell, a projection to output_size is added to it.
output_size: integer, the size of the output after projection.
Raises:
TypeError: if cell is not an RNNCell.
ValueError: if output_size is not positive.
"""
if not isinstance(cell, RNNCell):
raise TypeError("The parameter cell is not RNNCell.")
if output_size < 1:
raise ValueError("Parameter output_size must be > 0: %d." % output_size)
self._cell = cell
self._output_size = output_size
@property
def state_size(self):
return self._cell.state_size
@property
def output_size(self):
return self._output_size
def __call__(self, inputs, state, scope=None):
"""Run the cell and output projection on inputs, starting from state."""
output, res_state = self._cell(inputs, state)
# Default scope: "OutputProjectionWrapper"
with vs.variable_scope(scope or type(self).__name__):
projected = _linear(output, self._output_size, True)
return projected, res_state
class InputProjectionWrapper(RNNCell):
"""Operator adding an input projection to the given cell.
Note: in many cases it may be more efficient to not use this wrapper,
but instead concatenate the whole sequence of your inputs in time,
do the projection on this batch-concatenated sequence, then split it.
"""
def __init__(self, cell, num_proj, input_size=None):
"""Create a cell with input projection.
Args:
cell: an RNNCell, a projection of inputs is added before it.
num_proj: Python integer. The dimension to project to.
input_size: Deprecated and unused.
Raises:
TypeError: if cell is not an RNNCell.
"""
if input_size is not None:
logging.warn("%s: The input_size parameter is deprecated.", self)
if not isinstance(cell, RNNCell):
raise TypeError("The parameter cell is not RNNCell.")
self._cell = cell
self._num_proj = num_proj
@property
def state_size(self):
return self._cell.state_size
@property
def output_size(self):
return self._cell.output_size
def __call__(self, inputs, state, scope=None):
"""Run the input projection and then the cell."""
# Default scope: "InputProjectionWrapper"
with vs.variable_scope(scope or type(self).__name__):
projected = _linear(inputs, self._num_proj, True)
return self._cell(projected, state)
class DropoutWrapper(RNNCell):
"""Operator adding dropout to inputs and outputs of the given cell."""
def __init__(self, cell, input_keep_prob=1.0, output_keep_prob=1.0,
seed=None):
"""Create a cell with added input and/or output dropout.
Dropout is never used on the state.
Args:
cell: an RNNCell, a projection to output_size is added to it.
input_keep_prob: unit Tensor or float between 0 and 1, input keep
probability; if it is float and 1, no input dropout will be added.
output_keep_prob: unit Tensor or float between 0 and 1, output keep
probability; if it is float and 1, no output dropout will be added.
seed: (optional) integer, the randomness seed.
Raises:
TypeError: if cell is not an RNNCell.
ValueError: if keep_prob is not between 0 and 1.
"""
if not isinstance(cell, RNNCell):
raise TypeError("The parameter cell is not a RNNCell.")
if (isinstance(input_keep_prob, float) and
not (input_keep_prob >= 0.0 and input_keep_prob <= 1.0)):
raise ValueError("Parameter input_keep_prob must be between 0 and 1: %d"
% input_keep_prob)
if (isinstance(output_keep_prob, float) and
not (output_keep_prob >= 0.0 and output_keep_prob <= 1.0)):
raise ValueError("Parameter output_keep_prob must be between 0 and 1: %d"
% output_keep_prob)
self._cell = cell
self._input_keep_prob = input_keep_prob
self._output_keep_prob = output_keep_prob
self._seed = seed
@property
def state_size(self):
return self._cell.state_size
@property
def output_size(self):
return self._cell.output_size
def __call__(self, inputs, state, scope=None):
"""Run the cell with the declared dropouts."""
if (not isinstance(self._input_keep_prob, float) or
self._input_keep_prob < 1):
inputs = nn_ops.dropout(inputs, self._input_keep_prob, seed=self._seed)
output, new_state = self._cell(inputs, state, scope)
if (not isinstance(self._output_keep_prob, float) or
self._output_keep_prob < 1):
output = nn_ops.dropout(output, self._output_keep_prob, seed=self._seed)
return output, new_state
class EmbeddingWrapper(RNNCell):
"""Operator adding input embedding to the given cell.
Note: in many cases it may be more efficient to not use this wrapper,
but instead concatenate the whole sequence of your inputs in time,
do the embedding on this batch-concatenated sequence, then split it and
feed into your RNN.
"""
def __init__(self, cell, embedding_classes, embedding_size,
embedding, #added by yfeng
initializer=None):
"""Create a cell with an added input embedding.
Args:
cell: an RNNCell, an embedding will be put before its inputs.
embedding_classes: integer, how many symbols will be embedded.
embedding_size: integer, the size of the vectors we embed into.
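      embedding: the embedding variable (tensor) to look symbols up in; passing
        it in lets the forward and backward RNNs share one word embedding
        (parameter added by yfeng).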
initializer: an initializer to use when creating the embedding;
if None, the initializer from variable scope or a default one is used.
Raises:
TypeError: if cell is not an RNNCell.
ValueError: if embedding_classes is not positive.
"""
if not isinstance(cell, RNNCell):
raise TypeError("The parameter cell is not RNNCell.")
if embedding_classes <= 0 or embedding_size <= 0:
raise ValueError("Both embedding_classes and embedding_size must be > 0: "
"%d, %d." % (embedding_classes, embedding_size))
self._cell = cell
self._embedding_classes = embedding_classes
self._embedding_size = embedding_size
self._initializer = initializer
self._embedding = embedding #added by yfeng
@property
def state_size(self):
return self._cell.state_size
@property
def output_size(self):
return self._cell.output_size
def __call__(self, inputs, state, scope=None):
"""Run the cell on embedded inputs."""
with vs.variable_scope(scope or type(self).__name__): # "EmbeddingWrapper"
with ops.device("/cpu:0"):
#annotated by yfeng
"""
if self._initializer:
initializer = self._initializer
elif vs.get_variable_scope().initializer:
initializer = vs.get_variable_scope().initializer
else:
# Default initializer for embeddings should have variance=1.
sqrt3 = math.sqrt(3) # Uniform(-sqrt(3), sqrt(3)) has variance=1.
initializer = init_ops.random_uniform_initializer(-sqrt3, sqrt3)
if type(state) is tuple:
data_type = state[0].dtype
else:
data_type = state.dtype
embedding = vs.get_variable(
"embedding", [self._embedding_classes, self._embedding_size],
initializer=initializer,
dtype=data_type)
"""
#modified by yfeng
embedded = embedding_ops.embedding_lookup(
self._embedding, array_ops.reshape(inputs, [-1]))
#end by yfeng
return self._cell(embedded, state)
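# Illustrative sketch (an assumption, not part of the original change): with the
# yfeng modification, the same embedding variable can be passed to two wrappers so
# that forward and backward RNNs share one word embedding, e.g.
#
#   embedding = vs.get_variable("embedding", [vocab_size, embed_size])
#   fw_cell = EmbeddingWrapper(GRUCell(num_units), vocab_size, embed_size, embedding)
#   bw_cell = EmbeddingWrapper(GRUCell(num_units), vocab_size, embed_size, embedding)
#
# (vocab_size, embed_size and num_units are hypothetical names used only here.)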
class MultiRNNCell(RNNCell):
"""RNN cell composed sequentially of multiple simple cells."""
def __init__(self, cells, state_is_tuple=False):
"""Create a RNN cell composed sequentially of a number of RNNCells.
Args:
cells: list of RNNCells that will be composed in this order.
state_is_tuple: If True, accepted and returned states are n-tuples, where
`n = len(cells)`. By default (False), the states are all
concatenated along the column axis.
Raises:
ValueError: if cells is empty (not allowed), or at least one of the cells
returns a state tuple but the flag `state_is_tuple` is `False`.
"""
if not cells:
raise ValueError("Must specify at least one cell for MultiRNNCell.")
self._cells = cells
self._state_is_tuple = state_is_tuple
if not state_is_tuple:
if any(nest.is_sequence(c.state_size) for c in self._cells):
raise ValueError("Some cells return tuples of states, but the flag "
"state_is_tuple is not set. State sizes are: %s"
% str([c.state_size for c in self._cells]))
@property
def state_size(self):
if self._state_is_tuple:
return tuple(cell.state_size for cell in self._cells)
else:
return sum([cell.state_size for cell in self._cells])
@property
def output_size(self):
return self._cells[-1].output_size
def __call__(self, inputs, state, scope=None):
"""Run this multi-layer cell on inputs, starting from state."""
with vs.variable_scope(scope or type(self).__name__): # "MultiRNNCell"
cur_state_pos = 0
cur_inp = inputs
new_states = []
for i, cell in enumerate(self._cells):
with vs.variable_scope("Cell%d" % i):
if self._state_is_tuple:
if not nest.is_sequence(state):
raise ValueError(
"Expected state to be a tuple of length %d, but received: %s"
% (len(self.state_size), state))
cur_state = state[i]
else:
cur_state = array_ops.slice(
state, [0, cur_state_pos], [-1, cell.state_size])
cur_state_pos += cell.state_size
cur_inp, new_state = cell(cur_inp, cur_state)
new_states.append(new_state)
new_states = (tuple(new_states) if self._state_is_tuple
else array_ops.concat(1, new_states))
return cur_inp, new_states
class _SlimRNNCell(RNNCell):
"""A simple wrapper for slim.rnn_cells."""
def __init__(self, cell_fn):
"""Create a SlimRNNCell from a cell_fn.
Args:
cell_fn: a function which takes (inputs, state, scope) and produces the
outputs and the new_state. Additionally when called with inputs=None and
state=None it should return (initial_outputs, initial_state).
Raises:
TypeError: if cell_fn is not callable
ValueError: if cell_fn cannot produce a valid initial state.
"""
if not callable(cell_fn):
      raise TypeError("cell_fn %s needs to be callable" % cell_fn)
self._cell_fn = cell_fn
self._cell_name = cell_fn.func.__name__
init_output, init_state = self._cell_fn(None, None)
output_shape = init_output.get_shape()
state_shape = init_state.get_shape()
self._output_size = output_shape.with_rank(2)[1].value
self._state_size = state_shape.with_rank(2)[1].value
if self._output_size is None:
raise ValueError("Initial output created by %s has invalid shape %s" %
(self._cell_name, output_shape))
if self._state_size is None:
raise ValueError("Initial state created by %s has invalid shape %s" %
(self._cell_name, state_shape))
@property
def state_size(self):
return self._state_size
@property
def output_size(self):
return self._output_size
def __call__(self, inputs, state, scope=None):
scope = scope or self._cell_name
output, state = self._cell_fn(inputs, state, scope=scope)
return output, state
def _linear(args, output_size, bias, bias_start=0.0, scope=None):
"""Linear map: sum_i(args[i] * W[i]), where W[i] is a variable.
Args:
args: a 2D Tensor or a list of 2D, batch x n, Tensors.
output_size: int, second dimension of W[i].
bias: boolean, whether to add a bias term or not.
bias_start: starting value to initialize the bias; 0 by default.
scope: VariableScope for the created subgraph; defaults to "Linear".
Returns:
A 2D Tensor with shape [batch x output_size] equal to
sum_i(args[i] * W[i]), where W[i]s are newly created matrices.
Raises:
ValueError: if some of the arguments has unspecified or wrong shape.
"""
if args is None or (nest.is_sequence(args) and not args):
raise ValueError("`args` must be specified")
if not nest.is_sequence(args):
args = [args]
# Calculate the total size of arguments on dimension 1.
total_arg_size = 0
shapes = [a.get_shape().as_list() for a in args]
for shape in shapes:
if len(shape) != 2:
raise ValueError("Linear is expecting 2D arguments: %s" % str(shapes))
if not shape[1]:
raise ValueError("Linear expects shape[1] of arguments: %s" % str(shapes))
else:
total_arg_size += shape[1]
dtype = [a.dtype for a in args][0]
# Now the computation.
with vs.variable_scope(scope or "Linear"):
matrix = vs.get_variable(
"Matrix", [total_arg_size, output_size], dtype=dtype)
if len(args) == 1:
res = math_ops.matmul(args[0], matrix)
else:
res = math_ops.matmul(array_ops.concat(1, args), matrix)
if not bias:
return res
bias_term = vs.get_variable(
"Bias", [output_size],
dtype=dtype,
initializer=init_ops.constant_initializer(
bias_start, dtype=dtype))
return res + bias_term
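

if __name__ == "__main__":
  # Minimal graph-construction smoke test (an illustrative assumption, not part
  # of the original module): wires a 2-layer GRU stack together and checks the
  # static output shape. Assumes the TensorFlow r0.10-era API this file targets.
  import tensorflow as tf
  batch_size, input_size, num_units = 4, 8, 16
  inputs = tf.placeholder(tf.float32, [batch_size, input_size])
  cell = MultiRNNCell([GRUCell(num_units) for _ in range(2)],
                      state_is_tuple=True)
  state = cell.zero_state(batch_size, tf.float32)
  output, new_state = cell(inputs, state)
  print(output.get_shape())  # expected: (4, 16)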
|
|
import hmac
import json
from urllib.parse import urlencode
import requests
from django.conf import settings
from django.core.exceptions import ObjectDoesNotExist
from django.http import Http404, HttpResponse, HttpResponseRedirect
from django.shortcuts import render
from django.urls import reverse
from django.utils.crypto import get_random_string
from django.utils.translation import ugettext_lazy as _
from rdmo.core.plugins import Plugin
class Provider(Plugin):
def send_issue(self, request, issue, integration, subject, message, attachments):
raise NotImplementedError
def webhook(self, request, options, payload):
raise NotImplementedError
class OauthProvider(Provider):
def authorize(self, request):
# get random state and store in session
state = get_random_string(length=32)
self.store_in_session(request, 'state', state)
url = self.authorize_url + '?' + urlencode({
'authorize_url': self.authorize_url,
'client_id': self.client_id,
'redirect_uri': request.build_absolute_uri(self.redirect_path),
'state': state,
'scope': self.scope
})
return HttpResponseRedirect(url)
def callback(self, request):
assert request.GET.get('state') == self.pop_from_session(request, 'state')
url = self.token_url + '?' + urlencode({
'token_url': self.token_url,
'client_id': self.client_id,
'client_secret': self.client_secret,
'code': request.GET.get('code')
})
response = requests.post(url, headers={
'Accept': 'application/json'
})
response.raise_for_status()
response_data = response.json()
# store access token in session
self.store_in_session(request, 'access_token', response_data.get('access_token'))
# get post data from session
try:
url, data, issue_id, integration_id = self.pop_from_session(request, 'post')
return self.post(request, url, data, issue_id, integration_id)
except ValueError:
return render(request, 'core/error.html', {
'title': _('Authorization successful'),
'errors': [_('But no redirect could be found.')]
}, status=200)
def get_session_key(self, key):
class_name = self.__class__.__name__.lower()
return '{}_{}'.format(class_name, key)
def store_in_session(self, request, key, data):
session_key = self.get_session_key(key)
request.session[session_key] = data
def get_from_session(self, request, key):
session_key = self.get_session_key(key)
return request.session.get(session_key, None)
def pop_from_session(self, request, key):
session_key = self.get_session_key(key)
return request.session.pop(session_key, None)
class GitHubProvider(OauthProvider):
add_label = _('Add GitHub integration')
send_label = _('Send to GitHub')
    description = _('This integration allows the creation of issues in arbitrary GitHub repositories. The upload of attachments is not supported by GitHub.')
authorize_url = 'https://github.com/login/oauth/authorize'
token_url = 'https://github.com/login/oauth/access_token'
client_id = settings.GITHUB_PROVIDER['client_id']
client_secret = settings.GITHUB_PROVIDER['client_secret']
redirect_path = reverse('oauth_callback', args=['github'])
scope = 'repo'
def send_issue(self, request, issue, integration, subject, message, attachments):
try:
repo = integration.options.get(key='repo').value
except ObjectDoesNotExist:
return render(request, 'core/error.html', {
'title': _('Integration error'),
                'errors': [_('The Integration is not configured correctly.')]
}, status=200)
url = 'https://api.github.com/repos/{}/issues'.format(repo)
data = {
'title': subject,
'body': message
}
return self.post(request, url, data, issue.id, integration.id)
def post(self, request, url, data, issue_id, integration_id):
# get access token from the session
access_token = self.get_from_session(request, 'access_token')
if access_token:
response = requests.post(url, json=data, headers={
'Authorization': 'token {}'.format(access_token),
'Accept': 'application/vnd.github.v3+json'
})
if response.status_code == 401:
pass
else:
try:
response.raise_for_status()
response_html_url = response.json().get('html_url')
self._update_issue(issue_id, integration_id, response_html_url)
return HttpResponseRedirect(response_html_url)
except requests.HTTPError:
message = response.json().get('message')
return render(request, 'core/error.html', {
'title': _('Send error'),
'errors': [_('Something went wrong. GitHub replied: %s.') % message]
}, status=200)
# if the above did not work authorize first
self.store_in_session(request, 'post', (url, data, issue_id, integration_id))
return self.authorize(request)
def _update_issue(self, issue_id, integration_id, resource_url):
from rdmo.projects.models import Issue, Integration, IssueResource
try:
issue = Issue.objects.get(pk=issue_id)
issue.status = Issue.ISSUE_STATUS_IN_PROGRESS
issue.save()
integration = Integration.objects.get(pk=integration_id)
issue_resource = IssueResource(issue=issue, integration=integration, url=resource_url)
issue_resource.save()
except ObjectDoesNotExist:
pass
def webhook(self, request, integration):
try:
secret = integration.options.get(key='secret').value
except ObjectDoesNotExist:
raise Http404
header_signature = request.headers.get('X-Hub-Signature')
if header_signature:
body_signature = 'sha1=' + hmac.new(secret.encode(), request.body, 'sha1').hexdigest()
if hmac.compare_digest(header_signature, body_signature):
try:
payload = json.loads(request.body.decode())
action = payload.get('action')
issue_url = payload.get('issue', {}).get('html_url')
if action and issue_url:
try:
issue_resource = integration.resources.get(url=issue_url)
if action == 'closed':
issue_resource.issue.status = issue_resource.issue.ISSUE_STATUS_CLOSED
else:
issue_resource.issue.status = issue_resource.issue.ISSUE_STATUS_IN_PROGRESS
issue_resource.issue.save()
except ObjectDoesNotExist:
pass
return HttpResponse(status=200)
except json.decoder.JSONDecodeError as e:
return HttpResponse(e, status=400)
raise Http404
@property
def fields(self):
return [
{
'key': 'repo',
'placeholder': 'user_name/repo_name',
'help': _('The GitHub repository to send issues to.')
},
{
'key': 'secret',
'placeholder': 'Secret (random) string',
'help': _('The secret for a GitHub webhook to close a task.'),
'required': False,
'secret': True
}
]
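
# Illustrative configuration sketch (an assumption, not part of this module): the
# provider above reads its OAuth credentials from Django settings, e.g. in settings.py:
#
#   GITHUB_PROVIDER = {
#       'client_id': '<your GitHub OAuth app client id>',
#       'client_secret': '<your GitHub OAuth app client secret>',
#   }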
|
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
import random
import re
import subprocess
import time
import fixtures
from heatclient import exc as heat_exceptions
from neutronclient.common import exceptions as network_exceptions
from oslo_log import log as logging
from oslo_utils import timeutils
import six
from six.moves import urllib
import testscenarios
import testtools
from heat_integrationtests.common import clients
from heat_integrationtests.common import config
from heat_integrationtests.common import exceptions
from heat_integrationtests.common import remote_client
LOG = logging.getLogger(__name__)
_LOG_FORMAT = "%(levelname)8s [%(name)s] %(message)s"
def call_until_true(duration, sleep_for, func, *args, **kwargs):
"""Call the function until it returns True or the duration elapsed.
    Call the given function (forwarding any extra positional and keyword
    arguments) until it returns True (and return True) or until the specified
    duration (in seconds) elapses (and return False).
    :param duration: The number of seconds for which to attempt a
        successful call of the function.
    :param sleep_for: The number of seconds to sleep after an unsuccessful
        invocation of the function.
    :param func: A callable that returns True on success.
    """
now = time.time()
timeout = now + duration
while now < timeout:
if func(*args, **kwargs):
return True
LOG.debug("Sleeping for %d seconds", sleep_for)
time.sleep(sleep_for)
now = time.time()
return False
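# Illustrative usage sketch (the callable below is hypothetical, not part of this
# module): poll a readiness check for up to 60 seconds, sleeping 5 seconds
# between attempts, and fail the test on timeout.
#
#   if not call_until_true(60, 5, lambda: some_client.is_ready()):
#       raise exceptions.TimeoutException()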
def rand_name(name=''):
randbits = str(random.randint(1, 0x7fffffff))
if name:
return name + '-' + randbits
else:
return randbits
class HeatIntegrationTest(testscenarios.WithScenarios,
testtools.TestCase):
def setUp(self):
super(HeatIntegrationTest, self).setUp()
self.conf = config.init_conf()
self.assertIsNotNone(self.conf.auth_url,
'No auth_url configured')
self.assertIsNotNone(self.conf.username,
'No username configured')
self.assertIsNotNone(self.conf.password,
'No password configured')
self.manager = clients.ClientManager(self.conf)
self.identity_client = self.manager.identity_client
self.orchestration_client = self.manager.orchestration_client
self.compute_client = self.manager.compute_client
self.network_client = self.manager.network_client
self.volume_client = self.manager.volume_client
self.object_client = self.manager.object_client
self.metering_client = self.manager.metering_client
self.useFixture(fixtures.FakeLogger(format=_LOG_FORMAT))
self.updated_time = {}
if self.conf.disable_ssl_certificate_validation:
self.verify_cert = False
else:
self.verify_cert = self.conf.ca_file or True
def get_remote_client(self, server_or_ip, username, private_key=None):
if isinstance(server_or_ip, six.string_types):
ip = server_or_ip
else:
network_name_for_ssh = self.conf.network_for_ssh
ip = server_or_ip.networks[network_name_for_ssh][0]
if private_key is None:
private_key = self.keypair.private_key
linux_client = remote_client.RemoteClient(ip, username,
pkey=private_key,
conf=self.conf)
try:
linux_client.validate_authentication()
except exceptions.SSHTimeout:
LOG.exception('ssh connection to %s failed' % ip)
raise
return linux_client
def check_connectivity(self, check_ip):
def try_connect(ip):
try:
urllib.request.urlopen('http://%s/' % ip)
return True
except IOError:
return False
timeout = self.conf.connectivity_timeout
elapsed_time = 0
while not try_connect(check_ip):
time.sleep(10)
elapsed_time += 10
if elapsed_time > timeout:
raise exceptions.TimeoutException()
def _log_console_output(self, servers=None):
if not servers:
servers = self.compute_client.servers.list()
for server in servers:
LOG.info('Console output for %s', server.id)
LOG.info(server.get_console_output())
def _load_template(self, base_file, file_name, sub_dir=None):
sub_dir = sub_dir or ''
filepath = os.path.join(os.path.dirname(os.path.realpath(base_file)),
sub_dir, file_name)
with open(filepath) as f:
return f.read()
def create_keypair(self, client=None, name=None):
if client is None:
client = self.compute_client
if name is None:
name = rand_name('heat-keypair')
keypair = client.keypairs.create(name)
self.assertEqual(keypair.name, name)
def delete_keypair():
keypair.delete()
self.addCleanup(delete_keypair)
return keypair
def assign_keypair(self):
if self.conf.keypair_name:
self.keypair = None
self.keypair_name = self.conf.keypair_name
else:
self.keypair = self.create_keypair()
self.keypair_name = self.keypair.id
@classmethod
def _stack_rand_name(cls):
return rand_name(cls.__name__)
def _get_network(self, net_name=None):
if net_name is None:
net_name = self.conf.fixed_network_name
networks = self.network_client.list_networks()
for net in networks['networks']:
if net['name'] == net_name:
return net
def is_network_extension_supported(self, extension_alias):
try:
self.network_client.show_extension(extension_alias)
except network_exceptions.NeutronClientException:
return False
return True
@staticmethod
def _stack_output(stack, output_key, validate_errors=True):
"""Return a stack output value for a given key."""
value = None
for o in stack.outputs:
if validate_errors and 'output_error' in o:
# scan for errors in the stack output.
raise ValueError(
'Unexpected output errors in %s : %s' % (
output_key, o['output_error']))
if o['output_key'] == output_key:
value = o['output_value']
return value
def _ping_ip_address(self, ip_address, should_succeed=True):
cmd = ['ping', '-c1', '-w1', ip_address]
def ping():
proc = subprocess.Popen(cmd,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
proc.wait()
return (proc.returncode == 0) == should_succeed
return call_until_true(
self.conf.build_timeout, 1, ping)
def _wait_for_all_resource_status(self, stack_identifier,
status, failure_pattern='^.*_FAILED$',
success_on_not_found=False):
for res in self.client.resources.list(stack_identifier):
self._wait_for_resource_status(
stack_identifier, res.resource_name,
status, failure_pattern=failure_pattern,
success_on_not_found=success_on_not_found)
def _wait_for_resource_status(self, stack_identifier, resource_name,
status, failure_pattern='^.*_FAILED$',
success_on_not_found=False):
"""Waits for a Resource to reach a given status."""
fail_regexp = re.compile(failure_pattern)
build_timeout = self.conf.build_timeout
build_interval = self.conf.build_interval
start = timeutils.utcnow()
while timeutils.delta_seconds(start,
timeutils.utcnow()) < build_timeout:
try:
res = self.client.resources.get(
stack_identifier, resource_name)
except heat_exceptions.HTTPNotFound:
if success_on_not_found:
return
# ignore this, as the resource may not have
# been created yet
else:
if res.resource_status == status:
return
wait_for_action = status.split('_')[0]
resource_action = res.resource_status.split('_')[0]
if (resource_action == wait_for_action and
fail_regexp.search(res.resource_status)):
raise exceptions.StackResourceBuildErrorException(
resource_name=res.resource_name,
stack_identifier=stack_identifier,
resource_status=res.resource_status,
resource_status_reason=res.resource_status_reason)
time.sleep(build_interval)
message = ('Resource %s failed to reach %s status within '
'the required time (%s s).' %
(resource_name, status, build_timeout))
raise exceptions.TimeoutException(message)
def verify_resource_status(self, stack_identifier, resource_name,
status='CREATE_COMPLETE'):
try:
res = self.client.resources.get(stack_identifier, resource_name)
except heat_exceptions.HTTPNotFound:
return False
return res.resource_status == status
def _verify_status(self, stack, stack_identifier, status, fail_regexp):
if stack.stack_status == status:
# Handle UPDATE_COMPLETE/FAILED case: Make sure we don't
# wait for a stale UPDATE_COMPLETE/FAILED status.
if status in ('UPDATE_FAILED', 'UPDATE_COMPLETE'):
if self.updated_time.get(
stack_identifier) != stack.updated_time:
self.updated_time[stack_identifier] = stack.updated_time
return True
else:
return True
wait_for_action = status.split('_')[0]
if (stack.action == wait_for_action and
fail_regexp.search(stack.stack_status)):
# Handle UPDATE_COMPLETE/UPDATE_FAILED case.
if status in ('UPDATE_FAILED', 'UPDATE_COMPLETE'):
if self.updated_time.get(
stack_identifier) != stack.updated_time:
self.updated_time[stack_identifier] = stack.updated_time
raise exceptions.StackBuildErrorException(
stack_identifier=stack_identifier,
stack_status=stack.stack_status,
stack_status_reason=stack.stack_status_reason)
else:
raise exceptions.StackBuildErrorException(
stack_identifier=stack_identifier,
stack_status=stack.stack_status,
stack_status_reason=stack.stack_status_reason)
def _wait_for_stack_status(self, stack_identifier, status,
failure_pattern=None,
success_on_not_found=False):
"""Waits for a Stack to reach a given status.
        Note this compares the full $action_$status, e.g.
        CREATE_COMPLETE, not just the COMPLETE suffix which is exposed
        via the status property of Stack in heatclient.
        """
if failure_pattern:
fail_regexp = re.compile(failure_pattern)
elif 'FAILED' in status:
# If we're looking for e.g CREATE_FAILED, COMPLETE is unexpected.
fail_regexp = re.compile('^.*_COMPLETE$')
else:
fail_regexp = re.compile('^.*_FAILED$')
build_timeout = self.conf.build_timeout
build_interval = self.conf.build_interval
start = timeutils.utcnow()
while timeutils.delta_seconds(start,
timeutils.utcnow()) < build_timeout:
try:
stack = self.client.stacks.get(stack_identifier,
resolve_outputs=False)
except heat_exceptions.HTTPNotFound:
if success_on_not_found:
return
# ignore this, as the resource may not have
# been created yet
else:
if self._verify_status(stack, stack_identifier, status,
fail_regexp):
return
time.sleep(build_interval)
message = ('Stack %s failed to reach %s status within '
'the required time (%s s).' %
(stack_identifier, status, build_timeout))
raise exceptions.TimeoutException(message)
def _stack_delete(self, stack_identifier):
try:
self._handle_in_progress(self.client.stacks.delete,
stack_identifier)
except heat_exceptions.HTTPNotFound:
pass
self._wait_for_stack_status(
stack_identifier, 'DELETE_COMPLETE',
success_on_not_found=True)
def _handle_in_progress(self, fn, *args, **kwargs):
build_timeout = self.conf.build_timeout
build_interval = self.conf.build_interval
start = timeutils.utcnow()
while timeutils.delta_seconds(start,
timeutils.utcnow()) < build_timeout:
try:
fn(*args, **kwargs)
except heat_exceptions.HTTPConflict as ex:
# FIXME(sirushtim): Wait a little for the stack lock to be
# released and hopefully, the stack should be usable again.
if ex.error['error']['type'] != 'ActionInProgress':
raise ex
time.sleep(build_interval)
else:
break
def update_stack(self, stack_identifier, template=None, environment=None,
files=None, parameters=None, tags=None,
expected_status='UPDATE_COMPLETE',
disable_rollback=True,
existing=False):
env = environment or {}
env_files = files or {}
parameters = parameters or {}
self.updated_time[stack_identifier] = self.client.stacks.get(
stack_identifier, resolve_outputs=False).updated_time
self._handle_in_progress(
self.client.stacks.update,
stack_id=stack_identifier,
template=template,
files=env_files,
disable_rollback=disable_rollback,
parameters=parameters,
environment=env,
tags=tags,
existing=existing)
kwargs = {'stack_identifier': stack_identifier,
'status': expected_status}
if expected_status in ['ROLLBACK_COMPLETE']:
            # To trigger a rollback you would intentionally fail the stack,
            # hence check for rollback failures.
kwargs['failure_pattern'] = '^ROLLBACK_FAILED$'
self._wait_for_stack_status(**kwargs)
def preview_update_stack(self, stack_identifier, template,
environment=None, files=None, parameters=None,
tags=None, disable_rollback=True,
show_nested=False):
env = environment or {}
env_files = files or {}
parameters = parameters or {}
return self.client.stacks.preview_update(
stack_id=stack_identifier,
template=template,
files=env_files,
disable_rollback=disable_rollback,
parameters=parameters,
environment=env,
tags=tags,
show_nested=show_nested
)
def assert_resource_is_a_stack(self, stack_identifier, res_name,
wait=False):
build_timeout = self.conf.build_timeout
build_interval = self.conf.build_interval
start = timeutils.utcnow()
while timeutils.delta_seconds(start,
timeutils.utcnow()) < build_timeout:
time.sleep(build_interval)
try:
nested_identifier = self._get_nested_identifier(
stack_identifier, res_name)
except Exception:
# We may have to wait, if the create is in-progress
if wait:
time.sleep(build_interval)
else:
raise
else:
return nested_identifier
def _get_nested_identifier(self, stack_identifier, res_name):
rsrc = self.client.resources.get(stack_identifier, res_name)
nested_link = [l for l in rsrc.links if l['rel'] == 'nested']
nested_href = nested_link[0]['href']
nested_id = nested_href.split('/')[-1]
nested_identifier = '/'.join(nested_href.split('/')[-2:])
self.assertEqual(rsrc.physical_resource_id, nested_id)
nested_stack = self.client.stacks.get(nested_id, resolve_outputs=False)
nested_identifier2 = '%s/%s' % (nested_stack.stack_name,
nested_stack.id)
self.assertEqual(nested_identifier, nested_identifier2)
parent_id = stack_identifier.split("/")[-1]
self.assertEqual(parent_id, nested_stack.parent)
return nested_identifier
def group_nested_identifier(self, stack_identifier,
group_name):
# Get the nested stack identifier from a group resource
rsrc = self.client.resources.get(stack_identifier, group_name)
physical_resource_id = rsrc.physical_resource_id
nested_stack = self.client.stacks.get(physical_resource_id,
resolve_outputs=False)
nested_identifier = '%s/%s' % (nested_stack.stack_name,
nested_stack.id)
parent_id = stack_identifier.split("/")[-1]
self.assertEqual(parent_id, nested_stack.parent)
return nested_identifier
def list_group_resources(self, stack_identifier,
group_name, minimal=True):
nested_identifier = self.group_nested_identifier(stack_identifier,
group_name)
if minimal:
return self.list_resources(nested_identifier)
return self.client.resources.list(nested_identifier)
def list_resources(self, stack_identifier):
resources = self.client.resources.list(stack_identifier)
return dict((r.resource_name, r.resource_type) for r in resources)
def stack_create(self, stack_name=None, template=None, files=None,
parameters=None, environment=None, tags=None,
expected_status='CREATE_COMPLETE',
disable_rollback=True, enable_cleanup=True,
environment_files=None):
name = stack_name or self._stack_rand_name()
templ = template or self.template
templ_files = files or {}
params = parameters or {}
env = environment or {}
self.client.stacks.create(
stack_name=name,
template=templ,
files=templ_files,
disable_rollback=disable_rollback,
parameters=params,
environment=env,
tags=tags,
environment_files=environment_files
)
if expected_status not in ['ROLLBACK_COMPLETE'] and enable_cleanup:
self.addCleanup(self._stack_delete, name)
stack = self.client.stacks.get(name, resolve_outputs=False)
stack_identifier = '%s/%s' % (name, stack.id)
kwargs = {'stack_identifier': stack_identifier,
'status': expected_status}
if expected_status:
if expected_status in ['ROLLBACK_COMPLETE']:
                # To trigger a rollback you would intentionally fail the stack,
                # hence check for rollback failures.
kwargs['failure_pattern'] = '^ROLLBACK_FAILED$'
self._wait_for_stack_status(**kwargs)
return stack_identifier
def stack_adopt(self, stack_name=None, files=None,
parameters=None, environment=None, adopt_data=None,
wait_for_status='ADOPT_COMPLETE'):
if (self.conf.skip_test_stack_action_list and
'ADOPT' in self.conf.skip_test_stack_action_list):
self.skipTest('Testing Stack adopt disabled in conf, skipping')
name = stack_name or self._stack_rand_name()
templ_files = files or {}
params = parameters or {}
env = environment or {}
self.client.stacks.create(
stack_name=name,
files=templ_files,
disable_rollback=True,
parameters=params,
environment=env,
adopt_stack_data=adopt_data,
)
self.addCleanup(self._stack_delete, name)
stack = self.client.stacks.get(name, resolve_outputs=False)
stack_identifier = '%s/%s' % (name, stack.id)
self._wait_for_stack_status(stack_identifier, wait_for_status)
return stack_identifier
def stack_abandon(self, stack_id):
if (self.conf.skip_test_stack_action_list and
'ABANDON' in self.conf.skip_test_stack_action_list):
self.addCleanup(self._stack_delete, stack_id)
self.skipTest('Testing Stack abandon disabled in conf, skipping')
info = self.client.stacks.abandon(stack_id=stack_id)
return info
def stack_suspend(self, stack_identifier):
if (self.conf.skip_test_stack_action_list and
'SUSPEND' in self.conf.skip_test_stack_action_list):
self.addCleanup(self._stack_delete, stack_identifier)
self.skipTest('Testing Stack suspend disabled in conf, skipping')
self._handle_in_progress(self.client.actions.suspend, stack_identifier)
# improve debugging by first checking the resource's state.
self._wait_for_all_resource_status(stack_identifier,
'SUSPEND_COMPLETE')
self._wait_for_stack_status(stack_identifier, 'SUSPEND_COMPLETE')
def stack_resume(self, stack_identifier):
if (self.conf.skip_test_stack_action_list and
'RESUME' in self.conf.skip_test_stack_action_list):
self.addCleanup(self._stack_delete, stack_identifier)
self.skipTest('Testing Stack resume disabled in conf, skipping')
self._handle_in_progress(self.client.actions.resume, stack_identifier)
# improve debugging by first checking the resource's state.
self._wait_for_all_resource_status(stack_identifier,
'RESUME_COMPLETE')
self._wait_for_stack_status(stack_identifier, 'RESUME_COMPLETE')
def wait_for_event_with_reason(self, stack_identifier, reason,
rsrc_name=None, num_expected=1):
build_timeout = self.conf.build_timeout
build_interval = self.conf.build_interval
start = timeutils.utcnow()
while timeutils.delta_seconds(start,
timeutils.utcnow()) < build_timeout:
try:
rsrc_events = self.client.events.list(stack_identifier,
resource_name=rsrc_name)
except heat_exceptions.HTTPNotFound:
LOG.debug("No events yet found for %s" % rsrc_name)
else:
matched = [e for e in rsrc_events
if e.resource_status_reason == reason]
if len(matched) == num_expected:
return matched
time.sleep(build_interval)
def check_autoscale_complete(self, stack_id, expected_num):
res_list = self.client.resources.list(stack_id)
all_res_complete = all(res.resource_status in ('UPDATE_COMPLETE',
'CREATE_COMPLETE')
for res in res_list)
all_res = len(res_list) == expected_num
return all_res and all_res_complete
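
# Hedged sketch, not part of the original suite: the _wait_for_* helpers above
# all share the same poll-with-timeout shape, illustrated by the standalone
# helper below. The names `_poll_until` and `predicate` are hypothetical; the
# real helpers additionally classify failure states with regular expressions
# before sleeping.
def _poll_until(predicate, timeout, interval):
    """Call `predicate` until it returns True or `timeout` seconds elapse."""
    deadline = time.time() + timeout
    while time.time() < deadline:
        if predicate():
            return True
        time.sleep(interval)
    return False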
|
|
import numpy as np
from scipy import constants
from scipy.optimize import curve_fit
import os
from numpy.polynomial import polynomial as poly
from scipy.special import lambertw
# use absolute file path so tests work
path_const = os.path.join(os.path.dirname(__file__), '..', 'constants')
def AM15G_resample(wl):
'''
Returns AM1.5G spectrum at given wavelengths,
scaled to the new data interval (assumes even data spacing)
xxx is this the best way?
inputs:
wavelength: (array like)
the measured wavelengths in nanometers.
outputs:
current density per interval: (array like)
'''
interval = abs(wl[1] - wl[0]) # a ratio to 1nm (default)
AM15G_wl = np.genfromtxt(os.path.join(path_const, 'AM1.5G_spectrum.dat'),
usecols=(0,), skip_header=1)
AM15G_Jph = np.genfromtxt(os.path.join(path_const, 'AM1.5G_spectrum.dat'),
usecols=(1,), skip_header=1)
return interval * np.interp(wl, AM15G_wl, AM15G_Jph)
def find_nearest(x_val, xdata, ydata=None):
'''
Finds the nearest index in 'xdata' to 'value'
Returns corresponding 'ydata' value if given
'''
xdata = np.array(xdata)
nearest = (np.abs(xdata - x_val)).argmin()
if ydata is not None:
ydata = np.array(ydata)
assert xdata.shape[0] == ydata.shape[0]
nearest = ydata[nearest]
return nearest
def wl_to_alpha(wavelength):
'''
Returns the band to band absorption coefficient for Silicon given a
wavelength. Linear interpolation is performed if the exact values are
not provided.
The values are taken from Green 2008
DOI:10.1016/j.solmat.2008.06.009
inputs:
wavelength: (float)
wavelength in nm
    outputs:
        absorption coefficient: (float)
            the band to band absorption coefficient at the given wavelength
'''
alpha_data = np.genfromtxt(
os.path.join(path_const, 'Si_alpha_Green_2008.dat'),
usecols=(0, 1), skip_header=1).transpose()
wl = alpha_data[0]
alpha = alpha_data[1]
return np.interp(wavelength, wl, alpha)
def fit_Basore(wavelength, IQE, theta=0, wlbounds=(1040, 1100)):
'''
    Linear fit of 1/IQE against 1/absorption coefficient to extract an
    effective bulk diffusion length (Basore method).
    This is just a linear fit over a limited wavelength range.
Inputs:
wavelength: (array like)
            the measured wavelengths in nanometers.
IQE: (array like)
the measured internal quantum efficiency in units %.
theta: (float, optional)
The average angle the light travels through the sample.
This can be used to partially correct for textured surfaces.
The default is 0. In units of degrees,
wlbounds: (tuple, optional)
The bounds between which the linear fit is performed.
            The first value should be the minimum and the second the maximum.
The default is 1040 nm to 1100 nm.
Returns:
a tuple of
a dictionary containing
L_eff: the effective diffusion length (cm)
a plotting function
See Basore 1993
doi:10.1109/PVSC.1993.347063
'''
index = (wavelength >= wlbounds[0]) * (wavelength <= wlbounds[1])
IQE = np.copy(IQE[index])
wavelength = np.copy(wavelength[index])
fit_params = ['Leff']
alpha = wl_to_alpha(wavelength) / float(np.cos(np.radians(theta)))
coefs = poly.polyfit(1. / alpha, 1. / IQE, 1)
# xxx check these calcs
fit_output = {'Leff': coefs[1],
'eta_c': 1 / coefs[0]}
def plot_Basore_fit(ax):
ax.plot(1. / alpha, 1. / IQE, '-o', label='data')
ax.plot(1. / alpha, poly.polyval(1. / alpha, coefs), label='fit_Basore')
        ax.set_xlabel('$1/\\alpha$ [cm]')
ax.set_ylabel('$1/IQE$ []')
ax.grid(True)
ax.legend(loc='best')
return fit_output, plot_Basore_fit
def Rs_calc_1(Vmp, Jmp, sunsVoc_V, sunsVoc_J):
# TODO: not finished
# sunsVoc method
V_sunsVoc = find_nearest(Jmp, sunsVoc_J, sunsVoc_V)
return (V_sunsVoc - Vmp) / Jmp
def Rs_calc_2(Voc, Jsc, FF, pFF):
'''
TODO: improve
From:
Solar Cells: Operating Principles, Technology and System Applications
taken from ernst2016efficiency
'''
return Voc / Jsc * (1 - FF / pFF)
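
def _example_Rs_from_pFF():
    '''
    Hedged example (the numbers below are illustrative, not measured data):
    estimates the series resistance from the gap between the measured fill
    factor and the pseudo fill factor using Rs_calc_2 above.
    '''
    Voc = 0.66   # V
    Jsc = 0.038  # A cm^-2
    FF = 0.78
    pFF = 0.82
    # roughly 0.85 Ohm cm^2 for these numbers
    return Rs_calc_2(Voc, Jsc, FF, pFF)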
def _Vth(T):
# this is here so it is the only place I need to define a default
# temperature
    if T is None:
T = 300
return constants.k * T / constants.e
def ideal_FF(Voc, T=None):
'''
Calculates the ideal fill factor.
inputs:
Voc: (float)
Open circuit voltage in volts
T: (float, optional)
Temperature in Kelvin, default of 300K
output:
FF_0:
The ideal fill factor
Valid for:
Voc * q / k / T > 10
Accuracy: 1e-4
Source: Green, 1982
http://dx.doi.org/10.1016/0379-6787(82)90057-6
'''
voc = Voc / _Vth(T)
FF_0 = (voc - np.log(voc + 0.72)) / (voc + 1)
return FF_0
def ideal_FF_2016(Voc, T=None):
'''
Calculates the ideal fill factor.
inputs:
Voc: (float)
Open circuit voltage in volts
T: (float, optional)
Temperature in Kelvin, default of 300K
output:
FF_0:
The ideal fill factor
Valid for:
??
Accuracy: ??
Source: Green, 2016
http://dx.doi.org/10.1063/1.4942660
'''
voc = Voc / _Vth(T)
z0 = np.exp(voc + 1)
# inverse f0
if0 = 1. - np.exp(-voc)
FF_0 = (lambertw(z0) - 1)**2 / if0 / voc / lambertw(z0)
return FF_0.real
def ideal_FF_series(Voc, Jsc, Rs, T=None):
'''
Calculates the ideal fill factor accounting for series resistance
inputs:
Voc: (float)
Open circuit voltage in volts
Jsc: (float)
The short circuit current in amps
Rs: (float)
The series resistance in Ohms?
T: (float)
Temperature in Kelvin
output:
FF_s:
The ideal fill factor accounting for series resistance
Valid for:
Voc * q / k / T > 10
Rs * Jsc / Voc < 0.4
Accuracy: 4e-3
Source: Green, 1982
http://dx.doi.org/10.1016/0379-6787(82)90057-6
'''
FF_0 = ideal_FF(Voc, T)
rs = Rs / Voc * Jsc
FF_s = FF_0 * (1 - 1.1 * rs) + rs**2 / 5.4
return FF_s
def ideal_FF_series_2016(Voc, Jsc, Rs, T=None):
'''
    Calculates the ideal fill factor accounting for series resistance.
    inputs:
        Voc: (float)
            Open circuit voltage in volts
        Jsc: (float)
            The short circuit current in amps
        Rs: (float)
            The series resistance in Ohms?
        T: (float, optional)
            Temperature in Kelvin, default of 300K
    output:
        FF_s:
            The ideal fill factor accounting for series resistance
Valid for:
??
Accuracy: Approximately 4 digit accuracy is maintained in
technologically interesting cases, where losses are <5% for
normalised Voc>10.
Source: Green, 2016
http://dx.doi.org/10.1063/1.4942660
'''
FF_0 = ideal_FF_2016(Voc, T)
# normalised values
voc = Voc / _Vth(T)
rs = Rs / Voc * Jsc
# other factors
if0 = 1. - np.exp(-voc)
ifs = 1. - np.exp(-voc * (1 - rs))
z0 = np.exp(voc + 1)
# calculate it
FF_s = FF_0 * (1 - voc / lambertw(z0) * rs / if0) * if0 / ifs
return FF_s.real
def ideal_FF_series_shunt(Voc, Jsc, Rs, Rsh, T=None):
'''
Calculates the ideal fill factor, accounting for shunt and series resistance.
inputs:
Voc: (float)
Open circuit voltage in volts
Jsc: (float)
The short circuit current in amps
Rs: (float)
The series resistance in Ohms?
Rsh: (float)
The shunt resistance in Ohms?
T: (float)
Temperature in Kelvin
output:
FF_sh_s:
The ideal fill factor accounting for shunt and series resistance
Valid for:
        Voc * q / k / T > 10
        Rs * Jsc / Voc + Voc / Rsh / Jsc < 0.4
Accuracy: 3e-2
Source: Green, 1982
http://dx.doi.org/10.1016/0379-6787(82)90057-6
'''
FF_s = ideal_FF_series(Voc, Jsc, Rs, T)
voc = Voc / _Vth(T)
rsh = Rsh / Voc * Jsc
FF_s_sh = FF_s * (1 - (voc - 0.7) / voc * FF_s / rsh)
return FF_s_sh
def ideal_FF_shunt_2016(Voc, Jsc, Rsh, T=None):
    '''
    Calculates the ideal fill factor accounting for shunt resistance.
    inputs:
        Voc: (float)
            Open circuit voltage in volts
        Jsc: (float)
            The short circuit current in amps
        Rsh: (float)
            The shunt resistance in Ohms?
        T: (float, optional)
            Temperature in Kelvin, default of 300K
    output:
        FF_sh:
            The ideal fill factor accounting for shunt resistance
    Valid for:
        ??
    Accuracy: ??
    Source: Green, 2016
    http://dx.doi.org/10.1063/1.4942660
'''
FF_0 = ideal_FF_2016(Voc, T)
# normalised values
voc = Voc / _Vth(T)
rsh = Rsh / Voc * Jsc
# other factors
if0 = 1. - np.exp(-voc)
z0 = np.exp(voc + 1)
# calculate it
FF_sh = FF_0 * (1 - lambertw(z0) * if0 / voc /
rsh / if0) / (1 - 1 / (voc * rsh))
return FF_sh.real
def FF_loss_series(Voc, Jsc, Jmp, Rs):
'''
Calculates the loss in fill factor from series resistance
inputs:
Voc: (float)
Open circuit voltage in [V]
Jsc: (float)
            Short circuit current density in [A cm^{-2}]
Jmp: (float)
Maximum power point current density in [A cm^{-2}]
Rs: (float)
Series resistance in [Ohm cm^2]
output:
FF_Rs: (float)
The increase in fill factor expected by removing the series resistance
Dimensionless units
Source: Khanna, 2013
http://dx.doi.org/10.1109/JPHOTOV.2013.2270348
'''
FF_Rs = Jmp**2 * Rs / (Voc * Jsc)
return FF_Rs
def FF_loss_shunt(Voc, Jsc, Vmp, Jmp, Rs, Rsh):
'''
Calculates the loss in fill factor from shunt resistance
inputs:
Voc: (float)
Open circuit voltage in [V]
Jsc: (float)
            Short circuit current density in [A cm^{-2}]
Vmp: (float)
Maximum power point voltage in [V]
Jmp: (float)
Maximum power point current density in [A cm^{-2}]
Rs: (float)
Series resistance in [Ohm cm^2]
Rsh: (float)
Shunt resistance in [Ohm cm^2]
output:
        FF_Rsh: (float)
            The increase in fill factor expected by removing the shunt resistance
Dimensionless units
Source: Khanna, 2013
http://dx.doi.org/10.1109/JPHOTOV.2013.2270348
'''
FF_Rsh = (Vmp + Rs * Jmp)**2 / (Voc * Jsc * Rsh)
return FF_Rsh
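
def _example_ff_loss_budget():
    '''
    Hedged example (the cell parameters below are illustrative, not measured
    data): combines the helpers above into a simple fill factor loss budget
    for a hypothetical cell.
    '''
    Voc, Jsc = 0.66, 0.038      # V, A cm^-2
    Vmp, Jmp = 0.55, 0.036      # V, A cm^-2
    Rs, Rsh = 0.8, 5000.        # Ohm cm^2
    FF_0 = ideal_FF(Voc)                                  # upper bound set by Voc alone
    dFF_Rs = FF_loss_series(Voc, Jsc, Jmp, Rs)            # loss from series resistance
    dFF_Rsh = FF_loss_shunt(Voc, Jsc, Vmp, Jmp, Rs, Rsh)  # loss from shunt resistance
    return FF_0, dFF_Rs, dFF_Rsh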
def ideality_factor(V, J, Vth):
'''
Calculates the ideality factor
This assumes that: $e^{V/mVt} >> 1$
This log form is used as it appears to be more robust against noise.
'''
with np.errstate(divide='ignore', invalid='ignore'):
m = 1. / Vth / np.gradient(np.log(J)) * np.gradient(V)
return m
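
def _example_ideality_factor():
    '''
    Hedged example with synthetic data: for an ideal diode J = J0*exp(V/Vth)
    the extracted ideality factor should be close to 1 at every point. The
    values below are illustrative only.
    '''
    Vth = _Vth(None)                 # thermal voltage at the default 300 K
    V = np.linspace(0.3, 0.6, 200)
    J = 1e-12 * np.exp(V / Vth)      # ideal diode law, m = 1
    return ideality_factor(V, J, Vth)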
if __name__ == '__main__':
print(ideal_FF(0.6, 300))
print(ideal_FF_2016(0.6, 300))
print(ideal_FF_series_2016(0.6, 0.04, 1, 300))
print(ideal_FF_series(0.6, 0.04, 1, 300))
|
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright 2014 The Plaso Project Authors.
# Please see the AUTHORS file for details on individual authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Parser for Mac OS X Keychain files."""
# INFO: Only internet and application passwords are supported,
# because they are the only data that contains timestamp events.
# Keychain can also store "secret notes". These notes are stored
# in the same record type as application passwords, so they are already
# supported. Stored Wi-Fi passwords are also application passwords.
# TODO: the AccessControl for each entry has not been implemented. Until now,
# I know that the AccessControl from Internet and App password are stored
# using other tables (Symmetric, certificates, etc). Access Control
# indicates which specific tool, or all, is able to use this entry.
import binascii
import construct
import logging
import os
from plaso.lib import errors
from plaso.lib import event
from plaso.lib import eventdata
from plaso.lib import parser
from plaso.lib import timelib
__author__ = 'Joaquin Moreno Garijo ([email protected])'
class KeychainInternetRecordEvent(event.EventObject):
"""Convenience class for an keychain internet record event."""
DATA_TYPE = 'mac:keychain:internet'
def __init__(
self, timestamp, timestamp_desc, entry_name, account_name,
text_description, comments, where, protocol, type_protocol, ssgp_hash):
"""Initializes the event object.
Args:
      timestamp: The timestamp value, as microseconds since epoch in UTC.
      timestamp_desc: Timelib type (usage string) of the timestamp.
entry_name: Name of the entry.
account_name: Name of the account.
text_description: Short description about the entry.
comments: String that contains the comments added by the user.
where: The domain name or IP where the password is used.
      protocol: The internet protocol used (e.g. htps).
      type_protocol: The sub-protocol used (e.g. form).
ssgp_hash: String with hexadecimal values from the password / cert hash.
"""
super(KeychainInternetRecordEvent, self).__init__()
self.timestamp = timestamp
self.timestamp_desc = timestamp_desc
self.entry_name = entry_name
self.account_name = account_name
self.text_description = text_description
self.where = where
self.protocol = protocol
self.type_protocol = type_protocol
self.comments = comments
self.ssgp_hash = ssgp_hash
class KeychainApplicationRecordEvent(event.EventObject):
"""Convenience class for an keychain application password record event."""
DATA_TYPE = 'mac:keychain:application'
def __init__(
self, timestamp, timestamp_desc, entry_name,
account_name, text_description, comments, ssgp_hash):
"""Initializes the event object.
Args:
      timestamp: The timestamp value, as microseconds since epoch in UTC.
      timestamp_desc: Timelib type (usage string) of the timestamp.
entry_name: Name of the entry.
account_name: Name of the account.
text_description: Short description about the entry.
comments: string that contains the comments added by the user.
ssgp_hash: String with hexadecimal values from the password / cert hash.
"""
super(KeychainApplicationRecordEvent, self).__init__()
self.timestamp = timestamp
self.timestamp_desc = timestamp_desc
self.entry_name = entry_name
self.account_name = account_name
self.text_description = text_description
self.comments = comments
self.ssgp_hash = ssgp_hash
class KeychainParser(parser.BaseParser):
"""Parser for Keychain files."""
NAME = 'mac_keychain'
KEYCHAIN_MAGIC_HEADER = 'kych'
KEYCHAIN_MAJOR_VERSION = 1
KEYCHAIN_MINOR_VERSION = 0
RECORD_TYPE_APPLICATION = 0x80000000
RECORD_TYPE_INTERNET = 0x80000001
# DB HEADER.
KEYCHAIN_DB_HEADER = construct.Struct(
'db_header',
construct.String('magic', 4),
construct.UBInt16('major_version'),
construct.UBInt16('minor_version'),
construct.UBInt32('header_size'),
construct.UBInt32('schema_offset'),
construct.Padding(4))
# DB SCHEMA.
KEYCHAIN_DB_SCHEMA = construct.Struct(
'db_schema',
construct.UBInt32('size'),
construct.UBInt32('number_of_tables'))
# For each number_of_tables, the schema has a TABLE_OFFSET with the
# offset starting in the DB_SCHEMA.
TABLE_OFFSET = construct.UBInt32('table_offset')
TABLE_HEADER = construct.Struct(
'table_header',
construct.UBInt32('table_size'),
construct.UBInt32('record_type'),
construct.UBInt32('number_of_records'),
construct.UBInt32('first_record'),
construct.UBInt32('index_offset'),
construct.Padding(4),
construct.UBInt32('recordnumbercount'))
RECORD_HEADER = construct.Struct(
'record_header',
construct.UBInt32('entry_length'),
construct.Padding(12),
construct.UBInt32('ssgp_length'),
construct.Padding(4),
construct.UBInt32('creation_time'),
construct.UBInt32('last_mod_time'),
construct.UBInt32('text_description'),
construct.Padding(4),
construct.UBInt32('comments'),
construct.Padding(8),
construct.UBInt32('entry_name'),
construct.Padding(20),
construct.UBInt32('account_name'),
construct.Padding(4))
RECORD_HEADER_APP = construct.Struct(
'record_entry_app',
RECORD_HEADER,
construct.Padding(4))
RECORD_HEADER_INET = construct.Struct(
'record_entry_inet',
RECORD_HEADER,
construct.UBInt32('where'),
construct.UBInt32('protocol'),
construct.UBInt32('type'),
construct.Padding(4),
construct.UBInt32('url'))
TEXT = construct.PascalString(
'text', length_field = construct.UBInt32('length'))
TIME = construct.Struct(
'timestamp',
construct.String('year', 4),
construct.String('month', 2),
construct.String('day', 2),
construct.String('hour', 2),
construct.String('minute', 2),
construct.String('second', 2),
construct.Padding(2))
TYPE_TEXT = construct.String('type', 4)
# TODO: add more protocols.
_PROTOCOL_TRANSLATION_DICT = {
u'htps': u'https',
u'smtp': u'smtp',
u'imap': u'imap',
u'http': u'http'}
def Parse(self, file_entry):
"""Extract data from a Keychain file.
Args:
file_entry: a file entry object.
Yields:
      An event object (instance of KeychainInternetRecordEvent or
      KeychainApplicationRecordEvent) for each record.
"""
file_object = file_entry.GetFileObject()
table_offsets = self._VerifyStructure(file_object)
if not table_offsets:
raise errors.UnableToParseFile(
u'The file is not a Keychain file.')
for table_offset in table_offsets:
# Skipping X bytes, unknown data at this point.
file_object.seek(table_offset - file_object.tell(), os.SEEK_CUR)
try:
table = self.TABLE_HEADER.parse_stream(file_object)
except construct.FieldError as exception:
logging.warning((
u'Unable to parse table header, moving to the next one, '
u'reason: {:s}').format(
exception))
continue
# Table_offset: absolute byte in the file where the table starts.
# table.first_record: first record in the table, relative to the
# first byte of the table.
file_object.seek(
table_offset + table.first_record - file_object.tell(), os.SEEK_CUR)
if table.record_type == self.RECORD_TYPE_INTERNET:
for _ in range(table.number_of_records):
for object_record in self._ReadEntryInternet(file_object):
yield object_record
elif table.record_type == self.RECORD_TYPE_APPLICATION:
for _ in range(table.number_of_records):
for object_record in self._ReadEntryApplication(file_object):
yield object_record
file_object.close()
def _VerifyStructure(self, file_object):
"""Verify that we are dealing with an Keychain entry.
Args:
file_object: a file-like object that points to an Keychain file.
Returns:
A list of table positions if it is a keychain, None otherwise.
"""
# INFO: The HEADER KEYCHAIN:
# [DBHEADER] + [DBSCHEMA] + [OFFSET TABLE A] + ... + [OFFSET TABLE Z]
# Where the table offset is relative to the first byte of the DB Schema,
# then we must add to this offset the size of the [DBHEADER].
try:
db_header = self.KEYCHAIN_DB_HEADER.parse_stream(file_object)
except (IOError, construct.FieldError):
return
if (db_header.minor_version != self.KEYCHAIN_MINOR_VERSION or
db_header.major_version != self.KEYCHAIN_MAJOR_VERSION or
db_header.magic != self.KEYCHAIN_MAGIC_HEADER):
return
# Read the database schema and extract the offset for all the tables.
# They are ordered by file position from the top to the bottom of the file.
try:
db_schema = self.KEYCHAIN_DB_SCHEMA.parse_stream(file_object)
except (IOError, construct.FieldError):
return
table_offsets = []
for _ in range(db_schema.number_of_tables):
try:
table_offset = self.TABLE_OFFSET.parse_stream(file_object)
except (IOError, construct.FieldError):
return
table_offsets.append(table_offset + self.KEYCHAIN_DB_HEADER.sizeof())
return table_offsets
def _GetTimestampFromEntry(self, structure):
"""Parse a time entry structure into a microseconds since Epoch in UTC.
Args:
      structure: TIME entry structure, with the attributes:
        year: String with the number of the year.
        month: String with the number of the month.
        day: String with the number of the day.
        hour: String with the number of the hour.
        minute: String with the number of the minute.
        second: String with the number of the second.
Returns:
Microseconds since Epoch in UTC.
"""
try:
return timelib.Timestamp.FromTimeParts(
int(structure.year, 10), int(structure.month, 10),
int(structure.day, 10), int(structure.hour, 10),
int(structure.minute, 10), int(structure.second, 10))
except ValueError:
      logging.warning(u'Invalid keychain time {0!s}'.format(structure))
return 0
def _ReadEntryHeader(self, file_object, record, offset):
"""Read the common record attributes.
Args:
file_object: keychain file object.
record: structure with the header of the record.
offset: first byte of the record.
Returns:
A list of:
ssgp_hash: hash of the encrypted data (passwd, cert, note).
creation_time: when the entry was created.
last_mod_time: last time the entry was updated.
text_description: a brief description of the entry.
entry_name: name of the entry
account_name: name of the account.
"""
    # Info: The hash header always starts with the string "ssgp" followed by
    # the hash. Furthermore, the fields are always a multiple of four bytes;
    # if a value is not a multiple of four it is padded with 0x00.
ssgp_hash = binascii.hexlify(file_object.read(record.ssgp_length)[4:])
file_object.seek(
record.creation_time - file_object.tell() + offset - 1, os.SEEK_CUR)
creation_time = self._GetTimestampFromEntry(
self.TIME.parse_stream(file_object))
file_object.seek(
record.last_mod_time - file_object.tell() + offset - 1, os.SEEK_CUR)
last_mod_time = self._GetTimestampFromEntry(
self.TIME.parse_stream(file_object))
    # The text description field does not always contain data.
if record.text_description:
file_object.seek(
record.text_description - file_object.tell() + offset -1,
os.SEEK_CUR)
text_description = self.TEXT.parse_stream(file_object)
else:
text_description = u'N/A'
# The comment field does not always contain data.
if record.comments:
file_object.seek(
          record.comments - file_object.tell() + offset - 1,
os.SEEK_CUR)
comments = self.TEXT.parse_stream(file_object)
else:
comments = u'N/A'
file_object.seek(
record.entry_name - file_object.tell() + offset - 1, os.SEEK_CUR)
entry_name = self.TEXT.parse_stream(file_object)
file_object.seek(
record.account_name - file_object.tell() + offset - 1, os.SEEK_CUR)
account_name = self.TEXT.parse_stream(file_object)
return (
ssgp_hash, creation_time, last_mod_time,
text_description, comments, entry_name, account_name)
def _ReadEntryInternet(self, file_object):
"""Extracts the information from an Internet password entry.
Args:
file_object: keychain file object.
Yields:
A KeychainEvent (instance of EventObject) for each record.
"""
offset = file_object.tell()
try:
record = self.RECORD_HEADER_INET.parse_stream(file_object)
except (IOError, construct.FieldError):
logging.warning(u'Unknown record header at 0x{:x}'.format(offset))
return
(ssgp_hash, creation_time, last_mod_time, text_description,
comments, entry_name, account_name) = self._ReadEntryHeader(
file_object, record.record_header, offset)
if not record.where:
where = u'N/A'
protocol = u'N/A'
type_protocol = u'N/A'
else:
file_object.seek(
record.where - file_object.tell() + offset - 1, os.SEEK_CUR)
where = self.TEXT.parse_stream(file_object)
file_object.seek(
record.protocol - file_object.tell() + offset - 1, os.SEEK_CUR)
protocol = self.TYPE_TEXT.parse_stream(file_object)
file_object.seek(
record.type - file_object.tell() + offset - 1, os.SEEK_CUR)
type_protocol = self.TEXT.parse_stream(file_object)
type_protocol = self._PROTOCOL_TRANSLATION_DICT.get(
type_protocol, type_protocol)
if record.url:
file_object.seek(
record.url - file_object.tell() + offset - 1, os.SEEK_CUR)
url = self.TEXT.parse_stream(file_object)
where = u'{}{}'.format(where, url)
# Move to the end of the record, and then, prepared for the next record.
file_object.seek(
record.record_header.entry_length + offset - file_object.tell(),
os.SEEK_CUR)
yield KeychainInternetRecordEvent(
creation_time, eventdata.EventTimestamp.CREATION_TIME,
entry_name, account_name, text_description,
comments, where, protocol, type_protocol, ssgp_hash)
if creation_time != last_mod_time:
yield KeychainInternetRecordEvent(
last_mod_time, eventdata.EventTimestamp.MODIFICATION_TIME,
entry_name, account_name, text_description,
          comments, where, protocol, type_protocol, ssgp_hash)
def _ReadEntryApplication(self, file_object):
"""Extracts the information from an application password entry.
Args:
file_object: keychain file object.
Yields:
A KeychainEvent (instance of EventObject) for each record.
"""
offset = file_object.tell()
try:
record = self.RECORD_HEADER_APP.parse_stream(file_object)
except (IOError, construct.FieldError):
logging.warning(u'Unknown record header at 0x{0:08x}'.format(offset))
return
(ssgp_hash, creation_time, last_mod_time, text_description,
comments, entry_name, account_name) = self._ReadEntryHeader(
file_object, record.record_header, offset)
# Move to the end of the record, and then, prepared for the next record.
file_object.seek(
record.record_header.entry_length + offset - file_object.tell(),
os.SEEK_CUR)
yield KeychainApplicationRecordEvent(
creation_time, eventdata.EventTimestamp.CREATION_TIME,
entry_name, account_name, text_description, comments, ssgp_hash)
if creation_time != last_mod_time:
yield KeychainApplicationRecordEvent(
last_mod_time, eventdata.EventTimestamp.MODIFICATION_TIME,
entry_name, account_name, text_description, comments, ssgp_hash)
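
def _ExampleKeychainTimeLayout():
  """Hedged illustration, not used by the parser: shows how the 16-byte time
  value parsed by KeychainParser.TIME splits into ASCII digit fields. The
  sample value below is made up for illustration."""
  sample = b'20140101123059\x00\x00'
  return {
      'year': sample[0:4], 'month': sample[4:6], 'day': sample[6:8],
      'hour': sample[8:10], 'minute': sample[10:12], 'second': sample[12:14]}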
|
|
# Copyright (c) 2015 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Functions specific to handle goma related info.
"""
import base64
import datetime
import getpass
import glob
import gzip
import json
import os
import re
import shutil
import socket
import sys
import tempfile
import time
from common import chromium_utils
from slave import slave_utils
# The Google Cloud Storage bucket to store logs related to goma.
GOMA_LOG_GS_BUCKET = 'chrome-goma-log'
# Platform dependent location of run command.
PLATFORM_RUN_CMD = {
# os.name: run_cmd to use.
'nt': 'C:\\infra-python\\run.py',
'posix': '/opt/infra-python/run.py',
}
TIMESTAMP_PATTERN = re.compile(r'(\d{4}/\d{2}/\d{2} \d{2}:\d{2}:\d{2})')
TIMESTAMP_FORMAT = '%Y/%m/%d %H:%M:%S'
def GetShortHostname():
"""Get this machine's short hostname in lower case."""
return socket.gethostname().split('.')[0].lower()
def GetGomaTmpDirectory():
"""Get goma's temp directory."""
candidates = ['GOMA_TMP_DIR', 'TEST_TMPDIR', 'TMPDIR', 'TMP']
for candidate in candidates:
value = os.environ.get(candidate)
if value and os.path.isdir(value):
return value
return '/tmp'
def GetLatestGlogInfoFile(pattern):
"""Get a filename of the latest google glog INFO file.
Args:
pattern: a string of INFO file pattern.
Returns:
the latest glog INFO filename in fullpath.
"""
dirname = GetGomaTmpDirectory()
info_pattern = os.path.join(dirname, '%s.*.INFO.*' % pattern)
candidates = glob.glob(info_pattern)
if not candidates:
return None
return sorted(candidates, reverse=True)[0]
def GetLatestGomaCompilerProxyInfo():
"""Get a filename of the latest goma comiler_proxy.INFO."""
return GetLatestGlogInfoFile('compiler_proxy')
def GetLatestGomaCompilerProxySubprocInfo():
"""Get a filename of the latest goma comiler_proxy-subproc.INFO."""
return GetLatestGlogInfoFile('compiler_proxy-subproc')
def UploadToGomaLogGS(file_path, gs_filename, text_to_append=None):
"""Upload a file to Google Cloud Storage (gs://chrome-goma-log).
Note that the uploaded file would automatically be gzip compressed.
Args:
file_path: a path of a file to be uploaded.
gs_filename: a name of a file in Google Storage.
    text_to_append: additional text to be appended to the file in GS.
Returns:
a stored path name without the bucket name in GS.
"""
hostname = GetShortHostname()
today = datetime.datetime.utcnow().date()
log_path = '%s/%s/%s.gz' % (
today.strftime('%Y/%m/%d'), hostname, gs_filename)
gs_path = 'gs://%s/%s' % (GOMA_LOG_GS_BUCKET, log_path)
temp = tempfile.NamedTemporaryFile(delete=False)
try:
with temp as f_out:
with gzip.GzipFile(fileobj=f_out) as gzipf_out:
with open(file_path) as f_in:
shutil.copyfileobj(f_in, gzipf_out)
if text_to_append:
gzipf_out.write(text_to_append)
slave_utils.GSUtilCopy(temp.name, gs_path)
print "Copied log file to %s" % gs_path
finally:
os.remove(temp.name)
return log_path
def UploadGomaCompilerProxyInfo():
"""Upload goma compiler_proxy.INFO to Google Storage."""
latest_subproc_info = GetLatestGomaCompilerProxySubprocInfo()
if latest_subproc_info:
UploadToGomaLogGS(latest_subproc_info,
os.path.basename(latest_subproc_info))
else:
print 'No compiler_proxy-subproc.INFO to upload'
latest_info = GetLatestGomaCompilerProxyInfo()
if not latest_info:
print 'No compiler_proxy.INFO to upload'
return
# Since a filename of compiler_proxy.INFO is fairly unique,
# we might be able to upload it as-is.
log_path = UploadToGomaLogGS(latest_info, os.path.basename(latest_info))
viewer_url = ('http://chromium-build-stats.appspot.com/compiler_proxy_log/'
+ log_path)
print 'Visualization at %s' % viewer_url
def UploadNinjaLog(outdir, compiler, command, exit_status):
"""Upload .ninja_log to Google Cloud Storage (gs://chrome-goma-log),
in the same folder with goma's compiler_proxy.INFO.
Args:
outdir: a directory that contains .ninja_log.
compiler: compiler used for the build.
command: command line.
exit_status: ninja's exit status.
"""
ninja_log_path = os.path.join(outdir, '.ninja_log')
try:
st = os.stat(ninja_log_path)
mtime = datetime.datetime.fromtimestamp(st.st_mtime)
except OSError, e:
print e
return
cwd = os.getcwd()
platform = chromium_utils.PlatformName()
info = {'cmdline': command,
'cwd': cwd,
'platform': platform,
'exit': exit_status,
'argv': sys.argv,
'env': {}}
for k, v in os.environ.iteritems():
info['env'][k] = v
if compiler:
info['compiler'] = compiler
compiler_proxy_info = GetLatestGomaCompilerProxyInfo()
if compiler_proxy_info:
info['compiler_proxy_info'] = compiler_proxy_info
username = getpass.getuser()
hostname = GetShortHostname()
pid = os.getpid()
ninja_log_filename = 'ninja_log.%s.%s.%s.%d' % (
hostname, username, mtime.strftime('%Y%m%d-%H%M%S'), pid)
additional_text = '# end of ninja log\n' + json.dumps(info)
log_path = UploadToGomaLogGS(
ninja_log_path, ninja_log_filename, additional_text)
viewer_url = 'http://chromium-build-stats.appspot.com/ninja_log/' + log_path
print 'Visualization at %s' % viewer_url
def IsCompilerProxyKilledByFatalError():
"""Returns true if goma compiler_proxy is killed by CHECK or LOG(FATAL)."""
info_file = GetLatestGomaCompilerProxyInfo()
if not info_file:
return False
fatal_pattern = re.compile(r'^F\d{4} \d{2}:\d{2}:\d{2}\.\d{6} ')
with open(info_file) as f:
for line in f.readlines():
if fatal_pattern.match(line):
return True
return False
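
def ExampleFatalLogLineMatch():
  """Hedged sketch, not used by the build scripts: shows the glog FATAL line
  shape that IsCompilerProxyKilledByFatalError() scans for. The sample line
  below is made up for illustration."""
  fatal_pattern = re.compile(r'^F\d{4} \d{2}:\d{2}:\d{2}\.\d{6} ')
  sample = 'F0102 03:04:05.678901 12345 compiler_proxy.cc:99] fatal error'
  return bool(fatal_pattern.match(sample))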
def SendGomaStats(goma_stats_file, goma_crash_report, build_data_dir):
"""Send GomaStats monitoring event.
Note: this function also removes goma_stats_file.
"""
try:
goma_options = []
if goma_stats_file and os.path.exists(goma_stats_file):
# send GomaStats.
goma_options = [
'--build-event-goma-stats-path',
goma_stats_file,
]
elif goma_crash_report and os.path.exists(goma_crash_report):
# crash report.
goma_options = [
'--build-event-goma-error',
'GOMA_ERROR_CRASHED',
'--build-event-goma-crash-report-id-path',
goma_crash_report,
]
elif IsCompilerProxyKilledByFatalError():
goma_options = [
'--build-event-goma-error',
'GOMA_ERROR_LOG_FATAL',
]
else:
# unknown error.
goma_options = [
'--build-event-goma-error',
'GOMA_ERROR_UNKNOWN',
]
run_cmd = PLATFORM_RUN_CMD.get(os.name)
if not run_cmd:
print 'Unknown os.name: %s' % os.name
return
send_monitoring_event_cmd = [
sys.executable,
run_cmd,
'infra.tools.send_monitoring_event',
'--event-mon-run-type', 'prod',
'--build-event-type', 'BUILD',
'--event-mon-timestamp-kind', 'POINT',
'--event-logrequest-path',
os.path.join(build_data_dir, 'log_request_proto')
] + goma_options
cmd_filter = chromium_utils.FilterCapture()
retcode = chromium_utils.RunCommand(
send_monitoring_event_cmd,
filter_obj=cmd_filter,
max_time=30)
if retcode:
print('Execution of send_monitoring_event failed with code %s'
% retcode)
print '\n'.join(cmd_filter.text)
except Exception, inst: # safety net
print('send_monitoring_event for goma failed: %s' % inst)
finally:
try:
os.remove(goma_stats_file)
except OSError: # file does not exist, for ex.
pass
def GetCompilerProxyStartTime():
"""Returns datetime instance of the latest compiler_proxy start time."""
with open(GetLatestGomaCompilerProxyInfo()) as f:
matched = TIMESTAMP_PATTERN.search(f.readline())
if matched:
return datetime.datetime.strptime(matched.group(1), TIMESTAMP_FORMAT)
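
def ExampleCompilerProxyTimestampParse():
  """Hedged sketch, not used by the build scripts: demonstrates the timestamp
  format that TIMESTAMP_PATTERN and TIMESTAMP_FORMAT expect on the first line
  of compiler_proxy.INFO. The sample line below is made up for illustration."""
  sample = 'Log file created at: 2015/04/01 12:34:56'
  matched = TIMESTAMP_PATTERN.search(sample)
  if matched:
    return datetime.datetime.strptime(matched.group(1), TIMESTAMP_FORMAT)
  return None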
def SendGomaTsMon(json_file, exit_status):
"""Send latest Goma status to ts_mon.
Args:
json_file: json filename string that has goma_ctl.py jsonstatus.
exit_status: integer exit status of the build.
"""
json_statuses = {}
try:
with open(json_file) as f:
json_statuses = json.load(f)
if not json_statuses:
print('no json status is recorded in %s' % json_file)
return
if len(json_statuses.get('notice', [])) != 1:
print('unknown json statuses style: %s' % json_statuses)
return
json_status = json_statuses['notice'][0]
if json_status['version'] != 1:
print('unknown version: %s' % json_status)
return
infra_status = json_status.get('infra_status')
result = 'success'
if exit_status != 0:
result = 'failure'
if (exit_status < 0 or
not infra_status or
infra_status['ping_status_code'] != 200 or
infra_status.get('num_user_error', 0) > 0):
result = 'exception'
num_failure = 0
ping_status_code = 0
if infra_status:
num_failure = infra_status['num_exec_compiler_proxy_failure']
ping_status_code = infra_status['ping_status_code']
clobber = 0
if os.environ.get('BUILDBOT_CLOBBER'):
clobber = 1
counter = {
'name': 'goma/failure',
'value': num_failure,
'builder': os.environ.get('BUILDBOT_BUILDERNAME', 'unknown'),
'master': os.environ.get('BUILDBOT_MASTERNAME', 'unknown'),
'slave': os.environ.get('BUILDBOT_SLAVENAME', 'unknown'),
'clobber': clobber,
'os': chromium_utils.PlatformName(),
'ping_status_code': ping_status_code,
'result': result}
start_time = GetCompilerProxyStartTime()
if start_time:
counter['start_time'] = int(time.mktime(start_time.timetuple()))
run_cmd = PLATFORM_RUN_CMD.get(os.name)
if not run_cmd:
print 'Unknown os.name: %s' % os.name
return
counter_json = json.dumps(counter)
# base64 encode on windows because it doesn't like json on the command-line.
if os.name == 'nt':
counter_json = base64.b64encode(counter_json)
cmd = [sys.executable,
run_cmd,
'infra.tools.send_ts_mon_values', '--verbose',
'--ts-mon-target-type', 'task',
'--ts-mon-task-service-name', 'goma-client',
'--ts-mon-task-job-name', 'default',
'--counter', counter_json]
cmd_filter = chromium_utils.FilterCapture()
retcode = chromium_utils.RunCommand(
cmd, filter_obj=cmd_filter,
max_time=30)
if retcode:
print('Execution of send_ts_mon_values failed with code %s'
% retcode)
print '\n'.join(cmd_filter.text)
except Exception as ex:
print('error while sending ts mon json_file=%s: %s' % (json_file, ex))
|
|
# Copyright 2013 Devsim LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import devsim
def printCurrents(device, contact, bias):
ecurr=devsim.get_contact_current(contact=contact, equation="ElectronContinuityEquation", device=device)
hcurr=devsim.get_contact_current(contact=contact, equation="HoleContinuityEquation", device=device)
tcurr = ecurr + hcurr
print("%s %g %g %g %g" % (contact, bias, ecurr, hcurr, tcurr))
####
#### Constants
####
def setOxideParameters(device, region):
q=1.6e-19
k=1.3806503e-23
eps=8.85e-14
T=300
devsim.set_parameter(device=device, region=region, name="Permittivity", value=3.9*eps)
devsim.set_parameter(device=device, region=region, name="ElectronCharge", value=q)
def setSiliconParameters(device, region):
q=1.6e-19
k=1.3806503e-23
eps=8.85e-14
T=300
for name, value in (
("Permittivity", 11.1*eps),
("ElectronCharge", q),
("n_i", 1.0e10),
("kT", eps * T),
("V_t", k*T/q),
("mu_n", 400),
("mu_p", 200),
):
devsim.set_parameter(device=device, region=region, name=name, value=value)
def createSolution(device, region, name):
devsim.node_solution(device=device, region=region, name=name)
devsim.edge_from_node_model(device=device, region=region, node_model=name)
def createSiliconPotentialOnly(device, region):
ie = devsim.node_model(device=device, region=region, name="IntrinsicElectrons", equation="n_i*exp(Potential/V_t)")
res = devsim.node_model(device=device, region=region, name="IntrinsicElectrons:Potential", equation="diff(%s, Potential)" % ie)
for name, equation in (
("IntrinsicHoles", "n_i^2/IntrinsicElectrons"),
("IntrinsicHoles:Potential", "diff(n_i^2/IntrinsicElectrons, Potential)"),
("IntrinsicCharge", "IntrinsicHoles-IntrinsicElectrons + NetDoping"),
("IntrinsicCharge:Potential", "diff(IntrinsicHoles-IntrinsicElectrons, Potential)"),
("PotentialIntrinsicNodeCharge", "-ElectronCharge*IntrinsicCharge"),
("PotentialIntrinsicNodeCharge:Potential", "diff(-ElectronCharge*IntrinsicCharge, Potential)"),
):
devsim.node_model(device=device, region=region, name=name, equation=equation)
for name, equation in (
("ElectricField", "(Potential@n0-Potential@n1)*EdgeInverseLength"),
("ElectricField:Potential@n0", "EdgeInverseLength"),
("ElectricField:Potential@n1", "-ElectricField:Potential@n0"),
("PotentialEdgeFlux", "Permittivity*ElectricField"),
("PotentialEdgeFlux:Potential@n0", "diff(Permittivity*ElectricField,Potential@n0)"),
("PotentialEdgeFlux:Potential@n1", "-PotentialEdgeFlux:Potential@n0"),
):
devsim.edge_model(device=device, region=region, name=name, equation=equation)
devsim.equation(device=device, region=region, name="PotentialEquation", variable_name="Potential",
node_model="PotentialIntrinsicNodeCharge", edge_model="PotentialEdgeFlux", variable_update="log_damp")
def createSiliconPotentialOnlyContact(device, region, contact):
bias_name="%sbias" % contact
format_dict = { "contact" : contact}
devsim.set_parameter(device=device, region=region, name=bias_name, value=0.0)
for name, equation in (
("celec_%(contact)s", "1e-10 + 0.5*abs(NetDoping+(NetDoping^2 + 4 * n_i^2)^(0.5))"),
("chole_%(contact)s", "1e-10 + 0.5*abs(-NetDoping+(NetDoping^2 + 4 * n_i^2)^(0.5))"),
("%(contact)snodemodel", '''
ifelse(NetDoping > 0,
Potential-%(contact)sbias-V_t*log(celec_%(contact)s/n_i),
Potential-%(contact)sbias+V_t*log(chole_%(contact)s/n_i))'''),
("%(contact)snodemodel:Potential", "1"),
):
name_sub = name % format_dict
equation_sub = equation % format_dict
devsim.contact_node_model(device=device, contact=contact, name=name_sub, equation=equation_sub)
devsim.contact_equation(device=device, contact=contact, name="PotentialEquation",
node_model="%snodemodel" % contact)
def createSiliconDriftDiffusion(device, region):
for name, equation in (
("PotentialNodeCharge", "-ElectronCharge*(Holes -Electrons + NetDoping)"),
("PotentialNodeCharge:Electrons", "+ElectronCharge"),
("PotentialNodeCharge:Holes", "-ElectronCharge"),
):
devsim.node_model(device=device, region=region, name=name, equation=equation)
devsim.equation(device=device, region=region, name="PotentialEquation", variable_name="Potential", node_model="PotentialNodeCharge",
edge_model="PotentialEdgeFlux", variable_update="log_damp")
createBernoulli(device, region)
createElectronCurrent(device, region)
createHoleCurrent(device, region)
NCharge="-ElectronCharge * Electrons"
dNChargedn="-ElectronCharge"
devsim.node_model(device=device, region=region, name="NCharge", equation=NCharge)
devsim.node_model(device=device, region=region, name="NCharge:Electrons", equation=dNChargedn)
PCharge="-ElectronCharge * Holes"
dPChargedp="-ElectronCharge"
devsim.node_model(device=device, region=region, name="PCharge", equation=PCharge)
devsim.node_model(device=device, region=region, name="PCharge:Holes", equation=dPChargedp)
ni=devsim.get_parameter(device=device, region=region, name="n_i")
devsim.set_parameter(device=device, region=region, name="n1", value=ni)
devsim.set_parameter(device=device, region=region, name="p1", value=ni)
devsim.set_parameter(device=device, region=region, name="taun", value=1e-5)
devsim.set_parameter(device=device, region=region, name="taup", value=1e-5)
USRH="-ElectronCharge*(Electrons*Holes - n_i^2)/(taup*(Electrons + n1) + taun*(Holes + p1))"
dUSRHdn="simplify(diff(%s, Electrons))" % USRH
dUSRHdp="simplify(diff(%s, Holes))" % USRH
devsim.node_model(device=device, region=region , name="USRH", equation=USRH)
devsim.node_model(device=device, region=region , name="USRH:Electrons", equation=dUSRHdn)
devsim.node_model(device=device, region=region , name="USRH:Holes", equation=dUSRHdp)
devsim.equation(device=device, region=region, name="ElectronContinuityEquation", variable_name="Electrons",
edge_model="ElectronCurrent", variable_update="positive",
time_node_model="NCharge", node_model="USRH")
devsim.equation(device=device, region=region, name="HoleContinuityEquation", variable_name="Holes",
edge_model="HoleCurrent", variable_update="positive",
time_node_model="PCharge", node_model="USRH")
def createSiliconDriftDiffusionAtContact(device, region, contact):
format_dict = { "contact" : contact }
for name, equation in (
("%(contact)snodeelectrons", "ifelse(NetDoping > 0, Electrons - celec_%(contact)s, Electrons - n_i^2/chole_%(contact)s)"),
("%(contact)snodeholes", "ifelse(NetDoping < 0, Holes - chole_%(contact)s, Holes - n_i^2/celec_%(contact)s)"),
("%(contact)snodeelectrons:Electrons", "1.0"),
("%(contact)snodeholes:Holes", "1.0"),
):
name_sub = name % format_dict
equation_sub = equation % format_dict
devsim.contact_node_model(device=device, contact=contact, name=name_sub, equation=equation_sub)
devsim.contact_equation(device=device, contact=contact, name="ElectronContinuityEquation",
node_model="%snodeelectrons" % contact, edge_current_model="ElectronCurrent")
devsim.contact_equation(device=device, contact=contact, name="HoleContinuityEquation",
node_model="%snodeholes" % contact, edge_current_model="HoleCurrent")
def createOxidePotentialOnly(device, region):
for name, equation in (
("ElectricField", "(Potential@n0 - Potential@n1)*EdgeInverseLength"),
("ElectricField:Potential@n0", "EdgeInverseLength"),
("ElectricField:Potential@n1", "-EdgeInverseLength"),
("PotentialEdgeFlux", "Permittivity*ElectricField"),
("PotentialEdgeFlux:Potential@n0", "diff(Permittivity*ElectricField, Potential@n0)"),
("PotentialEdgeFlux:Potential@n1", "-PotentialEdgeFlux:Potential@n0"),
):
devsim.edge_model(device=device, region=region, name=name, equation=equation)
devsim.equation(device=device, region=region, name="PotentialEquation", variable_name="Potential",
edge_model="PotentialEdgeFlux", variable_update="log_damp")
def createSiliconOxideInterface(device, interface):
for name, equation in (
("continuousPotential", "Potential@r0-Potential@r1"),
("continuousPotential:Potential@r0", "1"),
("continuousPotential:Potential@r1", "-1"),
):
devsim.interface_model(device=device, interface=interface, name=name, equation=equation)
devsim.interface_equation(device=device, interface=interface, name="PotentialEquation", interface_model="continuousPotential", type="continuous")
def createSiliconSiliconInterface(device, interface):
for variable in ("Potential", "Electrons", "Holes"):
format_dict = { "var", variable }
for name, equation in (
("continuous%(var)s", "%(var)s@r0-%(var)s@r1"),
("continuous%(var)s:%(var)s@r0", "1"),
("continuous%(var)s:%(var)s@r1", "-1"),
):
name_sub = name % format_dict
equation_sub = equation % format_dict
devsim.interface_model(device=device, interface=interface, name=name_sub, equation=equation_sub)
eqname = "%sEquation" % variable
ieqname = "continuous%s" % variable
devsim.interface_equation(device=device, interface=interface, name=eqname,
interface_model=ieqname, type="continuous")
def createBernoulli(device, region):
#### test for requisite models here
vdiffstr="(Potential@n0 - Potential@n1)/V_t"
for name, equation in (
("vdiff", vdiffstr),
("vdiff:Potential@n0", "V_t^(-1)"),
("vdiff:Potential@n1", "-vdiff:Potential@n0"),
("Bern01", "B(vdiff)"),
("Bern01:Potential@n0", "dBdx(vdiff) * vdiff:Potential@n0"),
("Bern01:Potential@n1", "-Bern01:Potential@n0"),
("Bern10", "Bern01 + vdiff"),
("Bern10:Potential@n0", "Bern01:Potential@n0 + vdiff:Potential@n0"),
("Bern10:Potential@n1", "Bern01:Potential@n1 + vdiff:Potential@n1"),
):
devsim.edge_model(device=device, region=region, name=name, equation=equation)
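
# Hedged reference sketch, not used by the simulation: a plain-Python version
# of the Bernoulli function B(x) = x/(exp(x) - 1) that the "Bern01"/"Bern10"
# edge models above evaluate through devsim's built-in B() and dBdx().
def bernoulliReference(x):
    import math
    if abs(x) < 1.0e-12:
        # series limit avoids 0/0 at x = 0: B(x) -> 1 - x/2
        return 1.0 - 0.5 * x
    return x / (math.exp(x) - 1.0)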
def createElectronCurrent(device, region):
Jn="ElectronCharge*mu_n*EdgeInverseLength*V_t*(Electrons@n1*Bern10 - Electrons@n0*Bern01)"
devsim.edge_model(device=device, region=region, name="ElectronCurrent", equation=Jn)
for variable in ("Electrons", "Potential"):
der = "simplify(diff(%s, %s))" % (Jn, variable)
devsim.edge_model(device=device, region=region, name="ElectronCurrent", equation=der)
def createHoleCurrent(device, region):
    Jp="-ElectronCharge*mu_p*EdgeInverseLength*V_t*(Holes@n1*Bern01 - Holes@n0*Bern10)"
    devsim.edge_model(device=device, region=region, name="HoleCurrent", equation=Jp)
    # create the derivative edge models needed to assemble the continuity equation
    for variable in ("Holes", "Potential"):
        for node in ("@n0", "@n1"):
            der = "simplify(diff(%s, %s%s))" % (Jp, variable, node)
            devsim.edge_model(device=device, region=region,
                              name="HoleCurrent:%s%s" % (variable, node), equation=der)
|
|
# Author: Bichen Wu ([email protected]) 08/25/2016
"""Image data base class for kitti"""
import cv2
import os
import numpy as np
import subprocess
from dataset.imdb import imdb
from utils.util import bbox_transform_inv, batch_iou
class kitti(imdb):
def __init__(self, image_set, data_path, mc):
imdb.__init__(self, 'kitti_'+image_set, mc)
self._image_set = image_set
self._data_root_path = data_path
self._image_path = os.path.join(self._data_root_path, 'training', 'image_2')
self._label_path = os.path.join(self._data_root_path, 'training', 'label_2')
self._classes = self.mc.CLASS_NAMES
self._class_to_idx = dict(zip(self.classes, xrange(self.num_classes)))
# a list of string indices of images in the directory
self._image_idx = self._load_image_set_idx()
# a dict of image_idx -> [[cx, cy, w, h, cls_idx]]. x,y,w,h are not divided by
# the image width and height
self._rois = self._load_kitti_annotation()
## batch reader ##
self._perm_idx = None
self._cur_idx = 0
# TODO(bichen): add a random seed as parameter
self._shuffle_image_idx()
self._eval_tool = './src/dataset/kitti-eval/cpp/evaluate_object'
def _load_image_set_idx(self):
image_set_file = os.path.join(
self._data_root_path, 'ImageSets', self._image_set+'.txt')
assert os.path.exists(image_set_file), \
'File does not exist: {}'.format(image_set_file)
with open(image_set_file) as f:
image_idx = [x.strip() for x in f.readlines()]
return image_idx
def _image_path_at(self, idx):
image_path = os.path.join(self._image_path, idx+'.png')
assert os.path.exists(image_path), \
'Image does not exist: {}'.format(image_path)
return image_path
def _load_kitti_annotation(self):
def _get_obj_level(obj):
height = float(obj[7]) - float(obj[5]) + 1
      truncation = float(obj[1])
      occlusion = float(obj[2])
      if height >= 40 and truncation <= 0.15 and occlusion <= 0:
          return 1
      elif height >= 25 and truncation <= 0.3 and occlusion <= 1:
          return 2
      elif height >= 25 and truncation <= 0.5 and occlusion <= 2:
return 3
else:
return 4
idx2annotation = {}
for index in self._image_idx:
filename = os.path.join(self._label_path, index+'.txt')
with open(filename, 'r') as f:
lines = f.readlines()
bboxes = []
for line in lines:
obj = line.strip().split(' ')
try:
cls = self._class_to_idx[obj[0].lower().strip()]
except:
continue
if self.mc.EXCLUDE_HARD_EXAMPLES and _get_obj_level(obj) > 3:
continue
xmin = float(obj[4])
ymin = float(obj[5])
xmax = float(obj[6])
ymax = float(obj[7])
assert xmin >= 0.0 and xmin <= xmax, \
'Invalid bounding box x-coord xmin {} or xmax {} at {}.txt' \
.format(xmin, xmax, index)
assert ymin >= 0.0 and ymin <= ymax, \
'Invalid bounding box y-coord ymin {} or ymax {} at {}.txt' \
.format(ymin, ymax, index)
x, y, w, h = bbox_transform_inv([xmin, ymin, xmax, ymax])
bboxes.append([x, y, w, h, cls])
idx2annotation[index] = bboxes
return idx2annotation
def evaluate_detections(self, eval_dir, global_step, all_boxes):
"""Evaluate detection results.
Args:
eval_dir: directory to write evaluation logs
global_step: step of the checkpoint
all_boxes: all_boxes[cls][image] = N x 5 arrays of
[xmin, ymin, xmax, ymax, score]
Returns:
aps: array of average precisions.
names: class names corresponding to each ap
"""
det_file_dir = os.path.join(
eval_dir, 'detection_files_{:s}'.format(global_step), 'data')
if not os.path.isdir(det_file_dir):
os.makedirs(det_file_dir)
for im_idx, index in enumerate(self._image_idx):
filename = os.path.join(det_file_dir, index+'.txt')
with open(filename, 'wt') as f:
for cls_idx, cls in enumerate(self._classes):
dets = all_boxes[cls_idx][im_idx]
for k in xrange(len(dets)):
f.write(
'{:s} -1 -1 0.0 {:.2f} {:.2f} {:.2f} {:.2f} 0.0 0.0 0.0 0.0 0.0 '
'0.0 0.0 {:.3f}\n'.format(
cls.lower(), dets[k][0], dets[k][1], dets[k][2], dets[k][3],
dets[k][4])
)
cmd = self._eval_tool + ' ' \
+ os.path.join(self._data_root_path, 'training') + ' ' \
+ os.path.join(self._data_root_path, 'ImageSets',
self._image_set+'.txt') + ' ' \
+ os.path.dirname(det_file_dir) + ' ' + str(len(self._image_idx))
print('Running: {}'.format(cmd))
status = subprocess.call(cmd, shell=True)
aps = []
names = []
for cls in self._classes:
det_file_name = os.path.join(
os.path.dirname(det_file_dir), 'stats_{:s}_ap.txt'.format(cls))
if os.path.exists(det_file_name):
with open(det_file_name, 'r') as f:
lines = f.readlines()
assert len(lines) == 3, \
'Line number of {} should be 3'.format(det_file_name)
aps.append(float(lines[0].split('=')[1].strip()))
aps.append(float(lines[1].split('=')[1].strip()))
aps.append(float(lines[2].split('=')[1].strip()))
else:
aps.extend([0.0, 0.0, 0.0])
names.append(cls+'_easy')
names.append(cls+'_medium')
names.append(cls+'_hard')
return aps, names
def do_detection_analysis_in_eval(self, eval_dir, global_step):
det_file_dir = os.path.join(
eval_dir, 'detection_files_{:s}'.format(global_step), 'data')
det_error_dir = os.path.join(
eval_dir, 'detection_files_{:s}'.format(global_step),
'error_analysis')
if not os.path.exists(det_error_dir):
os.makedirs(det_error_dir)
det_error_file = os.path.join(det_error_dir, 'det_error_file.txt')
stats = self.analyze_detections(det_file_dir, det_error_file)
ims = self.visualize_detections(
image_dir=self._image_path,
image_format='.png',
det_error_file=det_error_file,
output_image_dir=det_error_dir,
num_det_per_type=10
)
return stats, ims
def analyze_detections(self, detection_file_dir, det_error_file):
def _save_detection(f, idx, error_type, det, score):
f.write(
'{:s} {:s} {:.1f} {:.1f} {:.1f} {:.1f} {:s} {:.3f}\n'.format(
idx, error_type,
det[0]-det[2]/2., det[1]-det[3]/2.,
det[0]+det[2]/2., det[1]+det[3]/2.,
self._classes[int(det[4])],
score
)
)
# load detections
self._det_rois = {}
for idx in self._image_idx:
det_file_name = os.path.join(detection_file_dir, idx+'.txt')
with open(det_file_name) as f:
lines = f.readlines()
bboxes = []
for line in lines:
obj = line.strip().split(' ')
cls = self._class_to_idx[obj[0].lower().strip()]
xmin = float(obj[4])
ymin = float(obj[5])
xmax = float(obj[6])
ymax = float(obj[7])
score = float(obj[-1])
x, y, w, h = bbox_transform_inv([xmin, ymin, xmax, ymax])
bboxes.append([x, y, w, h, cls, score])
bboxes.sort(key=lambda x: x[-1], reverse=True)
self._det_rois[idx] = bboxes
# do error analysis
num_objs = 0.
num_dets = 0.
num_correct = 0.
num_loc_error = 0.
num_cls_error = 0.
num_bg_error = 0.
num_repeated_error = 0.
num_detected_obj = 0.
with open(det_error_file, 'w') as f:
for idx in self._image_idx:
gt_bboxes = np.array(self._rois[idx])
num_objs += len(gt_bboxes)
detected = [False]*len(gt_bboxes)
det_bboxes = self._det_rois[idx]
for i, det in enumerate(det_bboxes):
if i < len(gt_bboxes):
num_dets += 1
ious = batch_iou(gt_bboxes[:, :4], det[:4])
max_iou = np.max(ious)
gt_idx = np.argmax(ious)
if max_iou > 0.1:
if gt_bboxes[gt_idx, 4] == det[4]:
if max_iou >= 0.5:
if i < len(gt_bboxes):
if not detected[gt_idx]:
num_correct += 1
detected[gt_idx] = True
else:
num_repeated_error += 1
else:
if i < len(gt_bboxes):
num_loc_error += 1
_save_detection(f, idx, 'loc', det, det[5])
else:
if i < len(gt_bboxes):
num_cls_error += 1
_save_detection(f, idx, 'cls', det, det[5])
else:
if i < len(gt_bboxes):
num_bg_error += 1
_save_detection(f, idx, 'bg', det, det[5])
for i, gt in enumerate(gt_bboxes):
if not detected[i]:
_save_detection(f, idx, 'missed', gt, -1.0)
num_detected_obj += sum(detected)
print ('Detection Analysis:')
print (' Number of detections: {}'.format(num_dets))
print (' Number of objects: {}'.format(num_objs))
print (' Percentage of correct detections: {}'.format(
num_correct/num_dets))
print (' Percentage of localization error: {}'.format(
num_loc_error/num_dets))
print (' Percentage of classification error: {}'.format(
num_cls_error/num_dets))
print (' Percentage of background error: {}'.format(
num_bg_error/num_dets))
print (' Percentage of repeated detections: {}'.format(
num_repeated_error/num_dets))
print (' Recall: {}'.format(
num_detected_obj/num_objs))
out = {}
out['num of detections'] = num_dets
out['num of objects'] = num_objs
out['% correct detections'] = num_correct/num_dets
out['% localization error'] = num_loc_error/num_dets
out['% classification error'] = num_cls_error/num_dets
out['% background error'] = num_bg_error/num_dets
out['% repeated error'] = num_repeated_error/num_dets
out['% recall'] = num_detected_obj/num_objs
return out
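# Illustrative sketch only. It assumes (since utils.util is not shown in this
# file) that bbox_transform_inv converts [xmin, ymin, xmax, ymax] into the
# center format [cx, cy, w, h] used throughout this class, and that batch_iou
# computes IoU against boxes in that same center format.
def _bbox_center_format_sketch():
  def to_center(xmin, ymin, xmax, ymax):
    w, h = xmax - xmin + 1.0, ymax - ymin + 1.0
    return [xmin + 0.5*w, ymin + 0.5*h, w, h]
  def iou(a, b):
    ax0, ax1 = a[0] - a[2]/2., a[0] + a[2]/2.
    ay0, ay1 = a[1] - a[3]/2., a[1] + a[3]/2.
    bx0, bx1 = b[0] - b[2]/2., b[0] + b[2]/2.
    by0, by1 = b[1] - b[3]/2., b[1] + b[3]/2.
    iw = max(0.0, min(ax1, bx1) - max(ax0, bx0))
    ih = max(0.0, min(ay1, by1) - max(ay0, by0))
    inter = iw * ih
    return inter / (a[2]*a[3] + b[2]*b[3] - inter)
  gt = to_center(10., 10., 50., 50.)
  det = to_center(12., 12., 48., 48.)
  return iou(gt, det)  # roughly 0.81 for this pair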
|
|
"""
Generate Documentation for armacode to ReadTheDocs.
Because armacode requires Rhino libraries, Sphinx cannot import and parse the module directly. This script is a workaround that exports all of the documentation to .rst files.
Dependencies:
- Python27. Remember to set PATH Environment System variables
- pip
- Sphinx
- Sphinxcontrib-napoleon
- pockets (installed with Sphinxcontrib-napoleon)
"""
import sys
sys.path.insert(0, "C:\\Python27\\Lib\\site-packages") # Other dependencies
sys.path.insert(0, ".\\dependencies") # Custom dependencies
def AddReferencesToVS():
"""The following are required if this is executed under VS. This is for future reference only.
"""
cwd = os.getcwd()
armacodePath = cwd
sys.path.insert(0, "C:\\Python27\\Lib\\site-packages") # Other dependencies
sys.path.insert(0, cwd + "\\dependencies") # Custom dependencies
#armacode
sys.path.insert(0, armacodePath) # armacode
#IronPython
sys.path.insert(0, "C:\\Users\\t.ho\\AppData\\Roaming\\McNeel\\Rhinoceros\\5.0\\Plug-ins\\IronPython (814d908a-e25c-493d-97e9-ee3861957f49)\\settings\\lib")
#Rhinocommon
sys.path.insert(0, "C:\\Program Files\\Rhinoceros 5 (64-bit)\\System")
#RhinoPython
sys.path.insert(0, "C:\\Program Files (x86)\\Rhinoceros 5\\Plug-ins\\IronPython")
#Grasshopper
sys.path.insert(0, "C:\\Program Files\\Common Files\\McNeel\\Rhinoceros\\5.0\\Plug-ins\\Grasshopper (b45a29b1-4343-4035-989e-044e8580d9cf)\\0.9.76.0")
clr.AddReferenceToFileAndPath("C:\\Program Files (x86)\\Rhinoceros 5\\Plug-ins\\IronPython\\RhinoPythonHost.dll")
clr.AddReference("System.Windows.Forms")
clr.AddReference("System.Drawing")
clr.AddReference("Grasshopper")
import sphinxcontrib.napoleon as sphinxNP
import inspect
import math
import armacode
import os
import ast
import shutil
#import sphinx.ext.napoleon #This does not work in IronPython ?
def StringToFile(string, fileName):
dirName = os.path.dirname(fileName)
if not os.path.exists(dirName):
os.makedirs(dirName)
f = open(fileName,'w')
f.write(string) # python will convert \n to os.linesep
    f.close()  # can be omitted in most cases, as the destructor will call it
return fileName
def StringIndent(multilineString, indent=4, delimiter="\n"):
indentString = ""
if isinstance(indent, int):
indentString = " " * indent
elif isinstance(indent, str):
indentString = indent
resultStrings = []
strLines = multilineString.split(delimiter)
if len(strLines) == 1:
resultStrings = "{}{}".format(indentString, strLines[0])
if len(strLines) > 1:
for item in strLines:
resultStrings.append("{}{}".format(indentString, item))
resultStrings = str.join(delimiter, resultStrings)
return resultStrings
def DocStringFromGoogle(docstring):
config = sphinxNP.Config(napoleon_use_param=True, napoleon_use_rtype=True)
restDocString = sphinxNP.GoogleDocstring(docstring, config)
return restDocString
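# Usage sketch (illustrative, not called anywhere in this script): napoleon's
# GoogleDocstring converts a Google-style docstring to reStructuredText, and
# str() of the returned object yields the text embedded in the generated .rst
# files.
def _DocStringFromGoogleExample():
    sample = ("Add a layer to the document.\n"
              "\n"
              "Args:\n"
              "    name (str): Name of the layer.\n"
              "\n"
              "Returns:\n"
              "    bool: True on success.")
    return str(DocStringFromGoogle(sample))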
def FileDoc(filename):
"""
Return the restructuredText docstring in a python file
"""
file_contents = None
with open(filename) as f:
file_contents = f.read()
try:
mod = ast.parse(file_contents)
except:
return
docString = ast.get_docstring(mod)
if docString:
docString = inspect.cleandoc(docString)
restDocString = DocStringFromGoogle(docString)
return restDocString
def MethodSyntax(_object):
"""
Return the call syntax for a method member.
"""
memberName = _object.__name__
functionSyntax = inspect.formatargspec(*inspect.getargspec(_object))
resultString = functionSyntax
return resultString
def MethodDoc(_object):
"""
Return the restructuredText docstring for a method member.
"""
memberName = _object.__name__
docString = inspect.getdoc(_object)
if not docString:
docString = "Undocumented"
docString = inspect.cleandoc(docString)
restDocString = DocStringFromGoogle(docString)
return restDocString
def DescribeMethod(_object, customName=None):
"""
Return the string describing the method.
"""
# Get Values
memberName = _object.__name__
if customName:
memberName = customName
methodSyntax = MethodSyntax(_object)
if methodSyntax:
methodSyntax = "\n.. py:Function:: {}{}\n\n".format(memberName, methodSyntax)
restDocstring = MethodDoc(_object)
message = []
message.append(memberName)
message.append("-" * len(memberName))
message.append(methodSyntax)
message.append(restDocstring)
resultMessage = str.join("\n", message)
return resultMessage
def DescribeTool(filename, customName=None):
"""
Return the string describing the method.
"""
# Get Values
fpath, fname = os.path.split(filename)
memberName = fname[:-3]
restDocstring = FileDoc(filename)
message = []
# Indexing
message.append(".. index:: {} (Tool)\n".format(memberName))
# Reference Label
message.append(".. _tools.{}:\n".format(str.lower(memberName)))
if not restDocstring:
restDocstring = "Undocumented."
message.append(memberName)
message.append("-" * len(memberName))
message.append(restDocstring)
resultMessage = str.join("\n", message)
return resultMessage
def DescribeCommand(filename, customName=None):
"""
Return the string describing the method.
"""
# Get Values
fpath, fname = os.path.split(filename)
memberName = fname[:-7]
restDocstring = FileDoc(filename)
message = []
# Indexing
message.append(".. index:: {} (Command)\n".format(memberName))
# Reference label
message.append(".. _{}_cmd:\n".format(str.lower(memberName)))
if not restDocstring:
restDocstring = "Undocumented."
message.append(memberName)
message.append("-" * len(memberName))
message.append(restDocstring)
resultMessage = str.join("\n", message)
return resultMessage
def DescribeGHUserObject(filename, customName=None):
"""
Return the string describing the GH User Object.
"""
# Get Values
fpath, fname = os.path.split(filename)
memberName = fname[:-7]
restDocstring = FileDoc(filename)
message = []
# Indexing
message.append(".. index:: {} (GH)\n".format(memberName))
# Reference label
message.append(".. _{}_gh:\n".format(str.lower(memberName)))
if not restDocstring:
restDocstring = "Undocumented."
message.append(memberName)
message.append("-" * len(memberName))
message.append(restDocstring)
resultMessage = str.join("\n", message)
return resultMessage
def DescribeObject(_objectName, _object):
if inspect.isbuiltin(_object) or inspect.isfunction(_object) or inspect.ismethod(_object):
message = DescribeMethod(_object)
return message
else:
#Property
return None
def ProcessMethods(dataDict, writeToDirectory=None, sortMembers=True, indexFile=False, useCustomNames=False):
if writeToDirectory:
try:
#print "Removing existing directory: {}".format(writeToDirectory)
shutil.rmtree(writeToDirectory)
except:
pass
memberData = dataDict
allMemberNames = dataDict.keys()
if sortMembers:
allMemberNames.sort()
allStrings = []
for memberName in allMemberNames:
member = memberData[memberName]
        customName = memberName if useCustomNames else None
        resultString = DescribeMethod(member, customName)
if writeToDirectory:
if isinstance(writeToDirectory, str):
fileName = "{}\\{}.rst".format(writeToDirectory, memberName)
else:
fileName = "{}.rst".format(memberName)
rc = StringToFile(resultString, fileName)
if writeToDirectory and indexFile:
indexFilename = "{}\\index.rst".format(writeToDirectory)
indexContent = CombineFiles(allMemberNames)
rc = StringToFile(indexContent, indexFilename)
print "Processed {} Methods !".format(len(allMemberNames))
def ProcessCommands(dirPath, writeToDirectory=None, sortMembers=True, indexFile=False, useCustomNames=False):
if writeToDirectory:
try:
#print "Removing existing directory: {}".format(writeToDirectory)
shutil.rmtree(writeToDirectory)
except:
pass
#Get filenames of all tools.
fileList = []
for (dirpath, dirname, filenames) in os.walk(dirPath):
for fname in filenames:
if fname.endswith("_cmd.py") and not fname.startswith("_"):
fPath = dirpath+"\\"+fname
fileList.append(fPath)
if sortMembers:
fileList.sort()
    allMemberNames = []
    for item in fileList:
        resultString = DescribeCommand(item)
        fpath, fname = os.path.split(item)
        memberName = fname[:-7]
        allMemberNames.append(memberName)
if writeToDirectory:
if isinstance(writeToDirectory, str):
fileName = "{}\\{}.rst".format(writeToDirectory, memberName)
else:
fileName = "{}.rst".format(memberName)
rc = StringToFile(resultString, fileName)
if writeToDirectory and indexFile:
indexFilename = "{}\\index.rst".format(writeToDirectory)
indexContent = CombineFiles(allMemberNames)
rc = StringToFile(indexContent, indexFilename)
print "Processed {} Commands !".format(len(fileList))
def ProcessTools(dirPath, writeToDirectory=None, sortMembers=True, indexFile=False, useCustomNames=False):
if writeToDirectory:
try:
#print "Removing existing directory: {}".format(writeToDirectory)
shutil.rmtree(writeToDirectory)
except:
pass
#Get filenames of all tools.
fileList = []
for (dirpath, dirname, filenames) in os.walk(dirPath):
for fname in filenames:
if fname.endswith(".py") and not fname.startswith("_"):
fPath = dirpath+"\\"+fname
fileList.append(fPath)
if sortMembers:
fileList.sort()
    allMemberNames = []
    for item in fileList:
        resultString = DescribeTool(item)
        fpath, fname = os.path.split(item)
        memberName, ext = os.path.splitext(fname)
        allMemberNames.append(memberName)
if writeToDirectory:
if isinstance(writeToDirectory, str):
fileName = "{}\\{}.rst".format(writeToDirectory, memberName)
else:
fileName = "{}.rst".format(memberName)
rc = StringToFile(resultString, fileName)
if writeToDirectory and indexFile:
indexFilename = "{}\\index.rst".format(writeToDirectory)
indexContent = CombineFiles(allMemberNames)
rc = StringToFile(indexContent, indexFilename)
print "Processed {} Tools !".format(len(fileList))
def ProcessToolsInPackage(package, writeToDirectory=None, sortMembers=True, indexFile=False, useCustomNames=False):
toolNames = package.__all__
if sortMembers:
toolNames.sort()
if writeToDirectory:
try:
#print "Removing existing directory: {}".format(writeToDirectory)
shutil.rmtree(writeToDirectory)
except:
pass
#Get filenames of all tools.
fileList = []
dirPath = os.path.dirname(armacode.tools.__file__)
for toolName in toolNames:
filePath = os.path.join(dirPath, toolName+".py")
fileList.append(filePath)
    allMemberNames = []
    for item in fileList:
        resultString = DescribeTool(item)
        fpath, fname = os.path.split(item)
        memberName, ext = os.path.splitext(fname)
        allMemberNames.append(memberName)
if writeToDirectory:
if isinstance(writeToDirectory, str):
fileName = "{}\\{}.rst".format(writeToDirectory, memberName)
else:
fileName = "{}.rst".format(memberName)
rc = StringToFile(resultString, fileName)
if writeToDirectory and indexFile:
indexFilename = "{}\\index.rst".format(writeToDirectory)
indexContent = CombineFiles(allMemberNames)
rc = StringToFile(indexContent, indexFilename)
print "Processed {} Tools in Package!".format(len(fileList))
def ProcessGHUserObjects(category="AR-MA", writeToDirectory=None, sortMembers=True, indexFile=False, useCustomNames=False):
import Grasshopper
objs = list(Grasshopper.GH_InstanceServer.ComponentServer.ObjectProxies)
if sortMembers:
objs.sort(key=lambda obj: obj.Desc.Name)
obj = objs[0]
allMemberNames = []
iconDirectory = writeToDirectory + "\\icon"
if writeToDirectory:
try:
#print "Removing existing directory: {}".format(writeToDirectory)
shutil.rmtree(writeToDirectory)
except:
pass
if not os.path.isdir(iconDirectory):
os.mkdir(iconDirectory)
for obj in objs:
catName = obj.Desc.Category
if not category in catName:
continue
memberName = obj.Desc.Name
subCatName = obj.Desc.SubCategory
description = obj.Desc.InstanceDescription
fileName = memberName
#fileName = str.replace(memberName, "[OBSOLETE]", "_OBSOLETE")
fileName = str.replace(fileName, "|", "-")
fileName = str.replace(fileName, "/", "-")
fileName = str.replace(fileName, ":", "-")
fileName = str.replace(fileName, "*", "-")
fileName = fileName.replace(" ", "_")
iconFileName = "{}.png".format(fileName)
iconFilePath = "{}\\icon\\{}".format(writeToDirectory, iconFileName)
docString = description
if not docString:
docString = "Undocumented"
docString = inspect.cleandoc(docString)
restDocString = DocStringFromGoogle(docString)
message = []
# Indexing
message.append(".. index:: {} (GH)\n".format(memberName))
# Reference label
message.append(".. _{}_gh:\n".format(str.lower(memberName)))
if not restDocString:
restDocString = "Undocumented."
heading = memberName + " |icon| "
message.append(memberName + " |icon| ")
message.append("-" * len(heading))
message.append("")
message.append(restDocString)
message.append("\n.. |icon| image:: icon/{}".format(iconFileName))
resultString = str.join("\n", message)
if writeToDirectory:
try:
obj.Icon.Save(iconFilePath)
except:
print "ERROR: {} could not be extracted".format(iconFilePath)
if isinstance(writeToDirectory, str):
fileName = "{}\\{}.rst".format(writeToDirectory, fileName)
else:
fileName = "{}.rst".format(memberName)
rc = StringToFile(resultString, fileName)
allMemberNames.append(memberName)
if writeToDirectory and indexFile:
indexFilename = "{}\\index.rst".format(writeToDirectory)
indexContent = CombineFiles(allMemberNames)
rc = StringToFile(indexContent, indexFilename)
print "Processed {} GH User Objects !".format(len(allMemberNames))
def Toctree(includePaths, maxdepth=2):
toctreeString = []
toctreeString.append("")
toctreeString.append(".. toctree::")
toctreeString.append(" :maxdepth: {}".format(maxdepth))
#toctreeString.append(" :numbered:")
#toctreeString.append(" :titlesonly:")
#toctreeString.append(" :glob:")
#toctreeString.append(" :hidden:")
toctreeString.append(" ")
for item in includePaths:
toctreeString.append(" {}.rst".format(item))
resultString = str.join("\n", toctreeString)
return resultString
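# Illustrative output: Toctree(["AddLayer", "RemoveLayer"]) returns roughly
#
#   .. toctree::
#       :maxdepth: 2
#
#       AddLayer.rst
#       RemoveLayer.rst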
def CombineFiles(includePaths):
content = []
for item in includePaths:
content.append(".. include:: {}.rst".format(item))
#content.append(" :end-before: :param")
resultString = str.join("\n", content)
return resultString
def InspectObject(_object=None, ):
    # Set default spacing
global colonSpacing
classes = {}
imports = {}
modules = {}
specialModules = {}
methods = {}
properties = {}
protected = {}
memberData = inspect.getmembers(_object)
for memberName, member in memberData:
#print "{:>30}, {}".format(memberName,type(member))
try:
            if inspect.isclass(member):
                classes[memberName] = member
            elif inspect.ismodule(member):
                modules[memberName] = member
elif inspect.isbuiltin(member) or inspect.isfunction(member) or inspect.ismethod(member):
if memberName.startswith("_"):
specialModules[memberName] = member
else:
methods[memberName] = member
else:
properties[memberName] = member
except:
protected[memberName] = member
    allData = {}
    allData["classes"] = classes
    allData["modules"] = modules
allData["methods"] = methods
allData["specials"] = specialModules
allData["properties"] = properties
allData["protected"] = protected
return allData
def GenerateDocsetFeed():
feedFilePath = "feed\\armacode.xml"
feedTemplate = ["<entry>",
" <version>{versionNumber}</version>",
" <url>https://github.com/theTonyHo/armacode/raw/master/docset/armacode.tgz</url>",
"</entry>"]
feedTemplate = str.join("\n", feedTemplate)
feedContent = feedTemplate.format(versionNumber=armacode.__Version__)
with open(feedFilePath, 'w') as f:
f.write(feedContent)
print "Docset Feed Generated"
def GenerateVersionFile():
versionFilePath = os.getcwd()+"\\VERSION"
with open(versionFilePath, 'w') as f:
f.write(armacode.__Version__)
f.write("\n")
f.write(armacode.__ReleaseDate__)
print "Version File Generated"
def main():
moduleToDocument = armacode
moduleName = moduleToDocument.__name__
sortMembers = True
moduleDirectory = moduleToDocument.__path__[0]
#docDirectory = moduleDirectory[0] + "\\source" + "\\" + moduleName
docDirectory = os.path.dirname(__file__)
    # Documentation directory for all generated docs.
moduleDocDirectory = "source" + "\\" + moduleName
methodDocDirectory = moduleDocDirectory + "\\libraries"
# Reference directory for methods to include in index but not to list out.
refDirectory = moduleDocDirectory + "\\" + "reference"
# Commands
commandPath = moduleDirectory + "\\plug-in\\AR-MA {4dbb1598-76ef-4560-8b04-fd01de706e43}\\dev"
commandDocDirectory = "source" + "\\armacode\\plug-ins\\commands"
# Tools
toolPath = moduleDirectory + "\\tools"
toolDocDirectory = "source" + "\\armacode\\tools"
# Grasshopper UserObjects
ghDocDirectory = "source" + "\\armacode\\ghuserobjects"
includeModules = ["classes", ""]
# Go through module
    # Get list of methods, modules, properties
# ##Methods
# for each member of methods
# Get docstring for each member
# Convert to reSText
# Save as a .rst file with the hierarchy as name
# armacode/AddLayer.rst
allData = InspectObject(armacode)
#Additional methods to include in index
additionalMethods = { "armacode.config.SetOption" : armacode.config.SetOption,
"armacode.config.GetOption" : armacode.config.GetOption,
"armacode.config.Save" : armacode.config.Save
}
def get_class_that_defined_method(meth):
for cls in inspect.getmro(meth.im_class):
if meth.__name__ in cls.__dict__:
return cls
return None
# Methods
proceed = True
if proceed:
ProcessMethods(allData["methods"], methodDocDirectory)
ProcessMethods(additionalMethods, refDirectory, useCustomNames=True)
ProcessCommands(commandPath, commandDocDirectory)
# ProcessTools(toolPath, toolDocDirectory) # Describe all tools in directory
ProcessToolsInPackage(armacode.tools, toolDocDirectory) #Describe all tools found in package.
ProcessGHUserObjects("AR-MA", ghDocDirectory)
GenerateDocsetFeed()
GenerateVersionFile()
print "Document generated for armacode"
if __name__ == "__main__":
main()
# Set the path to documentation git repo
path = os.path.dirname(__file__)
armacode.config.SetOption("ARMACODE_DOC_GIT", "path", path)
armacode.config.SetOption("ARMACODE_DOC_GIT", "disable", False)
armacode.config.Save()
|
|
import numpy as np
import pandas as pd
from .common import (
_contains_datetime_like_objects,
is_np_datetime_like,
is_np_timedelta_like,
)
from .pycompat import dask_array_type
def _season_from_months(months):
"""Compute season (DJF, MAM, JJA, SON) from month ordinal"""
# TODO: Move "season" accessor upstream into pandas
seasons = np.array(["DJF", "MAM", "JJA", "SON"])
months = np.asarray(months)
return seasons[(months // 3) % 4]
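# Example (illustrative): _season_from_months([1, 4, 7, 10, 12])
# -> array(['DJF', 'MAM', 'JJA', 'SON', 'DJF'], dtype='<U3')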
def _access_through_cftimeindex(values, name):
"""Coerce an array of datetime-like values to a CFTimeIndex
and access requested datetime component
"""
from ..coding.cftimeindex import CFTimeIndex
values_as_cftimeindex = CFTimeIndex(values.ravel())
if name == "season":
months = values_as_cftimeindex.month
field_values = _season_from_months(months)
else:
field_values = getattr(values_as_cftimeindex, name)
return field_values.reshape(values.shape)
def _access_through_series(values, name):
"""Coerce an array of datetime-like values to a pandas Series and
access requested datetime component
"""
values_as_series = pd.Series(values.ravel())
if name == "season":
months = values_as_series.dt.month.values
field_values = _season_from_months(months)
else:
field_values = getattr(values_as_series.dt, name).values
return field_values.reshape(values.shape)
def _get_date_field(values, name, dtype):
"""Indirectly access pandas' libts.get_date_field by wrapping data
as a Series and calling through `.dt` attribute.
Parameters
----------
values : np.ndarray or dask.array-like
Array-like container of datetime-like values
name : str
Name of datetime field to access
dtype : dtype-like
dtype for output date field values
Returns
-------
datetime_fields : same type as values
Array-like of datetime fields accessed for each element in values
"""
if is_np_datetime_like(values.dtype):
access_method = _access_through_series
else:
access_method = _access_through_cftimeindex
if isinstance(values, dask_array_type):
from dask.array import map_blocks
return map_blocks(access_method, values, name, dtype=dtype)
else:
return access_method(values, name)
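# Illustrative example: for a small in-memory datetime64 array this dispatches
# to _access_through_series, e.g.
#   vals = np.arange("2000-01", "2000-04", dtype="datetime64[M]").astype("datetime64[ns]")
#   _get_date_field(vals, "month", np.int64)  # -> array([1, 2, 3])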
def _round_through_series_or_index(values, name, freq):
"""Coerce an array of datetime-like values to a pandas Series or xarray
CFTimeIndex and apply requested rounding
"""
from ..coding.cftimeindex import CFTimeIndex
if is_np_datetime_like(values.dtype):
values_as_series = pd.Series(values.ravel())
method = getattr(values_as_series.dt, name)
else:
values_as_cftimeindex = CFTimeIndex(values.ravel())
method = getattr(values_as_cftimeindex, name)
field_values = method(freq=freq).values
return field_values.reshape(values.shape)
def _round_field(values, name, freq):
"""Indirectly access rounding functions by wrapping data
as a Series or CFTimeIndex
Parameters
----------
values : np.ndarray or dask.array-like
Array-like container of datetime-like values
name : {"ceil", "floor", "round"}
Name of rounding function
freq : str
a freq string indicating the rounding resolution
Returns
-------
rounded timestamps : same type as values
Array-like of datetime fields accessed for each element in values
"""
if isinstance(values, dask_array_type):
from dask.array import map_blocks
dtype = np.datetime64 if is_np_datetime_like(values.dtype) else np.dtype("O")
return map_blocks(
_round_through_series_or_index, values, name, freq=freq, dtype=dtype
)
else:
return _round_through_series_or_index(values, name, freq)
def _strftime_through_cftimeindex(values, date_format):
"""Coerce an array of cftime-like values to a CFTimeIndex
and access requested datetime component
"""
from ..coding.cftimeindex import CFTimeIndex
values_as_cftimeindex = CFTimeIndex(values.ravel())
field_values = values_as_cftimeindex.strftime(date_format)
return field_values.values.reshape(values.shape)
def _strftime_through_series(values, date_format):
"""Coerce an array of datetime-like values to a pandas Series and
apply string formatting
"""
values_as_series = pd.Series(values.ravel())
strs = values_as_series.dt.strftime(date_format)
return strs.values.reshape(values.shape)
def _strftime(values, date_format):
if is_np_datetime_like(values.dtype):
access_method = _strftime_through_series
else:
access_method = _strftime_through_cftimeindex
if isinstance(values, dask_array_type):
from dask.array import map_blocks
return map_blocks(access_method, values, date_format)
else:
return access_method(values, date_format)
class Properties:
def __init__(self, obj):
self._obj = obj
def _tslib_field_accessor( # type: ignore
name: str, docstring: str = None, dtype: np.dtype = None
):
def f(self, dtype=dtype):
if dtype is None:
dtype = self._obj.dtype
obj_type = type(self._obj)
result = _get_date_field(self._obj.data, name, dtype)
return obj_type(
result, name=name, coords=self._obj.coords, dims=self._obj.dims
)
f.__name__ = name
f.__doc__ = docstring
return property(f)
def _tslib_round_accessor(self, name, freq):
obj_type = type(self._obj)
result = _round_field(self._obj.data, name, freq)
return obj_type(result, name=name, coords=self._obj.coords, dims=self._obj.dims)
def floor(self, freq):
"""
Round timestamps downward to specified frequency resolution.
Parameters
----------
freq : str
a freq string indicating the rounding resolution e.g. "D" for daily resolution
Returns
-------
floor-ed timestamps : same type as values
Array-like of datetime fields accessed for each element in values
"""
return self._tslib_round_accessor("floor", freq)
def ceil(self, freq):
"""
Round timestamps upward to specified frequency resolution.
Parameters
----------
freq : str
a freq string indicating the rounding resolution e.g. "D" for daily resolution
Returns
-------
ceil-ed timestamps : same type as values
Array-like of datetime fields accessed for each element in values
"""
return self._tslib_round_accessor("ceil", freq)
def round(self, freq):
"""
Round timestamps to specified frequency resolution.
Parameters
----------
freq : str
a freq string indicating the rounding resolution e.g. "D" for daily resolution
Returns
-------
rounded timestamps : same type as values
Array-like of datetime fields accessed for each element in values
"""
return self._tslib_round_accessor("round", freq)
class DatetimeAccessor(Properties):
"""Access datetime fields for DataArrays with datetime-like dtypes.
Fields can be accessed through the `.dt` attribute
for applicable DataArrays.
Examples
---------
>>> import xarray as xr
>>> import pandas as pd
>>> dates = pd.date_range(start="2000/01/01", freq="D", periods=10)
>>> ts = xr.DataArray(dates, dims=("time"))
>>> ts
<xarray.DataArray (time: 10)>
array(['2000-01-01T00:00:00.000000000', '2000-01-02T00:00:00.000000000',
'2000-01-03T00:00:00.000000000', '2000-01-04T00:00:00.000000000',
'2000-01-05T00:00:00.000000000', '2000-01-06T00:00:00.000000000',
'2000-01-07T00:00:00.000000000', '2000-01-08T00:00:00.000000000',
'2000-01-09T00:00:00.000000000', '2000-01-10T00:00:00.000000000'],
dtype='datetime64[ns]')
Coordinates:
* time (time) datetime64[ns] 2000-01-01 2000-01-02 ... 2000-01-10
>>> ts.dt
<xarray.core.accessor_dt.DatetimeAccessor object at 0x118b54d68>
>>> ts.dt.dayofyear
<xarray.DataArray 'dayofyear' (time: 10)>
array([ 1, 2, 3, 4, 5, 6, 7, 8, 9, 10])
Coordinates:
* time (time) datetime64[ns] 2000-01-01 2000-01-02 ... 2000-01-10
>>> ts.dt.quarter
<xarray.DataArray 'quarter' (time: 10)>
array([1, 1, 1, 1, 1, 1, 1, 1, 1, 1])
Coordinates:
* time (time) datetime64[ns] 2000-01-01 2000-01-02 ... 2000-01-10
"""
def strftime(self, date_format):
'''
Return an array of formatted strings specified by date_format, which
supports the same string format as the python standard library. Details
of the string format can be found in `python string format doc
<https://docs.python.org/3/library/datetime.html#strftime-strptime-behavior>`__
Parameters
----------
date_format : str
date format string (e.g. "%Y-%m-%d")
Returns
-------
formatted strings : same type as values
Array-like of strings formatted for each element in values
Examples
--------
>>> rng = xr.Dataset({"time": datetime.datetime(2000, 1, 1)})
>>> rng["time"].dt.strftime("%B %d, %Y, %r")
<xarray.DataArray 'strftime' ()>
        array('January 01, 2000, 12:00:00 AM', dtype=object)
        '''
obj_type = type(self._obj)
result = _strftime(self._obj.data, date_format)
return obj_type(
result, name="strftime", coords=self._obj.coords, dims=self._obj.dims
)
year = Properties._tslib_field_accessor(
"year", "The year of the datetime", np.int64
)
month = Properties._tslib_field_accessor(
"month", "The month as January=1, December=12", np.int64
)
day = Properties._tslib_field_accessor("day", "The days of the datetime", np.int64)
hour = Properties._tslib_field_accessor(
"hour", "The hours of the datetime", np.int64
)
minute = Properties._tslib_field_accessor(
"minute", "The minutes of the datetime", np.int64
)
second = Properties._tslib_field_accessor(
"second", "The seconds of the datetime", np.int64
)
microsecond = Properties._tslib_field_accessor(
"microsecond", "The microseconds of the datetime", np.int64
)
nanosecond = Properties._tslib_field_accessor(
"nanosecond", "The nanoseconds of the datetime", np.int64
)
weekofyear = Properties._tslib_field_accessor(
"weekofyear", "The week ordinal of the year", np.int64
)
week = weekofyear
dayofweek = Properties._tslib_field_accessor(
"dayofweek", "The day of the week with Monday=0, Sunday=6", np.int64
)
weekday = dayofweek
weekday_name = Properties._tslib_field_accessor(
"weekday_name", "The name of day in a week", object
)
dayofyear = Properties._tslib_field_accessor(
"dayofyear", "The ordinal day of the year", np.int64
)
quarter = Properties._tslib_field_accessor("quarter", "The quarter of the date")
days_in_month = Properties._tslib_field_accessor(
"days_in_month", "The number of days in the month", np.int64
)
daysinmonth = days_in_month
season = Properties._tslib_field_accessor("season", "Season of the year", object)
time = Properties._tslib_field_accessor(
"time", "Timestamps corresponding to datetimes", object
)
is_month_start = Properties._tslib_field_accessor(
"is_month_start",
"Indicates whether the date is the first day of the month.",
bool,
)
is_month_end = Properties._tslib_field_accessor(
"is_month_end", "Indicates whether the date is the last day of the month.", bool
)
is_quarter_start = Properties._tslib_field_accessor(
"is_quarter_start",
"Indicator for whether the date is the first day of a quarter.",
bool,
)
is_quarter_end = Properties._tslib_field_accessor(
"is_quarter_end",
"Indicator for whether the date is the last day of a quarter.",
bool,
)
is_year_start = Properties._tslib_field_accessor(
"is_year_start", "Indicate whether the date is the first day of a year.", bool
)
is_year_end = Properties._tslib_field_accessor(
"is_year_end", "Indicate whether the date is the last day of the year.", bool
)
is_leap_year = Properties._tslib_field_accessor(
"is_leap_year", "Boolean indicator if the date belongs to a leap year.", bool
)
class TimedeltaAccessor(Properties):
"""Access Timedelta fields for DataArrays with Timedelta-like dtypes.
Fields can be accessed through the `.dt` attribute for applicable DataArrays.
Examples
--------
>>> import pandas as pd
>>> import xarray as xr
>>> dates = pd.timedelta_range(start="1 day", freq="6H", periods=20)
>>> ts = xr.DataArray(dates, dims=("time"))
>>> ts
<xarray.DataArray (time: 20)>
array([ 86400000000000, 108000000000000, 129600000000000, 151200000000000,
172800000000000, 194400000000000, 216000000000000, 237600000000000,
259200000000000, 280800000000000, 302400000000000, 324000000000000,
345600000000000, 367200000000000, 388800000000000, 410400000000000,
432000000000000, 453600000000000, 475200000000000, 496800000000000],
dtype='timedelta64[ns]')
Coordinates:
* time (time) timedelta64[ns] 1 days 00:00:00 ... 5 days 18:00:00
>>> ts.dt
<xarray.core.accessor_dt.TimedeltaAccessor object at 0x109a27d68>
>>> ts.dt.days
<xarray.DataArray 'days' (time: 20)>
array([1, 1, 1, 1, 2, 2, 2, 2, 3, 3, 3, 3, 4, 4, 4, 4, 5, 5, 5, 5])
Coordinates:
* time (time) timedelta64[ns] 1 days 00:00:00 ... 5 days 18:00:00
>>> ts.dt.microseconds
<xarray.DataArray 'microseconds' (time: 20)>
array([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0])
Coordinates:
* time (time) timedelta64[ns] 1 days 00:00:00 ... 5 days 18:00:00
>>> ts.dt.seconds
<xarray.DataArray 'seconds' (time: 20)>
array([ 0, 21600, 43200, 64800, 0, 21600, 43200, 64800, 0,
21600, 43200, 64800, 0, 21600, 43200, 64800, 0, 21600,
43200, 64800])
Coordinates:
* time (time) timedelta64[ns] 1 days 00:00:00 ... 5 days 18:00:00
"""
days = Properties._tslib_field_accessor(
"days", "Number of days for each element.", np.int64
)
seconds = Properties._tslib_field_accessor(
"seconds",
"Number of seconds (>= 0 and less than 1 day) for each element.",
np.int64,
)
microseconds = Properties._tslib_field_accessor(
"microseconds",
"Number of microseconds (>= 0 and less than 1 second) for each element.",
np.int64,
)
nanoseconds = Properties._tslib_field_accessor(
"nanoseconds",
"Number of nanoseconds (>= 0 and less than 1 microsecond) for each element.",
np.int64,
)
class CombinedDatetimelikeAccessor(DatetimeAccessor, TimedeltaAccessor):
def __new__(cls, obj):
        # CombinedDatetimelikeAccessor isn't really instantiated. Instead
# we need to choose which parent (datetime or timedelta) is
# appropriate. Since we're checking the dtypes anyway, we'll just
# do all the validation here.
if not _contains_datetime_like_objects(obj):
raise TypeError(
"'.dt' accessor only available for "
"DataArray with datetime64 timedelta64 dtype or "
"for arrays containing cftime datetime "
"objects."
)
if is_np_timedelta_like(obj.dtype):
return TimedeltaAccessor(obj)
else:
return DatetimeAccessor(obj)
|
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# coding: utf-8
# pylint: disable=invalid-name, protected-access, too-many-branches, global-statement
"""Function configuration API."""
from __future__ import absolute_import
import ctypes
import traceback
from numbers import Number, Integral
from ..base import _LIB, get_last_ffi_error, py2cerror
from ..base import c_str, string_types
from ..node_generic import convert_to_node, NodeGeneric
from ..runtime_ctypes import TVMType, TVMByteArray, TVMContext
from . import ndarray as _nd
from .ndarray import NDArrayBase, _make_array
from .types import TVMValue, TypeCode
from .types import TVMPackedCFunc, TVMCFuncFinalizer
from .types import RETURN_SWITCH, C_TO_PY_ARG_SWITCH, _wrap_arg_func, _ctx_to_int64
from .node import NodeBase
from . import node as _node
FunctionHandle = ctypes.c_void_p
ModuleHandle = ctypes.c_void_p
ObjectHandle = ctypes.c_void_p
TVMRetValueHandle = ctypes.c_void_p
def _ctypes_free_resource(rhandle):
    """Callback to free resources when the handle is no longer needed."""
pyobj = ctypes.cast(rhandle, ctypes.py_object)
ctypes.pythonapi.Py_DecRef(pyobj)
# Global callback that is always alive
TVM_FREE_PYOBJ = TVMCFuncFinalizer(_ctypes_free_resource)
ctypes.pythonapi.Py_IncRef(ctypes.py_object(TVM_FREE_PYOBJ))
def convert_to_tvm_func(pyfunc):
"""Convert a python function to TVM function
Parameters
----------
pyfunc : python function
The python function to be converted.
Returns
-------
tvmfunc: tvm.nd.Function
The converted tvm function.
"""
local_pyfunc = pyfunc
def cfun(args, type_codes, num_args, ret, _):
""" ctypes function """
num_args = num_args.value if isinstance(num_args, ctypes.c_int) else num_args
pyargs = (C_TO_PY_ARG_SWITCH[type_codes[i]](args[i]) for i in range(num_args))
# pylint: disable=broad-except
try:
rv = local_pyfunc(*pyargs)
except Exception:
msg = traceback.format_exc()
msg = py2cerror(msg)
_LIB.TVMAPISetLastError(c_str(msg))
return -1
if rv is not None:
if isinstance(rv, tuple):
raise ValueError("PackedFunction can only support one return value")
temp_args = []
values, tcodes, _ = _make_tvm_args((rv,), temp_args)
if not isinstance(ret, TVMRetValueHandle):
ret = TVMRetValueHandle(ret)
if _LIB.TVMCFuncSetReturn(ret, values, tcodes, ctypes.c_int(1)) != 0:
raise get_last_ffi_error()
_ = temp_args
_ = rv
return 0
handle = FunctionHandle()
f = TVMPackedCFunc(cfun)
# NOTE: We will need to use python-api to increase ref count of the f
# TVM_FREE_PYOBJ will be called after it is no longer needed.
pyobj = ctypes.py_object(f)
ctypes.pythonapi.Py_IncRef(pyobj)
if _LIB.TVMFuncCreateFromCFunc(
f, pyobj, TVM_FREE_PYOBJ, ctypes.byref(handle)) != 0:
raise get_last_ffi_error()
return _CLASS_FUNCTION(handle, False)
def _make_tvm_args(args, temp_args):
"""Pack arguments into c args tvm call accept"""
num_args = len(args)
values = (TVMValue * num_args)()
type_codes = (ctypes.c_int * num_args)()
for i, arg in enumerate(args):
if isinstance(arg, NodeBase):
values[i].v_handle = arg.handle
type_codes[i] = TypeCode.NODE_HANDLE
elif arg is None:
values[i].v_handle = None
type_codes[i] = TypeCode.NULL
elif isinstance(arg, NDArrayBase):
values[i].v_handle = ctypes.cast(arg.handle, ctypes.c_void_p)
type_codes[i] = (TypeCode.NDARRAY_CONTAINER
if not arg.is_view else TypeCode.ARRAY_HANDLE)
elif isinstance(arg, _nd._TVM_COMPATS):
values[i].v_handle = ctypes.c_void_p(arg._tvm_handle)
type_codes[i] = arg.__class__._tvm_tcode
elif isinstance(arg, Integral):
values[i].v_int64 = arg
type_codes[i] = TypeCode.INT
elif isinstance(arg, Number):
values[i].v_float64 = arg
type_codes[i] = TypeCode.FLOAT
elif isinstance(arg, TVMType):
values[i].v_str = c_str(str(arg))
type_codes[i] = TypeCode.STR
elif isinstance(arg, TVMContext):
values[i].v_int64 = _ctx_to_int64(arg)
type_codes[i] = TypeCode.TVM_CONTEXT
elif isinstance(arg, bytearray):
arr = TVMByteArray()
arr.data = ctypes.cast(
(ctypes.c_byte * len(arg)).from_buffer(arg),
ctypes.POINTER(ctypes.c_byte))
arr.size = len(arg)
values[i].v_handle = ctypes.c_void_p(ctypes.addressof(arr))
temp_args.append(arr)
type_codes[i] = TypeCode.BYTES
elif isinstance(arg, string_types):
values[i].v_str = c_str(arg)
type_codes[i] = TypeCode.STR
elif isinstance(arg, (list, tuple, dict, NodeGeneric)):
arg = convert_to_node(arg)
values[i].v_handle = arg.handle
type_codes[i] = TypeCode.NODE_HANDLE
temp_args.append(arg)
elif isinstance(arg, _CLASS_MODULE):
values[i].v_handle = arg.handle
type_codes[i] = TypeCode.MODULE_HANDLE
elif isinstance(arg, FunctionBase):
values[i].v_handle = arg.handle
type_codes[i] = TypeCode.FUNC_HANDLE
elif isinstance(arg, ctypes.c_void_p):
values[i].v_handle = arg
type_codes[i] = TypeCode.HANDLE
elif callable(arg):
arg = convert_to_tvm_func(arg)
values[i].v_handle = arg.handle
type_codes[i] = TypeCode.FUNC_HANDLE
temp_args.append(arg)
elif isinstance(arg, _CLASS_OBJECT):
values[i].v_handle = arg.handle
type_codes[i] = TypeCode.OBJECT_CELL
else:
raise TypeError("Don't know how to handle type %s" % type(arg))
return values, type_codes, num_args
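# Illustrative sketch (assumes the TVM shared library is loaded): packing a few
# plain Python values yields parallel TVMValue / type-code arrays, e.g.
#   temp = []
#   values, tcodes, n = _make_tvm_args((1, 2.5, "conv2d"), temp)
#   # n == 3; tcodes hold TypeCode.INT, TypeCode.FLOAT, TypeCode.STR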
class FunctionBase(object):
"""Function base."""
__slots__ = ["handle", "is_global"]
# pylint: disable=no-member
def __init__(self, handle, is_global):
"""Initialize the function with handle
Parameters
----------
handle : FunctionHandle
the handle to the underlying function.
is_global : bool
Whether this is a global function in python
"""
self.handle = handle
self.is_global = is_global
def __del__(self):
if not self.is_global and _LIB is not None:
if _LIB.TVMFuncFree(self.handle) != 0:
raise get_last_ffi_error()
def __call__(self, *args):
"""Call the function with positional arguments
args : list
The positional arguments to the function call.
"""
temp_args = []
values, tcodes, num_args = _make_tvm_args(args, temp_args)
ret_val = TVMValue()
ret_tcode = ctypes.c_int()
if _LIB.TVMFuncCall(
self.handle, values, tcodes, ctypes.c_int(num_args),
ctypes.byref(ret_val), ctypes.byref(ret_tcode)) != 0:
raise get_last_ffi_error()
_ = temp_args
_ = args
return RETURN_SWITCH[ret_tcode.value](ret_val)
def __init_handle_by_constructor__(fconstructor, args):
"""Initialize handle by constructor"""
temp_args = []
values, tcodes, num_args = _make_tvm_args(args, temp_args)
ret_val = TVMValue()
ret_tcode = ctypes.c_int()
if _LIB.TVMFuncCall(
fconstructor.handle, values, tcodes, ctypes.c_int(num_args),
ctypes.byref(ret_val), ctypes.byref(ret_tcode)) != 0:
raise get_last_ffi_error()
_ = temp_args
_ = args
assert ret_tcode.value == TypeCode.NODE_HANDLE
handle = ret_val.v_handle
return handle
def _return_module(x):
"""Return function"""
handle = x.v_handle
if not isinstance(handle, ModuleHandle):
handle = ModuleHandle(handle)
return _CLASS_MODULE(handle)
def _handle_return_func(x):
"""Return function"""
handle = x.v_handle
if not isinstance(handle, FunctionHandle):
handle = FunctionHandle(handle)
return _CLASS_FUNCTION(handle, False)
# setup return handle for function type
_node.__init_by_constructor__ = __init_handle_by_constructor__
RETURN_SWITCH[TypeCode.FUNC_HANDLE] = _handle_return_func
RETURN_SWITCH[TypeCode.MODULE_HANDLE] = _return_module
RETURN_SWITCH[TypeCode.NDARRAY_CONTAINER] = lambda x: _make_array(x.v_handle, False, True)
C_TO_PY_ARG_SWITCH[TypeCode.FUNC_HANDLE] = _wrap_arg_func(
_handle_return_func, TypeCode.FUNC_HANDLE)
C_TO_PY_ARG_SWITCH[TypeCode.MODULE_HANDLE] = _wrap_arg_func(
_return_module, TypeCode.MODULE_HANDLE)
C_TO_PY_ARG_SWITCH[TypeCode.ARRAY_HANDLE] = lambda x: _make_array(x.v_handle, True, False)
C_TO_PY_ARG_SWITCH[TypeCode.NDARRAY_CONTAINER] = lambda x: _make_array(x.v_handle, False, True)
_CLASS_MODULE = None
_CLASS_FUNCTION = None
_CLASS_OBJECT = None
def _set_class_module(module_class):
"""Initialize the module."""
global _CLASS_MODULE
_CLASS_MODULE = module_class
def _set_class_function(func_class):
global _CLASS_FUNCTION
_CLASS_FUNCTION = func_class
def _set_class_object(obj_class):
global _CLASS_OBJECT
_CLASS_OBJECT = obj_class
|
|
# stdlib
import urlparse
import time
import re
import pprint
import urllib
# project
from checks import AgentCheck
# 3rd party
import simplejson as json
import requests
EVENT_TYPE = SOURCE_TYPE_NAME = 'rabbitmq'
QUEUE_TYPE = 'queues'
NODE_TYPE = 'nodes'
MAX_DETAILED_QUEUES = 200
MAX_DETAILED_NODES = 100
ALERT_THRESHOLD = 0.9 # Post an event in the stream when the number of queues or nodes to collect is above 90% of the limit:
QUEUE_ATTRIBUTES = [
# Path, Name
('active_consumers', 'active_consumers'),
('consumers', 'consumers'),
('consumer_utilisation', 'consumer_utilisation'),
('memory', 'memory'),
('messages', 'messages'),
('messages_details/rate', 'messages.rate'),
('messages_ready', 'messages_ready'),
('messages_ready_details/rate', 'messages_ready.rate'),
('messages_unacknowledged', 'messages_unacknowledged'),
('messages_unacknowledged_details/rate', 'messages_unacknowledged.rate'),
('message_stats/ack', 'messages.ack.count'),
('message_stats/ack_details/rate', 'messages.ack.rate'),
('message_stats/deliver', 'messages.deliver.count'),
('message_stats/deliver_details/rate', 'messages.deliver.rate'),
('message_stats/deliver_get', 'messages.deliver_get.count'),
('message_stats/deliver_get_details/rate', 'messages.deliver_get.rate'),
('message_stats/publish', 'messages.publish.count'),
('message_stats/publish_details/rate', 'messages.publish.rate'),
('message_stats/redeliver', 'messages.redeliver.count'),
('message_stats/redeliver_details/rate', 'messages.redeliver.rate'),
]
NODE_ATTRIBUTES = [
('fd_used', 'fd_used'),
('mem_used', 'mem_used'),
('run_queue', 'run_queue'),
('sockets_used', 'sockets_used'),
]
ATTRIBUTES = {
QUEUE_TYPE: QUEUE_ATTRIBUTES,
NODE_TYPE: NODE_ATTRIBUTES,
}
TAGS_MAP = {
QUEUE_TYPE: {
'node':'node',
'name':'queue',
'vhost':'vhost',
'policy':'policy',
},
NODE_TYPE: {
'name':'node',
}
}
METRIC_SUFFIX = {
QUEUE_TYPE: "queue",
NODE_TYPE: "node",
}
class RabbitMQ(AgentCheck):
"""This check is for gathering statistics from the RabbitMQ
Management Plugin (http://www.rabbitmq.com/management.html)
"""
def __init__(self, name, init_config, agentConfig, instances=None):
AgentCheck.__init__(self, name, init_config, agentConfig, instances)
self.already_alerted = []
def _get_config(self, instance):
        # make sure 'rabbitmq_api_url' is present
if 'rabbitmq_api_url' not in instance:
raise Exception('Missing "rabbitmq_api_url" in RabbitMQ config.')
# get parameters
base_url = instance['rabbitmq_api_url']
if not base_url.endswith('/'):
base_url += '/'
username = instance.get('rabbitmq_user', 'guest')
password = instance.get('rabbitmq_pass', 'guest')
# Limit of queues/nodes to collect metrics from
max_detailed = {
QUEUE_TYPE: int(instance.get('max_detailed_queues', MAX_DETAILED_QUEUES)),
NODE_TYPE: int(instance.get('max_detailed_nodes', MAX_DETAILED_NODES)),
}
# List of queues/nodes to collect metrics from
specified = {
QUEUE_TYPE: {
'explicit': instance.get('queues', []),
'regexes': instance.get('queues_regexes', []),
},
NODE_TYPE: {
'explicit': instance.get('nodes', []),
'regexes': instance.get('nodes_regexes', []),
},
}
for object_type, filters in specified.iteritems():
for filter_type, filter_objects in filters.iteritems():
if type(filter_objects) != list:
raise TypeError("{0} / {0}_regexes parameter must be a list".format(object_type))
auth = (username, password)
return base_url, max_detailed, specified, auth
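        # Illustrative instance configuration (hypothetical values) accepted by
        # _get_config, mirroring what rabbitmq.yaml would contain:
        #
        #   instance = {
        #       'rabbitmq_api_url': 'http://localhost:15672/api/',
        #       'rabbitmq_user': 'guest',
        #       'rabbitmq_pass': 'guest',
        #       'queues': ['celery'],
        #       'queues_regexes': ['tmp\..*'],
        #       'max_detailed_queues': 200,
        #   }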
def check(self, instance):
base_url, max_detailed, specified, auth = self._get_config(instance)
# Generate metrics from the status API.
self.get_stats(instance, base_url, QUEUE_TYPE, max_detailed[QUEUE_TYPE], specified[QUEUE_TYPE], auth=auth)
self.get_stats(instance, base_url, NODE_TYPE, max_detailed[NODE_TYPE], specified[NODE_TYPE], auth=auth)
# Generate a service check from the aliveness API.
vhosts = instance.get('vhosts')
self._check_aliveness(base_url, vhosts, auth=auth)
def _get_data(self, url, auth=None):
try:
r = requests.get(url, auth=auth)
r.raise_for_status()
data = r.json()
except requests.exceptions.HTTPError as e:
raise Exception('Cannot open RabbitMQ API url: %s %s' % (url, str(e)))
except ValueError, e:
raise Exception('Cannot parse JSON response from API url: %s %s' % (url, str(e)))
return data
def get_stats(self, instance, base_url, object_type, max_detailed, filters, auth=None):
"""
instance: the check instance
base_url: the url of the rabbitmq management api (e.g. http://localhost:15672/api)
object_type: either QUEUE_TYPE or NODE_TYPE
max_detailed: the limit of objects to collect for this type
filters: explicit or regexes filters of specified queues or nodes (specified in the yaml file)
"""
data = self._get_data(urlparse.urljoin(base_url, object_type), auth=auth)
explicit_filters = list(filters['explicit']) # Make a copy of this list as we will remove items from it at each iteration
regex_filters = filters['regexes']
""" data is a list of nodes or queues:
data = [
{'status': 'running', 'node': 'rabbit@host', 'name': 'queue1', 'consumers': 0, 'vhost': '/', 'backing_queue_status': {'q1': 0, 'q3': 0, 'q2': 0, 'q4': 0, 'avg_ack_egress_rate': 0.0, 'ram_msg_count': 0, 'ram_ack_count': 0, 'len': 0, 'persistent_count': 0, 'target_ram_count': 'infinity', 'next_seq_id': 0, 'delta': ['delta', 'undefined', 0, 'undefined'], 'pending_acks': 0, 'avg_ack_ingress_rate': 0.0, 'avg_egress_rate': 0.0, 'avg_ingress_rate': 0.0}, 'durable': True, 'idle_since': '2013-10-03 13:38:18', 'exclusive_consumer_tag': '', 'arguments': {}, 'memory': 10956, 'policy': '', 'auto_delete': False},
{'status': 'running', 'node': 'rabbit@host, 'name': 'queue10', 'consumers': 0, 'vhost': '/', 'backing_queue_status': {'q1': 0, 'q3': 0, 'q2': 0, 'q4': 0, 'avg_ack_egress_rate': 0.0, 'ram_msg_count': 0, 'ram_ack_count': 0, 'len': 0, 'persistent_count': 0, 'target_ram_count': 'infinity', 'next_seq_id': 0, 'delta': ['delta', 'undefined', 0, 'undefined'], 'pending_acks': 0, 'avg_ack_ingress_rate': 0.0, 'avg_egress_rate': 0.0, 'avg_ingress_rate': 0.0}, 'durable': True, 'idle_since': '2013-10-03 13:38:18', 'exclusive_consumer_tag': '', 'arguments': {}, 'memory': 10956, 'policy': '', 'auto_delete': False},
{'status': 'running', 'node': 'rabbit@host', 'name': 'queue11', 'consumers': 0, 'vhost': '/', 'backing_queue_status': {'q1': 0, 'q3': 0, 'q2': 0, 'q4': 0, 'avg_ack_egress_rate': 0.0, 'ram_msg_count': 0, 'ram_ack_count': 0, 'len': 0, 'persistent_count': 0, 'target_ram_count': 'infinity', 'next_seq_id': 0, 'delta': ['delta', 'undefined', 0, 'undefined'], 'pending_acks': 0, 'avg_ack_ingress_rate': 0.0, 'avg_egress_rate': 0.0, 'avg_ingress_rate': 0.0}, 'durable': True, 'idle_since': '2013-10-03 13:38:18', 'exclusive_consumer_tag': '', 'arguments': {}, 'memory': 10956, 'policy': '', 'auto_delete': False},
...
]
"""
if len(explicit_filters) > max_detailed:
raise Exception("The maximum number of %s you can specify is %d." % (object_type, max_detailed))
if explicit_filters or regex_filters: # a list of queues/nodes is specified. We process only those
matching_lines = []
for data_line in data:
name = data_line.get("name")
if name in explicit_filters:
matching_lines.append(data_line)
explicit_filters.remove(name)
continue
match_found = False
for p in regex_filters:
if re.search(p, name):
matching_lines.append(data_line)
match_found = True
break
if match_found: continue
# Absolute names work only for queues
if object_type != QUEUE_TYPE: continue
absolute_name = '%s/%s' % (data_line.get("vhost"), name)
if absolute_name in explicit_filters:
matching_lines.append(data_line)
                    explicit_filters.remove(absolute_name)
continue
for p in regex_filters:
if re.search(p, absolute_name):
matching_lines.append(data_line)
match_found = True
break
if match_found: continue
data = matching_lines
# if no filters are specified, check everything according to the limits
if len(data) > ALERT_THRESHOLD * max_detailed:
# Post a message on the dogweb stream to warn
self.alert(base_url, max_detailed, len(data), object_type)
if len(data) > max_detailed:
# Display a warning in the info page
            self.warning("Too many %s to fetch. You must choose the %s you are interested in by editing the rabbitmq.yaml configuration file or get in touch with Datadog Support" % (object_type, object_type))
for data_line in data[:max_detailed]:
# We truncate the list of nodes/queues if it's above the limit
self._get_metrics(data_line, object_type)
def _get_metrics(self, data, object_type):
tags = []
tag_list = TAGS_MAP[object_type]
for t in tag_list.keys():
tag = data.get(t, None)
if tag is not None:
tags.append('rabbitmq_%s:%s' % (tag_list[t], tag))
for attribute, metric_name in ATTRIBUTES[object_type]:
# Walk down through the data path, e.g. foo/bar => d['foo']['bar']
root = data
keys = attribute.split('/')
for path in keys[:-1]:
root = root.get(path, {})
value = root.get(keys[-1], None)
if value is not None:
try:
self.gauge('rabbitmq.%s.%s' % (METRIC_SUFFIX[object_type], metric_name), float(value), tags=tags)
except ValueError:
self.log.debug("Caught ValueError for %s %s = %s with tags: %s" % (METRIC_SUFFIX[object_type], attribute, value, tags))
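        # Illustrative walk of a nested attribute path (not executed): with
        #   data = {'message_stats': {'ack_details': {'rate': 0.5}}}
        # the path 'message_stats/ack_details/rate' resolves to
        #   data['message_stats']['ack_details']['rate'] -> 0.5
        # and is reported as the gauge rabbitmq.queue.messages.ack.rate.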
def alert(self, base_url, max_detailed, size, object_type):
key = "%s%s" % (base_url, object_type)
if key in self.already_alerted:
# We have already posted an event
return
self.already_alerted.append(key)
        title = "RabbitMQ integration is approaching the limit on the number of %s that can be collected from %s" % (object_type, self.hostname)
msg = """%s %s are present. The limit is %s.
Please get in touch with Datadog support to increase the limit.""" % (size, object_type, max_detailed)
event = {
"timestamp": int(time.time()),
"event_type": EVENT_TYPE,
"msg_title": title,
"msg_text": msg,
"alert_type": 'warning',
"source_type_name": SOURCE_TYPE_NAME,
"host": self.hostname,
"tags": ["base_url:%s" % base_url, "host:%s" % self.hostname],
"event_object": "rabbitmq.limit.%s" % object_type,
}
self.event(event)
def _check_aliveness(self, base_url, vhosts=None, auth=None):
""" Check the aliveness API against all or a subset of vhosts. The API
will return {"status": "ok"} and a 200 response code in the case
that the check passes.
In the case of an invalid response code or unparseable JSON the
service check will be CRITICAL.
"""
if not vhosts:
# Fetch a list of _all_ vhosts from the API.
vhosts_url = urlparse.urljoin(base_url, 'vhosts')
vhosts_response = self._get_data(vhosts_url, auth=auth)
vhosts = [v['name'] for v in vhosts_response]
for vhost in vhosts:
tags = ['vhost:%s' % vhost]
# We need to urlencode the vhost because it can be '/'.
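# (Added illustration) urllib.quote_plus('/') == '%2F', so for the default vhost the
# request goes to <base_url>/aliveness-test/%2F; per the docstring above, a healthy
# broker answers {"status": "ok"} with HTTP 200.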
path = u'aliveness-test/%s' % (urllib.quote_plus(vhost))
aliveness_url = urlparse.urljoin(base_url, path)
message = None
try:
aliveness_response = self._get_data(aliveness_url, auth=auth)
message = u"Response from aliveness API: %s" % aliveness_response
if aliveness_response.get('status') == 'ok':
status = AgentCheck.OK
else:
status = AgentCheck.CRITICAL
except Exception as e:
# Either we got a bad status code or unparseable JSON.
status = AgentCheck.CRITICAL
self.warning('Error when checking aliveness for vhost %s: %s'\
% (vhost, str(e)))
self.service_check('rabbitmq.aliveness', status, tags, message=message)
|
|
import theano
import theano.tensor as T
import mlbase.network as N
import h5py
import numpy as np
import mlbase.layers.activation as act
import mlbase.loaddata as l
from mlbase.layers import *
import mlbase.cost as cost
from skimage.measure import block_reduce
def test_generative():
import mlbase.cost as cost
import mlbase.layers.activation as act
import h5py
network = N.Network()
network.debug = True
network.setInput(N.RawInput((1, 28,28)))
network.append(N.Conv2d(feature_map_multiplier=32))
network.append(act.Relu())
network.append(N.Pooling())
network.append(N.Conv2d(feature_map_multiplier=2))
network.append(act.Relu())
network.append(N.Pooling())
network.append(UpConv2d(feature_map_multiplier=2))
network.append(act.Relu())
network.append(UpConv2d(feature_map_multiplier=32))
network.append(act.Relu())
#network.append(N.Flatten())
#network.append(N.FullConn(input_feature=1152, output_feature=1152*2))
#network.append(N.Relu())
#network.append(N.FullConn(input_feature=1152*2, output_feature=10))
#network.append(N.SoftMax())
network.costFunction = cost.ImageSSE
network.inputOutputType = (T.tensor4(), T.tensor4(),)
network.build()
f = h5py.File('/hdd/home/yueguan/workspace/data/mnist/mnist.hdf5', 'r')
trX = f['x_train'][:,:].reshape(-1, 1, 28, 28)
teX = f['x_test'][:,:].reshape(-1, 1, 28, 28)
trY = np.zeros((f['t_train'].shape[0], 10))
trY[np.arange(len(f['t_train'])), f['t_train']] = 1
teY = np.zeros((f['t_test'].shape[0], 10))
teY[np.arange(len(f['t_test'])), f['t_test']] = 1
for i in range(5000):
print(i)
#network.train(trX, trY)
#print(1 - np.mean(np.argmax(teY, axis=1) == np.argmax(network.predict(teX), axis=1)))
network.train(trX, trX)
print(np.sum((teX - network.predict(teX)) * (teX - network.predict(teX))))
# The following is the piece to load a saved model and predict an image.
#import mlbase.networkhelper as N
#import mlbase.cost as cost
#import theano.tensor as T
#import mlbase.loaddata as l
#from PIL import Image
#
#n = N.Network()
#n.loadFromFile('/hdd/home/yueguan/workspace/sesame-paste-noodle-dev/expdata/saved_model_LAST')
#n.costFunction = cost.ImageSSE
#n.inputOutputType = (T.tensor4(), T.tensor4(),)
#
#n.build(reload=True)
#
#trX, trY, teX, teY = l.load_mnist()
#result = n.predict(trX[0:1])
#result = (result > 0).astype(float)*255.0
#
#
#im = Image.fromarray(result[0][0])
#if im.mode != 'RGB':
# im = im.convert('RGB')
#
#im.save('result.jpg')
def test_resnet():
import mlbase.network as N
import h5py
network = N.Network()
network.debug = True
network.setInput(N.RawInput((1,28,28)))
network.append(N.Conv2d(feature_map_multiplier=32))
network.append(ResLayer())
network.append(ResLayer())
network.append(ResLayer())
network.append(ResLayer(increase_dim=True))
network.append(ResLayer())
network.append(ResLayer())
network.append(ResLayer())
network.append(ResLayer(increase_dim=True))
network.append(ResLayer())
network.append(ResLayer())
network.append(ResLayer())
network.append(N.GlobalPooling())
network.append(N.FullConn(input_feature=128, output_feature=10))
network.append(N.SoftMax())
network.build()
f = h5py.File('/hdd/home/yueguan/workspace/data/mnist/mnist.hdf5', 'r')
trX = f['x_train'][:,:].reshape(-1, 1, 28, 28)
teX = f['x_test'][:,:].reshape(-1, 1, 28, 28)
trY = np.zeros((f['t_train'].shape[0], 10))
trY[np.arange(len(f['t_train'])), f['t_train']] = 1
teY = np.zeros((f['t_test'].shape[0], 10))
teY[np.arange(len(f['t_test'])), f['t_test']] = 1
for i in range(5000):
print(i)
network.train(trX, trY)
print(1 - np.mean(np.argmax(teY, axis=1) == np.argmax(network.predict(teX), axis=1)))
def test_deeper():
import h5py
network = N.Network()
network.debug = True
network.setInput(N.RawInput((1,28,28)))
network.append(N.Conv2d(feature_map_multiplier=32))
for _ in range(3):
network.append(ResLayer())
network.append(ResLayer(increase_dim=True))
for _ in range(3):
network.append(ResLayer())
network.append(ResLayer(increase_dim=True))
for _ in range(3):
network.append(ResLayer())
network.append(N.GlobalPooling())
network.append(N.FullConn(input_feature=128, output_feature=10))
network.append(N.SoftMax())
network.build()
f = h5py.File('/hdd/home/yueguan/workspace/data/mnist/mnist.hdf5', 'r')
trX = f['x_train'][:,:].reshape(-1, 1, 28, 28)
teX = f['x_test'][:,:].reshape(-1, 1, 28, 28)
trY = np.zeros((f['t_train'].shape[0], 10))
trY[np.arange(len(f['t_train'])), f['t_train']] = 1
teY = np.zeros((f['t_test'].shape[0], 10))
teY[np.arange(len(f['t_test'])), f['t_test']] = 1
for i in range(5000):
print(i)
network.train(trX, trY)
print(1 - np.mean(np.argmax(teY, axis=1) == np.argmax(network.predict(teX), axis=1)))
def test_binaryinput():
network = N.Network()
network.debug = True
network.setInput(RawInput((1, 28,28)))
network.append(Conv2d(filter_size=(3,3), input_feature=1, output_feature=32))
network.append(Relu())
network.append(Pooling((2,2)))
network.append(Binarize())
network.append(Conv2d(filter_size=(3,3), input_feature=32, output_feature=64))
network.append(Relu())
network.append(Pooling((2,2)))
network.append(Binarize())
network.append(Conv2d(filter_size=(3,3), input_feature=64, output_feature=128))
network.append(Relu())
network.append(Pooling((2,2)))
network.append(Flatten())
network.append(FullConn(input_feature=1152, output_feature=1152*2))
network.append(Relu())
network.append(FullConn(input_feature=1152*2, output_feature=10))
network.append(SoftMax())
network.build()
f = h5py.File('/hdd/home/yueguan/workspace/data/mnist/mnist.hdf5', 'r')
trX = f['x_train'][:,:].reshape(-1, 1, 28, 28)
teX = f['x_test'][:,:].reshape(-1, 1, 28, 28)
trY = np.zeros((f['t_train'].shape[0], 10))
trY[np.arange(len(f['t_train'])), f['t_train']] = 1
teY = np.zeros((f['t_test'].shape[0], 10))
teY[np.arange(len(f['t_test'])), f['t_test']] = 1
for i in range(5000):
print(i)
network.train(trX, trY)
print(1 - np.mean(np.argmax(teY, axis=1) == np.argmax(network.predict(teX), axis=1)))
def test_binaryweight():
network = N.Network()
network.debug = True
network.setInput(RawInput((1, 28,28)))
network.append(Conv2d(feature_map_multiplier=32))
network.append(Relu())
network.append(Pooling((2,2)))
network.append(Binarize())
network.append(Conv2d(feature_map_multiplier=2))
network.append(Relu())
network.append(Pooling((2,2)))
network.append(Binarize())
network.append(BinaryConv2d(feature_map_multiplier=2))
network.append(Relu())
network.append(Pooling((2,2)))
network.append(Flatten())
network.append(FullConn(input_feature=1152, output_feature=1152*2))
network.append(Relu())
network.append(FullConn(input_feature=1152*2, output_feature=10))
network.append(SoftMax())
network.build()
f = h5py.File('/hdd/home/yueguan/workspace/data/mnist/mnist.hdf5', 'r')
trX = f['x_train'][:,:].reshape(-1, 1, 28, 28)
teX = f['x_test'][:,:].reshape(-1, 1, 28, 28)
trY = np.zeros((f['t_train'].shape[0], 10))
trY[np.arange(len(f['t_train'])), f['t_train']] = 1
teY = np.zeros((f['t_test'].shape[0], 10))
teY[np.arange(len(f['t_test'])), f['t_test']] = 1
for i in range(5000):
print(i)
network.train(trX, trY)
print(1 - np.mean(np.argmax(teY, axis=1) == np.argmax(network.predict(teX), axis=1)))
def test_unet():
n = N.Network()
def unet_dag():
x1 = DAGPlan.input()
y1 = Relu(Conv2d(Relu(Conv2d(x1))))
x2 = Pooling(y1)
y2 = Relu(Conv2d(Relu(Conv2d(x2))))
x3 = Pooling(y2)
y3 = Relu(Conv2d(Relu(Conv2d(x3))))
#x4 = y2 // conv.UpConv2d(y3)
x4 = CropConcat(y2, UpConv2d(y3))
y4 = Relu(Conv2d(Relu(Conv2d(x4))))
#x5 = y1 // conv.UpConv2d(y4)
x5 = CropConcat(y1, UpConv2d(y4))
y5 = Relu(Conv2d(Relu(Conv2d(x5))))
return y5
dagplan = unet_dag()
class UNet(Layer, metaclass=DAG,
dag=dagplan,
yaml_tag=u'!UNet',
type_name='UNet'):
pass
n.setInput(RawInput((1, 420//2, 580//2)))
n.append(Conv2d(feature_map_multiplier=4))
n.append(Relu())
n.append(UNet())
n.append(Conv2d(output_feature=1))
n.batchSize = 32
n.costFunction = cost.ImageDice
n.inputOutputType = (T.tensor4(), T.tensor4(),)
n.build()
trX, trY, teX = l.load_kaggle_ultrasound()
trX = block_reduce(trX, block_size=(1,1,2,2), func=np.mean)
trY = block_reduce(trY, block_size=(1,1,2,2), func=np.mean)
teX = block_reduce(teX, block_size=(1,1,2,2), func=np.mean)
trX = trX[:]/255.0
trY = trY[:]/255.0
teX = teX[:]/255.0
for i in range(5000):
print(i)
n.train(trX, trX[:,:,:208, :288])
#n.train(trX, trX)
#print(np.sum((teX - network.predict(teX)) * (teX - network.predict(teX))))
def test_seqlayer():
network = N.Network()
network.debug = True
class ConvNN(layer.Layer, metaclass=compose.SeqLayer,
seq=[Conv2d, act.Relu, pooling.Pooling],
yaml_tag=u'!ConvNN',
type_name='ConvNN'):
def __init__(self, feature_map_multiplier=1):
super().__init__()
self.bases[0] = Conv2d(feature_map_multiplier=feature_map_multiplier)
network.setInput(RawInput((1, 28,28)))
network.append(ConvNN(feature_map_multiplier=32))
network.append(ConvNN(feature_map_multiplier=2))
network.append(ConvNN(feature_map_multiplier=2))
network.append(reshape.Flatten())
network.append(fullconn.FullConn(input_feature=1152, output_feature=1152*2))
network.append(act.Relu())
network.append(fullconn.FullConn(input_feature=1152*2, output_feature=10))
network.append(output.SoftMax())
network.build()
trX, trY, teX, teY = l.load_mnist()
for i in range(5000):
print(i)
network.train(trX, trY)
print(1 - np.mean(np.argmax(teY, axis=1) == np.argmax(network.predict(teX), axis=1)))
def testload():
n = N.Network()
n.loadFromFile()
n.saveToFile('testmodel')
def test_maxout():
network = N.Network()
network.setInput(RawInput((1, 28,28)))
network.append(conv.Conv2d(filter_size=(3,3), feature_map_multiplier=128))
network.append(pooling.FeaturePooling(4))
network.append(pooling.Pooling((2,2)))
network.append(conv.Conv2d(filter_size=(3,3), feature_map_multiplier=8))
network.append(pooling.FeaturePooling(4))
network.append(pooling.Pooling((2,2)))
network.append(conv.Conv2d(filter_size=(3,3), feature_map_multiplier=8))
network.append(pooling.FeaturePooling(4))
network.append(pooling.GlobalPooling())
network.append(fullconn.FullConn(input_feature=128, output_feature=10))
network.append(output.SoftMax())
network.build()
trX, trY, teX, teY = l.load_mnist()
for i in range(5000):
print(i)
network.train(trX, trY)
print(1 - np.mean(np.argmax(teY, axis=1) == np.argmax(network.predict(teX), axis=1)))
def test_globalpooling():
network = N.Network()
network.debug = True
network.setInput(RawInput((1, 28,28)))
network.append(conv.Conv2d(filter_size=(3,3), feature_map_multiplier=32))
network.append(bn.BatchNormalization())
network.append(act.Relu())
network.append(pooling.Pooling((2,2)))
network.append(conv.Conv2d(filter_size=(3,3), feature_map_multiplier=2))
network.append(bn.BatchNormalization())
network.append(act.Relu())
network.append(pooling.Pooling((2,2)))
network.append(conv.Conv2d(filter_size=(3,3), feature_map_multiplier=2))
network.append(bn.BatchNormalization())
network.append(act.Relu())
network.append(pooling.GlobalPooling())
network.append(fullconn.FullConn(input_feature=128, output_feature=10))
network.append(output.SoftMax())
network.build()
f = h5py.File('/hdd/home/yueguan/workspace/data/mnist/mnist.hdf5', 'r')
trX = f['x_train'][:,:].reshape(-1, 1, 28, 28)
teX = f['x_test'][:,:].reshape(-1, 1, 28, 28)
trY = np.zeros((f['t_train'].shape[0], 10))
trY[np.arange(len(f['t_train'])), f['t_train']] = 1
teY = np.zeros((f['t_test'].shape[0], 10))
teY[np.arange(len(f['t_test'])), f['t_test']] = 1
for i in range(5000):
print(i)
network.train(trX, trY)
print(1 - np.mean(np.argmax(teY, axis=1) == np.argmax(network.predict(teX), axis=1)))
def test5():
network = N.Network()
network.debug = True
network.setInput(RawInput((1, 28,28)))
network.append(conv.Conv2d(filter_size=(3,3), feature_map_multiplier=32))
network.append(act.Relu())
network.append(pooling.Pooling((2,2)))
network.append(conv.Conv2d(filter_size=(3,3), feature_map_multiplier=2))
network.append(act.Relu())
network.append(pooling.Pooling((2,2)))
network.append(conv.Conv2d(filter_size=(3,3), feature_map_multiplier=2))
network.append(act.Relu())
network.append(pooling.Pooling((2,2)))
network.append(reshape.Flatten())
network.append(fullconn.FullConn(input_feature=1152, output_feature=1152*2))
network.append(act.Relu())
network.append(fullconn.FullConn(input_feature=1152*2, output_feature=10))
network.append(output.SoftMax())
network.build()
f = h5py.File('/hdd/home/yueguan/workspace/data/mnist/mnist.hdf5', 'r')
trX = f['x_train'][:,:].reshape(-1, 1, 28, 28)
teX = f['x_test'][:,:].reshape(-1, 1, 28, 28)
trY = np.zeros((f['t_train'].shape[0], 10))
trY[np.arange(len(f['t_train'])), f['t_train']] = 1
teY = np.zeros((f['t_test'].shape[0], 10))
teY[np.arange(len(f['t_test'])), f['t_test']] = 1
for i in range(5000):
print(i)
network.train(trX, trY)
print(1 - np.mean(np.argmax(teY, axis=1) == np.argmax(network.predict(teX), axis=1)))
def testbn():
network = N.Network()
network.debug = True
network.setSaveInterval(10)
network.setInput(RawInput((1, 28,28)))
network.append(conv.Conv2d(filter_size=(3,3), input_feature=1, output_feature=32))
network.append(N.BatchNormalization())
network.append(act.Relu())
network.append(pooling.Pooling((2,2)))
network.append(conv.Conv2d(filter_size=(3,3), input_feature=32, output_feature=64))
network.append(N.BatchNormalization())
network.append(act.Relu())
network.append(pooling.Pooling((2,2)))
network.append(conv.Conv2d(filter_size=(3,3), input_feature=64, output_feature=128))
network.append(N.BatchNormalization())
network.append(act.Relu())
network.append(pooling.Pooling((2,2)))
network.append(reshape.Flatten())
network.append(fullconn.FullConn(input_feature=1152, output_feature=1152*2))
network.append(act.Relu())
network.append(fullconn.FullConn(input_feature=1152*2, output_feature=10))
network.append(output.SoftMax())
network.build()
f = h5py.File('/hdd/home/yueguan/workspace/data/mnist/mnist.hdf5', 'r')
trX = f['x_train'][:,:].reshape(-1, 1, 28, 28)
teX = f['x_test'][:,:].reshape(-1, 1, 28, 28)
trY = np.zeros((f['t_train'].shape[0], 10))
trY[np.arange(len(f['t_train'])), f['t_train']] = 1
teY = np.zeros((f['t_test'].shape[0], 10))
teY[np.arange(len(f['t_test'])), f['t_test']] = 1
for i in range(5000):
print(i)
network.train(trX, trY)
print(1 - np.mean(np.argmax(teY, axis=1) == np.argmax(network.predict(teX), axis=1)))
def test():
network = N.Network()
network.debug = True
network.setInput(RawInput((28,28)))
network.append(conv.Conv2d(filter_size=(3,3), input_feature=1, output_feature=32))
network.append(act.Relu())
network.append(conv.Conv2d(filter_size=(2,2), input_feature=32, output_feature=32, subsample=(2,2),border='valid'))
network.append(conv.Conv2d(filter_size=(3,3), input_feature=32, output_feature=64))
network.append(act.Relu())
network.append(conv.Conv2d(filter_size=(2,2), input_feature=64, output_feature=64, subsample=(2,2),border='valid'))
network.append(conv.Conv2d(filter_size=(3,3), input_feature=64, output_feature=128))
network.append(act.Relu())
network.append(conv.Conv2d(filter_size=(2,2), input_feature=128, output_feature=128, subsample=(2,2),border='valid'))
network.append(reshape.Flatten())
network.append(fullconn.FullConn(input_feature=1152, output_feature=1152*2))
network.append(act.Relu())
network.append(fullconn.FullConn(input_feature=1152*2, output_feature=10))
network.append(output.SoftMax())
network.build()
print(network)
f = h5py.File('/hdd/home/yueguan/workspace/data/mnist/mnist.hdf5', 'r')
trX = f['x_train'][:,:].reshape(-1, 1, 28, 28)
teX = f['x_test'][:,:].reshape(-1, 1, 28, 28)
trY = np.zeros((f['t_train'].shape[0], 10))
trY[np.arange(len(f['t_train'])), f['t_train']] = 1
teY = np.zeros((f['t_test'].shape[0], 10))
teY[np.arange(len(f['t_test'])), f['t_test']] = 1
for i in range(5000):
print(i)
network.train(trX, trY)
print(1 - np.mean(np.argmax(teY, axis=1) == np.argmax(network.predict(teX), axis=1)))
def test4():
network = N.Network()
network.debug = True
network.setInput(RawInput((28,28)))
network.append(conv.Conv2d(filter_size=(3,3), input_feature=1, output_feature=32))
network.append(act.Relu())
network.append(conv.Conv2d(filter_size=(2,2), input_feature=32, output_feature=32, subsample=(2,2),border='valid'))
network.append(conv.Conv2d(filter_size=(3,3), input_feature=32, output_feature=64))
network.append(act.Relu())
network.append(conv.Conv2d(filter_size=(2,2), input_feature=64, output_feature=64, subsample=(2,2),border='valid'))
network.append(conv.Conv2d(filter_size=(3,3), input_feature=64, output_feature=128))
network.append(act.Relu())
network.append(conv.Conv2d(filter_size=(2,2), input_feature=128, output_feature=128, subsample=(2,2),border='valid'))
network.append(reshape.Flatten())
network.append(fullconn.FullConn(input_feature=1152, output_feature=1152*2))
network.append(act.Relu())
network.append(fullconn.FullConn(input_feature=1152*2, output_feature=10))
network.append(output.SoftMax())
network.build()
f = h5py.File('/hdd/home/yueguan/workspace/data/mnist/mnist.hdf5', 'r')
trX = f['x_train'][:,:].reshape(-1, 1, 28, 28)
teX = f['x_test'][:,:].reshape(-1, 1, 28, 28)
trY = np.zeros((f['t_train'].shape[0], 10))
trY[np.arange(len(f['t_train'])), f['t_train']] = 1
teY = np.zeros((f['t_test'].shape[0], 10))
teY[np.arange(len(f['t_test'])), f['t_test']] = 1
for i in range(5000):
print(i)
network.train(trX, trY)
print(1 - np.mean(np.argmax(teY, axis=1) == np.argmax(network.predict(teX), axis=1)))
# test3()
def test3():
network = N.Network()
network.debug = True
network.setInput(RawInput((28,28)))
network.append(conv.Conv2d(filter_size=(3,3), input_feature=1, output_feature=32))
network.append(act.Relu())
network.append(pooling.Pooling((2,2)))
network.append(conv.Conv2d(filter_size=(3,3), input_feature=32, output_feature=64))
network.append(act.Relu())
network.append(pooling.Pooling((2,2)))
network.append(conv.Conv2d(filter_size=(3,3), input_feature=64, output_feature=128))
network.append(act.Relu())
network.append(pooling.Pooling((2,2)))
network.append(reshape.Flatten())
network.append(fullconn.FullConn(input_feature=1152, output_feature=1152*2))
network.append(act.Relu())
network.append(fullconn.FullConn(input_feature=1152*2, output_feature=10))
network.append(output.SoftMax())
network.build()
f = h5py.File('/hdd/home/yueguan/workspace/data/mnist/mnist.hdf5', 'r')
trX = f['x_train'][:,:].reshape(-1, 1, 28, 28)
teX = f['x_test'][:,:].reshape(-1, 1, 28, 28)
trY = np.zeros((f['t_train'].shape[0], 10))
trY[np.arange(len(f['t_train'])), f['t_train']] = 1
teY = np.zeros((f['t_test'].shape[0], 10))
teY[np.arange(len(f['t_test'])), f['t_test']] = 1
for i in range(5000):
print(i)
network.train(trX, trY)
print(1 - np.mean(np.argmax(teY, axis=1) == np.argmax(network.predict(teX), axis=1)))
# test2()
def test2():
network = N.Network()
network.debug = True
#network.setInput(RawInput((1, 28,28)))
#network.append(conv.Conv2d(feature_map_multiplier=32))
#network.append(act.Relu())
#network.append(pooling.Pooling())
#network.append(conv.Conv2d(feature_map_multiplier=2))
#network.append(act.Relu())
#network.append(pooling.Pooling())
#network.append(conv.Conv2d(feature_map_multiplier=2))
#network.append(act.Relu())
#network.append(pooling.Pooling())
#network.append(reshape.Flatten())
#network.append(fullconn.FullConn(input_feature=1152, output_feature=1152*2))
#network.append(act.Relu())
#network.append(fullconn.FullConn(input_feature=1152*2, output_feature=10))
#network.append(output.SoftMax())
li = RawInput((1, 28,28))
network.setInput(li)
lc1 = conv.Conv2d(feature_map_multiplier=32)
la1 = act.Relu()
lp1 = pooling.Pooling()
lc2 = conv.Conv2d(feature_map_multiplier=2)
la2 = act.Relu()
lp2 = pooling.Pooling()
lc3 = conv.Conv2d(feature_map_multiplier=2)
la3 = act.Relu()
lp3 = pooling.Pooling()
lf = reshape.Flatten()
lfc1 = fullconn.FullConn(input_feature=1152, output_feature=1152*2)
la4 = act.Relu()
lfc2 = fullconn.FullConn(input_feature=1152*2, output_feature=10)
lsm = output.SoftMax()
network.connect(li, lc1)
network.connect(lc1, la1)
network.connect(la1, lp1)
network.connect(lp1, lc2)
network.connect(lc2, la2)
network.connect(la2, lp2)
network.connect(lp2, lc3)
network.connect(lc3, la3)
network.connect(la3, lp3)
network.connect(lp3, lf)
network.connect(lf, lfc1)
network.connect(lfc1, la4)
network.connect(la4, lfc2)
network.connect(lfc2, lsm)
network.build()
trX, trY, teX, teY = l.load_mnist()
for i in range(5000):
print(i)
network.train(trX, trY)
print(1 - np.mean(np.argmax(teY, axis=1) == np.argmax(network.predict(teX), axis=1)))
# test1():
def test1():
network = N.Network()
network.debug = True
network.setInput((28,28))
network.append(conv.Conv2d(filter=(3,3), input_feature=1, output_feature=32))
network.append(act.Relu())
network.append(conv.Conv2d(filter=(3,3), input_feature=32, output_feature=32))
network.append(act.Relu())
network.append(conv.Conv2d(filter=(3,3), input_feature=32, output_feature=32))
network.append(act.Relu())
network.append(pooling.Pooling((2,2)))
network.append(conv.Conv2d(filter=(3,3), input_feature=32, output_feature=64))
network.append(act.Relu())
network.append(conv.Conv2d(filter=(3,3), input_feature=64, output_feature=64))
network.append(act.Relu())
network.append(conv.Conv2d(filter=(3,3), input_feature=64, output_feature=64))
network.append(act.Relu())
network.append(pooling.Pooling((2,2)))
network.append(conv.Conv2d(filter=(3,3), input_feature=64, output_feature=128))
network.append(act.Relu())
network.append(conv.Conv2d(filter=(3,3), input_feature=128, output_feature=128))
network.append(act.Relu())
network.append(conv.Conv2d(filter=(3,3), input_feature=128, output_feature=128))
network.append(act.Relu())
network.append(pooling.Pooling((2,2)))
network.append(reshape.Flatten())
network.append(fullconn.FullConn(input_feature=1152, output_feature=1152*2))
network.append(act.Relu())
network.append(fullconn.FullConn(input_feature=1152*2, output_feature=10))
network.append(output.SoftMax())
#network.setCost(N.CategoryCrossEntropy)
network.build()
f = h5py.File('/hdd/home/yueguan/workspace/data/mnist/mnist.hdf5', 'r')
trX = f['x_train'][:,:].reshape(-1, 1, 28, 28)
teX = f['x_test'][:,:].reshape(-1, 1, 28, 28)
trY = np.zeros((f['t_train'].shape[0], 10))
trY[np.arange(len(f['t_train'])), f['t_train']] = 1
teY = np.zeros((f['t_test'].shape[0], 10))
teY[np.arange(len(f['t_test'])), f['t_test']] = 1
for i in range(5000):
print(i)
network.train(trX, trY)
print(1 - np.mean(np.argmax(teY, axis=1) == np.argmax(network.predict(teX), axis=1)))
def test_mlp():
n = N.Network()
n.setInput(RawInput((1, 28, 28)))
n.append(Flatten())
n.append(FullConn(feature_map_multiplier=2))
n.append(Elu())
n.append(FullConn(output_feature=10))
n.append(output.SoftMax())
n.build()
trX, trY, teX, teY = l.load_mnist()
for i in range(100):
print(i)
n.train(trX, trY)
print(1 - np.mean(np.argmax(teY, axis=1) == np.argmax(n.predict(teX), axis=1)))
def test_show_internal():
n = N.Network()
n.setInput(RawInput((1, 28, 28)))
n.append(Flatten(), "flatten")
n.append(FullConn(feature_map_multiplier=2), "fc1")
n.append(Elu(), "layer1")
n.append(FullConn(output_feature=10), "fc2")
n.append(output.SoftMax(), "layer2")
n.build()
trX, trY, teX, teY = l.load_mnist()
for i in range(100):
print(i)
n.train(trX, trY)
print(1 - np.mean(np.argmax(teY, axis=1) == np.argmax(n.predict(teX), axis=1)))
n.predict(teX, stub=["layer1", "layer2"])
if __name__ == "__main__":
test_mlp()
|
|
# Copyright 2014 Netflix, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
.. module: security_monkey.tests.core.test_policydiff
:platform: Unix
.. version:: $$VERSION$$
.. moduleauthor:: Patrick Kelley <[email protected]> @monkeysecurity
"""
from security_monkey.common.PolicyDiff import PolicyDiff
TEST_CASES = [
dict(
old="{}",
new="""
{
"user_name": "test",
"must_change_password": "false",
"create_date": "2013-09-12T18:28:21Z"
}
""",
expected_result="""{<br/>
<font color='green'> "user_name": "test",</font><br/>
<font color='green'> "must_change_password": "false",</font><br/>
<font color='green'> "create_date": "2013-09-12T18:28:21Z"</font><br/>
}<br/>
"""
),
dict(
old="""
{
"create_date": "2013-09-12T18:28:21Z",
"must_change_password": "false",
"user_name": "test"
}
""",
new="""
{
"create_date": "2013-09-12T18:28:21Z",
"must_change_password": "false",
"user_name": "test"
}
""",
expected_result="""{<br/>
<font color='black'> "create_date": "2013-09-12T18:28:21Z",</font><br/>
<font color='black'> "must_change_password": "false",</font><br/>
<font color='black'> "user_name": "test"</font><br/>
}<br/>
"""
),
dict(
old={
"thelist": [{"rule": "asdf"}],
"must_change_password": "false",
"user_name": "test"
},
new={
"thelist": [{"rule": "asdf"},{"rule": "defg"}],
"must_change_password": "false",
"user_name": "test"
},
expected_result="""{<br/>
<font color='black'> "thelist": [<br/>
<font color='black'> {<br/>
<font color='black'> "rule": "asdf"</font><br/>
},</font><br/>
<font color='green'> {<br/>
<font color='green'> "rule": "defg"</font><br/>
}</font><br/>
],</font><br/>
<font color='black'> "must_change_password": "false",</font><br/>
<font color='black'> "user_name": "test"</font><br/>
}<br/>
"""
)
]
def test_produce():
for case in TEST_CASES:
differ = PolicyDiff(case['new'], case['old'])
html = differ.produceDiffHTML()
if html != case['expected_result']:
print(html)
assert html == case['expected_result']
differ = PolicyDiff({}, {})
result = differ.produceDiffHTML()
assert result == 'No Policy.<br/>'
differ._old_policy = None
differ._new_policy = None
try:
differ.produceDiffHTML()
assert False
except ValueError:
pass
differ._old_policy = []
differ._new_policy = {}
try:
differ.produceDiffHTML()
assert False
except ValueError:
pass
differ._old_policy = "old_policy"
differ._new_policy = "new_policy"
result = differ.produceDiffHTML()
assert result == """<font color='red'>old_policy</font><br/>
<br/><font color='green'>new_policy</font><br/>
"""
differ._old_policy = [1, 2, 3]
differ._new_policy = [1, 2, 3]
differ.produceDiffHTML()
differ._old_policy = set([1, 2, 3])
differ._new_policy = set([1, 2, 3])
try:
differ.produceDiffHTML()
assert False
except ValueError:
pass
def test_form_brackets():
from security_monkey.common.PolicyDiff import form_brackets
test_values = [
{
"value": "a_string",
"open": "\"",
"close": "\""
},
{
"value": {"key": "dictionary"},
"open": "{<br/>\n",
"close": "}"
},
{
"value": [1, 2, 3],
"open": "[<br/>\n",
"close": "]"
},
{
"value": 123,
"open": "",
"close": ""
}
]
for value in test_values:
result = form_brackets(value['value'], 0)
assert value["open"] == result["open"]
assert value["close"] == result["close"]
def test_get_brackets():
from security_monkey.common.PolicyDiff import get_brackets
values = [
("str", dict(open="\"", close="\"")),
("unicode", dict(open="\"", close="\"")),
([1,2,3], dict(open="[", close="]")),
({"a": 123}, dict(open="{", close="}")),
(True, dict(open="", close="")),
(123, dict(open="", close="")),
]
for value in values:
assert get_brackets(value[0]) == value[1]
def test_added():
from security_monkey.common.PolicyDiff import added
assert added("asdf") == "<font color='green'>asdf</font><br/>\n"
def test_deleted():
from security_monkey.common.PolicyDiff import deleted
assert deleted("asdf") == "<font color='red'>asdf</font><br/>\n"
def test_same():
from security_monkey.common.PolicyDiff import same
assert same("asdf") == "<font color='black'>asdf</font><br/>\n"
def test_str_distance():
from security_monkey.common.PolicyDiff import str_distance
values = [
("abcdefg", "abcdefg", 0),
("abcdefg", "abcdef0", 1),
("axxxxfg", "abcdefg", 4),
("axxxxfg123", "abcdefg", 7)
]
for value in values:
assert str_distance(value[0], value[1]) == value[2]
def test_find_most_similar():
from security_monkey.common.PolicyDiff import find_most_similar
values = [
"ABCDEFGHIJKLMNOPQRSTUVWXYZ",
1234567890,
"November 2, 1962"
]
assert find_most_similar("ABCDEFGHIJKLMNOPQRSTU", values) == values[0]
assert find_most_similar(123456789, values) == values[1]
assert find_most_similar(1234567890, values) == values[1]
assert find_most_similar("November", values) == values[2]
values = ["Incredible"]
assert find_most_similar("November", values) == values[0]
def test_print_item():
from security_monkey.common.PolicyDiff import print_item
values = [
("<script>", "<script>"),
(123, '123'),
(932.121, '932.121'),
(None, "null"),
(True, "true"),
(False, "false"),
({"1": "2"}, "<font color='black'>\"1\": \"2\"</font><br/>\n"),
(["1", "2"], "<font color='black'>\"1\",</font><br/>\n<font color='black'>\"2\"</font><br/>\n"),
(set([1, 2, 3]), "") # unsupported type: sets render as an empty string
]
for value in values:
assert print_item(value[0], 'same', 0) == value[1]
def test_print_list():
from security_monkey.common.PolicyDiff import print_list
values = [
"string",
{"a": "b"},
["a", "b", "c"],
[1, 2, 3],
True,
False,
None,
set(["not supported type"])
]
expected = """<font color='{color}'>"string",</font><br/>
<font color='{color}'>[[<br/>
<font color='{color}'> "a": "b"</font><br/>
]],</font><br/>
<font color='{color}'>[<br/>
<font color='{color}'> "a",</font><br/>
<font color='{color}'> "b",</font><br/>
<font color='{color}'> "c"</font><br/>
],</font><br/>
<font color='{color}'>[<br/>
<font color='{color}'> 1,</font><br/>
<font color='{color}'> 2,</font><br/>
<font color='{color}'> 3</font><br/>
],</font><br/>
<font color='{color}'>true,</font><br/>
<font color='{color}'>false,</font><br/>
<font color='{color}'>null,</font><br/>
<font color='{color}'></font><br/>
"""
assert print_list(values, 'same', 0) == expected.format(color='black').replace('[[', '{').replace(']]', '}')
assert print_list(values, 'deleted', 0) == expected.format(color='red').replace('[[', '{').replace(']]', '}')
assert print_list(values, 'added', 0) == expected.format(color='green').replace('[[', '{').replace(']]', '}')
def test_print_dict():
from security_monkey.common.PolicyDiff import print_dict
values = {
"a": "<script>",
"b": True,
"c": None,
"d": {
"da": 1
},
"e": [1, 2, 3],
"f": set([1, 2, 3])
}
expected = """<font color='{color}'>"a": "<script>",</font><br/>
<font color='{color}'>"b": true,</font><br/>
<font color='{color}'>"c": null,</font><br/>
<font color='{color}'>"d": [[<br/>
<font color='{color}'> "da": 1</font><br/>
]],</font><br/>
<font color='{color}'>"e": [<br/>
<font color='{color}'> 1,</font><br/>
<font color='{color}'> 2,</font><br/>
<font color='{color}'> 3</font><br/>
],</font><br/>
<font color='{color}'>"f": </font><br/>
"""
assert print_dict(values, 'same', 0) == expected.format(color='black').replace('[[', '{').replace(']]', '}')
assert print_dict(values, 'deleted', 0) == expected.format(color='red').replace('[[', '{').replace(']]', '}')
assert print_dict(values, 'added', 0) == expected.format(color='green').replace('[[', '{').replace(']]', '}')
def test_sub_dict():
from security_monkey.common.PolicyDiff import process_sub_dict
values = [
dict(
a="hello",
b="hello",
x="""<font color='black'>"somekey": "hello",</font><br/>\n"""
),
dict(
a="hello",
b="different",
x="""<font color='red'>"somekey": "different",</font><br/>
<font color='green'>"somekey": "hello",</font><br/>
"""
),
dict(
a=123,
b=123,
x="""<font color='black'>"somekey": 123,</font><br/>\n"""
),
dict(
a=123,
b=1234,
x="""<font color='red'>"somekey": 1234,</font><br/>
<font color='green'>"somekey": 123,</font><br/>
"""
),
dict(
a={"a": 123},
b={"a": 123},
x="""<font color='black'>"somekey": {<br/>
<font color='black'> "a": 123</font><br/>
},</font><br/>
"""
),
dict(
a={"a": 123},
b={"a": 1234},
x="""<font color='black'>"somekey": {<br/>
<font color='red'> "a": 1234,</font><br/>
<font color='green'> "a": 123</font><br/>
},</font><br/>
"""
),
dict(
a=[1, 2, 3, 4],
b=[1, 2, 3, 4],
x="""<font color='black'>"somekey": [<br/>
<font color='black'> 1,</font><br/>
<font color='black'> 2,</font><br/>
<font color='black'> 3,</font><br/>
<font color='black'> 4</font><br/>
],</font><br/>
"""
),
# doesn't seem to be built to handle this case?
dict(
a=[1, 2, 3, 4],
b=[1, 2, 3, 4, 5],
x="""<font color='black'>"somekey": [<br/>
<font color='black'> 1,</font><br/>
<font color='black'> 2,</font><br/>
<font color='black'> 3,</font><br/>
<font color='black'> 4,</font><br/>
<font color='red'> 5</font><br/>
],</font><br/>
"""
),
dict(
a=set([1, 2, 3]),
b=set([1, 2, 3]),
x=''
)
]
for value in values:
result = process_sub_dict("somekey", value["a"], value["b"], 0)
if result != value['x']:
print(("RE",result))
print(("EX", value['x']))
assert result == value['x']
try:
process_sub_dict('somekey', "a_str", ["a list"], 0)
assert False
except ValueError as e:
pass
def test_constructor():
from security_monkey.common.PolicyDiff import PolicyDiff
try:
PolicyDiff("{badjson}", None)
assert False
except ValueError:
pass
try:
PolicyDiff(None, "{badjson}")
assert False
except ValueError:
pass
try:
PolicyDiff({}, [])
assert False
except ValueError:
pass
import collections
PolicyDiff(collections.defaultdict(), collections.defaultdict())
def test_diff_list():
from security_monkey.common.PolicyDiff import diff_list
values = [
dict(
a=["1", "2", 3, 3.0, True, False, None, dict(a="123"), ["list"], set([1,2,3])],
b=["1", "2", 3, 3.0, True, False, None, dict(a="123"), ["list"], set([1,2,3])],
x="""<font color='black'>"1",</font><br/>
<font color='black'>"2",</font><br/>
<font color='black'>3,</font><br/>
<font color='black'>3.0,</font><br/>
<font color='black'>true,</font><br/>
<font color='black'>false,</font><br/>
<font color='black'>null,</font><br/>
<font color='black'>{<br/>
<font color='black'> "a": "123"</font><br/>
},</font><br/>
<font color='black'>[<br/>
<font color='black'> "list"</font><br/>
]</font><br/>
"""
),
dict(
a=[1, 2, 3],
b=[1, 3, 4],
x="""<font color='black'>1,</font><br/>
<font color='black'>3,</font><br/>
<font color='red'>4,</font><br/>
<font color='green'>2</font><br/>
"""
),
dict(
a=["str", True, [1, 3], set([1, 2])],
b=[],
x="""<font color='green'>"str",</font><br/>
<font color='green'>true,</font><br/>
<font color='green'>[<br/>
<font color='green'> 1,</font><br/>
<font color='green'> 3</font><br/>
]</font><br/>
"""
),
dict(
a=[True],
b=[False],
x="""<font color='red'>false,</font><br/>
<font color='green'>true</font><br/>
"""
),
dict(
a=[[1, 2, 3, 4, 5]],
b=[[1, 2, 3, 4, 4]],
x="""<font color='black'>[<br/>
<font color='black'> 1,</font><br/>
<font color='black'> 2,</font><br/>
<font color='black'> 3,</font><br/>
<font color='black'> 4,</font><br/>
<font color='green'> 5</font><br/>
]</font><br/>
"""
),
dict(
a=[{"a": 123, "b": 234}],
b=[{"a": 123, "b": 2345}],
x="""<font color='black'>{<br/>
<font color='black'> "a": 123,</font><br/>
<font color='red'> "b": 2345,</font><br/>
<font color='green'> "b": 234</font><br/>
}</font><br/>
"""
),
dict(
a=[set([1, 2, 3, 4])],
b=[set([1, 2, 3, 4, 5])],
x=""
),
dict(
a=[],
b=["<script>", "<script>", 1234, 1234.0, True, None, [1, 2, 3], {"a": 1}, set([1])],
x="""<font color='red'>"<script>",</font><br/>
<font color='red'>"<script>",</font><br/>
<font color='red'>1234,</font><br/>
<font color='red'>1234.0,</font><br/>
<font color='red'>true,</font><br/>
<font color='red'>null,</font><br/>
<font color='red'>[<br/>
<font color='red'> 1,</font><br/>
<font color='red'> 2,</font><br/>
<font color='red'> 3</font><br/>
],</font><br/>
<font color='red'>{<br/>
<font color='red'> "a": 1</font><br/>
}</font><br/>
"""
),
]
for value in values:
result = diff_list(value["a"], value["b"], 0)
if result != value['x']:
print(("RE", result))
print(("EX", value['x']))
assert result == value['x']
def test_diff_dict():
from security_monkey.common.PolicyDiff import diff_dict
values = [
dict(
a={"a": "hello", "b": [1, 2, 3]},
b={},
x="""<font color='green'>"a": "hello",</font><br/>
<font color='green'>"b": [<br/>
<font color='green'> 1,</font><br/>
<font color='green'> 2,</font><br/>
<font color='green'> 3</font><br/>
]</font><br/>
"""
),
dict(
a={"a": "str"},
b={"a": 1234},
x="""<font color='red'>"a": 1234,</font><br/>
<font color='green'>"a": "str"</font><br/>
"""
),
dict(
a={"a": "str"},
b={"a": "george"},
x="""<font color='red'>"a": "george",</font><br/>
<font color='green'>"a": "str"</font><br/>
"""
),
dict(
a={},
b={"a": "george", "b": [1, 2, 3]},
x="""<font color='red'>"a": "george",</font><br/>
<font color='red'>"b": [<br/>
<font color='red'> 1,</font><br/>
<font color='red'> 2,</font><br/>
<font color='red'> 3</font><br/>
]</font><br/>
"""
),
]
for value in values:
result = diff_dict(value["a"], value["b"], 0)
if result != value['x']:
print(result)
assert result == value['x']
|
|
from itertools import product
import pickle
import numpy as np
from scipy.sparse import (bsr_matrix, coo_matrix, csc_matrix, csr_matrix,
dok_matrix, lil_matrix)
from sklearn import metrics
from sklearn.model_selection import train_test_split
from sklearn.model_selection import cross_val_score
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_warns
from sklearn.utils.testing import ignore_warnings
from sklearn.utils.validation import check_random_state
from sklearn.metrics.pairwise import pairwise_distances
from sklearn import neighbors, datasets
rng = np.random.RandomState(0)
# load and shuffle iris dataset
iris = datasets.load_iris()
perm = rng.permutation(iris.target.size)
iris.data = iris.data[perm]
iris.target = iris.target[perm]
# load and shuffle digits
digits = datasets.load_digits()
perm = rng.permutation(digits.target.size)
digits.data = digits.data[perm]
digits.target = digits.target[perm]
SPARSE_TYPES = (bsr_matrix, coo_matrix, csc_matrix, csr_matrix, dok_matrix,
lil_matrix)
SPARSE_OR_DENSE = SPARSE_TYPES + (np.asarray,)
ALGORITHMS = ('ball_tree', 'brute', 'kd_tree', 'auto')
P = (1, 2, 3, 4, np.inf)
# Filter deprecation warnings.
neighbors.kneighbors_graph = ignore_warnings(neighbors.kneighbors_graph)
neighbors.radius_neighbors_graph = ignore_warnings(
neighbors.radius_neighbors_graph)
def _weight_func(dist):
""" Weight function to replace lambda d: d ** -2.
The lambda form is not valid because
0 ** -2 is undefined when d == 0. """
# Dist could be multidimensional, flatten it so all values
# can be looped
with np.errstate(divide='ignore'):
retval = 1. / dist
return retval ** 2
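# Sketch (added for illustration, not part of the original suite; the helper name is
# arbitrary and it is never called by the tests): shows the inverse-square weighting on
# a toy distance array, including the zero-distance case the lambda form could not express.
def _weight_func_example():
    dists = np.array([[0.5, 1.0, 2.0], [0.0, 1.0, 2.0]])
    weights = _weight_func(dists)
    # First row: (1 / 0.5) ** 2, (1 / 1.0) ** 2, (1 / 2.0) ** 2 == 4.0, 1.0, 0.25
    assert np.allclose(weights[0], [4.0, 1.0, 0.25])
    # A zero distance maps to inf, i.e. an exact match dominates the vote.
    assert np.isinf(weights[1, 0])
    return weights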
def test_unsupervised_kneighbors(n_samples=20, n_features=5,
n_query_pts=2, n_neighbors=5):
# Test unsupervised neighbors methods
X = rng.rand(n_samples, n_features)
test = rng.rand(n_query_pts, n_features)
for p in P:
results_nodist = []
results = []
for algorithm in ALGORITHMS:
neigh = neighbors.NearestNeighbors(n_neighbors=n_neighbors,
algorithm=algorithm,
p=p)
neigh.fit(X)
results_nodist.append(neigh.kneighbors(test,
return_distance=False))
results.append(neigh.kneighbors(test, return_distance=True))
for i in range(len(results) - 1):
assert_array_almost_equal(results_nodist[i], results[i][1])
assert_array_almost_equal(results[i][0], results[i + 1][0])
assert_array_almost_equal(results[i][1], results[i + 1][1])
def test_unsupervised_inputs():
# test the types of valid input into NearestNeighbors
X = rng.random_sample((10, 3))
nbrs_fid = neighbors.NearestNeighbors(n_neighbors=1)
nbrs_fid.fit(X)
dist1, ind1 = nbrs_fid.kneighbors(X)
nbrs = neighbors.NearestNeighbors(n_neighbors=1)
for input in (nbrs_fid, neighbors.BallTree(X), neighbors.KDTree(X)):
nbrs.fit(input)
dist2, ind2 = nbrs.kneighbors(X)
assert_array_almost_equal(dist1, dist2)
assert_array_almost_equal(ind1, ind2)
def test_precomputed(random_state=42):
"""Tests unsupervised NearestNeighbors with a distance matrix."""
# Note: smaller samples may result in spurious test success
rng = np.random.RandomState(random_state)
X = rng.random_sample((10, 4))
Y = rng.random_sample((3, 4))
DXX = metrics.pairwise_distances(X, metric='euclidean')
DYX = metrics.pairwise_distances(Y, X, metric='euclidean')
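# Added comment (illustration): DXX has shape (10, 10) and DYX has shape (3, 10); with
# metric='precomputed' the estimator is fit on the square train-to-train matrix and
# queried with a query-to-train matrix whose columns index the training samples.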
for method in ['kneighbors']:
# TODO: also test radius_neighbors, but requires different assertion
# As a feature matrix (n_samples by n_features)
nbrs_X = neighbors.NearestNeighbors(n_neighbors=3)
nbrs_X.fit(X)
dist_X, ind_X = getattr(nbrs_X, method)(Y)
# As a dense distance matrix (n_samples by n_samples)
nbrs_D = neighbors.NearestNeighbors(n_neighbors=3, algorithm='brute',
metric='precomputed')
nbrs_D.fit(DXX)
dist_D, ind_D = getattr(nbrs_D, method)(DYX)
assert_array_almost_equal(dist_X, dist_D)
assert_array_almost_equal(ind_X, ind_D)
# Check auto works too
nbrs_D = neighbors.NearestNeighbors(n_neighbors=3, algorithm='auto',
metric='precomputed')
nbrs_D.fit(DXX)
dist_D, ind_D = getattr(nbrs_D, method)(DYX)
assert_array_almost_equal(dist_X, dist_D)
assert_array_almost_equal(ind_X, ind_D)
# Check X=None in prediction
dist_X, ind_X = getattr(nbrs_X, method)(None)
dist_D, ind_D = getattr(nbrs_D, method)(None)
assert_array_almost_equal(dist_X, dist_D)
assert_array_almost_equal(ind_X, ind_D)
# Must raise a ValueError if the matrix is not of correct shape
assert_raises(ValueError, getattr(nbrs_D, method), X)
target = np.arange(X.shape[0])
for Est in (neighbors.KNeighborsClassifier,
neighbors.RadiusNeighborsClassifier,
neighbors.KNeighborsRegressor,
neighbors.RadiusNeighborsRegressor):
print(Est)
est = Est(metric='euclidean')
est.radius = est.n_neighbors = 1
pred_X = est.fit(X, target).predict(Y)
est.metric = 'precomputed'
pred_D = est.fit(DXX, target).predict(DYX)
assert_array_almost_equal(pred_X, pred_D)
def test_precomputed_cross_validation():
# Ensure array is split correctly
rng = np.random.RandomState(0)
X = rng.rand(20, 2)
D = pairwise_distances(X, metric='euclidean')
y = rng.randint(3, size=20)
for Est in (neighbors.KNeighborsClassifier,
neighbors.RadiusNeighborsClassifier,
neighbors.KNeighborsRegressor,
neighbors.RadiusNeighborsRegressor):
metric_score = cross_val_score(Est(), X, y)
precomp_score = cross_val_score(Est(metric='precomputed'), D, y)
assert_array_equal(metric_score, precomp_score)
def test_unsupervised_radius_neighbors(n_samples=20, n_features=5,
n_query_pts=2, radius=0.5,
random_state=0):
# Test unsupervised radius-based query
rng = np.random.RandomState(random_state)
X = rng.rand(n_samples, n_features)
test = rng.rand(n_query_pts, n_features)
for p in P:
results = []
for algorithm in ALGORITHMS:
neigh = neighbors.NearestNeighbors(radius=radius,
algorithm=algorithm,
p=p)
neigh.fit(X)
ind1 = neigh.radius_neighbors(test, return_distance=False)
# sort the results: this is not done automatically for
# radius searches
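# (Added note) radius_neighbors returns, for each query point, a variable-length
# array of the neighbors that fall within the radius, so ordering by distance has
# to be done by hand as below.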
dist, ind = neigh.radius_neighbors(test, return_distance=True)
for (d, i, i1) in zip(dist, ind, ind1):
j = d.argsort()
d[:] = d[j]
i[:] = i[j]
i1[:] = i1[j]
results.append((dist, ind))
assert_array_almost_equal(np.concatenate(list(ind)),
np.concatenate(list(ind1)))
for i in range(len(results) - 1):
assert_array_almost_equal(np.concatenate(list(results[i][0])),
np.concatenate(list(results[i + 1][0])))
assert_array_almost_equal(np.concatenate(list(results[i][1])),
np.concatenate(list(results[i + 1][1])))
def test_kneighbors_classifier(n_samples=40,
n_features=5,
n_test_pts=10,
n_neighbors=5,
random_state=0):
# Test k-neighbors classification
rng = np.random.RandomState(random_state)
X = 2 * rng.rand(n_samples, n_features) - 1
y = ((X ** 2).sum(axis=1) < .5).astype(np.int)
y_str = y.astype(str)
weight_func = _weight_func
for algorithm in ALGORITHMS:
for weights in ['uniform', 'distance', weight_func]:
knn = neighbors.KNeighborsClassifier(n_neighbors=n_neighbors,
weights=weights,
algorithm=algorithm)
knn.fit(X, y)
epsilon = 1e-5 * (2 * rng.rand(1, n_features) - 1)
y_pred = knn.predict(X[:n_test_pts] + epsilon)
assert_array_equal(y_pred, y[:n_test_pts])
# Test prediction with y_str
knn.fit(X, y_str)
y_pred = knn.predict(X[:n_test_pts] + epsilon)
assert_array_equal(y_pred, y_str[:n_test_pts])
def test_kneighbors_classifier_float_labels(n_samples=40, n_features=5,
n_test_pts=10, n_neighbors=5,
random_state=0):
# Test k-neighbors classification
rng = np.random.RandomState(random_state)
X = 2 * rng.rand(n_samples, n_features) - 1
y = ((X ** 2).sum(axis=1) < .5).astype(np.int)
knn = neighbors.KNeighborsClassifier(n_neighbors=n_neighbors)
knn.fit(X, y.astype(np.float))
epsilon = 1e-5 * (2 * rng.rand(1, n_features) - 1)
y_pred = knn.predict(X[:n_test_pts] + epsilon)
assert_array_equal(y_pred, y[:n_test_pts])
def test_kneighbors_classifier_predict_proba():
# Test KNeighborsClassifier.predict_proba() method
X = np.array([[0, 2, 0],
[0, 2, 1],
[2, 0, 0],
[2, 2, 0],
[0, 0, 2],
[0, 0, 1]])
y = np.array([4, 4, 5, 5, 1, 1])
cls = neighbors.KNeighborsClassifier(n_neighbors=3, p=1) # cityblock dist
cls.fit(X, y)
y_prob = cls.predict_proba(X)
real_prob = np.array([[0, 2. / 3, 1. / 3],
[1. / 3, 2. / 3, 0],
[1. / 3, 0, 2. / 3],
[0, 1. / 3, 2. / 3],
[2. / 3, 1. / 3, 0],
[2. / 3, 1. / 3, 0]])
assert_array_equal(real_prob, y_prob)
# Check that it also works with non integer labels
cls.fit(X, y.astype(str))
y_prob = cls.predict_proba(X)
assert_array_equal(real_prob, y_prob)
# Check that it works with weights='distance'
cls = neighbors.KNeighborsClassifier(
n_neighbors=2, p=1, weights='distance')
cls.fit(X, y)
y_prob = cls.predict_proba(np.array([[0, 2, 0], [2, 2, 2]]))
real_prob = np.array([[0, 1, 0], [0, 0.4, 0.6]])
assert_array_almost_equal(real_prob, y_prob)
def test_radius_neighbors_classifier(n_samples=40,
n_features=5,
n_test_pts=10,
radius=0.5,
random_state=0):
# Test radius-based classification
rng = np.random.RandomState(random_state)
X = 2 * rng.rand(n_samples, n_features) - 1
y = ((X ** 2).sum(axis=1) < .5).astype(np.int)
y_str = y.astype(str)
weight_func = _weight_func
for algorithm in ALGORITHMS:
for weights in ['uniform', 'distance', weight_func]:
neigh = neighbors.RadiusNeighborsClassifier(radius=radius,
weights=weights,
algorithm=algorithm)
neigh.fit(X, y)
epsilon = 1e-5 * (2 * rng.rand(1, n_features) - 1)
y_pred = neigh.predict(X[:n_test_pts] + epsilon)
assert_array_equal(y_pred, y[:n_test_pts])
neigh.fit(X, y_str)
y_pred = neigh.predict(X[:n_test_pts] + epsilon)
assert_array_equal(y_pred, y_str[:n_test_pts])
def test_radius_neighbors_classifier_when_no_neighbors():
# Test radius-based classifier when no neighbors found.
# In this case it should raise an informative exception
X = np.array([[1.0, 1.0], [2.0, 2.0]])
y = np.array([1, 2])
radius = 0.1
z1 = np.array([[1.01, 1.01], [2.01, 2.01]]) # no outliers
z2 = np.array([[1.01, 1.01], [1.4, 1.4]]) # one outlier
weight_func = _weight_func
for outlier_label in [0, -1, None]:
for algorithm in ALGORITHMS:
for weights in ['uniform', 'distance', weight_func]:
rnc = neighbors.RadiusNeighborsClassifier
clf = rnc(radius=radius, weights=weights, algorithm=algorithm,
outlier_label=outlier_label)
clf.fit(X, y)
assert_array_equal(np.array([1, 2]),
clf.predict(z1))
if outlier_label is None:
assert_raises(ValueError, clf.predict, z2)
elif False:
assert_array_equal(np.array([1, outlier_label]),
clf.predict(z2))
def test_radius_neighbors_classifier_outlier_labeling():
# Test radius-based classifier when no neighbors found and outliers
# are labeled.
X = np.array([[1.0, 1.0], [2.0, 2.0]])
y = np.array([1, 2])
radius = 0.1
z1 = np.array([[1.01, 1.01], [2.01, 2.01]]) # no outliers
z2 = np.array([[1.01, 1.01], [1.4, 1.4]]) # one outlier
correct_labels1 = np.array([1, 2])
correct_labels2 = np.array([1, -1])
weight_func = _weight_func
for algorithm in ALGORITHMS:
for weights in ['uniform', 'distance', weight_func]:
clf = neighbors.RadiusNeighborsClassifier(radius=radius,
weights=weights,
algorithm=algorithm,
outlier_label=-1)
clf.fit(X, y)
assert_array_equal(correct_labels1, clf.predict(z1))
assert_array_equal(correct_labels2, clf.predict(z2))
def test_radius_neighbors_classifier_zero_distance():
# Test radius-based classifier, when distance to a sample is zero.
X = np.array([[1.0, 1.0], [2.0, 2.0]])
y = np.array([1, 2])
radius = 0.1
z1 = np.array([[1.01, 1.01], [2.0, 2.0]])
correct_labels1 = np.array([1, 2])
weight_func = _weight_func
for algorithm in ALGORITHMS:
for weights in ['uniform', 'distance', weight_func]:
clf = neighbors.RadiusNeighborsClassifier(radius=radius,
weights=weights,
algorithm=algorithm)
clf.fit(X, y)
assert_array_equal(correct_labels1, clf.predict(z1))
def test_neighbors_regressors_zero_distance():
# Test radius-based regressor, when distance to a sample is zero.
X = np.array([[1.0, 1.0], [1.0, 1.0], [2.0, 2.0], [2.5, 2.5]])
y = np.array([1.0, 1.5, 2.0, 0.0])
radius = 0.2
z = np.array([[1.1, 1.1], [2.0, 2.0]])
rnn_correct_labels = np.array([1.25, 2.0])
knn_correct_unif = np.array([1.25, 1.0])
knn_correct_dist = np.array([1.25, 2.0])
for algorithm in ALGORITHMS:
# we don't test for weights=_weight_func since the user is expected
# to handle zero distances themselves in the function.
for weights in ['uniform', 'distance']:
rnn = neighbors.RadiusNeighborsRegressor(radius=radius,
weights=weights,
algorithm=algorithm)
rnn.fit(X, y)
assert_array_almost_equal(rnn_correct_labels, rnn.predict(z))
for weights, corr_labels in zip(['uniform', 'distance'],
[knn_correct_unif, knn_correct_dist]):
knn = neighbors.KNeighborsRegressor(n_neighbors=2,
weights=weights,
algorithm=algorithm)
knn.fit(X, y)
assert_array_almost_equal(corr_labels, knn.predict(z))
def test_radius_neighbors_boundary_handling():
"""Test whether points lying on boundary are handled consistently
Also ensures that even with only one query point, an object array
is returned rather than a 2d array.
"""
X = np.array([[1.5], [3.0], [3.01]])
radius = 3.0
for algorithm in ALGORITHMS:
nbrs = neighbors.NearestNeighbors(radius=radius,
algorithm=algorithm).fit(X)
results = nbrs.radius_neighbors([[0.0]], return_distance=False)
assert_equal(results.shape, (1,))
assert_equal(results.dtype, object)
assert_array_equal(results[0], [0, 1])
def test_RadiusNeighborsClassifier_multioutput():
# Test radius-based classifier on multioutput data
rng = check_random_state(0)
n_features = 2
n_samples = 40
n_output = 3
X = rng.rand(n_samples, n_features)
y = rng.randint(0, 3, (n_samples, n_output))
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
weights = [None, 'uniform', 'distance', _weight_func]
for algorithm, weights in product(ALGORITHMS, weights):
# Stack single output prediction
y_pred_so = []
for o in range(n_output):
rnn = neighbors.RadiusNeighborsClassifier(weights=weights,
algorithm=algorithm)
rnn.fit(X_train, y_train[:, o])
y_pred_so.append(rnn.predict(X_test))
y_pred_so = np.vstack(y_pred_so).T
assert_equal(y_pred_so.shape, y_test.shape)
# Multioutput prediction
rnn_mo = neighbors.RadiusNeighborsClassifier(weights=weights,
algorithm=algorithm)
rnn_mo.fit(X_train, y_train)
y_pred_mo = rnn_mo.predict(X_test)
assert_equal(y_pred_mo.shape, y_test.shape)
assert_array_almost_equal(y_pred_mo, y_pred_so)
def test_kneighbors_classifier_sparse(n_samples=40,
n_features=5,
n_test_pts=10,
n_neighbors=5,
random_state=0):
# Test k-NN classifier on sparse matrices
# Like the above, but with various types of sparse matrices
rng = np.random.RandomState(random_state)
X = 2 * rng.rand(n_samples, n_features) - 1
X *= X > .2
y = ((X ** 2).sum(axis=1) < .5).astype(np.int)
for sparsemat in SPARSE_TYPES:
knn = neighbors.KNeighborsClassifier(n_neighbors=n_neighbors,
algorithm='auto')
knn.fit(sparsemat(X), y)
epsilon = 1e-5 * (2 * rng.rand(1, n_features) - 1)
for sparsev in SPARSE_TYPES + (np.asarray,):
X_eps = sparsev(X[:n_test_pts] + epsilon)
y_pred = knn.predict(X_eps)
assert_array_equal(y_pred, y[:n_test_pts])
def test_KNeighborsClassifier_multioutput():
# Test k-NN classifier on multioutput data
rng = check_random_state(0)
n_features = 5
n_samples = 50
n_output = 3
X = rng.rand(n_samples, n_features)
y = rng.randint(0, 3, (n_samples, n_output))
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
weights = [None, 'uniform', 'distance', _weight_func]
for algorithm, weights in product(ALGORITHMS, weights):
# Stack single output prediction
y_pred_so = []
y_pred_proba_so = []
for o in range(n_output):
knn = neighbors.KNeighborsClassifier(weights=weights,
algorithm=algorithm)
knn.fit(X_train, y_train[:, o])
y_pred_so.append(knn.predict(X_test))
y_pred_proba_so.append(knn.predict_proba(X_test))
y_pred_so = np.vstack(y_pred_so).T
assert_equal(y_pred_so.shape, y_test.shape)
assert_equal(len(y_pred_proba_so), n_output)
# Multioutput prediction
knn_mo = neighbors.KNeighborsClassifier(weights=weights,
algorithm=algorithm)
knn_mo.fit(X_train, y_train)
y_pred_mo = knn_mo.predict(X_test)
assert_equal(y_pred_mo.shape, y_test.shape)
assert_array_almost_equal(y_pred_mo, y_pred_so)
# Check proba
y_pred_proba_mo = knn_mo.predict_proba(X_test)
assert_equal(len(y_pred_proba_mo), n_output)
for proba_mo, proba_so in zip(y_pred_proba_mo, y_pred_proba_so):
assert_array_almost_equal(proba_mo, proba_so)
def test_kneighbors_regressor(n_samples=40,
n_features=5,
n_test_pts=10,
n_neighbors=3,
random_state=0):
# Test k-neighbors regression
rng = np.random.RandomState(random_state)
X = 2 * rng.rand(n_samples, n_features) - 1
y = np.sqrt((X ** 2).sum(1))
y /= y.max()
y_target = y[:n_test_pts]
weight_func = _weight_func
for algorithm in ALGORITHMS:
for weights in ['uniform', 'distance', weight_func]:
knn = neighbors.KNeighborsRegressor(n_neighbors=n_neighbors,
weights=weights,
algorithm=algorithm)
knn.fit(X, y)
epsilon = 1E-5 * (2 * rng.rand(1, n_features) - 1)
y_pred = knn.predict(X[:n_test_pts] + epsilon)
assert_true(np.all(abs(y_pred - y_target) < 0.3))
def test_KNeighborsRegressor_multioutput_uniform_weight():
# Test k-neighbors in multi-output regression with uniform weight
rng = check_random_state(0)
n_features = 5
n_samples = 40
n_output = 4
X = rng.rand(n_samples, n_features)
y = rng.rand(n_samples, n_output)
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
for algorithm, weights in product(ALGORITHMS, [None, 'uniform']):
knn = neighbors.KNeighborsRegressor(weights=weights,
algorithm=algorithm)
knn.fit(X_train, y_train)
neigh_idx = knn.kneighbors(X_test, return_distance=False)
y_pred_idx = np.array([np.mean(y_train[idx], axis=0)
for idx in neigh_idx])
y_pred = knn.predict(X_test)
assert_equal(y_pred.shape, y_test.shape)
assert_equal(y_pred_idx.shape, y_test.shape)
assert_array_almost_equal(y_pred, y_pred_idx)
def test_kneighbors_regressor_multioutput(n_samples=40,
n_features=5,
n_test_pts=10,
n_neighbors=3,
random_state=0):
# Test k-neighbors in multi-output regression
rng = np.random.RandomState(random_state)
X = 2 * rng.rand(n_samples, n_features) - 1
y = np.sqrt((X ** 2).sum(1))
y /= y.max()
y = np.vstack([y, y]).T
y_target = y[:n_test_pts]
weights = ['uniform', 'distance', _weight_func]
for algorithm, weights in product(ALGORITHMS, weights):
knn = neighbors.KNeighborsRegressor(n_neighbors=n_neighbors,
weights=weights,
algorithm=algorithm)
knn.fit(X, y)
epsilon = 1E-5 * (2 * rng.rand(1, n_features) - 1)
y_pred = knn.predict(X[:n_test_pts] + epsilon)
assert_equal(y_pred.shape, y_target.shape)
assert_true(np.all(np.abs(y_pred - y_target) < 0.3))
def test_radius_neighbors_regressor(n_samples=40,
n_features=3,
n_test_pts=10,
radius=0.5,
random_state=0):
# Test radius-based neighbors regression
rng = np.random.RandomState(random_state)
X = 2 * rng.rand(n_samples, n_features) - 1
y = np.sqrt((X ** 2).sum(1))
y /= y.max()
y_target = y[:n_test_pts]
weight_func = _weight_func
for algorithm in ALGORITHMS:
for weights in ['uniform', 'distance', weight_func]:
neigh = neighbors.RadiusNeighborsRegressor(radius=radius,
weights=weights,
algorithm=algorithm)
neigh.fit(X, y)
epsilon = 1E-5 * (2 * rng.rand(1, n_features) - 1)
y_pred = neigh.predict(X[:n_test_pts] + epsilon)
assert_true(np.all(abs(y_pred - y_target) < radius / 2))
def test_RadiusNeighborsRegressor_multioutput_with_uniform_weight():
# Test radius neighbors in multi-output regression (uniform weight)
rng = check_random_state(0)
n_features = 5
n_samples = 40
n_output = 4
X = rng.rand(n_samples, n_features)
y = rng.rand(n_samples, n_output)
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
for algorithm, weights in product(ALGORITHMS, [None, 'uniform']):
rnn = neighbors.RadiusNeighborsRegressor(weights=weights,
algorithm=algorithm)
rnn.fit(X_train, y_train)
neigh_idx = rnn.radius_neighbors(X_test, return_distance=False)
y_pred_idx = np.array([np.mean(y_train[idx], axis=0)
for idx in neigh_idx])
y_pred_idx = np.array(y_pred_idx)
y_pred = rnn.predict(X_test)
assert_equal(y_pred_idx.shape, y_test.shape)
assert_equal(y_pred.shape, y_test.shape)
assert_array_almost_equal(y_pred, y_pred_idx)
def test_RadiusNeighborsRegressor_multioutput(n_samples=40,
n_features=5,
n_test_pts=10,
n_neighbors=3,
random_state=0):
# Test radius neighbors in multi-output regression with various weights
rng = np.random.RandomState(random_state)
X = 2 * rng.rand(n_samples, n_features) - 1
y = np.sqrt((X ** 2).sum(1))
y /= y.max()
y = np.vstack([y, y]).T
y_target = y[:n_test_pts]
weights = ['uniform', 'distance', _weight_func]
for algorithm, weights in product(ALGORITHMS, weights):
rnn = neighbors.RadiusNeighborsRegressor(n_neighbors=n_neighbors,
weights=weights,
algorithm=algorithm)
rnn.fit(X, y)
epsilon = 1E-5 * (2 * rng.rand(1, n_features) - 1)
y_pred = rnn.predict(X[:n_test_pts] + epsilon)
assert_equal(y_pred.shape, y_target.shape)
assert_true(np.all(np.abs(y_pred - y_target) < 0.3))
def test_kneighbors_regressor_sparse(n_samples=40,
n_features=5,
n_test_pts=10,
n_neighbors=5,
random_state=0):
# Test k-neighbors regression on sparse matrices
# Like the above, but with various types of sparse matrices
rng = np.random.RandomState(random_state)
X = 2 * rng.rand(n_samples, n_features) - 1
y = ((X ** 2).sum(axis=1) < .25).astype(np.int)
for sparsemat in SPARSE_TYPES:
knn = neighbors.KNeighborsRegressor(n_neighbors=n_neighbors,
algorithm='auto')
knn.fit(sparsemat(X), y)
for sparsev in SPARSE_OR_DENSE:
X2 = sparsev(X)
assert_true(np.mean(knn.predict(X2).round() == y) > 0.95)
def test_neighbors_iris():
# Sanity checks on the iris dataset
# Puts three points of each label in the plane and performs a
# nearest neighbor query on points near the decision boundary.
for algorithm in ALGORITHMS:
clf = neighbors.KNeighborsClassifier(n_neighbors=1,
algorithm=algorithm)
clf.fit(iris.data, iris.target)
assert_array_equal(clf.predict(iris.data), iris.target)
clf.set_params(n_neighbors=9, algorithm=algorithm)
clf.fit(iris.data, iris.target)
assert_true(np.mean(clf.predict(iris.data) == iris.target) > 0.95)
rgs = neighbors.KNeighborsRegressor(n_neighbors=5, algorithm=algorithm)
rgs.fit(iris.data, iris.target)
assert_true(np.mean(rgs.predict(iris.data).round() == iris.target)
> 0.95)
def test_neighbors_digits():
# Sanity check on the digits dataset
# the 'brute' algorithm has been observed to fail if the input
# dtype is uint8 due to overflow in distance calculations.
X = digits.data.astype('uint8')
Y = digits.target
(n_samples, n_features) = X.shape
train_test_boundary = int(n_samples * 0.8)
train = np.arange(0, train_test_boundary)
test = np.arange(train_test_boundary, n_samples)
(X_train, Y_train, X_test, Y_test) = X[train], Y[train], X[test], Y[test]
clf = neighbors.KNeighborsClassifier(n_neighbors=1, algorithm='brute')
score_uint8 = clf.fit(X_train, Y_train).score(X_test, Y_test)
score_float = clf.fit(X_train.astype(float), Y_train).score(
X_test.astype(float), Y_test)
assert_equal(score_uint8, score_float)
def test_kneighbors_graph():
# Test kneighbors_graph to build the k-Nearest Neighbor graph.
X = np.array([[0, 1], [1.01, 1.], [2, 0]])
# n_neighbors = 1
A = neighbors.kneighbors_graph(X, 1, mode='connectivity', include_self=True)
assert_array_equal(A.toarray(), np.eye(A.shape[0]))
A = neighbors.kneighbors_graph(X, 1, mode='distance')
assert_array_almost_equal(
A.toarray(),
[[0.00, 1.01, 0.],
[1.01, 0., 0.],
[0.00, 1.40716026, 0.]])
# n_neighbors = 2
A = neighbors.kneighbors_graph(X, 2, mode='connectivity', include_self=True)
assert_array_equal(
A.toarray(),
[[1., 1., 0.],
[1., 1., 0.],
[0., 1., 1.]])
A = neighbors.kneighbors_graph(X, 2, mode='distance')
assert_array_almost_equal(
A.toarray(),
[[0., 1.01, 2.23606798],
[1.01, 0., 1.40716026],
[2.23606798, 1.40716026, 0.]])
# n_neighbors = 3
A = neighbors.kneighbors_graph(X, 3, mode='connectivity', include_self=True)
assert_array_almost_equal(
A.toarray(),
[[1, 1, 1], [1, 1, 1], [1, 1, 1]])
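# Worked check (comment only, not part of the original test): 1.40716026 above
# is just the Euclidean distance between the second and third sample,
#     np.sqrt((2 - 1.01) ** 2 + (0. - 1.) ** 2)  # ~= 1.40716026
# and 2.23606798 is sqrt(5), the distance between [0, 1] and [2, 0].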
def test_kneighbors_graph_sparse(seed=36):
# Test kneighbors_graph to build the k-Nearest Neighbor graph
# for sparse input.
rng = np.random.RandomState(seed)
X = rng.randn(10, 10)
Xcsr = csr_matrix(X)
for n_neighbors in [1, 2, 3]:
for mode in ["connectivity", "distance"]:
assert_array_almost_equal(
neighbors.kneighbors_graph(X,
n_neighbors,
mode=mode).toarray(),
neighbors.kneighbors_graph(Xcsr,
n_neighbors,
mode=mode).toarray())
def test_radius_neighbors_graph():
# Test radius_neighbors_graph to build the Nearest Neighbor graph.
X = np.array([[0, 1], [1.01, 1.], [2, 0]])
A = neighbors.radius_neighbors_graph(X, 1.5, mode='connectivity',
include_self=True)
assert_array_equal(
A.toarray(),
[[1., 1., 0.],
[1., 1., 1.],
[0., 1., 1.]])
A = neighbors.radius_neighbors_graph(X, 1.5, mode='distance')
assert_array_almost_equal(
A.toarray(),
[[0., 1.01, 0.],
[1.01, 0., 1.40716026],
[0., 1.40716026, 0.]])
def test_radius_neighbors_graph_sparse(seed=36):
# Test radius_neighbors_graph to build the Nearest Neighbor graph
# for sparse input.
rng = np.random.RandomState(seed)
X = rng.randn(10, 10)
Xcsr = csr_matrix(X)
for n_neighbors in [1, 2, 3]:
for mode in ["connectivity", "distance"]:
assert_array_almost_equal(
neighbors.radius_neighbors_graph(X,
n_neighbors,
mode=mode).toarray(),
neighbors.radius_neighbors_graph(Xcsr,
n_neighbors,
mode=mode).toarray())
def test_neighbors_badargs():
# Test bad argument values: these should all raise ValueErrors
assert_raises(ValueError,
neighbors.NearestNeighbors,
algorithm='blah')
X = rng.random_sample((10, 2))
Xsparse = csr_matrix(X)
y = np.ones(10)
for cls in (neighbors.KNeighborsClassifier,
neighbors.RadiusNeighborsClassifier,
neighbors.KNeighborsRegressor,
neighbors.RadiusNeighborsRegressor):
assert_raises(ValueError,
cls,
weights='blah')
assert_raises(ValueError,
cls, p=-1)
assert_raises(ValueError,
cls, algorithm='blah')
nbrs = cls(algorithm='ball_tree', metric='haversine')
assert_raises(ValueError,
nbrs.predict,
X)
assert_raises(ValueError,
ignore_warnings(nbrs.fit),
Xsparse, y)
nbrs = cls()
assert_raises(ValueError,
nbrs.fit,
np.ones((0, 2)), np.ones(0))
assert_raises(ValueError,
nbrs.fit,
X[:, :, None], y)
nbrs.fit(X, y)
assert_raises(ValueError,
nbrs.predict,
[[]])
if cls in (neighbors.KNeighborsClassifier,
           neighbors.KNeighborsRegressor):
nbrs = cls(n_neighbors=-1)
assert_raises(ValueError, nbrs.fit, X, y)
nbrs = neighbors.NearestNeighbors().fit(X)
assert_raises(ValueError, nbrs.kneighbors_graph, X, mode='blah')
assert_raises(ValueError, nbrs.radius_neighbors_graph, X, mode='blah')
def test_neighbors_metrics(n_samples=20, n_features=3,
n_query_pts=2, n_neighbors=5):
# Test computing the neighbors for various metrics
# create a symmetric matrix
V = rng.rand(n_features, n_features)
VI = np.dot(V, V.T)
metrics = [('euclidean', {}),
('manhattan', {}),
('minkowski', dict(p=1)),
('minkowski', dict(p=2)),
('minkowski', dict(p=3)),
('minkowski', dict(p=np.inf)),
('chebyshev', {}),
('seuclidean', dict(V=rng.rand(n_features))),
('wminkowski', dict(p=3, w=rng.rand(n_features))),
('mahalanobis', dict(VI=VI))]
algorithms = ['brute', 'ball_tree', 'kd_tree']
X = rng.rand(n_samples, n_features)
test = rng.rand(n_query_pts, n_features)
for metric, metric_params in metrics:
results = []
p = metric_params.pop('p', 2)
for algorithm in algorithms:
# KD tree doesn't support all metrics
if (algorithm == 'kd_tree' and
metric not in neighbors.KDTree.valid_metrics):
assert_raises(ValueError,
neighbors.NearestNeighbors,
algorithm=algorithm,
metric=metric, metric_params=metric_params)
continue
neigh = neighbors.NearestNeighbors(n_neighbors=n_neighbors,
algorithm=algorithm,
metric=metric, p=p,
metric_params=metric_params)
neigh.fit(X)
results.append(neigh.kneighbors(test, return_distance=True))
assert_array_almost_equal(results[0][0], results[1][0])
assert_array_almost_equal(results[0][1], results[1][1])
def test_callable_metric():
metric = lambda x1, x2: np.sqrt(np.sum(x1 ** 2 + x2 ** 2))
X = np.random.RandomState(42).rand(20, 2)
nbrs1 = neighbors.NearestNeighbors(3, algorithm='auto', metric=metric)
nbrs2 = neighbors.NearestNeighbors(3, algorithm='brute', metric=metric)
nbrs1.fit(X)
nbrs2.fit(X)
dist1, ind1 = nbrs1.kneighbors(X)
dist2, ind2 = nbrs2.kneighbors(X)
assert_array_almost_equal(dist1, dist2)
def test_metric_params_interface():
assert_warns(SyntaxWarning, neighbors.KNeighborsClassifier,
metric_params={'p': 3})
def test_predict_sparse_ball_kd_tree():
rng = np.random.RandomState(0)
X = rng.rand(5, 5)
y = rng.randint(0, 2, 5)
nbrs1 = neighbors.KNeighborsClassifier(1, algorithm='kd_tree')
nbrs2 = neighbors.KNeighborsRegressor(1, algorithm='ball_tree')
for model in [nbrs1, nbrs2]:
model.fit(X, y)
assert_raises(ValueError, model.predict, csr_matrix(X))
def test_non_euclidean_kneighbors():
rng = np.random.RandomState(0)
X = rng.rand(5, 5)
# Find a reasonable radius.
dist_array = pairwise_distances(X).flatten()
dist_array.sort()  # sort in place; np.sort would return a discarded copy
radius = dist_array[15]
# Test kneighbors_graph
for metric in ['manhattan', 'chebyshev']:
nbrs_graph = neighbors.kneighbors_graph(
X, 3, metric=metric, mode='connectivity',
include_self=True).toarray()
nbrs1 = neighbors.NearestNeighbors(3, metric=metric).fit(X)
assert_array_equal(nbrs_graph, nbrs1.kneighbors_graph(X).toarray())
# Test radiusneighbors_graph
for metric in ['manhattan', 'chebyshev']:
nbrs_graph = neighbors.radius_neighbors_graph(
X, radius, metric=metric, mode='connectivity',
include_self=True).toarray()
nbrs1 = neighbors.NearestNeighbors(metric=metric, radius=radius).fit(X)
assert_array_equal(nbrs_graph, nbrs1.radius_neighbors_graph(X).A)
# Raise error when wrong parameters are supplied,
X_nbrs = neighbors.NearestNeighbors(3, metric='manhattan')
X_nbrs.fit(X)
assert_raises(ValueError, neighbors.kneighbors_graph, X_nbrs, 3,
metric='euclidean')
X_nbrs = neighbors.NearestNeighbors(radius=radius, metric='manhattan')
X_nbrs.fit(X)
assert_raises(ValueError, neighbors.radius_neighbors_graph, X_nbrs,
radius, metric='euclidean')
def check_object_arrays(nparray, list_check):
for ind, ele in enumerate(nparray):
assert_array_equal(ele, list_check[ind])
def test_k_and_radius_neighbors_train_is_not_query():
# Test kneighbors et.al when query is not training data
for algorithm in ALGORITHMS:
nn = neighbors.NearestNeighbors(n_neighbors=1, algorithm=algorithm)
X = [[0], [1]]
nn.fit(X)
test_data = [[2], [1]]
# Test neighbors.
dist, ind = nn.kneighbors(test_data)
assert_array_equal(dist, [[1], [0]])
assert_array_equal(ind, [[1], [1]])
dist, ind = nn.radius_neighbors([[2], [1]], radius=1.5)
check_object_arrays(dist, [[1], [1, 0]])
check_object_arrays(ind, [[1], [0, 1]])
# Test the graph variants.
assert_array_equal(
nn.kneighbors_graph(test_data).A, [[0., 1.], [0., 1.]])
assert_array_equal(
nn.kneighbors_graph([[2], [1]], mode='distance').A,
np.array([[0., 1.], [0., 0.]]))
rng = nn.radius_neighbors_graph([[2], [1]], radius=1.5)
assert_array_equal(rng.A, [[0, 1], [1, 1]])
def test_k_and_radius_neighbors_X_None():
# Test kneighbors et.al when query is None
for algorithm in ALGORITHMS:
nn = neighbors.NearestNeighbors(n_neighbors=1, algorithm=algorithm)
X = [[0], [1]]
nn.fit(X)
dist, ind = nn.kneighbors()
assert_array_equal(dist, [[1], [1]])
assert_array_equal(ind, [[1], [0]])
dist, ind = nn.radius_neighbors(None, radius=1.5)
check_object_arrays(dist, [[1], [1]])
check_object_arrays(ind, [[1], [0]])
# Test the graph variants.
rng = nn.radius_neighbors_graph(None, radius=1.5)
kng = nn.kneighbors_graph(None)
for graph in [rng, kng]:
assert_array_equal(graph.A, [[0, 1], [1, 0]])
assert_array_equal(graph.data, [1, 1])
assert_array_equal(graph.indices, [1, 0])
X = [[0, 1], [0, 1], [1, 1]]
nn = neighbors.NearestNeighbors(n_neighbors=2, algorithm=algorithm)
nn.fit(X)
assert_array_equal(
nn.kneighbors_graph().A,
np.array([[0., 1., 1.], [1., 0., 1.], [1., 1., 0]]))
def test_k_and_radius_neighbors_duplicates():
# Test behavior of kneighbors when duplicates are present in query
for algorithm in ALGORITHMS:
nn = neighbors.NearestNeighbors(n_neighbors=1, algorithm=algorithm)
nn.fit([[0], [1]])
# Do not do anything special to duplicates.
kng = nn.kneighbors_graph([[0], [1]], mode='distance')
assert_array_equal(
kng.A,
np.array([[0., 0.], [0., 0.]]))
assert_array_equal(kng.data, [0., 0.])
assert_array_equal(kng.indices, [0, 1])
dist, ind = nn.radius_neighbors([[0], [1]], radius=1.5)
check_object_arrays(dist, [[0, 1], [1, 0]])
check_object_arrays(ind, [[0, 1], [0, 1]])
rng = nn.radius_neighbors_graph([[0], [1]], radius=1.5)
assert_array_equal(rng.A, np.ones((2, 2)))
rng = nn.radius_neighbors_graph([[0], [1]], radius=1.5,
mode='distance')
assert_array_equal(rng.A, [[0, 1], [1, 0]])
assert_array_equal(rng.indices, [0, 1, 0, 1])
assert_array_equal(rng.data, [0, 1, 1, 0])
# Mask the first duplicates when n_duplicates > n_neighbors.
X = np.ones((3, 1))
nn = neighbors.NearestNeighbors(n_neighbors=1)
nn.fit(X)
dist, ind = nn.kneighbors()
assert_array_equal(dist, np.zeros((3, 1)))
assert_array_equal(ind, [[1], [0], [1]])
# Test that zeros are explicitly marked in kneighbors_graph.
kng = nn.kneighbors_graph(mode='distance')
assert_array_equal(
kng.A, np.zeros((3, 3)))
assert_array_equal(kng.data, np.zeros(3))
assert_array_equal(kng.indices, [1., 0., 1.])
assert_array_equal(
nn.kneighbors_graph().A,
np.array([[0., 1., 0.], [1., 0., 0.], [0., 1., 0.]]))
def test_include_self_neighbors_graph():
# Test include_self parameter in neighbors_graph
X = [[2, 3], [4, 5]]
kng = neighbors.kneighbors_graph(X, 1, include_self=True).A
kng_not_self = neighbors.kneighbors_graph(X, 1, include_self=False).A
assert_array_equal(kng, [[1., 0.], [0., 1.]])
assert_array_equal(kng_not_self, [[0., 1.], [1., 0.]])
rng = neighbors.radius_neighbors_graph(X, 5.0, include_self=True).A
rng_not_self = neighbors.radius_neighbors_graph(
X, 5.0, include_self=False).A
assert_array_equal(rng, [[1., 1.], [1., 1.]])
assert_array_equal(rng_not_self, [[0., 1.], [1., 0.]])
def test_kneighbors_parallel():
X, y = datasets.make_classification(n_samples=10, n_features=2,
n_redundant=0, random_state=0)
X_train, X_test, y_train, y_test = train_test_split(X, y)
for algorithm in ALGORITHMS:
clf = neighbors.KNeighborsClassifier(n_neighbors=3,
algorithm=algorithm)
clf.fit(X_train, y_train)
y_1 = clf.predict(X_test)
dist_1, ind_1 = clf.kneighbors(X_test)
A_1 = clf.kneighbors_graph(X_test, mode='distance').toarray()
for n_jobs in [-1, 2, 5]:
clf.set_params(n_jobs=n_jobs)
y = clf.predict(X_test)
dist, ind = clf.kneighbors(X_test)
A = clf.kneighbors_graph(X_test, mode='distance').toarray()
assert_array_equal(y_1, y)
assert_array_almost_equal(dist_1, dist)
assert_array_equal(ind_1, ind)
assert_array_almost_equal(A_1, A)
def test_dtype_convert():
classifier = neighbors.KNeighborsClassifier(n_neighbors=1)
CLASSES = 15
X = np.eye(CLASSES)
y = [ch for ch in 'ABCDEFGHIJKLMNOPQRSTU'[:CLASSES]]
result = classifier.fit(X, y).predict(X)
assert_array_equal(result, y)
|
|
"""
Base tools for handling various kinds of data structures, attaching metadata to
results, and doing data cleaning
"""
import numpy as np
from pandas import DataFrame, Series, TimeSeries
from scikits.timeseries import time_series
from scikits.statsmodels.tools.decorators import (resettable_cache,
cache_readonly, cache_writable)
import scikits.statsmodels.tools.data as data_util
class ModelData(object):
"""
Class responsible for handling input data and extracting metadata into the
appropriate form
"""
def __init__(self, endog, exog=None, **kwds):
self._orig_endog = endog
self._orig_exog = exog
self.endog, self.exog = self._convert_endog_exog(endog, exog)
self._check_integrity()
self._cache = resettable_cache()
def _convert_endog_exog(self, endog, exog):
# for consistent outputs if endog is (n,1)
yarr = self._get_yarr(endog)
xarr = None
if exog is not None:
xarr = self._get_xarr(exog)
if xarr.ndim == 1:
xarr = xarr[:, None]
if xarr.ndim != 2:
raise ValueError("exog is not 1d or 2d")
return yarr, xarr
@cache_writable()
def ynames(self):
endog = self._orig_endog
ynames = self._get_names(endog)
if not ynames:
ynames = _make_endog_names(endog)
if len(ynames) == 1:
return ynames[0]
else:
return list(ynames)
@cache_writable()
def xnames(self):
exog = self._orig_exog
if exog is not None:
xnames = self._get_names(exog)
if not xnames:
xnames = _make_exog_names(exog)
return list(xnames)
return None
@cache_readonly
def row_labels(self):
exog = self._orig_exog
if exog is not None:
row_labels = self._get_row_labels(exog)
else:
endog = self._orig_endog
row_labels = self._get_row_labels(endog)
return row_labels
def _get_row_labels(self, arr):
return None
def _get_names(self, arr):
if isinstance(arr, DataFrame):
return list(arr.columns)
elif isinstance(arr, Series):
return [arr.name]
else:
try:
return arr.dtype.names
except AttributeError:
pass
return None
def _get_yarr(self, endog):
if data_util.is_structured_ndarray(endog):
endog = data_util.struct_to_ndarray(endog)
return np.asarray(endog).squeeze()
def _get_xarr(self, exog):
if data_util.is_structured_ndarray(exog):
exog = data_util.struct_to_ndarray(exog)
return np.asarray(exog)
def _check_integrity(self):
if self.exog is not None:
if len(self.exog) != len(self.endog):
raise ValueError("endog and exog matrices are different sizes")
def wrap_output(self, obj, how='columns'):
if how == 'columns':
return self.attach_columns(obj)
elif how == 'rows':
return self.attach_rows(obj)
elif how == 'cov':
return self.attach_cov(obj)
elif how == 'dates':
return self.attach_dates(obj)
elif how == 'columns_eq':
return self.attach_columns_eq(obj)
elif how == 'cov_eq':
return self.attach_cov_eq(obj)
else:
return obj
def attach_columns(self, result):
return result
def attach_columns_eq(self, result):
return result
def attach_cov(self, result):
return result
def attach_cov_eq(self, result):
return result
def attach_rows(self, result):
return result
def attach_dates(self, result):
return result
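# Minimal usage sketch (comment only, illustrative; not part of the original
# module). With plain ndarrays the base class just generates default metadata:
#     y = np.random.randn(10)
#     x = np.random.randn(10, 2)
#     data = ModelData(y, exog=x)
#     data.ynames   # 'y'
#     data.xnames   # ['x0', 'x1']  (no constant column detected)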
class PandasData(ModelData):
"""
Data handling class which knows how to reattach pandas metadata to model
results
"""
def _get_row_labels(self, arr):
return arr.index
def attach_columns(self, result):
if result.squeeze().ndim == 1:
return Series(result, index=self.xnames)
else: # for e.g., confidence intervals
return DataFrame(result, index=self.xnames)
def attach_columns_eq(self, result):
return DataFrame(result, index=self.xnames, columns=self.ynames)
def attach_cov(self, result):
return DataFrame(result, index=self.xnames, columns=self.xnames)
def attach_cov_eq(self, result):
return DataFrame(result, index=self.ynames, columns=self.ynames)
def attach_rows(self, result):
# assumes if len(row_labels) > len(result) it's bc it was truncated
# at the front, for AR lags, for example
if result.squeeze().ndim == 1:
return Series(result, index=self.row_labels[-len(result):])
else: # this is for VAR results, may not be general enough
return DataFrame(result, index=self.row_labels[-len(result):],
columns=self.ynames)
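# Illustrative note (not in the original source): for an AR(2) model fit on
# 100 observations there are only 98 in-sample fitted values, so the result is
# indexed with row_labels[-98:], dropping the two lagged-away leading labels.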
def attach_dates(self, result):
return TimeSeries(result, index=self.predict_dates)
class TimeSeriesData(ModelData):
"""
Data handling class which returns scikits.timeseries model results
"""
def _get_row_labels(self, arr):
return arr.dates
#def attach_columns(self, result):
# return recarray?
#def attach_cov(self, result):
# return recarray?
def attach_rows(self, result):
return time_series(result, dates = self.row_labels[-len(result):])
def attach_dates(self, result):
return time_series(result, dates = self.predict_dates)
_la = None
def _lazy_import_larry():
global _la
import la
_la = la
class LarryData(ModelData):
"""
Data handling class which knows how to reattach larry (la) metadata to model
results
"""
def __init__(self, endog, exog=None, **kwds):
_lazy_import_larry()
super(LarryData, self).__init__(endog, exog=exog, **kwds)
def _get_yarr(self, endog):
try:
return endog.x
except AttributeError:
return np.asarray(endog).squeeze()
def _get_xarr(self, exog):
try:
return exog.x
except AttributeError:
return np.asarray(exog)
def _get_names(self, exog):
try:
return exog.label[1]
except Exception:
pass
return None
def _get_row_labels(self, arr):
return arr.label[0]
def attach_columns(self, result):
if result.ndim == 1:
return _la.larry(result, [self.xnames])
else:
shape = result.shape
return _la.larry(result, [self.xnames, range(shape[1])])
def attach_columns_eq(self, result):
return _la.larry(result, [self.xnames, self.ynames])
def attach_cov(self, result):
return _la.larry(result, [self.xnames, self.xnames])
def attach_cov_eq(self, result):
return _la.larry(result, [self.ynames, self.ynames])
def attach_rows(self, result):
return _la.larry(result, [self.row_labels[-len(result):]])
def attach_dates(self, result):
return _la.larry(result, [self.predict_dates])
def _is_structured_array(data):
return isinstance(data, np.ndarray) and data.dtype.names is not None
def _make_endog_names(endog):
if endog.ndim == 1 or endog.shape[1] == 1:
ynames = ['y']
else: # for VAR
ynames = ['y%d' % (i+1) for i in range(endog.shape[1])]
return ynames
def _make_exog_names(exog):
exog_var = exog.var(0)
if (exog_var == 0).any():
# assumes one constant in first or last position
# avoid exception if more than one constant
const_idx = exog_var.argmin()
if const_idx == exog.shape[1] - 1:
exog_names = ['x%d' % i for i in range(1,exog.shape[1])]
exog_names += ['const']
else:
exog_names = ['x%d' % i for i in range(exog.shape[1])]
exog_names[const_idx] = 'const'
else:
exog_names = ['x%d' % i for i in range(exog.shape[1])]
return exog_names
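# Illustrative example (comment only): for a (nobs, 3) exog whose last column
# is constant, the generated names are ['x1', 'x2', 'const']; if the constant
# sits in any other position, e.g. first, they become ['const', 'x1', 'x2'].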
def handle_data(endog, exog):
"""
Given endog and exog inputs, return an instance of the appropriate
ModelData subclass for reattaching their metadata to model results.
"""
if _is_using_pandas(endog, exog):
klass = PandasData
elif _is_using_larry(endog, exog):
klass = LarryData
elif _is_using_timeseries(endog, exog):
klass = TimeSeriesData
# keep this check last
elif _is_using_ndarray(endog, exog):
klass = ModelData
else:
raise ValueError('unrecognized data structures: %s / %s' %
(type(endog), type(exog)))
return klass(endog, exog=exog)
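# Dispatch sketch (comment only, illustrative):
#     handle_data(pandas.Series(y), pandas.DataFrame(X))   # -> PandasData
#     handle_data(np.asarray(y), np.asarray(X))             # -> ModelData
# The plain-ndarray check runs last because, in this era of pandas, Series is
# an ndarray subclass and would otherwise be caught by it.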
def _is_using_ndarray(endog, exog):
return (isinstance(endog, np.ndarray) and
(isinstance(exog, np.ndarray) or exog is None))
def _is_using_pandas(endog, exog):
from pandas import Series, DataFrame, WidePanel
klasses = (Series, DataFrame, WidePanel)
return (isinstance(endog, klasses) or isinstance(exog, klasses))
def _is_using_larry(endog, exog):
try:
import la
return isinstance(endog, la.larry) or isinstance(exog, la.larry)
except ImportError:
return False
def _is_using_timeseries(endog, exog):
from scikits.timeseries import TimeSeries as tsTimeSeries
return isinstance(endog, tsTimeSeries) or isinstance(exog, tsTimeSeries)
|
|
# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# MIT License. See license.txt
from __future__ import unicode_literals
"""
Boot session from cache or build
Session bootstraps info needed by common client side activities including
permission, homepage, default variables, system defaults etc
"""
import frappe, json
from frappe import _
import frappe.utils
from frappe.utils import cint, cstr, get_assets_json
import frappe.model.meta
import frappe.defaults
import frappe.translate
import redis
from six.moves.urllib.parse import unquote
from six import text_type
from frappe.cache_manager import clear_user_cache
@frappe.whitelist(allow_guest=True)
def clear(user=None):
frappe.local.session_obj.update(force=True)
frappe.local.db.commit()
clear_user_cache(frappe.session.user)
frappe.response['message'] = _("Cache Cleared")
def clear_sessions(user=None, keep_current=False, device=None, force=False):
'''Clear other sessions of the current user. Called at login / logout
:param user: user name (default: current user)
:param keep_current: keep current session (default: false)
:param device: delete sessions of this device (default: desktop, mobile)
:param force: triggered by the user (default false)
'''
reason = "Logged In From Another Session"
if force:
reason = "Force Logged out by the user"
for sid in get_sessions_to_clear(user, keep_current, device):
delete_session(sid, reason=reason)
def get_sessions_to_clear(user=None, keep_current=False, device=None):
'''Returns sids of the user's sessions to be cleared. Called at login / logout
:param user: user name (default: current user)
:param keep_current: keep current session (default: false)
:param device: delete sessions of this device (default: desktop, mobile)
'''
if not user:
user = frappe.session.user
if not device:
device = ("desktop", "mobile")
if not isinstance(device, (tuple, list)):
device = (device,)
offset = 0
if user == frappe.session.user:
simultaneous_sessions = frappe.db.get_value('User', user, 'simultaneous_sessions') or 1
offset = simultaneous_sessions - 1
condition = ''
if keep_current:
condition = ' AND sid != {0}'.format(frappe.db.escape(frappe.session.sid))
return frappe.db.sql_list("""
SELECT `sid` FROM `tabSessions`
WHERE `tabSessions`.user=%(user)s
AND device in %(device)s
{condition}
ORDER BY `lastupdate` DESC
LIMIT 100 OFFSET {offset}""".format(condition=condition, offset=offset),
{"user": user, "device": device})
def delete_session(sid=None, user=None, reason="Session Expired"):
from frappe.core.doctype.activity_log.feed import logout_feed
frappe.cache().hdel("session", sid)
frappe.cache().hdel("last_db_session_update", sid)
if sid and not user:
user_details = frappe.db.sql("""select user from tabSessions where sid=%s""", sid, as_dict=True)
if user_details: user = user_details[0].get("user")
logout_feed(user, reason)
frappe.db.sql("""delete from tabSessions where sid=%s""", sid)
frappe.db.commit()
def clear_all_sessions(reason=None):
"""This effectively logs out all users"""
frappe.only_for("Administrator")
if not reason: reason = "Deleted All Active Session"
for sid in frappe.db.sql_list("select sid from `tabSessions`"):
delete_session(sid, reason=reason)
def get_expired_sessions():
'''Returns list of expired sessions'''
expired = []
for device in ("desktop", "mobile"):
expired += frappe.db.sql_list("""SELECT `sid`
FROM `tabSessions`
WHERE (NOW() - `lastupdate`) > %s
AND device = %s""", (get_expiry_period_for_query(device), device))
return expired
def clear_expired_sessions():
"""This function is meant to be called from scheduler"""
for sid in get_expired_sessions():
delete_session(sid, reason="Session Expired")
def get():
"""get session boot info"""
from frappe.boot import get_bootinfo, get_unseen_notes
from frappe.utils.change_log import get_change_log
bootinfo = None
if not getattr(frappe.conf,'disable_session_cache', None):
# check if cache exists
bootinfo = frappe.cache().hget("bootinfo", frappe.session.user)
if bootinfo:
bootinfo['from_cache'] = 1
bootinfo["user"]["recent"] = json.dumps(\
frappe.cache().hget("user_recent", frappe.session.user))
if not bootinfo:
# if not create it
bootinfo = get_bootinfo()
frappe.cache().hset("bootinfo", frappe.session.user, bootinfo)
try:
frappe.cache().ping()
except redis.exceptions.ConnectionError:
message = _("Redis cache server not running. Please contact Administrator / Tech support")
if 'messages' in bootinfo:
bootinfo['messages'].append(message)
else:
bootinfo['messages'] = [message]
# check only when clear cache is done, and don't cache this
if frappe.local.request:
bootinfo["change_log"] = get_change_log()
bootinfo["metadata_version"] = frappe.cache().get_value("metadata_version")
if not bootinfo["metadata_version"]:
bootinfo["metadata_version"] = frappe.reset_metadata_version()
bootinfo.notes = get_unseen_notes()
bootinfo.assets_json = get_assets_json()
for hook in frappe.get_hooks("extend_bootinfo"):
frappe.get_attr(hook)(bootinfo=bootinfo)
bootinfo["lang"] = frappe.translate.get_user_lang()
bootinfo["disable_async"] = frappe.conf.disable_async
bootinfo["setup_complete"] = cint(frappe.db.get_single_value('System Settings', 'setup_complete'))
bootinfo["is_first_startup"] = cint(frappe.db.get_single_value('System Settings', 'is_first_startup'))
return bootinfo
def get_csrf_token():
if not frappe.local.session.data.csrf_token:
generate_csrf_token()
return frappe.local.session.data.csrf_token
def generate_csrf_token():
frappe.local.session.data.csrf_token = frappe.generate_hash()
frappe.local.session_obj.update(force=True)
class Session:
def __init__(self, user, resume=False, full_name=None, user_type=None):
self.sid = cstr(frappe.form_dict.get('sid') or
unquote(frappe.request.cookies.get('sid', 'Guest')))
self.user = user
self.device = frappe.form_dict.get("device") or "desktop"
self.user_type = user_type
self.full_name = full_name
self.data = frappe._dict({'data': frappe._dict({})})
self.time_diff = None
# set local session
frappe.local.session = self.data
if resume:
self.resume()
else:
if self.user:
self.start()
def start(self):
"""start a new session"""
# generate sid
if self.user=='Guest':
sid = 'Guest'
else:
sid = frappe.generate_hash()
self.data.user = self.user
self.data.sid = sid
self.data.data.user = self.user
self.data.data.session_ip = frappe.local.request_ip
if self.user != "Guest":
self.data.data.update({
"last_updated": frappe.utils.now(),
"session_expiry": get_expiry_period(self.device),
"full_name": self.full_name,
"user_type": self.user_type,
"device": self.device,
"session_country": get_geo_ip_country(frappe.local.request_ip) if frappe.local.request_ip else None,
})
# insert session
if self.user!="Guest":
self.insert_session_record()
# update user
user = frappe.get_doc("User", self.data['user'])
frappe.db.sql("""UPDATE `tabUser`
SET
last_login = %(now)s,
last_ip = %(ip)s,
last_active = %(now)s
WHERE name=%(name)s""", {
'now': frappe.utils.now(),
'ip': frappe.local.request_ip,
'name': self.data['user']
})
user.run_notifications("before_change")
user.run_notifications("on_update")
frappe.db.commit()
def insert_session_record(self):
frappe.db.sql("""insert into `tabSessions`
(`sessiondata`, `user`, `lastupdate`, `sid`, `status`, `device`)
values (%s , %s, NOW(), %s, 'Active', %s)""",
(str(self.data['data']), self.data['user'], self.data['sid'], self.device))
# also add to memcache
frappe.cache().hset("session", self.data.sid, self.data)
def resume(self):
"""non-login request: load a session"""
import frappe
from frappe.auth import validate_ip_address
data = self.get_session_record()
if data:
# set language
self.data.update({'data': data, 'user':data.user, 'sid': self.sid})
self.user = data.user
validate_ip_address(self.user)
self.device = data.device
else:
self.start_as_guest()
if self.sid != "Guest":
frappe.local.user_lang = frappe.translate.get_user_lang(self.data.user)
frappe.local.lang = frappe.local.user_lang
def get_session_record(self):
"""get session record, or return the standard Guest Record"""
from frappe.auth import clear_cookies
r = self.get_session_data()
if not r:
frappe.response["session_expired"] = 1
clear_cookies()
self.sid = "Guest"
r = self.get_session_data()
return r
def get_session_data(self):
if self.sid=="Guest":
return frappe._dict({"user":"Guest"})
data = self.get_session_data_from_cache()
if not data:
data = self.get_session_data_from_db()
return data
def get_session_data_from_cache(self):
data = frappe.cache().hget("session", self.sid)
if data:
data = frappe._dict(data)
session_data = data.get("data", {})
# check whether the cached session has expired since its last update
self.time_diff = frappe.utils.time_diff_in_seconds(frappe.utils.now(),
session_data.get("last_updated"))
expiry = get_expiry_in_seconds(session_data.get("session_expiry"))
if self.time_diff > expiry:
self._delete_session()
data = None
return data and data.data
def get_session_data_from_db(self):
self.device = frappe.db.sql('SELECT `device` FROM `tabSessions` WHERE `sid`=%s', self.sid)
self.device = self.device and self.device[0][0] or 'desktop'
rec = frappe.db.sql("""
SELECT `user`, `sessiondata`
FROM `tabSessions` WHERE `sid`=%s AND
(NOW() - lastupdate) < %s
""", (self.sid, get_expiry_period_for_query(self.device)))
if rec:
data = frappe._dict(frappe.safe_eval(rec and rec[0][1] or '{}'))
data.user = rec[0][0]
else:
self._delete_session()
data = None
return data
def _delete_session(self):
delete_session(self.sid, reason="Session Expired")
def start_as_guest(self):
"""all guests share the same 'Guest' session"""
self.user = "Guest"
self.start()
def update(self, force=False):
"""extend session expiry"""
if (frappe.session['user'] == "Guest" or frappe.form_dict.cmd=="logout"):
return
now = frappe.utils.now()
self.data['data']['last_updated'] = now
self.data['data']['lang'] = text_type(frappe.lang)
# update session in db
last_updated = frappe.cache().hget("last_db_session_update", self.sid)
time_diff = frappe.utils.time_diff_in_seconds(now, last_updated) if last_updated else None
# database persistence is secondary, don't update it too often
updated_in_db = False
if force or (time_diff==None) or (time_diff > 600):
# update sessions table
frappe.db.sql("""update `tabSessions` set sessiondata=%s,
lastupdate=NOW() where sid=%s""" , (str(self.data['data']),
self.data['sid']))
# update last active in user table
frappe.db.sql("""update `tabUser` set last_active=%(now)s where name=%(name)s""", {
"now": now,
"name": frappe.session.user
})
frappe.db.commit()
frappe.cache().hset("last_db_session_update", self.sid, now)
updated_in_db = True
# set in memcache
frappe.cache().hset("session", self.sid, self.data)
return updated_in_db
def get_expiry_period_for_query(device=None):
if frappe.db.db_type == 'postgres':
return get_expiry_period(device)
else:
return get_expiry_in_seconds(device=device)
def get_expiry_in_seconds(expiry=None, device=None):
if not expiry:
expiry = get_expiry_period(device)
parts = expiry.split(":")
return (cint(parts[0]) * 3600) + (cint(parts[1]) * 60) + cint(parts[2])
def get_expiry_period(device="desktop"):
if device=="mobile":
key = "session_expiry_mobile"
default = "720:00:00"
else:
key = "session_expiry"
default = "06:00:00"
exp_sec = frappe.defaults.get_global_default(key) or default
# in case the seconds part is missing
if len(exp_sec.split(':')) == 2:
exp_sec = exp_sec + ':00'
return exp_sec
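# Worked example (comment only): the default desktop expiry "06:00:00" parses
# to 6 * 3600 + 0 * 60 + 0 = 21600 seconds in get_expiry_in_seconds; a
# two-part value such as "06:00" is first padded to "06:00:00" above.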
def get_geo_from_ip(ip_addr):
try:
from geolite2 import geolite2
with geolite2 as f:
reader = f.reader()
data = reader.get(ip_addr)
return frappe._dict(data)
except ImportError:
return
except ValueError:
return
except TypeError:
return
def get_geo_ip_country(ip_addr):
match = get_geo_from_ip(ip_addr)
if match:
return match.country
|
|
###############################################################################
##
## Copyright (C) 2006-2011, University of Utah.
## All rights reserved.
## Contact: [email protected]
##
## This file is part of VisTrails.
##
## "Redistribution and use in source and binary forms, with or without
## modification, are permitted provided that the following conditions are met:
##
## - Redistributions of source code must retain the above copyright notice,
## this list of conditions and the following disclaimer.
## - Redistributions in binary form must reproduce the above copyright
## notice, this list of conditions and the following disclaimer in the
## documentation and/or other materials provided with the distribution.
## - Neither the name of the University of Utah nor the names of its
## contributors may be used to endorse or promote products derived from
## this software without specific prior written permission.
##
## THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
## AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
## THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
## PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
## CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
## EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
## PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
## OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
## WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
## OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
## ADVISED OF THE POSSIBILITY OF SUCH DAMAGE."
##
###############################################################################
"""The only reason this module exists is to try to prevent QObjects
from being created before a QApplication gets constructed. The problem
with a QObject being constructed before a QApplication is that signals
and slots do not get connected, leading to hard-to-spot bugs.
Notice that there is no hard enforcement - if the client code does not
ask for permission, nothing stops it from creating QObjects (which
won't work correctly). Even worse, nothing stops a malicious object
from setting okToCreateQObjects directly.
As the python saying goes, 'we are all consenting adults here'."""
import inspect
from PyQt4 import QtGui, QtCore
import types
################################################################################
class qt_super(object):
def __init__(self, class_, obj):
self._class = class_
self._obj = obj
def __getattr__(self, attr):
s = super(self._class, self._obj)
try:
return getattr(s, attr)
except AttributeError, e:
mro = type(self._obj).mro()
try:
ix = mro.index(self._class)
except ValueError:
raise TypeError("qt_super: obj must be an instance of class")
for class_ in mro[ix+1:]:
try:
unbound_meth = getattr(class_, attr)
return types.MethodType(unbound_meth, self._obj, class_)
except AttributeError:
pass
raise e
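# Usage sketch (comment only, illustrative; the class and method names are
# made up): inside a method of a class that multiply inherits from Qt classes,
#     qt_super(MyMixin, self).sizeHint()
# behaves like super(MyMixin, self).sizeHint() but keeps walking the MRO past
# classes for which PyQt's super raises AttributeError.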
################################################################################
class DisallowedCaller(Exception):
"""This expection is raised whenever a caller that's not privileged to
allow QObject construction tries to do so."""
def __str__(self):
return "Caller is not allowed to call this function"
class QApplicationNotYetCreated(Exception):
"""This expection is raised whenever a function asks for permission to
create a QObject but the QApplication has not granted it yet."""
def __str__(self):
return "QApplication has not been created yet"
def allowQObjects():
"""Allows subsequent QObject creation. The constructor for the
QApplication-derived class must call this so that we know it's alright
to start creating other QtCore.QObjects."""
# tries to check if caller is allowed to call this
caller = inspect.currentframe().f_back
d = caller.f_locals
if (not d.has_key('self') or
not isinstance(d['self'], QtCore.QCoreApplication)):
raise DisallowedCaller
global okToCreateQObjects
okToCreateQObjects = True
def askForQObjectCreation():
"""This function simply throws an exception if it is not yet ok
to create QObjects."""
global okToCreateQObjects
if not okToCreateQObjects:
raise QApplicationNotYetCreated()
global _appHolder
_appHolder = None
def createBogusQtGuiApp(argv=["bogus"]):
"""createBogusQtGuiApp creates a bogus QtApplication so we can
create qobjects during test runs.
"""
class BogusApplication(QtGui.QApplication):
def __init__(self):
QtGui.QApplication.__init__(self, argv)
allowQObjects()
global _appHolder
if QtGui.qApp:
_appHolder = QtGui.qApp
if not _appHolder:
_appHolder = BogusApplication()
return _appHolder
def destroyBogusQtApp():
global _appHolder
del _appHolder
def qt_version():
return [int(i)
for i in
QtCore.qVersion().split('.')]
################################################################################
okToCreateQObjects = False
class SignalSet(object):
"""SignalSet stores a list of (object, signal, method) that can be
all connected and disconnected simultaneously. This way, it's
harder to forget to disconnect one of many signals. Also, if the
SignalSet has already been plugged, it will signal an exception,
to avoid multiple connections."""
def __init__(self, owner, signalTripleList):
self.owner = owner
self.signalTripleList = signalTripleList
self.plugged = False
def plug(self):
if self.plugged:
raise Exception("SignalSet %s is already plugged" % self)
for tupl in self.signalTripleList:
self.owner.connect(*tupl)
self.plugged = True
def unplug(self):
if not self.plugged:
return
for tupl in self.signalTripleList:
self.owner.disconnect(*tupl)
self.plugged = False
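# Usage sketch (comment only, illustrative; the widget and slot names are
# made up):
#     self.signals = SignalSet(self, [(button, QtCore.SIGNAL('clicked()'),
#                                      self.onButtonClicked)])
#     self.signals.plug()     # connects every (sender, signal, slot) triple
#     ...
#     self.signals.unplug()   # disconnects them all again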
################################################################################
_oldConnect = QtCore.QObject.connect
_oldDisconnect = QtCore.QObject.disconnect
_oldEmit = QtCore.QObject.emit
def _wrapConnect(callableObject):
"""Returns a wrapped call to the old version of QtCore.QObject.connect"""
@staticmethod
def call(*args):
callableObject(*args)
_oldConnect(*args)
return call
def _wrapDisconnect(callableObject):
"""Returns a wrapped call to the old version of QtCore.QObject.disconnect"""
@staticmethod
def call(*args):
callableObject(*args)
_oldDisconnect(*args)
return call
def enableSignalDebugging(**kwargs):
"""Call this to enable Qt Signal debugging. This will trap all
connect, disconnect and emit calls. For example:
enableSignalDebugging(connectCall=callable1, disconnectCall=callable2,
emitCall=callable3)
will call callable1, 2 and 3 when the respective Qt methods are issued.
"""
f = lambda *args: None
connectCall = kwargs.get('connectCall', f)
disconnectCall = kwargs.get('disconnectCall', f)
emitCall = kwargs.get('emitCall', f)
def printIt(msg):
def call(*args):
print msg, args
return call
QtCore.QObject.connect = _wrapConnect(connectCall)
QtCore.QObject.disconnect = _wrapDisconnect(disconnectCall)
def new_emit(self, *args):
emitCall(self, *args)
_oldEmit(self, *args)
QtCore.QObject.emit = new_emit
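# Usage sketch (comment only, illustrative):
#     def log_connect(*args):
#         print 'connect', args
#     enableSignalDebugging(connectCall=log_connect)
# Every QtCore.QObject.connect call is then passed to log_connect before being
# forwarded to the original implementation.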
|
|
import asyncio
from contextlib import suppress
import errno
import logging
from multiprocessing.queues import Empty
import os
import psutil
import shutil
import threading
import uuid
import warnings
import weakref
import dask
from dask.system import CPU_COUNT
from tornado.ioloop import IOLoop, PeriodicCallback
from tornado import gen
from .comm import get_address_host, unparse_host_port
from .comm.addressing import address_from_user_args
from .core import RPCClosed, CommClosedError, coerce_to_address
from .metrics import time
from .node import ServerNode
from . import preloading
from .process import AsyncProcess
from .proctitle import enable_proctitle_on_children
from .security import Security
from .utils import (
get_ip,
mp_context,
silence_logging,
json_load_robust,
parse_timedelta,
parse_ports,
TimeoutError,
)
from .worker import run, parse_memory_limit, Worker
logger = logging.getLogger(__name__)
class Nanny(ServerNode):
""" A process to manage worker processes
The nanny spins up Worker processes, watches them, and kills or restarts
them as necessary. It is required if you want to use the
``Client.restart`` method, or to restart the worker automatically if
it reaches the terminate fraction of its memory limit.
The parameters for the Nanny are mostly the same as those for the Worker.
See Also
--------
Worker
"""
_instances = weakref.WeakSet()
process = None
status = None
def __init__(
self,
scheduler_ip=None,
scheduler_port=None,
scheduler_file=None,
worker_port=0,
nthreads=None,
ncores=None,
loop=None,
local_dir=None,
local_directory=None,
services=None,
name=None,
memory_limit="auto",
reconnect=True,
validate=False,
quiet=False,
resources=None,
silence_logs=None,
death_timeout=None,
preload=None,
preload_argv=None,
preload_nanny=None,
preload_nanny_argv=None,
security=None,
contact_address=None,
listen_address=None,
worker_class=None,
env=None,
interface=None,
host=None,
port=None,
protocol=None,
config=None,
**worker_kwargs,
):
self._setup_logging(logger)
self.loop = loop or IOLoop.current()
self.security = security or Security()
assert isinstance(self.security, Security)
self.connection_args = self.security.get_connection_args("worker")
if scheduler_file:
cfg = json_load_robust(scheduler_file)
self.scheduler_addr = cfg["address"]
elif scheduler_ip is None and dask.config.get("scheduler-address"):
self.scheduler_addr = dask.config.get("scheduler-address")
elif scheduler_port is None:
self.scheduler_addr = coerce_to_address(scheduler_ip)
else:
self.scheduler_addr = coerce_to_address((scheduler_ip, scheduler_port))
if protocol is None:
protocol_address = self.scheduler_addr.split("://")
if len(protocol_address) == 2:
protocol = protocol_address[0]
if ncores is not None:
warnings.warn("the ncores= parameter has moved to nthreads=")
nthreads = ncores
self._given_worker_port = worker_port
self.nthreads = nthreads or CPU_COUNT
self.reconnect = reconnect
self.validate = validate
self.resources = resources
self.death_timeout = parse_timedelta(death_timeout)
self.preload = preload
if self.preload is None:
self.preload = dask.config.get("distributed.worker.preload")
self.preload_argv = preload_argv
if self.preload_argv is None:
self.preload_argv = dask.config.get("distributed.worker.preload-argv")
if preload_nanny is None:
preload_nanny = dask.config.get("distributed.nanny.preload")
if preload_nanny_argv is None:
preload_nanny_argv = dask.config.get("distributed.nanny.preload-argv")
self.Worker = Worker if worker_class is None else worker_class
self.env = env or {}
self.config = config or {}
worker_kwargs.update(
{
"port": worker_port,
"interface": interface,
"protocol": protocol,
"host": host,
}
)
self.worker_kwargs = worker_kwargs
self.contact_address = contact_address
self.memory_terminate_fraction = dask.config.get(
"distributed.worker.memory.terminate"
)
if local_dir is not None:
warnings.warn("The local_dir keyword has moved to local_directory")
local_directory = local_dir
if local_directory is None:
local_directory = dask.config.get("temporary-directory") or os.getcwd()
if not os.path.exists(local_directory):
os.makedirs(local_directory)
local_directory = os.path.join(local_directory, "dask-worker-space")
self.local_directory = local_directory
self.preloads = preloading.process_preloads(
self, preload_nanny, preload_nanny_argv, file_dir=self.local_directory
)
self.services = services
self.name = name
self.quiet = quiet
self.auto_restart = True
self.memory_limit = parse_memory_limit(memory_limit, self.nthreads)
if silence_logs:
silence_logging(level=silence_logs)
self.silence_logs = silence_logs
handlers = {
"instantiate": self.instantiate,
"kill": self.kill,
"restart": self.restart,
# cannot call it 'close' on the rpc side for naming conflict
"get_logs": self.get_logs,
"terminate": self.close,
"close_gracefully": self.close_gracefully,
"run": self.run,
}
super(Nanny, self).__init__(
handlers=handlers, io_loop=self.loop, connection_args=self.connection_args
)
self.scheduler = self.rpc(self.scheduler_addr)
if self.memory_limit:
pc = PeriodicCallback(self.memory_monitor, 100)
self.periodic_callbacks["memory"] = pc
if (
not host
and not interface
and not self.scheduler_addr.startswith("inproc://")
):
host = get_ip(get_address_host(self.scheduler.address))
self._start_port = port
self._start_host = host
self._interface = interface
self._protocol = protocol
self._listen_address = listen_address
Nanny._instances.add(self)
self.status = "init"
def __repr__(self):
return "<Nanny: %s, threads: %d>" % (self.worker_address, self.nthreads)
async def _unregister(self, timeout=10):
if self.process is None:
return
worker_address = self.process.worker_address
if worker_address is None:
return
allowed_errors = (TimeoutError, CommClosedError, EnvironmentError, RPCClosed)
with suppress(allowed_errors):
await asyncio.wait_for(
self.scheduler.unregister(address=self.worker_address), timeout
)
@property
def worker_address(self):
return None if self.process is None else self.process.worker_address
@property
def worker_dir(self):
return None if self.process is None else self.process.worker_dir
@property
def local_dir(self):
""" For API compatibility with Nanny """
warnings.warn("The local_dir attribute has moved to local_directory")
return self.local_directory
async def start(self):
""" Start nanny, start local process, start watching """
await super().start()
ports = parse_ports(self._start_port)
for port in ports:
start_address = address_from_user_args(
host=self._start_host,
port=port,
interface=self._interface,
protocol=self._protocol,
security=self.security,
)
try:
await self.listen(
start_address, **self.security.get_listen_args("worker")
)
except OSError as e:
if len(ports) > 1 and e.errno == errno.EADDRINUSE:
continue
else:
raise e
else:
self._start_address = start_address
break
else:
raise ValueError(
f"Could not start Nanny on host {self._start_host}"
f"with port {self._start_port}"
)
self.ip = get_address_host(self.address)
for preload in self.preloads:
await preload.start()
logger.info(" Start Nanny at: %r", self.address)
response = await self.instantiate()
if response == "running":
assert self.worker_address
self.status = "running"
else:
await self.close()
self.start_periodic_callbacks()
return self
async def kill(self, comm=None, timeout=2):
""" Kill the local worker process
Blocks until both the process is down and the scheduler is properly
informed
"""
self.auto_restart = False
if self.process is None:
return "OK"
deadline = self.loop.time() + timeout
await self.process.kill(timeout=0.8 * (deadline - self.loop.time()))
async def instantiate(self, comm=None):
""" Start a local worker process
Blocks until the process is up and the scheduler is properly informed
"""
if self._listen_address:
start_arg = self._listen_address
else:
host = self.listener.bound_address[0]
start_arg = self.listener.prefix + unparse_host_port(
host, self._given_worker_port
)
if self.process is None:
worker_kwargs = dict(
scheduler_ip=self.scheduler_addr,
nthreads=self.nthreads,
local_directory=self.local_directory,
services=self.services,
nanny=self.address,
name=self.name,
memory_limit=self.memory_limit,
reconnect=self.reconnect,
resources=self.resources,
validate=self.validate,
silence_logs=self.silence_logs,
death_timeout=self.death_timeout,
preload=self.preload,
preload_argv=self.preload_argv,
security=self.security,
contact_address=self.contact_address,
)
worker_kwargs.update(self.worker_kwargs)
self.process = WorkerProcess(
worker_kwargs=worker_kwargs,
worker_start_args=(start_arg,),
silence_logs=self.silence_logs,
on_exit=self._on_exit_sync,
worker=self.Worker,
env=self.env,
config=self.config,
)
self.auto_restart = True
if self.death_timeout:
try:
result = await asyncio.wait_for(
self.process.start(), self.death_timeout
)
except TimeoutError:
await self.close(timeout=self.death_timeout)
logger.error(
"Timed out connecting Nanny '%s' to scheduler '%s'",
self,
self.scheduler_addr,
)
raise
else:
result = await self.process.start()
return result
async def restart(self, comm=None, timeout=2, executor_wait=True):
start = time()
async def _():
if self.process is not None:
await self.kill()
await self.instantiate()
try:
await asyncio.wait_for(_(), timeout)
except TimeoutError:
logger.error("Restart timed out, returning before finished")
return "timed out"
else:
return "OK"
@property
def _psutil_process(self):
pid = self.process.process.pid
try:
proc = self._psutil_process_obj
except AttributeError:
self._psutil_process_obj = psutil.Process(pid)
if self._psutil_process_obj.pid != pid:
self._psutil_process_obj = psutil.Process(pid)
return self._psutil_process_obj
def memory_monitor(self):
""" Track worker's memory. Restart if it goes above terminate fraction """
if self.status != "running":
return
process = self.process.process
if process is None:
return
try:
proc = self._psutil_process
memory = proc.memory_info().rss
except (ProcessLookupError, psutil.NoSuchProcess, psutil.AccessDenied):
return
frac = memory / self.memory_limit
if self.memory_terminate_fraction and frac > self.memory_terminate_fraction:
logger.warning(
"Worker exceeded %d%% memory budget. Restarting",
100 * self.memory_terminate_fraction,
)
process.terminate()
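# Illustrative note (not in the original source): with memory_limit=4e9 and a
# terminate fraction of, say, 0.95, the worker process is terminated once its
# RSS climbs past roughly 3.8 GB; _on_exit then restarts it because
# auto_restart is True.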
def is_alive(self):
return self.process is not None and self.process.is_alive()
def run(self, *args, **kwargs):
return run(self, *args, **kwargs)
def _on_exit_sync(self, exitcode):
self.loop.add_callback(self._on_exit, exitcode)
async def _on_exit(self, exitcode):
if self.status not in ("closing", "closed"):
try:
await self.scheduler.unregister(address=self.worker_address)
except (EnvironmentError, CommClosedError):
if not self.reconnect:
await self.close()
return
try:
if self.status not in ("closing", "closed", "closing-gracefully"):
if self.auto_restart:
logger.warning("Restarting worker")
await self.instantiate()
elif self.status == "closing-gracefully":
await self.close()
except Exception:
logger.error(
"Failed to restart worker after its process exited", exc_info=True
)
@property
def pid(self):
return self.process and self.process.pid
def _close(self, *args, **kwargs):
warnings.warn("Worker._close has moved to Worker.close", stacklevel=2)
return self.close(*args, **kwargs)
def close_gracefully(self, comm=None):
"""
A signal that we shouldn't try to restart workers if they go away
This is used as part of the cluster shutdown process.
"""
self.status = "closing-gracefully"
async def close(self, comm=None, timeout=5, report=None):
"""
Close the worker process, stop all comms.
"""
if self.status == "closing":
await self.finished()
assert self.status == "closed"
if self.status == "closed":
return "OK"
self.status = "closing"
logger.info("Closing Nanny at %r", self.address)
for preload in self.preloads:
await preload.teardown()
self.stop()
try:
if self.process is not None:
await self.kill(timeout=timeout)
except Exception:
pass
self.process = None
await self.rpc.close()
self.status = "closed"
if comm:
await comm.write("OK")
await ServerNode.close(self)
class WorkerProcess:
def __init__(
self,
worker_kwargs,
worker_start_args,
silence_logs,
on_exit,
worker,
env,
config,
):
self.status = "init"
self.silence_logs = silence_logs
self.worker_kwargs = worker_kwargs
self.worker_start_args = worker_start_args
self.on_exit = on_exit
self.process = None
self.Worker = worker
self.env = env
self.config = config
# Initialized when worker is ready
self.worker_dir = None
self.worker_address = None
async def start(self):
"""
Ensure the worker process is started.
"""
enable_proctitle_on_children()
if self.status == "running":
return self.status
if self.status == "starting":
await self.running.wait()
return self.status
self.init_result_q = init_q = mp_context.Queue()
self.child_stop_q = mp_context.Queue()
uid = uuid.uuid4().hex
self.process = AsyncProcess(
target=self._run,
name="Dask Worker process (from Nanny)",
kwargs=dict(
worker_kwargs=self.worker_kwargs,
worker_start_args=self.worker_start_args,
silence_logs=self.silence_logs,
init_result_q=self.init_result_q,
child_stop_q=self.child_stop_q,
uid=uid,
Worker=self.Worker,
env=self.env,
config=self.config,
),
)
self.process.daemon = dask.config.get("distributed.worker.daemon", default=True)
self.process.set_exit_callback(self._on_exit)
self.running = asyncio.Event()
self.stopped = asyncio.Event()
self.status = "starting"
try:
await self.process.start()
except OSError:
logger.exception("Nanny failed to start process", exc_info=True)
self.process.terminate()
return
msg = await self._wait_until_connected(uid)
if not msg:
return self.status
self.worker_address = msg["address"]
self.worker_dir = msg["dir"]
assert self.worker_address
self.status = "running"
self.running.set()
init_q.close()
return self.status
def _on_exit(self, proc):
if proc is not self.process:
# Ignore exit of old process instance
return
self.mark_stopped()
def _death_message(self, pid, exitcode):
assert exitcode is not None
if exitcode == 255:
return "Worker process %d was killed by unknown signal" % (pid,)
elif exitcode >= 0:
return "Worker process %d exited with status %d" % (pid, exitcode)
else:
return "Worker process %d was killed by signal %d" % (pid, -exitcode)
def is_alive(self):
return self.process is not None and self.process.is_alive()
@property
def pid(self):
return self.process.pid if self.process and self.process.is_alive() else None
def mark_stopped(self):
if self.status != "stopped":
r = self.process.exitcode
assert r is not None
if r != 0:
msg = self._death_message(self.process.pid, r)
logger.info(msg)
self.status = "stopped"
self.stopped.set()
# Release resources
self.process.close()
self.init_result_q = None
self.child_stop_q = None
self.process = None
# Best effort to clean up worker directory
if self.worker_dir and os.path.exists(self.worker_dir):
shutil.rmtree(self.worker_dir, ignore_errors=True)
self.worker_dir = None
# User hook
if self.on_exit is not None:
self.on_exit(r)
async def kill(self, timeout=2, executor_wait=True):
"""
Ensure the worker process is stopped, waiting at most
*timeout* seconds before terminating it abruptly.
"""
loop = IOLoop.current()
deadline = loop.time() + timeout
if self.status == "stopped":
return
if self.status == "stopping":
await self.stopped.wait()
return
assert self.status in ("starting", "running")
self.status = "stopping"
process = self.process
self.child_stop_q.put(
{
"op": "stop",
"timeout": max(0, deadline - loop.time()) * 0.8,
"executor_wait": executor_wait,
}
)
await asyncio.sleep(0) # otherwise we get broken pipe errors
self.child_stop_q.close()
while process.is_alive() and loop.time() < deadline:
await asyncio.sleep(0.05)
if process.is_alive():
logger.warning(
"Worker process still alive after %d seconds, killing", timeout
)
try:
await process.terminate()
except Exception as e:
logger.error("Failed to kill worker process: %s", e)
async def _wait_until_connected(self, uid):
delay = 0.05
while True:
if self.status != "starting":
return
try:
msg = self.init_result_q.get_nowait()
except Empty:
await asyncio.sleep(delay)
continue
if msg["uid"] != uid: # ensure that we didn't cross queues
continue
if "exception" in msg:
logger.error(
"Failed while trying to start worker process: %s", msg["exception"]
)
await self.process.join()
raise msg["exception"]
else:
return msg
@classmethod
def _run(
cls,
worker_kwargs,
worker_start_args,
silence_logs,
init_result_q,
child_stop_q,
uid,
env,
config,
Worker,
): # pragma: no cover
os.environ.update(env)
dask.config.set(config)
try:
from dask.multiprocessing import initialize_worker_process
except ImportError: # old Dask version
pass
else:
initialize_worker_process()
if silence_logs:
logger.setLevel(silence_logs)
IOLoop.clear_instance()
loop = IOLoop()
loop.make_current()
worker = Worker(**worker_kwargs)
async def do_stop(timeout=5, executor_wait=True):
try:
await worker.close(
report=False,
nanny=False,
executor_wait=executor_wait,
timeout=timeout,
)
finally:
loop.stop()
def watch_stop_q():
"""
Wait for an incoming stop message and then stop the
worker cleanly.
"""
while True:
try:
msg = child_stop_q.get(timeout=1000)
except Empty:
pass
else:
child_stop_q.close()
assert msg.pop("op") == "stop"
loop.add_callback(do_stop, **msg)
break
t = threading.Thread(target=watch_stop_q, name="Nanny stop queue watch")
t.daemon = True
t.start()
async def run():
"""
Try to start worker and inform parent of outcome.
"""
try:
await worker
except Exception as e:
logger.exception("Failed to start worker")
init_result_q.put({"uid": uid, "exception": e})
init_result_q.close()
else:
try:
assert worker.address
except ValueError:
pass
else:
init_result_q.put(
{
"address": worker.address,
"dir": worker.local_directory,
"uid": uid,
}
)
init_result_q.close()
await worker.finished()
logger.info("Worker closed")
try:
loop.run_sync(run)
except (TimeoutError, gen.TimeoutError):
# Loop was stopped before wait_until_closed() returned, ignore
pass
except KeyboardInterrupt:
# At this point the loop is not running thus we have to run
# do_stop() explicitly.
loop.run_sync(do_stop)
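# A minimal sketch (not part of the upstream module) of the nanny lifecycle
# described above: start a Nanny against a running scheduler, signal that the
# worker should not be restarted, then close it. The scheduler address below is
# hypothetical.
if __name__ == "__main__":
    import asyncio

    async def _demo_nanny_lifecycle():
        nanny = Nanny("tcp://127.0.0.1:8786", nthreads=1)
        await nanny  # wait until the worker process is up
        nanny.close_gracefully()  # don't restart the worker if it goes away
        await nanny.close(timeout=5)  # kill the worker process and stop comms

    asyncio.run(_demo_nanny_lifecycle())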
|
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Script Language Operators. See the @{$python/script_ops} guide.
@@py_func
"""
# pylint: disable=g-bad-name
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import threading
# Used by py_util.cc to get tracebacks.
import traceback # pylint: disable=unused-import
import numpy as np
import six
from tensorflow.python import pywrap_tensorflow
from tensorflow.python.eager import context
from tensorflow.python.framework import function
from tensorflow.python.framework import ops
from tensorflow.python.ops import gen_script_ops
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.util import nest
from tensorflow.python.util.tf_export import tf_export
class EagerFunc(object):
"""A wrapper for a function owned by an EagerPyFunc."""
def __init__(self, func, Tout):
"""Constructs an EagerFunc.
Args:
func: The function to wrap.
Tout: A list of datatypes for the output; an empty list if the output is
None.
"""
self._func = func
self._out_dtypes = Tout
def _convert(self, value, dtype):
if isinstance(value, resource_variable_ops.ResourceVariable):
raise RuntimeError(
"Attempting to return a variable from an eagerly executed py_func. "
"Only numeric data structures like Tensors or NumPy arrays should "
"be returned; to return the value of a variable, make sure to obtain "
"the Tensor backing it by calling `.read_value()` on the variable in "
"question: %s" % value)
return ops.convert_to_tensor(value, dtype=dtype)
def __call__(self, on_gpu, args):
"""Passes `args` to `self._func`, which is executed eagerly."""
with context.eager_mode():
ret = self._func(*args)
maybe_copy_to_gpu = lambda x: x if not on_gpu else x.gpu()
if isinstance(ret, (tuple, list)):
return [
maybe_copy_to_gpu(self._convert(x, dtype=dtype))
for (x, dtype) in zip(ret, self._out_dtypes)
]
elif ret is None:
return ret
else:
return maybe_copy_to_gpu(self._convert(ret, dtype=self._out_dtypes[0]))
class FuncRegistry(object):
"""A helper class to keep track of registered py functions.
FuncRegistry keeps a map from unique tokens (string) to Python
functions, which take numpy arrays and output numpy arrays.
"""
def __init__(self):
self._lock = threading.Lock()
self._unique_id = 0 # GUARDED_BY(self._lock)
self._funcs = {}
def insert(self, func):
"""Registers `func` and returns a unique token for this entry."""
token = self._next_unique_token()
self._funcs[token] = func
return token
def remove(self, token):
"""Removes the registered function corresponding to `token`."""
self._funcs.pop(token, None)
@staticmethod
def _convert(value, dtype=None):
"""Converts an arg to numpy, avoiding dangerous string and unicode dtypes.
Numpy pads with zeros when using string and unicode dtypes if different
components of a tensor have different lengths. This is bad: ignoring the
padding is wrong for text data, and removing the padding is wrong for binary
data. To avoid this bug, we redo the conversion using an object dtype.
Additionally, we convert unicode strings to (byte-)strings for
compatibility.
Args:
value: Value to convert to a numpy array.
dtype: (Optional.) Desired NumPy type for the returned value.
Returns:
A numpy array.
"""
result = np.asarray(value, dtype=dtype, order="C")
if result.dtype.char == "S" and result is not value:
return np.asarray(value, order="C", dtype=object)
elif result.dtype.char == "U" and result is not value:
value = np.vectorize(lambda x: x.encode("utf8"))(value)
return np.asarray(value, order="C", dtype=object)
elif result.dtype.char == "U":
return result.astype(np.bytes_)
else:
return result
def __call__(self, token, on_gpu, args):
"""Calls the registered function for `token` with args.
Args:
token: A key into this `FuncRegistry` identifying which function to call.
on_gpu: A boolean indicating whether or not `token`'s corresponding
operation was placed on GPU; only used if the function registered for
`token` is an `EagerPyFunc`.
args: The arguments to pass to the function registered for `token`.
Returns:
The output of the function registered for `token`.
Raises:
ValueError: if no function is registered for `token`.
"""
func = self._funcs.get(token, None)
if func is None:
raise ValueError("callback %s is not found" % token)
if isinstance(func, EagerFunc):
return func(on_gpu, args)
else:
ret = func(*args)
# Strings seem to lead to a memory leak here if they're not wrapped in a
# list.
if isinstance(ret, six.binary_type):
ret = [ret]
# Ensures that we return either a single numpy array or a list of numpy
# arrays.
if isinstance(ret, (tuple, list)):
return [self._convert(x) for x in ret]
else:
return self._convert(ret)
def size(self):
"""Returns how many functions are currently registered."""
return len(self._funcs)
def _next_unique_token(self):
"""Returns a unique token."""
with self._lock:
uid = self._unique_id
self._unique_id += 1
return "pyfunc_%d" % uid
# Global registry for py functions.
_py_funcs = FuncRegistry()
pywrap_tensorflow.InitializePyTrampoline(_py_funcs)
class CleanupFunc(object):
"""A helper class to remove a registered function from _py_funcs."""
def __init__(self, token):
self._token = token
def __del__(self):
if _py_funcs is not None:
# If _py_funcs is None, the program is most likely in shutdown, and the
# _py_funcs object has been destroyed already.
_py_funcs.remove(self._token)
def _internal_py_func(func, inp, Tout, stateful=None, eager=False, name=None):
"""See documentation for py_func and eager_py_func."""
is_list_or_tuple = False
if isinstance(Tout, (list, tuple)):
is_list_or_tuple = True
else:
Tout = [Tout]
if eager:
func = EagerFunc(func, Tout)
token = _py_funcs.insert(func)
# We tie the registered function's lifetime with the current default graph,
# i.e., when the current graph is destroyed, we remove its py funcs.
graph = ops.get_default_graph()
# pylint: disable=protected-access
while isinstance(graph, function._FuncGraph):
# If the py_func was declared inside a _FuncGraph, its lifetime should be
# bound to that of the outer graph instead.
graph = graph._outer_graph
cleanup = CleanupFunc(token)
# TODO(zhifengc): Consider adding a Graph method to collect
# `cleanup` objects in one of its member.
if not hasattr(graph, "_cleanup_py_funcs_used_in_graph"):
graph._cleanup_py_funcs_used_in_graph = []
# When `graph` is destroyed, elements in _cleanup_py_funcs_used_in_graph
# will be destroyed and their __del__ will remove the 'token' from
# the funcs registry.
graph._cleanup_py_funcs_used_in_graph.append(cleanup)
# pylint: enable=protected-access
if eager:
result = gen_script_ops.eager_py_func(
input=inp, token=token, Tout=Tout, name=name)
else:
if stateful:
result = gen_script_ops.py_func(
input=inp, token=token, Tout=Tout, name=name)
else:
result = gen_script_ops.py_func_stateless(
input=inp, token=token, Tout=Tout, name=name)
return result if is_list_or_tuple else result[0]
def eager_py_func(func, inp, Tout, name=None):
"""Wraps a python function into a TensorFlow op.
When the returned op is executed, `func` is invoked with eager execution
enabled. Inputs are Tensor objects and func must return None or objects
that may be converted to Tensor objects.
This function has the same limitations as `py_func` with respect to
serialization and distribution.
Args:
func: A Python function which accepts a list of `Tensor` objects
having element types that match the corresponding `tf.Tensor` objects
in `inp` and returns a list of `Tensor` objects (or a single
`Tensor`, or `None`) having element types that match the
corresponding values in `Tout`.
inp: A list of `Tensor` objects.
Tout: A list or tuple of tensorflow data types or a single tensorflow data
type if there is only one, indicating what `func` returns; an empty list
if no value is returned (i.e., if the return value is `None`).
name: A name for the operation (optional).
Returns:
A list of `Tensor` or a single `Tensor` which `func` computes; an empty list
if `func` returns None.
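For example, the following sketch (illustrative only; the function, values, and
dtypes are assumptions) wraps a plain Python function so that it runs eagerly
when the op executes:
```python
def add(x, y):
  # x and y arrive here as EagerTensors.
  return x + y
a = tf.constant(1.0)
b = tf.constant(2.0)
c = eager_py_func(add, [a, b], tf.float32)
```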
"""
return _internal_py_func(func=func, inp=inp, Tout=Tout, eager=True, name=name)
@tf_export("py_func")
def py_func(func, inp, Tout, stateful=True, name=None):
"""Wraps a python function and uses it as a TensorFlow op.
Given a python function `func`, which takes numpy arrays as its
arguments and returns numpy arrays as its outputs, wrap this function as an
operation in a TensorFlow graph. The following snippet constructs a simple
TensorFlow graph that invokes the `np.sinh()` NumPy function as an operation
in the graph:
```python
def my_func(x):
# x will be a numpy array with the contents of the placeholder below
return np.sinh(x)
input = tf.placeholder(tf.float32)
y = tf.py_func(my_func, [input], tf.float32)
```
**N.B.** The `tf.py_func()` operation has the following known limitations:
* The body of the function (i.e. `func`) will not be serialized in a
`GraphDef`. Therefore, you should not use this function if you need to
serialize your model and restore it in a different environment.
* The operation must run in the same address space as the Python program
that calls `tf.py_func()`. If you are using distributed TensorFlow, you
must run a `tf.train.Server` in the same process as the program that calls
`tf.py_func()` and you must pin the created operation to a device in that
server (e.g. using `with tf.device():`).
Args:
func: A Python function, which accepts `ndarray` objects as arguments and
returns a list of `ndarray` objects (or a single `ndarray`). This function
must accept as many arguments as there are tensors in `inp`, and these
argument types will match the corresponding `tf.Tensor` objects
in `inp`. The returned `ndarray`s must match the number and types defined in
`Tout`.
Important Note: Input and output numpy `ndarray`s of `func` are not
guaranteed to be copies. In some cases their underlying memory will be
shared with the corresponding TensorFlow tensors.
In-place modification, or storing `func` input or return values in Python
data structures without an explicit copy (e.g. `np.copy`), can have
non-deterministic consequences.
inp: A list of `Tensor` objects.
Tout: A list or tuple of tensorflow data types or a single tensorflow data
type if there is only one, indicating what `func` returns.
stateful: (Boolean.) If True, the function should be considered stateful.
If a function is stateless, when given the same input it will return the
same output and have no observable side effects. Optimizations such as
common subexpression elimination are only performed on stateless
operations.
name: A name for the operation (optional).
Returns:
A list of `Tensor` or a single `Tensor` which `func` computes.
"""
if context.executing_eagerly():
result = func(*[x.numpy() for x in inp])
result = nest.flatten(result)
result = [x if x is None else ops.convert_to_tensor(x) for x in result]
if len(result) == 1:
# Mimic the automatic unwrapping in graph-mode py_func
result, = result
return result
return _internal_py_func(
func=func, inp=inp, Tout=Tout, stateful=stateful, eager=False, name=name)
ops.NotDifferentiable("PyFunc")
ops.NotDifferentiable("PyFuncStateless")
|
|
# Copyright 2020 The StackStorm Authors.
# Copyright 2019 Extreme Networks, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
import fnmatch
import eventlet
from kombu.mixins import ConsumerMixin
from oslo_config import cfg
from st2common.models.api.action import LiveActionAPI
from st2common.models.api.execution import ActionExecutionAPI
from st2common.models.api.execution import ActionExecutionOutputAPI
from st2common.transport import utils as transport_utils
from st2common.transport.queues import STREAM_ANNOUNCEMENT_WORK_QUEUE
from st2common.transport.queues import STREAM_EXECUTION_ALL_WORK_QUEUE
from st2common.transport.queues import STREAM_EXECUTION_UPDATE_WORK_QUEUE
from st2common.transport.queues import STREAM_LIVEACTION_WORK_QUEUE
from st2common.transport.queues import STREAM_EXECUTION_OUTPUT_QUEUE
from st2common import log as logging
__all__ = [
"StreamListener",
"ExecutionOutputListener",
"get_listener",
"get_listener_if_set",
]
LOG = logging.getLogger(__name__)
# Stores references to instantiated listeners
_stream_listener = None
_execution_output_listener = None
class BaseListener(ConsumerMixin):
def __init__(self, connection):
self.connection = connection
self.queues = []
self._stopped = False
def get_consumers(self, consumer, channel):
raise NotImplementedError("get_consumers() is not implemented")
def processor(self, model=None):
def process(body, message):
meta = message.delivery_info
event_name = "%s__%s" % (meta.get("exchange"), meta.get("routing_key"))
try:
if model:
body = model.from_model(
body, mask_secrets=cfg.CONF.api.mask_secrets
)
self.emit(event_name, body)
finally:
message.ack()
return process
def emit(self, event, body):
pack = (event, body)
for queue in self.queues:
queue.put(pack)
def generator(
self,
events=None,
action_refs=None,
execution_ids=None,
end_event=None,
end_statuses=None,
end_execution_id=None,
):
queue = eventlet.Queue()
queue.put("")
self.queues.append(queue)
try:
stop = False
while not self._stopped and not stop:
try:
# TODO: Move to common option
message = queue.get(timeout=cfg.CONF.stream.heartbeat)
if not message:
yield message
continue
event_name, body = message
# check to see if this is the last message to send.
if event_name == end_event:
if (
body is not None
and body.status in end_statuses
and end_execution_id is not None
and body.id == end_execution_id
):
stop = True
# TODO: We now do late filtering, but this could also be performed on the
# message bus level if we modified our exchange layout and utilize routing keys
# Filter on event name
include_event = self._should_include_event(
event_names_whitelist=events, event_name=event_name
)
if not include_event:
LOG.debug('Skipping event "%s"' % (event_name))
continue
# Filter on action ref
action_ref = self._get_action_ref_for_body(body=body)
if action_refs and action_ref not in action_refs:
LOG.debug(
'Skipping event "%s" with action_ref "%s"'
% (event_name, action_ref)
)
continue
# Filter on execution id
execution_id = self._get_execution_id_for_body(body=body)
if execution_ids and execution_id not in execution_ids:
LOG.debug(
'Skipping event "%s" with execution_id "%s"'
% (event_name, execution_id)
)
continue
yield message
except eventlet.queue.Empty:
yield
finally:
self.queues.remove(queue)
def shutdown(self):
self._stopped = True
def _should_include_event(self, event_names_whitelist, event_name):
"""
Return True if the particular event should be included based on the event names filter.
"""
if not event_names_whitelist:
return True
for event_name_filter_glob in event_names_whitelist:
if fnmatch.fnmatch(event_name, event_name_filter_glob):
return True
return False
def _get_action_ref_for_body(self, body):
"""
Retrieve action_ref for the provided message body.
"""
if not body:
return None
action_ref = None
if isinstance(body, ActionExecutionAPI):
action_ref = body.action.get("ref", None) if body.action else None
elif isinstance(body, LiveActionAPI):
action_ref = body.action
elif isinstance(body, (ActionExecutionOutputAPI)):
action_ref = body.action_ref
return action_ref
def _get_execution_id_for_body(self, body):
if not body:
return None
execution_id = None
if isinstance(body, ActionExecutionAPI):
execution_id = str(body.id)
elif isinstance(body, LiveActionAPI):
execution_id = None
elif isinstance(body, (ActionExecutionOutputAPI)):
execution_id = body.execution_id
return execution_id
class StreamListener(BaseListener):
"""
Listener used inside stream service.
It listens to all the events.
"""
def get_consumers(self, consumer, channel):
return [
consumer(
queues=[STREAM_ANNOUNCEMENT_WORK_QUEUE],
accept=["pickle"],
callbacks=[self.processor()],
),
consumer(
queues=[STREAM_EXECUTION_ALL_WORK_QUEUE],
accept=["pickle"],
callbacks=[self.processor(ActionExecutionAPI)],
),
consumer(
queues=[STREAM_LIVEACTION_WORK_QUEUE],
accept=["pickle"],
callbacks=[self.processor(LiveActionAPI)],
),
consumer(
queues=[STREAM_EXECUTION_OUTPUT_QUEUE],
accept=["pickle"],
callbacks=[self.processor(ActionExecutionOutputAPI)],
),
]
class ExecutionOutputListener(BaseListener):
"""
Listener emitting action execution output events.
Only listens to the action execution update and output queues.
"""
def get_consumers(self, consumer, channel):
return [
consumer(
queues=[STREAM_EXECUTION_UPDATE_WORK_QUEUE],
accept=["pickle"],
callbacks=[self.processor(ActionExecutionAPI)],
),
consumer(
queues=[STREAM_EXECUTION_OUTPUT_QUEUE],
accept=["pickle"],
callbacks=[self.processor(ActionExecutionOutputAPI)],
),
]
def listen(listener):
try:
listener.run()
finally:
listener.shutdown()
def get_listener(name):
global _stream_listener
global _execution_output_listener
if name == "stream":
if not _stream_listener:
with transport_utils.get_connection() as conn:
_stream_listener = StreamListener(conn)
eventlet.spawn_n(listen, _stream_listener)
return _stream_listener
elif name == "execution_output":
if not _execution_output_listener:
with transport_utils.get_connection() as conn:
_execution_output_listener = ExecutionOutputListener(conn)
eventlet.spawn_n(listen, _execution_output_listener)
return _execution_output_listener
else:
raise ValueError("Invalid listener name: %s" % (name))
def get_listener_if_set(name):
global _stream_listener
global _execution_output_listener
if name == "stream":
return _stream_listener
elif name == "execution_output":
return _execution_output_listener
else:
raise ValueError("Invalid listener name: %s" % (name))
|
|
# coding=utf-8
# Copyright 2021 The HuggingFace Team Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import numpy as np
from transformers import is_flax_available
from transformers.testing_utils import require_flax
from ..test_modeling_flax_common import ids_tensor
if is_flax_available():
import jax
import jax.numpy as jnp
from transformers.generation_flax_logits_process import (
FlaxForcedBOSTokenLogitsProcessor,
FlaxForcedEOSTokenLogitsProcessor,
FlaxLogitsProcessorList,
FlaxMinLengthLogitsProcessor,
FlaxTemperatureLogitsWarper,
FlaxTopKLogitsWarper,
FlaxTopPLogitsWarper,
)
@require_flax
class LogitsProcessorTest(unittest.TestCase):
def _get_uniform_logits(self, batch_size: int, length: int):
scores = np.ones((batch_size, length)) / length
return scores
def test_temperature_dist_warper(self):
input_ids = None
length = 20
scores = self._get_uniform_logits(batch_size=2, length=length)
# tweak scores to not be uniform anymore
scores[1, 5] = (1 / length) + 0.1 # peak, 1st batch
scores[1, 10] = (1 / length) - 0.4 # valley, 1st batch
# compute softmax
probs = jax.nn.softmax(scores, axis=-1)
temp_dist_warper_sharper = FlaxTemperatureLogitsWarper(temperature=0.5)
temp_dist_warper_smoother = FlaxTemperatureLogitsWarper(temperature=1.3)
warped_prob_sharp = jax.nn.softmax(temp_dist_warper_sharper(input_ids, scores.copy(), cur_len=None), axis=-1)
warped_prob_smooth = jax.nn.softmax(temp_dist_warper_smoother(input_ids, scores.copy(), cur_len=None), axis=-1)
# uniform distribution stays uniform
self.assertTrue(jnp.allclose(probs[0, :], warped_prob_sharp[0, :], atol=1e-3))
self.assertTrue(jnp.allclose(probs[0, :], warped_prob_smooth[0, :], atol=1e-3))
# sharp peaks get higher, valleys get lower
self.assertLess(probs[1, :].max(), warped_prob_sharp[1, :].max())
self.assertGreater(probs[1, :].min(), warped_prob_sharp[1, :].min())
# smooth peaks get lower, valleys get higher
self.assertGreater(probs[1, :].max(), warped_prob_smooth[1, :].max())
self.assertLess(probs[1, :].min(), warped_prob_smooth[1, :].min())
def test_top_k_dist_warper(self):
input_ids = None
vocab_size = 10
batch_size = 2
# create ramp distribution
ramp_logits = np.broadcast_to(np.arange(vocab_size)[None, :], (batch_size, vocab_size)).copy()
ramp_logits[1:, : vocab_size // 2] = ramp_logits[1:, : vocab_size // 2] + vocab_size
top_k_warp = FlaxTopKLogitsWarper(3)
scores = top_k_warp(input_ids, ramp_logits, cur_len=None)
# check that correct tokens are filtered
self.assertListEqual(jnp.isinf(scores[0]).tolist(), 7 * [True] + 3 * [False])
self.assertListEqual(jnp.isinf(scores[1]).tolist(), 2 * [True] + 3 * [False] + 5 * [True])
# check special case
length = 5
top_k_warp_safety_check = FlaxTopKLogitsWarper(top_k=1, filter_value=0.0, min_tokens_to_keep=3)
ramp_logits = np.broadcast_to(np.arange(length)[None, :], (batch_size, length)).copy()
scores = top_k_warp_safety_check(input_ids, ramp_logits, cur_len=None)
# min_tokens overwrites k: 3 tokens are kept => 2 tokens are nullified
self.assertListEqual((scores == 0.0).sum(axis=-1).tolist(), [2, 2])
def test_top_p_dist_warper(self):
input_ids = None
vocab_size = 10
batch_size = 2
# create distribution and take log (inverse to Softmax as taken in TopPLogitsWarper)
dist = np.log(np.array([[0.3, 0.1, 0.1, 0.5], [0.15, 0.3, 0.3, 0.25]]))
top_p_warp = FlaxTopPLogitsWarper(0.7)
filtered_dist = np.exp(top_p_warp(input_ids, dist, cur_len=None))
# dist should be filtered to keep min num values so that sum is >= 0.7
# exp (-inf) => 0
EXPECTED_FILTERED_DIST = np.array([[0.3, 0.0, 0.0, 0.5], [0.0, 0.3, 0.3, 0.25]])
self.assertTrue(np.allclose(filtered_dist, EXPECTED_FILTERED_DIST, atol=1e-3))
# check edge cases with negative and extreme logits
ramp_logits = np.broadcast_to(np.arange(vocab_size)[None, :], (batch_size, vocab_size)).copy() - (
vocab_size // 2
)
# make ramp_logits more extreme
ramp_logits[1] = ramp_logits[1] * 100.0
# make sure at least 2 tokens are kept
top_p_warp = FlaxTopPLogitsWarper(0.9, min_tokens_to_keep=2, filter_value=0.0)
filtered_dist = top_p_warp(input_ids, ramp_logits, cur_len=None)
# first batch should keep three tokens, second batch would keep only 1, but due to `min_tokens_to_keep=2` keeps 2.
self.assertListEqual((filtered_dist != 0.0).sum(axis=-1).tolist(), [3, 2])
def test_min_length_dist_processor(self):
vocab_size = 20
batch_size = 4
eos_token_id = 0
min_dist_processor = FlaxMinLengthLogitsProcessor(min_length=10, eos_token_id=eos_token_id)
# check that min length is applied at length 5
input_ids = ids_tensor((batch_size, 20), vocab_size=20)
cur_len = 5
scores = self._get_uniform_logits(batch_size, vocab_size)
scores_before_min_length = min_dist_processor(input_ids, scores, cur_len=cur_len)
self.assertListEqual(scores_before_min_length[:, eos_token_id].tolist(), 4 * [-float("inf")])
# check that min length is not applied anymore at length 15
scores = self._get_uniform_logits(batch_size, vocab_size)
cur_len = 15
scores_before_min_length = min_dist_processor(input_ids, scores, cur_len=cur_len)
self.assertFalse(jnp.isinf(scores_before_min_length).any())
def test_forced_bos_token_logits_processor(self):
vocab_size = 20
batch_size = 4
bos_token_id = 0
logits_processor = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=bos_token_id)
# check that all scores are -inf except the bos_token_id score
input_ids = ids_tensor((batch_size, 1), vocab_size=20)
cur_len = 1
scores = self._get_uniform_logits(batch_size, vocab_size)
scores = logits_processor(input_ids, scores, cur_len=cur_len)
self.assertTrue(jnp.isneginf(scores[:, bos_token_id + 1 :]).all())
self.assertListEqual(scores[:, bos_token_id].tolist(), 4 * [0])  # score for bos_token_id should be zero
# check that bos_token_id is not forced if current length is greater than 1
cur_len = 3
scores = self._get_uniform_logits(batch_size, vocab_size)
scores = logits_processor(input_ids, scores, cur_len=cur_len)
self.assertFalse(jnp.isinf(scores).any())
def test_forced_eos_token_logits_processor(self):
vocab_size = 20
batch_size = 4
eos_token_id = 0
max_length = 5
logits_processor = FlaxForcedEOSTokenLogitsProcessor(max_length=max_length, eos_token_id=eos_token_id)
# check that all scores are -inf except the eos_token_id when max_length is reached
input_ids = ids_tensor((batch_size, 4), vocab_size=20)
cur_len = 4
scores = self._get_uniform_logits(batch_size, vocab_size)
scores = logits_processor(input_ids, scores, cur_len=cur_len)
self.assertTrue(jnp.isneginf(scores[:, eos_token_id + 1 :]).all())
self.assertListEqual(scores[:, eos_token_id].tolist(), 4 * [0]) # score for eos_token_id should be zero
# check that eos_token_id is not forced if max_length is not reached
cur_len = 3
scores = self._get_uniform_logits(batch_size, vocab_size)
scores = logits_processor(input_ids, scores, cur_len=cur_len)
self.assertFalse(jnp.isinf(scores).any())
def test_processor_list(self):
batch_size = 4
sequence_length = 10
vocab_size = 15
eos_token_id = 2
bos_token_id = 1
max_length = 15
# dummy input_ids and scores
input_ids = ids_tensor((batch_size, sequence_length), vocab_size)
input_ids_comp = input_ids.copy()
scores = self._get_uniform_logits(batch_size, vocab_size)
scores_comp = scores.copy()
# instantiate all dist processors
temp_dist_warp = FlaxTemperatureLogitsWarper(temperature=0.5)
top_k_warp = FlaxTopKLogitsWarper(3)
top_p_warp = FlaxTopPLogitsWarper(0.8)
# instantiate all logits processors
min_dist_proc = FlaxMinLengthLogitsProcessor(min_length=10, eos_token_id=eos_token_id)
bos_dist_proc = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=bos_token_id)
eos_dist_proc = FlaxForcedEOSTokenLogitsProcessor(max_length=max_length, eos_token_id=eos_token_id)
cur_len = 10
# no processor list
scores = temp_dist_warp(input_ids, scores, cur_len=cur_len)
scores = top_k_warp(input_ids, scores, cur_len=cur_len)
scores = top_p_warp(input_ids, scores, cur_len=cur_len)
scores = min_dist_proc(input_ids, scores, cur_len=cur_len)
scores = bos_dist_proc(input_ids, scores, cur_len=cur_len)
scores = eos_dist_proc(input_ids, scores, cur_len=cur_len)
# with processor list
processor = FlaxLogitsProcessorList(
[temp_dist_warp, top_k_warp, top_p_warp, min_dist_proc, bos_dist_proc, eos_dist_proc]
)
scores_comp = processor(input_ids, scores_comp, cur_len=cur_len)
# scores should be equal
self.assertTrue(jnp.allclose(scores, scores_comp, atol=1e-3))
# input_ids should never be changed
self.assertListEqual(input_ids.tolist(), input_ids_comp.tolist())
def test_processor_list_jitted(self):
batch_size = 4
sequence_length = 10
vocab_size = 15
eos_token_id = 2
bos_token_id = 1
max_length = 15
# dummy input_ids and scores
input_ids = ids_tensor((batch_size, sequence_length), vocab_size)
input_ids_comp = input_ids.copy()
scores = self._get_uniform_logits(batch_size, vocab_size)
scores_comp = scores.copy()
# instantiate all dist processors
temp_dist_warp = FlaxTemperatureLogitsWarper(temperature=0.5)
top_k_warp = FlaxTopKLogitsWarper(3)
top_p_warp = FlaxTopPLogitsWarper(0.8)
# instantiate all logits processors
min_dist_proc = FlaxMinLengthLogitsProcessor(min_length=10, eos_token_id=eos_token_id)
bos_dist_proc = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=bos_token_id)
eos_dist_proc = FlaxForcedEOSTokenLogitsProcessor(max_length=max_length, eos_token_id=eos_token_id)
cur_len = 10
# no processor list
def run_no_processor_list(input_ids, scores, cur_len):
scores = temp_dist_warp(input_ids, scores, cur_len=cur_len)
scores = top_k_warp(input_ids, scores, cur_len=cur_len)
scores = top_p_warp(input_ids, scores, cur_len=cur_len)
scores = min_dist_proc(input_ids, scores, cur_len=cur_len)
scores = bos_dist_proc(input_ids, scores, cur_len=cur_len)
scores = eos_dist_proc(input_ids, scores, cur_len=cur_len)
return scores
# with processor list
def run_processor_list(input_ids, scores, cur_len):
processor = FlaxLogitsProcessorList(
[temp_dist_warp, top_k_warp, top_p_warp, min_dist_proc, bos_dist_proc, eos_dist_proc]
)
scores = processor(input_ids, scores, cur_len=cur_len)
return scores
jitted_run_no_processor_list = jax.jit(run_no_processor_list)
jitted_run_processor_list = jax.jit(run_processor_list)
scores = jitted_run_no_processor_list(input_ids, scores, cur_len)
scores_comp = jitted_run_processor_list(input_ids, scores_comp, cur_len)
# scores should be equal
self.assertTrue(jnp.allclose(scores, scores_comp, atol=1e-3))
# input_ids should never be changed
self.assertListEqual(input_ids.tolist(), input_ids_comp.tolist())
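# A minimal sketch (not part of the test suite) of how these processors might be
# combined with a sampling step: warp the logits, then draw a token with
# jax.random.categorical. The shapes, temperature, and PRNG seed are illustrative.
if __name__ == "__main__" and is_flax_available():
    rng = jax.random.PRNGKey(0)
    demo_scores = jnp.log(jnp.ones((1, 8)) / 8.0)
    demo_input_ids = jnp.zeros((1, 4), dtype="i4")
    demo_processors = FlaxLogitsProcessorList(
        [FlaxTemperatureLogitsWarper(temperature=0.7), FlaxTopKLogitsWarper(3)]
    )
    warped_scores = demo_processors(demo_input_ids, demo_scores, cur_len=4)
    next_token = jax.random.categorical(rng, warped_scores, axis=-1)
    print(next_token)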
|
|
""" Video Link: https://youtu.be/1s-Tj65AKZA """
from seleniumbase import __version__
from seleniumbase import BaseCase
class HackTests(BaseCase):
def test_all_your_base_are_belong_to_us(self):
# First make sure that seleniumbase 1.65.0 or newer is installed
version = __version__.split(".")
if version[0] == "1" and int(version[1]) < 65:
raise Exception(
"This test requires minimum seleniumbase version: 1.65.0"
)
self.set_window_size(1220, 740)
ayb = "ALL YOUR BASE"
abtu = "ARE BELONG TO US"
aybabtu = "%s %s" % (ayb, abtu)
sb_banner_logo = "//seleniumbase.io/cdn/img/sb_logo_10.png"
sb_dashboard_logo = "//seleniumbase.io/img/dash_pie_3.png"
yt_chip = "#chips yt-chip-cloud-chip-renderer:nth-of-type"
wiki = "https://en.wikipedia.org/wiki/All_your_base_are_belong_to_us"
self.open(wiki)
self.click_if_visible('button[aria-label="Close"]')
self.set_text_content("h1#firstHeading", aybabtu)
self.set_text_content("#ca-history a", aybabtu)
self.set_text_content('#n-mainpage-description a', "ALL")
self.set_text_content('#n-contents a', "YOUR")
self.set_text_content('#n-currentevents a', "BASE")
self.set_text_content('#n-randompage a', "ARE")
self.set_text_content('#n-aboutsite a', "BELONG")
self.set_text_content('#n-contactpage a', "TO")
self.set_text_content('#n-sitesupport a', "US")
self.set_text_content('.tocsection-1 span.toctext', "ALL")
self.set_text_content('.tocsection-2 span.toctext', "YOUR")
self.set_text_content('.tocsection-3 span.toctext', "BASE")
self.set_text_content('.tocsection-4 span.toctext', "ARE")
self.set_text_content('.tocsection-5 span.toctext', "BELONG")
self.set_text_content('.tocsection-6 span.toctext', "TO")
self.set_text_content('.tocsection-7 span.toctext', "US")
self.highlight("h1#firstHeading", loops=2, scroll=False)
self.highlight("#ca-history a", loops=2, scroll=False)
self.highlight("nav#p-navigation", loops=2, scroll=False)
self.highlight("div#toc", loops=2, scroll=False)
self.highlight('.tocsection-1 span.toctext', loops=1, scroll=False)
self.highlight('.tocsection-2 span.toctext', loops=1, scroll=False)
self.highlight('.tocsection-3 span.toctext', loops=2, scroll=False)
self.highlight('.tocsection-4 span.toctext', loops=1, scroll=False)
self.highlight('.tocsection-5 span.toctext', loops=1, scroll=False)
self.highlight('.tocsection-6 span.toctext', loops=1, scroll=False)
self.highlight('.tocsection-7 span.toctext', loops=2, scroll=False)
zoom_in = 'div.thumbinner{zoom: 1.4;-moz-transform: scale(1.4);}'
self.add_css_style(zoom_in)
self.highlight("div.thumbinner", loops=8, scroll=False)
self.open("https://www.apple.com/store")
self.set_text_content("div.rs-shop-subheader", aybabtu)
self.set_text_content('#shelf-1 a[href*="mac"]', "ALL")
self.set_text_content('#shelf-1 a[href*="iphone"]', "YOUR")
self.set_text_content('#shelf-1 a[href*="ipad"]', "BASE")
self.set_text_content('#shelf-1 a[href*="watch"]', "ARE")
self.set_text_content('#shelf-1 a[href*="airpods"]', "BELONG")
self.set_text_content('#shelf-1 a[href*="airtag"]', "TO")
self.set_text_content('#shelf-1 a[href*="tv"]', "US")
self.set_text_content('#shelf-1 a[href*="homepod"]', ".")
self.set_text_content("h2", aybabtu + ". ")
self.highlight("div.rs-shop-subheader", loops=6, scroll=False)
self.highlight("#shelf-1", loops=2, scroll=False)
self.highlight('#shelf-1 a[href*="mac"]', loops=1, scroll=False)
self.highlight('#shelf-1 a[href*="iphone"]', loops=1, scroll=False)
self.highlight('#shelf-1 a[href*="ipad"]', loops=3, scroll=False)
self.highlight('#shelf-1 a[href*="watch"]', loops=1, scroll=False)
self.highlight('#shelf-1 a[href*="airpods"]', loops=1, scroll=False)
self.highlight('#shelf-1 a[href*="airtag"]', loops=1, scroll=False)
self.highlight('#shelf-1 a[href*="tv"]', loops=3, scroll=False)
self.highlight("h2", loops=9, scroll=False)
self.open("https://google.com/ncr")
self.set_text_content('a[href*="about.google"]', ayb)
self.set_text_content('a[href*="store.google"]', abtu)
self.set_text_content('a[href*="mail.google.com"]', ayb)
self.set_text_content('a[href*="google.com/img"]', abtu)
self.set_attributes('[value="Google Search"]', "value", ayb)
self.set_attributes('[value="I\'m Feeling Lucky"]', "value", abtu)
zoom_in = 'a{zoom: 1.2;-moz-transform: scale(1.2);}'
self.add_css_style(zoom_in)
zoom_in = (
'[value="ALL YOUR BASE"]{zoom: 1.3;-moz-transform: scale(1.3);}'
'[value="ARE BELONG TO US"]{zoom: 1.3;-moz-transform: scale(1.3);}'
)
self.add_css_style(zoom_in)
self.highlight('a[href*="about.google"]', loops=3)
self.highlight('a[href*="store.google"]', loops=3)
self.highlight('a[href*="mail.google.com"]', loops=3)
self.highlight('a[href*="google.com/img"]', loops=3)
self.highlight('form[role="search"]', loops=8)
self.open("https://twitter.com/")
if not self.is_element_visible('a[href*="w/signup"] span'):
self.refresh()
if self.is_element_visible('a[href*="w/signup"] span'):
self.set_text_content('a[href*="w/signup"] span', aybabtu)
self.highlight('a[href*="w/signup"] span', loops=6, scroll=False)
self.highlight('a[href*="w/signup"]', loops=6, scroll=False)
self.open("https://www.youtube.com/")
self.set_text_content('%s(1)' % yt_chip, "ALL")
self.set_text_content('%s(2)' % yt_chip, "YOUR")
self.set_text_content('%s(3)' % yt_chip, "BASE")
self.set_text_content('%s(4)' % yt_chip, "ARE")
self.set_text_content('%s(5)' % yt_chip, "BELONG")
self.set_text_content('%s(6)' % yt_chip, "TO")
self.set_text_content('%s(7)' % yt_chip, "US")
self.set_text_content('%s(8)' % yt_chip, "!")
self.set_text_content('%s(9)' % yt_chip, "!")
self.set_text_content('%s(10)' % yt_chip, "!")
self.click_if_visible("#dismiss-button")
self.click_if_visible('button[aria-label="Close"]')
self.highlight("#scroll-container", loops=5, scroll=False)
self.highlight('%s(1)' % yt_chip, loops=1, scroll=False)
self.highlight('%s(2)' % yt_chip, loops=1, scroll=False)
self.highlight('%s(3)' % yt_chip, loops=3, scroll=False)
self.highlight('%s(4)' % yt_chip, loops=1, scroll=False)
self.highlight('%s(5)' % yt_chip, loops=1, scroll=False)
self.highlight('%s(6)' % yt_chip, loops=1, scroll=False)
self.highlight('%s(7)' % yt_chip, loops=3, scroll=False)
self.highlight("#scroll-container", loops=7, scroll=False)
self.open("https://github.com/features/actions")
self.set_text_content('a[href="/team"]', ayb)
self.set_text_content('a[href="/enterprise"]', abtu)
self.set_text_content('h1 span:nth-child(1)', ayb)
self.set_text_content('h1 span:nth-of-type(2)', "ARE")
self.set_text_content('h1 span:nth-of-type(3)', "BELONG")
self.set_text_content('h1 span:nth-of-type(4)', "TO")
self.set_text_content('h1 span:nth-of-type(5)', "US")
self.type('input[name="q"]', aybabtu.lower())
self.click("h1", scroll=False)
self.highlight("nav", loops=5, scroll=False)
self.highlight('input[name="q"]', loops=5, scroll=False)
self.highlight("h1", loops=8, scroll=False)
self.open("https://dev.to/top/infinity")
self.click_if_visible('button[aria-label="Close campaign banner"]')
self.set_text_content('nav a[data-text="Relevant"]', "ALL")
self.set_text_content('nav a[data-text="Latest"]', "YOUR")
self.set_text_content('nav a[data-text="Top"]', "BASE")
self.set_text_content('nav a[data-text="Week"]', "ARE")
self.set_text_content('nav a[data-text="Month"]', "BELONG")
self.set_text_content('nav a[data-text="Year"]', "TO")
self.set_text_content('nav a[data-text="Infinity"]', "US")
self.set_text_content('aside a[class*="tful"]', aybabtu)
self.set_text_content('aside a[aria-label="Create new account"]', ayb)
self.set_text_content('aside a[aria-label="Log in"]', abtu)
self.set_text_content('aside a[class*="tful"]:nth-child(2)', aybabtu)
self.set_text_content('aside a[class*="tful"]:nth-child(3)', aybabtu)
self.set_text_content('aside a[class*="tful"]:nth-child(4)', aybabtu)
self.set_text_content('aside a[class*="tful"]:nth-child(5)', aybabtu)
self.set_attribute("a.crayons-avatar img", "src", sb_dashboard_logo)
self.set_text_content('.profile-preview-card button', "SeleniumBase")
self.set_text_content('h2.crayons-story__title a', aybabtu)
self.type('input[name="q"]', aybabtu)
self.highlight('input[name="q"]', loops=4, scroll=False)
self.highlight('[aria-label="Primary sidebar"] div div', scroll=False)
self.highlight('nav a[data-text="Relevant"]', loops=1, scroll=False)
self.highlight('nav a[data-text="Latest"]', loops=1, scroll=False)
self.highlight('nav a[data-text="Top"]', loops=2, scroll=False)
self.highlight('nav a[data-text="Week"]', loops=1, scroll=False)
self.highlight('nav a[data-text="Month"]', loops=1, scroll=False)
self.highlight('nav a[data-text="Year"]', loops=1, scroll=False)
self.highlight('nav a[data-text="Infinity"]', loops=2, scroll=False)
self.highlight('aside[id*="sidebar"] section', loops=5, scroll=False)
self.highlight("div.crayons-story__body", loops=7, scroll=False)
self.open("https://azure.microsoft.com/en-us/services/playfab/")
self.set_text_content("h1", aybabtu)
self.set_text_content('a[aria-label*="Try PlayF"]', ayb)
self.set_text_content('a[aria-label*="Sign in to"]', abtu)
self.set_text_content('span:contains("Chat with Sales")', aybabtu)
self.highlight("h1", loops=6, scroll=False)
self.highlight('a[aria-label*="Try PlayF"]', loops=4, scroll=False)
self.highlight('a[aria-label*="Sign in to"]', loops=4, scroll=False)
self.highlight('button#live-engage-btn', loops=6, scroll=False)
self.open("https://www.snapchat.com/")
self.set_text_content("h1", ayb)
self.set_text_content("form .button-large span span", abtu)
zoom_in = 'a.button-large span{zoom: 1.2;-moz-transform: scale(1.2);}'
self.add_css_style(zoom_in)
self.highlight("h1", loops=6, scroll=False)
self.highlight("form .button-large span span", loops=8, scroll=False)
self.open("https://store.steampowered.com/")
self.set_text_content('div.content a[href*="/about/"]', " ")
self.set_text_content('div.content a[href*="help.steam"]', aybabtu)
self.set_text_content("#foryou_tab a", "ALL")
self.set_text_content("#noteworthy_tab a", "YOUR BASE")
self.set_text_content("#genre_tab a", "ARE")
self.set_text_content('span:contains("Points Shop")', "BELONG")
self.set_text_content('span:contains("News")', "TO")
self.set_text_content('span:contains("Labs")', "US")
self.set_value("input#store_nav_search_term", ayb + " . . . .")
self.highlight('div.content a[href*="help.steam"]', loops=6)
self.highlight('#store_nav_area', loops=2, scroll=False)
self.highlight("#foryou_tab a", loops=1, scroll=False)
self.highlight("#noteworthy_tab a", loops=3, scroll=False)
self.highlight("#genre_tab a", loops=1, scroll=False)
self.highlight('span:contains("BELONG")', loops=1, scroll=False)
self.highlight('span:contains("TO")', loops=1, scroll=False)
self.highlight('span:contains("US")', loops=2, scroll=False)
self.js_click('input[id*="nav_search"]')
self.highlight('input[id*="nav_search"]', loops=6, scroll=False)
self.open("https://xkcd.com/286/")
self.set_text_content('a[href="/archive"]', "ALL")
self.set_text_content('a[href*="what-if"]', "YOUR")
self.set_text_content('a[href*="//blag."]', "BASE")
self.set_text_content('a[href*="/about"]', abtu)
self.remove_element('li:contains("Feed")')
self.remove_element('li:contains("TW")')
self.remove_element('li:contains("Books")')
self.remove_element('li:contains("What")')
self.remove_element('li:contains("WI")')
self.set_attributes("#news img", "src", sb_banner_logo)
self.set_text_content('#ctitle', aybabtu)
self.set_text_content('a[rel="prev"]', "All")
self.set_text_content('a[href*="random"]', "Your")
self.set_text_content('a[rel="next"]', "Base")
self.highlight("#topLeft ul", loops=5, scroll=False)
self.highlight('a[href="/archive"]', loops=1, scroll=False)
self.highlight('a[href*="what-if"]', loops=1, scroll=False)
self.highlight('a[href*="//blag."]', loops=2, scroll=False)
self.highlight('a[href*="/about"]', loops=5, scroll=False)
self.highlight('a[rel="prev"]', loops=1, scroll=False)
self.highlight('a[href*="random"]', loops=1, scroll=False)
self.highlight('a[rel="next"]', loops=3, scroll=False)
self.highlight("#ctitle", loops=7, scroll=False)
self.open("https://www.nintendo.com/whatsnew/")
self.set_text_content("h1", aybabtu)
self.highlight("h1", loops=10, scroll=False)
self.open("https://support.gog.com/hc/en-us?product=gog")
self.set_text_content("div.intro-title", aybabtu)
self.set_text_content("h4", aybabtu)
self.highlight("div.intro-title", loops=8, scroll=False)
self.highlight("h4", loops=8, scroll=False)
self.open("https://slack.com/help/articles/204714258-Giphy-for-Slack")
self.set_text_content("h1", aybabtu)
self.set_text_content('a[prettyslug="getting-started"]', "ALL")
self.set_text_content('a[prettyslug="using-slack"]', "YOUR")
self.set_text_content('a[prettyslug="your-profile"]', "BASE")
self.set_text_content('a[prettyslug="connect-tools"]', "ARE")
self.set_text_content('a[prettyslug="administration"]', "BELONG")
self.set_text_content('a[prettyslug="tutorials"]', "TO US")
self.highlight("h1", loops=4, scroll=False)
self.highlight("div#global_menu", loops=2, scroll=False)
self.highlight('a[prettyslug*="g-started"]', loops=1, scroll=False)
self.highlight('a[prettyslug="using-slack"]', loops=1, scroll=False)
self.highlight('a[prettyslug="your-profile"]', loops=2, scroll=False)
self.highlight('a[prettyslug="connect-tools"]', loops=1, scroll=False)
self.highlight('a[prettyslug="administration"]', loops=1, scroll=False)
self.highlight('a[prettyslug="tutorials"]', loops=2, scroll=False)
self.open("https://kubernetes.io/")
self.set_text_content('nav a[href="/docs/"]', "ALL")
self.set_text_content('nav a[href="/blog/"]', "YOUR")
self.set_text_content('nav a[href="/training/"]', "BASE")
self.set_text_content('nav a[href="/partners/"]', "ARE")
self.set_text_content('nav a[href="/community/"]', "BELONG")
self.set_text_content('nav a[href="/case-studies/"]', "TO")
self.set_text_content('nav #navbarDropdown', "US")
self.set_text_content('nav #navbarDropdownMenuLink', ".")
if self.is_element_visible("h1"):
self.set_text_content("h1", aybabtu)
self.highlight("nav ul.navbar-nav", loops=3, scroll=False)
self.highlight('nav a[href="/docs/"]', loops=1, scroll=False)
self.highlight('nav a[href="/blog/"]', loops=1, scroll=False)
self.highlight('nav a[href="/training/"]', loops=2, scroll=False)
self.highlight('nav a[href="/partners/"]', loops=1, scroll=False)
self.highlight('nav a[href="/community/"]', loops=1, scroll=False)
self.highlight('nav a[href="/case-studies/"]', loops=1, scroll=False)
self.highlight('nav #navbarDropdown', loops=2, scroll=False)
if self.is_element_visible("h1"):
self.highlight('h1', loops=6, scroll=False)
self.open("https://www.selenium.dev/")
self.set_attributes("a.dropdown-toggle", "class", "nav-link")
self.set_text_content('li a:contains("About")', "ALL")
self.set_text_content('li a:contains("Downloads")', "YOUR")
self.set_text_content('li a:contains("Documentation")', "BASE")
self.set_text_content('li a:contains("Projects")', "ARE")
self.set_text_content('li a:contains("Support")', "BELONG")
self.set_text_content('li a:contains("Blog")', "TO")
self.set_text_content('li a:contains("English")', "US")
self.set_text_content("div.lead", aybabtu)
self.set_text_content("h2", aybabtu)
zoom_in = 'div.lead{zoom: 1.25;-moz-transform: scale(1.25);}'
self.add_css_style(zoom_in)
self.highlight("div#main_navbar", loops=1, scroll=False)
self.highlight('li a:contains("ALL")', loops=1, scroll=False)
self.highlight('li a:contains("YOUR")', loops=1, scroll=False)
self.highlight('li a:contains("BASE")', loops=2, scroll=False)
self.highlight('li a:contains("ARE")', loops=1, scroll=False)
self.highlight('li a:contains("BELONG")', loops=1, scroll=False)
self.highlight('li a:contains("TO")', loops=1, scroll=False)
self.highlight('li a:contains("US")', loops=2, scroll=False)
self.highlight("div.lead", loops=6, scroll=False)
self.highlight("h2", loops=8, scroll=False)
self.open("https://www.python.org/")
self.set_text_content('a[class="donate-button"]', ayb)
self.set_text_content("#about a", "ALL")
self.set_text_content("#downloads a", "YOUR")
self.set_text_content("#documentation a", "BASE")
self.set_text_content("#community a", "ARE")
self.set_text_content("#success-stories a", "BELONG")
self.set_text_content("#news a", "TO")
self.set_text_content("#events a", "US")
self.highlight('a[class="donate-button"]', loops=4, scroll=False)
self.highlight("nav#mainnav", loops=5, scroll=False)
self.highlight("#about a", loops=1, scroll=False)
self.highlight("#downloads a", loops=1, scroll=False)
self.highlight("#documentation a", loops=2, scroll=False)
self.highlight("#community a", loops=1, scroll=False)
self.highlight("#success-stories a", loops=1, scroll=False)
self.highlight("#news a", loops=1, scroll=False)
self.highlight("#events a", loops=2, scroll=False)
self.open("https://docs.pytest.org/")
self.set_text_content("h1", "pytest: " + aybabtu)
self.highlight("h1", loops=10, scroll=False)
self.open("https://wordpress.com/")
self.set_text_content('a[title="Plans & Pricing"]', aybabtu)
self.set_text_content('a[title="Get Started"]', ayb)
self.set_text_content("p.no-widows", aybabtu)
self.set_text_content("a#lpc-button", "Automate with SeleniumBase")
self.highlight('a[title="Plans & Pricing"]', loops=6, scroll=False)
self.highlight('a[title="Get Started"]', loops=4, scroll=False)
self.highlight("p.no-widows", loops=8, scroll=False)
self.highlight("a#lpc-button", loops=4, scroll=False)
self.open("https://seleniumbase.com/")
self.set_text_content("h1", aybabtu)
self.highlight("h1", loops=10, scroll=False)
self.open("https://pypi.org/")
self.set_text_content('a[href="/sponsors/"]', aybabtu)
self.set_text_content("h1", aybabtu)
self.set_value("input#search", aybabtu, scroll=False)
self.highlight('a[href="/sponsors/"]', loops=6, scroll=False)
self.highlight("h1", loops=6, scroll=False)
self.highlight("input#search", loops=8, scroll=False)
self.open("https://www.atlassian.com/software/jira")
self.set_text_content('a[href*="jira/pricing"]', ayb)
self.set_text_content('a[href*="jira/enterprise"]', abtu)
self.set_text_content('a[href="/software/jira/features"]', "")
self.set_text_content('a[href="/software/jira/guides"]', "")
self.set_text_content("h1", ayb)
self.set_text_content('div.xs-none-bottom a[href*="free"]', abtu)
self.highlight("ul.imkt-navbar__link-list", loops=2, scroll=False)
self.highlight('a[href*="jira/pricing"]', loops=3, scroll=False)
self.highlight('a[href*="jira/enterprise"]', loops=3, scroll=False)
self.highlight("h1", loops=3, scroll=False)
self.highlight('div.xs-none-bottom a[href*="free"]', scroll=False)
self.open("https://status.iboss.com/ibcloud/app/cloudStatus.html")
self.set_text_content('div[translate*="cloudStatus"]', ayb)
self.set_text_content('div[translate*="maintenance"]', "ARE")
self.set_text_content('div[translate*="advisory"]', "BELONG")
self.set_text_content('div[translate*="incident"]', "TO US")
self.set_text_content("h1", "Cloud Status - " + aybabtu)
self.highlight("nav div.ibcloud-header-contents", loops=3)
self.highlight('div[translate*="cloudStatus"]', loops=4)
self.highlight('div[translate*="maintenance"]', loops=1)
self.highlight('div[translate*="advisory"]', loops=1)
self.highlight('div[translate*="incident"]', loops=3)
self.highlight("h1", loops=9, scroll=False)
self.open("https://git-scm.com/")
self.set_text_content("span#tagline", aybabtu)
self.set_text_content("#nav-about h3", ayb)
self.set_text_content("#nav-documentation h3", abtu)
self.highlight("span#tagline", loops=8, scroll=False)
self.highlight("#nav-about h3", loops=5, scroll=False)
self.highlight("#nav-documentation h3", loops=6, scroll=False)
self.open("https://teamtreehouse.com/")
self.set_text_content("li.nav-item-free-trial", aybabtu)
self.set_text_content("h1", aybabtu)
self.set_text_content("h2", aybabtu)
self.set_text_content("p.homepage-signup-form-banner", aybabtu)
self.highlight("li.nav-item-free-trial", loops=6, scroll=False)
self.highlight("h1", loops=6, scroll=False)
self.highlight('p[class*="signup-form"]', loops=8, scroll=False)
self.open("https://pragprog.com/")
self.set_text_content("header p", aybabtu)
zoom_in = 'header p{zoom: 1.35;-moz-transform: scale(1.35);}'
self.add_css_style(zoom_in)
self.highlight("header p", loops=10, scroll=False)
self.open("https://seleniumbase.io/")
self.set_text_content("h1", aybabtu)
self.highlight("h1", loops=10, scroll=False)
|
|
#
# Copyright (c) 2008-2015 Citrix Systems, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License")
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from nssrc.com.citrix.netscaler.nitro.resource.base.base_resource import base_resource
from nssrc.com.citrix.netscaler.nitro.resource.base.base_resource import base_response
from nssrc.com.citrix.netscaler.nitro.service.options import options
from nssrc.com.citrix.netscaler.nitro.exception.nitro_exception import nitro_exception
from nssrc.com.citrix.netscaler.nitro.util.nitro_util import nitro_util
class snmpmanager(base_resource) :
""" Configuration for manager resource. """
def __init__(self) :
self._ipaddress = ""
self._netmask = ""
self._domainresolveretry = 0
self._ip = ""
self._domain = ""
self.___count = 0
@property
def ipaddress(self) :
ur"""IP address of the SNMP manager. Can be an IPv4 or IPv6 address. You can instead specify an IPv4 network address or IPv6 network prefix if you want the NetScaler appliance to respond to SNMP queries from any device on the specified network. Alternatively, instead of an IPv4 address, you can specify a host name that has been assigned to an SNMP manager. If you do so, you must add a DNS name server that resolves the host name of the SNMP manager to its IP address.
Note: The NetScaler appliance does not support host names for SNMP managers that have IPv6 addresses.<br/>Minimum length = 1<br/>Maximum length = 255.
"""
try :
return self._ipaddress
except Exception as e:
raise e
@ipaddress.setter
def ipaddress(self, ipaddress) :
ur"""IP address of the SNMP manager. Can be an IPv4 or IPv6 address. You can instead specify an IPv4 network address or IPv6 network prefix if you want the NetScaler appliance to respond to SNMP queries from any device on the specified network. Alternatively, instead of an IPv4 address, you can specify a host name that has been assigned to an SNMP manager. If you do so, you must add a DNS name server that resolves the host name of the SNMP manager to its IP address.
Note: The NetScaler appliance does not support host names for SNMP managers that have IPv6 addresses.<br/>Minimum length = 1<br/>Maximum length = 255
"""
try :
self._ipaddress = ipaddress
except Exception as e:
raise e
@property
def netmask(self) :
ur"""Subnet mask associated with an IPv4 network address. If the IP address specifies the address or host name of a specific host, accept the default value of 255.255.255.255.
"""
try :
return self._netmask
except Exception as e:
raise e
@netmask.setter
def netmask(self, netmask) :
ur"""Subnet mask associated with an IPv4 network address. If the IP address specifies the address or host name of a specific host, accept the default value of 255.255.255.255.
"""
try :
self._netmask = netmask
except Exception as e:
raise e
@property
def domainresolveretry(self) :
ur"""Amount of time, in seconds, for which the NetScaler appliance waits before sending another DNS query to resolve the host name of the SNMP manager if the last query failed. This parameter is valid for host-name based SNMP managers only. After a query succeeds, the TTL determines the wait time.<br/>Minimum length = 5<br/>Maximum length = 20939.
"""
try :
return self._domainresolveretry
except Exception as e:
raise e
@domainresolveretry.setter
def domainresolveretry(self, domainresolveretry) :
ur"""Amount of time, in seconds, for which the NetScaler appliance waits before sending another DNS query to resolve the host name of the SNMP manager if the last query failed. This parameter is valid for host-name based SNMP managers only. After a query succeeds, the TTL determines the wait time.<br/>Minimum length = 5<br/>Maximum length = 20939
"""
try :
self._domainresolveretry = domainresolveretry
except Exception as e:
raise e
@property
def ip(self) :
ur"""The resolved IP address of the hostname manager.
"""
try :
return self._ip
except Exception as e:
raise e
@property
def domain(self) :
ur"""IP address of manager. It will be zero for hostname manager.
"""
try :
return self._domain
except Exception as e:
raise e
def _get_nitro_response(self, service, response) :
ur""" converts nitro response into object and returns the object array in case of get request.
"""
try :
result = service.payload_formatter.string_to_resource(snmpmanager_response, response, self.__class__.__name__)
if(result.errorcode != 0) :
if (result.errorcode == 444) :
service.clear_session(self)
if result.severity :
if (result.severity == "ERROR") :
raise nitro_exception(result.errorcode, str(result.message), str(result.severity))
else :
raise nitro_exception(result.errorcode, str(result.message), str(result.severity))
return result.snmpmanager
except Exception as e :
raise e
def _get_object_name(self) :
ur""" Returns the value of object identifier argument
"""
try :
if self.ipaddress is not None :
return str(self.ipaddress)
return None
except Exception as e :
raise e
@classmethod
def add(cls, client, resource) :
ur""" Use this API to add snmpmanager.
"""
try :
if type(resource) is not list :
addresource = snmpmanager()
addresource.ipaddress = resource.ipaddress
addresource.netmask = resource.netmask
addresource.domainresolveretry = resource.domainresolveretry
return addresource.add_resource(client)
else :
if (resource and len(resource) > 0) :
addresources = [ snmpmanager() for _ in range(len(resource))]
for i in range(len(resource)) :
addresources[i].ipaddress = resource[i].ipaddress
addresources[i].netmask = resource[i].netmask
addresources[i].domainresolveretry = resource[i].domainresolveretry
result = cls.add_bulk_request(client, addresources)
return result
except Exception as e :
raise e
@classmethod
def delete(cls, client, resource) :
ur""" Use this API to delete snmpmanager.
"""
try :
if type(resource) is not list :
deleteresource = snmpmanager()
if type(resource) != type(deleteresource):
deleteresource.ipaddress = resource
else :
deleteresource.ipaddress = resource.ipaddress
deleteresource.netmask = resource.netmask
return deleteresource.delete_resource(client)
else :
if type(resource[0]) != cls :
if (resource and len(resource) > 0) :
deleteresources = [ snmpmanager() for _ in range(len(resource))]
for i in range(len(resource)) :
deleteresources[i].ipaddress = resource[i]
else :
if (resource and len(resource) > 0) :
deleteresources = [ snmpmanager() for _ in range(len(resource))]
for i in range(len(resource)) :
deleteresources[i].ipaddress = resource[i].ipaddress
deleteresources[i].netmask = resource[i].netmask
result = cls.delete_bulk_request(client, deleteresources)
return result
except Exception as e :
raise e
@classmethod
def update(cls, client, resource) :
ur""" Use this API to update snmpmanager.
"""
try :
if type(resource) is not list :
updateresource = snmpmanager()
updateresource.ipaddress = resource.ipaddress
updateresource.netmask = resource.netmask
updateresource.domainresolveretry = resource.domainresolveretry
return updateresource.update_resource(client)
else :
if (resource and len(resource) > 0) :
updateresources = [ snmpmanager() for _ in range(len(resource))]
for i in range(len(resource)) :
updateresources[i].ipaddress = resource[i].ipaddress
updateresources[i].netmask = resource[i].netmask
updateresources[i].domainresolveretry = resource[i].domainresolveretry
result = cls.update_bulk_request(client, updateresources)
return result
except Exception as e :
raise e
@classmethod
def unset(cls, client, resource, args) :
ur""" Use this API to unset the properties of snmpmanager resource.
Properties that need to be unset are specified in args array.
"""
try :
if type(resource) is not list :
unsetresource = snmpmanager()
unsetresource.ipaddress = resource.ipaddress
unsetresource.netmask = resource.netmask
return unsetresource.unset_resource(client, args)
else :
if type(resource[0]) == cls :
if (resource and len(resource) > 0) :
unsetresources = [ snmpmanager() for _ in range(len(resource))]
for i in range(len(resource)) :
unsetresources[i].ipaddress = resource[i].ipaddress
unsetresources[i].netmask = resource[i].netmask
result = cls.unset_bulk_request(client, unsetresources, args)
return result
except Exception as e :
raise e
@classmethod
def get(cls, client, name="", option_="") :
ur""" Use this API to fetch all the snmpmanager resources that are configured on netscaler.
"""
try :
if not name :
obj = snmpmanager()
response = obj.get_resources(client, option_)
else :
if type(name) == cls :
if type(name) is not list :
option_ = options()
option_.args = nitro_util.object_to_string_withoutquotes(name)
response = name.get_resource(client, option_)
else :
if name and len(name) > 0 :
response = [snmpmanager() for _ in range(len(name))]
for i in range(len(name)) :
option_ = options()
option_.args = nitro_util.object_to_string_withoutquotes(name[i])
response[i] = name[i].get_resource(client, option_)
return response
except Exception as e :
raise e
@classmethod
def get_filtered(cls, client, filter_) :
ur""" Use this API to fetch filtered set of snmpmanager resources.
filter string should be in JSON format.eg: "port:80,servicetype:HTTP".
"""
try :
obj = snmpmanager()
option_ = options()
option_.filter = filter_
response = obj.getfiltered(client, option_)
return response
except Exception as e :
raise e
@classmethod
def count(cls, client) :
ur""" Use this API to count the snmpmanager resources configured on NetScaler.
"""
try :
obj = snmpmanager()
option_ = options()
option_.count = True
response = obj.get_resources(client, option_)
if response :
return response[0].__dict__['___count']
return 0
except Exception as e :
raise e
@classmethod
def count_filtered(cls, client, filter_) :
ur""" Use this API to count filtered the set of snmpmanager resources.
Filter string should be in JSON format.eg: "port:80,servicetype:HTTP".
"""
try :
obj = snmpmanager()
option_ = options()
option_.count = True
option_.filter = filter_
response = obj.getfiltered(client, option_)
if response :
return response[0].__dict__['___count']
return 0
except Exception as e :
raise e
class snmpmanager_response(base_response) :
def __init__(self, length=1) :
self.snmpmanager = []
self.errorcode = 0
self.message = ""
self.severity = ""
self.sessionid = ""
self.snmpmanager = [snmpmanager() for _ in range(length)]
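# Usage sketch (not part of the generated SDK): it assumes a reachable NetScaler at
# the placeholder NSIP below and the usual nitro_service login()/logout() API; the
# credentials and the SNMP manager subnet are illustrative values only.
def _example_snmpmanager_usage() :
	from nssrc.com.citrix.netscaler.nitro.service.nitro_service import nitro_service
	client = nitro_service("10.0.0.1", "http")
	client.login("nsroot", "nsroot")
	try :
		# Allow SNMP queries from a whole subnet, then list and count the managers.
		manager = snmpmanager()
		manager.ipaddress = "192.0.2.0"
		manager.netmask = "255.255.255.0"
		snmpmanager.add(client, manager)
		print([m.ipaddress for m in snmpmanager.get(client)])
		print(snmpmanager.count(client))
	finally :
		client.logout()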
|
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# for python 3.5 or earlier
import os
import fcntl
import string
import codecs
import socket
import ctypes
import argparse
import contextlib
from concurrent import futures
_libc = ctypes.CDLL('libc.so.6', use_errno=True)
class _sockaddr_alg(ctypes.Structure):
_fields_ = [
('salg_family', ctypes.c_uint16),
('salg_type', ctypes.c_char * 14),
('salg_feat', ctypes.c_uint32),
('salg_mask', ctypes.c_uint32),
('salg_name', ctypes.c_char * 64),
]
class HashDescriptor:
SPLICE_F_MOVE = 1
SPLICE_F_NONBLOCK = 2
SPLICE_F_MORE = 4
SPLICE_F_GIFT = 8
    # the kernel (<4.11) accepts at most 16 pages per splice, so cap each chunk at 16 * PAGE_SIZE
SPLICE_S_MAX = os.sysconf(os.sysconf_names['SC_PAGESIZE']) * 16
def __init__(self, fileno, digestsize):
self.fileno = fileno
self.digestsize = digestsize
def splice(self, fileno, size):
@contextlib.contextmanager
def _pipe():
try:
rfd, wfd = -1, -1
rfd, wfd = os.pipe()
yield rfd, wfd
finally:
if rfd >= 0:
os.close(rfd)
if wfd >= 0:
os.close(wfd)
def _splice(fd_in, off_in, fd_out, off_out, len_, flags):
size = _libc.splice(fd_in, off_in, fd_out, off_out, len_, flags)
if size < 0:
n = ctypes.get_errno()
raise OSError(n, os.strerror(n))
return size
with _pipe() as (rfd, wfd):
while size > 0:
if size <= self.SPLICE_S_MAX:
mvlen = size
flags = self.SPLICE_F_MOVE
else:
mvlen = self.SPLICE_S_MAX
flags = self.SPLICE_F_MOVE | self.SPLICE_F_MORE
nr = _splice(fileno, None, wfd, None, mvlen, flags)
nw = _splice(rfd, None, self.fileno, None, mvlen, flags)
assert nr == nw
size -= nr
os.lseek(fileno, 0, os.SEEK_SET)
def digest(self, fileno, size):
def _read(fileno, size):
while size > 0:
byte = os.read(fileno, size)
size -= len(byte)
yield byte
if size:
self.splice(fileno, size)
else:
os.write(self.fileno, b'')
return b''.join(_read(self.fileno, self.digestsize))
class Hash:
AF_ALG = 38
SOL_ALG = 279
ALG_SET_KEY = 1
ALG_SET_IV = 2
ALG_SET_OP = 3
ALG_SET_AEAD_ASSOCLEN = 4
ALG_SET_AEAD_AUTHSIZE = 5
ALG_OP_DECRYPT = 0
ALG_OP_ENCRYPT = 1
ALG_TYPE = b'hash'
ALG_NAME = None
ALG_BYTE = None
def __init__(self, key):
sock = socket.socket(self.AF_ALG, socket.SOCK_SEQPACKET, 0)
algo = _sockaddr_alg(self.AF_ALG, self.ALG_TYPE, 0, 0, self.ALG_NAME)
r = _libc.bind(sock.fileno(), ctypes.byref(algo), ctypes.sizeof(algo))
if r < 0:
n = ctypes.get_errno()
sock.close()
raise OSError(n, os.strerror(n))
self.key = key
self.sock = self.prepare(sock)
self.algo = algo
def __del__(self):
if getattr(self, 'sock', None):
self.sock.close()
def prepare(self, sock):
if self.key is not None:
r = _libc.setsockopt(sock.fileno(), self.SOL_ALG, self.ALG_SET_KEY,
self.key, self.ALG_BYTE)
if r < 0:
n = ctypes.get_errno()
sock.close()
raise OSError(n, os.strerror(n))
return sock
@contextlib.contextmanager
def open(self):
try:
fileno = _libc.accept(self.sock.fileno(), None, None)
if fileno < 0:
n = ctypes.get_errno()
raise OSError(n, os.strerror(n))
yield HashDescriptor(fileno, self.ALG_BYTE)
finally:
if fileno >= 0:
os.close(fileno)
@classmethod
def instance(cls, name, key=None):
return cls.algorithm()[name](key)
@classmethod
def algorithm(cls):
d = {}
for c in cls.__subclasses__():
d.update(c.algorithm())
d[c.__name__] = c
return d
class dummy(Hash):
ALG_NAME = b'dummy'
ALG_BYTE = 0
class descriptor(HashDescriptor):
def splice(self, fileno, size):
pass
def digest(self, fileno, size):
return b''
def __init__(self, key):
pass
@contextlib.contextmanager
def open(self):
yield self.descriptor(0, 0)
def iteralgo():
with open('/proc/crypto') as fp:
algo = {}
for line in fp:
line = line.strip()
if not line:
if algo.get('type') == 'shash':
yield algo
algo = {}
continue
key, val = line.split(':', 1)
algo[key.strip()] = val.strip()
def defalgo():
table = str.maketrans(string.punctuation, '_' * len(string.punctuation))
for algo in iteralgo():
name = algo['driver'].translate(table).strip('_')
if name.endswith('_generic'):
name = name[:-8]
globals()[name] = type(name, (Hash,), {
'ALG_NAME': algo['driver'].encode(),
'ALG_BYTE': int(algo['digestsize']),
})
def main():
digs = sorted(Hash.algorithm().keys())
argp = argparse.ArgumentParser()
argp.add_argument('-a', '--digest-algo', choices=digs, default='dummy')
argp.add_argument('-k', '--digest-key', type=os.fsencode)
argp.add_argument('-t', '--threads', type=int, default=os.cpu_count())
argp.add_argument('files', nargs=argparse.REMAINDER)
args = argp.parse_args()
hasher = Hash.instance(args.digest_algo, args.digest_key)
def run(path):
        with hasher.open() as desc, open(path, 'rb') as fp:
fileno = fp.fileno()
fcntl.flock(fileno, fcntl.LOCK_SH)
return desc.digest(fileno, os.fstat(fileno).st_size)
with futures.ThreadPoolExecutor(max_workers=args.threads) as executor:
futuredict = {executor.submit(run, path): path for path in args.files}
for future in futures.as_completed(futuredict):
path = futuredict[future]
digest = future.result()
print(codecs.encode(digest, 'hex').decode(), '', path)
defalgo()
if __name__ == '__main__':
main()
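# For comparison: on Python 3.6+ the standard library exposes this AF_ALG interface
# directly (Linux only), with no ctypes or splice needed. A minimal sketch, assuming
# the running kernel provides a sha256 shash driver:
def _stdlib_afalg_sha256(data):
    with socket.socket(socket.AF_ALG, socket.SOCK_SEQPACKET, 0) as alg:
        alg.bind(('hash', 'sha256'))
        op, _ = alg.accept()      # operation socket, analogous to HashDescriptor above
        with op:
            op.sendall(data)      # feed the message
            return op.recv(32)    # read back the 32-byte digest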
|
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from unittest.mock import MagicMock, patch
import pytest
from airflow.models import DAG, Connection
from airflow.providers.dbt.cloud.hooks.dbt import DbtCloudHook, DbtCloudJobRunException, DbtCloudJobRunStatus
from airflow.providers.dbt.cloud.operators.dbt import (
DbtCloudGetJobRunArtifactOperator,
DbtCloudRunJobOperator,
)
from airflow.utils import db, timezone
DEFAULT_DATE = timezone.datetime(2021, 1, 1)
TASK_ID = "run_job_op"
ACCOUNT_ID_CONN = "account_id_conn"
NO_ACCOUNT_ID_CONN = "no_account_id_conn"
DEFAULT_ACCOUNT_ID = 11111
ACCOUNT_ID = 22222
TOKEN = "token"
PROJECT_ID = 33333
JOB_ID = 4444
RUN_ID = 5555
EXPECTED_JOB_RUN_OP_EXTRA_LINK = (
"https://cloud.getdbt.com/#/accounts/{account_id}/projects/{project_id}/runs/{run_id}/"
)
DEFAULT_ACCOUNT_JOB_RUN_RESPONSE = {
"data": {
"id": RUN_ID,
"href": EXPECTED_JOB_RUN_OP_EXTRA_LINK.format(
account_id=DEFAULT_ACCOUNT_ID, project_id=PROJECT_ID, run_id=RUN_ID
),
}
}
EXPLICIT_ACCOUNT_JOB_RUN_RESPONSE = {
"data": {
"id": RUN_ID,
"href": EXPECTED_JOB_RUN_OP_EXTRA_LINK.format(
account_id=ACCOUNT_ID, project_id=PROJECT_ID, run_id=RUN_ID
),
}
}
def setup_module():
# Connection with ``account_id`` specified
conn_account_id = Connection(
conn_id=ACCOUNT_ID_CONN,
conn_type=DbtCloudHook.conn_type,
login=DEFAULT_ACCOUNT_ID,
password=TOKEN,
)
# Connection with no ``account_id`` specified
conn_no_account_id = Connection(
conn_id=NO_ACCOUNT_ID_CONN,
conn_type=DbtCloudHook.conn_type,
password=TOKEN,
)
db.merge_conn(conn_account_id)
db.merge_conn(conn_no_account_id)
class TestDbtCloudRunJobOperator:
def setup_method(self):
self.dag = DAG("test_dbt_cloud_job_run_op", start_date=DEFAULT_DATE)
self.mock_ti = MagicMock()
self.mock_context = {"ti": self.mock_ti}
self.config = {
"job_id": JOB_ID,
"check_interval": 1,
"timeout": 3,
}
@patch.object(DbtCloudHook, "trigger_job_run", return_value=MagicMock(**DEFAULT_ACCOUNT_JOB_RUN_RESPONSE))
@pytest.mark.parametrize(
"job_run_status, expected_output",
[
(DbtCloudJobRunStatus.SUCCESS.value, "success"),
(DbtCloudJobRunStatus.ERROR.value, "exception"),
(DbtCloudJobRunStatus.CANCELLED.value, "exception"),
(DbtCloudJobRunStatus.RUNNING.value, "timeout"),
(DbtCloudJobRunStatus.QUEUED.value, "timeout"),
(DbtCloudJobRunStatus.STARTING.value, "timeout"),
],
)
@pytest.mark.parametrize(
"conn_id, account_id",
[(ACCOUNT_ID_CONN, None), (NO_ACCOUNT_ID_CONN, ACCOUNT_ID)],
ids=["default_account", "explicit_account"],
)
def test_execute_wait_for_termination(
self, mock_run_job, conn_id, account_id, job_run_status, expected_output
):
operator = DbtCloudRunJobOperator(
task_id=TASK_ID, dbt_cloud_conn_id=conn_id, account_id=account_id, dag=self.dag, **self.config
)
assert operator.dbt_cloud_conn_id == conn_id
assert operator.job_id == self.config["job_id"]
assert operator.account_id == account_id
assert operator.check_interval == self.config["check_interval"]
assert operator.timeout == self.config["timeout"]
assert operator.wait_for_termination
with patch.object(DbtCloudHook, "get_job_run") as mock_get_job_run:
mock_get_job_run.return_value.json.return_value = {
"data": {"status": job_run_status, "id": RUN_ID}
}
if expected_output == "success":
operator.execute(context=self.mock_context)
assert mock_run_job.return_value.data["id"] == RUN_ID
elif expected_output == "exception":
# The operator should fail if the job run fails or is cancelled.
                with pytest.raises(DbtCloudJobRunException) as err:
                    operator.execute(context=self.mock_context)
                assert str(err.value).endswith("has failed or has been cancelled.")
else:
# Demonstrating the operator timing out after surpassing the configured timeout value.
                with pytest.raises(DbtCloudJobRunException) as err:
                    operator.execute(context=self.mock_context)
                assert str(err.value).endswith(
                    f"has not reached a terminal status after {self.config['timeout']} seconds."
                )
mock_run_job.assert_called_once_with(
account_id=account_id,
job_id=JOB_ID,
cause=f"Triggered via Apache Airflow by task {TASK_ID!r} in the {self.dag.dag_id} DAG.",
additional_run_config={},
)
if job_run_status in DbtCloudJobRunStatus.TERMINAL_STATUSES.value:
assert mock_get_job_run.call_count == 1
else:
                # When the job run has not yet reached a terminal status, the operator keeps
                # calling ``get_job_run()`` until ``timeout`` seconds have elapsed (3 seconds in
                # this test). Therefore there should be 4 calls: one initial call plus one for
                # each of the three 1-second check intervals.
assert mock_get_job_run.call_count == 4
@patch.object(DbtCloudHook, "trigger_job_run")
@pytest.mark.parametrize(
"conn_id, account_id",
[(ACCOUNT_ID_CONN, None), (NO_ACCOUNT_ID_CONN, ACCOUNT_ID)],
ids=["default_account", "explicit_account"],
)
def test_execute_no_wait_for_termination(self, mock_run_job, conn_id, account_id):
operator = DbtCloudRunJobOperator(
task_id=TASK_ID,
dbt_cloud_conn_id=conn_id,
account_id=account_id,
trigger_reason=None,
dag=self.dag,
wait_for_termination=False,
**self.config,
)
assert operator.dbt_cloud_conn_id == conn_id
assert operator.job_id == self.config["job_id"]
assert operator.account_id == account_id
assert operator.check_interval == self.config["check_interval"]
assert operator.timeout == self.config["timeout"]
assert not operator.wait_for_termination
with patch.object(DbtCloudHook, "get_job_run") as mock_get_job_run:
operator.execute(context=self.mock_context)
mock_run_job.assert_called_once_with(
account_id=account_id,
job_id=JOB_ID,
cause=f"Triggered via Apache Airflow by task {TASK_ID!r} in the {self.dag.dag_id} DAG.",
additional_run_config={},
)
mock_get_job_run.assert_not_called()
@patch.object(DbtCloudHook, "trigger_job_run")
@pytest.mark.parametrize(
"conn_id, account_id",
[(ACCOUNT_ID_CONN, None), (NO_ACCOUNT_ID_CONN, ACCOUNT_ID)],
ids=["default_account", "explicit_account"],
)
def test_custom_trigger_reason(self, mock_run_job, conn_id, account_id):
custom_trigger_reason = "Some other trigger reason."
operator = DbtCloudRunJobOperator(
task_id=TASK_ID,
dbt_cloud_conn_id=conn_id,
account_id=account_id,
trigger_reason=custom_trigger_reason,
dag=self.dag,
**self.config,
)
assert operator.trigger_reason == custom_trigger_reason
with patch.object(DbtCloudHook, "get_job_run") as mock_get_job_run:
mock_get_job_run.return_value.json.return_value = {
"data": {"status": DbtCloudJobRunStatus.SUCCESS.value, "id": RUN_ID}
}
operator.execute(context=self.mock_context)
mock_run_job.assert_called_once_with(
account_id=account_id,
job_id=JOB_ID,
cause=custom_trigger_reason,
additional_run_config={},
)
@pytest.mark.parametrize(
"conn_id, account_id",
[(ACCOUNT_ID_CONN, None), (NO_ACCOUNT_ID_CONN, ACCOUNT_ID)],
ids=["default_account", "explicit_account"],
)
def test_run_job_operator_link(self, conn_id, account_id, create_task_instance_of_operator, request):
ti = create_task_instance_of_operator(
DbtCloudRunJobOperator,
dag_id="test_dbt_cloud_run_job_op_link",
execution_date=DEFAULT_DATE,
task_id="trigger_dbt_cloud_job",
dbt_cloud_conn_id=conn_id,
job_id=JOB_ID,
account_id=account_id,
)
if request.node.callspec.id == "default_account":
_run_response = DEFAULT_ACCOUNT_JOB_RUN_RESPONSE
else:
_run_response = EXPLICIT_ACCOUNT_JOB_RUN_RESPONSE
ti.xcom_push(key="job_run_url", value=_run_response["data"]["href"])
url = ti.task.get_extra_links(ti, "Monitor Job Run")
assert url == (
EXPECTED_JOB_RUN_OP_EXTRA_LINK.format(
account_id=account_id if account_id else DEFAULT_ACCOUNT_ID,
project_id=PROJECT_ID,
run_id=_run_response["data"]["id"],
)
)
class TestDbtCloudGetJobRunArtifactOperator:
def setup_method(self):
self.dag = DAG("test_dbt_cloud_get_artifact_op", start_date=DEFAULT_DATE)
@patch("airflow.providers.dbt.cloud.hooks.dbt.DbtCloudHook.get_job_run_artifact")
@pytest.mark.parametrize(
"conn_id, account_id",
[(ACCOUNT_ID_CONN, None), (NO_ACCOUNT_ID_CONN, ACCOUNT_ID)],
ids=["default_account", "explicit_account"],
)
def test_get_json_artifact(self, mock_get_artifact, conn_id, account_id):
operator = DbtCloudGetJobRunArtifactOperator(
task_id=TASK_ID,
dbt_cloud_conn_id=conn_id,
run_id=RUN_ID,
account_id=account_id,
path="path/to/my/manifest.json",
dag=self.dag,
)
mock_get_artifact.return_value.json.return_value = {"data": "file contents"}
operator.execute(context={})
mock_get_artifact.assert_called_once_with(
run_id=RUN_ID,
path="path/to/my/manifest.json",
account_id=account_id,
step=None,
)
@patch("airflow.providers.dbt.cloud.hooks.dbt.DbtCloudHook.get_job_run_artifact")
@pytest.mark.parametrize(
"conn_id, account_id",
[(ACCOUNT_ID_CONN, None), (NO_ACCOUNT_ID_CONN, ACCOUNT_ID)],
ids=["default_account", "explicit_account"],
)
def test_get_json_artifact_with_step(self, mock_get_artifact, conn_id, account_id):
operator = DbtCloudGetJobRunArtifactOperator(
task_id=TASK_ID,
dbt_cloud_conn_id=conn_id,
run_id=RUN_ID,
account_id=account_id,
path="path/to/my/manifest.json",
step=2,
dag=self.dag,
)
mock_get_artifact.return_value.json.return_value = {"data": "file contents"}
operator.execute(context={})
mock_get_artifact.assert_called_once_with(
run_id=RUN_ID,
path="path/to/my/manifest.json",
account_id=account_id,
step=2,
)
@patch("airflow.providers.dbt.cloud.hooks.dbt.DbtCloudHook.get_job_run_artifact")
@pytest.mark.parametrize(
"conn_id, account_id",
[(ACCOUNT_ID_CONN, None), (NO_ACCOUNT_ID_CONN, ACCOUNT_ID)],
ids=["default_account", "explicit_account"],
)
def test_get_text_artifact(self, mock_get_artifact, conn_id, account_id):
operator = DbtCloudGetJobRunArtifactOperator(
task_id=TASK_ID,
dbt_cloud_conn_id=conn_id,
run_id=RUN_ID,
account_id=account_id,
path="path/to/my/model.sql",
dag=self.dag,
)
mock_get_artifact.return_value.text = "file contents"
operator.execute(context={})
mock_get_artifact.assert_called_once_with(
run_id=RUN_ID,
path="path/to/my/model.sql",
account_id=account_id,
step=None,
)
@patch("airflow.providers.dbt.cloud.hooks.dbt.DbtCloudHook.get_job_run_artifact")
@pytest.mark.parametrize(
"conn_id, account_id",
[(ACCOUNT_ID_CONN, None), (NO_ACCOUNT_ID_CONN, ACCOUNT_ID)],
ids=["default_account", "explicit_account"],
)
def test_get_text_artifact_with_step(self, mock_get_artifact, conn_id, account_id):
operator = DbtCloudGetJobRunArtifactOperator(
task_id=TASK_ID,
dbt_cloud_conn_id=conn_id,
run_id=RUN_ID,
account_id=account_id,
path="path/to/my/model.sql",
step=2,
dag=self.dag,
)
mock_get_artifact.return_value.text = "file contents"
operator.execute(context={})
mock_get_artifact.assert_called_once_with(
run_id=RUN_ID,
path="path/to/my/model.sql",
account_id=account_id,
step=2,
)
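# A minimal DAG sketch (not part of the test suite) showing how the operator under test
# is typically wired up; the dag id is illustrative and the connection/job ids reuse the
# placeholder test constants defined above.
def _example_dag():
    with DAG(
        dag_id="example_dbt_cloud_job",
        start_date=DEFAULT_DATE,
        schedule_interval=None,
    ) as dag:
        DbtCloudRunJobOperator(
            task_id="trigger_dbt_cloud_job",
            dbt_cloud_conn_id=ACCOUNT_ID_CONN,
            job_id=JOB_ID,
            check_interval=30,
            timeout=3600,
            wait_for_termination=True,
        )
    return dag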
|
|
# Copyright 2012 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Copyright 2012 Cisco Systems, Inc.
# Copyright 2012 NEC Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from __future__ import absolute_import
import collections
import copy
import logging
import netaddr
from django.conf import settings
from django.utils.translation import ugettext_lazy as _
from neutronclient.common import exceptions as neutron_exc
from neutronclient.v2_0 import client as neutron_client
import six
from horizon import exceptions
from horizon import messages
from horizon.utils.memoized import memoized # noqa
from openstack_dashboard.api import base
from openstack_dashboard.api import network_base
from openstack_dashboard.api import nova
from openstack_dashboard import policy
LOG = logging.getLogger(__name__)
IP_VERSION_DICT = {4: 'IPv4', 6: 'IPv6'}
OFF_STATE = 'OFF'
ON_STATE = 'ON'
ROUTER_INTERFACE_OWNERS = (
'network:router_interface',
'network:router_interface_distributed'
)
class NeutronAPIDictWrapper(base.APIDictWrapper):
def __init__(self, apidict):
if 'admin_state_up' in apidict:
if apidict['admin_state_up']:
apidict['admin_state'] = 'UP'
else:
apidict['admin_state'] = 'DOWN'
# Django cannot handle a key name with ':', so use '__'.
apidict.update({
key.replace(':', '__'): value
for key, value in apidict.items()
if ':' in key
})
super(NeutronAPIDictWrapper, self).__init__(apidict)
def set_id_as_name_if_empty(self, length=8):
try:
if not self._apidict['name'].strip():
id = self._apidict['id']
if length:
id = id[:length]
self._apidict['name'] = '(%s)' % id
except KeyError:
pass
def items(self):
return self._apidict.items()
@property
def name_or_id(self):
return (self._apidict.get('name').strip() or
'(%s)' % self._apidict['id'][:13])
class Agent(NeutronAPIDictWrapper):
"""Wrapper for neutron agents."""
class Network(NeutronAPIDictWrapper):
"""Wrapper for neutron Networks."""
def to_dict(self):
d = dict(super(NeutronAPIDictWrapper, self).to_dict())
d['subnets'] = [s.to_dict() for s in d['subnets']]
return d
class Subnet(NeutronAPIDictWrapper):
"""Wrapper for neutron subnets."""
def __init__(self, apidict):
apidict['ipver_str'] = get_ipver_str(apidict['ip_version'])
super(Subnet, self).__init__(apidict)
class SubnetPool(NeutronAPIDictWrapper):
"""Wrapper for neutron subnetpools."""
class Port(NeutronAPIDictWrapper):
"""Wrapper for neutron ports."""
def __init__(self, apidict):
if 'mac_learning_enabled' in apidict:
apidict['mac_state'] = \
ON_STATE if apidict['mac_learning_enabled'] else OFF_STATE
pairs = apidict.get('allowed_address_pairs')
if pairs:
apidict = copy.deepcopy(apidict)
wrapped_pairs = [PortAllowedAddressPair(pair) for pair in pairs]
apidict['allowed_address_pairs'] = wrapped_pairs
super(Port, self).__init__(apidict)
class PortAllowedAddressPair(NeutronAPIDictWrapper):
"""Wrapper for neutron port allowed address pairs."""
def __init__(self, addr_pair):
super(PortAllowedAddressPair, self).__init__(addr_pair)
# Horizon references id property for table operations
self.id = addr_pair['ip_address']
class Profile(NeutronAPIDictWrapper):
"""Wrapper for neutron profiles."""
_attrs = ['profile_id', 'name', 'segment_type', 'segment_range',
'sub_type', 'multicast_ip_index', 'multicast_ip_range']
class Router(NeutronAPIDictWrapper):
"""Wrapper for neutron routers."""
class RouterStaticRoute(NeutronAPIDictWrapper):
"""Wrapper for neutron routes extra route."""
def __init__(self, route):
super(RouterStaticRoute, self).__init__(route)
# Horizon references id property for table operations
self.id = route['nexthop'] + ":" + route['destination']
class SecurityGroup(NeutronAPIDictWrapper):
# Required attributes: id, name, description, tenant_id, rules
def __init__(self, sg, sg_dict=None):
if sg_dict is None:
sg_dict = {sg['id']: sg['name']}
sg['rules'] = [SecurityGroupRule(rule, sg_dict)
for rule in sg['security_group_rules']]
super(SecurityGroup, self).__init__(sg)
def to_dict(self):
return {k: self._apidict[k] for k in self._apidict if k != 'rules'}
@six.python_2_unicode_compatible
class SecurityGroupRule(NeutronAPIDictWrapper):
# Required attributes:
# id, parent_group_id
# ip_protocol, from_port, to_port, ip_range, group
# ethertype, direction (Neutron specific)
def _get_secgroup_name(self, sg_id, sg_dict):
if sg_id:
if sg_dict is None:
sg_dict = {}
            # If the sg name is not found in sg_dict,
            # the first two parts of the UUID are used as the sg name.
return sg_dict.get(sg_id, sg_id[:13])
else:
return u''
def __init__(self, sgr, sg_dict=None):
# In Neutron, if both remote_ip_prefix and remote_group_id are None,
# it means all remote IP range is allowed, i.e., 0.0.0.0/0 or ::/0.
if not sgr['remote_ip_prefix'] and not sgr['remote_group_id']:
if sgr['ethertype'] == 'IPv6':
sgr['remote_ip_prefix'] = '::/0'
else:
sgr['remote_ip_prefix'] = '0.0.0.0/0'
rule = {
'id': sgr['id'],
'parent_group_id': sgr['security_group_id'],
'direction': sgr['direction'],
'ethertype': sgr['ethertype'],
'ip_protocol': sgr['protocol'],
'from_port': sgr['port_range_min'],
'to_port': sgr['port_range_max'],
}
cidr = sgr['remote_ip_prefix']
rule['ip_range'] = {'cidr': cidr} if cidr else {}
group = self._get_secgroup_name(sgr['remote_group_id'], sg_dict)
rule['group'] = {'name': group} if group else {}
super(SecurityGroupRule, self).__init__(rule)
def __str__(self):
if 'name' in self.group:
remote = self.group['name']
elif 'cidr' in self.ip_range:
remote = self.ip_range['cidr']
else:
remote = 'ANY'
direction = 'to' if self.direction == 'egress' else 'from'
if self.from_port:
if self.from_port == self.to_port:
proto_port = ("%s/%s" %
(self.from_port, self.ip_protocol.lower()))
else:
proto_port = ("%s-%s/%s" %
(self.from_port, self.to_port,
self.ip_protocol.lower()))
elif self.ip_protocol:
try:
ip_proto = int(self.ip_protocol)
proto_port = "ip_proto=%d" % ip_proto
except Exception:
# well-defined IP protocol name like TCP, UDP, ICMP.
proto_port = self.ip_protocol
else:
proto_port = ''
return (_('ALLOW %(ethertype)s %(proto_port)s '
'%(direction)s %(remote)s') %
{'ethertype': self.ethertype,
'proto_port': proto_port,
'remote': remote,
'direction': direction})
class SecurityGroupManager(network_base.SecurityGroupManager):
backend = 'neutron'
def __init__(self, request):
self.request = request
self.client = neutronclient(request)
def _list(self, **filters):
secgroups = self.client.list_security_groups(**filters)
return [SecurityGroup(sg) for sg in secgroups.get('security_groups')]
def list(self):
tenant_id = self.request.user.tenant_id
return self._list(tenant_id=tenant_id)
def _sg_name_dict(self, sg_id, rules):
"""Create a mapping dict from secgroup id to its name."""
related_ids = set([sg_id])
related_ids |= set(filter(None, [r['remote_group_id'] for r in rules]))
related_sgs = self.client.list_security_groups(id=related_ids,
fields=['id', 'name'])
related_sgs = related_sgs.get('security_groups')
return dict((sg['id'], sg['name']) for sg in related_sgs)
def get(self, sg_id):
secgroup = self.client.show_security_group(sg_id).get('security_group')
sg_dict = self._sg_name_dict(sg_id, secgroup['security_group_rules'])
return SecurityGroup(secgroup, sg_dict)
def create(self, name, desc):
body = {'security_group': {'name': name,
'description': desc,
'tenant_id': self.request.user.project_id}}
secgroup = self.client.create_security_group(body)
return SecurityGroup(secgroup.get('security_group'))
def update(self, sg_id, name, desc):
body = {'security_group': {'name': name,
'description': desc}}
secgroup = self.client.update_security_group(sg_id, body)
return SecurityGroup(secgroup.get('security_group'))
def delete(self, sg_id):
self.client.delete_security_group(sg_id)
def rule_create(self, parent_group_id,
direction=None, ethertype=None,
ip_protocol=None, from_port=None, to_port=None,
cidr=None, group_id=None):
if not cidr:
cidr = None
if from_port < 0:
from_port = None
if to_port < 0:
to_port = None
if isinstance(ip_protocol, int) and ip_protocol < 0:
ip_protocol = None
body = {'security_group_rule':
{'security_group_id': parent_group_id,
'direction': direction,
'ethertype': ethertype,
'protocol': ip_protocol,
'port_range_min': from_port,
'port_range_max': to_port,
'remote_ip_prefix': cidr,
'remote_group_id': group_id}}
try:
rule = self.client.create_security_group_rule(body)
except neutron_exc.Conflict:
raise exceptions.Conflict(_('Security group rule already exists.'))
rule = rule.get('security_group_rule')
sg_dict = self._sg_name_dict(parent_group_id, [rule])
return SecurityGroupRule(rule, sg_dict)
def rule_delete(self, sgr_id):
self.client.delete_security_group_rule(sgr_id)
def list_by_instance(self, instance_id):
"""Gets security groups of an instance."""
ports = port_list(self.request, device_id=instance_id)
sg_ids = []
for p in ports:
sg_ids += p.security_groups
return self._list(id=set(sg_ids)) if sg_ids else []
def update_instance_security_group(self, instance_id,
new_security_group_ids):
ports = port_list(self.request, device_id=instance_id)
for p in ports:
params = {'security_groups': new_security_group_ids}
port_update(self.request, p.id, **params)
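# Sketch of typical use from a view (ids are illustrative): open TCP/22 from anywhere
# on an existing security group via the manager defined above.
def _example_allow_ssh(request, secgroup_id):
    sgm = SecurityGroupManager(request)
    return sgm.rule_create(secgroup_id,
                           direction='ingress', ethertype='IPv4',
                           ip_protocol='tcp', from_port=22, to_port=22,
                           cidr='0.0.0.0/0')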
class FloatingIp(base.APIDictWrapper):
_attrs = ['id', 'ip', 'fixed_ip', 'port_id', 'instance_id',
'instance_type', 'pool']
def __init__(self, fip):
fip['ip'] = fip['floating_ip_address']
fip['fixed_ip'] = fip['fixed_ip_address']
fip['pool'] = fip['floating_network_id']
super(FloatingIp, self).__init__(fip)
class FloatingIpPool(base.APIDictWrapper):
pass
class FloatingIpTarget(base.APIDictWrapper):
pass
class FloatingIpManager(network_base.FloatingIpManager):
device_owner_map = {
'compute:': 'compute',
'neutron:LOADBALANCER': 'loadbalancer',
}
def __init__(self, request):
self.request = request
self.client = neutronclient(request)
def list_pools(self):
search_opts = {'router:external': True}
return [FloatingIpPool(pool) for pool
in self.client.list_networks(**search_opts).get('networks')]
def _get_instance_type_from_device_owner(self, device_owner):
for key, value in self.device_owner_map.items():
if device_owner.startswith(key):
return value
return device_owner
def _set_instance_info(self, fip, port=None):
if fip['port_id']:
if not port:
port = port_get(self.request, fip['port_id'])
fip['instance_id'] = port.device_id
fip['instance_type'] = self._get_instance_type_from_device_owner(
port.device_owner)
else:
fip['instance_id'] = None
fip['instance_type'] = None
def list(self, all_tenants=False, **search_opts):
if not all_tenants:
tenant_id = self.request.user.tenant_id
# In Neutron, list_floatingips returns Floating IPs from
# all tenants when the API is called with admin role, so
# we need to filter them with tenant_id.
search_opts['tenant_id'] = tenant_id
port_search_opts = {'tenant_id': tenant_id}
else:
port_search_opts = {}
fips = self.client.list_floatingips(**search_opts)
fips = fips.get('floatingips')
# Get port list to add instance_id to floating IP list
# instance_id is stored in device_id attribute
ports = port_list(self.request, **port_search_opts)
port_dict = collections.OrderedDict([(p['id'], p) for p in ports])
for fip in fips:
self._set_instance_info(fip, port_dict.get(fip['port_id']))
return [FloatingIp(fip) for fip in fips]
def get(self, floating_ip_id):
fip = self.client.show_floatingip(floating_ip_id).get('floatingip')
self._set_instance_info(fip)
return FloatingIp(fip)
def allocate(self, pool, tenant_id=None, **params):
if not tenant_id:
tenant_id = self.request.user.project_id
create_dict = {'floating_network_id': pool,
'tenant_id': tenant_id}
if 'floating_ip_address' in params:
create_dict['floating_ip_address'] = params['floating_ip_address']
fip = self.client.create_floatingip(
{'floatingip': create_dict}).get('floatingip')
self._set_instance_info(fip)
return FloatingIp(fip)
def release(self, floating_ip_id):
self.client.delete_floatingip(floating_ip_id)
def associate(self, floating_ip_id, port_id):
# NOTE: In Neutron Horizon floating IP support, port_id is
# "<port_id>_<ip_address>" format to identify multiple ports.
pid, ip_address = port_id.split('_', 1)
update_dict = {'port_id': pid,
'fixed_ip_address': ip_address}
self.client.update_floatingip(floating_ip_id,
{'floatingip': update_dict})
def disassociate(self, floating_ip_id):
update_dict = {'port_id': None}
self.client.update_floatingip(floating_ip_id,
{'floatingip': update_dict})
def _get_reachable_subnets(self, ports):
if not is_enabled_by_config('enable_fip_topology_check', True):
# All subnets are reachable from external network
return set(
p.fixed_ips[0]['subnet_id'] for p in ports if p.fixed_ips
)
# Retrieve subnet list reachable from external network
ext_net_ids = [ext_net.id for ext_net in self.list_pools()]
gw_routers = [r.id for r in router_list(self.request)
if (r.external_gateway_info and
r.external_gateway_info.get('network_id')
in ext_net_ids)]
reachable_subnets = set([p.fixed_ips[0]['subnet_id'] for p in ports
if ((p.device_owner in
ROUTER_INTERFACE_OWNERS)
and (p.device_id in gw_routers))])
# we have to include any shared subnets as well because we may not
# have permission to see the router interface to infer connectivity
shared = set([s.id for n in network_list(self.request, shared=True)
for s in n.subnets])
return reachable_subnets | shared
def list_targets(self):
tenant_id = self.request.user.tenant_id
ports = port_list(self.request, tenant_id=tenant_id)
servers, has_more = nova.server_list(self.request)
server_dict = collections.OrderedDict(
[(s.id, s.name) for s in servers])
reachable_subnets = self._get_reachable_subnets(ports)
if is_service_enabled(self.request,
config_name='enable_lb',
ext_name='lbaas'):
# Also get the loadbalancer VIPs
vip_dict = {v['port_id']: v['name']
for v in self.client.list_vips().get('vips', [])}
else:
vip_dict = {}
targets = []
for p in ports:
# Remove network ports from Floating IP targets
if p.device_owner.startswith('network:'):
continue
port_id = p.id
server_name = server_dict.get(p.device_id) or vip_dict.get(port_id)
for ip in p.fixed_ips:
if ip['subnet_id'] not in reachable_subnets:
continue
target = {'name': '%s: %s' % (server_name, ip['ip_address']),
'id': '%s_%s' % (port_id, ip['ip_address']),
'port_id': port_id,
'instance_id': p.device_id}
targets.append(FloatingIpTarget(target))
return targets
def _target_ports_by_instance(self, instance_id):
if not instance_id:
return None
search_opts = {'device_id': instance_id}
return port_list(self.request, **search_opts)
def get_target_id_by_instance(self, instance_id, target_list=None):
if target_list is not None:
targets = [target for target in target_list
if target['instance_id'] == instance_id]
if not targets:
return None
return targets[0]['id']
else:
# In Neutron one port can have multiple ip addresses, so this
# method picks up the first one and generate target id.
ports = self._target_ports_by_instance(instance_id)
if not ports:
return None
return '{0}_{1}'.format(ports[0].id,
ports[0].fixed_ips[0]['ip_address'])
def list_target_id_by_instance(self, instance_id, target_list=None):
if target_list is not None:
return [target['id'] for target in target_list
if target['instance_id'] == instance_id]
else:
ports = self._target_ports_by_instance(instance_id)
return ['{0}_{1}'.format(p.id, p.fixed_ips[0]['ip_address'])
for p in ports]
def is_simple_associate_supported(self):
        # NOTE: There are two reasons why simple association support
        # needs more consideration. (1) Neutron does not support a
        # default floating IP pool at the moment. This can be avoided
        # in the case where only one floating IP pool exists.
        # (2) A Neutron floating IP is associated with each VIF, so we
        # need to check whether such a VIF is the only one for an
        # instance before enabling simple association support.
return False
def is_supported(self):
network_config = getattr(settings, 'OPENSTACK_NEUTRON_NETWORK', {})
return network_config.get('enable_router', True)
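# Sketch (illustrative ids only): associate a floating IP with the first fixed IP of a
# port, using the "<port_id>_<ip_address>" target format described in associate() above.
def _example_associate_first_ip(request, floating_ip_id, port):
    target_id = '%s_%s' % (port.id, port.fixed_ips[0]['ip_address'])
    FloatingIpManager(request).associate(floating_ip_id, target_id)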
def get_ipver_str(ip_version):
"""Convert an ip version number to a human-friendly string."""
return IP_VERSION_DICT.get(ip_version, '')
@memoized
def neutronclient(request):
insecure = getattr(settings, 'OPENSTACK_SSL_NO_VERIFY', False)
cacert = getattr(settings, 'OPENSTACK_SSL_CACERT', None)
c = neutron_client.Client(token=request.user.token.id,
auth_url=base.url_for(request, 'identity'),
endpoint_url=base.url_for(request, 'network'),
insecure=insecure, ca_cert=cacert)
return c
def list_resources_with_long_filters(list_method,
filter_attr, filter_values, **params):
"""List neutron resources with handling RequestURITooLong exception.
If filter parameters are long, list resources API request leads to
414 error (URL is too long). For such case, this method split
list parameters specified by a list_field argument into chunks
and call the specified list_method repeatedly.
:param list_method: Method used to retrieve resource list.
:param filter_attr: attribute name to be filtered. The value corresponding
to this attribute is specified by "filter_values".
If you want to specify more attributes for a filter condition,
pass them as keyword arguments like "attr2=values2".
:param filter_values: values of "filter_attr" to be filtered.
If filter_values are too long and the total URI length exceed the
maximum length supported by the neutron server, filter_values will
be split into sub lists if filter_values is a list.
:param params: parameters to pass a specified listing API call
without any changes. You can specify more filter conditions
in addition to a pair of filter_attr and filter_values.
"""
try:
params[filter_attr] = filter_values
return list_method(**params)
except neutron_exc.RequestURITooLong as uri_len_exc:
# The URI is too long because of too many filter values.
# Use the excess attribute of the exception to know how many
# filter values can be inserted into a single request.
# We consider only the filter condition from (filter_attr,
# filter_values) and do not consider other filter conditions
# which may be specified in **params.
if type(filter_values) != list:
filter_values = [filter_values]
# Length of each query filter is:
# <key>=<value>& (e.g., id=<uuid>)
# The length will be key_len + value_maxlen + 2
all_filter_len = sum(len(filter_attr) + len(val) + 2
for val in filter_values)
allowed_filter_len = all_filter_len - uri_len_exc.excess
val_maxlen = max(len(val) for val in filter_values)
filter_maxlen = len(filter_attr) + val_maxlen + 2
chunk_size = allowed_filter_len // filter_maxlen
resources = []
for i in range(0, len(filter_values), chunk_size):
params[filter_attr] = filter_values[i:i + chunk_size]
resources.extend(list_method(**params))
return resources
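# Illustration only: this mirrors how the helper is invoked later in this module
# (see servers_update_addresses); server_ids is assumed to be a possibly very long
# list of Nova instance ids.
def _example_ports_for_servers(request, server_ids):
    return list_resources_with_long_filters(
        port_list, 'device_id', server_ids, request=request)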
def network_list(request, **params):
LOG.debug("network_list(): params=%s", params)
networks = neutronclient(request).list_networks(**params).get('networks')
# Get subnet list to expand subnet info in network list.
subnets = subnet_list(request)
subnet_dict = dict([(s['id'], s) for s in subnets])
# Expand subnet list from subnet_id to values.
for n in networks:
# Due to potential timing issues, we can't assume the subnet_dict data
# is in sync with the network data.
n['subnets'] = [subnet_dict[s] for s in n.get('subnets', []) if
s in subnet_dict]
return [Network(n) for n in networks]
def network_list_for_tenant(request, tenant_id, include_external=False,
**params):
"""Return a network list available for the tenant.
The list contains networks owned by the tenant and public networks.
If requested_networks specified, it searches requested_networks only.
"""
LOG.debug("network_list_for_tenant(): tenant_id=%s, params=%s"
% (tenant_id, params))
networks = []
shared = params.get('shared')
if shared is not None:
del params['shared']
if shared in (None, False):
# If a user has admin role, network list returned by Neutron API
# contains networks that do not belong to that tenant.
# So we need to specify tenant_id when calling network_list().
networks += network_list(request, tenant_id=tenant_id,
shared=False, **params)
if shared in (None, True):
# In the current Neutron API, there is no way to retrieve
# both owner networks and public networks in a single API call.
networks += network_list(request, shared=True, **params)
params['router:external'] = params.get('router:external', True)
if params['router:external'] and include_external:
if shared is not None:
params['shared'] = shared
fetched_net_ids = [n.id for n in networks]
# Retrieves external networks when router:external is not specified
# in (filtering) params or router:external=True filter is specified.
# When router:external=False is specified there is no need to query
# networking API because apparently nothing will match the filter.
ext_nets = network_list(request, **params)
networks += [n for n in ext_nets if
n.id not in fetched_net_ids]
return networks
def network_get(request, network_id, expand_subnet=True, **params):
LOG.debug("network_get(): netid=%s, params=%s" % (network_id, params))
network = neutronclient(request).show_network(network_id,
**params).get('network')
if expand_subnet:
if request.user.tenant_id == network['tenant_id'] or network['shared']:
# Since the number of subnets per network must be small,
# call subnet_get() for each subnet instead of calling
# subnet_list() once.
network['subnets'] = [subnet_get(request, sid)
for sid in network['subnets']]
return Network(network)
def network_create(request, **kwargs):
"""Create a network object.
:param request: request context
:param tenant_id: (optional) tenant id of the network created
:param name: (optional) name of the network created
:returns: Network object
"""
LOG.debug("network_create(): kwargs = %s" % kwargs)
# In the case network profiles are being used, profile id is needed.
if 'net_profile_id' in kwargs:
kwargs['n1kv:profile'] = kwargs.pop('net_profile_id')
if 'tenant_id' not in kwargs:
kwargs['tenant_id'] = request.user.project_id
body = {'network': kwargs}
network = neutronclient(request).create_network(body=body).get('network')
return Network(network)
def network_update(request, network_id, **kwargs):
LOG.debug("network_update(): netid=%s, params=%s" % (network_id, kwargs))
body = {'network': kwargs}
network = neutronclient(request).update_network(network_id,
body=body).get('network')
return Network(network)
def network_delete(request, network_id):
LOG.debug("network_delete(): netid=%s" % network_id)
neutronclient(request).delete_network(network_id)
def subnet_list(request, **params):
LOG.debug("subnet_list(): params=%s" % (params))
subnets = neutronclient(request).list_subnets(**params).get('subnets')
return [Subnet(s) for s in subnets]
def subnet_get(request, subnet_id, **params):
LOG.debug("subnet_get(): subnetid=%s, params=%s" % (subnet_id, params))
subnet = neutronclient(request).show_subnet(subnet_id,
**params).get('subnet')
return Subnet(subnet)
def subnet_create(request, network_id, **kwargs):
"""Create a subnet on a specified network.
:param request: request context
:param network_id: network id a subnet is created on
:param cidr: (optional) subnet IP address range
:param ip_version: (optional) IP version (4 or 6)
:param gateway_ip: (optional) IP address of gateway
:param tenant_id: (optional) tenant id of the subnet created
:param name: (optional) name of the subnet created
:param subnetpool_id: (optional) subnetpool to allocate prefix from
:param prefixlen: (optional) length of prefix to allocate
:returns: Subnet object
    Although both cidr+ip_version and subnetpool_id+prefixlen are listed as
    optional, you MUST pass along one of the combinations to get a successful
    result.
"""
LOG.debug("subnet_create(): netid=%s, kwargs=%s"
% (network_id, kwargs))
body = {'subnet': {'network_id': network_id}}
if 'tenant_id' not in kwargs:
kwargs['tenant_id'] = request.user.project_id
body['subnet'].update(kwargs)
subnet = neutronclient(request).create_subnet(body=body).get('subnet')
return Subnet(subnet)
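# A minimal sketch (illustrative names and addresses) chaining the two calls documented
# above: create a network, then attach an IPv4 subnet to it.
def _example_network_with_subnet(request):
    net = network_create(request, name='demo-net')
    subnet_create(request, net.id,
                  name='demo-subnet',
                  cidr='192.0.2.0/24',
                  ip_version=4,
                  gateway_ip='192.0.2.1')
    return net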
def subnet_update(request, subnet_id, **kwargs):
LOG.debug("subnet_update(): subnetid=%s, kwargs=%s" % (subnet_id, kwargs))
body = {'subnet': kwargs}
subnet = neutronclient(request).update_subnet(subnet_id,
body=body).get('subnet')
return Subnet(subnet)
def subnet_delete(request, subnet_id):
LOG.debug("subnet_delete(): subnetid=%s" % subnet_id)
neutronclient(request).delete_subnet(subnet_id)
def subnetpool_list(request, **params):
LOG.debug("subnetpool_list(): params=%s" % (params))
subnetpools = \
neutronclient(request).list_subnetpools(**params).get('subnetpools')
return [SubnetPool(s) for s in subnetpools]
def subnetpool_get(request, subnetpool_id, **params):
LOG.debug("subnetpool_get(): subnetpoolid=%s, params=%s" %
(subnetpool_id, params))
subnetpool = \
neutronclient(request).show_subnetpool(subnetpool_id,
**params).get('subnetpool')
return SubnetPool(subnetpool)
def subnetpool_create(request, name, prefixes, **kwargs):
"""Create a subnetpool.
ip_version is auto-detected in back-end.
Parameters:
request -- Request context
name -- Name for subnetpool
prefixes -- List of prefixes for pool
Keyword Arguments (optional):
min_prefixlen -- Minimum prefix length for allocations from pool
max_prefixlen -- Maximum prefix length for allocations from pool
default_prefixlen -- Default prefix length for allocations from pool
default_quota -- Default quota for allocations from pool
shared -- Subnetpool should be shared (Admin-only)
tenant_id -- Owner of subnetpool
Returns:
SubnetPool object
"""
LOG.debug("subnetpool_create(): name=%s, prefixes=%s, kwargs=%s"
% (name, prefixes, kwargs))
body = {'subnetpool':
{'name': name,
'prefixes': prefixes,
}
}
if 'tenant_id' not in kwargs:
kwargs['tenant_id'] = request.user.project_id
body['subnetpool'].update(kwargs)
subnetpool = \
neutronclient(request).create_subnetpool(body=body).get('subnetpool')
return SubnetPool(subnetpool)
def subnetpool_update(request, subnetpool_id, **kwargs):
LOG.debug("subnetpool_update(): subnetpoolid=%s, kwargs=%s" %
(subnetpool_id, kwargs))
body = {'subnetpool': kwargs}
subnetpool = \
neutronclient(request).update_subnetpool(subnetpool_id,
body=body).get('subnetpool')
return SubnetPool(subnetpool)
def subnetpool_delete(request, subnetpool_id):
LOG.debug("subnetpool_delete(): subnetpoolid=%s" % subnetpool_id)
return neutronclient(request).delete_subnetpool(subnetpool_id)
def port_list(request, **params):
LOG.debug("port_list(): params=%s" % (params))
ports = neutronclient(request).list_ports(**params).get('ports')
return [Port(p) for p in ports]
def port_get(request, port_id, **params):
LOG.debug("port_get(): portid=%s, params=%s" % (port_id, params))
port = neutronclient(request).show_port(port_id, **params).get('port')
return Port(port)
def unescape_port_kwargs(**kwargs):
    # Iterate over a copy of the keys so entries can be added/removed while looping.
    for key in list(kwargs):
if '__' in key:
kwargs[':'.join(key.split('__'))] = kwargs.pop(key)
return kwargs
def port_create(request, network_id, **kwargs):
"""Create a port on a specified network.
:param request: request context
:param network_id: network id a subnet is created on
:param device_id: (optional) device id attached to the port
:param tenant_id: (optional) tenant id of the port created
:param name: (optional) name of the port created
:returns: Port object
"""
LOG.debug("port_create(): netid=%s, kwargs=%s" % (network_id, kwargs))
# In the case policy profiles are being used, profile id is needed.
if 'policy_profile_id' in kwargs:
kwargs['n1kv:profile'] = kwargs.pop('policy_profile_id')
kwargs = unescape_port_kwargs(**kwargs)
body = {'port': {'network_id': network_id}}
if 'tenant_id' not in kwargs:
kwargs['tenant_id'] = request.user.project_id
body['port'].update(kwargs)
port = neutronclient(request).create_port(body=body).get('port')
return Port(port)
def port_delete(request, port_id):
LOG.debug("port_delete(): portid=%s" % port_id)
neutronclient(request).delete_port(port_id)
def port_update(request, port_id, **kwargs):
LOG.debug("port_update(): portid=%s, kwargs=%s" % (port_id, kwargs))
kwargs = unescape_port_kwargs(**kwargs)
body = {'port': kwargs}
port = neutronclient(request).update_port(port_id, body=body).get('port')
return Port(port)
def profile_list(request, type_p, **params):
LOG.debug("profile_list(): "
"profile_type=%(profile_type)s, params=%(params)s",
{'profile_type': type_p, 'params': params})
if type_p == 'network':
profiles = neutronclient(request).list_network_profiles(
**params).get('network_profiles')
elif type_p == 'policy':
profiles = neutronclient(request).list_policy_profiles(
**params).get('policy_profiles')
return [Profile(n) for n in profiles]
def profile_get(request, profile_id, **params):
LOG.debug("profile_get(): "
"profileid=%(profileid)s, params=%(params)s",
{'profileid': profile_id, 'params': params})
profile = neutronclient(request).show_network_profile(
profile_id, **params).get('network_profile')
return Profile(profile)
def profile_create(request, **kwargs):
LOG.debug("profile_create(): kwargs=%s", kwargs)
body = {'network_profile': {}}
body['network_profile'].update(kwargs)
profile = neutronclient(request).create_network_profile(
body=body).get('network_profile')
return Profile(profile)
def profile_delete(request, profile_id):
LOG.debug("profile_delete(): profile_id=%s", profile_id)
neutronclient(request).delete_network_profile(profile_id)
def profile_update(request, profile_id, **kwargs):
LOG.debug("profile_update(): "
"profileid=%(profileid)s, kwargs=%(kwargs)s",
{'profileid': profile_id, 'kwargs': kwargs})
body = {'network_profile': kwargs}
profile = neutronclient(request).update_network_profile(
profile_id, body=body).get('network_profile')
return Profile(profile)
def profile_bindings_list(request, type_p, **params):
LOG.debug("profile_bindings_list(): "
"profile_type=%(profile_type)s params=%(params)s",
{'profile_type': type_p, 'params': params})
if type_p == 'network':
bindings = neutronclient(request).list_network_profile_bindings(
**params).get('network_profile_bindings')
elif type_p == 'policy':
bindings = neutronclient(request).list_policy_profile_bindings(
**params).get('policy_profile_bindings')
return [Profile(n) for n in bindings]
def router_create(request, **kwargs):
LOG.debug("router_create():, kwargs=%s" % kwargs)
body = {'router': {}}
if 'tenant_id' not in kwargs:
kwargs['tenant_id'] = request.user.project_id
body['router'].update(kwargs)
router = neutronclient(request).create_router(body=body).get('router')
return Router(router)
def router_update(request, r_id, **kwargs):
LOG.debug("router_update(): router_id=%s, kwargs=%s" % (r_id, kwargs))
body = {'router': {}}
body['router'].update(kwargs)
router = neutronclient(request).update_router(r_id, body=body)
return Router(router['router'])
def router_get(request, router_id, **params):
router = neutronclient(request).show_router(router_id,
**params).get('router')
return Router(router)
def router_list(request, **params):
routers = neutronclient(request).list_routers(**params).get('routers')
return [Router(r) for r in routers]
def router_list_on_l3_agent(request, l3_agent_id, **params):
routers = neutronclient(request).\
list_routers_on_l3_agent(l3_agent_id,
**params).get('routers')
return [Router(r) for r in routers]
def router_delete(request, router_id):
neutronclient(request).delete_router(router_id)
def router_add_interface(request, router_id, subnet_id=None, port_id=None):
body = {}
if subnet_id:
body['subnet_id'] = subnet_id
if port_id:
body['port_id'] = port_id
client = neutronclient(request)
return client.add_interface_router(router_id, body)
def router_remove_interface(request, router_id, subnet_id=None, port_id=None):
body = {}
if subnet_id:
body['subnet_id'] = subnet_id
if port_id:
body['port_id'] = port_id
neutronclient(request).remove_interface_router(router_id, body)
def router_add_gateway(request, router_id, network_id):
body = {'network_id': network_id}
neutronclient(request).add_gateway_router(router_id, body)
def router_remove_gateway(request, router_id):
neutronclient(request).remove_gateway_router(router_id)
def router_static_route_list(request, router_id=None):
router = router_get(request, router_id)
try:
routes = [RouterStaticRoute(r) for r in router.routes]
except AttributeError:
LOG.debug("router_static_route_list(): router_id=%s, "
"router=%s", (router_id, router))
return []
return routes
def router_static_route_remove(request, router_id, route_ids):
currentroutes = router_static_route_list(request, router_id=router_id)
newroutes = []
for oldroute in currentroutes:
if oldroute.id not in route_ids:
newroutes.append({'nexthop': oldroute.nexthop,
'destination': oldroute.destination})
body = {'routes': newroutes}
new = router_update(request, router_id, **body)
return new
def router_static_route_add(request, router_id, newroute):
body = {}
currentroutes = router_static_route_list(request, router_id=router_id)
body['routes'] = [newroute] + [{'nexthop': r.nexthop,
'destination': r.destination}
for r in currentroutes]
new = router_update(request, router_id, **body)
return new
def tenant_quota_get(request, tenant_id):
return base.QuotaSet(neutronclient(request).show_quota(tenant_id)['quota'])
def tenant_quota_update(request, tenant_id, **kwargs):
quotas = {'quota': kwargs}
return neutronclient(request).update_quota(tenant_id, quotas)
def agent_list(request, **params):
agents = neutronclient(request).list_agents(**params)
return [Agent(a) for a in agents['agents']]
def list_dhcp_agent_hosting_networks(request, network, **params):
agents = neutronclient(request).list_dhcp_agent_hosting_networks(network,
**params)
return [Agent(a) for a in agents['agents']]
def list_l3_agent_hosting_router(request, router, **params):
agents = neutronclient(request).list_l3_agent_hosting_routers(router,
**params)
return [Agent(a) for a in agents['agents']]
def show_network_ip_availability(request, network_id):
ip_availability = neutronclient(request).show_network_ip_availability(
network_id)
return ip_availability
def add_network_to_dhcp_agent(request, dhcp_agent, network_id):
body = {'network_id': network_id}
return neutronclient(request).add_network_to_dhcp_agent(dhcp_agent, body)
def remove_network_from_dhcp_agent(request, dhcp_agent, network_id):
return neutronclient(request).remove_network_from_dhcp_agent(dhcp_agent,
network_id)
def provider_list(request):
providers = neutronclient(request).list_service_providers()
return providers['service_providers']
def servers_update_addresses(request, servers, all_tenants=False):
"""Retrieve servers networking information from Neutron if enabled.
Should be used when up-to-date networking information is required,
and Nova's networking info caching mechanism is not fast enough.
"""
# Get all (filtered for relevant servers) information from Neutron
try:
ports = list_resources_with_long_filters(
port_list, 'device_id', [instance.id for instance in servers],
request=request)
fips = FloatingIpManager(request)
if fips.is_supported():
floating_ips = list_resources_with_long_filters(
fips.list, 'port_id', [port.id for port in ports],
all_tenants=all_tenants)
else:
floating_ips = []
networks = list_resources_with_long_filters(
network_list, 'id', set([port.network_id for port in ports]),
request=request)
except Exception:
error_message = _('Unable to connect to Neutron.')
LOG.error(error_message)
messages.error(request, error_message)
return
# Map instance to its ports
instances_ports = collections.defaultdict(list)
for port in ports:
instances_ports[port.device_id].append(port)
# Map port to its floating ips
ports_floating_ips = collections.defaultdict(list)
for fip in floating_ips:
ports_floating_ips[fip.port_id].append(fip)
# Map network id to its name
network_names = dict(((network.id, network.name) for network in networks))
for server in servers:
try:
addresses = _server_get_addresses(
request,
server,
instances_ports,
ports_floating_ips,
network_names)
except Exception as e:
LOG.error(six.text_type(e))
else:
server.addresses = addresses
def _server_get_addresses(request, server, ports, floating_ips, network_names):
def _format_address(mac, ip, type):
try:
version = netaddr.IPAddress(ip).version
except Exception:
error_message = _('Unable to parse IP address %s.') % ip
LOG.error(error_message)
messages.error(request, error_message)
raise
return {u'OS-EXT-IPS-MAC:mac_addr': mac,
u'version': version,
u'addr': ip,
u'OS-EXT-IPS:type': type}
addresses = collections.defaultdict(list)
instance_ports = ports.get(server.id, [])
for port in instance_ports:
network_name = network_names.get(port.network_id)
if network_name is not None:
for fixed_ip in port.fixed_ips:
addresses[network_name].append(
_format_address(port.mac_address,
fixed_ip['ip_address'],
u'fixed'))
port_fips = floating_ips.get(port.id, [])
for fip in port_fips:
addresses[network_name].append(
_format_address(port.mac_address,
fip.floating_ip_address,
u'floating'))
return dict(addresses)
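# Illustrative shape of the value returned above (hypothetical network name,
# MAC and IP addresses, not taken from any real deployment):
# {'private-net': [{'OS-EXT-IPS-MAC:mac_addr': 'fa:16:3e:aa:bb:cc',
#                   'version': 4, 'addr': '10.0.0.5',
#                   'OS-EXT-IPS:type': u'fixed'},
#                  {'OS-EXT-IPS-MAC:mac_addr': 'fa:16:3e:aa:bb:cc',
#                   'version': 4, 'addr': '172.24.4.10',
#                   'OS-EXT-IPS:type': u'floating'}]}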
@memoized
def list_extensions(request):
extensions_list = neutronclient(request).list_extensions()
if 'extensions' in extensions_list:
return tuple(extensions_list['extensions'])
else:
return ()
@memoized
def is_extension_supported(request, extension_alias):
extensions = list_extensions(request)
for extension in extensions:
if extension['alias'] == extension_alias:
return True
else:
return False
def is_enabled_by_config(name, default=True):
network_config = getattr(settings, 'OPENSTACK_NEUTRON_NETWORK', {})
return network_config.get(name, default)
@memoized
def is_service_enabled(request, config_name, ext_name):
return (is_enabled_by_config(config_name) and
is_extension_supported(request, ext_name))
@memoized
def is_quotas_extension_supported(request):
return (is_enabled_by_config('enable_quotas', False) and
is_extension_supported(request, 'quotas'))
# Using this mechanism till a better plugin/sub-plugin detection
# mechanism is available.
# When using specific plugins the profile_support can be
# turned on if needed to configure and/or use profiles.
# Since this is a temporary mechanism used to detect profile_support,
# @memoized is not being used.
# TODO(absubram): Change this config variable check with
# subplugin/plugin detection API when it becomes available.
def is_port_profiles_supported():
network_config = getattr(settings, 'OPENSTACK_NEUTRON_NETWORK', {})
# Can be used to check for vendor specific plugin
profile_support = network_config.get('profile_support', None)
if str(profile_support).lower() == 'cisco':
    return True
return False
# FEATURE_MAP is used to define:
# - related neutron extension name (key: "extension")
# - corresponding dashboard config (key: "config")
# - RBAC policies (key: "policies")
# If a key is not defined, the corresponding permission check is skipped.
FEATURE_MAP = {
'dvr': {
'extension': 'dvr',
'config': {
'name': 'enable_distributed_router',
'default': False,
},
'policies': {
'get': 'get_router:distributed',
'create': 'create_router:distributed',
'update': 'update_router:distributed',
}
},
'l3-ha': {
'extension': 'l3-ha',
'config': {'name': 'enable_ha_router',
'default': False},
'policies': {
'get': 'get_router:ha',
'create': 'create_router:ha',
'update': 'update_router:ha',
}
},
}
def get_feature_permission(request, feature, operation=None):
"""Check if a feature-specific field can be displayed.
This method checks the permission for a feature-specific field.
Such a field is usually provided through a Neutron extension.
:param request: Request Object
:param feature: feature name defined in FEATURE_MAP
:param operation (optional): Operation type. The valid value should be
defined in FEATURE_MAP[feature]['policies']
It must be specified if FEATURE_MAP[feature] has 'policies'.
"""
network_config = getattr(settings, 'OPENSTACK_NEUTRON_NETWORK', {})
feature_info = FEATURE_MAP.get(feature)
if not feature_info:
# Translators: Only used inside Horizon code and invisible to users
raise ValueError(_("The requested feature '%(feature)s' is unknown. "
"Please make sure to specify a feature defined "
"in FEATURE_MAP."))
# Check dashboard settings
feature_config = feature_info.get('config')
if feature_config:
if not network_config.get(feature_config['name'],
feature_config['default']):
return False
# Check policy
feature_policies = feature_info.get('policies')
if feature_policies:
policy_name = feature_policies.get(operation)
if not policy_name:
# Translators: Only used inside Horizon code and invisible to users
raise ValueError(_("The 'operation' parameter for "
"get_feature_permission '%(feature)s' "
"is invalid. It should be one of %(allowed)s")
% {'feature': feature,
'allowed': ' '.join(feature_policies.keys())})
role = (('network', policy_name),)
if not policy.check(role, request):
return False
# Check if a required extension is enabled
feature_extension = feature_info.get('extension')
if feature_extension:
try:
return is_extension_supported(request, feature_extension)
except Exception:
msg = (_("Failed to check Neutron '%s' extension is not supported")
% feature_extension)
LOG.info(msg)
return False
# If all checks pass, the given feature is allowed.
return True
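# A minimal usage sketch (hypothetical helper, not part of the original
# module): router forms typically consult get_feature_permission() to decide
# whether to expose the DVR and HA fields described by FEATURE_MAP above.
def _example_router_form_flags(request):
    return {
        'dvr_enabled': get_feature_permission(request, 'dvr', 'create'),
        'ha_enabled': get_feature_permission(request, 'l3-ha', 'create'),
    }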
|
|
# Copyright 2013 IBM Corp
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Backup driver for IBM Tivoli Storage Manager (TSM).
Implementation of a backup service that uses IBM Tivoli Storage Manager (TSM)
as the backend. The driver uses TSM command line dsmc utility to
run the backup and restore operations.
This version supports backup of block devices (e.g. FC, iSCSI, local) as well
as regular files.
A prerequisite for using the IBM TSM backup service is configuring the
Cinder host for using TSM.
"""
import json
import os
import stat
from oslo_concurrency import processutils
from oslo_config import cfg
from oslo_log import log as logging
from cinder.backup import driver
from cinder import exception
from cinder.i18n import _LE, _
from cinder import utils
LOG = logging.getLogger(__name__)
tsm_opts = [
cfg.StrOpt('backup_tsm_volume_prefix',
default='backup',
help='Volume prefix for the backup id when backing up to TSM'),
cfg.StrOpt('backup_tsm_password',
default='password',
help='TSM password for the running username',
secret=True),
cfg.BoolOpt('backup_tsm_compression',
default=True,
help='Enable or Disable compression for backups'),
]
CONF = cfg.CONF
CONF.register_opts(tsm_opts)
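# Example cinder.conf snippet consuming the options registered above
# (illustrative values; the backup_driver path is an assumption and may
# differ between Cinder releases):
#   [DEFAULT]
#   backup_driver = cinder.backup.drivers.tsm
#   backup_tsm_volume_prefix = backup
#   backup_tsm_password = <TSM password>
#   backup_tsm_compression = True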
VALID_BACKUP_MODES = ['image', 'file']
def _get_backup_metadata(backup, operation):
"""Return metadata persisted with backup object."""
svc_metadata = backup['service_metadata']
try:
svc_dict = json.loads(svc_metadata)
backup_path = svc_dict.get('backup_path')
backup_mode = svc_dict.get('backup_mode')
except TypeError:
# for backwards compatibility
vol_prefix = CONF.backup_tsm_volume_prefix
backup_id = backup['id']
backup_path = utils.make_dev_path('%s-%s' %
(vol_prefix, backup_id))
backup_mode = 'image'
if backup_mode not in VALID_BACKUP_MODES:
volume_id = backup['volume_id']
backup_id = backup['id']
err = (_('%(op)s: backup %(bck_id)s, volume %(vol_id)s failed. '
'Backup object has unexpected mode. Image or file '
'backups supported, actual mode is %(vol_mode)s.')
% {'op': operation,
'bck_id': backup_id,
'vol_id': volume_id,
'vol_mode': backup_mode})
LOG.error(err)
raise exception.InvalidBackup(reason=err)
return backup_path, backup_mode
def _image_mode(backup_mode):
"""True if backup is image type."""
return backup_mode == 'image'
def _make_link(volume_path, backup_path, vol_id):
"""Create a hard link for the volume block device.
The IBM TSM client performs an image backup on a block device.
The name of the block device is the backup prefix plus the backup id
:param volume_path: real device path name for volume
:param backup_path: path name TSM will use as volume to backup
:param vol_id: id of volume to backup (for reporting)
:raises: InvalidBackup
"""
try:
utils.execute('ln', volume_path, backup_path,
run_as_root=True,
check_exit_code=True)
except processutils.ProcessExecutionError as exc:
err = (_('backup: %(vol_id)s failed to create device hardlink '
'from %(vpath)s to %(bpath)s.\n'
'stdout: %(out)s\n stderr: %(err)s')
% {'vol_id': vol_id,
'vpath': volume_path,
'bpath': backup_path,
'out': exc.stdout,
'err': exc.stderr})
LOG.error(err)
raise exception.InvalidBackup(reason=err)
def _create_unique_device_link(backup_id, volume_path, volume_id, bckup_mode):
"""Create a consistent hardlink for the volume block device.
Create a consistent hardlink using the backup id so TSM
will be able to backup and restore to the same block device.
:param backup_id: the backup id
:param volume_path: real path of the backup/restore device
:param volume_id: Volume id for backup or as restore target
:param bckup_mode: TSM backup mode, either 'image' or 'file'
:raises: InvalidBackup
:returns str -- hardlink path of the volume block device
"""
if _image_mode(bckup_mode):
hardlink_path = utils.make_dev_path('%s-%s' %
(CONF.backup_tsm_volume_prefix,
backup_id))
else:
dir, volname = os.path.split(volume_path)
hardlink_path = ('%s/%s-%s' %
(dir,
CONF.backup_tsm_volume_prefix,
backup_id))
_make_link(volume_path, hardlink_path, volume_id)
return hardlink_path
def _check_dsmc_output(output, check_attrs, exact_match=True):
"""Check dsmc command line utility output.
Parse the output of the dsmc command and make sure that a given
attribute is present, and that it has the proper value.
TSM attribute has the format of "text : value".
:param output: TSM output to parse
:param check_attrs: text to identify in the output
:param exact_match: if True, the check will pass only if the parsed
value is equal to the value specified in check_attrs. If false, the
check will pass if the parsed value is greater than or equal to the
value specified in check_attrs. This is needed because for file
backups, the parent directories may also be included the first time a
volume is backed up.
:returns bool -- indicates if the required output attributes were found in output
"""
parsed_attrs = {}
for line in output.split('\n'):
# parse TSM output: look for "msg : value" pairs
key, sep, val = line.partition(':')
if sep is not None and key is not None and len(val.strip()) > 0:
parsed_attrs[key] = val.strip()
for ckey, cval in check_attrs.items():
if ckey not in parsed_attrs:
return False
elif exact_match and parsed_attrs[ckey] != cval:
return False
elif not exact_match and int(parsed_attrs[ckey]) < int(cval):
return False
return True
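def _example_check_dsmc_output():
    """Hypothetical illustration (not part of the original driver): a trimmed
    dsmc summary and the attribute check used after a backup succeeds."""
    sample = ('Total number of objects inspected: 1\n'
              'Total number of objects backed up: 1\n'
              'Total number of objects failed: 0\n')
    return _check_dsmc_output(sample,
                              {'Total number of objects backed up': '1'},
                              exact_match=False)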
def _get_volume_realpath(volume_file, volume_id):
"""Get the real path for the volume block device.
If the volume is not a block device or a regular file issue an
InvalidBackup exception.
:param volume_file: file object representing the volume
:param volume_id: Volume id for backup or as restore target
:raises: InvalidBackup
:returns str -- real path of volume device
:returns str -- backup mode to be used
"""
try:
# Get real path
volume_path = os.path.realpath(volume_file.name)
# Verify that path is a block device
volume_mode = os.stat(volume_path).st_mode
if stat.S_ISBLK(volume_mode):
backup_mode = 'image'
elif stat.S_ISREG(volume_mode):
backup_mode = 'file'
else:
err = (_('backup: %(vol_id)s failed. '
'%(path)s is unexpected file type. Block or regular '
'files supported, actual file mode is %(vol_mode)s.')
% {'vol_id': volume_id,
'path': volume_path,
'vol_mode': volume_mode})
LOG.error(err)
raise exception.InvalidBackup(reason=err)
except AttributeError:
err = (_('backup: %(vol_id)s failed. Cannot obtain real path '
'to volume at %(path)s.')
% {'vol_id': volume_id,
'path': volume_file})
LOG.error(err)
raise exception.InvalidBackup(reason=err)
except OSError:
err = (_('backup: %(vol_id)s failed. '
'%(path)s is not a file.')
% {'vol_id': volume_id,
'path': volume_path})
LOG.error(err)
raise exception.InvalidBackup(reason=err)
return volume_path, backup_mode
def _cleanup_device_hardlink(hardlink_path, volume_path, volume_id):
"""Remove the hardlink for the volume block device.
:param hardlink_path: hardlink to the volume block device
:param volume_path: real path of the backup/restore device
:param volume_id: Volume id for backup or as restore target
"""
try:
utils.execute('rm',
'-f',
hardlink_path,
run_as_root=True)
except processutils.ProcessExecutionError as exc:
LOG.error(_LE('backup: %(vol_id)s failed to remove backup hardlink '
'from %(vpath)s to %(bpath)s.\n'
'stdout: %(out)s\n stderr: %(err)s.'),
{'vol_id': volume_id,
'vpath': volume_path,
'bpath': hardlink_path,
'out': exc.stdout,
'err': exc.stderr})
class TSMBackupDriver(driver.BackupDriver):
"""Provides backup, restore and delete of volumes backup for TSM."""
DRIVER_VERSION = '1.0.0'
def __init__(self, context, db_driver=None):
super(TSMBackupDriver, self).__init__(context, db_driver)
self.tsm_password = CONF.backup_tsm_password
self.volume_prefix = CONF.backup_tsm_volume_prefix
def _do_backup(self, backup_path, vol_id, backup_mode):
"""Perform the actual backup operation.
:param backup_path: volume path
:param vol_id: volume id
:param backup_mode: file mode of source volume; 'image' or 'file'
:raises: InvalidBackup
"""
backup_attrs = {'Total number of objects backed up': '1'}
compr_flag = 'yes' if CONF.backup_tsm_compression else 'no'
backup_cmd = ['dsmc', 'backup']
if _image_mode(backup_mode):
backup_cmd.append('image')
backup_cmd.extend(['-quiet',
'-compression=%s' % compr_flag,
'-password=%s' % self.tsm_password,
backup_path])
out, err = utils.execute(*backup_cmd,
run_as_root=True,
check_exit_code=False)
success = _check_dsmc_output(out, backup_attrs, exact_match=False)
if not success:
err = (_('backup: %(vol_id)s failed to obtain backup '
'success notification from server.\n'
'stdout: %(out)s\n stderr: %(err)s')
% {'vol_id': vol_id,
'out': out,
'err': err})
LOG.error(err)
raise exception.InvalidBackup(reason=err)
def _do_restore(self, backup_path, restore_path, vol_id, backup_mode):
"""Perform the actual restore operation.
:param backup_path: the path the backup was created from; this
identifies the backup to TSM
:param restore_path: volume path to restore into
:param vol_id: volume id
:param backup_mode: mode used to create the backup ('image' or 'file')
:raises: InvalidBackup
"""
restore_attrs = {'Total number of objects restored': '1'}
restore_cmd = ['dsmc', 'restore']
if _image_mode(backup_mode):
restore_cmd.append('image')
restore_cmd.append('-noprompt') # suppress prompt
else:
restore_cmd.append('-replace=yes') # suppress prompt
restore_cmd.extend(['-quiet',
'-password=%s' % self.tsm_password,
backup_path])
if restore_path != backup_path:
restore_cmd.append(restore_path)
out, err = utils.execute(*restore_cmd,
run_as_root=True,
check_exit_code=False)
success = _check_dsmc_output(out, restore_attrs)
if not success:
err = (_('restore: %(vol_id)s failed.\n'
'stdout: %(out)s\n stderr: %(err)s.')
% {'vol_id': vol_id,
'out': out,
'err': err})
LOG.error(err)
raise exception.InvalidBackup(reason=err)
def backup(self, backup, volume_file, backup_metadata=False):
"""Backup the given volume to TSM.
TSM performs a backup of a volume. The volume_file is used
to determine the path of the block device that TSM will back up.
:param backup: backup information for volume
:param volume_file: file object representing the volume
:param backup_metadata: whether or not to backup volume metadata
:raises InvalidBackup
"""
# TODO(dosaboy): this needs implementing (see backup.drivers.ceph for
# an example)
if backup_metadata:
msg = _("Volume metadata backup requested but this driver does "
"not yet support this feature.")
raise exception.InvalidBackup(reason=msg)
backup_id = backup['id']
volume_id = backup['volume_id']
volume_path, backup_mode = _get_volume_realpath(volume_file,
volume_id)
LOG.debug('Starting backup of volume: %(volume_id)s to TSM,'
' volume path: %(volume_path)s, mode: %(mode)s.',
{'volume_id': volume_id,
'volume_path': volume_path,
'mode': backup_mode})
backup_path = _create_unique_device_link(backup_id,
volume_path,
volume_id,
backup_mode)
service_metadata = {'backup_mode': backup_mode,
'backup_path': backup_path}
self.db.backup_update(self.context,
backup_id,
{'service_metadata':
json.dumps(service_metadata)})
try:
self._do_backup(backup_path, volume_id, backup_mode)
except processutils.ProcessExecutionError as exc:
err = (_('backup: %(vol_id)s failed to run dsmc '
'on %(bpath)s.\n'
'stdout: %(out)s\n stderr: %(err)s')
% {'vol_id': volume_id,
'bpath': backup_path,
'out': exc.stdout,
'err': exc.stderr})
LOG.error(err)
raise exception.InvalidBackup(reason=err)
except exception.Error as exc:
err = (_('backup: %(vol_id)s failed to run dsmc '
'due to invalid arguments '
'on %(bpath)s.\n'
'stdout: %(out)s\n stderr: %(err)s')
% {'vol_id': volume_id,
'bpath': backup_path,
'out': exc.stdout,
'err': exc.stderr})
LOG.error(err)
raise exception.InvalidBackup(reason=err)
finally:
_cleanup_device_hardlink(backup_path, volume_path, volume_id)
LOG.debug('Backup %s finished.', backup_id)
def restore(self, backup, volume_id, volume_file):
"""Restore the given volume backup from TSM server.
:param backup: backup information for volume
:param volume_id: volume id
:param volume_file: file object representing the volume
:raises InvalidBackup
"""
backup_id = backup['id']
# backup_path is the path that was originally backed up.
backup_path, backup_mode = _get_backup_metadata(backup, 'restore')
LOG.debug('Starting restore of backup from TSM '
'to volume %(volume_id)s, '
'backup: %(backup_id)s, '
'mode: %(mode)s.',
{'volume_id': volume_id,
'backup_id': backup_id,
'mode': backup_mode})
# volume_path is the path to restore into. This may
# be different than the original volume.
volume_path, unused = _get_volume_realpath(volume_file,
volume_id)
restore_path = _create_unique_device_link(backup_id,
volume_path,
volume_id,
backup_mode)
try:
self._do_restore(backup_path, restore_path, volume_id, backup_mode)
except processutils.ProcessExecutionError as exc:
err = (_('restore: %(vol_id)s failed to run dsmc '
'on %(bpath)s.\n'
'stdout: %(out)s\n stderr: %(err)s')
% {'vol_id': volume_id,
'bpath': restore_path,
'out': exc.stdout,
'err': exc.stderr})
LOG.error(err)
raise exception.InvalidBackup(reason=err)
except exception.Error as exc:
err = (_('restore: %(vol_id)s failed to run dsmc '
'due to invalid arguments '
'on %(bpath)s.\n'
'stdout: %(out)s\n stderr: %(err)s')
% {'vol_id': volume_id,
'bpath': restore_path,
'out': exc.stdout,
'err': exc.stderr})
LOG.error(err)
raise exception.InvalidBackup(reason=err)
finally:
_cleanup_device_hardlink(restore_path, volume_path, volume_id)
LOG.debug('Restore %(backup_id)s to %(volume_id)s finished.',
{'backup_id': backup_id,
'volume_id': volume_id})
def delete(self, backup):
"""Delete the given backup from TSM server.
:param backup: backup information for volume
:raises InvalidBackup
"""
delete_attrs = {'Total number of objects deleted': '1'}
delete_path, backup_mode = _get_backup_metadata(backup, 'delete')
volume_id = backup['volume_id']
LOG.debug('Delete started for backup: %(backup)s, mode: %(mode)s.',
{'backup': backup['id'],
'mode': backup_mode})
try:
out, err = utils.execute('dsmc',
'delete',
'backup',
'-quiet',
'-noprompt',
'-objtype=%s' % backup_mode,
'-password=%s' % self.tsm_password,
delete_path,
run_as_root=True,
check_exit_code=False)
except processutils.ProcessExecutionError as exc:
err = (_('delete: %(vol_id)s failed to run dsmc with '
'stdout: %(out)s\n stderr: %(err)s')
% {'vol_id': volume_id,
'out': exc.stdout,
'err': exc.stderr})
LOG.error(err)
raise exception.InvalidBackup(reason=err)
except exception.Error as exc:
err = (_('delete: %(vol_id)s failed to run dsmc '
'due to invalid arguments with '
'stdout: %(out)s\n stderr: %(err)s')
% {'vol_id': volume_id,
'out': exc.stdout,
'err': exc.stderr})
LOG.error(err)
raise exception.InvalidBackup(reason=err)
success = _check_dsmc_output(out, delete_attrs)
if not success:
# log error if tsm cannot delete the backup object
# but do not raise exception so that cinder backup
# object can be removed.
LOG.error(_LE('delete: %(vol_id)s failed with '
'stdout: %(out)s\n stderr: %(err)s'),
{'vol_id': volume_id,
'out': out,
'err': err})
LOG.debug('Delete %s finished.', backup['id'])
def get_backup_driver(context):
return TSMBackupDriver(context)
|
|
# Copyright (c) 2008, Aldo Cortesi. All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import functools
import os
import operator
import sys
import warnings
import traceback
import importlib
import six
from six.moves import reduce
from . import xcbq
from .log_utils import logger
class QtileError(Exception):
pass
def lget(o, v):
try:
return o[v]
except (IndexError, TypeError):
return None
def translate_masks(modifiers):
"""
Translate a modifier mask specified as a list of strings into an or-ed
bit representation.
"""
masks = []
for i in modifiers:
try:
masks.append(xcbq.ModMasks[i])
except KeyError:
raise KeyError("Unknown modifier: %s" % i)
if masks:
return reduce(operator.or_, masks)
else:
return 0
def translate_modifiers(mask):
r = []
for k, v in xcbq.ModMasks.items():
if mask & v:
r.append(k)
return r
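def _example_modifier_roundtrip():
    # Hypothetical sketch (not part of the original module): "mod4" and
    # "shift" are or-ed into a single X11 modifier mask, and
    # translate_modifiers() maps the mask back to the modifier names.
    mask = translate_masks(["mod4", "shift"])
    return translate_modifiers(mask)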
def shuffleUp(lst):
if len(lst) > 1:
c = lst[-1]
lst.remove(c)
lst.insert(0, c)
def shuffleDown(lst):
if len(lst) > 1:
c = lst[0]
lst.remove(c)
lst.append(c)
if sys.version_info < (3, 3):
class lru_cache(object):
"""
A decorator that implements a self-expiring LRU cache for class
methods (not functions!).
Cache data is tracked as attributes on the object itself. There is
therefore a separate cache for each object instance.
"""
def __init__(self, maxsize=128, typed=False):
self.size = maxsize
def __call__(self, f):
cache_name = "_cached_{0}".format(f.__name__)
cache_list_name = "_cachelist_{0}".format(f.__name__)
size = self.size
@functools.wraps(f)
def wrap(self, *args):
if not hasattr(self, cache_name):
setattr(self, cache_name, {})
setattr(self, cache_list_name, [])
cache = getattr(self, cache_name)
cache_list = getattr(self, cache_list_name)
if args in cache:
cache_list.remove(args)
cache_list.insert(0, args)
return cache[args]
else:
ret = f(self, *args)
cache_list.insert(0, args)
cache[args] = ret
if len(cache_list) > size:
d = cache_list.pop()
cache.pop(d)
return ret
return wrap
else:
from functools import lru_cache # noqa: F401
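# A minimal sketch (hypothetical class, not part of the original module)
# showing the per-instance method cache provided by the decorator above.
class _ExampleCachedLookup(object):
    @lru_cache(maxsize=16)
    def lookup(self, key):
        return key * 2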
def rgb(x):
"""
Returns a valid RGBA tuple.
Here are some valid specifications:
#ff0000
ff0000
with alpha: ff0000.5
(255, 0, 0)
(255, 0, 0, 0.5)
"""
if isinstance(x, (tuple, list)):
if len(x) == 4:
alpha = x[3]
else:
alpha = 1
return (x[0] / 255.0, x[1] / 255.0, x[2] / 255.0, alpha)
elif isinstance(x, six.string_types):
if x.startswith("#"):
x = x[1:]
if "." in x:
x, alpha = x.split(".")
alpha = float("0." + alpha)
else:
alpha = 1
if len(x) != 6:
raise ValueError("RGB specifier must be 6 characters long.")
vals = [int(i, 16) for i in (x[0:2], x[2:4], x[4:6])]
vals.append(alpha)
return rgb(vals)
raise ValueError("Invalid RGB specifier.")
def hex(x):
r, g, b, _ = rgb(x)
return '#%02x%02x%02x' % (int(r * 255), int(g * 255), int(b * 255))
def scrub_to_utf8(text):
if not text:
return u""
elif isinstance(text, six.text_type):
return text
else:
return text.decode("utf-8", "ignore")
# WARNINGS
class UnixCommandNotFound(Warning):
pass
class UnixCommandRuntimeError(Warning):
pass
def catch_exception_and_warn(warning=Warning, return_on_exception=None,
excepts=Exception):
"""
.. function:: catch_exception_and_warn([warning, return_on_exception,
excepts])
Returns a decorator that attempts to call the wrapped func, catches the
specified exception (or exception tuple) and issues a warning instead.
The wrapped call returns return_on_exception when the specified
exception is raised.
:param func: a callable to be wrapped
:param warning: the warning class to issue if an exception is
raised
:param return_on_exception: the default return value of the function
if an exception is raised
:param excepts: an exception class (or tuple of exception classes) to
catch during the execution of func
:type excepts: Exception or tuple of Exception classes
:type warning: Warning
:rtype: a callable
"""
def decorator(func):
@functools.wraps(func)
def wrapper(*args, **kwargs):
return_value = return_on_exception
try:
return_value = func(*args, **kwargs)
except excepts as err:
logger.warn(err.strerror)
warnings.warn(err.strerror, warning)
return return_value
return wrapper
return decorator
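# A hedged usage sketch (hypothetical function, not part of the original
# module): an IOError/OSError raised while reading the file is downgraded to
# a UnixCommandRuntimeError warning and the empty-string default is returned.
@catch_exception_and_warn(warning=UnixCommandRuntimeError,
                          return_on_exception="",
                          excepts=(IOError, OSError))
def _example_read_machine_id():
    with open("/etc/machine-id") as f:
        return f.read().strip()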
def get_cache_dir():
"""
Returns the cache directory, creating it if it doesn't exist
"""
cache_directory = os.path.expandvars('$XDG_CACHE_HOME')
if cache_directory == '$XDG_CACHE_HOME':
# if variable wasn't set
cache_directory = os.path.expanduser("~/.cache")
cache_directory = os.path.join(cache_directory, 'qtile')
if not os.path.exists(cache_directory):
os.makedirs(cache_directory)
return cache_directory
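# Example resolution (hypothetical paths): with XDG_CACHE_HOME=/home/u/.cache
# this returns /home/u/.cache/qtile; with the variable unset it falls back to
# ~/.cache/qtile, creating the directory if necessary.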
def describe_attributes(obj, attrs, func=lambda x: x):
"""
Helper for __repr__ functions to list attributes with truthy values only
(or values that return a truthy value by func)
"""
pairs = []
for attr in attrs:
value = getattr(obj, attr, None)
if func(value):
pairs.append('%s=%s' % (attr, value))
return ', '.join(pairs)
def safe_import(module_names, class_name, globals_, fallback=None):
"""
Try to import a module; if it fails because of an ImportError,
log a warning at WARNING level and log the traceback at DEBUG level
"""
module_path = '.'.join(module_names)
if type(class_name) is list:
for name in class_name:
safe_import(module_names, name, globals_)
return
package = __package__
# TODO: remove when we really want to drop 3.2 support
# python 3.2 don't set __package__
if not package:
package = __name__
try:
module = importlib.import_module(module_path, package)
globals_[class_name] = getattr(module, class_name)
except ImportError as error:
logger.warning("Unmet dependencies for optional Widget: '%s.%s', %s",
module_path, class_name, error)
logger.debug("%s", traceback.format_exc())
if fallback:
globals_[class_name] = fallback(module_path, class_name, error)
|
|
"""@file vad_timings_processor.py
contains the VadTimingsProcessor class"""
import os
import subprocess
import StringIO
import scipy.io.wavfile as wav
import numpy as np
import processor
import gzip
from nabu.processing.feature_computers import feature_computer_factory
class VadTimingsProcessor(processor.Processor):
"""a processor for convert VAD timings into targets"""
def __init__(self, conf, segment_lengths):
"""VadTimingsProcessor constructor
Args:
conf: VadTimingsProcessor configuration as a dict of strings
segment_lengths: A list containing the desired lengths of segments.
Possibly multiple segment lengths"""
# create the feature computer
self.comp = feature_computer_factory.factory('frames')(conf)
self.winlen = float(conf['winlen'])
self.winstep = float(conf['winstep'])
# set the length of the segments. Possibly multiple segment lengths
self.segment_lengths = segment_lengths
self.nrS = int(conf['nrs'])
# initialize the metadata
self.dim = self.nrS
self.max_length = np.zeros(len(self.segment_lengths))
# self.sequence_length_histogram = np.zeros(0, dtype=np.int32)
self.nontime_dims = [self.dim]
super(VadTimingsProcessor, self).__init__(conf)
def __call__(self, datalines):
"""process the data in dataline
Args:
datalines: in format 'mix_wav spk_id1 seg1_start seg1_end seg2_start seg2_end ... spk_id1 spk_id2 ... spk_id2'
Returns:
segmented_data: The segmented features as a list of numpy arrays per segment length
utt_info: some info on the utterance"""
utt_info = dict()
split_lines = datalines.split(' ')
mix_file = split_lines.pop(0)
# read the wav file
rate, utt = _read_wav(mix_file)
# compute the features
frames = self.comp(utt, rate)
audio_length = np.shape(frames)[-2]
vad_indicator = np.zeros([audio_length, self.dim], dtype=np.bool)
ind = 0
spk_ind = 0
new_id = True
prev_id = ''
while True:
if new_id:
prev_id = split_lines[ind]
ind += 1
new_id = False
if prev_id == split_lines[ind]:
ind += 1
new_id = True
spk_ind += 1
else:
seg_st = float(split_lines[ind])
seg_st_frames = sec2frames(seg_st, self.winlen, self.winstep)
if seg_st_frames > audio_length-1:
seg_st_frames = audio_length-1
seg_end = float(split_lines[ind+1])
seg_end_frames = sec2frames(seg_end, self.winlen, self.winstep)
if seg_end_frames > audio_length:
seg_end_frames = audio_length
vad_indicator[seg_st_frames:seg_end_frames, spk_ind] = 1
ind += 2
if ind >= len(split_lines):
break
# split the data for all desired segment lengths
segmented_data = self.segment_data(vad_indicator)
# update the metadata
for i, seg_length in enumerate(self.segment_lengths):
self.max_length[i] = max(self.max_length[i], np.shape(segmented_data[seg_length][0])[0])
# seq_length = np.shape(segmented_data[seg_length][0])[0]
# if seq_length >= np.shape(self.sequence_length_histogram[i])[0]:
# self.sequence_length_histogram[i] = np.concatenate(
# [self.sequence_length_histogram[i], np.zeros(
# seq_length-np.shape(self.sequence_length_histogram[i])[0]+1,
# dtype=np.int32)]
# )
# self.sequence_length_histogram[i][seq_length] += len(segmented_data[seg_length])
return segmented_data, utt_info
def write_metadata(self, datadir):
"""write the processor metadata to disk
Args:
datadir: the directory where the metadata should be written"""
for i, seg_length in enumerate(self.segment_lengths):
seg_dir = os.path.join(datadir, seg_length)
# with open(os.path.join(seg_dir, 'sequence_length_histogram.npy'), 'w') as fid:
# np.save(fid, self.sequence_length_histogram[i])
with open(os.path.join(seg_dir, 'max_length'), 'w') as fid:
fid.write(str(self.max_length[i]))
with open(os.path.join(seg_dir, 'dim'), 'w') as fid:
fid.write(str(self.dim))
with open(os.path.join(seg_dir, 'nontime_dims'), 'w') as fid:
fid.write(str(self.nontime_dims)[1:-1])
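# Example dataline accepted by __call__ above (hypothetical ids and times,
# assuming nrs=2): the mixture wav is followed, per speaker, by the speaker
# id, its start/end segment pairs in seconds, and the speaker id again:
#   '/data/mix1.wav spk1 0.00 1.52 3.10 4.00 spk1 spk2 1.40 3.90 spk2'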
class VadTimings2SamplesProcessor(processor.Processor):
"""a processor for convert VAD timings into time domain targets"""
def __init__(self, conf, segment_lengths):
"""VadTimingsProcessor constructor
Args:
conf: VadTimingsProcessor configuration as a dict of strings
segment_lengths: A list containing the desired lengths of segments.
Possibly multiple segment lengths"""
# set the length of the segments. Possibly multiple segment lengths
self.segment_lengths = segment_lengths
self.nrS = int(conf['nrs'])
# initialize the metadata
self.dim = self.nrS
self.max_length = np.zeros(len(self.segment_lengths))
# self.sequence_length_histogram = np.zeros(0, dtype=np.int32)
self.nontime_dims = [self.dim]
super(VadTimings2SamplesProcessor, self).__init__(conf)
def __call__(self, datalines):
"""process the data in dataline
Args:
datalines: in format 'mix_wav spk_id1 seg1_start seg1_end seg2_start seg2_end ... spk_id1 spk_id2 ... spk_id2'
Returns:
segmented_data: The segmented features as a list of numpy arrays per segment length
utt_info: some info on the utterance"""
utt_info = dict()
split_lines = datalines.split(' ')
mix_file = split_lines.pop(0)
# read the wav file
rate, utt = _read_wav(mix_file)
audio_length = len(utt)
vad_indicator = np.zeros([audio_length, self.dim], dtype=np.bool)
ind = 0
spk_ind = 0
new_id = True
prev_id = ''
while True:
if new_id:
prev_id = split_lines[ind]
ind += 1
new_id = False
if prev_id == split_lines[ind]:
ind += 1
new_id = True
spk_ind += 1
else:
seg_st = float(split_lines[ind])
seg_st_samples = sec2samples(seg_st, rate)
if seg_st_samples > audio_length - 1:
seg_st_samples = audio_length - 1
seg_end = float(split_lines[ind + 1])
seg_end_samples = sec2samples(seg_end, rate)
if seg_end_samples > audio_length:
seg_end_samples = audio_length
vad_indicator[seg_st_samples:seg_end_samples, spk_ind] = 1
ind += 2
if ind >= len(split_lines):
break
# split the data for all desired segment lengths
segmented_data = self.segment_data(vad_indicator)
# update the metadata
for i, seg_length in enumerate(self.segment_lengths):
self.max_length[i] = max(self.max_length[i], np.shape(segmented_data[seg_length][0])[0])
# seq_length = np.shape(segmented_data[seg_length][0])[0]
# if seq_length >= np.shape(self.sequence_length_histogram[i])[0]:
# self.sequence_length_histogram[i] = np.concatenate(
# [self.sequence_length_histogram[i], np.zeros(
# seq_length-np.shape(self.sequence_length_histogram[i])[0]+1,
# dtype=np.int32)]
# )
# self.sequence_length_histogram[i][seq_length] += len(segmented_data[seg_length])
return segmented_data, utt_info
def write_metadata(self, datadir):
"""write the processor metadata to disk
Args:
datadir: the directory where the metadata should be written"""
for i, seg_length in enumerate(self.segment_lengths):
seg_dir = os.path.join(datadir, seg_length)
# with open(os.path.join(seg_dir, 'sequence_length_histogram.npy'), 'w') as fid:
# np.save(fid, self.sequence_length_histogram[i])
with open(os.path.join(seg_dir, 'max_length'), 'w') as fid:
fid.write(str(self.max_length[i]))
with open(os.path.join(seg_dir, 'dim'), 'w') as fid:
fid.write(str(self.dim))
with open(os.path.join(seg_dir, 'nontime_dims'), 'w') as fid:
fid.write(str(self.nontime_dims)[1:-1])
def sec2frames(time_in_seconds, winlength, winstep):
""" turn time in seconds into time in frames"""
time_in_frames = (time_in_seconds - winlength/2)/winstep
time_in_frames = int(round(time_in_frames))
if time_in_frames < 0:
time_in_frames = 0
return time_in_frames
def sec2samples(time_in_seconds, rate):
""" turn time in seconds into time in samples"""
time_in_samples = int(round(time_in_seconds * rate))
if time_in_samples < 0:
time_in_samples = 0
return time_in_samples
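def _example_time_conversions():
    """Hypothetical illustration (values not taken from any original config):
    1.0 s with 25 ms windows and a 10 ms shift maps to frame 99, and 1.0 s at
    a 16 kHz sampling rate maps to sample 16000."""
    return sec2frames(1.0, 0.025, 0.01), sec2samples(1.0, 16000)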
def _read_wav(wavfile):
"""
read a wav file
Args:
wavfile: either a path to a wav file or a command to read and pipe
an audio file
Returns:
- the sampling rate
- the utterance as a numpy array
"""
if os.path.exists(wavfile):
# it's a file
(rate, utterance) = wav.read(wavfile)
elif wavfile[-1] == '|':
# it's a command
# read the audio file
pid = subprocess.Popen(wavfile + ' tee', shell=True, stdout=subprocess.PIPE)
output, _ = pid.communicate()
output_buffer = StringIO.StringIO(output)
(rate, utterance) = wav.read(output_buffer)
else:
# it's a segment of an utterance
split = wavfile.split(' ')
begin = float(split[-2])
end = float(split[-1])
unsegmented = ' '.join(split[:-2])
rate, full_utterance = _read_wav(unsegmented)
utterance = full_utterance[int(begin*rate):int(end*rate)]
return rate, utterance
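# _read_wav() accepts three input forms (illustrative, hypothetical paths):
#   '/data/mix.wav'                    -> a plain wav file on disk
#   'sph2pipe -f wav /data/mix.sph |'  -> a command whose piped stdout is read
#   '/data/mix.wav 0.5 2.5'            -> the 0.5 s to 2.5 s segment of a wav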
|
|
########
# Copyright (c) 2015 GigaSpaces Technologies Ltd. All rights reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
############
import logging
import os
import yaml
import fabric.api
from path import path
from claw import configuration
from claw import tests
from claw.handlers import stub_handler
class TestConfiguration(tests.BaseTest):
def test_init_from_dir(self):
conf = configuration.Configuration(self.workdir)
self.assertEqual(conf.configuration, self.workdir.basename())
def test_init_from_current_dir(self):
conf = configuration.Configuration()
self.assertEqual(conf.configuration, path(os.getcwd()).basename())
def test_init_from_current_configuration(self):
self.init()
conf = configuration.Configuration(configuration.CURRENT_CONFIGURATION)
self.assertEqual(conf.configuration,
configuration.CURRENT_CONFIGURATION)
self.claw.generate(tests.STUB_CONFIGURATION)
conf = configuration.Configuration(configuration.CURRENT_CONFIGURATION)
self.assertEqual(conf.configuration, tests.STUB_CONFIGURATION)
def test_exists(self):
self.init()
conf = configuration.Configuration(tests.STUB_CONFIGURATION)
self.assertFalse(conf.exists())
self.claw.generate(tests.STUB_CONFIGURATION)
conf = configuration.Configuration(tests.STUB_CONFIGURATION)
self.assertTrue(conf.exists())
blueprint = conf.blueprint(tests.STUB_BLUEPRINT)
self.assertFalse(blueprint.exists())
self.claw('generate-blueprint', tests.STUB_CONFIGURATION,
tests.STUB_BLUEPRINT)
self.assertTrue(blueprint.exists())
def test_configuration_properties(self):
conf, blueprint = self._init_configuration_and_blueprint()
conf_dir = self.workdir / 'configurations' / tests.STUB_CONFIGURATION
self.assertEqual(conf.dir, conf_dir)
self.assertEqual(conf.manager_blueprint_dir,
conf_dir / 'manager-blueprint')
self.assertEqual(conf.blueprints_dir,
conf_dir / 'blueprints')
self.assertEqual(conf.inputs_path,
conf_dir / 'inputs.yaml')
self.assertEqual(
conf.manager_blueprint_path,
conf_dir / 'manager-blueprint' / 'manager-blueprint.yaml')
self.assertEqual(conf.handler_configuration_path,
conf_dir / 'handler-configuration.yaml')
self.assertEqual(conf.cli_config_path,
conf_dir / '.cloudify' / 'config.yaml')
blueprint_dir = conf.blueprints_dir / tests.STUB_BLUEPRINT
self.assertIs(blueprint.configuration, conf)
self.assertEqual(blueprint.blueprint_name, tests.STUB_BLUEPRINT)
self.assertEqual(blueprint.dir, blueprint_dir)
self.assertEqual(blueprint.blueprint_configuration_path,
blueprint_dir / 'blueprint-configuration.yaml')
self.assertEqual(blueprint.inputs_path,
blueprint_dir / 'inputs.yaml')
self.assertEqual(blueprint.blueprint_path,
blueprint_dir / 'blueprint' / 'blueprint.yaml')
def test_yaml_files(self):
conf, blueprint = self._init_configuration_and_blueprint()
conf_dir = self.workdir / 'configurations' / tests.STUB_CONFIGURATION
blueprint_dir = conf.blueprints_dir / tests.STUB_BLUEPRINT
conf.cli_config_path.dirname().mkdir_p()
configuration_files = [
('inputs', conf_dir / 'inputs.yaml'),
('handler_configuration', conf_dir / 'handler-configuration.yaml'),
('manager_blueprint',
conf_dir / 'manager-blueprint' / 'manager-blueprint.yaml'),
('cli_config', conf_dir / '.cloudify' / 'config.yaml'),
]
blueprint_files = [
('inputs', blueprint_dir / 'inputs.yaml'),
('blueprint', blueprint_dir / 'blueprint' / 'blueprint.yaml'),
('blueprint_configuration',
blueprint_dir / 'blueprint-configuration.yaml')
]
def assert_files(obj, files):
content = {'some': 'value'}
for _file in files:
setattr(obj, _file[0], content)
if isinstance(obj, configuration.Configuration):
new_obj = configuration.Configuration(tests.STUB_CONFIGURATION)
else:
new_obj = conf.blueprint(tests.STUB_BLUEPRINT)
for _file in files:
self.assertEqual(content, getattr(new_obj, _file[0]))
self.assertEqual(yaml.safe_load(_file[1].text()), content)
assert_files(conf, configuration_files)
assert_files(blueprint, blueprint_files)
def test_properties(self):
props_name = 'props1'
main_suites_yaml_path = self.workdir / 'main-suites.yaml'
main_suites_yaml = {
'variables': {'a': '123'},
'handler_properties': {
props_name: {
'a_from_var': '{{a}}',
'b': 'b_val'
}
}
}
main_suites_yaml_path.write_text(yaml.safe_dump(main_suites_yaml))
conf = self._init_configuration(main_suites_yaml_path)
with conf.patch.handler_configuration as patch:
patch.obj.pop('properties', None)
self.assertEqual(conf.properties, {})
with conf.patch.handler_configuration as patch:
patch.obj['properties'] = 'no_such_properties'
self.assertEqual(conf.properties, {})
with conf.patch.handler_configuration as patch:
patch.obj['properties'] = props_name
self.assertEqual(conf.properties, {
'a_from_var': '123',
'b': 'b_val'
})
def test_client(self):
conf = self._init_configuration()
self.assertEqual(conf.client._client.host, 'localhost')
ip = '1.1.1.1'
default_port = 80
custom_port = 12345
with conf.patch.handler_configuration as patch:
patch.obj['manager_ip'] = ip
self.assertEqual(conf.client._client.host, ip)
self.assertEqual(conf.client._client.port, default_port)
with conf.patch.handler_configuration as patch:
patch.obj['manager_port'] = custom_port
self.assertEqual(conf.client._client.host, ip)
self.assertEqual(conf.client._client.port, custom_port)
def test_claw_handler(self):
conf = self._init_configuration()
with conf.patch.handler_configuration as patch:
patch.set_value('handler', 'stub_handler')
claw_handler = conf.claw_handler
self.assertTrue(isinstance(claw_handler, stub_handler.Handler))
self.assertIs(claw_handler.configuration, conf)
def test_patch(self):
conf, blueprint = self._init_configuration_and_blueprint()
key = 'some_key'
value = 'some_value'
conf.cli_config_path.dirname().makedirs_p()
conf.cli_config_path.write_text('{}')
def _assert_patching(obj, props):
for prop in props:
with getattr(obj.patch, prop) as patch:
patch.set_value(key, value)
self.assertEqual(getattr(obj, prop)[key], value)
_assert_patching(conf, ['inputs',
'manager_blueprint',
'handler_configuration',
'cli_config'])
_assert_patching(blueprint, ['inputs',
'blueprint',
'blueprint_configuration'])
def test_ssh(self):
conf = self._init_configuration()
ip = '1.1.1.1'
user = 'user'
key = '~/key'
with conf.patch.handler_configuration as patch:
patch.obj.update({
'manager_ip': ip,
'manager_user': user,
'manager_key': key
})
with conf.ssh() as ssh:
self.assertEqual(ssh, fabric.api)
self.assertEqual(fabric.api.env['host_string'], ip)
self.assertEqual(fabric.api.env['user'], user)
self.assertEqual(fabric.api.env['key_filename'], key)
def test_logger(self):
conf = self._init_configuration()
logger = conf.logger
self.assertTrue(isinstance(logger, logging.Logger))
self.assertEqual(1, len(logger.handlers))
def _init_configuration(self, suites_yaml=None):
self.init(suites_yaml)
self.claw.generate(tests.STUB_CONFIGURATION)
return configuration.Configuration(tests.STUB_CONFIGURATION)
def _init_configuration_and_blueprint(self):
conf = self._init_configuration()
self.claw('generate-blueprint',
tests.STUB_CONFIGURATION,
tests.STUB_BLUEPRINT)
blueprint = conf.blueprint(tests.STUB_BLUEPRINT)
return conf, blueprint
|
|
"""
Implements the wrapper for the Astropy test runner in the form of the
``./setup.py test`` distutils command.
"""
import os
import glob
import shutil
import subprocess
import sys
import tempfile
from setuptools import Command
class FixRemoteDataOption(type):
"""
This metaclass is used to catch cases where the user is running the tests
with --remote-data. We've now changed the --remote-data option so that it
takes arguments, but we still want --remote-data to work as before and to
enable all remote tests. With this metaclass, we can modify sys.argv
before distutils/setuptools try to parse the command-line options.
"""
def __init__(cls, name, bases, dct):
try:
idx = sys.argv.index('--remote-data')
except ValueError:
pass
else:
sys.argv[idx] = '--remote-data=any'
try:
idx = sys.argv.index('-R')
except ValueError:
pass
else:
sys.argv[idx] = '-R=any'
return super(FixRemoteDataOption, cls).__init__(name, bases, dct)
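# Illustration (hypothetical invocations): before distutils parses the
# options, the metaclass above rewrites
#   python setup.py test --remote-data   ->  ... --remote-data=any
#   python setup.py test -R              ->  ... -R=any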
class AstropyTest(Command, metaclass=FixRemoteDataOption):
description = 'Run the tests for this package'
user_options = [
('package=', 'P',
"The name of a specific package to test, e.g. 'io.fits' or 'utils'. "
"If nothing is specified, all default tests are run."),
('test-path=', 't',
'Specify a test location by path. If a relative path to a .py file, '
'it is relative to the built package, so e.g., a leading "astropy/" '
'is necessary. If a relative path to a .rst file, it is relative to '
'the directory *below* the --docs-path directory, so a leading '
'"docs/" is usually necessary. May also be an absolute path.'),
('verbose-results', 'V',
'Turn on verbose output from pytest.'),
('plugins=', 'p',
'Plugins to enable when running pytest.'),
('pastebin=', 'b',
"Enable pytest pastebin output. Either 'all' or 'failed'."),
('args=', 'a',
'Additional arguments to be passed to pytest.'),
('remote-data=', 'R', 'Run tests that download remote data. Should be '
'one of none/astropy/any (defaults to none).'),
('pep8', '8',
'Enable PEP8 checking and disable regular tests. '
'Requires the pytest-pep8 plugin.'),
('pdb', 'd',
'Start the interactive Python debugger on errors.'),
('coverage', 'c',
'Create a coverage report. Requires the coverage package.'),
('open-files', 'o', 'Fail if any tests leave files open. Requires the '
'psutil package.'),
('parallel=', 'j',
'Run the tests in parallel on the specified number of '
'CPUs. If negative, all the cores on the machine will be '
'used. Requires the pytest-xdist plugin.'),
('docs-path=', None,
'The path to the documentation .rst files. If not provided, and '
'the current directory contains a directory called "docs", that '
'will be used.'),
('skip-docs', None,
"Don't test the documentation .rst files."),
('repeat=', None,
'How many times to repeat each test (can be used to check for '
'sporadic failures).'),
('temp-root=', None,
'The root directory in which to create the temporary testing files. '
'If unspecified the system default is used (e.g. /tmp) as explained '
'in the documentation for tempfile.mkstemp.')
]
package_name = ''
def initialize_options(self):
self.package = None
self.test_path = None
self.verbose_results = False
self.plugins = None
self.pastebin = None
self.args = None
self.remote_data = 'none'
self.pep8 = False
self.pdb = False
self.coverage = False
self.open_files = False
self.parallel = 0
self.docs_path = None
self.skip_docs = False
self.repeat = None
self.temp_root = None
def finalize_options(self):
# Normally we would validate the options here, but that's handled in
# run_tests
pass
def generate_testing_command(self):
"""
Build a Python script to run the tests.
"""
cmd_pre = '' # Commands to run before the test function
cmd_post = '' # Commands to run after the test function
if self.coverage:
pre, post = self._generate_coverage_commands()
cmd_pre += pre
cmd_post += post
set_flag = "import builtins; builtins._ASTROPY_TEST_ = True"
cmd = ('{cmd_pre}{0}; import {1.package_name}, sys; result = ('
'{1.package_name}.test('
'package={1.package!r}, '
'test_path={1.test_path!r}, '
'args={1.args!r}, '
'plugins={1.plugins!r}, '
'verbose={1.verbose_results!r}, '
'pastebin={1.pastebin!r}, '
'remote_data={1.remote_data!r}, '
'pep8={1.pep8!r}, '
'pdb={1.pdb!r}, '
'open_files={1.open_files!r}, '
'parallel={1.parallel!r}, '
'docs_path={1.docs_path!r}, '
'skip_docs={1.skip_docs!r}, '
'add_local_eggs_to_path=True, ' # see _build_temp_install below
'repeat={1.repeat!r})); '
'{cmd_post}'
'sys.exit(result)')
return cmd.format(set_flag, self, cmd_pre=cmd_pre, cmd_post=cmd_post)
def run(self):
"""
Run the tests!
"""
# Install the runtime dependencies.
if self.distribution.install_requires:
self.distribution.fetch_build_eggs(self.distribution.install_requires)
# Ensure there is a doc path
if self.docs_path is None:
cfg_docs_dir = self.distribution.get_option_dict('build_docs').get('source_dir', None)
# Some affiliated packages use this.
# See astropy/package-template#157
if cfg_docs_dir is not None and os.path.exists(cfg_docs_dir[1]):
self.docs_path = os.path.abspath(cfg_docs_dir[1])
# fall back on a default path of "docs"
elif os.path.exists('docs'): # pragma: no cover
self.docs_path = os.path.abspath('docs')
# Build a testing install of the package
self._build_temp_install()
# Install the test dependencies
# NOTE: we do this here after _build_temp_install because there is
# a weird bug which occurs if psutil is installed in this way before
# astropy is built: Cython can have a segmentation fault. Strange, eh?
if self.distribution.tests_require:
self.distribution.fetch_build_eggs(self.distribution.tests_require)
# Copy any additional dependencies that may have been installed via
# tests_requires or install_requires. We then pass the
# add_local_eggs_to_path=True option to package.test() to make sure the
# eggs get included in the path.
if os.path.exists('.eggs'):
shutil.copytree('.eggs', os.path.join(self.testing_path, '.eggs'))
# Run everything in a try: finally: so that the tmp dir gets deleted.
try:
# Construct this modules testing command
cmd = self.generate_testing_command()
# Run the tests in a subprocess--this is necessary since
# new extension modules may have appeared, and this is the
# easiest way to set up a new environment
testproc = subprocess.Popen(
[sys.executable, '-c', cmd],
cwd=self.testing_path, close_fds=False)
retcode = testproc.wait()
except KeyboardInterrupt:
import signal
# If a keyboard interrupt is handled, pass it to the test
# subprocess to prompt pytest to initiate its teardown
testproc.send_signal(signal.SIGINT)
retcode = testproc.wait()
finally:
# Remove temporary directory
shutil.rmtree(self.tmp_dir)
raise SystemExit(retcode)
def _build_temp_install(self):
"""
Install the package into a temporary directory for the purposes of
testing. This allows us to test the install command, include the
entry points, and also avoid creating pyc and __pycache__ directories
inside the build directory.
"""
# On OSX the default path for temp files is under /var, but in most
# cases on OSX /var is actually a symlink to /private/var; ensure we
# dereference that link, because py.test is very sensitive to relative
# paths...
tmp_dir = tempfile.mkdtemp(prefix=self.package_name + '-test-',
dir=self.temp_root)
self.tmp_dir = os.path.realpath(tmp_dir)
# We now install the package to the temporary directory. We do this
# rather than build and copy because this will ensure that e.g. entry
# points work.
self.reinitialize_command('install')
install_cmd = self.distribution.get_command_obj('install')
install_cmd.prefix = self.tmp_dir
self.run_command('install')
# We now get the path to the site-packages directory that was created
# inside self.tmp_dir
install_cmd = self.get_finalized_command('install')
self.testing_path = install_cmd.install_lib
# Ideally, docs_path is set properly in run(), but if it is still
# not set here, do not pretend it is, otherwise bad things happen.
# See astropy/package-template#157
if self.docs_path is not None:
new_docs_path = os.path.join(self.testing_path,
os.path.basename(self.docs_path))
shutil.copytree(self.docs_path, new_docs_path)
self.docs_path = new_docs_path
shutil.copy('setup.cfg', self.testing_path)
def _generate_coverage_commands(self):
"""
This method creates the post and pre commands if coverage is to be
generated
"""
if self.parallel != 0:
raise ValueError(
"--coverage can not be used with --parallel")
try:
import coverage # pylint: disable=W0611
except ImportError:
raise ImportError(
"--coverage requires that the coverage package is "
"installed.")
# Don't use get_pkg_data_filename here, because it
# requires importing astropy.config and thus screwing
# up coverage results for those packages.
coveragerc = os.path.join(
self.testing_path, self.package_name, 'tests', 'coveragerc')
with open(coveragerc, 'r') as fd:
coveragerc_content = fd.read()
coveragerc_content = coveragerc_content.replace(
"{packagename}", self.package_name)
tmp_coveragerc = os.path.join(self.tmp_dir, 'coveragerc')
with open(tmp_coveragerc, 'wb') as tmp:
tmp.write(coveragerc_content.encode('utf-8'))
cmd_pre = (
'import coverage; '
'cov = coverage.coverage(data_file="{0}", config_file="{1}"); '
'cov.start();'.format(
os.path.abspath(".coverage"), tmp_coveragerc))
cmd_post = (
'cov.stop(); '
'from astropy.tests.helper import _save_coverage; '
'_save_coverage(cov, result, "{0}", "{1}");'.format(
os.path.abspath('.'), self.testing_path))
return cmd_pre, cmd_post
|
|
# -*- coding: utf-8 -*-
#
# Copyright (C) 2008-2014 Edgewall Software
# All rights reserved.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution. The terms
# are also available at http://trac.edgewall.org/wiki/TracLicense.
#
# This software consists of voluntary contributions made by many
# individuals. For the exact contribution history, see the revision
# history and logs, available at http://trac.edgewall.org/log/.
"""better_twill is a small wrapper around twill to set some sane defaults and
monkey-patch some better versions of some of twill's methods.
It also handles twill's absence.
"""
import os
import sys
import urllib
import urlparse
from os.path import abspath, dirname, join
from pkg_resources import parse_version as pv
try:
from cStringIO import StringIO
except ImportError:
from StringIO import StringIO
from trac.util.text import to_unicode
# On OSX lxml needs to be imported before twill to avoid Resolver issues
# somehow caused by the mac specific 'ic' module
try:
from lxml import etree
except ImportError:
pass
try:
import twill
except ImportError:
twill = None
# When twill tries to connect to a site before the site is up, it raises an
# exception. In 0.9b1, it's urllib2.URLError, but in -latest, it's
# twill.browser.BrowserStateError.
try:
from twill.browser import BrowserStateError as ConnectError
except ImportError:
from urllib2 import URLError as ConnectError
if twill:
# We want Trac to generate valid html, and therefore want to test against
# the html as generated by Trac. "tidy" tries to clean up broken html,
# and is responsible for one difficult to track down testcase failure
# (for #5497). Therefore we turn it off here.
twill.commands.config('use_tidy', '0')
# We use a transparent proxy to access the global browser object through
# twill.get_browser(), as the browser can be destroyed by browser_reset()
# (see #7472).
class _BrowserProxy(object):
def __getattribute__(self, name):
return getattr(twill.get_browser(), name)
def __setattr__(self, name, value):
setattr(twill.get_browser(), name, value)
# setup short names to reduce typing
# This twill browser (and the tc commands that use it) are essentially
# global, and not tied to our test fixture.
tc = twill.commands
b = _BrowserProxy()
# Setup XHTML validation for all retrieved pages
try:
from lxml import etree
except ImportError:
print "SKIP: validation of XHTML output in functional tests " \
"(no lxml installed)"
etree = None
if etree and pv(etree.__version__) < pv('2.0.0'):
# 2.0.7 and 2.1.x are known to work.
print "SKIP: validation of XHTML output in functional tests " \
"(lxml < 2.0, api incompatibility)"
etree = None
if etree:
class _Resolver(etree.Resolver):
base_dir = dirname(abspath(__file__))
def resolve(self, system_url, public_id, context):
return self.resolve_filename(join(self.base_dir,
system_url.split("/")[-1]),
context)
_parser = etree.XMLParser(dtd_validation=True)
_parser.resolvers.add(_Resolver())
etree.set_default_parser(_parser)
def _format_error_log(data, log):
msg = []
for entry in log:
context = data.splitlines()[max(0, entry.line - 5):
entry.line + 6]
msg.append("\n# %s\n# URL: %s\n# Line %d, column %d\n\n%s\n"
% (entry.message, entry.filename, entry.line,
entry.column, "\n".join([each.decode('utf-8')
for each in context])))
return "\n".join(msg).encode('ascii', 'xmlcharrefreplace')
def _validate_xhtml(func_name, *args, **kwargs):
page = b.get_html()
if "xhtml1-strict.dtd" not in page:
return
etree.clear_error_log()
try:
                # lxml will try to convert the URL to unicode by itself;
                # this won't work for non-ascii URLs, so help it along
url = b.get_url()
if isinstance(url, str):
url = unicode(url, 'latin1')
etree.parse(StringIO(page), base_url=url)
except etree.XMLSyntaxError, e:
raise twill.errors.TwillAssertionError(
_format_error_log(page, e.error_log))
b._post_load_hooks.append(_validate_xhtml)
# When we can't find something we expected, or find something we didn't
# expect, it helps the debugging effort to have a copy of the html to
# analyze.
def twill_write_html():
"""Write the current html to a file. Name the file based on the
current testcase.
"""
import unittest
frame = sys._getframe()
while frame:
if frame.f_code.co_name in ('runTest', 'setUp', 'tearDown'):
testcase = frame.f_locals['self']
testname = testcase.__class__.__name__
tracdir = testcase._testenv.tracdir
break
elif isinstance(frame.f_locals.get('self'), unittest.TestCase):
testcase = frame.f_locals['self']
testname = '%s.%s' % (testcase.__class__.__name__,
testcase._testMethodName)
tracdir = testcase._testenv.tracdir
break
frame = frame.f_back
else:
# We didn't find a testcase in the stack, so we have no clue what's
# going on.
raise Exception("No testcase was found on the stack. This was "
"really not expected, and I don't know how to "
"handle it.")
filename = os.path.join(tracdir, 'log', "%s.html" % testname)
html_file = open(filename, 'w')
html_file.write(b.get_html())
html_file.close()
return urlparse.urljoin('file:', urllib.pathname2url(filename))
# Twill isn't as helpful with errors as I'd like it to be, so we replace
# the formvalue function. This would be better done as a patch to Twill.
def better_formvalue(form, field, value, fv=tc.formvalue):
try:
fv(form, field, value)
except (twill.errors.TwillAssertionError,
twill.utils.ClientForm.ItemNotFoundError), e:
filename = twill_write_html()
raise twill.errors.TwillAssertionError('%s at %s' %
(unicode(e), filename))
tc.formvalue = better_formvalue
tc.fv = better_formvalue
# Twill requires that on pages with more than one form, you have to click a
# field within the form before you can click submit. There are a number of
# cases where the first interaction a user would have with a form is
# clicking on a button. This enhancement allows us to specify the form to
# click on.
def better_browser_submit(fieldname=None, formname=None, browser=b, old_submit=b.submit):
if formname is not None: # enhancement to directly specify the form
browser._browser.form = browser.get_form(formname)
old_submit(fieldname)
b.submit = better_browser_submit
def better_submit(fieldname=None, formname=None):
b.submit(fieldname, formname)
tc.submit = better_submit
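    # Usage sketch (the form and field names are hypothetical): on a page with more
    # than one form, the target form can now be named explicitly at submit time:
    #
    #     tc.submit('preview', formname='propertyform')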
# Twill's formfile function leaves a filehandle open which prevents the
    # file from being deleted on Windows. Since we would just as soon use a
# StringIO object in the first place, allow the file-like object to be
# provided directly.
def better_formfile(formname, fieldname, filename, content_type=None,
fp=None):
if not fp:
filename = filename.replace('/', os.path.sep)
temp_fp = open(filename, 'rb')
data = temp_fp.read()
temp_fp.close()
fp = StringIO(data)
form = b.get_form(formname)
control = b.get_form_field(form, fieldname)
if not control.is_of_kind('file'):
raise twill.errors.TwillException("ERROR: field is not a file "
"upload field!")
b.clicked(form, control)
control.add_file(fp, content_type, filename)
tc.formfile = better_formfile
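    # Usage sketch (names are hypothetical): passing a file-like object directly via
    # `fp` avoids leaving a filehandle open on Windows:
    #
    #     tc.formfile('attachform', 'attachment', 'foo.txt',
    #                 content_type='text/plain', fp=StringIO('some file contents'))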
# Twill's tc.find() does not provide any guidance on what we got
# instead of what was expected.
def better_find(what, flags='', tcfind=tc.find):
try:
tcfind(what, flags)
except twill.errors.TwillAssertionError, e:
filename = twill_write_html()
raise twill.errors.TwillAssertionError('%s at %s' %
(to_unicode(e), filename))
tc.find = better_find
def better_notfind(what, flags='', tcnotfind=tc.notfind):
try:
tcnotfind(what, flags)
except twill.errors.TwillAssertionError, e:
filename = twill_write_html()
raise twill.errors.TwillAssertionError('%s at %s' %
(to_unicode(e), filename))
tc.notfind = better_notfind
# Same for tc.url - no hint about what went wrong!
def better_url(should_be, tcurl=tc.url):
try:
tcurl(should_be)
except twill.errors.TwillAssertionError, e:
filename = twill_write_html()
raise twill.errors.TwillAssertionError('%s at %s' %
(to_unicode(e), filename))
tc.url = better_url
else:
b = tc = None
|
|
#!/usr/bin/env python3
# This file is part of the MicroPython project, http://micropython.org/
# The MIT License (MIT)
# Copyright (c) 2019 Damien P. George
import os
import subprocess
import sys
import argparse
sys.path.append("../tools")
import pyboard
# Paths for host executables
CPYTHON3 = os.getenv("MICROPY_CPYTHON3", "python3")
MICROPYTHON = os.getenv("MICROPY_MICROPYTHON", "../ports/unix/pycopy-coverage")
NATMOD_EXAMPLE_DIR = "../examples/natmod/"
# Supported tests and their corresponding mpy module
TEST_MAPPINGS = {
"btree": "btree/btree_$(ARCH).mpy",
"framebuf": "framebuf/framebuf_$(ARCH).mpy",
"uheapq": "uheapq/uheapq_$(ARCH).mpy",
"urandom": "urandom/urandom_$(ARCH).mpy",
"ure": "ure/ure_$(ARCH).mpy",
"uzlib": "uzlib/uzlib_$(ARCH).mpy",
}
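# For example, running with the default "--arch x64" resolves the "btree" entry to
# "btree/btree_x64.mpy", which run_tests() then loads from NATMOD_EXAMPLE_DIR, i.e.
# ../examples/natmod/btree/btree_x64.mpy (assuming that example has been built).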
# Code to allow a target MicroPython to import an .mpy from RAM
injected_import_hook_code = """\
import sys, uos, uio
class _File(uio.IOBase):
def __init__(self):
self.off = 0
def ioctl(self, request, arg):
return 0
def readinto(self, buf):
buf[:] = memoryview(_g_buf)[self.off:self.off + len(buf)]
self.off += len(buf)
return len(buf)
class _FS:
def mount(self, readonly, mkfs):
pass
def chdir(self, path):
pass
def stat(self, path):
if path == '__injected.mpy':
return tuple(0 for _ in range(10))
else:
raise OSError(-2) # ENOENT
def open(self, path, mode):
return _File()
uos.mount(_FS(), '/__remote')
uos.chdir('/__remote')
sys.modules['{}'] = __import__('__injected')
"""
class TargetSubprocess:
def __init__(self, cmd):
self.cmd = cmd
def close(self):
pass
def run_script(self, script):
try:
p = subprocess.run(
self.cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, input=script
)
return p.stdout, None
except subprocess.CalledProcessError as er:
return b"", er
class TargetPyboard:
def __init__(self, pyb):
self.pyb = pyb
self.pyb.enter_raw_repl()
def close(self):
self.pyb.exit_raw_repl()
self.pyb.close()
def run_script(self, script):
try:
self.pyb.enter_raw_repl()
output = self.pyb.exec_(script)
output = output.replace(b"\r\n", b"\n")
return output, None
except pyboard.PyboardError as er:
return b"", er
def run_tests(target_truth, target, args, stats):
for test_file in args.files:
# Find supported test
for k, v in TEST_MAPPINGS.items():
if test_file.find(k) != -1:
test_module = k
test_mpy = v.replace("$(ARCH)", args.arch)
break
else:
print("---- {} - no matching mpy".format(test_file))
continue
# Read test script
with open(test_file, "rb") as f:
test_file_data = f.read()
# Create full test with embedded .mpy
try:
with open(NATMOD_EXAMPLE_DIR + test_mpy, "rb") as f:
test_script = b"_g_buf=" + bytes(repr(f.read()), "ascii") + b"\n"
except OSError:
print("---- {} - mpy file not compiled".format(test_file))
continue
test_script += bytes(injected_import_hook_code.format(test_module), "ascii")
test_script += test_file_data
# Run test under MicroPython
result_out, error = target.run_script(test_script)
# Work out result of test
extra = ""
if error is None and result_out == b"SKIP\n":
result = "SKIP"
elif error is not None:
result = "FAIL"
extra = " - " + str(error)
else:
# Check result against truth
try:
with open(test_file + ".exp", "rb") as f:
result_exp = f.read()
error = None
except OSError:
result_exp, error = target_truth.run_script(test_file_data)
if error is not None:
result = "TRUTH FAIL"
elif result_out != result_exp:
result = "FAIL"
print(result_out)
else:
result = "pass"
# Accumulate statistics
stats["total"] += 1
if result == "pass":
stats["pass"] += 1
elif result == "SKIP":
stats["skip"] += 1
else:
stats["fail"] += 1
# Print result
print("{:4} {}{}".format(result, test_file, extra))
def main():
cmd_parser = argparse.ArgumentParser(
description="Run dynamic-native-module tests under MicroPython"
)
cmd_parser.add_argument(
"-p", "--pyboard", action="store_true", help="run tests via pyboard.py"
)
cmd_parser.add_argument(
"-d", "--device", default="/dev/ttyACM0", help="the device for pyboard.py"
)
cmd_parser.add_argument(
"-a", "--arch", default="x64", help="native architecture of the target"
)
cmd_parser.add_argument("files", nargs="*", help="input test files")
args = cmd_parser.parse_args()
target_truth = TargetSubprocess([CPYTHON3])
if args.pyboard:
target = TargetPyboard(pyboard.Pyboard(args.device))
else:
target = TargetSubprocess([MICROPYTHON])
stats = {"total": 0, "pass": 0, "fail": 0, "skip": 0}
run_tests(target_truth, target, args, stats)
target.close()
target_truth.close()
print("{} tests performed".format(stats["total"]))
print("{} tests passed".format(stats["pass"]))
if stats["fail"]:
print("{} tests failed".format(stats["fail"]))
if stats["skip"]:
print("{} tests skipped".format(stats["skip"]))
if stats["fail"]:
sys.exit(1)
if __name__ == "__main__":
main()
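# Example invocation (the script and test file names are illustrative; any path
# containing a supported module name such as "btree" is matched via TEST_MAPPINGS):
#
#     python3 run-natmodtests.py --arch x64 extmod/btree1.py
#     python3 run-natmodtests.py --pyboard --device /dev/ttyACM0 extmod/btree1.py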
|
|
#!/usr/bin/env python
#########################################################################################
#
# Motion correction of dMRI data.
#
# Inspired by Xu et al. Neuroimage 2013.
#
# Details of the algorithm:
# - grouping of DW data only (every n volumes, default n=5)
# - average all b0
# - average DWI data within each group
# - average DWI of all groups
# - moco on DWI groups
# - moco on b=0, using target volume: last b=0
# - moco on all dMRI data
# - generating b=0 mean and DWI mean after motion correction
#
# ---------------------------------------------------------------------------------------
# Copyright (c) 2013 Polytechnique Montreal <www.neuro.polymtl.ca>
# Authors: Karun Raju, Tanguy Duval, Julien Cohen-Adad
# Modified: 2014-08-15
#
# About the license: see the file LICENSE.TXT
#########################################################################################
# TODO: Do not merge per group if no group is asked.
# TODO: make sure slicewise not used with ants, eddy not used with ants
# TODO: make sure images are axial
# TODO: if -f, we only need two plots. Plot 1: X params with fitted spline, plot 2: Y params with fitted splines. Each plot will have all Z slices (with legend Z=0, Z=1, ...), ylabel: translation (mm), xlabel: volume #. Plus add grid.
# TODO (no priority): for sinc interp, use ANTs instead of flirt
import sys
import os
import commands
import getopt
import time
import glob
import math
import numpy as np
from sct_dmri_eddy_correct import eddy_correct
import sct_utils as sct
import msct_moco as moco
from sct_dmri_separate_b0_and_dwi import identify_b0
import importlib
from sct_convert import convert
from msct_image import Image
from sct_image import copy_header, split_data, concat_data
from msct_parser import Parser
class Param:
def __init__(self):
self.debug = 0
self.fname_data = ''
self.fname_bvecs = ''
self.fname_bvals = ''
self.fname_target = ''
self.fname_mask = ''
self.mat_final = ''
self.todo = ''
self.group_size = 1 # number of images averaged for 'dwi' method.
self.spline_fitting = 0
self.remove_tmp_files = 1
self.verbose = 1
self.plot_graph = 0
self.suffix = '_moco'
self.param = ['2', # degree of polynomial function for moco
'2', # smoothing sigma in mm
'1', # gradientStep
'MeanSquares'] # metric: MI,MeanSquares
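        # For example, passing "-param 2,1,0.5,MeanSquares" on the command line fills
        # these four slots with: polynomial degree 2, 1 mm smoothing, gradient step
        # 0.5 and the MeanSquares metric (values taken from the parser example below,
        # for illustration only).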
self.interp = 'spline' # nn, linear, spline
self.run_eddy = 0
self.mat_eddy = ''
self.min_norm = 0.001
self.swapXY = 0
        self.bval_min = 100  # in case user does not have b-values at exactly 0, set threshold below which data is treated as b=0 (where CSF signal has disappeared).
        self.otsu = 0  # use otsu algorithm to segment dwi data for better moco. Value corresponds to the data threshold. For no segmentation set to 0.
self.iterative_averaging = 1 # iteratively average target image for more robust moco
#=======================================================================================================================
# main
#=======================================================================================================================
def main():
# initialization
start_time = time.time()
path_out = '.'
param_user = ''
# reducing the number of CPU used for moco (see issue #201)
os.environ["ITK_GLOBAL_DEFAULT_NUMBER_OF_THREADS"] = "1"
# get path of the toolbox
status, param.path_sct = commands.getstatusoutput('echo $SCT_DIR')
parser = get_parser()
arguments = parser.parse(sys.argv[1:])
param.fname_data = arguments['-i']
param.fname_bvecs = arguments['-bvec']
if '-bval' in arguments:
param.fname_bvals = arguments['-bval']
if '-bvalmin' in arguments:
param.bval_min = arguments['-bvalmin']
if '-g' in arguments:
param.group_size = arguments['-g']
if '-m' in arguments:
param.fname_mask = arguments['-m']
if '-param' in arguments:
param.param = arguments['-param']
if '-thr' in arguments:
param.otsu = arguments['-thr']
if '-x' in arguments:
param.interp = arguments['-x']
if '-ofolder' in arguments:
path_out = arguments['-ofolder']
if '-r' in arguments:
param.remove_tmp_files = int(arguments['-r'])
if '-v' in arguments:
param.verbose = int(arguments['-v'])
# Get full path
param.fname_data = os.path.abspath(param.fname_data)
param.fname_bvecs = os.path.abspath(param.fname_bvecs)
if param.fname_bvals != '':
param.fname_bvals = os.path.abspath(param.fname_bvals)
if param.fname_mask != '':
param.fname_mask = os.path.abspath(param.fname_mask)
# Extract path, file and extension
path_data, file_data, ext_data = sct.extract_fname(param.fname_data)
path_mask, file_mask, ext_mask = sct.extract_fname(param.fname_mask)
# create temporary folder
sct.printv('\nCreate temporary folder...', param.verbose)
path_tmp = sct.slash_at_the_end('tmp.'+time.strftime("%y%m%d%H%M%S"), 1)
sct.run('mkdir '+path_tmp, param.verbose)
# names of files in temporary folder
ext = '.nii'
dmri_name = 'dmri'
mask_name = 'mask'
bvecs_fname = 'bvecs.txt'
# Copying input data to tmp folder
sct.printv('\nCopying input data to tmp folder and convert to nii...', param.verbose)
sct.run('cp '+param.fname_data+' '+path_tmp+dmri_name+ext_data, param.verbose)
sct.run('cp '+param.fname_bvecs+' '+path_tmp+bvecs_fname, param.verbose)
if param.fname_mask != '':
sct.run('cp '+param.fname_mask+' '+path_tmp+mask_name+ext_mask, param.verbose)
# go to tmp folder
os.chdir(path_tmp)
# convert dmri to nii format
convert(dmri_name+ext_data, dmri_name+ext)
# update field in param (because used later).
# TODO: make this cleaner...
if param.fname_mask != '':
param.fname_mask = mask_name+ext_mask
# run moco
dmri_moco(param)
# come back to parent folder
os.chdir('..')
# Generate output files
path_out = sct.slash_at_the_end(path_out, 1)
sct.create_folder(path_out)
sct.printv('\nGenerate output files...', param.verbose)
sct.generate_output_file(path_tmp+dmri_name+param.suffix+ext, path_out+file_data+param.suffix+ext_data, param.verbose)
sct.generate_output_file(path_tmp+'b0_mean.nii', path_out+'b0'+param.suffix+'_mean'+ext_data, param.verbose)
sct.generate_output_file(path_tmp+'dwi_mean.nii', path_out+'dwi'+param.suffix+'_mean'+ext_data, param.verbose)
# Delete temporary files
if param.remove_tmp_files == 1:
sct.printv('\nDelete temporary files...', param.verbose)
sct.run('rm -rf '+path_tmp, param.verbose)
# display elapsed time
elapsed_time = time.time() - start_time
sct.printv('\nFinished! Elapsed time: '+str(int(round(elapsed_time)))+'s', param.verbose)
#To view results
sct.printv('\nTo view results, type:', param.verbose)
    sct.printv('fslview -m ortho,ortho '+path_out+file_data+param.suffix+' '+file_data+' &\n', param.verbose, 'info')
#=======================================================================================================================
# dmri_moco: motion correction specific to dmri data
#=======================================================================================================================
def dmri_moco(param):
file_data = 'dmri'
ext_data = '.nii'
file_b0 = 'b0'
file_dwi = 'dwi'
mat_final = 'mat_final/'
file_dwi_group = 'dwi_averaged_groups' # no extension
fsloutput = 'export FSLOUTPUTTYPE=NIFTI; ' # for faster processing, all outputs are in NIFTI
ext_mat = 'Warp.nii.gz' # warping field
# Get dimensions of data
sct.printv('\nGet dimensions of data...', param.verbose)
im_data = Image(file_data + ext_data)
nx, ny, nz, nt, px, py, pz, pt = im_data.dim
sct.printv(' ' + str(nx) + ' x ' + str(ny) + ' x ' + str(nz), param.verbose)
# Identify b=0 and DWI images
sct.printv('\nIdentify b=0 and DWI images...', param.verbose)
index_b0, index_dwi, nb_b0, nb_dwi = identify_b0('bvecs.txt', param.fname_bvals, param.bval_min, param.verbose)
# check if dmri and bvecs are the same size
if not nb_b0 + nb_dwi == nt:
sct.printv('\nERROR in '+os.path.basename(__file__)+': Size of data ('+str(nt)+') and size of bvecs ('+str(nb_b0+nb_dwi)+') are not the same. Check your bvecs file.\n', 1, 'error')
sys.exit(2)
# Prepare NIFTI (mean/groups...)
#===================================================================================================================
# Split into T dimension
sct.printv('\nSplit along T dimension...', param.verbose)
im_data_split_list = split_data(im_data, 3)
for im in im_data_split_list:
im.save()
# Merge b=0 images
sct.printv('\nMerge b=0...', param.verbose)
# cmd = fsloutput + 'fslmerge -t ' + file_b0
# for it in range(nb_b0):
# cmd = cmd + ' ' + file_data + '_T' + str(index_b0[it]).zfill(4)
im_b0_list = []
for it in range(nb_b0):
im_b0_list.append(im_data_split_list[index_b0[it]])
im_b0_out = concat_data(im_b0_list, 3)
im_b0_out.setFileName(file_b0 + ext_data)
im_b0_out.save()
sct.printv((' File created: ' + file_b0), param.verbose)
# Average b=0 images
sct.printv('\nAverage b=0...', param.verbose)
file_b0_mean = file_b0+'_mean'
sct.run('sct_maths -i '+file_b0+ext_data+' -o '+file_b0_mean+ext_data+' -mean t', param.verbose)
# if not average_data_across_dimension(file_b0+'.nii', file_b0_mean+'.nii', 3):
# sct.printv('ERROR in average_data_across_dimension', 1, 'error')
# cmd = fsloutput + 'fslmaths ' + file_b0 + ' -Tmean ' + file_b0_mean
# status, output = sct.run(cmd, param.verbose)
# Number of DWI groups
nb_groups = int(math.floor(nb_dwi/param.group_size))
# Generate groups indexes
group_indexes = []
for iGroup in range(nb_groups):
group_indexes.append(index_dwi[(iGroup*param.group_size):((iGroup+1)*param.group_size)])
# add the remaining images to the last DWI group
nb_remaining = nb_dwi%param.group_size # number of remaining images
if nb_remaining > 0:
nb_groups += 1
group_indexes.append(index_dwi[len(index_dwi)-nb_remaining:len(index_dwi)])
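    # Example with hypothetical numbers: nb_dwi = 13 and group_size = 5 gives
    # nb_groups = 2 from the floor division above, then the 3 leftover volumes are
    # appended as an extra group, so the final grouping is 5 + 5 + 3 volumes.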
# DWI groups
file_dwi_mean = []
for iGroup in range(nb_groups):
sct.printv('\nDWI group: ' +str((iGroup+1))+'/'+str(nb_groups), param.verbose)
# get index
index_dwi_i = group_indexes[iGroup]
nb_dwi_i = len(index_dwi_i)
# Merge DW Images
sct.printv('Merge DW images...', param.verbose)
file_dwi_merge_i = file_dwi + '_' + str(iGroup)
im_dwi_list = []
for it in range(nb_dwi_i):
im_dwi_list.append(im_data_split_list[index_dwi_i[it]])
im_dwi_out = concat_data(im_dwi_list, 3)
im_dwi_out.setFileName(file_dwi_merge_i + ext_data)
im_dwi_out.save()
# Average DW Images
sct.printv('Average DW images...', param.verbose)
file_dwi_mean.append(file_dwi + '_mean_' + str(iGroup))
sct.run('sct_maths -i '+file_dwi_merge_i+ext_data+' -o '+file_dwi_mean[iGroup]+ext_data+' -mean t', param.verbose)
# Merge DWI groups means
sct.printv('\nMerging DW files...', param.verbose)
# file_dwi_groups_means_merge = 'dwi_averaged_groups'
im_dw_list = []
for iGroup in range(nb_groups):
im_dw_list.append(file_dwi_mean[iGroup] + ext_data)
im_dw_out = concat_data(im_dw_list, 3)
im_dw_out.setFileName(file_dwi_group + ext_data)
im_dw_out.save()
# cmd = fsloutput + 'fslmerge -t ' + file_dwi_group
# for iGroup in range(nb_groups):
# cmd = cmd + ' ' + file_dwi + '_mean_' + str(iGroup)
# Average DW Images
    # TODO: USEFUL ???
sct.printv('\nAveraging all DW images...', param.verbose)
fname_dwi_mean = file_dwi+'_mean'
sct.run('sct_maths -i '+file_dwi_group+ext_data+' -o '+file_dwi_group+'_mean'+ext_data+' -mean t', param.verbose)
# segment dwi images using otsu algorithm
if param.otsu:
sct.printv('\nSegment group DWI using OTSU algorithm...', param.verbose)
# import module
otsu = importlib.import_module('sct_otsu')
# get class from module
param_otsu = otsu.param() #getattr(otsu, param)
param_otsu.fname_data = file_dwi_group+ext_data
param_otsu.threshold = param.otsu
param_otsu.file_suffix = '_seg'
# run otsu
otsu.otsu(param_otsu)
file_dwi_group = file_dwi_group+'_seg'
# extract first DWI volume as target for registration
nii = Image(file_dwi_group+ext_data)
data_crop = nii.data[:, :, :, index_dwi[0]:index_dwi[0]+1]
nii.data = data_crop
target_dwi_name = 'target_dwi'
nii.setFileName(target_dwi_name+ext_data)
nii.save()
# START MOCO
#===================================================================================================================
# Estimate moco on b0 groups
sct.printv('\n-------------------------------------------------------------------------------', param.verbose)
sct.printv(' Estimating motion on b=0 images...', param.verbose)
sct.printv('-------------------------------------------------------------------------------', param.verbose)
param_moco = param
param_moco.file_data = 'b0'
if index_dwi[0] != 0:
        # If first DWI is not the first volume (most common), then there is at least one b=0 image before. In that case
# select it as the target image for registration of all b=0
param_moco.file_target = file_data + '_T' + str(index_b0[index_dwi[0]-1]).zfill(4)
else:
# If first DWI is the first volume, then the target b=0 is the first b=0 from the index_b0.
param_moco.file_target = file_data + '_T' + str(index_b0[0]).zfill(4)
param_moco.path_out = ''
param_moco.todo = 'estimate'
param_moco.mat_moco = 'mat_b0groups'
moco.moco(param_moco)
# Estimate moco on dwi groups
sct.printv('\n-------------------------------------------------------------------------------', param.verbose)
sct.printv(' Estimating motion on DW images...', param.verbose)
sct.printv('-------------------------------------------------------------------------------', param.verbose)
param_moco.file_data = file_dwi_group
param_moco.file_target = target_dwi_name # target is the first DW image (closest to the first b=0)
param_moco.path_out = ''
# param_moco.todo = 'estimate'
param_moco.todo = 'estimate_and_apply'
param_moco.mat_moco = 'mat_dwigroups'
moco.moco(param_moco)
# create final mat folder
sct.create_folder(mat_final)
# Copy b=0 registration matrices
sct.printv('\nCopy b=0 registration matrices...', param.verbose)
for it in range(nb_b0):
sct.run('cp '+'mat_b0groups/'+'mat.T'+str(it)+ext_mat+' '+mat_final+'mat.T'+str(index_b0[it])+ext_mat, param.verbose)
# Copy DWI registration matrices
sct.printv('\nCopy DWI registration matrices...', param.verbose)
for iGroup in range(nb_groups):
for dwi in range(len(group_indexes[iGroup])):
sct.run('cp '+'mat_dwigroups/'+'mat.T'+str(iGroup)+ext_mat+' '+mat_final+'mat.T'+str(group_indexes[iGroup][dwi])+ext_mat, param.verbose)
# Spline Regularization along T
if param.spline_fitting:
moco.spline(mat_final, nt, nz, param.verbose, np.array(index_b0), param.plot_graph)
# combine Eddy Matrices
if param.run_eddy:
param.mat_2_combine = 'mat_eddy'
param.mat_final = mat_final
moco.combine_matrix(param)
# Apply moco on all dmri data
sct.printv('\n-------------------------------------------------------------------------------', param.verbose)
sct.printv(' Apply moco', param.verbose)
sct.printv('-------------------------------------------------------------------------------', param.verbose)
param_moco.file_data = file_data
param_moco.file_target = file_dwi+'_mean_'+str(0) # reference for reslicing into proper coordinate system
param_moco.path_out = ''
param_moco.mat_moco = mat_final
param_moco.todo = 'apply'
moco.moco(param_moco)
# copy geometric information from header
# NB: this is required because WarpImageMultiTransform in 2D mode wrongly sets pixdim(3) to "1".
im_dmri = Image(file_data+ext_data)
im_dmri_moco = Image(file_data+param.suffix+ext_data)
im_dmri_moco = copy_header(im_dmri, im_dmri_moco)
im_dmri_moco.save()
# generate b0_moco_mean and dwi_moco_mean
cmd = 'sct_dmri_separate_b0_and_dwi -i '+file_data+param.suffix+ext_data+' -bvec bvecs.txt -a 1'
if not param.fname_bvals == '':
cmd = cmd+' -m '+param.fname_bvals
sct.run(cmd, param.verbose)
def get_parser():
# parser initialisation
parser = Parser(__file__)
# initialize parameters
param = Param()
param_default = Param()
# Initialize the parser
parser = Parser(__file__)
parser.usage.set_description(' Motion correction of dMRI data. Some robust features include:\n'
'- group-wise (-g)\n'
'- slice-wise regularized along z using polynomial function (-p). For more info about the method, type: isct_antsSliceRegularizedRegistration\n'
'- masking (-m)\n'
'- iterative averaging of target volume\n')
parser.add_option(name='-i',
type_value='file',
description='Diffusion data',
mandatory=True,
example='dmri.nii.gz')
parser.add_option(name='-bvec',
type_value='file',
description='Bvecs file',
mandatory=True,
                      example='bvecs.txt')
parser.add_option(name='-b',
type_value=None,
description='Bvecs file',
mandatory=False,
deprecated_by='-bvec')
parser.add_option(name='-bval',
type_value='file',
description='Bvals file',
mandatory=False,
                      example='bvals.txt')
parser.add_option(name='-bvalmin',
type_value='float',
description='B-value threshold (in s/mm2) below which data is considered as b=0.',
mandatory=False,
example='50')
parser.add_option(name='-a',
type_value=None,
description='Bvals file',
mandatory=False,
deprecated_by='-bval')
parser.add_option(name='-g',
type_value='int',
description='Group nvols successive dMRI volumes for more robustness.',
mandatory=False,
default_value=param_default.group_size,
example=['2'])
parser.add_option(name='-m',
type_value='file',
description='Binary mask to limit voxels considered by the registration metric.',
mandatory=False,
example=['dmri_mask.nii.gz'])
parser.add_option(name='-param',
type_value=[[','], 'str'],
description='Parameters for registration.'
'ALL ITEMS MUST BE LISTED IN ORDER. Separate with comma.\n'
'1) degree of polynomial function used for regularization along Z. For no regularization set to 0.\n'
'2) smoothing kernel size (in mm).\n'
'3) gradient step. The higher the more deformation allowed.\n'
'4) metric: {MI,MeanSquares}. If you find very large deformations, switching to MeanSquares can help.\n',
default_value=param_default.param,
mandatory=False,
example=['2,1,0.5,MeanSquares'])
parser.add_option(name='-p',
type_value=None,
description='Parameters for registration.'
'ALL ITEMS MUST BE LISTED IN ORDER. Separate with comma.'
'1) degree of polynomial function used for regularization along Z. For no regularization set to 0.'
'2) smoothing kernel size (in mm).'
'3) gradient step. The higher the more deformation allowed.'
'4) metric: {MI,MeanSquares}. If you find very large deformations, switching to MeanSquares can help.',
mandatory=False,
deprecated_by='-param')
parser.add_option(name='-thr',
type_value='float',
description='Segment DW data using OTSU algorithm. Value corresponds to OTSU threshold. For no segmentation set to 0.',
mandatory=False,
default_value=param_default.otsu,
example=['25'])
parser.add_option(name='-t',
type_value=None,
description='Segment DW data using OTSU algorithm. Value corresponds to OTSU threshold. For no segmentation set to 0.',
mandatory=False,
deprecated_by='-thr')
parser.add_option(name='-x',
type_value='multiple_choice',
description='Final Interpolation.',
mandatory=False,
default_value=param_default.interp,
example=['nn', 'linear', 'spline'])
parser.add_option(name='-ofolder',
type_value='folder_creation',
description='Output folder',
mandatory=False,
default_value='./',
example='dmri_moco_results/')
parser.add_option(name='-o',
type_value=None,
description='Output folder.',
mandatory=False,
                      deprecated_by='-ofolder')
parser.usage.addSection('MISC')
parser.add_option(name="-r",
type_value="multiple_choice",
description='Remove temporary files.',
mandatory=False,
default_value='1',
example=['0', '1'])
parser.add_option(name="-v",
type_value='multiple_choice',
description="verbose: 0 = nothing, 1 = classic, 2 = expended",
mandatory=False,
example=['0', '1', '2'],
default_value='1')
return parser
#=======================================================================================================================
# Start program
#=======================================================================================================================
if __name__ == "__main__":
param = Param()
param_default = Param()
main()
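# Example usage (filenames are hypothetical):
#
#     sct_dmri_moco -i dmri.nii.gz -bvec bvecs.txt -g 5 -x spline -ofolder dmri_moco_results/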
|
|
# Copyright (c) 2016, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree. An additional grant
# of patent rights can be found in the PATENTS file in the same directory.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import time
from mcrouter.test.MCProcess import Memcached
from mcrouter.test.McrouterTestCase import McrouterTestCase
class TestDevNull(McrouterTestCase):
config = './mcrouter/test/test_dev_null.json'
extra_args = []
def setUp(self):
        # The order here must correspond to the order of hosts in the .json
self.mc_good = self.add_server(Memcached())
self.mc_wild = self.add_server(Memcached())
def get_mcrouter(self):
return self.add_mcrouter(self.config, extra_args=self.extra_args)
def test_dev_null(self):
mcr = self.get_mcrouter()
# finally setup is done
mcr.set("good:key", "should_be_set")
mcr.set("key", "should_be_set_wild")
mcr.set("null:key", "should_not_be_set")
mcgood_val = self.mc_good.get("good:key")
mcnull_val = self.mc_wild.get("null:key")
mcwild_val = self.mc_wild.get("key")
self.assertEqual(mcgood_val, "should_be_set")
self.assertEqual(mcnull_val, None)
self.assertEqual(mcwild_val, "should_be_set_wild")
self.assertEqual(mcr.delete("null:key2"), None)
self.assertEqual(int(mcr.stats('ods')['dev_null_requests']), 2)
class TestMigratedPools(McrouterTestCase):
config = './mcrouter/test/test_migrated_pools.json'
extra_args = []
def setUp(self):
self.wild_new = self.add_server(Memcached())
self.wild_old = self.add_server(Memcached())
def get_mcrouter(self):
return self.add_mcrouter(
self.config, extra_args=self.extra_args,
replace_map={"START_TIME": (int(time.time()) + 2)})
def test_migrated_pools(self):
mcr = self.get_mcrouter()
#set keys that should be deleted in later phases
for phase in range(1, 5):
self.wild_old.set("get-key-" + str(phase), str(phase))
self.wild_new.set("get-key-" + str(phase), str(phase * 100))
# first we are in the old domain make sure all ops go to
# the old host only
self.assertEqual(mcr.get("get-key-1"), str(1))
mcr.set("set-key-1", str(42))
self.assertEqual(self.wild_old.get("set-key-1"), str(42))
self.assertEqual(self.wild_new.get("set-key-1"), None)
mcr.delete("get-key-1")
#make sure the delete went to old but not new
self.assertEqual(self.wild_old.get("get-key-1"), None)
self.assertEqual(self.wild_new.get("get-key-1"), str(100))
#next phase
time.sleep(2)
# gets/sets go to the old place
self.assertEqual(mcr.get("get-key-2"), str(2))
mcr.set("set-key-2", str(4242))
self.assertEqual(self.wild_old.get("set-key-2"), str(4242))
self.assertEqual(self.wild_new.get("set-key-2"), None)
mcr.delete("get-key-2")
#make sure the delete went to both places
self.assertEqual(self.wild_old.get("get-key-2"), None)
self.assertEqual(self.wild_new.get("get-key-2"), None)
#next phase
time.sleep(2)
# gets/sets go to the new place
self.assertEqual(mcr.get("get-key-3"), str(300))
mcr.set("set-key-3", str(424242))
self.assertEqual(self.wild_old.get("set-key-3"), None)
self.assertEqual(self.wild_new.get("set-key-3"), str(424242))
mcr.delete("get-key-3")
#make sure the delete went to both places
self.assertEqual(self.wild_old.get("get-key-3"), None)
self.assertEqual(self.wild_new.get("get-key-3"), None)
#next phase
time.sleep(2)
# gets/sets go to the new place
self.assertEqual(mcr.get("get-key-4"), str(400))
mcr.set("set-key-4", str(42424242))
self.assertEqual(self.wild_old.get("set-key-4"), None)
self.assertEqual(self.wild_new.get("set-key-4"), str(42424242))
mcr.delete("get-key-4")
#make sure the delete went to the new place only
self.assertEqual(self.wild_old.get("get-key-4"), str(4))
self.assertEqual(self.wild_new.get("get-key-4"), None)
class TestMigratedPoolsFailover(McrouterTestCase):
config = './mcrouter/test/test_migrated_pools_failover.json'
extra_args = []
def setUp(self):
self.a_new = self.add_server(Memcached())
self.a_old = self.add_server(Memcached())
self.b_new = self.add_server(Memcached())
self.b_old = self.add_server(Memcached())
def get_mcrouter(self):
return self.add_mcrouter(
self.config, extra_args=self.extra_args,
replace_map={"START_TIME": (int(time.time()) + 4)})
def test_migrated_pools_failover(self):
mcr = self.get_mcrouter()
#set keys that should be deleted in later phases
for phase in range(1, 5):
self.a_old.set("get-key-" + str(phase), str(phase))
self.a_new.set("get-key-" + str(phase), str(phase * 10))
self.b_old.set("get-key-" + str(phase), str(phase * 100))
self.b_new.set("get-key-" + str(phase), str(phase * 1000))
# first we are in the old domain make sure all ops go to
# the old host only
self.assertEqual(mcr.get("get-key-1"), str(1))
mcr.set("set-key-1", str(42))
self.assertEqual(self.a_old.get("set-key-1"), str(42))
self.a_old.terminate()
self.assertEqual(mcr.get("get-key-1"), str(100))
mcr.set("set-key-1", str(42))
self.assertEqual(self.b_old.get("set-key-1"), str(42))
#next phase
time.sleep(5)
self.assertEqual(mcr.get("get-key-2"), str(200))
mcr.set("set-key-2", str(42))
self.assertEqual(self.b_old.get("set-key-2"), str(42))
#next phase
time.sleep(5)
# gets/sets go to the new place
self.assertEqual(mcr.get("get-key-3"), str(30))
mcr.set("set-key-3", str(424242))
self.assertEqual(self.a_new.get("set-key-3"), str(424242))
self.a_new.terminate()
self.assertEqual(mcr.get("get-key-3"), str(3000))
class TestDuplicateServers(McrouterTestCase):
config = './mcrouter/test/test_duplicate_servers.json'
extra_args = []
def setUp(self):
self.wildcard = self.add_server(Memcached(), 12345)
def get_mcrouter(self):
return self.add_mcrouter(
self.config, '/a/a/', extra_args=self.extra_args)
def test_duplicate_servers(self):
mcr = self.get_mcrouter()
stats = mcr.stats('servers')
# Check that only one proxy destination connection is made
# for all the duplicate servers
self.assertEqual(1, len(stats))
# Hardcoding default server timeout
key = 'localhost:' + str(self.port_map[12345]) + ':ascii:plain-1000'
self.assertTrue(key in stats)
class TestDuplicateServersDiffTimeouts(McrouterTestCase):
config = './mcrouter/test/test_duplicate_servers_difftimeouts.json'
extra_args = []
def setUp(self):
self.wildcard = self.add_server(Memcached(), 12345)
def get_mcrouter(self):
return self.add_mcrouter(
self.config, '/a/a/', extra_args=self.extra_args)
def test_duplicate_servers_difftimeouts(self):
mcr = self.get_mcrouter()
stats = mcr.stats('servers')
# Check that only two proxy destination connections are made
# for all the duplicate servers in pools with diff timeout
self.assertEqual(2, len(stats))
# Hardcoding default server timeout
key = 'localhost:' + str(self.port_map[12345]) + ':ascii:plain-1000'
self.assertTrue(key in stats)
key = 'localhost:' + str(self.port_map[12345]) + ':ascii:plain-2000'
self.assertTrue(key in stats)
class TestPoolServerErrors(McrouterTestCase):
config = './mcrouter/test/test_pool_server_errors.json'
def setUp(self):
self.mc1 = self.add_server(Memcached())
# mc2 is ErrorRoute
self.mc3 = self.add_server(Memcached())
def test_pool_server_errors(self):
mcr = self.add_mcrouter(self.config, '/a/a/')
self.assertIsNone(mcr.get('test'))
stats = mcr.stats('servers')
self.assertEqual(2, len(stats))
self.assertTrue(mcr.set('/b/b/abc', 'valueA'))
self.assertEqual(self.mc1.get('abc'), 'valueA')
self.assertFalse(mcr.set('/b/b/a', 'valueB'))
self.assertTrue(mcr.set('/b/b/ab', 'valueC'))
self.assertEqual(self.mc3.get('ab'), 'valueC')
class TestSamePoolFailover(McrouterTestCase):
config = './mcrouter/test/test_same_pool_failover.json'
extra_args = []
def setUp(self):
self.add_server(Memcached(), 12345)
def get_mcrouter(self):
return self.add_mcrouter(self.config, extra_args=self.extra_args)
def test_same_pool_failover(self):
mcr = self.get_mcrouter()
self.assertEqual(mcr.get('foobar'), None)
self.assertTrue(mcr.set('foobar', 'bizbang'))
self.assertEqual(mcr.get('foobar'), 'bizbang')
mcr.delete('foobar')
self.assertEqual(mcr.get('foobar'), None)
class TestGetFailover(McrouterTestCase):
config = './mcrouter/test/test_get_failover.json'
extra_args = []
def setUp(self):
self.gut = self.add_server(Memcached())
self.wildcard = self.add_server(Memcached())
def get_mcrouter(self):
return self.add_mcrouter(self.config, extra_args=self.extra_args)
def failover_common(self, key):
self.mcr = self.get_mcrouter()
self.assertEqual(self.mcr.get(key), None)
self.assertTrue(self.mcr.set(key, 'bizbang'))
self.assertEqual(self.mcr.get(key), 'bizbang')
        # kill the main host so everything fails over to gut
self.wildcard.terminate()
self.assertEqual(self.mcr.get(key), None)
self.assertTrue(self.mcr.set(key, 'bizbang-fail'))
self.assertEqual(self.mcr.get(key), 'bizbang-fail')
def test_get_failover(self):
self.failover_common('testkey')
# the failover should have set it with a much shorter TTL
# so make sure that we can't get the value after the TTL
# has expired
time.sleep(4)
self.assertEqual(self.mcr.get('testkey'), None)
class TestGetFailoverWithFailoverTag(TestGetFailover):
config = './mcrouter/test/test_get_failover_with_failover_tag.json'
def test_get_failover(self):
key = 'testkey|#|extra=1'
self.failover_common(key)
# Verify the failover tag was appended
fail_key = key + ":failover=1"
self.assertEqual(self.mcr.get(key), 'bizbang-fail')
self.assertEqual(self.gut.get(fail_key), 'bizbang-fail')
class TestLeaseGetFailover(McrouterTestCase):
config = './mcrouter/test/test_get_failover.json'
extra_args = []
def setUp(self):
self.gut = self.add_server(Memcached())
self.wildcard = self.add_server(Memcached())
def get_mcrouter(self):
return self.add_mcrouter(self.config, extra_args=self.extra_args)
def test_lease_get_failover(self):
mcr = self.get_mcrouter()
get_res = {}
get_res['testkey'] = mcr.leaseGet('testkey')
get_res['testkey']['value'] = 'bizbang-lease'
self.assertGreater(get_res['testkey']['token'], 0)
self.assertTrue(mcr.leaseSet('testkey', get_res['testkey']))
get_res['testkey'] = mcr.leaseGet('testkey')
self.assertFalse(get_res['testkey']['token'])
self.assertEqual(get_res['testkey']['value'], 'bizbang-lease')
        # kill the main host so everything fails over to gut
self.wildcard.terminate()
get_res['testkey'] = mcr.leaseGet('testkey')
get_res['testkey']['value'] = 'bizbang-lease-fail'
self.assertGreater(get_res['testkey']['token'], 0)
self.assertTrue(mcr.leaseSet('testkey', get_res['testkey']))
get_res['testkey'] = mcr.leaseGet('testkey')
self.assertFalse(get_res['testkey']['token'])
self.assertEqual(get_res['testkey']['value'], 'bizbang-lease-fail')
# the failover should have set it with a much shorter TTL
# so make sure that we can't get the value after the TTL
# has expired
time.sleep(4)
get_res['testkey'] = mcr.leaseGet('testkey')
self.assertGreater(get_res['testkey']['token'], 0)
self.assertFalse(get_res['testkey']['value'])
class TestMetaGetFailover(McrouterTestCase):
config = './mcrouter/test/test_get_failover.json'
extra_args = []
def setUp(self):
self.gut = self.add_server(Memcached())
self.wildcard = self.add_server(Memcached())
def get_mcrouter(self):
return self.add_mcrouter(self.config, extra_args=self.extra_args)
def test_metaget_failover(self):
mcr = self.get_mcrouter()
get_res = {}
self.assertTrue(mcr.set('testkey', 'bizbang'))
get_res = mcr.metaget('testkey')
self.assertEqual(0, int(get_res['exptime']))
self.wildcard.terminate()
self.assertTrue(mcr.set('testkey', 'bizbang-fail'))
self.assertEqual(mcr.get('testkey'), 'bizbang-fail')
get_res = mcr.metaget('testkey')
self.assertAlmostEqual(int(get_res['exptime']),
int(time.time()) + 3,
delta=1)
# the failover should have set it with a much shorter TTL
# so make sure that we can't get the value after the TTL
# has expired
time.sleep(4)
self.assertEqual(mcr.metaget('testkey'), {})
self.assertEqual(mcr.get('testkey'), None)
class TestFailoverWithLimit(McrouterTestCase):
config = './mcrouter/test/test_failover_limit.json'
def setUp(self):
self.gut = self.add_server(Memcached())
self.wildcard = self.add_server(Memcached())
def get_mcrouter(self):
return self.add_mcrouter(self.config)
def test_failover_limit(self):
mcr = self.get_mcrouter()
self.assertTrue(mcr.set('key', 'value.wildcard'))
self.assertEqual(mcr.get('key'), 'value.wildcard')
self.wildcard.terminate()
# first 12 requests should succeed (10 burst + 2 rate)
self.assertTrue(mcr.set('key', 'value.gut'))
for i in range(11):
self.assertEqual(mcr.get('key'), 'value.gut')
# now every 5th request should succeed
for i in range(10):
for j in range(4):
self.assertIsNone(mcr.get('key'))
self.assertEqual(mcr.get('key'), 'value.gut')
|
|
#!/usr/bin/env python
# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
# vi: set ft=python sts=4 ts=4 sw=4 et:
"""
==========================
fMRI: SPM Auditory dataset
==========================
Introduction
============
The fmri_spm_auditory.py script recreates the classical workflow described in the SPM8 manual (http://www.fil.ion.ucl.ac.uk/spm/doc/manual.pdf)
using the auditory dataset that can be downloaded from http://www.fil.ion.ucl.ac.uk/spm/data/auditory/:
python fmri_spm_auditory.py
Import necessary modules from nipype."""
import nipype.interfaces.io as nio # Data i/o
import nipype.interfaces.spm as spm # spm
import nipype.interfaces.fsl as fsl # fsl
import nipype.interfaces.matlab as mlab # how to run matlab
import nipype.interfaces.utility as util # utility
import nipype.pipeline.engine as pe # pypeline engine
import nipype.algorithms.modelgen as model # model specification
import os # system functions
"""
Preliminaries
-------------
"""
# Set the way matlab should be called
mlab.MatlabCommand.set_default_matlab_cmd("matlab -nodesktop -nosplash")
"""
Setting up workflows
--------------------
In this tutorial we will be setting up a hierarchical workflow for spm
analysis. This will demonstrate how pre-defined workflows can be setup
and shared across users, projects and labs.
Setup preprocessing workflow
----------------------------
This is a generic preprocessing workflow that can be used by different analyses
"""
preproc = pe.Workflow(name='preproc')
"""We strongly encourage to use 4D files insteead of series of 3D for fMRI analyses
for many reasons (cleanness and saving and filesystem inodes are among them). However,
the the workflow presented in the SPM8 manual which this tutorial is based on
uses 3D files. Therefore we leave converting to 4D as an option. We are using `merge_to_4d`
variable, because switching between 3d and 4d requires some additional steps (explauned later on).
Use :class:`nipype.interfaces.fsl.Merge` to merge a series of 3D files along the time
dimension creating a 4d file.
"""
merge_to_4d = True
if merge_to_4d:
merge = pe.Node(interface=fsl.Merge(), name="merge")
merge.inputs.dimension="t"
"""Use :class:`nipype.interfaces.spm.Realign` for motion correction
and register all images to the mean image.
"""
realign = pe.Node(interface=spm.Realign(), name="realign")
"""Use :class:`nipype.interfaces.spm.Coregister` to perform a rigid
body registration of the functional data to the structural data.
"""
coregister = pe.Node(interface=spm.Coregister(), name="coregister")
coregister.inputs.jobtype = 'estimate'
segment = pe.Node(interface=spm.Segment(), name="segment")
"""Uncomment the following line for faster execution
"""
#segment.inputs.gaussians_per_class = [1, 1, 1, 4]
"""Warp functional and structural data to SPM's T1 template using
:class:`nipype.interfaces.spm.Normalize`. The tutorial data set
includes the template image, T1.nii.
"""
normalize_func = pe.Node(interface=spm.Normalize(), name = "normalize_func")
normalize_func.inputs.jobtype = "write"
normalize_struc = pe.Node(interface=spm.Normalize(), name = "normalize_struc")
normalize_struc.inputs.jobtype = "write"
"""Smooth the functional data using
:class:`nipype.interfaces.spm.Smooth`.
"""
smooth = pe.Node(interface=spm.Smooth(), name = "smooth")
"""`write_voxel_sizes` is the input of the normalize interface that is recommended to be set to
the voxel sizes of the target volume. There is no need to set it manually since we van infer it from data
using the following function:
"""
def get_vox_dims(volume):
import nibabel as nb
if isinstance(volume, list):
volume = volume[0]
nii = nb.load(volume)
hdr = nii.get_header()
voxdims = hdr.get_zooms()
return [float(voxdims[0]), float(voxdims[1]), float(voxdims[2])]
"""Here we are connecting all the nodes together. Notice that we add the merge node only if you choose
to use 4D. Also `get_vox_dims` function is passed along the input volume of normalise to set the optimal
voxel sizes.
"""
if merge_to_4d:
preproc.connect([(merge, realign,[('merged_file', 'in_files')])])
preproc.connect([(realign,coregister,[('mean_image', 'target')]),
(coregister, segment,[('coregistered_source','data')]),
(segment, normalize_func, [('transformation_mat','parameter_file')]),
(segment, normalize_struc, [('transformation_mat','parameter_file'),
('modulated_input_image', 'apply_to_files'),
(('modulated_input_image', get_vox_dims), 'write_voxel_sizes')]),
(realign, normalize_func, [('realigned_files', 'apply_to_files'),
(('realigned_files', get_vox_dims), 'write_voxel_sizes')]),
(normalize_func, smooth, [('normalized_files', 'in_files')]),
])
"""
Set up analysis workflow
------------------------
"""
l1analysis = pe.Workflow(name='analysis')
"""Generate SPM-specific design information using
:class:`nipype.interfaces.spm.SpecifyModel`.
"""
modelspec = pe.Node(interface=model.SpecifySPMModel(), name= "modelspec")
"""Generate a first level SPM.mat file for analysis
:class:`nipype.interfaces.spm.Level1Design`.
"""
level1design = pe.Node(interface=spm.Level1Design(), name= "level1design")
level1design.inputs.bases = {'hrf':{'derivs': [0,0]}}
"""Use :class:`nipype.interfaces.spm.EstimateModel` to determine the
parameters of the model.
"""
level1estimate = pe.Node(interface=spm.EstimateModel(), name="level1estimate")
level1estimate.inputs.estimation_method = {'Classical' : 1}
threshold = pe.Node(interface=spm.Threshold(), name="threshold")
"""Use :class:`nipype.interfaces.spm.EstimateContrast` to estimate the
first level contrasts specified in a few steps above.
"""
contrastestimate = pe.Node(interface = spm.EstimateContrast(), name="contrastestimate")
l1analysis.connect([(modelspec,level1design,[('session_info','session_info')]),
(level1design,level1estimate,[('spm_mat_file','spm_mat_file')]),
(level1estimate,contrastestimate,[('spm_mat_file','spm_mat_file'),
('beta_images','beta_images'),
('residual_image','residual_image')]),
(contrastestimate, threshold,[('spm_mat_file','spm_mat_file'),
('spmT_images', 'stat_image')]),
])
"""
Preproc + Analysis pipeline
---------------------------
"""
l1pipeline = pe.Workflow(name='firstlevel')
l1pipeline.connect([(preproc, l1analysis, [('realign.realignment_parameters',
'modelspec.realignment_parameters')])])
"""Pluging in `functional_runs` is a bit more complicated, because model spec expects a list of `runs`.
Every run can be a 4D file or a list of 3D files. Therefore for 3D analysis we need a list of lists and
to make one we need a helper function.
"""
if merge_to_4d:
l1pipeline.connect([(preproc, l1analysis, [('smooth.smoothed_files',
'modelspec.functional_runs')])])
else:
def makelist(item):
return [item]
l1pipeline.connect([(preproc, l1analysis, [(('smooth.smoothed_files',makelist),
'modelspec.functional_runs')])])
"""
Data specific components
------------------------
In this tutorial there is only one subject `M00223`.
Below we set some variables to inform the ``datasource`` about the
layout of our data. We specify the location of the data, the subject
sub-directories and a dictionary that maps each run to a mnemonic (or
field) for the run type (``struct`` or ``func``). These fields become
the output fields of the ``datasource`` node in the pipeline.
"""
# Specify the location of the data downloaded from http://www.fil.ion.ucl.ac.uk/spm/data/auditory/
data_dir = os.path.abspath('spm_auditory_data')
# Specify the subject directories
subject_list = ['M00223']
# Map field names to individual subject runs.
info = dict(func=[['f', 'subject_id', 'f', 'subject_id', range(16,100)]],
struct=[['s', 'subject_id', 's', 'subject_id', 2]])
infosource = pe.Node(interface=util.IdentityInterface(fields=['subject_id']), name="infosource")
"""Here we set up iteration over all the subjects. The following line
is a particular example of the flexibility of the system. The
``datasource`` attribute ``iterables`` tells the pipeline engine that
it should repeat the analysis on each of the items in the
``subject_list``. In the current example, the entire first level
preprocessing and estimation will be repeated for each subject
contained in subject_list.
"""
infosource.iterables = ('subject_id', subject_list)
"""
Now we create a :class:`nipype.interfaces.io.DataGrabber` object and
fill in the information from above about the layout of our data. The
:class:`nipype.pipeline.NodeWrapper` module wraps the interface object
and provides additional housekeeping and pipeline specific
functionality.
"""
datasource = pe.Node(interface=nio.DataGrabber(infields=['subject_id'],
outfields=['func', 'struct']),
name = 'datasource')
datasource.inputs.base_directory = data_dir
datasource.inputs.template = '%s%s/%s%s_%03d.img'
datasource.inputs.template_args = info
datasource.inputs.sort_filelist = True
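"""For instance, with ``subject_id = 'M00223'`` the ``func`` entry of ``info`` expands the
template above to paths such as ``fM00223/fM00223_016.img`` (one volume per index in
``range(16, 100)``), and the ``struct`` entry to ``sM00223/sM00223_002.img``. These paths are
shown purely as an illustration of how ``template`` and ``template_args`` combine; they assume
the standard layout of the downloaded auditory dataset.
"""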
"""
Experimental paradigm specific components
-----------------------------------------
Here we create a structure that provides information
about the experimental paradigm. This is used by the
:class:`nipype.interfaces.spm.SpecifyModel` to create the information
necessary to generate an SPM design matrix.
"""
from nipype.interfaces.base import Bunch
subjectinfo = [Bunch(conditions=['Task'],
onsets=[range(6,84,12)],
durations=[[6]])]
"""Setup the contrast structure that needs to be evaluated. This is a
list of lists. The inner list specifies the contrasts and has the
following format - [Name,Stat,[list of condition names],[weights on
those conditions]. The condition names must match the `names` listed
in the `subjectinfo` function described above.
"""
cont1 = ('active > rest','T', ['Task'],[1])
contrasts = [cont1]
# set up node specific inputs
modelspecref = l1pipeline.inputs.analysis.modelspec
modelspecref.input_units = 'scans'
modelspecref.output_units = 'scans'
modelspecref.time_repetition = 7
modelspecref.high_pass_filter_cutoff = 120
l1designref = l1pipeline.inputs.analysis.level1design
l1designref.timing_units = modelspecref.output_units
l1designref.interscan_interval = modelspecref.time_repetition
l1pipeline.inputs.preproc.smooth.fwhm = [6, 6, 6]
l1pipeline.inputs.analysis.modelspec.subject_info = subjectinfo
l1pipeline.inputs.analysis.contrastestimate.contrasts = contrasts
l1pipeline.inputs.analysis.threshold.contrast_index = 1
"""
Setup the pipeline
------------------
The nodes created above do not describe the flow of data. They merely
describe the parameters used for each function. In this section we
setup the connections between the nodes such that appropriate outputs
from nodes are piped into appropriate inputs of other nodes.
Use the :class:`nipype.pipeline.engine.Pipeline` to create a
graph-based execution pipeline for first level analysis. The config
options tells the pipeline engine to use `workdir` as the disk
location to use when running the processes and keeping their
outputs. The `use_parameterized_dirs` tells the engine to create
sub-directories under `workdir` corresponding to the iterables in the
pipeline. Thus for this pipeline there will be subject specific
sub-directories.
The ``nipype.pipeline.engine.Pipeline.connect`` function creates the
links between the processes, i.e., how data should flow in and out of
the processing nodes.
"""
level1 = pe.Workflow(name="level1")
level1.base_dir = os.path.abspath('spm_auditory_tutorial/workingdir')
level1.connect([(infosource, datasource, [('subject_id', 'subject_id')]),
(datasource,l1pipeline,[('struct', 'preproc.coregister.source')])
])
if merge_to_4d:
level1.connect([(datasource,l1pipeline,[('func','preproc.merge.in_files')])])
else:
level1.connect([(datasource,l1pipeline,[('func','preproc.realign.in_files')])])
"""
Setup storage results
---------------------
Use :class:`nipype.interfaces.io.DataSink` to store selected outputs
from the pipeline in a specific location. This allows the user to
selectively choose important output bits from the analysis and keep
them.
The first step is to create a datasink node and then to connect
outputs from the modules above to storage locations. These take the
following form directory_name[.[@]subdir] where parts between [] are
optional. For example 'realign.@mean' below creates a directory called
realign in 'l1output/subject_id/' and stores the mean image output
from the Realign process in the realign directory. If the @ is left
out, then a sub-directory with the name 'mean' would be created and
the mean image would be copied to that directory.
"""
datasink = pe.Node(interface=nio.DataSink(), name="datasink")
datasink.inputs.base_directory = os.path.abspath('spm_auditory_tutorial/l1output')
def getstripdir(subject_id):
import os
return os.path.join(os.path.abspath('spm_auditory_tutorial/workingdir'),'_subject_id_%s' % subject_id)
# store relevant outputs from various stages of the 1st level analysis
level1.connect([(infosource, datasink,[('subject_id','container'),
(('subject_id', getstripdir),'strip_dir')]),
(l1pipeline, datasink,[('analysis.contrastestimate.con_images','contrasts.@con'),
('analysis.contrastestimate.spmT_images','contrasts.@T')]),
])
"""
Execute the pipeline
--------------------
The code discussed above sets up all the necessary data structures
with appropriate parameters and the connectivity between the
processes, but does not generate any output. To actually run the
analysis on the data the ``nipype.pipeline.engine.Pipeline.Run``
function needs to be called.
"""
if __name__ == '__main__':
level1.run()
level1.write_graph()
|
|
"""
Tools for cryoscope analysis
Brian Tarasinski
Dec 2017
Edited by Adriaan Rol
"""
from typing import Union
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.ticker
from pycqed.analysis.tools.plotting import (set_xlabel, set_ylabel,
flex_colormesh_plot_vs_xy)
import scipy.signal as ss
import scipy.optimize as so
import scipy.interpolate as si
def normalize_sincos(
data,
window_size_frac=500,
window_size=None,
do_envelope=True):
if window_size is None:
window_size = len(data) // window_size_frac
# window size for savgol filter must be odd
window_size -= (window_size + 1) % 2
mean_data_r = ss.savgol_filter(data.real, window_size, 0, 0)
mean_data_i = ss.savgol_filter(data.imag, window_size, 0, 0)
mean_data = mean_data_r + 1j * mean_data_i
if do_envelope:
envelope = np.sqrt(
ss.savgol_filter(
(np.abs(
data -
mean_data))**2,
window_size,
0,
0))
else:
envelope = 1
norm_data = ((data - mean_data) / envelope)
return norm_data
def fft_based_freq_guess_complex(y):
"""
    guess the parameters of a sinusoidal complex signal y by selecting the peak in
    the fft (the frequency is returned in multiples of the sampling rate).
return guess (f, ph, off, amp) for the model
y = amp*exp(2pi i f t + ph) + off.
"""
fft = np.fft.fft(y)[1:len(y)]
freq_guess_idx = np.argmax(np.abs(fft))
if freq_guess_idx >= len(y) // 2:
freq_guess_idx -= len(y)
freq_guess = 1 / len(y) * (freq_guess_idx + 1)
phase_guess = np.angle(fft[freq_guess_idx]) + np.pi / 2
amp_guess = np.absolute(fft[freq_guess_idx]) / len(y)
offset_guess = np.mean(y)
return freq_guess, phase_guess, offset_guess, amp_guess
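# Illustrative sketch (not part of the original module): recover the
# frequency of a synthetic complex oscillation with the guesser above.
# The helper name and the numeric values are assumptions for demonstration.
def _example_fft_freq_guess():
    t = np.arange(200)
    y = 0.3 * np.exp(2j * np.pi * 0.04 * t + 1j * 0.7) + 0.1
    f, ph, off, amp = fft_based_freq_guess_complex(y)
    # f should come out close to 0.04 (in units of the sampling rate)
    return f, ph, off, amp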
class CryoscopeAnalyzer:
def __init__(
self,
time,
complex_data,
norm_window_size=61,
demod_freq=None,
derivative_window_length=None,
derivative_order=2,
nyquist_order=0,
demod_smooth=None):
"""
analyse a cryoscope measurement.
time: array of times (lengths of Z pulse)
complex_data: measured data, combine x- and y- results in a
complex number
norm_window_size: window size used for normalizing sine and cosine
demod_freq: frequency for demodulation. Is guessed if None.
derivative_window_length, derivative_order: parameters of the savgol
filter used for extracting frequency.
Needs some playing around sometimes.
demod_smooth: if the demodulated signal should be smoothed before
taking the derivative, set this to a tuple (window_length, order),
again parametrizing a Savitzky-Golay (savgol) filter.
"""
self.time = time
self.data = complex_data
self.norm_data = normalize_sincos(self.data, window_size=norm_window_size)
self.demod_freq = demod_freq
self.derivative_window_length = derivative_window_length
self.demod_smooth = demod_smooth
self.nyquist_order = nyquist_order
self.sampling_rate = 1 / (self.time[1] - self.time[0])
if self.derivative_window_length is None:
self.derivative_window_length = 7 / self.sampling_rate
self.derivative_window_size = max(
3, int(self.derivative_window_length * self.sampling_rate))
self.derivative_window_size += (self.derivative_window_size + 1) % 2
if self.demod_freq is None:
self.demod_freq = -fft_based_freq_guess_complex(
    self.norm_data)[0] * self.sampling_rate
self.demod_data = np.exp(
2 * np.pi * 1j * self.time * self.demod_freq) * self.norm_data
if self.demod_smooth:
n, o = self.demod_smooth
r, i = self.demod_data.real, self.demod_data.imag
r = ss.savgol_filter(r, n, o, 0)
i = ss.savgol_filter(i, n, o, 0)
self.demod_data = r + 1j * i
# extract the phase. unwrapping only works well if demodulation is
# good!
self.phase = np.unwrap(np.angle(self.demod_data))
# extract frequency by a lowpass-derivative filter.
# use a Savitzky-Golay filter: it takes a sliding window of length
# `window_length`, fits a polynomial, and returns the derivative at the
# middle point
self.detuning = ss.savgol_filter(
    self.phase / (2 * np.pi),
    window_length=self.derivative_window_size,
    polyorder=derivative_order,
    deriv=1) * self.sampling_rate
self.real_detuning = self.get_real_detuning(self.nyquist_order)
def get_real_detuning(self, nyquist_order=None):
if nyquist_order is None:
nyquist_order = self.nyquist_order
real_detuning = self.detuning-self.demod_freq+self.sampling_rate*nyquist_order
return real_detuning
def get_amplitudes(self):
"""
Converts the real detuning to amplitude
"""
real_detuning = self.get_real_detuning()
if hasattr(self, 'freq_to_amp'):
amplitudes = self.freq_to_amp(real_detuning)
return amplitudes
else:
raise NotImplementedError('Add a "freq_to_amp" method.')
def plot_short_time_fft(self, ax=None,
title='Short time Fourier Transform',
window_size=100, **kw):
if ax is None:
ax = plt.gca()
ax.set_title(title)
f, t, Zxx = ss.stft(self.norm_data, fs=self.sampling_rate,
nperseg=window_size,
noverlap=0.95 * window_size, return_onesided=False)
m = np.argsort(f)
ax.pcolormesh(self.time[0] + t, f[m], np.abs(Zxx)[m, :])
ax.set_ylabel('Frequency')
ax.set_xlabel('Time')
formatter = matplotlib.ticker.EngFormatter(unit='s')
ax.xaxis.set_major_formatter(formatter)
formatter = matplotlib.ticker.EngFormatter(unit='Hz')
ax.yaxis.set_major_formatter(formatter)
def plot_raw_data(self, ax=None, title="Raw cryoscope data",
style=".-", **kw):
if ax is None:
ax = plt.gca()
ax.set_title(title)
ax.plot(self.time, self.data.real, style, label="Re", color="C0")
ax.plot(self.time, self.data.imag, style, label="Im", color="C1")
ax.legend()
set_xlabel(ax, 'Time', 's')
set_ylabel(ax, "Amplitude", 'a.u.')
def plot_normalized_data(self, ax=None, title='Normalized cryoscope data',
style=".-", **kw):
if ax is None:
ax = plt.gca()
ax.set_title(title)
ax.plot(self.time, self.norm_data.real, style, label="Re", color="C0")
ax.plot(self.time, self.norm_data.imag, style, label="Im", color="C1")
ax.legend()
ax.set_xlabel("Time")
ax.set_ylabel("Amplitude")
formatter = matplotlib.ticker.EngFormatter(unit='s')
ax.xaxis.set_major_formatter(formatter)
def plot_demodulated_data(self, ax=None,
title='Demodulated cryoscope data',
style=".-", **kw):
if ax is None:
ax = plt.gca()
ax.set_title(title)
ax.plot(self.time, self.demod_data.real, style, label="Re", color="C0")
ax.plot(self.time, self.demod_data.imag, style, label="Im", color="C1")
ax.legend()
ax.set_xlabel("Time")
ax.set_ylabel("Amplitude")
formatter = matplotlib.ticker.EngFormatter(unit='s')
ax.xaxis.set_major_formatter(formatter)
def plot_normalized_data_circle(self, ax=None,
title='Normalized cryoscope data', **kw):
if ax is None:
ax = plt.gca()
ax.set_title(title)
ax.set_xlabel("Re")
ax.set_ylabel("Im")
ax.plot(self.norm_data.real, self.norm_data.imag, ".")
def plot_phase(self, ax=None, title="Cryoscope demodulated phase",
wrap=False, **kw):
if ax is None:
ax = plt.gca()
ax.set_title(title)
if wrap:
ax.plot(self.time, self.phase % (2 * np.pi), ".", color="C0")
else:
ax.plot(self.time, self.phase, ".", label="Im", color="C0")
set_xlabel(ax, 'Time', 's')
set_ylabel(ax, 'Phase', 'rad')
def plot_detuning(self, ax=None,
title="Detuning from demodulation frequency", **kw):
if ax is None:
ax = plt.gca()
ax.set_title(title)
ax.plot(self.time, self.detuning, ".-", color="C0")
set_xlabel(ax, 'Time', 's')
set_ylabel(ax, 'Frequency', 'Hz')
def plot_frequency(self, ax=None, title='Detuning frequency',
nyquists=None, style=".-", show_demod_freq=True, **kw):
if ax is None:
ax = plt.gca()
ax.set_title(title)
if nyquists is None:
nyquists = [self.nyquist_order]
for n in nyquists:
if show_demod_freq:
ax.axhline(-self.demod_freq + self.sampling_rate *
n, linestyle='--', c='grey')
real_detuning = self.get_real_detuning(n)
ax.plot(self.time, real_detuning, style)
set_xlabel(ax, 'Time', 's')
set_ylabel(ax, 'Frequency', 'Hz')
def plot_amplitude(self, ax=None, title='Cryoscope amplitude',
nyquists=None, style=".-", **kw):
if ax is None:
ax = plt.gca()
ax.set_title(title)
amp = self.get_amplitudes()
ax.plot(self.time, amp, style)
set_xlabel(ax, 'Time', 's')
set_ylabel(ax, 'Amplitude', 'V')
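# Illustrative usage sketch (assumption, not part of the original module):
# a constant synthetic detuning of 40 MHz should show up as a flat line in
# plot_detuning(); all names and values below are demo-only.
def _demo_cryoscope_analyzer():
    t = np.arange(0, 100e-9, 1e-9)            # 100 points, 1 ns sampling
    data = np.exp(2j * np.pi * 40e6 * t)      # ideal cryoscope signal
    ca = CryoscopeAnalyzer(t, data)
    ca.plot_detuning()
    return ca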
def sincos_model_real_imag(times, freq, phase):
    r = np.cos(2 * np.pi * times * freq + phase)
    i = np.sin(2 * np.pi * times * freq + phase)
    return np.hstack((r, i))
class DacArchAnalysis:
"""
Given cryoscope time series from a square region in time and amplitude,
fit complex oscillations to obtain a dac arch. Tries to be smart about
supersampled signals; after constructing the arch, it fits a polynomial
in order to facilitate interpolation.
"""
def __init__(
self,
times,
amps,
data,
exclusion_indices=[],
poly_fit_order=2,
nyquist_calc='auto',
invert_frequency_sign=False,
plot_fits=False):
"""
Extract a dac arch from a set of cryoscope-style measurements.
times: array of pulse lengths
amps: array of pulse amplitudes
data: 2D array of measurement results, shape (len(amps), len(times)).
Complex numbers combining the x- and y- values
poly_fit_order: order of model used for fitting the dac-arch
invert_frequency_sign: boolean. might be useful if x and y are
interchanged in measurement
plot_fits: plots how the fit is going, for display.
"""
self.data = data
self.times = times
self.amps = np.array(amps)
self.poly_fit_order = poly_fit_order
self.sampling_rate = 1 / (self.times[1] - self.times[0])
self.freqs = []
self.excl_amps = []
self.excl_freqs = []
self.exclusion_indices = exclusion_indices
self.norm_data = []
for d in self.data:
self.norm_data.append(normalize_sincos(d, window_size=11))
self.norm_data = np.array(self.norm_data)
for nd in self.norm_data:
guess_f, guess_ph, *_ = fft_based_freq_guess_complex(nd)
guess_f *= self.sampling_rate
nd_real_imag = np.hstack([nd.real, nd.imag])
fit, err = so.curve_fit(sincos_model_real_imag,
self.times, nd_real_imag,
p0=[guess_f, guess_ph])
if plot_fits:
plt.figure()
plt.plot(self.times, nd.real, "-.b")
plt.plot(self.times, nd.imag, ".-r")
tt = np.linspace(self.times[0], self.times[-1], 300)
plt.plot(tt, sincos_model_real_imag(tt, *fit)[:len(tt)], "--b")
plt.plot(tt, sincos_model_real_imag(tt, *fit)[len(tt):], "--r")
self.freqs.append(fit[0])
self.freqs = np.array(self.freqs)
if nyquist_calc == 'auto':
self.nyquist = np.cumsum(self.freqs[1:] < self.freqs[:-1])
self.nyquist = np.hstack(([0], self.nyquist))
elif nyquist_calc == 'disabled':
self.nyquist = np.zeros(len(self.freqs))
else:
raise NotImplementedError()
# FIXME: add proper support for auto nyquist;
# a proper nyquist order should be extracted
self.freqs = self.freqs + self.nyquist * self.sampling_rate
if invert_frequency_sign:
self.freqs = -self.freqs
# Exclude data at the exclusion indices
self.filt_freqs = np.delete(self.freqs, self.exclusion_indices)
self.filt_amps = np.delete(self.amps, self.exclusion_indices)
self.excl_freqs = self.freqs[self.exclusion_indices]
self.excl_amps = self.amps[self.exclusion_indices]
self.poly_fit = np.polyfit(self.filt_amps, self.filt_freqs,
self.poly_fit_order)
self._inv_interpolation = None
def amp_to_freq(self, amp):
"""
Find the frequency that corresponds to a given amplitude by
evaluating the fit to the extracted data.
"""
return np.polyval(self.poly_fit, amp)
def freq_to_amp(self, freq, kind='root_parabola', **kw):
"""
Find the amplitude that corresponds to a given frequency, by
numerically inverting the fit.
freq: The frequency or set of frequencies.
kind: Which technique to use:
"interpolate": Uses numerical interpolation to find the inverse.
Only works if freq is in the range of measured dac values.
"root": Finds the inverse of the model numerical. Slow, but can
extrapolate.
**kw : get passed on to methods that implement the different "kind"
of calculations.
"""
if kind == 'interpolate':
if self._inv_interpolation is None:
no_samples = 50
self.sampled_amps = np.linspace(
    np.min(self.amps), np.max(self.amps), no_samples)
self.sampled_freqs = self.amp_to_freq(self.sampled_amps)
self._inv_interpolation = si.interp1d(
self.sampled_freqs, self.sampled_amps, kind='cubic',
bounds_error=False,
fill_value='extrapolate')
return self._inv_interpolation(freq)
if kind == 'root':
return np.vectorize(self._freq_to_amp_root)(freq)
if kind == 'root_parabola':
# return self._freq_to_amp_root_parabola(freq, **kw)
return freq_to_amp_root_parabola(freq=freq,
poly_coeffs=self.poly_fit, **kw)
raise ValueError("`kind` not understood")
# def _freq_to_amp_root_parabola(self, freq, positive_branch=True):
# """
# Converts freq in Hz to amplitude.
# Requires "poly_fit" to be set to the polynomial values
# extracted from the cryoscope flux arc.
# Assumes a parabola to find the roots but should also work for a higher
# order polynomial, except that it will pick the wrong branch.
# N.B. this method assumes that the polycoeffs are with respect to the
# amplitude in units of V.
# """
# # recursive allows dealing with an array of freqs
# if isinstance(freq, (list, np.ndarray)):
# return np.array([self._freq_to_amp_root_parabola(
# f, positive_branch=positive_branch) for f in freq])
# p = np.poly1d(self.poly_fit)
# sols = (p-freq).roots
# # sols returns 2 solutions (for a 2nd order polynomial)
# if positive_branch:
# sol = np.max(sols)
# else:
# sol = np.min(sols)
# # imaginary part is ignored, instead sticking to closest real value
# return np.real(sol)
def _freq_to_amp_root(self, freq):
"""
Find the amplitude corresponding to a given frequency by numerically
inverting the fit.
"""
poly = np.array(self.poly_fit)
poly[-1] -= freq
roots = np.roots(poly)
# return the real solution closest to the given amp range
real_mask = np.abs(roots.imag) < 1e-8
if not any(real_mask):
return None
dist_from_range = np.abs(roots[real_mask] - np.mean(self.amps))
return roots[real_mask][np.argmin(dist_from_range)].real
def plot_freqs(self, ax=None, title='', **kw):
if ax is None:
ax = plt.gca()
ax.set_title(title)
amps_sorted = [x for x, _ in sorted(
zip(self.filt_amps, self.filt_freqs))]
freqs_sorted = [y for _, y in sorted(
zip(self.filt_amps, self.filt_freqs))]
ax.plot(amps_sorted, freqs_sorted, ".-")
ax.scatter(self.excl_amps, self.excl_freqs, marker='x', color='C3')
aa = np.linspace(min(self.amps), max(self.amps), 50)
ax.plot(aa, np.polyval(self.poly_fit, aa), label='fit')
set_xlabel(ax, "Amplitude", 'V')
set_ylabel(ax, 'Detuning', 'Hz')
def plot_ffts(self, ax=None, title='', nyquist_unwrap=False, **kw):
if ax is None:
ax = plt.gca()
if nyquist_unwrap:
raise NotImplementedError
ax.set_title(title)
ffts = np.fft.fft(self.norm_data)
freqs = np.arange(len(ffts[0])) * self.sampling_rate / len(ffts[0])
flex_colormesh_plot_vs_xy(xvals=np.array(self.amps), yvals=freqs,
zvals=np.abs(ffts).T,
ax=ax)
ax.scatter(self.filt_amps, self.filt_freqs % self.sampling_rate, color="C1",
facecolors='none', label='Dominant freqs.')
ax.scatter(self.excl_amps, self.excl_freqs % self.sampling_rate,
color="C3",
marker='x')
aa = np.linspace(min(self.amps), max(self.amps), 300)
ax.plot(aa, np.polyval(self.poly_fit, aa) % self.sampling_rate, "r",
label='fit')
set_xlabel(ax, "Amplitude", 'V') # a.u.
set_ylabel(ax, 'Detuning', 'Hz')
ax.legend()
def freq_to_amp_root_parabola(freq, poly_coeffs, positive_branch=True):
"""
Converts freq in Hz to amplitude in V.
Requires "poly_coeffs" to be set to the polynomial values
extracted from the cryoscope flux arc.
Assumes a parabola to find the roots but should also work for a higher
order polynomial, except that it will pick the wrong branch.
N.B. this method assumes that the polycoeffs are with respect to the
amplitude in units of V.
"""
# recursive allows dealing with an array of freqs
if isinstance(freq, (list, np.ndarray)):
return np.array([freq_to_amp_root_parabola(
freq=f, poly_coeffs=poly_coeffs,
positive_branch=positive_branch) for f in freq])
p = np.poly1d(poly_coeffs)
sols = (p-freq).roots
# sols returns 2 solutions (for a 2nd order polynomial)
if positive_branch:
sol = np.max(sols)
else:
sol = np.min(sols)
# imaginary part is ignored, instead sticking to closest real value
return np.real(sol)
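# Illustrative sketch (assumption, not part of the original module): invert a
# made-up parabolic flux arc f(A) = 1e9 * A**2 using the helper above.
def _demo_freq_to_amp_root_parabola():
    poly_coeffs = [1e9, 0.0, 0.0]             # f(A) = 1e9 * A**2, Hz per V**2
    amps = freq_to_amp_root_parabola(freq=np.array([1e6, 4e6, 9e6]),
                                     poly_coeffs=poly_coeffs,
                                     positive_branch=True)
    # expected roughly [0.032, 0.063, 0.095] V, i.e. sqrt(freq / 1e9)
    return amps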
|
|
"""This is the Bokeh charts interface. It gives you a high level API to build
complex plots in a simple way.
This is the ChartObject class, a minimal prototype class to build more chart
types on top of it. It provides the mechanisms to support the shared chained
methods.
"""
#-----------------------------------------------------------------------------
# Copyright (c) 2012 - 2014, Continuum Analytics, Inc. All rights reserved.
#
# Powered by the Bokeh Development Team.
#
# The full license is in the file LICENSE.txt, distributed with this software.
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
from __future__ import absolute_import
from six import string_types
from collections import OrderedDict
from ..properties import bokeh_integer_types, Datetime
try:
import numpy as np
except ImportError:
np = None
try:
import pandas as pd
except ImportError:
pd = None
try:
import blaze
except ImportError:
blaze=None
#-----------------------------------------------------------------------------
# Classes and functions
#-----------------------------------------------------------------------------
DEFAULT_INDEX_ALIASES = list('abcdefghijklmnopqrstuvz1234567890')
DEFAULT_INDEX_ALIASES += list(zip(DEFAULT_INDEX_ALIASES, DEFAULT_INDEX_ALIASES))
class DataAdapter(object):
"""
Adapter object used to normalize Charts inputs to a common interface.
Supported inputs are dict, list, tuple, np.ndarray and pd.DataFrame.
"""
def __init__(self, data, index=None, columns=None, force_alias=True):
self.__values = data
self._values = self.validate_values(data)
self.convert_index_to_int = False
self._columns_map = {}
self.convert_items_to_dict = False
if columns is None and force_alias:
# no column 'labels' defined for data... in this case we use
# default names
keys = getattr(self._values, 'keys', None)
if callable(keys):
columns = list(keys())
elif keys is None:
columns = list(map(str, range(len(data))))
else:
columns = list(keys)
if columns:
self._columns = columns
# define a mapping between the real keys to access data and the aliases
# we have defined using 'columns'
self._columns_map = dict(zip(columns, self.keys()))
if index is not None:
self._index = index
self.convert_items_to_dict = True
elif force_alias:
_index = getattr(self._values, 'index', None)
# check because if it is a callable self._values is not a
# dataframe (probably a list)
if _index is None:
indexes = self.index
if isinstance(indexes[0], int):
self._index = DEFAULT_INDEX_ALIASES[:][:len(self.values()[0])]
self.convert_items_to_dict = True
elif not callable(_index):
self._index = list(_index)
self.convert_items_to_dict = True
else:
self._index = DEFAULT_INDEX_ALIASES[:][:len(self.values()[0])]
self.convert_items_to_dict = True
@staticmethod
def is_number(value):
numbers = (float, ) + bokeh_integer_types
return isinstance(value, numbers)
@staticmethod
def is_datetime(value):
try:
dt = Datetime(value)
dt # shut up pyflakes
return True
except ValueError:
return False
@staticmethod
def validate_values(values):
if np and isinstance(values, np.ndarray):
if len(values.shape) == 1:
return np.array([values])
else:
return values
elif pd and isinstance(values, pd.DataFrame):
return values
elif isinstance(values, (dict, OrderedDict)):
if all(DataAdapter.is_number(x) for x in values.values()):
return values
return values
elif isinstance(values, (list, tuple)):
if all(DataAdapter.is_number(x) for x in values):
return [values]
return values
elif hasattr(values, '__array__'):
values = pd.DataFrame(np.asarray(values))
return values
# TODO: Improve this error message..
raise TypeError("Input type not supported! %s" % values)
def index_converter(self, x):
key = self._columns_map.get(x, x)
if self.convert_index_to_int:
key = int(key)
return key
def keys(self):
# assuming it's a dict or dataframe
keys = getattr(self._values, "keys", None)
if callable(keys):
return list(keys())
elif keys is None:
self.convert_index_to_int = True
indexes = range(len(self._values))
return list(map(str, indexes))
else:
return list(keys)
def __len__(self):
return len(self.values())
def __iter__(self):
for k in self.keys():
yield k
def __getitem__(self, key):
val = self._values[self.index_converter(key)]
# if we have "index aliases" we need to remap the values...
if self.convert_items_to_dict:
val = dict(zip(self._index, val))
return val
def values(self):
return self.normalize_values(self._values)
@staticmethod
def normalize_values(values):
_values = getattr(values, "values", None)
if callable(_values):
return list(_values())
elif _values is None:
return values
else:
# assuming it's a dataframe; in that case it returns transposed
# values compared to its dict equivalent.
return list(_values.T)
def items(self):
return [(key, self[key]) for key in self]
def iterkeys(self):
return iter(self)
def itervalues(self):
for k in self:
yield self[k]
def iteritems(self):
for k in self:
yield (k, self[k])
@property
def columns(self):
try:
return self._columns
except AttributeError:
return list(self.keys())
@property
def index(self):
try:
return self._index
except AttributeError:
index = getattr(self._values, "index", None)
if not callable(index) and index is not None:
# guess it's a pandas dataframe..
return index
# no, it's not. So it's probably a list so let's get the
# values and check
values = self.values()
if isinstance(values, dict):
return list(values.keys())
else:
first_el = self.values()[0]
if isinstance(first_el, dict):
indexes = list(first_el.keys())
else:
indexes = range(0, len(self.values()[0]))
self._index = indexes
return indexes
#-----------------------------------------------------------------------------
# Convenience methods
#-----------------------------------------------------------------------------
@staticmethod
def get_index_and_data(values, index=None):
"""Parse values (that must be one of the DataAdapter supported
input types) and create a separate index and data,
depending on the values type and index.
Args:
values (iterable): container that holds the data to be plotted
using the Chart classes
Returns:
A tuple of (index, values), where: ``index`` is an iterable that
represents the data index and ``values`` is an iterable containing
the values to be plotted.
"""
_values = DataAdapter(values, force_alias=False)
if hasattr(values, 'keys'):
if index is not None:
if isinstance(index, string_types):
xs = _values[index]
else:
xs = index
else:
try:
xs = _values.index
except AttributeError:
xs = values.index
else:
if index is None:
xs = _values.index
elif isinstance(index, string_types):
xs = _values[index]
else:
xs = index
return xs, _values
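# Illustrative usage sketch (assumption, not part of the original module):
#
#   adapter = DataAdapter({'python': [2, 3, 7, 5], 'pypy': [12, 33, 47, 15]})
#   adapter.columns      # column labels, here the dict keys
#   adapter['python']    # values remapped onto the default index aliases
#   xs, values = DataAdapter.get_index_and_data([[1, 2, 3], [4, 5, 6]])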
|
|
from graph_generator import *
from mystruct import *
import numpy as np
import math
def objective_function(edges):
"""transform to graph edges to a quadratic objective function"""
##larger one
x = [edge.prim2.locs[edge.loc2][1][0] if edge.tp == 'h' else edge.prim2.locs[edge.loc2][0][0] for edge in edges]
##smaller one
y = [edge.prim1.locs[edge.loc1][1][0] if edge.tp == 'h' else edge.prim1.locs[edge.loc1][0][0] for edge in edges]
##constant
c = [edge.prim2.locs[edge.loc2][1][1] - edge.prim1.locs[edge.loc1][1][1] - edge.ideal if edge.tp == 'h' else edge.prim2.locs[edge.loc2][0][1] - edge.prim1.locs[edge.loc1][0][1] - edge.ideal for edge in edges]
obj = [(xe,ye,ce) for xe,ye,ce in zip(x,y,c)]
return obj
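# Each term in `obj` is a tuple (i_x, i_y, c): i_x and i_y index into `parms`
# (the larger and smaller coordinate of an edge) and c is a constant offset
# minus the ideal distance. The routines below minimise the asymmetric
# quadratic penalty
#     sum over terms of a * (x - y + c)**2,
# where a is 1 when x - y + c >= 0 (constraint satisfied) and 10 otherwise.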
def get_first_derivative(obj,pl,parms):
dev = np.array(np.zeros(len(pl)))
for term in obj:
x = parms[term[0]].value
y = parms[term[1]].value
c = term[2]
if x - y + c >= 0:
a = 1
else:
a = 10
if parms[term[0]].tp == 'h' or parms[term[0]].tp == 'v':
i1 = pl.index(term[0])
dev[i1] = dev[i1] + 2*a*(x-y+c)
elif parms[term[0]].tp == 'd':
deps = parms[term[0]].dependent
for dep in deps:
parm = dep[0]
if parm.tp != 'h' and parm.tp != 'v':
continue
weight = dep[1]
i1 = pl.index(parms.index(parm))
dev[i1] = dev[i1] + 2*a*weight*(x-y+c)
if parms[term[1]].tp == 'h' or parms[term[1]].tp == 'v':
i2 = pl.index(term[1])
dev[i2] = dev[i2] - 2*a*(x-y+c)
elif parms[term[1]].tp == 'd':
deps = parms[term[1]].dependent
for dep in deps:
parm = dep[0]
if parm.tp != 'h' and parm.tp != 'v':
continue
weight = dep[1]
i2 = pl.index(parms.index(parm))
dev[i2] = dev[i2] + 2*a*weight*(x-y+c)
return dev
def get_hessian(obj,pl,parms):
hessian = np.zeros([len(pl),len(pl)])
for term in obj:
x = parms[term[0]].value
y = parms[term[1]].value
c = term[2]
## a is the scale factor for the penalty
if x - y + c >= 0:
a = 1
else:
a = 10
if parms[term[0]].tp == 'h' or parms[term[0]].tp == 'v':
i1 = pl.index(term[0])
hessian[i1][i1] = hessian[i1][i1] + 2*a
else:
i1 = -1
if parms[term[1]].tp == 'h' or parms[term[1]].tp == 'v':
i2 = pl.index(term[1])
hessian[i2][i2] = hessian[i2][i2] + 2*a
else:
i2 = -1
if i1 >= 0 and i2 >= 0:
hessian[i1][i2] = hessian[i1][i2] - 2*a
hessian[i2][i1] = hessian[i2][i1] - 2*a
return hessian
def evaluate_obj(obj,parms):
result = 0
for term in obj:
x = parms[term[0]].value
y = parms[term[1]].value
c = term[2]
if x - y + c >= 0:
a = 1
else:
a = 10
result = result + a*((x - y + c)**2)
return result
def new_updates(updates,alpha):
new = [alpha * update for update in updates]
return new
def evaluate_updated_obj(obj,pl,parms,updates,alpha):
newup = new_updates(updates,alpha)
result = 0
for term in obj:
x = parms[term[0]].value
if parms[term[0]].tp == 'h' or parms[term[0]].tp == 'v':
xu = newup[pl.index(term[0])]
xn = x - xu
else:
xn = x
y = parms[term[1]].value
if parms[term[1]].tp == 'h' or parms[term[1]].tp == 'v':
yu = newup[pl.index(term[1])]
yn = y - yu
else:
yn = y
c = term[2]
if xn - yn + c >= 0:
a = 1
else:
a = 10
result = result + a*((xn - yn + c)**2)
return result
def line_search(obj,pl,parms,updates):
alpha = 1.0
f = evaluate_obj(obj,parms)
# print "f ",f
# print "updated ",evaluate_updated_obj(obj,pl,parms,updates,alpha)
while True:
newf = evaluate_updated_obj(obj,pl,parms,updates,alpha)
if alpha < 1e-4 or newf < f:
break
alpha = alpha/2
# print "alpha ",alpha
return alpha
def gradient_descent_iter(obj,pl,parms):
# iterate a fixed number of times; convergence is not checked
for it in range(500):
# hessian = get_hessian(obj,pl,parms)
dev = get_first_derivative(obj,pl,parms)
# updates = np.linalg.solve(hessian, dev)
updates = dev
step_size = line_search(obj,pl,parms,updates)
# print "hessian ",hessian
# print "dev ",dev
# print "updates ",updates
for i in range(len(pl)):
parms[pl[i]].value = parms[pl[i]].value - step_size*updates[i]
def dog_leg_iter(obj,pl,parms):
delta = 20
for it in range(400):
hessian = get_hessian(obj,pl,parms)
dev = get_first_derivative(obj,pl,parms)
dev = np.matrix(dev).transpose()
pu = -np.linalg.norm(dev)/float(dev.transpose()*hessian*dev)*dev
pb = -np.linalg.solve(hessian,dev)
nu = np.linalg.norm(pu)
nb = np.linalg.norm(pb)
print "nu nb",nu,nb
if nu > delta:
updates = delta/nu*pu
elif nu <= delta and nb > delta:
#solve quadratic function here
a = np.linalg.norm(pb-pu)**2
b = 2*float((pb-pu).transpose()*(2*pu-pb))
c = np.linalg.norm(2*pu-pb)**2 - delta**2
a = a/c
b = b/c
c = 1.0
r = math.sqrt(b**2 - 4*a*c)
tao = (-b + r)/(2*a)
if tao > 2 or tao < 1:
tao = (-b - r)/(2*a)
print "tao ", tao
updates = pu + (tao-1)*(pb-pu)
else:
updates = pb
print "updates ",updates
print "dev ",dev
for i in range(len(pl)):
parms[pl[i]].value = parms[pl[i]].value + updates.item((i,0))
def round_up_parms(parms):
for parm in parms:
parm.value = int(round(parm.value))
def update_dependent_prim_locs(graph):
parms = graph.parms
for dep in graph.dep:
slave = dep[0]
master = dep[1]
tp = dep[2]
i1 = parms[master.locs[0][0][0]].value
i2 = parms[master.locs[1][0][0]].value
j1 = parms[master.locs[0][1][0]].value
j2 = parms[master.locs[1][1][0]].value
w = (i2 - i1)/( j2 - j1 + 0.0001)
if tp == 't': #stem-beam-top
parms[slave.locs[0][0][0]].update_weights([w,-w,1.0])
slave.locs[0][0][1] = w*slave.locs[0][1][1] - w*master.locs[0][1][1] + master.locs[0][0][1]##set offset
elif tp == 'b': #stem-beam-bottom
parms[slave.locs[1][0][0]].update_weights([w,-w,1.0])
slave.locs[1][0][1]= w*slave.locs[1][1][1] - w*master.locs[0][1][1] + master.locs[0][0][1]##set offset
else:#beam-to-beam-parallel, using numbers
num = tp
parms[slave.locs[0][0][0]].update_weights([w,-w,1.0])
parms[slave.locs[1][0][0]].update_weights([w,-w,1.0])
slave.locs[0][0][1] = w*slave.locs[0][1][1] - w*master.locs[0][1][1] + master.locs[0][0][1] + num*glob.inter_beam_dis##set offset
slave.locs[1][0][1]= w*slave.locs[1][1][1] - w*master.locs[0][1][1] + master.locs[0][0][1] + num*glob.inter_beam_dis##set offset
# print "master left:",parms[master.locs[0][0][0]].value,master.locs[0][0][1]
# print "slave left:",parms[slave.locs[0][0][0]].value,slave.locs[0][0][1]
# print "left value:",parms[slave.locs[0][0][0]].value + slave.locs[0][0][1]
for parm in parms:
if parm.tp == 'd':
parm.update_value()
def new_gradient_descent_iter(graph,obj,pl,parms):
iterations = 0
while True:
# hessian = get_hessian(obj,pl,parms)
dev = get_first_derivative(obj,pl,parms)
# updates = np.linalg.solve(hessian, dev)
updates = dev
step_size = line_search(obj,pl,parms,updates)
# print "hessian ",hessian
# print "dev ",dev
# print "updates ",updates
old_value = [parms[p].value for p in pl]
for i in range(len(pl)):
parms[pl[i]].value = parms[pl[i]].value - step_size*updates[i]
update_dependent_prim_locs(graph)
new_value = [parms[p].value for p in pl]
if sum([abs(v1 - v2) for (v1,v2) in zip(old_value,new_value)]) < 1e-1:
break
iterations = iterations + 1
#else:
# print sum([abs(v1 - v2) for (v1,v2) in zip(old_value,new_value)])
def optimize_graph_parms(graph):
""" optimize the graph parameters """
edges = graph.edges
parms = graph.parms
ph = [edge.prim1.locs[edge.loc1][1][0] for edge in edges if edge.tp == 'h'] + [edge.prim2.locs[edge.loc2][1][0] for edge in edges if edge.tp == 'h']#horizontal ones
pv = [edge.prim1.locs[edge.loc1][0][0] for edge in edges if edge.tp == 'v'] + [edge.prim2.locs[edge.loc2][0][0] for edge in edges if edge.tp == 'v']  # vertical ones
pl = list(set(pv + ph))
pl = [p for p in pl if parms[p].tp != 'c' and parms[p].tp != 'd']
obj = objective_function(edges)
print "obj ",obj
print "pl ",pl
print "edges ",[(edge.prim2.name, edge.prim1.name) for edge in edges]
new_gradient_descent_iter(graph,obj,pl,parms)
# update_dependent_prim_locs(graph)
# dog_leg_iter(obj,pl,parms)
round_up_parms(parms)
|
|
# -*- coding: utf-8 -*-
"""Test utilities."""
#
# (C) Pywikibot team, 2013-2014
#
# Distributed under the terms of the MIT license.
#
from __future__ import print_function, unicode_literals
__version__ = '$Id$'
#
import os
import re
import subprocess
import sys
import time
import traceback
from warnings import warn
if sys.version_info[0] > 2:
import six
import pywikibot
from pywikibot import config
from pywikibot.site import Namespace
from pywikibot.data.api import CachedRequest
from pywikibot.data.api import Request as _original_Request
from tests import _pwb_py
from tests import unittest # noqa
class DrySiteNote(RuntimeWarning):
"""Information regarding dry site."""
pass
def expected_failure_if(expect):
"""
Unit test decorator to expect failure under conditions.
@param expect: Flag to check if failure is expected
@type expect: bool
"""
if expect:
return unittest.expectedFailure
else:
return lambda orig: orig
def allowed_failure(func):
"""
Unit test decorator to allow failure.
Test runners each have different interpretations of what should be
the result of an @expectedFailure test if it succeeds. Some consider
it to be a pass; others a failure.
This decorator runs the test and, if it is a failure, reports the result
and considers it a skipped test.
"""
def wrapper(*args, **kwargs):
try:
func(*args, **kwargs)
except AssertionError:
tb = traceback.extract_tb(sys.exc_info()[2])
for depth, line in enumerate(tb):
if re.match('^assert[A-Z]', line[2]):
break
tb = traceback.format_list(tb[:depth])
pywikibot.error('\n' + ''.join(tb)[:-1]) # remove \n at the end
raise unittest.SkipTest('Test is allowed to fail.')
except Exception:
pywikibot.exception(tb=True)
raise unittest.SkipTest('Test is allowed to fail.')
wrapper.__name__ = func.__name__
return wrapper
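# Illustrative usage sketch (assumption, not part of the original module):
# a failing assertion inside the decorated test is reported and then skipped.
def _example_allowed_failure_usage():
    class _DemoTest(unittest.TestCase):
        @allowed_failure
        def test_known_flaky_behaviour(self):
            self.assertTrue(False)
    return _DemoTest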
def allowed_failure_if(expect):
"""
Unit test decorator to allow failure under conditions.
@param expect: Flag to check if failure is allowed
@type expect: bool
"""
if expect:
return allowed_failure
else:
return lambda orig: orig
def add_metaclass(cls):
"""Call six's add_metaclass with the site's __metaclass__ in Python 3."""
if sys.version_info[0] > 2:
return six.add_metaclass(cls.__metaclass__)(cls)
else:
assert cls.__metaclass__
return cls
def fixed_generator(iterable):
"""Return a dummy generator ignoring all parameters."""
def gen(*args, **kwargs):
for item in iterable:
yield item
return gen
class DryParamInfo(dict):
"""Dummy class to use instead of L{pywikibot.data.api.ParamInfo}."""
def __init__(self, *args, **kwargs):
"""Constructor."""
super(DryParamInfo, self).__init__(*args, **kwargs)
self.modules = set()
self.action_modules = set()
self.query_modules = set()
self.query_modules_with_limits = set()
self.prefixes = set()
def fetch(self, modules, _init=False):
"""Prevented method."""
raise Exception(u'DryParamInfo.fetch(%r, %r) prevented'
% (modules, _init))
def parameter(self, module, param_name):
"""Load dry data."""
return self[module][param_name]
class DummySiteinfo():
"""Dummy class to use instead of L{pywikibot.site.Siteinfo}."""
def __init__(self, cache):
"""Constructor."""
self._cache = dict((key, (item, False)) for key, item in cache.items())
def __getitem__(self, key):
"""Get item."""
return self.get(key, False)
def __setitem__(self, key, value):
"""Set item."""
self._cache[key] = (value, False)
def get(self, key, get_default=True, cache=True, expiry=False):
"""Return dry data."""
# Default values are always expired, so only expiry=False doesn't force
# a reload
force = expiry is not False
if not force and key in self._cache:
loaded = self._cache[key]
if not loaded[1] and not get_default:
raise KeyError(key)
else:
return loaded[0]
elif get_default:
default = pywikibot.site.Siteinfo._get_default(key)
if cache:
self._cache[key] = (default, False)
return default
else:
raise KeyError(key)
def __contains__(self, key):
"""Return False."""
return False
def is_recognised(self, key):
"""Return None."""
return None
def get_requested_time(self, key):
"""Return False."""
return False
class DryRequest(CachedRequest):
"""Dummy class to use instead of L{pywikibot.data.api.Request}."""
def __init__(self, *args, **kwargs):
"""Constructor."""
_original_Request.__init__(self, *args, **kwargs)
@classmethod
def create_simple(cls, **kwargs):
"""Skip CachedRequest implementation."""
return _original_Request.create_simple(**kwargs)
def _expired(self, dt):
"""Never invalidate cached data."""
return False
def _write_cache(self, data):
"""Never write data."""
return
def submit(self):
"""Prevented method."""
raise Exception(u'DryRequest rejecting request: %r'
% self._params)
class DrySite(pywikibot.site.APISite):
"""Dummy class to use instead of L{pywikibot.site.APISite}."""
_loginstatus = pywikibot.site.LoginStatus.NOT_ATTEMPTED
def __init__(self, code, fam, user, sysop):
"""Constructor."""
super(DrySite, self).__init__(code, fam, user, sysop)
self._userinfo = pywikibot.tools.EMPTY_DEFAULT
self._paraminfo = DryParamInfo()
self._siteinfo = DummySiteinfo({})
self._siteinfo._cache['lang'] = (code, True)
self._siteinfo._cache['case'] = (
'case-sensitive' if self.family.name == 'wiktionary' else
'first-letter', True)
extensions = []
if self.family.name == 'wikisource':
extensions.append({'name': 'ProofreadPage'})
self._siteinfo._cache['extensions'] = (extensions, True)
def _build_namespaces(self):
return Namespace.builtin_namespaces(case=self.siteinfo['case'])
def __repr__(self):
"""Override default so warnings and errors indicate test is dry."""
return "%s(%r, %r)" % (self.__class__.__name__,
self.code,
self.family.name)
@property
def userinfo(self):
"""Return dry data."""
return self._userinfo
def version(self):
"""Dummy version, with warning to show the callers context."""
warn('%r returning version 1.24; override if unsuitable.'
% self, DrySiteNote, stacklevel=2)
return '1.24'
def image_repository(self):
"""Return Site object for image repository e.g. commons."""
code, fam = self.shared_image_repository()
if bool(code or fam):
return pywikibot.Site(code, fam, self.username(),
interface=self.__class__)
def data_repository(self):
"""Return Site object for data repository e.g. Wikidata."""
code, fam = self.shared_data_repository()
if bool(code or fam):
return pywikibot.Site(code, fam, self.username(),
interface=DryDataSite)
class DryDataSite(DrySite, pywikibot.site.DataSite):
"""Dummy class to use instead of L{pywikibot.site.DataSite}."""
def _build_namespaces(self):
namespaces = super(DryDataSite, self)._build_namespaces()
namespaces[0].defaultcontentmodel = 'wikibase-item'
namespaces[120] = Namespace(id=120,
case='first-letter',
canonical_name='Property',
defaultcontentmodel='wikibase-property')
return namespaces
class DryPage(pywikibot.Page):
"""Dummy class that acts like a Page but avoids network activity."""
_pageid = 1
_disambig = False
_isredir = False
def isDisambig(self):
"""Return disambig status stored in _disambig."""
return self._disambig
class FakeLoginManager(pywikibot.data.api.LoginManager):
"""Loads a fake password."""
@property
def password(self):
"""Get the fake password."""
return 'foo'
@password.setter
def password(self, value):
"""Ignore password changes."""
pass
def execute(command, data_in=None, timeout=0, error=None):
"""
Execute a command and capture outputs.
@param command: executable to run and arguments to use
@type command: list of unicode
"""
# Any environment variables added on Windows must be of type
# str() on Python 2.
env = os.environ.copy()
# Prevent output by test package; e.g. 'max_retries reduced from x to y'
env[str('PYWIKIBOT_TEST_QUIET')] = str('1')
# sys.path may have been modified by the test runner to load dependencies.
pythonpath = os.pathsep.join(sys.path)
if sys.platform == 'win32' and sys.version_info[0] < 3:
pythonpath = str(pythonpath)
env[str('PYTHONPATH')] = pythonpath
env[str('PYTHONIOENCODING')] = str(config.console_encoding)
# LC_ALL is used by i18n.input as an alternative for userinterface_lang
if pywikibot.config.userinterface_lang:
env[str('LC_ALL')] = str(pywikibot.config.userinterface_lang)
# Set EDITOR to an executable that ignores all arguments and does nothing.
env[str('EDITOR')] = str('call' if sys.platform == 'win32' else 'true')
options = {
'stdout': subprocess.PIPE,
'stderr': subprocess.PIPE
}
if data_in is not None:
options['stdin'] = subprocess.PIPE
try:
p = subprocess.Popen(command, env=env, **options)
except TypeError:
# Generate a more informative error
if sys.platform == 'win32' and sys.version_info[0] < 3:
unicode_env = [(k, v) for k, v in os.environ.items()
if not isinstance(k, str) or
not isinstance(v, str)]
if unicode_env:
raise TypeError('os.environ must contain only str: %r'
% unicode_env)
child_unicode_env = [(k, v) for k, v in env.items()
if not isinstance(k, str) or
not isinstance(v, str)]
if child_unicode_env:
raise TypeError('os.environ must contain only str: %r'
% child_unicode_env)
raise
if data_in is not None:
p.stdin.write(data_in.encode(config.console_encoding))
p.stdin.flush() # _communicate() otherwise has a broken pipe
stderr_lines = b''
waited = 0
while (error or (waited < timeout)) and p.poll() is None:
# In order to kill 'shell' and others early, read only a single
# line per second, and kill the process as soon as the expected
# output has been seen.
# Additional lines will be collected later with p.communicate()
if error:
line = p.stderr.readline()
stderr_lines += line
if error in line.decode(config.console_encoding):
break
time.sleep(1)
waited += 1
if (timeout or error) and p.poll() is None:
p.kill()
if p.poll() is not None:
stderr_lines += p.stderr.read()
data_out = p.communicate()
return {'exit_code': p.returncode,
'stdout': data_out[0].decode(config.console_encoding),
'stderr': (stderr_lines + data_out[1]).decode(config.console_encoding)}
def execute_pwb(args, data_in=None, timeout=0, error=None):
"""
Execute the pwb.py script and capture outputs.
@param args: list of arguments for pwb.py
@type args: list of unicode
"""
if sys.version_info < (2, 7, 9):
return execute(command=[sys.executable, '-W ignore:A true', _pwb_py] + args,
data_in=data_in, timeout=timeout, error=error)
else:
return execute(command=[sys.executable, _pwb_py] + args,
data_in=data_in, timeout=timeout, error=error)
|
|
# Copyright (C) 2017 Google Inc.
# Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file>
"""Audit model."""
from ggrc import db
from ggrc.models.deferred import deferred
from ggrc.models.mixins import (
Timeboxed, Noted, Described, Hyperlinked, WithContact,
Titled, Slugged, CustomAttributable, Stateful
)
from ggrc.models.mixins import clonable
from ggrc.models.relationship import Relatable
from ggrc.models.object_person import Personable
from ggrc.models.context import HasOwnContext
from ggrc.models.reflection import AttributeInfo
from ggrc.models.reflection import PublishOnly
from ggrc.models.program import Program
from ggrc.models.person import Person
from ggrc.models.snapshot import Snapshotable
class Audit(Snapshotable, clonable.Clonable,
CustomAttributable, Personable, HasOwnContext, Relatable,
Timeboxed, Noted, Described, Hyperlinked, WithContact, Titled,
Stateful, Slugged, db.Model):
"""Audit model."""
__tablename__ = 'audits'
_slug_uniqueness = False
VALID_STATES = (
u'Planned', u'In Progress', u'Manager Review',
u'Ready for External Review', u'Completed'
)
CLONEABLE_CHILDREN = {"AssessmentTemplate"}
report_start_date = deferred(db.Column(db.Date), 'Audit')
report_end_date = deferred(db.Column(db.Date), 'Audit')
audit_firm_id = deferred(
db.Column(db.Integer, db.ForeignKey('org_groups.id')), 'Audit')
audit_firm = db.relationship('OrgGroup', uselist=False)
gdrive_evidence_folder = deferred(db.Column(db.String), 'Audit')
program_id = deferred(
db.Column(db.Integer, db.ForeignKey('programs.id'), nullable=False),
'Audit')
audit_objects = db.relationship(
'AuditObject', backref='audit', cascade='all, delete-orphan')
object_type = db.Column(
db.String(length=250), nullable=False, default='Control')
_publish_attrs = [
'report_start_date',
'report_end_date',
'audit_firm',
'status',
'gdrive_evidence_folder',
'program',
'object_type',
PublishOnly('audit_objects')
]
_fulltext_attrs = [
'report_start_date',
'report_end_date',
'audit_firm',
'status',
'gdrive_evidence_folder',
]
_sanitize_html = [
'gdrive_evidence_folder',
'description',
]
_include_links = []
_aliases = {
"program": {
"display_name": "Program",
"filter_by": "_filter_by_program",
"mandatory": True,
},
"user_role:Auditor": {
"display_name": "Auditors",
"type": AttributeInfo.Type.USER_ROLE,
"filter_by": "_filter_by_auditor",
},
"start_date": "Planned Start Date",
"end_date": "Planned End Date",
"report_start_date": "Planned Report Period from",
"report_end_date": "Planned Report Period to",
"contact": {
"display_name": "Internal Audit Lead",
"mandatory": True,
"filter_by": "_filter_by_contact",
},
"secondary_contact": None,
"notes": None,
"url": None,
"reference_url": None,
"status": {
"display_name": "Status",
"mandatory": True,
"description": "Options are:\n{}".format('\n'.join(VALID_STATES))
}
}
def _clone(self, source_object):
"""Clone audit and all relevant attributes.
Keeps the internals of the actual audit cloning and everything related
to the audit itself (auditors, audit firm, context setting,
custom attribute values, etc.).
"""
from ggrc_basic_permissions import create_audit_context
data = {
"title": source_object.generate_attribute("title"),
"description": source_object.description,
"audit_firm": source_object.audit_firm,
"start_date": source_object.start_date,
"end_date": source_object.end_date,
"program": source_object.program,
"status": source_object.VALID_STATES[0],
"report_start_date": source_object.report_start_date,
"report_end_date": source_object.report_end_date,
"contact": source_object.contact
}
self.update_attrs(data)
db.session.flush()
create_audit_context(self)
self._clone_auditors(source_object)
self.clone_custom_attribute_values(source_object)
def _clone_auditors(self, audit):
"""Clone auditors of specified audit.
Args:
audit: Audit instance
"""
from ggrc_basic_permissions.models import Role, UserRole
role = Role.query.filter_by(name="Auditor").first()
auditors = [ur.person for ur in UserRole.query.filter_by(
role=role, context=audit.context).all()]
for auditor in auditors:
user_role = UserRole(
context=self.context,
person=auditor,
role=role
)
db.session.add(user_role)
db.session.flush()
def clone(self, source_id, mapped_objects=None):
"""Clone audit with specified whitelisted children.
Children that can be cloned should be specified in CLONEABLE_CHILDREN.
Args:
  source_id: ID of the audit to clone.
  mapped_objects: A list of related objects that should also be copied and
    linked to a new audit.
"""
if not mapped_objects:
mapped_objects = []
source_object = Audit.query.get(source_id)
self._clone(source_object)
if any(mapped_objects):
related_children = source_object.related_objects(mapped_objects)
for obj in related_children:
obj.clone(self)
@classmethod
def _filter_by_program(cls, predicate):
return Program.query.filter(
(Program.id == Audit.program_id) &
(predicate(Program.slug) | predicate(Program.title))
).exists()
@classmethod
def _filter_by_auditor(cls, predicate):
from ggrc_basic_permissions.models import Role, UserRole
return UserRole.query.join(Role, Person).filter(
(Role.name == "Auditor") &
(UserRole.context_id == cls.context_id) &
(predicate(Person.name) | predicate(Person.email))
).exists()
@classmethod
def eager_query(cls):
from sqlalchemy import orm
query = super(Audit, cls).eager_query()
return query.options(
orm.joinedload('program'),
orm.subqueryload('object_people').joinedload('person'),
orm.subqueryload('audit_objects'),
)
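# Illustrative sketch (assumption, not part of the original model): cloning an
# audit together with its assessment templates. The id, the mapped object
# names and the session handling are hypothetical and shown for demonstration.
def _example_clone_audit(source_audit_id):
    new_audit = Audit()
    db.session.add(new_audit)
    new_audit.clone(source_id=source_audit_id,
                    mapped_objects=["AssessmentTemplate"])
    db.session.commit()
    return new_audit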
|
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import TYPE_CHECKING
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpRequest, HttpResponse
from azure.core.polling import LROPoller, NoPolling, PollingMethod
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.arm_polling import ARMPolling
from .. import models as _models
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from typing import Any, Callable, Dict, Generic, Iterable, Optional, TypeVar, Union
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
class VpnSitesOperations(object):
"""VpnSitesOperations operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.network.v2018_07_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
def get(
self,
resource_group_name, # type: str
vpn_site_name, # type: str
**kwargs # type: Any
):
# type: (...) -> "_models.VpnSite"
"""Retrieves the details of a VPN site.
:param resource_group_name: The resource group name of the VpnSite.
:type resource_group_name: str
:param vpn_site_name: The name of the VpnSite being retrieved.
:type vpn_site_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: VpnSite, or the result of cls(response)
:rtype: ~azure.mgmt.network.v2018_07_01.models.VpnSite
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.VpnSite"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2018-07-01"
accept = "application/json"
# Construct URL
url = self.get.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'vpnSiteName': self._serialize.url("vpn_site_name", vpn_site_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.Error, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = self._deserialize('VpnSite', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/vpnSites/{vpnSiteName}'} # type: ignore
def _create_or_update_initial(
self,
resource_group_name, # type: str
vpn_site_name, # type: str
vpn_site_parameters, # type: "_models.VpnSite"
**kwargs # type: Any
):
# type: (...) -> "_models.VpnSite"
cls = kwargs.pop('cls', None) # type: ClsType["_models.VpnSite"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2018-07-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self._create_or_update_initial.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'vpnSiteName': self._serialize.url("vpn_site_name", vpn_site_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(vpn_site_parameters, 'VpnSite')
body_content_kwargs['content'] = body_content
request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 201]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.Error, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize('VpnSite', pipeline_response)
if response.status_code == 201:
deserialized = self._deserialize('VpnSite', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_create_or_update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/vpnSites/{vpnSiteName}'} # type: ignore
def begin_create_or_update(
self,
resource_group_name, # type: str
vpn_site_name, # type: str
vpn_site_parameters, # type: "_models.VpnSite"
**kwargs # type: Any
):
# type: (...) -> LROPoller["_models.VpnSite"]
"""Creates a VpnSite resource if it doesn't exist else updates the existing VpnSite.
:param resource_group_name: The resource group name of the VpnSite.
:type resource_group_name: str
:param vpn_site_name: The name of the VpnSite being created or updated.
:type vpn_site_name: str
:param vpn_site_parameters: Parameters supplied to create or update VpnSite.
:type vpn_site_parameters: ~azure.mgmt.network.v2018_07_01.models.VpnSite
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be ARMPolling.
Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of LROPoller that returns either VpnSite or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[~azure.mgmt.network.v2018_07_01.models.VpnSite]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.VpnSite"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._create_or_update_initial(
resource_group_name=resource_group_name,
vpn_site_name=vpn_site_name,
vpn_site_parameters=vpn_site_parameters,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize('VpnSite', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'vpnSiteName': self._serialize.url("vpn_site_name", vpn_site_name, 'str'),
}
if polling is True: polling_method = ARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/vpnSites/{vpnSiteName}'} # type: ignore
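# Illustrative client-side usage sketch (assumption, not generated code):
# the LROPoller returned above blocks in .result() until provisioning of
# the VpnSite completes. Client construction details are hypothetical.
#
#   poller = network_client.vpn_sites.begin_create_or_update(
#       "my-resource-group", "my-vpn-site", vpn_site_parameters)
#   vpn_site = poller.result()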
def _update_tags_initial(
self,
resource_group_name, # type: str
vpn_site_name, # type: str
vpn_site_parameters, # type: "_models.TagsObject"
**kwargs # type: Any
):
# type: (...) -> "_models.VpnSite"
cls = kwargs.pop('cls', None) # type: ClsType["_models.VpnSite"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2018-07-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self._update_tags_initial.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'vpnSiteName': self._serialize.url("vpn_site_name", vpn_site_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(vpn_site_parameters, 'TagsObject')
body_content_kwargs['content'] = body_content
request = self._client.patch(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 201]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.Error, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize('VpnSite', pipeline_response)
if response.status_code == 201:
deserialized = self._deserialize('VpnSite', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_update_tags_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/vpnSites/{vpnSiteName}'} # type: ignore
def begin_update_tags(
self,
resource_group_name, # type: str
vpn_site_name, # type: str
vpn_site_parameters, # type: "_models.TagsObject"
**kwargs # type: Any
):
# type: (...) -> LROPoller["_models.VpnSite"]
"""Updates VpnSite tags.
:param resource_group_name: The resource group name of the VpnSite.
:type resource_group_name: str
:param vpn_site_name: The name of the VpnSite being updated.
:type vpn_site_name: str
:param vpn_site_parameters: Parameters supplied to update VpnSite tags.
:type vpn_site_parameters: ~azure.mgmt.network.v2018_07_01.models.TagsObject
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be ARMPolling.
Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of LROPoller that returns either VpnSite or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[~azure.mgmt.network.v2018_07_01.models.VpnSite]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.VpnSite"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._update_tags_initial(
resource_group_name=resource_group_name,
vpn_site_name=vpn_site_name,
vpn_site_parameters=vpn_site_parameters,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize('VpnSite', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'vpnSiteName': self._serialize.url("vpn_site_name", vpn_site_name, 'str'),
}
if polling is True: polling_method = ARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_update_tags.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/vpnSites/{vpnSiteName}'} # type: ignore
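# A hedged usage sketch, not generated code: it assumes this operation group is
# reachable as `client.vpn_sites` on an authenticated NetworkManagementClient;
# the resource names and tag values below are placeholders.
#
#   poller = client.vpn_sites.begin_update_tags(
#       resource_group_name="example-rg",
#       vpn_site_name="example-vpn-site",
#       vpn_site_parameters=_models.TagsObject(tags={"env": "test"}),
#   )
#   updated_site = poller.result()  # blocks until the LRO reaches a terminal state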
def _delete_initial(
self,
resource_group_name, # type: str
vpn_site_name, # type: str
**kwargs # type: Any
):
# type: (...) -> None
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2018-07-01"
accept = "application/json"
# Construct URL
url = self._delete_initial.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'vpnSiteName': self._serialize.url("vpn_site_name", vpn_site_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.delete(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.Error, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_delete_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/vpnSites/{vpnSiteName}'} # type: ignore
def begin_delete(
self,
resource_group_name, # type: str
vpn_site_name, # type: str
**kwargs # type: Any
):
# type: (...) -> LROPoller[None]
"""Deletes a VpnSite.
:param resource_group_name: The resource group name of the VpnSite.
:type resource_group_name: str
:param vpn_site_name: The name of the VpnSite being deleted.
:type vpn_site_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be ARMPolling.
Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of LROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[None]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType[None]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._delete_initial(
resource_group_name=resource_group_name,
vpn_site_name=vpn_site_name,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
if cls:
return cls(pipeline_response, None, {})
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'vpnSiteName': self._serialize.url("vpn_site_name", vpn_site_name, 'str'),
}
if polling is True: polling_method = ARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/vpnSites/{vpnSiteName}'} # type: ignore
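# A hedged sketch of driving the delete LRO, mirroring the begin_update_tags sketch
# above; the client attribute and resource names are assumptions, not part of this module.
#
#   poller = client.vpn_sites.begin_delete("example-rg", "example-vpn-site")
#   poller.wait()  # block until deletion completes
#   # Passing polling=False instead returns right after the initial request, without polling.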
def list_by_resource_group(
self,
resource_group_name, # type: str
**kwargs # type: Any
):
# type: (...) -> Iterable["_models.ListVpnSitesResult"]
"""Lists all the vpnSites in a resource group.
:param resource_group_name: The resource group name of the VpnSite.
:type resource_group_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either ListVpnSitesResult or the result of cls(response)
:rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.network.v2018_07_01.models.ListVpnSitesResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.ListVpnSitesResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2018-07-01"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list_by_resource_group.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
def extract_data(pipeline_response):
deserialized = self._deserialize('ListVpnSitesResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, iter(list_of_elem)
def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
error = self._deserialize.failsafe_deserialize(_models.Error, response)
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
return pipeline_response
return ItemPaged(
get_next, extract_data
)
list_by_resource_group.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/vpnSites'} # type: ignore
def list(
self,
**kwargs # type: Any
):
# type: (...) -> Iterable["_models.ListVpnSitesResult"]
"""Lists all the VpnSites in a subscription.
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either ListVpnSitesResult or the result of cls(response)
:rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.network.v2018_07_01.models.ListVpnSitesResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.ListVpnSitesResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2018-07-01"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
def extract_data(pipeline_response):
deserialized = self._deserialize('ListVpnSitesResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, iter(list_of_elem)
def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
error = self._deserialize.failsafe_deserialize(_models.Error, response)
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
return pipeline_response
return ItemPaged(
get_next, extract_data
)
list.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.Network/vpnSites'} # type: ignore
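# Illustrative helper, not generated code: a minimal sketch of consuming the two
# paged list operations above. It assumes an authenticated NetworkManagementClient
# whose `vpn_sites` attribute is an instance of this operations class; all names
# are placeholders.
def _example_enumerate_vpn_sites(network_client, resource_group_name=None):
    """Yield VpnSite names from one resource group or from the whole subscription."""
    if resource_group_name:
        pages = network_client.vpn_sites.list_by_resource_group(resource_group_name)
    else:
        pages = network_client.vpn_sites.list()
    for vpn_site in pages:  # ItemPaged follows next_link transparently
        yield vpn_site.name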
|
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Implements the graph generation for computation of gradients."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import contextlib
import warnings
import numpy as np
import six
from six.moves import xrange # pylint: disable=redefined-builtin
from tensorflow.core.framework import attr_value_pb2
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import tensor_util
from tensorflow.python.ops import array_grad # pylint: disable=unused-import
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_grad # pylint: disable=unused-import
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import functional_ops
from tensorflow.python.ops import image_grad # pylint: disable=unused-import
from tensorflow.python.ops import linalg_grad # pylint: disable=unused-import
from tensorflow.python.ops import linalg_ops # pylint: disable=unused-import
from tensorflow.python.ops import logging_ops # pylint: disable=unused-import
from tensorflow.python.ops import math_grad # pylint: disable=unused-import
from tensorflow.python.ops import math_ops
from tensorflow.python.platform import tf_logging as logging
# Warn the user if we convert a sparse representation to dense with at
# least this number of elements.
_LARGE_SPARSE_NUM_ELEMENTS = 100000000
def _IndexedSlicesToTensor(value, dtype=None, name=None, as_ref=False):
"""Converts an IndexedSlices object `value` to a Tensor.
NOTE(mrry): This function is potentially expensive.
Args:
value: An ops.IndexedSlices object.
dtype: The dtype of the Tensor to be returned.
name: Optional name to use for the returned Tensor.
as_ref: True if a ref is requested.
Returns:
A dense Tensor representing the values in the given IndexedSlices.
Raises:
ValueError: If `dtype` is incompatible with `value.dtype`, or if the
IndexedSlices has no known `dense_shape`.
"""
_ = as_ref
if dtype and not dtype.is_compatible_with(value.dtype):
raise ValueError(
"Tensor conversion requested dtype %s for IndexedSlices with dtype %s" %
(dtype.name, value.dtype.name))
if value.dense_shape is None:
raise ValueError(
"Tensor conversion requested for IndexedSlices without dense_shape: %s"
% str(value))
# TODO(mrry): Consider adding static shape information to
# IndexedSlices, to avoid using numpy here.
dense_shape_value = tensor_util.constant_value(value.dense_shape)
if dense_shape_value is not None:
num_elements = np.prod(dense_shape_value)
if num_elements >= _LARGE_SPARSE_NUM_ELEMENTS:
warnings.warn(
"Converting sparse IndexedSlices to a dense Tensor with %d elements. "
"This may consume a large amount of memory." % num_elements)
else:
warnings.warn(
"Converting sparse IndexedSlices to a dense Tensor of unknown shape. "
"This may consume a large amount of memory.")
return math_ops.unsorted_segment_sum(
value.values, value.indices, value.dense_shape[0], name=name)
ops.register_tensor_conversion_function(ops.IndexedSlices,
_IndexedSlicesToTensor)
def _MarkReachedOps(from_ops, reached_ops):
"""Mark all ops reached from "from_ops".
Args:
from_ops: list of Operations.
reached_ops: list of booleans, indexed by operation id.
"""
queue = collections.deque()
queue.extend(from_ops)
while queue:
op = queue.popleft()
if not reached_ops[op._id]:
reached_ops[op._id] = True
for output in op.outputs:
queue.extend(output.consumers())
def _GatherInputs(to_ops, reached_ops):
"""List all inputs of to_ops that are in reached_ops.
Args:
to_ops: list of Operations.
reached_ops: list of booleans, indexed by operation id.
Returns:
The list of all inputs of to_ops that are in reached_ops.
That list includes all elements of to_ops.
"""
inputs = []
queue = collections.deque()
queue.extend(to_ops)
while queue:
op = queue.popleft()
# We are interested in this op.
if reached_ops[op._id]:
inputs.append(op)
# Clear the boolean so we won't add the inputs again.
reached_ops[op._id] = False
for inp in op.inputs:
queue.append(inp.op)
return inputs
def _PendingCount(graph, to_ops, from_ops, colocate_gradients_with_ops):
"""Initialize the pending count for ops between two lists of Operations.
'pending_count[op._id]' indicates the number of backprop inputs
to this operation.
Args:
graph: a Graph.
to_ops: list of Operations.
from_ops: list of Operations.
colocate_gradients_with_ops: Python bool. See docstring of gradients().
Returns:
A tuple containing: (1) a list of integers indexed by operation id,
indicating the number of backprop inputs to this operation, and (2)
a ControlFlowState object which is not None if the ops between from_ops
and to_ops contain control flow loops.
"""
# Mark reachable ops from from_ops.
reached_ops = [False] * (graph._last_id + 1)
for op in to_ops:
reached_ops[op._id] = True
_MarkReachedOps(from_ops, reached_ops)
# Mark between ops.
between_ops = [False] * (graph._last_id + 1)
between_op_list = []
queue = collections.deque()
queue.extend(to_ops)
while queue:
op = queue.popleft()
# We are interested in this op.
if reached_ops[op._id]:
between_ops[op._id] = True
between_op_list.append(op)
# Clear the boolean so we won't add the inputs again.
reached_ops[op._id] = False
for inp in op.inputs:
queue.append(inp.op)
# 'loop_state' is None if there are no while loops.
loop_state = control_flow_ops.MaybeCreateControlFlowState(
between_op_list, between_ops, colocate_gradients_with_ops)
# Initialize pending count for between ops.
pending_count = [0] * (graph._last_id + 1)
for op in between_op_list:
for x in op.inputs:
if between_ops[x.op._id]:
pending_count[x.op._id] += 1
return pending_count, loop_state
def _AsList(x):
return x if isinstance(x, (list, tuple)) else [x]
def _DefaultGradYs(grad_ys, ys, colocate_gradients_with_ops):
"""Fill in default values for grad_ys.
Args:
grad_ys: List of gradients, can contain None.
ys: List of tensors.
colocate_gradients_with_ops: If True, try colocating gradients with
the corresponding op.
Returns:
A list of gradients to use, without None.
Raises:
ValueError: If one of the grad_ys is invalid.
"""
if len(grad_ys) != len(ys):
raise ValueError("Passed %d grad_ys for %d ys" % (len(grad_ys), len(ys)))
grad_ys = ops.convert_n_to_tensor_or_indexed_slices(grad_ys, name="grad_y")
for i in xrange(len(grad_ys)):
grad_y = grad_ys[i]
y = ys[i]
if grad_y is None:
with _maybe_colocate_with(y.op, colocate_gradients_with_ops):
grad_ys[i] = array_ops.fill(
array_ops.shape(y), constant_op.constant(
1, dtype=y.dtype))
else:
if grad_y.dtype != y.dtype:
raise ValueError("Y and ys_grad must be of the same type, "
"not y: %s, ys_grad: %s " %
(dtypes.as_dtype(y.dtype).name,
dtypes.as_dtype(grad_y.dtype).name))
return grad_ys
def _IsTrainable(tensor):
dtype = dtypes.as_dtype(tensor.dtype)
return dtype.base_dtype in (dtypes.float16, dtypes.float32, dtypes.float64,
dtypes.complex64, dtypes.complex128)
def _VerifyGeneratedGradients(grads, op):
"""Verify that gradients are valid in number and type.
Args:
grads: List of generated gradients.
op: Operation for which the gradients were generated.
Raises:
ValueError: if the gradients are invalid.
"""
if len(grads) != len(op.inputs):
raise ValueError("Num gradients %d generated for op %s do not match num "
"inputs %d" % (len(grads), op.node_def, len(op.inputs)))
for i in xrange(len(grads)):
grad = grads[i]
inp = op.inputs[i]
if grad is not None:
if not grad.dtype.is_compatible_with(inp.dtype):
raise ValueError("Gradient type %s generated for op %s does "
"not match input type %s" %
(dtypes.as_dtype(grad.dtype).name, op.node_def,
dtypes.as_dtype(inp.dtype).name))
def _StopOps(from_ops, pending_count):
"""The set of ops that terminate the gradient computation.
This computes the frontier of the forward graph *before* which backprop
should stop. Operations in the returned set will not be differentiated.
This set is defined as the subset of `from_ops` containing ops that have
no predecessor in `from_ops`. `pending_count` is the result of
`_PendingCount(graph, to_ops, from_ops)`. An 'op' has predecessors in `from_ops`
iff pending_count[op._id] > 0.
Args:
from_ops: list of Operations.
pending_count: List of integers, indexed by operation id.
Returns:
The set of operations.
"""
stop_ops = set()
for op in from_ops:
is_stop_op = True
for inp in op.inputs:
if pending_count[inp.op._id] > 0:
is_stop_op = False
break
if is_stop_op:
stop_ops.add(op._id)
return stop_ops
@contextlib.contextmanager
def _maybe_colocate_with(op, colocate_gradients_with_ops):
"""Context to colocate with `op` if `colocate_gradients_with_ops`."""
if colocate_gradients_with_ops:
with ops.colocate_with(op):
yield
else:
yield
def _SymGrad(op, out_grads):
"""Backprop through a function call node op given its outputs' gradients."""
f_in = [x for x in op.inputs] + out_grads
f_types = [x.dtype for x in op.inputs]
f = attr_value_pb2.NameAttrList()
f.name = op.type
for k in op.node_def.attr:
f.attr[k].CopyFrom(op.node_def.attr[k])
# pylint: disable=protected-access
in_grads = functional_ops._symbolic_gradient(input=f_in, Tout=f_types, f=f)
# pylint: enable=protected-access
return in_grads
def gradients(ys,
xs,
grad_ys=None,
name="gradients",
colocate_gradients_with_ops=False,
gate_gradients=False,
aggregation_method=None):
"""Constructs symbolic partial derivatives of sum of `ys` w.r.t. x in `xs`.
`ys` and `xs` are each a `Tensor` or a list of tensors. `grad_ys`
is a list of `Tensor`, holding the gradients received by the
`ys`. The list must be the same length as `ys`.
`gradients()` adds ops to the graph to output the partial
derivatives of `ys` with respect to `xs`. It returns a list of
`Tensor` of length `len(xs)` where each tensor is the `sum(dy/dx)`
for y in `ys`.
`grad_ys` is a list of tensors of the same length as `ys` that holds
the initial gradients for each y in `ys`. When `grad_ys` is None,
we fill in a tensor of '1's of the shape of y for each y in `ys`. A
user can provide their own initial `grad_ys` to compute the
derivatives using a different initial gradient for each y (e.g., if
one wanted to weight the gradient differently for each value in
each y).
Args:
ys: A `Tensor` or list of tensors to be differentiated.
xs: A `Tensor` or list of tensors to be used for differentiation.
grad_ys: Optional. A `Tensor` or list of tensors the same size as
`ys` and holding the gradients computed for each y in `ys`.
name: Optional name to use for grouping all the gradient ops together.
Defaults to 'gradients'.
colocate_gradients_with_ops: If True, try colocating gradients with
the corresponding op.
gate_gradients: If True, add a tuple around the gradients returned
for an operation. This avoids some race conditions.
aggregation_method: Specifies the method used to combine gradient terms.
Accepted values are constants defined in the class `AggregationMethod`.
Returns:
A list of `sum(dy/dx)` for each x in `xs`.
Raises:
LookupError: if one of the operations between `x` and `y` does not
have a registered gradient function.
ValueError: if the arguments are invalid.
"""
ys = _AsList(ys)
xs = _AsList(xs)
if grad_ys is None:
grad_ys = [None] * len(ys)
else:
grad_ys = _AsList(grad_ys)
with ops.name_scope(name, "gradients", ys + xs + grad_ys):
ys = ops.convert_n_to_tensor_or_indexed_slices(ys, name="y")
xs = ops.convert_n_to_tensor_or_indexed_slices(xs, name="x")
grad_ys = _DefaultGradYs(grad_ys, ys, colocate_gradients_with_ops)
# The approach we take here is as follows: Create a list of all ops in the
# subgraph between the ys and xs. Visit these ops in reverse order of ids
# to ensure that when we visit an op the gradients w.r.t its outputs have
# been collected. Then aggregate these gradients if needed, call the op's
# gradient function, and add the generated gradients to the gradients for
# its input.
# Initialize the pending count for ops in the connected subgraph from ys
# to the xs.
to_ops = [t.op for t in ys]
from_ops = [t.op for t in xs]
pending_count, loop_state = _PendingCount(ops.get_default_graph(), to_ops,
from_ops,
colocate_gradients_with_ops)
# Iterate over the collected ops.
#
# grads: op => list of gradients received on each output endpoint of the
# op. The gradients for each endpoint are initially collected as a list.
# When it is time to call the op's gradient function, for each endpoint we
# aggregate the list of received gradients into a Add() Operation if there
# is more than one.
grads = {}
# Add the initial gradients for the ys.
for y, grad_y in zip(ys, grad_ys):
_SetGrad(grads, y, grad_y)
# Initialize queue with to_ops.
queue = collections.deque()
# Add the ops in 'to_ops' into the queue.
to_ops_set = set()
for op in to_ops:
# 'ready' handles the case where one output gradient relies on
# another output's gradient.
# pylint: disable=protected-access
ready = (pending_count[op._id] == 0)
if ready and op._id not in to_ops_set:
to_ops_set.add(op._id)
queue.append(op)
# pylint: enable=protected-access
if loop_state:
loop_exits = loop_state.ProcessUnusedLoopExits(pending_count, to_ops_set)
for y in loop_exits:
if _IsTrainable(y):
_SetGrad(grads, y, loop_state.ZerosLikeForExit(y))
queue.append(y.op)
# The set of 'from_ops'.
stop_ops = _StopOps(from_ops, pending_count)
while queue:
# generate gradient subgraph for op.
op = queue.popleft()
with _maybe_colocate_with(op, colocate_gradients_with_ops):
if loop_state:
loop_state.EnterGradWhileContext(op, before=True)
out_grads = _AggregatedGrads(grads, op, loop_state, aggregation_method)
if loop_state:
loop_state.ExitGradWhileContext(op, before=True)
grad_fn = None
# pylint: disable=protected-access
is_func_call = ops.get_default_graph()._is_function(op.type)
has_out_grads = any(isinstance(g, ops.Tensor) or g for g in out_grads)
if has_out_grads and (op._id not in stop_ops):
if is_func_call:
grad_fn = ops.get_default_graph()._get_function(
op.type).python_grad_func
# pylint: enable=protected-access
else:
# A grad_fn must be defined, either as a function or as None
# for ops that do not have gradients.
try:
grad_fn = ops.get_gradient_function(op)
except LookupError:
raise LookupError(
"No gradient defined for operation '%s' (op type: %s)" %
(op.name, op.type))
if loop_state:
loop_state.EnterGradWhileContext(op, before=False)
if (grad_fn or is_func_call) and has_out_grads:
# NOTE: If _AggregatedGrads didn't compute a value for the i'th
# output, it means that the cost does not depend on output[i],
# therefore dC/doutput[i] is 0.
for i, out_grad in enumerate(out_grads):
if (not isinstance(out_grad, ops.Tensor) and
not out_grad) and _IsTrainable(op.outputs[i]):
# Only floating-point outputs get a zero gradient. Gradient
# functions should ignore the gradient for other outputs.
if loop_state:
out_grads[i] = loop_state.ZerosLike(op, i)
else:
out_grads[i] = control_flow_ops.ZerosLikeOutsideLoop(op, i)
with ops.name_scope(op.name + "_grad"):
# pylint: disable=protected-access
with ops.get_default_graph()._original_op(op):
# pylint: enable=protected-access
if grad_fn:
# If grad_fn was found, do not use SymbolicGradient even for
# functions.
in_grads = grad_fn(op, *out_grads)
else:
# For function call ops, we add a 'SymbolicGradient'
# node to the graph to compute gradients.
in_grads = _SymGrad(op, out_grads)
in_grads = _AsList(in_grads)
_VerifyGeneratedGradients(in_grads, op)
if gate_gradients and len(
[x for x in in_grads if x is not None]) > 1:
in_grads = control_flow_ops.tuple(in_grads)
_LogOpGradients(op, out_grads, in_grads)
else:
# If no grad_fn is defined or none of out_grads is available,
# just propagate a list of None backwards.
in_grads = [None] * len(op.inputs)
for t_in, in_grad in zip(op.inputs, in_grads):
if in_grad is not None:
if isinstance(in_grad, ops.Tensor):
in_grad.set_shape(t_in.get_shape())
_SetGrad(grads, t_in, in_grad)
if loop_state:
loop_state.ExitGradWhileContext(op, before=False)
# Update pending count for the inputs of op and enqueue ready ops.
_UpdatePendingAndEnqueueReady(grads, op, queue, pending_count, loop_state)
if loop_state:
loop_state.PostProcessing()
return [_GetGrad(grads, x) for x in xs]
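# A minimal usage sketch, not part of the original module: it builds a tiny graph
# and asks gradients() for dy/dx using only symbols already imported above.
# Evaluating the returned tensor requires running it in a Session elsewhere.
def _example_gradients_usage():
  """Returns the symbolic gradient of y = sum(x * x) w.r.t. x, i.e. 2 * x."""
  x = constant_op.constant([1.0, 2.0, 3.0], name="example_x")
  y = math_ops.reduce_sum(x * x)
  return gradients(y, x)[0]  # evaluates to [2., 4., 6.] when run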
def _HasAnyNotNoneGrads(grads, op):
"""Return true iff op has real gradient."""
out_grads = _GetGrads(grads, op)
for out_grad in out_grads:
if isinstance(out_grad, (ops.Tensor, ops.IndexedSlices)):
return True
if out_grad and isinstance(out_grad, collections.Sequence):
if any([g is not None for g in out_grad]):
return True
return False
def _UpdatePendingAndEnqueueReady(grads, op, queue, pending_count, loop_state):
"""Update pending count for the inputs of op and enqueue ready ops."""
for x in op.inputs:
# pylint: disable=protected-access
pending_count[x.op._id] -= 1
ready = (pending_count[x.op._id] == 0)
if loop_state and not ready:
ready = (pending_count[x.op._id] > 0 and
control_flow_ops.IsLoopSwitch(x.op))
# pylint: enable=protected-access
if ready:
if control_flow_ops.IsLoopExit(x.op):
# If x is an exit without a real gradient, defer processing it.
grad_state = loop_state.GetGradState(x.op, before=False)
grad_state.deferred_exits.append(x)
grad_state.pending_exits_count -= 1
if grad_state.pending_exits_count == 0:
# We now have all the exits so process them.
has_real_grad = False
for y in grad_state.deferred_exits:
if _HasAnyNotNoneGrads(grads, y.op):
has_real_grad = True
queue.append(y.op)
else:
grad_state.unused_exits.append(y)
if has_real_grad:
# For an unused exit, if it has floating-point outputs, backprop
# a zero gradient. Otherwise, just ignore it.
for y in grad_state.unused_exits:
if _IsTrainable(y):
_SetGrad(grads, y, loop_state.ZerosLikeForExit(y))
queue.append(y.op)
else:
# All exits are "unused" so use None as gradient.
for y in grad_state.unused_exits:
queue.append(y.op)
else:
queue.append(x.op)
def _SetGrad(grads, t, grad):
"""Sets gradient "grad" in "grads" for tensor "t"."""
op = t.op
op_grads = grads.get(op)
if not op_grads:
op_grads = [[] for _ in xrange(len(op.outputs))]
grads[op] = op_grads
t_grads = op_grads[t.value_index]
if isinstance(t_grads, list):
t_grads.append(grad)
else:
assert control_flow_ops.IsLoopSwitch(op)
op_grads[t.value_index] = grad
def _GetGrad(grads, t):
"""Gets gradient for tensor "t"."""
op = t.op
op_grads = grads.get(op)
if not op_grads:
return None
t_grad = op_grads[t.value_index]
assert not isinstance(t_grad, list), (
"gradients list should have been aggregated by now.")
return t_grad
def _GetGrads(grads, op):
"""Gets all gradients for op."""
if op in grads:
return grads[op]
else:
return [[] for _ in xrange(len(op.outputs))]
def _HandleNestedIndexedSlices(grad):
assert isinstance(grad, ops.IndexedSlices)
if isinstance(grad.values, ops.Tensor):
return grad
else:
assert isinstance(grad.values, ops.IndexedSlices)
g = _HandleNestedIndexedSlices(grad.values)
return ops.IndexedSlices(g.values,
array_ops.gather(grad.indices, g.indices),
g.dense_shape)
def _AccumulatorShape(inputs):
shape = tensor_shape.unknown_shape()
for i in inputs:
if isinstance(i, ops.Tensor):
shape = shape.merge_with(i.get_shape())
return shape
def _LogOpGradients(op, out_grads, in_grads):
"""Log the in and out grads of an op."""
logging.vlog(1, "Gradient for '" + op.name + "'")
def _FilterGrad(x):
if x is None:
return False
if isinstance(x, (list, tuple)):
return bool(x)
else:
return True
logging.vlog(1, " in --> %s",
", ".join([x.name for x in out_grads if _FilterGrad(x)]))
logging.vlog(1, " out --> %s",
", ".join([x.name for x in in_grads if _FilterGrad(x)]))
def _MultiDeviceAddN(tensor_list):
"""Adds tensors from potentially multiple devices."""
# Basic function structure comes from control_flow_ops.group().
# Sort tensors according to their devices.
tensors_on_device = collections.defaultdict(lambda: [])
for tensor in tensor_list:
tensors_on_device[tensor.device].append(tensor)
# For each device, add the tensors on that device first.
# Then gather the partial sums from multiple devices.
# TODO(sjhwang): Create hierarchical aggregation tree as pbar's suggestion.
# E.g., aggregate per GPU, then per task, and so on.
summands = []
def DeviceKey(dev):
return "" if dev is None else dev
for dev in sorted(six.iterkeys(tensors_on_device), key=DeviceKey):
tensors = tensors_on_device[dev]
with ops.colocate_with(tensors[0].op, ignore_existing=True):
summands.append(math_ops.add_n(tensors))
return math_ops.add_n(summands)
class AggregationMethod(object):
"""A class listing aggregation methods used to combine gradients.
Computing partial derivatives can require aggregating gradient
contributions. This class lists the various methods that can
be used to combine gradients in the graph:
* `ADD_N`: All of the gradient terms are summed as part of one
operation using the "AddN" op. It has the property that all
gradients must be ready before any aggregation is performed.
* `DEFAULT`: The system-chosen default aggregation method.
"""
ADD_N = 0
DEFAULT = ADD_N
# The following are experimental and may not be supported in future releases.
EXPERIMENTAL_TREE = 1
EXPERIMENTAL_ACCUMULATE_N = 2
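# Hedged usage sketch: an aggregation method is simply forwarded to gradients();
# `y` and `xs` stand for tensors the caller already has.
def _example_tree_aggregation(y, xs):
  """Requests pairwise (tree) summation of gradient terms instead of one AddN."""
  return gradients(
      y, xs, aggregation_method=AggregationMethod.EXPERIMENTAL_TREE)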
def _AggregatedGrads(grads, op, loop_state, aggregation_method=None):
"""Get the aggregated gradients for op.
Args:
grads: The map of memoized gradients.
op: The op to get gradients for.
loop_state: An object for maintaining the state of the while loops in the
graph. It is of type ControlFlowState. None if the graph
contains no while loops.
aggregation_method: Specifies the method used to combine gradient terms.
Accepted values are constants defined in the class `AggregationMethod`.
Returns:
A list of gradients, one per output of `op`. If the gradients
for a particular output are a list, this function aggregates them
before returning.
Raises:
TypeError: if the incoming grads are not Tensors or IndexedSlices.
ValueError: if the arguments are invalid.
"""
if aggregation_method is None:
aggregation_method = AggregationMethod.DEFAULT
if aggregation_method not in [
AggregationMethod.ADD_N, AggregationMethod.EXPERIMENTAL_TREE,
AggregationMethod.EXPERIMENTAL_ACCUMULATE_N
]:
raise ValueError("Invalid aggregation_method specified %s." %
aggregation_method)
out_grads = _GetGrads(grads, op)
for i, out_grad in enumerate(out_grads):
if loop_state:
if isinstance(out_grad, (ops.Tensor, ops.IndexedSlices)):
assert control_flow_ops.IsLoopSwitch(op)
continue
# Grads have to be Tensors or IndexedSlices
if (isinstance(out_grad, collections.Sequence) and not all([
isinstance(g, (ops.Tensor, ops.IndexedSlices)) for g in out_grad
if g is not None
])):
raise TypeError("gradients have to be either all Tensors "
"or all IndexedSlices")
# Aggregate multiple gradients, and convert [] to None.
if out_grad:
if len(out_grad) < 2:
used = "nop"
out_grads[i] = out_grad[0]
elif all([isinstance(g, ops.Tensor) for g in out_grad if g is not None]):
tensor_shape = _AccumulatorShape(out_grad)
if (aggregation_method == AggregationMethod.EXPERIMENTAL_ACCUMULATE_N
and len(out_grad) > 2 and tensor_shape.is_fully_defined()):
# The benefit of using AccumulateN is that its inputs can be combined
# in any order and this can allow the expression to be evaluated with
# a smaller memory footprint. When used with gpu_allocator_retry,
# it is possible to compute a sum of terms which are much larger than
# total GPU memory.
# AccumulateN can currently only be used if we know the shape for
# an accumulator variable. If this is not known, or if we only have
# 2 grads then we fall through to the "tree" case below.
used = "accumulate_n"
out_grads[i] = math_ops.accumulate_n(out_grad)
elif aggregation_method in [
AggregationMethod.EXPERIMENTAL_TREE,
AggregationMethod.EXPERIMENTAL_ACCUMULATE_N
]:
# Aggregate all gradients by doing pairwise sums: this may
# reduce performance, but it can improve memory because the
# gradients can be released earlier.
#
# TODO(vrv): Consider replacing this with a version of
# tf.AddN() that eagerly frees its inputs as soon as they are
# ready, so the order of this tree does not become a problem.
used = "tree"
with ops.name_scope(op.name + "_gradient_sum"):
running_sum = out_grad[0]
for grad in out_grad[1:]:
running_sum = math_ops.add_n([running_sum, grad])
out_grads[i] = running_sum
else:
used = "add_n"
out_grads[i] = _MultiDeviceAddN(out_grad)
logging.vlog(2, " _AggregatedGrads %d x %s using %s",
len(out_grad), tensor_shape, used)
else:
out_grad = math_ops._as_indexed_slices_list(
[g for g in out_grad if g is not None])
out_grad = [_HandleNestedIndexedSlices(x) for x in out_grad]
# Form IndexedSlices out of the concatenated values and
# indices.
out_grads[i] = ops.IndexedSlices(
array_ops.concat_v2([x.values for x in out_grad], 0),
array_ops.concat_v2([x.indices for x in out_grad], 0),
out_grad[0].dense_shape)
else:
out_grads[i] = []
return out_grads
# TODO(vrv): Make this available when we want to make it public.
def _hessian_vector_product(ys, xs, v):
"""Multiply the Hessian of `ys` wrt `xs` by `v`.
This is an efficient construction that uses a backprop-like approach
to compute the product between the Hessian and another vector. The
Hessian is usually too large to be explicitly computed or even
represented, but this method allows us to at least multiply by it
for the same big-O cost as backprop.
Implicit Hessian-vector products are the main practical, scalable way
of using second derivatives with neural networks. They allow us to
do things like construct Krylov subspaces and approximate conjugate
gradient descent.
Example: if `y` = 1/2 `x`^T A `x`, then `hessian_vector_product(y,
x, v)` will return an expression that evaluates to the same values
as 1/2 (A + A.T) `v` (that is, A `v` when A is symmetric).
Args:
ys: A scalar value, or a tensor or list of tensors to be summed to
yield a scalar.
xs: A list of tensors that we should construct the Hessian over.
v: A list of tensors, with the same shapes as xs, that we want to
multiply by the Hessian.
Returns:
A list of tensors (or if the list would be length 1, a single tensor)
containing the product between the Hessian and `v`.
Raises:
ValueError: `xs` and `v` have different length.
"""
# Validate the input
length = len(xs)
if len(v) != length:
raise ValueError("xs and v must have the same length.")
# First backprop
grads = gradients(ys, xs)
assert len(grads) == length
elemwise_products = [
math_ops.mul(grad_elem, array_ops.stop_gradient(v_elem))
for grad_elem, v_elem in zip(grads, v) if grad_elem is not None
]
# Second backprop
return gradients(elemwise_products, xs)
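# A worked sketch of the quadratic example from the docstring (illustrative only):
# for y = 1/2 x^T A x with symmetric A, the Hessian is A, so the single tensor in
# the returned list should evaluate to A v = [[5.], [3.]] when run in a Session.
def _example_hessian_vector_product():
  a = constant_op.constant([[2.0, 3.0], [3.0, 0.0]])
  x = constant_op.constant([[1.0], [1.0]])
  v = [constant_op.constant([[1.0], [1.0]])]
  y = 0.5 * math_ops.matmul(math_ops.matmul(x, a, transpose_a=True), x)
  return _hessian_vector_product(y, [x], v)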
def hessians(ys, xs, name="hessians", colocate_gradients_with_ops=False,
gate_gradients=False, aggregation_method=None):
"""Constructs the Hessian of sum of `ys` with respect to `x` in `xs`.
`hessians()` adds ops to the graph to output the Hessian matrix of `ys`
with respect to `xs`. It returns a list of `Tensor` of length `len(xs)`
where each tensor is the Hessian of `sum(ys)`. This function currently
only supports evaluating the Hessian with respect to (a list of) one-
dimensional tensors.
The Hessian is a matrix of second-order partial derivatives of a scalar
tensor (see https://en.wikipedia.org/wiki/Hessian_matrix for more details).
Args:
ys: A `Tensor` or list of tensors to be differentiated.
xs: A `Tensor` or list of tensors to be used for differentiation.
name: Optional name to use for grouping all the gradient ops together.
Defaults to 'hessians'.
colocate_gradients_with_ops: See `gradients()` documentation for details.
gate_gradients: See `gradients()` documentation for details.
aggregation_method: See `gradients()` documentation for details.
Returns:
A list of Hessian matrices of `sum(y)` for each `x` in `xs`.
Raises:
LookupError: if one of the operations between `xs` and `ys` does not
have a registered gradient function.
ValueError: if the arguments are invalid or not supported. Currently,
this function only supports one-dimensional `x` in `xs`.
"""
xs = _AsList(xs)
kwargs = {
'colocate_gradients_with_ops': colocate_gradients_with_ops,
'gate_gradients': gate_gradients,
'aggregation_method': aggregation_method
}
# Compute a hessian matrix for each x in xs
hessians = []
for i, x in enumerate(xs):
# Check dimensions
ndims = x.get_shape().ndims
if ndims is None:
raise ValueError('Cannot compute Hessian because the dimensionality of '
'element number %d of `xs` cannot be determined' % i)
elif ndims != 1:
raise ValueError('Computing hessians is currently only supported for '
'one-dimensional tensors. Element number %d of `xs` has '
'%d dimensions.' % (i, ndims))
with ops.name_scope(name + '_first_derivative'):
# Compute the partial derivatives of the input with respect to all
# elements of `x`
_gradients = gradients(ys, x, **kwargs)[0]
# Unpack the gradients into a list so we can take derivatives with
# respect to each element
_gradients = array_ops.unpack(_gradients)
with ops.name_scope(name + '_second_derivative'):
# Compute the partial derivatives with respect to each element of the list
_hess = [gradients(_gradient, x, **kwargs)[0] for _gradient in _gradients]
# Pack the list into a matrix and add to the list of hessians
hessians.append(array_ops.stack(_hess, name=name))
return hessians
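# Illustrative sketch (evaluation in a Session is assumed to happen elsewhere):
# for y = sum(x**3) the Hessian is diag(6 * x), so at x = [1., 2.] the returned
# matrix evaluates to [[6., 0.], [0., 12.]].
def _example_hessians_usage():
  x = constant_op.constant([1.0, 2.0])
  y = math_ops.reduce_sum(x * x * x)
  return hessians(y, x)[0]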
|
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import TYPE_CHECKING
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpRequest, HttpResponse
from azure.core.polling import LROPoller, NoPolling, PollingMethod
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.arm_polling import ARMPolling
from .. import models as _models
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from typing import Any, Callable, Dict, Generic, Iterable, Optional, TypeVar, Union
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
class LocalNetworkGatewaysOperations(object):
"""LocalNetworkGatewaysOperations operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.network.v2016_09_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
def _create_or_update_initial(
self,
resource_group_name, # type: str
local_network_gateway_name, # type: str
parameters, # type: "_models.LocalNetworkGateway"
**kwargs # type: Any
):
# type: (...) -> "_models.LocalNetworkGateway"
cls = kwargs.pop('cls', None) # type: ClsType["_models.LocalNetworkGateway"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2016-09-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json, text/json"
# Construct URL
url = self._create_or_update_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'localNetworkGatewayName': self._serialize.url("local_network_gateway_name", local_network_gateway_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(parameters, 'LocalNetworkGateway')
body_content_kwargs['content'] = body_content
request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 201]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize('LocalNetworkGateway', pipeline_response)
if response.status_code == 201:
deserialized = self._deserialize('LocalNetworkGateway', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_create_or_update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/localNetworkGateways/{localNetworkGatewayName}'} # type: ignore
def begin_create_or_update(
self,
resource_group_name, # type: str
local_network_gateway_name, # type: str
parameters, # type: "_models.LocalNetworkGateway"
**kwargs # type: Any
):
# type: (...) -> LROPoller["_models.LocalNetworkGateway"]
"""Creates or updates a local network gateway in the specified resource group.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param local_network_gateway_name: The name of the local network gateway.
:type local_network_gateway_name: str
:param parameters: Parameters supplied to the create or update local network gateway operation.
:type parameters: ~azure.mgmt.network.v2016_09_01.models.LocalNetworkGateway
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be ARMPolling.
Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of LROPoller that returns either LocalNetworkGateway or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[~azure.mgmt.network.v2016_09_01.models.LocalNetworkGateway]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.LocalNetworkGateway"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._create_or_update_initial(
resource_group_name=resource_group_name,
local_network_gateway_name=local_network_gateway_name,
parameters=parameters,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize('LocalNetworkGateway', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'localNetworkGatewayName': self._serialize.url("local_network_gateway_name", local_network_gateway_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
if polling is True: polling_method = ARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/localNetworkGateways/{localNetworkGatewayName}'} # type: ignore
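# A hedged usage sketch, not generated code: it assumes this operation group is
# reachable as `client.local_network_gateways` on an authenticated
# NetworkManagementClient, and that the LocalNetworkGateway fields shown exist in
# the 2016-09-01 models; the literal values are placeholders.
#
#   gateway = _models.LocalNetworkGateway(
#       location="westus",
#       gateway_ip_address="203.0.113.10",
#   )
#   poller = client.local_network_gateways.begin_create_or_update(
#       "example-rg", "example-lgw", gateway)
#   created = poller.result()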
def get(
self,
resource_group_name, # type: str
local_network_gateway_name, # type: str
**kwargs # type: Any
):
# type: (...) -> "_models.LocalNetworkGateway"
"""Gets the specified local network gateway in a resource group.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param local_network_gateway_name: The name of the local network gateway.
:type local_network_gateway_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: LocalNetworkGateway, or the result of cls(response)
:rtype: ~azure.mgmt.network.v2016_09_01.models.LocalNetworkGateway
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.LocalNetworkGateway"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2016-09-01"
accept = "application/json, text/json"
# Construct URL
url = self.get.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'localNetworkGatewayName': self._serialize.url("local_network_gateway_name", local_network_gateway_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('LocalNetworkGateway', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/localNetworkGateways/{localNetworkGatewayName}'} # type: ignore
def _delete_initial(
self,
resource_group_name, # type: str
local_network_gateway_name, # type: str
**kwargs # type: Any
):
# type: (...) -> None
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2016-09-01"
# Construct URL
url = self._delete_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'localNetworkGatewayName': self._serialize.url("local_network_gateway_name", local_network_gateway_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
request = self._client.delete(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_delete_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/localNetworkGateways/{localNetworkGatewayName}'} # type: ignore
def begin_delete(
self,
resource_group_name, # type: str
local_network_gateway_name, # type: str
**kwargs # type: Any
):
# type: (...) -> LROPoller[None]
"""Deletes the specified local network gateway.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param local_network_gateway_name: The name of the local network gateway.
:type local_network_gateway_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be ARMPolling.
Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of LROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[None]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType[None]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._delete_initial(
resource_group_name=resource_group_name,
local_network_gateway_name=local_network_gateway_name,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
if cls:
return cls(pipeline_response, None, {})
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'localNetworkGatewayName': self._serialize.url("local_network_gateway_name", local_network_gateway_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
if polling is True: polling_method = ARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/localNetworkGateways/{localNetworkGatewayName}'} # type: ignore
def list(
self,
resource_group_name, # type: str
**kwargs # type: Any
):
# type: (...) -> Iterable["_models.LocalNetworkGatewayListResult"]
"""Gets all the local network gateways in a resource group.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either LocalNetworkGatewayListResult or the result of cls(response)
:rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.network.v2016_09_01.models.LocalNetworkGatewayListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.LocalNetworkGatewayListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2016-09-01"
accept = "application/json, text/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
def extract_data(pipeline_response):
deserialized = self._deserialize('LocalNetworkGatewayListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, iter(list_of_elem)
def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return ItemPaged(
get_next, extract_data
)
list.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/localNetworkGateways'} # type: ignore
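# --- Illustrative usage sketch (not part of the generated operations class) ---
# A minimal example of how these operations are usually reached through the
# management client; the credential type, subscription id and resource names
# below are assumptions for illustration only.
def _example_delete_and_list_local_network_gateways():
    from azure.identity import DefaultAzureCredential  # assumes azure-identity is installed
    from azure.mgmt.network import NetworkManagementClient
    client = NetworkManagementClient(DefaultAzureCredential(), "<subscription-id>")
    # begin_delete returns an LROPoller; result() blocks until the delete finishes.
    client.local_network_gateways.begin_delete("example-rg", "example-lngw").result()
    # list returns an ItemPaged iterator of LocalNetworkGateway models.
    for gateway in client.local_network_gateways.list("example-rg"):
        print(gateway.name)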
|
|
#!/usr/bin/env python
"""Tests for grr.lib.aff4_objects.filestore."""
import os
import StringIO
import time
from grr.lib import action_mocks
from grr.lib import aff4
from grr.lib import config_lib
from grr.lib import flow
from grr.lib import rdfvalue
from grr.lib import test_lib
from grr.lib import utils
from grr.lib.aff4_objects import filestore
# Needed for GetFile pylint: disable=unused-import
from grr.lib.flows.general import transfer
# pylint: enable=unused-import
from grr.lib.rdfvalues import flows as rdf_flows
from grr.lib.rdfvalues import paths as rdf_paths
class FakeStore(object):
PRIORITY = 99
PATH = rdfvalue.RDFURN("aff4:/files/temp")
def __init__(self, path, token):
self.dest_file = aff4.FACTORY.Create(path, "AFF4MemoryStream",
mode="rw", token=token)
def AddFile(self, unused_blob_fd, sync=False):
_ = sync
return self.dest_file
def Get(self, _):
return True
class Schema(object):
ACTIVE = "unused"
class FileStoreTest(test_lib.AFF4ObjectTest):
"""Tests for file store functionality."""
def testFileAdd(self):
fs = aff4.FACTORY.Open(filestore.FileStore.PATH, "FileStore",
token=self.token)
fake_store1 = FakeStore("aff4:/files/temp1", self.token)
fake_store2 = FakeStore("aff4:/files/temp2", self.token)
with utils.Stubber(fs, "OpenChildren",
lambda: [fake_store1, fake_store2]):
src_fd = aff4.FACTORY.Create(aff4.ROOT_URN.Add("temp").Add("src"),
"VFSBlobImage", token=self.token, mode="rw")
src_fd.SetChunksize(filestore.FileStore.CHUNK_SIZE)
src_data = "ABC" * filestore.FileStore.CHUNK_SIZE
src_data_fd = StringIO.StringIO(src_data)
src_fd.AppendContent(src_data_fd)
fs.AddFile(src_fd)
# Reset file pointers
src_fd.Seek(0)
fake_store1.dest_file.Seek(0)
fake_store2.dest_file.Seek(0)
# Check file content got written to both data stores.
self.assertEqual(src_data, fake_store1.dest_file.Read(-1))
self.assertEqual(src_data, fake_store2.dest_file.Read(-1))
def testGetByPriority(self):
priority1 = aff4.FACTORY.Create("aff4:/files/1", "FileStore", mode="rw",
token=self.token)
priority1.PRIORITY = 1
priority1.Set(priority1.Schema.ACTIVE(False))
priority2 = aff4.FACTORY.Create("aff4:/files/2", "FileStore", mode="rw",
token=self.token)
priority2.PRIORITY = 2
priority3 = aff4.FACTORY.Create("aff4:/files/3", "FileStore", mode="rw",
token=self.token)
priority3.PRIORITY = 3
fs = aff4.FACTORY.Open(filestore.FileStore.PATH, "FileStore",
token=self.token)
with utils.Stubber(fs, "OpenChildren",
lambda: [priority3, priority1, priority2]):
child_list = list(fs.GetChildrenByPriority())
self.assertEqual(child_list[0].PRIORITY, 2)
self.assertEqual(child_list[1].PRIORITY, 3)
child_list = list(fs.GetChildrenByPriority(allow_external=False))
self.assertEqual(child_list[0].PRIORITY, 2)
class HashFileStoreTest(test_lib.AFF4ObjectTest):
"""Tests for hash file store functionality."""
def setUp(self):
super(HashFileStoreTest, self).setUp()
client_ids = self.SetupClients(1)
self.client_id = client_ids[0]
@staticmethod
def AddFileToFileStore(pathspec=None, client_id=None, token=None):
"""Adds file with given pathspec to the hash file store."""
if pathspec is None:
raise ValueError("pathspec can't be None")
if client_id is None:
raise ValueError("client_id can't be None")
urn = aff4.AFF4Object.VFSGRRClient.PathspecToURN(pathspec, client_id)
client_mock = action_mocks.ActionMock("TransferBuffer", "StatFile",
"HashBuffer")
for _ in test_lib.TestFlowHelper(
"GetFile", client_mock, token=token, client_id=client_id,
pathspec=pathspec):
pass
auth_state = rdf_flows.GrrMessage.AuthorizationState.AUTHENTICATED
flow.Events.PublishEvent(
"FileStore.AddFileToStore",
rdf_flows.GrrMessage(payload=urn, auth_state=auth_state),
token=token)
worker = test_lib.MockWorker(token=token)
worker.Simulate()
def AddFile(self, path):
"""Add file with a subpath (relative to winexec_img.dd) to the store."""
pathspec = rdf_paths.PathSpec(
pathtype=rdf_paths.PathSpec.PathType.OS,
path=os.path.join(self.base_path, "winexec_img.dd"))
pathspec.Append(path=path, pathtype=rdf_paths.PathSpec.PathType.TSK)
return self.AddFileToFileStore(pathspec, client_id=self.client_id,
token=self.token)
def testListHashes(self):
self.AddFile("/Ext2IFS_1_10b.exe")
hashes = list(aff4.HashFileStore.ListHashes(token=self.token))
self.assertEqual(len(hashes), 5)
self.assertTrue(filestore.FileStoreHash(
fingerprint_type="pecoff", hash_type="md5",
hash_value="a3a3259f7b145a21c7b512d876a5da06") in hashes)
self.assertTrue(filestore.FileStoreHash(
fingerprint_type="pecoff", hash_type="sha1",
hash_value="019bddad9cac09f37f3941a7f285c79d3c7e7801") in hashes)
self.assertTrue(filestore.FileStoreHash(
fingerprint_type="generic", hash_type="md5",
hash_value="bb0a15eefe63fd41f8dc9dee01c5cf9a") in hashes)
self.assertTrue(filestore.FileStoreHash(
fingerprint_type="generic", hash_type="sha1",
hash_value="7dd6bee591dfcb6d75eb705405302c3eab65e21a") in hashes)
self.assertTrue(filestore.FileStoreHash(
fingerprint_type="generic", hash_type="sha256",
hash_value="0e8dc93e150021bb4752029ebbff51394aa36f06"
"9cf19901578e4f06017acdb5") in hashes)
def testListHashesWithAge(self):
with utils.Stubber(time, "time", lambda: 42):
self.AddFile("/Ext2IFS_1_10b.exe")
hashes = list(aff4.HashFileStore.ListHashes(token=self.token, age=41e6))
self.assertEqual(len(hashes), 0)
hashes = list(aff4.HashFileStore.ListHashes(token=self.token, age=43e6))
self.assertEqual(len(hashes), 5)
hashes = list(aff4.HashFileStore.ListHashes(token=self.token))
self.assertEqual(len(hashes), 5)
def testHashAgeUpdatedWhenNewHitAddedWithinAFF4IndexCacheAge(self):
# Check that there are no hashes.
hashes = list(aff4.HashFileStore.ListHashes(token=self.token,
age=(41e6, 1e10)))
self.assertEqual(len(hashes), 0)
with utils.Stubber(time, "time", lambda: 42):
self.AddFileToFileStore(
rdf_paths.PathSpec(pathtype=rdf_paths.PathSpec.PathType.OS,
path=os.path.join(self.base_path, "empty_file")),
client_id=self.client_id, token=self.token)
hashes = list(aff4.HashFileStore.ListHashes(token=self.token,
age=(41e6, 1e10)))
self.assertTrue(hashes)
hits = list(aff4.HashFileStore.GetClientsForHash(hashes[0],
token=self.token))
self.assertEqual(len(hits), 1)
latest_time = 42 + config_lib.CONFIG["AFF4.intermediate_cache_age"] - 1
with utils.Stubber(time, "time", lambda: latest_time):
self.AddFileToFileStore(
rdf_paths.PathSpec(
pathtype=rdf_paths.PathSpec.PathType.OS,
path=os.path.join(self.base_path, "a", "b", "c", "helloc.txt")),
client_id=self.client_id, token=self.token)
# Check that now we have two hits for the previously added hash.
hits = list(aff4.HashFileStore.GetClientsForHash(hashes[0],
token=self.token))
self.assertEqual(len(hits), 2)
# Check that new hit doesn't affect hash age.
hashes = list(aff4.HashFileStore.ListHashes(token=self.token,
age=(43e6, 1e10)))
self.assertFalse(hashes)
def testHashAgeUpdatedWhenNewHitAddedAfterAFF4IndexCacheAge(self):
# Check that there are no hashes.
hashes = list(aff4.HashFileStore.ListHashes(token=self.token,
age=(41e6, 1e10)))
self.assertEqual(len(hashes), 0)
with utils.Stubber(time, "time", lambda: 42):
self.AddFileToFileStore(
rdf_paths.PathSpec(pathtype=rdf_paths.PathSpec.PathType.OS,
path=os.path.join(self.base_path, "empty_file")),
client_id=self.client_id, token=self.token)
hashes = list(aff4.HashFileStore.ListHashes(token=self.token,
age=(41e6, 1e10)))
self.assertTrue(hashes)
hits = list(aff4.HashFileStore.GetClientsForHash(hashes[0],
token=self.token))
self.assertEqual(len(hits), 1)
latest_time = 42 + config_lib.CONFIG["AFF4.intermediate_cache_age"] + 1
with utils.Stubber(time, "time", lambda: latest_time):
self.AddFileToFileStore(
rdf_paths.PathSpec(
pathtype=rdf_paths.PathSpec.PathType.OS,
path=os.path.join(self.base_path, "a", "b", "c", "helloc.txt")),
client_id=self.client_id, token=self.token)
# Check that now we have two hits for the previously added hash.
hits = list(aff4.HashFileStore.GetClientsForHash(hashes[0],
token=self.token))
self.assertEqual(len(hits), 2)
# Check that new hit affects hash age.
hashes = list(aff4.HashFileStore.ListHashes(token=self.token,
age=(43e6, 1e10)))
self.assertTrue(hashes)
def testGetClientsForHash(self):
self.AddFile("/Ext2IFS_1_10b.exe")
self.AddFile("/idea.dll")
hits = list(aff4.HashFileStore.GetClientsForHash(filestore.FileStoreHash(
fingerprint_type="generic", hash_type="md5",
hash_value="bb0a15eefe63fd41f8dc9dee01c5cf9a"), token=self.token))
self.assertListEqual(hits, [self.client_id.Add(
"fs/tsk").Add(self.base_path).Add("winexec_img.dd/Ext2IFS_1_10b.exe")])
def testGetClientsForHashWithAge(self):
with utils.Stubber(time, "time", lambda: 42):
self.AddFile("/Ext2IFS_1_10b.exe")
self.AddFile("/idea.dll")
hits = list(aff4.HashFileStore.GetClientsForHash(
filestore.FileStoreHash(
fingerprint_type="generic", hash_type="md5",
hash_value="bb0a15eefe63fd41f8dc9dee01c5cf9a"),
age=41e6,
token=self.token))
self.assertEqual(len(hits), 0)
hits = list(aff4.HashFileStore.GetClientsForHash(
filestore.FileStoreHash(
fingerprint_type="generic", hash_type="md5",
hash_value="bb0a15eefe63fd41f8dc9dee01c5cf9a"),
age=43e6,
token=self.token))
self.assertEqual(len(hits), 1)
hits = list(aff4.HashFileStore.GetClientsForHash(
filestore.FileStoreHash(
fingerprint_type="generic", hash_type="md5",
hash_value="bb0a15eefe63fd41f8dc9dee01c5cf9a"),
token=self.token))
self.assertEqual(len(hits), 1)
def testGetClientsForHashes(self):
self.AddFile("/Ext2IFS_1_10b.exe")
self.AddFile("/idea.dll")
hash1 = filestore.FileStoreHash(
fingerprint_type="generic", hash_type="md5",
hash_value="bb0a15eefe63fd41f8dc9dee01c5cf9a")
hash2 = filestore.FileStoreHash(
fingerprint_type="generic", hash_type="sha1",
hash_value="e1f7e62b3909263f3a2518bbae6a9ee36d5b502b")
hits = dict(aff4.HashFileStore.GetClientsForHashes([hash1, hash2],
token=self.token))
self.assertEqual(len(hits), 2)
self.assertListEqual(hits[hash1], [self.client_id.Add(
"fs/tsk").Add(self.base_path).Add("winexec_img.dd/Ext2IFS_1_10b.exe")])
self.assertListEqual(hits[hash2], [self.client_id.Add(
"fs/tsk").Add(self.base_path).Add("winexec_img.dd/idea.dll")])
def testGetClientsForHashesWithAge(self):
with utils.Stubber(time, "time", lambda: 42):
self.AddFile("/Ext2IFS_1_10b.exe")
self.AddFile("/idea.dll")
hash1 = filestore.FileStoreHash(
fingerprint_type="generic", hash_type="md5",
hash_value="bb0a15eefe63fd41f8dc9dee01c5cf9a")
hash2 = filestore.FileStoreHash(
fingerprint_type="generic", hash_type="sha1",
hash_value="e1f7e62b3909263f3a2518bbae6a9ee36d5b502b")
hits = dict(aff4.HashFileStore.GetClientsForHashes([hash1, hash2],
age=41e6,
token=self.token))
self.assertEqual(len(hits), 0)
hits = dict(aff4.HashFileStore.GetClientsForHashes([hash1, hash2],
age=43e6,
token=self.token))
self.assertEqual(len(hits), 2)
hits = dict(aff4.HashFileStore.GetClientsForHashes([hash1, hash2],
token=self.token))
self.assertEqual(len(hits), 2)
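# Illustrative helper sketch (not used by the tests above): enumerate every
# hash known to the store together with the client paths that hit it, using
# only the HashFileStore APIs exercised in this module.
def ExampleDumpHashHits(token=None):
  for file_hash in aff4.HashFileStore.ListHashes(token=token):
    for hit in aff4.HashFileStore.GetClientsForHash(file_hash, token=token):
      yield file_hash, hit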
|
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Modules required to work with ironic_discoverd:
https://pypi.python.org/pypi/ironic-discoverd
"""
import eventlet
from oslo_config import cfg
from oslo_utils import importutils
from ironic.common import exception
from ironic.common.i18n import _
from ironic.common.i18n import _LE
from ironic.common.i18n import _LI
from ironic.common import keystone
from ironic.common import states
from ironic.conductor import task_manager
from ironic.drivers import base
from ironic.openstack.common import log as logging
LOG = logging.getLogger(__name__)
discoverd_opts = [
cfg.BoolOpt('enabled', default=False,
help='whether to enable inspection using ironic-discoverd'),
cfg.StrOpt('service_url',
help='ironic-discoverd HTTP endpoint. If this is not set, the '
'ironic-discoverd client default (http://127.0.0.1:5050) '
'will be used.'),
cfg.IntOpt('status_check_period', default=60,
help='period (in seconds) to check status of nodes '
'on inspection')
]
CONF = cfg.CONF
CONF.register_opts(discoverd_opts, group='discoverd')
ironic_discoverd = importutils.try_import('ironic_discoverd')
if ironic_discoverd:
from ironic_discoverd import client
class DiscoverdInspect(base.InspectInterface):
"""In-band inspection via ironic-discoverd project."""
@classmethod
def create_if_enabled(cls, driver_name):
"""Create instance of DiscoverdInspect if it's enabled.
Reports log warning with given driver_name if it's not.
:return: DiscoverdInspect instance or None
"""
if CONF.discoverd.enabled:
return cls()
else:
LOG.info(_LI("Inspection via ironic-discoverd is disabled in "
"configuration for driver %s. To enable, change "
"[discoverd] enabled = True."), driver_name)
def __init__(self):
if not CONF.discoverd.enabled:
raise exception.DriverLoadError(
_('ironic-discoverd support is disabled'))
if not ironic_discoverd:
raise exception.DriverLoadError(
_('ironic-discoverd Python module not found'))
# NOTE(dtantsur): __version_info__ attribute appeared in 1.0.0
version = getattr(ironic_discoverd, '__version_info__', (0, 2))
if version < (1, 0):
raise exception.DriverLoadError(
_('ironic-discoverd version is too old: required >= 1.0.0, '
'got %s') % '.'.join(str(x) for x in version))
def get_properties(self):
"""Return the properties of the interface.
:returns: dictionary of <property name>:<property description> entries.
"""
return {} # no properties
def validate(self, task):
"""Validate the driver-specific inspection information.
If invalid, raises an exception; otherwise returns None.
:param task: a task from TaskManager.
"""
# NOTE(deva): this is not callable if discoverd is disabled
# so don't raise an exception -- just pass.
pass
def inspect_hardware(self, task):
"""Inspect hardware to obtain the hardware properties.
This particular implementation only starts inspection using
ironic-discoverd. Results will be checked in a periodic task.
:param task: a task from TaskManager.
:returns: states.INSPECTING
"""
LOG.debug('Starting inspection for node %(uuid)s using '
'ironic-discoverd client %(version)s',
{'uuid': task.node.uuid, 'version':
ironic_discoverd.__version__})
# NOTE(dtantsur): we're spawning a short-living green thread so that
# we can release a lock as soon as possible and allow ironic-discoverd
# to operate on a node.
eventlet.spawn_n(_start_inspection, task.node.uuid, task.context)
return states.INSPECTING
@base.driver_periodic_task(spacing=CONF.discoverd.status_check_period,
enabled=CONF.discoverd.enabled)
def _periodic_check_result(self, manager, context):
"""Periodic task checking results of inspection."""
filters = {'provision_state': states.INSPECTING}
node_iter = manager.iter_nodes(filters=filters)
for node_uuid, driver in node_iter:
try:
# TODO(dtantsur): we need an exclusive lock only once
# inspection is finished.
with task_manager.acquire(context, node_uuid) as task:
_check_status(task)
except (exception.NodeLocked, exception.NodeNotFound):
continue
def _call_discoverd(func, uuid, context):
"""Wrapper around calls to discoverd."""
# NOTE(dtantsur): due to bug #1428652 None is not accepted for base_url.
kwargs = {}
if CONF.discoverd.service_url:
kwargs['base_url'] = CONF.discoverd.service_url
return func(uuid, auth_token=context.auth_token, **kwargs)
def _start_inspection(node_uuid, context):
"""Call to discoverd to start inspection."""
try:
_call_discoverd(client.introspect, node_uuid, context)
except Exception as exc:
LOG.exception(_LE('Exception during contacting ironic-discoverd '
'for inspection of node %(node)s: %(err)s'),
{'node': node_uuid, 'err': exc})
# NOTE(dtantsur): if acquire fails our last option is to rely on
# timeout
with task_manager.acquire(context, node_uuid) as task:
task.node.last_error = _('Failed to start inspection: %s') % exc
task.process_event('fail')
else:
LOG.info(_LI('Node %s was sent to inspection to ironic-discoverd'),
node_uuid)
def _check_status(task):
"""Check inspection status for node given by a task."""
node = task.node
if node.provision_state != states.INSPECTING:
return
if not isinstance(task.driver.inspect, DiscoverdInspect):
return
LOG.debug('Calling to discoverd to check status of node %s',
task.node.uuid)
# NOTE(dtantsur): periodic tasks do not have proper tokens in context
task.context.auth_token = keystone.get_admin_auth_token()
try:
status = _call_discoverd(client.get_status, node.uuid, task.context)
except Exception:
# NOTE(dtantsur): get_status should not normally raise
# let's assume it's a transient failure and retry later
LOG.exception(_LE('Unexpected exception while getting '
'inspection status for node %s, will retry later'),
node.uuid)
return
if status.get('error'):
LOG.error(_LE('Inspection failed for node %(uuid)s '
'with error: %(err)s'),
{'uuid': node.uuid, 'err': status['error']})
node.last_error = (_('ironic-discoverd inspection failed: %s')
% status['error'])
task.process_event('fail')
elif status.get('finished'):
LOG.info(_LI('Inspection finished successfully for node %s'),
node.uuid)
task.process_event('done')
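# Illustrative only (not part of the driver): the [discoverd] options
# registered above can also be set programmatically, e.g. from a test; the
# values below are assumptions, while the option names and group are real.
def _example_enable_discoverd():
    CONF.set_override('enabled', True, group='discoverd')
    CONF.set_override('service_url', 'http://127.0.0.1:5050', group='discoverd')
    CONF.set_override('status_check_period', 30, group='discoverd')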
|
|
"""
sphinx.domains
~~~~~~~~~~~~~~
Support for domains, which are groupings of description directives
and roles describing e.g. constructs of one programming language.
:copyright: Copyright 2007-2019 by the Sphinx team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import copy
from typing import NamedTuple
from sphinx.errors import SphinxError
from sphinx.locale import _
if False:
# For type annotation
from typing import Any, Callable, Dict, Iterable, List, Tuple, Type, Union # NOQA
from docutils import nodes # NOQA
from docutils.parsers.rst.states import Inliner # NOQA
from sphinx import addnodes # NOQA
from sphinx.builders import Builder # NOQA
from sphinx.environment import BuildEnvironment # NOQA
from sphinx.roles import XRefRole # NOQA
from sphinx.util.typing import RoleFunction # NOQA
class ObjType:
"""
An ObjType is the description for a type of object that a domain can
document. In the object_types attribute of Domain subclasses, object type
names are mapped to instances of this class.
Constructor arguments:
- *lname*: localized name of the type (do not include domain name)
- *roles*: all the roles that can refer to an object of this type
- *attrs*: object attributes -- currently only "searchprio" is known,
which defines the object's priority in the full-text search index,
see :meth:`Domain.get_objects()`.
"""
known_attrs = {
'searchprio': 1,
}
def __init__(self, lname, *roles, **attrs):
# type: (str, Any, Any) -> None
self.lname = lname
self.roles = roles # type: Tuple
self.attrs = self.known_attrs.copy() # type: Dict
self.attrs.update(attrs)
IndexEntry = NamedTuple('IndexEntry', [('name', str),
('subtype', int),
('docname', str),
('anchor', str),
('extra', str),
('qualifier', str),
('descr', str)])
class Index:
"""
An Index is the description for a domain-specific index. To add an index to
a domain, subclass Index, overriding the three name attributes:
* `name` is an identifier used for generating file names.
* `localname` is the section title for the index.
* `shortname` is a short name for the index, for use in the relation bar in
HTML output. Can be empty to disable entries in the relation bar.
and providing a :meth:`generate()` method. Then, add the index class to
your domain's `indices` list. Extensions can add indices to existing
domains using :meth:`~sphinx.application.Sphinx.add_index_to_domain()`.
"""
name = None # type: str
localname = None # type: str
shortname = None # type: str
def __init__(self, domain):
# type: (Domain) -> None
if self.name is None or self.localname is None:
raise SphinxError('Index subclass %s has no valid name or localname'
% self.__class__.__name__)
self.domain = domain
def generate(self, docnames=None):
# type: (Iterable[str]) -> Tuple[List[Tuple[str, List[IndexEntry]]], bool]
"""Return entries for the index given by *name*. If *docnames* is
given, restrict to entries referring to these docnames.
The return value is a tuple of ``(content, collapse)``, where *collapse*
is a boolean that determines if sub-entries should start collapsed (for
output formats that support collapsing sub-entries).
*content* is a sequence of ``(letter, entries)`` tuples, where *letter*
is the "heading" for the given *entries*, usually the starting letter.
*entries* is a sequence of single entries, where a single entry is a
sequence ``[name, subtype, docname, anchor, extra, qualifier, descr]``.
The items in this sequence have the following meaning:
- `name` -- the name of the index entry to be displayed
- `subtype` -- sub-entry related type:
0 -- normal entry
1 -- entry with sub-entries
2 -- sub-entry
- `docname` -- docname where the entry is located
- `anchor` -- anchor for the entry within `docname`
- `extra` -- extra info for the entry
- `qualifier` -- qualifier for the description
- `descr` -- description for the entry
Qualifier and description are not rendered e.g. in LaTeX output.
"""
raise NotImplementedError
class Domain:
"""
A Domain is meant to be a group of "object" description directives for
objects of a similar nature, and corresponding roles to create references to
them. Examples would be Python modules, classes, functions etc., elements
of a templating language, Sphinx roles and directives, etc.
Each domain has a separate storage for information about existing objects
and how to reference them in `self.data`, which must be a dictionary. It
also must implement several functions that expose the object information in
a uniform way to parts of Sphinx that allow the user to reference or search
for objects in a domain-agnostic way.
About `self.data`: since all object and cross-referencing information is
stored on a BuildEnvironment instance, the `domain.data` object is also
stored in the `env.domaindata` dict under the key `domain.name`. Before the
build process starts, every active domain is instantiated and given the
environment object; the `domaindata` dict must then either be nonexistent or
a dictionary whose 'version' key is equal to the domain class'
:attr:`data_version` attribute. Otherwise, `OSError` is raised and the
pickled environment is discarded.
"""
#: domain name: should be short, but unique
name = ''
#: domain label: longer, more descriptive (used in messages)
label = ''
#: type (usually directive) name -> ObjType instance
object_types = {} # type: Dict[str, ObjType]
#: directive name -> directive class
directives = {} # type: Dict[str, Any]
#: role name -> role callable
roles = {} # type: Dict[str, Union[RoleFunction, XRefRole]]
#: a list of Index subclasses
indices = [] # type: List[Type[Index]]
#: role name -> a warning message if reference is missing
dangling_warnings = {} # type: Dict[str, str]
#: node_class -> (enum_node_type, title_getter)
enumerable_nodes = {} # type: Dict[Type[nodes.Node], Tuple[str, Callable]]
#: data value for a fresh environment
initial_data = {} # type: Dict
#: data value
data = None # type: Dict
#: data version, bump this when the format of `self.data` changes
data_version = 0
def __init__(self, env):
# type: (BuildEnvironment) -> None
self.env = env # type: BuildEnvironment
self._role_cache = {} # type: Dict[str, Callable]
self._directive_cache = {} # type: Dict[str, Callable]
self._role2type = {} # type: Dict[str, List[str]]
self._type2role = {} # type: Dict[str, str]
# convert class variables to instance one (to enhance through API)
self.object_types = dict(self.object_types)
self.directives = dict(self.directives)
self.roles = dict(self.roles)
self.indices = list(self.indices)
if self.name not in env.domaindata:
assert isinstance(self.initial_data, dict)
new_data = copy.deepcopy(self.initial_data)
new_data['version'] = self.data_version
self.data = env.domaindata[self.name] = new_data
else:
self.data = env.domaindata[self.name]
if self.data['version'] != self.data_version:
raise OSError('data of %r domain out of date' % self.label)
for name, obj in self.object_types.items():
for rolename in obj.roles:
self._role2type.setdefault(rolename, []).append(name)
self._type2role[name] = obj.roles[0] if obj.roles else ''
self.objtypes_for_role = self._role2type.get # type: Callable[[str], List[str]]
self.role_for_objtype = self._type2role.get # type: Callable[[str], str]
def add_object_type(self, name, objtype):
# type: (str, ObjType) -> None
"""Add an object type."""
self.object_types[name] = objtype
if objtype.roles:
self._type2role[name] = objtype.roles[0]
else:
self._type2role[name] = ''
for role in objtype.roles:
self._role2type.setdefault(role, []).append(name)
def role(self, name):
# type: (str) -> RoleFunction
"""Return a role adapter function that always gives the registered
role its full name ('domain:name') as the first argument.
"""
if name in self._role_cache:
return self._role_cache[name]
if name not in self.roles:
return None
fullname = '%s:%s' % (self.name, name)
def role_adapter(typ, rawtext, text, lineno, inliner, options={}, content=[]):
# type: (str, str, str, int, Inliner, Dict, List[str]) -> Tuple[List[nodes.Node], List[nodes.system_message]] # NOQA
return self.roles[name](fullname, rawtext, text, lineno,
inliner, options, content)
self._role_cache[name] = role_adapter
return role_adapter
def directive(self, name):
# type: (str) -> Callable
"""Return a directive adapter class that always gives the registered
directive its full name ('domain:name') as ``self.name``.
"""
if name in self._directive_cache:
return self._directive_cache[name]
if name not in self.directives:
return None
fullname = '%s:%s' % (self.name, name)
BaseDirective = self.directives[name]
class DirectiveAdapter(BaseDirective): # type: ignore
def run(self):
# type: () -> List[nodes.Node]
self.name = fullname
return super().run()
self._directive_cache[name] = DirectiveAdapter
return DirectiveAdapter
# methods that should be overwritten
def clear_doc(self, docname):
# type: (str) -> None
"""Remove traces of a document in the domain-specific inventories."""
pass
def merge_domaindata(self, docnames, otherdata):
# type: (List[str], Dict) -> None
"""Merge in data regarding *docnames* from a different domaindata
inventory (coming from a subprocess in parallel builds).
"""
raise NotImplementedError('merge_domaindata must be implemented in %s '
'to be able to do parallel builds!' %
self.__class__)
def process_doc(self, env, docname, document):
# type: (BuildEnvironment, str, nodes.document) -> None
"""Process a document after it is read by the environment."""
pass
def check_consistency(self):
# type: () -> None
"""Do consistency checks (**experimental**)."""
pass
def process_field_xref(self, pnode):
# type: (addnodes.pending_xref) -> None
"""Process a pending xref created in a doc field.
For example, attach information about the current scope.
"""
pass
def resolve_xref(self, env, fromdocname, builder,
typ, target, node, contnode):
# type: (BuildEnvironment, str, Builder, str, str, addnodes.pending_xref, nodes.Element) -> nodes.Element # NOQA
"""Resolve the pending_xref *node* with the given *typ* and *target*.
This method should return a new node, to replace the xref node,
containing the *contnode* which is the markup content of the
cross-reference.
If no resolution can be found, None can be returned; the xref node will
then given to the :event:`missing-reference` event, and if that yields no
resolution, replaced by *contnode*.
The method can also raise :exc:`sphinx.environment.NoUri` to suppress
the :event:`missing-reference` event being emitted.
"""
pass
def resolve_any_xref(self, env, fromdocname, builder, target, node, contnode):
# type: (BuildEnvironment, str, Builder, str, addnodes.pending_xref, nodes.Element) -> List[Tuple[str, nodes.Element]] # NOQA
"""Resolve the pending_xref *node* with the given *target*.
The reference comes from an "any" or similar role, which means that we
don't know the type. Otherwise, the arguments are the same as for
:meth:`resolve_xref`.
The method must return a list (potentially empty) of tuples
``('domain:role', newnode)``, where ``'domain:role'`` is the name of a
role that could have created the same reference, e.g. ``'py:func'``.
``newnode`` is what :meth:`resolve_xref` would return.
.. versionadded:: 1.3
"""
raise NotImplementedError
def get_objects(self):
# type: () -> Iterable[Tuple[str, str, str, str, str, int]]
"""Return an iterable of "object descriptions", which are tuples with
six items:
* `name` -- fully qualified name
* `dispname` -- name to display when searching/linking
* `type` -- object type, a key in ``self.object_types``
* `docname` -- the document where it is to be found
* `anchor` -- the anchor name for the object
* `priority` -- how "important" the object is (determines placement
in search results)
- 1: default priority (placed before full-text matches)
- 0: object is important (placed before default-priority objects)
- 2: object is unimportant (placed after full-text matches)
- -1: object should not show up in search at all
"""
return []
def get_type_name(self, type, primary=False):
# type: (ObjType, bool) -> str
"""Return full name for given ObjType."""
if primary:
return type.lname
return _('%s %s') % (self.label, type.lname)
def get_enumerable_node_type(self, node):
# type: (nodes.Node) -> str
"""Get type of enumerable nodes (experimental)."""
enum_node_type, _ = self.enumerable_nodes.get(node.__class__, (None, None))
return enum_node_type
def get_full_qualified_name(self, node):
# type: (nodes.Element) -> str
"""Return full qualified name for given node."""
return None
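# A minimal, illustrative sketch of how the classes above fit together; the
# "recipe" names are invented for this example and are not part of Sphinx.
class RecipeIndex(Index):
    name = 'recipes'
    localname = 'Recipe Index'
    shortname = 'recipes'
    def generate(self, docnames=None):
        # One index group under the heading "R"; sub-entries not collapsed.
        entries = [IndexEntry('recipe-spam', 0, 'intro', 'recipe-spam', '', '', '')]
        return [('R', entries)], False
class RecipeDomain(Domain):
    name = 'recipe'
    label = 'Recipe Sample'
    object_types = {'recipe': ObjType(_('recipe'), 'ref')}
    indices = [RecipeIndex]
    initial_data = {'recipes': {}}  # recipe name -> docname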
|
|
#!/usr/bin/python
import gzip
import numpy as np
import sys, os, inspect
currentdir = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))
parentdir = os.path.dirname(currentdir)
sys.path.insert(0,parentdir)
import common
import config as cfg
def fileToDictionary(inputFile, indexColumn):
input = open(inputFile, "r")
rd = dict()
for x in input:
arow = x.rstrip().split("\t")
id = arow[indexColumn]
if rd.has_key(id):
#rd[id].append(arow)
print "duplicate knowngene id = " + id
print "arow = " + str(arow)
print "rd[id] = " + str(rd[id])
else:
rd[id] = arow
input.close()
return(rd)
def fileToArray(inputFile, skipFirst):
input = open(inputFile, "r")
ra = []
for i in range(skipFirst):
input.readline()
for x in input:
arow = x.rstrip().split("\t")
ra.append(arow)
input.close()
return(ra)
def countBins(samFile, countFile, statFile, sizeRef, binRef):
if samFile[-2:] == 'gz':
INFILE = gzip.open(samFile, "rb")
else:
INFILE = open(samFile, "r")
OUTFILE = open(countFile, "w")
STATFILE = open(statFile, "w")
chrominfo = fileToDictionary(sizeRef, 0)
bins = fileToArray(binRef, 0)
binCounts = []
for i in range(len(bins)):
binCounts.append(0)
counter = 0
totalReads = 0
prevChrompos = ""
for x in INFILE:
arow = x.rstrip().split("\t")
if arow[0][0] == '@':
continue
thisChrom = arow[2]
thisChrompos = arow[3]
if thisChrom.find("_") > -1:
continue
if thisChrom == "chrM":
continue
if thisChrom == "":
continue
if chrominfo.has_key(thisChrom):
pass
else:
continue
totalReads += 1
thisChrominfo = chrominfo[thisChrom]
thisAbspos = long(thisChrompos) + long(thisChrominfo[2])
counter += 1
indexUp = len(bins) - 1
indexDown = 0
indexMid = int((indexUp - indexDown) / 2.0)
while True:
if thisAbspos >= long(bins[indexMid][2]):
indexDown = indexMid + 0
indexMid = int((indexUp - indexDown) / 2.0) + indexMid
else:
indexUp = indexMid + 0
indexMid = int((indexUp - indexDown) / 2.0) + indexDown
if indexUp - indexDown < 2:
break
# NOTE: use an if/else here; originally only the second statement was present, but that prevented reads from being counted in the last bin.
if thisAbspos >= long(bins[indexUp][2]):
binCounts[indexUp] += 1
else:
binCounts[indexDown] += 1
prevChrompos = thisChrompos
for i in range(len(binCounts)):
thisRatio = float(binCounts[i]) / (float(counter) / float(len(bins)))
OUTFILE.write("\t".join(bins[i][0:3]))
OUTFILE.write("\t")
OUTFILE.write(str(binCounts[i]))
OUTFILE.write("\t")
OUTFILE.write(str(thisRatio))
OUTFILE.write("\n")
binCounts.sort()
STATFILE.write('Reads\t')
STATFILE.write(str(totalReads))
STATFILE.write('\n')
STATFILE.write('AverageCount\t')
STATFILE.write(str(np.mean(binCounts)))
STATFILE.write('\n')
STATFILE.write('MedianCount\t')
STATFILE.write(str(binCounts[len(bins)/2]))
STATFILE.write('\n')
INFILE.close()
OUTFILE.close()
STATFILE.close()
def runOne(samFile, countDir, statsDir, species):
#get environment prepared#
countVars = cfg.Count()
if samFile[-2:] == 'gz':
sampleName = os.path.basename(samFile)[:-13]
else:
sampleName = os.path.basename(samFile)[:-11]
statFile = statsDir + sampleName + '.bincount.stats.txt'
countFile = countDir + sampleName + '.bincounts.txt'
countBins(samFile, countFile, statFile, countVars.chromDict[species], countVars.binDict[species])
printText = '\t\tFinished counting reads for ' + os.path.basename(samFile)
print(printText)
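# Illustrative entry point (the argument layout and species key are
# assumptions; in the real pipeline runOne() is presumably called elsewhere):
if __name__ == "__main__":
    if len(sys.argv) == 5:
        runOne(sys.argv[1], sys.argv[2], sys.argv[3], sys.argv[4])
    else:
        print "usage: %s <samFile> <countDir> <statsDir> <species>" % sys.argv[0]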
|
|
# -*- coding: utf-8 -*-
from pexpect import pxssh
import getpass
import argparse
# import os
import csv
import re
__version__ = "0.3.3.6"
"""
Handle input parameters
"""
parser = argparse.ArgumentParser(description='''Connect to a CM host over SSH, \
run SAT commands through the OSSI terminal and parse the output. ''', epilog="""All's well that ends well.""")
parser.add_argument("host", help="Host name or address, where want to connect")
parser.add_argument("username", help="Username for host")
parser.add_argument("-p", "--password", help="Password for host")
parser.add_argument("-i", "--inputfile", help="Input file name (CSV)")
parser.add_argument("-o", "--outputfile", help="Output file name")
parser.add_argument("-n", "--no_echo", help="Trun Off the command repetition in console and output file."
, action='count')
parser.add_argument("-v", "--debug", help="Trun ON the debug logging of OSSI terminal. \
Debugis loggeg into the debug.log", action='count')
# Planned feature
parser.add_argument("-c", "--command", help="CM command as a string; \
eg. <display station xxxx>")
parser.add_argument("-t", "--prompt_timeout", help="Finetuning timeout for promp recognition in seconds; \
Default settings is 2 seconds, but it can be decreased to around 0.5-0.2 sec. \
Be careful!\
Too low number can lead to wrong behavior and script crash. \
Test every command to get proper number, and do not mix commands in\
input file with that option.")
# parser.add_argument("-f", "--fieldID", help="FieldID /what you want t change/")
# parser.add_argument("-d", "--data", help="data for change command")
args = parser.parse_args()
if args.password is not None:
password = args.password
else:
password = getpass.getpass('password: ')
class Ossi(object):
"""
Ossi handler class object
Init the base object with some default variables.
"""
def __init__(self):
"""
Init ossi object
Init the base object with some default variables.
"""
self.cmd_error = 0
self.debug = args.debug
self.ossi_alive = False
self.no_echo = args.no_echo
if args.prompt_timeout is not None:
self.timeout = float(args.prompt_timeout)  # argparse returns a string
else:
self.timeout = 2
def ossi_open(self, host, username, password):
"""
Ssh session opening, and switch to the ossi terminal.
Open an ssh session to the 'host' with 'username' and 'password'.
If the password is not provided, then get it with getpass.
If the connection is established, go into SAT with the ossi terminal.
"""
self.host = host
self.username = username
self.password = password
# print self.host, self.username, self.password
try:
self.s = pxssh.pxssh(options={"StrictHostKeyChecking": "no",
"UserKnownHostsFile": "/dev/null"})
# hostname = raw_input('hostname: ')
# username = raw_input('username: ')
# print args.host, args.username, password
if self.debug is not None:
self.s.logfile = open("debug.log", "wb")
try:
self.s.login(self.host, self.username, self.password, terminal_type='vt100', original_prompt='[#$>t\]]', password_regex='(?i)(?:password:)')
self.s.timeout = 5
if self.no_echo is None:
print "--- Connection established ---"
self.s.sendline('sat') # run a command
try:
self.s.expect('Terminal Type.*') # match the prompts
self.s.sendline('ossit')
try:
self.s.expect('t') # match the prompt
if self.no_echo is None:
print "--- Ossi is logged in and ready ---"
self.ossi_alive = self.s.isalive()
except Exception as identifier:
print 'Did not recognize the ossi terminal prompt'
self.ossi_alive = False
except Exception as identifier:
print 'Did not recognize the prompt for Terminal Type'
self.ossi_alive = False
except Exception as identifier:
print 'Login failed', self.s.before
self.ossi_alive = False
except pxssh.ExceptionPxssh as self.e:
print("pxssh failed on login.")
print(self.e)
def ossi_close(self):
"""
Session closing.
Print how many wrong commands were sent to the ossi,
log off from ossi, and close the ssh session.
"""
try:
# print (' - Logging out from ossi - ')
self.s.sendline('clogoff')
self.s.sendline('t')
self.s.expect('Proceed With Logoff.*')
self.s.sendline('y')
self.s.prompt()
# print(s.before)
self.s.logout()
if self.no_echo is None:
print '--- Ossi logged out ---'
if self.cmd_error != 0:
print '*** {0} commands are failed ***'.format(self.cmd_error)
for self.key in self.failed_cmd:
print '{0} --- {1}'.format(self.key, self.failed_cmd[self.key])
print'*** End of Errors ***'
except pxssh.ExceptionPxssh as self.e:
print("pxssh failed on logoff.")
print(self.e)
def cmd_parser(self, inputfile):
"""
Parse the 'inputfile' csv file.
Each line is an ossi command, so it reads line by line and joins the fields with whitespace.
"""
self.inputfile = inputfile
if self.inputfile is not None:
try:
self.info = csv.reader(open(self.inputfile))
if self.no_echo is None:
print ' -- {0} is opened --'.format(self.inputfile)
except:
print ("Failed to open: ", self.inputfile)
else:
for self.row in self.info:
# self.row_cmd = ' '.join(self.row)
self.cmd = ' '.join(self.row)
if len(self.cmd.translate(None, ' \n\t\r')) > 0:
if self.no_echo is None:
print '-------- \n\r{0}\n\r--------'.format(self.cmd)
self.output_writer('-------- \n{0}\n--------\n'.format(self.cmd))
self.ossi_cmd(self.cmd)
def ossi_prompt(self):
try:
i = self.s.expect(['\rmore..y.'], timeout=self.timeout)
return True
except Exception as e:
#print (e)
return False
def ossi_cmd(self, command):
"""
Send 'command' to ossi terminal, and read the output.
It gets the command as a string object. The command output is read page by page and passed as an object to 'data_parse'.
The result is printed out, and written into the output file if it is defined.
"""
self.command = command
if self.command is not None:
self.cmd_raw_result = ""
self.cmd_result = ""
self.failed_cmd = {}
self.s.sendline('c'+self.command)
self.s.sendline('t')
self.index = 0
if self.ossi_prompt():
while self.index == 0:
self.cmd_raw_result += self.s.before
self.s.sendline('y')
if self.ossi_prompt():
pass
else:
self.index = self.s.expect(['####fake', '\rd\r\n\rt\r\n\r', '\rd*t\r\n\r'])
self.cmd_raw_result += self.s.before
else:
try:
self.index = self.s.expect(['\rmore..y.', 'e1.*', 'f.*'])
if self.index == 1:
if self.no_echo is None:
print '-- Command Error --'
self.cmd_error += 1
self.failed_cmd[str(self.command)] = self.s.after
elif self.index == 2:
self.cmd_raw_result += self.s.after
except:
if self.s.expect(['\rt\r\n\r']):
self.index = 3
if self.index != 3:
#Call command output parser
self.cmd_result = self.data_parse(self.cmd_raw_result)
self.output_writer(self.cmd_result)
self.output_writer('\n\n')
# print '---- last data ---'
def data_parse(self, data):
"""
Parse the ossi command page 'data' and return it.
Only the dxxxxxx data fields are parsed from the output; values are comma separated.
"""
self.data = data
self.page_data = ""
self.fields = 0
self.new_record = True
self.lines = self.data.split('\n')
for self.line in self.lines:
self.line = self.line.lstrip().rstrip()
if re.match('^f', self.line):
self.fields = self.line.count('\t') + 1
elif re.match('^e', self.line):
self.result = self.line.lstrip('e')
self.page_data += self.result
elif re.match('^d', self.line):
self.result = self.line.lstrip('d')
if len(self.result) != 0:
if len(self.page_data) > 0:
if self.new_record is False:
self.page_data += ','
self.page_data += re.sub('\t', ',', self.result)
self.new_record = False
else:
self.page_data += re.sub('\t', ',', self.result)
self.new_record = False
elif re.match('^n', self.line):
self.page_data += "\n"
self.new_record = True
# print '*** page data ***'
# print ''.join(page_data)
print self.page_data
if args.command is None:
print ('\n')
return self.page_data
def output_writer(self, output):
"""
Write 'output' object into the outputfile.
If the outputfile is not defined than only print output to the screen.
"""
self.outputfile = args.outputfile
self.output = output
if self.outputfile is not None:
# print self.output
try:
with open(self.outputfile, 'a+') as self.f:
self.f.write(self.output)
except:
print ("Failed to open: ", self.outputfile)
def main():
"""
Main module of the ossi script.
Bring things together.
"""
if args.no_echo is None:
print '--- Let Start! ---'
a = Ossi()
# the password has already been resolved at module level (from -p or a getpass prompt)
a.ossi_open(args.host, args.username, password)
# print args.inputfile
# print a.cmd_parser(args.inputfile)
if a.ossi_alive is True:
if args.inputfile is not None and args.command is None:
a.cmd_parser(args.inputfile)
elif args.inputfile is None and args.command is not None:
if args.no_echo is None:
print '-------- \n\r{0}\n\r--------'.format(args.command)
a.output_writer('-------- \n{0}\n--------\n'.format(args.command))
a.ossi_cmd(args.command)
else:
print('There is neither an input csv file neither a command to execute')
a.ossi_close()
if args.no_echo is None:
print '--- Script running is finished ---'
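# main() is defined above but never invoked in this module; presumably the
# script is meant to be executed directly, so add the usual entry point:
if __name__ == '__main__':
    main()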
|
|
#!/usr/bin/env python3
# Copyright (c) 2016-2018 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test label RPCs.
RPCs tested are:
- getaccountaddress
- getaddressesbyaccount/getaddressesbylabel
- listaddressgroupings
- setlabel
- sendfrom (with account arguments)
- move (with account arguments)
Run the test twice - once using the accounts API and once using the labels API.
The accounts API test can be removed in V0.18.
"""
from collections import defaultdict
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import assert_equal, assert_raises_rpc_error
class WalletLabelsTest(BitcoinTestFramework):
def set_test_params(self):
self.setup_clean_chain = True
self.num_nodes = 2
self.extra_args = [['-deprecatedrpc=accounts'], []]
def setup_network(self):
"""Don't connect nodes."""
self.setup_nodes()
def skip_test_if_missing_module(self):
self.skip_if_no_wallet()
def run_test(self):
"""Run the test twice - once using the accounts API and once using the labels API."""
self.log.info("Test accounts API")
self._run_subtest(True, self.nodes[0])
self.log.info("Test labels API")
self._run_subtest(False, self.nodes[1])
def _run_subtest(self, accounts_api, node):
# Check that there's no UTXO on any of the nodes
assert_equal(len(node.listunspent()), 0)
# Note each time we call generate, all generated coins go into
# the same address, so we call twice to get two addresses w/50 each
node.generate(1)
node.generate(101)
assert_equal(node.getbalance(), 100)
# there should be 2 address groups
# each with 1 address with a balance of 50 Bitcoins
address_groups = node.listaddressgroupings()
assert_equal(len(address_groups), 2)
# the addresses aren't linked now, but will be after we send to the
# common address
linked_addresses = set()
for address_group in address_groups:
assert_equal(len(address_group), 1)
assert_equal(len(address_group[0]), 2)
assert_equal(address_group[0][1], 50)
linked_addresses.add(address_group[0][0])
# send 50 from each address to a third address not in this wallet
# There's some fee that will come back to us when the miner reward
# matures.
common_address = "msf4WtN1YQKXvNtvdFYt9JBnUD2FB41kjr"
txid = node.sendmany(
fromaccount="",
amounts={common_address: 100},
subtractfeefrom=[common_address],
minconf=1,
)
tx_details = node.gettransaction(txid)
fee = -tx_details['details'][0]['fee']
# there should be 1 address group, with the previously
# unlinked addresses now linked (they both have 0 balance)
address_groups = node.listaddressgroupings()
assert_equal(len(address_groups), 1)
assert_equal(len(address_groups[0]), 2)
assert_equal(set([a[0] for a in address_groups[0]]), linked_addresses)
assert_equal([a[1] for a in address_groups[0]], [0, 0])
node.generate(1)
# we want to reset so that the "" label has what's expected.
# otherwise we're off by exactly the fee amount as that's mined
# and matures in the next 100 blocks
if accounts_api:
node.sendfrom("", common_address, fee)
amount_to_send = 1.0
# Create labels and make sure subsequent label API calls
# recognize the label/address associations.
labels = [Label(name, accounts_api) for name in ("a", "b", "c", "d", "e")]
for label in labels:
if accounts_api:
address = node.getaccountaddress(label.name)
else:
address = node.getnewaddress(label.name)
label.add_receive_address(address)
label.verify(node)
# Check all labels are returned by listlabels.
assert_equal(node.listlabels(), [label.name for label in labels])
# Send a transaction to each label, and make sure this forces
# getaccountaddress to generate a new receiving address.
for label in labels:
if accounts_api:
node.sendtoaddress(label.receive_address, amount_to_send)
label.add_receive_address(node.getaccountaddress(label.name))
else:
node.sendtoaddress(label.addresses[0], amount_to_send)
label.verify(node)
# Check the amounts received.
node.generate(1)
for label in labels:
assert_equal(
node.getreceivedbyaddress(label.addresses[0]), amount_to_send)
assert_equal(node.getreceivedbylabel(label.name), amount_to_send)
# Check that sendfrom label reduces listaccounts balances.
for i, label in enumerate(labels):
to_label = labels[(i + 1) % len(labels)]
if accounts_api:
node.sendfrom(label.name, to_label.receive_address, amount_to_send)
else:
node.sendtoaddress(to_label.addresses[0], amount_to_send)
node.generate(1)
for label in labels:
if accounts_api:
address = node.getaccountaddress(label.name)
else:
address = node.getnewaddress(label.name)
label.add_receive_address(address)
label.verify(node)
assert_equal(node.getreceivedbylabel(label.name), 2)
if accounts_api:
node.move(label.name, "", node.getbalance(label.name))
label.verify(node)
node.generate(101)
expected_account_balances = {"": 5200}
for label in labels:
expected_account_balances[label.name] = 0
if accounts_api:
assert_equal(node.listaccounts(), expected_account_balances)
assert_equal(node.getbalance(""), 5200)
# Check that setlabel can assign a label to a new unused address.
for label in labels:
address = node.getnewaddress()
node.setlabel(address, label.name)
label.add_address(address)
label.verify(node)
if accounts_api:
assert address not in node.getaddressesbyaccount("")
else:
assert_raises_rpc_error(-11, "No addresses with label", node.getaddressesbylabel, "")
# Check that addmultisigaddress can assign labels.
for label in labels:
addresses = []
for x in range(10):
addresses.append(node.getnewaddress())
multisig_address = node.addmultisigaddress(5, addresses, label.name)['address']
label.add_address(multisig_address)
label.purpose[multisig_address] = "send"
label.verify(node)
if accounts_api:
node.sendfrom("", multisig_address, 50)
node.generate(101)
if accounts_api:
for label in labels:
assert_equal(node.getbalance(label.name), 50)
# Check that setlabel can change the label of an address from a
# different label.
change_label(node, labels[0].addresses[0], labels[0], labels[1], accounts_api)
# Check that setlabel can set the label of an address already
# in the label. This is a no-op.
change_label(node, labels[2].addresses[0], labels[2], labels[2], accounts_api)
if accounts_api:
# Check that setaccount can change the label of an address which
# is the receiving address of a different label.
change_label(node, labels[0].receive_address, labels[0], labels[1], accounts_api)
# Check that setaccount can set the label of an address which is
# already the receiving address of the label. This is a no-op.
change_label(node, labels[2].receive_address, labels[2], labels[2], accounts_api)
class Label:
def __init__(self, name, accounts_api):
# Label name
self.name = name
self.accounts_api = accounts_api
# Current receiving address associated with this label.
self.receive_address = None
# List of all addresses assigned with this label
self.addresses = []
# Map of address to address purpose
self.purpose = defaultdict(lambda: "receive")
def add_address(self, address):
assert_equal(address not in self.addresses, True)
self.addresses.append(address)
def add_receive_address(self, address):
self.add_address(address)
if self.accounts_api:
self.receive_address = address
def verify(self, node):
if self.receive_address is not None:
assert self.receive_address in self.addresses
if self.accounts_api:
assert_equal(node.getaccountaddress(self.name), self.receive_address)
for address in self.addresses:
assert_equal(
node.getaddressinfo(address)['labels'][0],
{"name": self.name,
"purpose": self.purpose[address]})
if self.accounts_api:
assert_equal(node.getaccount(address), self.name)
else:
assert_equal(node.getaddressinfo(address)['label'], self.name)
assert_equal(
node.getaddressesbylabel(self.name),
{address: {"purpose": self.purpose[address]} for address in self.addresses})
if self.accounts_api:
assert_equal(set(node.getaddressesbyaccount(self.name)), set(self.addresses))
def change_label(node, address, old_label, new_label, accounts_api):
assert_equal(address in old_label.addresses, True)
if accounts_api:
node.setaccount(address, new_label.name)
else:
node.setlabel(address, new_label.name)
old_label.addresses.remove(address)
new_label.add_address(address)
# Calling setaccount on an address which was previously the receiving
# address of a different account should reset the receiving address of
# the old account, causing getaccountaddress to return a brand new
# address.
if accounts_api:
if old_label.name != new_label.name and address == old_label.receive_address:
new_address = node.getaccountaddress(old_label.name)
assert_equal(new_address not in old_label.addresses, True)
assert_equal(new_address not in new_label.addresses, True)
old_label.add_receive_address(new_address)
old_label.verify(node)
new_label.verify(node)
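# Illustrative stand-alone use of the Label helper above (node is assumed to
# be a node handle from this test framework; not called by the test itself):
def example_label_roundtrip(node):
    label = Label("example", accounts_api=False)
    label.add_receive_address(node.getnewaddress(label.name))
    label.verify(node)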
if __name__ == '__main__':
WalletLabelsTest().main()
|
|
# Copyright (c) 2014 Mirantis Inc.
#
# Licensed under the Apache License, Version 2.0 (the License);
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an AS IS BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and#
# limitations under the License.
import fractions
import prettytable
from oslo_config import cfg
from cloudferry.condensation import group
from cloudferry.condensation import flavor
from cloudferry.condensation import action
from cloudferry.condensation import node
from cloudferry.condensation import vm
from cloudferry.lib.utils import log
LOG = log.getLogger(__name__)
CONF = cfg.CONF
class Cloud(object):
"""This class is representation of All resources of cloud"""
@classmethod
def from_dicts(cls, name, nodes, flavors, vms, groups):
"""
This method creates the cloud structure from dicts parsed from files.
We create objects in the following order:
1 - create flavor objects
2 - find the greatest common divisor
(http://en.wikipedia.org/wiki/Greatest_common_divisor) of all flavor properties
3 - call reduce_resources method on all flavor objects
4 - create all node objects
5 - create vm objects linked with node objects and
flavor objects
6 - create group objects linked to vms
"""
nodes_dict, flavors_dict, vms_dict = {}, {}, {}
# create Flavor Objects
for flavor_id, flavor_params in flavors.items():
flavors_dict.update(
{flavor_id: flavor.Flavor(**flavor_params)})
flavors_dict[flavor.default] = flavor.default
# count gcd on Flavors for ram and cores
ram_factor = reduce(
fractions.gcd, [i.ram for i in flavors_dict.values()])
core_factor = reduce(
fractions.gcd, [i.core for i in flavors_dict.values()])
# reduce flavors with core_factor and ram_factor
for flavor_obj in flavors_dict.values():
flavor_obj.reduce_resources(ram_factor, core_factor)
# create Node Objects
for node_name, node_params in nodes.items():
# replace fqdn with just node name
node_name = node_name.split(".")[0]
nodes_dict.update(
{node_name: node.Node(
name=node_name,
ram_factor=ram_factor,
core_factor=core_factor,
**node_params)})
# create Vm objects linked to Nodes and Flavors
for vm_params in vms.values():
node_obj = nodes_dict.get(vm_params.get("host"))
if node_obj is None:
# VM is running on a host which is down
LOG.info("VM '%s' is running on a down host. Skipping.",
vm_params['id'])
continue
flavor_obj = flavors_dict.get(vm_params.get("flavor"))
vms_dict.update({vm_params.get("id"): vm.Vm(
node=node_obj,
vm_id=vm_params.get("id"),
flavor=flavor_obj)})
# create Groups objects
groups = group.Group.from_dict(groups, vms_dict)
return cls(name, nodes_dict, groups)
def __init__(self, name, nodes=None, groups=None):
if not nodes:
nodes = {}
if not groups:
groups = []
self.name = name
self.add_nodes(nodes)
self.groups = groups
self.required_flavors_for_nodes = {}
self.node_ids_to_be_recalculated = []
self.actions = action.Actions(name)
        # whether we need to solve the bounded dynamic knapsack problem
self.improve_accuracy = False
LOG.debug("created cloud obj with name " + name)
def add_nodes(self, nodes_dict):
"""
This method adds nodes to self
"""
LOG.debug("adding nodes to cloud " + self.name)
self.nodes = nodes_dict
for node_obj in nodes_dict.values():
node_obj.cloud = self
def calc_required_flavors_for_nodes(self):
"""
This method recalculates flavor distribution over nodes for all
node names in self.node_ids_to_be_recalculated
This trick reduces total complexity of the program
We don't have to recalculate distribution for all nodes
only for ones that are in array
"""
        # we need to count, for each flavor, its distribution
        # on nodes that are not full
LOG.debug("starting recalculation of flavor distribution over nodes")
flavors_dict = {}
for node_obj in self.nodes.values():
if node_obj.is_full:
continue
for vm_obj in node_obj.vms.values():
if vm_obj.flavor not in flavors_dict:
flavors_dict[vm_obj.flavor] = 0
flavors_dict[vm_obj.flavor] += 1
# just in case - make list of nodes to be recalculated distinct
self.node_ids_to_be_recalculated = list(set(
self.node_ids_to_be_recalculated))
LOG.info("recalculating nodes " + ",".join(
self.node_ids_to_be_recalculated))
for node_name in self.node_ids_to_be_recalculated:
# paranoid check
if node_name not in self.nodes:
if node_name in self.required_flavors_for_nodes:
del self.required_flavors_for_nodes[node_name]
continue
node_obj = self.nodes[node_name]
if node_obj.vms and not node_obj.is_full:
LOG.debug("recalculating " + node_name)
self.required_flavors_for_nodes.update({
node_name: node_obj.calculate_flavors_required(
flavors_dict,
self.improve_accuracy)})
# after updating distribution clear list
self.node_ids_to_be_recalculated = []
LOG.debug("finished recalculation of flavor distribution over nodes")
def condense(self, improve_accuracy=False):
"""
This method finds vms distribution on nodes with highest density
it runs recursively until we cannot find better solution
"""
self.required_flavors_for_nodes = {}
self.improve_accuracy = improve_accuracy
self.node_ids_to_be_recalculated = []
# recalculate all nodes that are neither full, nor empty
for node_name, node_obj in self.nodes.items():
if node_obj.vms and not node_obj.is_full:
self.node_ids_to_be_recalculated.append(node_name)
self.condense_recursively()
def fil_node(self, node_to_be_filled, node_name_to_be_filled):
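        """
        Fill node_to_be_filled with vms of the flavors listed in
        self.required_flavors_for_nodes[node_name_to_be_filled], moving
        them from other non-full nodes and recording a condensation
        action for each move.
        """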
for flavor_obj, count in self.required_flavors_for_nodes[
node_name_to_be_filled].items():
# for all nodes containing that flavor we have number of vms
# with flavor on that node
for node_obj, flavor_count in flavor_obj.node_distribution(
self).items():
if not count:
# we placed enough vms of this flavor to node
break
if node_obj == node_to_be_filled or node_obj.is_full:
# we don't have to move vm from full nodes
continue
if node_obj.name not in self.node_ids_to_be_recalculated:
# as soon as we moved vm from the node - we need to
# recalculate distribution on this node
self.node_ids_to_be_recalculated.append(node_obj.name)
for _ in range(flavor_count):
if not count:
# we placed enough vms of this flavor to node
break
                    # evacuate a vm from one node to another
vm_obj = node_obj.get_vm_by_flavor(flavor_obj)
vm_obj.link_node(node_to_be_filled)
self.actions.add_condensation_action(
vm_obj, node_obj, node_to_be_filled)
count -= 1
# This node is already full - we don't need to store its distribution
del self.required_flavors_for_nodes[node_name_to_be_filled]
def postprocess_filing(self):
# process checks after node is full
for node_name, node_obj in self.nodes.items():
# first check - find free/full nodes and exclude them
# from candidates to be filled
if not node_obj.vms or node_obj.is_full:
if node_name in self.node_ids_to_be_recalculated:
self.node_ids_to_be_recalculated.pop(
self.node_ids_to_be_recalculated.index(node_name))
if node_name in self.required_flavors_for_nodes:
del self.required_flavors_for_nodes[node_name]
continue
            # second check - find nodes that need more flavors than we have
if node_name in self.required_flavors_for_nodes:
for flavor_obj, count in self.required_flavors_for_nodes[
node_name].items():
if count > flavor_obj.amount(self):
if node_name not in self.node_ids_to_be_recalculated:
self.node_ids_to_be_recalculated.append(node_name)
def condense_recursively(self):
"""
This method finds vms distribution on nodes with highest density
it runs recursively until we cannot find better solution
"""
        # calculate how many vms of each flavor each node needs to be full
self.calc_required_flavors_for_nodes()
if not self.required_flavors_for_nodes:
# we cannot improve result - we are done
return
# select node to be filled
if self.improve_accuracy:
# use algorithm with better accuracy and higher complexity
# we use this in the end of condensation
# in this case we are looking for node with maximal density
node_name_to_be_filled = max(
self.required_flavors_for_nodes,
key=lambda a: self.nodes[a].potential_utilization(
self.required_flavors_for_nodes[a]))
node_to_be_filled = self.nodes[node_name_to_be_filled]
else:
# use approximate algorithm that runs faster, but solution is not
# accurate
# in this case we are looking for node that requires
# minimum permutations to be filled
node_name_to_be_filled = min(
self.required_flavors_for_nodes,
key=lambda a: sum(
self.required_flavors_for_nodes[a].values()))
node_to_be_filled = self.nodes[node_name_to_be_filled]
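            # Illustration (hypothetical counts): if node A still needs
            # {small: 2} (2 moves) while node B needs {small: 1, big: 3}
            # (4 moves), this fast path picks node A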
pot_util = node_to_be_filled.potential_utilization(
self.required_flavors_for_nodes[node_name_to_be_filled])
        # if the potential utilization of the node doesn't fill the node,
        # it means that we are done with the approximation part
        # and we need to switch to the accurate algorithm
if all([i < CONF.condense.precision for i in pot_util]):
# recalculate for all spare nodes
return self.condense(True)
        # at this moment we have the node to be filled and the flavors to put on it
        # we need to do the actual job at this step
        # for each flavor that needs to be placed on the node we have the count
        # of vms of this flavor to be placed on the node
LOG.info("filing node " + node_name_to_be_filled)
self.fil_node(node_to_be_filled, node_name_to_be_filled)
self.postprocess_filing()
return self.condense_recursively()
def transfer_nodes(self, cloud):
"""
This method transfers all nodes without vms to cloud
"""
for key, value in self.nodes.items():
if not value.vms:
self.transfer_node(key, cloud)
def transfer_node(self, node_name, cloud):
"""
This method transfers node to another cloud
"""
node_to_be_transfered = self.nodes.pop(node_name)
node_to_be_transfered.cloud = cloud
cloud.nodes[node_to_be_transfered.name] = node_to_be_transfered
self.actions.add_transfer_action(node_name)
def get_group_to_migrate(self):
"""
        This method returns the next large group to migrate
"""
if self.groups:
return self.groups.pop(0)
def check_if_group_fits(self, group_obj, cloud_obj):
"""
        This method tries to assign the vms of a group from the source
        cloud to the destination cloud
"""
# try to assign vms on dst cloud
list_of_nodes = [i for i in cloud_obj.nodes.values() if not i.is_full]
flavors_dict = {}
vm_list = group_obj.get_all_vms()
for vm_obj in vm_list:
if vm_obj.flavor not in flavors_dict:
flavors_dict[vm_obj.flavor] = 0
flavors_dict[vm_obj.flavor] += 1
result = {}
for node_obj in list_of_nodes:
if not flavors_dict:
break
fl_required = node_obj.calculate_flavors_required(flavors_dict)
if all([i < CONF.condense.precision for i in
node_obj.potential_utilization(
fl_required)]):
fl_required = node_obj.calculate_flavors_required(
flavors_dict, True)
result[node_obj] = fl_required
for flavor_obj, count in fl_required.items():
flavors_dict[flavor_obj] -= count
if flavors_dict[flavor_obj] == 0:
del flavors_dict[flavor_obj]
return flavors_dict, result
def migrate_vms(self, cloud):
return self.migrate_group(cloud)
def migrate_group(self, cloud, strict=True):
"""This method migrates single group"""
group_to_migrate = self.get_group_to_migrate()
if not group_to_migrate:
return
# check that group can fit destination cloud
flavors_left, distribution = self.check_if_group_fits(
group_to_migrate, cloud)
if flavors_left:
if strict:
msg = "cannot fit flavors %s" % flavors_left
raise RuntimeError(msg)
else:
self.groups.insert(0, group_to_migrate)
return
for node_obj, flavors_required in distribution.items():
for vm_obj in group_to_migrate.get_all_vms():
flavor_obj = vm_obj.flavor
if flavor_obj in flavors_required:
flavors_required[flavor_obj] -= 1
if flavors_required[flavor_obj] == 0:
del flavors_required[flavor_obj]
self.migrate_vm(vm_obj, node_obj)
return self.migrate_group(cloud, False)
def migrate_vm(self, vm_obj, target_node):
"""This method migrates vm from one cloud to another"""
vm_obj.link_node(target_node)
self.actions.add_migration_action(vm_obj, target_node)
def migrate_to(self, cloud):
"""
        This method contains the main logic of the application - it
        performs the migration"""
while self.nodes and self.groups:
self.condense()
self.transfer_nodes(cloud)
self.migrate_vms(cloud)
cloud.condense()
self.actions.dump_actions()
cloud.actions.dump_actions()
def __str__(self):
"""
        This method renders the cloud as a table
"""
table = prettytable.PrettyTable(
['Node', 'Number of VMS', 'Ram Utilization', 'Core Utilization'])
rows = []
for node_name, node_obj in self.nodes.items():
util = node_obj.utilization
rows.append((node_name, len(node_obj.vms), util[0], util[1]))
rows = sorted(rows, key=lambda a: a[1], reverse=True)
for row in rows:
table.add_row(row)
return ("\n\n\n\n {total} vms total; {free} nodes free;"
" {full} nodes full\n cloud - {name} \n"
"{table}\n\n").format(
total=str(sum(i[1] for i in rows)),
free=str(len(
[i for i in self.nodes.values() if not i.vms])),
full=str(len(
[i for i in self.nodes.values() if i.is_full])),
name=self.name,
table=str(table))
@property
def groups_info(self):
return "\n".join([str(i) for i in self.groups])
|
|
import unittest
import math
import numpy as np
import numpy.testing
import ROOT
ROOT.PyConfig.IgnoreCommandLineOptions = True
from parspec import SpecBuilder
from parspec import Source
from parspec import ParSpec
def logPoisson(k, v, s):
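    # Gaussian approximation to the Poisson log-likelihood used by these
    # tests: the variance is max(k, 1) plus the (already squared) MC
    # statistical term s, and the value is -0.5*(k-v)**2/variance per bin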
vv = np.array(k, dtype=float)
vv[vv<1] = 1
vv += s
return -0.5 * (k-v)**2 / vv
class TestParSpec(unittest.TestCase):
@classmethod
def setUpClass(cls):
"""Setup a single spectrum object for all tests"""
# Builder accumulates data and builds the spectrum
builder = SpecBuilder('Spectrum')
### Add a signal ###
        # Add a triangular signal
sig = [1000., 1100., 1200., 1100., 1000.]
src_sig = Source(sig)
# Indicate the bin contents in sig are subject to statistical
# uncertainty, based on double the count (as if 2x MC was generated
# then scaled down by 0.5)
src_sig.use_stats(.5*(2*np.array(sig))**0.5)
# Allow its scale to vary
src_sig.set_expression(
'lumi*xsec_sig', # scale factor
['lumi', 'xsec_sig'], # parameters are lumi and xsec
['xsec_sig', 'lumi']) # dn/dlumi and dn/dxsec
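        # i.e. the gradient list follows the parameter order:
        # d(lumi*xsec_sig)/d(lumi) = xsec_sig and
        # d(lumi*xsec_sig)/d(xsec_sig) = lumi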
# Add to builder once configured
builder.add_source(src_sig)
        # Constrain xsec with an asymmetric prior
builder.set_prior('xsec_sig', 1, 0.9, 1.2, 'normal')
# Constrain lumi with 5% uncertainty
builder.set_prior('lumi', 1, 0.95, 1.05, 'lognormal')
        ### Add two systematic uncertainties ###
# Add systematic shape variation (a top hat)
sig_syst1 = [0, 50, 50, 50, 0]
# This is a shape which inherits the normalization from the signal
src_sig_syst1_up = Source(sig_syst1, shapeof=src_sig)
# Assume 1:1 statistical uncertainty on this shape
src_sig_syst1_up.use_stats(np.array(sig_syst1)**0.5)
# Control the amount of this variation with the parameter syst1, and
# indicate that the shape applies only if syst1 >= 0. Note that
# parameter list and gradients can be omitted for simple sums
src_sig_syst1_up.set_expression('syst1', polarity='up')
        # Mirror the variation for negative parameter values: the same shape
        # is applied whether the parameter is positive or negative
src_sig_syst1_down = Source(sig_syst1, shapeof=src_sig)
src_sig_syst1_down.set_expression('syst1', polarity='down')
builder.add_source(src_sig_syst1_up)
builder.add_source(src_sig_syst1_down)
        # 1 sigma penalty when this parameter reaches values of +/- 1
builder.set_prior('syst1', 0, -1, 1, 'normal')
# Add a linear systematic variant
sig_syst2 = [-100, -50, 0 , 50, 100]
src_sig_syst2 = Source(sig_syst2, shapeof=src_sig)
        # This one is symmetrized: the value of syst2 simply scales the shape
src_sig_syst2.set_expression('syst2')
builder.add_source(src_sig_syst2)
builder.set_prior('syst2', 0, -1, 1, 'normal')
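        # For illustration: a value of syst2 = -0.3 contributes
        # -0.3 * sig_syst2 (times the inherited lumi*xsec_sig factor) to the
        # spectrum, which is exactly what test_spec_varied below checks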
### Add a template (the parameter of interest) ###
        # Add a shape to the signal; this parameter won't be constrained
sig_temp1 = [0, 0, 10, 100, 0]
src_poi = Source(sig_temp1, shapeof=src_sig)
        # The parameter of interest is called p, and scales the template by
        # a factor of 5
src_poi.set_expression('5*p', ['p'], ['5'])
builder.add_source(src_poi)
### Add a background ###
bg = [110, 100, 100, 100, 105]
src_bg = Source(bg)
src_bg.set_expression(
'lumi*xsec_bg',
['lumi', 'xsec_bg'],
['xsec_bg', 'lumi'])
builder.add_source(src_bg)
builder.set_prior('xsec_bg', 1, 0.9, 1.1, 'normal')
### Share one of the systematics with the background ###
bg_syst2 = [10, 20, 10, 20, 10]
src_bg_syst2 = Source(bg_syst2, shapeof=src_bg)
src_bg_syst2.set_expression('syst2')
builder.add_source(src_bg_syst2)
# Note that this parameter is already constrained
### Add a custom regularization for the free parameter ###
builder.add_regularization(
'std::pow(p-syst1, 2)',
['p', 'syst1'],
['2*(p-syst1)', '-2*(p-syst1)'])
# Store the builder so that tests can use it or its contents
cls.builder = builder
cls.spec = builder.build()
def test_pars(self):
"""Check if the spectrum returns the correct list of parameters"""
np.testing.assert_equal(
self.spec.pars,
['lumi',
'p',
'syst1',
'syst2',
'xsec_bg',
'xsec_sig'])
def test_unconstrained(self):
"""Check that the spectrum returns the correct unconstrained pars"""
np.testing.assert_equal(self.spec.unconstrained, ['p'])
def test_central(self):
"""Check if the spectrum returns the correct central value"""
        # Parameters are:
# lumi (centered at 1 to leave yields unchanged)
# p (centered at 0 to not contribute)
# syst1 (centered at 0 to not contribute)
# syst2 (centered at 0 to not contribute)
        # xsec_sig (centered at 1 to leave yields unchanged)
        # xsec_bg (centered at 1 to leave yields unchanged)
np.testing.assert_array_almost_equal(
[1, 0, 0, 0, 1, 1],
self.spec.central)
def test_scales(self):
"""Check if the spectrum returns the correct scales"""
# Check for all parameters
for par in self.spec.pars:
if par.startswith('stat'):
continue
ipar = self.spec.ipar(par)
if par in self.builder._priors:
# Constrained parameters are scaled by constraint
low = self.builder._priors[par]['low']
high = self.builder._priors[par]['high']
scale = (high-low)/2.
else:
# Unconstrained parameters are not scaled
scale = 0
self.assertAlmostEqual(self.spec.scales[ipar], scale)
def test_ipar(self):
"""Check parameter indices"""
for ipar, par in enumerate(self.spec.pars):
self.assertEqual(ipar, self.spec.ipar(par))
def test_par_info(self):
"""Check parameter information"""
# Check for all parameters
for ipar, par in enumerate(self.spec.pars):
info = self.spec.parinfo(par)
# Should work with indices as well
self.assertEqual(info, self.spec.parinfo(ipar))
self.assertEqual(info['index'], ipar)
self.assertEqual(info['name'], par)
if par in self.spec.unconstrained:
self.assertAlmostEqual(info['central'], 0)
self.assertAlmostEqual(info['low'], 0)
self.assertAlmostEqual(info['high'], 0)
self.assertEqual(info['constraint'], 'none')
else:
prior = self.builder._priors[par]
self.assertAlmostEqual(info['central'], prior['central'])
self.assertAlmostEqual(info['low'], prior['low'])
self.assertAlmostEqual(info['high'], prior['high'])
if par == 'lumi':
self.assertEqual(info['constraint'], 'lognormal')
else:
self.assertEqual(info['constraint'], 'normal')
def test_spec_nom(self):
"""Check nominal spectrum"""
# Nominal spectrum is source + background
true = (
self.builder._sources[0]._data +
self.builder._sources[5]._data
)
# Should get the same spectrum using central parameters
pars = list(self.spec.central)
comp = self.spec(pars)
np.testing.assert_array_almost_equal(true, comp)
def test_stats_nom(self):
"""Check nominal spectrum stats"""
# stats is sum in quadrature of those provided
true = (
self.builder._sources[0]._stats**2 +
self.builder._sources[1]._stats**2
)
# Should get the same spectrum using central parameters
np.testing.assert_array_almost_equal(true, self.spec.stats)
def test_spec_xsec(self):
"""Check spectrum with varied x-section"""
# Modify cross section
true = (
1.2 * self.builder._sources[0]._data +
0.5 * self.builder._sources[5]._data
)
pars = list(self.spec.central)
pars[self.spec.ipar('xsec_sig')] = 1.2
pars[self.spec.ipar('xsec_bg')] = 0.5
comp = self.spec(pars)
np.testing.assert_array_almost_equal(true, comp)
def test_spec_lumi(self):
"""Check spectrum with varied luminosity"""
# Modify luminosity and cross sections
true = (
0.8*1.2 * self.builder._sources[0]._data +
0.8*0.5 * self.builder._sources[5]._data
)
pars = list(self.spec.central)
pars[self.spec.ipar('xsec_sig')] = 1.2
pars[self.spec.ipar('xsec_bg')] = 0.5
pars[self.spec.ipar('lumi')] = 0.8
comp = self.spec(pars)
np.testing.assert_array_almost_equal(true, comp)
def test_spec_syst1_up(self):
"""Check spectrum with positive systematic"""
# Positive value for syst1
true = (
0.8*1.2 * self.builder._sources[0]._data +
0.8*0.5 * self.builder._sources[5]._data +
0.8*1.2*0.2 * self.builder._sources[1]._data
)
pars = list(self.spec.central)
pars[self.spec.ipar('xsec_sig')] = 1.2
pars[self.spec.ipar('xsec_bg')] = 0.5
pars[self.spec.ipar('lumi')] = 0.8
pars[self.spec.ipar('syst1')] = 0.2
comp = self.spec(pars)
np.testing.assert_array_almost_equal(true, comp)
def test_spec_syst1_down(self):
"""Check spectrum with negative systematic"""
# Negative value for syst1
true = (
0.8*1.2 * self.builder._sources[0]._data +
0.8*0.5 * self.builder._sources[5]._data +
-0.8*1.2*0.3 * self.builder._sources[2]._data # notice diff. source
)
pars = list(self.spec.central)
pars[self.spec.ipar('xsec_sig')] = 1.2
pars[self.spec.ipar('xsec_bg')] = 0.5
pars[self.spec.ipar('lumi')] = 0.8
pars[self.spec.ipar('syst1')] = -0.3
comp = self.spec(pars)
np.testing.assert_array_almost_equal(true, comp)
def move_pars(self, pars):
"""Move all types of parameters to non-trivial values"""
pars[self.spec.ipar('xsec_sig')] = 1.2
pars[self.spec.ipar('xsec_bg')] = 0.5
pars[self.spec.ipar('lumi')] = 0.8
pars[self.spec.ipar('syst1')] = +0.2
pars[self.spec.ipar('syst2')] = -0.3
pars[self.spec.ipar('p')] = 1.2
def test_spec_varied(self):
"""Check spectrum with all parameters varied"""
true = (
# Add source with lumi=0.8 and xsec=1.2
0.8*1.2 * self.builder._sources[0]._data +
# Add a 0.2 contribution from syst1
0.8*1.2 * +0.2 * self.builder._sources[1]._data +
# Add a -0.3 contribution from syst2
0.8*1.2 * -0.3 * self.builder._sources[3]._data +
0.8*0.5 * self.builder._sources[5]._data +
0.8*0.5 * -0.3 * self.builder._sources[6]._data +
            # Source 4 is the template, with strength 1.2 and scaled by 5
# as this is the form of the factor for the template
0.8*1.2 * 5*1.2 * self.builder._sources[4]._data
)
pars = list(self.spec.central)
self.move_pars(pars)
comp = self.spec(pars)
np.testing.assert_array_almost_equal(true, comp)
def test_ll_nom(self):
"""Check the nominal log likelihood"""
pars = list(self.spec.central)
nominal = self.spec(pars)
self.spec.set_data(nominal) # nominal data
stats = np.array(self.spec.stats)
        # even with nominal data, ll penalty from poisson normalization
ll = 0 # log likelihood
ll += np.sum(logPoisson(nominal, nominal, stats))
self.assertAlmostEqual(ll, self.spec.ll(pars))
def test_ll_poisson(self):
"""Check the log likelihood with varied yields"""
# Modify a few bins in data and check for poisson likelihood drop
pars = list(self.spec.central)
nominal = self.spec(pars)
data = np.copy(nominal)
data[1] *= 1.1
data[2] *= 0.5
stats = np.array(self.spec.stats)
ll = 0 # log likelihood
ll += np.sum(logPoisson(data, nominal, stats))
# Set the fluctuated data, and check the log likelihood to nominal
self.spec.set_data(data)
self.assertAlmostEqual(ll/self.spec.ll(pars), 1)
def test_ll_reg(self):
"""Check the log likelihood with varied systematics"""
# Now modify all parameters, and check all regularizations are also
# contributing
centre = self.spec.central
pars = np.copy(centre)
self.move_pars(pars)
# Data includes the shifts, so penalty will be only due to priors
data = self.spec(pars)
self.spec.set_data(data)
stats = np.array(self.spec.stats)
ll = 0
ll += np.sum(logPoisson(data, data, stats))
for ipar, par in enumerate(self.spec.pars):
# Don't regularize free parameters
if par in self.spec.unconstrained:
continue
# Scale is parameter value at 1 sigma, so need to subtract centre
if pars[ipar] >= centre[ipar]:
bound = self.spec.parinfo(par)['high']
else:
bound = self.spec.parinfo(par)['low']
prior = self.builder._priors.get(par, None)
if prior is None or prior['constraint'] == 'normal':
ll += -0.5 * \
(pars[ipar]-centre[ipar])**2 / \
(bound-centre[ipar])**2
elif prior is not None and prior['constraint'] == 'lognormal':
ll += -0.5 * \
(np.log(pars[ipar])-np.log(centre[ipar]))**2 / \
(np.log(bound)-np.log(centre[ipar]))**2
# Add contribution from the custom regularization on p which is
# (p-syst1)**2
ll += (pars[self.spec.ipar('p')]-pars[self.spec.ipar('syst1')])**2
self.assertAlmostEqual(ll/self.spec.ll(pars), 1)
def test_ll_mix(self):
"""Check the log likelihood with varied parameters"""
pars = list(self.spec.central)
data = np.copy(self.spec(pars)) # data at nominal, causes stat penalty
self.spec.set_data(data)
pars[self.spec.ipar('xsec_sig')] = 1.2
pars[self.spec.ipar('p')] = 1.2
varied = self.spec(pars) # nominal expectation (with shifts)
stats = np.array(self.spec.stats)
ll = 0
ll += np.sum(logPoisson(data, varied, stats))
ll += -0.5 * (1.2-1)**2 / (self.spec.parinfo('xsec_sig')['high']-1)**2
        # Add the custom regularization once more
ll += (pars[self.spec.ipar('p')]-pars[self.spec.ipar('syst1')])**2
self.assertAlmostEqual(ll/self.spec.ll(pars), 1)
def test_grads(self):
"""Test the computed gradients agree with numerical computation"""
pars = np.array(self.spec.central, dtype='float64')
data = np.copy(self.spec(pars))
data *= 1.1 # move away from centre to ensure non-zero gradients
self.spec.set_data(data)
self.move_pars(pars) # move parameters to check proper partials
ntol = 5
dp = 10**(-ntol)
for par in self.spec.pars:
# Copy the central parameter values
dpars = np.array(pars, dtype=np.float64)
            # Choose a parameter to change
ipar = self.spec.ipar(par)
nll = ROOT.Double(0) # variable to pass by ref
grads = dpars*0 # memory in which to store gradients
# Compute the gradients at the central point
self.spec._obj.FdF(pars, nll, grads)
# Shift the parameter slightly down and compute likelihood there
dpars[ipar] = pars[ipar] - dp;
nlld = self.spec.nll(dpars)
# Shift the parameter slightly up and compute likelihood there
dpars[ipar] = pars[ipar] + dp;
nllu = self.spec.nll(dpars)
# Compute the observed gradient for this parameter
dlldp = (nllu-nlld)/(2*dp)
            # The computed and numeric gradients should be similar, but won't
            # be identical since the numeric one is an approximation
self.assertAlmostEqual(dlldp/grads[ipar], 1, ntol-1)
def test_grad_func(self):
"""Test that the dedicated gradient function agrees with FdF"""
pars = np.array(self.spec.central, dtype='float64')
data = np.copy(self.spec(pars))
data *= 1.1 # move away from centre to ensure non-zero gradients
self.spec.set_data(data)
self.move_pars(pars) # move parameters to check proper partials
ll = ROOT.Double(0)
grads1 = pars*0
grads2 = pars*0
self.spec._obj.FdF(pars, ll, grads1)
self.spec._obj.Gradient(pars, grads2)
np.testing.assert_almost_equal(grads1, grads2)
def test_ngrads(self):
"""Test the positive likelihood gradients"""
pars = np.array(self.spec.central, dtype='float')
data = np.copy(self.spec(pars))
data *= 1.1 # move away from centre to ensure non-zero gradients
self.spec.set_data(data)
self.move_pars(pars) # move parameters to check proper partials
grads = pars*0
ngrads = pars*0
# Object defaults to NLL for minimization
self.spec._obj.Gradient(pars, grads)
self.spec._obj.setNLL(False)
self.spec._obj.Gradient(pars, ngrads)
# Reset it
self.spec._obj.setNLL(True)
np.testing.assert_almost_equal(grads, -ngrads)
def test_zero(self):
builder = SpecBuilder('SpectrumZero')
sig = [10., 11.]
src_sig = Source(sig)
src_sig.use_stats(.5*(2*np.array(sig))**0.5)
src_sig.set_expression(
'lumi*xsec_sig',
['lumi', 'xsec_sig'],
['xsec_sig', 'lumi'])
builder.add_source(src_sig)
builder.set_prior('xsec_sig', 1, 0.9, 1.2, 'normal')
builder.set_prior('lumi', 1, 0.95, 1.05, 'lognormal')
sig_syst1 = [-5, 0]
src_sig_syst1_up = Source(sig_syst1, shapeof=src_sig)
src_sig_syst1_up.set_expression('syst1', polarity='up')
builder.add_source(src_sig_syst1_up)
builder.set_prior('syst1', 0, -1, 1, 'normal')
spec = builder.build()
pars = list(spec.central)
data = spec(pars)
isyst = spec.ipar('syst1')
pars[isyst] = 2
# ensure syst made bin go to zero
self.assertAlmostEqual(spec(pars)[0], 0)
# ensure not NaN (0 data so bin is ignored)
self.assertTrue(spec.ll(pars) == spec.ll(pars))
# try again with negative bin value
pars[isyst] = 3
self.assertAlmostEqual(spec(pars)[0], -5)
self.assertTrue(spec.ll(pars) == spec.ll(pars))
# now set the data and check that ll goes to -inf
spec.set_data(data)
# check also grads, so need memory arrays
pars = np.array(pars, dtype=np.float64)
grads = pars*0
pars[isyst] = 2
self.assertEqual(spec.ll(pars), float('-inf'))
spec._obj.Gradient(pars, grads)
self.assertEqual(grads[isyst], float('inf'))
pars[isyst] = 3
self.assertEqual(spec.ll(pars), float('-inf'))
spec._obj.Gradient(pars, grads)
self.assertEqual(grads[isyst], float('inf'))
class TestSource(unittest.TestCase):
def test_except_infer_pars(self):
"""Try to infer bad expression"""
src = Source([])
self.assertRaises(RuntimeError, src.set_expression, 'a+a')
self.assertRaises(RuntimeError, src.set_expression, '2*a')
self.assertRaises(ValueError, src.set_expression, '2*a', ['a'])
self.assertRaises(ValueError, src.set_expression, '2*a', grads=['2'])
self.assertRaises(ValueError, src.set_expression, 'a*b', ['a', 'b'], ['b'])
def test_except_inherit(self):
"""Don't re-use an inherited parameter"""
src1 = Source([])
src1.set_expression('a')
src2 = Source([], shapeof=src1)
self.assertRaises(ValueError, src2.set_expression, 'a')
self.assertRaises(ValueError, src2.set_expression, 'a', ['a'], ['1'])
self.assertRaises(ValueError, src2.set_expression, 'a*b', ['a', 'b'], ['b', 'a'])
def test_except_par_name(self):
"""Reject bad parameter names"""
src = Source([])
self.assertRaises(ValueError, src.set_expression, '_a', ['_a'], ['1'])
self.assertRaises(ValueError, src.set_expression, '1a', ['1a'], ['1'])
def test_except_polarity(self):
"""Reject bad polarity values"""
src = Source([])
self.assertRaises(ValueError, src.set_expression, 'a', polarity='invalid')
def test_except_reset(self):
"""Don't allow re-setting expression"""
src = Source([])
src.set_expression('a')
self.assertRaises(RuntimeError, src.set_expression, 'a')
def test_data(self):
"""Data is correctly propagated"""
src = Source([1,2,3])
np.testing.assert_array_almost_equal([1,2,3], src._data, 15)
def test_expression(self):
"""Set an expression, parameters and gradients"""
src = Source([])
src.set_expression('a*b*b', ['a', 'b'], ['b*b', '2*a*b'])
self.assertEqual(['a', 'b'], src._pars)
self.assertEqual(['b*b', '2*a*b'], src._grads)
# Should convert numerical gradients
src = Source([])
src.set_expression('a', ['a'], [1])
self.assertEqual(['1'], src._grads)
def test_infer(self):
"""Infer parameters and gradients from expression"""
src = Source([])
src.set_expression('a')
self.assertEqual(['a'], src._pars)
self.assertEqual(['1'], src._grads)
src = Source([])
src.set_expression('a+b')
self.assertEqual(['a', 'b'], src._pars)
self.assertEqual(['1', '1'], src._grads)
def test_inherit(self):
"""Test inheriting from parent sources"""
# Setup a source which inherits from two others
src1 = Source([])
src1.set_expression('a+b')
src2 = Source([], shapeof=src1)
src2.set_expression('5*c*c', ['c'], ['10*c'])
src3 = Source([], shapeof=src2)
src3.set_expression('d+e')
# Check the correct compound expression
self.assertEqual('((a+b) * (5*c*c)) * (d+e)', src3._expr)
        # Ensure parameters are correctly amended
self.assertEqual(['a', 'b', 'c', 'd', 'e'], src3._pars)
# Check that the gradients are correctly propagated
self.assertEqual('((1) * (5*c*c)) * (d+e)', src3._grads[0])
self.assertEqual('((1) * (5*c*c)) * (d+e)', src3._grads[1])
self.assertEqual('((10*c) * (a+b)) * (d+e)', src3._grads[2])
self.assertEqual('(1) * ((a+b) * (5*c*c))', src3._grads[3])
self.assertEqual('(1) * ((a+b) * (5*c*c))', src3._grads[4])
if __name__ == '__main__':
unittest.main()
|
|
# encoding: utf-8
import csv
import json
import os.path
from time import time
from itertools import chain
from collections import defaultdict
from six.moves import StringIO, xrange
import six
from gevent import wsgi
from flask import Flask, make_response, request, render_template
from . import runners
from .cache import memoize
from .runners import MasterLocustRunner
from locust.stats import median_from_dict
from locust import __version__ as version
import logging
logger = logging.getLogger(__name__)
DEFAULT_CACHE_TIME = 2.0
app = Flask(__name__)
app.debug = True
app.root_path = os.path.dirname(os.path.abspath(__file__))
@app.route('/')
def index():
is_distributed = isinstance(runners.locust_runner, MasterLocustRunner)
if is_distributed:
slave_count = runners.locust_runner.slave_count
else:
slave_count = 0
if runners.locust_runner.host:
host = runners.locust_runner.host
elif len(runners.locust_runner.locust_classes) > 0:
host = runners.locust_runner.locust_classes[0].host
else:
host = None
return render_template("index.html",
state=runners.locust_runner.state,
is_distributed=is_distributed,
slave_count=slave_count,
user_count=runners.locust_runner.user_count,
version=version,
host=host
)
@app.route('/swarm', methods=["POST"])
def swarm():
assert request.method == "POST"
locust_count = int(request.form["locust_count"])
hatch_rate = float(request.form["hatch_rate"])
runners.locust_runner.start_hatching(locust_count, hatch_rate)
response = make_response(json.dumps({'success':True, 'message': 'Swarming started'}))
response.headers["Content-type"] = "application/json"
return response
@app.route('/stop')
def stop():
runners.locust_runner.stop()
response = make_response(json.dumps({'success':True, 'message': 'Test stopped'}))
response.headers["Content-type"] = "application/json"
return response
@app.route("/stats/reset")
def reset_stats():
runners.locust_runner.stats.reset_all()
return "ok"
@app.route("/stats/requests/csv")
def request_stats_csv():
rows = [
",".join([
'"Method"',
'"Name"',
'"# requests"',
'"# failures"',
'"Median response time"',
'"Average response time"',
'"Min response time"',
'"Max response time"',
'"Average Content Size"',
'"Requests/s"',
])
]
for s in chain(_sort_stats(runners.locust_runner.request_stats), [runners.locust_runner.stats.aggregated_stats("Total", full_request_history=True)]):
rows.append('"%s","%s",%i,%i,%i,%i,%i,%i,%i,%.2f' % (
s.method,
s.name,
s.num_requests,
s.num_failures,
s.median_response_time,
s.avg_response_time,
s.min_response_time or 0,
s.max_response_time,
s.avg_content_length,
s.total_rps,
))
response = make_response("\n".join(rows))
file_name = "requests_{0}.csv".format(time())
disposition = "attachment;filename={0}".format(file_name)
response.headers["Content-type"] = "text/csv"
response.headers["Content-disposition"] = disposition
return response
@app.route("/stats/distribution/csv")
def distribution_stats_csv():
rows = [",".join((
'"Name"',
'"# requests"',
'"50%"',
'"66%"',
'"75%"',
'"80%"',
'"90%"',
'"95%"',
'"98%"',
'"99%"',
'"100%"',
))]
for s in chain(_sort_stats(runners.locust_runner.request_stats), [runners.locust_runner.stats.aggregated_stats("Total", full_request_history=True)]):
if s.num_requests:
rows.append(s.percentile(tpl='"%s",%i,%i,%i,%i,%i,%i,%i,%i,%i,%i'))
else:
rows.append('"%s",0,"N/A","N/A","N/A","N/A","N/A","N/A","N/A","N/A","N/A"' % s.name)
response = make_response("\n".join(rows))
file_name = "distribution_{0}.csv".format(time())
disposition = "attachment;filename={0}".format(file_name)
response.headers["Content-type"] = "text/csv"
response.headers["Content-disposition"] = disposition
return response
@app.route('/stats/requests')
@memoize(timeout=DEFAULT_CACHE_TIME, dynamic_timeout=True)
def request_stats():
stats = []
for s in chain(_sort_stats(runners.locust_runner.request_stats), [runners.locust_runner.stats.aggregated_stats("Total")]):
stats.append({
"method": s.method,
"name": s.name,
"num_requests": s.num_requests,
"num_failures": s.num_failures,
"avg_response_time": s.avg_response_time,
"min_response_time": s.min_response_time or 0,
"max_response_time": s.max_response_time,
"current_rps": s.current_rps,
"median_response_time": s.median_response_time,
"avg_content_length": s.avg_content_length,
})
errors = [e.to_dict() for e in six.itervalues(runners.locust_runner.errors)]
# Truncate the total number of stats and errors displayed since a large number of rows will cause the app
# to render extremely slowly. Aggregate stats should be preserved.
report = {"stats": stats[:500], "errors": errors[:500]}
if stats:
report["total_rps"] = stats[len(stats)-1]["current_rps"]
report["fail_ratio"] = runners.locust_runner.stats.aggregated_stats("Total").fail_ratio
# since generating a total response times dict with all response times from all
# urls is slow, we make a new total response time dict which will consist of one
# entry per url with the median response time as key and the number of requests as
# value
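        # For illustration (hypothetical numbers): three urls with median
        # response times of 20/20/50 ms and 100/300/50 requests give
        # response_times = {20: 400, 50: 50}, from which median_from_dict
        # picks the overall median below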
response_times = defaultdict(int) # used for calculating total median
for i in xrange(len(stats)-1):
response_times[stats[i]["median_response_time"]] += stats[i]["num_requests"]
# calculate total median
stats[len(stats)-1]["median_response_time"] = median_from_dict(stats[len(stats)-1]["num_requests"], response_times)
is_distributed = isinstance(runners.locust_runner, MasterLocustRunner)
if is_distributed:
report["slave_count"] = runners.locust_runner.slave_count
report["state"] = runners.locust_runner.state
report["user_count"] = runners.locust_runner.user_count
return json.dumps(report)
@app.route("/exceptions")
def exceptions():
response = make_response(json.dumps({
'exceptions': [
{
"count": row["count"],
"msg": row["msg"],
"traceback": row["traceback"],
"nodes" : ", ".join(row["nodes"])
} for row in six.itervalues(runners.locust_runner.exceptions)
]
}))
response.headers["Content-type"] = "application/json"
return response
@app.route("/exceptions/csv")
def exceptions_csv():
data = StringIO()
writer = csv.writer(data)
writer.writerow(["Count", "Message", "Traceback", "Nodes"])
for exc in six.itervalues(runners.locust_runner.exceptions):
nodes = ", ".join(exc["nodes"])
writer.writerow([exc["count"], exc["msg"], exc["traceback"], nodes])
data.seek(0)
response = make_response(data.read())
file_name = "exceptions_{0}.csv".format(time())
disposition = "attachment;filename={0}".format(file_name)
response.headers["Content-type"] = "text/csv"
response.headers["Content-disposition"] = disposition
return response
def start(locust, options):
wsgi.WSGIServer((options.web_host, options.port), app, log=None).serve_forever()
def _sort_stats(stats):
return [stats[key] for key in sorted(six.iterkeys(stats))]
|
|
"""Base Command class, and related routines"""
from __future__ import absolute_import, print_function
import logging
import logging.config
import optparse
import os
import platform
import sys
import traceback
from pip._internal.cli import cmdoptions
from pip._internal.cli.command_context import CommandContextMixIn
from pip._internal.cli.parser import (
ConfigOptionParser,
UpdatingDefaultsHelpFormatter,
)
from pip._internal.cli.status_codes import (
ERROR,
PREVIOUS_BUILD_DIR_ERROR,
SUCCESS,
UNKNOWN_ERROR,
VIRTUALENV_NOT_FOUND,
)
from pip._internal.exceptions import (
BadCommand,
CommandError,
InstallationError,
PreviousBuildDirError,
UninstallationError,
)
from pip._internal.utils.deprecation import deprecated
from pip._internal.utils.filesystem import check_path_owner
from pip._internal.utils.logging import BrokenStdoutLoggingError, setup_logging
from pip._internal.utils.misc import get_prog, normalize_path
from pip._internal.utils.temp_dir import (
global_tempdir_manager,
tempdir_registry,
)
from pip._internal.utils.typing import MYPY_CHECK_RUNNING
from pip._internal.utils.virtualenv import running_under_virtualenv
if MYPY_CHECK_RUNNING:
from typing import List, Optional, Tuple, Any
from optparse import Values
from pip._internal.utils.temp_dir import (
TempDirectoryTypeRegistry as TempDirRegistry
)
__all__ = ['Command']
logger = logging.getLogger(__name__)
class Command(CommandContextMixIn):
usage = None # type: str
ignore_require_venv = False # type: bool
def __init__(self, name, summary, isolated=False):
# type: (str, str, bool) -> None
super(Command, self).__init__()
parser_kw = {
'usage': self.usage,
'prog': '{} {}'.format(get_prog(), name),
'formatter': UpdatingDefaultsHelpFormatter(),
'add_help_option': False,
'name': name,
'description': self.__doc__,
'isolated': isolated,
}
self.name = name
self.summary = summary
self.parser = ConfigOptionParser(**parser_kw)
self.tempdir_registry = None # type: Optional[TempDirRegistry]
# Commands should add options to this option group
optgroup_name = '{} Options'.format(self.name.capitalize())
self.cmd_opts = optparse.OptionGroup(self.parser, optgroup_name)
# Add the general options
gen_opts = cmdoptions.make_option_group(
cmdoptions.general_group,
self.parser,
)
self.parser.add_option_group(gen_opts)
def handle_pip_version_check(self, options):
# type: (Values) -> None
"""
This is a no-op so that commands by default do not do the pip version
check.
"""
# Make sure we do the pip version check if the index_group options
# are present.
assert not hasattr(options, 'no_index')
def run(self, options, args):
# type: (Values, List[Any]) -> Any
raise NotImplementedError
def parse_args(self, args):
# type: (List[str]) -> Tuple[Any, Any]
# factored out for testability
return self.parser.parse_args(args)
def main(self, args):
# type: (List[str]) -> int
try:
with self.main_context():
return self._main(args)
finally:
logging.shutdown()
def _main(self, args):
# type: (List[str]) -> int
# We must initialize this before the tempdir manager, otherwise the
# configuration would not be accessible by the time we clean up the
# tempdir manager.
self.tempdir_registry = self.enter_context(tempdir_registry())
# Intentionally set as early as possible so globally-managed temporary
# directories are available to the rest of the code.
self.enter_context(global_tempdir_manager())
options, args = self.parse_args(args)
# Set verbosity so that it can be used elsewhere.
self.verbosity = options.verbose - options.quiet
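        # e.g. running pip with "-vv -q" would give verbosity = 2 - 1 = 1
        # (assuming the usual counting of -v/-q flags)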
level_number = setup_logging(
verbosity=self.verbosity,
no_color=options.no_color,
user_log_file=options.log,
)
if (
sys.version_info[:2] == (2, 7) and
not options.no_python_version_warning
):
message = (
"pip 21.0 will drop support for Python 2.7 in January 2021. "
"More details about Python 2 support in pip, can be found at "
"https://pip.pypa.io/en/latest/development/release-process/#python-2-support" # noqa
)
if platform.python_implementation() == "CPython":
message = (
"Python 2.7 reached the end of its life on January "
"1st, 2020. Please upgrade your Python as Python 2.7 "
"is no longer maintained. "
) + message
deprecated(message, replacement=None, gone_in=None)
# TODO: Try to get these passing down from the command?
# without resorting to os.environ to hold these.
# This also affects isolated builds and it should.
if options.no_input:
os.environ['PIP_NO_INPUT'] = '1'
if options.exists_action:
os.environ['PIP_EXISTS_ACTION'] = ' '.join(options.exists_action)
if options.require_venv and not self.ignore_require_venv:
# If a venv is required check if it can really be found
if not running_under_virtualenv():
logger.critical(
'Could not find an activated virtualenv (required).'
)
sys.exit(VIRTUALENV_NOT_FOUND)
if options.cache_dir:
options.cache_dir = normalize_path(options.cache_dir)
if not check_path_owner(options.cache_dir):
logger.warning(
"The directory '%s' or its parent directory is not owned "
"or is not writable by the current user. The cache "
"has been disabled. Check the permissions and owner of "
"that directory. If executing pip with sudo, you may want "
"sudo's -H flag.",
options.cache_dir,
)
options.cache_dir = None
try:
status = self.run(options, args)
# FIXME: all commands should return an exit status
# and when it is done, isinstance is not needed anymore
if isinstance(status, int):
return status
except PreviousBuildDirError as exc:
logger.critical(str(exc))
logger.debug('Exception information:', exc_info=True)
return PREVIOUS_BUILD_DIR_ERROR
except (InstallationError, UninstallationError, BadCommand) as exc:
logger.critical(str(exc))
logger.debug('Exception information:', exc_info=True)
return ERROR
except CommandError as exc:
logger.critical('%s', exc)
logger.debug('Exception information:', exc_info=True)
return ERROR
except BrokenStdoutLoggingError:
# Bypass our logger and write any remaining messages to stderr
# because stdout no longer works.
print('ERROR: Pipe to stdout was broken', file=sys.stderr)
if level_number <= logging.DEBUG:
traceback.print_exc(file=sys.stderr)
return ERROR
except KeyboardInterrupt:
logger.critical('Operation cancelled by user')
logger.debug('Exception information:', exc_info=True)
return ERROR
except BaseException:
logger.critical('Exception:', exc_info=True)
return UNKNOWN_ERROR
finally:
self.handle_pip_version_check(options)
return SUCCESS
|
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from . import AvroTester
from avro.ipc import AvroRemoteException
import avro_utils
import time
def _make_write_params(key, cf, sc, c, v, ts=0, cl='ONE'):
params = dict()
params['key'] = key
params['column_parent'] = dict()
params['column_parent']['column_family'] = cf
params['column_parent']['super_column'] = sc
params['column'] = dict()
params['column']['name'] = c
params['column']['value'] = v
params['column']['timestamp'] = ts
params['consistency_level'] = cl
return params
def _make_read_params(key, cf, sc, c, cl):
params = dict()
params['key'] = key
column_path = dict()
column_path['column_family'] = cf
column_path['super_column'] = sc
column_path['column'] = c
params['column_path'] = column_path
params['consistency_level'] = cl
return params
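# For illustration, _make_read_params('key1', 'Super1', 'sc1',
# avro_utils.i64(4), 'ONE') builds:
# {'key': 'key1',
#  'column_path': {'column_family': 'Super1', 'super_column': 'sc1',
#                  'column': <i64-encoded 4>},
#  'consistency_level': 'ONE'}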
def _super_col(name, columns):
return {'name': name, 'columns': columns}
def Mutation(**kwargs):
return kwargs
def SlicePredicate(**kwargs):
return kwargs
def SliceRange(start='', finish='', reversed=False, count=10):
return {'start': start, 'finish': finish, 'reversed':reversed, 'count': count}
def ColumnParent(*args, **kwargs):
cp = {}
if args and len(args) > 0:
cp['column_family'] = args[0]
if args and len(args) > 1:
cp['super_column'] = args[1]
for k,v in kwargs.items():
cp[k] = v
return cp
def Deletion(*args, **kwargs):
cp = {}
if args and len(args) > 0:
cp['timestamp'] = args[0]
for k,v in kwargs.items():
cp[k] = v
return cp
def ColumnPath(*args, **kwargs):
cp = {}
if args and len(args) > 0:
cp['column_family'] = args[0]
for k,v in kwargs.items():
cp[k] = v
return cp
def Column(name, value, timestamp, ttl=None):
return {'name':name, 'value':value, 'timestamp': timestamp, 'ttl': ttl}
def _i64(i):
return avro_utils.i64(i)
_SUPER_COLUMNS = [_super_col('sc1', [Column(avro_utils.i64(4), 'value4', 0)]),
_super_col('sc2', [Column(avro_utils.i64(5), 'value5', 0),
Column(avro_utils.i64(6), 'value6', 0)])]
class TestSuperOperations(AvroTester):
    """
    Operations on Super column families
    """
    def _set_keyspace(self, keyspace):
        self.client.request('set_keyspace', {'keyspace': keyspace})
def test_super_insert(self):
"simple super column insert"
self._set_keyspace('Keyspace1')
self._insert_super()
self._verify_super()
def test_slice_super(self):
"tests simple insert and get_slice"
self._set_keyspace('Keyspace1')
self._insert_super()
p = {'slice_range': {'start': '', 'finish': '', 'reversed': False, 'count': 10}}
parent = {'column_family': 'Super1', 'super_column': 'sc1'}
cosc = self.client.request('get_slice', {'key': 'key1', 'column_parent': parent, 'predicate': p, 'consistency_level': 'ONE'})
avro_utils.assert_cosc(cosc[0])
def test_missing_super(self):
"verifies that inserting doesn't yield false positives."
self._set_keyspace('Keyspace1')
avro_utils.assert_raises(AvroRemoteException,
self.client.request,
'get',
_make_read_params('key1', 'Super1', 'sc1', avro_utils.i64(1), 'ONE'))
self._insert_super()
avro_utils.assert_raises(AvroRemoteException,
self.client.request,
'get',
_make_read_params('key1', 'Super1', 'sc1', avro_utils.i64(1), 'ONE'))
def test_super_get(self):
"read back a super column"
self._set_keyspace('Keyspace1')
self._insert_super()
result = self.client.request('get', _make_read_params('key1', 'Super1', 'sc2', None, 'ONE'))['super_column']
assert result == _SUPER_COLUMNS[1], result
def test_super_subcolumn_limit(self):
"test get_slice honors subcolumn reversal and limit"
self._set_keyspace('Keyspace1')
self._insert_super()
p = SlicePredicate(slice_range=SliceRange('', '', False, 1))
column_parent = ColumnParent('Super1', 'sc2')
slice = [result['column'] for result in self.client.request('get_slice', {'key': 'key1', 'column_parent': column_parent, 'predicate': p, 'consistency_level': 'ONE'})]
assert slice == [Column(_i64(5), 'value5', 0)], slice
p = SlicePredicate(slice_range=SliceRange('', '', True, 1))
slice = [result['column'] for result in self.client.request('get_slice', {'key': 'key1', 'column_parent': column_parent, 'predicate': p, 'consistency_level': 'ONE'})]
assert slice == [Column(_i64(6), 'value6', 0)], slice
def test_time_uuid(self):
"test operation on timeuuid subcolumns in super columns"
import uuid
L = []
self._set_keyspace('Keyspace2')
# 100 isn't enough to fail reliably if the comparator is borked
for i in xrange(500):
L.append(uuid.uuid1())
self.client.request('insert', {'key': 'key1', 'column_parent': ColumnParent('Super4', 'sc1'), 'column': Column(L[-1].bytes, 'value%s' % i, i), 'consistency_level': 'ONE'})
slice = self._big_slice('key1', ColumnParent('Super4', 'sc1'))
assert len(slice) == 500, len(slice)
for i in xrange(500):
u = slice[i]['column']
assert u['value'] == 'value%s' % i
assert u['name'] == L[i].bytes
p = SlicePredicate(slice_range=SliceRange('', '', True, 1))
column_parent = ColumnParent('Super4', 'sc1')
slice = [result['column'] for result in self.client.request('get_slice', {'key': 'key1', 'column_parent': column_parent, 'predicate': p, 'consistency_level': 'ONE'})]
assert slice == [Column(L[-1].bytes, 'value499', 499)], slice
p = SlicePredicate(slice_range=SliceRange('', L[2].bytes, False, 1000))
column_parent = ColumnParent('Super4', 'sc1')
slice = [result['column'] for result in self.client.request('get_slice', {'key': 'key1', 'column_parent': column_parent, 'predicate': p, 'consistency_level': 'ONE'})]
assert slice == [Column(L[0].bytes, 'value0', 0),
Column(L[1].bytes, 'value1', 1),
Column(L[2].bytes, 'value2', 2)], slice
p = SlicePredicate(slice_range=SliceRange(L[2].bytes, '', True, 1000))
column_parent = ColumnParent('Super4', 'sc1')
slice = [result['column'] for result in self.client.request('get_slice', {'key': 'key1', 'column_parent': column_parent, 'predicate': p, 'consistency_level': 'ONE'})]
assert slice == [Column(L[2].bytes, 'value2', 2),
Column(L[1].bytes, 'value1', 1),
Column(L[0].bytes, 'value0', 0)], slice
p = SlicePredicate(slice_range=SliceRange(L[2].bytes, '', False, 1))
column_parent = ColumnParent('Super4', 'sc1')
slice = [result['column'] for result in self.client.request('get_slice', {'key': 'key1', 'column_parent': column_parent, 'predicate': p, 'consistency_level': 'ONE'})]
assert slice == [Column(L[2].bytes, 'value2', 2)], slice
def test_batch_mutate_remove_super_columns_with_standard_under(self):
"batch mutate with deletions in super columns"
self._set_keyspace('Keyspace1')
column_families = ['Super1', 'Super2']
keys = ['key_%d' % i for i in range(11,21)]
self._insert_super()
mutations = []
for sc in _SUPER_COLUMNS:
names = []
for c in sc['columns']:
names.append(c['name'])
mutations.append(Mutation(deletion=Deletion(20, super_column=c['name'], predicate=SlicePredicate(column_names=names))))
mutation_map = dict((column_family, mutations) for column_family in column_families)
keyed_mutations = [{'key': key, 'mutations': mutation_map} for key in keys]
def _assert_no_columnpath(key, column_path):
self._assert_no_columnpath(key, column_path)
self.client.request('batch_mutate', {'mutation_map': keyed_mutations, 'consistency_level': 'ONE'})
for column_family in column_families:
for sc in _SUPER_COLUMNS:
for c in sc['columns']:
for key in keys:
_assert_no_columnpath(key, ColumnPath(column_family, super_column=sc['name'], column=c['name']))
# internal helper functions.
def _insert_super(self, key='key1'):
self.client.request('insert', _make_write_params(key, 'Super1', 'sc1', avro_utils.i64(4), 'value4', 0, 'ONE'))
self.client.request('insert', _make_write_params(key, 'Super1', 'sc2', avro_utils.i64(5), 'value5', 0, 'ONE'))
self.client.request('insert', _make_write_params(key, 'Super1', 'sc2', avro_utils.i64(6), 'value6', 0, 'ONE'))
def _big_slice(self, key, column_parent):
p = {'slice_range': {'start': '', 'finish': '', 'reversed': False, 'count': 1000}}
return self.client.request('get_slice', {'key': key, 'column_parent': column_parent, 'predicate': p, 'consistency_level': 'ONE'})
def _verify_super(self, supercf='Super1', key='key1'):
col = self.client.request('get', _make_read_params(key, supercf, 'sc1', avro_utils.i64(4), 'ONE'))['column']
avro_utils.assert_columns_match(col, {'name': avro_utils.i64(4), 'value': 'value4', 'timestamp': 0})
slice = [result['super_column'] for result in self._big_slice(key, {'column_family': supercf})]
assert slice == _SUPER_COLUMNS, _SUPER_COLUMNS
def _assert_no_columnpath(self, key, column_path):
try:
self.client.request('get', {'key': key, 'column_path': column_path, 'consistency_level': 'ONE'})
assert False, ('columnpath %s existed in %s when it should not' % (column_path, key))
except AvroRemoteException:
assert True, 'column did not exist'
|
|
"""Component for wiffi support."""
from datetime import timedelta
import errno
import logging
from wiffi import WiffiTcpServer
from homeassistant.config_entries import ConfigEntry
from homeassistant.const import CONF_PORT, CONF_TIMEOUT
from homeassistant.core import HomeAssistant, callback
from homeassistant.exceptions import ConfigEntryNotReady
from homeassistant.helpers import device_registry
from homeassistant.helpers.dispatcher import (
async_dispatcher_connect,
async_dispatcher_send,
)
from homeassistant.helpers.entity import Entity
from homeassistant.helpers.event import async_track_time_interval
from homeassistant.util.dt import utcnow
from .const import (
CHECK_ENTITIES_SIGNAL,
CREATE_ENTITY_SIGNAL,
DEFAULT_TIMEOUT,
DOMAIN,
UPDATE_ENTITY_SIGNAL,
)
_LOGGER = logging.getLogger(__name__)
PLATFORMS = ["sensor", "binary_sensor"]
async def async_setup_entry(hass: HomeAssistant, entry: ConfigEntry) -> bool:
"""Set up wiffi from a config entry, config_entry contains data from config entry database."""
if not entry.update_listeners:
entry.add_update_listener(async_update_options)
# create api object
api = WiffiIntegrationApi(hass)
api.async_setup(entry)
# store api object
hass.data.setdefault(DOMAIN, {})[entry.entry_id] = api
try:
await api.server.start_server()
except OSError as exc:
if exc.errno != errno.EADDRINUSE:
_LOGGER.error("Start_server failed, errno: %d", exc.errno)
return False
_LOGGER.error("Port %s already in use", entry.data[CONF_PORT])
raise ConfigEntryNotReady from exc
hass.config_entries.async_setup_platforms(entry, PLATFORMS)
return True
async def async_update_options(hass: HomeAssistant, entry: ConfigEntry):
"""Update options."""
await hass.config_entries.async_reload(entry.entry_id)
async def async_unload_entry(hass: HomeAssistant, entry: ConfigEntry):
"""Unload a config entry."""
api: WiffiIntegrationApi = hass.data[DOMAIN][entry.entry_id]
await api.server.close_server()
unload_ok = await hass.config_entries.async_unload_platforms(entry, PLATFORMS)
if unload_ok:
api = hass.data[DOMAIN].pop(entry.entry_id)
api.shutdown()
return unload_ok
def generate_unique_id(device, metric):
"""Generate a unique string for the entity."""
return f"{device.mac_address.replace(':', '')}-{metric.name}"
class WiffiIntegrationApi:
"""API object for wiffi handling. Stored in hass.data."""
def __init__(self, hass):
"""Initialize the instance."""
self._hass = hass
self._server = None
self._known_devices = {}
self._periodic_callback = None
def async_setup(self, config_entry):
"""Set up api instance."""
self._server = WiffiTcpServer(config_entry.data[CONF_PORT], self)
self._periodic_callback = async_track_time_interval(
self._hass, self._periodic_tick, timedelta(seconds=10)
)
def shutdown(self):
"""Shutdown wiffi api.
Remove listener for periodic callbacks.
"""
remove_listener = self._periodic_callback
if remove_listener is not None:
remove_listener()
async def __call__(self, device, metrics):
"""Process callback from TCP server if new data arrives from a device."""
if device.mac_address not in self._known_devices:
# add empty set for new device
self._known_devices[device.mac_address] = set()
for metric in metrics:
if metric.id not in self._known_devices[device.mac_address]:
self._known_devices[device.mac_address].add(metric.id)
async_dispatcher_send(self._hass, CREATE_ENTITY_SIGNAL, device, metric)
else:
async_dispatcher_send(
self._hass,
f"{UPDATE_ENTITY_SIGNAL}-{generate_unique_id(device, metric)}",
device,
metric,
)
@property
def server(self):
"""Return TCP server instance for start + close."""
return self._server
@callback
def _periodic_tick(self, now=None):
"""Check if any entity has timed out because it has not been updated."""
async_dispatcher_send(self._hass, CHECK_ENTITIES_SIGNAL)
class WiffiEntity(Entity):
"""Common functionality for all wiffi entities."""
def __init__(self, device, metric, options):
"""Initialize the base elements of a wiffi entity."""
self._id = generate_unique_id(device, metric)
self._device_info = {
"connections": {
(device_registry.CONNECTION_NETWORK_MAC, device.mac_address)
},
"identifiers": {(DOMAIN, device.mac_address)},
"manufacturer": "stall.biz",
"name": f"{device.moduletype} {device.mac_address}",
"model": device.moduletype,
"sw_version": device.sw_version,
}
self._name = metric.description
self._expiration_date = None
self._value = None
self._timeout = options.get(CONF_TIMEOUT, DEFAULT_TIMEOUT)
async def async_added_to_hass(self):
"""Entity has been added to hass."""
self.async_on_remove(
async_dispatcher_connect(
self.hass,
f"{UPDATE_ENTITY_SIGNAL}-{self._id}",
self._update_value_callback,
)
)
self.async_on_remove(
async_dispatcher_connect(
self.hass, CHECK_ENTITIES_SIGNAL, self._check_expiration_date
)
)
@property
def should_poll(self):
"""Disable polling because data driven ."""
return False
@property
def device_info(self):
"""Return wiffi device info which is shared between all entities of a device."""
return self._device_info
@property
def unique_id(self):
"""Return unique id for entity."""
return self._id
@property
def name(self):
"""Return entity name."""
return self._name
@property
def available(self):
"""Return true if value is valid."""
return self._value is not None
def reset_expiration_date(self):
"""Reset value expiration date.
Will be called by derived classes after a value update has been received.
"""
self._expiration_date = utcnow() + timedelta(minutes=self._timeout)
@callback
def _update_value_callback(self, device, metric):
"""Update the value of the entity."""
@callback
def _check_expiration_date(self):
"""Periodically check if entity value has been updated.
If there are no more updates from the wiffi device, the value will be
set to unavailable.
"""
if (
self._value is not None
and self._expiration_date is not None
and utcnow() > self._expiration_date
):
self._value = None
self.async_write_ha_state()
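# Illustrative sketch (not part of the integration): a minimal derived entity
# showing how a subclass might override _update_value_callback. It assumes the
# wiffi metric object exposes a `value` attribute; treat it as a hypothetical
# example rather than the shipped sensor implementation.
class IllustrativeWiffiSensor(WiffiEntity):
    """Hypothetical sensor entity built on top of WiffiEntity."""
    def __init__(self, device, metric, options):
        """Store the initial metric value."""
        super().__init__(device, metric, options)
        self._value = metric.value
        self.reset_expiration_date()
    @callback
    def _update_value_callback(self, device, metric):
        """Refresh value and expiration date when new data arrives."""
        self.reset_expiration_date()
        self._value = metric.value
        self.async_write_ha_state()
    @property
    def state(self):
        """Return the current metric value."""
        return self._value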
|
|
# Copyright (C) 2013 Hewlett-Packard Development Company, L.P.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Handles all requests relating to transferring ownership of volumes.
"""
import hashlib
import hmac
import os
from oslo.config import cfg
from cinder.db import base
from cinder import exception
from cinder.openstack.common import excutils
from cinder.openstack.common.gettextutils import _
from cinder.openstack.common import log as logging
from cinder import quota
from cinder.volume import api as volume_api
volume_transfer_opts = [
cfg.IntOpt('volume_transfer_salt_length', default=8,
help='The number of characters in the salt.'),
cfg.IntOpt('volume_transfer_key_length', default=16,
help='The number of characters in the '
'autogenerated auth key.'), ]
CONF = cfg.CONF
CONF.register_opts(volume_transfer_opts)
LOG = logging.getLogger(__name__)
QUOTAS = quota.QUOTAS
class API(base.Base):
"""API for interacting volume transfers."""
def __init__(self, db_driver=None):
self.volume_api = volume_api.API()
super(API, self).__init__(db_driver)
def get(self, context, transfer_id):
rv = self.db.transfer_get(context, transfer_id)
return dict(rv.iteritems())
def delete(self, context, transfer_id):
"""Make the RPC call to delete a volume transfer."""
volume_api.check_policy(context, 'delete_transfer')
transfer = self.db.transfer_get(context, transfer_id)
volume_ref = self.db.volume_get(context, transfer.volume_id)
if volume_ref['status'] != 'awaiting-transfer':
msg = _("Volume in unexpected state")
LOG.error(msg)
self.db.transfer_destroy(context, transfer_id)
def get_all(self, context, filters=None):
filters = filters or {}
volume_api.check_policy(context, 'get_all_transfers')
if context.is_admin and 'all_tenants' in filters:
transfers = self.db.transfer_get_all(context)
else:
transfers = self.db.transfer_get_all_by_project(context,
context.project_id)
return transfers
def _get_random_string(self, length):
"""Get a random hex string of the specified length."""
rndstr = ""
        # Note that the string returned by this function must contain only
        # characters that the recipient can enter on their keyboard. The
        # function sha224().hexdigest() achieves this by generating a hash
        # which will only contain hexadecimal digits.
while len(rndstr) < length:
rndstr += hashlib.sha224(os.urandom(255)).hexdigest()
return rndstr[0:length]
def _get_crypt_hash(self, salt, auth_key):
"""Generate a random hash based on the salt and the auth key."""
return hmac.new(str(salt),
str(auth_key),
hashlib.sha1).hexdigest()
def create(self, context, volume_id, display_name):
"""Creates an entry in the transfers table."""
volume_api.check_policy(context, 'create_transfer')
LOG.info("Generating transfer record for volume %s" % volume_id)
volume_ref = self.db.volume_get(context, volume_id)
if volume_ref['status'] != "available":
raise exception.InvalidVolume(reason=_("status must be available"))
# The salt is just a short random string.
salt = self._get_random_string(CONF.volume_transfer_salt_length)
auth_key = self._get_random_string(CONF.volume_transfer_key_length)
crypt_hash = self._get_crypt_hash(salt, auth_key)
# TODO(ollie): Transfer expiry needs to be implemented.
transfer_rec = {'volume_id': volume_id,
'display_name': display_name,
'salt': salt,
'crypt_hash': crypt_hash,
'expires_at': None}
try:
transfer = self.db.transfer_create(context, transfer_rec)
except Exception:
LOG.error(_("Failed to create transfer record for %s") % volume_id)
raise
return {'id': transfer['id'],
'volume_id': transfer['volume_id'],
'display_name': transfer['display_name'],
'auth_key': auth_key,
'created_at': transfer['created_at']}
def accept(self, context, transfer_id, auth_key):
"""Accept a volume that has been offered for transfer."""
# We must use an elevated context to see the volume that is still
# owned by the donor.
volume_api.check_policy(context, 'accept_transfer')
transfer = self.db.transfer_get(context.elevated(), transfer_id)
crypt_hash = self._get_crypt_hash(transfer['salt'], auth_key)
if crypt_hash != transfer['crypt_hash']:
msg = (_("Attempt to transfer %s with invalid auth key.") %
transfer_id)
LOG.error(msg)
raise exception.InvalidAuthKey(reason=msg)
volume_id = transfer['volume_id']
vol_ref = self.db.volume_get(context.elevated(), volume_id)
try:
reservations = QUOTAS.reserve(context, volumes=1,
gigabytes=vol_ref['size'])
except exception.OverQuota as e:
overs = e.kwargs['overs']
usages = e.kwargs['usages']
quotas = e.kwargs['quotas']
def _consumed(name):
return (usages[name]['reserved'] + usages[name]['in_use'])
if 'gigabytes' in overs:
msg = _("Quota exceeded for %(s_pid)s, tried to create "
"%(s_size)sG volume (%(d_consumed)dG of %(d_quota)dG "
"already consumed)")
LOG.warn(msg % {'s_pid': context.project_id,
's_size': vol_ref['size'],
'd_consumed': _consumed('gigabytes'),
'd_quota': quotas['gigabytes']})
raise exception.VolumeSizeExceedsAvailableQuota(
requested=vol_ref['size'],
consumed=_consumed('gigabytes'),
quota=quotas['gigabytes'])
elif 'volumes' in overs:
msg = _("Quota exceeded for %(s_pid)s, tried to create "
"volume (%(d_consumed)d volumes "
"already consumed)")
LOG.warn(msg % {'s_pid': context.project_id,
'd_consumed': _consumed('volumes')})
raise exception.VolumeLimitExceeded(allowed=quotas['volumes'])
try:
donor_id = vol_ref['project_id']
donor_reservations = QUOTAS.reserve(context.elevated(),
project_id=donor_id,
volumes=-1,
gigabytes=-vol_ref['size'])
except Exception:
donor_reservations = None
LOG.exception(_("Failed to update quota donating volume"
"transfer id %s") % transfer_id)
try:
# Transfer ownership of the volume now, must use an elevated
# context.
self.volume_api.accept_transfer(context,
vol_ref,
context.user_id,
context.project_id)
self.db.transfer_accept(context.elevated(),
transfer_id,
context.user_id,
context.project_id)
QUOTAS.commit(context, reservations)
if donor_reservations:
QUOTAS.commit(context, donor_reservations, project_id=donor_id)
LOG.info(_("Volume %s has been transferred.") % volume_id)
except Exception:
with excutils.save_and_reraise_exception():
QUOTAS.rollback(context, reservations)
if donor_reservations:
QUOTAS.rollback(context, donor_reservations,
project_id=donor_id)
vol_ref = self.db.volume_get(context, volume_id)
return {'id': transfer_id,
'display_name': transfer['display_name'],
'volume_id': vol_ref['id']}
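# Illustrative sketch (not part of Cinder): the salt/auth-key handshake used by
# create() and accept() above, reduced to plain hashlib/hmac calls so the flow
# is easy to follow. Lengths and values are hypothetical.
def _demo_transfer_handshake():
    salt = hashlib.sha224(os.urandom(255)).hexdigest()[:8]
    auth_key = hashlib.sha224(os.urandom(255)).hexdigest()[:16]
    # create(): only salt and crypt_hash are persisted; auth_key is handed to
    # the donor out of band.
    crypt_hash = hmac.new(salt.encode(), auth_key.encode(),
                          hashlib.sha1).hexdigest()
    # accept(): the recipient presents auth_key and the hash is recomputed.
    assert hmac.new(salt.encode(), auth_key.encode(),
                    hashlib.sha1).hexdigest() == crypt_hash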
|
|
#!/usr/bin/env python3
""" Create dynamic report for foodlog with daily summary only.
How To Use This Module
======================
1. Import it: ``from daysummary import Summary``
2. Summarize:
summary = Summary(dbfile, start_date, end_date)
3. The Summary object has these attributes:
Summary.days:
List of dicts, each
dict['date'] - YYYY-MM-DD for date summarized
dict[calories, carbs, fat, or protein] -
Nutrient object with
total,
missing_values,
                bad_values,
name
Summary.total:
TotalNutrition object with attributes:
title - string with currently "Total"
which - dictionary of Nutrient objects keyed by name
Summary.average:
TotalNutrition object with attributes:
title - string with currently "Average"
which - dictionary of Nutrient objects keyed by name
            points_in_average - dictionary giving how many days were averaged per nutrient
When run as a main program, prints the summary as a report in the form:
| DayDate | Cals | Carbs | Fat | Protein |
|---------|------|-------|-----|---------|
| Date1 | nn | nn | nn | nn |
| Date2 | nn+? | nn?! | nn | nn |
| Date3 | nn | nn | | nn |
| Total | NN+? | NN?! | NN+?| NN |
| Average | NN- | NN- | NN- | NN |
Where
nn and NN are numeric values,
+? indicates that there is a missing value
?! indicates a bad value (not a good number)
- indicates that the Average is missing some data
The Total is created from all numeric values available,
and thus is at least a minimal Total.
The Average is taken from days which had a valid value,
which is to say no missing or bad value
"""
import sys
import os
import argparse
from collections import namedtuple
from datetime import date, datetime, timedelta
import sqlite3
from foodlog.my_info import config_path
INVALID_TEMPLATE = """ {} {} """
config = config_path() # pylint: disable=invalid-name
DB_FILE = config.dir('DB_FILE')
MENU_URL = config.dir('MENU_URL')
VIEW_MENU_URL = config.dir('VIEW_MENU_URL')
VALID = set('start end range title reverse edit'.split())
VALID_RANGES = set('today yesterday lastweek thisweek'.split())
def print_error(header, text):
print(INVALID_TEMPLATE.format(header, text))
sys.exit(2)
def week_range(num_weeks, firstweekday=3):
""" Return the range num_weeks ago
Figure out the week where num_weeks == 0 is this week (contains today)
and week == 1 is last week, and so on. Weeks are defined by start_day
using the datetime.weekday(), so if start_day == 0, the week starts on
Monday or if start_day == 3, the week starts on Thursday.
"""
today = date.today()
new_week_day = (today.weekday() - firstweekday) % 7
weekstart = today - timedelta(days=new_week_day)
first_day_of_week = weekstart - timedelta(days=num_weeks*7)
last_day_of_week = min(today, first_day_of_week + timedelta(days=6))
return first_day_of_week, last_day_of_week
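# Illustrative check (not part of the report): week_range(0) always ends today
# and never spans more than seven days. For example, if today were Wednesday
# 2021-01-13 with the default firstweekday=3 (weeks starting on Thursday),
# week_range(0) would return (date(2021, 1, 7), date(2021, 1, 13)).
def _demo_week_range():
    first, last = week_range(0)
    assert last == date.today()
    assert 0 <= (last - first).days <= 6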
def get_dates(args):
if args['range']:
range_key = args['range']
if range_key == 'today':
which = date.today()
return which, which
elif range_key == 'yesterday':
which = date.today() - timedelta(days=1)
return which, which
elif range_key == 'lastweek':
return week_range(1)
elif range_key == 'thisweek':
return week_range(0)
else:
print_error("Program Error:",
"Error handling range {}".format(args['range']))
else:
return (
args.get('start') or (
# First day of previous month
date.today().replace(day=1) - timedelta(days=1)
).replace(day=1),
args.get('end') or date.today())
def namedtuple_factory(cursor, row):
""" sqlite3 rows as named tuples
Usage connection.row_factory = namedtuple_factory
"""
fields = [col[0] for col in cursor.description]
Row = namedtuple('Row', fields)
return Row(*row)
def add_with_none(now, new, servings):
    if new in (None, ''):
        # Missing value: keep the running total and flag it with "+?".
        return (now[0], "+?")
    try:
        return (now[0] + (float(new) * float(servings)), now[1])
    except ValueError:
        print_error(
            "add_with_none",
            "Bad value: total:%s, new:%s, servings:%s" %
            (now[0], new, servings))
        return (now[0], "??%s" % (new))
def get_args():
parser = argparse.ArgumentParser(description="Report daily foodlog totals")
parser.add_argument("--start", type=str,
help="First date in report, YYYY-MM-DD")
parser.add_argument("--end", type=str,
help="Last date in report: YYYY-MM-DD")
parser.add_argument("--range", type=str, choices=VALID_RANGES,
help="date range:{}".format(VALID_RANGES))
parser.add_argument("--title", type=str, help="Displayed title")
nsargs = parser.parse_args()
args = vars(nsargs) # Use as dict rather than namespace
# Check for incompatible params
    if nsargs.range and nsargs.end:  # start is checked by argparse
print_error("Incompatible parameters",
"argument --end not allowed with argument --range")
# Check that start and end are valid dates
# Todo investigate importing dateutil.parser using venv
for date_ in [x for x in ['start', 'end'] if args[x] is not None]:
try:
args[date_] = datetime.strptime(args[date_], ISODATE).date()
except ValueError:
print_error("Bad date",
"Date {} should be YYYY-MM-DD".format(args[date_]))
if args['start'] and args['end'] and args['start'] > args['end']:
print_error("Bad Date Range",
"Start date: {} cannot be after end date: {}".format(
args['start'], args['end']))
return args
class Nutrient:
def __init__(self, name):
self.unchanged = True
self.missing_values = True
self.bad_values = False
self.total = 0.0
self.name = name
def add_nutrient(self, other):
""" add other Nutrient to self """
if self.unchanged:
self.missing_values = other.missing_values
self.bad_values = other.bad_values
self.unchanged = False
else:
self.missing_values = self.missing_values or other.missing_values
self.bad_values = self.bad_values or other.bad_values
self.total += other.total
def addin(self, value, servings):
""" Add in numeric, otherwise set missing or illegal attribute """
if value in (None, ''):
self.missing_values = True
else:
            try:
                # Coerce to float so that non-numeric values raise and are
                # flagged as bad instead of silently corrupting the total.
                self.total += float(value) * float(servings)
                if self.unchanged:
                    self.missing_values = False
                self.unchanged = False
            except (ValueError, TypeError):
                self.bad_values = True
def __str__(self):
return self.notated_value()
def notated_value(self):
value = "{:.1f}".format(self.total).rstrip('0').rstrip('.')
missing_flag = "+?" if self.missing_values else ""
bad_flag = "??!!" if self.bad_values else ""
return "{}{}{}".format(value, missing_flag, bad_flag)
NUTRIENTS = 'calories carbs fat protein'.split()
class TotalNutrition:
""" Holds aggregated total or average nutrition for a time period
If average,
title is Average, has count of days averaged for each nutrient
If total for day,
title is the day string YYYY-MM-DD
If total for date range
title is "Total"
"""
def __init__(self):
self.which = {}
self.title = ""
self.points_in_average = {}
for nutrient in NUTRIENTS:
self.which[nutrient] = Nutrient(nutrient)
self.points_in_average[nutrient] = 0
def add_total_nutrition(self, other):
""" Add nutrient values in from another TotalNutrition object """
for nutrient in other.which:
self.which[nutrient].add_nutrient(other.which[nutrient])
def add_nutrition(self, dish):
""" Add nutrient values in from mapping """
self.title = dish.day
for nutrient in NUTRIENTS:
self.which[nutrient].addin(getattr(dish, nutrient), dish.servings)
def scale(self):
for nutrient in NUTRIENTS:
if self.points_in_average[nutrient] != 0:
self.which[nutrient].total /= self.points_in_average[nutrient]
def set_title(self, title):
self.title = title
def accumulate_average(self, day):
""" Add valid nutrient values from a daily TotalNutrition object
"""
for nutrient in day.which.values():
if not (nutrient.missing_values or nutrient.bad_values):
self.which[nutrient.name].addin(nutrient.total, servings=1)
self.points_in_average[nutrient.name] += 1
def as_dict(self):
return {x: y.notated_value() for x, y in self.which.items()}
def totals(self):
return [self.which[n].notated_value() for n in NUTRIENTS]
ISODATE = "%Y-%m-%d"
def date_str(str_or_val):
""" Gets date ISO string and datetime.date from either one """
if isinstance(str_or_val, date):
return str_or_val.isoformat()
else:
return str_or_val
def day_range(start_date_string, end_date_string):
start_date = datetime.strptime(start_date_string, ISODATE).date()
end_date = datetime.strptime(end_date_string, ISODATE).date()
delta = end_date - start_date
for i in range(delta.days + 1):
yield (start_date + timedelta(i)).isoformat()
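# Illustrative check (not part of the report): day_range is inclusive of both
# endpoints and yields ISO date strings.
def _demo_day_range():
    assert list(day_range("2020-01-30", "2020-02-02")) == [
        "2020-01-30", "2020-01-31", "2020-02-01", "2020-02-02"]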
class Summary:
""" Summary of nutrition intake over time range
Track daily totals, overall total, and average
Deals with incomplete or faulty data.
methods:
calc - recalculates from the database
"""
def __init__(self, database, start_date, end_date):
""" database is string with database file name
start_date and end_date are either datetime.date
or ISO string for date (YYYY-MM-DD)
Internally, we use the ISO string
"""
self.database = database
self.start_date = date_str(start_date)
self.end_date = date_str(end_date)
self.total = None
self.average = None
self.days = None
self.days_total_nutrition = None
self.calc()
def calc(self):
self.total = None
self.average = None
self.calc_days()
self.calc_total()
self.calc_average()
def calc_days(self):
""" Calculate daily sums of nutrients """
days = self.days_total_nutrition = {}
for day in day_range(self.start_date, self.end_date):
days[day] = TotalNutrition()
days[day].set_title(day)
with sqlite3.connect(self.database) as conn:
conn.row_factory = namedtuple_factory
cursor = conn.cursor()
# TODO: add user to the selection
cursor.execute("""select
servings, calories, fat, protein, carbs, day
from course
where day between ? and ? order by day, time""",
(self.start_date, self.end_date))
for course in cursor:
days[course.day].add_nutrition(course)
# List of dicts of nutrients and date
self.days = []
for title, day in sorted(days.items()):
row = dict()
row['date'] = title
row.update(day.which)
self.days.append(row)
def calc_total(self):
total = self.total = TotalNutrition()
for day_n in self.days_total_nutrition.values():
total.add_total_nutrition(day_n)
def calc_average(self):
average = self.average = TotalNutrition()
for day_n in self.days_total_nutrition.values():
average.accumulate_average(day_n)
average.scale()
def decorated_nutrient(nutrient, places=0):
""" Writes nutrients coded for missing or bad values
nutrient has float value, bool missing_values and bad_values
return rounded string with flags for missing or bad
"""
valstring = "{{:.{}f}}".format(places)
value = valstring.format(nutrient.total)
missing_flag = "+?" if nutrient.missing_values else ""
bad_flag = "??!!" if nutrient.bad_values else ""
return "{}{}{}".format(value, missing_flag, bad_flag)
def print_row(title_, nutrients):
rowformat = "{title:11} {calories:7} {carbs:6} {fat:6} {protein:6}"
row = dict(title=title_)
for nutrient in NUTRIENTS:
row[nutrient] = decorated_nutrient(nutrients[nutrient])
print(rowformat.format(**row))
def main():
if 'REQUEST_METHOD' in os.environ:
print("""Content-type: text/plain
""")
if not DB_FILE or ";" in DB_FILE:
print_error("PROBLEM WITH DATABASE", DB_FILE)
args = get_args()
start_date, stop_date = get_dates(args)
bareformat = "{:11} {:7} {:6} {:6} {:6}"
summary = Summary(DB_FILE, start_date, stop_date)
headers = "Date Cals Carbs Fat Protein".split()
print(bareformat.format(*headers))
for day in summary.days:
print_row(day['date'], day)
print_row("Total", summary.total.which)
print_row("Average", summary.average.which)
print(bareformat.format(
"Points",
summary.average.points_in_average['calories'],
summary.average.points_in_average['carbs'],
summary.average.points_in_average['fat'],
summary.average.points_in_average['protein'])
)
if __name__ == '__main__':
main()
|
|
#!/usr/bin/env python
# Copyright (c) 2011-2018, wradlib developers.
# Distributed under the MIT License. See LICENSE.txt for more info.
"""
Read Rainbow
^^^^^^^^^^^^
.. autosummary::
:nosignatures:
:toctree: generated/
read_rainbow
"""
# standard libraries
from __future__ import absolute_import
import sys
import numpy as np
from .. import util as util
def find_key(key, dictionary):
"""Searches for given key in given (nested) dictionary.
    Yields every (sub-)dictionary which contains the given key.
Parameters
----------
key : string
the key to be searched for in the nested dict
dictionary : dict
the dictionary to be searched
Returns
-------
    output : generator
        generator of the dictionaries which contain `key`
"""
for k, v in dictionary.items():
if k == key:
yield dictionary
elif isinstance(v, dict):
for result in find_key(key, v):
yield result
elif isinstance(v, list):
for d in v:
if isinstance(d, dict):
for result in find_key(key, d):
yield result
def decompress(data):
"""Decompression of data
Parameters
----------
data : string
(from xml) data string containing compressed data.
"""
zlib = util.import_optional('zlib')
return zlib.decompress(data)
def get_rb_data_layout(datadepth):
"""Calculates DataWidth and DataType from given DataDepth of
RAINBOW radar data
Parameters
----------
datadepth : int
DataDepth as read from the Rainbow xml metadata.
Returns
-------
datawidth : int
Width in Byte of data.
datatype : string
        conversion string for the numpy dtype.
"""
if sys.byteorder != 'big':
byteorder = '>'
else:
byteorder = '<'
datawidth = int(datadepth / 8)
if datawidth in [1, 2, 4]:
datatype = byteorder + 'u' + str(datawidth)
else:
raise ValueError("Wrong DataDepth: %d. "
"Conversion only for depth 8, 16, 32" % datadepth)
return datawidth, datatype
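# Illustrative check (not part of wradlib): a DataDepth of 16 bit maps to a two
# byte unsigned integer; the byte order prefix depends on the host, as above.
def _demo_rb_data_layout():
    datawidth, datatype = get_rb_data_layout(16)
    assert datawidth == 2
    assert datatype in ('>u2', '<u2')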
def get_rb_data_attribute(xmldict, attr):
"""Get Attribute `attr` from dict `xmldict`
Parameters
----------
xmldict : dict
Blob Description Dictionary
attr : string
Attribute key
Returns
-------
sattr : int
Attribute Values
"""
try:
sattr = int(xmldict['@' + attr])
except KeyError:
raise KeyError('Attribute @{0} is missing from '
'Blob Description. There may be some '
'problems with your file'.format(attr))
return sattr
def get_rb_blob_attribute(blobdict, attr):
"""Get Attribute `attr` from dict `blobdict`
Parameters
----------
blobdict : dict
Blob Description Dictionary
attr : string
Attribute key
Returns
-------
ret : Attribute Value
"""
try:
value = blobdict['BLOB']['@' + attr]
except KeyError:
        raise KeyError('Attribute @' + attr + ' is missing from Blob. '
                       'There may be some problems with your file')
return value
def get_rb_blob_data(datastring, blobid):
""" Read BLOB data from datastring and return it
Parameters
----------
datastring : string
Blob Description String
blobid : int
Number of requested blob
Returns
-------
data : string
Content of blob
"""
xmltodict = util.import_optional('xmltodict')
start = 0
search_string = '<BLOB blobid="{0}"'.format(blobid)
start = datastring.find(search_string.encode(), start)
if start == -1:
raise EOFError('Blob ID {0} not found!'.format(blobid))
end = datastring.find(b'>', start)
xmlstring = datastring[start:end + 1]
    # cheat the xml parser by making the xml snippet well-formed
xmldict = xmltodict.parse(xmlstring.decode() + '</BLOB>')
cmpr = get_rb_blob_attribute(xmldict, 'compression')
size = int(get_rb_blob_attribute(xmldict, 'size'))
data = datastring[end + 2:end + 2 + size] # read blob data to string
# decompress if necessary
# the first 4 bytes are neglected for an unknown reason
if cmpr == "qt":
data = decompress(data[4:])
return data
def map_rb_data(data, datadepth):
""" Map BLOB data to correct DataWidth and Type and convert it
to numpy array
Parameters
----------
data : string
Blob Data
datadepth : int
bit depth of Blob data
Returns
-------
data : numpy array
Content of blob
"""
flagdepth = None
if datadepth < 8:
flagdepth = datadepth
datadepth = 8
datawidth, datatype = get_rb_data_layout(datadepth)
# import from data buffer well aligned to data array
data = np.ndarray(shape=(int(len(data) / datawidth),),
dtype=datatype, buffer=data)
if flagdepth:
data = np.unpackbits(data)
return data
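# Illustrative check (not part of wradlib, little-endian host assumed so that
# get_rb_data_layout yields '>u2'): two 16 bit big-endian samples map to a
# numpy array of length two.
def _demo_map_rb_data():
    values = map_rb_data(b'\x00\x01\x00\x02', 16)
    assert values.tolist() == [1, 2]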
def get_rb_data_shape(blobdict):
"""Retrieve correct BLOB data shape from blobdict
Parameters
----------
blobdict : dict
Blob Description Dict
Returns
-------
tuple : shape
shape of data
"""
# this is a bit hacky, but we do not know beforehand,
# so we extract this on the run
try:
dim0 = get_rb_data_attribute(blobdict, 'rays')
dim1 = get_rb_data_attribute(blobdict, 'bins')
# if rays and bins are found, return both
return dim0, dim1
except KeyError as e1:
try:
# if only rays is found, return rays
return dim0
except UnboundLocalError:
try:
# if both rays and bins not found assuming pixmap
dim0 = get_rb_data_attribute(blobdict, 'rows')
dim1 = get_rb_data_attribute(blobdict, 'columns')
dim2 = get_rb_data_attribute(blobdict, 'depth')
if dim2 < 8:
# if flagged data return rows x columns x depth
return dim0, dim1, dim2
else:
# otherwise just rows x columns
return dim0, dim1
except KeyError as e2:
                # if some keys are still missing, print errors and raise
print(e1)
print(e2)
raise
def get_rb_blob_from_string(datastring, blobdict):
"""Read BLOB data from datastring and return it as numpy array with correct
dataWidth and shape
Parameters
----------
datastring : string
Blob Description String
blobdict : dict
Blob Description Dict
Returns
-------
data : numpy array
Content of blob as numpy array
"""
blobid = get_rb_data_attribute(blobdict, 'blobid')
data = get_rb_blob_data(datastring, blobid)
# map data to correct datatype and width
datadepth = get_rb_data_attribute(blobdict, 'depth')
data = map_rb_data(data, datadepth)
# reshape data
data.shape = get_rb_data_shape(blobdict)
return data
def get_rb_blob_from_file(f, blobdict):
"""Read BLOB data from file and return it with correct
dataWidth and shape
Parameters
----------
f : string or file handle
File handle of or path to Rainbow file
blobdict : dict
Blob Dict
Returns
-------
data : numpy array
Content of blob as numpy array
"""
# Try to read the data from a file handle
try:
f.seek(0, 0)
fid = f
datastring = fid.read()
except AttributeError:
# If we did not get a file handle, assume that we got a filename,
# get a file handle and read the data
try:
fid = open(f, "rb")
datastring = fid.read()
fid.close()
except IOError:
print("WRADLIB: Error opening Rainbow file ", f)
raise IOError
data = get_rb_blob_from_string(datastring, blobdict)
return data
def get_rb_file_as_string(fid):
""" Read Rainbow File Contents in data_string
Parameters
----------
fid : file handle
File handle of Data File
Returns
-------
data_string : string
File Contents as data_string
"""
try:
data_string = fid.read()
except Exception:
raise IOError('Could not read from file handle')
return data_string
def get_rb_blobs_from_file(fid, rbdict):
"""Read all BLOBS found in given nested dict, loads them from file
given by filename and add them to the dict at the appropriate position.
Parameters
----------
fid : file handle
File handle of Data File
rbdict : dict
Rainbow file Contents
Returns
-------
ret : dict
Rainbow File Contents
"""
blobs = list(find_key('@blobid', rbdict))
datastring = get_rb_file_as_string(fid)
for blob in blobs:
data = get_rb_blob_from_string(datastring, blob)
blob['data'] = data
return rbdict
def get_rb_header(fid):
"""Read Rainbow Header from filename, converts it to a dict and returns it
Parameters
----------
fid : file handle
File handle of Data File
Returns
-------
object : dictionary
Rainbow File Contents
"""
# load the header lines, i.e. the XML part
end_xml_marker = b"<!-- END XML -->"
header = b""
line = b""
while not line.startswith(end_xml_marker):
header += line[:-1]
line = fid.readline()
if len(line) == 0:
raise IOError("WRADLIB: Rainbow Fileheader Corrupt")
xmltodict = util.import_optional('xmltodict')
return xmltodict.parse(header)
def read_rainbow(f, loaddata=True):
"""Reads Rainbow files files according to their structure
In contrast to other file readers under :meth:`wradlib.io`, this function
will *not* return a two item tuple with (data, metadata). Instead, this
function returns ONE dictionary that contains all the file contents - both
data and metadata. The keys of the output dictionary conform to the XML
outline in the original data file.
The radar data will be extracted from the data blobs, converted and added
to the dict with key 'data' at the place where the @blobid was pointing
from.
Parameters
----------
f : string or file handle
a rainbow file path or file handle of rainbow file
loaddata : bool
True | False, If False function returns only metadata
Returns
-------
rbdict : dict
a dictionary that contains both data and metadata according to the
original rainbow file structure
Examples
--------
See :ref:`/notebooks/fileio/wradlib_load_rainbow_example.ipynb`.
"""
# Check if a file handle has been passed
try:
f.seek(0, 0)
fid = f
except AttributeError:
# If we did not get a file handle, assume that we got a filename and
# get a file handle for the corresponding file
try:
fid = open(f, "rb")
except IOError:
raise IOError("WRADLIB: Error opening Rainbow "
"file '{}' ".format(f))
rbdict = get_rb_header(fid)
if loaddata:
rbdict = get_rb_blobs_from_file(fid, rbdict)
return rbdict
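# Illustrative sketch (hypothetical filename; the nested keys follow a typical
# Rainbow 5 volume layout and may differ for other products): reading a file
# and accessing the decoded blob data added under the 'data' keys.
def _demo_read_rainbow():
    rbdict = read_rainbow('2013051000000600dBZ.vol')
    slices = rbdict['volume']['scan']['slice']
    first_raw = slices[0]['slicedata']['rawdata']['data']
    print(first_raw.shape, first_raw.dtype)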
|
|
# Copyright (c) 2013 VMware, Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
import os
from oslo_log import log as logging
from congress.datalog import compile
from congress.datalog import unify
from congress.datalog import utility
from congress.policy_engines import agnostic
from congress.tests import base
LOG = logging.getLogger(__name__)
NREC_THEORY = 'non-recursive theory'
DB_THEORY = 'database'
MAT_THEORY = 'materialized'
# This file contains tests that are likely broken. But the tests
# are good ones once we get the underlying data structures fixed.
# TODO(thinrichs): fix tests so they are working again.
class TestRuntime(base.TestCase):
def prep_runtime(self, code=None, msg=None, target=None):
# compile source
if msg is not None:
LOG.debug(msg)
if code is None:
code = ""
if target is None:
target = MAT_THEORY
run = agnostic.Runtime()
run.theory[NREC_THEORY] = agnostic.NonrecursiveRuleTheory()
run.theory[DB_THEORY] = agnostic.Database()
run.theory[MAT_THEORY] = agnostic.MaterializedViewTheory()
run.debug_mode()
run.insert(code, target=target)
return run
def check_class(self, run, correct_database_code, msg=None):
"""Test MAT_THEORY.
Check that runtime RUN's MAT_THEORY theory
has exactly the same contents as CORRECT_DATABASE_CODE.
"""
self.open(msg)
db_class = run.theory[MAT_THEORY].database
# self.showdb(run)
correct = agnostic.string_to_database(correct_database_code)
self.check_db_diffs(db_class, correct, msg)
self.close(msg)
def check_db(self, run, correct_database_code, msg=None):
"""Test DB_THEORY.
Check that runtime RUN.theory[DB_THEORY] is
equal to CORRECT_DATABASE_CODE.
"""
# extract correct answer from correct_database_code
self.open(msg)
correct_database = agnostic.string_to_database(correct_database_code)
self.check_db_diffs(run.theory[DB_THEORY],
correct_database, msg)
self.close(msg)
def check_db_diffs(self, actual, correct, msg):
extra = actual - correct
missing = correct - actual
extra = [e for e in extra if not e[0].startswith("___")]
missing = [m for m in missing if not m[0].startswith("___")]
self.output_diffs(extra, missing, msg, actual=actual)
def output_diffs(self, extra, missing, msg, actual=None):
if len(extra) > 0:
LOG.debug("Extra tuples")
LOG.debug(", ".join([str(x) for x in extra]))
if len(missing) > 0:
LOG.debug("Missing tuples")
LOG.debug(", ".join([str(x) for x in missing]))
if len(extra) > 0 or len(missing) > 0:
LOG.debug("Resulting database: %s", actual)
self.assertTrue(len(extra) == 0 and len(missing) == 0, msg)
def check_equal(self, actual_code, correct_code, msg=None, equal=None):
def minus(iter1, iter2, invert=False):
extra = []
for i1 in iter1:
found = False
for i2 in iter2:
# for asymmetric equality checks
if invert:
test_result = equal(i2, i1)
else:
test_result = equal(i1, i2)
if test_result:
found = True
break
if not found:
extra.append(i1)
return extra
if equal is None:
equal = lambda x, y: x == y
LOG.debug("** Checking equality: %s **", msg)
actual = compile.parse(actual_code)
correct = compile.parse(correct_code)
extra = minus(actual, correct)
# in case EQUAL is asymmetric, always supply actual as the first arg
missing = minus(correct, actual, invert=True)
self.output_diffs(extra, missing, msg)
LOG.debug("** Finished equality: %s **", msg)
def check_same(self, actual_code, correct_code, msg=None):
"""Checks if ACTUAL_CODE is a variable-renaming of CORRECT_CODE."""
return self.check_equal(
actual_code, correct_code, msg=msg,
equal=lambda x, y: unify.same(x, y) is not None)
def check_instance(self, actual_code, correct_code, msg=None):
"""Checks if ACTUAL_CODE is an instance of CORRECT_CODE."""
return self.check_equal(
actual_code, correct_code, msg=msg,
equal=lambda x, y: unify.instance(x, y) is not None)
def check_proofs(self, run, correct, msg=None):
"""Test proofs.
Check that the proofs stored in runtime RUN are exactly
those in CORRECT.
"""
# example
# check_proofs(run, {'q': {(1,):
# Database.ProofCollection([{'x': 1, 'y': 2}])}})
errs = []
checked_tables = set()
for table in run.database.table_names():
if table in correct:
checked_tables.add(table)
for dbtuple in run.database[table]:
if dbtuple.tuple in correct[table]:
if dbtuple.proofs != correct[table][dbtuple.tuple]:
errs.append(
"For table {} tuple {}\n Computed: {}\n "
"Correct: {}".format(
table, str(dbtuple),
str(dbtuple.proofs),
str(correct[table][dbtuple.tuple])))
for table in set(correct.keys()) - checked_tables:
errs.append("Table {} had a correct answer but did not exist "
"in the database".format(table))
if len(errs) > 0:
# LOG.debug("Check_proof errors:\n%s", "\n".join(errs))
self.fail("\n".join(errs))
def showdb(self, run):
LOG.debug("Resulting DB: %s",
run.theory[run.CLASSIFY_THEORY].database |
run.theory[run.DATABASE] |
run.theory[run.ENFORCEMENT_THEORY].database)
def insert(self, run, alist, target=None):
if target is None:
target = MAT_THEORY
run.insert(tuple(alist))
def delete(self, run, alist):
run.delete(tuple(alist))
def test_remediation(self):
"""Test remediation computation."""
        def check(action_code, classify_code, query, correct, msg):
            run = self.prep_runtime()
            actth = run.ACTION_THEORY
            clsth = run.CLASSIFY_THEORY
            run.insert(action_code, target=actth)
            run.insert(classify_code, target=clsth)
self.showdb(run)
self.check_equal(run.remediate(query), correct, msg)
# simple
action_code = ('action("a")'
'p-(x) :- a(x)')
class_code = ('err(x) :- p(x)'
'p(1)')
check(action_code, class_code, 'err(1)', 'p-(1) :- a(1)', 'Monadic')
# rules in action theory
action_code = ('action("a")'
'p-(x) :- q(x)'
'q(x) :- a(x)')
class_code = ('err(x) :- p(x)'
'p(1)')
check(action_code, class_code, 'err(1)', 'p-(1) :- a(1)',
'Monadic, indirect')
# multiple conditions in error
action_code = ('action("a")'
'action("b")'
'p-(x) :- a(x)'
'q-(x) :- b(x)')
class_code = ('err(x) :- p(x), q(x)'
'p(1)'
'q(1)')
check(action_code, class_code, 'err(1)',
'p-(1) :- a(1) q-(1) :- b(1)',
'Monadic, two conditions, two actions')
def test_access_control(self):
"""Test access control: whether a given action is permitted."""
def create(ac_code, class_code):
run = self.prep_runtime()
acth = run.ACCESSCONTROL_THEORY
permitted, errors = run.insert(ac_code, target=acth)
self.assertTrue(permitted,
"Error in access control policy: {}".format(
utility.iterstr(errors)))
clsth = run.CLASSIFY_THEORY
permitted, errors = run.insert(class_code, target=clsth)
self.assertTrue(permitted, "Error in classifier policy: {}".format(
utility.iterstr(errors)))
return run
def check_true(run, query, support='', msg=None):
result = run.access_control(query, support)
self.assertTrue(result,
"Error in access control test {}".format(msg))
def check_false(run, query, support='', msg=None):
result = run.access_control(query, support)
self.assertFalse(result,
"Error in access control test {}".format(msg))
# Only checking basic I/O interface for the access_control request.
# Basic inference algorithms are tested elsewhere.
# Simple
ac_code = ('action(x) :- q(x)')
classify_code = 'q(2)'
run = create(ac_code, classify_code)
check_true(run, "action(2)", msg="Simple true action")
check_false(run, "action(1)", msg="Simple false action")
# Options
ac_code = ('action(x, y) :- q(x), options:value(y, "name", name), '
'r(name)')
classify_code = 'q(2) r("alice")'
run = create(ac_code, classify_code)
check_true(run, 'action(2,18)', 'options:value(18, "name", "alice")',
msg="Single option true")
check_false(run, 'action(2,18)', 'options:value(18, "name", "bob")',
msg="Single option false")
# Multiple Options
ac_code = ('action(x, y) :- q(x), options:value(y, "name", name), '
'r(name), options:value(y, "age", 30)')
classify_code = 'q(2) r("alice")'
run = create(ac_code, classify_code)
check_true(run, 'action(2,18)', 'options:value(18, "name", "alice") '
'options:value(18, "age", 30)', msg="Multiple option true")
check_false(run, 'action(2, 18)', 'options:value(18, "name", "bob") '
'options:value(18, "age", 30)',
msg="Multiple option false")
def test_enforcement(self):
"""Test enforcement."""
def prep_runtime(enforce_theory, action_theory, class_theory):
run = agnostic.Runtime()
run.insert(enforce_theory, target=run.ENFORCEMENT_THEORY)
run.insert(action_theory, target=run.ACTION_THEORY)
run.insert(class_theory, target=run.CLASSIFY_THEORY)
return run
enforce = 'act(x) :- p(x)'
action = 'action("act")'
run = prep_runtime(enforce, action, "")
run.insert('p(1)')
self.check_equal(run.logger.content(), 'act(1)', 'Insert')
run.logger.empty()
run.insert('p(1)')
self.check_equal(run.logger.content(), '', 'Insert again')
run.insert('p(2)')
self.check_equal(run.logger.content(), 'act(2)', 'Insert different')
run.logger.empty()
run.delete('p(2)')
self.check_equal(run.logger.content(), '', 'Delete')
def test_neutron_actions(self):
"""Test our encoding of the Neutron actions basics by simulation."""
def check(query, action_sequence, correct, msg):
actual = run.simulate(query, action_sequence)
LOG.debug("Simulate results: %s", actual)
self.check_instance(actual, correct, msg)
full_path = os.path.realpath(__file__)
path = os.path.dirname(full_path)
neutron_path = path + "/../../../examples/neutron.action"
run = agnostic.Runtime()
run.debug_mode()
# load_file does not exist any longer.
permitted, errs = run.load_file(neutron_path, target=run.ACTION_THEORY)
if not permitted:
self.assertTrue(permitted, "Error in Neutron file: {}".format(
"\n".join([str(x) for x in errs])))
return
# Ports
query = 'neutron:port(x1, x2, x3, x4, x5, x6, x7, x8, x9)'
acts = 'neutron:create_port("net1", 17), sys:user("tim") :- true'
correct = ('neutron:port(id, "net1", name, mac, "null",'
'"null", z, w, "tim")')
check(query, acts, correct, 'Simple port creation')
query = 'neutron:port(x1, x2, x3, x4, x5, x6, x7, x8, x9)'
# result(uuid): simulation-specific table that holds the results
# of the last action invocation
acts = ('neutron:create_port("net1", 17), sys:user("tim") :- true '
'neutron:update_port(uuid, 18), sys:user("tim"), '
' options:value(18, "name", "tims port") :- result(uuid) ')
correct = ('neutron:port(id, "net1", "tims port", mac, "null",'
'"null", z, w, "tim")')
check(query, acts, correct, 'Port create, update')
query = 'neutron:port(x1, x2, x3, x4, x5, x6, x7, x8, x9)'
# result(uuid): simulation-specific table that holds the results
# of the last action invocation
acts = ('neutron:create_port("net1", 17), sys:user("tim") :- true '
'neutron:update_port(uuid, 18), sys:user("tim"), '
' options:value(18, "name", "tims port") :- result(uuid) '
'neutron:delete_port(uuid), sys:user("tim")'
' :- result(uuid) ')
correct = ''
check(query, acts, correct, 'Port create, update, delete')
# Networks
query = ('neutron:network(id, name, status, admin_state, shared,'
'tenenant_id)')
acts = 'neutron:create_network(17), sys:user("tim") :- true'
correct = 'neutron:network(id, "", status, "true", "true", "tim")'
check(query, acts, correct, 'Simple network creation')
query = ('neutron:network(id, name, status, admin_state, '
'shared, tenenant_id)')
acts = ('neutron:create_network(17), sys:user("tim") :- true '
'neutron:update_network(uuid, 18), sys:user("tim"), '
' options:value(18, "admin_state", "false") :- result(uuid)')
correct = 'neutron:network(id, "", status, "false", "true", "tim")'
check(query, acts, correct, 'Network creation, update')
query = ('neutron:network(id, name, status, admin_state, shared, '
'tenenant_id)')
acts = ('neutron:create_network(17), sys:user("tim") :- true '
'neutron:update_network(uuid, 18), sys:user("tim"), '
' options:value(18, "admin_state", "false") :- result(uuid)'
'neutron:delete_network(uuid) :- result(uuid)')
correct = ''
check(query, acts, correct, 'Network creation, update')
# Subnets
query = ('neutron:subnet(id, name, network_id, '
'gateway_ip, ip_version, cidr, enable_dhcp, tenant_id)')
acts = ('neutron:create_subnet("net1", "10.0.0.1/24", 17), '
'sys:user("tim") :- true')
correct = ('neutron:subnet(id, "", "net1", gateway_ip, 4, '
'"10.0.0.1/24", "true", "tim")')
check(query, acts, correct, 'Simple subnet creation')
query = ('neutron:subnet(id, name, network_id, '
'gateway_ip, ip_version, cidr, enable_dhcp, tenant_id)')
acts = ('neutron:create_subnet("net1", "10.0.0.1/24", 17), '
'sys:user("tim") :- true '
'neutron:update_subnet(uuid, 17), sys:user("tim"), '
' options:value(17, "enable_dhcp", "false") :- result(uuid)')
correct = ('neutron:subnet(id, "", "net1", gateway_ip, 4, '
'"10.0.0.1/24", "false", "tim")')
check(query, acts, correct, 'Subnet creation, update')
query = ('neutron:subnet(id, name, network_id, '
'gateway_ip, ip_version, cidr, enable_dhcp, tenant_id)')
acts = ('neutron:create_subnet("net1", "10.0.0.1/24", 17), '
'sys:user("tim") :- true '
'neutron:update_subnet(uuid, 17), sys:user("tim"), '
' options:value(17, "enable_dhcp", "false") :- result(uuid)'
'neutron:delete_subnet(uuid) :- result(uuid)')
correct = ''
check(query, acts, correct, 'Subnet creation, update, delete')
def str2form(formula_string):
return compile.parse1(formula_string)
def str2pol(policy_string):
return compile.parse(policy_string)
def pol2str(policy):
return " ".join(str(x) for x in policy)
def form2str(formula):
return str(formula)
|
|
from sympy import (Symbol, symbols, factorial, factorial2, binomial,
rf, ff, gamma, polygamma, EulerGamma, O, pi, nan,
oo, zoo, simplify, expand_func, C, S, Product)
from sympy.functions.combinatorial.factorials import subfactorial
from sympy.utilities.pytest import XFAIL, raises
def test_rf_eval_apply():
x, y = symbols('x,y')
assert rf(nan, y) == nan
assert rf(x, y) == rf(x, y)
assert rf(oo, 0) == 1
assert rf(-oo, 0) == 1
assert rf(oo, 6) == oo
assert rf(-oo, 7) == -oo
assert rf(oo, -6) == oo
assert rf(-oo, -7) == oo
assert rf(x, 0) == 1
assert rf(x, 1) == x
assert rf(x, 2) == x*(x + 1)
assert rf(x, 3) == x*(x + 1)*(x + 2)
assert rf(x, 5) == x*(x + 1)*(x + 2)*(x + 3)*(x + 4)
assert rf(x, -1) == 1/(x - 1)
assert rf(x, -2) == 1/((x - 1)*(x - 2))
assert rf(x, -3) == 1/((x - 1)*(x - 2)*(x - 3))
assert rf(1, 100) == factorial(100)
n = Symbol('n', integer=True)
k = Symbol('k', integer=True)
m = Symbol('m', integer=True, nonnegative=True)
assert rf(x, m).is_integer is None
assert rf(n, k).is_integer is None
assert rf(n, m).is_integer is True
assert rf(n, k + pi).is_integer is False
assert rf(n, m + pi).is_integer is False
assert rf(pi, m).is_integer is False
def test_ff_eval_apply():
x, y = symbols('x,y')
assert ff(nan, y) == nan
assert ff(x, y) == ff(x, y)
assert ff(oo, 0) == 1
assert ff(-oo, 0) == 1
assert ff(oo, 6) == oo
assert ff(-oo, 7) == -oo
assert ff(oo, -6) == oo
assert ff(-oo, -7) == oo
assert ff(x, 0) == 1
assert ff(x, 1) == x
assert ff(x, 2) == x*(x - 1)
assert ff(x, 3) == x*(x - 1)*(x - 2)
assert ff(x, 5) == x*(x - 1)*(x - 2)*(x - 3)*(x - 4)
assert ff(x, -1) == 1/(x + 1)
assert ff(x, -2) == 1/((x + 1)*(x + 2))
assert ff(x, -3) == 1/((x + 1)*(x + 2)*(x + 3))
assert ff(100, 100) == factorial(100)
n = Symbol('n', integer=True)
k = Symbol('k', integer=True)
m = Symbol('m', integer=True, nonnegative=True)
assert ff(x, m).is_integer is None
assert ff(n, k).is_integer is None
assert ff(n, m).is_integer is True
assert ff(n, k + pi).is_integer is False
assert ff(n, m + pi).is_integer is False
assert ff(pi, m).is_integer is False
def test_factorial():
x = Symbol('x')
n = Symbol('n', integer=True)
k = Symbol('k', integer=True, nonnegative=True)
r = Symbol('r', integer=False)
s = Symbol('s', integer=False, negative=True)
t = Symbol('t', nonnegative=True)
u = Symbol('u', noninteger=True)
v = Symbol('v', integer=True, negative=True)
assert factorial(-2) == zoo
assert factorial(0) == 1
assert factorial(7) == 5040
assert factorial(n).func == factorial
assert factorial(2*n).func == factorial
assert factorial(x).is_integer is None
assert factorial(n).is_integer is None
assert factorial(k).is_integer
assert factorial(r).is_integer is None
assert factorial(n).is_positive is None
assert factorial(k).is_positive
assert factorial(x).is_real is None
assert factorial(n).is_real is None
assert factorial(k).is_real is True
assert factorial(r).is_real is None
assert factorial(s).is_real is True
assert factorial(t).is_real is True
assert factorial(u).is_real is True
assert factorial(x).is_composite is None
assert factorial(n).is_composite is None
assert factorial(k).is_composite is None
assert factorial(k + 3).is_composite is True
assert factorial(r).is_composite is None
assert factorial(s).is_composite is None
assert factorial(t).is_composite is None
assert factorial(u).is_composite is None
assert factorial(v).is_composite is False
assert factorial(oo) == oo
def test_factorial_diff():
n = Symbol('n', integer=True)
assert factorial(n).diff(n) == \
gamma(1 + n)*polygamma(0, 1 + n)
assert factorial(n**2).diff(n) == \
2*n*gamma(1 + n**2)*polygamma(0, 1 + n**2)
def test_factorial_series():
n = Symbol('n', integer=True)
assert factorial(n).series(n, 0, 3) == \
1 - n*EulerGamma + n**2*(EulerGamma**2/2 + pi**2/12) + O(n**3)
def test_factorial_rewrite():
n = Symbol('n', integer=True)
k = Symbol('k', integer=True, nonnegative=True)
assert factorial(n).rewrite(gamma) == gamma(n + 1)
assert str(factorial(k).rewrite(Product)) == 'Product(_i, (_i, 1, k))'
def test_factorial2():
n = Symbol('n', integer=True)
assert factorial2(-1) == 1
assert factorial2(0) == 1
assert factorial2(7) == 105
assert factorial2(8) == 384
assert factorial2(n).func == factorial2
# The following is exhaustive
tt = Symbol('tt', integer=True, nonnegative=True)
tte = Symbol('tte', even=True, nonnegative=True)
tpe = Symbol('tpe', even=True, positive=True)
tto = Symbol('tto', odd=True, nonnegative=True)
tf = Symbol('tf', integer=True, nonnegative=False)
tfe = Symbol('tfe', even=True, nonnegative=False)
tfo = Symbol('tfo', odd=True, nonnegative=False)
ft = Symbol('ft', integer=False, nonnegative=True)
ff = Symbol('ff', integer=False, nonnegative=False)
fn = Symbol('fn', integer=False)
nt = Symbol('nt', nonnegative=True)
nf = Symbol('nf', nonnegative=False)
nn = Symbol('nn')
assert factorial2(n).is_integer is None
assert factorial2(tt - 1).is_integer
assert factorial2(tte - 1).is_integer
assert factorial2(tpe - 3).is_integer
# This should work, but it doesn't due to ...
# assert factorial2(tto - 4).is_integer
assert factorial2(tto - 2).is_integer
assert factorial2(tf).is_integer is None
assert factorial2(tfe).is_integer is None
assert factorial2(tfo).is_integer is None
assert factorial2(ft).is_integer is None
assert factorial2(ff).is_integer is None
assert factorial2(fn).is_integer is None
assert factorial2(nt).is_integer is None
assert factorial2(nf).is_integer is None
assert factorial2(nn).is_integer is None
assert factorial2(n).is_positive is None
assert factorial2(tt - 1).is_positive is True
assert factorial2(tte - 1).is_positive is True
# This should work, but it doesn't due to ...
# assert factorial2(tpe - 3).is_positive is True
assert factorial2(tpe - 1).is_positive is True
# This should work, but it doesn't due to ...
# assert factorial2(tto - 2).is_positive is True
assert factorial2(tto - 1).is_positive is True
assert factorial2(tf).is_positive is None
assert factorial2(tfe).is_positive is None
assert factorial2(tfo).is_positive is None
assert factorial2(ft).is_positive is None
assert factorial2(ff).is_positive is None
assert factorial2(fn).is_positive is None
assert factorial2(nt).is_positive is None
assert factorial2(nf).is_positive is None
assert factorial2(nn).is_positive is None
assert factorial2(tt).is_even is None
assert factorial2(tt).is_odd is None
assert factorial2(tte).is_even is None
assert factorial2(tte).is_odd is None
assert factorial2(tte + 2).is_even is True
assert factorial2(tpe).is_even is True
assert factorial2(tto).is_odd is True
assert factorial2(tf).is_even is None
assert factorial2(tf).is_odd is None
assert factorial2(tfe).is_even is None
assert factorial2(tfe).is_odd is None
assert factorial2(tfo).is_even is False
assert factorial2(tfo).is_odd is None
def test_binomial():
n = Symbol('n', integer=True)
k = Symbol('k', integer=True)
u = Symbol('v', negative=True)
v = Symbol('m', positive=True)
assert binomial(0, 0) == 1
assert binomial(1, 1) == 1
assert binomial(10, 10) == 1
assert binomial(1, 2) == 0
assert binomial(1, -1) == 0
assert binomial(-1, 1) == -1
assert binomial(-10, 1) == -10
assert binomial(-10, 7) == -11440
assert binomial(n, -1) == 0
assert binomial(n, 0) == 1
assert expand_func(binomial(n, 1)) == n
assert expand_func(binomial(n, 2)) == n*(n - 1)/2
assert expand_func(binomial(n, n - 2)) == n*(n - 1)/2
assert expand_func(binomial(n, n - 1)) == n
assert binomial(n, 3).func == binomial
assert binomial(n, 3).expand(func=True) == n**3/6 - n**2/2 + n/3
assert expand_func(binomial(n, 3)) == n*(n - 2)*(n - 1)/6
assert binomial(n, n) == 1
assert binomial(n, n + 1) == 0
assert binomial(n, u) == 0
assert binomial(n, v).func == binomial
assert binomial(n, k).func == binomial
assert binomial(n, n + v) == 0
assert expand_func(binomial(n, n-3)) == n*(n - 2)*(n - 1)/6
assert binomial(n, k).is_integer
def test_binomial_diff():
n = Symbol('n', integer=True)
k = Symbol('k', integer=True)
assert binomial(n, k).diff(n) == \
(-polygamma(0, 1 + n - k) + polygamma(0, 1 + n))*binomial(n, k)
assert binomial(n**2, k**3).diff(n) == \
2*n*(-polygamma(
0, 1 + n**2 - k**3) + polygamma(0, 1 + n**2))*binomial(n**2, k**3)
assert binomial(n, k).diff(k) == \
(-polygamma(0, 1 + k) + polygamma(0, 1 + n - k))*binomial(n, k)
assert binomial(n**2, k**3).diff(k) == \
3*k**2*(-polygamma(
0, 1 + k**3) + polygamma(0, 1 + n**2 - k**3))*binomial(n**2, k**3)
def test_binomial_rewrite():
n = Symbol('n', integer=True)
k = Symbol('k', integer=True)
assert binomial(n, k).rewrite(
factorial) == factorial(n)/(factorial(k)*factorial(n - k))
assert binomial(
n, k).rewrite(gamma) == gamma(n + 1)/(gamma(k + 1)*gamma(n - k + 1))
@XFAIL
def test_factorial_simplify_fail():
    # simplify(factorial(x + 1).diff(x) - ((x + 1)*factorial(x)).diff(x)) == 0
from sympy.abc import x
assert simplify(x*polygamma(0, x + 1) - x*polygamma(0, x + 2) +
polygamma(0, x + 1) - polygamma(0, x + 2) + 1) == 0
def test_subfactorial():
assert all(subfactorial(i) == ans for i, ans in enumerate(
[1, 0, 1, 2, 9, 44, 265, 1854, 14833, 133496]))
assert subfactorial(oo) == oo
x = Symbol('x')
assert subfactorial(x).rewrite(C.uppergamma) == \
C.uppergamma(x + 1, -1)/S.Exp1
tt = Symbol('tt', integer=True, nonnegative=True)
tf = Symbol('tf', integer=True, nonnegative=False)
tn = Symbol('tf', integer=True)
ft = Symbol('ft', integer=False, nonnegative=True)
ff = Symbol('ff', integer=False, nonnegative=False)
fn = Symbol('ff', integer=False)
nt = Symbol('nt', nonnegative=True)
nf = Symbol('nf', nonnegative=False)
nn = Symbol('nf')
te = Symbol('te', even=True, nonnegative=True)
to = Symbol('to', odd=True, nonnegative=True)
assert subfactorial(tt).is_integer
assert subfactorial(tf).is_integer is None
assert subfactorial(tn).is_integer is None
assert subfactorial(ft).is_integer is None
assert subfactorial(ff).is_integer is None
assert subfactorial(fn).is_integer is None
assert subfactorial(nt).is_integer is None
assert subfactorial(nf).is_integer is None
assert subfactorial(nn).is_integer is None
assert subfactorial(tt).is_nonnegative
assert subfactorial(tf).is_nonnegative is None
assert subfactorial(tn).is_nonnegative is None
assert subfactorial(ft).is_nonnegative is None
assert subfactorial(ff).is_nonnegative is None
assert subfactorial(fn).is_nonnegative is None
assert subfactorial(nt).is_nonnegative is None
assert subfactorial(nf).is_nonnegative is None
assert subfactorial(nn).is_nonnegative is None
assert subfactorial(tt).is_even is None
assert subfactorial(tt).is_odd is None
assert subfactorial(te).is_odd is True
assert subfactorial(to).is_even is True
|
|
#Module containing frequently used physics constants and functions
import math
import numpy as np
import matplotlib as mpl
import matplotlib.pyplot as plt
from matplotlib.colors import ListedColormap
#%%
#==============================================================================
# Fundamental constants
#==============================================================================
me = 9.10938*10**-31 #Electron mass in kg
mp = 1.67262158*10**-27 #Proton mass in kg
q = 1.602177*10**-19 #Elementary charge in C
G = 6.674*10**-11 #Gravitational constant in N*m^2/kg^2
c = 2.998*10**8 #Speed of light in m/s
epsilon0 = 8.854*10**-12 #Electric constant in F/m
mu0 = 4*math.pi*10**-7 #Magnetic constant in T*m/A
hbar = 1.054572*10**-34 #Reduced Planck constant in J*s
kb = 1.38065*10**-23 #Boltzmann constant in J/K
NA = 6.022*10**23 #Avogadro's number
#%%
#==============================================================================
# Functions
#==============================================================================
def savitzky_golay(y, window_size, order, deriv=0, rate=1):
"""Smooth (and optionally differentiate) data with a Savitzky-Golay filter.
y : array_like, shape (N,)
the values of the time history of the signal.
window_size : int
the length of the window. Must be an odd integer number.
order : int
the order of the polynomial used in the filtering.
Must be less then `window_size` - 1.
deriv: int
the order of the derivative to compute (default = 0 means only smoothing)
Returns
-------
ys : ndarray, shape (N)
the smoothed signal (or it's n-th derivative)."""
    try:
        window_size = abs(int(window_size))
        order = abs(int(order))
    except ValueError:
        raise ValueError("window_size and order have to be of type int")
    if window_size % 2 != 1 or window_size < 1:
        raise TypeError("window_size must be a positive odd number")
    if window_size < order + 2:
        raise TypeError("window_size is too small for the polynomial's order")
order_range = range(order+1)
half_window = (window_size -1) // 2
# precompute coefficients
    b = np.array([[k**i for i in order_range]
                  for k in range(-half_window, half_window+1)])
    m = np.linalg.pinv(b)[deriv] * rate**deriv * math.factorial(deriv)
# pad the signal at the extremes with
# values taken from the signal itself
firstvals = y[0] - np.abs( y[1:half_window+1][::-1] - y[0] )
lastvals = y[-1] + np.abs(y[-half_window-1:-1][::-1] - y[-1])
y = np.concatenate((firstvals, y, lastvals))
return np.convolve( m[::-1], y, mode='valid')
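#Illustrative usage (not part of the module): smooth a noisy sine wave with a
#31-point window and a 3rd order polynomial.
def _demo_savitzky_golay():
    t = np.linspace(0, 2*np.pi, 200)
    noisy = np.sin(t) + np.random.normal(0, 0.1, t.size)
    smooth = savitzky_golay(noisy, window_size=31, order=3)
    return t, noisy, smooth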
def Sellmeier(wavelength, B1, C1, B2, C2, B3, C3):
    """Calculates the refractive index of a material given the wavelength in
    microns and the Sellmeier coefficients."""
    nsquared = 1 + B1*wavelength**2/(wavelength**2-C1) + \
                   B2*wavelength**2/(wavelength**2-C2) + \
                   B3*wavelength**2/(wavelength**2-C3)
    return math.sqrt(nsquared)
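#Illustrative usage (Sellmeier coefficients quoted from memory for SCHOTT N-BK7;
#verify against a glass catalogue before relying on them): refractive index at
#the sodium d-line, 0.5876 um, which should come out near 1.517.
def _demo_sellmeier_bk7():
    return Sellmeier(0.5876,
                     1.03961212, 0.00600069867,
                     0.231792344, 0.0200179144,
                     1.01046945, 103.560653)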
#%%
#==============================================================================
# Colormaps
#==============================================================================
cm_data = [[ 5.47748087e-04, 3.06077876e-04, 5.99659709e-04],
[ 1.53644587e-03, 8.42655550e-04, 1.34529274e-03],
[ 3.01784633e-03, 1.58023250e-03, 2.32542855e-03],
[ 4.99694751e-03, 2.49381178e-03, 3.50796236e-03],
[ 7.48481708e-03, 3.56440966e-03, 4.87088052e-03],
[ 1.04960883e-02, 4.77613919e-03, 6.39708202e-03],
[ 1.40319020e-02, 6.12063556e-03, 8.07683812e-03],
[ 1.81005129e-02, 7.58879644e-03, 9.90074451e-03],
[ 2.27332706e-02, 9.16464713e-03, 1.18540110e-02],
[ 2.79272650e-02, 1.08453847e-02, 1.39330790e-02],
[ 3.36872542e-02, 1.26262306e-02, 1.61330271e-02],
[ 4.00368926e-02, 1.44963409e-02, 1.84439042e-02],
[ 4.66104510e-02, 1.64449249e-02, 2.08558583e-02],
[ 5.31296479e-02, 1.84753128e-02, 2.33711881e-02],
[ 5.96057496e-02, 2.05831211e-02, 2.59856788e-02],
[ 6.60845032e-02, 2.27438317e-02, 2.86779070e-02],
[ 7.25294283e-02, 2.49708228e-02, 3.14589446e-02],
[ 7.89435396e-02, 2.72607146e-02, 3.43255418e-02],
[ 8.53596773e-02, 2.95919033e-02, 3.72588722e-02],
[ 9.17638550e-02, 3.19685783e-02, 4.02622187e-02],
[ 9.81451315e-02, 3.43953187e-02, 4.32336100e-02],
[ 1.04526807e-01, 3.68537652e-02, 4.61338626e-02],
[ 1.10912754e-01, 3.93370692e-02, 4.89670842e-02],
[ 1.17281155e-01, 4.18213190e-02, 5.17490035e-02],
[ 1.23649104e-01, 4.42314110e-02, 5.44728326e-02],
[ 1.30032724e-01, 4.65668264e-02, 5.71315659e-02],
[ 1.36402526e-01, 4.88536577e-02, 5.97450927e-02],
[ 1.42772901e-01, 5.10827707e-02, 6.23071284e-02],
[ 1.49165802e-01, 5.32377393e-02, 6.48060493e-02],
[ 1.55547516e-01, 5.53486340e-02, 6.72644498e-02],
[ 1.61933457e-01, 5.74043288e-02, 6.96745252e-02],
[ 1.68344281e-01, 5.93881217e-02, 7.20244566e-02],
[ 1.74745763e-01, 6.13314167e-02, 7.43375755e-02],
[ 1.81158328e-01, 6.32175014e-02, 7.66020013e-02],
[ 1.87593551e-01, 6.50366459e-02, 7.88110281e-02],
[ 1.94020688e-01, 6.68182091e-02, 8.09862510e-02],
[ 2.00469427e-01, 6.85354715e-02, 8.31086225e-02],
[ 2.06933861e-01, 7.01944599e-02, 8.51828223e-02],
[ 2.13391007e-01, 7.18183162e-02, 8.72257169e-02],
[ 2.19884314e-01, 7.33649880e-02, 8.92074450e-02],
[ 2.26381434e-01, 7.48667722e-02, 9.11513888e-02],
[ 2.32877958e-01, 7.63290713e-02, 9.30615611e-02],
[ 2.39417030e-01, 7.77075993e-02, 9.49068218e-02],
[ 2.45949187e-01, 7.90539524e-02, 9.67239312e-02],
[ 2.52503045e-01, 8.03383569e-02, 9.84920739e-02],
[ 2.59078620e-01, 8.15605334e-02, 1.00211332e-01],
[ 2.65647177e-01, 8.27523320e-02, 1.01904143e-01],
[ 2.72264658e-01, 8.38512497e-02, 1.03527192e-01],
[ 2.78877454e-01, 8.49173592e-02, 1.05122465e-01],
[ 2.85511685e-01, 8.59209086e-02, 1.06669513e-01],
[ 2.92169537e-01, 8.68587180e-02, 1.08166407e-01],
[ 2.98823804e-01, 8.77631553e-02, 1.09635484e-01],
[ 3.05527253e-01, 8.85696990e-02, 1.11032921e-01],
[ 3.12221950e-01, 8.93488570e-02, 1.12406908e-01],
[ 3.18954132e-01, 9.00424249e-02, 1.13718186e-01],
[ 3.25693760e-01, 9.06876495e-02, 1.14992114e-01],
[ 3.32453378e-01, 9.12678184e-02, 1.16217615e-01],
[ 3.39237731e-01, 9.17755190e-02, 1.17389984e-01],
[ 3.46027836e-01, 9.22355630e-02, 1.18525855e-01],
[ 3.52856378e-01, 9.26018738e-02, 1.19595002e-01],
[ 3.59679944e-01, 9.29340598e-02, 1.20636788e-01],
[ 3.66551779e-01, 9.31548660e-02, 1.21600901e-01],
[ 3.73411664e-01, 9.33503825e-02, 1.22543527e-01],
[ 3.80325509e-01, 9.34213548e-02, 1.23400666e-01],
[ 3.87224407e-01, 9.34702358e-02, 1.24238410e-01],
[ 3.94178548e-01, 9.33868762e-02, 1.24986526e-01],
[ 4.01118932e-01, 9.32779221e-02, 1.25712996e-01],
[ 4.08111186e-01, 9.30356254e-02, 1.26349950e-01],
[ 4.15095232e-01, 9.27563247e-02, 1.26958059e-01],
[ 4.22122894e-01, 9.23504646e-02, 1.27481649e-01],
[ 4.29152396e-01, 9.18869215e-02, 1.27963592e-01],
[ 4.36212185e-01, 9.13129690e-02, 1.28371575e-01],
[ 4.43288441e-01, 9.06498445e-02, 1.28718807e-01],
[ 4.50376440e-01, 8.99035348e-02, 1.29008926e-01],
[ 4.57500122e-01, 8.90240166e-02, 1.29212149e-01],
[ 4.64621315e-01, 8.80820507e-02, 1.29370675e-01],
[ 4.71782712e-01, 8.69874039e-02, 1.29431311e-01],
[ 4.78953140e-01, 8.57983354e-02, 1.29428443e-01],
[ 4.86129742e-01, 8.45174521e-02, 1.29363265e-01],
[ 4.93347856e-01, 8.30602174e-02, 1.29187411e-01],
[ 5.00568348e-01, 8.15074000e-02, 1.28946985e-01],
[ 5.07795622e-01, 7.98442895e-02, 1.28633086e-01],
[ 5.15061443e-01, 7.79847171e-02, 1.28197778e-01],
[ 5.22331370e-01, 7.60042039e-02, 1.27682677e-01],
[ 5.29606493e-01, 7.38929257e-02, 1.27081517e-01],
[ 5.36898464e-01, 7.16095269e-02, 1.26371292e-01],
[ 5.44210979e-01, 6.91294145e-02, 1.25538480e-01],
[ 5.51527384e-01, 6.64871150e-02, 1.24600175e-01],
[ 5.58847543e-01, 6.36701807e-02, 1.23548101e-01],
[ 5.66170867e-01, 6.06655849e-02, 1.22373321e-01],
[ 5.73496198e-01, 5.74600225e-02, 1.21066243e-01],
[ 5.80823155e-01, 5.40347259e-02, 1.19613893e-01],
[ 5.88150809e-01, 5.03692587e-02, 1.18001618e-01],
[ 5.95468514e-01, 4.64809002e-02, 1.16230934e-01],
[ 6.02771410e-01, 4.23662137e-02, 1.14290578e-01],
[ 6.10053697e-01, 3.80294354e-02, 1.12167999e-01],
[ 6.17315053e-01, 3.36821518e-02, 1.09833536e-01],
[ 6.24540275e-01, 2.94772673e-02, 1.07285752e-01],
[ 6.31717080e-01, 2.54981926e-02, 1.04512903e-01],
[ 6.38830845e-01, 2.18456231e-02, 1.01504057e-01],
[ 6.45869143e-01, 1.86152455e-02, 9.82344630e-02],
[ 6.52820000e-01, 1.59074618e-02, 9.46637537e-02],
[ 6.59646391e-01, 1.39587650e-02, 9.08291206e-02],
[ 6.66337846e-01, 1.28797857e-02, 8.66610902e-02],
[ 6.72852150e-01, 1.29508575e-02, 8.22139934e-02],
[ 6.79165725e-01, 1.43662850e-02, 7.74620521e-02],
[ 6.85245344e-01, 1.73724470e-02, 7.24432935e-02],
[ 6.91064162e-01, 2.21812700e-02, 6.72044508e-02],
[ 6.96604313e-01, 2.89518804e-02, 6.17956273e-02],
[ 7.01858034e-01, 3.77785476e-02, 5.62767487e-02],
[ 7.06828946e-01, 4.81077524e-02, 5.07103160e-02],
[ 7.11530852e-01, 5.87396608e-02, 4.51509246e-02],
[ 7.15984104e-01, 6.94561728e-02, 3.96258938e-02],
[ 7.20211973e-01, 8.01198504e-02, 3.43668144e-02],
[ 7.24237819e-01, 9.06466389e-02, 2.96010227e-02],
[ 7.28087046e-01, 1.00974853e-01, 2.53199758e-02],
[ 7.31777099e-01, 1.11092460e-01, 2.14743407e-02],
[ 7.35324853e-01, 1.20992330e-01, 1.80324568e-02],
[ 7.38749986e-01, 1.30660611e-01, 1.49834554e-02],
[ 7.42058546e-01, 1.40125512e-01, 1.22768458e-02],
[ 7.45268541e-01, 1.49376147e-01, 9.91387703e-03],
[ 7.48380807e-01, 1.58446763e-01, 7.85197006e-03],
[ 7.51411961e-01, 1.67326772e-01, 6.09909211e-03],
[ 7.54360108e-01, 1.76048924e-01, 4.62255966e-03],
[ 7.57235438e-01, 1.84613826e-01, 3.42361090e-03],
[ 7.60041495e-01, 1.93035357e-01, 2.49239069e-03],
[ 7.62779423e-01, 2.01329553e-01, 1.81912945e-03],
[ 7.65457394e-01, 2.09496673e-01, 1.40868712e-03],
[ 7.68075387e-01, 2.17551534e-01, 1.25464458e-03],
[ 7.70634609e-01, 2.25504578e-01, 1.35552563e-03],
[ 7.73139048e-01, 2.33360100e-01, 1.71537405e-03],
[ 7.75591948e-01, 2.41122972e-01, 2.33824343e-03],
[ 7.77992056e-01, 2.48804382e-01, 3.22460569e-03],
[ 7.80340998e-01, 2.56409762e-01, 4.37981012e-03],
[ 7.82640876e-01, 2.63943166e-01, 5.81064704e-03],
[ 7.84894356e-01, 2.71407361e-01, 7.52509413e-03],
[ 7.87100025e-01, 2.78810409e-01, 9.52884640e-03],
[ 7.89258842e-01, 2.86156297e-01, 1.18306809e-02],
[ 7.91371647e-01, 2.93448741e-01, 1.44401503e-02],
[ 7.93439582e-01, 3.00690679e-01, 1.73677654e-02],
[ 7.95463974e-01, 3.07884524e-01, 2.06247965e-02],
[ 7.97443965e-01, 3.15035091e-01, 2.42224298e-02],
[ 7.99380049e-01, 3.22145136e-01, 2.81733908e-02],
[ 8.01272655e-01, 3.29217234e-01, 3.24912000e-02],
[ 8.03122155e-01, 3.36253788e-01, 3.71901909e-02],
[ 8.04929101e-01, 3.43256788e-01, 4.22313575e-02],
[ 8.06693679e-01, 3.50228445e-01, 4.73204319e-02],
[ 8.08415707e-01, 3.57171169e-01, 5.24362150e-02],
[ 8.10095390e-01, 3.64086767e-01, 5.75815505e-02],
[ 8.11732900e-01, 3.70976931e-01, 6.27592531e-02],
[ 8.13328386e-01, 3.77843239e-01, 6.79721011e-02],
[ 8.14881914e-01, 3.84687219e-01, 7.32228505e-02],
[ 8.16393162e-01, 3.91510654e-01, 7.85143534e-02],
[ 8.17862434e-01, 3.98314597e-01, 8.38492479e-02],
[ 8.19289808e-01, 4.05100228e-01, 8.92301944e-02],
[ 8.20675356e-01, 4.11868642e-01, 9.46598270e-02],
[ 8.22019147e-01, 4.18620850e-01, 1.00140759e-01],
[ 8.23321076e-01, 4.25357937e-01, 1.05675673e-01],
[ 8.24579971e-01, 4.32081810e-01, 1.11267807e-01],
[ 8.25797022e-01, 4.38792265e-01, 1.16919256e-01],
[ 8.26972318e-01, 4.45490004e-01, 1.22632628e-01],
[ 8.28105969e-01, 4.52175661e-01, 1.28410536e-01],
[ 8.29198106e-01, 4.58849799e-01, 1.34255600e-01],
[ 8.30246845e-01, 4.65514496e-01, 1.40171719e-01],
[ 8.31253477e-01, 4.72169257e-01, 1.46160961e-01],
[ 8.32218825e-01, 4.78813958e-01, 1.52225658e-01],
[ 8.33143166e-01, 4.85448877e-01, 1.58368510e-01],
[ 8.34026337e-01, 4.92074575e-01, 1.64592581e-01],
[ 8.34865726e-01, 4.98693308e-01, 1.70902788e-01],
[ 8.35664916e-01, 5.05302826e-01, 1.77299785e-01],
[ 8.36424449e-01, 5.11903106e-01, 1.83786355e-01],
[ 8.37144349e-01, 5.18494457e-01, 1.90365756e-01],
[ 8.37821386e-01, 5.25079315e-01, 1.97043949e-01],
[ 8.38460593e-01, 5.31654527e-01, 2.03820580e-01],
[ 8.39062934e-01, 5.38219741e-01, 2.10698438e-01],
[ 8.39625786e-01, 5.44776874e-01, 2.17683451e-01],
[ 8.40151464e-01, 5.51324619e-01, 2.24777630e-01],
[ 8.40643694e-01, 5.57860868e-01, 2.31981740e-01],
[ 8.41100710e-01, 5.64386920e-01, 2.39301446e-01],
[ 8.41523874e-01, 5.70902112e-01, 2.46739858e-01],
[ 8.41918660e-01, 5.77403408e-01, 2.54296285e-01],
[ 8.42282318e-01, 5.83892628e-01, 2.61977819e-01],
[ 8.42619551e-01, 5.90367262e-01, 2.69784618e-01],
[ 8.42934948e-01, 5.96824995e-01, 2.77716799e-01],
[ 8.43224874e-01, 6.03268082e-01, 2.85782909e-01],
[ 8.43499590e-01, 6.09691202e-01, 2.93977283e-01],
[ 8.43757426e-01, 6.16095589e-01, 3.02306589e-01],
[ 8.44004819e-01, 6.22478199e-01, 3.10768886e-01],
[ 8.44245148e-01, 6.28837705e-01, 3.19365364e-01],
[ 8.44481740e-01, 6.35172863e-01, 3.28097209e-01],
[ 8.44721292e-01, 6.41480849e-01, 3.36961479e-01],
[ 8.44966060e-01, 6.47761126e-01, 3.45960220e-01],
[ 8.45224116e-01, 6.54010452e-01, 3.55088136e-01],
[ 8.45498073e-01, 6.60228304e-01, 3.64346261e-01],
[ 8.45795977e-01, 6.66411737e-01, 3.73728323e-01],
[ 8.46121480e-01, 6.72559954e-01, 3.83233168e-01],
[ 8.46481807e-01, 6.78670670e-01, 3.92854340e-01],
[ 8.46881608e-01, 6.84742875e-01, 4.02588183e-01],
[ 8.47327050e-01, 6.90775020e-01, 4.12428237e-01],
[ 8.47823192e-01, 6.96766155e-01, 4.22368839e-01],
[ 8.48375288e-01, 7.02715365e-01, 4.32403246e-01],
[ 8.48987971e-01, 7.08622093e-01, 4.42524818e-01],
[ 8.49665854e-01, 7.14485900e-01, 4.52726101e-01],
[ 8.50412432e-01, 7.20306865e-01, 4.63000588e-01],
[ 8.51231805e-01, 7.26084924e-01, 4.73339956e-01],
[ 8.52126044e-01, 7.31820833e-01, 4.83738473e-01],
[ 8.53098501e-01, 7.37514940e-01, 4.94187483e-01],
[ 8.54150182e-01, 7.43168472e-01, 5.04681709e-01],
[ 8.55282963e-01, 7.48782381e-01, 5.15213725e-01],
[ 8.56497845e-01, 7.54357945e-01, 5.25777086e-01],
[ 8.57794001e-01, 7.59897041e-01, 5.36368355e-01],
[ 8.59174105e-01, 7.65400439e-01, 5.46976883e-01],
[ 8.60633354e-01, 7.70871219e-01, 5.57606383e-01],
[ 8.62177083e-01, 7.76309374e-01, 5.68239986e-01],
[ 8.63798762e-01, 7.81718409e-01, 5.78884679e-01],
[ 8.65499273e-01, 7.87099604e-01, 5.89532223e-01],
[ 8.67278601e-01, 7.92454507e-01, 6.00175897e-01],
[ 8.69131487e-01, 7.97786038e-01, 6.10820444e-01],
[ 8.71059544e-01, 8.03095231e-01, 6.21455565e-01],
[ 8.73060264e-01, 8.08384190e-01, 6.32080042e-01],
[ 8.75129420e-01, 8.13655355e-01, 6.42696917e-01],
[ 8.77266957e-01, 8.18910094e-01, 6.53299775e-01],
[ 8.79472552e-01, 8.24149834e-01, 6.63882794e-01],
[ 8.81741135e-01, 8.29377026e-01, 6.74451794e-01],
[ 8.84071022e-01, 8.34593269e-01, 6.85004878e-01],
[ 8.86462613e-01, 8.39799688e-01, 6.95535194e-01],
[ 8.88914103e-01, 8.44997846e-01, 7.06041561e-01],
[ 8.91422030e-01, 8.50189563e-01, 7.16527439e-01],
[ 8.93985071e-01, 8.55376161e-01, 7.26991348e-01],
[ 8.96602050e-01, 8.60558878e-01, 7.37431857e-01],
[ 8.99273599e-01, 8.65738575e-01, 7.47843231e-01],
[ 9.01998246e-01, 8.70916447e-01, 7.58225729e-01],
[ 9.04774179e-01, 8.76093663e-01, 7.68581168e-01],
[ 9.07601077e-01, 8.81271057e-01, 7.78908134e-01],
[ 9.10478923e-01, 8.86449336e-01, 7.89205145e-01],
[ 9.13408065e-01, 8.91629065e-01, 7.99470615e-01],
[ 9.16389285e-01, 8.96810645e-01, 8.09702806e-01],
[ 9.19423884e-01, 9.01994289e-01, 8.19899754e-01],
[ 9.22514634e-01, 9.07179878e-01, 8.30056811e-01],
[ 9.25663625e-01, 9.12367240e-01, 8.40172912e-01],
[ 9.28874059e-01, 9.17555864e-01, 8.50245916e-01],
[ 9.32150713e-01, 9.22744823e-01, 8.60271610e-01],
[ 9.35499755e-01, 9.27932781e-01, 8.70244531e-01],
[ 9.38929005e-01, 9.33117947e-01, 8.80157433e-01],
[ 9.42448176e-01, 9.38298064e-01, 8.90000542e-01],
[ 9.46068952e-01, 9.43470490e-01, 8.99760543e-01],
[ 9.49804691e-01, 9.48632448e-01, 9.09419325e-01],
[ 9.53669300e-01, 9.53781607e-01, 9.18952702e-01],
[ 9.57674688e-01, 9.58917154e-01, 9.28329735e-01],
[ 9.61826200e-01, 9.64041451e-01, 9.37513981e-01],
[ 9.66116261e-01, 9.69161984e-01, 9.46468608e-01],
[ 9.70518526e-01, 9.74292574e-01, 9.55166618e-01],
[ 9.74987153e-01, 9.79452262e-01, 9.63603926e-01],
[ 9.79465063e-01, 9.84661130e-01, 9.71808036e-01],
[ 9.83898767e-01, 9.89934801e-01, 9.79834651e-01],
[ 9.88251024e-01, 9.95281008e-01, 9.87752805e-01]]
physics_heat = ListedColormap(cm_data, name='physics_heat')
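# Hedged usage sketch for the colormap above; assumes matplotlib is available
# in this environment (ListedColormap is already imported for `physics_heat`).
def _example_physics_heat_plot():
    import matplotlib.pyplot as plt
    data = np.random.rand(64, 64)
    fig, ax = plt.subplots()
    im = ax.imshow(data, cmap=physics_heat)
    fig.colorbar(im, ax=ax)
    return fig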
|
|
from __future__ import print_function
from future import standard_library
standard_library.install_aliases()
from builtins import str
from datetime import datetime
import logging
from urllib.parse import urlparse
from time import sleep
from airflow import hooks, settings
from airflow.models import BaseOperator, TaskInstance, Connection as DB
from airflow.hooks import BaseHook
from airflow.utils import State
from airflow.utils import (
apply_defaults, AirflowException, AirflowSensorTimeout)
class BaseSensorOperator(BaseOperator):
'''
    Sensor operators are derived from this class and inherit these attributes.
    Sensor operators keep executing at a time interval and succeed when
    a criterion is met and fail if and when they time out.
    :param poke_interval: Time in seconds that the job should wait in
        between each try
    :type poke_interval: int
    :param timeout: Time, in seconds, before the task times out and fails.
    :type timeout: int
'''
ui_color = '#e6f1f2'
@apply_defaults
def __init__(
self,
poke_interval=60,
timeout=60*60*24*7,
*args, **kwargs):
super(BaseSensorOperator, self).__init__(*args, **kwargs)
self.poke_interval = poke_interval
self.timeout = timeout
def poke(self, context):
'''
Function that the sensors defined while deriving this class should
override.
'''
raise AirflowException('Override me.')
def execute(self, context):
started_at = datetime.now()
while not self.poke(context):
sleep(self.poke_interval)
            if (datetime.now() - started_at).total_seconds() > self.timeout:
raise AirflowSensorTimeout('Snap. Time is OUT.')
logging.info("Success criteria met. Exiting.")
class SqlSensor(BaseSensorOperator):
"""
    Runs a sql statement until a criterion is met. It will keep retrying while
    the sql returns no rows, or while the first cell is in (0, '0', '').
    :param conn_id: The connection to run the sensor against
    :type conn_id: string
    :param sql: The sql to run. To pass, it needs to return at least one cell
        that contains a non-zero / non-empty string value.
"""
template_fields = ('sql',)
template_ext = ('.hql', '.sql',)
@apply_defaults
def __init__(self, conn_id, sql, *args, **kwargs):
self.sql = sql
self.conn_id = conn_id
super(SqlSensor, self).__init__(*args, **kwargs)
def poke(self, context):
hook = BaseHook.get_connection(self.conn_id).get_hook()
logging.info('Poking: ' + self.sql)
records = hook.get_records(self.sql)
        if not records:
            return False
        # Succeed only when the first cell of the first row is truthy.
        return str(records[0][0]) not in ('0', '')
class MetastorePartitionSensor(SqlSensor):
"""
    An alternative to the HivePartitionSensor that talks directly to the
    MySQL db. This was created as a result of observing suboptimal
    queries generated by the Metastore thrift service when hitting
    subpartitioned tables. The Thrift service's queries were written in a
    way that wouldn't leverage the indexes.
:param schema: the schema
:type schema: str
:param table: the table
:type table: str
:param partition_name: the partition name, as defined in the PARTITIONS
table of the Metastore. Order of the fields does matter.
Examples: ``ds=2016-01-01`` or
``ds=2016-01-01/sub=foo`` for a sub partitioned table
:type partition_name: str
"""
template_fields = ('partition_name', 'table', 'schema')
@apply_defaults
def __init__(
self, table, partition_name, schema="default",
mysql_conn_id="metastore_mysql",
*args, **kwargs):
self.partition_name = partition_name
self.table = table
self.schema = schema
self.first_poke = True
self.conn_id = mysql_conn_id
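        # Intentionally invoke BaseSensorOperator.__init__ (bypassing SqlSensor's),
        # since self.sql is only built lazily on the first poke below.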
super(SqlSensor, self).__init__(*args, **kwargs)
def poke(self, context):
if self.first_poke:
self.first_poke = False
if '.' in self.table:
self.schema, self.table = self.table.split('.')
self.sql = """
SELECT 'X'
FROM PARTITIONS A0
LEFT OUTER JOIN TBLS B0 ON A0.TBL_ID = B0.TBL_ID
LEFT OUTER JOIN DBS C0 ON B0.DB_ID = C0.DB_ID
WHERE
B0.TBL_NAME = '{self.table}' AND
C0.NAME = '{self.schema}' AND
A0.PART_NAME = '{self.partition_name}';
""".format(self=self)
return super(MetastorePartitionSensor, self).poke(context)
class ExternalTaskSensor(BaseSensorOperator):
"""
Waits for a task to complete in a different DAG
:param external_dag_id: The dag_id that contains the task you want to
wait for
:type external_dag_id: string
:param external_task_id: The task_id that contains the task you want to
wait for
:type external_task_id: string
:param allowed_states: list of allowed states, default is ``['success']``
:type allowed_states: list
:param execution_delta: time difference with the previous execution to
look at, the default is the same execution_date as the current task.
For yesterday, use [positive!] datetime.timedelta(days=1)
:type execution_delta: datetime.timedelta
"""
@apply_defaults
def __init__(
self,
external_dag_id,
external_task_id,
allowed_states=None,
execution_delta=None,
*args, **kwargs):
super(ExternalTaskSensor, self).__init__(*args, **kwargs)
self.allowed_states = allowed_states or [State.SUCCESS]
self.execution_delta = execution_delta
self.external_dag_id = external_dag_id
self.external_task_id = external_task_id
def poke(self, context):
if self.execution_delta:
dttm = context['execution_date'] - self.execution_delta
else:
dttm = context['execution_date']
logging.info(
'Poking for '
'{self.external_dag_id}.'
'{self.external_task_id} on '
'{dttm} ... '.format(**locals()))
TI = TaskInstance
session = settings.Session()
count = session.query(TI).filter(
TI.dag_id == self.external_dag_id,
TI.task_id == self.external_task_id,
TI.state.in_(self.allowed_states),
TI.execution_date == dttm,
).count()
session.commit()
session.close()
return count
class HivePartitionSensor(BaseSensorOperator):
"""
Waits for a partition to show up in Hive
:param table: The name of the table to wait for, supports the dot
notation (my_database.my_table)
:type table: string
:param partition: The partition clause to wait for. This is passed as
is to the Metastore Thrift client "get_partitions_by_filter" method,
        and apparently supports SQL-like notation as in `ds='2015-01-01'
        AND type='value'` and > < signs as in "ds>=2015-01-01"
:type partition: string
:param metastore_conn_id: reference to the metastore thrift service
connection id
:type metastore_conn_id: str
"""
template_fields = ('schema', 'table', 'partition',)
@apply_defaults
def __init__(
self,
table, partition="ds='{{ ds }}'",
metastore_conn_id='metastore_default',
schema='default',
poke_interval=60*3,
*args, **kwargs):
super(HivePartitionSensor, self).__init__(
poke_interval=poke_interval, *args, **kwargs)
if not partition:
partition = "ds='{{ ds }}'"
self.metastore_conn_id = metastore_conn_id
self.table = table
self.partition = partition
self.schema = schema
def poke(self, context):
if '.' in self.table:
self.schema, self.table = self.table.split('.')
logging.info(
'Poking for table {self.schema}.{self.table}, '
'partition {self.partition}'.format(**locals()))
if not hasattr(self, 'hook'):
self.hook = hooks.HiveMetastoreHook(
metastore_conn_id=self.metastore_conn_id)
return self.hook.check_for_partition(
self.schema, self.table, self.partition)
class HdfsSensor(BaseSensorOperator):
"""
Waits for a file or folder to land in HDFS
"""
template_fields = ('filepath',)
@apply_defaults
def __init__(
self,
filepath,
hdfs_conn_id='hdfs_default',
*args, **kwargs):
super(HdfsSensor, self).__init__(*args, **kwargs)
self.filepath = filepath
self.hdfs_conn_id = hdfs_conn_id
def poke(self, context):
sb = hooks.HDFSHook(self.hdfs_conn_id).get_conn()
logging.getLogger("snakebite").setLevel(logging.WARNING)
logging.info(
'Poking for file {self.filepath} '.format(**locals()))
        try:
            # ls() returns a generator; force evaluation so a missing path raises.
            list(sb.ls([self.filepath]))
        except Exception:
            return False
        return True
class WebHdfsSensor(BaseSensorOperator):
"""
Waits for a file or folder to land in HDFS
"""
template_fields = ('filepath',)
@apply_defaults
def __init__(
self,
filepath,
webhdfs_conn_id='webhdfs_default',
*args, **kwargs):
super(WebHdfsSensor, self).__init__(*args, **kwargs)
self.filepath = filepath
        self.webhdfs_conn_id = webhdfs_conn_id
def poke(self, context):
c = hooks.WebHDFSHook(self.webhdfs_conn_id).get_conn()
logging.info(
'Poking for file {self.filepath} '.format(**locals()))
return c.check_for_path(hdfs_path=self.filepath)
class S3KeySensor(BaseSensorOperator):
"""
    Waits for a key (a file-like instance on S3) to be present in an S3 bucket.
    S3 being a key/value store, it does not support folders. The path is just
    a key to a resource.
:param bucket_key: The key being waited on. Supports full s3:// style url
or relative path from root level.
:type bucket_key: str
:param bucket_name: Name of the S3 bucket
:type bucket_name: str
:param wildcard_match: whether the bucket_key should be interpreted as a
Unix wildcard pattern
:type wildcard_match: bool
:param s3_conn_id: a reference to the s3 connection
:type s3_conn_id: str
"""
template_fields = ('bucket_key', 'bucket_name')
@apply_defaults
def __init__(
self, bucket_key,
bucket_name=None,
wildcard_match=False,
s3_conn_id='s3_default',
*args, **kwargs):
super(S3KeySensor, self).__init__(*args, **kwargs)
session = settings.Session()
db = session.query(DB).filter(DB.conn_id == s3_conn_id).first()
if not db:
raise AirflowException("conn_id doesn't exist in the repository")
# Parse
if bucket_name is None:
parsed_url = urlparse(bucket_key)
if parsed_url.netloc == '':
raise AirflowException('Please provide a bucket_name')
else:
bucket_name = parsed_url.netloc
if parsed_url.path[0] == '/':
bucket_key = parsed_url.path[1:]
else:
bucket_key = parsed_url.path
self.bucket_name = bucket_name
self.bucket_key = bucket_key
self.wildcard_match = wildcard_match
self.s3_conn_id = s3_conn_id
session.commit()
session.close()
def poke(self, context):
hook = hooks.S3Hook(s3_conn_id=self.s3_conn_id)
full_url = "s3://" + self.bucket_name + "/" + self.bucket_key
logging.info('Poking for key : {full_url}'.format(**locals()))
if self.wildcard_match:
return hook.check_for_wildcard_key(self.bucket_key,
self.bucket_name)
else:
return hook.check_for_key(self.bucket_key, self.bucket_name)
class S3PrefixSensor(BaseSensorOperator):
"""
    Waits for a prefix to exist. A prefix is the first part of a key,
    thus enabling checking of constructs similar to glob airfl* or
    SQL LIKE 'airfl%'. You can specify a delimiter to indicate the
    hierarchy of keys, meaning that the match will stop at that
    delimiter. Current code accepts sane delimiters, i.e. characters that
    are NOT special characters in the Python regex engine.
:param bucket_name: Name of the S3 bucket
:type bucket_name: str
:param prefix: The prefix being waited on. Relative path from bucket root level.
:type prefix: str
:param delimiter: The delimiter intended to show hierarchy.
Defaults to '/'.
:type delimiter: str
"""
template_fields = ('prefix', 'bucket_name')
@apply_defaults
def __init__(
self, bucket_name,
prefix, delimiter='/',
s3_conn_id='s3_default',
*args, **kwargs):
super(S3PrefixSensor, self).__init__(*args, **kwargs)
session = settings.Session()
db = session.query(DB).filter(DB.conn_id == s3_conn_id).first()
if not db:
raise AirflowException("conn_id doesn't exist in the repository")
# Parse
self.bucket_name = bucket_name
self.prefix = prefix
self.delimiter = delimiter
self.full_url = "s3://" + bucket_name + '/' + prefix
self.s3_conn_id = s3_conn_id
session.commit()
session.close()
def poke(self, context):
logging.info('Poking for prefix : {self.prefix}\n'
'in bucket s3://{self.bucket_name}'.format(**locals()))
hook = hooks.S3Hook(s3_conn_id=self.s3_conn_id)
return hook.check_for_prefix(
prefix=self.prefix,
delimiter=self.delimiter,
bucket_name=self.bucket_name)
class TimeSensor(BaseSensorOperator):
"""
Waits until the specified time of the day.
:param target_time: time after which the job succeeds
:type target_time: datetime.time
"""
template_fields = tuple()
@apply_defaults
def __init__(self, target_time, *args, **kwargs):
super(TimeSensor, self).__init__(*args, **kwargs)
self.target_time = target_time
def poke(self, context):
logging.info(
'Checking if the time ({0}) has come'.format(self.target_time))
return datetime.now().time() > self.target_time
class TimeDeltaSensor(BaseSensorOperator):
"""
Waits for a timedelta after the task's execution_date + schedule_interval.
In Airflow, the daily task stamped with ``execution_date``
2016-01-01 can only start running on 2016-01-02. The timedelta here
represents the time after the execution period has closed.
:param delta: time length to wait after execution_date before succeeding
:type delta: datetime.timedelta
"""
template_fields = tuple()
@apply_defaults
def __init__(self, delta, *args, **kwargs):
super(TimeDeltaSensor, self).__init__(*args, **kwargs)
self.delta = delta
def poke(self, context):
dag = context['dag']
target_dttm = dag.following_schedule(context['execution_date'])
target_dttm += self.delta
logging.info('Checking if the time ({0}) has come'.format(target_dttm))
return datetime.now() > target_dttm
class HttpSensor(BaseSensorOperator):
"""
    Executes an HTTP GET request and returns False on failure:
    404 Not Found or the response_check function returning False
:param http_conn_id: The connection to run the sensor against
:type http_conn_id: string
:param endpoint: The relative part of the full url
:type endpoint: string
:param params: The parameters to be added to the GET url
:type params: a dictionary of string key/value pairs
:param headers: The HTTP headers to be added to the GET request
:type headers: a dictionary of string key/value pairs
:param response_check: A check against the 'requests' response object.
Returns True for 'pass' and False otherwise.
:type response_check: A lambda or defined function.
:param extra_options: Extra options for the 'requests' library, see the
'requests' documentation (options to modify timeout, ssl, etc.)
:type extra_options: A dictionary of options, where key is string and value
depends on the option that's being modified.
"""
template_fields = ('endpoint',)
@apply_defaults
def __init__(self,
endpoint,
http_conn_id='http_default',
params=None,
headers=None,
response_check=None,
extra_options=None, *args, **kwargs):
super(HttpSensor, self).__init__(*args, **kwargs)
self.endpoint = endpoint
self.http_conn_id = http_conn_id
self.params = params or {}
self.headers = headers or {}
self.extra_options = extra_options or {}
self.response_check = response_check
self.hook = hooks.HttpHook(method='GET', http_conn_id=http_conn_id)
def poke(self, context):
logging.info('Poking: ' + self.endpoint)
try:
response = self.hook.run(self.endpoint,
data=self.params,
headers=self.headers,
extra_options=self.extra_options)
if self.response_check:
# run content check on response
return self.response_check(response)
        except AirflowException as ae:
            if str(ae).startswith("404"):
                return False
            raise
        return True
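# Hedged usage sketch (not part of this module): wiring two of the sensors
# above into a DAG. The dag/task ids, connection id and endpoint are
# illustrative only.
def _example_sensor_dag():
    from airflow.models import DAG
    dag = DAG('example_sensor_dag', start_date=datetime(2016, 1, 1))
    wait_for_upstream = ExternalTaskSensor(
        task_id='wait_for_upstream',
        external_dag_id='upstream_dag',
        external_task_id='final_task',
        dag=dag)
    check_endpoint = HttpSensor(
        task_id='check_endpoint',
        http_conn_id='http_default',
        endpoint='health',
        poke_interval=30,
        dag=dag)
    check_endpoint.set_upstream(wait_for_upstream)
    return dag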
|
|
import os
import time
import traceback
import xmlrpclib
from copy import copy
from celery import chain, task
from django.core.cache import cache
from django.template.loader import render_to_string
from django.utils import timezone
from functools import wraps
from empressx import states
from empressx.retinue.conf import settings
from empressx.retinue.utils import localcommand, virtualenvcommand
def task_tracker(func):
@wraps(func)
def wrapper(target):
if target:
app_info, uuid = target
rpc_client = xmlrpclib.Server(settings.EMPRESS_SERVICE_URL)
rpc_client.private.callback({
'uuid': uuid,
'state': states.STARTED,
'data': func.__name__,
})
try:
result = func(target)
except:
rpc_client.private.callback({
'uuid': uuid,
'state': states.FAILURE,
'data': func.__name__,
'ex_data': traceback.format_exc(),
})
else:
return result
return wrapper
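# Illustrative sketch (hypothetical task, not referenced by the chains below):
# a new deployment step follows the same @task + @task_tracker pattern,
# receives the (app_info, uuid) pair and returns it for the next link.
@task
@task_tracker
def collect_static(target):
    app_info, uuid = target
    app_name = app_info['app_name']
    virtualenv = app_info.get('virtualenv', settings.DEFAULT_VIRTUALENV_NAME)
    project_home = '%s/%s' % (settings.RETINUE_APP_HOME, app_name)
    virtualenvcommand("""
        workon %(virtualenv)s
        cd %(project_home)s
        python manage.py collectstatic --noinput
    """ % locals())
    return target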
@task(ignore_result=True)
@task_tracker
def end(target):
app_info, uuid = target
rpc_client = xmlrpclib.Server(settings.EMPRESS_SERVICE_URL)
rpc_client.private.callback({
'uuid': uuid,
'state': states.SUCCESS,
'data': 'Done.',
})
@task
@task_tracker
def delete_uwsgi_config(target):
app_info, uuid = target
app_name = app_info['app_name']
conf_home = settings.RETINUE_VASSAL_HOME
localcommand("rm -f %(conf_home)s/%(app_name)s.ini" % locals())
return target
@task
@task_tracker
def delete_nginx_config(target):
app_info, uuid = target
app_name = app_info['app_name']
conf_home = settings.RETINUE_NGINX_STATICFILE_HOME
localcommand("rm -f %(conf_home)s/%(app_name)s.ini" % locals())
return target
@task
@task_tracker
def provide_virtualenv(target):
app_info, uuid = target
app_name = app_info['app_name']
virtualenv = app_info.get('virtualenv', settings.DEFAULT_VIRTUALENV_NAME)
# mkvirtualenv if not exist
if virtualenv not in virtualenvcommand('lsvirtualenv -b').splitlines():
virtualenvcommand('mkvirtualenv %s' % virtualenv)
project_home = '%s/%s' % (settings.RETINUE_APP_HOME, app_name)
# mkdir project_home if not exist
if not os.path.exists(project_home):
os.makedirs(project_home)
if virtualenv == app_name:
virtualenvcommand('workon %s && cd %s && setvirtualenvproject $VIRTUAL_ENV `pwd`' % (virtualenv, project_home))
return target
@task
@task_tracker
def pull_source_code(target):
app_info, uuid = target
app_name = app_info['app_name']
project_home = '%s/%s' % (settings.RETINUE_APP_HOME, app_name)
vcs_type = app_info.get('vcs', {}).get('type')
vcs_path = app_info.get('vcs', {}).get('path')
vcs_username = app_info.get('vcs', {}).get('username')
vcs_password = app_info.get('vcs', {}).get('password')
cmd = """
export LC_CTYPE=zh_CN.utf8
if [ -d %(project_home)s/.svn ]
then
cd %(project_home)s
svn cleanup --non-interactive --username=%(vcs_username)s --password=%(vcs_password)s
svn update --non-interactive --username=%(vcs_username)s --password=%(vcs_password)s
else
cd %(project_home)s
svn checkout %(vcs_path)s/ . --non-interactive --no-auth-cache --username=%(vcs_username)s --password=%(vcs_password)s
fi
if [ "${DJANGO_CONF_MODULE}" = "conf.testing" ]
then
fp_root_dir="/data/BKTest_App_FilePool"
elif [ "${DJANGO_CONF_MODULE}" = "conf.production" ]
then
fp_root_dir="/data/BK_App_FilePool"
fi
if [ ! -z "${fp_root_dir}" -a -d ${fp_root_dir} ]
then
fpdir="${fp_root_dir}/%(app_name)s"
[ -d ${fpdir} ] || install -d ${fpdir} -m 1777
if [ $? -eq 0 ]
then
cd %(project_home)s
[ -L USERRES ] || ln -s ${fpdir} USERRES
fi
fi
""" % locals()
    print(cmd)
localcommand(cmd)
return target
@task
@task_tracker
def install_requirements(target):
app_info, uuid = target
app_name = app_info['app_name']
virtualenv = app_info.get('virtualenv', settings.DEFAULT_VIRTUALENV_NAME)
if virtualenv == app_name:
requirements = app_info.get('requirements', 'requirements.txt')
project_home = '%s/%s' % (settings.RETINUE_APP_HOME, app_name)
virtualenvcommand("""
workon %(virtualenv)s
cd %(project_home)s
if [ -f %(requirements)s ]
then
pip install --upgrade -r %(requirements)s
fi
""" % locals())
return target
@task
@task_tracker
def syncdb_and_migrate(target):
app_info, uuid = target
app_name = app_info['app_name']
virtualenv = app_info.get('virtualenv', settings.DEFAULT_VIRTUALENV_NAME)
project_home = '%s/%s' % (settings.RETINUE_APP_HOME, app_name)
virtualenvcommand("""
workon %(virtualenv)s
cd %(project_home)s
python manage.py syncdb --noinput
python manage.py migrate --noinput
python manage.py createcachetable django_cache > /dev/null 2>&1 || echo
""" % locals())
return target
@task
@task_tracker
def render_uwsgi_config(target):
app_info, uuid = target
app_name = app_info['app_name']
context = copy(app_info)
context.update({
'virtualenv': app_info.get('virtualenv', settings.DEFAULT_VIRTUALENV_NAME),
'RETINUE_HOME': settings.RETINUE_HOME,
'RETINUE_WORKON_HOME': settings.RETINUE_WORKON_HOME,
'RETINUE_APP_HOME': settings.RETINUE_APP_HOME,
})
rendered = render_to_string('empressx/retinue/uwsgi.ini', context)
with open('%s/%s.ini' % (settings.RETINUE_VASSAL_HOME, app_name), 'w') as f:
f.write(rendered)
return target
@task
@task_tracker
def render_nginx_config(target):
app_info, uuid = target
app_name = app_info['app_name']
context = copy(app_info)
context.update({
'project_home': '%s/%s' % (settings.RETINUE_APP_HOME, app_name),
})
rendered = render_to_string('empressx/retinue/staticfile.conf', context)
with open('%s/%s.conf' % (settings.RETINUE_NGINX_STATICFILE_HOME, app_name), 'w') as f:
f.write(rendered)
return target
@task
@task_tracker
def reload_nginx(target):
cache.set('nginx_last_reload_request', time.mktime(timezone.now().timetuple()))
_reload_nginx.apply_async(countdown=5)
return target
@task(ignore_result=True)
def _reload_nginx():
last_reload_request = cache.get('nginx_last_reload_request')
now = time.mktime(timezone.now().timetuple())
if not last_reload_request or now - last_reload_request >= 5:
nginx_path = settings.RETINUE_NGINX_PATH
conf = settings.RETINUE_NGINX_CONF if settings.RETINUE_NGINX_CONF else 'conf/nginx.conf'
localcommand("%(nginx_path)s -s reload -c %(conf)s" % locals())
@task
@task_tracker
def render_supervisor_config(target):
app_info, uuid = target
app_name = app_info['app_name']
virtualenv = app_info.get('virtualenv', settings.DEFAULT_VIRTUALENV_NAME)
context = copy(app_info)
context.update({
'virtualenv': virtualenv,
'RETINUE_HOME': settings.RETINUE_HOME,
'RETINUE_WORKON_HOME': settings.RETINUE_WORKON_HOME,
'RETINUE_APP_HOME': settings.RETINUE_APP_HOME,
})
rendered = render_to_string('empressx/retinue/supervisord.conf', context)
etc_home = os.path.join(settings.RETINUE_WORKON_HOME, '%s/etc' % virtualenv)
# mkdir etc_home if not exist
if not os.path.exists(etc_home):
os.makedirs(etc_home)
with open('%s/%s.conf' % (etc_home, app_name), 'w') as f:
f.write(rendered)
return target
@task
@task_tracker
def reload_supervisor(target):
app_info, uuid = target
app_name = app_info['app_name']
virtualenv = app_info.get('virtualenv', settings.DEFAULT_VIRTUALENV_NAME)
cache.set('%s_supervisor_last_reload_request' % app_name,
time.mktime(timezone.now().timetuple()))
_reload_supervisor.apply_async(args=[target], countdown=30)
return target
@task(ignore_result=True)
def _reload_supervisor(target):
app_info, uuid = target
app_name = app_info['app_name']
virtualenv = app_info.get('virtualenv', settings.DEFAULT_VIRTUALENV_NAME)
virtualenv_home = '%s/%s' % (settings.RETINUE_WORKON_HOME, virtualenv)
retinue_home = settings.RETINUE_HOME
last_reload_request = cache.get('%s_supervisor_last_reload_request' % app_name)
now = time.mktime(timezone.now().timetuple())
if not last_reload_request or now - last_reload_request >= 30:
virtualenvcommand("""
workon %(virtualenv)s
cd %(virtualenv_home)s
if [ -e %(retinue_home)s/var/run/%(app_name)s_supervisord.pid ]
then
supervisorctl -c etc/%(app_name)s.conf shutdown
while true
do
if [ -e %(retinue_home)s/var/run/%(app_name)s_supervisord.sock ]
then
sleep 1
else
break
fi
done
fi
supervisord -c etc/%(app_name)s.conf
""" % locals())
@task(ignore_result=True)
def reserve(app_name, uuid):
chain(provide_virtualenv.s(app_name),
pull_source_code.s(),
install_requirements.s()).apply_async()
@task(ignore_result=True)
def serve(app_name, uuid):
client = xmlrpclib.Server(settings.EMPRESS_SERVICE_URL)
app_info = client.private.app_info(app_name)
if app_info.get('celery', {}).get('enabled'):
chain(
provide_virtualenv.s((app_info, uuid)),
pull_source_code.s(),
install_requirements.s(),
syncdb_and_migrate.s(),
render_uwsgi_config.s(),
render_nginx_config.s(),
reload_nginx.s(),
render_supervisor_config.s(),
reload_supervisor.s(),
end.s(),
).apply_async()
else:
chain(
provide_virtualenv.s((app_info, uuid)),
pull_source_code.s(),
install_requirements.s(),
syncdb_and_migrate.s(),
render_uwsgi_config.s(),
render_nginx_config.s(),
reload_nginx.s(),
end.s(),
).apply_async()
@task(ignore_result=True)
def unserve(app_name, uuid):
client = xmlrpclib.Server(settings.EMPRESS_SERVICE_URL)
app_info = client.private.app_info(app_name)
chain(
delete_uwsgi_config.s((app_info, uuid)),
delete_nginx_config.s(),
reload_nginx.s(),
end.s(),
).apply_async()
|
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright (c) 2012 NetApp, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Unit tests for the NetApp-specific NFS driver module (netapp_nfs)"""
from nova import context
from nova import exception
from nova import test
from nova.volume import netapp
from nova.volume import netapp_nfs
from nova.volume import nfs
from mox import IgnoreArg
from mox import IsA
from mox import MockObject
import mox
import suds
import types
class FakeVolume(object):
def __init__(self, size=0):
self.size = size
self.id = hash(self)
self.name = None
def __getitem__(self, key):
return self.__dict__[key]
class FakeSnapshot(object):
def __init__(self, volume_size=0):
self.volume_name = None
self.name = None
self.volume_id = None
self.volume_size = volume_size
self.user_id = None
self.status = None
def __getitem__(self, key):
return self.__dict__[key]
class FakeResponse(object):
def __init__(self, status):
"""
:param status: Either 'failed' or 'passed'
"""
self.Status = status
if status == 'failed':
self.Reason = 'Sample error'
class NetappNfsDriverTestCase(test.TestCase):
"""Test case for NetApp specific NFS clone driver"""
def setUp(self):
self._driver = netapp_nfs.NetAppNFSDriver()
self._mox = mox.Mox()
def tearDown(self):
self._mox.UnsetStubs()
def test_check_for_setup_error(self):
mox = self._mox
drv = self._driver
required_flags = [
'netapp_wsdl_url',
'netapp_login',
'netapp_password',
'netapp_server_hostname',
'netapp_server_port'
]
# check exception raises when flags are not set
self.assertRaises(exception.NovaException,
drv.check_for_setup_error)
# set required flags
for flag in required_flags:
setattr(netapp.FLAGS, flag, 'val')
mox.StubOutWithMock(nfs.NfsDriver, 'check_for_setup_error')
nfs.NfsDriver.check_for_setup_error()
mox.ReplayAll()
drv.check_for_setup_error()
mox.VerifyAll()
# restore initial FLAGS
for flag in required_flags:
delattr(netapp.FLAGS, flag)
def test_do_setup(self):
mox = self._mox
drv = self._driver
mox.StubOutWithMock(drv, 'check_for_setup_error')
mox.StubOutWithMock(netapp_nfs.NetAppNFSDriver, '_get_client')
drv.check_for_setup_error()
netapp_nfs.NetAppNFSDriver._get_client()
mox.ReplayAll()
drv.do_setup(IsA(context.RequestContext))
mox.VerifyAll()
def test_create_snapshot(self):
"""Test snapshot can be created and deleted"""
mox = self._mox
drv = self._driver
mox.StubOutWithMock(drv, '_clone_volume')
drv._clone_volume(IgnoreArg(), IgnoreArg(), IgnoreArg())
mox.ReplayAll()
drv.create_snapshot(FakeSnapshot())
mox.VerifyAll()
def test_create_volume_from_snapshot(self):
"""Tests volume creation from snapshot"""
drv = self._driver
mox = self._mox
volume = FakeVolume(1)
snapshot = FakeSnapshot(2)
self.assertRaises(exception.NovaException,
drv.create_volume_from_snapshot,
volume,
snapshot)
snapshot = FakeSnapshot(1)
location = '127.0.0.1:/nfs'
expected_result = {'provider_location': location}
mox.StubOutWithMock(drv, '_clone_volume')
mox.StubOutWithMock(drv, '_get_volume_location')
drv._clone_volume(IgnoreArg(), IgnoreArg(), IgnoreArg())
drv._get_volume_location(IgnoreArg()).AndReturn(location)
mox.ReplayAll()
loc = drv.create_volume_from_snapshot(volume, snapshot)
self.assertEquals(loc, expected_result)
mox.VerifyAll()
def _prepare_delete_snapshot_mock(self, snapshot_exists):
drv = self._driver
mox = self._mox
mox.StubOutWithMock(drv, '_get_provider_location')
mox.StubOutWithMock(drv, '_volume_not_present')
if snapshot_exists:
mox.StubOutWithMock(drv, '_execute')
mox.StubOutWithMock(drv, '_get_volume_path')
drv._get_provider_location(IgnoreArg())
drv._volume_not_present(IgnoreArg(), IgnoreArg())\
.AndReturn(not snapshot_exists)
if snapshot_exists:
drv._get_volume_path(IgnoreArg(), IgnoreArg())
drv._execute('rm', None, run_as_root=True)
mox.ReplayAll()
return mox
def test_delete_existing_snapshot(self):
drv = self._driver
mox = self._prepare_delete_snapshot_mock(True)
drv.delete_snapshot(FakeSnapshot())
mox.VerifyAll()
def test_delete_missing_snapshot(self):
drv = self._driver
mox = self._prepare_delete_snapshot_mock(False)
drv.delete_snapshot(FakeSnapshot())
mox.VerifyAll()
def _prepare_clone_mock(self, status):
drv = self._driver
mox = self._mox
volume = FakeVolume()
setattr(volume, 'provider_location', '127.0.0.1:/nfs')
drv._client = MockObject(suds.client.Client)
drv._client.factory = MockObject(suds.client.Factory)
drv._client.service = MockObject(suds.client.ServiceSelector)
# ApiProxy() method is generated by ServiceSelector at runtime from the
# XML, so mocking is impossible.
setattr(drv._client.service,
'ApiProxy',
                types.MethodType(lambda *args, **kwargs: FakeResponse(status),
suds.client.ServiceSelector))
mox.StubOutWithMock(drv, '_get_host_id')
mox.StubOutWithMock(drv, '_get_full_export_path')
drv._get_host_id(IgnoreArg()).AndReturn('10')
drv._get_full_export_path(IgnoreArg(), IgnoreArg()).AndReturn('/nfs')
return mox
    def test_successful_clone_volume(self):
drv = self._driver
mox = self._prepare_clone_mock('passed')
mox.ReplayAll()
volume_name = 'volume_name'
clone_name = 'clone_name'
volume_id = volume_name + str(hash(volume_name))
drv._clone_volume(volume_name, clone_name, volume_id)
mox.VerifyAll()
def test_failed_clone_volume(self):
drv = self._driver
mox = self._prepare_clone_mock('failed')
mox.ReplayAll()
volume_name = 'volume_name'
clone_name = 'clone_name'
volume_id = volume_name + str(hash(volume_name))
self.assertRaises(exception.NovaException,
drv._clone_volume,
volume_name, clone_name, volume_id)
mox.VerifyAll()
|
|
# Copyright 2016 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
import copy
from collections import deque
from pprint import pformat
from botocore.validate import validate_parameters
from botocore.exceptions import ParamValidationError, \
StubResponseError, StubAssertionError, UnStubbedResponseError
from botocore.awsrequest import AWSResponse
class _ANY(object):
"""
A helper object that compares equal to everything. Copied from
unittest.mock
"""
def __eq__(self, other):
return True
def __ne__(self, other):
return False
def __repr__(self):
return '<ANY>'
ANY = _ANY()
class Stubber(object):
"""
This class will allow you to stub out requests so you don't have to hit
an endpoint to write tests. Responses are returned first in, first out.
If operations are called out of order, or are called with no remaining
queued responses, an error will be raised.
**Example:**
::
import datetime
import botocore.session
from botocore.stub import Stubber
s3 = botocore.session.get_session().create_client('s3')
stubber = Stubber(s3)
response = {
'IsTruncated': False,
'Name': 'test-bucket',
'MaxKeys': 1000, 'Prefix': '',
'Contents': [{
'Key': 'test.txt',
'ETag': '"abc123"',
'StorageClass': 'STANDARD',
'LastModified': datetime.datetime(2016, 1, 20, 22, 9),
'Owner': {'ID': 'abc123', 'DisplayName': 'myname'},
'Size': 14814
}],
'EncodingType': 'url',
'ResponseMetadata': {
'RequestId': 'abc123',
'HTTPStatusCode': 200,
'HostId': 'abc123'
},
'Marker': ''
}
expected_params = {'Bucket': 'test-bucket'}
stubber.add_response('list_objects', response, expected_params)
stubber.activate()
service_response = s3.list_objects(Bucket='test-bucket')
assert service_response == response
This class can also be called as a context manager, which will handle
activation / deactivation for you.
**Example:**
::
import datetime
import botocore.session
from botocore.stub import Stubber
s3 = botocore.session.get_session().create_client('s3')
response = {
"Owner": {
"ID": "foo",
"DisplayName": "bar"
},
"Buckets": [{
"CreationDate": datetime.datetime(2016, 1, 20, 22, 9),
"Name": "baz"
}]
}
with Stubber(s3) as stubber:
stubber.add_response('list_buckets', response, {})
service_response = s3.list_buckets()
assert service_response == response
If you have an input parameter that is a randomly generated value, or you
otherwise don't care about its value, you can use ``stub.ANY`` to ignore
it in validation.
**Example:**
::
import datetime
import botocore.session
from botocore.stub import Stubber, ANY
s3 = botocore.session.get_session().create_client('s3')
stubber = Stubber(s3)
response = {
'IsTruncated': False,
'Name': 'test-bucket',
'MaxKeys': 1000, 'Prefix': '',
'Contents': [{
'Key': 'test.txt',
'ETag': '"abc123"',
'StorageClass': 'STANDARD',
'LastModified': datetime.datetime(2016, 1, 20, 22, 9),
'Owner': {'ID': 'abc123', 'DisplayName': 'myname'},
'Size': 14814
}],
'EncodingType': 'url',
'ResponseMetadata': {
'RequestId': 'abc123',
'HTTPStatusCode': 200,
'HostId': 'abc123'
},
'Marker': ''
}
expected_params = {'Bucket': ANY}
stubber.add_response('list_objects', response, expected_params)
with stubber:
service_response = s3.list_objects(Bucket='test-bucket')
assert service_response == response
"""
def __init__(self, client):
"""
:param client: The client to add your stubs to.
"""
self.client = client
self._event_id = 'boto_stubber'
self._expected_params_event_id = 'boto_stubber_expected_params'
self._queue = deque()
def __enter__(self):
self.activate()
return self
def __exit__(self, exception_type, exception_value, traceback):
self.deactivate()
def activate(self):
"""
Activates the stubber on the client
"""
self.client.meta.events.register_first(
'before-parameter-build.*.*',
self._assert_expected_params,
unique_id=self._expected_params_event_id)
self.client.meta.events.register(
'before-call.*.*',
self._get_response_handler,
unique_id=self._event_id)
def deactivate(self):
"""
Deactivates the stubber on the client
"""
self.client.meta.events.unregister(
'before-parameter-build.*.*',
self._assert_expected_params,
unique_id=self._expected_params_event_id)
self.client.meta.events.unregister(
'before-call.*.*',
self._get_response_handler,
unique_id=self._event_id)
def add_response(self, method, service_response, expected_params=None):
"""
Adds a service response to the response queue. This will be validated
against the service model to ensure correctness. It should be noted,
however, that while missing attributes are often considered correct,
your code may not function properly if you leave them out. Therefore
you should always fill in every value you see in a typical response for
your particular request.
:param method: The name of the client method to stub.
:type method: str
:param service_response: A dict response stub. Provided parameters will
be validated against the service model.
:type service_response: dict
:param expected_params: A dictionary of the expected parameters to
be called for the provided service response. The parameters match
the names of keyword arguments passed to that client call. If
any of the parameters differ a ``StubResponseError`` is thrown.
You can use stub.ANY to indicate a particular parameter to ignore
in validation. stub.ANY is only valid for top level params.
"""
self._add_response(method, service_response, expected_params)
def _add_response(self, method, service_response, expected_params):
if not hasattr(self.client, method):
raise ValueError(
"Client %s does not have method: %s"
% (self.client.meta.service_model.service_name, method))
# Create a successful http response
http_response = AWSResponse(None, 200, {}, None)
operation_name = self.client.meta.method_to_api_mapping.get(method)
self._validate_response(operation_name, service_response)
# Add the service_response to the queue for returning responses
response = {
'operation_name': operation_name,
'response': (http_response, service_response),
'expected_params': expected_params
}
self._queue.append(response)
def add_client_error(self, method, service_error_code='',
service_message='', http_status_code=400,
service_error_meta=None, expected_params=None,
response_meta=None):
"""
Adds a ``ClientError`` to the response queue.
:param method: The name of the service method to return the error on.
:type method: str
:param service_error_code: The service error code to return,
e.g. ``NoSuchBucket``
:type service_error_code: str
:param service_message: The service message to return, e.g.
'The specified bucket does not exist.'
:type service_message: str
:param http_status_code: The HTTP status code to return, e.g. 404, etc
:type http_status_code: int
:param service_error_meta: Additional keys to be added to the
service Error
:type service_error_meta: dict
:param expected_params: A dictionary of the expected parameters to
be called for the provided service response. The parameters match
the names of keyword arguments passed to that client call. If
any of the parameters differ a ``StubResponseError`` is thrown.
You can use stub.ANY to indicate a particular parameter to ignore
in validation.
:param response_meta: Additional keys to be added to the
response's ResponseMetadata
:type response_meta: dict
"""
http_response = AWSResponse(None, http_status_code, {}, None)
# We don't look to the model to build this because the caller would
# need to know the details of what the HTTP body would need to
# look like.
parsed_response = {
'ResponseMetadata': {'HTTPStatusCode': http_status_code},
'Error': {
'Message': service_message,
'Code': service_error_code
}
}
if service_error_meta is not None:
parsed_response['Error'].update(service_error_meta)
if response_meta is not None:
parsed_response['ResponseMetadata'].update(response_meta)
operation_name = self.client.meta.method_to_api_mapping.get(method)
# Note that we do not allow for expected_params while
# adding errors into the queue yet.
response = {
'operation_name': operation_name,
'response': (http_response, parsed_response),
'expected_params': expected_params,
}
self._queue.append(response)
def assert_no_pending_responses(self):
"""
Asserts that all expected calls were made.
"""
remaining = len(self._queue)
if remaining != 0:
raise AssertionError(
"%d responses remaining in queue." % remaining)
def _assert_expected_call_order(self, model, params):
if not self._queue:
raise UnStubbedResponseError(
operation_name=model.name,
reason=(
'Unexpected API Call: A call was made but no additional calls expected. '
'Either the API Call was not stubbed or it was called multiple times.'
)
)
name = self._queue[0]['operation_name']
if name != model.name:
raise StubResponseError(
operation_name=model.name,
reason='Operation mismatch: found response for %s.' % name)
def _get_response_handler(self, model, params, context, **kwargs):
self._assert_expected_call_order(model, params)
# Pop off the entire response once everything has been validated
return self._queue.popleft()['response']
def _assert_expected_params(self, model, params, context, **kwargs):
if self._should_not_stub(context):
return
self._assert_expected_call_order(model, params)
expected_params = self._queue[0]['expected_params']
if expected_params is None:
return
# Validate the parameters are equal
for param, value in expected_params.items():
if param not in params or expected_params[param] != params[param]:
raise StubAssertionError(
operation_name=model.name,
reason='Expected parameters:\n%s,\nbut received:\n%s' % (
pformat(expected_params), pformat(params)))
# Ensure there are no extra params hanging around
if sorted(expected_params.keys()) != sorted(params.keys()):
raise StubAssertionError(
operation_name=model.name,
reason='Expected parameters:\n%s,\nbut received:\n%s' % (
pformat(expected_params), pformat(params)))
def _should_not_stub(self, context):
# Do not include presign requests when processing stubbed client calls
# as a presign request will never have an HTTP request sent over the
# wire for it and therefore not receive a response back.
if context and context.get('is_presign_request'):
return True
def _validate_response(self, operation_name, service_response):
service_model = self.client.meta.service_model
operation_model = service_model.operation_model(operation_name)
output_shape = operation_model.output_shape
# Remove ResponseMetadata so that the validator doesn't attempt to
# perform validation on it.
response = service_response
if 'ResponseMetadata' in response:
response = copy.copy(service_response)
del response['ResponseMetadata']
if output_shape is not None:
validate_parameters(response, output_shape)
elif response:
# If the output shape is None, that means the response should be
# empty apart from ResponseMetadata
raise ParamValidationError(
report=(
"Service response should only contain ResponseMetadata."))
|
|
"""Aiohttp test utils."""
import asyncio
from contextlib import contextmanager
import json as _json
import re
from unittest import mock
from urllib.parse import parse_qs
from aiohttp import ClientSession
from aiohttp.streams import StreamReader
from yarl import URL
from aiohttp.client_exceptions import ClientResponseError
from homeassistant.const import EVENT_HOMEASSISTANT_CLOSE
retype = type(re.compile(''))
def mock_stream(data):
"""Mock a stream with data."""
protocol = mock.Mock(_reading_paused=False)
stream = StreamReader(protocol)
stream.feed_data(data)
stream.feed_eof()
return stream
class AiohttpClientMocker:
"""Mock Aiohttp client requests."""
def __init__(self):
"""Initialize the request mocker."""
self._mocks = []
self._cookies = {}
self.mock_calls = []
def request(self, method, url, *,
auth=None,
status=200,
text=None,
data=None,
content=None,
json=None,
params=None,
                headers=None,
exc=None,
cookies=None):
"""Mock a request."""
if json is not None:
text = _json.dumps(json)
if text is not None:
content = text.encode('utf-8')
if content is None:
content = b''
if not isinstance(url, retype):
url = URL(url)
if params:
url = url.with_query(params)
self._mocks.append(AiohttpClientMockResponse(
method, url, status, content, cookies, exc, headers))
def get(self, *args, **kwargs):
"""Register a mock get request."""
self.request('get', *args, **kwargs)
def put(self, *args, **kwargs):
"""Register a mock put request."""
self.request('put', *args, **kwargs)
def post(self, *args, **kwargs):
"""Register a mock post request."""
self.request('post', *args, **kwargs)
def delete(self, *args, **kwargs):
"""Register a mock delete request."""
self.request('delete', *args, **kwargs)
def options(self, *args, **kwargs):
"""Register a mock options request."""
self.request('options', *args, **kwargs)
@property
def call_count(self):
"""Return the number of requests made."""
return len(self.mock_calls)
def clear_requests(self):
"""Reset mock calls."""
self._mocks.clear()
self._cookies.clear()
self.mock_calls.clear()
def create_session(self, loop):
"""Create a ClientSession that is bound to this mocker."""
session = ClientSession(loop=loop)
# Setting directly on `session` will raise deprecation warning
object.__setattr__(session, '_request', self.match_request)
return session
async def match_request(self, method, url, *, data=None, auth=None,
params=None, headers=None, allow_redirects=None,
timeout=None, json=None, cookies=None):
"""Match a request against pre-registered requests."""
data = data or json
url = URL(url)
if params:
url = url.with_query(params)
for response in self._mocks:
if response.match_request(method, url, params):
self.mock_calls.append((method, url, data, headers))
if response.exc:
raise response.exc
return response
assert False, "No mock registered for {} {} {}".format(method.upper(),
url, params)
class AiohttpClientMockResponse:
"""Mock Aiohttp client response."""
def __init__(self, method, url, status, response, cookies=None, exc=None,
headers=None):
"""Initialize a fake response."""
self.method = method
self._url = url
self.status = status
self.response = response
self.exc = exc
self._headers = headers or {}
self._cookies = {}
if cookies:
for name, data in cookies.items():
cookie = mock.MagicMock()
cookie.value = data
self._cookies[name] = cookie
def match_request(self, method, url, params=None):
"""Test if response answers request."""
if method.lower() != self.method.lower():
return False
# regular expression matching
if isinstance(self._url, retype):
return self._url.search(str(url)) is not None
if (self._url.scheme != url.scheme or self._url.host != url.host or
self._url.path != url.path):
return False
# Ensure all query components in matcher are present in the request
request_qs = parse_qs(url.query_string)
matcher_qs = parse_qs(self._url.query_string)
for key, vals in matcher_qs.items():
for val in vals:
try:
request_qs.get(key, []).remove(val)
except ValueError:
return False
return True
@property
def headers(self):
"""Return content_type."""
return self._headers
@property
def cookies(self):
"""Return dict of cookies."""
return self._cookies
@property
def url(self):
"""Return yarl of URL."""
return self._url
@property
def content_type(self):
"""Return yarl of URL."""
return self._headers.get('content-type')
@property
def content(self):
"""Return content."""
return mock_stream(self.response)
@asyncio.coroutine
def read(self):
"""Return mock response."""
return self.response
@asyncio.coroutine
def text(self, encoding='utf-8'):
"""Return mock response as a string."""
return self.response.decode(encoding)
@asyncio.coroutine
def json(self, encoding='utf-8'):
"""Return mock response as a json."""
return _json.loads(self.response.decode(encoding))
@asyncio.coroutine
def release(self):
"""Mock release."""
pass
def raise_for_status(self):
"""Raise error if status is 400 or higher."""
if self.status >= 400:
raise ClientResponseError(
None, None, code=self.status, headers=self.headers)
def close(self):
"""Mock close."""
pass
@contextmanager
def mock_aiohttp_client():
"""Context manager to mock aiohttp client."""
mocker = AiohttpClientMocker()
def create_session(hass, *args):
session = mocker.create_session(hass.loop)
async def close_session(event):
"""Close session."""
await session.close()
hass.bus.async_listen_once(EVENT_HOMEASSISTANT_CLOSE, close_session)
return session
with mock.patch(
'homeassistant.helpers.aiohttp_client.async_create_clientsession',
side_effect=create_session):
yield mocker
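# Hedged usage sketch (added comment, not part of the original module): assumes
# a pytest-style async test with a `hass` fixture and that the register helpers
# accept a json= payload; `fetch_data` is a hypothetical coroutine under test.
#
#     async def test_fetch_data(hass):
#         with mock_aiohttp_client() as aioclient_mock:
#             aioclient_mock.get('http://example.com/api', json={'ok': True})
#             result = await fetch_data(hass, 'http://example.com/api')
#             assert aioclient_mock.call_count == 1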
|
|
#!/usr/bin/env python3
# Copyright (c) 2014 Wladimir J. van der Laan
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
'''
A script to check that the executables produced by gitian only contain
certain symbols and are only linked against allowed libraries.
Example usage:
find ../gitian-builder/build -type f -executable | xargs python3 contrib/devtools/symbol-check.py
'''
import subprocess
import re
import sys
import os
from typing import List, Optional, Tuple
# Debian 8 (Jessie) EOL: 2020. https://wiki.debian.org/DebianReleases#Production_Releases
#
# - g++ version 4.9.2 (https://packages.debian.org/search?suite=jessie&arch=any&searchon=names&keywords=g%2B%2B)
# - libc version 2.19 (https://packages.debian.org/search?suite=jessie&arch=any&searchon=names&keywords=libc6)
#
# Ubuntu 16.04 (Xenial) EOL: 2024. https://wiki.ubuntu.com/Releases
#
# - g++ version 5.3.1 (https://packages.ubuntu.com/search?keywords=g%2B%2B&searchon=names&suite=xenial&section=all)
# - libc version 2.23.0 (https://packages.ubuntu.com/search?keywords=libc6&searchon=names&suite=xenial&section=all)
#
# CentOS 7 EOL: 2024. https://wiki.centos.org/FAQ/General
#
# - g++ version 4.8.5 (http://mirror.centos.org/centos/7/os/x86_64/Packages/)
# - libc version 2.17 (http://mirror.centos.org/centos/7/os/x86_64/Packages/)
#
# Taking the minimum of these as our target.
#
# According to GNU ABI document (https://gcc.gnu.org/onlinedocs/libstdc++/manual/abi.html) this corresponds to:
# GCC 4.8.5: GCC_4.8.0
# (glibc) GLIBC_2_17
#
MAX_VERSIONS = {
'GCC': (4,8,0),
'GLIBC': (2,17),
'LIBATOMIC': (1,0)
}
# See here for a description of _IO_stdin_used:
# https://bugs.debian.org/cgi-bin/bugreport.cgi?bug=634261#109
# Ignore symbols that are exported as part of every executable
IGNORE_EXPORTS = {
'_edata', '_end', '__end__', '_init', '__bss_start', '__bss_start__', '_bss_end__', '__bss_end__', '_fini', '_IO_stdin_used', 'stdin', 'stdout', 'stderr', 'in6addr_any',
'environ', '_environ', '__environ',
}
READELF_CMD = os.getenv('READELF', '/usr/bin/readelf')
CPPFILT_CMD = os.getenv('CPPFILT', '/usr/bin/c++filt')
OBJDUMP_CMD = os.getenv('OBJDUMP', '/usr/bin/objdump')
OTOOL_CMD = os.getenv('OTOOL', '/usr/bin/otool')
# Allowed NEEDED libraries
ELF_ALLOWED_LIBRARIES = {
# bitcoind and bitcoin-qt
'libgcc_s.so.1', # GCC base support
'libc.so.6', # C library
'libpthread.so.0', # threading
'libm.so.6', # math library
'librt.so.1', # real-time (clock)
'libatomic.so.1',
'ld-linux-x86-64.so.2', # 64-bit dynamic linker
'ld-linux.so.2', # 32-bit dynamic linker
'ld-linux-aarch64.so.1', # 64-bit ARM dynamic linker
'ld-linux-armhf.so.3', # 32-bit ARM dynamic linker
'ld-linux-riscv64-lp64d.so.1', # 64-bit RISC-V dynamic linker
# bitcoin-qt only
'libxcb.so.1', # part of X11
'libfontconfig.so.1', # font support
'libfreetype.so.6', # font parsing
'libdl.so.2' # programming interface to dynamic linker
}
ARCH_MIN_GLIBC_VER = {
'80386': (2,1),
'X86-64': (2,2,5),
'ARM': (2,4),
'AArch64':(2,17),
'RISC-V': (2,27)
}
MACHO_ALLOWED_LIBRARIES = {
# bitcoind and bitcoin-qt
'libc++.1.dylib', # C++ Standard Library
'libSystem.B.dylib', # libc, libm, libpthread, libinfo
# bitcoin-qt only
'AppKit', # user interface
'ApplicationServices', # common application tasks.
'Carbon', # deprecated c back-compat API
'CoreFoundation', # low level func, data types
'CoreGraphics', # 2D rendering
'CoreServices', # operating system services
'CoreText', # interface for laying out text and handling fonts.
'Foundation', # base layer functionality for apps/frameworks
'ImageIO', # read and write image file formats.
'IOKit', # user-space access to hardware devices and drivers.
'Security', # secure the data your app manages, and control access to your app.
'libobjc.A.dylib', # Objective-C runtime library
}
PE_ALLOWED_LIBRARIES = {
'ADVAPI32.dll', # security & registry
'IPHLPAPI.DLL', # IP helper API
'KERNEL32.dll', # win32 base APIs
'msvcrt.dll', # C standard library for MSVC
'SHELL32.dll', # shell API
'USER32.dll', # user interface
'WS2_32.dll', # sockets
# bitcoin-qt only
'dwmapi.dll', # desktop window manager
'GDI32.dll', # graphics device interface
'IMM32.dll', # input method editor
'ole32.dll', # component object model
'OLEAUT32.dll', # OLE Automation API
'SHLWAPI.dll', # light weight shell API
'UxTheme.dll',
'VERSION.dll', # version checking
'WINMM.dll', # WinMM audio API
}
class CPPFilt(object):
'''
Demangle C++ symbol names.
Use a pipe to the 'c++filt' command.
'''
def __init__(self):
self.proc = subprocess.Popen(CPPFILT_CMD, stdin=subprocess.PIPE, stdout=subprocess.PIPE, universal_newlines=True)
def __call__(self, mangled):
self.proc.stdin.write(mangled + '\n')
self.proc.stdin.flush()
return self.proc.stdout.readline().rstrip()
def close(self):
self.proc.stdin.close()
self.proc.stdout.close()
self.proc.wait()
def read_symbols(executable, imports=True) -> List[Tuple[str, str, str]]:
'''
Parse an ELF executable and return a list of (symbol,version, arch) tuples
for dynamic, imported symbols.
'''
p = subprocess.Popen([READELF_CMD, '--dyn-syms', '-W', '-h', executable], stdout=subprocess.PIPE, stderr=subprocess.PIPE, stdin=subprocess.PIPE, universal_newlines=True)
(stdout, stderr) = p.communicate()
if p.returncode:
raise IOError('Could not read symbols for {}: {}'.format(executable, stderr.strip()))
syms = []
for line in stdout.splitlines():
line = line.split()
if 'Machine:' in line:
arch = line[-1]
if len(line)>7 and re.match('[0-9]+:$', line[0]):
(sym, _, version) = line[7].partition('@')
is_import = line[6] == 'UND'
if version.startswith('@'):
version = version[1:]
if is_import == imports:
syms.append((sym, version, arch))
return syms
def check_version(max_versions, version, arch) -> bool:
if '_' in version:
(lib, _, ver) = version.rpartition('_')
else:
lib = version
ver = '0'
ver = tuple([int(x) for x in ver.split('.')])
if not lib in max_versions:
return False
    return ver <= max_versions[lib] or (lib == 'GLIBC' and ver <= ARCH_MIN_GLIBC_VER[arch])
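# Worked example (added comment, not in the original script): a version string
# such as 'GLIBC_2.17' splits on its last '_' into lib='GLIBC' and ver=(2, 17);
# since MAX_VERSIONS['GLIBC'] == (2, 17), the tuple comparison holds and
# check_version(MAX_VERSIONS, 'GLIBC_2.17', 'X86-64') returns True, whereas
# 'GLIBC_2.25' exceeds both the global maximum and the X86-64 architecture
# minimum (2, 2, 5) and returns False.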
def elf_read_libraries(filename) -> List[str]:
p = subprocess.Popen([READELF_CMD, '-d', '-W', filename], stdout=subprocess.PIPE, stderr=subprocess.PIPE, stdin=subprocess.PIPE, universal_newlines=True)
(stdout, stderr) = p.communicate()
if p.returncode:
raise IOError('Error opening file')
libraries = []
for line in stdout.splitlines():
tokens = line.split()
if len(tokens)>2 and tokens[1] == '(NEEDED)':
match = re.match(r'^Shared library: \[(.*)\]$', ' '.join(tokens[2:]))
if match:
libraries.append(match.group(1))
else:
raise ValueError('Unparseable (NEEDED) specification')
return libraries
def check_imported_symbols(filename) -> bool:
cppfilt = CPPFilt()
ok = True
for sym, version, arch in read_symbols(filename, True):
if version and not check_version(MAX_VERSIONS, version, arch):
print('{}: symbol {} from unsupported version {}'.format(filename, cppfilt(sym), version))
ok = False
return ok
def check_exported_symbols(filename) -> bool:
cppfilt = CPPFilt()
ok = True
for sym,version,arch in read_symbols(filename, False):
if arch == 'RISC-V' or sym in IGNORE_EXPORTS:
continue
print('{}: export of symbol {} not allowed'.format(filename, cppfilt(sym)))
ok = False
return ok
def check_ELF_libraries(filename) -> bool:
ok = True
for library_name in elf_read_libraries(filename):
if library_name not in ELF_ALLOWED_LIBRARIES:
print('{}: NEEDED library {} is not allowed'.format(filename, library_name))
ok = False
return ok
def macho_read_libraries(filename) -> List[str]:
p = subprocess.Popen([OTOOL_CMD, '-L', filename], stdout=subprocess.PIPE, stderr=subprocess.PIPE, stdin=subprocess.PIPE, universal_newlines=True)
(stdout, stderr) = p.communicate()
if p.returncode:
raise IOError('Error opening file')
libraries = []
for line in stdout.splitlines():
tokens = line.split()
if len(tokens) == 1: # skip executable name
continue
libraries.append(tokens[0].split('/')[-1])
return libraries
def check_MACHO_libraries(filename) -> bool:
ok = True
for dylib in macho_read_libraries(filename):
if dylib not in MACHO_ALLOWED_LIBRARIES:
print('{} is not in ALLOWED_LIBRARIES!'.format(dylib))
ok = False
return ok
def pe_read_libraries(filename) -> List[str]:
p = subprocess.Popen([OBJDUMP_CMD, '-x', filename], stdout=subprocess.PIPE, stderr=subprocess.PIPE, stdin=subprocess.PIPE, universal_newlines=True)
(stdout, stderr) = p.communicate()
if p.returncode:
raise IOError('Error opening file')
libraries = []
for line in stdout.splitlines():
if 'DLL Name:' in line:
tokens = line.split(': ')
libraries.append(tokens[1])
return libraries
def check_PE_libraries(filename) -> bool:
ok = True
for dylib in pe_read_libraries(filename):
if dylib not in PE_ALLOWED_LIBRARIES:
print('{} is not in ALLOWED_LIBRARIES!'.format(dylib))
ok = False
return ok
CHECKS = {
'ELF': [
('IMPORTED_SYMBOLS', check_imported_symbols),
('EXPORTED_SYMBOLS', check_exported_symbols),
('LIBRARY_DEPENDENCIES', check_ELF_libraries)
],
'MACHO': [
('DYNAMIC_LIBRARIES', check_MACHO_libraries)
],
'PE' : [
('DYNAMIC_LIBRARIES', check_PE_libraries)
]
}
def identify_executable(executable) -> Optional[str]:
    with open(executable, 'rb') as f:
magic = f.read(4)
if magic.startswith(b'MZ'):
return 'PE'
elif magic.startswith(b'\x7fELF'):
return 'ELF'
elif magic.startswith(b'\xcf\xfa'):
return 'MACHO'
return None
if __name__ == '__main__':
retval = 0
for filename in sys.argv[1:]:
try:
etype = identify_executable(filename)
if etype is None:
print('{}: unknown format'.format(filename))
retval = 1
continue
failed = []
for (name, func) in CHECKS[etype]:
if not func(filename):
failed.append(name)
if failed:
print('{}: failed {}'.format(filename, ' '.join(failed)))
retval = 1
except IOError:
print('{}: cannot open'.format(filename))
retval = 1
sys.exit(retval)
|
|
import re
from django.contrib.auth.models import User
from django import forms
from crispy_forms.helper import FormHelper
from crispy_forms.layout import Submit
from sideloader.db import models
class BaseModelForm(forms.ModelForm):
helper = FormHelper()
helper.form_class = 'form-horizontal'
helper.label_class = 'col-lg-2'
helper.field_class = 'col-lg-8'
helper.add_input(Submit('submit', 'Submit'))
class BaseForm(forms.Form):
helper = FormHelper()
helper.form_class = 'form-horizontal'
helper.label_class = 'col-lg-2'
helper.field_class = 'col-lg-8'
helper.add_input(Submit('submit', 'Submit'))
class ProjectForm(BaseModelForm):
allowed_users = forms.ModelMultipleChoiceField(
queryset=User.objects.all().order_by('username'),
required=False
)
allowed_users.help_text = ''
class Meta:
model = models.Project
exclude = ()
class PackageRepoForm(BaseModelForm):
class Meta:
model = models.PackageRepo
exclude = ()
class RepoForm(BaseModelForm):
github_url = forms.CharField(label="Git checkout URL")
build_type = forms.ChoiceField(
label='Deploy type',
widget=forms.Select,
choices=(
('virtualenv', 'Virtualenv'),
('python', 'Python package'), ('flat', 'Flat')))
version_getter = forms.ChoiceField(
label='Package version',
widget=forms.Select,
choices=(
('setup.py', 'Python setup.py'),
('autonum', 'Auto increment'),
('script', 'Custom script'),
)
)
version_cmd = forms.CharField(
widget=forms.Textarea,
label="Version script",
required=False
)
class Meta:
model = models.Repo
exclude = ('idhash', 'created_by_user', 'build_counter', 'project')
def clean(self):
cleaned_data = super(RepoForm, self).clean()
uri = cleaned_data['github_url'].strip()
        if not uri.endswith('.git'):
raise forms.ValidationError("Not a valid Git URI")
cleaned_data['github_url'] = uri
return cleaned_data
class ServerRequestForm(BaseModelForm):
inftype = forms.ChoiceField(
label='Infrastructure type',
widget=forms.Select,
choices=(
('prd', 'Production'),
('qa', 'QA'), ('stg', 'Staging')))
cpus = forms.IntegerField(label="CPU Cores", required=True,
initial=1,
max_value=8,
min_value=1,
help_text="Must be between 1 and 8")
memory = forms.IntegerField(label="Memory (GB)", required=True,
initial=2,
max_value=24,
min_value=1,
help_text="Must be between 1 and 24")
disk = forms.IntegerField(label="Disk space (GB)", required=True,
initial=50,
max_value=250,
min_value=25,
help_text="Must be between 25 and 250")
class Meta:
model = models.ServerRequest
exclude = (
'requested_by', 'project', 'approved_by', 'approval',
'provisioned', 'request_date'
)
def clean(self):
cleaned_data = super(ServerRequestForm, self).clean()
name = cleaned_data['name'].strip()
if ' ' in name:
raise forms.ValidationError("Server name may not contain spaces")
        if not re.match(r'^[\w-]+$', name):
            raise forms.ValidationError("Server name may only contain letters, numbers, hyphens and underscores")
cleaned_data['name'] = name.lower()
return cleaned_data
class TargetForm(BaseModelForm):
stream_mode = forms.ChoiceField(
label='Deploy mode',
widget=forms.Select,
choices=(
('repo', 'Package repository'),
('server', 'Server'),
)
)
class Meta:
model = models.Target
exclude = ('current_build', 'log', 'state', 'project')
class StreamForm(BaseModelForm):
targets = forms.ModelMultipleChoiceField(
queryset=models.Target.objects.all(),
required=False
)
targets.help_text = ''
package_type = forms.ChoiceField(
label='Package type',
widget=forms.Select,
choices=(
('deb', 'Debian/Ubuntu'),
('rpm', 'RedHat'),
('docker', 'Docker image'),
('dockerhub', 'Docker Hub'),
('tar', 'TAR file'),
('pypi', 'PyPi Upload')
)
)
architecture = forms.ChoiceField(
label='CPU architecture',
widget=forms.Select,
choices=(
('amd64', 'amd64'),
('i386', 'i386'),
)
)
auto_release = forms.BooleanField(
help_text="Automatically deploy new builds to this release workflow",
required=False)
require_signoff = forms.BooleanField(
label="Require sign-off",
required=False)
signoff_list = forms.CharField(
widget=forms.Textarea,
label="Sign-off list",
required=False,
help_text="List email addresses on a new line")
quorum = forms.IntegerField(
required=False,
initial=0,
help_text="Required number of sign-offs before release. 0 means <strong>all</strong> are required")
notify = forms.BooleanField(
label="Notify",
help_text="Send notifications of releases by email",
required=False)
class Meta:
model = models.Stream
exclude = ('project',)
fields = (
'name', 'repo', 'branch', 'package_type', 'architecture',
'targets',
'post_build', 'auto_release', 'require_signoff',
'signoff_list', 'notify', 'notify_list',
)
class ReleasePushForm(BaseModelForm):
tz = forms.CharField(widget=forms.HiddenInput())
class Meta:
model = models.Release
exclude = ('release_date', 'flow', 'build', 'waiting')
#class ModuleForm(BaseModelForm):
# class Meta:
# model = models.ModuleManifest
# fields = ('name', 'key', 'structure',)
#class ManifestForm(BaseModelForm):
# class Meta:
# model = models.ServerManifest
# exclude = ('release',)
class UserForm(BaseModelForm):
password = forms.CharField(widget=forms.PasswordInput(), initial='')
class Meta:
model = User
exclude = (
'email', 'username', 'is_staff', 'is_active', 'is_superuser',
'last_login', 'date_joined', 'groups', 'user_permissions'
)
|
|
#
# Copyright (c) 2012 Patrice Munger
# This file is part of pynetdicom, released under a modified MIT license.
# See the file license.txt included with this distribution, also
# available at http://pynetdicom.googlecode.com
#
from DIMSEparameters import *
from DULparameters import *
from dicom.dataset import Dataset
import dsutils
from struct import pack, unpack
from dicom.UID import ImplicitVRLittleEndian
#
# pydicom's dictionary is missing the command group tags. Add them.
#
from dicom._dicom_dict import DicomDictionary
import itertools
import logging
logger = logging.getLogger(__name__)
DicomDictionary.update({
0x00000000: ('UL', '1', "CommandGroupLength", ''),
0x00000002: ('UI', '1', "Affected SOP class", ''),
0x00000003: ('UI', '1', "RequestedSOPClassUID", ''),
0x00000100: ('US', '1', "CommandField", ''),
0x00000110: ('US', '1', "MessageID", ''),
0x00000120: ('US', '1', "MessageIDBeingRespondedTo", ''),
0x00000600: ('AE', '1', "MoveDestination", ''),
0x00000700: ('US', '1', "Priority", ''),
0x00000800: ('US', '1', "DataSetType", ''),
0x00000900: ('US', '1', "Status", ''),
0x00000901: ('AT', '1', "OffendingElement", ''),
0x00000902: ('LO', '1', "ErrorComment", ''),
0x00000903: ('US', '1', "ErrorID", ''),
0x00001000: ('UI', '1', " AffectedSOPInstanceUID", ''),
0x00001001: ('UI', '1', "RequestedSOPInstanceUID", ''),
0x00001002: ('US', '1', "EventTypeID", ''),
0x00001005: ('AT', '1', "AttributeIdentifierList", ''),
0x00001008: ('US', '1', "ActionTypeID", ''),
0x00001020: ('US', '1', "NumberOfRemainingSubOperations", ''),
0x00001021: ('US', '1', "NumberOfCompletedSubOperations", ''),
0x00001022: ('US', '1', "NumberOfFailedSubOperations", ''),
0x00001023: ('US', '1', "NumberOfWarningSubOperations", ''),
0x00001030: ('AE', '1', "MoveOriginatorApplicationEntityTitle", ''),
0x00001031: ('US', '1', "MoveOriginatorMessageID", ''),
})
"""
All DIMSE Message classes implement the following methods:
FromParams(DIMSEServiceParameter) : Builds a DIMSE message from a
DULServiceParameter
object. Used when receiving
primitives from the
DIMSEServiceUser.
ToParams() : Convert the Message into a
DIMSEServiceParameter object.
Used for sending primitives to
the DIMSEServiceUser.
Encode() : Returns the encoded message in
one or several P-DATA parameters
structure.
Decode(pdata) : Construct the message from one
or several P-DATA primitives
FromParams Encode
|----------------------| -------> |----------| -------> |---------------|
| Service parameters | | DIMSE | | P-DATA |
| object | | message | | primitive(s) |
|______________________| <------- |__________| <------- |_______________|
ToParams Decode
"""
DEBUG = False
def fragment(maxpdulength, str):
s = str
fragments = []
maxsize = maxpdulength - 6
while 1:
fragments.append(s[:maxsize])
s = s[maxsize:]
if len(s) <= maxsize:
if len(s) > 0:
fragments.append(s)
return fragments
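# Illustrative note (added comment, not in the original): with maxpdulength=10
# the usable chunk size is 10 - 6 = 4 bytes, so fragment(10, 'abcdefghij')
# yields ['abcd', 'efgh', 'ij']; an input shorter than the chunk size comes
# back as a single fragment.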
class DIMSEMessage:
def __init__(self):
self.CommandSet = None
self.EncodedDataSet = None
self.DataSet = None
self.encoded_command_set = ''
        self.ID = None
self.ts = ImplicitVRLittleEndian # imposed by standard.
if self.__class__ != DIMSEMessage:
self.CommandSet = Dataset()
for ii in self.CommandFields:
self.CommandSet.add_new(ii[1], ii[2], '')
def Encode(self, id, maxpdulength):
"""Returns the encoded message as a series of P-DATA service
parameter objects"""
self.ID = id
pdatas = []
encoded_command_set = dsutils.encode(
self.CommandSet, self.ts.is_implicit_VR, self.ts.is_little_endian)
# fragment command set
pdvs = fragment(maxpdulength, encoded_command_set)
assert ''.join(pdvs) == encoded_command_set
for ii in pdvs[:-1]:
# send only one pdv per pdata primitive
pdata = P_DATA_ServiceParameters()
# not last command fragment
pdata.PresentationDataValueList = [[self.ID, pack('b', 1) + ii]]
pdatas.append(pdata)
# last command fragment
pdata = P_DATA_ServiceParameters()
# last command fragment
pdata.PresentationDataValueList = [[self.ID, pack('b', 3) + pdvs[-1]]]
pdatas.append(pdata)
# fragment data set
#if self.__dict__.has_key('DataSet') and self.DataSet:
if 'DataSet' in self.__dict__ and self.DataSet is not None:
pdvs = fragment(maxpdulength, self.DataSet)
assert ''.join(pdvs) == self.DataSet
for ii in pdvs[:-1]:
pdata = P_DATA_ServiceParameters()
# not last data fragment
pdata.PresentationDataValueList = [
[self.ID, pack('b', 0) + ii]]
pdatas.append(pdata)
pdata = P_DATA_ServiceParameters()
# last data fragment
pdata.PresentationDataValueList = [
[self.ID, pack('b', 2) + pdvs[-1]]]
pdatas.append(pdata)
return pdatas
def Decode(self, pdata):
"""Constructs itself receiving a series of P-DATA primitives.
Returns True when complete, False otherwise."""
if pdata.__class__ != P_DATA_ServiceParameters:
# not a pdata
return False
if pdata is None:
return False
ii = pdata
for vv in ii.PresentationDataValueList:
# must be able to read P-DATA with several PDVs
self.ID = vv[0]
if unpack('b', vv[1][0])[0] in (1, 3):
logger.debug(" command fragment %s", self.ID)
self.encoded_command_set += vv[1][1:]
if unpack('b', vv[1][0])[0] == 3:
logger.debug(" last command fragment %s", self.ID)
self.CommandSet = dsutils.decode(
self.encoded_command_set, self.ts.is_implicit_VR,
self.ts.is_little_endian)
self.__class__ = MessageType[
self.CommandSet[(0x0000, 0x0100)].value]
if self.CommandSet[(0x0000, 0x0800)].value == 0x0101:
# response: no dataset
return True
elif unpack('b', vv[1][0])[0] in (0, 2):
if self.DataSet is None:
self.DataSet = ''
self.DataSet += vv[1][1:]
logger.debug(" data fragment %s", self.ID)
if unpack('b', vv[1][0])[0] == 2:
logger.debug(" last data fragment %s", self.ID)
return True
            else:
                raise RuntimeError("Unexpected PDV control header")
return False
def SetLength(self):
# compute length
l = 0
for ii in self.CommandSet.values()[1:]:
l += len(dsutils.encode_element(ii,
self.ts.is_implicit_VR,
self.ts.is_little_endian))
# if self.DataSet<>None:
# l += len(self.DataSet)
self.CommandSet[(0x0000, 0x0000)].value = l
def __repr__(self):
return str(self.CommandSet) + '\n'
class C_ECHO_RQ_Message(DIMSEMessage):
CommandFields = [
('Group Length',
(0x0000, 0x0000), 'UL', 1),
('Affected SOP Class UID',
(0x0000, 0x0002), 'UI', 1),
('Command Field',
(0x0000, 0x0100), 'US', 1),
('Message ID',
(0x0000, 0x0110), 'US', 1),
('Data Set Type', (0x0000, 0x0800), 'US', 1)
]
DataField = None
def FromParams(self, params):
self.CommandSet[(0x0000, 0x0002)].value = params.AffectedSOPClassUID
self.CommandSet[(0x0000, 0x0100)].value = 0x0030
self.CommandSet[(0x0000, 0x0110)].value = params.MessageID
self.CommandSet[(0x0000, 0x0800)].value = 0x0101
self.DataSet = None
self.SetLength()
def ToParams(self):
tmp = C_ECHO_ServiceParameters()
tmp.MessageID = self.CommandSet.get((0x0000, 0x0110))
tmp.AffectedSOPClassUID = self.CommandSet.get((0x0000, 0x0002))
return tmp
class C_ECHO_RSP_Message(DIMSEMessage):
CommandFields = [
('Group Length',
(0x0000, 0x0000), 'UL', 1),
('Affected SOP Class UID',
(0x0000, 0x0002), 'UI', 1),
('Command Field',
(0x0000, 0x0100), 'US', 1),
('Message ID Being Responded To',
(0x0000, 0x0120), 'US', 1),
('Data Set Type',
(0x0000, 0x0800), 'US', 1),
('Status', (0x0000, 0x0900), 'US', 1)
]
DataField = None
def FromParams(self, params):
if params.AffectedSOPClassUID:
self.CommandSet[(0x0000, 0x0002)
].value = params.AffectedSOPClassUID
self.CommandSet[(0x0000, 0x0100)].value = 0x8030
self.CommandSet[(0x0000, 0x0120)
].value = params.MessageIDBeingRespondedTo
self.CommandSet[(0x0000, 0x0800)].value = 0x0101
self.CommandSet[(0x0000, 0x0900)].value = params.Status
self.SetLength()
def ToParams(self):
tmp = C_ECHO_ServiceParameters()
tmp.AffectedSOPClassUID = self.CommandSet.get((0x0000, 0x0002))
tmp.MessageIDBeingRespondedTo = self.CommandSet.get((0x0000, 0x0120))
tmp.Status = 0
return tmp
class C_STORE_RQ_Message(DIMSEMessage):
CommandFields = [
('Group Length',
(0x0000, 0x0000), 'UL', 1),
('Affected SOP Class UID',
(0x0000, 0x0002), 'UI', 1),
('Command Field',
(0x0000, 0x0100), 'US', 1),
('Message ID',
(0x0000, 0x0110), 'US', 1),
('Priority',
(0x0000, 0x0700), 'US', 1),
('Data Set Type',
(0x0000, 0x0800), 'US', 1),
('Affected SOP Instance UID',
(0x0000, 0x1000), 'UI', 1),
('Move Originator Application Entity Title',
(0x0000, 0x1030), 'AE', 1),
('Move Originator Message ID',
(0x0000, 0x1031), 'US', 1),
]
DataField = 'Data Set'
def FromParams(self, params):
self.CommandSet[(0x0000, 0x0002)].value = params.AffectedSOPClassUID
self.CommandSet[(0x0000, 0x0100)].value = 0x0001
self.CommandSet[(0x0000, 0x0110)].value = params.MessageID
self.CommandSet[(0x0000, 0x0700)].value = params.Priority
self.CommandSet[(0x0000, 0x0800)].value = 0x0001
self.CommandSet[(0x0000, 0x1000)].value = params.AffectedSOPInstanceUID
if params.MoveOriginatorApplicationEntityTitle:
self.CommandSet[(0x0000, 0x1030)].value = \
params.MoveOriginatorApplicationEntityTitle
else:
self.CommandSet[(0x0000, 0x1030)].value = ""
if params.MoveOriginatorMessageID:
self.CommandSet[(0x0000, 0x1031)
].value = params.MoveOriginatorMessageID
else:
self.CommandSet[(0x0000, 0x1031)].value = ""
self.DataSet = params.DataSet
self.SetLength()
def ToParams(self):
tmp = C_STORE_ServiceParameters()
tmp.AffectedSOPClassUID = self.CommandSet.get((0x0000, 0x0002))
tmp.AffectedSOPInstanceUID = self.CommandSet.get((0x0000, 0x1000))
tmp.Priority = self.CommandSet.get((0x0000, 0x0700))
tmp.DataSet = self.DataSet
tmp.MessageID = self.CommandSet.get((0x0000, 0x0110))
return tmp
class C_STORE_RSP_Message(DIMSEMessage):
CommandFields = [
('Group Length',
(0x0000, 0x0000), 'UL', 1),
('Affected SOP Class UID',
(0x0000, 0x0002), 'UI', 1),
('Command Field',
(0x0000, 0x0100), 'US', 1),
('Message ID Being Responded To',
(0x0000, 0x0120), 'US', 1),
('Data Set Type',
(0x0000, 0x0800), 'US', 1),
('Status',
(0x0000, 0x0900), 'US', 1),
('Affected SOP Instance UID', (0x0000, 0x1000), 'UI', 1),
('Error Comment',
(0x0000, 0x0902), 'LO', 1),
]
def FromParams(self, params):
self.CommandSet[(0x0000, 0x0002)
].value = params.AffectedSOPClassUID.value
self.CommandSet[(0x0000, 0x0100)].value = 0x8001
self.CommandSet[(0x0000, 0x0120)
].value = params.MessageIDBeingRespondedTo.value
self.CommandSet[(0x0000, 0x0800)].value = 0x0101
self.CommandSet[(0x0000, 0x0900)].value = params.Status
self.CommandSet[(0x0000, 0x1000)
].value = params.AffectedSOPInstanceUID.value
self.DataSet = None
self.SetLength()
def ToParams(self):
tmp = C_STORE_ServiceParameters()
tmp.AffectedSOPClassUID = self.CommandSet.get((0x0000, 0x0002))
tmp.MessageIDBeingRespondedTo = self.CommandSet.get((0x0000, 0x0120))
tmp.Status = self.CommandSet.get((0x0000, 0x0900))
tmp.ErrorComment = self.CommandSet.get((0x0000, 0x0902))
tmp.AffectedSOPInstanceUID = self.CommandSet.get((0x0000, 0x1000))
tmp.DataSet = self.DataSet
return tmp
class C_FIND_RQ_Message(DIMSEMessage):
CommandFields = [
('Group Length',
(0x0000, 0x0000), 'UL', 1),
('Affected SOP Class UID',
(0x0000, 0x0002), 'UI', 1),
('Command Field',
(0x0000, 0x0100), 'US', 1),
('Message ID',
(0x0000, 0x0110), 'US', 1),
('Data Set Type',
(0x0000, 0x0800), 'US', 1),
('Priority',
(0x0000, 0x0700), 'US', 1),
]
DataField = 'Identifier'
def FromParams(self, params):
self.CommandSet[(0x0000, 0x0002)].value = params.AffectedSOPClassUID
self.CommandSet[(0x0000, 0x0100)].value = 0x0020
self.CommandSet[(0x0000, 0x0110)].value = params.MessageID
self.CommandSet[(0x0000, 0x0700)].value = params.Priority
self.CommandSet[(0x0000, 0x0800)].value = 0x0001
self.DataSet = params.Identifier
self.SetLength()
def ToParams(self):
tmp = C_FIND_ServiceParameters()
tmp.AffectedSOPClassUID = self.CommandSet.get((0x0000, 0x0002))
tmp.Priority = self.CommandSet.get((0x0000, 0x0700))
tmp.Identifier = self.DataSet
tmp.MessageID = self.CommandSet.get((0x0000, 0x0110))
return tmp
class C_FIND_RSP_Message(DIMSEMessage):
CommandFields = [
('Group Length',
(0x0000, 0x0000), 'UL', 1),
('Affected SOP Class UID',
(0x0000, 0x0002), 'UI', 1),
('Command Field',
(0x0000, 0x0100), 'US', 1),
('Message ID Being Responded To',
(0x0000, 0x0120), 'US', 1),
('Data Set Type',
(0x0000, 0x0800), 'US', 1),
('Status',
(0x0000, 0x0900), 'US', 1),
('Error Comment',
(0x0000, 0x0902), 'LO', 1),
]
DataField = 'Identifier'
def FromParams(self, params):
self.CommandSet[(0x0000, 0x0002)
].value = params.AffectedSOPClassUID.value
self.CommandSet[(0x0000, 0x0100)].value = 0x8020
self.CommandSet[(0x0000, 0x0120)
].value = params.MessageIDBeingRespondedTo.value
if not params.Identifier:
self.CommandSet[(0x0000, 0x0800)].value = 0x0101
else:
self.CommandSet[(0x0000, 0x0800)].value = 0x000
self.CommandSet[(0x0000, 0x0900)].value = params.Status
self.DataSet = params.Identifier
self.SetLength()
def ToParams(self):
tmp = C_FIND_ServiceParameters()
tmp.AffectedSOPClassUID = self.CommandSet.get((0x0000, 0x0002))
tmp.MessageIDBeingRespondedTo = self.CommandSet.get((0x0000, 0x0120))
tmp.Status = self.CommandSet.get((0x0000, 0x0900))
tmp.ErrorComment = self.CommandSet.get((0x0000, 0x0902))
tmp.Identifier = self.DataSet
return tmp
class C_GET_RQ_Message(DIMSEMessage):
CommandFields = [
('Group Length',
(0x0000, 0x0000), 'UL', 1),
('Affected SOP Class UID',
(0x0000, 0x0002), 'UI', 1),
('Command Field',
(0x0000, 0x0100), 'US', 1),
('Message ID',
(0x0000, 0x0110), 'US', 1),
('Priority',
(0x0000, 0x0700), 'US', 1),
('Data Set Type',
(0x0000, 0x0800), 'US', 1),
]
DataField = 'Identifier'
def FromParams(self, params):
self.CommandSet[(0x0000, 0x0002)].value = params.AffectedSOPClassUID
self.CommandSet[(0x0000, 0x0100)].value = 0x0010
self.CommandSet[(0x0000, 0x0110)].value = params.MessageID
self.CommandSet[(0x0000, 0x0700)].value = params.Priority
self.CommandSet[(0x0000, 0x0800)].value = 0x0001
self.DataSet = params.Identifier
self.SetLength()
def ToParams(self):
tmp = C_GET_ServiceParameters()
tmp.MessageID = self.CommandSet.get((0x0000, 0x0110))
tmp.AffectedSOPClassUID = self.CommandSet.get((0x0000, 0x0002))
tmp.Priority = self.CommandSet.get((0x0000, 0x0700))
tmp.Identifier = self.DataSet
return tmp
class C_GET_RSP_Message(DIMSEMessage):
CommandFields = [
('Group Length',
(0x0000, 0x0000), 'UL', 1),
('Affected SOP Class UID',
(0x0000, 0x0002), 'UI', 1),
('Command Field',
(0x0000, 0x0100), 'US', 1),
('Message ID Being Responded To',
(0x0000, 0x0120), 'US', 1),
('Data Set Type',
(0x0000, 0x0800), 'US', 1),
('Status',
(0x0000, 0x0900), 'US', 1),
('Number of Remaining Sub-operations',
(0x0000, 0x1020), 'US', 1),
('Number of Complete Sub-operations',
(0x0000, 0x1021), 'US', 1),
('Number of Failed Sub-operations', (0x0000, 0x1022), 'US', 1),
('Number of Warning Sub-operations',
(0x0000, 0x1023), 'US', 1),
('Error Comment',
(0x0000, 0x0902), 'LO', 1),
]
DataField = 'Identifier'
def FromParams(self, params):
self.CommandSet[(0x0000, 0x0002)].value = params.AffectedSOPClassUID
self.CommandSet[(0x0000, 0x0100)].value = 0x8010
self.CommandSet[(0x0000, 0x0120)
].value = params.MessageIDBeingRespondedTo
self.CommandSet[(0x0000, 0x0800)].value = 0x0101
self.CommandSet[(0x0000, 0x0900)].value = params.Status
self.CommandSet[(0x0000, 0x1020)
].value = params.NumberOfRemainingSubOperations
self.CommandSet[(0x0000, 0x1021)
].value = params.NumberOfCompletedSubOperations
self.CommandSet[(0x0000, 0x1022)
].value = params.NumberOfFailedSubOperations
self.CommandSet[(0x0000, 0x1023)
].value = params.NumberOfWarningSubOperations
self.SetLength()
def ToParams(self):
tmp = C_GET_ServiceParameters()
tmp.AffectedSOPClassUID = self.CommandSet.get((0x0000, 0x0002))
tmp.MessageIDBeingRespondedTo = self.CommandSet.get((0x0000, 0x0120))
tmp.Status = self.CommandSet.get((0x0000, 0x0900))
tmp.ErrorComment = self.CommandSet.get((0x0000, 0x0902))
tmp.NumberOfRemainingSubOperations = self.CommandSet.get( (0x0000, 0x1020))
tmp.NumberOfCompletedSubOperations = self.CommandSet.get((0x0000, 0x1021))
tmp.NumberOfFailedSubOperations = self.CommandSet.get((0x0000, 0x1022))
tmp.NumberOfWarningSubOperations = self.CommandSet.get((0x0000, 0x1023))
tmp.Identifier = self.DataSet
return tmp
class C_MOVE_RQ_Message(DIMSEMessage):
CommandFields = [
('Group Length',
(0x0000, 0x0000), 'UL', 1),
('Affected SOP Class UID',
(0x0000, 0x0002), 'UI', 1),
('Command Field',
(0x0000, 0x0100), 'US', 1),
('Message ID',
(0x0000, 0x0110), 'US', 1),
('Priority',
(0x0000, 0x0700), 'US', 1),
('Data Set Type',
(0x0000, 0x0800), 'US', 1),
('Move Destination',
(0x0000, 0x0600), 'AE', 1),
]
DataField = 'Identifier'
def FromParams(self, params):
self.CommandSet[(0x0000, 0x0002)].value = params.AffectedSOPClassUID
self.CommandSet[(0x0000, 0x0100)].value = 0x0021
self.CommandSet[(0x0000, 0x0110)].value = params.MessageID
self.CommandSet[(0x0000, 0x0700)].value = params.Priority
self.CommandSet[(0x0000, 0x0800)].value = 0x0001
self.CommandSet[(0x0000, 0x0600)].value = params.MoveDestination
self.DataSet = params.Identifier
self.SetLength()
def ToParams(self):
tmp = C_MOVE_ServiceParameters()
tmp.MessageID = self.CommandSet.get((0x0000, 0x0110))
tmp.AffectedSOPClassUID = self.CommandSet.get((0x0000, 0x0002))
tmp.Priority = self.CommandSet.get((0x0000, 0x0700))
tmp.MoveDestination = self.CommandSet.get((0x0000, 0x0600))
tmp.Identifier = self.DataSet
return tmp
class C_MOVE_RSP_Message(DIMSEMessage):
CommandFields = [
('Group Length',
(0x0000, 0x0000), 'UL', 1),
('Affected SOP Class UID',
(0x0000, 0x0002), 'UI', 1),
('Command Field',
(0x0000, 0x0100), 'US', 1),
('Message ID Being Responded To',
(0x0000, 0x0120), 'US', 1),
('Data Set Type',
(0x0000, 0x0800), 'US', 1),
('Status',
(0x0000, 0x0900), 'US', 1),
('Number of Remaining Sub-operations',
(0x0000, 0x1020), 'US', 1),
('Number of Complete Sub-operations',
(0x0000, 0x1021), 'US', 1),
('Number of Failed Sub-operations', (0x0000, 0x1022), 'US', 1),
('Number of Warning Sub-operations',
(0x0000, 0x1023), 'US', 1),
('Error Comment',
(0x0000, 0x0902), 'LO', 1),
]
DataField = 'Identifier'
def FromParams(self, params):
self.CommandSet[(0x0000, 0x0002)].value = params.AffectedSOPClassUID
self.CommandSet[(0x0000, 0x0100)].value = 0x8021
self.CommandSet[(0x0000, 0x0120)
].value = params.MessageIDBeingRespondedTo
self.CommandSet[(0x0000, 0x0800)].value = 0x0101
self.CommandSet[(0x0000, 0x0900)].value = params.Status
#self.CommandSet[(0x0000, 0x0902)].value = params.ErrorComment -- TODO?
self.CommandSet[(0x0000, 0x1020)
].value = params.NumberOfRemainingSubOperations
self.CommandSet[(0x0000, 0x1021)
].value = params.NumberOfCompletedSubOperations
self.CommandSet[(0x0000, 0x1022)
].value = params.NumberOfFailedSubOperations
self.CommandSet[(0x0000, 0x1023)
].value = params.NumberOfWarningSubOperations
self.SetLength()
def ToParams(self):
tmp = C_MOVE_ServiceParameters()
tmp.AffectedSOPClassUID = self.CommandSet.get((0x0000, 0x0002))
tmp.MessageIDBeingRespondedTo = self.CommandSet.get((0x0000, 0x0120))
tmp.Status = self.CommandSet.get((0x0000, 0x0900))
tmp.ErrorComment = self.CommandSet.get((0x0000, 0x0902))
tmp.NumberOfRemainingSubOperations = self.CommandSet.get((0x0000, 0x1020))
tmp.NumberOfCompletedSubOperations = self.CommandSet.get((0x0000, 0x1021))
tmp.NumberOfFailedSubOperations = self.CommandSet.get((0x0000, 0x1022))
tmp.NumberOfWarningSubOperations = self.CommandSet.get((0x0000, 0x1023))
tmp.Identifier = self.DataSet
return tmp
class C_CANCEL_RQ_Message(DIMSEMessage):
CommandFields = [
('Group Length',
(0x0000, 0x0000), 'UL', 1),
('Command Field',
(0x0000, 0x0100), 'US', 1),
('Message ID Being Responded To',
(0x0000, 0x0120), 'US', 1),
('Data Set Type',
(0x0000, 0x0800), 'US', 1),
]
DataField = 'Identifier'
def FromParams(self, params):
self.CommandSet[(0x0000, 0x0100)].value = 0x0FFF
self.CommandSet[(0x0000, 0x0120)
].value = params.MessageIDBeingRespondedTo
self.CommandSet[(0x0000, 0x0800)].value = 0x0101
self.SetLength()
class C_CANCEL_FIND_RQ_Message(C_CANCEL_RQ_Message):
def ToParams(self):
        tmp = C_FIND_ServiceParameters()
tmp.MessageIDBeingRespondedTo = self.CommandSet.get((0x0000, 0x0120))
return tmp
class C_CANCEL_GET_RQ_Message(C_CANCEL_RQ_Message):
def ToParams(self):
        tmp = C_GET_ServiceParameters()
tmp.MessageIDBeingRespondedTo = self.CommandSet.get((0x0000, 0x0120))
return tmp
class C_CANCEL_MOVE_RQ_Message(C_CANCEL_RQ_Message):
def ToParams(self):
        tmp = C_MOVE_ServiceParameters()
tmp.MessageIDBeingRespondedTo = self.CommandSet.get((0x0000, 0x0120))
return tmp
MessageType = {
0x0001: C_STORE_RQ_Message,
0x8001: C_STORE_RSP_Message,
0x0020: C_FIND_RQ_Message,
0x8020: C_FIND_RSP_Message,
0x0FFF: C_CANCEL_RQ_Message,
0x0010: C_GET_RQ_Message,
0x8010: C_GET_RSP_Message,
0x0021: C_MOVE_RQ_Message,
0x8021: C_MOVE_RSP_Message,
0x0030: C_ECHO_RQ_Message,
0x8030: C_ECHO_RSP_Message
}
if __name__ == '__main__':
c = C_ECHO_ServiceParameters()
c.MessageID = 0
c.AffectedSOPClassUID = '12.1232.23.123.231.'
C_ECHO_msg = C_ECHO_RQ_Message()
C_ECHO_msg.FromParams(c)
print C_ECHO_msg
print C_ECHO_msg.ToParams()
print C_ECHO_msg.Encode(1, 100)
|
|
# --------------------------------------------------------
# Fast R-CNN
# Copyright (c) 2015 Microsoft
# Licensed under The MIT License [see LICENSE for details]
# Written by Ross Girshick
# --------------------------------------------------------
from __future__ import print_function
import os, pdb
import xml.dom.minidom as minidom
import numpy as np
import scipy.sparse
import scipy.io as sio
import pickle as cp
import subprocess
from .imdb import imdb
from .voc_eval import voc_eval
#from fastRCNN.imdb import imdb
#from fastRCNN.voc_eval import voc_eval
class pascal_voc(imdb):
def __init__(self, image_set, year, classes, maxNrRois, cacheDir, devkit_path=None):
imdb.__init__(self, 'voc_' + year + '_' + image_set)
self._year = year
self._image_set = image_set
self._maxNrRois = maxNrRois
self._ROOT_DIR = os.path.join(os.path.dirname(__file__), '..')
self._cacheDir = cacheDir
self._devkit_path = self._get_default_path() if devkit_path is None \
else os.path.join(devkit_path, 'VOCdevkit')
self._data_path = os.path.join(self._devkit_path, 'VOC' + self._year)
self._classes = classes
#('__background__', # always index 0
# 'aeroplane', 'bicycle', 'bird', 'boat',
# 'bottle', 'bus', 'car', 'cat', 'chair',
# 'cow', 'diningtable', 'dog', 'horse',
# 'motorbike', 'person', 'pottedplant',
# 'sheep', 'sofa', 'train', 'tvmonitor')
self._class_to_ind = dict(zip(self.classes, range(self.num_classes)))
self._image_ext = '.jpg'
self._image_index = self._load_image_set_index()
# Default to roidb handler
self._roidb_handler = self.selective_search_roidb
# PASCAL specific config options
self.config = {'cleanup' : True,
'use_salt' : True,
'top_k' : 2000}
assert os.path.exists(self._devkit_path), \
'VOCdevkit path does not exist: {}'.format(self._devkit_path)
assert os.path.exists(self._data_path), \
'Path does not exist: {}'.format(self._data_path)
@property
def cache_path(self):
cache_path = self._cacheDir
#cache_path = osp.abspath(osp.join(datasets.ROOT_DIR, 'data', 'cache'))
if not os.path.exists(cache_path):
os.makedirs(cache_path)
return cache_path
def image_path_at(self, i):
"""
Return the absolute path to image i in the image sequence.
"""
return self.image_path_from_index(self._image_index[i])
def image_path_from_index(self, index):
"""
Construct an image path from the image's "index" identifier.
"""
image_path = os.path.join(self._data_path, 'JPEGImages',
index + self._image_ext)
assert os.path.exists(image_path), \
'Path does not exist: {}'.format(image_path)
return image_path
def _load_image_set_index(self):
"""
Load the indexes listed in this dataset's image set file.
"""
# Example path to image set file:
# self._devkit_path + /VOCdevkit/VOC2007/ImageSets/Main/val.txt
image_set_file = os.path.join(self._data_path, 'ImageSets', 'Main',
self._image_set + '.txt')
assert os.path.exists(image_set_file), \
'Path does not exist: {}'.format(image_set_file)
with open(image_set_file) as f:
image_index = [x.strip() for x in f.readlines()]
return image_index
def _get_default_path(self):
"""
Return the default path where PASCAL VOC is expected to be installed.
"""
return os.path.join(self._ROOT_DIR, 'data', 'pascalVoc', 'VOCdevkit')
def gt_roidb(self):
"""
Return the database of ground-truth regions of interest.
This function loads/saves from/to a cache file to speed up future calls.
"""
cache_file = os.path.join(self.cache_path, self.name + '_gt_roidb.pkl')
if os.path.exists(cache_file):
with open(cache_file, 'rb') as fid:
roidb = cp.load(fid)
print ('{} gt roidb loaded from {}'.format(self.name, cache_file))
return roidb
gt_roidb = [self._load_pascal_annotation(index)
for index in self.image_index]
with open(cache_file, 'wb') as fid:
cp.dump(gt_roidb, fid, cp.HIGHEST_PROTOCOL)
print ('wrote gt roidb to {}'.format(cache_file))
return gt_roidb
def selective_search_roidb(self):
"""
Return the database of selective search regions of interest.
Ground-truth ROIs are also included.
This function loads/saves from/to a cache file to speed up future calls.
"""
cache_file = os.path.join(self.cache_path,
self.name + '_selective_search_roidb.pkl')
if os.path.exists(cache_file):
with open(cache_file, 'rb') as fid:
roidb = cp.load(fid, encoding='latin1')
print ('{} ss roidb loaded from {}'.format(self.name, cache_file))
return roidb
if int(self._year) == 2007 or not self._image_set.startswith('test'):
gt_roidb = self.gt_roidb()
ss_roidb = self._load_selective_search_roidb(gt_roidb)
roidb = imdb.merge_roidbs(gt_roidb, ss_roidb)
else:
roidb = self._load_selective_search_roidb(None)
# Keep max of e.g. 2000 rois
if type(self._maxNrRois) == int:
print ("Only keep the first %d ROIs..." % self._maxNrRois)
for i in range(self.num_images):
gt_overlaps = roidb[i]['gt_overlaps']
gt_overlaps = gt_overlaps.todense()[:self._maxNrRois]
gt_overlaps = scipy.sparse.csr_matrix(gt_overlaps)
roidb[i]['boxes'] = roidb[i]['boxes'][:self._maxNrRois, :]
roidb[i]['gt_classes'] = roidb[i]['gt_classes'][:self._maxNrRois]
                roidb[i]['gt_overlaps'] = gt_overlaps
with open(cache_file, 'wb') as fid:
cp.dump(roidb, fid, cp.HIGHEST_PROTOCOL)
print ('wrote ss roidb to {}'.format(cache_file))
return roidb
def _load_selective_search_roidb(self, gt_roidb):
filename = os.path.abspath(os.path.join(self._devkit_path, '..',
'selective_search_data',
self.name + '.mat'))
assert os.path.exists(filename), \
'Selective search data not found at: {}'.format(filename)
raw_data = sio.loadmat(filename)['boxes'].ravel()
box_list = []
        for i in range(raw_data.shape[0]):
            # boxes in the .mat file are 1-based (y1, x1, y2, x2); reorder to
            # (x1, y1, x2, y2) and convert to 0-based pixel indexes
            box_list.append(raw_data[i][:, (1, 0, 3, 2)] - 1)
return self.create_roidb_from_box_list(box_list, gt_roidb)
def selective_search_IJCV_roidb(self):
"""
Return the database of selective search regions of interest.
Ground-truth ROIs are also included.
This function loads/saves from/to a cache file to speed up future calls.
"""
cache_file = os.path.join(self.cache_path,
'{:s}_selective_search_IJCV_top_{:d}_roidb.pkl'.
format(self.name, self.config['top_k']))
if os.path.exists(cache_file):
with open(cache_file, 'rb') as fid:
roidb = cp.load(fid)
print ('{} ss roidb loaded from {}'.format(self.name, cache_file))
return roidb
gt_roidb = self.gt_roidb()
ss_roidb = self._load_selective_search_IJCV_roidb(gt_roidb)
roidb = imdb.merge_roidbs(gt_roidb, ss_roidb)
with open(cache_file, 'wb') as fid:
cp.dump(roidb, fid, cp.HIGHEST_PROTOCOL)
print ('wrote ss roidb to {}'.format(cache_file))
return roidb
def _load_selective_search_IJCV_roidb(self, gt_roidb):
IJCV_path = os.path.abspath(os.path.join(self.cache_path, '..',
'selective_search_IJCV_data',
'voc_' + self._year))
assert os.path.exists(IJCV_path), \
'Selective search IJCV data not found at: {}'.format(IJCV_path)
top_k = self.config['top_k']
box_list = []
for i in range(self.num_images):
filename = os.path.join(IJCV_path, self.image_index[i] + '.mat')
raw_data = sio.loadmat(filename)
box_list.append((raw_data['boxes'][:top_k, :]-1).astype(np.uint16))
return self.create_roidb_from_box_list(box_list, gt_roidb)
def _load_pascal_annotation(self, index):
"""
Load image and bounding boxes info from XML file in the PASCAL VOC
format.
"""
filename = os.path.join(self._data_path, 'Annotations', index + '.xml')
# print ('Loading: {}'.format(filename))
def get_data_from_tag(node, tag):
return node.getElementsByTagName(tag)[0].childNodes[0].data
with open(filename) as f:
data = minidom.parseString(f.read())
objs = data.getElementsByTagName('object')
num_objs = len(objs)
boxes = np.zeros((num_objs, 4), dtype=np.uint16)
gt_classes = np.zeros((num_objs), dtype=np.int32)
overlaps = np.zeros((num_objs, self.num_classes), dtype=np.float32)
# Load object bounding boxes into a data frame.
for ix, obj in enumerate(objs):
# Make pixel indexes 0-based
x1 = float(get_data_from_tag(obj, 'xmin')) - 1
y1 = float(get_data_from_tag(obj, 'ymin')) - 1
x2 = float(get_data_from_tag(obj, 'xmax')) - 1
y2 = float(get_data_from_tag(obj, 'ymax')) - 1
cls = self._class_to_ind[
str(get_data_from_tag(obj, "name")).lower().strip()]
boxes[ix, :] = [x1, y1, x2, y2]
gt_classes[ix] = cls
overlaps[ix, cls] = 1.0
overlaps = scipy.sparse.csr_matrix(overlaps)
return {'boxes' : boxes,
'gt_classes': gt_classes,
'gt_overlaps' : overlaps,
'flipped' : False}
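    # For reference (comment added for clarity; the exact files are dataset
    # dependent): each Annotations/<index>.xml follows the standard PASCAL VOC
    # layout, e.g.
    #
    #     <annotation>
    #       <object>
    #         <name>dog</name>
    #         <bndbox><xmin>48</xmin><ymin>240</ymin><xmax>195</xmax><ymax>371</ymax></bndbox>
    #       </object>
    #     </annotation>
    #
    # which is why _load_pascal_annotation reads the 'name' tag and the four
    # bndbox coordinates (converted above to 0-based pixel indexes).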
def _write_voc_results_file(self, all_boxes, output_dir):
comp_id = 'comp4'
if self.config['use_salt']:
comp_id += '-{}'.format(os.getpid())
for cls_ind, cls in enumerate(self.classes):
if cls == '__background__':
continue
print ('Writing {} VOC results file'.format(cls))
filename = self._get_voc_results_file_template(output_dir).format(cls)
with open(filename, 'wt') as f:
for im_ind, index in enumerate(self.image_index):
dets = all_boxes[cls_ind][im_ind]
                    if len(dets) == 0:
continue
# the VOCdevkit expects 1-based indices
for k in range(dets.shape[0]):
f.write('{:s} {:.3f} {:.1f} {:.1f} {:.1f} {:.1f}\n'.
format(index, dets[k, -1],
dets[k, 0] + 1, dets[k, 1] + 1,
dets[k, 2] + 1, dets[k, 3] + 1))
return comp_id
def evaluate_detections(self, all_boxes, output_dir, boUsePythonImpl = True, use_07_metric = False):
        comp_id = self._write_voc_results_file(all_boxes, output_dir)
if not boUsePythonImpl:
self._do_matlab_eval(comp_id, output_dir)
else:
self._do_python_eval(output_dir, use_07_metric)
def _do_matlab_eval(self, comp_id, output_dir='output'):
rm_results = self.config['cleanup']
path = os.path.join(os.path.dirname(__file__),
'VOCdevkit-matlab-wrapper')
cmd = 'cd {} && '.format(path)
cmd += '{:s} -nodisplay -nodesktop '.format(datasets.MATLAB)
cmd += '-r "dbstop if error; '
cmd += 'voc_eval(\'{:s}\',\'{:s}\',\'{:s}\',\'{:s}\',{:d}); quit;"' \
.format(self._devkit_path, comp_id,
self._image_set, output_dir, int(rm_results))
print('Running:\n{}'.format(cmd))
status = subprocess.call(cmd, shell=True)
def competition_mode(self, on):
if on:
self.config['use_salt'] = False
self.config['cleanup'] = False
else:
self.config['use_salt'] = True
self.config['cleanup'] = True
#########################################################################
# Python evaluation functions (copied from faster-RCNN)
##########################################################################
def _get_voc_results_file_template(self, evalDir):
if not os.path.exists(evalDir):
os.makedirs(evalDir)
filename = self._image_set + '_{:s}.txt'
return os.path.join(evalDir, filename)
def _do_python_eval(self, output_dir='output', use_07_metric=None):
annopath = os.path.join(self._devkit_path, 'VOC' + self._year, 'Annotations', '{}.xml')
imagesetfile = os.path.join(
self._devkit_path,
'VOC' + self._year,
'ImageSets',
'Main',
self._image_set + '.txt')
aps = []
# The PASCAL VOC metric changed in 2010
        if use_07_metric is None:
            use_07_metric = int(self._year) < 2010
print ('VOC07 metric? ' + ('Yes' if use_07_metric else 'No'))
if not os.path.isdir(output_dir):
os.mkdir(output_dir)
for i, cls in enumerate(self._classes):
if cls == '__background__':
continue
filename = self._get_voc_results_file_template(output_dir).format(cls)
rec, prec, ap = voc_eval(
filename, annopath, imagesetfile, cls, cachedir = output_dir, ovthresh=0.5,
use_07_metric=use_07_metric)
aps += [ap]
print('AP for {} = {:.4f}'.format(cls, ap))
with open(os.path.join(output_dir, cls + '_pr.pkl'), 'wb') as f:
cp.dump({'rec': rec, 'prec': prec, 'ap': ap}, f)
print('Mean AP = {:.4f}'.format(np.mean(aps)))
# print('~~~~~~~~')
# print('Results:')
# for ap in aps:
# print('{:.3f}'.format(ap))
# print('{:.3f}'.format(np.mean(aps)))
# print('~~~~~~~~')
# print('')
print('--------------------------------------------------------------')
print('Results computed with the **unofficial** Python eval code.')
print('Results should be very close to the official MATLAB eval code.')
print('Recompute with `./tools/reval.py --matlab ...` for your paper.')
print('-- Thanks, The Management')
print('--------------------------------------------------------------')
if __name__ == '__main__':
d = datasets.pascal_voc('trainval', '2007')
res = d.roidb
from IPython import embed; embed()
|
|
import time
import numpy as np
from scipy.integrate import odeint
from scipy.interpolate import interp1d
from . import util as UT
from . import sfh as SFH
def Evolve(shcat, theta, testing=False):
    ''' Evolve the subhalo catalog `shcat` from snapshot nsnap0 down to the
    final snapshot: select star-forming subhalos, assign their star formation
    histories, and integrate their stellar masses and SFRs given `theta`.
    '''
# meta data
nsnap0 = shcat['metadata']['nsnap0']
ngal = len(shcat['m.sham'])
shcat = initSF(shcat, theta) # get SF halos
isSF = np.arange(ngal)[shcat['galtype'] == 'sf']
# initiate logSFR(logM, z) function and keywords
logSFR_logM_z, sfr_kwargs = SFH.logSFR_initiate(shcat, isSF,
theta_sfh=theta['sfh'], theta_sfms=theta['sfms'], testing=testing)
# get integrated stellar masses
logM_integ, logSFRs = _MassSFR_Wrapper(
shcat,
nsnap0,
1,
isSF=isSF,
logSFR_logM_z=logSFR_logM_z,
sfr_kwargs=sfr_kwargs,
theta_sfh=theta['sfh'],
theta_sfms=theta['sfms'],
theta_mass=theta['mass'])
shcat['m.star'] = logM_integ[:,-1] # nsnap = 1
shcat['sfr'] = logSFRs
for ii, n_snap in enumerate(range(2, nsnap0)[::-1]):
isSF_i = np.where(shcat['nsnap_start'][isSF] == n_snap)[0]
shcat['m.star.snap'+str(n_snap)] = logM_integ[:,ii]
shcat['sfr.snap'+str(n_snap)] = np.repeat(-999., len(logM_integ[:,ii]))
# assign M*0 and SFR0
shcat['m.star.snap'+str(n_snap)][isSF[isSF_i]] = shcat['m.star0'][isSF[isSF_i]]
shcat['sfr.snap'+str(n_snap)][isSF[isSF_i]] = shcat['sfr0'][isSF[isSF_i]]
for ii, n_snap in enumerate(range(2, nsnap0)[::-1]):
isSF_i = np.where(shcat['nsnap_start'][isSF] >= n_snap)[0]
sfr_tmp = logSFR_logM_z(
shcat['m.star.snap'+str(n_snap)][isSF],
UT.z_nsnap(n_snap),
**sfr_kwargs)
shcat['sfr.snap'+str(n_snap)][isSF[isSF_i]] = sfr_tmp[isSF_i]
    # for galaxies that are not star-forming, M* at the final snapshot is just their SHAM M*
shcat['m.star'][shcat['galtype'] != 'sf'] = shcat['m.sham'][shcat['galtype'] != 'sf']
if testing and 'tsteps' in sfr_kwargs.keys():
return shcat, sfr_kwargs['tsteps'], sfr_kwargs['dlogSFR_amp']
else:
return shcat
def initSF(shcat, theta):
'''
Initialize the "star forming" subhalos. Select
"star-forming" subhalos at z~0 using input f_SFMS(M_SHAM).
Assumptions:
- f_SFMS does not depend on other subhalo properties.
- SF galaxies at z~0 have remained on the SFMS since z > 1
'''
# meta data
nsnap0 = shcat['metadata']['nsnap0']
ngal = len(shcat['m.sham'])
    # pick SF subhalos based on f_SFMS(M_SHAM) at snapshot 1
f_sfs = Fsfms(shcat['m.sham'])
f_sfs = np.clip(f_sfs, 0., 1.)
rand = np.random.uniform(0., 1., ngal)
isSF = (rand < f_sfs)
shcat['sfr0'] = np.repeat(-999., ngal) # assign initial SFRs
dsfr0 = theta['sfms']['sigma'] * np.random.randn(np.sum(isSF))
shcat['sfr0'][isSF] = SFH.SFR_sfms(
shcat['m.star0'][isSF], # Mstar
UT.z_nsnap(shcat['nsnap_start'][isSF]), # redshift
theta['sfms'] # theta of SFMS
) + dsfr0
shcat['galtype'] = UT.replicate('', ngal)
shcat['galtype'][isSF] = 'sf'
return shcat
def _MassSFR_Wrapper(SHcat, nsnap0, nsnapf, isSF=None, logSFR_logM_z=None, sfr_kwargs=None, **theta):
''' Evolve galaxies that remain star-forming throughout the snapshots.
'''
# parse theta
theta_mass = theta['theta_mass']
theta_sfh = theta['theta_sfh']
theta_sfms = theta['theta_sfms']
# precompute z(t_cosmic)
z_table, t_table = UT.zt_table()
#z_of_t = interp1d(t_table, z_table, kind='cubic')
z_of_t = lambda tt: UT.z_of_t(tt, deg=6)
# now solve M*, SFR ODE
dlogmdt_kwargs = {}
dlogmdt_kwargs['logsfr_M_z'] = logSFR_logM_z
dlogmdt_kwargs['f_retain'] = theta_mass['f_retain']
dlogmdt_kwargs['zoft'] = z_of_t
# choose ODE solver
if theta_mass['solver'] == 'euler': # Forward euler
f_ode = SFH.ODE_Euler
elif theta_mass['solver'] == 'scipy': # scipy ODE solver
f_ode = odeint
else:
        raise ValueError("theta['mass']['solver'] must be 'euler' or 'scipy'")
logM_integ = np.tile(-999., (len(SHcat['galtype']), nsnap0 - nsnapf))
dlogmdt_kwarg_list = []
for nn in range(nsnapf+1, nsnap0+1)[::-1]:
# starts at n_snap = nn
isStart = np.where(SHcat['nsnap_start'][isSF] == nn)
if theta_mass['solver'] != 'scipy':
dlogmdt_kwarg = dlogmdt_kwargs.copy()
for k in sfr_kwargs.keys():
if isinstance(sfr_kwargs[k], np.ndarray):
dlogmdt_kwarg[k] = sfr_kwargs[k][isStart]
else:
dlogmdt_kwarg[k] = sfr_kwargs[k]
dlogmdt_kwarg_list.append(dlogmdt_kwarg)
del dlogmdt_kwarg
else:
sfr_kwarg = {}
for k in sfr_kwargs.keys():
if isinstance(sfr_kwargs[k], np.ndarray):
sfr_kwarg[k] = sfr_kwargs[k][isStart]
else:
sfr_kwarg[k] = sfr_kwargs[k]
dlogmdt_arg = (
dlogmdt_kwargs['logsfr_M_z'],
dlogmdt_kwargs['f_retain'],
dlogmdt_kwargs['zoft'],
sfr_kwarg
)
dlogmdt_kwarg_list.append(dlogmdt_arg)
del dlogmdt_arg
#t_s = time.time()
for i_n, nn in enumerate(range(nsnapf+1, nsnap0+1)[::-1]):
# starts at n_snap = nn
isStart = np.where(SHcat['nsnap_start'][isSF] == nn)
if theta_mass['solver'] != 'scipy':
tmp_logM_integ = f_ode(
SFH.dlogMdt, # dy/dt
SHcat['m.star0'][isSF[isStart]], # logM0
t_table[nsnapf:nn+1][::-1], # t_final
theta_mass['t_step'], # time step
**dlogmdt_kwarg_list[i_n])
else:
print('==================================')
print('===========SCIPY ODEINT===========')
tmp_logM_integ = f_ode(
SFH.dlogMdt_scipy, # dy/dt
SHcat['m.star0'][isSF[isStart]], # logM0
t_table[nsnapf:nn+1][::-1], # t_final
args=dlogmdt_kwarg_list[i_n])
logM_integ[isSF[isStart], nsnap0-nn:] = tmp_logM_integ.T[:,1:]
isStart = np.where(SHcat['nsnap_start'][isSF] == 1)
logM_integ[isSF[isStart], -1] = SHcat['m.star0'][isSF[isStart]]
#print time.time() - t_s
# log(SFR) @ nsnapf
logSFRs = np.repeat(-999., len(SHcat['galtype']))
logSFRs[isSF] = logSFR_logM_z(logM_integ[isSF, -1], UT.z_nsnap(nsnapf), **sfr_kwargs)
return logM_integ, logSFRs
def _MassSFR_tarr(SHcat, nsnap0, tarr, isSF=None, logSFR_logM_z=None, sfr_kwargs=None, **theta):
''' Evolve galaxies that remain star-forming throughout the snapshots.
'''
# parse theta
theta_mass = theta['theta_mass']
theta_sfh = theta['theta_sfh']
theta_sfms = theta['theta_sfms']
# precompute z(t_cosmic)
z_table, t_table = UT.zt_table()
z_of_t = lambda tt: UT.z_of_t(tt, deg=6)
nsnapf = 1
# now solve M*, SFR ODE
dlogmdt_kwargs = {}
dlogmdt_kwargs['logsfr_M_z'] = logSFR_logM_z
dlogmdt_kwargs['f_retain'] = theta_mass['f_retain']
dlogmdt_kwargs['zoft'] = z_of_t
# choose ODE solver
if theta_mass['solver'] == 'euler': # Forward euler
f_ode = SFH.ODE_Euler
elif theta_mass['solver'] == 'scipy': # scipy ODE solver
f_ode = odeint
else:
        raise ValueError("theta['mass']['solver'] must be 'euler' or 'scipy'")
logM_integ = np.tile(-999., (len(SHcat['galtype']), len(tarr)))
dlogmdt_kwarg_list = []
for nn in range(nsnapf+1, nsnap0+1)[::-1]:
# starts at n_snap = nn
isStart = np.where(SHcat['nsnap_start'][isSF] == nn)
if theta_mass['solver'] != 'scipy':
dlogmdt_kwarg = dlogmdt_kwargs.copy()
for k in sfr_kwargs.keys():
if isinstance(sfr_kwargs[k], np.ndarray):
dlogmdt_kwarg[k] = sfr_kwargs[k][isStart]
else:
dlogmdt_kwarg[k] = sfr_kwargs[k]
dlogmdt_kwarg_list.append(dlogmdt_kwarg)
del dlogmdt_kwarg
else:
sfr_kwarg = {}
for k in sfr_kwargs.keys():
if isinstance(sfr_kwargs[k], np.ndarray):
sfr_kwarg[k] = sfr_kwargs[k][isStart]
else:
sfr_kwarg[k] = sfr_kwargs[k]
dlogmdt_arg = (
dlogmdt_kwargs['logsfr_M_z'],
dlogmdt_kwargs['f_retain'],
dlogmdt_kwargs['zoft'],
sfr_kwarg
)
dlogmdt_kwarg_list.append(dlogmdt_arg)
del dlogmdt_arg
#t_s = time.time()
for i_n, nn in enumerate(range(nsnapf+1, nsnap0+1)[::-1]):
# starts at n_snap = nn
isStart = np.where(SHcat['nsnap_start'][isSF] == nn)
tsnap = UT.t_nsnap(nn)
if theta_mass['solver'] != 'scipy':
tmp_logM_integ = f_ode(
SFH.dlogMdt, # dy/dt
SHcat['m.star0'][isSF[isStart]], # logM0
tarr[tarr > tsnap], # t_final
theta_mass['t_step'], # time step
**dlogmdt_kwarg_list[i_n])
else:
print('==================================')
print('===========SCIPY ODEINT===========')
tmp_logM_integ = f_ode(
SFH.dlogMdt_scipy, # dy/dt
SHcat['m.star0'][isSF[isStart]], # logM0
tarr[tarr > tsnap], # t_final
args=dlogmdt_kwarg_list[i_n])
ii = np.arange(len(tarr))[tarr > tsnap].min()
logM_integ[isSF[isStart],ii+1:] = tmp_logM_integ.T[:,1:].copy()
isStart = np.where(SHcat['nsnap_start'][isSF] == 1)
logM_integ[isSF[isStart], -1] = SHcat['m.star0'][isSF[isStart]]
#print time.time() - t_s
# log(SFR) @ nsnapf
logSFRs = np.repeat(-999., len(SHcat['galtype']))
logSFRs[isSF] = logSFR_logM_z(logM_integ[isSF, -1], UT.z_nsnap(nsnapf), **sfr_kwargs)
return logM_integ, logSFRs
def defaultTheta(sfh):
''' Return generic default parameter values
'''
theta = {}
theta['gv'] = {'slope': 1.03, 'fidmass': 10.5, 'offset': -0.02}
theta['sfms'] = {'name': 'flex', 'zslope': 1.05, 'mslope':0.58, 'offset': -0.1, 'sigma': 0.3}
theta['fq'] = {'name': 'cosmos_tinker'}
theta['fpq'] = {'slope': -2.079703, 'offset': 1.6153725, 'fidmass': 10.5}
theta['mass'] = {'solver': 'euler', 'f_retain': 0.6, 't_step': 0.05}
theta['sfh'] = {'name': sfh}
if sfh == 'constant_offset':
theta['sfh']['nsnap0'] = 15
elif sfh == 'corr_constant_offset':
theta['sfh']['m.kind'] = 'm.star'
theta['sfh']['dm.kind'] = 0.01
theta['sfh']['sig_abias'] = 0.3
elif sfh == 'random_step':
theta['sfh']['dt_min'] = 0.5
theta['sfh']['dt_max'] = 0.5
theta['sfh']['sigma'] = 0.3
elif sfh == 'random_step_fluct':
theta['sfh']['dt_min'] = 0.5
theta['sfh']['dt_max'] = 0.5
theta['sfh']['sigma'] = 0.3
elif sfh == 'random_step_abias':
theta['sfh']['dt_min'] = 0.25
theta['sfh']['dt_max'] = 0.25
theta['sfh']['sigma_tot'] = 0.3
theta['sfh']['sigma_corr'] = 0.29
elif sfh == 'random_step_abias2':
theta['sfh']['dt_min'] = 0.5
theta['sfh']['dt_max'] = 0.5
theta['sfh']['t_abias'] = 2. # Gyr
theta['sfh']['sigma_tot'] = 0.3
theta['sfh']['sigma_corr'] = 0.29
elif sfh == 'random_step_abias_delay':
theta['sfh']['dt_min'] = 0.5
theta['sfh']['dt_max'] = 0.5
theta['sfh']['sigma_tot'] = 0.3
theta['sfh']['sigma_corr'] = 0.2
theta['sfh']['dt_delay'] = 1. # Gyr
theta['sfh']['dz_dMh'] = 0.5
else:
raise NotImplementedError
return theta
def Fsfms(mm):
''' Star Formation Main Sequence fraction as a function of log M*.
See paper.py to see how f_SFMS was estimated for each stellar mass
from the SDSS Group Catalog.
'''
return -0.634 * mm + 6.898
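# Worked example (added comment, not in the original): at log M* = 10.5 the
# linear fit gives f_SFMS = -0.634 * 10.5 + 6.898 ~ 0.24, i.e. roughly a
# quarter of subhalos at that mass are tagged star-forming; below
# log M* ~ 9.3 the fit exceeds 1 and is clipped by np.clip in initSF.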
|