#! /usr/bin/env python2
"""
Copyright 2018 ARM Limited
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from __future__ import print_function, division, absolute_import
import sys
import os
import re
from os.path import abspath, join, dirname, relpath, sep
import json
import traceback
from fnmatch import translate
from argparse import ArgumentParser
ROOT = abspath(join(dirname(__file__), '..'))
sys.path.insert(0, ROOT)
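# Make the repository root importable so the tools.* modules below resolve.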
from tools.config import ConfigException
from tools.utils import cmd, run_cmd
plugins_path = abspath(join(ROOT, 'TEST_APPS', 'icetea_plugins', 'plugins_to_load.py'))
def find_build_from_build_data(build_data, id, target, toolchain):
if 'builds' not in build_data:
raise Exception("build data is in wrong format, does not include builds object")
for build in build_data['builds']:
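# A build matches when the requested id is a substring of the build id, the
# target and toolchain names agree (case-insensitively), and the result is OK.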
if 'id' in build.keys() \
and id.upper() in build['id'].upper() \
and 'target_name' in build.keys() \
and target.upper() == build['target_name'].upper() \
and 'toolchain_name' in build.keys() \
and toolchain.upper() == build['toolchain_name'].upper() \
and 'result' in build.keys() \
and "OK" == build['result']:
return build
return None
def create_test_suite(target, tool, icetea_json_output, build_data, tests_by_name):
"""
Create test suite content
:param target:
:param tool:
:param icetea_json_output:
:param build_data:
:param tests_by_name:
:return:
"""
test_suite = dict()
test_suite['testcases'] = list()
for test in icetea_json_output:
skip = False
for dut in test['requirements']['duts'].values():
# Set binary path based on application name
if 'application' in dut.keys() and 'name' in dut['application'].keys():
build = find_build_from_build_data(
build_data=build_data,
id=dut['application']['name'],
target=target,
toolchain=tool)
if build:
try:
dut['application']['bin'] = build['bin_fullpath']
except KeyError:
raise Exception('Full path is missing from build: {}'.format(build))
else:
skip = True
if not tests_by_name or is_test_in_test_by_name(test['name'], tests_by_name):
test_case = {
'name': test['name'],
'config': {
'requirements': set_allowed_platform(test['requirements'], target)
}
}
# Skip test if no binary path was found
if skip:
test_case['config']['execution'] = {
'skip': {
'value': True,
'reason': "Test requiring application binary not build"
}
}
test_suite['testcases'].append(test_case)
return test_suite
def set_allowed_platform(requirements, target):
"""
Allowed platform restricts icetea to running tests on a specific board.
This targets tests to the right board in case the user has multiple boards connected at the same time.
"""
if '*' not in requirements['duts'].keys():
requirements['duts']['*'] = dict()
requirements['duts']['*']['allowed_platforms'] = [target]
return requirements
def get_applications(test):
ret = list()
for dut in test['requirements']['duts'].values():
if 'application' in dut.keys() and 'name' in dut['application'].keys():
ret.append(dut['application']['name'])
return ret
def filter_test_by_build_data(icetea_json_output, build_data, target, toolchain):
if not build_data:
return icetea_json_output
ret = list()
for test in icetea_json_output:
for dut in test['requirements']['duts'].values():
if 'application' in dut.keys() and 'name' in dut['application'].keys():
id = dut['application']['name']
if find_build_from_build_data(build_data, id, target, toolchain):
# Test requiring build found
ret.append(test)
return ret
def filter_test_by_name(icetea_json_output, test_by_name):
if not test_by_name:
return icetea_json_output
ret = list()
for test_temp in icetea_json_output:
if is_test_in_test_by_name(test_temp['name'], test_by_name) and test_temp not in ret:
ret.append(test_temp)
return ret
def get_applications_from_test(test):
ret = list()
if u'requirements' in test.keys() and u'duts' in test[u'requirements']:
for name, dut in test[u'requirements'][u'duts'].items():
if u'application' in dut.keys() and u'name' in dut[u'application']:
ret.append(dut[u'application'][u'name'])
return ret
def get_application_list(icetea_json_output, tests_by_name):
""" Return comma separated list of application which are used in tests """
ret = list()
for test in filter_test_by_name(icetea_json_output, tests_by_name):
ret.extend(get_applications_from_test(test))
# Remove duplicates
return list(set(ret))
def icetea_tests(target, tcdir, verbose):
command = ['icetea', '--tcdir', tcdir, '--list', '--json', '--platform_filter', target] \
+ (['-v'] if verbose else [])
stdout, stderr, returncode = run_cmd(command)
if returncode != 0:
raise Exception(
"Error when running icetea. \ncwd:{} \nCommand:'{}' \noutput:{}".format(os.getcwd(), ' '.join(command),
stderr.decode()))
return json.loads(stdout)
def is_test_in_test_by_name(test_name, test_by_name):
for tbn_temp in test_by_name:
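# fnmatch.translate converts a shell-style pattern (e.g. 'test*') to a regular
# expression, so names given via --tests-by-name may use glob wildcards.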
if re.search(translate(tbn_temp), test_name):
return True
return False
def check_tests(icetea_json_output):
"""
Check that all tests have all necessary information
:return:
"""
for test in icetea_json_output:
if not get_applications_from_test(test):
raise Exception('Test {} does not have application with correct name'.format(test['name']))
def load_build_data(build_data_path):
"""
:return: build_data.json content as dict and None if build data is not available
"""
if not os.path.isfile(build_data_path):
return None
return json.load(open(build_data_path))
if __name__ == '__main__':
try:
# Parse Options
parser = ArgumentParser()
parser.add_argument('-m', '--mcu',
dest='target',
default=None,
help='Test target MCU',
required=True)
parser.add_argument('-t', '--toolchain',
dest='toolchain',
default=None,
help='Toolchain',
required=True)
parser.add_argument('--build-data',
dest='build_data',
default=None,
help='Detail data from build')
parser.add_argument('--test-suite',
dest='test_suite',
default=None,
help='Path used for test suite file')
parser.add_argument('-n', '--tests-by-name',
dest='tests_by_name',
default=None,
help='Limit the tests to a list (ex. test1,test2,test3)')
parser.add_argument('--tcdir',
dest='tcdir',
default='TEST_APPS',
help='Test case directory',
required=False)
parser.add_argument('--compile-list',
action='store_true',
dest='compile_list',
default=False,
help='List tests whose applications can be compiled')
parser.add_argument('--run-list',
action='store_true',
dest='run_list',
default=False,
help='List tests whose applications are compiled and ready to run')
parser.add_argument('--application-list',
action='store_true',
dest='application_list',
default=False,
help='List applications that need to be built')
parser.add_argument('--ignore-checks',
action='store_true',
dest='ignore_checks',
default=False,
help='Ignore data validation checks')
parser.add_argument('-v', '--verbose',
action='store_true',
dest='verbose',
default=False,
help='Verbose diagnostic output')
options = parser.parse_args()
icetea_json_output = icetea_tests(options.target, options.tcdir, options.verbose)
tests_by_name = options.tests_by_name.split(',') if options.tests_by_name else None
build_data = load_build_data(options.build_data) if options.build_data else None
if not options.ignore_checks:
check_tests(icetea_json_output)
if options.compile_list:
print('Available icetea tests for build \'{}-{}\', location \'{}\''.format(
options.target, options.toolchain, options.tcdir))
for test in icetea_json_output:
print(
'Test Case:\n Name: {name}\n Path: .{sep}{filepath}\n Test applications: .{sep}{apps}'.format(
name=test['name'],
sep=sep,
filepath=relpath(test['filepath'], ROOT),
apps=''.join(get_applications(test)).replace('-', os.path.sep)))
elif options.run_list:
print('Available icetea tests for build \'{}-{}\', location \'{}\''.format(
options.target, options.toolchain, options.tcdir))
# Filters
tests = filter_test_by_name(icetea_json_output, tests_by_name)
if build_data:
tests = filter_test_by_build_data(tests, build_data, options.target, options.toolchain)
for test in tests:
print(' test \'{name}\''.format(name=test['name']))
elif options.application_list:
print(','.join(get_application_list(icetea_json_output, tests_by_name)))
else:
if not build_data:
raise Exception("Build data file does not exist: {}".format(options.build_data))
test_suite = create_test_suite(options.target, options.toolchain, icetea_json_output, build_data,
tests_by_name)
if not test_suite['testcases']:
raise Exception("Test suite is empty. Check that --tcdir and --tests-by-name have correct values")
if not options.test_suite:
raise Exception('--test-suite is required when running tests')
with open(options.test_suite, 'w') as f:
json.dump(test_suite, f, indent=2)
# List just for debug
if options.verbose:
cmd(['icetea', '--tcdir', options.tcdir, '--list'] + (['-v'] if options.verbose else []))
cmd(['icetea', '--tcdir', options.tcdir, '--suite', options.test_suite, '--clean', '--plugin_path',
plugins_path] + (['-v'] if options.verbose else []))
except KeyboardInterrupt as e:
print('\n[CTRL+c] exit')
except ConfigException as e:
# Catching ConfigException here to prevent a traceback
print('[ERROR] {}'.format(e))
except Exception as e:
traceback.print_exc(file=sys.stdout)
print('[ERROR] {}'.format(e))
sys.exit(1)
| betzw/mbed-os | tools/run_icetea.py | Python | apache-2.0 | 12,808 |
# This is a demo script. It finds all tissue sections, crops them and downloads them as separate images.
import girder_client
import urllib2
import numpy as np
import scipy
import cv2
def load_low_res(gc, item_id, mag):
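# Fetch a low-magnification overview of the whole slide as a JPEG via the
# Girder tiles/region endpoint and decode it with OpenCV.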
chip_url = gc.urlBase + "item/%s/tiles/region?magnification=%f&encoding=JPEG&jpegQuality=95"%(item_id, mag)
req = urllib2.Request(chip_url)
req.add_header('Girder-Token', gc.token)
try:
resp = urllib2.urlopen(req)
image = np.asarray(bytearray(resp.read()), dtype="uint8")
image = cv2.imdecode(image, cv2.IMREAD_COLOR)
return image
except urllib2.HTTPError, err:
if err.code == 400:
print("Bad request!")
elif err.code == 404:
print("Page not found!")
elif err.code == 403:
print("Access denied!")
else:
print("Something happened! Error code %d" % err.code)
return None
def load_high_res_region(gc, item_id, left, right, top, bottom):
chip_url = gc.urlBase + "item/%s/tiles/region?left=%d&top=%d&right=%d&bottom=%s&units=base_pixels&magnification=10&exact=false&encoding=JPEG&jpegQuality=95&jpegSubsampling=0"%(item_id, left, top, right, bottom)
req = urllib2.Request(chip_url)
req.add_header('Girder-Token', gc.token)
try:
resp = urllib2.urlopen(req)
image = np.asarray(bytearray(resp.read()), dtype="uint8")
image = cv2.imdecode(image, cv2.IMREAD_COLOR)
return image
except urllib2.HTTPError, err:
if err.code == 400:
print("Bad request!")
elif err.code == 404:
print("Page not found!")
elif err.code == 403:
print("Access denied!")
else:
print("Something happened! Error code %d" % err.code)
return None
if __name__ == '__main__':
girder_item_id = "5915e6c3dd98b578723a0a21"
girder_url = 'https://images.slide-atlas.org/api/v1'
gc = girder_client.GirderClient(apiUrl=girder_url)
# Load a low resolution version of the whole slide
mag = 0.2
img = load_low_res(gc, girder_item_id, mag)
#cv2.imwrite("lowres.jpg", img);
# Threshold the image
grayscaled = cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)
cv2.imwrite("gray.jpg", grayscaled);
retval, threshold = cv2.threshold(grayscaled, 200, 255, cv2.THRESH_BINARY)
cv2.imwrite("threshold.jpg", threshold);
# Get rid of noise with morphological operations.
kernel = np.ones((7,7),np.uint8)
opening = cv2.morphologyEx(threshold, cv2.MORPH_OPEN, kernel)
cv2.imwrite("open.jpg", opening);
closing = cv2.morphologyEx(opening, cv2.MORPH_CLOSE, kernel)
cv2.imwrite("close.jpg", closing);
# Use contours to segment the tissue islands
im2, contours, hierarchy = cv2.findContours(closing, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
# The first contour is the entire image (negating the image would probably fix this).
contours = contours[1:-1]
count = 0
for section in contours:
print(count)
x0 = np.min(section[...,0])
x1 = np.max(section[...,0])+1
y0 = np.min(section[...,1])
y1 = np.max(section[...,1])+1
# convert to the high res (40x) coordinate system
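# The overview was fetched at 0.2x magnification, so a factor of 40/0.2 = 200
# maps its pixel coordinates to full-resolution (40x) base pixels.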
x0 = int(x0 * 40/0.2)
x1 = int(x1 * 40/0.2)
y0 = int(y0 * 40/0.2)
y1 = int(y1 * 40/0.2)
region = load_high_res_region(gc, girder_item_id, x0, x1, y0, y1)
cv2.imwrite("section%d.jpg"%count, region);
count = count + 1
| law12019/deep_learning | scripts/download_tissue_sections.py | Python | apache-2.0 | 3,540 |
#
# Copyright (c) SAS Institute Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
Wrapper around rmake.worker that receives and sends messages to the dispatcher.
"""
import os
import signal
import socket
import time
import traceback
from conary import errors
from rmake import failure
from rmake.build import subscriber
from rmake.lib import logger
from rmake.lib import procutil
from rmake.lib import server
from rmake.lib.apiutils import api, api_parameters, api_return, freeze, thaw
from rmake.messagebus.rpclib import SessionProxy
from rmake.server import client
from rmake.worker import worker
from rmake.multinode import messages
from rmake.multinode import nodetypes
from rmake.multinode import nodeclient
# Register ResolveJob apiutils type
from rmake.build import dephandler # pyflakes=ignore
class rMakeWorkerNodeServer(worker.Worker):
"""
Class that wraps worker functionality from rmake.worker.worker. Actual
communication w/ messagebus is handled in worker.client
@param cfg: node cfg
@type cfg: rmake_node.nodecfg.NodeConfiguration
@param messageBusInfo: override information for how to get to the
messagebus
@type messageBusInfo: (host, port)
"""
def __init__(self, cfg, messageBusInfo=None):
serverLogger = logger.Logger('rmake-node',
logPath=cfg.logDir + '/rmake-node.log')
try:
serverLogger.info('Starting rMake Node (pid %s)' % os.getpid())
worker.Worker.__init__(self, cfg, serverLogger,
slots=cfg.slots)
# Calculate the current state of the rmake chroot directory.
chroots = self.listChroots()
self.client = WorkerNodeClient(cfg, self,
procutil.MachineInformation(),
chroots=chroots,
messageBusInfo=messageBusInfo)
self.lastStatusSent = 0
self.statusPeriod = 60
except Exception, err:
self.error('Error initializing Node Server:\n %s\n%s', err,
traceback.format_exc())
raise
def busConnected(self, sessionId):
pass
def receivedResolveCommand(self, info):
eventHandler = DirectRmakeBusPublisher(info.getJobId(), self.client)
self.resolve(info.getResolveJob(), eventHandler, info.getLogData(),
commandId=info.getCommandId())
def receivedActionCommand(self, info):
eventHandler = DirectRmakeBusPublisher(info.getJobId(), self.client)
self.actOnTrove(info.getCommandName(),
info.getBuildConfig(),
info.getJobId(), info.getTrove(),
eventHandler, info.getLogData(),
commandId=info.getCommandId())
def receivedLoadCommand(self, info):
eventHandler = DirectRmakeBusPublisher(info.getJobId(), self.client)
self.loadTroves(info.getJob(), info.getLoadTroves(), eventHandler,
info.getReposName(), commandId=info.getCommandId())
def receivedBuildCommand(self, info):
# allow state changes in the trove before/after we actually fork the
# command
RmakeBusPublisher(info.getJobId(), self.client).attach(info.getTrove())
# create an eventHandler which will take events from the command
# and send them to the messagebus.
eventHandler = DirectRmakeBusPublisher(info.getJobId(), self.client)
self.buildTrove(info.getBuildConfig(), info.getJobId(),
info.getTrove(), eventHandler,
info.getBuildReqs(), info.getCrossReqs(),
info.getTargetLabel(), info.getLogInfo(),
bootstrapReqs=info.getBootstrapReqs(),
builtTroves=info.getBuiltTroves(),
commandId=info.getCommandId())
def receivedStopCommand(self, info):
# pass command on to worknode underneath.
self.stopCommand(commandId=info.getCommandId(),
targetCommandId=info.getTargetCommandId())
def _signalHandler(self, signal, frame):
server.Server._signalHandler(self, signal, frame)
os.kill(os.getpid(), signal)
def _serveLoopHook(self):
# Called every .1 seconds or so when polling for
# new requests.
# Sends status update about the machine.
if not self.client.isConnected():
self.client.connect()
return
if (time.time() - self.lastStatusSent) > self.statusPeriod:
if self.client:
self.lastStatusSent = time.time()
info = procutil.MachineInformation()
commandIds = [ x.getCommandId() for x in self.commands]
commandIds += [ x[2][0] for x in self._queuedCommands ]
self.client.updateStatus(info, commandIds)
worker.Worker._serveLoopHook(self)
def handleRequestIfReady(self, sleep=0.1):
# override standard worker's poll mechanism to check the bus
# instead.
try:
self.client.poll(timeout=sleep, maxIterations=1)
except socket.error, err:
self.error('Socket connection died: %s' % err.args[1])
time.sleep(sleep)
# passing 0 to tell it we've already slept if necessary.
return worker.Worker.handleRequestIfReady(self, 0)
def commandErrored(self, commandId, msg, tb=''):
"""
Called by worker after command finishes with error.
Pass any command errors back to the message bus where they'll
be dealt with.
"""
self.client.commandErrored(commandId, msg, tb)
def commandCompleted(self, commandId):
"""
Called by worker after a command completes successfully.
Pass the completion notice back to the message bus where it'll
be dealt with.
"""
self.client.commandCompleted(commandId)
class WorkerNodeClient(nodeclient.NodeClient):
"""
Manages worker node's low-level connection to the messagebus.
When it receives messages it parses them and passes the information
up to the WorkerNodeServer. It also accepts commands from
the node server and passes the information back to the
message bus.
Initialization parameters:
@param cfg: node configuration
@param server: rMakeServerClass to call when messages received
@param nodeInfo: procutils.MachineInformation object describing
the current state of the node.
"""
sessionClass = 'WORKER' # type information used by messagebus to classify
# connections.
name = 'rmake-node' # name used by logging
def __init__(self, cfg, server, nodeInfo, chroots, messageBusInfo=None):
# Create a nodeType describing this client that will be passed
# to the message bus and made available to interested listeners
# (like the dispatcher)
node = nodetypes.WorkerNode(name=cfg.name,
host=cfg.hostName,
slots=cfg.slots,
jobTypes=cfg.jobTypes,
buildFlavors=cfg.buildFlavors,
loadThreshold=cfg.loadThreshold,
nodeInfo=nodeInfo, chroots=chroots,
chrootLimit=cfg.chrootLimit)
# grab the message bus location from the rmake server.
rmakeClient = client.rMakeClient(cfg.rmakeUrl)
if not messageBusInfo:
messageBus = None
while not messageBus:
try:
messageBus = rmakeClient.getMessageBusInfo()
except errors.UncatchableExceptionClasses, e:
raise
except Exception, e:
server.error('Could not contact rmake server at %r - waiting 5 seconds and retrying.', cfg.rmakeUrl)
if not messageBus:
time.sleep(5)
messageBusHost, messageBusPort = messageBus.host, messageBus.port
else:
messageBusHost, messageBusPort = messageBusInfo
nodeclient.NodeClient.__init__(self, messageBusHost,
messageBusPort,
cfg, server, node)
# Never give up on reconnecting to the messagebus, we want
# nodes to keep attempting to reconnect forever.
self.getBusClient().setConnectionTimeout(-1)
def updateStatus(self, info, commandIds):
"""
Send current status of node to messagebus to be picked up
by dispatcher
@param info: current status of this node
@type info: procutil.MachineInformation
"""
m = messages.NodeInfo(info, commandIds)
self.bus.sendMessage('/nodestatus', m)
def messageReceived(self, m):
"""
Direct messages accepted by rMake Node.
@param m: messages.Message subclass.
"""
nodeclient.NodeClient.messageReceived(self, m)
if isinstance(m, messages.ConnectedResponse):
self.bus.subscribe('/command?targetNode=%s' % m.getSessionId())
self.server.busConnected(m.getSessionId())
elif isinstance(m, messages.BuildCommand):
self.server.info('Received build command')
self.server.receivedBuildCommand(m)
elif isinstance(m, messages.ActionCommand):
self.server.info('Received action command')
self.server.receivedActionCommand(m)
elif isinstance(m, messages.StopCommand):
self.server.info('Received stop command')
self.server.receivedStopCommand(m)
elif isinstance(m, messages.ResolveCommand):
self.server.info('Received resolve command')
self.server.receivedResolveCommand(m)
elif isinstance(m, messages.LoadCommand):
self.server.info('Received load command')
self.server.receivedLoadCommand(m)
def commandErrored(self, commandId, message, traceback=''):
"""
Send status to messagebus about command commandId
"""
m = messages.CommandStatus()
if not isinstance(message, failure.FailureReason):
failureReason = failure.CommandFailed(commandId, message, traceback)
else:
failureReason = message
m.set(commandId, m.ERROR, failureReason)
self.bus.sendMessage('/commandstatus', m)
def commandCompleted(self, commandId):
"""
Send status to messagebus about worker command commandId
"""
m = messages.CommandStatus()
m.set(commandId, m.COMPLETED)
self.bus.sendMessage('/commandstatus', m)
def emitEvents(self, jobId, eventList):
"""
Send in-progress status updates on events affecting troves
"""
m = messages.EventList()
m.set(jobId, eventList)
# Sending a synchronous message tells the node not to return until
# the messages are sent. We want events to be high-priority
# messages that get delivered promptly.
self.bus.sendSynchronousMessage('/event', m)
@api(version=1)
@api_return(1, None)
def listChroots(self, callData):
"""
Part of node XMLRPC interface. List all chroot names
known about for this node.
"""
return self.server.listChroots()
@api(version=1)
@api_return(1, None)
def listCommands(self, callData):
"""
Part of node XMLRPC interface. List all commands that are
currently queued or active on this node.
"""
return (
[ x.getCommandId() for x in self.server.listQueuedCommands() ],
[ (x.getCommandId(), x.pid) for x in self.server.listCommands() ])
@api(version=1)
@api_parameters(1, 'str', 'str', 'bool', None)
@api_return(1, None)
def startChrootSession(self, callData, chrootPath, command,
superUser=False, buildTrove=None):
"""
Part of rMake node XMLRPC interface. The rMake
server uses these methods to communicate directly to a
node without going through the dispatcher.
Basically a passthrough
to worker.startSession.
Returns (True, (hostName, port)) if the connection succeeds.
Returns (False, FailureReason) if it fails.
"""
if buildTrove:
buildTrove = thaw('BuildTrove', buildTrove)
passed, results = self.server.startSession('_local_', chrootPath,
command, superUser, buildTrove)
if not passed:
results = freeze('FailureReason', results)
return passed, results
@api(version=1)
@api_parameters(1, 'str', 'str')
@api_return(1, None)
def archiveChroot(self, callData, chrootPath, newPath):
"""
Part of rMake node XMLRPC interface. The rMake
server uses these methods to communicate directly to a
node without going through the dispatcher.
"""
return self.server.archiveChroot('_local_', chrootPath, newPath)
@api(version=1)
@api_parameters(1, 'str')
@api_return(1, None)
def deleteChroot(self, callData, chrootPath):
"""
Part of rMake node XMLRPC interface. The rMake
server uses these methods to communicate directly to a
node without going through the dispatcher.
Basically a passthrough to deleteChroot.
"""
return self.server.deleteChroot('_local_', chrootPath)
class WorkerNodeRPCClient(object):
"""
XMLRPC client for communicating to rMake Node.
client: connected messagebus session.
sessionId: sessionId of rMake node to communicate with.
"""
def __init__(self, client, sessionId):
self.proxy = SessionProxy(WorkerNodeClient, client, sessionId)
def listCommands(self):
return self.proxy.listCommands()
def listChroots(self):
return self.proxy.listChroots()
def getStatus(self):
raise NotImplementedError
def startChrootSession(self, chrootPath, command, superUser=False,
buildTrove=None):
"""
Starts a chroot session on the given node.
"""
if buildTrove is None:
buildTrove = ''
else:
buildTrove = freeze('BuildTrove', buildTrove)
return self.proxy.startChrootSession(chrootPath, command, superUser,
buildTrove)
def archiveChroot(self, chrootPath, newPath):
return self.proxy.archiveChroot(chrootPath, newPath)
def deleteChroot(self, chrootPath):
return self.proxy.deleteChroot(chrootPath)
class RmakeBusPublisher(subscriber._RmakePublisherProxy):
"""
Receives events in unfrozen form, freezes them and puts them
on the message bus.
@param jobId: jobId for the events being logged
@param client: WorkerNodeClient instance
"""
def __init__(self, jobId, client):
self.jobId = jobId
self.client = client
subscriber._RmakePublisherProxy.__init__(self)
def _emitEvents(self, apiVer, eventList):
self.client.emitEvents(self.jobId, eventList)
class DirectRmakeBusPublisher(RmakeBusPublisher):
"""
Receives events already frozen and publishes them directly.
Overrides _receiveEvents where events are frozen.
"""
def _freezeEvents(self, apiVer, frozenEventList):
"""
Events on this bus are already frozen (they come from
the command)
"""
return self.jobId, frozenEventList
| sassoftware/rmake | rmake/multinode/workernode.py | Python | apache-2.0 | 16,683 |
"""
WSGI config for isa project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.7/howto/deployment/wsgi/
"""
import os
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "isa.settings")
from django.core.wsgi import get_wsgi_application
application = get_wsgi_application()
| sthiers/Marc | isa/wsgi.py | Python | apache-2.0 | 381 |
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Skill Discriminator Prediction and Training."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import numpy as np
import tensorflow as tf
import tensorflow_probability as tfp
from tf_agents.distributions import tanh_bijector_stable
class SkillDiscriminator:
def __init__(
self,
observation_size,
skill_size,
skill_type,
normalize_observations=False,
# network properties
fc_layer_params=(256, 256),
fix_variance=False,
input_type='diayn',
# probably do not need to change these
graph=None,
scope_name='skill_discriminator'):
self._observation_size = observation_size
self._skill_size = skill_size
self._skill_type = skill_type
self._normalize_observations = normalize_observations
# tensorflow requirements
if graph is not None:
self._graph = graph
else:
self._graph = tf.get_default_graph()
self._scope_name = scope_name
# discriminator network properties
self._fc_layer_params = fc_layer_params
self._fix_variance = fix_variance
if not self._fix_variance:
self._std_lower_clip = 0.3
self._std_upper_clip = 10.0
self._input_type = input_type
self._use_placeholders = False
self.log_probability = None
self.disc_max_op = None
self.disc_min_op = None
self._session = None
# saving/restoring variables
self._saver = None
def _get_distributions(self, out):
if self._skill_type in ['gaussian', 'cont_uniform']:
mean = tf.layers.dense(
out, self._skill_size, name='mean', reuse=tf.AUTO_REUSE)
if not self._fix_variance:
stddev = tf.clip_by_value(
tf.layers.dense(
out,
self._skill_size,
activation=tf.nn.softplus,
name='stddev',
reuse=tf.AUTO_REUSE), self._std_lower_clip,
self._std_upper_clip)
else:
stddev = tf.fill([tf.shape(out)[0], self._skill_size], 1.0)
inference_distribution = tfp.distributions.MultivariateNormalDiag(
loc=mean, scale_diag=stddev)
if self._skill_type == 'gaussian':
prior_distribution = tfp.distributions.MultivariateNormalDiag(
loc=[0.] * self._skill_size, scale_diag=[1.] * self._skill_size)
elif self._skill_type == 'cont_uniform':
prior_distribution = tfp.distributions.Independent(
tfp.distributions.Uniform(
low=[-1.] * self._skill_size, high=[1.] * self._skill_size),
reinterpreted_batch_ndims=1)
# squash posterior to the right range of [-1, 1]
bijectors = []
bijectors.append(tanh_bijector_stable.Tanh())
bijector_chain = tfp.bijectors.Chain(bijectors)
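# Passing the Gaussian through a (numerically stable) tanh keeps its samples
# inside (-1, 1), matching the support of the continuous-uniform prior above.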
inference_distribution = tfp.distributions.TransformedDistribution(
distribution=inference_distribution, bijector=bijector_chain)
elif self._skill_type == 'discrete_uniform':
logits = tf.layers.dense(
out, self._skill_size, name='logits', reuse=tf.AUTO_REUSE)
inference_distribution = tfp.distributions.OneHotCategorical(
logits=logits)
prior_distribution = tfp.distributions.OneHotCategorical(
probs=[1. / self._skill_size] * self._skill_size)
elif self._skill_type == 'multivariate_bernoulli':
raise NotImplementedError('multivariate_bernoulli skills are not supported yet')
return inference_distribution, prior_distribution
# simple discriminator graph
def _default_graph(self, timesteps):
out = timesteps
for idx, layer_size in enumerate(self._fc_layer_params):
out = tf.layers.dense(
out,
layer_size,
activation=tf.nn.relu,
name='hid_' + str(idx),
reuse=tf.AUTO_REUSE)
return self._get_distributions(out)
def _get_dict(self,
input_steps,
target_skills,
input_next_steps=None,
batch_size=-1,
batch_norm=False):
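# Draw a random mini-batch without replacement; a non-positive batch_size
# uses the full data in its original order.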
if batch_size > 0:
shuffled_batch = np.random.permutation(len(input_steps))[:batch_size]
else:
shuffled_batch = np.arange(len(input_steps))
batched_input = input_steps[shuffled_batch, :]
batched_skills = target_skills[shuffled_batch, :]
if self._input_type in ['diff', 'both']:
batched_targets = input_next_steps[shuffled_batch, :]
return_dict = {
self.timesteps_pl: batched_input,
self.skills_pl: batched_skills,
}
if self._input_type in ['diff', 'both']:
return_dict[self.next_timesteps_pl] = batched_targets
if self._normalize_observations:
return_dict[self.is_training_pl] = batch_norm
return return_dict
def make_placeholders(self):
self._use_placeholders = True
with self._graph.as_default(), tf.variable_scope(self._scope_name):
self.timesteps_pl = tf.placeholder(
tf.float32, shape=(None, self._observation_size), name='timesteps_pl')
self.skills_pl = tf.placeholder(
tf.float32, shape=(None, self._skill_size), name='skills_pl')
if self._input_type in ['diff', 'both']:
self.next_timesteps_pl = tf.placeholder(
tf.float32,
shape=(None, self._observation_size),
name='next_timesteps_pl')
if self._normalize_observations:
self.is_training_pl = tf.placeholder(tf.bool, name='batch_norm_pl')
def set_session(self, session=None, initialize_or_restore_variables=False):
if session is None:
self._session = tf.Session(graph=self._graph)
else:
self._session = session
# only initialize uninitialized variables
if initialize_or_restore_variables:
if tf.gfile.Exists(self._save_prefix):
self.restore_variables()
with self._graph.as_default():
is_initialized = self._session.run([
tf.compat.v1.is_variable_initialized(v)
for key, v in self._variable_list.items()
])
uninitialized_vars = []
for flag, v in zip(is_initialized, self._variable_list.items()):
if not flag:
uninitialized_vars.append(v[1])
if uninitialized_vars:
self._session.run(
tf.compat.v1.variables_initializer(uninitialized_vars))
def build_graph(self,
timesteps=None,
skills=None,
next_timesteps=None,
is_training=None):
with self._graph.as_default(), tf.variable_scope(self._scope_name):
if self._use_placeholders:
timesteps = self.timesteps_pl
skills = self.skills_pl
if self._input_type in ['diff', 'both']:
next_timesteps = self.next_timesteps_pl
if self._normalize_observations:
is_training = self.is_training_pl
# use deltas
if self._input_type == 'both':
next_timesteps -= timesteps
timesteps = tf.concat([timesteps, next_timesteps], axis=1)
if self._input_type == 'diff':
timesteps = next_timesteps - timesteps
if self._normalize_observations:
timesteps = tf.layers.batch_normalization(
timesteps,
training=is_training,
name='input_normalization',
reuse=tf.AUTO_REUSE)
inference_distribution, prior_distribution = self._default_graph(
timesteps)
self.log_probability = inference_distribution.log_prob(skills)
self.prior_probability = prior_distribution.log_prob(skills)
return self.log_probability, self.prior_probability
def increase_prob_op(self, learning_rate=3e-4):
with self._graph.as_default():
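# Batch normalization's moving-average updates live in UPDATE_OPS; running
# the optimizer under control_dependencies keeps those statistics current.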
update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
with tf.control_dependencies(update_ops):
self.disc_max_op = tf.train.AdamOptimizer(
learning_rate=learning_rate).minimize(
-tf.reduce_mean(self.log_probability))
return self.disc_max_op
def decrease_prob_op(self, learning_rate=3e-4):
with self._graph.as_default():
update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
with tf.control_dependencies(update_ops):
self.disc_min_op = tf.train.AdamOptimizer(
learning_rate=learning_rate).minimize(
tf.reduce_mean(self.log_probability))
return self.disc_min_op
# only useful when training use placeholders, otherwise use ops directly
def train(self,
timesteps,
skills,
next_timesteps=None,
batch_size=512,
num_steps=1,
increase_probs=True):
if not self._use_placeholders:
return
if increase_probs:
run_op = self.disc_max_op
else:
run_op = self.disc_min_op
for _ in range(num_steps):
self._session.run(
run_op,
feed_dict=self._get_dict(
timesteps,
skills,
input_next_steps=next_timesteps,
batch_size=batch_size,
batch_norm=True))
def get_log_probs(self, timesteps, skills, next_timesteps=None):
if not self._use_placeholders:
return
return self._session.run([self.log_probability, self.prior_probability],
feed_dict=self._get_dict(
timesteps,
skills,
input_next_steps=next_timesteps,
batch_norm=False))
def create_saver(self, save_prefix):
if self._saver is not None:
return self._saver
else:
with self._graph.as_default():
self._variable_list = {}
for var in tf.get_collection(
tf.GraphKeys.GLOBAL_VARIABLES, scope=self._scope_name):
self._variable_list[var.name] = var
self._saver = tf.train.Saver(self._variable_list, save_relative_paths=True)
self._save_prefix = save_prefix
def save_variables(self, global_step):
if not tf.gfile.Exists(self._save_prefix):
tf.gfile.MakeDirs(self._save_prefix)
self._saver.save(
self._session,
os.path.join(self._save_prefix, 'ckpt'),
global_step=global_step)
def restore_variables(self):
self._saver.restore(self._session,
tf.train.latest_checkpoint(self._save_prefix))
| google-research/dads | unsupervised_skill_learning/skill_discriminator.py | Python | apache-2.0 | 10,897 |
import mongomock
import pymongo
from mock import mock
from mdstudio.db.impl.mongo_client_wrapper import MongoClientWrapper
from mdstudio.db.impl.mongo_database_wrapper import MongoDatabaseWrapper
from mdstudio.unittest import db
from mdstudio.unittest.db import DBTestCase
class TestMongoClientWrapper(DBTestCase):
def setUp(self):
self.d = MongoClientWrapper("localhost", 27127)
def test_construction(self):
self.assertEqual(self.d._host, "localhost")
self.assertEqual(self.d._port, 27127)
self.assertEqual(self.d._databases, {})
self.assertIsInstance(self.d._client, mongomock.MongoClient)
def test_get_database_not_exists(self):
self.d.logger = mock.MagicMock()
ldb = self.d.get_database('database_name')
self.d.logger.info.assert_called_once_with('Creating database "{database}"', database='database_name')
self.assertIsInstance(ldb, MongoDatabaseWrapper)
self.assertEqual(ldb, self.d.get_database('database_name'))
def test_get_database_exists(self):
self.d._client.get_database('database_name')
self.d.logger = mock.MagicMock()
ldb = self.d.get_database('database_name')
self.d.logger.info.assert_not_called()
self.assertIsInstance(ldb, MongoDatabaseWrapper)
self.assertEqual(ldb, self.d.get_database('database_name'))
def test_create_mongo_client(self):
db.create_mock_client = False
self.assertIsInstance(MongoClientWrapper.create_mongo_client('localhost', 2), pymongo.MongoClient)
def test_create_mongo_client_mock(self):
db.create_mock_client = True
self.assertIsInstance(MongoClientWrapper.create_mongo_client('localhost', 2), mongomock.MongoClient)
| MD-Studio/MDStudio | mdstudio/mdstudio/tests/db/impl/test_mongo_client_wrapper.py | Python | apache-2.0 | 1,761 |
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# pylint: disable=g-short-docstring-punctuation
"""Histograms.
Please see @{$python/histogram_ops} guide.
@@histogram_fixed_width
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import clip_ops
from tensorflow.python.ops import gen_math_ops
from tensorflow.python.ops import math_ops
def histogram_fixed_width(values,
value_range,
nbins=100,
dtype=dtypes.int32,
name=None):
"""Return histogram of values.
Given the tensor `values`, this operation returns a rank 1 histogram counting
the number of entries in `values` that fell into every bin. The bins are
equal width and determined by the arguments `value_range` and `nbins`.
Args:
values: Numeric `Tensor`.
value_range: Shape [2] `Tensor` of same `dtype` as `values`.
values <= value_range[0] will be mapped to hist[0],
values >= value_range[1] will be mapped to hist[-1].
nbins: Scalar `int32 Tensor`. Number of histogram bins.
dtype: dtype for returned histogram.
name: A name for this operation (defaults to 'histogram_fixed_width').
Returns:
A 1-D `Tensor` holding histogram of values.
Examples:
```python
# Bins will be: (-inf, 1), [1, 2), [2, 3), [3, 4), [4, inf)
nbins = 5
value_range = [0.0, 5.0]
new_values = [-1.0, 0.0, 1.5, 2.0, 5.0, 15]
with tf.get_default_session() as sess:
hist = tf.histogram_fixed_width(new_values, value_range, nbins=5)
variables.global_variables_initializer().run()
sess.run(hist) => [2, 1, 1, 0, 2]
```
"""
with ops.name_scope(name, 'histogram_fixed_width',
[values, value_range, nbins]) as name:
return gen_math_ops.histogram_fixed_width(values, value_range, nbins,
dtype=dtype, name=name)
| shakamunyi/tensorflow | tensorflow/python/ops/histogram_ops.py | Python | apache-2.0 | 2,771 |
#!/usr/bin/env python
from __future__ import print_function
import sys
import os
import argparse
import re
"""
Clean up valgrind output and produce
1) suppressions file containing everything that is considered to be a false positive.
2) a file containing everything else (these should be actual errors)
3) suppressions that are 'not clean'
The output is written to 'suppress.txt', 'dont_suppress.txt' and 'not_clean.txt'.
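A typical valgrind suppression block (as emitted by --gen-suppressions) looks like:
{
   <insert_a_suppression_name_here>
   Memcheck:Cond
   fun:__intel_sse2_strlen
}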
"""
def in_fortran_runtime(supp):
"""
Return True if the suppression is in the Fortran runtime.
"""
intel_strings = ['fun:__intel_sse2_strlen', 'fun:kill_trailing',
'fun:_intel_fast_memcpy', 'fun:for__format_value',
'fun:for_read_int_lis_xmit', 'fun:_intel_fast_memcmp',
'fun:for_trim', 'fun:for_len_trim', 'fun:for_f90_scan',
'for_cpstr', 'fun:__intel_sse2_strcpy']
for s in intel_strings:
if s in supp:
return True
return False
def in_mpi(supp):
"""
Return True if the suppression is in the MPI library.
"""
mpi_strings = ['libmlx4-rdma', 'librdmacm', 'fun:clone']
for s in mpi_strings:
if s in supp:
return True
return False
def is_clean(supp):
"""
Check a suppressions string and return True if it is clean.
"""
# General clean up.
if '==' in supp:
return False
if 'forrtl: error' in supp:
return False
if supp.count('{') > 1:
return False
if supp.count('<') > 1:
return False
if supp.count('>') > 1:
return False
# Check some beginnings.
supp_beginings = ['{', '}', ' Memcheck:Cond', ' Memcheck:Value',
' Memcheck:Param', ' Memcheck:Addr',
' Memcheck:User',
' fun:', ' <insert_a_suppression_name_here>',
' write', ' obj:', ' ioctl', ' close',
' munmap']
for line in supp.split('\n'):
for s in supp_beginings:
if s == line[0:len(s)]:
break
else:
return False
return True
def main():
parser = argparse.ArgumentParser()
parser.add_argument('input_files', nargs='+', help='File containing valgrind output')
args = parser.parse_args()
suppress = []
dont_suppress = []
not_clean = []
for file in args.input_files:
with open(file) as f:
s = f.read()
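# Each suppression is a brace-delimited block; the non-greedy pattern
# extracts one block at a time, matching across line breaks via re.DOTALL.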
for m in re.finditer(r'{[^{]+?}', s, re.DOTALL):
if not is_clean(m.group(0)):
if m.group(0) not in not_clean:
not_clean.append(m.group(0))
continue
if in_mpi(m.group(0)) or in_fortran_runtime(m.group(0)):
if m.group(0) not in suppress:
suppress.append(m.group(0))
else:
if m.group(0) not in dont_suppress:
dont_suppress.append(m.group(0))
with open('suppress.txt', 'w') as f:
for s in suppress:
print(s, file=f)
with open('dont_suppress.txt', 'w') as f:
for s in dont_suppress:
print(s, file=f)
with open('not_clean.txt', 'w') as f:
for s in not_clean:
print(s, file=f)
print("""Now please check:
- There are no valid suppressions in not_clean.txt,
- There is nothing in suppress.txt that shouldn't be suppressed,
- There is nothing in dont_suppress.txt that should be suppressed.""")
return 0
if __name__ == '__main__':
sys.exit(main())
| CWSL/access-cm-tools | debug/valgrind_suppressions.py | Python | apache-2.0 | 3,664 |
# -*- coding: utf-8 -*-
# Copyright 2016 Mark Brand - c01db33f (at) gmail.com
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""reil.arm64.control_flow - ARMv8 translators
This module generates REIL (reverse engineering intermediate language)
IL from ARMv8 machine code.
This file is responsible for translation of basic control flow instructions
such as b, bl.
"""
import reil.error
from reil import *
from reil.shorthand import *
from reil.utilities import *
import reil.arm64.conditional as conditional
import reil.arm64.operand as operand
def arm64_b(ctx, i):
a = operand.get(ctx, i, 0)
cond = conditional.condition(ctx, i.cc)
ctx.emit( jcc_ (cond, a))
| c01db33f/reil | arm64/control_flow.py | Python | apache-2.0 | 1,207 |
from setuptools import setup, find_packages
import sys, os, io
# List all of your Python package dependencies in the
# requirements.txt file
def readfile(filename, split=False):
with io.open(filename, encoding="utf-8") as stream:
if split:
return stream.read().split("\n")
return stream.read()
readme = readfile("README.rst", split=True)[3:] # skip title
# For requirements not hosted on PyPi place listings
# into the 'requirements.txt' file.
#requires = readfile("requirements.txt")
requires = [
# minimal requirements listing
"opencmiss.zinc >= 3.3", # not yet on pypi - need manual install from opencmiss.org
"opencmiss.zincwidgets >= 2.0.3"
]
license = readfile("LICENSE")
setup(
name=u'mapclientplugins.smoothfitstep',
version='0.1.0',
description='',
long_description='\n'.join(readme) + license,
classifiers=[
"Development Status :: 3 - Alpha",
"Programming Language :: Python",
],
author=u'Richard Christie',
author_email='',
url='',
license='GPL',
packages=find_packages(exclude=['ez_setup',]),
namespace_packages=['mapclientplugins'],
include_package_data=True,
zip_safe=False,
install_requires=requires,
)
| rchristie/mapclientplugins.smoothfitstep | setup.py | Python | apache-2.0 | 1,287 |
import sys
from oba_rvtd_deployer.aws import launch_new, tear_down
from oba_rvtd_deployer.gtfs import validate_gtfs, update
from oba_rvtd_deployer.oba import install, deploy, start, copy_gwt,\
install_watchdog
def run_all():
'''A single script to deploy OBA in one command to a new EC2 instance
'''
# dl gtfs and validate it
if not validate_gtfs():
print('GTFS Validation Failed')
sys.exit()
# setup new EC2 instance
instance = launch_new()
public_dns_name = instance.public_dns_name
# install OBA
install(public_dns_name)
# update GTFS, make new bundle
update(public_dns_name)
# deploy webapps to tomcat
deploy(public_dns_name)
# start server
start(public_dns_name)
# move GWT files to production webapp dir
copy_gwt(public_dns_name)
# install watchdog python script
install_watchdog(public_dns_name)
print('Deployment of new server has finished. Please follow steps: OneBusAway Setup and xWiki Setup')
| trilliumtransit/oba_rvtd_deployer | oba_rvtd_deployer/master.py | Python | apache-2.0 | 1,050 |
import convert
import random
import tensorflow as tf
import tensorflow.contrib as tfc
from tensorflow.contrib.learn import DNNRegressor, SKCompat
random.seed(0xbedbeef) # so we get consistent results
fit_boards = []
fit_moves = []
with open('train_data/acm.pgn') as pgn:
boards, moves = convert.generate_next_boards(pgn)
fit_boards += boards
fit_moves += moves
target_boards = []
with open('train_data/man_machine.pgn') as pgn:
boards, moves = convert.generate_next_boards(pgn)
target_boards += boards
nn = SKCompat(DNNRegressor(hidden_units=[64, 128, 63],
feature_columns=[tfc.layers.real_valued_column("")]))
nn.fit(tf.constant(fit_boards), tf.constant(fit_moves))
res = nn.predict(target_boards)
print(res)
| wspeirs/nnpychess | main.py | Python | apache-2.0 | 767 |
from __future__ import unicode_literals
from django.conf import settings
from django.contrib.contenttypes.models import ContentType
from rest_framework import authentication, exceptions
from rest_framework.compat import is_authenticated
from rest_framework.exceptions import APIException
from rest_framework.pagination import LimitOffsetPagination
from rest_framework.permissions import BasePermission, DjangoModelPermissions, SAFE_METHODS
from rest_framework.serializers import Field, ValidationError
from users.models import Token
WRITE_OPERATIONS = ['create', 'update', 'partial_update', 'delete']
class ServiceUnavailable(APIException):
status_code = 503
default_detail = "Service temporarily unavailable, please try again later."
#
# Authentication
#
class TokenAuthentication(authentication.TokenAuthentication):
"""
A custom authentication scheme which enforces Token expiration times.
"""
model = Token
def authenticate_credentials(self, key):
model = self.get_model()
try:
token = model.objects.select_related('user').get(key=key)
except model.DoesNotExist:
raise exceptions.AuthenticationFailed("Invalid token")
# Enforce the Token's expiration time, if one has been set.
if token.is_expired:
raise exceptions.AuthenticationFailed("Token expired")
if not token.user.is_active:
raise exceptions.AuthenticationFailed("User inactive")
return token.user, token
class TokenPermissions(DjangoModelPermissions):
"""
Custom permissions handler which extends the built-in DjangoModelPermissions to validate a Token's write ability
for unsafe requests (POST/PUT/PATCH/DELETE).
"""
def __init__(self):
# LOGIN_REQUIRED determines whether read-only access is provided to anonymous users.
self.authenticated_users_only = settings.LOGIN_REQUIRED
super(TokenPermissions, self).__init__()
def has_permission(self, request, view):
# If token authentication is in use, verify that the token allows write operations (for unsafe methods).
if request.method not in SAFE_METHODS and isinstance(request.auth, Token):
if not request.auth.write_enabled:
return False
return super(TokenPermissions, self).has_permission(request, view)
class IsAuthenticatedOrLoginNotRequired(BasePermission):
"""
Returns True if the user is authenticated or LOGIN_REQUIRED is False.
"""
def has_permission(self, request, view):
if not settings.LOGIN_REQUIRED:
return True
return request.user and is_authenticated(request.user)
#
# Serializers
#
class ChoiceFieldSerializer(Field):
"""
Represent a ChoiceField as {'value': <DB value>, 'label': <string>}.
"""
def __init__(self, choices, **kwargs):
self._choices = dict()
for k, v in choices:
# Unpack grouped choices
if type(v) in [list, tuple]:
for k2, v2 in v:
self._choices[k2] = v2
else:
self._choices[k] = v
super(ChoiceFieldSerializer, self).__init__(**kwargs)
def to_representation(self, obj):
return {'value': obj, 'label': self._choices[obj]}
def to_internal_value(self, data):
return self._choices.get(data)
class ContentTypeFieldSerializer(Field):
"""
Represent a ContentType as '<app_label>.<model>'
"""
def to_representation(self, obj):
return "{}.{}".format(obj.app_label, obj.model)
def to_internal_value(self, data):
app_label, model = data.split('.')
try:
return ContentType.objects.get_by_natural_key(app_label=app_label, model=model)
except ContentType.DoesNotExist:
raise ValidationError("Invalid content type")
#
# Mixins
#
class ModelValidationMixin(object):
"""
Enforce a model's validation through clean() when validating serializer data. This is necessary to ensure we're
employing the same validation logic via both forms and the API.
"""
def validate(self, attrs):
instance = self.Meta.model(**attrs)
instance.clean()
return attrs
class WritableSerializerMixin(object):
"""
Allow for the use of an alternate, writable serializer class for write operations (e.g. POST, PUT).
"""
def get_serializer_class(self):
if self.action in WRITE_OPERATIONS and hasattr(self, 'write_serializer_class'):
return self.write_serializer_class
return self.serializer_class
#
# Pagination
#
class OptionalLimitOffsetPagination(LimitOffsetPagination):
"""
Override the stock paginator to allow setting limit=0 to disable pagination for a request. This returns all objects
matching a query, but retains the same format as a paginated request. The limit can only be disabled if
MAX_PAGE_SIZE has been set to 0 or None.
"""
def paginate_queryset(self, queryset, request, view=None):
try:
self.count = queryset.count()
except (AttributeError, TypeError):
self.count = len(queryset)
self.limit = self.get_limit(request)
self.offset = self.get_offset(request)
self.request = request
if self.limit and self.count > self.limit and self.template is not None:
self.display_page_controls = True
if self.count == 0 or self.offset > self.count:
return list()
if self.limit:
return list(queryset[self.offset:self.offset + self.limit])
else:
return list(queryset[self.offset:])
def get_limit(self, request):
if self.limit_query_param:
try:
limit = int(request.query_params[self.limit_query_param])
if limit < 0:
raise ValueError()
# Enforce maximum page size, if defined
if settings.MAX_PAGE_SIZE:
if limit == 0:
return settings.MAX_PAGE_SIZE
else:
return min(limit, settings.MAX_PAGE_SIZE)
return limit
except (KeyError, ValueError):
pass
return self.default_limit
| snazy2000/netbox | netbox/utilities/api.py | Python | apache-2.0 | 6,324 |
"""Support for BH1750 light sensor."""
from __future__ import annotations
from functools import partial
import logging
from i2csense.bh1750 import BH1750 # pylint: disable=import-error
import smbus
import voluptuous as vol
from homeassistant.components.sensor import (
PLATFORM_SCHEMA,
SensorDeviceClass,
SensorEntity,
)
from homeassistant.const import CONF_NAME, LIGHT_LUX
from homeassistant.core import HomeAssistant
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.entity_platform import AddEntitiesCallback
from homeassistant.helpers.typing import ConfigType, DiscoveryInfoType
_LOGGER = logging.getLogger(__name__)
CONF_I2C_ADDRESS = "i2c_address"
CONF_I2C_BUS = "i2c_bus"
CONF_OPERATION_MODE = "operation_mode"
CONF_SENSITIVITY = "sensitivity"
CONF_DELAY = "measurement_delay_ms"
CONF_MULTIPLIER = "multiplier"
# Operation modes for BH1750 sensor (from the datasheet). Time typically 120ms
# In one time measurements, device is set to Power Down after each sample.
CONTINUOUS_LOW_RES_MODE = "continuous_low_res_mode"
CONTINUOUS_HIGH_RES_MODE_1 = "continuous_high_res_mode_1"
CONTINUOUS_HIGH_RES_MODE_2 = "continuous_high_res_mode_2"
ONE_TIME_LOW_RES_MODE = "one_time_low_res_mode"
ONE_TIME_HIGH_RES_MODE_1 = "one_time_high_res_mode_1"
ONE_TIME_HIGH_RES_MODE_2 = "one_time_high_res_mode_2"
OPERATION_MODES = {
CONTINUOUS_LOW_RES_MODE: (0x13, True), # 4lx resolution
CONTINUOUS_HIGH_RES_MODE_1: (0x10, True), # 1lx resolution.
CONTINUOUS_HIGH_RES_MODE_2: (0x11, True), # 0.5lx resolution.
ONE_TIME_LOW_RES_MODE: (0x23, False), # 4lx resolution.
ONE_TIME_HIGH_RES_MODE_1: (0x20, False), # 1lx resolution.
ONE_TIME_HIGH_RES_MODE_2: (0x21, False), # 0.5lx resolution.
}
DEFAULT_NAME = "BH1750 Light Sensor"
DEFAULT_I2C_ADDRESS = "0x23"
DEFAULT_I2C_BUS = 1
DEFAULT_MODE = CONTINUOUS_HIGH_RES_MODE_1
DEFAULT_DELAY_MS = 120
DEFAULT_SENSITIVITY = 69 # from 31 to 254
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
{
vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,
vol.Optional(CONF_I2C_ADDRESS, default=DEFAULT_I2C_ADDRESS): cv.string,
vol.Optional(CONF_I2C_BUS, default=DEFAULT_I2C_BUS): vol.Coerce(int),
vol.Optional(CONF_OPERATION_MODE, default=DEFAULT_MODE): vol.In(
OPERATION_MODES
),
vol.Optional(CONF_SENSITIVITY, default=DEFAULT_SENSITIVITY): cv.positive_int,
vol.Optional(CONF_DELAY, default=DEFAULT_DELAY_MS): cv.positive_int,
vol.Optional(CONF_MULTIPLIER, default=1.0): vol.Range(min=0.1, max=10),
}
)
async def async_setup_platform(
hass: HomeAssistant,
config: ConfigType,
async_add_entities: AddEntitiesCallback,
discovery_info: DiscoveryInfoType | None = None,
) -> None:
"""Set up the BH1750 sensor."""
_LOGGER.warning(
"The BH1750 integration is deprecated and will be removed "
"in Home Assistant Core 2022.4; this integration is removed under "
"Architectural Decision Record 0019, more information can be found here: "
"https://github.com/home-assistant/architecture/blob/master/adr/0019-GPIO.md"
)
name = config[CONF_NAME]
bus_number = config[CONF_I2C_BUS]
i2c_address = config[CONF_I2C_ADDRESS]
operation_mode = config[CONF_OPERATION_MODE]
bus = smbus.SMBus(bus_number)
sensor = await hass.async_add_executor_job(
partial(
BH1750,
bus,
i2c_address,
operation_mode=operation_mode,
measurement_delay=config[CONF_DELAY],
sensitivity=config[CONF_SENSITIVITY],
logger=_LOGGER,
)
)
if not sensor.sample_ok:
_LOGGER.error("BH1750 sensor not detected at %s", i2c_address)
return
dev = [BH1750Sensor(sensor, name, LIGHT_LUX, config[CONF_MULTIPLIER])]
_LOGGER.info(
"Setup of BH1750 light sensor at %s in mode %s is complete",
i2c_address,
operation_mode,
)
async_add_entities(dev, True)
class BH1750Sensor(SensorEntity):
"""Implementation of the BH1750 sensor."""
_attr_device_class = SensorDeviceClass.ILLUMINANCE
def __init__(self, bh1750_sensor, name, unit, multiplier=1.0):
"""Initialize the sensor."""
self._attr_name = name
self._attr_native_unit_of_measurement = unit
self._multiplier = multiplier
self.bh1750_sensor = bh1750_sensor
async def async_update(self):
"""Get the latest data from the BH1750 and update the states."""
await self.hass.async_add_executor_job(self.bh1750_sensor.update)
if self.bh1750_sensor.sample_ok and self.bh1750_sensor.light_level >= 0:
self._attr_native_value = int(
round(self.bh1750_sensor.light_level * self._multiplier)
)
else:
_LOGGER.warning(
"Bad Update of sensor.%s: %s", self.name, self.bh1750_sensor.light_level
)
| mezz64/home-assistant | homeassistant/components/bh1750/sensor.py | Python | apache-2.0 | 4,972 |
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Add manage_boot to nodes
Revision ID: 2970d2d44edc
Revises: 18440d0834af
Create Date: 2016-05-16 14:03:02.861672
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '2970d2d44edc'
down_revision = '18440d0834af'
branch_labels = None
depends_on = None
def upgrade():
op.add_column('nodes', sa.Column('manage_boot', sa.Boolean(),
nullable=True, default=True))
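# Editorial note: this migration is upgrade-only, as is usual for
# ironic-inspector. A hypothetical downgrade would simply mirror upgrade():
#
# def downgrade():
#     op.drop_column('nodes', 'manage_boot')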
| openstack/ironic-inspector | ironic_inspector/migrations/versions/2970d2d44edc_add_manage_boot_to_nodes.py | Python | apache-2.0 | 999 |
# Copyright 2015 Observable Networks
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from datetime import datetime
from gzip import compress
from io import BytesIO
from unittest import TestCase
from unittest.mock import MagicMock, patch
import boto3
from botocore.exceptions import PaginationError
from botocore.response import StreamingBody
from botocore.stub import Stubber
from flowlogs_reader import (
aggregated_records,
FlowRecord,
FlowLogsReader,
S3FlowLogsReader,
)
from flowlogs_reader.flowlogs_reader import (
DUPLICATE_NEXT_TOKEN_MESSAGE,
LAST_EVENT_DELAY_MSEC,
)
V2_RECORDS = [
(
'2 123456789010 eni-102010ab 198.51.100.1 192.0.2.1 '
'443 49152 6 10 840 1439387263 1439387264 ACCEPT OK'
),
(
'2 123456789010 eni-102010ab 192.0.2.1 198.51.100.1 '
'49152 443 6 20 1680 1439387264 1439387265 ACCEPT OK'
),
(
'2 123456789010 eni-102010cd 192.0.2.1 198.51.100.1 '
'49152 443 6 20 1680 1439387263 1439387266 REJECT OK'
),
(
'2 123456789010 eni-1a2b3c4d - - - - - - - '
'1431280876 1431280934 - NODATA'
),
(
'2 123456789010 eni-4b118871 - - - - - - - '
'1431280876 1431280934 - SKIPDATA'
),
]
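# For reference, the space-separated fields of a version 2 flow log record
# are: version, account-id, interface-id, srcaddr, dstaddr, srcport,
# dstport, protocol, packets, bytes, start, end, action, log-status.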
V3_FILE = (
'account-id bytes dstaddr dstport end instance-id packets '
'pkt-dstaddr pkt-srcaddr protocol srcaddr srcport start subnet-id '
'tcp-flags type version vpc-id\n'
'000000000000 6392 172.18.160.93 47460 1568300425 i-06f9249b 10 '
'172.18.160.93 192.168.0.1 6 172.18.160.68 443 1568300367 subnet-089e7569 '
'19 IPv4 3 vpc-0461a061\n'
'000000000000 1698 172.18.160.68 443 1568300425 i-06f9249b 10 '
'192.168.0.1 172.18.160.9 6 172.18.160.93 8088 1568300367 subnet-089e7569 '
'3 IPv4 3 vpc-0461a061\n'
)
V4_FILE = (
'account-id bytes dstaddr dstport end instance-id packets '
'pkt-dstaddr pkt-srcaddr protocol srcaddr srcport start subnet-id '
'tcp-flags type version vpc-id region az-id sublocation-type '
'sublocation-id\n'
'000000000000 6392 172.18.160.93 47460 1568300425 i-06f9249b 10 '
'172.18.160.93 192.168.0.1 6 172.18.160.68 443 1568300367 subnet-089e7569 '
'19 IPv4 4 vpc-0461a061 us-east-1 use1-az4 wavelength wlid04\n'
'000000000000 1698 172.18.160.68 443 1568300425 i-06f9249b 10 '
'192.168.0.1 172.18.160.9 6 172.18.160.93 8088 1568300367 subnet-089e7569 '
'3 IPv4 4 vpc-0461a061 us-east-1 use1-az4 outpost outpostid04\n'
)
V5_FILE = (
'account-id action az-id bytes dstaddr dstport end flow-direction '
'instance-id interface-id log-status packets pkt-dst-aws-service '
'pkt-dstaddr pkt-src-aws-service pkt-srcaddr protocol region srcaddr '
'srcport start sublocation-id sublocation-type subnet-id tcp-flags '
'traffic-path type version vpc-id\n'
'999999999999 ACCEPT use2-az2 4895 192.0.2.156 50318 1614866511 '
'ingress i-00123456789abcdef eni-00123456789abcdef OK 15 - 192.0.2.156 '
'S3 198.51.100.6 6 us-east-2 198.51.100.7 443 1614866493 - - '
'subnet-0123456789abcdef 19 - IPv4 5 vpc-04456ab739938ee3f\n'
'999999999999 ACCEPT use2-az2 3015 198.51.100.6 443 1614866511 '
'egress i-00123456789abcdef eni-00123456789abcdef OK 16 S3 198.51.100.7 '
'- 192.0.2.156 6 us-east-2 192.0.2.156 50318 1614866493 - - '
'subnet-0123456789abcdef 7 7 IPv4 5 vpc-04456ab739938ee3f\n'
)
PARQUET_FILE = 'tests/data/flows.parquet'
class FlowRecordTestCase(TestCase):
def test_parse(self):
flow_record = FlowRecord.from_cwl_event({'message': V2_RECORDS[0]})
actual = flow_record.to_dict()
expected = {
'account_id': '123456789010',
'action': 'ACCEPT',
'bytes': 840,
'dstaddr': '192.0.2.1',
'dstport': 49152,
'end': datetime(2015, 8, 12, 13, 47, 44),
'interface_id': 'eni-102010ab',
'log_status': 'OK',
'packets': 10,
'protocol': 6,
'srcaddr': '198.51.100.1',
'srcport': 443,
'start': datetime(2015, 8, 12, 13, 47, 43),
'version': 2,
}
self.assertEqual(actual, expected)
def test_eq(self):
flow_record = FlowRecord.from_cwl_event({'message': V2_RECORDS[0]})
equal_record = FlowRecord.from_cwl_event({'message': V2_RECORDS[0]})
unequal_record = FlowRecord.from_cwl_event({'message': V2_RECORDS[1]})
self.assertEqual(flow_record, equal_record)
self.assertNotEqual(flow_record, unequal_record)
self.assertNotEqual(flow_record, Ellipsis)
def test_hash(self):
record_set = {
FlowRecord.from_cwl_event({'message': V2_RECORDS[0]}),
FlowRecord.from_cwl_event({'message': V2_RECORDS[0]}),
FlowRecord.from_cwl_event({'message': V2_RECORDS[1]}),
FlowRecord.from_cwl_event({'message': V2_RECORDS[1]}),
FlowRecord.from_cwl_event({'message': V2_RECORDS[2]}),
FlowRecord.from_cwl_event({'message': V2_RECORDS[2]}),
}
self.assertEqual(len(record_set), 3)
def test_str(self):
flow_record = FlowRecord.from_cwl_event({'message': V2_RECORDS[0]})
actual = str(flow_record)
expected = (
'version: 2, account_id: 123456789010, '
'interface_id: eni-102010ab, srcaddr: 198.51.100.1, '
'dstaddr: 192.0.2.1, srcport: 443, dstport: 49152, protocol: 6, '
'packets: 10, bytes: 840, start: 2015-08-12 13:47:43, '
'end: 2015-08-12 13:47:44, action: ACCEPT, log_status: OK'
)
self.assertEqual(actual, expected)
def test_to_dict(self):
flow_record = FlowRecord.from_cwl_event({'message': V2_RECORDS[2]})
actual = flow_record.to_dict()
expected = {
'account_id': '123456789010',
'action': 'REJECT',
'bytes': 1680,
'dstaddr': '198.51.100.1',
'dstport': 443,
'end': datetime(2015, 8, 12, 13, 47, 46),
'interface_id': 'eni-102010cd',
'log_status': 'OK',
'packets': 20,
'protocol': 6,
'srcaddr': '192.0.2.1',
'srcport': 49152,
'start': datetime(2015, 8, 12, 13, 47, 43),
'version': 2,
}
self.assertEqual(actual, expected)
def test_millisecond_timestamp(self):
# This record has millisecond timestamps
record = (
'2 123456789010 eni-4b118871 - - - - - - - '
'1512564058000 1512564059000 - SKIPDATA'
)
flow_record = FlowRecord.from_cwl_event({'message': record})
self.assertEqual(flow_record.start, datetime(2017, 12, 6, 12, 40, 58))
self.assertEqual(flow_record.end, datetime(2017, 12, 6, 12, 40, 59))
def test_missing_timestamps(self):
event_data = {
'version': '3',
'srcaddr': '192.0.2.0',
'dstaddr': '198.51.100.0',
'bytes': '200',
}
flow_record = FlowRecord(event_data)
self.assertEqual(
flow_record.to_dict(),
{
'version': 3,
'srcaddr': '192.0.2.0',
'dstaddr': '198.51.100.0',
'bytes': 200,
},
)
self.assertIsNone(flow_record.start)
self.assertIsNone(flow_record.end)
class FlowLogsReaderTestCase(TestCase):
def setUp(self):
self.mock_client = MagicMock()
self.start_time = datetime(2015, 8, 12, 12, 0, 0)
self.end_time = datetime(2015, 8, 12, 13, 0, 0)
self.inst = FlowLogsReader(
'group_name',
start_time=self.start_time,
end_time=self.end_time,
filter_pattern='REJECT',
boto_client=self.mock_client,
)
def test_init(self):
self.assertEqual(self.inst.log_group_name, 'group_name')
self.assertEqual(
datetime.utcfromtimestamp(self.inst.start_ms // 1000),
self.start_time,
)
self.assertEqual(
datetime.utcfromtimestamp(self.inst.end_ms // 1000), self.end_time
)
self.assertEqual(self.inst.paginator_kwargs['filterPattern'], 'REJECT')
@patch('flowlogs_reader.flowlogs_reader.boto3.client', autospec=True)
def test_region_name(self, mock_client):
# Region specified for session
FlowLogsReader('some_group', region_name='some-region')
mock_client.assert_called_with('logs', region_name='some-region')
# None specified
FlowLogsReader('some_group')
mock_client.assert_called_with('logs')
@patch('flowlogs_reader.flowlogs_reader.boto3.client', autospec=True)
def test_get_fields(self, mock_client):
cwl_client = MagicMock()
ec2_client = mock_client.return_value
ec2_client.describe_flow_logs.return_value = {
'FlowLogs': [
{'LogFormat': '${srcaddr} ${dstaddr} ${start} ${log-status}'}
]
}
reader = FlowLogsReader(
'some_group',
boto_client=cwl_client,
fields=None,
)
self.assertEqual(
reader.fields, ('srcaddr', 'dstaddr', 'start', 'log_status')
)
ec2_client.describe_flow_logs.assert_called_once_with(
Filters=[{'Name': 'log-group-name', 'Values': ['some_group']}]
)
def test_read_streams(self):
paginator = MagicMock()
paginator.paginate.return_value = [
{
'events': [
{'logStreamName': 'log_0', 'message': V2_RECORDS[0]},
{'logStreamName': 'log_0', 'message': V2_RECORDS[1]},
],
},
{
'events': [
{'logStreamName': 'log_0', 'message': V2_RECORDS[2]},
{'logStreamName': 'log_1', 'message': V2_RECORDS[3]},
{'logStreamName': 'log_2', 'message': V2_RECORDS[4]},
],
},
]
self.mock_client.get_paginator.return_value = paginator
actual = list(self.inst._read_streams())
expected = []
for page in paginator.paginate.return_value:
expected += page['events']
self.assertEqual(actual, expected)
def test_iteration(self):
paginator = MagicMock()
paginator.paginate.return_value = [
{
'events': [
{'logStreamName': 'log_0', 'message': V2_RECORDS[0]},
{'logStreamName': 'log_0', 'message': V2_RECORDS[1]},
],
},
{
'events': [
{'logStreamName': 'log_0', 'message': V2_RECORDS[2]},
{'logStreamName': 'log_1', 'message': V2_RECORDS[3]},
{'logStreamName': 'log_2', 'message': V2_RECORDS[4]},
],
},
]
self.mock_client.get_paginator.return_value = paginator
# Calling list on the instance causes it to iterate through all records
actual = [next(self.inst)] + list(self.inst)
expected = [
FlowRecord.from_cwl_event({'message': x}) for x in V2_RECORDS
]
self.assertEqual(actual, expected)
all_pages = paginator.paginate.return_value
expected_bytes = sum(
len(e['message']) for p in all_pages for e in p['events']
)
self.assertEqual(self.inst.bytes_processed, expected_bytes)
def test_iteration_error(self):
# Simulate the paginator failing
def _get_paginator(*args, **kwargs):
event_0 = {'logStreamName': 'log_0', 'message': V2_RECORDS[0]}
event_1 = {'logStreamName': 'log_0', 'message': V2_RECORDS[1]}
for item in [{'events': [event_0, event_1]}]:
yield item
err_msg = '{}: {}'.format(DUPLICATE_NEXT_TOKEN_MESSAGE, 'token')
raise PaginationError(message=err_msg)
self.mock_client.get_paginator.return_value.paginate.side_effect = (
_get_paginator
)
# Don't fail if botocore's paginator raises a PaginationError
actual = [next(self.inst)] + list(self.inst)
records = V2_RECORDS[:2]
expected = [FlowRecord.from_cwl_event({'message': x}) for x in records]
self.assertEqual(actual, expected)
    def test_iteration_unexpected_error(self):
# Simulate the paginator failing
def _get_paginator(*args, **kwargs):
event_0 = {'logStreamName': 'log_0', 'message': V2_RECORDS[0]}
yield {'events': [event_0]}
raise PaginationError(message='other error')
self.mock_client.get_paginator.return_value.paginate.side_effect = (
_get_paginator
)
# Fail for unexpected PaginationError
self.assertRaises(PaginationError, lambda: list(self.inst))
def test_threads(self):
inst = FlowLogsReader(
'group_name',
start_time=self.start_time,
end_time=self.end_time,
filter_pattern='REJECT',
boto_client=self.mock_client,
thread_count=1,
)
paginators = []
def _get_paginator(operation):
nonlocal paginators
paginator = MagicMock()
if operation == 'describe_log_streams':
paginator.paginate.return_value = [
{
'logStreams': [
{
'logStreamName': 'too_late',
'firstEventTimestamp': inst.end_ms,
'lastEventTimestamp': inst.start_ms,
},
{
                            'logStreamName': 'too_early',
'firstEventTimestamp': inst.end_ms - 1,
'lastEventTimestamp': (
inst.start_ms - LAST_EVENT_DELAY_MSEC - 1
),
},
],
},
{
'logStreams': [
{
'logStreamName': 'first_stream',
'firstEventTimestamp': inst.start_ms,
'lastEventTimestamp': inst.end_ms,
},
{
'logStreamName': 'second_stream',
'firstEventTimestamp': inst.start_ms,
'lastEventTimestamp': inst.end_ms,
},
],
},
]
elif operation == 'filter_log_events':
paginator.paginate.return_value = [
{
'events': [
{'message': V2_RECORDS[0]},
{'message': V2_RECORDS[1]},
],
},
{
'events': [
{'message': V2_RECORDS[2]},
{'message': V2_RECORDS[3]},
],
},
]
else:
self.fail('invalid operation')
paginators.append(paginator)
return paginator
self.mock_client.get_paginator.side_effect = _get_paginator
events = list(inst)
self.assertEqual(len(events), 8)
paginators[0].paginate.assert_called_once_with(
logGroupName='group_name',
orderBy='LastEventTime',
descending=True,
)
paginators[1].paginate.assert_called_once_with(
logGroupName='group_name',
startTime=inst.start_ms,
endTime=inst.end_ms,
interleaved=True,
filterPattern='REJECT',
logStreamNames=['first_stream'],
)
paginators[2].paginate.assert_called_once_with(
logGroupName='group_name',
startTime=inst.start_ms,
endTime=inst.end_ms,
interleaved=True,
filterPattern='REJECT',
logStreamNames=['second_stream'],
)
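# Editorial sketch of the reader API exercised by the tests above (assumes
# default boto3 credentials; not part of the original test module):
#
# reader = FlowLogsReader('my_log_group', start_time=start, end_time=end)
# for record in reader:
#     print(record.srcaddr, record.dstaddr, record.bytes)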
class S3FlowLogsReaderTestCase(TestCase):
def setUp(self):
self.start_time = datetime(2015, 8, 12, 12, 0, 0)
self.end_time = datetime(2015, 8, 12, 13, 0, 0)
self.thread_count = 0
def tearDown(self):
pass
def _test_iteration(self, data, expected):
boto_client = boto3.client('s3')
with Stubber(boto_client) as stubbed_client:
# Accounts call
accounts_response = {
'ResponseMetadata': {'HTTPStatusCode': 200},
'CommonPrefixes': [
# This one is used
{'Prefix': 'AWSLogs/123456789010/'},
# This one is ignored
{'Prefix': 'AWSLogs/123456789011/'},
],
}
accounts_params = {
'Bucket': 'example-bucket',
'Delimiter': '/',
'Prefix': 'AWSLogs/',
}
stubbed_client.add_response(
'list_objects_v2', accounts_response, accounts_params
)
# Regions call
regions_response = {
'ResponseMetadata': {'HTTPStatusCode': 200},
'CommonPrefixes': [
# This one is used
{'Prefix': 'AWSLogs/123456789010/vpcflowlogs/pangaea-1/'},
# This one is ignored
{'Prefix': 'AWSLogs/123456789010/vpcflowlogs/pangaea-2/'},
],
}
regions_params = {
'Bucket': 'example-bucket',
'Delimiter': '/',
'Prefix': 'AWSLogs/123456789010/vpcflowlogs/',
}
stubbed_client.add_response(
'list_objects_v2', regions_response, regions_params
)
# List objects call
list_response = {
'ResponseMetadata': {'HTTPStatusCode': 200},
'Contents': [
# Too early - not downloaded
{
'Key': (
'AWSLogs/123456789010/vpcflowlogs/pangaea-1/'
'2015/08/12/'
'123456789010_vpcflowlogs_'
'pangaea-1_fl-102010_'
'20150812T1155Z_'
'h45h.log.gz'
),
},
# Right on time
{
'Key': (
'AWSLogs/123456789010/vpcflowlogs/pangaea-1/'
'2015/08/12/'
'123456789010_vpcflowlogs_'
'pangaea-1_fl-102010_'
'20150812T1200Z_'
'h45h.log.gz'
),
},
# Some fool put a different key here
{
'Key': (
'AWSLogs/123456789010/vpcflowlogs/pangaea-1/'
'2015/08/12/test_file.log.gz'
),
},
],
}
list_params = {
'Bucket': 'example-bucket',
'Prefix': (
'AWSLogs/123456789010/vpcflowlogs/pangaea-1/2015/08/12/'
),
}
stubbed_client.add_response(
'list_objects_v2', list_response, list_params
)
# Get object call
data = compress(data.encode('utf-8'))
get_response = {
'ResponseMetadata': {'HTTPStatusCode': 200},
'Body': StreamingBody(BytesIO(data), len(data)),
'ContentLength': len(data),
}
get_params = {
'Bucket': 'example-bucket',
'Key': (
'AWSLogs/123456789010/vpcflowlogs/pangaea-1/'
'2015/08/12/'
'123456789010_vpcflowlogs_'
'pangaea-1_fl-102010_'
'20150812T1200Z_'
'h45h.log.gz'
),
}
stubbed_client.add_response('get_object', get_response, get_params)
# Do the deed
stubbed_client.activate()
reader = S3FlowLogsReader(
'example-bucket',
start_time=self.start_time,
end_time=self.end_time,
thread_count=self.thread_count,
include_accounts={'123456789010'},
include_regions={'pangaea-1'},
boto_client=boto_client,
)
actual = [record.to_dict() for record in reader]
self.assertEqual(actual, expected)
return reader
def test_serial(self):
expected = [
{
'version': 3,
'account_id': '000000000000',
'srcaddr': '172.18.160.68',
'dstaddr': '172.18.160.93',
'srcport': 443,
'dstport': 47460,
'protocol': 6,
'packets': 10,
'bytes': 6392,
'start': datetime(2019, 9, 12, 14, 59, 27),
'end': datetime(2019, 9, 12, 15, 0, 25),
'vpc_id': 'vpc-0461a061',
'subnet_id': 'subnet-089e7569',
'instance_id': 'i-06f9249b',
'tcp_flags': 19,
'type': 'IPv4',
'pkt_srcaddr': '192.168.0.1',
'pkt_dstaddr': '172.18.160.93',
},
{
'version': 3,
'account_id': '000000000000',
'srcaddr': '172.18.160.93',
'dstaddr': '172.18.160.68',
'srcport': 8088,
'dstport': 443,
'protocol': 6,
'packets': 10,
'bytes': 1698,
'start': datetime(2019, 9, 12, 14, 59, 27),
'end': datetime(2019, 9, 12, 15, 0, 25),
'vpc_id': 'vpc-0461a061',
'subnet_id': 'subnet-089e7569',
'instance_id': 'i-06f9249b',
'tcp_flags': 3,
'type': 'IPv4',
'pkt_srcaddr': '172.18.160.9',
'pkt_dstaddr': '192.168.0.1',
},
]
reader = self._test_iteration(V3_FILE, expected)
self.assertEqual(reader.bytes_processed, len(V3_FILE.encode()))
self.assertEqual(
reader.compressed_bytes_processed, len(compress(V3_FILE.encode()))
)
def test_serial_v4(self):
expected = [
{
'version': 4,
'account_id': '000000000000',
'srcaddr': '172.18.160.68',
'dstaddr': '172.18.160.93',
'srcport': 443,
'dstport': 47460,
'protocol': 6,
'packets': 10,
'bytes': 6392,
'start': datetime(2019, 9, 12, 14, 59, 27),
'end': datetime(2019, 9, 12, 15, 0, 25),
'vpc_id': 'vpc-0461a061',
'subnet_id': 'subnet-089e7569',
'instance_id': 'i-06f9249b',
'tcp_flags': 19,
'type': 'IPv4',
'pkt_srcaddr': '192.168.0.1',
'pkt_dstaddr': '172.18.160.93',
'region': 'us-east-1',
'az_id': 'use1-az4',
'sublocation_type': 'wavelength',
'sublocation_id': 'wlid04',
},
{
'version': 4,
'account_id': '000000000000',
'srcaddr': '172.18.160.93',
'dstaddr': '172.18.160.68',
'srcport': 8088,
'dstport': 443,
'protocol': 6,
'packets': 10,
'bytes': 1698,
'start': datetime(2019, 9, 12, 14, 59, 27),
'end': datetime(2019, 9, 12, 15, 0, 25),
'vpc_id': 'vpc-0461a061',
'subnet_id': 'subnet-089e7569',
'instance_id': 'i-06f9249b',
'tcp_flags': 3,
'type': 'IPv4',
'pkt_srcaddr': '172.18.160.9',
'pkt_dstaddr': '192.168.0.1',
'region': 'us-east-1',
'az_id': 'use1-az4',
'sublocation_type': 'outpost',
'sublocation_id': 'outpostid04',
},
]
reader = self._test_iteration(V4_FILE, expected)
self.assertEqual(reader.bytes_processed, len(V4_FILE.encode()))
self.assertEqual(
reader.compressed_bytes_processed, len(compress(V4_FILE.encode()))
)
def test_serial_v5(self):
expected = [
{
'account_id': '999999999999',
'action': 'ACCEPT',
'az_id': 'use2-az2',
'bytes': 4895,
'dstaddr': '192.0.2.156',
'dstport': 50318,
'end': datetime(2021, 3, 4, 14, 1, 51),
'flow_direction': 'ingress',
'instance_id': 'i-00123456789abcdef',
'interface_id': 'eni-00123456789abcdef',
'log_status': 'OK',
'packets': 15,
'pkt_dstaddr': '192.0.2.156',
'pkt_src_aws_service': 'S3',
'pkt_srcaddr': '198.51.100.6',
'protocol': 6,
'region': 'us-east-2',
'srcaddr': '198.51.100.7',
'srcport': 443,
'start': datetime(2021, 3, 4, 14, 1, 33),
'subnet_id': 'subnet-0123456789abcdef',
'tcp_flags': 19,
'type': 'IPv4',
'version': 5,
'vpc_id': 'vpc-04456ab739938ee3f',
},
{
'account_id': '999999999999',
'action': 'ACCEPT',
'az_id': 'use2-az2',
'bytes': 3015,
'dstaddr': '198.51.100.6',
'dstport': 443,
'end': datetime(2021, 3, 4, 14, 1, 51),
'flow_direction': 'egress',
'instance_id': 'i-00123456789abcdef',
'interface_id': 'eni-00123456789abcdef',
'log_status': 'OK',
'packets': 16,
'pkt_dst_aws_service': 'S3',
'pkt_dstaddr': '198.51.100.7',
'pkt_srcaddr': '192.0.2.156',
'protocol': 6,
'region': 'us-east-2',
'srcaddr': '192.0.2.156',
'srcport': 50318,
'start': datetime(2021, 3, 4, 14, 1, 33),
'subnet_id': 'subnet-0123456789abcdef',
'tcp_flags': 7,
'traffic_path': 7,
'type': 'IPv4',
'version': 5,
'vpc_id': 'vpc-04456ab739938ee3f',
},
]
reader = self._test_iteration(V5_FILE, expected)
self.assertEqual(reader.bytes_processed, len(V5_FILE.encode()))
self.assertEqual(
reader.compressed_bytes_processed, len(compress(V5_FILE.encode()))
)
def _test_parquet_reader(self, data, expected):
boto_client = boto3.client('s3')
with Stubber(boto_client) as stubbed_client:
# Accounts call
accounts_response = {
'ResponseMetadata': {'HTTPStatusCode': 200},
'CommonPrefixes': [
# This one is used
{'Prefix': 'AWSLogs/123456789010/'},
# This one is ignored
{'Prefix': 'AWSLogs/123456789011/'},
],
}
accounts_params = {
'Bucket': 'example-bucket',
'Delimiter': '/',
'Prefix': 'AWSLogs/',
}
stubbed_client.add_response(
'list_objects_v2', accounts_response, accounts_params
)
# Regions call
regions_response = {
'ResponseMetadata': {'HTTPStatusCode': 200},
'CommonPrefixes': [
# This one is used
{'Prefix': 'AWSLogs/123456789010/vpcflowlogs/pangaea-1/'},
# This one is ignored
{'Prefix': 'AWSLogs/123456789010/vpcflowlogs/pangaea-2/'},
],
}
regions_params = {
'Bucket': 'example-bucket',
'Delimiter': '/',
'Prefix': 'AWSLogs/123456789010/vpcflowlogs/',
}
stubbed_client.add_response(
'list_objects_v2', regions_response, regions_params
)
# List objects call
list_response = {
'ResponseMetadata': {'HTTPStatusCode': 200},
'Contents': [
{
'Key': (
'AWSLogs/123456789010/vpcflowlogs/pangaea-1/'
'2015/08/12/'
'123456789010_vpcflowlogs_'
'pangaea-1_fl-102010_'
'20150812T1200Z_'
'h45h.log.parquet'
),
},
],
}
list_params = {
'Bucket': 'example-bucket',
'Prefix': (
'AWSLogs/123456789010/vpcflowlogs/pangaea-1/2015/08/12/'
),
}
stubbed_client.add_response(
'list_objects_v2', list_response, list_params
)
get_response = {
'ResponseMetadata': {'HTTPStatusCode': 200},
'Body': StreamingBody(BytesIO(data), len(data)),
'ContentLength': len(data),
}
get_params = {
'Bucket': 'example-bucket',
'Key': (
'AWSLogs/123456789010/vpcflowlogs/pangaea-1/'
'2015/08/12/'
'123456789010_vpcflowlogs_'
'pangaea-1_fl-102010_'
'20150812T1200Z_'
'h45h.log.parquet'
),
}
stubbed_client.add_response('get_object', get_response, get_params)
# Do the deed
stubbed_client.activate()
reader = S3FlowLogsReader(
'example-bucket',
start_time=self.start_time,
end_time=self.end_time,
thread_count=self.thread_count,
include_accounts={'123456789010'},
include_regions={'pangaea-1'},
boto_client=boto_client,
)
actual = [record.to_dict() for record in reader]
self.assertEqual(actual, expected)
return reader
def test_serial_parquet(self):
expected = [
{
'version': 2,
'account_id': '123456789010',
'interface_id': 'eni-102010ab',
'srcaddr': '198.51.100.1',
'dstaddr': '192.0.2.1',
'srcport': 443,
'dstport': 49152,
'protocol': 6,
'packets': 10,
'bytes': 840,
'start': datetime(2015, 8, 12, 13, 47, 43),
'end': datetime(2015, 8, 12, 13, 47, 44),
'action': 'ACCEPT',
'log_status': 'OK',
},
{
'version': 2,
'account_id': '123456789010',
'interface_id': 'eni-202010ab',
'start': datetime(2015, 8, 12, 13, 47, 43),
'end': datetime(2015, 8, 12, 13, 47, 44),
'log_status': 'NODATA',
},
{
'version': 2,
'account_id': '123456789010',
'interface_id': 'eni-202010ab',
'srcaddr': '198.51.100.7',
'dstaddr': '192.0.2.156',
'srcport': 80,
'dstport': 100,
'protocol': 8080,
'start': datetime(2015, 8, 12, 13, 47, 43),
'end': datetime(2015, 8, 12, 13, 47, 44),
'log_status': 'NODATA',
},
]
with open(PARQUET_FILE, "rb") as parquet_data:
data = parquet_data.read()
reader = self._test_parquet_reader(data, expected)
self.assertEqual(reader.compressed_bytes_processed, len(data))
self.assertEqual(reader.bytes_processed, parquet_data.tell())
def test_threads(self):
expected = [
{
'version': 3,
'account_id': '000000000000',
'srcaddr': '172.18.160.68',
'dstaddr': '172.18.160.93',
'srcport': 443,
'dstport': 47460,
'protocol': 6,
'packets': 10,
'bytes': 6392,
'start': datetime(2019, 9, 12, 14, 59, 27),
'end': datetime(2019, 9, 12, 15, 0, 25),
'vpc_id': 'vpc-0461a061',
'subnet_id': 'subnet-089e7569',
'instance_id': 'i-06f9249b',
'tcp_flags': 19,
'type': 'IPv4',
'pkt_srcaddr': '192.168.0.1',
'pkt_dstaddr': '172.18.160.93',
},
{
'version': 3,
'account_id': '000000000000',
'srcaddr': '172.18.160.93',
'dstaddr': '172.18.160.68',
'srcport': 8088,
'dstport': 443,
'protocol': 6,
'packets': 10,
'bytes': 1698,
'start': datetime(2019, 9, 12, 14, 59, 27),
'end': datetime(2019, 9, 12, 15, 0, 25),
'vpc_id': 'vpc-0461a061',
'subnet_id': 'subnet-089e7569',
'instance_id': 'i-06f9249b',
'tcp_flags': 3,
'type': 'IPv4',
'pkt_srcaddr': '172.18.160.9',
'pkt_dstaddr': '192.168.0.1',
},
]
self.thread_count = 2
self._test_iteration(V3_FILE, expected)
class AggregationTestCase(TestCase):
def test_aggregated_records(self):
# Aggregate by 5-tuple by default
events = [
{'message': V2_RECORDS[0]},
{'message': V2_RECORDS[1]},
{'message': V2_RECORDS[2].replace('REJECT', 'ACCEPT')},
{'message': V2_RECORDS[3]},
]
all_records = (FlowRecord.from_cwl_event(x) for x in events)
results = aggregated_records(all_records)
actual = sorted(results, key=lambda x: x['srcaddr'])
expected = [
{
'srcaddr': '192.0.2.1',
'srcport': 49152,
'dstaddr': '198.51.100.1',
'dstport': 443,
'protocol': 6,
'start': datetime(2015, 8, 12, 13, 47, 43),
'end': datetime(2015, 8, 12, 13, 47, 46),
'packets': 40,
'bytes': 3360,
},
{
'srcaddr': '198.51.100.1',
'srcport': 443,
'dstaddr': '192.0.2.1',
'dstport': 49152,
'protocol': 6,
'start': datetime(2015, 8, 12, 13, 47, 43),
'end': datetime(2015, 8, 12, 13, 47, 44),
'packets': 10,
'bytes': 840,
},
]
self.assertEqual(actual, expected)
def test_aggregated_records_custom(self):
# Aggregate by interface_id
events = [
{'message': V2_RECORDS[1]},
{'message': V2_RECORDS[2].replace('REJECT', 'ACCEPT')},
]
all_records = (FlowRecord.from_cwl_event(x) for x in events)
key_fields = ('interface_id', 'srcaddr', 'srcport', 'dstport')
results = aggregated_records(all_records, key_fields=key_fields)
actual = sorted(results, key=lambda x: x['interface_id'])
expected = [
{
'srcaddr': '192.0.2.1',
'srcport': 49152,
'interface_id': 'eni-102010ab',
'dstport': 443,
'start': datetime(2015, 8, 12, 13, 47, 44),
'end': datetime(2015, 8, 12, 13, 47, 45),
'packets': 20,
'bytes': 1680,
},
{
'srcaddr': '192.0.2.1',
'srcport': 49152,
'interface_id': 'eni-102010cd',
'dstport': 443,
'start': datetime(2015, 8, 12, 13, 47, 43),
'end': datetime(2015, 8, 12, 13, 47, 46),
'packets': 20,
'bytes': 1680,
},
]
self.assertEqual(actual, expected)
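# Editorial sketch: aggregated_records merges records that share the key
# fields (the 5-tuple by default), summing packets and bytes and widening
# the start/end window, e.g.:
#
# for summary in aggregated_records(records, key_fields=('srcaddr', 'dstaddr')):
#     print(summary['bytes'])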
| obsrvbl/flowlogs-reader | tests/test_flowlogs_reader.py | Python | apache-2.0 | 38,204 |
# Copyright (c) OpenMMLab. All rights reserved.
import mmcv
import numpy as np
import torch
import torch.nn.functional as F
from ..builder import BBOX_CODERS
from ..transforms import bbox_rescale
from .base_bbox_coder import BaseBBoxCoder
@BBOX_CODERS.register_module()
class BucketingBBoxCoder(BaseBBoxCoder):
"""Bucketing BBox Coder for Side-Aware Boundary Localization (SABL).
Boundary Localization with Bucketing and Bucketing Guided Rescoring
are implemented here.
Please refer to https://arxiv.org/abs/1912.04260 for more details.
Args:
num_buckets (int): Number of buckets.
scale_factor (int): Scale factor of proposals to generate buckets.
offset_topk (int): Topk buckets are used to generate
bucket fine regression targets. Defaults to 2.
        offset_upperbound (float): Offset upper bound used when generating
            bucket fine regression targets, to avoid overly large offset
            displacements. Defaults to 1.0.
cls_ignore_neighbor (bool): Ignore second nearest bucket or Not.
Defaults to True.
clip_border (bool, optional): Whether clip the objects outside the
border of the image. Defaults to True.
"""
def __init__(self,
num_buckets,
scale_factor,
offset_topk=2,
offset_upperbound=1.0,
cls_ignore_neighbor=True,
clip_border=True):
super(BucketingBBoxCoder, self).__init__()
self.num_buckets = num_buckets
self.scale_factor = scale_factor
self.offset_topk = offset_topk
self.offset_upperbound = offset_upperbound
self.cls_ignore_neighbor = cls_ignore_neighbor
self.clip_border = clip_border
def encode(self, bboxes, gt_bboxes):
"""Get bucketing estimation and fine regression targets during
training.
Args:
bboxes (torch.Tensor): source boxes, e.g., object proposals.
gt_bboxes (torch.Tensor): target of the transformation, e.g.,
ground truth boxes.
Returns:
            encoded_bboxes (tuple[Tensor]): Bucketing estimation
                and fine regression targets and weights.
"""
assert bboxes.size(0) == gt_bboxes.size(0)
assert bboxes.size(-1) == gt_bboxes.size(-1) == 4
encoded_bboxes = bbox2bucket(bboxes, gt_bboxes, self.num_buckets,
self.scale_factor, self.offset_topk,
self.offset_upperbound,
self.cls_ignore_neighbor)
return encoded_bboxes
def decode(self, bboxes, pred_bboxes, max_shape=None):
"""Apply transformation `pred_bboxes` to `boxes`.
Args:
boxes (torch.Tensor): Basic boxes.
pred_bboxes (torch.Tensor): Predictions for bucketing estimation
and fine regression
max_shape (tuple[int], optional): Maximum shape of boxes.
Defaults to None.
Returns:
torch.Tensor: Decoded boxes.
"""
assert len(pred_bboxes) == 2
cls_preds, offset_preds = pred_bboxes
        assert cls_preds.size(0) == bboxes.size(0)
        assert offset_preds.size(0) == bboxes.size(0)
decoded_bboxes = bucket2bbox(bboxes, cls_preds, offset_preds,
self.num_buckets, self.scale_factor,
max_shape, self.clip_border)
return decoded_bboxes
@mmcv.jit(coderize=True)
def generate_buckets(proposals, num_buckets, scale_factor=1.0):
    """Generate buckets w.r.t. bucket number and scale factor of proposals.
Args:
proposals (Tensor): Shape (n, 4)
num_buckets (int): Number of buckets.
scale_factor (float): Scale factor to rescale proposals.
Returns:
tuple[Tensor]: (bucket_w, bucket_h, l_buckets, r_buckets,
t_buckets, d_buckets)
- bucket_w: Width of buckets on x-axis. Shape (n, ).
- bucket_h: Height of buckets on y-axis. Shape (n, ).
        - l_buckets: Left buckets. Shape (n, ceil(num_buckets/2)).
        - r_buckets: Right buckets. Shape (n, ceil(num_buckets/2)).
        - t_buckets: Top buckets. Shape (n, ceil(num_buckets/2)).
        - d_buckets: Down buckets. Shape (n, ceil(num_buckets/2)).
"""
proposals = bbox_rescale(proposals, scale_factor)
# number of buckets in each side
side_num = int(np.ceil(num_buckets / 2.0))
pw = proposals[..., 2] - proposals[..., 0]
ph = proposals[..., 3] - proposals[..., 1]
px1 = proposals[..., 0]
py1 = proposals[..., 1]
px2 = proposals[..., 2]
py2 = proposals[..., 3]
bucket_w = pw / num_buckets
bucket_h = ph / num_buckets
# left buckets
l_buckets = px1[:, None] + (0.5 + torch.arange(
0, side_num).to(proposals).float())[None, :] * bucket_w[:, None]
# right buckets
r_buckets = px2[:, None] - (0.5 + torch.arange(
0, side_num).to(proposals).float())[None, :] * bucket_w[:, None]
# top buckets
t_buckets = py1[:, None] + (0.5 + torch.arange(
0, side_num).to(proposals).float())[None, :] * bucket_h[:, None]
# down buckets
d_buckets = py2[:, None] - (0.5 + torch.arange(
0, side_num).to(proposals).float())[None, :] * bucket_h[:, None]
return bucket_w, bucket_h, l_buckets, r_buckets, t_buckets, d_buckets
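# Editorial example (hypothetical numbers): for a single proposal
# [[0., 0., 8., 8.]] with num_buckets=8 and scale_factor=1.0, side_num is 4,
# bucket_w == bucket_h == 1.0, and l_buckets == [[0.5, 1.5, 2.5, 3.5]], i.e.
# the centers of the four leftmost buckets measured from x1.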
@mmcv.jit(coderize=True)
def bbox2bucket(proposals,
gt,
num_buckets,
scale_factor,
offset_topk=2,
offset_upperbound=1.0,
cls_ignore_neighbor=True):
"""Generate buckets estimation and fine regression targets.
Args:
proposals (Tensor): Shape (n, 4)
gt (Tensor): Shape (n, 4)
num_buckets (int): Number of buckets.
scale_factor (float): Scale factor to rescale proposals.
offset_topk (int): Topk buckets are used to generate
bucket fine regression targets. Defaults to 2.
        offset_upperbound (float): Offset upper bound used when generating
            bucket fine regression targets, to avoid overly large offset
            displacements. Defaults to 1.0.
cls_ignore_neighbor (bool): Ignore second nearest bucket or Not.
Defaults to True.
Returns:
tuple[Tensor]: (offsets, offsets_weights, bucket_labels, cls_weights).
- offsets: Fine regression targets. \
Shape (n, num_buckets*2).
- offsets_weights: Fine regression weights. \
Shape (n, num_buckets*2).
- bucket_labels: Bucketing estimation labels. \
Shape (n, num_buckets*2).
- cls_weights: Bucketing estimation weights. \
Shape (n, num_buckets*2).
"""
assert proposals.size() == gt.size()
# generate buckets
proposals = proposals.float()
gt = gt.float()
(bucket_w, bucket_h, l_buckets, r_buckets, t_buckets,
     d_buckets) = generate_buckets(proposals, num_buckets, scale_factor)
gx1 = gt[..., 0]
gy1 = gt[..., 1]
gx2 = gt[..., 2]
gy2 = gt[..., 3]
# generate offset targets and weights
# offsets from buckets to gts
l_offsets = (l_buckets - gx1[:, None]) / bucket_w[:, None]
r_offsets = (r_buckets - gx2[:, None]) / bucket_w[:, None]
t_offsets = (t_buckets - gy1[:, None]) / bucket_h[:, None]
d_offsets = (d_buckets - gy2[:, None]) / bucket_h[:, None]
# select top-k nearest buckets
l_topk, l_label = l_offsets.abs().topk(
offset_topk, dim=1, largest=False, sorted=True)
r_topk, r_label = r_offsets.abs().topk(
offset_topk, dim=1, largest=False, sorted=True)
t_topk, t_label = t_offsets.abs().topk(
offset_topk, dim=1, largest=False, sorted=True)
d_topk, d_label = d_offsets.abs().topk(
offset_topk, dim=1, largest=False, sorted=True)
offset_l_weights = l_offsets.new_zeros(l_offsets.size())
offset_r_weights = r_offsets.new_zeros(r_offsets.size())
offset_t_weights = t_offsets.new_zeros(t_offsets.size())
offset_d_weights = d_offsets.new_zeros(d_offsets.size())
inds = torch.arange(0, proposals.size(0)).to(proposals).long()
# generate offset weights of top-k nearest buckets
for k in range(offset_topk):
if k >= 1:
            offset_l_weights[inds, l_label[:, k]] = (
                l_topk[:, k] < offset_upperbound).float()
            offset_r_weights[inds, r_label[:, k]] = (
                r_topk[:, k] < offset_upperbound).float()
            offset_t_weights[inds, t_label[:, k]] = (
                t_topk[:, k] < offset_upperbound).float()
            offset_d_weights[inds, d_label[:, k]] = (
                d_topk[:, k] < offset_upperbound).float()
else:
offset_l_weights[inds, l_label[:, k]] = 1.0
offset_r_weights[inds, r_label[:, k]] = 1.0
offset_t_weights[inds, t_label[:, k]] = 1.0
offset_d_weights[inds, d_label[:, k]] = 1.0
offsets = torch.cat([l_offsets, r_offsets, t_offsets, d_offsets], dim=-1)
    offsets_weights = torch.cat(
        [offset_l_weights, offset_r_weights, offset_t_weights,
         offset_d_weights],
        dim=-1)
# generate bucket labels and weight
side_num = int(np.ceil(num_buckets / 2.0))
labels = torch.stack(
[l_label[:, 0], r_label[:, 0], t_label[:, 0], d_label[:, 0]], dim=-1)
batch_size = labels.size(0)
bucket_labels = F.one_hot(labels.view(-1), side_num).view(batch_size,
-1).float()
bucket_cls_l_weights = (l_offsets.abs() < 1).float()
bucket_cls_r_weights = (r_offsets.abs() < 1).float()
bucket_cls_t_weights = (t_offsets.abs() < 1).float()
bucket_cls_d_weights = (d_offsets.abs() < 1).float()
    bucket_cls_weights = torch.cat(
        [bucket_cls_l_weights, bucket_cls_r_weights, bucket_cls_t_weights,
         bucket_cls_d_weights],
        dim=-1)
# ignore second nearest buckets for cls if necessary
if cls_ignore_neighbor:
bucket_cls_weights = (~((bucket_cls_weights == 1) &
(bucket_labels == 0))).float()
else:
bucket_cls_weights[:] = 1.0
return offsets, offsets_weights, bucket_labels, bucket_cls_weights
@mmcv.jit(coderize=True)
def bucket2bbox(proposals,
cls_preds,
offset_preds,
num_buckets,
scale_factor=1.0,
max_shape=None,
clip_border=True):
"""Apply bucketing estimation (cls preds) and fine regression (offset
preds) to generate det bboxes.
Args:
proposals (Tensor): Boxes to be transformed. Shape (n, 4)
cls_preds (Tensor): bucketing estimation. Shape (n, num_buckets*2).
offset_preds (Tensor): fine regression. Shape (n, num_buckets*2).
num_buckets (int): Number of buckets.
scale_factor (float): Scale factor to rescale proposals.
max_shape (tuple[int, int]): Maximum bounds for boxes. specifies (H, W)
clip_border (bool, optional): Whether clip the objects outside the
border of the image. Defaults to True.
Returns:
tuple[Tensor]: (bboxes, loc_confidence).
- bboxes: predicted bboxes. Shape (n, 4)
- loc_confidence: localization confidence of predicted bboxes.
Shape (n,).
"""
side_num = int(np.ceil(num_buckets / 2.0))
cls_preds = cls_preds.view(-1, side_num)
offset_preds = offset_preds.view(-1, side_num)
scores = F.softmax(cls_preds, dim=1)
score_topk, score_label = scores.topk(2, dim=1, largest=True, sorted=True)
rescaled_proposals = bbox_rescale(proposals, scale_factor)
pw = rescaled_proposals[..., 2] - rescaled_proposals[..., 0]
ph = rescaled_proposals[..., 3] - rescaled_proposals[..., 1]
px1 = rescaled_proposals[..., 0]
py1 = rescaled_proposals[..., 1]
px2 = rescaled_proposals[..., 2]
py2 = rescaled_proposals[..., 3]
bucket_w = pw / num_buckets
bucket_h = ph / num_buckets
score_inds_l = score_label[0::4, 0]
score_inds_r = score_label[1::4, 0]
score_inds_t = score_label[2::4, 0]
score_inds_d = score_label[3::4, 0]
l_buckets = px1 + (0.5 + score_inds_l.float()) * bucket_w
r_buckets = px2 - (0.5 + score_inds_r.float()) * bucket_w
t_buckets = py1 + (0.5 + score_inds_t.float()) * bucket_h
d_buckets = py2 - (0.5 + score_inds_d.float()) * bucket_h
offsets = offset_preds.view(-1, 4, side_num)
inds = torch.arange(proposals.size(0)).to(proposals).long()
l_offsets = offsets[:, 0, :][inds, score_inds_l]
r_offsets = offsets[:, 1, :][inds, score_inds_r]
t_offsets = offsets[:, 2, :][inds, score_inds_t]
d_offsets = offsets[:, 3, :][inds, score_inds_d]
x1 = l_buckets - l_offsets * bucket_w
x2 = r_buckets - r_offsets * bucket_w
y1 = t_buckets - t_offsets * bucket_h
y2 = d_buckets - d_offsets * bucket_h
if clip_border and max_shape is not None:
x1 = x1.clamp(min=0, max=max_shape[1] - 1)
y1 = y1.clamp(min=0, max=max_shape[0] - 1)
x2 = x2.clamp(min=0, max=max_shape[1] - 1)
y2 = y2.clamp(min=0, max=max_shape[0] - 1)
bboxes = torch.cat([x1[:, None], y1[:, None], x2[:, None], y2[:, None]],
dim=-1)
# bucketing guided rescoring
loc_confidence = score_topk[:, 0]
top2_neighbor_inds = (score_label[:, 0] - score_label[:, 1]).abs() == 1
loc_confidence += score_topk[:, 1] * top2_neighbor_inds.float()
loc_confidence = loc_confidence.view(-1, 4).mean(dim=1)
return bboxes, loc_confidence
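# Editorial sketch of the encode/decode round trip (hypothetical
# hyper-parameter values; see the class docstring above for their meaning):
#
# coder = BucketingBBoxCoder(num_buckets=14, scale_factor=1.7)
# offsets, offset_weights, labels, cls_weights = coder.encode(proposals, gts)
# bboxes, loc_confidence = coder.decode(proposals, (cls_preds, offset_preds))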
| open-mmlab/mmdetection | mmdet/core/bbox/coder/bucketing_bbox_coder.py | Python | apache-2.0 | 14,119 |
# coding=utf-8
# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""PyTorch RoBERTa model."""
import math
import torch
import torch.utils.checkpoint
from packaging import version
from torch import nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from ...activations import ACT2FN, gelu
from ...file_utils import (
add_code_sample_docstrings,
add_start_docstrings,
add_start_docstrings_to_model_forward,
replace_return_docstrings,
)
from ...modeling_outputs import (
BaseModelOutputWithPastAndCrossAttentions,
BaseModelOutputWithPoolingAndCrossAttentions,
CausalLMOutputWithCrossAttentions,
MaskedLMOutput,
MultipleChoiceModelOutput,
QuestionAnsweringModelOutput,
SequenceClassifierOutput,
TokenClassifierOutput,
)
from ...modeling_utils import (
PreTrainedModel,
apply_chunking_to_forward,
find_pruneable_heads_and_indices,
prune_linear_layer,
)
from ...utils import logging
from .configuration_roberta import RobertaConfig
logger = logging.get_logger(__name__)
_CHECKPOINT_FOR_DOC = "roberta-base"
_CONFIG_FOR_DOC = "RobertaConfig"
_TOKENIZER_FOR_DOC = "RobertaTokenizer"
ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST = [
"roberta-base",
"roberta-large",
"roberta-large-mnli",
"distilroberta-base",
"roberta-base-openai-detector",
"roberta-large-openai-detector",
# See all RoBERTa models at https://huggingface.co/models?filter=roberta
]
class RobertaEmbeddings(nn.Module):
"""
Same as BertEmbeddings with a tiny tweak for positional embeddings indexing.
"""
# Copied from transformers.models.bert.modeling_bert.BertEmbeddings.__init__
def __init__(self, config):
super().__init__()
self.word_embeddings = nn.Embedding(config.vocab_size, config.hidden_size, padding_idx=config.pad_token_id)
self.position_embeddings = nn.Embedding(config.max_position_embeddings, config.hidden_size)
self.token_type_embeddings = nn.Embedding(config.type_vocab_size, config.hidden_size)
# self.LayerNorm is not snake-cased to stick with TensorFlow model variable name and be able to load
# any TensorFlow checkpoint file
self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
# position_ids (1, len position emb) is contiguous in memory and exported when serialized
self.position_embedding_type = getattr(config, "position_embedding_type", "absolute")
self.register_buffer("position_ids", torch.arange(config.max_position_embeddings).expand((1, -1)))
if version.parse(torch.__version__) > version.parse("1.6.0"):
self.register_buffer(
"token_type_ids",
torch.zeros(self.position_ids.size(), dtype=torch.long),
persistent=False,
)
# End copy
self.padding_idx = config.pad_token_id
self.position_embeddings = nn.Embedding(
config.max_position_embeddings, config.hidden_size, padding_idx=self.padding_idx
)
def forward(
self, input_ids=None, token_type_ids=None, position_ids=None, inputs_embeds=None, past_key_values_length=0
):
if position_ids is None:
if input_ids is not None:
# Create the position ids from the input token ids. Any padded tokens remain padded.
position_ids = create_position_ids_from_input_ids(input_ids, self.padding_idx, past_key_values_length)
else:
position_ids = self.create_position_ids_from_inputs_embeds(inputs_embeds)
if input_ids is not None:
input_shape = input_ids.size()
else:
input_shape = inputs_embeds.size()[:-1]
seq_length = input_shape[1]
        # If token_type_ids is not provided, fall back to the registered buffer
        # from the constructor (all zeros). This helps users trace the model
        # without passing token_type_ids and solves issue #5664.
if token_type_ids is None:
if hasattr(self, "token_type_ids"):
buffered_token_type_ids = self.token_type_ids[:, :seq_length]
buffered_token_type_ids_expanded = buffered_token_type_ids.expand(input_shape[0], seq_length)
token_type_ids = buffered_token_type_ids_expanded
else:
token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=self.position_ids.device)
if inputs_embeds is None:
inputs_embeds = self.word_embeddings(input_ids)
token_type_embeddings = self.token_type_embeddings(token_type_ids)
embeddings = inputs_embeds + token_type_embeddings
if self.position_embedding_type == "absolute":
position_embeddings = self.position_embeddings(position_ids)
embeddings += position_embeddings
embeddings = self.LayerNorm(embeddings)
embeddings = self.dropout(embeddings)
return embeddings
def create_position_ids_from_inputs_embeds(self, inputs_embeds):
"""
We are provided embeddings directly. We cannot infer which are padded so just generate sequential position ids.
Args:
inputs_embeds: torch.Tensor
Returns: torch.Tensor
"""
input_shape = inputs_embeds.size()[:-1]
sequence_length = input_shape[1]
position_ids = torch.arange(
self.padding_idx + 1, sequence_length + self.padding_idx + 1, dtype=torch.long, device=inputs_embeds.device
)
return position_ids.unsqueeze(0).expand(input_shape)
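# Editorial note: because RoBERTa reserves padding_idx (pad_token_id, which is
# 1 in the standard checkpoints) for padding, the sequential position ids
# above start at padding_idx + 1, e.g. a length-4 input yields [2, 3, 4, 5].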
# Copied from transformers.models.bert.modeling_bert.BertSelfAttention with Bert->Roberta
class RobertaSelfAttention(nn.Module):
def __init__(self, config, position_embedding_type=None):
super().__init__()
if config.hidden_size % config.num_attention_heads != 0 and not hasattr(config, "embedding_size"):
raise ValueError(
f"The hidden size ({config.hidden_size}) is not a multiple of the number of attention "
f"heads ({config.num_attention_heads})"
)
self.num_attention_heads = config.num_attention_heads
self.attention_head_size = int(config.hidden_size / config.num_attention_heads)
self.all_head_size = self.num_attention_heads * self.attention_head_size
self.query = nn.Linear(config.hidden_size, self.all_head_size)
self.key = nn.Linear(config.hidden_size, self.all_head_size)
self.value = nn.Linear(config.hidden_size, self.all_head_size)
self.dropout = nn.Dropout(config.attention_probs_dropout_prob)
self.position_embedding_type = position_embedding_type or getattr(
config, "position_embedding_type", "absolute"
)
if self.position_embedding_type == "relative_key" or self.position_embedding_type == "relative_key_query":
self.max_position_embeddings = config.max_position_embeddings
self.distance_embedding = nn.Embedding(2 * config.max_position_embeddings - 1, self.attention_head_size)
self.is_decoder = config.is_decoder
def transpose_for_scores(self, x):
new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.attention_head_size)
x = x.view(new_x_shape)
return x.permute(0, 2, 1, 3)
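    # Shape walkthrough (editorial): transpose_for_scores turns an input of
    # shape (batch, seq_len, hidden) into (batch, num_heads, seq_len,
    # head_size), so per-head attention scores can be computed with one
    # batched matmul in forward() below.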
def forward(
self,
hidden_states,
attention_mask=None,
head_mask=None,
encoder_hidden_states=None,
encoder_attention_mask=None,
past_key_value=None,
output_attentions=False,
):
mixed_query_layer = self.query(hidden_states)
# If this is instantiated as a cross-attention module, the keys
# and values come from an encoder; the attention mask needs to be
# such that the encoder's padding tokens are not attended to.
is_cross_attention = encoder_hidden_states is not None
if is_cross_attention and past_key_value is not None:
# reuse k,v, cross_attentions
key_layer = past_key_value[0]
value_layer = past_key_value[1]
attention_mask = encoder_attention_mask
elif is_cross_attention:
key_layer = self.transpose_for_scores(self.key(encoder_hidden_states))
value_layer = self.transpose_for_scores(self.value(encoder_hidden_states))
attention_mask = encoder_attention_mask
elif past_key_value is not None:
key_layer = self.transpose_for_scores(self.key(hidden_states))
value_layer = self.transpose_for_scores(self.value(hidden_states))
key_layer = torch.cat([past_key_value[0], key_layer], dim=2)
value_layer = torch.cat([past_key_value[1], value_layer], dim=2)
else:
key_layer = self.transpose_for_scores(self.key(hidden_states))
value_layer = self.transpose_for_scores(self.value(hidden_states))
query_layer = self.transpose_for_scores(mixed_query_layer)
if self.is_decoder:
# if cross_attention save Tuple(torch.Tensor, torch.Tensor) of all cross attention key/value_states.
# Further calls to cross_attention layer can then reuse all cross-attention
# key/value_states (first "if" case)
# if uni-directional self-attention (decoder) save Tuple(torch.Tensor, torch.Tensor) of
# all previous decoder key/value_states. Further calls to uni-directional self-attention
# can concat previous decoder key/value_states to current projected key/value_states (third "elif" case)
# if encoder bi-directional self-attention `past_key_value` is always `None`
past_key_value = (key_layer, value_layer)
# Take the dot product between "query" and "key" to get the raw attention scores.
attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2))
if self.position_embedding_type == "relative_key" or self.position_embedding_type == "relative_key_query":
seq_length = hidden_states.size()[1]
position_ids_l = torch.arange(seq_length, dtype=torch.long, device=hidden_states.device).view(-1, 1)
position_ids_r = torch.arange(seq_length, dtype=torch.long, device=hidden_states.device).view(1, -1)
distance = position_ids_l - position_ids_r
positional_embedding = self.distance_embedding(distance + self.max_position_embeddings - 1)
positional_embedding = positional_embedding.to(dtype=query_layer.dtype) # fp16 compatibility
if self.position_embedding_type == "relative_key":
relative_position_scores = torch.einsum("bhld,lrd->bhlr", query_layer, positional_embedding)
attention_scores = attention_scores + relative_position_scores
elif self.position_embedding_type == "relative_key_query":
relative_position_scores_query = torch.einsum("bhld,lrd->bhlr", query_layer, positional_embedding)
relative_position_scores_key = torch.einsum("bhrd,lrd->bhlr", key_layer, positional_embedding)
attention_scores = attention_scores + relative_position_scores_query + relative_position_scores_key
attention_scores = attention_scores / math.sqrt(self.attention_head_size)
if attention_mask is not None:
# Apply the attention mask is (precomputed for all layers in RobertaModel forward() function)
attention_scores = attention_scores + attention_mask
# Normalize the attention scores to probabilities.
attention_probs = nn.functional.softmax(attention_scores, dim=-1)
# This is actually dropping out entire tokens to attend to, which might
# seem a bit unusual, but is taken from the original Transformer paper.
attention_probs = self.dropout(attention_probs)
# Mask heads if we want to
if head_mask is not None:
attention_probs = attention_probs * head_mask
context_layer = torch.matmul(attention_probs, value_layer)
context_layer = context_layer.permute(0, 2, 1, 3).contiguous()
new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,)
context_layer = context_layer.view(new_context_layer_shape)
outputs = (context_layer, attention_probs) if output_attentions else (context_layer,)
if self.is_decoder:
outputs = outputs + (past_key_value,)
return outputs
# Copied from transformers.models.bert.modeling_bert.BertSelfOutput
class RobertaSelfOutput(nn.Module):
def __init__(self, config):
super().__init__()
self.dense = nn.Linear(config.hidden_size, config.hidden_size)
self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
def forward(self, hidden_states, input_tensor):
hidden_states = self.dense(hidden_states)
hidden_states = self.dropout(hidden_states)
hidden_states = self.LayerNorm(hidden_states + input_tensor)
return hidden_states
# Copied from transformers.models.bert.modeling_bert.BertAttention with Bert->Roberta
class RobertaAttention(nn.Module):
def __init__(self, config, position_embedding_type=None):
super().__init__()
self.self = RobertaSelfAttention(config, position_embedding_type=position_embedding_type)
self.output = RobertaSelfOutput(config)
self.pruned_heads = set()
def prune_heads(self, heads):
if len(heads) == 0:
return
heads, index = find_pruneable_heads_and_indices(
heads, self.self.num_attention_heads, self.self.attention_head_size, self.pruned_heads
)
# Prune linear layers
self.self.query = prune_linear_layer(self.self.query, index)
self.self.key = prune_linear_layer(self.self.key, index)
self.self.value = prune_linear_layer(self.self.value, index)
self.output.dense = prune_linear_layer(self.output.dense, index, dim=1)
# Update hyper params and store pruned heads
self.self.num_attention_heads = self.self.num_attention_heads - len(heads)
self.self.all_head_size = self.self.attention_head_size * self.self.num_attention_heads
self.pruned_heads = self.pruned_heads.union(heads)
def forward(
self,
hidden_states,
attention_mask=None,
head_mask=None,
encoder_hidden_states=None,
encoder_attention_mask=None,
past_key_value=None,
output_attentions=False,
):
self_outputs = self.self(
hidden_states,
attention_mask,
head_mask,
encoder_hidden_states,
encoder_attention_mask,
past_key_value,
output_attentions,
)
attention_output = self.output(self_outputs[0], hidden_states)
outputs = (attention_output,) + self_outputs[1:] # add attentions if we output them
return outputs
# Copied from transformers.models.bert.modeling_bert.BertIntermediate
class RobertaIntermediate(nn.Module):
def __init__(self, config):
super().__init__()
self.dense = nn.Linear(config.hidden_size, config.intermediate_size)
if isinstance(config.hidden_act, str):
self.intermediate_act_fn = ACT2FN[config.hidden_act]
else:
self.intermediate_act_fn = config.hidden_act
def forward(self, hidden_states):
hidden_states = self.dense(hidden_states)
hidden_states = self.intermediate_act_fn(hidden_states)
return hidden_states
# Copied from transformers.models.bert.modeling_bert.BertOutput
class RobertaOutput(nn.Module):
def __init__(self, config):
super().__init__()
self.dense = nn.Linear(config.intermediate_size, config.hidden_size)
self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
def forward(self, hidden_states, input_tensor):
hidden_states = self.dense(hidden_states)
hidden_states = self.dropout(hidden_states)
hidden_states = self.LayerNorm(hidden_states + input_tensor)
return hidden_states
# Copied from transformers.models.bert.modeling_bert.BertLayer with Bert->Roberta
class RobertaLayer(nn.Module):
def __init__(self, config):
super().__init__()
self.chunk_size_feed_forward = config.chunk_size_feed_forward
self.seq_len_dim = 1
self.attention = RobertaAttention(config)
self.is_decoder = config.is_decoder
self.add_cross_attention = config.add_cross_attention
if self.add_cross_attention:
if not self.is_decoder:
raise ValueError(f"{self} should be used as a decoder model if cross attention is added")
self.crossattention = RobertaAttention(config, position_embedding_type="absolute")
self.intermediate = RobertaIntermediate(config)
self.output = RobertaOutput(config)
def forward(
self,
hidden_states,
attention_mask=None,
head_mask=None,
encoder_hidden_states=None,
encoder_attention_mask=None,
past_key_value=None,
output_attentions=False,
):
# decoder uni-directional self-attention cached key/values tuple is at positions 1,2
self_attn_past_key_value = past_key_value[:2] if past_key_value is not None else None
self_attention_outputs = self.attention(
hidden_states,
attention_mask,
head_mask,
output_attentions=output_attentions,
past_key_value=self_attn_past_key_value,
)
attention_output = self_attention_outputs[0]
# if decoder, the last output is tuple of self-attn cache
if self.is_decoder:
outputs = self_attention_outputs[1:-1]
present_key_value = self_attention_outputs[-1]
else:
outputs = self_attention_outputs[1:] # add self attentions if we output attention weights
cross_attn_present_key_value = None
if self.is_decoder and encoder_hidden_states is not None:
if not hasattr(self, "crossattention"):
raise ValueError(
f"If `encoder_hidden_states` are passed, {self} has to be instantiated with cross-attention layers by setting `config.add_cross_attention=True`"
)
# cross_attn cached key/values tuple is at positions 3,4 of past_key_value tuple
cross_attn_past_key_value = past_key_value[-2:] if past_key_value is not None else None
cross_attention_outputs = self.crossattention(
attention_output,
attention_mask,
head_mask,
encoder_hidden_states,
encoder_attention_mask,
cross_attn_past_key_value,
output_attentions,
)
attention_output = cross_attention_outputs[0]
outputs = outputs + cross_attention_outputs[1:-1] # add cross attentions if we output attention weights
# add cross-attn cache to positions 3,4 of present_key_value tuple
cross_attn_present_key_value = cross_attention_outputs[-1]
present_key_value = present_key_value + cross_attn_present_key_value
layer_output = apply_chunking_to_forward(
self.feed_forward_chunk, self.chunk_size_feed_forward, self.seq_len_dim, attention_output
)
outputs = (layer_output,) + outputs
# if decoder, return the attn key/values as the last output
if self.is_decoder:
outputs = outputs + (present_key_value,)
return outputs
def feed_forward_chunk(self, attention_output):
intermediate_output = self.intermediate(attention_output)
layer_output = self.output(intermediate_output, attention_output)
return layer_output
# Copied from transformers.models.bert.modeling_bert.BertEncoder with Bert->Roberta
class RobertaEncoder(nn.Module):
def __init__(self, config):
super().__init__()
self.config = config
self.layer = nn.ModuleList([RobertaLayer(config) for _ in range(config.num_hidden_layers)])
self.gradient_checkpointing = False
def forward(
self,
hidden_states,
attention_mask=None,
head_mask=None,
encoder_hidden_states=None,
encoder_attention_mask=None,
past_key_values=None,
use_cache=None,
output_attentions=False,
output_hidden_states=False,
return_dict=True,
):
all_hidden_states = () if output_hidden_states else None
all_self_attentions = () if output_attentions else None
all_cross_attentions = () if output_attentions and self.config.add_cross_attention else None
next_decoder_cache = () if use_cache else None
for i, layer_module in enumerate(self.layer):
if output_hidden_states:
all_hidden_states = all_hidden_states + (hidden_states,)
layer_head_mask = head_mask[i] if head_mask is not None else None
past_key_value = past_key_values[i] if past_key_values is not None else None
if self.gradient_checkpointing and self.training:
if use_cache:
logger.warning(
"`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`..."
)
use_cache = False
def create_custom_forward(module):
def custom_forward(*inputs):
return module(*inputs, past_key_value, output_attentions)
return custom_forward
layer_outputs = torch.utils.checkpoint.checkpoint(
create_custom_forward(layer_module),
hidden_states,
attention_mask,
layer_head_mask,
encoder_hidden_states,
encoder_attention_mask,
)
else:
layer_outputs = layer_module(
hidden_states,
attention_mask,
layer_head_mask,
encoder_hidden_states,
encoder_attention_mask,
past_key_value,
output_attentions,
)
hidden_states = layer_outputs[0]
if use_cache:
next_decoder_cache += (layer_outputs[-1],)
if output_attentions:
all_self_attentions = all_self_attentions + (layer_outputs[1],)
if self.config.add_cross_attention:
all_cross_attentions = all_cross_attentions + (layer_outputs[2],)
if output_hidden_states:
all_hidden_states = all_hidden_states + (hidden_states,)
if not return_dict:
return tuple(
v
for v in [
hidden_states,
next_decoder_cache,
all_hidden_states,
all_self_attentions,
all_cross_attentions,
]
if v is not None
)
return BaseModelOutputWithPastAndCrossAttentions(
last_hidden_state=hidden_states,
past_key_values=next_decoder_cache,
hidden_states=all_hidden_states,
attentions=all_self_attentions,
cross_attentions=all_cross_attentions,
)
# Copied from transformers.models.bert.modeling_bert.BertPooler
class RobertaPooler(nn.Module):
def __init__(self, config):
super().__init__()
self.dense = nn.Linear(config.hidden_size, config.hidden_size)
self.activation = nn.Tanh()
def forward(self, hidden_states):
# We "pool" the model by simply taking the hidden state corresponding
# to the first token.
first_token_tensor = hidden_states[:, 0]
pooled_output = self.dense(first_token_tensor)
pooled_output = self.activation(pooled_output)
return pooled_output
class RobertaPreTrainedModel(PreTrainedModel):
"""
An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
models.
"""
config_class = RobertaConfig
base_model_prefix = "roberta"
supports_gradient_checkpointing = True
# Copied from transformers.models.bert.modeling_bert.BertPreTrainedModel._init_weights
def _init_weights(self, module):
"""Initialize the weights"""
if isinstance(module, nn.Linear):
# Slightly different from the TF version which uses truncated_normal for initialization
# cf https://github.com/pytorch/pytorch/pull/5617
module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
if module.bias is not None:
module.bias.data.zero_()
elif isinstance(module, nn.Embedding):
module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
if module.padding_idx is not None:
module.weight.data[module.padding_idx].zero_()
elif isinstance(module, nn.LayerNorm):
module.bias.data.zero_()
module.weight.data.fill_(1.0)
def _set_gradient_checkpointing(self, module, value=False):
if isinstance(module, RobertaEncoder):
module.gradient_checkpointing = value
def update_keys_to_ignore(self, config, del_keys_to_ignore):
"""Remove some keys from ignore list"""
if not config.tie_word_embeddings:
# must make a new list, or the class variable gets modified!
self._keys_to_ignore_on_save = [k for k in self._keys_to_ignore_on_save if k not in del_keys_to_ignore]
self._keys_to_ignore_on_load_missing = [
k for k in self._keys_to_ignore_on_load_missing if k not in del_keys_to_ignore
]
ROBERTA_START_DOCSTRING = r"""
This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the
library implements for all its models (such as downloading or saving, resizing the input embeddings, pruning heads,
etc.)
This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass.
Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matters related to general usage
and behavior.
Parameters:
config ([`RobertaConfig`]): Model configuration class with all the parameters of the
model. Initializing with a config file does not load the weights associated with the model, only the
configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
"""
ROBERTA_INPUTS_DOCSTRING = r"""
Args:
input_ids (`torch.LongTensor` of shape `({0})`):
Indices of input sequence tokens in the vocabulary.
Indices can be obtained using [`RobertaTokenizer`]. See [`PreTrainedTokenizer.encode`] and
[`PreTrainedTokenizer.__call__`] for details.
[What are input IDs?](../glossary#input-ids)
attention_mask (`torch.FloatTensor` of shape `({0})`, *optional*):
Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
- 1 for tokens that are **not masked**,
- 0 for tokens that are **masked**.
[What are attention masks?](../glossary#attention-mask)
token_type_ids (`torch.LongTensor` of shape `({0})`, *optional*):
Segment token indices to indicate first and second portions of the inputs. Indices are selected in `[0,
1]`:
- 0 corresponds to a *sentence A* token,
- 1 corresponds to a *sentence B* token.
[What are token type IDs?](../glossary#token-type-ids)
position_ids (`torch.LongTensor` of shape `({0})`, *optional*):
Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0,
config.max_position_embeddings - 1]`.
[What are position IDs?](../glossary#position-ids)
head_mask (`torch.FloatTensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*):
Mask to nullify selected heads of the self-attention modules. Mask values selected in `[0, 1]`:
- 1 indicates the head is **not masked**,
- 0 indicates the head is **masked**.
inputs_embeds (`torch.FloatTensor` of shape `({0}, hidden_size)`, *optional*):
Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This
is useful if you want more control over how to convert `input_ids` indices into associated vectors than the
model's internal embedding lookup matrix.
output_attentions (`bool`, *optional*):
Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
tensors for more detail.
output_hidden_states (`bool`, *optional*):
Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
more detail.
return_dict (`bool`, *optional*):
Whether or not to return a [`~file_utils.ModelOutput`] instead of a plain tuple.
"""
@add_start_docstrings(
"The bare RoBERTa Model transformer outputting raw hidden-states without any specific head on top.",
ROBERTA_START_DOCSTRING,
)
class RobertaModel(RobertaPreTrainedModel):
"""
The model can behave as an encoder (with only self-attention) as well as a decoder, in which case a layer of
cross-attention is added between the self-attention layers, following the architecture described in *Attention is
all you need*_ by Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit, Llion Jones, Aidan N. Gomez, Lukasz
Kaiser and Illia Polosukhin.
To behave as a decoder the model needs to be initialized with the `is_decoder` argument of the configuration set
to `True`. To be used in a Seq2Seq model, the model needs to be initialized with both `is_decoder` argument and
`add_cross_attention` set to `True`; an `encoder_hidden_states` is then expected as an input to the forward pass.
.. _*Attention is all you need*: https://arxiv.org/abs/1706.03762
"""
_keys_to_ignore_on_load_missing = [r"position_ids"]
# Copied from transformers.models.bert.modeling_bert.BertModel.__init__ with Bert->Roberta
def __init__(self, config, add_pooling_layer=True):
super().__init__(config)
self.config = config
self.embeddings = RobertaEmbeddings(config)
self.encoder = RobertaEncoder(config)
self.pooler = RobertaPooler(config) if add_pooling_layer else None
# Initialize weights and apply final processing
self.post_init()
def get_input_embeddings(self):
return self.embeddings.word_embeddings
def set_input_embeddings(self, value):
self.embeddings.word_embeddings = value
def _prune_heads(self, heads_to_prune):
"""
Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base
class PreTrainedModel
"""
for layer, heads in heads_to_prune.items():
self.encoder.layer[layer].attention.prune_heads(heads)
@add_start_docstrings_to_model_forward(ROBERTA_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
@add_code_sample_docstrings(
processor_class=_TOKENIZER_FOR_DOC,
checkpoint=_CHECKPOINT_FOR_DOC,
output_type=BaseModelOutputWithPoolingAndCrossAttentions,
config_class=_CONFIG_FOR_DOC,
)
# Copied from transformers.models.bert.modeling_bert.BertModel.forward
def forward(
self,
input_ids=None,
attention_mask=None,
token_type_ids=None,
position_ids=None,
head_mask=None,
inputs_embeds=None,
encoder_hidden_states=None,
encoder_attention_mask=None,
past_key_values=None,
use_cache=None,
output_attentions=None,
output_hidden_states=None,
return_dict=None,
):
r"""
encoder_hidden_states (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention if
the model is configured as a decoder.
encoder_attention_mask (`torch.FloatTensor` of shape `(batch_size, sequence_length)`, *optional*):
Mask to avoid performing attention on the padding token indices of the encoder input. This mask is used in
the cross-attention if the model is configured as a decoder. Mask values selected in `[0, 1]`:
- 1 for tokens that are **not masked**,
- 0 for tokens that are **masked**.
past_key_values (`tuple(tuple(torch.FloatTensor))` of length `config.n_layers` with each tuple having 4 tensors of shape `(batch_size, num_heads, sequence_length - 1, embed_size_per_head)`):
Contains precomputed key and value hidden states of the attention blocks. Can be used to speed up decoding.
If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those that
don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of all
`decoder_input_ids` of shape `(batch_size, sequence_length)`.
use_cache (`bool`, *optional*):
If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see
`past_key_values`).
"""
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
output_hidden_states = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
if self.config.is_decoder:
use_cache = use_cache if use_cache is not None else self.config.use_cache
else:
use_cache = False
if input_ids is not None and inputs_embeds is not None:
raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
elif input_ids is not None:
input_shape = input_ids.size()
elif inputs_embeds is not None:
input_shape = inputs_embeds.size()[:-1]
else:
raise ValueError("You have to specify either input_ids or inputs_embeds")
batch_size, seq_length = input_shape
device = input_ids.device if input_ids is not None else inputs_embeds.device
# past_key_values_length
past_key_values_length = past_key_values[0][0].shape[2] if past_key_values is not None else 0
if attention_mask is None:
            attention_mask = torch.ones((batch_size, seq_length + past_key_values_length), device=device)
if token_type_ids is None:
if hasattr(self.embeddings, "token_type_ids"):
buffered_token_type_ids = self.embeddings.token_type_ids[:, :seq_length]
buffered_token_type_ids_expanded = buffered_token_type_ids.expand(batch_size, seq_length)
token_type_ids = buffered_token_type_ids_expanded
else:
token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device)
# We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]
# ourselves in which case we just need to make it broadcastable to all heads.
extended_attention_mask: torch.Tensor = self.get_extended_attention_mask(attention_mask, input_shape, device)
# If a 2D or 3D attention mask is provided for the cross-attention
# we need to make broadcastable to [batch_size, num_heads, seq_length, seq_length]
if self.config.is_decoder and encoder_hidden_states is not None:
encoder_batch_size, encoder_sequence_length, _ = encoder_hidden_states.size()
encoder_hidden_shape = (encoder_batch_size, encoder_sequence_length)
if encoder_attention_mask is None:
encoder_attention_mask = torch.ones(encoder_hidden_shape, device=device)
encoder_extended_attention_mask = self.invert_attention_mask(encoder_attention_mask)
else:
encoder_extended_attention_mask = None
# Prepare head mask if needed
# 1.0 in head_mask indicate we keep the head
# attention_probs has shape bsz x n_heads x N x N
# input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
# and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers)
embedding_output = self.embeddings(
input_ids=input_ids,
position_ids=position_ids,
token_type_ids=token_type_ids,
inputs_embeds=inputs_embeds,
past_key_values_length=past_key_values_length,
)
encoder_outputs = self.encoder(
embedding_output,
attention_mask=extended_attention_mask,
head_mask=head_mask,
encoder_hidden_states=encoder_hidden_states,
encoder_attention_mask=encoder_extended_attention_mask,
past_key_values=past_key_values,
use_cache=use_cache,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
sequence_output = encoder_outputs[0]
pooled_output = self.pooler(sequence_output) if self.pooler is not None else None
if not return_dict:
return (sequence_output, pooled_output) + encoder_outputs[1:]
return BaseModelOutputWithPoolingAndCrossAttentions(
last_hidden_state=sequence_output,
pooler_output=pooled_output,
past_key_values=encoder_outputs.past_key_values,
hidden_states=encoder_outputs.hidden_states,
attentions=encoder_outputs.attentions,
cross_attentions=encoder_outputs.cross_attentions,
)
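# Hedged usage sketch (editor's addition; the checkpoint name "roberta-base"
# is illustrative and not taken from this file):
#
#     from transformers import RobertaTokenizer, RobertaModel
#     tokenizer = RobertaTokenizer.from_pretrained("roberta-base")
#     model = RobertaModel.from_pretrained("roberta-base")
#     inputs = tokenizer("Hello world", return_tensors="pt")
#     outputs = model(**inputs)
#     outputs.last_hidden_state.shape  # (batch_size, sequence_length, hidden_size)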
@add_start_docstrings(
"""RoBERTa Model with a `language modeling` head on top for CLM fine-tuning.""", ROBERTA_START_DOCSTRING
)
class RobertaForCausalLM(RobertaPreTrainedModel):
_keys_to_ignore_on_save = [r"lm_head.decoder.weight", r"lm_head.decoder.bias"]
_keys_to_ignore_on_load_missing = [r"position_ids", r"lm_head.decoder.weight", r"lm_head.decoder.bias"]
_keys_to_ignore_on_load_unexpected = [r"pooler"]
def __init__(self, config):
super().__init__(config)
if not config.is_decoder:
            logger.warning("If you want to use `RobertaForCausalLM` as a standalone, add `is_decoder=True`.")
self.roberta = RobertaModel(config, add_pooling_layer=False)
self.lm_head = RobertaLMHead(config)
# The LM head weights require special treatment only when they are tied with the word embeddings
self.update_keys_to_ignore(config, ["lm_head.decoder.weight"])
# Initialize weights and apply final processing
self.post_init()
def get_output_embeddings(self):
return self.lm_head.decoder
def set_output_embeddings(self, new_embeddings):
self.lm_head.decoder = new_embeddings
@add_start_docstrings_to_model_forward(ROBERTA_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
@replace_return_docstrings(output_type=CausalLMOutputWithCrossAttentions, config_class=_CONFIG_FOR_DOC)
def forward(
self,
input_ids=None,
attention_mask=None,
token_type_ids=None,
position_ids=None,
head_mask=None,
inputs_embeds=None,
encoder_hidden_states=None,
encoder_attention_mask=None,
labels=None,
past_key_values=None,
use_cache=None,
output_attentions=None,
output_hidden_states=None,
return_dict=None,
):
r"""
encoder_hidden_states (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention if
the model is configured as a decoder.
encoder_attention_mask (`torch.FloatTensor` of shape `(batch_size, sequence_length)`, *optional*):
Mask to avoid performing attention on the padding token indices of the encoder input. This mask is used in
the cross-attention if the model is configured as a decoder. Mask values selected in `[0, 1]`:
- 1 for tokens that are **not masked**,
- 0 for tokens that are **masked**.
labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
Labels for computing the left-to-right language modeling loss (next word prediction). Indices should be in
`[-100, 0, ..., config.vocab_size]` (see `input_ids` docstring) Tokens with indices set to `-100` are
ignored (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`
past_key_values (`tuple(tuple(torch.FloatTensor))` of length `config.n_layers` with each tuple having 4 tensors of shape `(batch_size, num_heads, sequence_length - 1, embed_size_per_head)`):
Contains precomputed key and value hidden states of the attention blocks. Can be used to speed up decoding.
If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those that
don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of all
`decoder_input_ids` of shape `(batch_size, sequence_length)`.
use_cache (`bool`, *optional*):
If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see
`past_key_values`).
Returns:
Example:
```python
>>> from transformers import RobertaTokenizer, RobertaForCausalLM, RobertaConfig
>>> import torch
>>> tokenizer = RobertaTokenizer.from_pretrained("roberta-base")
>>> config = RobertaConfig.from_pretrained("roberta-base")
>>> config.is_decoder = True
>>> model = RobertaForCausalLM.from_pretrained("roberta-base", config=config)
>>> inputs = tokenizer("Hello, my dog is cute", return_tensors="pt")
>>> outputs = model(**inputs)
>>> prediction_logits = outputs.logits
```"""
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
if labels is not None:
use_cache = False
outputs = self.roberta(
input_ids,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
position_ids=position_ids,
head_mask=head_mask,
inputs_embeds=inputs_embeds,
encoder_hidden_states=encoder_hidden_states,
encoder_attention_mask=encoder_attention_mask,
past_key_values=past_key_values,
use_cache=use_cache,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
sequence_output = outputs[0]
prediction_scores = self.lm_head(sequence_output)
lm_loss = None
if labels is not None:
# we are doing next-token prediction; shift prediction scores and input ids by one
shifted_prediction_scores = prediction_scores[:, :-1, :].contiguous()
labels = labels[:, 1:].contiguous()
loss_fct = CrossEntropyLoss()
lm_loss = loss_fct(shifted_prediction_scores.view(-1, self.config.vocab_size), labels.view(-1))
if not return_dict:
output = (prediction_scores,) + outputs[2:]
return ((lm_loss,) + output) if lm_loss is not None else output
return CausalLMOutputWithCrossAttentions(
loss=lm_loss,
logits=prediction_scores,
past_key_values=outputs.past_key_values,
hidden_states=outputs.hidden_states,
attentions=outputs.attentions,
cross_attentions=outputs.cross_attentions,
)
def prepare_inputs_for_generation(self, input_ids, past=None, attention_mask=None, **model_kwargs):
input_shape = input_ids.shape
# if model is used as a decoder in encoder-decoder model, the decoder attention mask is created on the fly
if attention_mask is None:
attention_mask = input_ids.new_ones(input_shape)
# cut decoder_input_ids if past is used
if past is not None:
input_ids = input_ids[:, -1:]
return {"input_ids": input_ids, "attention_mask": attention_mask, "past_key_values": past}
def _reorder_cache(self, past, beam_idx):
reordered_past = ()
for layer_past in past:
reordered_past += (tuple(past_state.index_select(0, beam_idx) for past_state in layer_past),)
return reordered_past
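# Editor's note (hedged illustration): during beam search, `_reorder_cache`
# realigns every cached tensor with the surviving beams. Assuming 3 beams and
# beam_idx = tensor([2, 0, 0]), each past tensor of shape
# (batch * beams, heads, seq, head_dim) has its first dimension permuted so
# beam 2's cache moves to slot 0 and beam 0's cache is copied into slots 1-2.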
@add_start_docstrings("""RoBERTa Model with a `language modeling` head on top.""", ROBERTA_START_DOCSTRING)
class RobertaForMaskedLM(RobertaPreTrainedModel):
_keys_to_ignore_on_save = [r"lm_head.decoder.weight", r"lm_head.decoder.bias"]
_keys_to_ignore_on_load_missing = [r"position_ids", r"lm_head.decoder.weight", r"lm_head.decoder.bias"]
_keys_to_ignore_on_load_unexpected = [r"pooler"]
def __init__(self, config):
super().__init__(config)
if config.is_decoder:
logger.warning(
"If you want to use `RobertaForMaskedLM` make sure `config.is_decoder=False` for "
"bi-directional self-attention."
)
self.roberta = RobertaModel(config, add_pooling_layer=False)
self.lm_head = RobertaLMHead(config)
# The LM head weights require special treatment only when they are tied with the word embeddings
self.update_keys_to_ignore(config, ["lm_head.decoder.weight"])
# Initialize weights and apply final processing
self.post_init()
def get_output_embeddings(self):
return self.lm_head.decoder
def set_output_embeddings(self, new_embeddings):
self.lm_head.decoder = new_embeddings
@add_start_docstrings_to_model_forward(ROBERTA_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
@add_code_sample_docstrings(
processor_class=_TOKENIZER_FOR_DOC,
checkpoint=_CHECKPOINT_FOR_DOC,
output_type=MaskedLMOutput,
config_class=_CONFIG_FOR_DOC,
mask="<mask>",
)
def forward(
self,
input_ids=None,
attention_mask=None,
token_type_ids=None,
position_ids=None,
head_mask=None,
inputs_embeds=None,
encoder_hidden_states=None,
encoder_attention_mask=None,
labels=None,
output_attentions=None,
output_hidden_states=None,
return_dict=None,
):
r"""
labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
Labels for computing the masked language modeling loss. Indices should be in `[-100, 0, ...,
config.vocab_size]` (see `input_ids` docstring) Tokens with indices set to `-100` are ignored (masked), the
loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`
kwargs (`Dict[str, any]`, optional, defaults to *{}*):
Used to hide legacy arguments that have been deprecated.
"""
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
outputs = self.roberta(
input_ids,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
position_ids=position_ids,
head_mask=head_mask,
inputs_embeds=inputs_embeds,
encoder_hidden_states=encoder_hidden_states,
encoder_attention_mask=encoder_attention_mask,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
sequence_output = outputs[0]
prediction_scores = self.lm_head(sequence_output)
masked_lm_loss = None
if labels is not None:
loss_fct = CrossEntropyLoss()
masked_lm_loss = loss_fct(prediction_scores.view(-1, self.config.vocab_size), labels.view(-1))
if not return_dict:
output = (prediction_scores,) + outputs[2:]
return ((masked_lm_loss,) + output) if masked_lm_loss is not None else output
return MaskedLMOutput(
loss=masked_lm_loss,
logits=prediction_scores,
hidden_states=outputs.hidden_states,
attentions=outputs.attentions,
)
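# Hedged usage sketch for the masked-LM head (editor's addition; the checkpoint
# name is illustrative). RoBERTa's mask token is "<mask>":
#
#     from transformers import RobertaTokenizer, RobertaForMaskedLM
#     tokenizer = RobertaTokenizer.from_pretrained("roberta-base")
#     model = RobertaForMaskedLM.from_pretrained("roberta-base")
#     inputs = tokenizer("The capital of France is <mask>.", return_tensors="pt")
#     logits = model(**inputs).logits
#     mask_pos = (inputs.input_ids == tokenizer.mask_token_id).nonzero()[0, 1]
#     tokenizer.decode(logits[0, mask_pos].argmax())  # predicted filler token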
class RobertaLMHead(nn.Module):
"""Roberta Head for masked language modeling."""
def __init__(self, config):
super().__init__()
self.dense = nn.Linear(config.hidden_size, config.hidden_size)
self.layer_norm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
self.decoder = nn.Linear(config.hidden_size, config.vocab_size)
self.bias = nn.Parameter(torch.zeros(config.vocab_size))
self.decoder.bias = self.bias
def forward(self, features, **kwargs):
x = self.dense(features)
x = gelu(x)
x = self.layer_norm(x)
# project back to size of vocabulary with bias
x = self.decoder(x)
return x
def _tie_weights(self):
# To tie those two weights if they get disconnected (on TPU or when the bias is resized)
self.bias = self.decoder.bias
@add_start_docstrings(
"""
RoBERTa Model transformer with a sequence classification/regression head on top (a linear layer on top of the
pooled output) e.g. for GLUE tasks.
""",
ROBERTA_START_DOCSTRING,
)
class RobertaForSequenceClassification(RobertaPreTrainedModel):
_keys_to_ignore_on_load_missing = [r"position_ids"]
def __init__(self, config):
super().__init__(config)
self.num_labels = config.num_labels
self.config = config
self.roberta = RobertaModel(config, add_pooling_layer=False)
self.classifier = RobertaClassificationHead(config)
# Initialize weights and apply final processing
self.post_init()
@add_start_docstrings_to_model_forward(ROBERTA_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
@add_code_sample_docstrings(
processor_class=_TOKENIZER_FOR_DOC,
checkpoint=_CHECKPOINT_FOR_DOC,
output_type=SequenceClassifierOutput,
config_class=_CONFIG_FOR_DOC,
)
def forward(
self,
input_ids=None,
attention_mask=None,
token_type_ids=None,
position_ids=None,
head_mask=None,
inputs_embeds=None,
labels=None,
output_attentions=None,
output_hidden_states=None,
return_dict=None,
):
r"""
labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
Labels for computing the sequence classification/regression loss. Indices should be in `[0, ...,
config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If
`config.num_labels > 1` a classification loss is computed (Cross-Entropy).
"""
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
outputs = self.roberta(
input_ids,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
position_ids=position_ids,
head_mask=head_mask,
inputs_embeds=inputs_embeds,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
sequence_output = outputs[0]
logits = self.classifier(sequence_output)
loss = None
if labels is not None:
if self.config.problem_type is None:
if self.num_labels == 1:
self.config.problem_type = "regression"
elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
self.config.problem_type = "single_label_classification"
else:
self.config.problem_type = "multi_label_classification"
if self.config.problem_type == "regression":
loss_fct = MSELoss()
if self.num_labels == 1:
loss = loss_fct(logits.squeeze(), labels.squeeze())
else:
loss = loss_fct(logits, labels)
elif self.config.problem_type == "single_label_classification":
loss_fct = CrossEntropyLoss()
loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
elif self.config.problem_type == "multi_label_classification":
loss_fct = BCEWithLogitsLoss()
loss = loss_fct(logits, labels)
if not return_dict:
output = (logits,) + outputs[2:]
return ((loss,) + output) if loss is not None else output
return SequenceClassifierOutput(
loss=loss,
logits=logits,
hidden_states=outputs.hidden_states,
attentions=outputs.attentions,
)
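# Editor's note (hedged summary of the loss dispatch above, when
# `config.problem_type` is unset):
#   num_labels == 1                    -> "regression"                  -> MSELoss
#   num_labels > 1, integer labels     -> "single_label_classification" -> CrossEntropyLoss
#   num_labels > 1, float labels       -> "multi_label_classification"  -> BCEWithLogitsLoss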
@add_start_docstrings(
"""
Roberta Model with a multiple choice classification head on top (a linear layer on top of the pooled output and a
softmax) e.g. for RocStories/SWAG tasks.
""",
ROBERTA_START_DOCSTRING,
)
class RobertaForMultipleChoice(RobertaPreTrainedModel):
_keys_to_ignore_on_load_missing = [r"position_ids"]
def __init__(self, config):
super().__init__(config)
self.roberta = RobertaModel(config)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
self.classifier = nn.Linear(config.hidden_size, 1)
# Initialize weights and apply final processing
self.post_init()
@add_start_docstrings_to_model_forward(ROBERTA_INPUTS_DOCSTRING.format("batch_size, num_choices, sequence_length"))
@add_code_sample_docstrings(
processor_class=_TOKENIZER_FOR_DOC,
checkpoint=_CHECKPOINT_FOR_DOC,
output_type=MultipleChoiceModelOutput,
config_class=_CONFIG_FOR_DOC,
)
def forward(
self,
input_ids=None,
token_type_ids=None,
attention_mask=None,
labels=None,
position_ids=None,
head_mask=None,
inputs_embeds=None,
output_attentions=None,
output_hidden_states=None,
return_dict=None,
):
r"""
labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
Labels for computing the multiple choice classification loss. Indices should be in `[0, ...,
num_choices-1]` where `num_choices` is the size of the second dimension of the input tensors. (See
`input_ids` above)
"""
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
num_choices = input_ids.shape[1] if input_ids is not None else inputs_embeds.shape[1]
flat_input_ids = input_ids.view(-1, input_ids.size(-1)) if input_ids is not None else None
flat_position_ids = position_ids.view(-1, position_ids.size(-1)) if position_ids is not None else None
flat_token_type_ids = token_type_ids.view(-1, token_type_ids.size(-1)) if token_type_ids is not None else None
flat_attention_mask = attention_mask.view(-1, attention_mask.size(-1)) if attention_mask is not None else None
flat_inputs_embeds = (
inputs_embeds.view(-1, inputs_embeds.size(-2), inputs_embeds.size(-1))
if inputs_embeds is not None
else None
)
outputs = self.roberta(
flat_input_ids,
position_ids=flat_position_ids,
token_type_ids=flat_token_type_ids,
attention_mask=flat_attention_mask,
head_mask=head_mask,
inputs_embeds=flat_inputs_embeds,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
pooled_output = outputs[1]
pooled_output = self.dropout(pooled_output)
logits = self.classifier(pooled_output)
reshaped_logits = logits.view(-1, num_choices)
loss = None
if labels is not None:
loss_fct = CrossEntropyLoss()
loss = loss_fct(reshaped_logits, labels)
if not return_dict:
output = (reshaped_logits,) + outputs[2:]
return ((loss,) + output) if loss is not None else output
return MultipleChoiceModelOutput(
loss=loss,
logits=reshaped_logits,
hidden_states=outputs.hidden_states,
attentions=outputs.attentions,
)
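# Editor's note (hedged shape walk-through for the multiple-choice head):
# inputs arrive as (batch_size, num_choices, seq_len), are flattened to
# (batch_size * num_choices, seq_len) for the encoder, and the per-choice
# scores are reshaped back to (batch_size, num_choices) before the
# cross-entropy over choices is computed.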
@add_start_docstrings(
"""
Roberta Model with a token classification head on top (a linear layer on top of the hidden-states output) e.g. for
Named-Entity-Recognition (NER) tasks.
""",
ROBERTA_START_DOCSTRING,
)
class RobertaForTokenClassification(RobertaPreTrainedModel):
_keys_to_ignore_on_load_unexpected = [r"pooler"]
_keys_to_ignore_on_load_missing = [r"position_ids"]
def __init__(self, config):
super().__init__(config)
self.num_labels = config.num_labels
self.roberta = RobertaModel(config, add_pooling_layer=False)
classifier_dropout = (
config.classifier_dropout if config.classifier_dropout is not None else config.hidden_dropout_prob
)
self.dropout = nn.Dropout(classifier_dropout)
self.classifier = nn.Linear(config.hidden_size, config.num_labels)
# Initialize weights and apply final processing
self.post_init()
@add_start_docstrings_to_model_forward(ROBERTA_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
@add_code_sample_docstrings(
processor_class=_TOKENIZER_FOR_DOC,
checkpoint=_CHECKPOINT_FOR_DOC,
output_type=TokenClassifierOutput,
config_class=_CONFIG_FOR_DOC,
)
def forward(
self,
input_ids=None,
attention_mask=None,
token_type_ids=None,
position_ids=None,
head_mask=None,
inputs_embeds=None,
labels=None,
output_attentions=None,
output_hidden_states=None,
return_dict=None,
):
r"""
labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
Labels for computing the token classification loss. Indices should be in `[0, ..., config.num_labels - 1]`.
"""
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
outputs = self.roberta(
input_ids,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
position_ids=position_ids,
head_mask=head_mask,
inputs_embeds=inputs_embeds,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
sequence_output = outputs[0]
sequence_output = self.dropout(sequence_output)
logits = self.classifier(sequence_output)
loss = None
if labels is not None:
loss_fct = CrossEntropyLoss()
loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
if not return_dict:
output = (logits,) + outputs[2:]
return ((loss,) + output) if loss is not None else output
return TokenClassifierOutput(
loss=loss,
logits=logits,
hidden_states=outputs.hidden_states,
attentions=outputs.attentions,
)
class RobertaClassificationHead(nn.Module):
"""Head for sentence-level classification tasks."""
def __init__(self, config):
super().__init__()
self.dense = nn.Linear(config.hidden_size, config.hidden_size)
classifier_dropout = (
config.classifier_dropout if config.classifier_dropout is not None else config.hidden_dropout_prob
)
self.dropout = nn.Dropout(classifier_dropout)
self.out_proj = nn.Linear(config.hidden_size, config.num_labels)
def forward(self, features, **kwargs):
x = features[:, 0, :] # take <s> token (equiv. to [CLS])
x = self.dropout(x)
x = self.dense(x)
x = torch.tanh(x)
x = self.dropout(x)
x = self.out_proj(x)
return x
@add_start_docstrings(
"""
Roberta Model with a span classification head on top for extractive question-answering tasks like SQuAD (a linear
layers on top of the hidden-states output to compute `span start logits` and `span end logits`).
""",
ROBERTA_START_DOCSTRING,
)
class RobertaForQuestionAnswering(RobertaPreTrainedModel):
_keys_to_ignore_on_load_unexpected = [r"pooler"]
_keys_to_ignore_on_load_missing = [r"position_ids"]
def __init__(self, config):
super().__init__(config)
self.num_labels = config.num_labels
self.roberta = RobertaModel(config, add_pooling_layer=False)
self.qa_outputs = nn.Linear(config.hidden_size, config.num_labels)
# Initialize weights and apply final processing
self.post_init()
@add_start_docstrings_to_model_forward(ROBERTA_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
@add_code_sample_docstrings(
processor_class=_TOKENIZER_FOR_DOC,
checkpoint=_CHECKPOINT_FOR_DOC,
output_type=QuestionAnsweringModelOutput,
config_class=_CONFIG_FOR_DOC,
)
def forward(
self,
input_ids=None,
attention_mask=None,
token_type_ids=None,
position_ids=None,
head_mask=None,
inputs_embeds=None,
start_positions=None,
end_positions=None,
output_attentions=None,
output_hidden_states=None,
return_dict=None,
):
r"""
start_positions (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
Labels for position (index) of the start of the labelled span for computing the token classification loss.
Positions are clamped to the length of the sequence (`sequence_length`). Position outside of the sequence
are not taken into account for computing the loss.
end_positions (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
Labels for position (index) of the end of the labelled span for computing the token classification loss.
Positions are clamped to the length of the sequence (`sequence_length`). Position outside of the sequence
are not taken into account for computing the loss.
"""
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
outputs = self.roberta(
input_ids,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
position_ids=position_ids,
head_mask=head_mask,
inputs_embeds=inputs_embeds,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
sequence_output = outputs[0]
logits = self.qa_outputs(sequence_output)
start_logits, end_logits = logits.split(1, dim=-1)
start_logits = start_logits.squeeze(-1).contiguous()
end_logits = end_logits.squeeze(-1).contiguous()
total_loss = None
if start_positions is not None and end_positions is not None:
            # If we are on multi-GPU, the start/end positions may carry an extra dimension; squeeze it away
if len(start_positions.size()) > 1:
start_positions = start_positions.squeeze(-1)
if len(end_positions.size()) > 1:
end_positions = end_positions.squeeze(-1)
# sometimes the start/end positions are outside our model inputs, we ignore these terms
ignored_index = start_logits.size(1)
start_positions = start_positions.clamp(0, ignored_index)
end_positions = end_positions.clamp(0, ignored_index)
loss_fct = CrossEntropyLoss(ignore_index=ignored_index)
start_loss = loss_fct(start_logits, start_positions)
end_loss = loss_fct(end_logits, end_positions)
total_loss = (start_loss + end_loss) / 2
if not return_dict:
output = (start_logits, end_logits) + outputs[2:]
return ((total_loss,) + output) if total_loss is not None else output
return QuestionAnsweringModelOutput(
loss=total_loss,
start_logits=start_logits,
end_logits=end_logits,
hidden_states=outputs.hidden_states,
attentions=outputs.attentions,
)
def create_position_ids_from_input_ids(input_ids, padding_idx, past_key_values_length=0):
"""
Replace non-padding symbols with their position numbers. Position numbers begin at padding_idx+1. Padding symbols
are ignored. This is modified from fairseq's `utils.make_positions`.
    Args:
        input_ids (torch.Tensor): Input token ids.
        padding_idx (int): Index of the padding symbol.
        past_key_values_length (int): Length of any previously cached states.
    Returns: torch.Tensor
"""
# The series of casts and type-conversions here are carefully balanced to both work with ONNX export and XLA.
mask = input_ids.ne(padding_idx).int()
incremental_indices = (torch.cumsum(mask, dim=1).type_as(mask) + past_key_values_length) * mask
return incremental_indices.long() + padding_idx
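# Worked example (editor's addition, illustrative values): with padding_idx = 1
# and input_ids = [[0, 31414, 232, 2, 1, 1]]:
#   mask                   = [[1, 1, 1, 1, 0, 0]]
#   cumsum(mask) * mask    = [[1, 2, 3, 4, 0, 0]]
#   result (+ padding_idx) = [[2, 3, 4, 5, 1, 1]]
# so real tokens get positions starting at padding_idx + 1 while padding
# positions stay at padding_idx.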
| huggingface/transformers | src/transformers/models/roberta/modeling_roberta.py | Python | apache-2.0 | 67,545 |
"""Support for Blink Alarm Control Panel."""
import logging
from homeassistant.components.alarm_control_panel import AlarmControlPanel
from homeassistant.const import (
ATTR_ATTRIBUTION, STATE_ALARM_ARMED_AWAY, STATE_ALARM_DISARMED)
from . import BLINK_DATA, DEFAULT_ATTRIBUTION
_LOGGER = logging.getLogger(__name__)
DEPENDENCIES = ['blink']
ICON = 'mdi:security'
def setup_platform(hass, config, add_entities, discovery_info=None):
"""Set up the Arlo Alarm Control Panels."""
if discovery_info is None:
return
data = hass.data[BLINK_DATA]
sync_modules = []
for sync_name, sync_module in data.sync.items():
sync_modules.append(BlinkSyncModule(data, sync_name, sync_module))
add_entities(sync_modules, True)
class BlinkSyncModule(AlarmControlPanel):
"""Representation of a Blink Alarm Control Panel."""
def __init__(self, data, name, sync):
"""Initialize the alarm control panel."""
self.data = data
self.sync = sync
self._name = name
self._state = None
@property
def unique_id(self):
"""Return the unique id for the sync module."""
return self.sync.serial
@property
def icon(self):
"""Return icon."""
return ICON
@property
def state(self):
"""Return the state of the device."""
return self._state
@property
def name(self):
"""Return the name of the panel."""
return "{} {}".format(BLINK_DATA, self._name)
@property
def device_state_attributes(self):
"""Return the state attributes."""
attr = self.sync.attributes
attr['network_info'] = self.data.networks
attr['associated_cameras'] = list(self.sync.cameras.keys())
attr[ATTR_ATTRIBUTION] = DEFAULT_ATTRIBUTION
return attr
def update(self):
"""Update the state of the device."""
_LOGGER.debug("Updating Blink Alarm Control Panel %s", self._name)
self.data.refresh()
mode = self.sync.arm
if mode:
self._state = STATE_ALARM_ARMED_AWAY
else:
self._state = STATE_ALARM_DISARMED
def alarm_disarm(self, code=None):
"""Send disarm command."""
self.sync.arm = False
self.sync.refresh()
def alarm_arm_away(self, code=None):
"""Send arm command."""
self.sync.arm = True
self.sync.refresh()
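# Editor's note (hedged): arming and disarming simply write `sync.arm` and
# trigger a refresh; `update()` later maps the armed flag back onto
# STATE_ALARM_ARMED_AWAY / STATE_ALARM_DISARMED, so the reported state only
# changes after the next refresh cycle.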
| jamespcole/home-assistant | homeassistant/components/blink/alarm_control_panel.py | Python | apache-2.0 | 2,424 |
# Copyright (c) OpenMMLab. All rights reserved.
import warnings
from abc import abstractmethod
import torch
import torch.nn as nn
from mmcv.cnn import ConvModule
from mmcv.runner import force_fp32
from mmdet.core import build_bbox_coder, multi_apply
from mmdet.core.anchor.point_generator import MlvlPointGenerator
from ..builder import HEADS, build_loss
from .base_dense_head import BaseDenseHead
from .dense_test_mixins import BBoxTestMixin
@HEADS.register_module()
class AnchorFreeHead(BaseDenseHead, BBoxTestMixin):
"""Anchor-free head (FCOS, Fovea, RepPoints, etc.).
Args:
num_classes (int): Number of categories excluding the background
category.
in_channels (int): Number of channels in the input feature map.
feat_channels (int): Number of hidden channels. Used in child classes.
stacked_convs (int): Number of stacking convs of the head.
strides (tuple): Downsample factor of each feature map.
dcn_on_last_conv (bool): If true, use dcn in the last layer of
towers. Default: False.
conv_bias (bool | str): If specified as `auto`, it will be decided by
the norm_cfg. Bias of conv will be set as True if `norm_cfg` is
None, otherwise False. Default: "auto".
loss_cls (dict): Config of classification loss.
loss_bbox (dict): Config of localization loss.
bbox_coder (dict): Config of bbox coder. Defaults
'DistancePointBBoxCoder'.
conv_cfg (dict): Config dict for convolution layer. Default: None.
norm_cfg (dict): Config dict for normalization layer. Default: None.
train_cfg (dict): Training config of anchor head.
test_cfg (dict): Testing config of anchor head.
init_cfg (dict or list[dict], optional): Initialization config dict.
""" # noqa: W605
_version = 1
def __init__(self,
num_classes,
in_channels,
feat_channels=256,
stacked_convs=4,
strides=(4, 8, 16, 32, 64),
dcn_on_last_conv=False,
conv_bias='auto',
loss_cls=dict(
type='FocalLoss',
use_sigmoid=True,
gamma=2.0,
alpha=0.25,
loss_weight=1.0),
loss_bbox=dict(type='IoULoss', loss_weight=1.0),
bbox_coder=dict(type='DistancePointBBoxCoder'),
conv_cfg=None,
norm_cfg=None,
train_cfg=None,
test_cfg=None,
init_cfg=dict(
type='Normal',
layer='Conv2d',
std=0.01,
override=dict(
type='Normal',
name='conv_cls',
std=0.01,
bias_prob=0.01))):
super(AnchorFreeHead, self).__init__(init_cfg)
self.num_classes = num_classes
self.use_sigmoid_cls = loss_cls.get('use_sigmoid', False)
if self.use_sigmoid_cls:
self.cls_out_channels = num_classes
else:
self.cls_out_channels = num_classes + 1
self.in_channels = in_channels
self.feat_channels = feat_channels
self.stacked_convs = stacked_convs
self.strides = strides
self.dcn_on_last_conv = dcn_on_last_conv
assert conv_bias == 'auto' or isinstance(conv_bias, bool)
self.conv_bias = conv_bias
self.loss_cls = build_loss(loss_cls)
self.loss_bbox = build_loss(loss_bbox)
self.bbox_coder = build_bbox_coder(bbox_coder)
self.prior_generator = MlvlPointGenerator(strides)
# In order to keep a more general interface and be consistent with
# anchor_head. We can think of point like one anchor
self.num_base_priors = self.prior_generator.num_base_priors[0]
self.train_cfg = train_cfg
self.test_cfg = test_cfg
self.conv_cfg = conv_cfg
self.norm_cfg = norm_cfg
self.fp16_enabled = False
self._init_layers()
def _init_layers(self):
"""Initialize layers of the head."""
self._init_cls_convs()
self._init_reg_convs()
self._init_predictor()
def _init_cls_convs(self):
"""Initialize classification conv layers of the head."""
self.cls_convs = nn.ModuleList()
for i in range(self.stacked_convs):
chn = self.in_channels if i == 0 else self.feat_channels
if self.dcn_on_last_conv and i == self.stacked_convs - 1:
conv_cfg = dict(type='DCNv2')
else:
conv_cfg = self.conv_cfg
self.cls_convs.append(
ConvModule(
chn,
self.feat_channels,
3,
stride=1,
padding=1,
conv_cfg=conv_cfg,
norm_cfg=self.norm_cfg,
bias=self.conv_bias))
def _init_reg_convs(self):
"""Initialize bbox regression conv layers of the head."""
self.reg_convs = nn.ModuleList()
for i in range(self.stacked_convs):
chn = self.in_channels if i == 0 else self.feat_channels
if self.dcn_on_last_conv and i == self.stacked_convs - 1:
conv_cfg = dict(type='DCNv2')
else:
conv_cfg = self.conv_cfg
self.reg_convs.append(
ConvModule(
chn,
self.feat_channels,
3,
stride=1,
padding=1,
conv_cfg=conv_cfg,
norm_cfg=self.norm_cfg,
bias=self.conv_bias))
def _init_predictor(self):
"""Initialize predictor layers of the head."""
self.conv_cls = nn.Conv2d(
self.feat_channels, self.cls_out_channels, 3, padding=1)
self.conv_reg = nn.Conv2d(self.feat_channels, 4, 3, padding=1)
def _load_from_state_dict(self, state_dict, prefix, local_metadata, strict,
missing_keys, unexpected_keys, error_msgs):
"""Hack some keys of the model state dict so that can load checkpoints
of previous version."""
version = local_metadata.get('version', None)
if version is None:
# the key is different in early versions
# for example, 'fcos_cls' become 'conv_cls' now
bbox_head_keys = [
k for k in state_dict.keys() if k.startswith(prefix)
]
ori_predictor_keys = []
new_predictor_keys = []
# e.g. 'fcos_cls' or 'fcos_reg'
for key in bbox_head_keys:
ori_predictor_keys.append(key)
key = key.split('.')
conv_name = None
if key[1].endswith('cls'):
conv_name = 'conv_cls'
elif key[1].endswith('reg'):
conv_name = 'conv_reg'
elif key[1].endswith('centerness'):
conv_name = 'conv_centerness'
                else:
                    # `assert NotImplementedError` in the original always
                    # passed (the exception class is truthy); raise instead so
                    # unexpected keys fail loudly.
                    raise NotImplementedError(
                        'key {} is not supported'.format('.'.join(key)))
if conv_name is not None:
key[1] = conv_name
new_predictor_keys.append('.'.join(key))
else:
ori_predictor_keys.pop(-1)
for i in range(len(new_predictor_keys)):
state_dict[new_predictor_keys[i]] = state_dict.pop(
ori_predictor_keys[i])
super()._load_from_state_dict(state_dict, prefix, local_metadata,
strict, missing_keys, unexpected_keys,
error_msgs)
def forward(self, feats):
"""Forward features from the upstream network.
Args:
feats (tuple[Tensor]): Features from the upstream network, each is
a 4D-tensor.
Returns:
tuple: Usually contain classification scores and bbox predictions.
cls_scores (list[Tensor]): Box scores for each scale level,
each is a 4D-tensor, the channel number is
num_points * num_classes.
bbox_preds (list[Tensor]): Box energies / deltas for each scale
level, each is a 4D-tensor, the channel number is
num_points * 4.
"""
return multi_apply(self.forward_single, feats)[:2]
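    # Editor's note (hedged): `multi_apply` maps `forward_single` over each FPN
    # level and zips the per-level results, so the slice [:2] keeps only the
    # per-level class scores and bbox predictions, dropping the intermediate
    # cls/reg features that `forward_single` also returns.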
def forward_single(self, x):
"""Forward features of a single scale level.
Args:
x (Tensor): FPN feature maps of the specified stride.
Returns:
tuple: Scores for each class, bbox predictions, features
after classification and regression conv layers, some
models needs these features like FCOS.
"""
cls_feat = x
reg_feat = x
for cls_layer in self.cls_convs:
cls_feat = cls_layer(cls_feat)
cls_score = self.conv_cls(cls_feat)
for reg_layer in self.reg_convs:
reg_feat = reg_layer(reg_feat)
bbox_pred = self.conv_reg(reg_feat)
return cls_score, bbox_pred, cls_feat, reg_feat
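    # Hedged shape sketch (editor's addition): for an input feature map
    # x of shape (N, C, H, W),
    #   cls_score:           (N, cls_out_channels, H, W)
    #   bbox_pred:           (N, 4, H, W)
    #   cls_feat / reg_feat: (N, feat_channels, H, W)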
@abstractmethod
@force_fp32(apply_to=('cls_scores', 'bbox_preds'))
def loss(self,
cls_scores,
bbox_preds,
gt_bboxes,
gt_labels,
img_metas,
gt_bboxes_ignore=None):
"""Compute loss of the head.
Args:
cls_scores (list[Tensor]): Box scores for each scale level,
each is a 4D-tensor, the channel number is
num_points * num_classes.
bbox_preds (list[Tensor]): Box energies / deltas for each scale
level, each is a 4D-tensor, the channel number is
num_points * 4.
gt_bboxes (list[Tensor]): Ground truth bboxes for each image with
shape (num_gts, 4) in [tl_x, tl_y, br_x, br_y] format.
gt_labels (list[Tensor]): class indices corresponding to each box
img_metas (list[dict]): Meta information of each image, e.g.,
image size, scaling factor, etc.
gt_bboxes_ignore (None | list[Tensor]): specify which bounding
boxes can be ignored when computing the loss.
"""
raise NotImplementedError
@abstractmethod
def get_targets(self, points, gt_bboxes_list, gt_labels_list):
"""Compute regression, classification and centerness targets for points
in multiple images.
Args:
points (list[Tensor]): Points of each fpn level, each has shape
(num_points, 2).
gt_bboxes_list (list[Tensor]): Ground truth bboxes of each image,
each has shape (num_gt, 4).
gt_labels_list (list[Tensor]): Ground truth labels of each box,
each has shape (num_gt,).
"""
raise NotImplementedError
def _get_points_single(self,
featmap_size,
stride,
dtype,
device,
flatten=False):
"""Get points of a single scale level.
This function will be deprecated soon.
"""
        warnings.warn(
            '`_get_points_single` in `AnchorFreeHead` will be '
            'deprecated soon; we support a multi-level point generator now. '
            'You can get points of a single level feature map '
            'with `self.prior_generator.single_level_grid_priors`.')
h, w = featmap_size
        # First create the range with the default dtype, then convert to the
        # target `dtype` for ONNX exporting.
x_range = torch.arange(w, device=device).to(dtype)
y_range = torch.arange(h, device=device).to(dtype)
y, x = torch.meshgrid(y_range, x_range)
if flatten:
y = y.flatten()
x = x.flatten()
return y, x
def get_points(self, featmap_sizes, dtype, device, flatten=False):
"""Get points according to feature map sizes.
Args:
featmap_sizes (list[tuple]): Multi-level feature map sizes.
dtype (torch.dtype): Type of points.
device (torch.device): Device of points.
Returns:
tuple: points of each image.
"""
        warnings.warn(
            '`get_points` in `AnchorFreeHead` will be '
            'deprecated soon; we support a multi-level point generator now. '
            'You can get points of all levels '
            'with `self.prior_generator.grid_priors`.')
mlvl_points = []
for i in range(len(featmap_sizes)):
mlvl_points.append(
self._get_points_single(featmap_sizes[i], self.strides[i],
dtype, device, flatten))
return mlvl_points
def aug_test(self, feats, img_metas, rescale=False):
"""Test function with test time augmentation.
Args:
feats (list[Tensor]): the outer list indicates test-time
augmentations and inner Tensor should have a shape NxCxHxW,
which contains features for all images in the batch.
img_metas (list[list[dict]]): the outer list indicates test-time
augs (multiscale, flip, etc.) and the inner list indicates
images in a batch. each dict has image information.
rescale (bool, optional): Whether to rescale the results.
Defaults to False.
Returns:
list[ndarray]: bbox results of each class
"""
return self.aug_test_bboxes(feats, img_metas, rescale=rescale)
| open-mmlab/mmdetection | mmdet/models/dense_heads/anchor_free_head.py | Python | apache-2.0 | 13,958 |
from setuptools import setup
setup(
name='simulator',
version='0.1.0',
description='',
license='Apache-2.0',
url='',
    author='Felipe Zapata',
author_email='tifonzafel_gmail.com',
keywords='microservices',
packages=['simulator', 'simulator.simulator', 'simulator.storage'],
classifiers=[
'Development Status :: 3 - Alpha',
'License :: OSI Approved :: Apache Software License',
        'Programming Language :: Python :: 3',
],
install_requires=[
'autobahn', 'pydrive', 'twisted']
)
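# Editor's note (hedged): with this setup.py an editable install would be
#   pip install -e .
# pulling in autobahn, pydrive and twisted as declared in install_requires.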
| felipeZ/microservices | setup.py | Python | apache-2.0 | 555 |
# Copyright 2014 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from tempest.api.compute import base
from tempest import config
from tempest.lib import decorators
CONF = config.CONF
class ComputeNetworksTest(base.BaseV2ComputeTest):
"""Test compute networks API with compute microversion less than 2.36"""
max_microversion = '2.35'
@classmethod
def skip_checks(cls):
super(ComputeNetworksTest, cls).skip_checks()
if CONF.service_available.neutron:
raise cls.skipException('nova-network is not available.')
@classmethod
def setup_clients(cls):
super(ComputeNetworksTest, cls).setup_clients()
cls.client = cls.os_primary.compute_networks_client
@decorators.idempotent_id('3fe07175-312e-49a5-a623-5f52eeada4c2')
def test_list_networks(self):
"""Test listing networks using compute networks API"""
networks = self.client.list_networks()['networks']
self.assertNotEmpty(networks, "No networks found.")
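# Editor's note (hedged): `max_microversion = '2.35'` pins this test below
# compute microversion 2.36, where the nova-network proxy APIs were removed,
# and `skip_checks` bails out when Neutron is the configured network service.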
| openstack/tempest | tempest/api/compute/test_networks.py | Python | apache-2.0 | 1,541 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Copyright 2015 [email protected]
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import os, sys, re, uuid, string
class Project(object):
api_files = []
src_files = []
prj_files = []
prjname = 'general'
pkgname = 'template'
rootpath = os.path.abspath('..')
inc_dir = 'include'
src_dir = 'src'
prj_dir = 'projects'
def __init__(self, prjname):
self.prjname = prjname
dir = os.path.join(self.rootpath, self.inc_dir, self.pkgname, self.prjname)
self.api_files = os.listdir(dir)
dir = os.path.join(self.rootpath, self.src_dir, self.pkgname, self.prjname)
self.src_files = os.listdir(dir)
dir = os.path.join(self.rootpath, self.prj_dir, self.pkgname)
for item in os.listdir(dir):
if self.prjname in item:
self.prj_files.append(item)
def generate(self, des_pkgname, des_prjname):
desdir = os.path.join(self.rootpath, self.src_dir, string.lower(des_pkgname), string.lower(des_prjname))
if not os.path.exists(desdir):
os.makedirs(desdir)
desdir = os.path.join(self.rootpath, self.inc_dir, string.lower(des_pkgname), string.lower(des_prjname))
if not os.path.exists(desdir):
os.makedirs(desdir)
for item in self.api_files:
srcfile = os.path.join(self.rootpath, self.inc_dir, string.lower(self.pkgname), string.lower(self.prjname), item)
desfile = os.path.join(self.rootpath, self.inc_dir, string.lower(des_pkgname), string.lower(des_prjname), item.replace(string.lower(self.prjname), string.lower(des_prjname), 1))
self.__copyfile(srcfile, desfile, des_pkgname, des_prjname)
for item in self.src_files:
srcfile = os.path.join(self.rootpath, self.src_dir, string.lower(self.pkgname), string.lower(self.prjname), item)
desfile = os.path.join(self.rootpath, self.src_dir, string.lower(des_pkgname), string.lower(des_prjname), item.replace(string.lower(self.prjname), string.lower(des_prjname), 1))
self.__copyfile(srcfile, desfile, des_pkgname, des_prjname)
for item in self.prj_files:
srcfile = os.path.join(self.rootpath, self.prj_dir, string.lower(self.pkgname), item)
desfile = os.path.join(self.rootpath, self.prj_dir, string.lower(des_pkgname), item.replace(string.lower(self.prjname), string.lower(des_prjname), 1))
self.__copyfile(srcfile, desfile, des_pkgname, des_prjname)
def __copyfile(self, srcfile, desfile, des_pkg, des_prj):
src_str = open(srcfile, "rb").read()
des_str = self.__replace(src_str, des_pkg, des_prj)
open(desfile, 'wb').write(des_str)
print '[generate] %s' % desfile
def __replace(self, src_str, pkgname, prjname):
ret = src_str.replace('{{pkg_name}}', pkgname)
ret = ret.replace('{{project_name}}', prjname)
ret = ret.replace('{{project_name_upper}}', string.upper(prjname))
ret = ret.replace('{{project_name_lower}}', string.lower(prjname))
ret = ret.replace('{{uuid}}', str(uuid.uuid1()))
return ret
if __name__=="__main__":
def inputparam(index, prompt, default='', options=None):
if len(sys.argv) > index:
ret = sys.argv[index]
else:
ret = raw_input(prompt)
if ret == '':
ret = default
        if options is not None and ret not in options:
            print 'Please select one of the options.\n'
ret = inputparam(index, prompt, default, options)
return ret
types = ['general', 'pane', 'view']
prjtype = inputparam(1, 'select plugin type(general/pane/view)\nplugin type(default: general): ', 'general', types)
prjname = inputparam(2, 'project name: ')
pkgname = inputparam(3, 'package name of the new project (default: plugins): ', 'plugins')
prj = Project(prjtype)
prj.generate(pkgname, prjname)
if len(sys.argv) < 3:
raw_input("Press <ENTER> to end.")
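# Editor's note (hedged usage sketch; names are illustrative): the three
# positional arguments map to plugin type, project name and package name, so
#   python makeplugin.py pane MyPane plugins
# copies the 'pane' template into the include/, src/ and projects/ trees under
# the 'plugins' package, rewriting the {{...}} placeholders along the way.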
| AquariusCoder/xtpuiplugin | tools/makeplugin.py | Python | apache-2.0 | 4,793 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
###############################################################################
# Copyright Kitware Inc.
#
# Licensed under the Apache License, Version 2.0 ( the "License" );
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
###############################################################################
# The path that will be mounted in docker containers for data IO
DOCKER_DATA_VOLUME = '/mnt/girder_worker/data'
# The path that will be mounted in docker containers for utility scripts
DOCKER_SCRIPTS_VOLUME = '/mnt/girder_worker/scripts'
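# Editor's note (hedged, illustrative): these paths are the container-side
# halves of bind mounts, e.g.
#   docker run -v /host/tmp/data:/mnt/girder_worker/data ...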
| salamb/girder | plugins/worker/server/constants.py | Python | apache-2.0 | 1,027 |
"""
WSGI config for codingPortal project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.9/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "codingPortal.settings")
application = get_wsgi_application()
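# One common way to serve this callable (assuming gunicorn is installed):
#
#   gunicorn codingPortal.wsgi:application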
| SJIT-Hackerspace/SJIT-CodingPortal | codingPortal/wsgi.py | Python | apache-2.0 | 401 |
#!/usr/bin/env python
# @@xxx_skip_license@@
# -*- coding: utf-8 -*-
# @PydevCodeAnalysisIgnore
#
# Generated Tue Nov 6 11:26:05 2018 by generateDS.py version 2.29.24.
# Python 3.7.0 (default, Jul 23 2018, 20:22:55) [Clang 9.1.0 (clang-902.0.39.2)]
#
# Command line options:
# ('-f', '')
# ('--no-questions', '')
# ('--external-encoding', 'utf-8')
# ('-o', 'gen.py')
# ('-s', 'sub.py')
#
# Command line arguments:
# ../../../service/api/MessageFlow.1.xsd
#
# Command line:
# /usr/local/bin/generateDS -f --no-questions --external-encoding="utf-8" -o "gen.py" -s "sub.py" ../../../service/api/MessageFlow.1.xsd
#
# Current working directory (os.getcwd()):
# mfd
#
import base64
import datetime as datetime_
import re as re_
import sys
import warnings as warnings_
try:
from lxml import etree as etree_
except ImportError:
from xml.etree import ElementTree as etree_
Validate_simpletypes_ = True
if sys.version_info.major == 2:
BaseStrType_ = basestring
else:
BaseStrType_ = str
def parsexml_(infile, parser=None, **kwargs):
if parser is None:
# Use the lxml ElementTree compatible parser so that, e.g.,
# we ignore comments.
try:
parser = etree_.ETCompatXMLParser()
except AttributeError:
# fallback to xml.etree
parser = etree_.XMLParser()
doc = etree_.parse(infile, parser=parser, **kwargs)
return doc
def parsexmlstring_(instring, parser=None, **kwargs):
if parser is None:
# Use the lxml ElementTree compatible parser so that, e.g.,
# we ignore comments.
try:
parser = etree_.ETCompatXMLParser()
except AttributeError:
# fallback to xml.etree
parser = etree_.XMLParser()
element = etree_.fromstring(instring, parser=parser, **kwargs)
return element
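# Minimal usage sketch (the file name is hypothetical):
#
#   doc = parsexml_('MessageFlow.xml')
#   root = doc.getroot()
#   # or, from an in-memory string:
#   element = parsexmlstring_('<messageFlowDefinition/>')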
#
# Namespace prefix definition table (and other attributes, too)
#
# The module generatedsnamespaces, if it is importable, must contain
# a dictionary named GeneratedsNamespaceDefs. This Python dictionary
# should map element type names (strings) to XML schema namespace prefix
# definitions. The export method for any class for which there is
# a namespace prefix definition, will export that definition in the
# XML representation of that element. See the export method of
# any generated element type class for a example of the use of this
# table.
# A sample table is:
#
# # File: generatedsnamespaces.py
#
# GenerateDSNamespaceDefs = {
# "ElementtypeA": "http://www.xxx.com/namespaceA",
# "ElementtypeB": "http://www.xxx.com/namespaceB",
# }
#
try:
from generatedsnamespaces import GenerateDSNamespaceDefs as GenerateDSNamespaceDefs_
except ImportError:
GenerateDSNamespaceDefs_ = {}
#
# The root super-class for element type classes
#
# Calls to the methods in these classes are generated by generateDS.py.
# You can replace these methods by re-implementing the following class
# in a module named generatedssuper.py.
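# A minimal sketch of such an override module (hypothetical file
# generatedssuper.py; the body shown is illustrative). Note that if this
# module is importable it replaces the fallback class below entirely, so a
# real override must supply every gds_* helper the generated classes call:
#
#   # File: generatedssuper.py
#   class GeneratedsSuper(object):
#       def gds_format_string(self, input_data, input_name=''):
#           # e.g. normalize whitespace before export
#           return ' '.join(input_data.split())
#       ...  # remaining gds_* methods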
try:
from generatedssuper import GeneratedsSuper
except ImportError as exp:
class GeneratedsSuper(object):
tzoff_pattern = re_.compile(r'(\+|-)((0\d|1[0-3]):[0-5]\d|14:00)$')
class _FixedOffsetTZ(datetime_.tzinfo):
def __init__(self, offset, name):
self.__offset = datetime_.timedelta(minutes=offset)
self.__name = name
def utcoffset(self, dt):
return self.__offset
def tzname(self, dt):
return self.__name
def dst(self, dt):
return None
def gds_format_string(self, input_data, input_name=''):
return input_data
def gds_validate_string(self, input_data, node=None, input_name=''):
if not input_data:
return ''
else:
return input_data
def gds_format_base64(self, input_data, input_name=''):
return base64.b64encode(input_data)
def gds_validate_base64(self, input_data, node=None, input_name=''):
return input_data
def gds_format_integer(self, input_data, input_name=''):
return '%d' % input_data
def gds_validate_integer(self, input_data, node=None, input_name=''):
return input_data
def gds_format_integer_list(self, input_data, input_name=''):
return '%s' % ' '.join(input_data)
def gds_validate_integer_list(
self, input_data, node=None, input_name=''):
values = input_data.split()
for value in values:
try:
int(value)
except (TypeError, ValueError):
raise_parse_error(node, 'Requires sequence of integers')
return values
def gds_format_float(self, input_data, input_name=''):
return ('%.15f' % input_data).rstrip('0')
def gds_validate_float(self, input_data, node=None, input_name=''):
return input_data
def gds_format_float_list(self, input_data, input_name=''):
return '%s' % ' '.join(input_data)
def gds_validate_float_list(
self, input_data, node=None, input_name=''):
values = input_data.split()
for value in values:
try:
float(value)
except (TypeError, ValueError):
raise_parse_error(node, 'Requires sequence of floats')
return values
def gds_format_double(self, input_data, input_name=''):
return '%e' % input_data
def gds_validate_double(self, input_data, node=None, input_name=''):
return input_data
def gds_format_double_list(self, input_data, input_name=''):
return '%s' % ' '.join(input_data)
def gds_validate_double_list(
self, input_data, node=None, input_name=''):
values = input_data.split()
for value in values:
try:
float(value)
except (TypeError, ValueError):
raise_parse_error(node, 'Requires sequence of doubles')
return values
def gds_format_boolean(self, input_data, input_name=''):
return ('%s' % input_data).lower()
def gds_validate_boolean(self, input_data, node=None, input_name=''):
return input_data
def gds_format_boolean_list(self, input_data, input_name=''):
return '%s' % ' '.join(input_data)
def gds_validate_boolean_list(
self, input_data, node=None, input_name=''):
values = input_data.split()
for value in values:
if value not in ('true', '1', 'false', '0', ):
raise_parse_error(
node,
'Requires sequence of booleans '
'("true", "1", "false", "0")')
return values
def gds_validate_datetime(self, input_data, node=None, input_name=''):
return input_data
def gds_format_datetime(self, input_data, input_name=''):
if input_data.microsecond == 0:
_svalue = '%04d-%02d-%02dT%02d:%02d:%02d' % (
input_data.year,
input_data.month,
input_data.day,
input_data.hour,
input_data.minute,
input_data.second,
)
else:
_svalue = '%04d-%02d-%02dT%02d:%02d:%02d.%s' % (
input_data.year,
input_data.month,
input_data.day,
input_data.hour,
input_data.minute,
input_data.second,
('%f' % (float(input_data.microsecond) / 1000000))[2:],
)
if input_data.tzinfo is not None:
tzoff = input_data.tzinfo.utcoffset(input_data)
if tzoff is not None:
total_seconds = tzoff.seconds + (86400 * tzoff.days)
if total_seconds == 0:
_svalue += 'Z'
else:
if total_seconds < 0:
_svalue += '-'
total_seconds *= -1
else:
_svalue += '+'
hours = total_seconds // 3600
minutes = (total_seconds - (hours * 3600)) // 60
_svalue += '{0:02d}:{1:02d}'.format(hours, minutes)
return _svalue
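        # e.g. (sketch) a datetime(2018, 11, 6, 11, 26, 5) carrying a UTC
        # tzinfo formats as '2018-11-06T11:26:05Z'; a naive datetime gets
        # no offset suffix at all.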
@classmethod
def gds_parse_datetime(cls, input_data):
tz = None
if input_data[-1] == 'Z':
tz = GeneratedsSuper._FixedOffsetTZ(0, 'UTC')
input_data = input_data[:-1]
else:
results = GeneratedsSuper.tzoff_pattern.search(input_data)
if results is not None:
tzoff_parts = results.group(2).split(':')
tzoff = int(tzoff_parts[0]) * 60 + int(tzoff_parts[1])
if results.group(1) == '-':
tzoff *= -1
tz = GeneratedsSuper._FixedOffsetTZ(
tzoff, results.group(0))
input_data = input_data[:-6]
time_parts = input_data.split('.')
if len(time_parts) > 1:
micro_seconds = int(float('0.' + time_parts[1]) * 1000000)
input_data = '%s.%s' % (
time_parts[0], "{}".format(micro_seconds).rjust(6, "0"), )
dt = datetime_.datetime.strptime(
input_data, '%Y-%m-%dT%H:%M:%S.%f')
else:
dt = datetime_.datetime.strptime(
input_data, '%Y-%m-%dT%H:%M:%S')
dt = dt.replace(tzinfo=tz)
return dt
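        # e.g. (sketch) gds_parse_datetime('2018-11-06T11:26:05Z') returns a
        # timezone-aware datetime whose tzinfo is a fixed +00:00 offset.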
def gds_validate_date(self, input_data, node=None, input_name=''):
return input_data
def gds_format_date(self, input_data, input_name=''):
_svalue = '%04d-%02d-%02d' % (
input_data.year,
input_data.month,
input_data.day,
)
try:
if input_data.tzinfo is not None:
tzoff = input_data.tzinfo.utcoffset(input_data)
if tzoff is not None:
total_seconds = tzoff.seconds + (86400 * tzoff.days)
if total_seconds == 0:
_svalue += 'Z'
else:
if total_seconds < 0:
_svalue += '-'
total_seconds *= -1
else:
_svalue += '+'
hours = total_seconds // 3600
minutes = (total_seconds - (hours * 3600)) // 60
_svalue += '{0:02d}:{1:02d}'.format(
hours, minutes)
except AttributeError:
pass
return _svalue
@classmethod
def gds_parse_date(cls, input_data):
tz = None
if input_data[-1] == 'Z':
tz = GeneratedsSuper._FixedOffsetTZ(0, 'UTC')
input_data = input_data[:-1]
else:
results = GeneratedsSuper.tzoff_pattern.search(input_data)
if results is not None:
tzoff_parts = results.group(2).split(':')
tzoff = int(tzoff_parts[0]) * 60 + int(tzoff_parts[1])
if results.group(1) == '-':
tzoff *= -1
tz = GeneratedsSuper._FixedOffsetTZ(
tzoff, results.group(0))
input_data = input_data[:-6]
dt = datetime_.datetime.strptime(input_data, '%Y-%m-%d')
dt = dt.replace(tzinfo=tz)
return dt.date()
def gds_validate_time(self, input_data, node=None, input_name=''):
return input_data
def gds_format_time(self, input_data, input_name=''):
if input_data.microsecond == 0:
_svalue = '%02d:%02d:%02d' % (
input_data.hour,
input_data.minute,
input_data.second,
)
else:
_svalue = '%02d:%02d:%02d.%s' % (
input_data.hour,
input_data.minute,
input_data.second,
('%f' % (float(input_data.microsecond) / 1000000))[2:],
)
if input_data.tzinfo is not None:
tzoff = input_data.tzinfo.utcoffset(input_data)
if tzoff is not None:
total_seconds = tzoff.seconds + (86400 * tzoff.days)
if total_seconds == 0:
_svalue += 'Z'
else:
if total_seconds < 0:
_svalue += '-'
total_seconds *= -1
else:
_svalue += '+'
hours = total_seconds // 3600
minutes = (total_seconds - (hours * 3600)) // 60
_svalue += '{0:02d}:{1:02d}'.format(hours, minutes)
return _svalue
def gds_validate_simple_patterns(self, patterns, target):
        # patterns is a list of lists of strings/regular-expression patterns.
# The target value must match at least one of the patterns
# in order for the test to succeed.
found1 = True
for patterns1 in patterns:
found2 = False
for patterns2 in patterns1:
mo = re_.search(patterns2, target)
if mo is not None and len(mo.group(0)) == len(target):
found2 = True
break
if not found2:
found1 = False
break
return found1
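        # e.g. (sketch) gds_validate_simple_patterns(
        #     [['[0-9]+', '[a-z]+'], ['..18']], '2018') is True: the target
        # fully matches at least one pattern from each inner list.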
@classmethod
def gds_parse_time(cls, input_data):
tz = None
if input_data[-1] == 'Z':
tz = GeneratedsSuper._FixedOffsetTZ(0, 'UTC')
input_data = input_data[:-1]
else:
results = GeneratedsSuper.tzoff_pattern.search(input_data)
if results is not None:
tzoff_parts = results.group(2).split(':')
tzoff = int(tzoff_parts[0]) * 60 + int(tzoff_parts[1])
if results.group(1) == '-':
tzoff *= -1
tz = GeneratedsSuper._FixedOffsetTZ(
tzoff, results.group(0))
input_data = input_data[:-6]
if len(input_data.split('.')) > 1:
dt = datetime_.datetime.strptime(input_data, '%H:%M:%S.%f')
else:
dt = datetime_.datetime.strptime(input_data, '%H:%M:%S')
dt = dt.replace(tzinfo=tz)
return dt.time()
def gds_str_lower(self, instring):
return instring.lower()
def get_path_(self, node):
path_list = []
self.get_path_list_(node, path_list)
path_list.reverse()
path = '/'.join(path_list)
return path
Tag_strip_pattern_ = re_.compile(r'\{.*\}')
def get_path_list_(self, node, path_list):
if node is None:
return
tag = GeneratedsSuper.Tag_strip_pattern_.sub('', node.tag)
if tag:
path_list.append(tag)
self.get_path_list_(node.getparent(), path_list)
def get_class_obj_(self, node, default_class=None):
class_obj1 = default_class
if 'xsi' in node.nsmap:
classname = node.get('{%s}type' % node.nsmap['xsi'])
if classname is not None:
names = classname.split(':')
if len(names) == 2:
classname = names[1]
class_obj2 = globals().get(classname)
if class_obj2 is not None:
class_obj1 = class_obj2
return class_obj1
def gds_build_any(self, node, type_name=None):
return None
@classmethod
def gds_reverse_node_mapping(cls, mapping):
            return dict(((v, k) for k, v in mapping.items()))
@staticmethod
def gds_encode(instring):
if sys.version_info.major == 2:
if ExternalEncoding:
encoding = ExternalEncoding
else:
encoding = 'utf-8'
return instring.encode(encoding)
else:
return instring
@staticmethod
def convert_unicode(instring):
if isinstance(instring, str):
result = quote_xml(instring)
elif sys.version_info.major == 2 and isinstance(instring, unicode):
result = quote_xml(instring).encode('utf8')
else:
result = GeneratedsSuper.gds_encode(str(instring))
return result
def __eq__(self, other):
if type(self) != type(other):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
return not self.__eq__(other)
def getSubclassFromModule_(module, class_):
'''Get the subclass of a class from a specific module.'''
name = class_.__name__ + 'Sub'
if hasattr(module, name):
return getattr(module, name)
else:
return None
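# Sketch: with CurrentSubclassModule_ pointing at a module that defines a
# class named AttachmentSub, getSubclassFromModule_(module, Attachment)
# returns AttachmentSub, which Attachment.factory() then instantiates.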
#
# If you have installed IPython you can uncomment and use the following.
# IPython is available from http://ipython.scipy.org/.
#
## from IPython.Shell import IPShellEmbed
## args = ''
## ipshell = IPShellEmbed(args,
## banner = 'Dropping into IPython',
## exit_msg = 'Leaving Interpreter, back to program.')
# Then use the following line where and when you want to drop into the
# IPython shell:
# ipshell('<some message> -- Entering ipshell.\nHit Ctrl-D to exit')
#
# Globals
#
ExternalEncoding = 'utf-8'
Tag_pattern_ = re_.compile(r'({.*})?(.*)')
String_cleanup_pat_ = re_.compile(r"[\n\r\s]+")
Namespace_extract_pat_ = re_.compile(r'{(.*)}(.*)')
CDATA_pattern_ = re_.compile(r"<!\[CDATA\[.*?\]\]>", re_.DOTALL)
# Change this to redirect the generated superclass module to use a
# specific subclass module.
CurrentSubclassModule_ = None
#
# Support/utility functions.
#
def showIndent(outfile, level, pretty_print=True):
if pretty_print:
for idx in range(level):
outfile.write(' ')
def quote_xml(inStr):
"Escape markup chars, but do not modify CDATA sections."
if not inStr:
return ''
s1 = (isinstance(inStr, BaseStrType_) and inStr or '%s' % inStr)
s2 = ''
pos = 0
matchobjects = CDATA_pattern_.finditer(s1)
for mo in matchobjects:
s3 = s1[pos:mo.start()]
s2 += quote_xml_aux(s3)
s2 += s1[mo.start():mo.end()]
pos = mo.end()
s3 = s1[pos:]
s2 += quote_xml_aux(s3)
return s2
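# e.g. quote_xml('a < b & <![CDATA[c < d]]>') escapes the markup characters
# outside the CDATA section only, yielding 'a &lt; b &amp; <![CDATA[c < d]]>'.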
def quote_xml_aux(inStr):
    s1 = inStr.replace('&', '&amp;')
    s1 = s1.replace('<', '&lt;')
    s1 = s1.replace('>', '&gt;')
return s1
def quote_attrib(inStr):
s1 = (isinstance(inStr, BaseStrType_) and inStr or '%s' % inStr)
    s1 = s1.replace('&', '&amp;')
    s1 = s1.replace('<', '&lt;')
    s1 = s1.replace('>', '&gt;')
    if '"' in s1:
        if "'" in s1:
            s1 = '"%s"' % s1.replace('"', "&quot;")
else:
s1 = "'%s'" % s1
else:
s1 = '"%s"' % s1
return s1
def quote_python(inStr):
s1 = inStr
if s1.find("'") == -1:
if s1.find('\n') == -1:
return "'%s'" % s1
else:
return "'''%s'''" % s1
else:
if s1.find('"') != -1:
s1 = s1.replace('"', '\\"')
if s1.find('\n') == -1:
return '"%s"' % s1
else:
return '"""%s"""' % s1
def get_all_text_(node):
if node.text is not None:
text = node.text
else:
text = ''
for child in node:
if child.tail is not None:
text += child.tail
return text
def find_attr_value_(attr_name, node):
attrs = node.attrib
attr_parts = attr_name.split(':')
value = None
if len(attr_parts) == 1:
value = attrs.get(attr_name)
elif len(attr_parts) == 2:
prefix, name = attr_parts
namespace = node.nsmap.get(prefix)
if namespace is not None:
value = attrs.get('{%s}%s' % (namespace, name, ))
return value
class GDSParseError(Exception):
pass
def raise_parse_error(node, msg):
msg = '%s (element %s/line %d)' % (msg, node.tag, node.sourceline, )
raise GDSParseError(msg)
class MixedContainer:
# Constants for category:
CategoryNone = 0
CategoryText = 1
CategorySimple = 2
CategoryComplex = 3
# Constants for content_type:
TypeNone = 0
TypeText = 1
TypeString = 2
TypeInteger = 3
TypeFloat = 4
TypeDecimal = 5
TypeDouble = 6
TypeBoolean = 7
TypeBase64 = 8
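    # e.g. (sketch) MixedContainer(MixedContainer.CategoryText,
    # MixedContainer.TypeText, '', 'some text') wraps a plain text node.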
def __init__(self, category, content_type, name, value):
self.category = category
self.content_type = content_type
self.name = name
self.value = value
def getCategory(self):
return self.category
def getContenttype(self, content_type):
return self.content_type
def getValue(self):
return self.value
def getName(self):
return self.name
def export(self, outfile, level, name, namespace,
pretty_print=True):
if self.category == MixedContainer.CategoryText:
# Prevent exporting empty content as empty lines.
if self.value.strip():
outfile.write(self.value)
elif self.category == MixedContainer.CategorySimple:
self.exportSimple(outfile, level, name)
else: # category == MixedContainer.CategoryComplex
self.value.export(
outfile, level, namespace, name,
pretty_print=pretty_print)
def exportSimple(self, outfile, level, name):
if self.content_type == MixedContainer.TypeString:
outfile.write('<%s>%s</%s>' % (
self.name, self.value, self.name))
elif self.content_type == MixedContainer.TypeInteger or \
self.content_type == MixedContainer.TypeBoolean:
outfile.write('<%s>%d</%s>' % (
self.name, self.value, self.name))
elif self.content_type == MixedContainer.TypeFloat or \
self.content_type == MixedContainer.TypeDecimal:
outfile.write('<%s>%f</%s>' % (
self.name, self.value, self.name))
elif self.content_type == MixedContainer.TypeDouble:
outfile.write('<%s>%g</%s>' % (
self.name, self.value, self.name))
elif self.content_type == MixedContainer.TypeBase64:
outfile.write('<%s>%s</%s>' % (
self.name,
base64.b64encode(self.value),
self.name))
def to_etree(self, element):
if self.category == MixedContainer.CategoryText:
# Prevent exporting empty content as empty lines.
if self.value.strip():
if len(element) > 0:
if element[-1].tail is None:
element[-1].tail = self.value
else:
element[-1].tail += self.value
else:
if element.text is None:
element.text = self.value
else:
element.text += self.value
elif self.category == MixedContainer.CategorySimple:
subelement = etree_.SubElement(
element, '%s' % self.name)
subelement.text = self.to_etree_simple()
else: # category == MixedContainer.CategoryComplex
self.value.to_etree(element)
def to_etree_simple(self):
if self.content_type == MixedContainer.TypeString:
text = self.value
elif (self.content_type == MixedContainer.TypeInteger or
self.content_type == MixedContainer.TypeBoolean):
text = '%d' % self.value
elif (self.content_type == MixedContainer.TypeFloat or
self.content_type == MixedContainer.TypeDecimal):
text = '%f' % self.value
elif self.content_type == MixedContainer.TypeDouble:
text = '%g' % self.value
elif self.content_type == MixedContainer.TypeBase64:
text = '%s' % base64.b64encode(self.value)
return text
def exportLiteral(self, outfile, level, name):
if self.category == MixedContainer.CategoryText:
showIndent(outfile, level)
outfile.write(
'model_.MixedContainer(%d, %d, "%s", "%s"),\n' % (
self.category, self.content_type,
self.name, self.value))
elif self.category == MixedContainer.CategorySimple:
showIndent(outfile, level)
outfile.write(
'model_.MixedContainer(%d, %d, "%s", "%s"),\n' % (
self.category, self.content_type,
self.name, self.value))
else: # category == MixedContainer.CategoryComplex
showIndent(outfile, level)
outfile.write(
'model_.MixedContainer(%d, %d, "%s",\n' % (
self.category, self.content_type, self.name,))
self.value.exportLiteral(outfile, level + 1)
showIndent(outfile, level)
outfile.write(')\n')
class MemberSpec_(object):
def __init__(self, name='', data_type='', container=0,
optional=0, child_attrs=None, choice=None):
self.name = name
self.data_type = data_type
self.container = container
self.child_attrs = child_attrs
self.choice = choice
self.optional = optional
def set_name(self, name): self.name = name
def get_name(self): return self.name
def set_data_type(self, data_type): self.data_type = data_type
def get_data_type_chain(self): return self.data_type
def get_data_type(self):
if isinstance(self.data_type, list):
if len(self.data_type) > 0:
return self.data_type[-1]
else:
return 'xs:string'
else:
return self.data_type
def set_container(self, container): self.container = container
def get_container(self): return self.container
def set_child_attrs(self, child_attrs): self.child_attrs = child_attrs
def get_child_attrs(self): return self.child_attrs
def set_choice(self, choice): self.choice = choice
def get_choice(self): return self.choice
def set_optional(self, optional): self.optional = optional
def get_optional(self): return self.optional
def _cast(typ, value):
if typ is None or value is None:
return value
return typ(value)
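# e.g. _cast(int, '5') == 5, while _cast(None, '5') and _cast(int, None)
# both return the value argument unchanged.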
#
# Data representation classes.
#
class AlertIntervalType(object):
NONE='NONE'
INTERVAL__5='INTERVAL_5'
INTERVAL__15='INTERVAL_15'
INTERVAL__30='INTERVAL_30'
INTERVAL__60='INTERVAL_60'
INTERVAL__300='INTERVAL_300'
INTERVAL__900='INTERVAL_900'
INTERVAL__3600='INTERVAL_3600'
class AlertType(object):
BEEP='BEEP'
SILENT='SILENT'
RING__5='RING_5'
RING__15='RING_15'
RING__30='RING_30'
RING__60='RING_60'
class FormButton(object):
POSITIVE='positive'
NEGATIVE='negative'
class KeyboardType(object):
DEFAULT='DEFAULT'
AUTO_CAPITALIZED='AUTO_CAPITALIZED'
EMAIL='EMAIL'
URL='URL'
PHONE='PHONE'
NUMBER='NUMBER'
DECIMAL='DECIMAL'
PASSWORD='PASSWORD'
NUMBER_PASSWORD='NUMBER_PASSWORD'
class MemberStatus(object):
SUBMITTED='SUBMITTED'
INITIATED='INITIATED'
RUNNING='RUNNING'
FINISHED='FINISHED'
class ProgrammingLanguage(object):
JYTHON='JYTHON'
JRUBY='JRUBY'
class Attachment(GeneratedsSuper):
subclass = None
superclass = None
def __init__(self, name=None, url=None, contentType=None, size=None):
self.original_tagname_ = None
self.name = _cast(None, name)
self.url = _cast(None, url)
self.contentType = _cast(None, contentType)
self.size = _cast(int, size)
def factory(*args_, **kwargs_):
if CurrentSubclassModule_ is not None:
subclass = getSubclassFromModule_(
CurrentSubclassModule_, Attachment)
if subclass is not None:
return subclass(*args_, **kwargs_)
if Attachment.subclass:
return Attachment.subclass(*args_, **kwargs_)
else:
return Attachment(*args_, **kwargs_)
factory = staticmethod(factory)
def get_name(self): return self.name
def set_name(self, name): self.name = name
def get_url(self): return self.url
def set_url(self, url): self.url = url
def get_contentType(self): return self.contentType
def set_contentType(self, contentType): self.contentType = contentType
def get_size(self): return self.size
def set_size(self, size): self.size = size
def hasContent_(self):
if (
):
return True
else:
return False
def export(self, outfile, level, namespaceprefix_='', name_='Attachment', namespacedef_='', pretty_print=True):
imported_ns_def_ = GenerateDSNamespaceDefs_.get('Attachment')
if imported_ns_def_ is not None:
namespacedef_ = imported_ns_def_
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
if self.original_tagname_ is not None:
name_ = self.original_tagname_
showIndent(outfile, level, pretty_print)
outfile.write('<%s%s%s' % (namespaceprefix_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = set()
self.exportAttributes(outfile, level, already_processed, namespaceprefix_, name_='Attachment')
if self.hasContent_():
outfile.write('>%s' % (eol_, ))
self.exportChildren(outfile, level + 1, namespaceprefix_='', name_='Attachment', pretty_print=pretty_print)
outfile.write('</%s%s>%s' % (namespaceprefix_, name_, eol_))
else:
outfile.write('/>%s' % (eol_, ))
def exportAttributes(self, outfile, level, already_processed, namespaceprefix_='', name_='Attachment'):
if self.name is not None and 'name' not in already_processed:
already_processed.add('name')
outfile.write(' name=%s' % (self.gds_encode(self.gds_format_string(quote_attrib(self.name), input_name='name')), ))
if self.url is not None and 'url' not in already_processed:
already_processed.add('url')
outfile.write(' url=%s' % (self.gds_encode(self.gds_format_string(quote_attrib(self.url), input_name='url')), ))
if self.contentType is not None and 'contentType' not in already_processed:
already_processed.add('contentType')
outfile.write(' contentType=%s' % (self.gds_encode(self.gds_format_string(quote_attrib(self.contentType), input_name='contentType')), ))
if self.size is not None and 'size' not in already_processed:
already_processed.add('size')
outfile.write(' size="%s"' % self.gds_format_integer(self.size, input_name='size'))
def exportChildren(self, outfile, level, namespaceprefix_='', name_='Attachment', fromsubclass_=False, pretty_print=True):
pass
def build(self, node):
already_processed = set()
self.buildAttributes(node, node.attrib, already_processed)
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
return self
def buildAttributes(self, node, attrs, already_processed):
value = find_attr_value_('name', node)
if value is not None and 'name' not in already_processed:
already_processed.add('name')
self.name = value
value = find_attr_value_('url', node)
if value is not None and 'url' not in already_processed:
already_processed.add('url')
self.url = value
value = find_attr_value_('contentType', node)
if value is not None and 'contentType' not in already_processed:
already_processed.add('contentType')
self.contentType = value
value = find_attr_value_('size', node)
if value is not None and 'size' not in already_processed:
already_processed.add('size')
try:
self.size = int(value)
except ValueError as exp:
raise_parse_error(node, 'Bad integer attribute: %s' % exp)
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
pass
# end class Attachment
class FlowElement(GeneratedsSuper):
subclass = None
superclass = None
def __init__(self, id=None, extensiontype_=None):
self.original_tagname_ = None
self.id = _cast(None, id)
self.extensiontype_ = extensiontype_
def factory(*args_, **kwargs_):
if CurrentSubclassModule_ is not None:
subclass = getSubclassFromModule_(
CurrentSubclassModule_, FlowElement)
if subclass is not None:
return subclass(*args_, **kwargs_)
if FlowElement.subclass:
return FlowElement.subclass(*args_, **kwargs_)
else:
return FlowElement(*args_, **kwargs_)
factory = staticmethod(factory)
def get_id(self): return self.id
def set_id(self, id): self.id = id
def get_extensiontype_(self): return self.extensiontype_
def set_extensiontype_(self, extensiontype_): self.extensiontype_ = extensiontype_
def hasContent_(self):
if (
):
return True
else:
return False
def export(self, outfile, level, namespaceprefix_='', name_='FlowElement', namespacedef_='', pretty_print=True):
imported_ns_def_ = GenerateDSNamespaceDefs_.get('FlowElement')
if imported_ns_def_ is not None:
namespacedef_ = imported_ns_def_
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
if self.original_tagname_ is not None:
name_ = self.original_tagname_
showIndent(outfile, level, pretty_print)
outfile.write('<%s%s%s' % (namespaceprefix_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = set()
self.exportAttributes(outfile, level, already_processed, namespaceprefix_, name_='FlowElement')
if self.hasContent_():
outfile.write('>%s' % (eol_, ))
self.exportChildren(outfile, level + 1, namespaceprefix_='', name_='FlowElement', pretty_print=pretty_print)
outfile.write('</%s%s>%s' % (namespaceprefix_, name_, eol_))
else:
outfile.write('/>%s' % (eol_, ))
def exportAttributes(self, outfile, level, already_processed, namespaceprefix_='', name_='FlowElement'):
if self.id is not None and 'id' not in already_processed:
already_processed.add('id')
outfile.write(' id=%s' % (self.gds_encode(self.gds_format_string(quote_attrib(self.id), input_name='id')), ))
if self.extensiontype_ is not None and 'xsi:type' not in already_processed:
already_processed.add('xsi:type')
outfile.write(' xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"')
outfile.write(' xsi:type="%s"' % self.extensiontype_)
def exportChildren(self, outfile, level, namespaceprefix_='', name_='FlowElement', fromsubclass_=False, pretty_print=True):
pass
def build(self, node):
already_processed = set()
self.buildAttributes(node, node.attrib, already_processed)
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
return self
def buildAttributes(self, node, attrs, already_processed):
value = find_attr_value_('id', node)
if value is not None and 'id' not in already_processed:
already_processed.add('id')
self.id = value
value = find_attr_value_('xsi:type', node)
if value is not None and 'xsi:type' not in already_processed:
already_processed.add('xsi:type')
self.extensiontype_ = value
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
pass
# end class FlowElement
class Answer(GeneratedsSuper):
subclass = None
superclass = None
def __init__(self, caption=None, action=None, id=None, reference=None, color=None):
self.original_tagname_ = None
self.caption = _cast(None, caption)
self.action = _cast(None, action)
self.id = _cast(None, id)
self.reference = _cast(None, reference)
self.color = _cast(None, color)
def factory(*args_, **kwargs_):
if CurrentSubclassModule_ is not None:
subclass = getSubclassFromModule_(
CurrentSubclassModule_, Answer)
if subclass is not None:
return subclass(*args_, **kwargs_)
if Answer.subclass:
return Answer.subclass(*args_, **kwargs_)
else:
return Answer(*args_, **kwargs_)
factory = staticmethod(factory)
def get_caption(self): return self.caption
def set_caption(self, caption): self.caption = caption
def get_action(self): return self.action
def set_action(self, action): self.action = action
def get_id(self): return self.id
def set_id(self, id): self.id = id
def get_reference(self): return self.reference
def set_reference(self, reference): self.reference = reference
def get_color(self): return self.color
def set_color(self, color): self.color = color
def hasContent_(self):
if (
):
return True
else:
return False
def export(self, outfile, level, namespaceprefix_='', name_='Answer', namespacedef_='', pretty_print=True):
imported_ns_def_ = GenerateDSNamespaceDefs_.get('Answer')
if imported_ns_def_ is not None:
namespacedef_ = imported_ns_def_
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
if self.original_tagname_ is not None:
name_ = self.original_tagname_
showIndent(outfile, level, pretty_print)
outfile.write('<%s%s%s' % (namespaceprefix_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = set()
self.exportAttributes(outfile, level, already_processed, namespaceprefix_, name_='Answer')
if self.hasContent_():
outfile.write('>%s' % (eol_, ))
self.exportChildren(outfile, level + 1, namespaceprefix_='', name_='Answer', pretty_print=pretty_print)
outfile.write('</%s%s>%s' % (namespaceprefix_, name_, eol_))
else:
outfile.write('/>%s' % (eol_, ))
def exportAttributes(self, outfile, level, already_processed, namespaceprefix_='', name_='Answer'):
if self.caption is not None and 'caption' not in already_processed:
already_processed.add('caption')
outfile.write(' caption=%s' % (self.gds_encode(self.gds_format_string(quote_attrib(self.caption), input_name='caption')), ))
if self.action is not None and 'action' not in already_processed:
already_processed.add('action')
outfile.write(' action=%s' % (self.gds_encode(self.gds_format_string(quote_attrib(self.action), input_name='action')), ))
if self.id is not None and 'id' not in already_processed:
already_processed.add('id')
outfile.write(' id=%s' % (self.gds_encode(self.gds_format_string(quote_attrib(self.id), input_name='id')), ))
if self.reference is not None and 'reference' not in already_processed:
already_processed.add('reference')
outfile.write(' reference=%s' % (self.gds_encode(self.gds_format_string(quote_attrib(self.reference), input_name='reference')), ))
if self.color is not None and 'color' not in already_processed:
already_processed.add('color')
outfile.write(' color=%s' % (self.gds_encode(self.gds_format_string(quote_attrib(self.color), input_name='color')), ))
def exportChildren(self, outfile, level, namespaceprefix_='', name_='Answer', fromsubclass_=False, pretty_print=True):
pass
def build(self, node):
already_processed = set()
self.buildAttributes(node, node.attrib, already_processed)
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
return self
def buildAttributes(self, node, attrs, already_processed):
value = find_attr_value_('caption', node)
if value is not None and 'caption' not in already_processed:
already_processed.add('caption')
self.caption = value
value = find_attr_value_('action', node)
if value is not None and 'action' not in already_processed:
already_processed.add('action')
self.action = value
value = find_attr_value_('id', node)
if value is not None and 'id' not in already_processed:
already_processed.add('id')
self.id = value
value = find_attr_value_('reference', node)
if value is not None and 'reference' not in already_processed:
already_processed.add('reference')
self.reference = value
value = find_attr_value_('color', node)
if value is not None and 'color' not in already_processed:
already_processed.add('color')
self.color = value
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
pass
# end class Answer
class Message(FlowElement):
subclass = None
superclass = FlowElement
def __init__(self, id=None, allowDismiss=None, dismissReference=None, brandingKey=None, autoLock=None, vibrate=None, alertType=None, alertIntervalType=None, content=None, answer=None, attachment=None):
self.original_tagname_ = None
super(Message, self).__init__(id, )
self.allowDismiss = _cast(bool, allowDismiss)
self.dismissReference = _cast(None, dismissReference)
self.brandingKey = _cast(None, brandingKey)
self.autoLock = _cast(bool, autoLock)
self.vibrate = _cast(bool, vibrate)
self.alertType = _cast(None, alertType)
self.alertIntervalType = _cast(None, alertIntervalType)
self.content = content
if answer is None:
self.answer = []
else:
self.answer = answer
if attachment is None:
self.attachment = []
else:
self.attachment = attachment
def factory(*args_, **kwargs_):
if CurrentSubclassModule_ is not None:
subclass = getSubclassFromModule_(
CurrentSubclassModule_, Message)
if subclass is not None:
return subclass(*args_, **kwargs_)
if Message.subclass:
return Message.subclass(*args_, **kwargs_)
else:
return Message(*args_, **kwargs_)
factory = staticmethod(factory)
def get_content(self): return self.content
def set_content(self, content): self.content = content
def get_answer(self): return self.answer
def set_answer(self, answer): self.answer = answer
def add_answer(self, value): self.answer.append(value)
def insert_answer_at(self, index, value): self.answer.insert(index, value)
def replace_answer_at(self, index, value): self.answer[index] = value
def get_attachment(self): return self.attachment
def set_attachment(self, attachment): self.attachment = attachment
def add_attachment(self, value): self.attachment.append(value)
def insert_attachment_at(self, index, value): self.attachment.insert(index, value)
def replace_attachment_at(self, index, value): self.attachment[index] = value
def get_allowDismiss(self): return self.allowDismiss
def set_allowDismiss(self, allowDismiss): self.allowDismiss = allowDismiss
def get_dismissReference(self): return self.dismissReference
def set_dismissReference(self, dismissReference): self.dismissReference = dismissReference
def get_brandingKey(self): return self.brandingKey
def set_brandingKey(self, brandingKey): self.brandingKey = brandingKey
def get_autoLock(self): return self.autoLock
def set_autoLock(self, autoLock): self.autoLock = autoLock
def get_vibrate(self): return self.vibrate
def set_vibrate(self, vibrate): self.vibrate = vibrate
def get_alertType(self): return self.alertType
def set_alertType(self, alertType): self.alertType = alertType
def get_alertIntervalType(self): return self.alertIntervalType
def set_alertIntervalType(self, alertIntervalType): self.alertIntervalType = alertIntervalType
def validate_AlertType(self, value):
# Validate type AlertType, a restriction on xs:string.
if value is not None and Validate_simpletypes_:
value = str(value)
enumerations = ['BEEP', 'SILENT', 'RING_5', 'RING_15', 'RING_30', 'RING_60']
enumeration_respectee = False
for enum in enumerations:
if value == enum:
enumeration_respectee = True
break
if not enumeration_respectee:
warnings_.warn('Value "%(value)s" does not match xsd enumeration restriction on AlertType' % {"value" : value.encode("utf-8")} )
def validate_AlertIntervalType(self, value):
# Validate type AlertIntervalType, a restriction on xs:string.
if value is not None and Validate_simpletypes_:
value = str(value)
enumerations = ['NONE', 'INTERVAL_5', 'INTERVAL_15', 'INTERVAL_30', 'INTERVAL_60', 'INTERVAL_300', 'INTERVAL_900', 'INTERVAL_3600']
enumeration_respectee = False
for enum in enumerations:
if value == enum:
enumeration_respectee = True
break
if not enumeration_respectee:
warnings_.warn('Value "%(value)s" does not match xsd enumeration restriction on AlertIntervalType' % {"value" : value.encode("utf-8")} )
def hasContent_(self):
if (
self.content is not None or
self.answer or
self.attachment or
super(Message, self).hasContent_()
):
return True
else:
return False
def export(self, outfile, level, namespaceprefix_='', name_='Message', namespacedef_='', pretty_print=True):
imported_ns_def_ = GenerateDSNamespaceDefs_.get('Message')
if imported_ns_def_ is not None:
namespacedef_ = imported_ns_def_
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
if self.original_tagname_ is not None:
name_ = self.original_tagname_
showIndent(outfile, level, pretty_print)
outfile.write('<%s%s%s' % (namespaceprefix_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = set()
self.exportAttributes(outfile, level, already_processed, namespaceprefix_, name_='Message')
if self.hasContent_():
outfile.write('>%s' % (eol_, ))
self.exportChildren(outfile, level + 1, namespaceprefix_='', name_='Message', pretty_print=pretty_print)
showIndent(outfile, level, pretty_print)
outfile.write('</%s%s>%s' % (namespaceprefix_, name_, eol_))
else:
outfile.write('/>%s' % (eol_, ))
def exportAttributes(self, outfile, level, already_processed, namespaceprefix_='', name_='Message'):
super(Message, self).exportAttributes(outfile, level, already_processed, namespaceprefix_, name_='Message')
if self.allowDismiss is not None and 'allowDismiss' not in already_processed:
already_processed.add('allowDismiss')
outfile.write(' allowDismiss="%s"' % self.gds_format_boolean(self.allowDismiss, input_name='allowDismiss'))
if self.dismissReference is not None and 'dismissReference' not in already_processed:
already_processed.add('dismissReference')
outfile.write(' dismissReference=%s' % (self.gds_encode(self.gds_format_string(quote_attrib(self.dismissReference), input_name='dismissReference')), ))
if self.brandingKey is not None and 'brandingKey' not in already_processed:
already_processed.add('brandingKey')
outfile.write(' brandingKey=%s' % (self.gds_encode(self.gds_format_string(quote_attrib(self.brandingKey), input_name='brandingKey')), ))
if self.autoLock is not None and 'autoLock' not in already_processed:
already_processed.add('autoLock')
outfile.write(' autoLock="%s"' % self.gds_format_boolean(self.autoLock, input_name='autoLock'))
if self.vibrate is not None and 'vibrate' not in already_processed:
already_processed.add('vibrate')
outfile.write(' vibrate="%s"' % self.gds_format_boolean(self.vibrate, input_name='vibrate'))
if self.alertType is not None and 'alertType' not in already_processed:
already_processed.add('alertType')
outfile.write(' alertType=%s' % (quote_attrib(self.alertType), ))
if self.alertIntervalType is not None and 'alertIntervalType' not in already_processed:
already_processed.add('alertIntervalType')
outfile.write(' alertIntervalType=%s' % (quote_attrib(self.alertIntervalType), ))
def exportChildren(self, outfile, level, namespaceprefix_='', name_='Message', fromsubclass_=False, pretty_print=True):
super(Message, self).exportChildren(outfile, level, namespaceprefix_, name_, True, pretty_print=pretty_print)
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
if self.content is not None:
self.content.export(outfile, level, namespaceprefix_, name_='content', pretty_print=pretty_print)
for answer_ in self.answer:
answer_.export(outfile, level, namespaceprefix_, name_='answer', pretty_print=pretty_print)
for attachment_ in self.attachment:
attachment_.export(outfile, level, namespaceprefix_, name_='attachment', pretty_print=pretty_print)
def build(self, node):
already_processed = set()
self.buildAttributes(node, node.attrib, already_processed)
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
return self
def buildAttributes(self, node, attrs, already_processed):
value = find_attr_value_('allowDismiss', node)
if value is not None and 'allowDismiss' not in already_processed:
already_processed.add('allowDismiss')
if value in ('true', '1'):
self.allowDismiss = True
elif value in ('false', '0'):
self.allowDismiss = False
else:
raise_parse_error(node, 'Bad boolean attribute')
value = find_attr_value_('dismissReference', node)
if value is not None and 'dismissReference' not in already_processed:
already_processed.add('dismissReference')
self.dismissReference = value
value = find_attr_value_('brandingKey', node)
if value is not None and 'brandingKey' not in already_processed:
already_processed.add('brandingKey')
self.brandingKey = value
value = find_attr_value_('autoLock', node)
if value is not None and 'autoLock' not in already_processed:
already_processed.add('autoLock')
if value in ('true', '1'):
self.autoLock = True
elif value in ('false', '0'):
self.autoLock = False
else:
raise_parse_error(node, 'Bad boolean attribute')
value = find_attr_value_('vibrate', node)
if value is not None and 'vibrate' not in already_processed:
already_processed.add('vibrate')
if value in ('true', '1'):
self.vibrate = True
elif value in ('false', '0'):
self.vibrate = False
else:
raise_parse_error(node, 'Bad boolean attribute')
value = find_attr_value_('alertType', node)
if value is not None and 'alertType' not in already_processed:
already_processed.add('alertType')
self.alertType = value
self.validate_AlertType(self.alertType) # validate type AlertType
value = find_attr_value_('alertIntervalType', node)
if value is not None and 'alertIntervalType' not in already_processed:
already_processed.add('alertIntervalType')
self.alertIntervalType = value
self.validate_AlertIntervalType(self.alertIntervalType) # validate type AlertIntervalType
super(Message, self).buildAttributes(node, attrs, already_processed)
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
if nodeName_ == 'content':
obj_ = contentType.factory()
obj_.build(child_)
self.content = obj_
obj_.original_tagname_ = 'content'
elif nodeName_ == 'answer':
obj_ = Answer.factory()
obj_.build(child_)
self.answer.append(obj_)
obj_.original_tagname_ = 'answer'
elif nodeName_ == 'attachment':
obj_ = Attachment.factory()
obj_.build(child_)
self.attachment.append(obj_)
obj_.original_tagname_ = 'attachment'
super(Message, self).buildChildren(child_, node, nodeName_, True)
# end class Message
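# Minimal round-trip sketch (not generated code; the element name and
# attribute values below are made up for illustration):
#
#   node = parsexmlstring_('<step id="m1" alertType="BEEP" vibrate="true"/>')
#   msg = Message.factory().build(node)
#   # msg.get_id() == 'm1', msg.get_alertType() == 'BEEP',
#   # msg.get_vibrate() is True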
class ResultsFlush(FlowElement):
subclass = None
superclass = FlowElement
def __init__(self, id=None, reference=None):
self.original_tagname_ = None
super(ResultsFlush, self).__init__(id, )
self.reference = _cast(None, reference)
def factory(*args_, **kwargs_):
if CurrentSubclassModule_ is not None:
subclass = getSubclassFromModule_(
CurrentSubclassModule_, ResultsFlush)
if subclass is not None:
return subclass(*args_, **kwargs_)
if ResultsFlush.subclass:
return ResultsFlush.subclass(*args_, **kwargs_)
else:
return ResultsFlush(*args_, **kwargs_)
factory = staticmethod(factory)
def get_reference(self): return self.reference
def set_reference(self, reference): self.reference = reference
def hasContent_(self):
if (
super(ResultsFlush, self).hasContent_()
):
return True
else:
return False
def export(self, outfile, level, namespaceprefix_='', name_='ResultsFlush', namespacedef_='', pretty_print=True):
imported_ns_def_ = GenerateDSNamespaceDefs_.get('ResultsFlush')
if imported_ns_def_ is not None:
namespacedef_ = imported_ns_def_
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
if self.original_tagname_ is not None:
name_ = self.original_tagname_
showIndent(outfile, level, pretty_print)
outfile.write('<%s%s%s' % (namespaceprefix_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = set()
self.exportAttributes(outfile, level, already_processed, namespaceprefix_, name_='ResultsFlush')
if self.hasContent_():
outfile.write('>%s' % (eol_, ))
self.exportChildren(outfile, level + 1, namespaceprefix_='', name_='ResultsFlush', pretty_print=pretty_print)
outfile.write('</%s%s>%s' % (namespaceprefix_, name_, eol_))
else:
outfile.write('/>%s' % (eol_, ))
def exportAttributes(self, outfile, level, already_processed, namespaceprefix_='', name_='ResultsFlush'):
super(ResultsFlush, self).exportAttributes(outfile, level, already_processed, namespaceprefix_, name_='ResultsFlush')
if self.reference is not None and 'reference' not in already_processed:
already_processed.add('reference')
outfile.write(' reference=%s' % (self.gds_encode(self.gds_format_string(quote_attrib(self.reference), input_name='reference')), ))
def exportChildren(self, outfile, level, namespaceprefix_='', name_='ResultsFlush', fromsubclass_=False, pretty_print=True):
super(ResultsFlush, self).exportChildren(outfile, level, namespaceprefix_, name_, True, pretty_print=pretty_print)
pass
def build(self, node):
already_processed = set()
self.buildAttributes(node, node.attrib, already_processed)
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
return self
def buildAttributes(self, node, attrs, already_processed):
value = find_attr_value_('reference', node)
if value is not None and 'reference' not in already_processed:
already_processed.add('reference')
self.reference = value
super(ResultsFlush, self).buildAttributes(node, attrs, already_processed)
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
super(ResultsFlush, self).buildChildren(child_, node, nodeName_, True)
pass
# end class ResultsFlush
class ResultsEmail(FlowElement):
subclass = None
superclass = FlowElement
def __init__(self, id=None, reference=None, emailAdmins=None, email=None):
self.original_tagname_ = None
super(ResultsEmail, self).__init__(id, )
self.reference = _cast(None, reference)
self.emailAdmins = _cast(bool, emailAdmins)
if email is None:
self.email = []
else:
self.email = email
def factory(*args_, **kwargs_):
if CurrentSubclassModule_ is not None:
subclass = getSubclassFromModule_(
CurrentSubclassModule_, ResultsEmail)
if subclass is not None:
return subclass(*args_, **kwargs_)
if ResultsEmail.subclass:
return ResultsEmail.subclass(*args_, **kwargs_)
else:
return ResultsEmail(*args_, **kwargs_)
factory = staticmethod(factory)
def get_email(self): return self.email
def set_email(self, email): self.email = email
def add_email(self, value): self.email.append(value)
def insert_email_at(self, index, value): self.email.insert(index, value)
def replace_email_at(self, index, value): self.email[index] = value
def get_reference(self): return self.reference
def set_reference(self, reference): self.reference = reference
def get_emailAdmins(self): return self.emailAdmins
def set_emailAdmins(self, emailAdmins): self.emailAdmins = emailAdmins
def hasContent_(self):
if (
self.email or
super(ResultsEmail, self).hasContent_()
):
return True
else:
return False
def export(self, outfile, level, namespaceprefix_='', name_='ResultsEmail', namespacedef_='', pretty_print=True):
imported_ns_def_ = GenerateDSNamespaceDefs_.get('ResultsEmail')
if imported_ns_def_ is not None:
namespacedef_ = imported_ns_def_
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
if self.original_tagname_ is not None:
name_ = self.original_tagname_
showIndent(outfile, level, pretty_print)
outfile.write('<%s%s%s' % (namespaceprefix_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = set()
self.exportAttributes(outfile, level, already_processed, namespaceprefix_, name_='ResultsEmail')
if self.hasContent_():
outfile.write('>%s' % (eol_, ))
self.exportChildren(outfile, level + 1, namespaceprefix_='', name_='ResultsEmail', pretty_print=pretty_print)
showIndent(outfile, level, pretty_print)
outfile.write('</%s%s>%s' % (namespaceprefix_, name_, eol_))
else:
outfile.write('/>%s' % (eol_, ))
def exportAttributes(self, outfile, level, already_processed, namespaceprefix_='', name_='ResultsEmail'):
super(ResultsEmail, self).exportAttributes(outfile, level, already_processed, namespaceprefix_, name_='ResultsEmail')
if self.reference is not None and 'reference' not in already_processed:
already_processed.add('reference')
outfile.write(' reference=%s' % (self.gds_encode(self.gds_format_string(quote_attrib(self.reference), input_name='reference')), ))
if self.emailAdmins is not None and 'emailAdmins' not in already_processed:
already_processed.add('emailAdmins')
outfile.write(' emailAdmins="%s"' % self.gds_format_boolean(self.emailAdmins, input_name='emailAdmins'))
def exportChildren(self, outfile, level, namespaceprefix_='', name_='ResultsEmail', fromsubclass_=False, pretty_print=True):
super(ResultsEmail, self).exportChildren(outfile, level, namespaceprefix_, name_, True, pretty_print=pretty_print)
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
for email_ in self.email:
email_.export(outfile, level, namespaceprefix_, name_='email', pretty_print=pretty_print)
def build(self, node):
already_processed = set()
self.buildAttributes(node, node.attrib, already_processed)
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
return self
def buildAttributes(self, node, attrs, already_processed):
value = find_attr_value_('reference', node)
if value is not None and 'reference' not in already_processed:
already_processed.add('reference')
self.reference = value
value = find_attr_value_('emailAdmins', node)
if value is not None and 'emailAdmins' not in already_processed:
already_processed.add('emailAdmins')
if value in ('true', '1'):
self.emailAdmins = True
elif value in ('false', '0'):
self.emailAdmins = False
else:
raise_parse_error(node, 'Bad boolean attribute')
super(ResultsEmail, self).buildAttributes(node, attrs, already_processed)
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
if nodeName_ == 'email':
class_obj_ = self.get_class_obj_(child_, Value)
obj_ = class_obj_.factory()
obj_.build(child_)
self.email.append(obj_)
obj_.original_tagname_ = 'email'
super(ResultsEmail, self).buildChildren(child_, node, nodeName_, True)
# end class ResultsEmail
class FlowCode(FlowElement):
subclass = None
superclass = FlowElement
def __init__(self, id=None, exceptionReference=None, outlet=None, javascriptCode=None):
self.original_tagname_ = None
super(FlowCode, self).__init__(id, )
self.exceptionReference = _cast(None, exceptionReference)
if outlet is None:
self.outlet = []
else:
self.outlet = outlet
self.javascriptCode = javascriptCode
def factory(*args_, **kwargs_):
if CurrentSubclassModule_ is not None:
subclass = getSubclassFromModule_(
CurrentSubclassModule_, FlowCode)
if subclass is not None:
return subclass(*args_, **kwargs_)
if FlowCode.subclass:
return FlowCode.subclass(*args_, **kwargs_)
else:
return FlowCode(*args_, **kwargs_)
factory = staticmethod(factory)
def get_outlet(self): return self.outlet
def set_outlet(self, outlet): self.outlet = outlet
def add_outlet(self, value): self.outlet.append(value)
def insert_outlet_at(self, index, value): self.outlet.insert(index, value)
def replace_outlet_at(self, index, value): self.outlet[index] = value
def get_javascriptCode(self): return self.javascriptCode
def set_javascriptCode(self, javascriptCode): self.javascriptCode = javascriptCode
def get_exceptionReference(self): return self.exceptionReference
def set_exceptionReference(self, exceptionReference): self.exceptionReference = exceptionReference
def hasContent_(self):
if (
self.outlet or
self.javascriptCode is not None or
super(FlowCode, self).hasContent_()
):
return True
else:
return False
def export(self, outfile, level, namespaceprefix_='', name_='FlowCode', namespacedef_='', pretty_print=True):
imported_ns_def_ = GenerateDSNamespaceDefs_.get('FlowCode')
if imported_ns_def_ is not None:
namespacedef_ = imported_ns_def_
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
if self.original_tagname_ is not None:
name_ = self.original_tagname_
showIndent(outfile, level, pretty_print)
outfile.write('<%s%s%s' % (namespaceprefix_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = set()
self.exportAttributes(outfile, level, already_processed, namespaceprefix_, name_='FlowCode')
if self.hasContent_():
outfile.write('>%s' % (eol_, ))
self.exportChildren(outfile, level + 1, namespaceprefix_='', name_='FlowCode', pretty_print=pretty_print)
showIndent(outfile, level, pretty_print)
outfile.write('</%s%s>%s' % (namespaceprefix_, name_, eol_))
else:
outfile.write('/>%s' % (eol_, ))
def exportAttributes(self, outfile, level, already_processed, namespaceprefix_='', name_='FlowCode'):
super(FlowCode, self).exportAttributes(outfile, level, already_processed, namespaceprefix_, name_='FlowCode')
if self.exceptionReference is not None and 'exceptionReference' not in already_processed:
already_processed.add('exceptionReference')
outfile.write(' exceptionReference=%s' % (self.gds_encode(self.gds_format_string(quote_attrib(self.exceptionReference), input_name='exceptionReference')), ))
def exportChildren(self, outfile, level, namespaceprefix_='', name_='FlowCode', fromsubclass_=False, pretty_print=True):
super(FlowCode, self).exportChildren(outfile, level, namespaceprefix_, name_, True, pretty_print=pretty_print)
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
for outlet_ in self.outlet:
outlet_.export(outfile, level, namespaceprefix_, name_='outlet', pretty_print=pretty_print)
if self.javascriptCode is not None:
self.javascriptCode.export(outfile, level, namespaceprefix_, name_='javascriptCode', pretty_print=pretty_print)
def build(self, node):
already_processed = set()
self.buildAttributes(node, node.attrib, already_processed)
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
return self
def buildAttributes(self, node, attrs, already_processed):
value = find_attr_value_('exceptionReference', node)
if value is not None and 'exceptionReference' not in already_processed:
already_processed.add('exceptionReference')
self.exceptionReference = value
super(FlowCode, self).buildAttributes(node, attrs, already_processed)
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
if nodeName_ == 'outlet':
obj_ = Outlet.factory()
obj_.build(child_)
self.outlet.append(obj_)
obj_.original_tagname_ = 'outlet'
elif nodeName_ == 'javascriptCode':
obj_ = javascriptCodeType.factory()
obj_.build(child_)
self.javascriptCode = obj_
obj_.original_tagname_ = 'javascriptCode'
super(FlowCode, self).buildChildren(child_, node, nodeName_, True)
# end class FlowCode
class Widget(GeneratedsSuper):
subclass = None
superclass = None
def __init__(self, extensiontype_=None):
self.original_tagname_ = None
self.extensiontype_ = extensiontype_
def factory(*args_, **kwargs_):
if CurrentSubclassModule_ is not None:
subclass = getSubclassFromModule_(
CurrentSubclassModule_, Widget)
if subclass is not None:
return subclass(*args_, **kwargs_)
if Widget.subclass:
return Widget.subclass(*args_, **kwargs_)
else:
return Widget(*args_, **kwargs_)
factory = staticmethod(factory)
def get_extensiontype_(self): return self.extensiontype_
def set_extensiontype_(self, extensiontype_): self.extensiontype_ = extensiontype_
def hasContent_(self):
if (
):
return True
else:
return False
def export(self, outfile, level, namespaceprefix_='', name_='Widget', namespacedef_='', pretty_print=True):
imported_ns_def_ = GenerateDSNamespaceDefs_.get('Widget')
if imported_ns_def_ is not None:
namespacedef_ = imported_ns_def_
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
if self.original_tagname_ is not None:
name_ = self.original_tagname_
showIndent(outfile, level, pretty_print)
outfile.write('<%s%s%s' % (namespaceprefix_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = set()
self.exportAttributes(outfile, level, already_processed, namespaceprefix_, name_='Widget')
if self.hasContent_():
outfile.write('>%s' % (eol_, ))
self.exportChildren(outfile, level + 1, namespaceprefix_='', name_='Widget', pretty_print=pretty_print)
outfile.write('</%s%s>%s' % (namespaceprefix_, name_, eol_))
else:
outfile.write('/>%s' % (eol_, ))
def exportAttributes(self, outfile, level, already_processed, namespaceprefix_='', name_='Widget'):
if self.extensiontype_ is not None and 'xsi:type' not in already_processed:
already_processed.add('xsi:type')
outfile.write(' xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"')
outfile.write(' xsi:type="%s"' % self.extensiontype_)
pass
def exportChildren(self, outfile, level, namespaceprefix_='', name_='Widget', fromsubclass_=False, pretty_print=True):
pass
def build(self, node):
already_processed = set()
self.buildAttributes(node, node.attrib, already_processed)
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
return self
def buildAttributes(self, node, attrs, already_processed):
value = find_attr_value_('xsi:type', node)
if value is not None and 'xsi:type' not in already_processed:
already_processed.add('xsi:type')
self.extensiontype_ = value
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
pass
# end class Widget
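
# --- Editor's note: usage sketch, not generated code -----------------------
# The factory()/subclass pattern used by Widget (and every class below) lets
# callers substitute their own classes without editing this module: assigning
# a class to Widget.subclass makes every Widget.factory() call return that
# class instead. A minimal sketch, assuming CurrentSubclassModule_ is None
# (its default); the function is illustrative and only runs when called.
def _example_widget_subclass_hook():
    class TrackedWidget(Widget):
        pass
    Widget.subclass = TrackedWidget
    try:
        widget = Widget.factory()  # now returns a TrackedWidget instance
        assert isinstance(widget, TrackedWidget)
    finally:
        Widget.subclass = None     # restore the default factory behaviour
    return widget
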
class BaseSliderWidget(Widget):
subclass = None
superclass = Widget
def __init__(self, min=None, max=None, step=None, precision=None, unit=None, extensiontype_=None):
self.original_tagname_ = None
super(BaseSliderWidget, self).__init__(extensiontype_, )
self.min = _cast(float, min)
self.max = _cast(float, max)
self.step = _cast(float, step)
self.precision = _cast(int, precision)
self.unit = _cast(None, unit)
self.extensiontype_ = extensiontype_
def factory(*args_, **kwargs_):
if CurrentSubclassModule_ is not None:
subclass = getSubclassFromModule_(
CurrentSubclassModule_, BaseSliderWidget)
if subclass is not None:
return subclass(*args_, **kwargs_)
if BaseSliderWidget.subclass:
return BaseSliderWidget.subclass(*args_, **kwargs_)
else:
return BaseSliderWidget(*args_, **kwargs_)
factory = staticmethod(factory)
def get_min(self): return self.min
def set_min(self, min): self.min = min
def get_max(self): return self.max
def set_max(self, max): self.max = max
def get_step(self): return self.step
def set_step(self, step): self.step = step
def get_precision(self): return self.precision
def set_precision(self, precision): self.precision = precision
def get_unit(self): return self.unit
def set_unit(self, unit): self.unit = unit
def get_extensiontype_(self): return self.extensiontype_
def set_extensiontype_(self, extensiontype_): self.extensiontype_ = extensiontype_
def hasContent_(self):
if (
super(BaseSliderWidget, self).hasContent_()
):
return True
else:
return False
def export(self, outfile, level, namespaceprefix_='', name_='BaseSliderWidget', namespacedef_='', pretty_print=True):
imported_ns_def_ = GenerateDSNamespaceDefs_.get('BaseSliderWidget')
if imported_ns_def_ is not None:
namespacedef_ = imported_ns_def_
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
if self.original_tagname_ is not None:
name_ = self.original_tagname_
showIndent(outfile, level, pretty_print)
outfile.write('<%s%s%s' % (namespaceprefix_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = set()
self.exportAttributes(outfile, level, already_processed, namespaceprefix_, name_='BaseSliderWidget')
if self.hasContent_():
outfile.write('>%s' % (eol_, ))
self.exportChildren(outfile, level + 1, namespaceprefix_='', name_='BaseSliderWidget', pretty_print=pretty_print)
outfile.write('</%s%s>%s' % (namespaceprefix_, name_, eol_))
else:
outfile.write('/>%s' % (eol_, ))
def exportAttributes(self, outfile, level, already_processed, namespaceprefix_='', name_='BaseSliderWidget'):
super(BaseSliderWidget, self).exportAttributes(outfile, level, already_processed, namespaceprefix_, name_='BaseSliderWidget')
if self.min is not None and 'min' not in already_processed:
already_processed.add('min')
outfile.write(' min="%s"' % self.gds_format_float(self.min, input_name='min'))
if self.max is not None and 'max' not in already_processed:
already_processed.add('max')
outfile.write(' max="%s"' % self.gds_format_float(self.max, input_name='max'))
if self.step is not None and 'step' not in already_processed:
already_processed.add('step')
outfile.write(' step="%s"' % self.gds_format_float(self.step, input_name='step'))
if self.precision is not None and 'precision' not in already_processed:
already_processed.add('precision')
outfile.write(' precision="%s"' % self.gds_format_integer(self.precision, input_name='precision'))
if self.unit is not None and 'unit' not in already_processed:
already_processed.add('unit')
outfile.write(' unit=%s' % (self.gds_encode(self.gds_format_string(quote_attrib(self.unit), input_name='unit')), ))
if self.extensiontype_ is not None and 'xsi:type' not in already_processed:
already_processed.add('xsi:type')
outfile.write(' xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"')
outfile.write(' xsi:type="%s"' % self.extensiontype_)
def exportChildren(self, outfile, level, namespaceprefix_='', name_='BaseSliderWidget', fromsubclass_=False, pretty_print=True):
super(BaseSliderWidget, self).exportChildren(outfile, level, namespaceprefix_, name_, True, pretty_print=pretty_print)
pass
def build(self, node):
already_processed = set()
self.buildAttributes(node, node.attrib, already_processed)
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
return self
def buildAttributes(self, node, attrs, already_processed):
value = find_attr_value_('min', node)
if value is not None and 'min' not in already_processed:
already_processed.add('min')
try:
self.min = float(value)
except ValueError as exp:
raise ValueError('Bad float/double attribute (min): %s' % exp)
value = find_attr_value_('max', node)
if value is not None and 'max' not in already_processed:
already_processed.add('max')
try:
self.max = float(value)
except ValueError as exp:
raise ValueError('Bad float/double attribute (max): %s' % exp)
value = find_attr_value_('step', node)
if value is not None and 'step' not in already_processed:
already_processed.add('step')
try:
self.step = float(value)
except ValueError as exp:
raise ValueError('Bad float/double attribute (step): %s' % exp)
value = find_attr_value_('precision', node)
if value is not None and 'precision' not in already_processed:
already_processed.add('precision')
try:
self.precision = int(value)
except ValueError as exp:
raise_parse_error(node, 'Bad integer attribute: %s' % exp)
value = find_attr_value_('unit', node)
if value is not None and 'unit' not in already_processed:
already_processed.add('unit')
self.unit = value
value = find_attr_value_('xsi:type', node)
if value is not None and 'xsi:type' not in already_processed:
already_processed.add('xsi:type')
self.extensiontype_ = value
super(BaseSliderWidget, self).buildAttributes(node, attrs, already_processed)
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
super(BaseSliderWidget, self).buildChildren(child_, node, nodeName_, True)
pass
# end class BaseSliderWidget
class SliderWidget(BaseSliderWidget):
subclass = None
superclass = BaseSliderWidget
def __init__(self, min=None, max=None, step=None, precision=None, unit=None, value=None):
self.original_tagname_ = None
super(SliderWidget, self).__init__(min, max, step, precision, unit, )
self.value = _cast(float, value)
def factory(*args_, **kwargs_):
if CurrentSubclassModule_ is not None:
subclass = getSubclassFromModule_(
CurrentSubclassModule_, SliderWidget)
if subclass is not None:
return subclass(*args_, **kwargs_)
if SliderWidget.subclass:
return SliderWidget.subclass(*args_, **kwargs_)
else:
return SliderWidget(*args_, **kwargs_)
factory = staticmethod(factory)
def get_value(self): return self.value
def set_value(self, value): self.value = value
def hasContent_(self):
if (
super(SliderWidget, self).hasContent_()
):
return True
else:
return False
def export(self, outfile, level, namespaceprefix_='', name_='SliderWidget', namespacedef_='', pretty_print=True):
imported_ns_def_ = GenerateDSNamespaceDefs_.get('SliderWidget')
if imported_ns_def_ is not None:
namespacedef_ = imported_ns_def_
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
if self.original_tagname_ is not None:
name_ = self.original_tagname_
showIndent(outfile, level, pretty_print)
outfile.write('<%s%s%s' % (namespaceprefix_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = set()
self.exportAttributes(outfile, level, already_processed, namespaceprefix_, name_='SliderWidget')
if self.hasContent_():
outfile.write('>%s' % (eol_, ))
self.exportChildren(outfile, level + 1, namespaceprefix_='', name_='SliderWidget', pretty_print=pretty_print)
outfile.write('</%s%s>%s' % (namespaceprefix_, name_, eol_))
else:
outfile.write('/>%s' % (eol_, ))
def exportAttributes(self, outfile, level, already_processed, namespaceprefix_='', name_='SliderWidget'):
super(SliderWidget, self).exportAttributes(outfile, level, already_processed, namespaceprefix_, name_='SliderWidget')
if self.value is not None and 'value' not in already_processed:
already_processed.add('value')
outfile.write(' value="%s"' % self.gds_format_float(self.value, input_name='value'))
def exportChildren(self, outfile, level, namespaceprefix_='', name_='SliderWidget', fromsubclass_=False, pretty_print=True):
super(SliderWidget, self).exportChildren(outfile, level, namespaceprefix_, name_, True, pretty_print=pretty_print)
pass
def build(self, node):
already_processed = set()
self.buildAttributes(node, node.attrib, already_processed)
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
return self
def buildAttributes(self, node, attrs, already_processed):
value = find_attr_value_('value', node)
if value is not None and 'value' not in already_processed:
already_processed.add('value')
try:
self.value = float(value)
except ValueError as exp:
raise ValueError('Bad float/double attribute (value): %s' % exp)
super(SliderWidget, self).buildAttributes(node, attrs, already_processed)
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
super(SliderWidget, self).buildChildren(child_, node, nodeName_, True)
pass
# end class SliderWidget
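
# --- Editor's note: usage sketch, not generated code -----------------------
# Serialising a widget: export() writes the opening tag, the attributes
# emitted by exportAttributes(), and a self-closing '/>' when hasContent_()
# is false. The exact float formatting comes from gds_format_float() in
# GeneratedsSuper, so the output shown below is indicative only.
# _ExampleWriter is a tiny write()-collector so the sketch does not depend on
# any particular file-like implementation.
class _ExampleWriter(object):
    """Collect the chunks passed to write() and join them on demand."""
    def __init__(self):
        self._chunks = []
    def write(self, chunk):
        self._chunks.append(chunk)
    def getvalue(self):
        return ''.join(self._chunks)

def _example_export_slider_widget():
    slider = SliderWidget(min=0.0, max=10.0, step=0.5, precision=1, value=2.5)
    buf = _ExampleWriter()
    slider.export(buf, 0, name_='sliderWidget')
    # Indicatively:
    # <sliderWidget min="0.0" max="10.0" step="0.5" precision="1" value="2.5"/>
    return buf.getvalue()
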
class RangeSliderWidget(BaseSliderWidget):
subclass = None
superclass = BaseSliderWidget
def __init__(self, min=None, max=None, step=None, precision=None, unit=None, lowValue=None, highValue=None):
self.original_tagname_ = None
super(RangeSliderWidget, self).__init__(min, max, step, precision, unit, )
self.lowValue = _cast(float, lowValue)
self.highValue = _cast(float, highValue)
def factory(*args_, **kwargs_):
if CurrentSubclassModule_ is not None:
subclass = getSubclassFromModule_(
CurrentSubclassModule_, RangeSliderWidget)
if subclass is not None:
return subclass(*args_, **kwargs_)
if RangeSliderWidget.subclass:
return RangeSliderWidget.subclass(*args_, **kwargs_)
else:
return RangeSliderWidget(*args_, **kwargs_)
factory = staticmethod(factory)
def get_lowValue(self): return self.lowValue
def set_lowValue(self, lowValue): self.lowValue = lowValue
def get_highValue(self): return self.highValue
def set_highValue(self, highValue): self.highValue = highValue
def hasContent_(self):
if (
super(RangeSliderWidget, self).hasContent_()
):
return True
else:
return False
def export(self, outfile, level, namespaceprefix_='', name_='RangeSliderWidget', namespacedef_='', pretty_print=True):
imported_ns_def_ = GenerateDSNamespaceDefs_.get('RangeSliderWidget')
if imported_ns_def_ is not None:
namespacedef_ = imported_ns_def_
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
if self.original_tagname_ is not None:
name_ = self.original_tagname_
showIndent(outfile, level, pretty_print)
outfile.write('<%s%s%s' % (namespaceprefix_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = set()
self.exportAttributes(outfile, level, already_processed, namespaceprefix_, name_='RangeSliderWidget')
if self.hasContent_():
outfile.write('>%s' % (eol_, ))
self.exportChildren(outfile, level + 1, namespaceprefix_='', name_='RangeSliderWidget', pretty_print=pretty_print)
outfile.write('</%s%s>%s' % (namespaceprefix_, name_, eol_))
else:
outfile.write('/>%s' % (eol_, ))
def exportAttributes(self, outfile, level, already_processed, namespaceprefix_='', name_='RangeSliderWidget'):
super(RangeSliderWidget, self).exportAttributes(outfile, level, already_processed, namespaceprefix_, name_='RangeSliderWidget')
if self.lowValue is not None and 'lowValue' not in already_processed:
already_processed.add('lowValue')
outfile.write(' lowValue="%s"' % self.gds_format_float(self.lowValue, input_name='lowValue'))
if self.highValue is not None and 'highValue' not in already_processed:
already_processed.add('highValue')
outfile.write(' highValue="%s"' % self.gds_format_float(self.highValue, input_name='highValue'))
def exportChildren(self, outfile, level, namespaceprefix_='', name_='RangeSliderWidget', fromsubclass_=False, pretty_print=True):
super(RangeSliderWidget, self).exportChildren(outfile, level, namespaceprefix_, name_, True, pretty_print=pretty_print)
pass
def build(self, node):
already_processed = set()
self.buildAttributes(node, node.attrib, already_processed)
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
return self
def buildAttributes(self, node, attrs, already_processed):
value = find_attr_value_('lowValue', node)
if value is not None and 'lowValue' not in already_processed:
already_processed.add('lowValue')
try:
self.lowValue = float(value)
except ValueError as exp:
raise ValueError('Bad float/double attribute (lowValue): %s' % exp)
value = find_attr_value_('highValue', node)
if value is not None and 'highValue' not in already_processed:
already_processed.add('highValue')
try:
self.highValue = float(value)
except ValueError as exp:
raise ValueError('Bad float/double attribute (highValue): %s' % exp)
super(RangeSliderWidget, self).buildAttributes(node, attrs, already_processed)
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
super(RangeSliderWidget, self).buildChildren(child_, node, nodeName_, True)
pass
# end class RangeSliderWidget
class PhotoUploadWidget(Widget):
subclass = None
superclass = Widget
def __init__(self, quality=None, gallery=None, camera=None, ratio=None):
self.original_tagname_ = None
super(PhotoUploadWidget, self).__init__()
self.quality = _cast(None, quality)
self.gallery = _cast(bool, gallery)
self.camera = _cast(bool, camera)
self.ratio = _cast(None, ratio)
def factory(*args_, **kwargs_):
if CurrentSubclassModule_ is not None:
subclass = getSubclassFromModule_(
CurrentSubclassModule_, PhotoUploadWidget)
if subclass is not None:
return subclass(*args_, **kwargs_)
if PhotoUploadWidget.subclass:
return PhotoUploadWidget.subclass(*args_, **kwargs_)
else:
return PhotoUploadWidget(*args_, **kwargs_)
factory = staticmethod(factory)
def get_quality(self): return self.quality
def set_quality(self, quality): self.quality = quality
def get_gallery(self): return self.gallery
def set_gallery(self, gallery): self.gallery = gallery
def get_camera(self): return self.camera
def set_camera(self, camera): self.camera = camera
def get_ratio(self): return self.ratio
def set_ratio(self, ratio): self.ratio = ratio
def hasContent_(self):
if (
super(PhotoUploadWidget, self).hasContent_()
):
return True
else:
return False
def export(self, outfile, level, namespaceprefix_='', name_='PhotoUploadWidget', namespacedef_='', pretty_print=True):
imported_ns_def_ = GenerateDSNamespaceDefs_.get('PhotoUploadWidget')
if imported_ns_def_ is not None:
namespacedef_ = imported_ns_def_
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
if self.original_tagname_ is not None:
name_ = self.original_tagname_
showIndent(outfile, level, pretty_print)
outfile.write('<%s%s%s' % (namespaceprefix_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = set()
self.exportAttributes(outfile, level, already_processed, namespaceprefix_, name_='PhotoUploadWidget')
if self.hasContent_():
outfile.write('>%s' % (eol_, ))
self.exportChildren(outfile, level + 1, namespaceprefix_='', name_='PhotoUploadWidget', pretty_print=pretty_print)
outfile.write('</%s%s>%s' % (namespaceprefix_, name_, eol_))
else:
outfile.write('/>%s' % (eol_, ))
def exportAttributes(self, outfile, level, already_processed, namespaceprefix_='', name_='PhotoUploadWidget'):
super(PhotoUploadWidget, self).exportAttributes(outfile, level, already_processed, namespaceprefix_, name_='PhotoUploadWidget')
if self.quality is not None and 'quality' not in already_processed:
already_processed.add('quality')
outfile.write(' quality=%s' % (self.gds_encode(self.gds_format_string(quote_attrib(self.quality), input_name='quality')), ))
if self.gallery is not None and 'gallery' not in already_processed:
already_processed.add('gallery')
outfile.write(' gallery="%s"' % self.gds_format_boolean(self.gallery, input_name='gallery'))
if self.camera is not None and 'camera' not in already_processed:
already_processed.add('camera')
outfile.write(' camera="%s"' % self.gds_format_boolean(self.camera, input_name='camera'))
if self.ratio is not None and 'ratio' not in already_processed:
already_processed.add('ratio')
outfile.write(' ratio=%s' % (self.gds_encode(self.gds_format_string(quote_attrib(self.ratio), input_name='ratio')), ))
def exportChildren(self, outfile, level, namespaceprefix_='', name_='PhotoUploadWidget', fromsubclass_=False, pretty_print=True):
super(PhotoUploadWidget, self).exportChildren(outfile, level, namespaceprefix_, name_, True, pretty_print=pretty_print)
pass
def build(self, node):
already_processed = set()
self.buildAttributes(node, node.attrib, already_processed)
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
return self
def buildAttributes(self, node, attrs, already_processed):
value = find_attr_value_('quality', node)
if value is not None and 'quality' not in already_processed:
already_processed.add('quality')
self.quality = value
value = find_attr_value_('gallery', node)
if value is not None and 'gallery' not in already_processed:
already_processed.add('gallery')
if value in ('true', '1'):
self.gallery = True
elif value in ('false', '0'):
self.gallery = False
else:
raise_parse_error(node, 'Bad boolean attribute')
value = find_attr_value_('camera', node)
if value is not None and 'camera' not in already_processed:
already_processed.add('camera')
if value in ('true', '1'):
self.camera = True
elif value in ('false', '0'):
self.camera = False
else:
raise_parse_error(node, 'Bad boolean attribute')
value = find_attr_value_('ratio', node)
if value is not None and 'ratio' not in already_processed:
already_processed.add('ratio')
self.ratio = value
super(PhotoUploadWidget, self).buildAttributes(node, attrs, already_processed)
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
super(PhotoUploadWidget, self).buildChildren(child_, node, nodeName_, True)
pass
# end class PhotoUploadWidget
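
# --- Editor's note: usage sketch, not generated code -----------------------
# Boolean attributes round-trip through the XSD literals: buildAttributes()
# accepts 'true'/'1' and 'false'/'0', and gds_format_boolean() renders Python
# booleans back as 'true'/'false' on export. A minimal export sketch, reusing
# _ExampleWriter from above:
def _example_export_photo_upload_widget():
    widget = PhotoUploadWidget(quality='high', gallery=True, camera=False, ratio='4:3')
    buf = _ExampleWriter()
    widget.export(buf, 0, name_='photoUploadWidget')
    # Indicatively:
    # <photoUploadWidget quality="high" gallery="true" camera="false" ratio="4:3"/>
    return buf.getvalue()
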
class GPSLocationWidget(Widget):
subclass = None
superclass = Widget
def __init__(self, gps=None):
self.original_tagname_ = None
super(GPSLocationWidget, self).__init__()
self.gps = _cast(bool, gps)
def factory(*args_, **kwargs_):
if CurrentSubclassModule_ is not None:
subclass = getSubclassFromModule_(
CurrentSubclassModule_, GPSLocationWidget)
if subclass is not None:
return subclass(*args_, **kwargs_)
if GPSLocationWidget.subclass:
return GPSLocationWidget.subclass(*args_, **kwargs_)
else:
return GPSLocationWidget(*args_, **kwargs_)
factory = staticmethod(factory)
def get_gps(self): return self.gps
def set_gps(self, gps): self.gps = gps
def hasContent_(self):
if (
super(GPSLocationWidget, self).hasContent_()
):
return True
else:
return False
def export(self, outfile, level, namespaceprefix_='', name_='GPSLocationWidget', namespacedef_='', pretty_print=True):
imported_ns_def_ = GenerateDSNamespaceDefs_.get('GPSLocationWidget')
if imported_ns_def_ is not None:
namespacedef_ = imported_ns_def_
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
if self.original_tagname_ is not None:
name_ = self.original_tagname_
showIndent(outfile, level, pretty_print)
outfile.write('<%s%s%s' % (namespaceprefix_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = set()
self.exportAttributes(outfile, level, already_processed, namespaceprefix_, name_='GPSLocationWidget')
if self.hasContent_():
outfile.write('>%s' % (eol_, ))
self.exportChildren(outfile, level + 1, namespaceprefix_='', name_='GPSLocationWidget', pretty_print=pretty_print)
outfile.write('</%s%s>%s' % (namespaceprefix_, name_, eol_))
else:
outfile.write('/>%s' % (eol_, ))
def exportAttributes(self, outfile, level, already_processed, namespaceprefix_='', name_='GPSLocationWidget'):
super(GPSLocationWidget, self).exportAttributes(outfile, level, already_processed, namespaceprefix_, name_='GPSLocationWidget')
if self.gps is not None and 'gps' not in already_processed:
already_processed.add('gps')
outfile.write(' gps="%s"' % self.gds_format_boolean(self.gps, input_name='gps'))
def exportChildren(self, outfile, level, namespaceprefix_='', name_='GPSLocationWidget', fromsubclass_=False, pretty_print=True):
super(GPSLocationWidget, self).exportChildren(outfile, level, namespaceprefix_, name_, True, pretty_print=pretty_print)
pass
def build(self, node):
already_processed = set()
self.buildAttributes(node, node.attrib, already_processed)
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
return self
def buildAttributes(self, node, attrs, already_processed):
value = find_attr_value_('gps', node)
if value is not None and 'gps' not in already_processed:
already_processed.add('gps')
if value in ('true', '1'):
self.gps = True
elif value in ('false', '0'):
self.gps = False
else:
raise_parse_error(node, 'Bad boolean attribute')
super(GPSLocationWidget, self).buildAttributes(node, attrs, already_processed)
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
super(GPSLocationWidget, self).buildChildren(child_, node, nodeName_, True)
pass
# end class GPSLocationWidget
class TextWidget(Widget):
subclass = None
superclass = Widget
def __init__(self, maxChars=None, placeholder=None, value=None, keyboardType=None, extensiontype_=None):
self.original_tagname_ = None
super(TextWidget, self).__init__(extensiontype_, )
self.maxChars = _cast(int, maxChars)
self.placeholder = _cast(None, placeholder)
self.value = _cast(None, value)
self.keyboardType = _cast(None, keyboardType)
self.extensiontype_ = extensiontype_
def factory(*args_, **kwargs_):
if CurrentSubclassModule_ is not None:
subclass = getSubclassFromModule_(
CurrentSubclassModule_, TextWidget)
if subclass is not None:
return subclass(*args_, **kwargs_)
if TextWidget.subclass:
return TextWidget.subclass(*args_, **kwargs_)
else:
return TextWidget(*args_, **kwargs_)
factory = staticmethod(factory)
def get_maxChars(self): return self.maxChars
def set_maxChars(self, maxChars): self.maxChars = maxChars
def get_placeholder(self): return self.placeholder
def set_placeholder(self, placeholder): self.placeholder = placeholder
def get_value(self): return self.value
def set_value(self, value): self.value = value
def get_keyboardType(self): return self.keyboardType
def set_keyboardType(self, keyboardType): self.keyboardType = keyboardType
def get_extensiontype_(self): return self.extensiontype_
def set_extensiontype_(self, extensiontype_): self.extensiontype_ = extensiontype_
    def validate_KeyboardType(self, value):
        # Validate type KeyboardType, a restriction on xs:string.
        if value is not None and Validate_simpletypes_:
            value = str(value)
            enumerations = ['DEFAULT', 'AUTO_CAPITALIZED', 'EMAIL', 'URL', 'PHONE', 'NUMBER', 'DECIMAL', 'PASSWORD', 'NUMBER_PASSWORD']
            if value not in enumerations:
                warnings_.warn('Value "%s" does not match xsd enumeration restriction on KeyboardType' % (value.encode('utf-8'), ))
def hasContent_(self):
if (
super(TextWidget, self).hasContent_()
):
return True
else:
return False
def export(self, outfile, level, namespaceprefix_='', name_='TextWidget', namespacedef_='', pretty_print=True):
imported_ns_def_ = GenerateDSNamespaceDefs_.get('TextWidget')
if imported_ns_def_ is not None:
namespacedef_ = imported_ns_def_
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
if self.original_tagname_ is not None:
name_ = self.original_tagname_
showIndent(outfile, level, pretty_print)
outfile.write('<%s%s%s' % (namespaceprefix_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = set()
self.exportAttributes(outfile, level, already_processed, namespaceprefix_, name_='TextWidget')
if self.hasContent_():
outfile.write('>%s' % (eol_, ))
self.exportChildren(outfile, level + 1, namespaceprefix_='', name_='TextWidget', pretty_print=pretty_print)
outfile.write('</%s%s>%s' % (namespaceprefix_, name_, eol_))
else:
outfile.write('/>%s' % (eol_, ))
def exportAttributes(self, outfile, level, already_processed, namespaceprefix_='', name_='TextWidget'):
super(TextWidget, self).exportAttributes(outfile, level, already_processed, namespaceprefix_, name_='TextWidget')
if self.maxChars is not None and 'maxChars' not in already_processed:
already_processed.add('maxChars')
outfile.write(' maxChars="%s"' % self.gds_format_integer(self.maxChars, input_name='maxChars'))
if self.placeholder is not None and 'placeholder' not in already_processed:
already_processed.add('placeholder')
outfile.write(' placeholder=%s' % (self.gds_encode(self.gds_format_string(quote_attrib(self.placeholder), input_name='placeholder')), ))
if self.value is not None and 'value' not in already_processed:
already_processed.add('value')
outfile.write(' value=%s' % (self.gds_encode(self.gds_format_string(quote_attrib(self.value), input_name='value')), ))
if self.keyboardType is not None and 'keyboardType' not in already_processed:
already_processed.add('keyboardType')
outfile.write(' keyboardType=%s' % (quote_attrib(self.keyboardType), ))
if self.extensiontype_ is not None and 'xsi:type' not in already_processed:
already_processed.add('xsi:type')
outfile.write(' xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"')
outfile.write(' xsi:type="%s"' % self.extensiontype_)
def exportChildren(self, outfile, level, namespaceprefix_='', name_='TextWidget', fromsubclass_=False, pretty_print=True):
super(TextWidget, self).exportChildren(outfile, level, namespaceprefix_, name_, True, pretty_print=pretty_print)
pass
def build(self, node):
already_processed = set()
self.buildAttributes(node, node.attrib, already_processed)
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
return self
def buildAttributes(self, node, attrs, already_processed):
value = find_attr_value_('maxChars', node)
if value is not None and 'maxChars' not in already_processed:
already_processed.add('maxChars')
try:
self.maxChars = int(value)
except ValueError as exp:
raise_parse_error(node, 'Bad integer attribute: %s' % exp)
value = find_attr_value_('placeholder', node)
if value is not None and 'placeholder' not in already_processed:
already_processed.add('placeholder')
self.placeholder = value
value = find_attr_value_('value', node)
if value is not None and 'value' not in already_processed:
already_processed.add('value')
self.value = value
value = find_attr_value_('keyboardType', node)
if value is not None and 'keyboardType' not in already_processed:
already_processed.add('keyboardType')
self.keyboardType = value
self.validate_KeyboardType(self.keyboardType) # validate type KeyboardType
value = find_attr_value_('xsi:type', node)
if value is not None and 'xsi:type' not in already_processed:
already_processed.add('xsi:type')
self.extensiontype_ = value
super(TextWidget, self).buildAttributes(node, attrs, already_processed)
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
super(TextWidget, self).buildChildren(child_, node, nodeName_, True)
pass
# end class TextWidget
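
# --- Editor's note: usage sketch, not generated code -----------------------
# validate_KeyboardType() is advisory rather than strict: an out-of-range
# value is kept on the instance, and a warning is emitted instead of an
# exception (provided the module-level Validate_simpletypes_ flag is
# enabled). A minimal sketch:
def _example_keyboard_type_validation():
    widget = TextWidget(maxChars=80, placeholder='email address')
    widget.set_keyboardType('EMAIL')
    widget.validate_KeyboardType(widget.get_keyboardType())  # valid: silent
    widget.validate_KeyboardType('QWERTY')  # not in the enumeration: warns
    return widget
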
class TextLineWidget(TextWidget):
subclass = None
superclass = TextWidget
def __init__(self, maxChars=None, placeholder=None, value=None, keyboardType=None):
self.original_tagname_ = None
super(TextLineWidget, self).__init__(maxChars, placeholder, value, keyboardType, )
def factory(*args_, **kwargs_):
if CurrentSubclassModule_ is not None:
subclass = getSubclassFromModule_(
CurrentSubclassModule_, TextLineWidget)
if subclass is not None:
return subclass(*args_, **kwargs_)
if TextLineWidget.subclass:
return TextLineWidget.subclass(*args_, **kwargs_)
else:
return TextLineWidget(*args_, **kwargs_)
factory = staticmethod(factory)
def hasContent_(self):
if (
super(TextLineWidget, self).hasContent_()
):
return True
else:
return False
def export(self, outfile, level, namespaceprefix_='', name_='TextLineWidget', namespacedef_='', pretty_print=True):
imported_ns_def_ = GenerateDSNamespaceDefs_.get('TextLineWidget')
if imported_ns_def_ is not None:
namespacedef_ = imported_ns_def_
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
if self.original_tagname_ is not None:
name_ = self.original_tagname_
showIndent(outfile, level, pretty_print)
outfile.write('<%s%s%s' % (namespaceprefix_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = set()
self.exportAttributes(outfile, level, already_processed, namespaceprefix_, name_='TextLineWidget')
if self.hasContent_():
outfile.write('>%s' % (eol_, ))
self.exportChildren(outfile, level + 1, namespaceprefix_='', name_='TextLineWidget', pretty_print=pretty_print)
outfile.write('</%s%s>%s' % (namespaceprefix_, name_, eol_))
else:
outfile.write('/>%s' % (eol_, ))
def exportAttributes(self, outfile, level, already_processed, namespaceprefix_='', name_='TextLineWidget'):
super(TextLineWidget, self).exportAttributes(outfile, level, already_processed, namespaceprefix_, name_='TextLineWidget')
def exportChildren(self, outfile, level, namespaceprefix_='', name_='TextLineWidget', fromsubclass_=False, pretty_print=True):
super(TextLineWidget, self).exportChildren(outfile, level, namespaceprefix_, name_, True, pretty_print=pretty_print)
pass
def build(self, node):
already_processed = set()
self.buildAttributes(node, node.attrib, already_processed)
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
return self
def buildAttributes(self, node, attrs, already_processed):
super(TextLineWidget, self).buildAttributes(node, attrs, already_processed)
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
super(TextLineWidget, self).buildChildren(child_, node, nodeName_, True)
pass
# end class TextLineWidget
class TextBlockWidget(TextWidget):
subclass = None
superclass = TextWidget
def __init__(self, maxChars=None, placeholder=None, value=None, keyboardType=None):
self.original_tagname_ = None
super(TextBlockWidget, self).__init__(maxChars, placeholder, value, keyboardType, )
def factory(*args_, **kwargs_):
if CurrentSubclassModule_ is not None:
subclass = getSubclassFromModule_(
CurrentSubclassModule_, TextBlockWidget)
if subclass is not None:
return subclass(*args_, **kwargs_)
if TextBlockWidget.subclass:
return TextBlockWidget.subclass(*args_, **kwargs_)
else:
return TextBlockWidget(*args_, **kwargs_)
factory = staticmethod(factory)
def hasContent_(self):
if (
super(TextBlockWidget, self).hasContent_()
):
return True
else:
return False
def export(self, outfile, level, namespaceprefix_='', name_='TextBlockWidget', namespacedef_='', pretty_print=True):
imported_ns_def_ = GenerateDSNamespaceDefs_.get('TextBlockWidget')
if imported_ns_def_ is not None:
namespacedef_ = imported_ns_def_
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
if self.original_tagname_ is not None:
name_ = self.original_tagname_
showIndent(outfile, level, pretty_print)
outfile.write('<%s%s%s' % (namespaceprefix_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = set()
self.exportAttributes(outfile, level, already_processed, namespaceprefix_, name_='TextBlockWidget')
if self.hasContent_():
outfile.write('>%s' % (eol_, ))
self.exportChildren(outfile, level + 1, namespaceprefix_='', name_='TextBlockWidget', pretty_print=pretty_print)
outfile.write('</%s%s>%s' % (namespaceprefix_, name_, eol_))
else:
outfile.write('/>%s' % (eol_, ))
def exportAttributes(self, outfile, level, already_processed, namespaceprefix_='', name_='TextBlockWidget'):
super(TextBlockWidget, self).exportAttributes(outfile, level, already_processed, namespaceprefix_, name_='TextBlockWidget')
def exportChildren(self, outfile, level, namespaceprefix_='', name_='TextBlockWidget', fromsubclass_=False, pretty_print=True):
super(TextBlockWidget, self).exportChildren(outfile, level, namespaceprefix_, name_, True, pretty_print=pretty_print)
pass
def build(self, node):
already_processed = set()
self.buildAttributes(node, node.attrib, already_processed)
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
return self
def buildAttributes(self, node, attrs, already_processed):
super(TextBlockWidget, self).buildAttributes(node, attrs, already_processed)
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
super(TextBlockWidget, self).buildChildren(child_, node, nodeName_, True)
pass
# end class TextBlockWidget
class Value(GeneratedsSuper):
subclass = None
superclass = None
def __init__(self, value=None, extensiontype_=None):
self.original_tagname_ = None
self.value = _cast(None, value)
self.extensiontype_ = extensiontype_
def factory(*args_, **kwargs_):
if CurrentSubclassModule_ is not None:
subclass = getSubclassFromModule_(
CurrentSubclassModule_, Value)
if subclass is not None:
return subclass(*args_, **kwargs_)
if Value.subclass:
return Value.subclass(*args_, **kwargs_)
else:
return Value(*args_, **kwargs_)
factory = staticmethod(factory)
def get_value(self): return self.value
def set_value(self, value): self.value = value
def get_extensiontype_(self): return self.extensiontype_
def set_extensiontype_(self, extensiontype_): self.extensiontype_ = extensiontype_
    def hasContent_(self):
        # Value carries everything in attributes and defines no child
        # elements, so there is never element content.
        return False
def export(self, outfile, level, namespaceprefix_='', name_='Value', namespacedef_='', pretty_print=True):
imported_ns_def_ = GenerateDSNamespaceDefs_.get('Value')
if imported_ns_def_ is not None:
namespacedef_ = imported_ns_def_
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
if self.original_tagname_ is not None:
name_ = self.original_tagname_
showIndent(outfile, level, pretty_print)
outfile.write('<%s%s%s' % (namespaceprefix_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = set()
self.exportAttributes(outfile, level, already_processed, namespaceprefix_, name_='Value')
if self.hasContent_():
outfile.write('>%s' % (eol_, ))
self.exportChildren(outfile, level + 1, namespaceprefix_='', name_='Value', pretty_print=pretty_print)
outfile.write('</%s%s>%s' % (namespaceprefix_, name_, eol_))
else:
outfile.write('/>%s' % (eol_, ))
def exportAttributes(self, outfile, level, already_processed, namespaceprefix_='', name_='Value'):
if self.value is not None and 'value' not in already_processed:
already_processed.add('value')
outfile.write(' value=%s' % (self.gds_encode(self.gds_format_string(quote_attrib(self.value), input_name='value')), ))
if self.extensiontype_ is not None and 'xsi:type' not in already_processed:
already_processed.add('xsi:type')
outfile.write(' xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"')
outfile.write(' xsi:type="%s"' % self.extensiontype_)
def exportChildren(self, outfile, level, namespaceprefix_='', name_='Value', fromsubclass_=False, pretty_print=True):
pass
def build(self, node):
already_processed = set()
self.buildAttributes(node, node.attrib, already_processed)
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
return self
def buildAttributes(self, node, attrs, already_processed):
value = find_attr_value_('value', node)
if value is not None and 'value' not in already_processed:
already_processed.add('value')
self.value = value
value = find_attr_value_('xsi:type', node)
if value is not None and 'xsi:type' not in already_processed:
already_processed.add('xsi:type')
self.extensiontype_ = value
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
pass
# end class Value
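
# --- Editor's note: usage sketch, not generated code -----------------------
# extensiontype_ maps to xsi:type, the mechanism by which an element declared
# with a base type announces a derived type. Setting it makes export() emit
# both the xsi namespace declaration and the xsi:type attribute. Reuses
# _ExampleWriter from above:
def _example_export_value_with_xsi_type():
    v = Value(value='42', extensiontype_='FloatValue')
    buf = _ExampleWriter()
    v.export(buf, 0, name_='value')
    # Indicatively:
    # <value value="42"
    #        xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
    #        xsi:type="FloatValue"/>
    return buf.getvalue()
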
class FloatValue(GeneratedsSuper):
subclass = None
superclass = None
def __init__(self, value=None):
self.original_tagname_ = None
self.value = _cast(float, value)
def factory(*args_, **kwargs_):
if CurrentSubclassModule_ is not None:
subclass = getSubclassFromModule_(
CurrentSubclassModule_, FloatValue)
if subclass is not None:
return subclass(*args_, **kwargs_)
if FloatValue.subclass:
return FloatValue.subclass(*args_, **kwargs_)
else:
return FloatValue(*args_, **kwargs_)
factory = staticmethod(factory)
def get_value(self): return self.value
def set_value(self, value): self.value = value
    def hasContent_(self):
        # FloatValue carries its value as an attribute and defines no child
        # elements, so there is never element content.
        return False
def export(self, outfile, level, namespaceprefix_='', name_='FloatValue', namespacedef_='', pretty_print=True):
imported_ns_def_ = GenerateDSNamespaceDefs_.get('FloatValue')
if imported_ns_def_ is not None:
namespacedef_ = imported_ns_def_
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
if self.original_tagname_ is not None:
name_ = self.original_tagname_
showIndent(outfile, level, pretty_print)
outfile.write('<%s%s%s' % (namespaceprefix_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = set()
self.exportAttributes(outfile, level, already_processed, namespaceprefix_, name_='FloatValue')
if self.hasContent_():
outfile.write('>%s' % (eol_, ))
self.exportChildren(outfile, level + 1, namespaceprefix_='', name_='FloatValue', pretty_print=pretty_print)
outfile.write('</%s%s>%s' % (namespaceprefix_, name_, eol_))
else:
outfile.write('/>%s' % (eol_, ))
def exportAttributes(self, outfile, level, already_processed, namespaceprefix_='', name_='FloatValue'):
if self.value is not None and 'value' not in already_processed:
already_processed.add('value')
outfile.write(' value="%s"' % self.gds_format_float(self.value, input_name='value'))
def exportChildren(self, outfile, level, namespaceprefix_='', name_='FloatValue', fromsubclass_=False, pretty_print=True):
pass
def build(self, node):
already_processed = set()
self.buildAttributes(node, node.attrib, already_processed)
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
return self
def buildAttributes(self, node, attrs, already_processed):
value = find_attr_value_('value', node)
if value is not None and 'value' not in already_processed:
already_processed.add('value')
try:
self.value = float(value)
except ValueError as exp:
raise ValueError('Bad float/double attribute (value): %s' % exp)
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
pass
# end class FloatValue
class AdvancedOrderCategory(FlowElement):
subclass = None
superclass = FlowElement
def __init__(self, id=None, name=None, item=None):
self.original_tagname_ = None
super(AdvancedOrderCategory, self).__init__(id, )
self.name = name
if item is None:
self.item = []
else:
self.item = item
def factory(*args_, **kwargs_):
if CurrentSubclassModule_ is not None:
subclass = getSubclassFromModule_(
CurrentSubclassModule_, AdvancedOrderCategory)
if subclass is not None:
return subclass(*args_, **kwargs_)
if AdvancedOrderCategory.subclass:
return AdvancedOrderCategory.subclass(*args_, **kwargs_)
else:
return AdvancedOrderCategory(*args_, **kwargs_)
factory = staticmethod(factory)
def get_name(self): return self.name
def set_name(self, name): self.name = name
def get_item(self): return self.item
def set_item(self, item): self.item = item
def add_item(self, value): self.item.append(value)
def insert_item_at(self, index, value): self.item.insert(index, value)
def replace_item_at(self, index, value): self.item[index] = value
def hasContent_(self):
if (
self.name is not None or
self.item or
super(AdvancedOrderCategory, self).hasContent_()
):
return True
else:
return False
def export(self, outfile, level, namespaceprefix_='', name_='AdvancedOrderCategory', namespacedef_='', pretty_print=True):
imported_ns_def_ = GenerateDSNamespaceDefs_.get('AdvancedOrderCategory')
if imported_ns_def_ is not None:
namespacedef_ = imported_ns_def_
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
if self.original_tagname_ is not None:
name_ = self.original_tagname_
showIndent(outfile, level, pretty_print)
outfile.write('<%s%s%s' % (namespaceprefix_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = set()
self.exportAttributes(outfile, level, already_processed, namespaceprefix_, name_='AdvancedOrderCategory')
if self.hasContent_():
outfile.write('>%s' % (eol_, ))
self.exportChildren(outfile, level + 1, namespaceprefix_='', name_='AdvancedOrderCategory', pretty_print=pretty_print)
showIndent(outfile, level, pretty_print)
outfile.write('</%s%s>%s' % (namespaceprefix_, name_, eol_))
else:
outfile.write('/>%s' % (eol_, ))
def exportAttributes(self, outfile, level, already_processed, namespaceprefix_='', name_='AdvancedOrderCategory'):
super(AdvancedOrderCategory, self).exportAttributes(outfile, level, already_processed, namespaceprefix_, name_='AdvancedOrderCategory')
def exportChildren(self, outfile, level, namespaceprefix_='', name_='AdvancedOrderCategory', fromsubclass_=False, pretty_print=True):
super(AdvancedOrderCategory, self).exportChildren(outfile, level, namespaceprefix_, name_, True, pretty_print=pretty_print)
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
if self.name is not None:
showIndent(outfile, level, pretty_print)
outfile.write('<name>%s</name>%s' % (self.gds_encode(self.gds_format_string(quote_xml(self.name), input_name='name')), eol_))
for item_ in self.item:
item_.export(outfile, level, namespaceprefix_, name_='item', pretty_print=pretty_print)
def build(self, node):
already_processed = set()
self.buildAttributes(node, node.attrib, already_processed)
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
return self
def buildAttributes(self, node, attrs, already_processed):
super(AdvancedOrderCategory, self).buildAttributes(node, attrs, already_processed)
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
if nodeName_ == 'name':
name_ = child_.text
name_ = self.gds_validate_string(name_, node, 'name')
self.name = name_
elif nodeName_ == 'item':
obj_ = AdvancedOrderItem.factory()
obj_.build(child_)
self.item.append(obj_)
obj_.original_tagname_ = 'item'
super(AdvancedOrderCategory, self).buildChildren(child_, node, nodeName_, True)
# end class AdvancedOrderCategory
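
# --- Editor's note: usage sketch, not generated code -----------------------
# Nested export: AdvancedOrderCategory.exportChildren() writes the <name>
# element and then delegates to each item's export(), producing nested XML.
# AdvancedOrderItem is defined just below; FlowElement (defined earlier in
# this module) is assumed to contribute the id attribute. Reuses
# _ExampleWriter from above:
def _example_export_order_category():
    item = AdvancedOrderItem(id='item-1', value=2, unit='kg', unitPrice=150,
                             name='Apples', description='Red apples')
    category = AdvancedOrderCategory(id='cat-1', name='Fruit', item=[item])
    buf = _ExampleWriter()
    category.export(buf, 0, name_='category')
    # Indicatively:
    # <category id="cat-1">
    #   <name>Fruit</name>
    #   <item id="item-1" value="2" unit="kg" unitPrice="150">
    #     <name>Apples</name>
    #     <description>Red apples</description>
    #   </item>
    # </category>
    return buf.getvalue()
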
class AdvancedOrderItem(FlowElement):
subclass = None
superclass = FlowElement
def __init__(self, id=None, value=None, unit=None, unitPrice=None, hasPrice=True, step=None, stepUnit=None, stepUnitConversion=None, imageUrl=None, name=None, description=None):
self.original_tagname_ = None
super(AdvancedOrderItem, self).__init__(id, )
self.value = _cast(int, value)
self.unit = _cast(None, unit)
self.unitPrice = _cast(int, unitPrice)
self.hasPrice = _cast(bool, hasPrice)
self.step = _cast(int, step)
self.stepUnit = _cast(None, stepUnit)
self.stepUnitConversion = _cast(int, stepUnitConversion)
self.imageUrl = _cast(None, imageUrl)
self.name = name
self.description = description
def factory(*args_, **kwargs_):
if CurrentSubclassModule_ is not None:
subclass = getSubclassFromModule_(
CurrentSubclassModule_, AdvancedOrderItem)
if subclass is not None:
return subclass(*args_, **kwargs_)
if AdvancedOrderItem.subclass:
return AdvancedOrderItem.subclass(*args_, **kwargs_)
else:
return AdvancedOrderItem(*args_, **kwargs_)
factory = staticmethod(factory)
def get_name(self): return self.name
def set_name(self, name): self.name = name
def get_description(self): return self.description
def set_description(self, description): self.description = description
def get_value(self): return self.value
def set_value(self, value): self.value = value
def get_unit(self): return self.unit
def set_unit(self, unit): self.unit = unit
def get_unitPrice(self): return self.unitPrice
def set_unitPrice(self, unitPrice): self.unitPrice = unitPrice
def get_hasPrice(self): return self.hasPrice
def set_hasPrice(self, hasPrice): self.hasPrice = hasPrice
def get_step(self): return self.step
def set_step(self, step): self.step = step
def get_stepUnit(self): return self.stepUnit
def set_stepUnit(self, stepUnit): self.stepUnit = stepUnit
def get_stepUnitConversion(self): return self.stepUnitConversion
def set_stepUnitConversion(self, stepUnitConversion): self.stepUnitConversion = stepUnitConversion
def get_imageUrl(self): return self.imageUrl
def set_imageUrl(self, imageUrl): self.imageUrl = imageUrl
def hasContent_(self):
if (
self.name is not None or
self.description is not None or
super(AdvancedOrderItem, self).hasContent_()
):
return True
else:
return False
def export(self, outfile, level, namespaceprefix_='', name_='AdvancedOrderItem', namespacedef_='', pretty_print=True):
imported_ns_def_ = GenerateDSNamespaceDefs_.get('AdvancedOrderItem')
if imported_ns_def_ is not None:
namespacedef_ = imported_ns_def_
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
if self.original_tagname_ is not None:
name_ = self.original_tagname_
showIndent(outfile, level, pretty_print)
outfile.write('<%s%s%s' % (namespaceprefix_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = set()
self.exportAttributes(outfile, level, already_processed, namespaceprefix_, name_='AdvancedOrderItem')
if self.hasContent_():
outfile.write('>%s' % (eol_, ))
self.exportChildren(outfile, level + 1, namespaceprefix_='', name_='AdvancedOrderItem', pretty_print=pretty_print)
showIndent(outfile, level, pretty_print)
outfile.write('</%s%s>%s' % (namespaceprefix_, name_, eol_))
else:
outfile.write('/>%s' % (eol_, ))
def exportAttributes(self, outfile, level, already_processed, namespaceprefix_='', name_='AdvancedOrderItem'):
super(AdvancedOrderItem, self).exportAttributes(outfile, level, already_processed, namespaceprefix_, name_='AdvancedOrderItem')
if self.value is not None and 'value' not in already_processed:
already_processed.add('value')
outfile.write(' value="%s"' % self.gds_format_integer(self.value, input_name='value'))
if self.unit is not None and 'unit' not in already_processed:
already_processed.add('unit')
outfile.write(' unit=%s' % (self.gds_encode(self.gds_format_string(quote_attrib(self.unit), input_name='unit')), ))
if self.unitPrice is not None and 'unitPrice' not in already_processed:
already_processed.add('unitPrice')
outfile.write(' unitPrice="%s"' % self.gds_format_integer(self.unitPrice, input_name='unitPrice'))
if not self.hasPrice and 'hasPrice' not in already_processed:
already_processed.add('hasPrice')
outfile.write(' hasPrice="%s"' % self.gds_format_boolean(self.hasPrice, input_name='hasPrice'))
if self.step is not None and 'step' not in already_processed:
already_processed.add('step')
outfile.write(' step="%s"' % self.gds_format_integer(self.step, input_name='step'))
if self.stepUnit is not None and 'stepUnit' not in already_processed:
already_processed.add('stepUnit')
outfile.write(' stepUnit=%s' % (self.gds_encode(self.gds_format_string(quote_attrib(self.stepUnit), input_name='stepUnit')), ))
if self.stepUnitConversion is not None and 'stepUnitConversion' not in already_processed:
already_processed.add('stepUnitConversion')
outfile.write(' stepUnitConversion="%s"' % self.gds_format_integer(self.stepUnitConversion, input_name='stepUnitConversion'))
if self.imageUrl is not None and 'imageUrl' not in already_processed:
already_processed.add('imageUrl')
outfile.write(' imageUrl=%s' % (self.gds_encode(self.gds_format_string(quote_attrib(self.imageUrl), input_name='imageUrl')), ))
def exportChildren(self, outfile, level, namespaceprefix_='', name_='AdvancedOrderItem', fromsubclass_=False, pretty_print=True):
super(AdvancedOrderItem, self).exportChildren(outfile, level, namespaceprefix_, name_, True, pretty_print=pretty_print)
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
if self.name is not None:
showIndent(outfile, level, pretty_print)
outfile.write('<name>%s</name>%s' % (self.gds_encode(self.gds_format_string(quote_xml(self.name), input_name='name')), eol_))
if self.description is not None:
showIndent(outfile, level, pretty_print)
outfile.write('<description>%s</description>%s' % (self.gds_encode(self.gds_format_string(quote_xml(self.description), input_name='description')), eol_))
def build(self, node):
already_processed = set()
self.buildAttributes(node, node.attrib, already_processed)
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
return self
def buildAttributes(self, node, attrs, already_processed):
value = find_attr_value_('value', node)
if value is not None and 'value' not in already_processed:
already_processed.add('value')
try:
self.value = int(value)
except ValueError as exp:
raise_parse_error(node, 'Bad integer attribute: %s' % exp)
value = find_attr_value_('unit', node)
if value is not None and 'unit' not in already_processed:
already_processed.add('unit')
self.unit = value
value = find_attr_value_('unitPrice', node)
if value is not None and 'unitPrice' not in already_processed:
already_processed.add('unitPrice')
try:
self.unitPrice = int(value)
except ValueError as exp:
raise_parse_error(node, 'Bad integer attribute: %s' % exp)
value = find_attr_value_('hasPrice', node)
if value is not None and 'hasPrice' not in already_processed:
already_processed.add('hasPrice')
if value in ('true', '1'):
self.hasPrice = True
elif value in ('false', '0'):
self.hasPrice = False
else:
raise_parse_error(node, 'Bad boolean attribute')
value = find_attr_value_('step', node)
if value is not None and 'step' not in already_processed:
already_processed.add('step')
try:
self.step = int(value)
except ValueError as exp:
raise_parse_error(node, 'Bad integer attribute: %s' % exp)
value = find_attr_value_('stepUnit', node)
if value is not None and 'stepUnit' not in already_processed:
already_processed.add('stepUnit')
self.stepUnit = value
value = find_attr_value_('stepUnitConversion', node)
if value is not None and 'stepUnitConversion' not in already_processed:
already_processed.add('stepUnitConversion')
try:
self.stepUnitConversion = int(value)
except ValueError as exp:
raise_parse_error(node, 'Bad integer attribute: %s' % exp)
value = find_attr_value_('imageUrl', node)
if value is not None and 'imageUrl' not in already_processed:
already_processed.add('imageUrl')
self.imageUrl = value
super(AdvancedOrderItem, self).buildAttributes(node, attrs, already_processed)
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
if nodeName_ == 'name':
name_ = child_.text
name_ = self.gds_validate_string(name_, node, 'name')
self.name = name_
elif nodeName_ == 'description':
description_ = child_.text
description_ = self.gds_validate_string(description_, node, 'description')
self.description = description_
super(AdvancedOrderItem, self).buildChildren(child_, node, nodeName_, True)
# end class AdvancedOrderItem
class BasePaymentMethod(FlowElement):
subclass = None
superclass = FlowElement
def __init__(self, id=None, currency=None, amount=None, precision=None, extensiontype_=None):
self.original_tagname_ = None
super(BasePaymentMethod, self).__init__(id, extensiontype_, )
self.currency = currency
self.amount = amount
self.precision = precision
self.extensiontype_ = extensiontype_
def factory(*args_, **kwargs_):
if CurrentSubclassModule_ is not None:
subclass = getSubclassFromModule_(
CurrentSubclassModule_, BasePaymentMethod)
if subclass is not None:
return subclass(*args_, **kwargs_)
if BasePaymentMethod.subclass:
return BasePaymentMethod.subclass(*args_, **kwargs_)
else:
return BasePaymentMethod(*args_, **kwargs_)
factory = staticmethod(factory)
def get_currency(self): return self.currency
def set_currency(self, currency): self.currency = currency
def get_amount(self): return self.amount
def set_amount(self, amount): self.amount = amount
def get_precision(self): return self.precision
def set_precision(self, precision): self.precision = precision
def get_extensiontype_(self): return self.extensiontype_
def set_extensiontype_(self, extensiontype_): self.extensiontype_ = extensiontype_
def hasContent_(self):
if (
self.currency is not None or
self.amount is not None or
self.precision is not None or
super(BasePaymentMethod, self).hasContent_()
):
return True
else:
return False
def export(self, outfile, level, namespaceprefix_='', name_='BasePaymentMethod', namespacedef_='', pretty_print=True):
imported_ns_def_ = GenerateDSNamespaceDefs_.get('BasePaymentMethod')
if imported_ns_def_ is not None:
namespacedef_ = imported_ns_def_
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
if self.original_tagname_ is not None:
name_ = self.original_tagname_
showIndent(outfile, level, pretty_print)
outfile.write('<%s%s%s' % (namespaceprefix_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = set()
self.exportAttributes(outfile, level, already_processed, namespaceprefix_, name_='BasePaymentMethod')
if self.hasContent_():
outfile.write('>%s' % (eol_, ))
self.exportChildren(outfile, level + 1, namespaceprefix_='', name_='BasePaymentMethod', pretty_print=pretty_print)
showIndent(outfile, level, pretty_print)
outfile.write('</%s%s>%s' % (namespaceprefix_, name_, eol_))
else:
outfile.write('/>%s' % (eol_, ))
def exportAttributes(self, outfile, level, already_processed, namespaceprefix_='', name_='BasePaymentMethod'):
super(BasePaymentMethod, self).exportAttributes(outfile, level, already_processed, namespaceprefix_, name_='BasePaymentMethod')
if self.extensiontype_ is not None and 'xsi:type' not in already_processed:
already_processed.add('xsi:type')
outfile.write(' xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"')
outfile.write(' xsi:type="%s"' % self.extensiontype_)
def exportChildren(self, outfile, level, namespaceprefix_='', name_='BasePaymentMethod', fromsubclass_=False, pretty_print=True):
super(BasePaymentMethod, self).exportChildren(outfile, level, namespaceprefix_, name_, True, pretty_print=pretty_print)
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
if self.currency is not None:
showIndent(outfile, level, pretty_print)
outfile.write('<currency>%s</currency>%s' % (self.gds_encode(self.gds_format_string(quote_xml(self.currency), input_name='currency')), eol_))
if self.amount is not None:
showIndent(outfile, level, pretty_print)
outfile.write('<amount>%s</amount>%s' % (self.gds_format_integer(self.amount, input_name='amount'), eol_))
if self.precision is not None:
showIndent(outfile, level, pretty_print)
outfile.write('<precision>%s</precision>%s' % (self.gds_format_integer(self.precision, input_name='precision'), eol_))
def build(self, node):
already_processed = set()
self.buildAttributes(node, node.attrib, already_processed)
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
return self
def buildAttributes(self, node, attrs, already_processed):
value = find_attr_value_('xsi:type', node)
if value is not None and 'xsi:type' not in already_processed:
already_processed.add('xsi:type')
self.extensiontype_ = value
super(BasePaymentMethod, self).buildAttributes(node, attrs, already_processed)
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
if nodeName_ == 'currency':
currency_ = child_.text
currency_ = self.gds_validate_string(currency_, node, 'currency')
self.currency = currency_
elif nodeName_ == 'amount' and child_.text:
sval_ = child_.text
try:
ival_ = int(sval_)
except (TypeError, ValueError) as exp:
raise_parse_error(child_, 'requires integer: %s' % exp)
ival_ = self.gds_validate_integer(ival_, node, 'amount')
self.amount = ival_
elif nodeName_ == 'precision' and child_.text:
sval_ = child_.text
try:
ival_ = int(sval_)
except (TypeError, ValueError) as exp:
raise_parse_error(child_, 'requires integer: %s' % exp)
ival_ = self.gds_validate_integer(ival_, node, 'precision')
self.precision = ival_
super(BasePaymentMethod, self).buildChildren(child_, node, nodeName_, True)
# end class BasePaymentMethod
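
# --- Editor's note: usage sketch, not generated code -----------------------
# BasePaymentMethod keeps currency/amount/precision as child elements rather
# than attributes, so buildChildren() parses them (amount and precision must
# be integers) and exportChildren() writes them back out. A minimal export
# sketch, reusing _ExampleWriter from above and assuming FlowElement writes
# the id attribute:
def _example_export_payment_method():
    method = BasePaymentMethod(id='pm-1', currency='USD', amount=1999, precision=2)
    buf = _ExampleWriter()
    method.export(buf, 0, name_='paymentMethod')
    # Indicatively:
    # <paymentMethod id="pm-1">
    #   <currency>USD</currency>
    #   <amount>1999</amount>
    #   <precision>2</precision>
    # </paymentMethod>
    return buf.getvalue()
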
class PaymentMethod(BasePaymentMethod):
subclass = None
superclass = BasePaymentMethod
def __init__(self, id=None, currency=None, amount=None, precision=None, provider_id=None, calculateAmount=False, target=None):
self.original_tagname_ = None
super(PaymentMethod, self).__init__(id, currency, amount, precision, )
self.provider_id = provider_id
self.calculateAmount = calculateAmount
self.target = target
def factory(*args_, **kwargs_):
if CurrentSubclassModule_ is not None:
subclass = getSubclassFromModule_(
CurrentSubclassModule_, PaymentMethod)
if subclass is not None:
return subclass(*args_, **kwargs_)
if PaymentMethod.subclass:
return PaymentMethod.subclass(*args_, **kwargs_)
else:
return PaymentMethod(*args_, **kwargs_)
factory = staticmethod(factory)
def get_provider_id(self): return self.provider_id
def set_provider_id(self, provider_id): self.provider_id = provider_id
def get_calculateAmount(self): return self.calculateAmount
def set_calculateAmount(self, calculateAmount): self.calculateAmount = calculateAmount
def get_target(self): return self.target
def set_target(self, target): self.target = target
def hasContent_(self):
if (
self.provider_id is not None or
self.calculateAmount or
self.target is not None or
super(PaymentMethod, self).hasContent_()
):
return True
else:
return False
def export(self, outfile, level, namespaceprefix_='', name_='PaymentMethod', namespacedef_='', pretty_print=True):
imported_ns_def_ = GenerateDSNamespaceDefs_.get('PaymentMethod')
if imported_ns_def_ is not None:
namespacedef_ = imported_ns_def_
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
if self.original_tagname_ is not None:
name_ = self.original_tagname_
showIndent(outfile, level, pretty_print)
outfile.write('<%s%s%s' % (namespaceprefix_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = set()
self.exportAttributes(outfile, level, already_processed, namespaceprefix_, name_='PaymentMethod')
if self.hasContent_():
outfile.write('>%s' % (eol_, ))
self.exportChildren(outfile, level + 1, namespaceprefix_='', name_='PaymentMethod', pretty_print=pretty_print)
showIndent(outfile, level, pretty_print)
outfile.write('</%s%s>%s' % (namespaceprefix_, name_, eol_))
else:
outfile.write('/>%s' % (eol_, ))
def exportAttributes(self, outfile, level, already_processed, namespaceprefix_='', name_='PaymentMethod'):
super(PaymentMethod, self).exportAttributes(outfile, level, already_processed, namespaceprefix_, name_='PaymentMethod')
def exportChildren(self, outfile, level, namespaceprefix_='', name_='PaymentMethod', fromsubclass_=False, pretty_print=True):
super(PaymentMethod, self).exportChildren(outfile, level, namespaceprefix_, name_, True, pretty_print=pretty_print)
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
if self.provider_id is not None:
showIndent(outfile, level, pretty_print)
outfile.write('<provider_id>%s</provider_id>%s' % (self.gds_encode(self.gds_format_string(quote_xml(self.provider_id), input_name='provider_id')), eol_))
if self.calculateAmount is not None:
showIndent(outfile, level, pretty_print)
outfile.write('<calculateAmount>%s</calculateAmount>%s' % (self.gds_format_boolean(self.calculateAmount, input_name='calculateAmount'), eol_))
if self.target is not None:
showIndent(outfile, level, pretty_print)
outfile.write('<target>%s</target>%s' % (self.gds_encode(self.gds_format_string(quote_xml(self.target), input_name='target')), eol_))
def build(self, node):
already_processed = set()
self.buildAttributes(node, node.attrib, already_processed)
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
return self
def buildAttributes(self, node, attrs, already_processed):
super(PaymentMethod, self).buildAttributes(node, attrs, already_processed)
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
if nodeName_ == 'provider_id':
provider_id_ = child_.text
provider_id_ = self.gds_validate_string(provider_id_, node, 'provider_id')
self.provider_id = provider_id_
elif nodeName_ == 'calculateAmount':
sval_ = child_.text
if sval_ in ('true', '1'):
ival_ = True
elif sval_ in ('false', '0'):
ival_ = False
else:
raise_parse_error(child_, 'requires boolean')
ival_ = self.gds_validate_boolean(ival_, node, 'calculateAmount')
self.calculateAmount = ival_
elif nodeName_ == 'target':
target_ = child_.text
target_ = self.gds_validate_string(target_, node, 'target')
self.target = target_
super(PaymentMethod, self).buildChildren(child_, node, nodeName_, True)
# end class PaymentMethod
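# Round-trip sketch (hypothetical XML, illustrative only): build() walks an
# ElementTree node, and the chained buildChildren() methods parse <currency>,
# <amount> and <precision> (from BasePaymentMethod) plus <provider_id>,
# <calculateAmount> and <target>. Malformed integer or boolean text raises a
# parse error via raise_parse_error().
#
#   from xml.etree import ElementTree as ET
#   node = ET.fromstring(
#       '<method><currency>EUR</currency><amount>1500</amount>'
#       '<precision>2</precision><provider_id>acme</provider_id>'
#       '<calculateAmount>true</calculateAmount><target>order-1</target></method>')
#   pm = PaymentMethod.factory().build(node)
#   assert pm.amount == 1500 and pm.calculateAmount is True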
class TextAutocompleteWidget(TextWidget):
subclass = None
superclass = TextWidget
def __init__(self, maxChars=None, placeholder=None, value=None, keyboardType=None, suggestion=None):
self.original_tagname_ = None
super(TextAutocompleteWidget, self).__init__(maxChars, placeholder, value, keyboardType, )
if suggestion is None:
self.suggestion = []
else:
self.suggestion = suggestion
def factory(*args_, **kwargs_):
if CurrentSubclassModule_ is not None:
subclass = getSubclassFromModule_(
CurrentSubclassModule_, TextAutocompleteWidget)
if subclass is not None:
return subclass(*args_, **kwargs_)
if TextAutocompleteWidget.subclass:
return TextAutocompleteWidget.subclass(*args_, **kwargs_)
else:
return TextAutocompleteWidget(*args_, **kwargs_)
factory = staticmethod(factory)
def get_suggestion(self): return self.suggestion
def set_suggestion(self, suggestion): self.suggestion = suggestion
def add_suggestion(self, value): self.suggestion.append(value)
def insert_suggestion_at(self, index, value): self.suggestion.insert(index, value)
def replace_suggestion_at(self, index, value): self.suggestion[index] = value
def hasContent_(self):
if (
self.suggestion or
super(TextAutocompleteWidget, self).hasContent_()
):
return True
else:
return False
def export(self, outfile, level, namespaceprefix_='', name_='TextAutocompleteWidget', namespacedef_='', pretty_print=True):
imported_ns_def_ = GenerateDSNamespaceDefs_.get('TextAutocompleteWidget')
if imported_ns_def_ is not None:
namespacedef_ = imported_ns_def_
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
if self.original_tagname_ is not None:
name_ = self.original_tagname_
showIndent(outfile, level, pretty_print)
outfile.write('<%s%s%s' % (namespaceprefix_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = set()
self.exportAttributes(outfile, level, already_processed, namespaceprefix_, name_='TextAutocompleteWidget')
if self.hasContent_():
outfile.write('>%s' % (eol_, ))
self.exportChildren(outfile, level + 1, namespaceprefix_='', name_='TextAutocompleteWidget', pretty_print=pretty_print)
showIndent(outfile, level, pretty_print)
outfile.write('</%s%s>%s' % (namespaceprefix_, name_, eol_))
else:
outfile.write('/>%s' % (eol_, ))
def exportAttributes(self, outfile, level, already_processed, namespaceprefix_='', name_='TextAutocompleteWidget'):
super(TextAutocompleteWidget, self).exportAttributes(outfile, level, already_processed, namespaceprefix_, name_='TextAutocompleteWidget')
def exportChildren(self, outfile, level, namespaceprefix_='', name_='TextAutocompleteWidget', fromsubclass_=False, pretty_print=True):
super(TextAutocompleteWidget, self).exportChildren(outfile, level, namespaceprefix_, name_, True, pretty_print=pretty_print)
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
for suggestion_ in self.suggestion:
suggestion_.export(outfile, level, namespaceprefix_, name_='suggestion', pretty_print=pretty_print)
def build(self, node):
already_processed = set()
self.buildAttributes(node, node.attrib, already_processed)
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
return self
def buildAttributes(self, node, attrs, already_processed):
super(TextAutocompleteWidget, self).buildAttributes(node, attrs, already_processed)
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
if nodeName_ == 'suggestion':
class_obj_ = self.get_class_obj_(child_, Value)
obj_ = class_obj_.factory()
obj_.build(child_)
self.suggestion.append(obj_)
obj_.original_tagname_ = 'suggestion'
super(TextAutocompleteWidget, self).buildChildren(child_, node, nodeName_, True)
# end class TextAutocompleteWidget
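# Sketch (illustrative; Value's constructor signature is inferred from
# Choice.__init__'s super() call, which forwards a single value argument):
# suggestions are repeated <suggestion> children, managed through the list
# helpers above and dispatched polymorphically via get_class_obj_ on parse.
#
#   w = TextAutocompleteWidget(maxChars=40, placeholder='City')
#   w.add_suggestion(Value('Brussels'))
#   w.add_suggestion(Value('Bruges'))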
class Choice(Value):
subclass = None
superclass = Value
def __init__(self, value=None, label=None):
self.original_tagname_ = None
super(Choice, self).__init__(value, )
self.label = _cast(None, label)
def factory(*args_, **kwargs_):
if CurrentSubclassModule_ is not None:
subclass = getSubclassFromModule_(
CurrentSubclassModule_, Choice)
if subclass is not None:
return subclass(*args_, **kwargs_)
if Choice.subclass:
return Choice.subclass(*args_, **kwargs_)
else:
return Choice(*args_, **kwargs_)
factory = staticmethod(factory)
def get_label(self): return self.label
def set_label(self, label): self.label = label
def hasContent_(self):
if (
super(Choice, self).hasContent_()
):
return True
else:
return False
def export(self, outfile, level, namespaceprefix_='', name_='Choice', namespacedef_='', pretty_print=True):
imported_ns_def_ = GenerateDSNamespaceDefs_.get('Choice')
if imported_ns_def_ is not None:
namespacedef_ = imported_ns_def_
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
if self.original_tagname_ is not None:
name_ = self.original_tagname_
showIndent(outfile, level, pretty_print)
outfile.write('<%s%s%s' % (namespaceprefix_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = set()
self.exportAttributes(outfile, level, already_processed, namespaceprefix_, name_='Choice')
if self.hasContent_():
outfile.write('>%s' % (eol_, ))
self.exportChildren(outfile, level + 1, namespaceprefix_='', name_='Choice', pretty_print=pretty_print)
outfile.write('</%s%s>%s' % (namespaceprefix_, name_, eol_))
else:
outfile.write('/>%s' % (eol_, ))
def exportAttributes(self, outfile, level, already_processed, namespaceprefix_='', name_='Choice'):
super(Choice, self).exportAttributes(outfile, level, already_processed, namespaceprefix_, name_='Choice')
if self.label is not None and 'label' not in already_processed:
already_processed.add('label')
outfile.write(' label=%s' % (self.gds_encode(self.gds_format_string(quote_attrib(self.label), input_name='label')), ))
def exportChildren(self, outfile, level, namespaceprefix_='', name_='Choice', fromsubclass_=False, pretty_print=True):
super(Choice, self).exportChildren(outfile, level, namespaceprefix_, name_, True, pretty_print=pretty_print)
pass
def build(self, node):
already_processed = set()
self.buildAttributes(node, node.attrib, already_processed)
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
return self
def buildAttributes(self, node, attrs, already_processed):
value = find_attr_value_('label', node)
if value is not None and 'label' not in already_processed:
already_processed.add('label')
self.label = value
super(Choice, self).buildAttributes(node, attrs, already_processed)
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
super(Choice, self).buildChildren(child_, node, nodeName_, True)
pass
# end class Choice
class SelectWidget(Widget):
subclass = None
superclass = Widget
def __init__(self, choice=None, extensiontype_=None):
self.original_tagname_ = None
super(SelectWidget, self).__init__(extensiontype_, )
if choice is None:
self.choice = []
else:
self.choice = choice
self.extensiontype_ = extensiontype_
def factory(*args_, **kwargs_):
if CurrentSubclassModule_ is not None:
subclass = getSubclassFromModule_(
CurrentSubclassModule_, SelectWidget)
if subclass is not None:
return subclass(*args_, **kwargs_)
if SelectWidget.subclass:
return SelectWidget.subclass(*args_, **kwargs_)
else:
return SelectWidget(*args_, **kwargs_)
factory = staticmethod(factory)
def get_choice(self): return self.choice
def set_choice(self, choice): self.choice = choice
def add_choice(self, value): self.choice.append(value)
def insert_choice_at(self, index, value): self.choice.insert(index, value)
def replace_choice_at(self, index, value): self.choice[index] = value
def get_extensiontype_(self): return self.extensiontype_
def set_extensiontype_(self, extensiontype_): self.extensiontype_ = extensiontype_
def hasContent_(self):
if (
self.choice or
super(SelectWidget, self).hasContent_()
):
return True
else:
return False
def export(self, outfile, level, namespaceprefix_='', name_='SelectWidget', namespacedef_='', pretty_print=True):
imported_ns_def_ = GenerateDSNamespaceDefs_.get('SelectWidget')
if imported_ns_def_ is not None:
namespacedef_ = imported_ns_def_
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
if self.original_tagname_ is not None:
name_ = self.original_tagname_
showIndent(outfile, level, pretty_print)
outfile.write('<%s%s%s' % (namespaceprefix_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = set()
self.exportAttributes(outfile, level, already_processed, namespaceprefix_, name_='SelectWidget')
if self.hasContent_():
outfile.write('>%s' % (eol_, ))
self.exportChildren(outfile, level + 1, namespaceprefix_='', name_='SelectWidget', pretty_print=pretty_print)
showIndent(outfile, level, pretty_print)
outfile.write('</%s%s>%s' % (namespaceprefix_, name_, eol_))
else:
outfile.write('/>%s' % (eol_, ))
def exportAttributes(self, outfile, level, already_processed, namespaceprefix_='', name_='SelectWidget'):
super(SelectWidget, self).exportAttributes(outfile, level, already_processed, namespaceprefix_, name_='SelectWidget')
if self.extensiontype_ is not None and 'xsi:type' not in already_processed:
already_processed.add('xsi:type')
outfile.write(' xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"')
outfile.write(' xsi:type="%s"' % self.extensiontype_)
def exportChildren(self, outfile, level, namespaceprefix_='', name_='SelectWidget', fromsubclass_=False, pretty_print=True):
super(SelectWidget, self).exportChildren(outfile, level, namespaceprefix_, name_, True, pretty_print=pretty_print)
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
for choice_ in self.choice:
choice_.export(outfile, level, namespaceprefix_, name_='choice', pretty_print=pretty_print)
def build(self, node):
already_processed = set()
self.buildAttributes(node, node.attrib, already_processed)
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
return self
def buildAttributes(self, node, attrs, already_processed):
value = find_attr_value_('xsi:type', node)
if value is not None and 'xsi:type' not in already_processed:
already_processed.add('xsi:type')
self.extensiontype_ = value
super(SelectWidget, self).buildAttributes(node, attrs, already_processed)
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
if nodeName_ == 'choice':
obj_ = Choice.factory()
obj_.build(child_)
self.choice.append(obj_)
obj_.original_tagname_ = 'choice'
super(SelectWidget, self).buildChildren(child_, node, nodeName_, True)
# end class SelectWidget
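# Sketch (illustrative values): SelectWidget is itself extensible via
# xsi:type (see exportAttributes() above), and buildChildren() turns every
# <choice> element into a Choice through Choice.factory(), so subclass
# overrides (Choice.subclass or CurrentSubclassModule_) are honoured.
#
#   sw = SelectWidget()
#   sw.add_choice(Choice(value='1', label='One'))
#   sw.add_choice(Choice(value='2', label='Two'))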
class SelectSingleWidget(SelectWidget):
subclass = None
superclass = SelectWidget
def __init__(self, choice=None, value=None):
self.original_tagname_ = None
super(SelectSingleWidget, self).__init__(choice, )
self.value = _cast(None, value)
def factory(*args_, **kwargs_):
if CurrentSubclassModule_ is not None:
subclass = getSubclassFromModule_(
CurrentSubclassModule_, SelectSingleWidget)
if subclass is not None:
return subclass(*args_, **kwargs_)
if SelectSingleWidget.subclass:
return SelectSingleWidget.subclass(*args_, **kwargs_)
else:
return SelectSingleWidget(*args_, **kwargs_)
factory = staticmethod(factory)
def get_value(self): return self.value
def set_value(self, value): self.value = value
def hasContent_(self):
if (
super(SelectSingleWidget, self).hasContent_()
):
return True
else:
return False
def export(self, outfile, level, namespaceprefix_='', name_='SelectSingleWidget', namespacedef_='', pretty_print=True):
imported_ns_def_ = GenerateDSNamespaceDefs_.get('SelectSingleWidget')
if imported_ns_def_ is not None:
namespacedef_ = imported_ns_def_
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
if self.original_tagname_ is not None:
name_ = self.original_tagname_
showIndent(outfile, level, pretty_print)
outfile.write('<%s%s%s' % (namespaceprefix_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = set()
self.exportAttributes(outfile, level, already_processed, namespaceprefix_, name_='SelectSingleWidget')
if self.hasContent_():
outfile.write('>%s' % (eol_, ))
self.exportChildren(outfile, level + 1, namespaceprefix_='', name_='SelectSingleWidget', pretty_print=pretty_print)
showIndent(outfile, level, pretty_print)
outfile.write('</%s%s>%s' % (namespaceprefix_, name_, eol_))
else:
outfile.write('/>%s' % (eol_, ))
def exportAttributes(self, outfile, level, already_processed, namespaceprefix_='', name_='SelectSingleWidget'):
super(SelectSingleWidget, self).exportAttributes(outfile, level, already_processed, namespaceprefix_, name_='SelectSingleWidget')
if self.value is not None and 'value' not in already_processed:
already_processed.add('value')
outfile.write(' value=%s' % (self.gds_encode(self.gds_format_string(quote_attrib(self.value), input_name='value')), ))
def exportChildren(self, outfile, level, namespaceprefix_='', name_='SelectSingleWidget', fromsubclass_=False, pretty_print=True):
super(SelectSingleWidget, self).exportChildren(outfile, level, namespaceprefix_, name_, True, pretty_print=pretty_print)
def build(self, node):
already_processed = set()
self.buildAttributes(node, node.attrib, already_processed)
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
return self
def buildAttributes(self, node, attrs, already_processed):
value = find_attr_value_('value', node)
if value is not None and 'value' not in already_processed:
already_processed.add('value')
self.value = value
super(SelectSingleWidget, self).buildAttributes(node, attrs, already_processed)
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
super(SelectSingleWidget, self).buildChildren(child_, node, nodeName_, True)
pass
# end class SelectSingleWidget
class SelectMultiWidget(SelectWidget):
subclass = None
superclass = SelectWidget
def __init__(self, choice=None, value=None):
self.original_tagname_ = None
super(SelectMultiWidget, self).__init__(choice, )
if value is None:
self.value = []
else:
self.value = value
def factory(*args_, **kwargs_):
if CurrentSubclassModule_ is not None:
subclass = getSubclassFromModule_(
CurrentSubclassModule_, SelectMultiWidget)
if subclass is not None:
return subclass(*args_, **kwargs_)
if SelectMultiWidget.subclass:
return SelectMultiWidget.subclass(*args_, **kwargs_)
else:
return SelectMultiWidget(*args_, **kwargs_)
factory = staticmethod(factory)
def get_value(self): return self.value
def set_value(self, value): self.value = value
def add_value(self, value): self.value.append(value)
def insert_value_at(self, index, value): self.value.insert(index, value)
def replace_value_at(self, index, value): self.value[index] = value
def hasContent_(self):
if (
self.value or
super(SelectMultiWidget, self).hasContent_()
):
return True
else:
return False
def export(self, outfile, level, namespaceprefix_='', name_='SelectMultiWidget', namespacedef_='', pretty_print=True):
imported_ns_def_ = GenerateDSNamespaceDefs_.get('SelectMultiWidget')
if imported_ns_def_ is not None:
namespacedef_ = imported_ns_def_
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
if self.original_tagname_ is not None:
name_ = self.original_tagname_
showIndent(outfile, level, pretty_print)
outfile.write('<%s%s%s' % (namespaceprefix_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = set()
self.exportAttributes(outfile, level, already_processed, namespaceprefix_, name_='SelectMultiWidget')
if self.hasContent_():
outfile.write('>%s' % (eol_, ))
self.exportChildren(outfile, level + 1, namespaceprefix_='', name_='SelectMultiWidget', pretty_print=pretty_print)
showIndent(outfile, level, pretty_print)
outfile.write('</%s%s>%s' % (namespaceprefix_, name_, eol_))
else:
outfile.write('/>%s' % (eol_, ))
def exportAttributes(self, outfile, level, already_processed, namespaceprefix_='', name_='SelectMultiWidget'):
super(SelectMultiWidget, self).exportAttributes(outfile, level, already_processed, namespaceprefix_, name_='SelectMultiWidget')
def exportChildren(self, outfile, level, namespaceprefix_='', name_='SelectMultiWidget', fromsubclass_=False, pretty_print=True):
super(SelectMultiWidget, self).exportChildren(outfile, level, namespaceprefix_, name_, True, pretty_print=pretty_print)
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
for value_ in self.value:
value_.export(outfile, level, namespaceprefix_, name_='value', pretty_print=pretty_print)
def build(self, node):
already_processed = set()
self.buildAttributes(node, node.attrib, already_processed)
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
return self
def buildAttributes(self, node, attrs, already_processed):
super(SelectMultiWidget, self).buildAttributes(node, attrs, already_processed)
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
if nodeName_ == 'value':
class_obj_ = self.get_class_obj_(child_, Value)
obj_ = class_obj_.factory()
obj_.build(child_)
self.value.append(obj_)
obj_.original_tagname_ = 'value'
super(SelectMultiWidget, self).buildChildren(child_, node, nodeName_, True)
# end class SelectMultiWidget
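# Note the asymmetry between the two concrete select widgets above:
# SelectSingleWidget keeps its selection in a value="..." XML attribute,
# while SelectMultiWidget keeps selections as repeated <value> child
# elements (Value objects, dispatched via get_class_obj_). Sketch with
# illustrative values:
#
#   single = SelectSingleWidget(value='1')
#   multi = SelectMultiWidget()
#   multi.add_value(Value('1'))
#   multi.add_value(Value('3'))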
class SelectDateWidget(Widget):
subclass = None
superclass = Widget
def __init__(self, minDate=None, maxDate=None, date=None, minuteInterval=None, mode=None, unit=None):
self.original_tagname_ = None
super(SelectDateWidget, self).__init__()
self.minDate = _cast(int, minDate)
self.maxDate = _cast(int, maxDate)
self.date = _cast(int, date)
self.minuteInterval = _cast(int, minuteInterval)
self.mode = _cast(None, mode)
self.unit = _cast(None, unit)
def factory(*args_, **kwargs_):
if CurrentSubclassModule_ is not None:
subclass = getSubclassFromModule_(
CurrentSubclassModule_, SelectDateWidget)
if subclass is not None:
return subclass(*args_, **kwargs_)
if SelectDateWidget.subclass:
return SelectDateWidget.subclass(*args_, **kwargs_)
else:
return SelectDateWidget(*args_, **kwargs_)
factory = staticmethod(factory)
def get_minDate(self): return self.minDate
def set_minDate(self, minDate): self.minDate = minDate
def get_maxDate(self): return self.maxDate
def set_maxDate(self, maxDate): self.maxDate = maxDate
def get_date(self): return self.date
def set_date(self, date): self.date = date
def get_minuteInterval(self): return self.minuteInterval
def set_minuteInterval(self, minuteInterval): self.minuteInterval = minuteInterval
def get_mode(self): return self.mode
def set_mode(self, mode): self.mode = mode
def get_unit(self): return self.unit
def set_unit(self, unit): self.unit = unit
def hasContent_(self):
if (
super(SelectDateWidget, self).hasContent_()
):
return True
else:
return False
def export(self, outfile, level, namespaceprefix_='', name_='SelectDateWidget', namespacedef_='', pretty_print=True):
imported_ns_def_ = GenerateDSNamespaceDefs_.get('SelectDateWidget')
if imported_ns_def_ is not None:
namespacedef_ = imported_ns_def_
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
if self.original_tagname_ is not None:
name_ = self.original_tagname_
showIndent(outfile, level, pretty_print)
outfile.write('<%s%s%s' % (namespaceprefix_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = set()
self.exportAttributes(outfile, level, already_processed, namespaceprefix_, name_='SelectDateWidget')
if self.hasContent_():
outfile.write('>%s' % (eol_, ))
self.exportChildren(outfile, level + 1, namespaceprefix_='', name_='SelectDateWidget', pretty_print=pretty_print)
outfile.write('</%s%s>%s' % (namespaceprefix_, name_, eol_))
else:
outfile.write('/>%s' % (eol_, ))
def exportAttributes(self, outfile, level, already_processed, namespaceprefix_='', name_='SelectDateWidget'):
super(SelectDateWidget, self).exportAttributes(outfile, level, already_processed, namespaceprefix_, name_='SelectDateWidget')
if self.minDate is not None and 'minDate' not in already_processed:
already_processed.add('minDate')
outfile.write(' minDate="%s"' % self.gds_format_integer(self.minDate, input_name='minDate'))
if self.maxDate is not None and 'maxDate' not in already_processed:
already_processed.add('maxDate')
outfile.write(' maxDate="%s"' % self.gds_format_integer(self.maxDate, input_name='maxDate'))
if self.date is not None and 'date' not in already_processed:
already_processed.add('date')
outfile.write(' date="%s"' % self.gds_format_integer(self.date, input_name='date'))
if self.minuteInterval is not None and 'minuteInterval' not in already_processed:
already_processed.add('minuteInterval')
outfile.write(' minuteInterval="%s"' % self.gds_format_integer(self.minuteInterval, input_name='minuteInterval'))
if self.mode is not None and 'mode' not in already_processed:
already_processed.add('mode')
outfile.write(' mode=%s' % (self.gds_encode(self.gds_format_string(quote_attrib(self.mode), input_name='mode')), ))
if self.unit is not None and 'unit' not in already_processed:
already_processed.add('unit')
outfile.write(' unit=%s' % (self.gds_encode(self.gds_format_string(quote_attrib(self.unit), input_name='unit')), ))
def exportChildren(self, outfile, level, namespaceprefix_='', name_='SelectDateWidget', fromsubclass_=False, pretty_print=True):
super(SelectDateWidget, self).exportChildren(outfile, level, namespaceprefix_, name_, True, pretty_print=pretty_print)
pass
def build(self, node):
already_processed = set()
self.buildAttributes(node, node.attrib, already_processed)
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
return self
def buildAttributes(self, node, attrs, already_processed):
value = find_attr_value_('minDate', node)
if value is not None and 'minDate' not in already_processed:
already_processed.add('minDate')
try:
self.minDate = int(value)
except ValueError as exp:
raise_parse_error(node, 'Bad integer attribute: %s' % exp)
value = find_attr_value_('maxDate', node)
if value is not None and 'maxDate' not in already_processed:
already_processed.add('maxDate')
try:
self.maxDate = int(value)
except ValueError as exp:
raise_parse_error(node, 'Bad integer attribute: %s' % exp)
value = find_attr_value_('date', node)
if value is not None and 'date' not in already_processed:
already_processed.add('date')
try:
self.date = int(value)
except ValueError as exp:
raise_parse_error(node, 'Bad integer attribute: %s' % exp)
value = find_attr_value_('minuteInterval', node)
if value is not None and 'minuteInterval' not in already_processed:
already_processed.add('minuteInterval')
try:
self.minuteInterval = int(value)
except ValueError as exp:
raise_parse_error(node, 'Bad integer attribute: %s' % exp)
value = find_attr_value_('mode', node)
if value is not None and 'mode' not in already_processed:
already_processed.add('mode')
self.mode = value
value = find_attr_value_('unit', node)
if value is not None and 'unit' not in already_processed:
already_processed.add('unit')
self.unit = value
super(SelectDateWidget, self).buildAttributes(node, attrs, already_processed)
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
super(SelectDateWidget, self).buildChildren(child_, node, nodeName_, True)
pass
# end class SelectDateWidget
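# Sketch (illustrative; the unit and epoch for minDate/maxDate/date are not
# visible in this module, so treat the integers as opaque): all four numeric
# attributes are parsed with int() in buildAttributes() and raise a parse
# error on malformed input, while mode and unit stay plain strings.
#
#   sdw = SelectDateWidget(minDate=0, maxDate=86400, date=3600,
#                          minuteInterval=15, mode='time')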
class SelectFriendWidget(Widget):
subclass = None
superclass = Widget
def __init__(self, selectionRequired=None, multiSelect=None):
self.original_tagname_ = None
super(SelectFriendWidget, self).__init__()
self.selectionRequired = _cast(bool, selectionRequired)
self.multiSelect = _cast(bool, multiSelect)
def factory(*args_, **kwargs_):
if CurrentSubclassModule_ is not None:
subclass = getSubclassFromModule_(
CurrentSubclassModule_, SelectFriendWidget)
if subclass is not None:
return subclass(*args_, **kwargs_)
if SelectFriendWidget.subclass:
return SelectFriendWidget.subclass(*args_, **kwargs_)
else:
return SelectFriendWidget(*args_, **kwargs_)
factory = staticmethod(factory)
def get_selectionRequired(self): return self.selectionRequired
def set_selectionRequired(self, selectionRequired): self.selectionRequired = selectionRequired
def get_multiSelect(self): return self.multiSelect
def set_multiSelect(self, multiSelect): self.multiSelect = multiSelect
def hasContent_(self):
if (
super(SelectFriendWidget, self).hasContent_()
):
return True
else:
return False
def export(self, outfile, level, namespaceprefix_='', name_='SelectFriendWidget', namespacedef_='', pretty_print=True):
imported_ns_def_ = GenerateDSNamespaceDefs_.get('SelectFriendWidget')
if imported_ns_def_ is not None:
namespacedef_ = imported_ns_def_
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
if self.original_tagname_ is not None:
name_ = self.original_tagname_
showIndent(outfile, level, pretty_print)
outfile.write('<%s%s%s' % (namespaceprefix_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = set()
self.exportAttributes(outfile, level, already_processed, namespaceprefix_, name_='SelectFriendWidget')
if self.hasContent_():
outfile.write('>%s' % (eol_, ))
self.exportChildren(outfile, level + 1, namespaceprefix_='', name_='SelectFriendWidget', pretty_print=pretty_print)
outfile.write('</%s%s>%s' % (namespaceprefix_, name_, eol_))
else:
outfile.write('/>%s' % (eol_, ))
def exportAttributes(self, outfile, level, already_processed, namespaceprefix_='', name_='SelectFriendWidget'):
super(SelectFriendWidget, self).exportAttributes(outfile, level, already_processed, namespaceprefix_, name_='SelectFriendWidget')
if self.selectionRequired is not None and 'selectionRequired' not in already_processed:
already_processed.add('selectionRequired')
outfile.write(' selectionRequired="%s"' % self.gds_format_boolean(self.selectionRequired, input_name='selectionRequired'))
if self.multiSelect is not None and 'multiSelect' not in already_processed:
already_processed.add('multiSelect')
outfile.write(' multiSelect="%s"' % self.gds_format_boolean(self.multiSelect, input_name='multiSelect'))
def exportChildren(self, outfile, level, namespaceprefix_='', name_='SelectFriendWidget', fromsubclass_=False, pretty_print=True):
super(SelectFriendWidget, self).exportChildren(outfile, level, namespaceprefix_, name_, True, pretty_print=pretty_print)
pass
def build(self, node):
already_processed = set()
self.buildAttributes(node, node.attrib, already_processed)
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
return self
def buildAttributes(self, node, attrs, already_processed):
value = find_attr_value_('selectionRequired', node)
if value is not None and 'selectionRequired' not in already_processed:
already_processed.add('selectionRequired')
if value in ('true', '1'):
self.selectionRequired = True
elif value in ('false', '0'):
self.selectionRequired = False
else:
raise_parse_error(node, 'Bad boolean attribute')
value = find_attr_value_('multiSelect', node)
if value is not None and 'multiSelect' not in already_processed:
already_processed.add('multiSelect')
if value in ('true', '1'):
self.multiSelect = True
elif value in ('false', '0'):
self.multiSelect = False
else:
raise_parse_error(node, 'Bad boolean attribute')
super(SelectFriendWidget, self).buildAttributes(node, attrs, already_processed)
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
super(SelectFriendWidget, self).buildChildren(child_, node, nodeName_, True)
pass
# end class SelectFriendWidget
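# Note: buildAttributes() above accepts 'true'/'1' and 'false'/'0' for the
# two boolean attributes and raises a parse error on anything else, the
# usual generateDS convention for xs:boolean. Sketch:
#
#   sfw = SelectFriendWidget(selectionRequired=True, multiSelect=False)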
class MyDigiPassWidget(Widget):
subclass = None
superclass = Widget
def __init__(self, scope=None):
self.original_tagname_ = None
super(MyDigiPassWidget, self).__init__()
self.scope = _cast(None, scope)
def factory(*args_, **kwargs_):
if CurrentSubclassModule_ is not None:
subclass = getSubclassFromModule_(
CurrentSubclassModule_, MyDigiPassWidget)
if subclass is not None:
return subclass(*args_, **kwargs_)
if MyDigiPassWidget.subclass:
return MyDigiPassWidget.subclass(*args_, **kwargs_)
else:
return MyDigiPassWidget(*args_, **kwargs_)
factory = staticmethod(factory)
def get_scope(self): return self.scope
def set_scope(self, scope): self.scope = scope
def hasContent_(self):
if (
super(MyDigiPassWidget, self).hasContent_()
):
return True
else:
return False
def export(self, outfile, level, namespaceprefix_='', name_='MyDigiPassWidget', namespacedef_='', pretty_print=True):
imported_ns_def_ = GenerateDSNamespaceDefs_.get('MyDigiPassWidget')
if imported_ns_def_ is not None:
namespacedef_ = imported_ns_def_
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
if self.original_tagname_ is not None:
name_ = self.original_tagname_
showIndent(outfile, level, pretty_print)
outfile.write('<%s%s%s' % (namespaceprefix_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = set()
self.exportAttributes(outfile, level, already_processed, namespaceprefix_, name_='MyDigiPassWidget')
if self.hasContent_():
outfile.write('>%s' % (eol_, ))
self.exportChildren(outfile, level + 1, namespaceprefix_='', name_='MyDigiPassWidget', pretty_print=pretty_print)
outfile.write('</%s%s>%s' % (namespaceprefix_, name_, eol_))
else:
outfile.write('/>%s' % (eol_, ))
def exportAttributes(self, outfile, level, already_processed, namespaceprefix_='', name_='MyDigiPassWidget'):
super(MyDigiPassWidget, self).exportAttributes(outfile, level, already_processed, namespaceprefix_, name_='MyDigiPassWidget')
if self.scope is not None and 'scope' not in already_processed:
already_processed.add('scope')
outfile.write(' scope=%s' % (self.gds_encode(self.gds_format_string(quote_attrib(self.scope), input_name='scope')), ))
def exportChildren(self, outfile, level, namespaceprefix_='', name_='MyDigiPassWidget', fromsubclass_=False, pretty_print=True):
super(MyDigiPassWidget, self).exportChildren(outfile, level, namespaceprefix_, name_, True, pretty_print=pretty_print)
pass
def build(self, node):
already_processed = set()
self.buildAttributes(node, node.attrib, already_processed)
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
return self
def buildAttributes(self, node, attrs, already_processed):
value = find_attr_value_('scope', node)
if value is not None and 'scope' not in already_processed:
already_processed.add('scope')
self.scope = value
super(MyDigiPassWidget, self).buildAttributes(node, attrs, already_processed)
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
super(MyDigiPassWidget, self).buildChildren(child_, node, nodeName_, True)
pass
# end class MyDigiPassWidget
class OpenIdWidget(Widget):
subclass = None
superclass = Widget
def __init__(self, provider=None, scope=None):
self.original_tagname_ = None
super(OpenIdWidget, self).__init__()
self.provider = _cast(None, provider)
self.scope = _cast(None, scope)
def factory(*args_, **kwargs_):
if CurrentSubclassModule_ is not None:
subclass = getSubclassFromModule_(
CurrentSubclassModule_, OpenIdWidget)
if subclass is not None:
return subclass(*args_, **kwargs_)
if OpenIdWidget.subclass:
return OpenIdWidget.subclass(*args_, **kwargs_)
else:
return OpenIdWidget(*args_, **kwargs_)
factory = staticmethod(factory)
def get_provider(self): return self.provider
def set_provider(self, provider): self.provider = provider
def get_scope(self): return self.scope
def set_scope(self, scope): self.scope = scope
    def validate_OpenIdProvider(self, value):
        # Validate type OpenIdProvider, an xs:string restricted to an enumeration.
        if value is not None and Validate_simpletypes_:
            value = str(value)
            enumerations = ['itsme']
            if value not in enumerations:
                warnings_.warn('Value "%(value)s" does not match xsd enumeration restriction on OpenIdProvider' % {"value": value.encode("utf-8")})
def hasContent_(self):
if (
super(OpenIdWidget, self).hasContent_()
):
return True
else:
return False
def export(self, outfile, level, namespaceprefix_='', name_='OpenIdWidget', namespacedef_='', pretty_print=True):
imported_ns_def_ = GenerateDSNamespaceDefs_.get('OpenIdWidget')
if imported_ns_def_ is not None:
namespacedef_ = imported_ns_def_
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
if self.original_tagname_ is not None:
name_ = self.original_tagname_
showIndent(outfile, level, pretty_print)
outfile.write('<%s%s%s' % (namespaceprefix_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = set()
self.exportAttributes(outfile, level, already_processed, namespaceprefix_, name_='OpenIdWidget')
if self.hasContent_():
outfile.write('>%s' % (eol_, ))
self.exportChildren(outfile, level + 1, namespaceprefix_='', name_='OpenIdWidget', pretty_print=pretty_print)
outfile.write('</%s%s>%s' % (namespaceprefix_, name_, eol_))
else:
outfile.write('/>%s' % (eol_, ))
def exportAttributes(self, outfile, level, already_processed, namespaceprefix_='', name_='OpenIdWidget'):
super(OpenIdWidget, self).exportAttributes(outfile, level, already_processed, namespaceprefix_, name_='OpenIdWidget')
if self.provider is not None and 'provider' not in already_processed:
already_processed.add('provider')
outfile.write(' provider=%s' % (quote_attrib(self.provider), ))
if self.scope is not None and 'scope' not in already_processed:
already_processed.add('scope')
outfile.write(' scope=%s' % (self.gds_encode(self.gds_format_string(quote_attrib(self.scope), input_name='scope')), ))
def exportChildren(self, outfile, level, namespaceprefix_='', name_='OpenIdWidget', fromsubclass_=False, pretty_print=True):
super(OpenIdWidget, self).exportChildren(outfile, level, namespaceprefix_, name_, True, pretty_print=pretty_print)
pass
def build(self, node):
already_processed = set()
self.buildAttributes(node, node.attrib, already_processed)
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
return self
def buildAttributes(self, node, attrs, already_processed):
value = find_attr_value_('provider', node)
if value is not None and 'provider' not in already_processed:
already_processed.add('provider')
self.provider = value
self.validate_OpenIdProvider(self.provider) # validate type OpenIdProvider
value = find_attr_value_('scope', node)
if value is not None and 'scope' not in already_processed:
already_processed.add('scope')
self.scope = value
super(OpenIdWidget, self).buildAttributes(node, attrs, already_processed)
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
super(OpenIdWidget, self).buildChildren(child_, node, nodeName_, True)
pass
# end class OpenIdWidget
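# Note: validate_OpenIdProvider() only warns when the provider falls outside
# the XSD enumeration (currently just 'itsme'); it does not raise, so an
# unknown provider still parses. Sketch with the one enumerated value:
#
#   oiw = OpenIdWidget(provider='itsme', scope='profile')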
class AdvancedOrderWidget(Widget):
subclass = None
superclass = Widget
def __init__(self, currency=None, leapTime=None, category=None):
self.original_tagname_ = None
super(AdvancedOrderWidget, self).__init__()
self.currency = _cast(None, currency)
self.leapTime = _cast(int, leapTime)
if category is None:
self.category = []
else:
self.category = category
def factory(*args_, **kwargs_):
if CurrentSubclassModule_ is not None:
subclass = getSubclassFromModule_(
CurrentSubclassModule_, AdvancedOrderWidget)
if subclass is not None:
return subclass(*args_, **kwargs_)
if AdvancedOrderWidget.subclass:
return AdvancedOrderWidget.subclass(*args_, **kwargs_)
else:
return AdvancedOrderWidget(*args_, **kwargs_)
factory = staticmethod(factory)
def get_category(self): return self.category
def set_category(self, category): self.category = category
def add_category(self, value): self.category.append(value)
def insert_category_at(self, index, value): self.category.insert(index, value)
def replace_category_at(self, index, value): self.category[index] = value
def get_currency(self): return self.currency
def set_currency(self, currency): self.currency = currency
def get_leapTime(self): return self.leapTime
def set_leapTime(self, leapTime): self.leapTime = leapTime
def hasContent_(self):
if (
self.category or
super(AdvancedOrderWidget, self).hasContent_()
):
return True
else:
return False
def export(self, outfile, level, namespaceprefix_='', name_='AdvancedOrderWidget', namespacedef_='', pretty_print=True):
imported_ns_def_ = GenerateDSNamespaceDefs_.get('AdvancedOrderWidget')
if imported_ns_def_ is not None:
namespacedef_ = imported_ns_def_
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
if self.original_tagname_ is not None:
name_ = self.original_tagname_
showIndent(outfile, level, pretty_print)
outfile.write('<%s%s%s' % (namespaceprefix_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = set()
self.exportAttributes(outfile, level, already_processed, namespaceprefix_, name_='AdvancedOrderWidget')
if self.hasContent_():
outfile.write('>%s' % (eol_, ))
self.exportChildren(outfile, level + 1, namespaceprefix_='', name_='AdvancedOrderWidget', pretty_print=pretty_print)
showIndent(outfile, level, pretty_print)
outfile.write('</%s%s>%s' % (namespaceprefix_, name_, eol_))
else:
outfile.write('/>%s' % (eol_, ))
def exportAttributes(self, outfile, level, already_processed, namespaceprefix_='', name_='AdvancedOrderWidget'):
super(AdvancedOrderWidget, self).exportAttributes(outfile, level, already_processed, namespaceprefix_, name_='AdvancedOrderWidget')
if self.currency is not None and 'currency' not in already_processed:
already_processed.add('currency')
outfile.write(' currency=%s' % (self.gds_encode(self.gds_format_string(quote_attrib(self.currency), input_name='currency')), ))
if self.leapTime is not None and 'leapTime' not in already_processed:
already_processed.add('leapTime')
outfile.write(' leapTime="%s"' % self.gds_format_integer(self.leapTime, input_name='leapTime'))
def exportChildren(self, outfile, level, namespaceprefix_='', name_='AdvancedOrderWidget', fromsubclass_=False, pretty_print=True):
super(AdvancedOrderWidget, self).exportChildren(outfile, level, namespaceprefix_, name_, True, pretty_print=pretty_print)
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
for category_ in self.category:
category_.export(outfile, level, namespaceprefix_, name_='category', pretty_print=pretty_print)
def build(self, node):
already_processed = set()
self.buildAttributes(node, node.attrib, already_processed)
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
return self
def buildAttributes(self, node, attrs, already_processed):
value = find_attr_value_('currency', node)
if value is not None and 'currency' not in already_processed:
already_processed.add('currency')
self.currency = value
value = find_attr_value_('leapTime', node)
if value is not None and 'leapTime' not in already_processed:
already_processed.add('leapTime')
try:
self.leapTime = int(value)
except ValueError as exp:
raise_parse_error(node, 'Bad integer attribute: %s' % exp)
super(AdvancedOrderWidget, self).buildAttributes(node, attrs, already_processed)
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
if nodeName_ == 'category':
obj_ = AdvancedOrderCategory.factory()
obj_.build(child_)
self.category.append(obj_)
obj_.original_tagname_ = 'category'
super(AdvancedOrderWidget, self).buildChildren(child_, node, nodeName_, True)
# end class AdvancedOrderWidget
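# Sketch (illustrative; AdvancedOrderCategory is defined elsewhere in this
# module and its constructor arguments are not visible here, so the no-arg
# call below is an assumption): categories are repeated <category> children,
# each built through AdvancedOrderCategory.factory() so subclassing hooks
# still apply.
#
#   aow = AdvancedOrderWidget(currency='EUR', leapTime=900)
#   aow.add_category(AdvancedOrderCategory())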
class SignWidget(Widget):
subclass = None
superclass = Widget
def __init__(self, caption=None, algorithm=None, keyName=None, index=None, payload=None):
self.original_tagname_ = None
super(SignWidget, self).__init__()
self.caption = _cast(None, caption)
self.algorithm = _cast(None, algorithm)
self.keyName = _cast(None, keyName)
self.index = _cast(None, index)
self.payload = payload
def factory(*args_, **kwargs_):
if CurrentSubclassModule_ is not None:
subclass = getSubclassFromModule_(
CurrentSubclassModule_, SignWidget)
if subclass is not None:
return subclass(*args_, **kwargs_)
if SignWidget.subclass:
return SignWidget.subclass(*args_, **kwargs_)
else:
return SignWidget(*args_, **kwargs_)
factory = staticmethod(factory)
def get_payload(self): return self.payload
def set_payload(self, payload): self.payload = payload
def get_caption(self): return self.caption
def set_caption(self, caption): self.caption = caption
def get_algorithm(self): return self.algorithm
def set_algorithm(self, algorithm): self.algorithm = algorithm
def get_keyName(self): return self.keyName
def set_keyName(self, keyName): self.keyName = keyName
def get_index(self): return self.index
def set_index(self, index): self.index = index
def hasContent_(self):
if (
self.payload is not None or
super(SignWidget, self).hasContent_()
):
return True
else:
return False
def export(self, outfile, level, namespaceprefix_='', name_='SignWidget', namespacedef_='', pretty_print=True):
imported_ns_def_ = GenerateDSNamespaceDefs_.get('SignWidget')
if imported_ns_def_ is not None:
namespacedef_ = imported_ns_def_
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
if self.original_tagname_ is not None:
name_ = self.original_tagname_
showIndent(outfile, level, pretty_print)
outfile.write('<%s%s%s' % (namespaceprefix_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = set()
self.exportAttributes(outfile, level, already_processed, namespaceprefix_, name_='SignWidget')
if self.hasContent_():
outfile.write('>%s' % (eol_, ))
self.exportChildren(outfile, level + 1, namespaceprefix_='', name_='SignWidget', pretty_print=pretty_print)
showIndent(outfile, level, pretty_print)
outfile.write('</%s%s>%s' % (namespaceprefix_, name_, eol_))
else:
outfile.write('/>%s' % (eol_, ))
def exportAttributes(self, outfile, level, already_processed, namespaceprefix_='', name_='SignWidget'):
super(SignWidget, self).exportAttributes(outfile, level, already_processed, namespaceprefix_, name_='SignWidget')
if self.caption is not None and 'caption' not in already_processed:
already_processed.add('caption')
outfile.write(' caption=%s' % (self.gds_encode(self.gds_format_string(quote_attrib(self.caption), input_name='caption')), ))
if self.algorithm is not None and 'algorithm' not in already_processed:
already_processed.add('algorithm')
outfile.write(' algorithm=%s' % (self.gds_encode(self.gds_format_string(quote_attrib(self.algorithm), input_name='algorithm')), ))
if self.keyName is not None and 'keyName' not in already_processed:
already_processed.add('keyName')
outfile.write(' keyName=%s' % (self.gds_encode(self.gds_format_string(quote_attrib(self.keyName), input_name='keyName')), ))
if self.index is not None and 'index' not in already_processed:
already_processed.add('index')
outfile.write(' index=%s' % (self.gds_encode(self.gds_format_string(quote_attrib(self.index), input_name='index')), ))
def exportChildren(self, outfile, level, namespaceprefix_='', name_='SignWidget', fromsubclass_=False, pretty_print=True):
super(SignWidget, self).exportChildren(outfile, level, namespaceprefix_, name_, True, pretty_print=pretty_print)
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
if self.payload is not None:
showIndent(outfile, level, pretty_print)
outfile.write('<payload>%s</payload>%s' % (self.gds_encode(self.gds_format_string(quote_xml(self.payload), input_name='payload')), eol_))
def build(self, node):
already_processed = set()
self.buildAttributes(node, node.attrib, already_processed)
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
return self
def buildAttributes(self, node, attrs, already_processed):
value = find_attr_value_('caption', node)
if value is not None and 'caption' not in already_processed:
already_processed.add('caption')
self.caption = value
value = find_attr_value_('algorithm', node)
if value is not None and 'algorithm' not in already_processed:
already_processed.add('algorithm')
self.algorithm = value
value = find_attr_value_('keyName', node)
if value is not None and 'keyName' not in already_processed:
already_processed.add('keyName')
self.keyName = value
value = find_attr_value_('index', node)
if value is not None and 'index' not in already_processed:
already_processed.add('index')
self.index = value
super(SignWidget, self).buildAttributes(node, attrs, already_processed)
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
if nodeName_ == 'payload':
payload_ = child_.text
payload_ = self.gds_validate_string(payload_, node, 'payload')
self.payload = payload_
super(SignWidget, self).buildChildren(child_, node, nodeName_, True)
# end class SignWidget
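# Sketch (illustrative values, including the algorithm name): caption,
# algorithm, keyName and index are XML attributes, while the data to be
# signed travels as a <payload> child element.
#
#   sig = SignWidget(caption='Sign this order', algorithm='ed25519',
#                    keyName='default', index='0', payload='BASE64DATA')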
class OauthWidget(Widget):
subclass = None
superclass = Widget
def __init__(self, url=None, caption=None, successMessage=None):
self.original_tagname_ = None
super(OauthWidget, self).__init__()
self.url = _cast(None, url)
self.caption = _cast(None, caption)
self.successMessage = _cast(None, successMessage)
def factory(*args_, **kwargs_):
if CurrentSubclassModule_ is not None:
subclass = getSubclassFromModule_(
CurrentSubclassModule_, OauthWidget)
if subclass is not None:
return subclass(*args_, **kwargs_)
if OauthWidget.subclass:
return OauthWidget.subclass(*args_, **kwargs_)
else:
return OauthWidget(*args_, **kwargs_)
factory = staticmethod(factory)
def get_url(self): return self.url
def set_url(self, url): self.url = url
def get_caption(self): return self.caption
def set_caption(self, caption): self.caption = caption
def get_successMessage(self): return self.successMessage
def set_successMessage(self, successMessage): self.successMessage = successMessage
def hasContent_(self):
if (
super(OauthWidget, self).hasContent_()
):
return True
else:
return False
def export(self, outfile, level, namespaceprefix_='', name_='OauthWidget', namespacedef_='', pretty_print=True):
imported_ns_def_ = GenerateDSNamespaceDefs_.get('OauthWidget')
if imported_ns_def_ is not None:
namespacedef_ = imported_ns_def_
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
if self.original_tagname_ is not None:
name_ = self.original_tagname_
showIndent(outfile, level, pretty_print)
outfile.write('<%s%s%s' % (namespaceprefix_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = set()
self.exportAttributes(outfile, level, already_processed, namespaceprefix_, name_='OauthWidget')
if self.hasContent_():
outfile.write('>%s' % (eol_, ))
self.exportChildren(outfile, level + 1, namespaceprefix_='', name_='OauthWidget', pretty_print=pretty_print)
outfile.write('</%s%s>%s' % (namespaceprefix_, name_, eol_))
else:
outfile.write('/>%s' % (eol_, ))
def exportAttributes(self, outfile, level, already_processed, namespaceprefix_='', name_='OauthWidget'):
super(OauthWidget, self).exportAttributes(outfile, level, already_processed, namespaceprefix_, name_='OauthWidget')
if self.url is not None and 'url' not in already_processed:
already_processed.add('url')
outfile.write(' url=%s' % (self.gds_encode(self.gds_format_string(quote_attrib(self.url), input_name='url')), ))
if self.caption is not None and 'caption' not in already_processed:
already_processed.add('caption')
outfile.write(' caption=%s' % (self.gds_encode(self.gds_format_string(quote_attrib(self.caption), input_name='caption')), ))
if self.successMessage is not None and 'successMessage' not in already_processed:
already_processed.add('successMessage')
outfile.write(' successMessage=%s' % (self.gds_encode(self.gds_format_string(quote_attrib(self.successMessage), input_name='successMessage')), ))
def exportChildren(self, outfile, level, namespaceprefix_='', name_='OauthWidget', fromsubclass_=False, pretty_print=True):
super(OauthWidget, self).exportChildren(outfile, level, namespaceprefix_, name_, True, pretty_print=pretty_print)
pass
def build(self, node):
already_processed = set()
self.buildAttributes(node, node.attrib, already_processed)
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
return self
def buildAttributes(self, node, attrs, already_processed):
value = find_attr_value_('url', node)
if value is not None and 'url' not in already_processed:
already_processed.add('url')
self.url = value
value = find_attr_value_('caption', node)
if value is not None and 'caption' not in already_processed:
already_processed.add('caption')
self.caption = value
value = find_attr_value_('successMessage', node)
if value is not None and 'successMessage' not in already_processed:
already_processed.add('successMessage')
self.successMessage = value
super(OauthWidget, self).buildAttributes(node, attrs, already_processed)
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
super(OauthWidget, self).buildChildren(child_, node, nodeName_, True)
pass
# end class OauthWidget
class PayWidget(Widget):
subclass = None
superclass = Widget
def __init__(self, memo=None, target=None, autoSubmit=True, testMode=False, embeddedAppId=None, method=None, baseMethod=None):
self.original_tagname_ = None
super(PayWidget, self).__init__()
self.memo = _cast(None, memo)
self.target = _cast(None, target)
self.autoSubmit = _cast(bool, autoSubmit)
self.testMode = _cast(bool, testMode)
self.embeddedAppId = _cast(None, embeddedAppId)
if method is None:
self.method = []
else:
self.method = method
self.baseMethod = baseMethod
def factory(*args_, **kwargs_):
if CurrentSubclassModule_ is not None:
subclass = getSubclassFromModule_(
CurrentSubclassModule_, PayWidget)
if subclass is not None:
return subclass(*args_, **kwargs_)
if PayWidget.subclass:
return PayWidget.subclass(*args_, **kwargs_)
else:
return PayWidget(*args_, **kwargs_)
factory = staticmethod(factory)
def get_method(self): return self.method
def set_method(self, method): self.method = method
def add_method(self, value): self.method.append(value)
def insert_method_at(self, index, value): self.method.insert(index, value)
def replace_method_at(self, index, value): self.method[index] = value
def get_baseMethod(self): return self.baseMethod
def set_baseMethod(self, baseMethod): self.baseMethod = baseMethod
def get_memo(self): return self.memo
def set_memo(self, memo): self.memo = memo
def get_target(self): return self.target
def set_target(self, target): self.target = target
def get_autoSubmit(self): return self.autoSubmit
def set_autoSubmit(self, autoSubmit): self.autoSubmit = autoSubmit
def get_testMode(self): return self.testMode
def set_testMode(self, testMode): self.testMode = testMode
def get_embeddedAppId(self): return self.embeddedAppId
def set_embeddedAppId(self, embeddedAppId): self.embeddedAppId = embeddedAppId
def hasContent_(self):
if (
self.method or
self.baseMethod is not None or
super(PayWidget, self).hasContent_()
):
return True
else:
return False
def export(self, outfile, level, namespaceprefix_='', name_='PayWidget', namespacedef_='', pretty_print=True):
imported_ns_def_ = GenerateDSNamespaceDefs_.get('PayWidget')
if imported_ns_def_ is not None:
namespacedef_ = imported_ns_def_
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
if self.original_tagname_ is not None:
name_ = self.original_tagname_
showIndent(outfile, level, pretty_print)
outfile.write('<%s%s%s' % (namespaceprefix_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = set()
self.exportAttributes(outfile, level, already_processed, namespaceprefix_, name_='PayWidget')
if self.hasContent_():
outfile.write('>%s' % (eol_, ))
self.exportChildren(outfile, level + 1, namespaceprefix_='', name_='PayWidget', pretty_print=pretty_print)
showIndent(outfile, level, pretty_print)
outfile.write('</%s%s>%s' % (namespaceprefix_, name_, eol_))
else:
outfile.write('/>%s' % (eol_, ))
def exportAttributes(self, outfile, level, already_processed, namespaceprefix_='', name_='PayWidget'):
super(PayWidget, self).exportAttributes(outfile, level, already_processed, namespaceprefix_, name_='PayWidget')
if self.memo is not None and 'memo' not in already_processed:
already_processed.add('memo')
outfile.write(' memo=%s' % (self.gds_encode(self.gds_format_string(quote_attrib(self.memo), input_name='memo')), ))
if self.target is not None and 'target' not in already_processed:
already_processed.add('target')
outfile.write(' target=%s' % (self.gds_encode(self.gds_format_string(quote_attrib(self.target), input_name='target')), ))
if not self.autoSubmit and 'autoSubmit' not in already_processed:
already_processed.add('autoSubmit')
outfile.write(' autoSubmit="%s"' % self.gds_format_boolean(self.autoSubmit, input_name='autoSubmit'))
if self.testMode and 'testMode' not in already_processed:
already_processed.add('testMode')
outfile.write(' testMode="%s"' % self.gds_format_boolean(self.testMode, input_name='testMode'))
if self.embeddedAppId is not None and 'embeddedAppId' not in already_processed:
already_processed.add('embeddedAppId')
outfile.write(' embeddedAppId=%s' % (self.gds_encode(self.gds_format_string(quote_attrib(self.embeddedAppId), input_name='embeddedAppId')), ))
def exportChildren(self, outfile, level, namespaceprefix_='', name_='PayWidget', fromsubclass_=False, pretty_print=True):
super(PayWidget, self).exportChildren(outfile, level, namespaceprefix_, name_, True, pretty_print=pretty_print)
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
for method_ in self.method:
method_.export(outfile, level, namespaceprefix_, name_='method', pretty_print=pretty_print)
if self.baseMethod is not None:
self.baseMethod.export(outfile, level, namespaceprefix_, name_='baseMethod', pretty_print=pretty_print)
def build(self, node):
already_processed = set()
self.buildAttributes(node, node.attrib, already_processed)
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
return self
def buildAttributes(self, node, attrs, already_processed):
value = find_attr_value_('memo', node)
if value is not None and 'memo' not in already_processed:
already_processed.add('memo')
self.memo = value
value = find_attr_value_('target', node)
if value is not None and 'target' not in already_processed:
already_processed.add('target')
self.target = value
value = find_attr_value_('autoSubmit', node)
if value is not None and 'autoSubmit' not in already_processed:
already_processed.add('autoSubmit')
if value in ('true', '1'):
self.autoSubmit = True
elif value in ('false', '0'):
self.autoSubmit = False
else:
raise_parse_error(node, 'Bad boolean attribute')
value = find_attr_value_('testMode', node)
if value is not None and 'testMode' not in already_processed:
already_processed.add('testMode')
if value in ('true', '1'):
self.testMode = True
elif value in ('false', '0'):
self.testMode = False
else:
raise_parse_error(node, 'Bad boolean attribute')
value = find_attr_value_('embeddedAppId', node)
if value is not None and 'embeddedAppId' not in already_processed:
already_processed.add('embeddedAppId')
self.embeddedAppId = value
super(PayWidget, self).buildAttributes(node, attrs, already_processed)
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
if nodeName_ == 'method':
obj_ = PaymentMethod.factory()
obj_.build(child_)
self.method.append(obj_)
obj_.original_tagname_ = 'method'
elif nodeName_ == 'baseMethod':
class_obj_ = self.get_class_obj_(child_, BasePaymentMethod)
obj_ = class_obj_.factory()
obj_.build(child_)
self.baseMethod = obj_
obj_.original_tagname_ = 'baseMethod'
super(PayWidget, self).buildChildren(child_, node, nodeName_, True)
# end class PayWidget
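# --- Illustrative usage sketch (hand-written, not generateDS output) ---
# A minimal round-trip for PayWidget: parse a hypothetical XML fragment with
# ElementTree, build() the binding object, and export() it back to stdout.
# The tag and attribute values are examples only; the real schema governs
# what is valid.
def _example_paywidget_roundtrip():
    import sys
    from xml.etree import ElementTree as etree_
    node = etree_.fromstring(
        '<payWidget memo="Order 42" autoSubmit="true" testMode="true"/>')
    widget = PayWidget.factory()
    widget.build(node)
    widget.export(sys.stdout, 0, name_='payWidget')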
class Form(GeneratedsSuper):
subclass = None
superclass = None
def __init__(self, positiveButtonCaption=None, positiveButtonConfirmation=None, negativeButtonCaption=None, negativeButtonConfirmation=None, widget=None, javascriptValidation=None):
self.original_tagname_ = None
self.positiveButtonCaption = _cast(None, positiveButtonCaption)
self.positiveButtonConfirmation = _cast(None, positiveButtonConfirmation)
self.negativeButtonCaption = _cast(None, negativeButtonCaption)
self.negativeButtonConfirmation = _cast(None, negativeButtonConfirmation)
self.widget = widget
self.javascriptValidation = javascriptValidation
def factory(*args_, **kwargs_):
if CurrentSubclassModule_ is not None:
subclass = getSubclassFromModule_(
CurrentSubclassModule_, Form)
if subclass is not None:
return subclass(*args_, **kwargs_)
if Form.subclass:
return Form.subclass(*args_, **kwargs_)
else:
return Form(*args_, **kwargs_)
factory = staticmethod(factory)
def get_widget(self): return self.widget
def set_widget(self, widget): self.widget = widget
def get_javascriptValidation(self): return self.javascriptValidation
def set_javascriptValidation(self, javascriptValidation): self.javascriptValidation = javascriptValidation
def get_positiveButtonCaption(self): return self.positiveButtonCaption
def set_positiveButtonCaption(self, positiveButtonCaption): self.positiveButtonCaption = positiveButtonCaption
def get_positiveButtonConfirmation(self): return self.positiveButtonConfirmation
def set_positiveButtonConfirmation(self, positiveButtonConfirmation): self.positiveButtonConfirmation = positiveButtonConfirmation
def get_negativeButtonCaption(self): return self.negativeButtonCaption
def set_negativeButtonCaption(self, negativeButtonCaption): self.negativeButtonCaption = negativeButtonCaption
def get_negativeButtonConfirmation(self): return self.negativeButtonConfirmation
def set_negativeButtonConfirmation(self, negativeButtonConfirmation): self.negativeButtonConfirmation = negativeButtonConfirmation
def hasContent_(self):
if (
self.widget is not None or
self.javascriptValidation is not None
):
return True
else:
return False
def export(self, outfile, level, namespaceprefix_='', name_='Form', namespacedef_='', pretty_print=True):
imported_ns_def_ = GenerateDSNamespaceDefs_.get('Form')
if imported_ns_def_ is not None:
namespacedef_ = imported_ns_def_
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
if self.original_tagname_ is not None:
name_ = self.original_tagname_
showIndent(outfile, level, pretty_print)
outfile.write('<%s%s%s' % (namespaceprefix_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = set()
self.exportAttributes(outfile, level, already_processed, namespaceprefix_, name_='Form')
if self.hasContent_():
outfile.write('>%s' % (eol_, ))
self.exportChildren(outfile, level + 1, namespaceprefix_='', name_='Form', pretty_print=pretty_print)
showIndent(outfile, level, pretty_print)
outfile.write('</%s%s>%s' % (namespaceprefix_, name_, eol_))
else:
outfile.write('/>%s' % (eol_, ))
def exportAttributes(self, outfile, level, already_processed, namespaceprefix_='', name_='Form'):
if self.positiveButtonCaption is not None and 'positiveButtonCaption' not in already_processed:
already_processed.add('positiveButtonCaption')
outfile.write(' positiveButtonCaption=%s' % (self.gds_encode(self.gds_format_string(quote_attrib(self.positiveButtonCaption), input_name='positiveButtonCaption')), ))
if self.positiveButtonConfirmation is not None and 'positiveButtonConfirmation' not in already_processed:
already_processed.add('positiveButtonConfirmation')
outfile.write(' positiveButtonConfirmation=%s' % (self.gds_encode(self.gds_format_string(quote_attrib(self.positiveButtonConfirmation), input_name='positiveButtonConfirmation')), ))
if self.negativeButtonCaption is not None and 'negativeButtonCaption' not in already_processed:
already_processed.add('negativeButtonCaption')
outfile.write(' negativeButtonCaption=%s' % (self.gds_encode(self.gds_format_string(quote_attrib(self.negativeButtonCaption), input_name='negativeButtonCaption')), ))
if self.negativeButtonConfirmation is not None and 'negativeButtonConfirmation' not in already_processed:
already_processed.add('negativeButtonConfirmation')
outfile.write(' negativeButtonConfirmation=%s' % (self.gds_encode(self.gds_format_string(quote_attrib(self.negativeButtonConfirmation), input_name='negativeButtonConfirmation')), ))
def exportChildren(self, outfile, level, namespaceprefix_='', name_='Form', fromsubclass_=False, pretty_print=True):
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
if self.widget is not None:
self.widget.export(outfile, level, namespaceprefix_, name_='widget', pretty_print=pretty_print)
if self.javascriptValidation is not None:
showIndent(outfile, level, pretty_print)
outfile.write('<javascriptValidation>%s</javascriptValidation>%s' % (self.gds_encode(self.gds_format_string(quote_xml(self.javascriptValidation), input_name='javascriptValidation')), eol_))
def build(self, node):
already_processed = set()
self.buildAttributes(node, node.attrib, already_processed)
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
return self
def buildAttributes(self, node, attrs, already_processed):
value = find_attr_value_('positiveButtonCaption', node)
if value is not None and 'positiveButtonCaption' not in already_processed:
already_processed.add('positiveButtonCaption')
self.positiveButtonCaption = value
value = find_attr_value_('positiveButtonConfirmation', node)
if value is not None and 'positiveButtonConfirmation' not in already_processed:
already_processed.add('positiveButtonConfirmation')
self.positiveButtonConfirmation = value
value = find_attr_value_('negativeButtonCaption', node)
if value is not None and 'negativeButtonCaption' not in already_processed:
already_processed.add('negativeButtonCaption')
self.negativeButtonCaption = value
value = find_attr_value_('negativeButtonConfirmation', node)
if value is not None and 'negativeButtonConfirmation' not in already_processed:
already_processed.add('negativeButtonConfirmation')
self.negativeButtonConfirmation = value
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
if nodeName_ == 'widget':
class_obj_ = self.get_class_obj_(child_, Widget)
obj_ = class_obj_.factory()
obj_.build(child_)
self.widget = obj_
obj_.original_tagname_ = 'widget'
elif nodeName_ == 'javascriptValidation':
javascriptValidation_ = child_.text
javascriptValidation_ = self.gds_validate_string(javascriptValidation_, node, 'javascriptValidation')
self.javascriptValidation = javascriptValidation_
# end class Form
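# --- Illustrative usage sketch (hand-written, not generateDS output) ---
# Building a Form in code and serializing it. Only javascriptValidation is
# populated so the sketch does not depend on any concrete Widget subclass;
# the captions and script body are example values.
def _example_export_form():
    import sys
    form = Form(
        positiveButtonCaption='Submit',
        negativeButtonCaption='Cancel',
        javascriptValidation='return true;',
    )
    form.export(sys.stdout, 0, name_='form')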
class FormMessage(FlowElement):
subclass = None
superclass = FlowElement
def __init__(self, id=None, member=None, brandingKey=None, autoLock=None, vibrate=None, alertType=None, alertIntervalType=None, positiveReference=None, negativeReference=None, content=None, form=None, attachment=None):
self.original_tagname_ = None
super(FormMessage, self).__init__(id, )
self.member = _cast(None, member)
self.brandingKey = _cast(None, brandingKey)
self.autoLock = _cast(bool, autoLock)
self.vibrate = _cast(bool, vibrate)
self.alertType = _cast(None, alertType)
self.alertIntervalType = _cast(None, alertIntervalType)
self.positiveReference = _cast(None, positiveReference)
self.negativeReference = _cast(None, negativeReference)
self.content = content
self.form = form
if attachment is None:
self.attachment = []
else:
self.attachment = attachment
def factory(*args_, **kwargs_):
if CurrentSubclassModule_ is not None:
subclass = getSubclassFromModule_(
CurrentSubclassModule_, FormMessage)
if subclass is not None:
return subclass(*args_, **kwargs_)
if FormMessage.subclass:
return FormMessage.subclass(*args_, **kwargs_)
else:
return FormMessage(*args_, **kwargs_)
factory = staticmethod(factory)
def get_content(self): return self.content
def set_content(self, content): self.content = content
def get_form(self): return self.form
def set_form(self, form): self.form = form
def get_attachment(self): return self.attachment
def set_attachment(self, attachment): self.attachment = attachment
def add_attachment(self, value): self.attachment.append(value)
def insert_attachment_at(self, index, value): self.attachment.insert(index, value)
def replace_attachment_at(self, index, value): self.attachment[index] = value
def get_member(self): return self.member
def set_member(self, member): self.member = member
def get_brandingKey(self): return self.brandingKey
def set_brandingKey(self, brandingKey): self.brandingKey = brandingKey
def get_autoLock(self): return self.autoLock
def set_autoLock(self, autoLock): self.autoLock = autoLock
def get_vibrate(self): return self.vibrate
def set_vibrate(self, vibrate): self.vibrate = vibrate
def get_alertType(self): return self.alertType
def set_alertType(self, alertType): self.alertType = alertType
def get_alertIntervalType(self): return self.alertIntervalType
def set_alertIntervalType(self, alertIntervalType): self.alertIntervalType = alertIntervalType
def get_positiveReference(self): return self.positiveReference
def set_positiveReference(self, positiveReference): self.positiveReference = positiveReference
def get_negativeReference(self): return self.negativeReference
def set_negativeReference(self, negativeReference): self.negativeReference = negativeReference
    def validate_AlertType(self, value):
        # Validate type AlertType, a restriction on xs:string.
        if value is not None and Validate_simpletypes_:
            value = str(value)
            enumerations = ['BEEP', 'SILENT', 'RING_5', 'RING_15', 'RING_30', 'RING_60']
            if value not in enumerations:
                warnings_.warn('Value "%(value)s" does not match xsd enumeration restriction on AlertType' % {"value": value.encode("utf-8")})
    def validate_AlertIntervalType(self, value):
        # Validate type AlertIntervalType, a restriction on xs:string.
        if value is not None and Validate_simpletypes_:
            value = str(value)
            enumerations = ['NONE', 'INTERVAL_5', 'INTERVAL_15', 'INTERVAL_30', 'INTERVAL_60', 'INTERVAL_300', 'INTERVAL_900', 'INTERVAL_3600']
            if value not in enumerations:
                warnings_.warn('Value "%(value)s" does not match xsd enumeration restriction on AlertIntervalType' % {"value": value.encode("utf-8")})
def hasContent_(self):
if (
self.content is not None or
self.form is not None or
self.attachment or
super(FormMessage, self).hasContent_()
):
return True
else:
return False
def export(self, outfile, level, namespaceprefix_='', name_='FormMessage', namespacedef_='', pretty_print=True):
imported_ns_def_ = GenerateDSNamespaceDefs_.get('FormMessage')
if imported_ns_def_ is not None:
namespacedef_ = imported_ns_def_
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
if self.original_tagname_ is not None:
name_ = self.original_tagname_
showIndent(outfile, level, pretty_print)
outfile.write('<%s%s%s' % (namespaceprefix_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = set()
self.exportAttributes(outfile, level, already_processed, namespaceprefix_, name_='FormMessage')
if self.hasContent_():
outfile.write('>%s' % (eol_, ))
self.exportChildren(outfile, level + 1, namespaceprefix_='', name_='FormMessage', pretty_print=pretty_print)
showIndent(outfile, level, pretty_print)
outfile.write('</%s%s>%s' % (namespaceprefix_, name_, eol_))
else:
outfile.write('/>%s' % (eol_, ))
def exportAttributes(self, outfile, level, already_processed, namespaceprefix_='', name_='FormMessage'):
super(FormMessage, self).exportAttributes(outfile, level, already_processed, namespaceprefix_, name_='FormMessage')
if self.member is not None and 'member' not in already_processed:
already_processed.add('member')
outfile.write(' member=%s' % (self.gds_encode(self.gds_format_string(quote_attrib(self.member), input_name='member')), ))
if self.brandingKey is not None and 'brandingKey' not in already_processed:
already_processed.add('brandingKey')
outfile.write(' brandingKey=%s' % (self.gds_encode(self.gds_format_string(quote_attrib(self.brandingKey), input_name='brandingKey')), ))
if self.autoLock is not None and 'autoLock' not in already_processed:
already_processed.add('autoLock')
outfile.write(' autoLock="%s"' % self.gds_format_boolean(self.autoLock, input_name='autoLock'))
if self.vibrate is not None and 'vibrate' not in already_processed:
already_processed.add('vibrate')
outfile.write(' vibrate="%s"' % self.gds_format_boolean(self.vibrate, input_name='vibrate'))
if self.alertType is not None and 'alertType' not in already_processed:
already_processed.add('alertType')
outfile.write(' alertType=%s' % (quote_attrib(self.alertType), ))
if self.alertIntervalType is not None and 'alertIntervalType' not in already_processed:
already_processed.add('alertIntervalType')
outfile.write(' alertIntervalType=%s' % (quote_attrib(self.alertIntervalType), ))
if self.positiveReference is not None and 'positiveReference' not in already_processed:
already_processed.add('positiveReference')
outfile.write(' positiveReference=%s' % (self.gds_encode(self.gds_format_string(quote_attrib(self.positiveReference), input_name='positiveReference')), ))
if self.negativeReference is not None and 'negativeReference' not in already_processed:
already_processed.add('negativeReference')
outfile.write(' negativeReference=%s' % (self.gds_encode(self.gds_format_string(quote_attrib(self.negativeReference), input_name='negativeReference')), ))
def exportChildren(self, outfile, level, namespaceprefix_='', name_='FormMessage', fromsubclass_=False, pretty_print=True):
super(FormMessage, self).exportChildren(outfile, level, namespaceprefix_, name_, True, pretty_print=pretty_print)
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
if self.content is not None:
self.content.export(outfile, level, namespaceprefix_, name_='content', pretty_print=pretty_print)
if self.form is not None:
self.form.export(outfile, level, namespaceprefix_, name_='form', pretty_print=pretty_print)
for attachment_ in self.attachment:
attachment_.export(outfile, level, namespaceprefix_, name_='attachment', pretty_print=pretty_print)
def build(self, node):
already_processed = set()
self.buildAttributes(node, node.attrib, already_processed)
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
return self
def buildAttributes(self, node, attrs, already_processed):
value = find_attr_value_('member', node)
if value is not None and 'member' not in already_processed:
already_processed.add('member')
self.member = value
value = find_attr_value_('brandingKey', node)
if value is not None and 'brandingKey' not in already_processed:
already_processed.add('brandingKey')
self.brandingKey = value
value = find_attr_value_('autoLock', node)
if value is not None and 'autoLock' not in already_processed:
already_processed.add('autoLock')
if value in ('true', '1'):
self.autoLock = True
elif value in ('false', '0'):
self.autoLock = False
else:
raise_parse_error(node, 'Bad boolean attribute')
value = find_attr_value_('vibrate', node)
if value is not None and 'vibrate' not in already_processed:
already_processed.add('vibrate')
if value in ('true', '1'):
self.vibrate = True
elif value in ('false', '0'):
self.vibrate = False
else:
raise_parse_error(node, 'Bad boolean attribute')
value = find_attr_value_('alertType', node)
if value is not None and 'alertType' not in already_processed:
already_processed.add('alertType')
self.alertType = value
self.validate_AlertType(self.alertType) # validate type AlertType
value = find_attr_value_('alertIntervalType', node)
if value is not None and 'alertIntervalType' not in already_processed:
already_processed.add('alertIntervalType')
self.alertIntervalType = value
self.validate_AlertIntervalType(self.alertIntervalType) # validate type AlertIntervalType
value = find_attr_value_('positiveReference', node)
if value is not None and 'positiveReference' not in already_processed:
already_processed.add('positiveReference')
self.positiveReference = value
value = find_attr_value_('negativeReference', node)
if value is not None and 'negativeReference' not in already_processed:
already_processed.add('negativeReference')
self.negativeReference = value
super(FormMessage, self).buildAttributes(node, attrs, already_processed)
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
if nodeName_ == 'content':
obj_ = contentType1.factory()
obj_.build(child_)
self.content = obj_
obj_.original_tagname_ = 'content'
elif nodeName_ == 'form':
obj_ = Form.factory()
obj_.build(child_)
self.form = obj_
obj_.original_tagname_ = 'form'
elif nodeName_ == 'attachment':
obj_ = Attachment.factory()
obj_.build(child_)
self.attachment.append(obj_)
obj_.original_tagname_ = 'attachment'
super(FormMessage, self).buildChildren(child_, node, nodeName_, True)
# end class FormMessage
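# --- Illustrative usage sketch (hand-written, not generateDS output) ---
# The validate_AlertType/validate_AlertIntervalType hooks only warn; they do
# not raise. This shows the warning surfacing for an out-of-range value,
# assuming the module-level Validate_simpletypes_ flag is left at its usual
# default of True.
def _example_alert_type_validation():
    import warnings
    msg = FormMessage.factory()
    with warnings.catch_warnings(record=True) as caught:
        warnings.simplefilter('always')
        msg.validate_AlertType('RING_999')  # not in the xsd enumeration
        assert caught and 'AlertType' in str(caught[-1].message)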
class Outlet(GeneratedsSuper):
subclass = None
superclass = None
def __init__(self, value=None, name=None, reference=None):
self.original_tagname_ = None
self.value = _cast(None, value)
self.name = _cast(None, name)
self.reference = _cast(None, reference)
def factory(*args_, **kwargs_):
if CurrentSubclassModule_ is not None:
subclass = getSubclassFromModule_(
CurrentSubclassModule_, Outlet)
if subclass is not None:
return subclass(*args_, **kwargs_)
if Outlet.subclass:
return Outlet.subclass(*args_, **kwargs_)
else:
return Outlet(*args_, **kwargs_)
factory = staticmethod(factory)
def get_value(self): return self.value
def set_value(self, value): self.value = value
def get_name(self): return self.name
def set_name(self, name): self.name = name
def get_reference(self): return self.reference
def set_reference(self, reference): self.reference = reference
    def hasContent_(self):
        # Outlet defines no element children, so there is never content.
        return False
def export(self, outfile, level, namespaceprefix_='', name_='Outlet', namespacedef_='', pretty_print=True):
imported_ns_def_ = GenerateDSNamespaceDefs_.get('Outlet')
if imported_ns_def_ is not None:
namespacedef_ = imported_ns_def_
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
if self.original_tagname_ is not None:
name_ = self.original_tagname_
showIndent(outfile, level, pretty_print)
outfile.write('<%s%s%s' % (namespaceprefix_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = set()
self.exportAttributes(outfile, level, already_processed, namespaceprefix_, name_='Outlet')
if self.hasContent_():
outfile.write('>%s' % (eol_, ))
self.exportChildren(outfile, level + 1, namespaceprefix_='', name_='Outlet', pretty_print=pretty_print)
outfile.write('</%s%s>%s' % (namespaceprefix_, name_, eol_))
else:
outfile.write('/>%s' % (eol_, ))
def exportAttributes(self, outfile, level, already_processed, namespaceprefix_='', name_='Outlet'):
if self.value is not None and 'value' not in already_processed:
already_processed.add('value')
outfile.write(' value=%s' % (self.gds_encode(self.gds_format_string(quote_attrib(self.value), input_name='value')), ))
if self.name is not None and 'name' not in already_processed:
already_processed.add('name')
outfile.write(' name=%s' % (self.gds_encode(self.gds_format_string(quote_attrib(self.name), input_name='name')), ))
if self.reference is not None and 'reference' not in already_processed:
already_processed.add('reference')
outfile.write(' reference=%s' % (self.gds_encode(self.gds_format_string(quote_attrib(self.reference), input_name='reference')), ))
def exportChildren(self, outfile, level, namespaceprefix_='', name_='Outlet', fromsubclass_=False, pretty_print=True):
pass
def build(self, node):
already_processed = set()
self.buildAttributes(node, node.attrib, already_processed)
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
return self
def buildAttributes(self, node, attrs, already_processed):
value = find_attr_value_('value', node)
if value is not None and 'value' not in already_processed:
already_processed.add('value')
self.value = value
value = find_attr_value_('name', node)
if value is not None and 'name' not in already_processed:
already_processed.add('name')
self.name = value
value = find_attr_value_('reference', node)
if value is not None and 'reference' not in already_processed:
already_processed.add('reference')
self.reference = value
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
pass
# end class Outlet
class End(FlowElement):
subclass = None
superclass = FlowElement
def __init__(self, id=None, waitForFollowUpMessage=False):
self.original_tagname_ = None
super(End, self).__init__(id, )
self.waitForFollowUpMessage = _cast(bool, waitForFollowUpMessage)
def factory(*args_, **kwargs_):
if CurrentSubclassModule_ is not None:
subclass = getSubclassFromModule_(
CurrentSubclassModule_, End)
if subclass is not None:
return subclass(*args_, **kwargs_)
if End.subclass:
return End.subclass(*args_, **kwargs_)
else:
return End(*args_, **kwargs_)
factory = staticmethod(factory)
def get_waitForFollowUpMessage(self): return self.waitForFollowUpMessage
def set_waitForFollowUpMessage(self, waitForFollowUpMessage): self.waitForFollowUpMessage = waitForFollowUpMessage
def hasContent_(self):
if (
super(End, self).hasContent_()
):
return True
else:
return False
def export(self, outfile, level, namespaceprefix_='', name_='End', namespacedef_='', pretty_print=True):
imported_ns_def_ = GenerateDSNamespaceDefs_.get('End')
if imported_ns_def_ is not None:
namespacedef_ = imported_ns_def_
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
if self.original_tagname_ is not None:
name_ = self.original_tagname_
showIndent(outfile, level, pretty_print)
outfile.write('<%s%s%s' % (namespaceprefix_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = set()
self.exportAttributes(outfile, level, already_processed, namespaceprefix_, name_='End')
if self.hasContent_():
outfile.write('>%s' % (eol_, ))
self.exportChildren(outfile, level + 1, namespaceprefix_='', name_='End', pretty_print=pretty_print)
outfile.write('</%s%s>%s' % (namespaceprefix_, name_, eol_))
else:
outfile.write('/>%s' % (eol_, ))
def exportAttributes(self, outfile, level, already_processed, namespaceprefix_='', name_='End'):
super(End, self).exportAttributes(outfile, level, already_processed, namespaceprefix_, name_='End')
if self.waitForFollowUpMessage and 'waitForFollowUpMessage' not in already_processed:
already_processed.add('waitForFollowUpMessage')
outfile.write(' waitForFollowUpMessage="%s"' % self.gds_format_boolean(self.waitForFollowUpMessage, input_name='waitForFollowUpMessage'))
def exportChildren(self, outfile, level, namespaceprefix_='', name_='End', fromsubclass_=False, pretty_print=True):
super(End, self).exportChildren(outfile, level, namespaceprefix_, name_, True, pretty_print=pretty_print)
def build(self, node):
already_processed = set()
self.buildAttributes(node, node.attrib, already_processed)
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
return self
def buildAttributes(self, node, attrs, already_processed):
value = find_attr_value_('waitForFollowUpMessage', node)
if value is not None and 'waitForFollowUpMessage' not in already_processed:
already_processed.add('waitForFollowUpMessage')
if value in ('true', '1'):
self.waitForFollowUpMessage = True
elif value in ('false', '0'):
self.waitForFollowUpMessage = False
else:
raise_parse_error(node, 'Bad boolean attribute')
super(End, self).buildAttributes(node, attrs, already_processed)
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
super(End, self).buildChildren(child_, node, nodeName_, True)
# end class End
class MessageFlowDefinition(GeneratedsSuper):
subclass = None
superclass = None
def __init__(self, name=None, startReference=None, language=None, end=None, message=None, formMessage=None, resultsFlush=None, resultsEmail=None, flowCode=None):
self.original_tagname_ = None
self.name = _cast(None, name)
self.startReference = _cast(None, startReference)
self.language = _cast(None, language)
if end is None:
self.end = []
else:
self.end = end
if message is None:
self.message = []
else:
self.message = message
if formMessage is None:
self.formMessage = []
else:
self.formMessage = formMessage
if resultsFlush is None:
self.resultsFlush = []
else:
self.resultsFlush = resultsFlush
if resultsEmail is None:
self.resultsEmail = []
else:
self.resultsEmail = resultsEmail
if flowCode is None:
self.flowCode = []
else:
self.flowCode = flowCode
def factory(*args_, **kwargs_):
if CurrentSubclassModule_ is not None:
subclass = getSubclassFromModule_(
CurrentSubclassModule_, MessageFlowDefinition)
if subclass is not None:
return subclass(*args_, **kwargs_)
if MessageFlowDefinition.subclass:
return MessageFlowDefinition.subclass(*args_, **kwargs_)
else:
return MessageFlowDefinition(*args_, **kwargs_)
factory = staticmethod(factory)
def get_end(self): return self.end
def set_end(self, end): self.end = end
def add_end(self, value): self.end.append(value)
def insert_end_at(self, index, value): self.end.insert(index, value)
def replace_end_at(self, index, value): self.end[index] = value
def get_message(self): return self.message
def set_message(self, message): self.message = message
def add_message(self, value): self.message.append(value)
def insert_message_at(self, index, value): self.message.insert(index, value)
def replace_message_at(self, index, value): self.message[index] = value
def get_formMessage(self): return self.formMessage
def set_formMessage(self, formMessage): self.formMessage = formMessage
def add_formMessage(self, value): self.formMessage.append(value)
def insert_formMessage_at(self, index, value): self.formMessage.insert(index, value)
def replace_formMessage_at(self, index, value): self.formMessage[index] = value
def get_resultsFlush(self): return self.resultsFlush
def set_resultsFlush(self, resultsFlush): self.resultsFlush = resultsFlush
def add_resultsFlush(self, value): self.resultsFlush.append(value)
def insert_resultsFlush_at(self, index, value): self.resultsFlush.insert(index, value)
def replace_resultsFlush_at(self, index, value): self.resultsFlush[index] = value
def get_resultsEmail(self): return self.resultsEmail
def set_resultsEmail(self, resultsEmail): self.resultsEmail = resultsEmail
def add_resultsEmail(self, value): self.resultsEmail.append(value)
def insert_resultsEmail_at(self, index, value): self.resultsEmail.insert(index, value)
def replace_resultsEmail_at(self, index, value): self.resultsEmail[index] = value
def get_flowCode(self): return self.flowCode
def set_flowCode(self, flowCode): self.flowCode = flowCode
def add_flowCode(self, value): self.flowCode.append(value)
def insert_flowCode_at(self, index, value): self.flowCode.insert(index, value)
def replace_flowCode_at(self, index, value): self.flowCode[index] = value
def get_name(self): return self.name
def set_name(self, name): self.name = name
def get_startReference(self): return self.startReference
def set_startReference(self, startReference): self.startReference = startReference
def get_language(self): return self.language
def set_language(self, language): self.language = language
def hasContent_(self):
if (
self.end or
self.message or
self.formMessage or
self.resultsFlush or
self.resultsEmail or
self.flowCode
):
return True
else:
return False
def export(self, outfile, level, namespaceprefix_='', name_='MessageFlowDefinition', namespacedef_='', pretty_print=True):
imported_ns_def_ = GenerateDSNamespaceDefs_.get('MessageFlowDefinition')
if imported_ns_def_ is not None:
namespacedef_ = imported_ns_def_
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
if self.original_tagname_ is not None:
name_ = self.original_tagname_
showIndent(outfile, level, pretty_print)
outfile.write('<%s%s%s' % (namespaceprefix_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = set()
self.exportAttributes(outfile, level, already_processed, namespaceprefix_, name_='MessageFlowDefinition')
if self.hasContent_():
outfile.write('>%s' % (eol_, ))
self.exportChildren(outfile, level + 1, namespaceprefix_='', name_='MessageFlowDefinition', pretty_print=pretty_print)
showIndent(outfile, level, pretty_print)
outfile.write('</%s%s>%s' % (namespaceprefix_, name_, eol_))
else:
outfile.write('/>%s' % (eol_, ))
def exportAttributes(self, outfile, level, already_processed, namespaceprefix_='', name_='MessageFlowDefinition'):
if self.name is not None and 'name' not in already_processed:
already_processed.add('name')
outfile.write(' name=%s' % (self.gds_encode(self.gds_format_string(quote_attrib(self.name), input_name='name')), ))
if self.startReference is not None and 'startReference' not in already_processed:
already_processed.add('startReference')
outfile.write(' startReference=%s' % (self.gds_encode(self.gds_format_string(quote_attrib(self.startReference), input_name='startReference')), ))
if self.language is not None and 'language' not in already_processed:
already_processed.add('language')
outfile.write(' language=%s' % (self.gds_encode(self.gds_format_string(quote_attrib(self.language), input_name='language')), ))
def exportChildren(self, outfile, level, namespaceprefix_='', name_='MessageFlowDefinition', fromsubclass_=False, pretty_print=True):
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
for end_ in self.end:
end_.export(outfile, level, namespaceprefix_, name_='end', pretty_print=pretty_print)
for message_ in self.message:
message_.export(outfile, level, namespaceprefix_, name_='message', pretty_print=pretty_print)
for formMessage_ in self.formMessage:
formMessage_.export(outfile, level, namespaceprefix_, name_='formMessage', pretty_print=pretty_print)
for resultsFlush_ in self.resultsFlush:
resultsFlush_.export(outfile, level, namespaceprefix_, name_='resultsFlush', pretty_print=pretty_print)
for resultsEmail_ in self.resultsEmail:
resultsEmail_.export(outfile, level, namespaceprefix_, name_='resultsEmail', pretty_print=pretty_print)
for flowCode_ in self.flowCode:
flowCode_.export(outfile, level, namespaceprefix_, name_='flowCode', pretty_print=pretty_print)
def build(self, node):
already_processed = set()
self.buildAttributes(node, node.attrib, already_processed)
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
return self
def buildAttributes(self, node, attrs, already_processed):
value = find_attr_value_('name', node)
if value is not None and 'name' not in already_processed:
already_processed.add('name')
self.name = value
value = find_attr_value_('startReference', node)
if value is not None and 'startReference' not in already_processed:
already_processed.add('startReference')
self.startReference = value
value = find_attr_value_('language', node)
if value is not None and 'language' not in already_processed:
already_processed.add('language')
self.language = value
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
if nodeName_ == 'end':
obj_ = End.factory()
obj_.build(child_)
self.end.append(obj_)
obj_.original_tagname_ = 'end'
elif nodeName_ == 'message':
obj_ = Message.factory()
obj_.build(child_)
self.message.append(obj_)
obj_.original_tagname_ = 'message'
elif nodeName_ == 'formMessage':
obj_ = FormMessage.factory()
obj_.build(child_)
self.formMessage.append(obj_)
obj_.original_tagname_ = 'formMessage'
elif nodeName_ == 'resultsFlush':
obj_ = ResultsFlush.factory()
obj_.build(child_)
self.resultsFlush.append(obj_)
obj_.original_tagname_ = 'resultsFlush'
elif nodeName_ == 'resultsEmail':
obj_ = ResultsEmail.factory()
obj_.build(child_)
self.resultsEmail.append(obj_)
obj_.original_tagname_ = 'resultsEmail'
elif nodeName_ == 'flowCode':
obj_ = FlowCode.factory()
obj_.build(child_)
self.flowCode.append(obj_)
obj_.original_tagname_ = 'flowCode'
# end class MessageFlowDefinition
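# --- Illustrative usage sketch (hand-written, not generateDS output) ---
# Assembling a MessageFlowDefinition in code: one terminal <end> element,
# attached via the generated add_* helper, then serialized. The names and
# identifiers are example values, not taken from any real flow.
def _example_flow_definition():
    import sys
    definition = MessageFlowDefinition(name='demo flow', language='en',
                                       startReference='end_1')
    definition.add_end(End(id='end_1', waitForFollowUpMessage=False))
    definition.export(sys.stdout, 0, name_='definition')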
class MessageFlowDefinitionSet(GeneratedsSuper):
subclass = None
superclass = None
def __init__(self, definition=None):
self.original_tagname_ = None
if definition is None:
self.definition = []
else:
self.definition = definition
def factory(*args_, **kwargs_):
if CurrentSubclassModule_ is not None:
subclass = getSubclassFromModule_(
CurrentSubclassModule_, MessageFlowDefinitionSet)
if subclass is not None:
return subclass(*args_, **kwargs_)
if MessageFlowDefinitionSet.subclass:
return MessageFlowDefinitionSet.subclass(*args_, **kwargs_)
else:
return MessageFlowDefinitionSet(*args_, **kwargs_)
factory = staticmethod(factory)
def get_definition(self): return self.definition
def set_definition(self, definition): self.definition = definition
def add_definition(self, value): self.definition.append(value)
def insert_definition_at(self, index, value): self.definition.insert(index, value)
def replace_definition_at(self, index, value): self.definition[index] = value
def hasContent_(self):
if (
self.definition
):
return True
else:
return False
def export(self, outfile, level, namespaceprefix_='', name_='MessageFlowDefinitionSet', namespacedef_='', pretty_print=True):
imported_ns_def_ = GenerateDSNamespaceDefs_.get('MessageFlowDefinitionSet')
if imported_ns_def_ is not None:
namespacedef_ = imported_ns_def_
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
if self.original_tagname_ is not None:
name_ = self.original_tagname_
showIndent(outfile, level, pretty_print)
outfile.write('<%s%s%s' % (namespaceprefix_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = set()
self.exportAttributes(outfile, level, already_processed, namespaceprefix_, name_='MessageFlowDefinitionSet')
if self.hasContent_():
outfile.write('>%s' % (eol_, ))
self.exportChildren(outfile, level + 1, namespaceprefix_='', name_='MessageFlowDefinitionSet', pretty_print=pretty_print)
showIndent(outfile, level, pretty_print)
outfile.write('</%s%s>%s' % (namespaceprefix_, name_, eol_))
else:
outfile.write('/>%s' % (eol_, ))
def exportAttributes(self, outfile, level, already_processed, namespaceprefix_='', name_='MessageFlowDefinitionSet'):
pass
def exportChildren(self, outfile, level, namespaceprefix_='', name_='MessageFlowDefinitionSet', fromsubclass_=False, pretty_print=True):
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
for definition_ in self.definition:
definition_.export(outfile, level, namespaceprefix_, name_='definition', pretty_print=pretty_print)
def build(self, node):
already_processed = set()
self.buildAttributes(node, node.attrib, already_processed)
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
return self
def buildAttributes(self, node, attrs, already_processed):
pass
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
if nodeName_ == 'definition':
obj_ = MessageFlowDefinition.factory()
obj_.build(child_)
self.definition.append(obj_)
obj_.original_tagname_ = 'definition'
# end class MessageFlowDefinitionSet
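# --- Illustrative usage sketch (hand-written, not generateDS output) ---
# Deserializing a MessageFlowDefinitionSet straight from a string with
# ElementTree; the XML is a hypothetical minimal document.
def _example_parse_definition_set():
    from xml.etree import ElementTree as etree_
    xml = ('<messageFlowDefinitionSet>'
           '<definition name="demo" language="en"/>'
           '</messageFlowDefinitionSet>')
    node = etree_.fromstring(xml)
    definition_set = MessageFlowDefinitionSet.factory()
    definition_set.build(node)
    assert definition_set.get_definition()[0].get_name() == 'demo'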
class Step(GeneratedsSuper):
subclass = None
superclass = None
def __init__(self, id=None, creationTimestamp=None, definition=None, previousStep=None, nextStep=None, message=None, button=None, extensiontype_=None):
self.original_tagname_ = None
self.id = _cast(None, id)
self.creationTimestamp = _cast(int, creationTimestamp)
self.definition = _cast(None, definition)
self.previousStep = _cast(None, previousStep)
self.nextStep = _cast(None, nextStep)
self.message = _cast(None, message)
self.button = _cast(None, button)
self.extensiontype_ = extensiontype_
def factory(*args_, **kwargs_):
if CurrentSubclassModule_ is not None:
subclass = getSubclassFromModule_(
CurrentSubclassModule_, Step)
if subclass is not None:
return subclass(*args_, **kwargs_)
if Step.subclass:
return Step.subclass(*args_, **kwargs_)
else:
return Step(*args_, **kwargs_)
factory = staticmethod(factory)
def get_id(self): return self.id
def set_id(self, id): self.id = id
def get_creationTimestamp(self): return self.creationTimestamp
def set_creationTimestamp(self, creationTimestamp): self.creationTimestamp = creationTimestamp
def get_definition(self): return self.definition
def set_definition(self, definition): self.definition = definition
def get_previousStep(self): return self.previousStep
def set_previousStep(self, previousStep): self.previousStep = previousStep
def get_nextStep(self): return self.nextStep
def set_nextStep(self, nextStep): self.nextStep = nextStep
def get_message(self): return self.message
def set_message(self, message): self.message = message
def get_button(self): return self.button
def set_button(self, button): self.button = button
def get_extensiontype_(self): return self.extensiontype_
def set_extensiontype_(self, extensiontype_): self.extensiontype_ = extensiontype_
    def hasContent_(self):
        # Step defines no element children, so there is never content.
        return False
def export(self, outfile, level, namespaceprefix_='', name_='Step', namespacedef_='', pretty_print=True):
imported_ns_def_ = GenerateDSNamespaceDefs_.get('Step')
if imported_ns_def_ is not None:
namespacedef_ = imported_ns_def_
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
if self.original_tagname_ is not None:
name_ = self.original_tagname_
showIndent(outfile, level, pretty_print)
outfile.write('<%s%s%s' % (namespaceprefix_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = set()
self.exportAttributes(outfile, level, already_processed, namespaceprefix_, name_='Step')
if self.hasContent_():
outfile.write('>%s' % (eol_, ))
self.exportChildren(outfile, level + 1, namespaceprefix_='', name_='Step', pretty_print=pretty_print)
outfile.write('</%s%s>%s' % (namespaceprefix_, name_, eol_))
else:
outfile.write('/>%s' % (eol_, ))
def exportAttributes(self, outfile, level, already_processed, namespaceprefix_='', name_='Step'):
if self.id is not None and 'id' not in already_processed:
already_processed.add('id')
outfile.write(' id=%s' % (self.gds_encode(self.gds_format_string(quote_attrib(self.id), input_name='id')), ))
if self.creationTimestamp is not None and 'creationTimestamp' not in already_processed:
already_processed.add('creationTimestamp')
outfile.write(' creationTimestamp="%s"' % self.gds_format_integer(self.creationTimestamp, input_name='creationTimestamp'))
if self.definition is not None and 'definition' not in already_processed:
already_processed.add('definition')
outfile.write(' definition=%s' % (self.gds_encode(self.gds_format_string(quote_attrib(self.definition), input_name='definition')), ))
if self.previousStep is not None and 'previousStep' not in already_processed:
already_processed.add('previousStep')
outfile.write(' previousStep=%s' % (self.gds_encode(self.gds_format_string(quote_attrib(self.previousStep), input_name='previousStep')), ))
if self.nextStep is not None and 'nextStep' not in already_processed:
already_processed.add('nextStep')
outfile.write(' nextStep=%s' % (self.gds_encode(self.gds_format_string(quote_attrib(self.nextStep), input_name='nextStep')), ))
if self.message is not None and 'message' not in already_processed:
already_processed.add('message')
outfile.write(' message=%s' % (self.gds_encode(self.gds_format_string(quote_attrib(self.message), input_name='message')), ))
if self.button is not None and 'button' not in already_processed:
already_processed.add('button')
outfile.write(' button=%s' % (self.gds_encode(self.gds_format_string(quote_attrib(self.button), input_name='button')), ))
if self.extensiontype_ is not None and 'xsi:type' not in already_processed:
already_processed.add('xsi:type')
outfile.write(' xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"')
outfile.write(' xsi:type="%s"' % self.extensiontype_)
def exportChildren(self, outfile, level, namespaceprefix_='', name_='Step', fromsubclass_=False, pretty_print=True):
pass
def build(self, node):
already_processed = set()
self.buildAttributes(node, node.attrib, already_processed)
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
return self
def buildAttributes(self, node, attrs, already_processed):
value = find_attr_value_('id', node)
if value is not None and 'id' not in already_processed:
already_processed.add('id')
self.id = value
value = find_attr_value_('creationTimestamp', node)
if value is not None and 'creationTimestamp' not in already_processed:
already_processed.add('creationTimestamp')
try:
self.creationTimestamp = int(value)
except ValueError as exp:
raise_parse_error(node, 'Bad integer attribute: %s' % exp)
value = find_attr_value_('definition', node)
if value is not None and 'definition' not in already_processed:
already_processed.add('definition')
self.definition = value
value = find_attr_value_('previousStep', node)
if value is not None and 'previousStep' not in already_processed:
already_processed.add('previousStep')
self.previousStep = value
value = find_attr_value_('nextStep', node)
if value is not None and 'nextStep' not in already_processed:
already_processed.add('nextStep')
self.nextStep = value
value = find_attr_value_('message', node)
if value is not None and 'message' not in already_processed:
already_processed.add('message')
self.message = value
value = find_attr_value_('button', node)
if value is not None and 'button' not in already_processed:
already_processed.add('button')
self.button = value
value = find_attr_value_('xsi:type', node)
if value is not None and 'xsi:type' not in already_processed:
already_processed.add('xsi:type')
self.extensiontype_ = value
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
pass
# end class Step
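# --- Illustrative usage sketch (hand-written, not generateDS output) ---
# Step is an xsd extension base: setting extensiontype_ makes export() emit
# an xsi:type attribute (plus the xsi namespace declaration) so consumers
# can recover the concrete subtype. The id and type values are examples.
def _example_step_xsi_type():
    import sys
    step = Step(id='step_1', extensiontype_='MessageStep')
    step.export(sys.stdout, 0, name_='step')
    # -> <step id="step_1" xmlns:xsi="..." xsi:type="MessageStep"/>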
class BaseMessageStep(Step):
subclass = None
superclass = Step
def __init__(self, id=None, creationTimestamp=None, definition=None, previousStep=None, nextStep=None, message=None, button=None, receivedTimestamp=None, acknowledgedTimestamp=None, extensiontype_=None):
self.original_tagname_ = None
super(BaseMessageStep, self).__init__(id, creationTimestamp, definition, previousStep, nextStep, message, button, extensiontype_, )
self.receivedTimestamp = _cast(int, receivedTimestamp)
self.acknowledgedTimestamp = _cast(int, acknowledgedTimestamp)
self.extensiontype_ = extensiontype_
def factory(*args_, **kwargs_):
if CurrentSubclassModule_ is not None:
subclass = getSubclassFromModule_(
CurrentSubclassModule_, BaseMessageStep)
if subclass is not None:
return subclass(*args_, **kwargs_)
if BaseMessageStep.subclass:
return BaseMessageStep.subclass(*args_, **kwargs_)
else:
return BaseMessageStep(*args_, **kwargs_)
factory = staticmethod(factory)
def get_receivedTimestamp(self): return self.receivedTimestamp
def set_receivedTimestamp(self, receivedTimestamp): self.receivedTimestamp = receivedTimestamp
def get_acknowledgedTimestamp(self): return self.acknowledgedTimestamp
def set_acknowledgedTimestamp(self, acknowledgedTimestamp): self.acknowledgedTimestamp = acknowledgedTimestamp
def get_extensiontype_(self): return self.extensiontype_
def set_extensiontype_(self, extensiontype_): self.extensiontype_ = extensiontype_
def hasContent_(self):
if (
super(BaseMessageStep, self).hasContent_()
):
return True
else:
return False
def export(self, outfile, level, namespaceprefix_='', name_='BaseMessageStep', namespacedef_='', pretty_print=True):
imported_ns_def_ = GenerateDSNamespaceDefs_.get('BaseMessageStep')
if imported_ns_def_ is not None:
namespacedef_ = imported_ns_def_
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
if self.original_tagname_ is not None:
name_ = self.original_tagname_
showIndent(outfile, level, pretty_print)
outfile.write('<%s%s%s' % (namespaceprefix_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = set()
self.exportAttributes(outfile, level, already_processed, namespaceprefix_, name_='BaseMessageStep')
if self.hasContent_():
outfile.write('>%s' % (eol_, ))
self.exportChildren(outfile, level + 1, namespaceprefix_='', name_='BaseMessageStep', pretty_print=pretty_print)
outfile.write('</%s%s>%s' % (namespaceprefix_, name_, eol_))
else:
outfile.write('/>%s' % (eol_, ))
def exportAttributes(self, outfile, level, already_processed, namespaceprefix_='', name_='BaseMessageStep'):
super(BaseMessageStep, self).exportAttributes(outfile, level, already_processed, namespaceprefix_, name_='BaseMessageStep')
if self.receivedTimestamp is not None and 'receivedTimestamp' not in already_processed:
already_processed.add('receivedTimestamp')
outfile.write(' receivedTimestamp="%s"' % self.gds_format_integer(self.receivedTimestamp, input_name='receivedTimestamp'))
if self.acknowledgedTimestamp is not None and 'acknowledgedTimestamp' not in already_processed:
already_processed.add('acknowledgedTimestamp')
outfile.write(' acknowledgedTimestamp="%s"' % self.gds_format_integer(self.acknowledgedTimestamp, input_name='acknowledgedTimestamp'))
if self.extensiontype_ is not None and 'xsi:type' not in already_processed:
already_processed.add('xsi:type')
outfile.write(' xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"')
outfile.write(' xsi:type="%s"' % self.extensiontype_)
def exportChildren(self, outfile, level, namespaceprefix_='', name_='BaseMessageStep', fromsubclass_=False, pretty_print=True):
super(BaseMessageStep, self).exportChildren(outfile, level, namespaceprefix_, name_, True, pretty_print=pretty_print)
def build(self, node):
already_processed = set()
self.buildAttributes(node, node.attrib, already_processed)
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
return self
def buildAttributes(self, node, attrs, already_processed):
value = find_attr_value_('receivedTimestamp', node)
if value is not None and 'receivedTimestamp' not in already_processed:
already_processed.add('receivedTimestamp')
try:
self.receivedTimestamp = int(value)
except ValueError as exp:
raise_parse_error(node, 'Bad integer attribute: %s' % exp)
value = find_attr_value_('acknowledgedTimestamp', node)
if value is not None and 'acknowledgedTimestamp' not in already_processed:
already_processed.add('acknowledgedTimestamp')
try:
self.acknowledgedTimestamp = int(value)
except ValueError as exp:
raise_parse_error(node, 'Bad integer attribute: %s' % exp)
value = find_attr_value_('xsi:type', node)
if value is not None and 'xsi:type' not in already_processed:
already_processed.add('xsi:type')
self.extensiontype_ = value
super(BaseMessageStep, self).buildAttributes(node, attrs, already_processed)
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
super(BaseMessageStep, self).buildChildren(child_, node, nodeName_, True)
# end class BaseMessageStep
class MessageStep(BaseMessageStep):
subclass = None
superclass = BaseMessageStep
def __init__(self, id=None, creationTimestamp=None, definition=None, previousStep=None, nextStep=None, message=None, button=None, receivedTimestamp=None, acknowledgedTimestamp=None, answer=None):
self.original_tagname_ = None
super(MessageStep, self).__init__(id, creationTimestamp, definition, previousStep, nextStep, message, button, receivedTimestamp, acknowledgedTimestamp, )
self.answer = _cast(None, answer)
def factory(*args_, **kwargs_):
if CurrentSubclassModule_ is not None:
subclass = getSubclassFromModule_(
CurrentSubclassModule_, MessageStep)
if subclass is not None:
return subclass(*args_, **kwargs_)
if MessageStep.subclass:
return MessageStep.subclass(*args_, **kwargs_)
else:
return MessageStep(*args_, **kwargs_)
factory = staticmethod(factory)
def get_answer(self): return self.answer
def set_answer(self, answer): self.answer = answer
def hasContent_(self):
if (
super(MessageStep, self).hasContent_()
):
return True
else:
return False
def export(self, outfile, level, namespaceprefix_='', name_='MessageStep', namespacedef_='', pretty_print=True):
imported_ns_def_ = GenerateDSNamespaceDefs_.get('MessageStep')
if imported_ns_def_ is not None:
namespacedef_ = imported_ns_def_
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
if self.original_tagname_ is not None:
name_ = self.original_tagname_
showIndent(outfile, level, pretty_print)
outfile.write('<%s%s%s' % (namespaceprefix_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = set()
self.exportAttributes(outfile, level, already_processed, namespaceprefix_, name_='MessageStep')
if self.hasContent_():
outfile.write('>%s' % (eol_, ))
self.exportChildren(outfile, level + 1, namespaceprefix_='', name_='MessageStep', pretty_print=pretty_print)
outfile.write('</%s%s>%s' % (namespaceprefix_, name_, eol_))
else:
outfile.write('/>%s' % (eol_, ))
def exportAttributes(self, outfile, level, already_processed, namespaceprefix_='', name_='MessageStep'):
super(MessageStep, self).exportAttributes(outfile, level, already_processed, namespaceprefix_, name_='MessageStep')
if self.answer is not None and 'answer' not in already_processed:
already_processed.add('answer')
outfile.write(' answer=%s' % (self.gds_encode(self.gds_format_string(quote_attrib(self.answer), input_name='answer')), ))
def exportChildren(self, outfile, level, namespaceprefix_='', name_='MessageStep', fromsubclass_=False, pretty_print=True):
super(MessageStep, self).exportChildren(outfile, level, namespaceprefix_, name_, True, pretty_print=pretty_print)
def build(self, node):
already_processed = set()
self.buildAttributes(node, node.attrib, already_processed)
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
return self
def buildAttributes(self, node, attrs, already_processed):
value = find_attr_value_('answer', node)
if value is not None and 'answer' not in already_processed:
already_processed.add('answer')
self.answer = value
super(MessageStep, self).buildAttributes(node, attrs, already_processed)
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
super(MessageStep, self).buildChildren(child_, node, nodeName_, True)
# end class MessageStep
class WidgetStep(BaseMessageStep):
subclass = None
superclass = BaseMessageStep
def __init__(self, id=None, creationTimestamp=None, definition=None, previousStep=None, nextStep=None, message=None, button=None, receivedTimestamp=None, acknowledgedTimestamp=None, displayValue=None, formButton=None, extensiontype_=None):
self.original_tagname_ = None
super(WidgetStep, self).__init__(id, creationTimestamp, definition, previousStep, nextStep, message, button, receivedTimestamp, acknowledgedTimestamp, extensiontype_, )
self.displayValue = _cast(None, displayValue)
self.formButton = _cast(None, formButton)
self.extensiontype_ = extensiontype_
def factory(*args_, **kwargs_):
if CurrentSubclassModule_ is not None:
subclass = getSubclassFromModule_(
CurrentSubclassModule_, WidgetStep)
if subclass is not None:
return subclass(*args_, **kwargs_)
if WidgetStep.subclass:
return WidgetStep.subclass(*args_, **kwargs_)
else:
return WidgetStep(*args_, **kwargs_)
factory = staticmethod(factory)
def get_displayValue(self): return self.displayValue
def set_displayValue(self, displayValue): self.displayValue = displayValue
def get_formButton(self): return self.formButton
def set_formButton(self, formButton): self.formButton = formButton
def get_extensiontype_(self): return self.extensiontype_
def set_extensiontype_(self, extensiontype_): self.extensiontype_ = extensiontype_
    def validate_FormButton(self, value):
        # Validate type FormButton, a restriction on xs:string.
        if value is not None and Validate_simpletypes_:
            value = str(value)
            enumerations = ['positive', 'negative']
            if value not in enumerations:
                warnings_.warn('Value "%(value)s" does not match xsd enumeration restriction on FormButton' % {"value": value.encode("utf-8")})
def hasContent_(self):
if (
super(WidgetStep, self).hasContent_()
):
return True
else:
return False
def export(self, outfile, level, namespaceprefix_='', name_='WidgetStep', namespacedef_='', pretty_print=True):
imported_ns_def_ = GenerateDSNamespaceDefs_.get('WidgetStep')
if imported_ns_def_ is not None:
namespacedef_ = imported_ns_def_
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
if self.original_tagname_ is not None:
name_ = self.original_tagname_
showIndent(outfile, level, pretty_print)
outfile.write('<%s%s%s' % (namespaceprefix_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = set()
self.exportAttributes(outfile, level, already_processed, namespaceprefix_, name_='WidgetStep')
if self.hasContent_():
outfile.write('>%s' % (eol_, ))
self.exportChildren(outfile, level + 1, namespaceprefix_='', name_='WidgetStep', pretty_print=pretty_print)
outfile.write('</%s%s>%s' % (namespaceprefix_, name_, eol_))
else:
outfile.write('/>%s' % (eol_, ))
def exportAttributes(self, outfile, level, already_processed, namespaceprefix_='', name_='WidgetStep'):
super(WidgetStep, self).exportAttributes(outfile, level, already_processed, namespaceprefix_, name_='WidgetStep')
if self.displayValue is not None and 'displayValue' not in already_processed:
already_processed.add('displayValue')
outfile.write(' displayValue=%s' % (self.gds_encode(self.gds_format_string(quote_attrib(self.displayValue), input_name='displayValue')), ))
if self.formButton is not None and 'formButton' not in already_processed:
already_processed.add('formButton')
outfile.write(' formButton=%s' % (quote_attrib(self.formButton), ))
if self.extensiontype_ is not None and 'xsi:type' not in already_processed:
already_processed.add('xsi:type')
outfile.write(' xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"')
outfile.write(' xsi:type="%s"' % self.extensiontype_)
def exportChildren(self, outfile, level, namespaceprefix_='', name_='WidgetStep', fromsubclass_=False, pretty_print=True):
super(WidgetStep, self).exportChildren(outfile, level, namespaceprefix_, name_, True, pretty_print=pretty_print)
def build(self, node):
already_processed = set()
self.buildAttributes(node, node.attrib, already_processed)
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
return self
def buildAttributes(self, node, attrs, already_processed):
value = find_attr_value_('displayValue', node)
if value is not None and 'displayValue' not in already_processed:
already_processed.add('displayValue')
self.displayValue = value
value = find_attr_value_('formButton', node)
if value is not None and 'formButton' not in already_processed:
already_processed.add('formButton')
self.formButton = value
self.validate_FormButton(self.formButton) # validate type FormButton
value = find_attr_value_('xsi:type', node)
if value is not None and 'xsi:type' not in already_processed:
already_processed.add('xsi:type')
self.extensiontype_ = value
super(WidgetStep, self).buildAttributes(node, attrs, already_processed)
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
super(WidgetStep, self).buildChildren(child_, node, nodeName_, True)
# end class WidgetStep
class TextWidgetStep(WidgetStep):
subclass = None
superclass = WidgetStep
def __init__(self, id=None, creationTimestamp=None, definition=None, previousStep=None, nextStep=None, message=None, button=None, receivedTimestamp=None, acknowledgedTimestamp=None, displayValue=None, formButton=None, value=None, extensiontype_=None):
self.original_tagname_ = None
super(TextWidgetStep, self).__init__(id, creationTimestamp, definition, previousStep, nextStep, message, button, receivedTimestamp, acknowledgedTimestamp, displayValue, formButton, extensiontype_, )
self.value = _cast(None, value)
self.extensiontype_ = extensiontype_
def factory(*args_, **kwargs_):
if CurrentSubclassModule_ is not None:
subclass = getSubclassFromModule_(
CurrentSubclassModule_, TextWidgetStep)
if subclass is not None:
return subclass(*args_, **kwargs_)
if TextWidgetStep.subclass:
return TextWidgetStep.subclass(*args_, **kwargs_)
else:
return TextWidgetStep(*args_, **kwargs_)
factory = staticmethod(factory)
def get_value(self): return self.value
def set_value(self, value): self.value = value
def get_extensiontype_(self): return self.extensiontype_
def set_extensiontype_(self, extensiontype_): self.extensiontype_ = extensiontype_
def hasContent_(self):
if (
super(TextWidgetStep, self).hasContent_()
):
return True
else:
return False
def export(self, outfile, level, namespaceprefix_='', name_='TextWidgetStep', namespacedef_='', pretty_print=True):
imported_ns_def_ = GenerateDSNamespaceDefs_.get('TextWidgetStep')
if imported_ns_def_ is not None:
namespacedef_ = imported_ns_def_
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
if self.original_tagname_ is not None:
name_ = self.original_tagname_
showIndent(outfile, level, pretty_print)
outfile.write('<%s%s%s' % (namespaceprefix_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = set()
self.exportAttributes(outfile, level, already_processed, namespaceprefix_, name_='TextWidgetStep')
if self.hasContent_():
outfile.write('>%s' % (eol_, ))
self.exportChildren(outfile, level + 1, namespaceprefix_='', name_='TextWidgetStep', pretty_print=pretty_print)
outfile.write('</%s%s>%s' % (namespaceprefix_, name_, eol_))
else:
outfile.write('/>%s' % (eol_, ))
def exportAttributes(self, outfile, level, already_processed, namespaceprefix_='', name_='TextWidgetStep'):
super(TextWidgetStep, self).exportAttributes(outfile, level, already_processed, namespaceprefix_, name_='TextWidgetStep')
if self.value is not None and 'value' not in already_processed:
already_processed.add('value')
outfile.write(' value=%s' % (self.gds_encode(self.gds_format_string(quote_attrib(self.value), input_name='value')), ))
if self.extensiontype_ is not None and 'xsi:type' not in already_processed:
already_processed.add('xsi:type')
outfile.write(' xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"')
outfile.write(' xsi:type="%s"' % self.extensiontype_)
def exportChildren(self, outfile, level, namespaceprefix_='', name_='TextWidgetStep', fromsubclass_=False, pretty_print=True):
super(TextWidgetStep, self).exportChildren(outfile, level, namespaceprefix_, name_, True, pretty_print=pretty_print)
pass
def build(self, node):
already_processed = set()
self.buildAttributes(node, node.attrib, already_processed)
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
return self
def buildAttributes(self, node, attrs, already_processed):
value = find_attr_value_('value', node)
if value is not None and 'value' not in already_processed:
already_processed.add('value')
self.value = value
value = find_attr_value_('xsi:type', node)
if value is not None and 'xsi:type' not in already_processed:
already_processed.add('xsi:type')
self.extensiontype_ = value
super(TextWidgetStep, self).buildAttributes(node, attrs, already_processed)
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
super(TextWidgetStep, self).buildChildren(child_, node, nodeName_, True)
pass
# end class TextWidgetStep
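
# Usage sketch (editor's addition, not part of the generateDS output):
# serialise a TextWidgetStep to XML. export() accepts any object with a
# write() method; sys.stdout is used here for brevity. level=0 starts at the
# left margin and pretty_print=True (the default) appends newlines.
def _example_export_text_widget_step():
    import sys
    step = TextWidgetStep(displayValue='hello', value='hello')
    step.export(sys.stdout, 0, name_='TextWidgetStep')
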
class TextLineWidgetStep(TextWidgetStep):
subclass = None
superclass = TextWidgetStep
def __init__(self, id=None, creationTimestamp=None, definition=None, previousStep=None, nextStep=None, message=None, button=None, receivedTimestamp=None, acknowledgedTimestamp=None, displayValue=None, formButton=None, value=None):
self.original_tagname_ = None
super(TextLineWidgetStep, self).__init__(id, creationTimestamp, definition, previousStep, nextStep, message, button, receivedTimestamp, acknowledgedTimestamp, displayValue, formButton, value, )
def factory(*args_, **kwargs_):
if CurrentSubclassModule_ is not None:
subclass = getSubclassFromModule_(
CurrentSubclassModule_, TextLineWidgetStep)
if subclass is not None:
return subclass(*args_, **kwargs_)
if TextLineWidgetStep.subclass:
return TextLineWidgetStep.subclass(*args_, **kwargs_)
else:
return TextLineWidgetStep(*args_, **kwargs_)
factory = staticmethod(factory)
def hasContent_(self):
if (
super(TextLineWidgetStep, self).hasContent_()
):
return True
else:
return False
def export(self, outfile, level, namespaceprefix_='', name_='TextLineWidgetStep', namespacedef_='', pretty_print=True):
imported_ns_def_ = GenerateDSNamespaceDefs_.get('TextLineWidgetStep')
if imported_ns_def_ is not None:
namespacedef_ = imported_ns_def_
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
if self.original_tagname_ is not None:
name_ = self.original_tagname_
showIndent(outfile, level, pretty_print)
outfile.write('<%s%s%s' % (namespaceprefix_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = set()
self.exportAttributes(outfile, level, already_processed, namespaceprefix_, name_='TextLineWidgetStep')
if self.hasContent_():
outfile.write('>%s' % (eol_, ))
self.exportChildren(outfile, level + 1, namespaceprefix_='', name_='TextLineWidgetStep', pretty_print=pretty_print)
outfile.write('</%s%s>%s' % (namespaceprefix_, name_, eol_))
else:
outfile.write('/>%s' % (eol_, ))
def exportAttributes(self, outfile, level, already_processed, namespaceprefix_='', name_='TextLineWidgetStep'):
super(TextLineWidgetStep, self).exportAttributes(outfile, level, already_processed, namespaceprefix_, name_='TextLineWidgetStep')
def exportChildren(self, outfile, level, namespaceprefix_='', name_='TextLineWidgetStep', fromsubclass_=False, pretty_print=True):
super(TextLineWidgetStep, self).exportChildren(outfile, level, namespaceprefix_, name_, True, pretty_print=pretty_print)
pass
def build(self, node):
already_processed = set()
self.buildAttributes(node, node.attrib, already_processed)
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
return self
def buildAttributes(self, node, attrs, already_processed):
super(TextLineWidgetStep, self).buildAttributes(node, attrs, already_processed)
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
super(TextLineWidgetStep, self).buildChildren(child_, node, nodeName_, True)
pass
# end class TextLineWidgetStep
class TextBlockWidgetStep(TextWidgetStep):
subclass = None
superclass = TextWidgetStep
def __init__(self, id=None, creationTimestamp=None, definition=None, previousStep=None, nextStep=None, message=None, button=None, receivedTimestamp=None, acknowledgedTimestamp=None, displayValue=None, formButton=None, value=None):
self.original_tagname_ = None
super(TextBlockWidgetStep, self).__init__(id, creationTimestamp, definition, previousStep, nextStep, message, button, receivedTimestamp, acknowledgedTimestamp, displayValue, formButton, value, )
def factory(*args_, **kwargs_):
if CurrentSubclassModule_ is not None:
subclass = getSubclassFromModule_(
CurrentSubclassModule_, TextBlockWidgetStep)
if subclass is not None:
return subclass(*args_, **kwargs_)
if TextBlockWidgetStep.subclass:
return TextBlockWidgetStep.subclass(*args_, **kwargs_)
else:
return TextBlockWidgetStep(*args_, **kwargs_)
factory = staticmethod(factory)
def hasContent_(self):
if (
super(TextBlockWidgetStep, self).hasContent_()
):
return True
else:
return False
def export(self, outfile, level, namespaceprefix_='', name_='TextBlockWidgetStep', namespacedef_='', pretty_print=True):
imported_ns_def_ = GenerateDSNamespaceDefs_.get('TextBlockWidgetStep')
if imported_ns_def_ is not None:
namespacedef_ = imported_ns_def_
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
if self.original_tagname_ is not None:
name_ = self.original_tagname_
showIndent(outfile, level, pretty_print)
outfile.write('<%s%s%s' % (namespaceprefix_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = set()
self.exportAttributes(outfile, level, already_processed, namespaceprefix_, name_='TextBlockWidgetStep')
if self.hasContent_():
outfile.write('>%s' % (eol_, ))
self.exportChildren(outfile, level + 1, namespaceprefix_='', name_='TextBlockWidgetStep', pretty_print=pretty_print)
outfile.write('</%s%s>%s' % (namespaceprefix_, name_, eol_))
else:
outfile.write('/>%s' % (eol_, ))
def exportAttributes(self, outfile, level, already_processed, namespaceprefix_='', name_='TextBlockWidgetStep'):
super(TextBlockWidgetStep, self).exportAttributes(outfile, level, already_processed, namespaceprefix_, name_='TextBlockWidgetStep')
def exportChildren(self, outfile, level, namespaceprefix_='', name_='TextBlockWidgetStep', fromsubclass_=False, pretty_print=True):
super(TextBlockWidgetStep, self).exportChildren(outfile, level, namespaceprefix_, name_, True, pretty_print=pretty_print)
pass
def build(self, node):
already_processed = set()
self.buildAttributes(node, node.attrib, already_processed)
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
return self
def buildAttributes(self, node, attrs, already_processed):
super(TextBlockWidgetStep, self).buildAttributes(node, attrs, already_processed)
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
super(TextBlockWidgetStep, self).buildChildren(child_, node, nodeName_, True)
pass
# end class TextBlockWidgetStep
class TextAutoCompleteWidgetStep(TextWidgetStep):
subclass = None
superclass = TextWidgetStep
def __init__(self, id=None, creationTimestamp=None, definition=None, previousStep=None, nextStep=None, message=None, button=None, receivedTimestamp=None, acknowledgedTimestamp=None, displayValue=None, formButton=None, value=None):
self.original_tagname_ = None
super(TextAutoCompleteWidgetStep, self).__init__(id, creationTimestamp, definition, previousStep, nextStep, message, button, receivedTimestamp, acknowledgedTimestamp, displayValue, formButton, value, )
def factory(*args_, **kwargs_):
if CurrentSubclassModule_ is not None:
subclass = getSubclassFromModule_(
CurrentSubclassModule_, TextAutoCompleteWidgetStep)
if subclass is not None:
return subclass(*args_, **kwargs_)
if TextAutoCompleteWidgetStep.subclass:
return TextAutoCompleteWidgetStep.subclass(*args_, **kwargs_)
else:
return TextAutoCompleteWidgetStep(*args_, **kwargs_)
factory = staticmethod(factory)
def hasContent_(self):
if (
super(TextAutoCompleteWidgetStep, self).hasContent_()
):
return True
else:
return False
def export(self, outfile, level, namespaceprefix_='', name_='TextAutoCompleteWidgetStep', namespacedef_='', pretty_print=True):
imported_ns_def_ = GenerateDSNamespaceDefs_.get('TextAutoCompleteWidgetStep')
if imported_ns_def_ is not None:
namespacedef_ = imported_ns_def_
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
if self.original_tagname_ is not None:
name_ = self.original_tagname_
showIndent(outfile, level, pretty_print)
outfile.write('<%s%s%s' % (namespaceprefix_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = set()
self.exportAttributes(outfile, level, already_processed, namespaceprefix_, name_='TextAutoCompleteWidgetStep')
if self.hasContent_():
outfile.write('>%s' % (eol_, ))
self.exportChildren(outfile, level + 1, namespaceprefix_='', name_='TextAutoCompleteWidgetStep', pretty_print=pretty_print)
outfile.write('</%s%s>%s' % (namespaceprefix_, name_, eol_))
else:
outfile.write('/>%s' % (eol_, ))
def exportAttributes(self, outfile, level, already_processed, namespaceprefix_='', name_='TextAutoCompleteWidgetStep'):
super(TextAutoCompleteWidgetStep, self).exportAttributes(outfile, level, already_processed, namespaceprefix_, name_='TextAutoCompleteWidgetStep')
def exportChildren(self, outfile, level, namespaceprefix_='', name_='TextAutoCompleteWidgetStep', fromsubclass_=False, pretty_print=True):
super(TextAutoCompleteWidgetStep, self).exportChildren(outfile, level, namespaceprefix_, name_, True, pretty_print=pretty_print)
pass
def build(self, node):
already_processed = set()
self.buildAttributes(node, node.attrib, already_processed)
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
return self
def buildAttributes(self, node, attrs, already_processed):
super(TextAutoCompleteWidgetStep, self).buildAttributes(node, attrs, already_processed)
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
super(TextAutoCompleteWidgetStep, self).buildChildren(child_, node, nodeName_, True)
pass
# end class TextAutoCompleteWidgetStep
class SliderWidgetStep(WidgetStep):
subclass = None
superclass = WidgetStep
def __init__(self, id=None, creationTimestamp=None, definition=None, previousStep=None, nextStep=None, message=None, button=None, receivedTimestamp=None, acknowledgedTimestamp=None, displayValue=None, formButton=None, value=None):
self.original_tagname_ = None
super(SliderWidgetStep, self).__init__(id, creationTimestamp, definition, previousStep, nextStep, message, button, receivedTimestamp, acknowledgedTimestamp, displayValue, formButton, )
self.value = _cast(float, value)
def factory(*args_, **kwargs_):
if CurrentSubclassModule_ is not None:
subclass = getSubclassFromModule_(
CurrentSubclassModule_, SliderWidgetStep)
if subclass is not None:
return subclass(*args_, **kwargs_)
if SliderWidgetStep.subclass:
return SliderWidgetStep.subclass(*args_, **kwargs_)
else:
return SliderWidgetStep(*args_, **kwargs_)
factory = staticmethod(factory)
def get_value(self): return self.value
def set_value(self, value): self.value = value
def hasContent_(self):
if (
super(SliderWidgetStep, self).hasContent_()
):
return True
else:
return False
def export(self, outfile, level, namespaceprefix_='', name_='SliderWidgetStep', namespacedef_='', pretty_print=True):
imported_ns_def_ = GenerateDSNamespaceDefs_.get('SliderWidgetStep')
if imported_ns_def_ is not None:
namespacedef_ = imported_ns_def_
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
if self.original_tagname_ is not None:
name_ = self.original_tagname_
showIndent(outfile, level, pretty_print)
outfile.write('<%s%s%s' % (namespaceprefix_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = set()
self.exportAttributes(outfile, level, already_processed, namespaceprefix_, name_='SliderWidgetStep')
if self.hasContent_():
outfile.write('>%s' % (eol_, ))
self.exportChildren(outfile, level + 1, namespaceprefix_='', name_='SliderWidgetStep', pretty_print=pretty_print)
outfile.write('</%s%s>%s' % (namespaceprefix_, name_, eol_))
else:
outfile.write('/>%s' % (eol_, ))
def exportAttributes(self, outfile, level, already_processed, namespaceprefix_='', name_='SliderWidgetStep'):
super(SliderWidgetStep, self).exportAttributes(outfile, level, already_processed, namespaceprefix_, name_='SliderWidgetStep')
if self.value is not None and 'value' not in already_processed:
already_processed.add('value')
outfile.write(' value="%s"' % self.gds_format_float(self.value, input_name='value'))
def exportChildren(self, outfile, level, namespaceprefix_='', name_='SliderWidgetStep', fromsubclass_=False, pretty_print=True):
super(SliderWidgetStep, self).exportChildren(outfile, level, namespaceprefix_, name_, True, pretty_print=pretty_print)
pass
def build(self, node):
already_processed = set()
self.buildAttributes(node, node.attrib, already_processed)
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
return self
def buildAttributes(self, node, attrs, already_processed):
value = find_attr_value_('value', node)
if value is not None and 'value' not in already_processed:
already_processed.add('value')
try:
self.value = float(value)
except ValueError as exp:
raise ValueError('Bad float/double attribute (value): %s' % exp)
super(SliderWidgetStep, self).buildAttributes(node, attrs, already_processed)
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
super(SliderWidgetStep, self).buildChildren(child_, node, nodeName_, True)
pass
# end class SliderWidgetStep
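
# Usage sketch (editor's addition, not part of the generateDS output):
# SliderWidgetStep casts its value attribute to float, both in the constructor
# (via _cast) and in buildAttributes, which raises ValueError for a malformed
# number in the XML.
def _example_build_slider_widget_step():
    from xml.etree import ElementTree as ET
    node = ET.fromstring('<SliderWidgetStep value="2.5"/>')
    step = SliderWidgetStep().build(node)
    return step.get_value()  # 2.5 as a float
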
class RangeSliderWidgetStep(WidgetStep):
subclass = None
superclass = WidgetStep
def __init__(self, id=None, creationTimestamp=None, definition=None, previousStep=None, nextStep=None, message=None, button=None, receivedTimestamp=None, acknowledgedTimestamp=None, displayValue=None, formButton=None, value=None):
self.original_tagname_ = None
super(RangeSliderWidgetStep, self).__init__(id, creationTimestamp, definition, previousStep, nextStep, message, button, receivedTimestamp, acknowledgedTimestamp, displayValue, formButton, )
if value is None:
self.value = []
else:
self.value = value
def factory(*args_, **kwargs_):
if CurrentSubclassModule_ is not None:
subclass = getSubclassFromModule_(
CurrentSubclassModule_, RangeSliderWidgetStep)
if subclass is not None:
return subclass(*args_, **kwargs_)
if RangeSliderWidgetStep.subclass:
return RangeSliderWidgetStep.subclass(*args_, **kwargs_)
else:
return RangeSliderWidgetStep(*args_, **kwargs_)
factory = staticmethod(factory)
def get_value(self): return self.value
def set_value(self, value): self.value = value
def add_value(self, value): self.value.append(value)
def insert_value_at(self, index, value): self.value.insert(index, value)
def replace_value_at(self, index, value): self.value[index] = value
def hasContent_(self):
if (
self.value or
super(RangeSliderWidgetStep, self).hasContent_()
):
return True
else:
return False
def export(self, outfile, level, namespaceprefix_='', name_='RangeSliderWidgetStep', namespacedef_='', pretty_print=True):
imported_ns_def_ = GenerateDSNamespaceDefs_.get('RangeSliderWidgetStep')
if imported_ns_def_ is not None:
namespacedef_ = imported_ns_def_
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
if self.original_tagname_ is not None:
name_ = self.original_tagname_
showIndent(outfile, level, pretty_print)
outfile.write('<%s%s%s' % (namespaceprefix_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = set()
self.exportAttributes(outfile, level, already_processed, namespaceprefix_, name_='RangeSliderWidgetStep')
if self.hasContent_():
outfile.write('>%s' % (eol_, ))
self.exportChildren(outfile, level + 1, namespaceprefix_='', name_='RangeSliderWidgetStep', pretty_print=pretty_print)
showIndent(outfile, level, pretty_print)
outfile.write('</%s%s>%s' % (namespaceprefix_, name_, eol_))
else:
outfile.write('/>%s' % (eol_, ))
def exportAttributes(self, outfile, level, already_processed, namespaceprefix_='', name_='RangeSliderWidgetStep'):
super(RangeSliderWidgetStep, self).exportAttributes(outfile, level, already_processed, namespaceprefix_, name_='RangeSliderWidgetStep')
def exportChildren(self, outfile, level, namespaceprefix_='', name_='RangeSliderWidgetStep', fromsubclass_=False, pretty_print=True):
super(RangeSliderWidgetStep, self).exportChildren(outfile, level, namespaceprefix_, name_, True, pretty_print=pretty_print)
        # Each <value> child (a FloatValue) renders its own indentation and
        # line endings inside export(), so no local eol_ bookkeeping is needed.
        for value_ in self.value:
            value_.export(outfile, level, namespaceprefix_, name_='value', pretty_print=pretty_print)
def build(self, node):
already_processed = set()
self.buildAttributes(node, node.attrib, already_processed)
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
return self
def buildAttributes(self, node, attrs, already_processed):
super(RangeSliderWidgetStep, self).buildAttributes(node, attrs, already_processed)
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
if nodeName_ == 'value':
obj_ = FloatValue.factory()
obj_.build(child_)
self.value.append(obj_)
obj_.original_tagname_ = 'value'
super(RangeSliderWidgetStep, self).buildChildren(child_, node, nodeName_, True)
# end class RangeSliderWidgetStep
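
# Usage sketch (editor's addition, not part of the generateDS output): a
# RangeSliderWidgetStep keeps its <value> children in a plain Python list.
# FloatValue is another class generated earlier in this module; it is assumed
# here to accept a ``value`` keyword like the other generated types.
def _example_range_slider_values():
    step = RangeSliderWidgetStep()
    step.add_value(FloatValue.factory(value=1.0))
    step.add_value(FloatValue.factory(value=9.0))
    return len(step.get_value())  # 2
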
class PhotoUploadWidgetStep(WidgetStep):
subclass = None
superclass = WidgetStep
def __init__(self, id=None, creationTimestamp=None, definition=None, previousStep=None, nextStep=None, message=None, button=None, receivedTimestamp=None, acknowledgedTimestamp=None, displayValue=None, formButton=None, value=None):
self.original_tagname_ = None
super(PhotoUploadWidgetStep, self).__init__(id, creationTimestamp, definition, previousStep, nextStep, message, button, receivedTimestamp, acknowledgedTimestamp, displayValue, formButton, )
self.value = _cast(None, value)
def factory(*args_, **kwargs_):
if CurrentSubclassModule_ is not None:
subclass = getSubclassFromModule_(
CurrentSubclassModule_, PhotoUploadWidgetStep)
if subclass is not None:
return subclass(*args_, **kwargs_)
if PhotoUploadWidgetStep.subclass:
return PhotoUploadWidgetStep.subclass(*args_, **kwargs_)
else:
return PhotoUploadWidgetStep(*args_, **kwargs_)
factory = staticmethod(factory)
def get_value(self): return self.value
def set_value(self, value): self.value = value
def hasContent_(self):
if (
super(PhotoUploadWidgetStep, self).hasContent_()
):
return True
else:
return False
def export(self, outfile, level, namespaceprefix_='', name_='PhotoUploadWidgetStep', namespacedef_='', pretty_print=True):
imported_ns_def_ = GenerateDSNamespaceDefs_.get('PhotoUploadWidgetStep')
if imported_ns_def_ is not None:
namespacedef_ = imported_ns_def_
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
if self.original_tagname_ is not None:
name_ = self.original_tagname_
showIndent(outfile, level, pretty_print)
outfile.write('<%s%s%s' % (namespaceprefix_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = set()
self.exportAttributes(outfile, level, already_processed, namespaceprefix_, name_='PhotoUploadWidgetStep')
if self.hasContent_():
outfile.write('>%s' % (eol_, ))
self.exportChildren(outfile, level + 1, namespaceprefix_='', name_='PhotoUploadWidgetStep', pretty_print=pretty_print)
outfile.write('</%s%s>%s' % (namespaceprefix_, name_, eol_))
else:
outfile.write('/>%s' % (eol_, ))
def exportAttributes(self, outfile, level, already_processed, namespaceprefix_='', name_='PhotoUploadWidgetStep'):
super(PhotoUploadWidgetStep, self).exportAttributes(outfile, level, already_processed, namespaceprefix_, name_='PhotoUploadWidgetStep')
if self.value is not None and 'value' not in already_processed:
already_processed.add('value')
outfile.write(' value=%s' % (self.gds_encode(self.gds_format_string(quote_attrib(self.value), input_name='value')), ))
def exportChildren(self, outfile, level, namespaceprefix_='', name_='PhotoUploadWidgetStep', fromsubclass_=False, pretty_print=True):
super(PhotoUploadWidgetStep, self).exportChildren(outfile, level, namespaceprefix_, name_, True, pretty_print=pretty_print)
pass
def build(self, node):
already_processed = set()
self.buildAttributes(node, node.attrib, already_processed)
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
return self
def buildAttributes(self, node, attrs, already_processed):
value = find_attr_value_('value', node)
if value is not None and 'value' not in already_processed:
already_processed.add('value')
self.value = value
super(PhotoUploadWidgetStep, self).buildAttributes(node, attrs, already_processed)
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
super(PhotoUploadWidgetStep, self).buildChildren(child_, node, nodeName_, True)
pass
# end class PhotoUploadWidgetStep
class GPSLocationWidgetStep(WidgetStep):
subclass = None
superclass = WidgetStep
def __init__(self, id=None, creationTimestamp=None, definition=None, previousStep=None, nextStep=None, message=None, button=None, receivedTimestamp=None, acknowledgedTimestamp=None, displayValue=None, formButton=None, horizontalAccuracy=None, verticalAccuracy=None, latitude=None, longitude=None, altitude=None, timestamp=None):
self.original_tagname_ = None
super(GPSLocationWidgetStep, self).__init__(id, creationTimestamp, definition, previousStep, nextStep, message, button, receivedTimestamp, acknowledgedTimestamp, displayValue, formButton, )
self.horizontalAccuracy = _cast(float, horizontalAccuracy)
self.verticalAccuracy = _cast(float, verticalAccuracy)
self.latitude = _cast(float, latitude)
self.longitude = _cast(float, longitude)
self.altitude = _cast(float, altitude)
self.timestamp = _cast(int, timestamp)
def factory(*args_, **kwargs_):
if CurrentSubclassModule_ is not None:
subclass = getSubclassFromModule_(
CurrentSubclassModule_, GPSLocationWidgetStep)
if subclass is not None:
return subclass(*args_, **kwargs_)
if GPSLocationWidgetStep.subclass:
return GPSLocationWidgetStep.subclass(*args_, **kwargs_)
else:
return GPSLocationWidgetStep(*args_, **kwargs_)
factory = staticmethod(factory)
def get_horizontalAccuracy(self): return self.horizontalAccuracy
def set_horizontalAccuracy(self, horizontalAccuracy): self.horizontalAccuracy = horizontalAccuracy
def get_verticalAccuracy(self): return self.verticalAccuracy
def set_verticalAccuracy(self, verticalAccuracy): self.verticalAccuracy = verticalAccuracy
def get_latitude(self): return self.latitude
def set_latitude(self, latitude): self.latitude = latitude
def get_longitude(self): return self.longitude
def set_longitude(self, longitude): self.longitude = longitude
def get_altitude(self): return self.altitude
def set_altitude(self, altitude): self.altitude = altitude
def get_timestamp(self): return self.timestamp
def set_timestamp(self, timestamp): self.timestamp = timestamp
def hasContent_(self):
if (
super(GPSLocationWidgetStep, self).hasContent_()
):
return True
else:
return False
def export(self, outfile, level, namespaceprefix_='', name_='GPSLocationWidgetStep', namespacedef_='', pretty_print=True):
imported_ns_def_ = GenerateDSNamespaceDefs_.get('GPSLocationWidgetStep')
if imported_ns_def_ is not None:
namespacedef_ = imported_ns_def_
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
if self.original_tagname_ is not None:
name_ = self.original_tagname_
showIndent(outfile, level, pretty_print)
outfile.write('<%s%s%s' % (namespaceprefix_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = set()
self.exportAttributes(outfile, level, already_processed, namespaceprefix_, name_='GPSLocationWidgetStep')
if self.hasContent_():
outfile.write('>%s' % (eol_, ))
self.exportChildren(outfile, level + 1, namespaceprefix_='', name_='GPSLocationWidgetStep', pretty_print=pretty_print)
outfile.write('</%s%s>%s' % (namespaceprefix_, name_, eol_))
else:
outfile.write('/>%s' % (eol_, ))
def exportAttributes(self, outfile, level, already_processed, namespaceprefix_='', name_='GPSLocationWidgetStep'):
super(GPSLocationWidgetStep, self).exportAttributes(outfile, level, already_processed, namespaceprefix_, name_='GPSLocationWidgetStep')
if self.horizontalAccuracy is not None and 'horizontalAccuracy' not in already_processed:
already_processed.add('horizontalAccuracy')
outfile.write(' horizontalAccuracy="%s"' % self.gds_format_float(self.horizontalAccuracy, input_name='horizontalAccuracy'))
if self.verticalAccuracy is not None and 'verticalAccuracy' not in already_processed:
already_processed.add('verticalAccuracy')
outfile.write(' verticalAccuracy="%s"' % self.gds_format_float(self.verticalAccuracy, input_name='verticalAccuracy'))
if self.latitude is not None and 'latitude' not in already_processed:
already_processed.add('latitude')
outfile.write(' latitude="%s"' % self.gds_format_float(self.latitude, input_name='latitude'))
if self.longitude is not None and 'longitude' not in already_processed:
already_processed.add('longitude')
outfile.write(' longitude="%s"' % self.gds_format_float(self.longitude, input_name='longitude'))
if self.altitude is not None and 'altitude' not in already_processed:
already_processed.add('altitude')
outfile.write(' altitude="%s"' % self.gds_format_float(self.altitude, input_name='altitude'))
if self.timestamp is not None and 'timestamp' not in already_processed:
already_processed.add('timestamp')
outfile.write(' timestamp="%s"' % self.gds_format_integer(self.timestamp, input_name='timestamp'))
def exportChildren(self, outfile, level, namespaceprefix_='', name_='GPSLocationWidgetStep', fromsubclass_=False, pretty_print=True):
super(GPSLocationWidgetStep, self).exportChildren(outfile, level, namespaceprefix_, name_, True, pretty_print=pretty_print)
pass
def build(self, node):
already_processed = set()
self.buildAttributes(node, node.attrib, already_processed)
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
return self
def buildAttributes(self, node, attrs, already_processed):
value = find_attr_value_('horizontalAccuracy', node)
if value is not None and 'horizontalAccuracy' not in already_processed:
already_processed.add('horizontalAccuracy')
try:
self.horizontalAccuracy = float(value)
except ValueError as exp:
raise ValueError('Bad float/double attribute (horizontalAccuracy): %s' % exp)
value = find_attr_value_('verticalAccuracy', node)
if value is not None and 'verticalAccuracy' not in already_processed:
already_processed.add('verticalAccuracy')
try:
self.verticalAccuracy = float(value)
except ValueError as exp:
raise ValueError('Bad float/double attribute (verticalAccuracy): %s' % exp)
value = find_attr_value_('latitude', node)
if value is not None and 'latitude' not in already_processed:
already_processed.add('latitude')
try:
self.latitude = float(value)
except ValueError as exp:
raise ValueError('Bad float/double attribute (latitude): %s' % exp)
value = find_attr_value_('longitude', node)
if value is not None and 'longitude' not in already_processed:
already_processed.add('longitude')
try:
self.longitude = float(value)
except ValueError as exp:
raise ValueError('Bad float/double attribute (longitude): %s' % exp)
value = find_attr_value_('altitude', node)
if value is not None and 'altitude' not in already_processed:
already_processed.add('altitude')
try:
self.altitude = float(value)
except ValueError as exp:
raise ValueError('Bad float/double attribute (altitude): %s' % exp)
value = find_attr_value_('timestamp', node)
if value is not None and 'timestamp' not in already_processed:
already_processed.add('timestamp')
try:
self.timestamp = int(value)
except ValueError as exp:
raise_parse_error(node, 'Bad integer attribute: %s' % exp)
super(GPSLocationWidgetStep, self).buildAttributes(node, attrs, already_processed)
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
super(GPSLocationWidgetStep, self).buildChildren(child_, node, nodeName_, True)
pass
# end class GPSLocationWidgetStep
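
# Usage sketch (editor's addition, not part of the generateDS output):
# GPSLocationWidgetStep casts its coordinates to float and its timestamp to
# int while parsing, so a round-trip through build() yields numbers, not
# strings.
def _example_gps_round_trip():
    from xml.etree import ElementTree as ET
    node = ET.fromstring('<GPSLocationWidgetStep latitude="51.05" '
                         'longitude="3.72" timestamp="1514764800"/>')
    step = GPSLocationWidgetStep().build(node)
    return step.get_latitude(), step.get_longitude(), step.get_timestamp()
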
class MyDigiPassEidProfile(GeneratedsSuper):
subclass = None
superclass = None
def __init__(self, firstName=None, firstName3=None, lastName=None, gender=None, nationality=None, dateOfBirth=None, locationOfBirth=None, nobleCondition=None, issuingMunicipality=None, cardNumber=None, chipNumber=None, validityBeginsAt=None, validityEndsAt=None, createdAt=None):
self.original_tagname_ = None
self.firstName = _cast(None, firstName)
self.firstName3 = _cast(None, firstName3)
self.lastName = _cast(None, lastName)
self.gender = _cast(None, gender)
self.nationality = _cast(None, nationality)
self.dateOfBirth = _cast(None, dateOfBirth)
self.locationOfBirth = _cast(None, locationOfBirth)
self.nobleCondition = _cast(None, nobleCondition)
self.issuingMunicipality = _cast(None, issuingMunicipality)
self.cardNumber = _cast(None, cardNumber)
self.chipNumber = _cast(None, chipNumber)
self.validityBeginsAt = _cast(None, validityBeginsAt)
self.validityEndsAt = _cast(None, validityEndsAt)
self.createdAt = _cast(None, createdAt)
def factory(*args_, **kwargs_):
if CurrentSubclassModule_ is not None:
subclass = getSubclassFromModule_(
CurrentSubclassModule_, MyDigiPassEidProfile)
if subclass is not None:
return subclass(*args_, **kwargs_)
if MyDigiPassEidProfile.subclass:
return MyDigiPassEidProfile.subclass(*args_, **kwargs_)
else:
return MyDigiPassEidProfile(*args_, **kwargs_)
factory = staticmethod(factory)
def get_firstName(self): return self.firstName
def set_firstName(self, firstName): self.firstName = firstName
def get_firstName3(self): return self.firstName3
def set_firstName3(self, firstName3): self.firstName3 = firstName3
def get_lastName(self): return self.lastName
def set_lastName(self, lastName): self.lastName = lastName
def get_gender(self): return self.gender
def set_gender(self, gender): self.gender = gender
def get_nationality(self): return self.nationality
def set_nationality(self, nationality): self.nationality = nationality
def get_dateOfBirth(self): return self.dateOfBirth
def set_dateOfBirth(self, dateOfBirth): self.dateOfBirth = dateOfBirth
def get_locationOfBirth(self): return self.locationOfBirth
def set_locationOfBirth(self, locationOfBirth): self.locationOfBirth = locationOfBirth
def get_nobleCondition(self): return self.nobleCondition
def set_nobleCondition(self, nobleCondition): self.nobleCondition = nobleCondition
def get_issuingMunicipality(self): return self.issuingMunicipality
def set_issuingMunicipality(self, issuingMunicipality): self.issuingMunicipality = issuingMunicipality
def get_cardNumber(self): return self.cardNumber
def set_cardNumber(self, cardNumber): self.cardNumber = cardNumber
def get_chipNumber(self): return self.chipNumber
def set_chipNumber(self, chipNumber): self.chipNumber = chipNumber
def get_validityBeginsAt(self): return self.validityBeginsAt
def set_validityBeginsAt(self, validityBeginsAt): self.validityBeginsAt = validityBeginsAt
def get_validityEndsAt(self): return self.validityEndsAt
def set_validityEndsAt(self, validityEndsAt): self.validityEndsAt = validityEndsAt
def get_createdAt(self): return self.createdAt
def set_createdAt(self, createdAt): self.createdAt = createdAt
    def hasContent_(self):
        # Attribute-only type: it has no element children, so it always
        # serialises as an empty (self-closing) element.
        return False
def export(self, outfile, level, namespaceprefix_='', name_='MyDigiPassEidProfile', namespacedef_='', pretty_print=True):
imported_ns_def_ = GenerateDSNamespaceDefs_.get('MyDigiPassEidProfile')
if imported_ns_def_ is not None:
namespacedef_ = imported_ns_def_
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
if self.original_tagname_ is not None:
name_ = self.original_tagname_
showIndent(outfile, level, pretty_print)
outfile.write('<%s%s%s' % (namespaceprefix_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = set()
self.exportAttributes(outfile, level, already_processed, namespaceprefix_, name_='MyDigiPassEidProfile')
if self.hasContent_():
outfile.write('>%s' % (eol_, ))
self.exportChildren(outfile, level + 1, namespaceprefix_='', name_='MyDigiPassEidProfile', pretty_print=pretty_print)
outfile.write('</%s%s>%s' % (namespaceprefix_, name_, eol_))
else:
outfile.write('/>%s' % (eol_, ))
def exportAttributes(self, outfile, level, already_processed, namespaceprefix_='', name_='MyDigiPassEidProfile'):
if self.firstName is not None and 'firstName' not in already_processed:
already_processed.add('firstName')
outfile.write(' firstName=%s' % (self.gds_encode(self.gds_format_string(quote_attrib(self.firstName), input_name='firstName')), ))
if self.firstName3 is not None and 'firstName3' not in already_processed:
already_processed.add('firstName3')
outfile.write(' firstName3=%s' % (self.gds_encode(self.gds_format_string(quote_attrib(self.firstName3), input_name='firstName3')), ))
if self.lastName is not None and 'lastName' not in already_processed:
already_processed.add('lastName')
outfile.write(' lastName=%s' % (self.gds_encode(self.gds_format_string(quote_attrib(self.lastName), input_name='lastName')), ))
if self.gender is not None and 'gender' not in already_processed:
already_processed.add('gender')
outfile.write(' gender=%s' % (self.gds_encode(self.gds_format_string(quote_attrib(self.gender), input_name='gender')), ))
if self.nationality is not None and 'nationality' not in already_processed:
already_processed.add('nationality')
outfile.write(' nationality=%s' % (self.gds_encode(self.gds_format_string(quote_attrib(self.nationality), input_name='nationality')), ))
if self.dateOfBirth is not None and 'dateOfBirth' not in already_processed:
already_processed.add('dateOfBirth')
outfile.write(' dateOfBirth=%s' % (self.gds_encode(self.gds_format_string(quote_attrib(self.dateOfBirth), input_name='dateOfBirth')), ))
if self.locationOfBirth is not None and 'locationOfBirth' not in already_processed:
already_processed.add('locationOfBirth')
outfile.write(' locationOfBirth=%s' % (self.gds_encode(self.gds_format_string(quote_attrib(self.locationOfBirth), input_name='locationOfBirth')), ))
if self.nobleCondition is not None and 'nobleCondition' not in already_processed:
already_processed.add('nobleCondition')
outfile.write(' nobleCondition=%s' % (self.gds_encode(self.gds_format_string(quote_attrib(self.nobleCondition), input_name='nobleCondition')), ))
if self.issuingMunicipality is not None and 'issuingMunicipality' not in already_processed:
already_processed.add('issuingMunicipality')
outfile.write(' issuingMunicipality=%s' % (self.gds_encode(self.gds_format_string(quote_attrib(self.issuingMunicipality), input_name='issuingMunicipality')), ))
if self.cardNumber is not None and 'cardNumber' not in already_processed:
already_processed.add('cardNumber')
outfile.write(' cardNumber=%s' % (self.gds_encode(self.gds_format_string(quote_attrib(self.cardNumber), input_name='cardNumber')), ))
if self.chipNumber is not None and 'chipNumber' not in already_processed:
already_processed.add('chipNumber')
outfile.write(' chipNumber=%s' % (self.gds_encode(self.gds_format_string(quote_attrib(self.chipNumber), input_name='chipNumber')), ))
if self.validityBeginsAt is not None and 'validityBeginsAt' not in already_processed:
already_processed.add('validityBeginsAt')
outfile.write(' validityBeginsAt=%s' % (self.gds_encode(self.gds_format_string(quote_attrib(self.validityBeginsAt), input_name='validityBeginsAt')), ))
if self.validityEndsAt is not None and 'validityEndsAt' not in already_processed:
already_processed.add('validityEndsAt')
outfile.write(' validityEndsAt=%s' % (self.gds_encode(self.gds_format_string(quote_attrib(self.validityEndsAt), input_name='validityEndsAt')), ))
if self.createdAt is not None and 'createdAt' not in already_processed:
already_processed.add('createdAt')
outfile.write(' createdAt=%s' % (self.gds_encode(self.gds_format_string(quote_attrib(self.createdAt), input_name='createdAt')), ))
def exportChildren(self, outfile, level, namespaceprefix_='', name_='MyDigiPassEidProfile', fromsubclass_=False, pretty_print=True):
pass
def build(self, node):
already_processed = set()
self.buildAttributes(node, node.attrib, already_processed)
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
return self
def buildAttributes(self, node, attrs, already_processed):
value = find_attr_value_('firstName', node)
if value is not None and 'firstName' not in already_processed:
already_processed.add('firstName')
self.firstName = value
value = find_attr_value_('firstName3', node)
if value is not None and 'firstName3' not in already_processed:
already_processed.add('firstName3')
self.firstName3 = value
value = find_attr_value_('lastName', node)
if value is not None and 'lastName' not in already_processed:
already_processed.add('lastName')
self.lastName = value
value = find_attr_value_('gender', node)
if value is not None and 'gender' not in already_processed:
already_processed.add('gender')
self.gender = value
value = find_attr_value_('nationality', node)
if value is not None and 'nationality' not in already_processed:
already_processed.add('nationality')
self.nationality = value
value = find_attr_value_('dateOfBirth', node)
if value is not None and 'dateOfBirth' not in already_processed:
already_processed.add('dateOfBirth')
self.dateOfBirth = value
value = find_attr_value_('locationOfBirth', node)
if value is not None and 'locationOfBirth' not in already_processed:
already_processed.add('locationOfBirth')
self.locationOfBirth = value
value = find_attr_value_('nobleCondition', node)
if value is not None and 'nobleCondition' not in already_processed:
already_processed.add('nobleCondition')
self.nobleCondition = value
value = find_attr_value_('issuingMunicipality', node)
if value is not None and 'issuingMunicipality' not in already_processed:
already_processed.add('issuingMunicipality')
self.issuingMunicipality = value
value = find_attr_value_('cardNumber', node)
if value is not None and 'cardNumber' not in already_processed:
already_processed.add('cardNumber')
self.cardNumber = value
value = find_attr_value_('chipNumber', node)
if value is not None and 'chipNumber' not in already_processed:
already_processed.add('chipNumber')
self.chipNumber = value
value = find_attr_value_('validityBeginsAt', node)
if value is not None and 'validityBeginsAt' not in already_processed:
already_processed.add('validityBeginsAt')
self.validityBeginsAt = value
value = find_attr_value_('validityEndsAt', node)
if value is not None and 'validityEndsAt' not in already_processed:
already_processed.add('validityEndsAt')
self.validityEndsAt = value
value = find_attr_value_('createdAt', node)
if value is not None and 'createdAt' not in already_processed:
already_processed.add('createdAt')
self.createdAt = value
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
pass
# end class MyDigiPassEidProfile
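
# Usage sketch (editor's addition, not part of the generateDS output):
# MyDigiPassEidProfile is an attribute-only type, so export() emits a single
# self-closing element and writes only the attributes that are not None.
def _example_export_eid_profile():
    import sys
    profile = MyDigiPassEidProfile(firstName='Ada', lastName='Lovelace',
                                   nationality='GB')
    profile.export(sys.stdout, 0, name_='MyDigiPassEidProfile')
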
class MyDigiPassEidAddress(GeneratedsSuper):
subclass = None
superclass = None
def __init__(self, streetAndNumber=None, zipCode=None, municipality=None):
self.original_tagname_ = None
self.streetAndNumber = _cast(None, streetAndNumber)
self.zipCode = _cast(None, zipCode)
self.municipality = _cast(None, municipality)
def factory(*args_, **kwargs_):
if CurrentSubclassModule_ is not None:
subclass = getSubclassFromModule_(
CurrentSubclassModule_, MyDigiPassEidAddress)
if subclass is not None:
return subclass(*args_, **kwargs_)
if MyDigiPassEidAddress.subclass:
return MyDigiPassEidAddress.subclass(*args_, **kwargs_)
else:
return MyDigiPassEidAddress(*args_, **kwargs_)
factory = staticmethod(factory)
def get_streetAndNumber(self): return self.streetAndNumber
def set_streetAndNumber(self, streetAndNumber): self.streetAndNumber = streetAndNumber
def get_zipCode(self): return self.zipCode
def set_zipCode(self, zipCode): self.zipCode = zipCode
def get_municipality(self): return self.municipality
def set_municipality(self, municipality): self.municipality = municipality
    def hasContent_(self):
        # Attribute-only type: it has no element children, so it always
        # serialises as an empty (self-closing) element.
        return False
def export(self, outfile, level, namespaceprefix_='', name_='MyDigiPassEidAddress', namespacedef_='', pretty_print=True):
imported_ns_def_ = GenerateDSNamespaceDefs_.get('MyDigiPassEidAddress')
if imported_ns_def_ is not None:
namespacedef_ = imported_ns_def_
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
if self.original_tagname_ is not None:
name_ = self.original_tagname_
showIndent(outfile, level, pretty_print)
outfile.write('<%s%s%s' % (namespaceprefix_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = set()
self.exportAttributes(outfile, level, already_processed, namespaceprefix_, name_='MyDigiPassEidAddress')
if self.hasContent_():
outfile.write('>%s' % (eol_, ))
self.exportChildren(outfile, level + 1, namespaceprefix_='', name_='MyDigiPassEidAddress', pretty_print=pretty_print)
outfile.write('</%s%s>%s' % (namespaceprefix_, name_, eol_))
else:
outfile.write('/>%s' % (eol_, ))
def exportAttributes(self, outfile, level, already_processed, namespaceprefix_='', name_='MyDigiPassEidAddress'):
if self.streetAndNumber is not None and 'streetAndNumber' not in already_processed:
already_processed.add('streetAndNumber')
outfile.write(' streetAndNumber=%s' % (self.gds_encode(self.gds_format_string(quote_attrib(self.streetAndNumber), input_name='streetAndNumber')), ))
if self.zipCode is not None and 'zipCode' not in already_processed:
already_processed.add('zipCode')
outfile.write(' zipCode=%s' % (self.gds_encode(self.gds_format_string(quote_attrib(self.zipCode), input_name='zipCode')), ))
if self.municipality is not None and 'municipality' not in already_processed:
already_processed.add('municipality')
outfile.write(' municipality=%s' % (self.gds_encode(self.gds_format_string(quote_attrib(self.municipality), input_name='municipality')), ))
def exportChildren(self, outfile, level, namespaceprefix_='', name_='MyDigiPassEidAddress', fromsubclass_=False, pretty_print=True):
pass
def build(self, node):
already_processed = set()
self.buildAttributes(node, node.attrib, already_processed)
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
return self
def buildAttributes(self, node, attrs, already_processed):
value = find_attr_value_('streetAndNumber', node)
if value is not None and 'streetAndNumber' not in already_processed:
already_processed.add('streetAndNumber')
self.streetAndNumber = value
value = find_attr_value_('zipCode', node)
if value is not None and 'zipCode' not in already_processed:
already_processed.add('zipCode')
self.zipCode = value
value = find_attr_value_('municipality', node)
if value is not None and 'municipality' not in already_processed:
already_processed.add('municipality')
self.municipality = value
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
pass
# end class MyDigiPassEidAddress
class MyDigiPassProfile(GeneratedsSuper):
subclass = None
superclass = None
def __init__(self, updatedAt=None, firstName=None, lastName=None, bornOn=None, preferredLocale=None, uuid=None):
self.original_tagname_ = None
self.updatedAt = _cast(None, updatedAt)
self.firstName = _cast(None, firstName)
self.lastName = _cast(None, lastName)
self.bornOn = _cast(None, bornOn)
self.preferredLocale = _cast(None, preferredLocale)
self.uuid = _cast(None, uuid)
def factory(*args_, **kwargs_):
if CurrentSubclassModule_ is not None:
subclass = getSubclassFromModule_(
CurrentSubclassModule_, MyDigiPassProfile)
if subclass is not None:
return subclass(*args_, **kwargs_)
if MyDigiPassProfile.subclass:
return MyDigiPassProfile.subclass(*args_, **kwargs_)
else:
return MyDigiPassProfile(*args_, **kwargs_)
factory = staticmethod(factory)
def get_updatedAt(self): return self.updatedAt
def set_updatedAt(self, updatedAt): self.updatedAt = updatedAt
def get_firstName(self): return self.firstName
def set_firstName(self, firstName): self.firstName = firstName
def get_lastName(self): return self.lastName
def set_lastName(self, lastName): self.lastName = lastName
def get_bornOn(self): return self.bornOn
def set_bornOn(self, bornOn): self.bornOn = bornOn
def get_preferredLocale(self): return self.preferredLocale
def set_preferredLocale(self, preferredLocale): self.preferredLocale = preferredLocale
def get_uuid(self): return self.uuid
def set_uuid(self, uuid): self.uuid = uuid
    def hasContent_(self):
        # Attribute-only type: it has no element children, so it always
        # serialises as an empty (self-closing) element.
        return False
def export(self, outfile, level, namespaceprefix_='', name_='MyDigiPassProfile', namespacedef_='', pretty_print=True):
imported_ns_def_ = GenerateDSNamespaceDefs_.get('MyDigiPassProfile')
if imported_ns_def_ is not None:
namespacedef_ = imported_ns_def_
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
if self.original_tagname_ is not None:
name_ = self.original_tagname_
showIndent(outfile, level, pretty_print)
outfile.write('<%s%s%s' % (namespaceprefix_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = set()
self.exportAttributes(outfile, level, already_processed, namespaceprefix_, name_='MyDigiPassProfile')
if self.hasContent_():
outfile.write('>%s' % (eol_, ))
self.exportChildren(outfile, level + 1, namespaceprefix_='', name_='MyDigiPassProfile', pretty_print=pretty_print)
outfile.write('</%s%s>%s' % (namespaceprefix_, name_, eol_))
else:
outfile.write('/>%s' % (eol_, ))
def exportAttributes(self, outfile, level, already_processed, namespaceprefix_='', name_='MyDigiPassProfile'):
if self.updatedAt is not None and 'updatedAt' not in already_processed:
already_processed.add('updatedAt')
outfile.write(' updatedAt=%s' % (self.gds_encode(self.gds_format_string(quote_attrib(self.updatedAt), input_name='updatedAt')), ))
if self.firstName is not None and 'firstName' not in already_processed:
already_processed.add('firstName')
outfile.write(' firstName=%s' % (self.gds_encode(self.gds_format_string(quote_attrib(self.firstName), input_name='firstName')), ))
if self.lastName is not None and 'lastName' not in already_processed:
already_processed.add('lastName')
outfile.write(' lastName=%s' % (self.gds_encode(self.gds_format_string(quote_attrib(self.lastName), input_name='lastName')), ))
if self.bornOn is not None and 'bornOn' not in already_processed:
already_processed.add('bornOn')
outfile.write(' bornOn=%s' % (self.gds_encode(self.gds_format_string(quote_attrib(self.bornOn), input_name='bornOn')), ))
if self.preferredLocale is not None and 'preferredLocale' not in already_processed:
already_processed.add('preferredLocale')
outfile.write(' preferredLocale=%s' % (self.gds_encode(self.gds_format_string(quote_attrib(self.preferredLocale), input_name='preferredLocale')), ))
if self.uuid is not None and 'uuid' not in already_processed:
already_processed.add('uuid')
outfile.write(' uuid=%s' % (self.gds_encode(self.gds_format_string(quote_attrib(self.uuid), input_name='uuid')), ))
def exportChildren(self, outfile, level, namespaceprefix_='', name_='MyDigiPassProfile', fromsubclass_=False, pretty_print=True):
pass
def build(self, node):
already_processed = set()
self.buildAttributes(node, node.attrib, already_processed)
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
return self
def buildAttributes(self, node, attrs, already_processed):
value = find_attr_value_('updatedAt', node)
if value is not None and 'updatedAt' not in already_processed:
already_processed.add('updatedAt')
self.updatedAt = value
value = find_attr_value_('firstName', node)
if value is not None and 'firstName' not in already_processed:
already_processed.add('firstName')
self.firstName = value
value = find_attr_value_('lastName', node)
if value is not None and 'lastName' not in already_processed:
already_processed.add('lastName')
self.lastName = value
value = find_attr_value_('bornOn', node)
if value is not None and 'bornOn' not in already_processed:
already_processed.add('bornOn')
self.bornOn = value
value = find_attr_value_('preferredLocale', node)
if value is not None and 'preferredLocale' not in already_processed:
already_processed.add('preferredLocale')
self.preferredLocale = value
value = find_attr_value_('uuid', node)
if value is not None and 'uuid' not in already_processed:
already_processed.add('uuid')
self.uuid = value
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
pass
# end class MyDigiPassProfile
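
# Usage sketch (editor's addition, not part of the generateDS output): every
# generated factory() consults CurrentSubclassModule_ first and then the
# class-level ``subclass`` hook, so an application can substitute an enriched
# subclass without editing this generated file. The hook is undone afterwards
# to keep the demo side-effect free.
def _example_subclass_hook():
    class RichProfile(MyDigiPassProfile):
        def full_name(self):
            return '%s %s' % (self.firstName or '', self.lastName or '')
    MyDigiPassProfile.subclass = RichProfile
    try:
        obj = MyDigiPassProfile.factory(firstName='Ada', lastName='Lovelace')
        return obj.full_name()  # 'Ada Lovelace'
    finally:
        MyDigiPassProfile.subclass = None
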
class MyDigiPassAddress(GeneratedsSuper):
subclass = None
superclass = None
def __init__(self, address1=None, address2=None, city=None, zip=None, country=None, state=None):
self.original_tagname_ = None
self.address1 = _cast(None, address1)
self.address2 = _cast(None, address2)
self.city = _cast(None, city)
self.zip = _cast(None, zip)
self.country = _cast(None, country)
self.state = _cast(None, state)
def factory(*args_, **kwargs_):
if CurrentSubclassModule_ is not None:
subclass = getSubclassFromModule_(
CurrentSubclassModule_, MyDigiPassAddress)
if subclass is not None:
return subclass(*args_, **kwargs_)
if MyDigiPassAddress.subclass:
return MyDigiPassAddress.subclass(*args_, **kwargs_)
else:
return MyDigiPassAddress(*args_, **kwargs_)
factory = staticmethod(factory)
def get_address1(self): return self.address1
def set_address1(self, address1): self.address1 = address1
def get_address2(self): return self.address2
def set_address2(self, address2): self.address2 = address2
def get_city(self): return self.city
def set_city(self, city): self.city = city
def get_zip(self): return self.zip
def set_zip(self, zip): self.zip = zip
def get_country(self): return self.country
def set_country(self, country): self.country = country
def get_state(self): return self.state
def set_state(self, state): self.state = state
    def hasContent_(self):
        # Attribute-only type: it has no element children, so it always
        # serialises as an empty (self-closing) element.
        return False
def export(self, outfile, level, namespaceprefix_='', name_='MyDigiPassAddress', namespacedef_='', pretty_print=True):
imported_ns_def_ = GenerateDSNamespaceDefs_.get('MyDigiPassAddress')
if imported_ns_def_ is not None:
namespacedef_ = imported_ns_def_
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
if self.original_tagname_ is not None:
name_ = self.original_tagname_
showIndent(outfile, level, pretty_print)
outfile.write('<%s%s%s' % (namespaceprefix_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = set()
self.exportAttributes(outfile, level, already_processed, namespaceprefix_, name_='MyDigiPassAddress')
if self.hasContent_():
outfile.write('>%s' % (eol_, ))
self.exportChildren(outfile, level + 1, namespaceprefix_='', name_='MyDigiPassAddress', pretty_print=pretty_print)
outfile.write('</%s%s>%s' % (namespaceprefix_, name_, eol_))
else:
outfile.write('/>%s' % (eol_, ))
def exportAttributes(self, outfile, level, already_processed, namespaceprefix_='', name_='MyDigiPassAddress'):
if self.address1 is not None and 'address1' not in already_processed:
already_processed.add('address1')
outfile.write(' address1=%s' % (self.gds_encode(self.gds_format_string(quote_attrib(self.address1), input_name='address1')), ))
if self.address2 is not None and 'address2' not in already_processed:
already_processed.add('address2')
outfile.write(' address2=%s' % (self.gds_encode(self.gds_format_string(quote_attrib(self.address2), input_name='address2')), ))
if self.city is not None and 'city' not in already_processed:
already_processed.add('city')
outfile.write(' city=%s' % (self.gds_encode(self.gds_format_string(quote_attrib(self.city), input_name='city')), ))
if self.zip is not None and 'zip' not in already_processed:
already_processed.add('zip')
outfile.write(' zip=%s' % (self.gds_encode(self.gds_format_string(quote_attrib(self.zip), input_name='zip')), ))
if self.country is not None and 'country' not in already_processed:
already_processed.add('country')
outfile.write(' country=%s' % (self.gds_encode(self.gds_format_string(quote_attrib(self.country), input_name='country')), ))
if self.state is not None and 'state' not in already_processed:
already_processed.add('state')
outfile.write(' state=%s' % (self.gds_encode(self.gds_format_string(quote_attrib(self.state), input_name='state')), ))
def exportChildren(self, outfile, level, namespaceprefix_='', name_='MyDigiPassAddress', fromsubclass_=False, pretty_print=True):
pass
def build(self, node):
already_processed = set()
self.buildAttributes(node, node.attrib, already_processed)
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
return self
def buildAttributes(self, node, attrs, already_processed):
value = find_attr_value_('address1', node)
if value is not None and 'address1' not in already_processed:
already_processed.add('address1')
self.address1 = value
value = find_attr_value_('address2', node)
if value is not None and 'address2' not in already_processed:
already_processed.add('address2')
self.address2 = value
value = find_attr_value_('city', node)
if value is not None and 'city' not in already_processed:
already_processed.add('city')
self.city = value
value = find_attr_value_('zip', node)
if value is not None and 'zip' not in already_processed:
already_processed.add('zip')
self.zip = value
value = find_attr_value_('country', node)
if value is not None and 'country' not in already_processed:
already_processed.add('country')
self.country = value
value = find_attr_value_('state', node)
if value is not None and 'state' not in already_processed:
already_processed.add('state')
self.state = value
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
pass
# end class MyDigiPassAddress
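
# Usage sketch (editor's addition, not part of the generateDS output):
# export() only ever calls write() on the object it is given, so output can be
# captured in memory with a minimal adapter instead of a real file.
class _ListWriter(object):
    """Collects the chunks that export() writes."""
    def __init__(self):
        self.parts = []
    def write(self, text):
        self.parts.append(text)

def _example_capture_address_xml():
    addr = MyDigiPassAddress(address1='1 Main St', city='Ghent', zip='9000')
    out = _ListWriter()
    addr.export(out, 0, name_='MyDigiPassAddress')
    return ''.join(out.parts)
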
class MyDigiPassWidgetStep(WidgetStep):
subclass = None
superclass = WidgetStep
def __init__(self, id=None, creationTimestamp=None, definition=None, previousStep=None, nextStep=None, message=None, button=None, receivedTimestamp=None, acknowledgedTimestamp=None, displayValue=None, formButton=None, eidPhoto=None, email=None, phone=None, eidProfile=None, eidAddress=None, profile=None, address=None):
self.original_tagname_ = None
super(MyDigiPassWidgetStep, self).__init__(id, creationTimestamp, definition, previousStep, nextStep, message, button, receivedTimestamp, acknowledgedTimestamp, displayValue, formButton, )
self.eidPhoto = _cast(None, eidPhoto)
self.email = _cast(None, email)
self.phone = _cast(None, phone)
self.eidProfile = eidProfile
self.eidAddress = eidAddress
self.profile = profile
self.address = address
def factory(*args_, **kwargs_):
if CurrentSubclassModule_ is not None:
subclass = getSubclassFromModule_(
CurrentSubclassModule_, MyDigiPassWidgetStep)
if subclass is not None:
return subclass(*args_, **kwargs_)
if MyDigiPassWidgetStep.subclass:
return MyDigiPassWidgetStep.subclass(*args_, **kwargs_)
else:
return MyDigiPassWidgetStep(*args_, **kwargs_)
factory = staticmethod(factory)
def get_eidProfile(self): return self.eidProfile
def set_eidProfile(self, eidProfile): self.eidProfile = eidProfile
def get_eidAddress(self): return self.eidAddress
def set_eidAddress(self, eidAddress): self.eidAddress = eidAddress
def get_profile(self): return self.profile
def set_profile(self, profile): self.profile = profile
def get_address(self): return self.address
def set_address(self, address): self.address = address
def get_eidPhoto(self): return self.eidPhoto
def set_eidPhoto(self, eidPhoto): self.eidPhoto = eidPhoto
def get_email(self): return self.email
def set_email(self, email): self.email = email
def get_phone(self): return self.phone
def set_phone(self, phone): self.phone = phone
def hasContent_(self):
if (
self.eidProfile is not None or
self.eidAddress is not None or
self.profile is not None or
self.address is not None or
super(MyDigiPassWidgetStep, self).hasContent_()
):
return True
else:
return False
def export(self, outfile, level, namespaceprefix_='', name_='MyDigiPassWidgetStep', namespacedef_='', pretty_print=True):
imported_ns_def_ = GenerateDSNamespaceDefs_.get('MyDigiPassWidgetStep')
if imported_ns_def_ is not None:
namespacedef_ = imported_ns_def_
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
if self.original_tagname_ is not None:
name_ = self.original_tagname_
showIndent(outfile, level, pretty_print)
outfile.write('<%s%s%s' % (namespaceprefix_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = set()
self.exportAttributes(outfile, level, already_processed, namespaceprefix_, name_='MyDigiPassWidgetStep')
if self.hasContent_():
outfile.write('>%s' % (eol_, ))
self.exportChildren(outfile, level + 1, namespaceprefix_='', name_='MyDigiPassWidgetStep', pretty_print=pretty_print)
showIndent(outfile, level, pretty_print)
outfile.write('</%s%s>%s' % (namespaceprefix_, name_, eol_))
else:
outfile.write('/>%s' % (eol_, ))
def exportAttributes(self, outfile, level, already_processed, namespaceprefix_='', name_='MyDigiPassWidgetStep'):
super(MyDigiPassWidgetStep, self).exportAttributes(outfile, level, already_processed, namespaceprefix_, name_='MyDigiPassWidgetStep')
if self.eidPhoto is not None and 'eidPhoto' not in already_processed:
already_processed.add('eidPhoto')
outfile.write(' eidPhoto=%s' % (self.gds_encode(self.gds_format_string(quote_attrib(self.eidPhoto), input_name='eidPhoto')), ))
if self.email is not None and 'email' not in already_processed:
already_processed.add('email')
outfile.write(' email=%s' % (self.gds_encode(self.gds_format_string(quote_attrib(self.email), input_name='email')), ))
if self.phone is not None and 'phone' not in already_processed:
already_processed.add('phone')
outfile.write(' phone=%s' % (self.gds_encode(self.gds_format_string(quote_attrib(self.phone), input_name='phone')), ))
def exportChildren(self, outfile, level, namespaceprefix_='', name_='MyDigiPassWidgetStep', fromsubclass_=False, pretty_print=True):
super(MyDigiPassWidgetStep, self).exportChildren(outfile, level, namespaceprefix_, name_, True, pretty_print=pretty_print)
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
if self.eidProfile is not None:
self.eidProfile.export(outfile, level, namespaceprefix_, name_='eidProfile', pretty_print=pretty_print)
if self.eidAddress is not None:
self.eidAddress.export(outfile, level, namespaceprefix_, name_='eidAddress', pretty_print=pretty_print)
if self.profile is not None:
self.profile.export(outfile, level, namespaceprefix_, name_='profile', pretty_print=pretty_print)
if self.address is not None:
self.address.export(outfile, level, namespaceprefix_, name_='address', pretty_print=pretty_print)
def build(self, node):
already_processed = set()
self.buildAttributes(node, node.attrib, already_processed)
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
return self
def buildAttributes(self, node, attrs, already_processed):
value = find_attr_value_('eidPhoto', node)
if value is not None and 'eidPhoto' not in already_processed:
already_processed.add('eidPhoto')
self.eidPhoto = value
value = find_attr_value_('email', node)
if value is not None and 'email' not in already_processed:
already_processed.add('email')
self.email = value
value = find_attr_value_('phone', node)
if value is not None and 'phone' not in already_processed:
already_processed.add('phone')
self.phone = value
super(MyDigiPassWidgetStep, self).buildAttributes(node, attrs, already_processed)
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
if nodeName_ == 'eidProfile':
obj_ = MyDigiPassEidProfile.factory()
obj_.build(child_)
self.eidProfile = obj_
obj_.original_tagname_ = 'eidProfile'
elif nodeName_ == 'eidAddress':
obj_ = MyDigiPassEidAddress.factory()
obj_.build(child_)
self.eidAddress = obj_
obj_.original_tagname_ = 'eidAddress'
elif nodeName_ == 'profile':
obj_ = MyDigiPassProfile.factory()
obj_.build(child_)
self.profile = obj_
obj_.original_tagname_ = 'profile'
elif nodeName_ == 'address':
obj_ = MyDigiPassAddress.factory()
obj_.build(child_)
self.address = obj_
obj_.original_tagname_ = 'address'
super(MyDigiPassWidgetStep, self).buildChildren(child_, node, nodeName_, True)
# end class MyDigiPassWidgetStep
class SelectSingleWidgetStep(WidgetStep):
subclass = None
superclass = WidgetStep
def __init__(self, id=None, creationTimestamp=None, definition=None, previousStep=None, nextStep=None, message=None, button=None, receivedTimestamp=None, acknowledgedTimestamp=None, displayValue=None, formButton=None, value=None):
self.original_tagname_ = None
super(SelectSingleWidgetStep, self).__init__(id, creationTimestamp, definition, previousStep, nextStep, message, button, receivedTimestamp, acknowledgedTimestamp, displayValue, formButton, )
self.value = _cast(None, value)
def factory(*args_, **kwargs_):
if CurrentSubclassModule_ is not None:
subclass = getSubclassFromModule_(
CurrentSubclassModule_, SelectSingleWidgetStep)
if subclass is not None:
return subclass(*args_, **kwargs_)
if SelectSingleWidgetStep.subclass:
return SelectSingleWidgetStep.subclass(*args_, **kwargs_)
else:
return SelectSingleWidgetStep(*args_, **kwargs_)
factory = staticmethod(factory)
def get_value(self): return self.value
def set_value(self, value): self.value = value
def hasContent_(self):
if (
super(SelectSingleWidgetStep, self).hasContent_()
):
return True
else:
return False
def export(self, outfile, level, namespaceprefix_='', name_='SelectSingleWidgetStep', namespacedef_='', pretty_print=True):
imported_ns_def_ = GenerateDSNamespaceDefs_.get('SelectSingleWidgetStep')
if imported_ns_def_ is not None:
namespacedef_ = imported_ns_def_
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
if self.original_tagname_ is not None:
name_ = self.original_tagname_
showIndent(outfile, level, pretty_print)
outfile.write('<%s%s%s' % (namespaceprefix_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = set()
self.exportAttributes(outfile, level, already_processed, namespaceprefix_, name_='SelectSingleWidgetStep')
if self.hasContent_():
outfile.write('>%s' % (eol_, ))
self.exportChildren(outfile, level + 1, namespaceprefix_='', name_='SelectSingleWidgetStep', pretty_print=pretty_print)
outfile.write('</%s%s>%s' % (namespaceprefix_, name_, eol_))
else:
outfile.write('/>%s' % (eol_, ))
def exportAttributes(self, outfile, level, already_processed, namespaceprefix_='', name_='SelectSingleWidgetStep'):
super(SelectSingleWidgetStep, self).exportAttributes(outfile, level, already_processed, namespaceprefix_, name_='SelectSingleWidgetStep')
if self.value is not None and 'value' not in already_processed:
already_processed.add('value')
outfile.write(' value=%s' % (self.gds_encode(self.gds_format_string(quote_attrib(self.value), input_name='value')), ))
def exportChildren(self, outfile, level, namespaceprefix_='', name_='SelectSingleWidgetStep', fromsubclass_=False, pretty_print=True):
super(SelectSingleWidgetStep, self).exportChildren(outfile, level, namespaceprefix_, name_, True, pretty_print=pretty_print)
pass
def build(self, node):
already_processed = set()
self.buildAttributes(node, node.attrib, already_processed)
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
return self
def buildAttributes(self, node, attrs, already_processed):
value = find_attr_value_('value', node)
if value is not None and 'value' not in already_processed:
already_processed.add('value')
self.value = value
super(SelectSingleWidgetStep, self).buildAttributes(node, attrs, already_processed)
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
super(SelectSingleWidgetStep, self).buildChildren(child_, node, nodeName_, True)
pass
# end class SelectSingleWidgetStep
class SelectMultiWidgetStep(WidgetStep):
subclass = None
superclass = WidgetStep
def __init__(self, id=None, creationTimestamp=None, definition=None, previousStep=None, nextStep=None, message=None, button=None, receivedTimestamp=None, acknowledgedTimestamp=None, displayValue=None, formButton=None, selection=None):
self.original_tagname_ = None
super(SelectMultiWidgetStep, self).__init__(id, creationTimestamp, definition, previousStep, nextStep, message, button, receivedTimestamp, acknowledgedTimestamp, displayValue, formButton, )
if selection is None:
self.selection = []
else:
self.selection = selection
def factory(*args_, **kwargs_):
if CurrentSubclassModule_ is not None:
subclass = getSubclassFromModule_(
CurrentSubclassModule_, SelectMultiWidgetStep)
if subclass is not None:
return subclass(*args_, **kwargs_)
if SelectMultiWidgetStep.subclass:
return SelectMultiWidgetStep.subclass(*args_, **kwargs_)
else:
return SelectMultiWidgetStep(*args_, **kwargs_)
factory = staticmethod(factory)
def get_selection(self): return self.selection
def set_selection(self, selection): self.selection = selection
def add_selection(self, value): self.selection.append(value)
def insert_selection_at(self, index, value): self.selection.insert(index, value)
def replace_selection_at(self, index, value): self.selection[index] = value
def hasContent_(self):
if (
self.selection or
super(SelectMultiWidgetStep, self).hasContent_()
):
return True
else:
return False
def export(self, outfile, level, namespaceprefix_='', name_='SelectMultiWidgetStep', namespacedef_='', pretty_print=True):
imported_ns_def_ = GenerateDSNamespaceDefs_.get('SelectMultiWidgetStep')
if imported_ns_def_ is not None:
namespacedef_ = imported_ns_def_
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
if self.original_tagname_ is not None:
name_ = self.original_tagname_
showIndent(outfile, level, pretty_print)
outfile.write('<%s%s%s' % (namespaceprefix_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = set()
self.exportAttributes(outfile, level, already_processed, namespaceprefix_, name_='SelectMultiWidgetStep')
if self.hasContent_():
outfile.write('>%s' % (eol_, ))
self.exportChildren(outfile, level + 1, namespaceprefix_='', name_='SelectMultiWidgetStep', pretty_print=pretty_print)
showIndent(outfile, level, pretty_print)
outfile.write('</%s%s>%s' % (namespaceprefix_, name_, eol_))
else:
outfile.write('/>%s' % (eol_, ))
def exportAttributes(self, outfile, level, already_processed, namespaceprefix_='', name_='SelectMultiWidgetStep'):
super(SelectMultiWidgetStep, self).exportAttributes(outfile, level, already_processed, namespaceprefix_, name_='SelectMultiWidgetStep')
def exportChildren(self, outfile, level, namespaceprefix_='', name_='SelectMultiWidgetStep', fromsubclass_=False, pretty_print=True):
super(SelectMultiWidgetStep, self).exportChildren(outfile, level, namespaceprefix_, name_, True, pretty_print=pretty_print)
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
for selection_ in self.selection:
selection_.export(outfile, level, namespaceprefix_, name_='selection', pretty_print=pretty_print)
def build(self, node):
already_processed = set()
self.buildAttributes(node, node.attrib, already_processed)
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
return self
def buildAttributes(self, node, attrs, already_processed):
super(SelectMultiWidgetStep, self).buildAttributes(node, attrs, already_processed)
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
if nodeName_ == 'selection':
class_obj_ = self.get_class_obj_(child_, Value)
obj_ = class_obj_.factory()
obj_.build(child_)
self.selection.append(obj_)
obj_.original_tagname_ = 'selection'
super(SelectMultiWidgetStep, self).buildChildren(child_, node, nodeName_, True)
# end class SelectMultiWidgetStep
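# Illustrative only: the repeated <selection> children above follow the
# generateDS list pattern -- a plain Python list plus the add_/insert_/
# replace_ helpers. `Value` is the child class generated elsewhere in this
# module; its no-argument constructor is assumed here.
def _example_selectmulti_selection_list():
    step = SelectMultiWidgetStep()
    step.add_selection(Value())           # append a child element
    step.insert_selection_at(0, Value())  # position-aware insert
    assert step.hasContent_()             # a non-empty list counts as content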
class SelectDateWidgetStep(WidgetStep):
subclass = None
superclass = WidgetStep
def __init__(self, id=None, creationTimestamp=None, definition=None, previousStep=None, nextStep=None, message=None, button=None, receivedTimestamp=None, acknowledgedTimestamp=None, displayValue=None, formButton=None, date=None):
self.original_tagname_ = None
super(SelectDateWidgetStep, self).__init__(id, creationTimestamp, definition, previousStep, nextStep, message, button, receivedTimestamp, acknowledgedTimestamp, displayValue, formButton, )
self.date = _cast(int, date)
def factory(*args_, **kwargs_):
if CurrentSubclassModule_ is not None:
subclass = getSubclassFromModule_(
CurrentSubclassModule_, SelectDateWidgetStep)
if subclass is not None:
return subclass(*args_, **kwargs_)
if SelectDateWidgetStep.subclass:
return SelectDateWidgetStep.subclass(*args_, **kwargs_)
else:
return SelectDateWidgetStep(*args_, **kwargs_)
factory = staticmethod(factory)
def get_date(self): return self.date
def set_date(self, date): self.date = date
def hasContent_(self):
if (
super(SelectDateWidgetStep, self).hasContent_()
):
return True
else:
return False
def export(self, outfile, level, namespaceprefix_='', name_='SelectDateWidgetStep', namespacedef_='', pretty_print=True):
imported_ns_def_ = GenerateDSNamespaceDefs_.get('SelectDateWidgetStep')
if imported_ns_def_ is not None:
namespacedef_ = imported_ns_def_
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
if self.original_tagname_ is not None:
name_ = self.original_tagname_
showIndent(outfile, level, pretty_print)
outfile.write('<%s%s%s' % (namespaceprefix_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = set()
self.exportAttributes(outfile, level, already_processed, namespaceprefix_, name_='SelectDateWidgetStep')
if self.hasContent_():
outfile.write('>%s' % (eol_, ))
self.exportChildren(outfile, level + 1, namespaceprefix_='', name_='SelectDateWidgetStep', pretty_print=pretty_print)
outfile.write('</%s%s>%s' % (namespaceprefix_, name_, eol_))
else:
outfile.write('/>%s' % (eol_, ))
def exportAttributes(self, outfile, level, already_processed, namespaceprefix_='', name_='SelectDateWidgetStep'):
super(SelectDateWidgetStep, self).exportAttributes(outfile, level, already_processed, namespaceprefix_, name_='SelectDateWidgetStep')
if self.date is not None and 'date' not in already_processed:
already_processed.add('date')
outfile.write(' date="%s"' % self.gds_format_integer(self.date, input_name='date'))
def exportChildren(self, outfile, level, namespaceprefix_='', name_='SelectDateWidgetStep', fromsubclass_=False, pretty_print=True):
super(SelectDateWidgetStep, self).exportChildren(outfile, level, namespaceprefix_, name_, True, pretty_print=pretty_print)
pass
def build(self, node):
already_processed = set()
self.buildAttributes(node, node.attrib, already_processed)
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
return self
def buildAttributes(self, node, attrs, already_processed):
value = find_attr_value_('date', node)
if value is not None and 'date' not in already_processed:
already_processed.add('date')
try:
self.date = int(value)
except ValueError as exp:
raise_parse_error(node, 'Bad integer attribute: %s' % exp)
super(SelectDateWidgetStep, self).buildAttributes(node, attrs, already_processed)
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
super(SelectDateWidgetStep, self).buildChildren(child_, node, nodeName_, True)
pass
# end class SelectDateWidgetStep
class SelectFriendWidgetStep(WidgetStep):
subclass = None
superclass = WidgetStep
def __init__(self, id=None, creationTimestamp=None, definition=None, previousStep=None, nextStep=None, message=None, button=None, receivedTimestamp=None, acknowledgedTimestamp=None, displayValue=None, formButton=None, selection=None):
self.original_tagname_ = None
super(SelectFriendWidgetStep, self).__init__(id, creationTimestamp, definition, previousStep, nextStep, message, button, receivedTimestamp, acknowledgedTimestamp, displayValue, formButton, )
if selection is None:
self.selection = []
else:
self.selection = selection
def factory(*args_, **kwargs_):
if CurrentSubclassModule_ is not None:
subclass = getSubclassFromModule_(
CurrentSubclassModule_, SelectFriendWidgetStep)
if subclass is not None:
return subclass(*args_, **kwargs_)
if SelectFriendWidgetStep.subclass:
return SelectFriendWidgetStep.subclass(*args_, **kwargs_)
else:
return SelectFriendWidgetStep(*args_, **kwargs_)
factory = staticmethod(factory)
def get_selection(self): return self.selection
def set_selection(self, selection): self.selection = selection
def add_selection(self, value): self.selection.append(value)
def insert_selection_at(self, index, value): self.selection.insert(index, value)
def replace_selection_at(self, index, value): self.selection[index] = value
def hasContent_(self):
if (
self.selection or
super(SelectFriendWidgetStep, self).hasContent_()
):
return True
else:
return False
def export(self, outfile, level, namespaceprefix_='', name_='SelectFriendWidgetStep', namespacedef_='', pretty_print=True):
imported_ns_def_ = GenerateDSNamespaceDefs_.get('SelectFriendWidgetStep')
if imported_ns_def_ is not None:
namespacedef_ = imported_ns_def_
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
if self.original_tagname_ is not None:
name_ = self.original_tagname_
showIndent(outfile, level, pretty_print)
outfile.write('<%s%s%s' % (namespaceprefix_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = set()
self.exportAttributes(outfile, level, already_processed, namespaceprefix_, name_='SelectFriendWidgetStep')
if self.hasContent_():
outfile.write('>%s' % (eol_, ))
self.exportChildren(outfile, level + 1, namespaceprefix_='', name_='SelectFriendWidgetStep', pretty_print=pretty_print)
showIndent(outfile, level, pretty_print)
outfile.write('</%s%s>%s' % (namespaceprefix_, name_, eol_))
else:
outfile.write('/>%s' % (eol_, ))
def exportAttributes(self, outfile, level, already_processed, namespaceprefix_='', name_='SelectFriendWidgetStep'):
super(SelectFriendWidgetStep, self).exportAttributes(outfile, level, already_processed, namespaceprefix_, name_='SelectFriendWidgetStep')
def exportChildren(self, outfile, level, namespaceprefix_='', name_='SelectFriendWidgetStep', fromsubclass_=False, pretty_print=True):
super(SelectFriendWidgetStep, self).exportChildren(outfile, level, namespaceprefix_, name_, True, pretty_print=pretty_print)
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
for selection_ in self.selection:
selection_.export(outfile, level, namespaceprefix_, name_='selection', pretty_print=pretty_print)
def build(self, node):
already_processed = set()
self.buildAttributes(node, node.attrib, already_processed)
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
return self
def buildAttributes(self, node, attrs, already_processed):
super(SelectFriendWidgetStep, self).buildAttributes(node, attrs, already_processed)
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
if nodeName_ == 'selection':
class_obj_ = self.get_class_obj_(child_, Value)
obj_ = class_obj_.factory()
obj_.build(child_)
self.selection.append(obj_)
obj_.original_tagname_ = 'selection'
super(SelectFriendWidgetStep, self).buildChildren(child_, node, nodeName_, True)
# end class SelectFriendWidgetStep
class AdvancedOrderWidgetStep(WidgetStep):
subclass = None
superclass = WidgetStep
def __init__(self, id=None, creationTimestamp=None, definition=None, previousStep=None, nextStep=None, message=None, button=None, receivedTimestamp=None, acknowledgedTimestamp=None, displayValue=None, formButton=None, currency=None, category=None):
self.original_tagname_ = None
super(AdvancedOrderWidgetStep, self).__init__(id, creationTimestamp, definition, previousStep, nextStep, message, button, receivedTimestamp, acknowledgedTimestamp, displayValue, formButton, )
self.currency = _cast(None, currency)
if category is None:
self.category = []
else:
self.category = category
def factory(*args_, **kwargs_):
if CurrentSubclassModule_ is not None:
subclass = getSubclassFromModule_(
CurrentSubclassModule_, AdvancedOrderWidgetStep)
if subclass is not None:
return subclass(*args_, **kwargs_)
if AdvancedOrderWidgetStep.subclass:
return AdvancedOrderWidgetStep.subclass(*args_, **kwargs_)
else:
return AdvancedOrderWidgetStep(*args_, **kwargs_)
factory = staticmethod(factory)
def get_category(self): return self.category
def set_category(self, category): self.category = category
def add_category(self, value): self.category.append(value)
def insert_category_at(self, index, value): self.category.insert(index, value)
def replace_category_at(self, index, value): self.category[index] = value
def get_currency(self): return self.currency
def set_currency(self, currency): self.currency = currency
def hasContent_(self):
if (
self.category or
super(AdvancedOrderWidgetStep, self).hasContent_()
):
return True
else:
return False
def export(self, outfile, level, namespaceprefix_='', name_='AdvancedOrderWidgetStep', namespacedef_='', pretty_print=True):
imported_ns_def_ = GenerateDSNamespaceDefs_.get('AdvancedOrderWidgetStep')
if imported_ns_def_ is not None:
namespacedef_ = imported_ns_def_
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
if self.original_tagname_ is not None:
name_ = self.original_tagname_
showIndent(outfile, level, pretty_print)
outfile.write('<%s%s%s' % (namespaceprefix_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = set()
self.exportAttributes(outfile, level, already_processed, namespaceprefix_, name_='AdvancedOrderWidgetStep')
if self.hasContent_():
outfile.write('>%s' % (eol_, ))
self.exportChildren(outfile, level + 1, namespaceprefix_='', name_='AdvancedOrderWidgetStep', pretty_print=pretty_print)
showIndent(outfile, level, pretty_print)
outfile.write('</%s%s>%s' % (namespaceprefix_, name_, eol_))
else:
outfile.write('/>%s' % (eol_, ))
def exportAttributes(self, outfile, level, already_processed, namespaceprefix_='', name_='AdvancedOrderWidgetStep'):
super(AdvancedOrderWidgetStep, self).exportAttributes(outfile, level, already_processed, namespaceprefix_, name_='AdvancedOrderWidgetStep')
if self.currency is not None and 'currency' not in already_processed:
already_processed.add('currency')
outfile.write(' currency=%s' % (self.gds_encode(self.gds_format_string(quote_attrib(self.currency), input_name='currency')), ))
def exportChildren(self, outfile, level, namespaceprefix_='', name_='AdvancedOrderWidgetStep', fromsubclass_=False, pretty_print=True):
super(AdvancedOrderWidgetStep, self).exportChildren(outfile, level, namespaceprefix_, name_, True, pretty_print=pretty_print)
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
for category_ in self.category:
category_.export(outfile, level, namespaceprefix_, name_='category', pretty_print=pretty_print)
def build(self, node):
already_processed = set()
self.buildAttributes(node, node.attrib, already_processed)
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
return self
def buildAttributes(self, node, attrs, already_processed):
value = find_attr_value_('currency', node)
if value is not None and 'currency' not in already_processed:
already_processed.add('currency')
self.currency = value
super(AdvancedOrderWidgetStep, self).buildAttributes(node, attrs, already_processed)
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
if nodeName_ == 'category':
obj_ = AdvancedOrderCategory.factory()
obj_.build(child_)
self.category.append(obj_)
obj_.original_tagname_ = 'category'
super(AdvancedOrderWidgetStep, self).buildChildren(child_, node, nodeName_, True)
# end class AdvancedOrderWidgetStep
class MemberRun(GeneratedsSuper):
subclass = None
superclass = None
def __init__(self, email=None, name=None, status=None, endReference=None, language=None, appId=None, avatarUrl=None, userData=None, step=None):
self.original_tagname_ = None
self.email = _cast(None, email)
self.name = _cast(None, name)
self.status = _cast(None, status)
self.endReference = _cast(None, endReference)
self.language = _cast(None, language)
self.appId = _cast(None, appId)
self.avatarUrl = _cast(None, avatarUrl)
self.userData = _cast(None, userData)
if step is None:
self.step = []
else:
self.step = step
def factory(*args_, **kwargs_):
if CurrentSubclassModule_ is not None:
subclass = getSubclassFromModule_(
CurrentSubclassModule_, MemberRun)
if subclass is not None:
return subclass(*args_, **kwargs_)
if MemberRun.subclass:
return MemberRun.subclass(*args_, **kwargs_)
else:
return MemberRun(*args_, **kwargs_)
factory = staticmethod(factory)
def get_step(self): return self.step
def set_step(self, step): self.step = step
def add_step(self, value): self.step.append(value)
def insert_step_at(self, index, value): self.step.insert(index, value)
def replace_step_at(self, index, value): self.step[index] = value
def get_email(self): return self.email
def set_email(self, email): self.email = email
def get_name(self): return self.name
def set_name(self, name): self.name = name
def get_status(self): return self.status
def set_status(self, status): self.status = status
def get_endReference(self): return self.endReference
def set_endReference(self, endReference): self.endReference = endReference
def get_language(self): return self.language
def set_language(self, language): self.language = language
def get_appId(self): return self.appId
def set_appId(self, appId): self.appId = appId
def get_avatarUrl(self): return self.avatarUrl
def set_avatarUrl(self, avatarUrl): self.avatarUrl = avatarUrl
def get_userData(self): return self.userData
def set_userData(self, userData): self.userData = userData
def validate_MemberStatus(self, value):
# Validate type MemberStatus, a restriction on xs:string.
if value is not None and Validate_simpletypes_:
value = str(value)
enumerations = ['SUBMITTED', 'INITIATED', 'RUNNING', 'FINISHED']
            if value not in enumerations:
                warnings_.warn('Value "%(value)s" does not match xsd enumeration restriction on MemberStatus' % {"value": value.encode("utf-8")})
def hasContent_(self):
if (
self.step
):
return True
else:
return False
def export(self, outfile, level, namespaceprefix_='', name_='MemberRun', namespacedef_='', pretty_print=True):
imported_ns_def_ = GenerateDSNamespaceDefs_.get('MemberRun')
if imported_ns_def_ is not None:
namespacedef_ = imported_ns_def_
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
if self.original_tagname_ is not None:
name_ = self.original_tagname_
showIndent(outfile, level, pretty_print)
outfile.write('<%s%s%s' % (namespaceprefix_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = set()
self.exportAttributes(outfile, level, already_processed, namespaceprefix_, name_='MemberRun')
if self.hasContent_():
outfile.write('>%s' % (eol_, ))
self.exportChildren(outfile, level + 1, namespaceprefix_='', name_='MemberRun', pretty_print=pretty_print)
showIndent(outfile, level, pretty_print)
outfile.write('</%s%s>%s' % (namespaceprefix_, name_, eol_))
else:
outfile.write('/>%s' % (eol_, ))
def exportAttributes(self, outfile, level, already_processed, namespaceprefix_='', name_='MemberRun'):
if self.email is not None and 'email' not in already_processed:
already_processed.add('email')
outfile.write(' email=%s' % (self.gds_encode(self.gds_format_string(quote_attrib(self.email), input_name='email')), ))
if self.name is not None and 'name' not in already_processed:
already_processed.add('name')
outfile.write(' name=%s' % (self.gds_encode(self.gds_format_string(quote_attrib(self.name), input_name='name')), ))
if self.status is not None and 'status' not in already_processed:
already_processed.add('status')
outfile.write(' status=%s' % (quote_attrib(self.status), ))
if self.endReference is not None and 'endReference' not in already_processed:
already_processed.add('endReference')
outfile.write(' endReference=%s' % (self.gds_encode(self.gds_format_string(quote_attrib(self.endReference), input_name='endReference')), ))
if self.language is not None and 'language' not in already_processed:
already_processed.add('language')
outfile.write(' language=%s' % (self.gds_encode(self.gds_format_string(quote_attrib(self.language), input_name='language')), ))
if self.appId is not None and 'appId' not in already_processed:
already_processed.add('appId')
outfile.write(' appId=%s' % (self.gds_encode(self.gds_format_string(quote_attrib(self.appId), input_name='appId')), ))
if self.avatarUrl is not None and 'avatarUrl' not in already_processed:
already_processed.add('avatarUrl')
outfile.write(' avatarUrl=%s' % (self.gds_encode(self.gds_format_string(quote_attrib(self.avatarUrl), input_name='avatarUrl')), ))
if self.userData is not None and 'userData' not in already_processed:
already_processed.add('userData')
outfile.write(' userData=%s' % (self.gds_encode(self.gds_format_string(quote_attrib(self.userData), input_name='userData')), ))
def exportChildren(self, outfile, level, namespaceprefix_='', name_='MemberRun', fromsubclass_=False, pretty_print=True):
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
for step_ in self.step:
step_.export(outfile, level, namespaceprefix_, name_='step', pretty_print=pretty_print)
def build(self, node):
already_processed = set()
self.buildAttributes(node, node.attrib, already_processed)
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
return self
def buildAttributes(self, node, attrs, already_processed):
value = find_attr_value_('email', node)
if value is not None and 'email' not in already_processed:
already_processed.add('email')
self.email = value
value = find_attr_value_('name', node)
if value is not None and 'name' not in already_processed:
already_processed.add('name')
self.name = value
value = find_attr_value_('status', node)
if value is not None and 'status' not in already_processed:
already_processed.add('status')
self.status = value
self.validate_MemberStatus(self.status) # validate type MemberStatus
value = find_attr_value_('endReference', node)
if value is not None and 'endReference' not in already_processed:
already_processed.add('endReference')
self.endReference = value
value = find_attr_value_('language', node)
if value is not None and 'language' not in already_processed:
already_processed.add('language')
self.language = value
value = find_attr_value_('appId', node)
if value is not None and 'appId' not in already_processed:
already_processed.add('appId')
self.appId = value
value = find_attr_value_('avatarUrl', node)
if value is not None and 'avatarUrl' not in already_processed:
already_processed.add('avatarUrl')
self.avatarUrl = value
value = find_attr_value_('userData', node)
if value is not None and 'userData' not in already_processed:
already_processed.add('userData')
self.userData = value
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
if nodeName_ == 'step':
class_obj_ = self.get_class_obj_(child_, Step)
obj_ = class_obj_.factory()
obj_.build(child_)
self.step.append(obj_)
obj_.original_tagname_ = 'step'
# end class MemberRun
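# A minimal sketch of the enumeration check above, assuming the module-level
# Validate_simpletypes_ flag is left enabled: out-of-range values only
# trigger warnings.warn(), they are still assigned.
def _example_memberrun_status_validation():
    run = MemberRun(status='RUNNING')
    run.validate_MemberStatus(run.status)   # matches the XSD enumeration
    run.validate_MemberStatus('CANCELLED')  # emits a UserWarning, no exception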
class MessageFlowRun(GeneratedsSuper):
subclass = None
superclass = None
def __init__(self, launchTimestamp=None, serviceName=None, serviceDisplayEmail=None, serviceEmail=None, serviceData=None, definition=None, memberRun=None, flowParams=None):
self.original_tagname_ = None
self.launchTimestamp = _cast(int, launchTimestamp)
self.serviceName = _cast(None, serviceName)
self.serviceDisplayEmail = _cast(None, serviceDisplayEmail)
self.serviceEmail = _cast(None, serviceEmail)
self.serviceData = _cast(None, serviceData)
if definition is None:
self.definition = []
else:
self.definition = definition
if memberRun is None:
self.memberRun = []
else:
self.memberRun = memberRun
self.flowParams = flowParams
def factory(*args_, **kwargs_):
if CurrentSubclassModule_ is not None:
subclass = getSubclassFromModule_(
CurrentSubclassModule_, MessageFlowRun)
if subclass is not None:
return subclass(*args_, **kwargs_)
if MessageFlowRun.subclass:
return MessageFlowRun.subclass(*args_, **kwargs_)
else:
return MessageFlowRun(*args_, **kwargs_)
factory = staticmethod(factory)
def get_definition(self): return self.definition
def set_definition(self, definition): self.definition = definition
def add_definition(self, value): self.definition.append(value)
def insert_definition_at(self, index, value): self.definition.insert(index, value)
def replace_definition_at(self, index, value): self.definition[index] = value
def get_memberRun(self): return self.memberRun
def set_memberRun(self, memberRun): self.memberRun = memberRun
def add_memberRun(self, value): self.memberRun.append(value)
def insert_memberRun_at(self, index, value): self.memberRun.insert(index, value)
def replace_memberRun_at(self, index, value): self.memberRun[index] = value
def get_flowParams(self): return self.flowParams
def set_flowParams(self, flowParams): self.flowParams = flowParams
def get_launchTimestamp(self): return self.launchTimestamp
def set_launchTimestamp(self, launchTimestamp): self.launchTimestamp = launchTimestamp
def get_serviceName(self): return self.serviceName
def set_serviceName(self, serviceName): self.serviceName = serviceName
def get_serviceDisplayEmail(self): return self.serviceDisplayEmail
def set_serviceDisplayEmail(self, serviceDisplayEmail): self.serviceDisplayEmail = serviceDisplayEmail
def get_serviceEmail(self): return self.serviceEmail
def set_serviceEmail(self, serviceEmail): self.serviceEmail = serviceEmail
def get_serviceData(self): return self.serviceData
def set_serviceData(self, serviceData): self.serviceData = serviceData
def hasContent_(self):
if (
self.definition or
self.memberRun or
self.flowParams is not None
):
return True
else:
return False
def export(self, outfile, level, namespaceprefix_='', name_='MessageFlowRun', namespacedef_='', pretty_print=True):
imported_ns_def_ = GenerateDSNamespaceDefs_.get('MessageFlowRun')
if imported_ns_def_ is not None:
namespacedef_ = imported_ns_def_
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
if self.original_tagname_ is not None:
name_ = self.original_tagname_
showIndent(outfile, level, pretty_print)
outfile.write('<%s%s%s' % (namespaceprefix_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = set()
self.exportAttributes(outfile, level, already_processed, namespaceprefix_, name_='MessageFlowRun')
if self.hasContent_():
outfile.write('>%s' % (eol_, ))
self.exportChildren(outfile, level + 1, namespaceprefix_='', name_='MessageFlowRun', pretty_print=pretty_print)
showIndent(outfile, level, pretty_print)
outfile.write('</%s%s>%s' % (namespaceprefix_, name_, eol_))
else:
outfile.write('/>%s' % (eol_, ))
def exportAttributes(self, outfile, level, already_processed, namespaceprefix_='', name_='MessageFlowRun'):
if self.launchTimestamp is not None and 'launchTimestamp' not in already_processed:
already_processed.add('launchTimestamp')
outfile.write(' launchTimestamp="%s"' % self.gds_format_integer(self.launchTimestamp, input_name='launchTimestamp'))
if self.serviceName is not None and 'serviceName' not in already_processed:
already_processed.add('serviceName')
outfile.write(' serviceName=%s' % (self.gds_encode(self.gds_format_string(quote_attrib(self.serviceName), input_name='serviceName')), ))
if self.serviceDisplayEmail is not None and 'serviceDisplayEmail' not in already_processed:
already_processed.add('serviceDisplayEmail')
outfile.write(' serviceDisplayEmail=%s' % (self.gds_encode(self.gds_format_string(quote_attrib(self.serviceDisplayEmail), input_name='serviceDisplayEmail')), ))
if self.serviceEmail is not None and 'serviceEmail' not in already_processed:
already_processed.add('serviceEmail')
outfile.write(' serviceEmail=%s' % (self.gds_encode(self.gds_format_string(quote_attrib(self.serviceEmail), input_name='serviceEmail')), ))
if self.serviceData is not None and 'serviceData' not in already_processed:
already_processed.add('serviceData')
outfile.write(' serviceData=%s' % (self.gds_encode(self.gds_format_string(quote_attrib(self.serviceData), input_name='serviceData')), ))
def exportChildren(self, outfile, level, namespaceprefix_='', name_='MessageFlowRun', fromsubclass_=False, pretty_print=True):
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
for definition_ in self.definition:
definition_.export(outfile, level, namespaceprefix_, name_='definition', pretty_print=pretty_print)
for memberRun_ in self.memberRun:
memberRun_.export(outfile, level, namespaceprefix_, name_='memberRun', pretty_print=pretty_print)
if self.flowParams is not None:
showIndent(outfile, level, pretty_print)
outfile.write('<flowParams>%s</flowParams>%s' % (self.gds_encode(self.gds_format_string(quote_xml(self.flowParams), input_name='flowParams')), eol_))
def build(self, node):
already_processed = set()
self.buildAttributes(node, node.attrib, already_processed)
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
return self
def buildAttributes(self, node, attrs, already_processed):
value = find_attr_value_('launchTimestamp', node)
if value is not None and 'launchTimestamp' not in already_processed:
already_processed.add('launchTimestamp')
try:
self.launchTimestamp = int(value)
except ValueError as exp:
raise_parse_error(node, 'Bad integer attribute: %s' % exp)
value = find_attr_value_('serviceName', node)
if value is not None and 'serviceName' not in already_processed:
already_processed.add('serviceName')
self.serviceName = value
value = find_attr_value_('serviceDisplayEmail', node)
if value is not None and 'serviceDisplayEmail' not in already_processed:
already_processed.add('serviceDisplayEmail')
self.serviceDisplayEmail = value
value = find_attr_value_('serviceEmail', node)
if value is not None and 'serviceEmail' not in already_processed:
already_processed.add('serviceEmail')
self.serviceEmail = value
value = find_attr_value_('serviceData', node)
if value is not None and 'serviceData' not in already_processed:
already_processed.add('serviceData')
self.serviceData = value
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
if nodeName_ == 'definition':
obj_ = MessageFlowDefinition.factory()
obj_.build(child_)
self.definition.append(obj_)
obj_.original_tagname_ = 'definition'
elif nodeName_ == 'memberRun':
obj_ = MemberRun.factory()
obj_.build(child_)
self.memberRun.append(obj_)
obj_.original_tagname_ = 'memberRun'
elif nodeName_ == 'flowParams':
flowParams_ = child_.text
flowParams_ = self.gds_validate_string(flowParams_, node, 'flowParams')
self.flowParams = flowParams_
# end class MessageFlowRun
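# Hedged sketch (timestamp and JSON payload are invented): unlike the
# attributes, <flowParams> is a text child, so exportChildren() above
# re-emits it as an element with quote_xml escaping.
def _example_messageflowrun_flowparams():
    import sys
    run = MessageFlowRun(launchTimestamp=1500000000,
                         flowParams='{"lang": "en"}')
    run.export(sys.stdout, 0, name_='messageFlowRun')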
class contentType(GeneratedsSuper):
subclass = None
superclass = None
def __init__(self, valueOf_=None):
self.original_tagname_ = None
self.valueOf_ = valueOf_
def factory(*args_, **kwargs_):
if CurrentSubclassModule_ is not None:
subclass = getSubclassFromModule_(
CurrentSubclassModule_, contentType)
if subclass is not None:
return subclass(*args_, **kwargs_)
if contentType.subclass:
return contentType.subclass(*args_, **kwargs_)
else:
return contentType(*args_, **kwargs_)
factory = staticmethod(factory)
def get_valueOf_(self): return self.valueOf_
def set_valueOf_(self, valueOf_): self.valueOf_ = valueOf_
def hasContent_(self):
if (
(1 if type(self.valueOf_) in [int,float] else self.valueOf_)
):
return True
else:
return False
def export(self, outfile, level, namespaceprefix_='', name_='contentType', namespacedef_='', pretty_print=True):
imported_ns_def_ = GenerateDSNamespaceDefs_.get('contentType')
if imported_ns_def_ is not None:
namespacedef_ = imported_ns_def_
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
if self.original_tagname_ is not None:
name_ = self.original_tagname_
showIndent(outfile, level, pretty_print)
outfile.write('<%s%s%s' % (namespaceprefix_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = set()
self.exportAttributes(outfile, level, already_processed, namespaceprefix_, name_='contentType')
if self.hasContent_():
outfile.write('>')
outfile.write(self.convert_unicode(self.valueOf_))
self.exportChildren(outfile, level + 1, namespaceprefix_='', name_='contentType', pretty_print=pretty_print)
outfile.write('</%s%s>%s' % (namespaceprefix_, name_, eol_))
else:
outfile.write('/>%s' % (eol_, ))
def exportAttributes(self, outfile, level, already_processed, namespaceprefix_='', name_='contentType'):
pass
def exportChildren(self, outfile, level, namespaceprefix_='', name_='contentType', fromsubclass_=False, pretty_print=True):
pass
def build(self, node):
already_processed = set()
self.buildAttributes(node, node.attrib, already_processed)
self.valueOf_ = get_all_text_(node)
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
return self
def buildAttributes(self, node, attrs, already_processed):
pass
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
pass
# end class contentType
class javascriptCodeType(GeneratedsSuper):
subclass = None
superclass = None
def __init__(self, valueOf_=None):
self.original_tagname_ = None
self.valueOf_ = valueOf_
def factory(*args_, **kwargs_):
if CurrentSubclassModule_ is not None:
subclass = getSubclassFromModule_(
CurrentSubclassModule_, javascriptCodeType)
if subclass is not None:
return subclass(*args_, **kwargs_)
if javascriptCodeType.subclass:
return javascriptCodeType.subclass(*args_, **kwargs_)
else:
return javascriptCodeType(*args_, **kwargs_)
factory = staticmethod(factory)
def get_valueOf_(self): return self.valueOf_
def set_valueOf_(self, valueOf_): self.valueOf_ = valueOf_
def hasContent_(self):
if (
(1 if type(self.valueOf_) in [int,float] else self.valueOf_)
):
return True
else:
return False
def export(self, outfile, level, namespaceprefix_='', name_='javascriptCodeType', namespacedef_='', pretty_print=True):
imported_ns_def_ = GenerateDSNamespaceDefs_.get('javascriptCodeType')
if imported_ns_def_ is not None:
namespacedef_ = imported_ns_def_
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
if self.original_tagname_ is not None:
name_ = self.original_tagname_
showIndent(outfile, level, pretty_print)
outfile.write('<%s%s%s' % (namespaceprefix_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = set()
self.exportAttributes(outfile, level, already_processed, namespaceprefix_, name_='javascriptCodeType')
if self.hasContent_():
outfile.write('>')
outfile.write(self.convert_unicode(self.valueOf_))
self.exportChildren(outfile, level + 1, namespaceprefix_='', name_='javascriptCodeType', pretty_print=pretty_print)
outfile.write('</%s%s>%s' % (namespaceprefix_, name_, eol_))
else:
outfile.write('/>%s' % (eol_, ))
def exportAttributes(self, outfile, level, already_processed, namespaceprefix_='', name_='javascriptCodeType'):
pass
def exportChildren(self, outfile, level, namespaceprefix_='', name_='javascriptCodeType', fromsubclass_=False, pretty_print=True):
pass
def build(self, node):
already_processed = set()
self.buildAttributes(node, node.attrib, already_processed)
self.valueOf_ = get_all_text_(node)
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
return self
def buildAttributes(self, node, attrs, already_processed):
pass
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
pass
# end class javascriptCodeType
class contentType1(GeneratedsSuper):
subclass = None
superclass = None
def __init__(self, valueOf_=None):
self.original_tagname_ = None
self.valueOf_ = valueOf_
def factory(*args_, **kwargs_):
if CurrentSubclassModule_ is not None:
subclass = getSubclassFromModule_(
CurrentSubclassModule_, contentType1)
if subclass is not None:
return subclass(*args_, **kwargs_)
if contentType1.subclass:
return contentType1.subclass(*args_, **kwargs_)
else:
return contentType1(*args_, **kwargs_)
factory = staticmethod(factory)
def get_valueOf_(self): return self.valueOf_
def set_valueOf_(self, valueOf_): self.valueOf_ = valueOf_
def hasContent_(self):
if (
(1 if type(self.valueOf_) in [int,float] else self.valueOf_)
):
return True
else:
return False
def export(self, outfile, level, namespaceprefix_='', name_='contentType1', namespacedef_='', pretty_print=True):
imported_ns_def_ = GenerateDSNamespaceDefs_.get('contentType1')
if imported_ns_def_ is not None:
namespacedef_ = imported_ns_def_
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
if self.original_tagname_ is not None:
name_ = self.original_tagname_
showIndent(outfile, level, pretty_print)
outfile.write('<%s%s%s' % (namespaceprefix_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = set()
self.exportAttributes(outfile, level, already_processed, namespaceprefix_, name_='contentType1')
if self.hasContent_():
outfile.write('>')
outfile.write(self.convert_unicode(self.valueOf_))
self.exportChildren(outfile, level + 1, namespaceprefix_='', name_='contentType1', pretty_print=pretty_print)
outfile.write('</%s%s>%s' % (namespaceprefix_, name_, eol_))
else:
outfile.write('/>%s' % (eol_, ))
def exportAttributes(self, outfile, level, already_processed, namespaceprefix_='', name_='contentType1'):
pass
def exportChildren(self, outfile, level, namespaceprefix_='', name_='contentType1', fromsubclass_=False, pretty_print=True):
pass
def build(self, node):
already_processed = set()
self.buildAttributes(node, node.attrib, already_processed)
self.valueOf_ = get_all_text_(node)
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
return self
def buildAttributes(self, node, attrs, already_processed):
pass
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
pass
# end class contentType1
GDSClassesMapping = {
'messageFlowDefinition': MessageFlowDefinition,
'messageFlowDefinitionSet': MessageFlowDefinitionSet,
'messageFlowRun': MessageFlowRun,
}
USAGE_TEXT = """
Usage: python <Parser>.py [ -s ] <in_xml_file>
"""
def usage():
print(USAGE_TEXT)
sys.exit(1)
def get_root_tag(node):
tag = Tag_pattern_.match(node.tag).groups()[-1]
rootClass = GDSClassesMapping.get(tag)
if rootClass is None:
rootClass = globals().get(tag)
return tag, rootClass
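# Illustrative only: get_root_tag() only needs an object with a .tag string,
# so a throwaway stand-in node is used here. Assumes the stock Tag_pattern_
# regex that strips an optional namespace prefix.
def _example_root_dispatch():
    tag, cls = get_root_tag(type('Node', (), {'tag': 'messageFlowRun'})())
    assert (tag, cls) == ('messageFlowRun', MessageFlowRun)
    # Unmapped tags fall back to a same-named class in globals():
    tag, cls = get_root_tag(type('Node', (), {'tag': 'Attachment'})())
    assert cls is Attachment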
def parse(inFileName, silence=False):
parser = None
doc = parsexml_(inFileName, parser)
rootNode = doc.getroot()
rootTag, rootClass = get_root_tag(rootNode)
if rootClass is None:
rootTag = 'Attachment'
rootClass = Attachment
rootObj = rootClass.factory()
rootObj.build(rootNode)
# Enable Python to collect the space used by the DOM.
doc = None
if not silence:
sys.stdout.write('<?xml version="1.0" ?>\n')
rootObj.export(
sys.stdout, 0, name_=rootTag,
namespacedef_='',
pretty_print=True)
return rootObj
def parseEtree(inFileName, silence=False):
parser = None
doc = parsexml_(inFileName, parser)
rootNode = doc.getroot()
rootTag, rootClass = get_root_tag(rootNode)
if rootClass is None:
rootTag = 'Attachment'
rootClass = Attachment
rootObj = rootClass.factory()
rootObj.build(rootNode)
# Enable Python to collect the space used by the DOM.
doc = None
mapping = {}
rootElement = rootObj.to_etree(None, name_=rootTag, mapping_=mapping)
reverse_mapping = rootObj.gds_reverse_node_mapping(mapping)
if not silence:
content = etree_.tostring(
rootElement, pretty_print=True,
xml_declaration=True, encoding="utf-8")
sys.stdout.write(content)
sys.stdout.write('\n')
return rootObj, rootElement, mapping, reverse_mapping
def parseString(inString, silence=False):
'''Parse a string, create the object tree, and export it.
Arguments:
- inString -- A string. This XML fragment should not start
with an XML declaration containing an encoding.
- silence -- A boolean. If False, export the object.
Returns -- The root object in the tree.
'''
parser = None
    rootNode = parsexmlstring_(inString, parser)
rootTag, rootClass = get_root_tag(rootNode)
if rootClass is None:
rootTag = 'Attachment'
rootClass = Attachment
rootObj = rootClass.factory()
rootObj.build(rootNode)
# Enable Python to collect the space used by the DOM.
if not silence:
sys.stdout.write('<?xml version="1.0" ?>\n')
rootObj.export(
sys.stdout, 0, name_=rootTag,
namespacedef_='')
return rootObj
def parseLiteral(inFileName, silence=False):
parser = None
doc = parsexml_(inFileName, parser)
rootNode = doc.getroot()
rootTag, rootClass = get_root_tag(rootNode)
if rootClass is None:
rootTag = 'Attachment'
rootClass = Attachment
rootObj = rootClass.factory()
rootObj.build(rootNode)
# Enable Python to collect the space used by the DOM.
doc = None
if not silence:
sys.stdout.write('#from gen import *\n\n')
sys.stdout.write('import gen as model_\n\n')
sys.stdout.write('rootObj = model_.rootClass(\n')
rootObj.exportLiteral(sys.stdout, 0, name_=rootTag)
sys.stdout.write(')\n')
return rootObj
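# A hedged sketch of the entry points above (file name invented): parse()
# echoes XML, parseEtree() also returns the lxml element plus node mappings,
# parseString() takes an in-memory document, and parseLiteral() prints a
# Python literal reconstruction of the tree.
def _example_parser_entry_points():
    root = parse('flow.xml', silence=True)
    root2, element, mapping, reverse = parseEtree('flow.xml', silence=True)
    root3 = parseString('<messageFlowRun/>', silence=True)
    return root, root2, root3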
def main():
args = sys.argv[1:]
if len(args) == 1:
parse(args[0])
else:
usage()
if __name__ == '__main__':
#import pdb; pdb.set_trace()
main()
__all__ = [
"AdvancedOrderCategory",
"AdvancedOrderItem",
"AdvancedOrderWidget",
"AdvancedOrderWidgetStep",
"Answer",
"Attachment",
"BaseMessageStep",
"BasePaymentMethod",
"BaseSliderWidget",
"Choice",
"End",
"FloatValue",
"FlowCode",
"FlowElement",
"Form",
"FormMessage",
"GPSLocationWidget",
"GPSLocationWidgetStep",
"MemberRun",
"Message",
"MessageFlowDefinition",
"MessageFlowDefinitionSet",
"MessageFlowRun",
"MessageStep",
"MyDigiPassAddress",
"MyDigiPassEidAddress",
"MyDigiPassEidProfile",
"MyDigiPassProfile",
"MyDigiPassWidget",
"MyDigiPassWidgetStep",
"OauthWidget",
"OpenIdWidget",
"Outlet",
"PayWidget",
"PaymentMethod",
"PhotoUploadWidget",
"PhotoUploadWidgetStep",
"RangeSliderWidget",
"RangeSliderWidgetStep",
"ResultsEmail",
"ResultsFlush",
"SelectDateWidget",
"SelectDateWidgetStep",
"SelectFriendWidget",
"SelectFriendWidgetStep",
"SelectMultiWidget",
"SelectMultiWidgetStep",
"SelectSingleWidget",
"SelectSingleWidgetStep",
"SelectWidget",
"SignWidget",
"SliderWidget",
"SliderWidgetStep",
"Step",
"TextAutoCompleteWidgetStep",
"TextAutocompleteWidget",
"TextBlockWidget",
"TextBlockWidgetStep",
"TextLineWidget",
"TextLineWidgetStep",
"TextWidget",
"TextWidgetStep",
"Value",
"Widget",
"WidgetStep",
"contentType",
"contentType1",
"javascriptCodeType"
]
| our-city-app/oca-backend | src/rogerthat/bizz/service/mfd/gen.py | Python | apache-2.0 | 404,967 |
from model.contact import Contact
import os.path
import jsonpickle
import getopt
import sys
try:
    opts, args = getopt.getopt(sys.argv[1:], "n:f:", ["number=", "file="])
except getopt.GetoptError as err:
    print(err)
    sys.exit(2)
n = 5  # number of random contacts to generate
f = "data/contact.json"  # output file, relative to the project root
for option, arg in opts:
    if option in ("-n", "--number"):
        n = int(arg)
    elif option in ("-f", "--file"):
        f = arg
test_data = [Contact(first_name='', mid_name='', last_name='',
                     adress='', email_prime='', home_phone='')] + \
            [Contact.random() for i in range(n)]
file_path = os.path.join(os.path.dirname(os.path.abspath(__file__)), "..", f)
with open(file_path, 'w') as out_file:
    jsonpickle.set_encoder_options("json", indent=2)
    out_file.write(jsonpickle.encode(test_data))
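# Example invocation (paths illustrative): write one blank sentinel contact
# plus ten random ones to <project>/data/contacts.json:
#   python contact.py -n 10 -f data/contacts.json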
| IKeiran/FPT-Sinyakov | generator/contact.py | Python | apache-2.0 | 775 |
#!/usr/bin/env python
# -*- encoding: utf-8 -*-
from datetime import date
import os.path
import re
from flask import abort, current_app, jsonify, render_template, redirect, request, send_file, url_for
from flask_babel import gettext, format_date
from sqlalchemy.orm import undefer, undefer_group
from sqlalchemy.orm.exc import NoResultFound
from pokr.cache import cache
from pokr.database import db_session
from pokr.models.meeting import Meeting
from pokr.widgets.year import year
from utils.jinja import breadcrumb
date_re = re.compile(r'^\d{4}-\d{2}-\d{2}$')
def latest_meeting_date(year):
date_ = db_session.query(Meeting.date)\
.filter(Meeting.year == year)\
.order_by(Meeting.date.desc())\
.first()
return date_[0] if date_ else None
def register(app):
app.views['meeting'] = 'meeting_main'
gettext('meeting') # for babel extraction
@app.route('/meeting/', methods=['GET'])
@breadcrumb(app)
def meeting_main():
year = request.args.get('year')
date_ = request.args.get('date')
view_type = request.args.get('type', 'list')
        if view_type == 'list':
return render_template('meetings-list.html')
# find the right calendar
if not year:
if date_:
year = format_date(date(*map(int, date_.split('-'))), 'yyyy')
else:
year = date.today().year
if not date_:
# find the latest meeting date for the selected year
d = latest_meeting_date(year)
if d:
return redirect(url_for('meeting_main',
type='calendar',
date=format_date(d, 'yyyy-MM-dd')))
# meetings of the day (optional)
meetings_of_the_day = None
if date_:
if not date_re.match(date_):
abort(404)
date_ = date(*map(int, date_.split('-')))
meetings_of_the_day = Meeting.query.filter_by(date=date_)
# meetings of the year
meetings_of_the_year =\
db_session.query(Meeting.date)\
.filter(Meeting.year == year)\
.group_by(Meeting.date)
meetings_of_the_year = (
{
'date': meeting_date,
'url': url_for('meeting_main', type='calendar',
date=format_date(meeting_date, 'yyyy-MM-dd'))
}
for (meeting_date,) in meetings_of_the_year
)
return render_template('meetings-calendar.html',
year=int(year),
date=date_,
meetings_of_the_year=meetings_of_the_year,
meetings_of_the_day=meetings_of_the_day,
)
@app.route('/meeting/list', methods=['GET'])
def meetings_list():
        def truncate(issues, l=5):
            if issues:
                s = ''.join('<li>' + i for i in issues[:l])
                if len(issues) > l:
                    s += '<li>...'
                return '<small><ul class="no-bullets">%s</ul></small>' % s
            return None
def wrap(data):
return [{
'DT_RowId': d.id,
'DT_RowClass': 'clickable',
'date': d.date.isoformat(),
'assembly_id': d.parliament_id,
'session_id': d.session_id,
'sitting_id': d.sitting_id,
'committee': d.committee,
'issues': truncate(d.issues)
} for d in data]
draw = int(request.args.get('draw', 1)) # iteration number
start = int(request.args.get('start', 0)) # starting row's id
length = int(request.args.get('length', 10)) # number of rows in page
# order by
columns = ['date', 'parliament_id', 'session_id', 'sitting_id', 'committee']
if request.args.get('order[0][column]'):
order_column = columns[int(request.args.get('order[0][column]'))]
            if request.args.get('order[0][dir]', 'asc') == 'desc':
order_column += ' desc'
else:
order_column = 'date desc' # default
meetings = Meeting.query.order_by(order_column)
filtered = meetings.offset(start).limit(length)
response = {
'draw': draw,
'data': wrap(filtered),
'recordsTotal': meetings.count(),
'recordsFiltered': meetings.count()
}
return jsonify(**response)
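    # Example round-trip (values illustrative): DataTables issues
    #   GET /meeting/list?draw=2&start=20&length=10&order[0][column]=0&order[0][dir]=desc
    # and the handler above replies with
    #   {"draw": 2, "data": [...], "recordsTotal": N, "recordsFiltered": N}
    # recordsFiltered mirrors recordsTotal because no server-side search
    # filter is applied.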
@app.route('/meeting/<id>/', methods=['GET'])
@breadcrumb(app, 'meeting')
def meeting(id):
try:
meeting = Meeting.query.filter_by(id=id)\
.options(undefer('issues')).one()
        except NoResultFound:
abort(404)
return render_template('meeting.html', meeting=meeting)
@app.route('/meeting/<id>/dialog', methods=['GET'])
@breadcrumb(app, 'meeting')
def meeting_dialogue(id):
glossary_js = generate_glossary_js()
try:
meeting = Meeting.query.filter_by(id=id)\
.options(undefer_group('extra')).one()
        except NoResultFound:
abort(404)
return render_template('meeting-dialogue.html',\
meeting=meeting, glossary_js=glossary_js)
@app.route('/meeting/<id>/pdf', methods=['GET'])
def meeting_pdf(id):
try:
meeting = Meeting.query.filter_by(id=id).one()
        except NoResultFound:
abort(404)
if meeting.document_pdf_path:
response = send_file(meeting.document_pdf_path)
response.headers['Content-Disposition'] = 'filename=%s.pdf' % id
return response
else:
abort(404)
@cache.memoize(timeout=60*60*24)
def generate_glossary_js():
datadir = os.path.join(current_app.root_path, 'data')
terms_regex = open('%s/glossary-terms.regex' % datadir).read().decode('utf-8').strip()
dictionary = open('%s/glossary-map.json' % datadir).read().decode('utf-8').strip()
return render_template('js/glossary.js', terms_regex=terms_regex,
dictionary=dictionary)
| teampopong/pokr.kr | pokr/views/meeting.py | Python | apache-2.0 | 6379 |
# coding: utf-8
"""
KubeVirt API
This is KubeVirt API an add-on for Kubernetes.
OpenAPI spec version: 1.0.0
Contact: [email protected]
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import os
import sys
import unittest
import kubevirt
from kubevirt.rest import ApiException
from kubevirt.models.v1_hotplug_volume_status import V1HotplugVolumeStatus
class TestV1HotplugVolumeStatus(unittest.TestCase):
""" V1HotplugVolumeStatus unit test stubs """
def setUp(self):
pass
def tearDown(self):
pass
def testV1HotplugVolumeStatus(self):
"""
Test V1HotplugVolumeStatus
"""
# FIXME: construct object with mandatory attributes with example values
#model = kubevirt.models.v1_hotplug_volume_status.V1HotplugVolumeStatus()
pass
if __name__ == '__main__':
unittest.main()
| kubevirt/client-python | test/test_v1_hotplug_volume_status.py | Python | apache-2.0 | 951 |
from flask import (
Blueprint,
g,
request,
url_for,
Response,
redirect,
flash
)
from .utils import project_file, template, user_audit_record, app_logger
from .auth import require_login
from .models import User, Transcript, Taxonomy
admin = Blueprint('admin', __name__)
@admin.route('/admin', methods=['GET', 'POST'])
@require_login
def admin_page():
user = getattr(g, 'user')
users = User.find_all()
taxonomies = Taxonomy.find_by_index('idx_owned', user.id)
transcripts = [
t for t in Transcript.find_by_index('idx_owned', user.id)
if not t.tagger
]
transcripts.sort(key=Transcript.sort_key)
taxonomies.sort(key=Taxonomy.sort_key)
# GET is easy...
if request.method == 'GET':
return template("admin.html", **locals())
# POST: They are requesting a transcript assignment
assignee = request.values.get('user', '')
script_id = request.values.get('script', '')
transcript = Transcript.find_one(script_id) if script_id else None
taxonomy = request.values.get('taxonomy', '')
if transcript and assignee:
transcript = transcript.assigned_copy(assignee, taxonomy)
transcript.save()
user_audit_record(transcript, "Transcript ASSIGNED to " + assignee)
flash("Transcript has been assigned")
else:
flash("Nothing assigned")
return redirect(url_for('admin.admin_page'))
@admin.route('/upload/transcript', methods=['POST'])
@require_login
def upload_transcript():
user = getattr(g, 'user')
    file = request.files.get('file')
if file:
data = file.read()
try:
transcript = Transcript.from_xml(data)
transcript.owner = user.id
transcript.save()
user_audit_record(transcript, "Transcript UPLOAD Accepted")
flash("Transcript was saved")
        except Exception:
flash("Transcript was invalid - nothing was saved", "error")
else:
flash("Transcript was NOT uploaded - no file supplied", "error")
return redirect(url_for('admin.admin_page'))
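# Hypothetical usage sketch (assumes this blueprint is registered on an app
# object named `app`): posting an in-memory file to the endpoint above with
# Flask's test client.
#
#   from io import BytesIO
#   client = app.test_client()
#   client.post('/upload/transcript',
#               data={'file': (BytesIO(b'<transcript/>'), 'sample.xml')},
#               content_type='multipart/form-data')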
@admin.route('/upload/taxonomy', methods=['POST'])
@require_login
def upload_taxonomy():
user = getattr(g, 'user')
    file = request.files.get('file')
name = request.values.get('name', '')
if file and name:
data = file.read()
try:
tax = Taxonomy.from_yaml(data)
tax.validate()
tax.name = name
tax.owner = user.id
tax.save()
app_logger().info("Taxonomy UPLOAD: " + tax.id)
flash("Taxonomy was saved")
        except Exception:
flash("Taxonomy was NOT saved - file was invalid", "error")
else:
flash("Taxonomy was NOT saved - no file supplied", "error")
return redirect(url_for('admin.admin_page'))
@admin.route('/sample/transcript', methods=['GET'])
@require_login
def sample_transcript():
with open(project_file("config/sample_transcript.xml")) as f:
data = f.read()
return Response(data, mimetype='text/xml')
@admin.route('/sample/taxonomy', methods=['GET'])
@require_login
def sample_taxonomy():
with open(project_file("config/default_taxonomy.yaml")) as f:
data = f.read()
return Response(data, mimetype='text/x-yaml')
| memphis-iis/gluten | gluten/admin.py | Python | apache-2.0 | 3285 |
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for Keras metrics functions."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import math
import os
from absl.testing import parameterized
import numpy as np
from tensorflow.python.eager import context
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.keras import keras_parameterized
from tensorflow.python.keras import layers
from tensorflow.python.keras import metrics
from tensorflow.python.keras import testing_utils
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
from tensorflow.python.training.checkpointable import util as checkpointable_utils
from tensorflow.python.training.rmsprop import RMSPropOptimizer
@test_util.run_all_in_graph_and_eager_modes
class KerasMeanTest(test.TestCase):
# TODO(b/120949004): Re-enable garbage collection check
# @test_util.run_in_graph_and_eager_modes(assert_no_eager_garbage=True)
def test_mean(self):
m = metrics.Mean(name='my_mean')
# check config
self.assertEqual(m.name, 'my_mean')
self.assertTrue(m.stateful)
self.assertEqual(m.dtype, dtypes.float32)
self.assertEqual(len(m.variables), 2)
self.evaluate(variables.variables_initializer(m.variables))
# check initial state
self.assertEqual(self.evaluate(m.total), 0)
self.assertEqual(self.evaluate(m.count), 0)
# check __call__()
self.assertEqual(self.evaluate(m(100)), 100)
self.assertEqual(self.evaluate(m.total), 100)
self.assertEqual(self.evaluate(m.count), 1)
# check update_state() and result() + state accumulation + tensor input
update_op = m.update_state(ops.convert_n_to_tensor([1, 5]))
self.evaluate(update_op)
self.assertAlmostEqual(self.evaluate(m.result()), 106 / 3, 2)
self.assertEqual(self.evaluate(m.total), 106) # 100 + 1 + 5
self.assertEqual(self.evaluate(m.count), 3)
# check reset_states()
m.reset_states()
self.assertEqual(self.evaluate(m.total), 0)
self.assertEqual(self.evaluate(m.count), 0)
# Check save and restore config
m2 = metrics.Mean.from_config(m.get_config())
self.assertEqual(m2.name, 'my_mean')
self.assertTrue(m2.stateful)
self.assertEqual(m2.dtype, dtypes.float32)
self.assertEqual(len(m2.variables), 2)
def test_mean_with_sample_weight(self):
m = metrics.Mean(dtype=dtypes.float64)
self.assertEqual(m.dtype, dtypes.float64)
self.evaluate(variables.variables_initializer(m.variables))
# check scalar weight
result_t = m(100, sample_weight=0.5)
self.assertEqual(self.evaluate(result_t), 50 / 0.5)
self.assertEqual(self.evaluate(m.total), 50)
self.assertEqual(self.evaluate(m.count), 0.5)
# check weights not scalar and weights rank matches values rank
result_t = m([1, 5], sample_weight=[1, 0.2])
result = self.evaluate(result_t)
self.assertAlmostEqual(result, 52 / 1.7, 2)
self.assertAlmostEqual(self.evaluate(m.total), 52, 2) # 50 + 1 + 5 * 0.2
self.assertAlmostEqual(self.evaluate(m.count), 1.7, 2) # 0.5 + 1.2
# check weights broadcast
result_t = m([1, 2], sample_weight=0.5)
self.assertAlmostEqual(self.evaluate(result_t), 53.5 / 2.7, 2)
self.assertAlmostEqual(self.evaluate(m.total), 53.5, 2) # 52 + 0.5 + 1
self.assertAlmostEqual(self.evaluate(m.count), 2.7, 2) # 1.7 + 0.5 + 0.5
# check weights squeeze
result_t = m([1, 5], sample_weight=[[1], [0.2]])
self.assertAlmostEqual(self.evaluate(result_t), 55.5 / 3.9, 2)
self.assertAlmostEqual(self.evaluate(m.total), 55.5, 2) # 53.5 + 1 + 1
self.assertAlmostEqual(self.evaluate(m.count), 3.9, 2) # 2.7 + 1.2
# check weights expand
result_t = m([[1], [5]], sample_weight=[1, 0.2])
self.assertAlmostEqual(self.evaluate(result_t), 57.5 / 5.1, 2)
self.assertAlmostEqual(self.evaluate(m.total), 57.5, 2) # 55.5 + 1 + 1
self.assertAlmostEqual(self.evaluate(m.count), 5.1, 2) # 3.9 + 1.2
# check values reduced to the dimensions of weight
result_t = m([[[1., 2.], [3., 2.], [0.5, 4.]]], sample_weight=[0.5])
result = np.round(self.evaluate(result_t), decimals=2) # 58.5 / 5.6
self.assertEqual(result, 10.45)
self.assertEqual(np.round(self.evaluate(m.total), decimals=2), 58.54)
self.assertEqual(np.round(self.evaluate(m.count), decimals=2), 5.6)
def test_mean_graph_with_placeholder(self):
with context.graph_mode(), self.cached_session() as sess:
m = metrics.Mean()
v = array_ops.placeholder(dtypes.float32)
w = array_ops.placeholder(dtypes.float32)
self.evaluate(variables.variables_initializer(m.variables))
# check __call__()
result_t = m(v, sample_weight=w)
result = sess.run(result_t, feed_dict=({v: 100, w: 0.5}))
self.assertEqual(self.evaluate(m.total), 50)
self.assertEqual(self.evaluate(m.count), 0.5)
self.assertEqual(result, 50 / 0.5)
# check update_state() and result()
result = sess.run(result_t, feed_dict=({v: [1, 5], w: [1, 0.2]}))
self.assertAlmostEqual(self.evaluate(m.total), 52, 2) # 50 + 1 + 5 * 0.2
self.assertAlmostEqual(self.evaluate(m.count), 1.7, 2) # 0.5 + 1.2
self.assertAlmostEqual(result, 52 / 1.7, 2)
def test_save_restore(self):
checkpoint_directory = self.get_temp_dir()
checkpoint_prefix = os.path.join(checkpoint_directory, 'ckpt')
m = metrics.Mean()
checkpoint = checkpointable_utils.Checkpoint(mean=m)
self.evaluate(variables.variables_initializer(m.variables))
# update state
self.evaluate(m(100.))
self.evaluate(m(200.))
# save checkpoint and then add an update
save_path = checkpoint.save(checkpoint_prefix)
self.evaluate(m(1000.))
# restore to the same checkpoint mean object
checkpoint.restore(save_path).assert_consumed().run_restore_ops()
self.evaluate(m(300.))
self.assertEqual(200., self.evaluate(m.result()))
# restore to a different checkpoint mean object
restore_mean = metrics.Mean()
restore_checkpoint = checkpointable_utils.Checkpoint(mean=restore_mean)
status = restore_checkpoint.restore(save_path)
restore_update = restore_mean(300.)
status.assert_consumed().run_restore_ops()
self.evaluate(restore_update)
self.assertEqual(200., self.evaluate(restore_mean.result()))
self.assertEqual(3, self.evaluate(restore_mean.count))
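# The total/count bookkeeping asserted above can be reproduced with plain
# numpy; a minimal sketch (not TF code; weights are assumed broadcastable
# against values):
#
#   import numpy as np
#   class StreamingMean(object):
#     def __init__(self):
#       self.total, self.count = 0.0, 0.0
#     def update(self, values, weights=1.0):
#       values = np.asarray(values, dtype=float)
#       weights = weights * np.ones(values.shape)
#       self.total += float((values * weights).sum())
#       self.count += float(weights.sum())
#       return self.total / self.count
#   m = StreamingMean()
#   m.update(100, 0.5)          # -> 100.0  (total=50, count=0.5)
#   m.update([1, 5], [1, 0.2])  # -> ~30.59 (total=52, count=1.7)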
@test_util.run_all_in_graph_and_eager_modes
class KerasAccuracyTest(test.TestCase):
def test_accuracy(self):
acc_obj = metrics.Accuracy(name='my acc')
# check config
self.assertEqual(acc_obj.name, 'my acc')
self.assertTrue(acc_obj.stateful)
self.assertEqual(len(acc_obj.variables), 2)
self.assertEqual(acc_obj.dtype, dtypes.float32)
self.evaluate(variables.variables_initializer(acc_obj.variables))
# verify that correct value is returned
update_op = acc_obj.update_state([[1], [2], [3], [4]], [[1], [2], [3], [4]])
self.evaluate(update_op)
result = self.evaluate(acc_obj.result())
    self.assertEqual(result, 1) # 4/4
# Check save and restore config
a2 = metrics.Accuracy.from_config(acc_obj.get_config())
self.assertEqual(a2.name, 'my acc')
self.assertTrue(a2.stateful)
self.assertEqual(len(a2.variables), 2)
self.assertEqual(a2.dtype, dtypes.float32)
# check with sample_weight
result_t = acc_obj([[2], [1]], [[2], [0]], sample_weight=[[0.5], [0.2]])
result = self.evaluate(result_t)
self.assertAlmostEqual(result, 0.96, 2) # 4.5/4.7
def test_binary_accuracy(self):
acc_obj = metrics.BinaryAccuracy(name='my acc')
# check config
self.assertEqual(acc_obj.name, 'my acc')
self.assertTrue(acc_obj.stateful)
self.assertEqual(len(acc_obj.variables), 2)
self.assertEqual(acc_obj.dtype, dtypes.float32)
self.evaluate(variables.variables_initializer(acc_obj.variables))
# verify that correct value is returned
update_op = acc_obj.update_state([[1], [0]], [[1], [0]])
self.evaluate(update_op)
result = self.evaluate(acc_obj.result())
self.assertEqual(result, 1) # 2/2
# check y_pred squeeze
update_op = acc_obj.update_state([[1], [1]], [[[1]], [[0]]])
self.evaluate(update_op)
result = self.evaluate(acc_obj.result())
self.assertAlmostEqual(result, 0.75, 2) # 3/4
# check y_true squeeze
result_t = acc_obj([[[1]], [[1]]], [[1], [0]])
result = self.evaluate(result_t)
self.assertAlmostEqual(result, 0.67, 2) # 4/6
# check with sample_weight
result_t = acc_obj([[1], [1]], [[1], [0]], [[0.5], [0.2]])
result = self.evaluate(result_t)
self.assertAlmostEqual(result, 0.67, 2) # 4.5/6.7
def test_binary_accuracy_threshold(self):
acc_obj = metrics.BinaryAccuracy(threshold=0.7)
self.evaluate(variables.variables_initializer(acc_obj.variables))
result_t = acc_obj([[1], [1], [0], [0]], [[0.9], [0.6], [0.4], [0.8]])
result = self.evaluate(result_t)
self.assertAlmostEqual(result, 0.5, 2)
def test_categorical_accuracy(self):
acc_obj = metrics.CategoricalAccuracy(name='my acc')
# check config
self.assertEqual(acc_obj.name, 'my acc')
self.assertTrue(acc_obj.stateful)
self.assertEqual(len(acc_obj.variables), 2)
self.assertEqual(acc_obj.dtype, dtypes.float32)
self.evaluate(variables.variables_initializer(acc_obj.variables))
# verify that correct value is returned
update_op = acc_obj.update_state([[0, 0, 1], [0, 1, 0]],
[[0.1, 0.1, 0.8], [0.05, 0.95, 0]])
self.evaluate(update_op)
result = self.evaluate(acc_obj.result())
self.assertEqual(result, 1) # 2/2
# check with sample_weight
result_t = acc_obj([[0, 0, 1], [0, 1, 0]],
[[0.1, 0.1, 0.8], [0.05, 0, 0.95]], [[0.5], [0.2]])
result = self.evaluate(result_t)
self.assertAlmostEqual(result, 0.93, 2) # 2.5/2.7
def test_sparse_categorical_accuracy(self):
acc_obj = metrics.SparseCategoricalAccuracy(name='my acc')
# check config
self.assertEqual(acc_obj.name, 'my acc')
self.assertTrue(acc_obj.stateful)
self.assertEqual(len(acc_obj.variables), 2)
self.assertEqual(acc_obj.dtype, dtypes.float32)
self.evaluate(variables.variables_initializer(acc_obj.variables))
# verify that correct value is returned
update_op = acc_obj.update_state([[2], [1]],
[[0.1, 0.1, 0.8], [0.05, 0.95, 0]])
self.evaluate(update_op)
result = self.evaluate(acc_obj.result())
self.assertEqual(result, 1) # 2/2
# check with sample_weight
result_t = acc_obj([[2], [1]], [[0.1, 0.1, 0.8], [0.05, 0, 0.95]],
[[0.5], [0.2]])
result = self.evaluate(result_t)
self.assertAlmostEqual(result, 0.93, 2) # 2.5/2.7
def test_sparse_categorical_accuracy_mismatched_dims(self):
acc_obj = metrics.SparseCategoricalAccuracy(name='my acc')
# check config
self.assertEqual(acc_obj.name, 'my acc')
self.assertTrue(acc_obj.stateful)
self.assertEqual(len(acc_obj.variables), 2)
self.assertEqual(acc_obj.dtype, dtypes.float32)
self.evaluate(variables.variables_initializer(acc_obj.variables))
# verify that correct value is returned
update_op = acc_obj.update_state([2, 1], [[0.1, 0.1, 0.8], [0.05, 0.95, 0]])
self.evaluate(update_op)
result = self.evaluate(acc_obj.result())
self.assertEqual(result, 1) # 2/2
# check with sample_weight
result_t = acc_obj([2, 1], [[0.1, 0.1, 0.8], [0.05, 0, 0.95]],
[[0.5], [0.2]])
result = self.evaluate(result_t)
self.assertAlmostEqual(result, 0.93, 2) # 2.5/2.7
def test_sparse_categorical_accuracy_mismatched_dims_dynamic(self):
with context.graph_mode(), self.cached_session() as sess:
acc_obj = metrics.SparseCategoricalAccuracy(name='my acc')
self.evaluate(variables.variables_initializer(acc_obj.variables))
t = array_ops.placeholder(dtypes.float32)
p = array_ops.placeholder(dtypes.float32)
w = array_ops.placeholder(dtypes.float32)
result_t = acc_obj(t, p, w)
result = sess.run(
result_t,
feed_dict=({
t: [2, 1],
p: [[0.1, 0.1, 0.8], [0.05, 0, 0.95]],
w: [[0.5], [0.2]]
}))
      self.assertAlmostEqual(result, 0.71, 2) # 0.5/0.7
@test_util.run_all_in_graph_and_eager_modes
class FalsePositivesTest(test.TestCase):
def test_config(self):
fp_obj = metrics.FalsePositives(name='my_fp', thresholds=[0.4, 0.9])
self.assertEqual(fp_obj.name, 'my_fp')
self.assertEqual(len(fp_obj.variables), 1)
self.assertEqual(fp_obj.thresholds, [0.4, 0.9])
# Check save and restore config
fp_obj2 = metrics.FalsePositives.from_config(fp_obj.get_config())
self.assertEqual(fp_obj2.name, 'my_fp')
self.assertEqual(len(fp_obj2.variables), 1)
self.assertEqual(fp_obj2.thresholds, [0.4, 0.9])
def test_unweighted(self):
fp_obj = metrics.FalsePositives()
self.evaluate(variables.variables_initializer(fp_obj.variables))
y_true = constant_op.constant(((0, 1, 0, 1, 0), (0, 0, 1, 1, 1),
(1, 1, 1, 1, 0), (0, 0, 0, 0, 1)))
y_pred = constant_op.constant(((0, 0, 1, 1, 0), (1, 1, 1, 1, 1),
(0, 1, 0, 1, 0), (1, 1, 1, 1, 1)))
update_op = fp_obj.update_state(y_true, y_pred)
self.evaluate(update_op)
result = fp_obj.result()
self.assertAllClose(7., result)
def test_weighted(self):
fp_obj = metrics.FalsePositives()
self.evaluate(variables.variables_initializer(fp_obj.variables))
y_true = constant_op.constant(((0, 1, 0, 1, 0), (0, 0, 1, 1, 1),
(1, 1, 1, 1, 0), (0, 0, 0, 0, 1)))
y_pred = constant_op.constant(((0, 0, 1, 1, 0), (1, 1, 1, 1, 1),
(0, 1, 0, 1, 0), (1, 1, 1, 1, 1)))
sample_weight = constant_op.constant((1., 1.5, 2., 2.5))
result = fp_obj(y_true, y_pred, sample_weight=sample_weight)
self.assertAllClose(14., self.evaluate(result))
def test_unweighted_with_thresholds(self):
fp_obj = metrics.FalsePositives(thresholds=[0.15, 0.5, 0.85])
self.evaluate(variables.variables_initializer(fp_obj.variables))
y_pred = constant_op.constant(((0.9, 0.2, 0.8, 0.1), (0.2, 0.9, 0.7, 0.6),
(0.1, 0.2, 0.4, 0.3), (0, 1, 0.7, 0.3)))
y_true = constant_op.constant(((0, 1, 1, 0), (1, 0, 0, 0), (0, 0, 0, 0),
(1, 1, 1, 1)))
update_op = fp_obj.update_state(y_true, y_pred)
self.evaluate(update_op)
result = fp_obj.result()
self.assertAllClose([7., 4., 2.], result)
def test_weighted_with_thresholds(self):
fp_obj = metrics.FalsePositives(thresholds=[0.15, 0.5, 0.85])
self.evaluate(variables.variables_initializer(fp_obj.variables))
y_pred = constant_op.constant(((0.9, 0.2, 0.8, 0.1), (0.2, 0.9, 0.7, 0.6),
(0.1, 0.2, 0.4, 0.3), (0, 1, 0.7, 0.3)))
y_true = constant_op.constant(((0, 1, 1, 0), (1, 0, 0, 0), (0, 0, 0, 0),
(1, 1, 1, 1)))
sample_weight = ((1.0, 2.0, 3.0, 5.0), (7.0, 11.0, 13.0, 17.0),
(19.0, 23.0, 29.0, 31.0), (5.0, 15.0, 10.0, 0))
result = fp_obj(y_true, y_pred, sample_weight=sample_weight)
self.assertAllClose([125., 42., 12.], self.evaluate(result))
def test_threshold_limit(self):
with self.assertRaisesRegexp(
ValueError,
r'Threshold values must be in \[0, 1\]. Invalid values: \[-1, 2\]'):
metrics.FalsePositives(thresholds=[-1, 0.5, 2])
with self.assertRaisesRegexp(
ValueError,
r'Threshold values must be in \[0, 1\]. Invalid values: \[None\]'):
metrics.FalsePositives(thresholds=[None])
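# The counts asserted above can be checked with a few lines of numpy; a
# sketch assuming a prediction is "positive" when it exceeds the threshold
# and that weights broadcast against y_pred:
#
#   import numpy as np
#   def false_positives(y_true, y_pred, thresholds, w=1.0):
#     y_true = np.asarray(y_true, dtype=bool)
#     y_pred = np.asarray(y_pred, dtype=float)
#     w = w * np.ones(y_pred.shape)
#     return [float((w * ((y_pred > t) & ~y_true)).sum()) for t in thresholds]
#   false_positives([[0, 1, 1, 0], [1, 0, 0, 0], [0, 0, 0, 0], [1, 1, 1, 1]],
#                   [[0.9, 0.2, 0.8, 0.1], [0.2, 0.9, 0.7, 0.6],
#                    [0.1, 0.2, 0.4, 0.3], [0, 1, 0.7, 0.3]],
#                   thresholds=[0.15, 0.5, 0.85])  # -> [7.0, 4.0, 2.0]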
@test_util.run_all_in_graph_and_eager_modes
class FalseNegativesTest(test.TestCase):
def test_config(self):
fn_obj = metrics.FalseNegatives(name='my_fn', thresholds=[0.4, 0.9])
self.assertEqual(fn_obj.name, 'my_fn')
self.assertEqual(len(fn_obj.variables), 1)
self.assertEqual(fn_obj.thresholds, [0.4, 0.9])
# Check save and restore config
fn_obj2 = metrics.FalseNegatives.from_config(fn_obj.get_config())
self.assertEqual(fn_obj2.name, 'my_fn')
self.assertEqual(len(fn_obj2.variables), 1)
self.assertEqual(fn_obj2.thresholds, [0.4, 0.9])
def test_unweighted(self):
fn_obj = metrics.FalseNegatives()
self.evaluate(variables.variables_initializer(fn_obj.variables))
y_true = constant_op.constant(((0, 1, 0, 1, 0), (0, 0, 1, 1, 1),
(1, 1, 1, 1, 0), (0, 0, 0, 0, 1)))
y_pred = constant_op.constant(((0, 0, 1, 1, 0), (1, 1, 1, 1, 1),
(0, 1, 0, 1, 0), (1, 1, 1, 1, 1)))
update_op = fn_obj.update_state(y_true, y_pred)
self.evaluate(update_op)
result = fn_obj.result()
self.assertAllClose(3., result)
def test_weighted(self):
fn_obj = metrics.FalseNegatives()
self.evaluate(variables.variables_initializer(fn_obj.variables))
y_true = constant_op.constant(((0, 1, 0, 1, 0), (0, 0, 1, 1, 1),
(1, 1, 1, 1, 0), (0, 0, 0, 0, 1)))
y_pred = constant_op.constant(((0, 0, 1, 1, 0), (1, 1, 1, 1, 1),
(0, 1, 0, 1, 0), (1, 1, 1, 1, 1)))
sample_weight = constant_op.constant((1., 1.5, 2., 2.5))
result = fn_obj(y_true, y_pred, sample_weight=sample_weight)
self.assertAllClose(5., self.evaluate(result))
def test_unweighted_with_thresholds(self):
fn_obj = metrics.FalseNegatives(thresholds=[0.15, 0.5, 0.85])
self.evaluate(variables.variables_initializer(fn_obj.variables))
y_pred = constant_op.constant(((0.9, 0.2, 0.8, 0.1), (0.2, 0.9, 0.7, 0.6),
(0.1, 0.2, 0.4, 0.3), (0, 1, 0.7, 0.3)))
y_true = constant_op.constant(((0, 1, 1, 0), (1, 0, 0, 0), (0, 0, 0, 0),
(1, 1, 1, 1)))
update_op = fn_obj.update_state(y_true, y_pred)
self.evaluate(update_op)
result = fn_obj.result()
self.assertAllClose([1., 4., 6.], result)
def test_weighted_with_thresholds(self):
fn_obj = metrics.FalseNegatives(thresholds=[0.15, 0.5, 0.85])
self.evaluate(variables.variables_initializer(fn_obj.variables))
y_pred = constant_op.constant(((0.9, 0.2, 0.8, 0.1), (0.2, 0.9, 0.7, 0.6),
(0.1, 0.2, 0.4, 0.3), (0, 1, 0.7, 0.3)))
y_true = constant_op.constant(((0, 1, 1, 0), (1, 0, 0, 0), (0, 0, 0, 0),
(1, 1, 1, 1)))
sample_weight = ((3.0,), (5.0,), (7.0,), (4.0,))
result = fn_obj(y_true, y_pred, sample_weight=sample_weight)
self.assertAllClose([4., 16., 23.], self.evaluate(result))
@test_util.run_all_in_graph_and_eager_modes
class TrueNegativesTest(test.TestCase):
def test_config(self):
tn_obj = metrics.TrueNegatives(name='my_tn', thresholds=[0.4, 0.9])
self.assertEqual(tn_obj.name, 'my_tn')
self.assertEqual(len(tn_obj.variables), 1)
self.assertEqual(tn_obj.thresholds, [0.4, 0.9])
# Check save and restore config
tn_obj2 = metrics.TrueNegatives.from_config(tn_obj.get_config())
self.assertEqual(tn_obj2.name, 'my_tn')
self.assertEqual(len(tn_obj2.variables), 1)
self.assertEqual(tn_obj2.thresholds, [0.4, 0.9])
def test_unweighted(self):
tn_obj = metrics.TrueNegatives()
self.evaluate(variables.variables_initializer(tn_obj.variables))
y_true = constant_op.constant(((0, 1, 0, 1, 0), (0, 0, 1, 1, 1),
(1, 1, 1, 1, 0), (0, 0, 0, 0, 1)))
y_pred = constant_op.constant(((0, 0, 1, 1, 0), (1, 1, 1, 1, 1),
(0, 1, 0, 1, 0), (1, 1, 1, 1, 1)))
update_op = tn_obj.update_state(y_true, y_pred)
self.evaluate(update_op)
result = tn_obj.result()
self.assertAllClose(3., result)
def test_weighted(self):
tn_obj = metrics.TrueNegatives()
self.evaluate(variables.variables_initializer(tn_obj.variables))
y_true = constant_op.constant(((0, 1, 0, 1, 0), (0, 0, 1, 1, 1),
(1, 1, 1, 1, 0), (0, 0, 0, 0, 1)))
y_pred = constant_op.constant(((0, 0, 1, 1, 0), (1, 1, 1, 1, 1),
(0, 1, 0, 1, 0), (1, 1, 1, 1, 1)))
sample_weight = constant_op.constant((1., 1.5, 2., 2.5))
result = tn_obj(y_true, y_pred, sample_weight=sample_weight)
self.assertAllClose(4., self.evaluate(result))
def test_unweighted_with_thresholds(self):
tn_obj = metrics.TrueNegatives(thresholds=[0.15, 0.5, 0.85])
self.evaluate(variables.variables_initializer(tn_obj.variables))
y_pred = constant_op.constant(((0.9, 0.2, 0.8, 0.1), (0.2, 0.9, 0.7, 0.6),
(0.1, 0.2, 0.4, 0.3), (0, 1, 0.7, 0.3)))
y_true = constant_op.constant(((0, 1, 1, 0), (1, 0, 0, 0), (0, 0, 0, 0),
(1, 1, 1, 1)))
update_op = tn_obj.update_state(y_true, y_pred)
self.evaluate(update_op)
result = tn_obj.result()
self.assertAllClose([2., 5., 7.], result)
def test_weighted_with_thresholds(self):
tn_obj = metrics.TrueNegatives(thresholds=[0.15, 0.5, 0.85])
self.evaluate(variables.variables_initializer(tn_obj.variables))
y_pred = constant_op.constant(((0.9, 0.2, 0.8, 0.1), (0.2, 0.9, 0.7, 0.6),
(0.1, 0.2, 0.4, 0.3), (0, 1, 0.7, 0.3)))
y_true = constant_op.constant(((0, 1, 1, 0), (1, 0, 0, 0), (0, 0, 0, 0),
(1, 1, 1, 1)))
sample_weight = ((0.0, 2.0, 3.0, 5.0),)
result = tn_obj(y_true, y_pred, sample_weight=sample_weight)
self.assertAllClose([5., 15., 23.], self.evaluate(result))
@test_util.run_all_in_graph_and_eager_modes
class TruePositivesTest(test.TestCase):
def test_config(self):
tp_obj = metrics.TruePositives(name='my_tp', thresholds=[0.4, 0.9])
self.assertEqual(tp_obj.name, 'my_tp')
self.assertEqual(len(tp_obj.variables), 1)
self.assertEqual(tp_obj.thresholds, [0.4, 0.9])
# Check save and restore config
tp_obj2 = metrics.TruePositives.from_config(tp_obj.get_config())
self.assertEqual(tp_obj2.name, 'my_tp')
self.assertEqual(len(tp_obj2.variables), 1)
self.assertEqual(tp_obj2.thresholds, [0.4, 0.9])
def test_unweighted(self):
tp_obj = metrics.TruePositives()
self.evaluate(variables.variables_initializer(tp_obj.variables))
y_true = constant_op.constant(((0, 1, 0, 1, 0), (0, 0, 1, 1, 1),
(1, 1, 1, 1, 0), (0, 0, 0, 0, 1)))
y_pred = constant_op.constant(((0, 0, 1, 1, 0), (1, 1, 1, 1, 1),
(0, 1, 0, 1, 0), (1, 1, 1, 1, 1)))
update_op = tp_obj.update_state(y_true, y_pred)
self.evaluate(update_op)
result = tp_obj.result()
self.assertAllClose(7., result)
def test_weighted(self):
tp_obj = metrics.TruePositives()
self.evaluate(variables.variables_initializer(tp_obj.variables))
y_true = constant_op.constant(((0, 1, 0, 1, 0), (0, 0, 1, 1, 1),
(1, 1, 1, 1, 0), (0, 0, 0, 0, 1)))
y_pred = constant_op.constant(((0, 0, 1, 1, 0), (1, 1, 1, 1, 1),
(0, 1, 0, 1, 0), (1, 1, 1, 1, 1)))
sample_weight = constant_op.constant((1., 1.5, 2., 2.5))
result = tp_obj(y_true, y_pred, sample_weight=sample_weight)
self.assertAllClose(12., self.evaluate(result))
def test_unweighted_with_thresholds(self):
tp_obj = metrics.TruePositives(thresholds=[0.15, 0.5, 0.85])
self.evaluate(variables.variables_initializer(tp_obj.variables))
y_pred = constant_op.constant(((0.9, 0.2, 0.8, 0.1), (0.2, 0.9, 0.7, 0.6),
(0.1, 0.2, 0.4, 0.3), (0, 1, 0.7, 0.3)))
y_true = constant_op.constant(((0, 1, 1, 0), (1, 0, 0, 0), (0, 0, 0, 0),
(1, 1, 1, 1)))
update_op = tp_obj.update_state(y_true, y_pred)
self.evaluate(update_op)
result = tp_obj.result()
self.assertAllClose([6., 3., 1.], result)
def test_weighted_with_thresholds(self):
tp_obj = metrics.TruePositives(thresholds=[0.15, 0.5, 0.85])
self.evaluate(variables.variables_initializer(tp_obj.variables))
y_pred = constant_op.constant(((0.9, 0.2, 0.8, 0.1), (0.2, 0.9, 0.7, 0.6),
(0.1, 0.2, 0.4, 0.3), (0, 1, 0.7, 0.3)))
y_true = constant_op.constant(((0, 1, 1, 0), (1, 0, 0, 0), (0, 0, 0, 0),
(1, 1, 1, 1)))
result = tp_obj(y_true, y_pred, sample_weight=37.)
self.assertAllClose([222., 111., 37.], self.evaluate(result))
@test_util.run_all_in_graph_and_eager_modes
class PrecisionTest(test.TestCase):
def test_config(self):
p_obj = metrics.Precision(name='my_precision', thresholds=[0.4, 0.9])
self.assertEqual(p_obj.name, 'my_precision')
self.assertEqual(len(p_obj.variables), 2)
self.assertEqual([v.name for v in p_obj.variables],
['true_positives:0', 'false_positives:0'])
self.assertEqual(p_obj.thresholds, [0.4, 0.9])
# Check save and restore config
p_obj2 = metrics.Precision.from_config(p_obj.get_config())
self.assertEqual(p_obj2.name, 'my_precision')
self.assertEqual(len(p_obj2.variables), 2)
self.assertEqual(p_obj2.thresholds, [0.4, 0.9])
def test_value_is_idempotent(self):
p_obj = metrics.Precision(thresholds=[0.3, 0.72])
y_pred = random_ops.random_uniform(shape=(10, 3))
y_true = random_ops.random_uniform(shape=(10, 3))
update_op = p_obj.update_state(y_true, y_pred)
self.evaluate(variables.variables_initializer(p_obj.variables))
# Run several updates.
for _ in range(10):
self.evaluate(update_op)
# Then verify idempotency.
initial_precision = self.evaluate(p_obj.result())
for _ in range(10):
self.assertArrayNear(initial_precision, self.evaluate(p_obj.result()),
1e-3)
def test_unweighted(self):
p_obj = metrics.Precision()
y_pred = constant_op.constant([1, 0, 1, 0], shape=(1, 4))
y_true = constant_op.constant([0, 1, 1, 0], shape=(1, 4))
self.evaluate(variables.variables_initializer(p_obj.variables))
result = p_obj(y_true, y_pred)
self.assertAlmostEqual(0.5, self.evaluate(result))
def test_unweighted_all_incorrect(self):
p_obj = metrics.Precision(thresholds=[0.5])
inputs = np.random.randint(0, 2, size=(100, 1))
y_pred = constant_op.constant(inputs)
y_true = constant_op.constant(1 - inputs)
self.evaluate(variables.variables_initializer(p_obj.variables))
result = p_obj(y_true, y_pred)
self.assertAlmostEqual(0, self.evaluate(result))
def test_weighted(self):
p_obj = metrics.Precision()
y_pred = constant_op.constant([[1, 0, 1, 0], [1, 0, 1, 0]])
y_true = constant_op.constant([[0, 1, 1, 0], [1, 0, 0, 1]])
self.evaluate(variables.variables_initializer(p_obj.variables))
result = p_obj(
y_true,
y_pred,
sample_weight=constant_op.constant([[1, 2, 3, 4], [4, 3, 2, 1]]))
weighted_tp = 3.0 + 4.0
weighted_positives = (1.0 + 3.0) + (4.0 + 2.0)
expected_precision = weighted_tp / weighted_positives
self.assertAlmostEqual(expected_precision, self.evaluate(result))
def test_div_by_zero(self):
p_obj = metrics.Precision()
y_pred = constant_op.constant([0, 0, 0, 0])
y_true = constant_op.constant([0, 0, 0, 0])
self.evaluate(variables.variables_initializer(p_obj.variables))
result = p_obj(y_true, y_pred)
self.assertEqual(0, self.evaluate(result))
def test_unweighted_with_threshold(self):
p_obj = metrics.Precision(thresholds=[0.5, 0.7])
y_pred = constant_op.constant([1, 0, 0.6, 0], shape=(1, 4))
y_true = constant_op.constant([0, 1, 1, 0], shape=(1, 4))
self.evaluate(variables.variables_initializer(p_obj.variables))
result = p_obj(y_true, y_pred)
self.assertArrayNear([0.5, 0.], self.evaluate(result), 0)
def test_weighted_with_threshold(self):
p_obj = metrics.Precision(thresholds=[0.5, 1.])
y_true = constant_op.constant([[0, 1], [1, 0]], shape=(2, 2))
y_pred = constant_op.constant([[1, 0], [0.6, 0]],
shape=(2, 2),
dtype=dtypes.float32)
weights = constant_op.constant([[4, 0], [3, 1]],
shape=(2, 2),
dtype=dtypes.float32)
self.evaluate(variables.variables_initializer(p_obj.variables))
result = p_obj(y_true, y_pred, sample_weight=weights)
weighted_tp = 0 + 3.
weighted_positives = (0 + 3.) + (4. + 0.)
expected_precision = weighted_tp / weighted_positives
self.assertArrayNear([expected_precision, 0], self.evaluate(result), 1e-3)
def test_multiple_updates(self):
p_obj = metrics.Precision(thresholds=[0.5, 1.])
y_true = constant_op.constant([[0, 1], [1, 0]], shape=(2, 2))
y_pred = constant_op.constant([[1, 0], [0.6, 0]],
shape=(2, 2),
dtype=dtypes.float32)
weights = constant_op.constant([[4, 0], [3, 1]],
shape=(2, 2),
dtype=dtypes.float32)
self.evaluate(variables.variables_initializer(p_obj.variables))
update_op = p_obj.update_state(y_true, y_pred, sample_weight=weights)
for _ in range(2):
self.evaluate(update_op)
weighted_tp = (0 + 3.) + (0 + 3.)
weighted_positives = ((0 + 3.) + (4. + 0.)) + ((0 + 3.) + (4. + 0.))
expected_precision = weighted_tp / weighted_positives
self.assertArrayNear([expected_precision, 0], self.evaluate(p_obj.result()),
1e-3)
@test_util.run_all_in_graph_and_eager_modes
class RecallTest(test.TestCase):
def test_config(self):
r_obj = metrics.Recall(name='my_recall', thresholds=[0.4, 0.9])
self.assertEqual(r_obj.name, 'my_recall')
self.assertEqual(len(r_obj.variables), 2)
self.assertEqual([v.name for v in r_obj.variables],
['true_positives:0', 'false_negatives:0'])
self.assertEqual(r_obj.thresholds, [0.4, 0.9])
# Check save and restore config
r_obj2 = metrics.Recall.from_config(r_obj.get_config())
self.assertEqual(r_obj2.name, 'my_recall')
self.assertEqual(len(r_obj2.variables), 2)
self.assertEqual(r_obj2.thresholds, [0.4, 0.9])
def test_value_is_idempotent(self):
r_obj = metrics.Recall(thresholds=[0.3, 0.72])
y_pred = random_ops.random_uniform(shape=(10, 3))
y_true = random_ops.random_uniform(shape=(10, 3))
update_op = r_obj.update_state(y_true, y_pred)
self.evaluate(variables.variables_initializer(r_obj.variables))
# Run several updates.
for _ in range(10):
self.evaluate(update_op)
# Then verify idempotency.
initial_recall = self.evaluate(r_obj.result())
for _ in range(10):
self.assertArrayNear(initial_recall, self.evaluate(r_obj.result()), 1e-3)
def test_unweighted(self):
r_obj = metrics.Recall()
y_pred = constant_op.constant([1, 0, 1, 0], shape=(1, 4))
y_true = constant_op.constant([0, 1, 1, 0], shape=(1, 4))
self.evaluate(variables.variables_initializer(r_obj.variables))
result = r_obj(y_true, y_pred)
self.assertAlmostEqual(0.5, self.evaluate(result))
def test_unweighted_all_incorrect(self):
r_obj = metrics.Recall(thresholds=[0.5])
inputs = np.random.randint(0, 2, size=(100, 1))
y_pred = constant_op.constant(inputs)
y_true = constant_op.constant(1 - inputs)
self.evaluate(variables.variables_initializer(r_obj.variables))
result = r_obj(y_true, y_pred)
self.assertAlmostEqual(0, self.evaluate(result))
def test_weighted(self):
r_obj = metrics.Recall()
y_pred = constant_op.constant([[1, 0, 1, 0], [0, 1, 0, 1]])
y_true = constant_op.constant([[0, 1, 1, 0], [1, 0, 0, 1]])
self.evaluate(variables.variables_initializer(r_obj.variables))
result = r_obj(
y_true,
y_pred,
sample_weight=constant_op.constant([[1, 2, 3, 4], [4, 3, 2, 1]]))
weighted_tp = 3.0 + 1.0
weighted_t = (2.0 + 3.0) + (4.0 + 1.0)
expected_recall = weighted_tp / weighted_t
self.assertAlmostEqual(expected_recall, self.evaluate(result))
def test_div_by_zero(self):
r_obj = metrics.Recall()
y_pred = constant_op.constant([0, 0, 0, 0])
y_true = constant_op.constant([0, 0, 0, 0])
self.evaluate(variables.variables_initializer(r_obj.variables))
result = r_obj(y_true, y_pred)
self.assertEqual(0, self.evaluate(result))
def test_unweighted_with_threshold(self):
r_obj = metrics.Recall(thresholds=[0.5, 0.7])
y_pred = constant_op.constant([1, 0, 0.6, 0], shape=(1, 4))
y_true = constant_op.constant([0, 1, 1, 0], shape=(1, 4))
self.evaluate(variables.variables_initializer(r_obj.variables))
result = r_obj(y_true, y_pred)
self.assertArrayNear([0.5, 0.], self.evaluate(result), 0)
def test_weighted_with_threshold(self):
r_obj = metrics.Recall(thresholds=[0.5, 1.])
y_true = constant_op.constant([[0, 1], [1, 0]], shape=(2, 2))
y_pred = constant_op.constant([[1, 0], [0.6, 0]],
shape=(2, 2),
dtype=dtypes.float32)
weights = constant_op.constant([[1, 4], [3, 2]],
shape=(2, 2),
dtype=dtypes.float32)
self.evaluate(variables.variables_initializer(r_obj.variables))
result = r_obj(y_true, y_pred, sample_weight=weights)
weighted_tp = 0 + 3.
weighted_positives = (0 + 3.) + (4. + 0.)
expected_recall = weighted_tp / weighted_positives
self.assertArrayNear([expected_recall, 0], self.evaluate(result), 1e-3)
def test_multiple_updates(self):
r_obj = metrics.Recall(thresholds=[0.5, 1.])
y_true = constant_op.constant([[0, 1], [1, 0]], shape=(2, 2))
y_pred = constant_op.constant([[1, 0], [0.6, 0]],
shape=(2, 2),
dtype=dtypes.float32)
weights = constant_op.constant([[1, 4], [3, 2]],
shape=(2, 2),
dtype=dtypes.float32)
self.evaluate(variables.variables_initializer(r_obj.variables))
update_op = r_obj.update_state(y_true, y_pred, sample_weight=weights)
for _ in range(2):
self.evaluate(update_op)
weighted_tp = (0 + 3.) + (0 + 3.)
weighted_positives = ((0 + 3.) + (4. + 0.)) + ((0 + 3.) + (4. + 0.))
expected_recall = weighted_tp / weighted_positives
self.assertArrayNear([expected_recall, 0], self.evaluate(r_obj.result()),
1e-3)
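# Precision and recall reduce to the same weighted confusion counts; a plain
# numpy sketch (single 0.5 threshold assumed; note Keras returns 0 where this
# would divide by zero):
#
#   import numpy as np
#   def precision_recall(y_true, y_pred, w=1.0, t=0.5):
#     y_true = np.asarray(y_true, dtype=bool)
#     pos = np.asarray(y_pred, dtype=float) > t
#     w = w * np.ones(pos.shape)
#     tp = float((w * (pos & y_true)).sum())
#     fp = float((w * (pos & ~y_true)).sum())
#     fn = float((w * (~pos & y_true)).sum())
#     return tp / (tp + fp), tp / (tp + fn)
#   precision_recall([0, 1, 1, 0], [1, 0, 1, 0])  # -> (0.5, 0.5)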
@test_util.run_all_in_graph_and_eager_modes
class SensitivityAtSpecificityTest(test.TestCase, parameterized.TestCase):
def test_config(self):
s_obj = metrics.SensitivityAtSpecificity(
0.4, num_thresholds=100, name='sensitivity_at_specificity_1')
self.assertEqual(s_obj.name, 'sensitivity_at_specificity_1')
self.assertLen(s_obj.variables, 4)
self.assertEqual(s_obj.specificity, 0.4)
self.assertEqual(s_obj.num_thresholds, 100)
# Check save and restore config
s_obj2 = metrics.SensitivityAtSpecificity.from_config(s_obj.get_config())
self.assertEqual(s_obj2.name, 'sensitivity_at_specificity_1')
self.assertLen(s_obj2.variables, 4)
self.assertEqual(s_obj2.specificity, 0.4)
self.assertEqual(s_obj2.num_thresholds, 100)
def test_value_is_idempotent(self):
s_obj = metrics.SensitivityAtSpecificity(0.7)
y_pred = random_ops.random_uniform((10, 3),
maxval=1,
dtype=dtypes.float32,
seed=1)
y_true = random_ops.random_uniform((10, 3),
maxval=2,
dtype=dtypes.int64,
seed=1)
update_op = s_obj.update_state(y_true, y_pred)
self.evaluate(variables.variables_initializer(s_obj.variables))
# Run several updates.
for _ in range(10):
self.evaluate(update_op)
# Then verify idempotency.
initial_sensitivity = self.evaluate(s_obj.result())
for _ in range(10):
self.assertAlmostEqual(initial_sensitivity, self.evaluate(s_obj.result()),
1e-3)
def test_unweighted_all_correct(self):
s_obj = metrics.SensitivityAtSpecificity(0.7)
inputs = np.random.randint(0, 2, size=(100, 1))
y_pred = constant_op.constant(inputs, dtype=dtypes.float32)
y_true = constant_op.constant(inputs)
self.evaluate(variables.variables_initializer(s_obj.variables))
result = s_obj(y_true, y_pred)
self.assertAlmostEqual(1, self.evaluate(result))
def test_unweighted_high_specificity(self):
s_obj = metrics.SensitivityAtSpecificity(0.8)
pred_values = [0.0, 0.1, 0.2, 0.3, 0.4, 0.1, 0.45, 0.5, 0.8, 0.9]
label_values = [0, 0, 0, 0, 0, 1, 1, 1, 1, 1]
y_pred = constant_op.constant(pred_values, dtype=dtypes.float32)
y_true = constant_op.constant(label_values)
self.evaluate(variables.variables_initializer(s_obj.variables))
result = s_obj(y_true, y_pred)
self.assertAlmostEqual(0.8, self.evaluate(result))
def test_unweighted_low_specificity(self):
s_obj = metrics.SensitivityAtSpecificity(0.4)
pred_values = [0.0, 0.1, 0.2, 0.3, 0.4, 0.01, 0.02, 0.25, 0.26, 0.26]
label_values = [0, 0, 0, 0, 0, 1, 1, 1, 1, 1]
y_pred = constant_op.constant(pred_values, dtype=dtypes.float32)
y_true = constant_op.constant(label_values)
self.evaluate(variables.variables_initializer(s_obj.variables))
result = s_obj(y_true, y_pred)
self.assertAlmostEqual(0.6, self.evaluate(result))
@parameterized.parameters([dtypes.bool, dtypes.int32, dtypes.float32])
def test_weighted(self, label_dtype):
s_obj = metrics.SensitivityAtSpecificity(0.4)
pred_values = [0.0, 0.1, 0.2, 0.3, 0.4, 0.01, 0.02, 0.25, 0.26, 0.26]
label_values = [0, 0, 0, 0, 0, 1, 1, 1, 1, 1]
weight_values = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
y_pred = constant_op.constant(pred_values, dtype=dtypes.float32)
y_true = math_ops.cast(label_values, dtype=label_dtype)
weights = constant_op.constant(weight_values)
self.evaluate(variables.variables_initializer(s_obj.variables))
result = s_obj(y_true, y_pred, sample_weight=weights)
self.assertAlmostEqual(0.675, self.evaluate(result))
def test_invalid_specificity(self):
with self.assertRaisesRegexp(
ValueError, r'`specificity` must be in the range \[0, 1\].'):
metrics.SensitivityAtSpecificity(-1)
def test_invalid_num_thresholds(self):
with self.assertRaisesRegexp(ValueError, '`num_thresholds` must be > 0.'):
metrics.SensitivityAtSpecificity(0.4, num_thresholds=-1)
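# A rough numpy sketch of what SensitivityAtSpecificity computes: scan
# candidate thresholds, pick the one whose specificity lands closest to the
# target, and report the sensitivity there. This is an unweighted
# approximation of the TF implementation, not a port of it:
#
#   import numpy as np
#   def sensitivity_at_specificity(y_true, y_pred, target, num_thresholds=200):
#     y_true = np.asarray(y_true, dtype=bool)
#     y_pred = np.asarray(y_pred, dtype=float)
#     best_sens, best_gap = 0.0, float('inf')
#     for t in np.linspace(0.0, 1.0, num_thresholds):
#       pred = y_pred >= t
#       tp = float((pred & y_true).sum()); fn = float((~pred & y_true).sum())
#       tn = float((~pred & ~y_true).sum()); fp = float((pred & ~y_true).sum())
#       spec = tn / (tn + fp) if tn + fp else 0.0
#       if abs(spec - target) < best_gap:
#         best_gap = abs(spec - target)
#         best_sens = tp / (tp + fn) if tp + fn else 0.0
#     return best_sens
#   # reproduces the high-specificity fixture above:
#   sensitivity_at_specificity([0] * 5 + [1] * 5,
#                              [0.0, 0.1, 0.2, 0.3, 0.4, 0.1, 0.45, 0.5, 0.8, 0.9],
#                              0.8)  # -> 0.8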
@test_util.run_all_in_graph_and_eager_modes
class SpecificityAtSensitivityTest(test.TestCase, parameterized.TestCase):
def test_config(self):
s_obj = metrics.SpecificityAtSensitivity(
0.4, num_thresholds=100, name='specificity_at_sensitivity_1')
self.assertEqual(s_obj.name, 'specificity_at_sensitivity_1')
self.assertLen(s_obj.variables, 4)
self.assertEqual(s_obj.sensitivity, 0.4)
self.assertEqual(s_obj.num_thresholds, 100)
# Check save and restore config
s_obj2 = metrics.SpecificityAtSensitivity.from_config(s_obj.get_config())
self.assertEqual(s_obj2.name, 'specificity_at_sensitivity_1')
self.assertLen(s_obj2.variables, 4)
self.assertEqual(s_obj2.sensitivity, 0.4)
self.assertEqual(s_obj2.num_thresholds, 100)
def test_value_is_idempotent(self):
s_obj = metrics.SpecificityAtSensitivity(0.7)
y_pred = random_ops.random_uniform((10, 3),
maxval=1,
dtype=dtypes.float32,
seed=1)
y_true = random_ops.random_uniform((10, 3),
maxval=2,
dtype=dtypes.int64,
seed=1)
update_op = s_obj.update_state(y_true, y_pred)
self.evaluate(variables.variables_initializer(s_obj.variables))
# Run several updates.
for _ in range(10):
self.evaluate(update_op)
# Then verify idempotency.
initial_specificity = self.evaluate(s_obj.result())
for _ in range(10):
self.assertAlmostEqual(initial_specificity, self.evaluate(s_obj.result()),
1e-3)
def test_unweighted_all_correct(self):
s_obj = metrics.SpecificityAtSensitivity(0.7)
inputs = np.random.randint(0, 2, size=(100, 1))
y_pred = constant_op.constant(inputs, dtype=dtypes.float32)
y_true = constant_op.constant(inputs)
self.evaluate(variables.variables_initializer(s_obj.variables))
result = s_obj(y_true, y_pred)
self.assertAlmostEqual(1, self.evaluate(result))
def test_unweighted_high_sensitivity(self):
s_obj = metrics.SpecificityAtSensitivity(0.8)
pred_values = [0.0, 0.1, 0.2, 0.3, 0.4, 0.1, 0.45, 0.5, 0.8, 0.9]
label_values = [0, 0, 0, 0, 0, 1, 1, 1, 1, 1]
y_pred = constant_op.constant(pred_values, dtype=dtypes.float32)
y_true = constant_op.constant(label_values)
self.evaluate(variables.variables_initializer(s_obj.variables))
result = s_obj(y_true, y_pred)
self.assertAlmostEqual(0.4, self.evaluate(result))
def test_unweighted_low_sensitivity(self):
s_obj = metrics.SpecificityAtSensitivity(0.4)
pred_values = [0.0, 0.1, 0.2, 0.3, 0.4, 0.01, 0.02, 0.25, 0.26, 0.26]
label_values = [0, 0, 0, 0, 0, 1, 1, 1, 1, 1]
y_pred = constant_op.constant(pred_values, dtype=dtypes.float32)
y_true = constant_op.constant(label_values)
self.evaluate(variables.variables_initializer(s_obj.variables))
result = s_obj(y_true, y_pred)
self.assertAlmostEqual(0.6, self.evaluate(result))
@parameterized.parameters([dtypes.bool, dtypes.int32, dtypes.float32])
def test_weighted(self, label_dtype):
s_obj = metrics.SpecificityAtSensitivity(0.4)
pred_values = [0.0, 0.1, 0.2, 0.3, 0.4, 0.01, 0.02, 0.25, 0.26, 0.26]
label_values = [0, 0, 0, 0, 0, 1, 1, 1, 1, 1]
weight_values = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
y_pred = constant_op.constant(pred_values, dtype=dtypes.float32)
y_true = math_ops.cast(label_values, dtype=label_dtype)
weights = constant_op.constant(weight_values)
self.evaluate(variables.variables_initializer(s_obj.variables))
result = s_obj(y_true, y_pred, sample_weight=weights)
self.assertAlmostEqual(0.4, self.evaluate(result))
def test_invalid_sensitivity(self):
with self.assertRaisesRegexp(
ValueError, r'`sensitivity` must be in the range \[0, 1\].'):
metrics.SpecificityAtSensitivity(-1)
def test_invalid_num_thresholds(self):
with self.assertRaisesRegexp(ValueError, '`num_thresholds` must be > 0.'):
metrics.SpecificityAtSensitivity(0.4, num_thresholds=-1)
@test_util.run_all_in_graph_and_eager_modes
class CosineProximityTest(test.TestCase):
def test_config(self):
cosine_obj = metrics.CosineProximity(name='my_cos', dtype=dtypes.int32)
self.assertEqual(cosine_obj.name, 'my_cos')
self.assertEqual(cosine_obj._dtype, dtypes.int32)
# Check save and restore config
cosine_obj2 = metrics.CosineProximity.from_config(cosine_obj.get_config())
self.assertEqual(cosine_obj2.name, 'my_cos')
self.assertEqual(cosine_obj2._dtype, dtypes.int32)
def test_unweighted(self):
cosine_obj = metrics.CosineProximity()
self.evaluate(variables.variables_initializer(cosine_obj.variables))
y_true = constant_op.constant(((0, 1, 0, 1, 0), (0, 0, 1, 1, 1),
(1, 1, 1, 1, 0), (0, 0, 0, 0, 1)))
y_pred = constant_op.constant(((0, 0, 1, 1, 0), (1, 1, 1, 1, 1),
(0, 1, 0, 1, 0), (1, 1, 1, 1, 1)))
update_op = cosine_obj.update_state(y_true, y_pred)
self.evaluate(update_op)
result = cosine_obj.result()
self.assertAllClose(-0.60723, result, atol=1e-5)
def test_weighted(self):
cosine_obj = metrics.CosineProximity()
self.evaluate(variables.variables_initializer(cosine_obj.variables))
y_true = constant_op.constant(((0, 1, 0, 1, 0), (0, 0, 1, 1, 1),
(1, 1, 1, 1, 0), (0, 0, 0, 0, 1)))
y_pred = constant_op.constant(((0, 0, 1, 1, 0), (1, 1, 1, 1, 1),
(0, 1, 0, 1, 0), (1, 1, 1, 1, 1)))
sample_weight = constant_op.constant((1., 1.5, 2., 2.5))
result = cosine_obj(y_true, y_pred, sample_weight=sample_weight)
self.assertAllClose(-0.59916, self.evaluate(result), atol=1e-5)
@test_util.run_all_in_graph_and_eager_modes
class MeanAbsoluteErrorTest(test.TestCase):
def test_config(self):
mae_obj = metrics.MeanAbsoluteError(name='my_mae', dtype=dtypes.int32)
self.assertEqual(mae_obj.name, 'my_mae')
self.assertEqual(mae_obj._dtype, dtypes.int32)
# Check save and restore config
mae_obj2 = metrics.MeanAbsoluteError.from_config(mae_obj.get_config())
self.assertEqual(mae_obj2.name, 'my_mae')
self.assertEqual(mae_obj2._dtype, dtypes.int32)
def test_unweighted(self):
mae_obj = metrics.MeanAbsoluteError()
self.evaluate(variables.variables_initializer(mae_obj.variables))
y_true = constant_op.constant(((0, 1, 0, 1, 0), (0, 0, 1, 1, 1),
(1, 1, 1, 1, 0), (0, 0, 0, 0, 1)))
y_pred = constant_op.constant(((0, 0, 1, 1, 0), (1, 1, 1, 1, 1),
(0, 1, 0, 1, 0), (1, 1, 1, 1, 1)))
update_op = mae_obj.update_state(y_true, y_pred)
self.evaluate(update_op)
result = mae_obj.result()
self.assertAllClose(0.5, result, atol=1e-5)
def test_weighted(self):
mae_obj = metrics.MeanAbsoluteError()
self.evaluate(variables.variables_initializer(mae_obj.variables))
y_true = constant_op.constant(((0, 1, 0, 1, 0), (0, 0, 1, 1, 1),
(1, 1, 1, 1, 0), (0, 0, 0, 0, 1)))
y_pred = constant_op.constant(((0, 0, 1, 1, 0), (1, 1, 1, 1, 1),
(0, 1, 0, 1, 0), (1, 1, 1, 1, 1)))
sample_weight = constant_op.constant((1., 1.5, 2., 2.5))
result = mae_obj(y_true, y_pred, sample_weight=sample_weight)
self.assertAllClose(0.54285, self.evaluate(result), atol=1e-5)
@test_util.run_all_in_graph_and_eager_modes
class MeanAbsolutePercentageErrorTest(test.TestCase):
def test_config(self):
mape_obj = metrics.MeanAbsolutePercentageError(
name='my_mape', dtype=dtypes.int32)
self.assertEqual(mape_obj.name, 'my_mape')
self.assertEqual(mape_obj._dtype, dtypes.int32)
# Check save and restore config
mape_obj2 = metrics.MeanAbsolutePercentageError.from_config(
mape_obj.get_config())
self.assertEqual(mape_obj2.name, 'my_mape')
self.assertEqual(mape_obj2._dtype, dtypes.int32)
def test_unweighted(self):
mape_obj = metrics.MeanAbsolutePercentageError()
self.evaluate(variables.variables_initializer(mape_obj.variables))
y_true = constant_op.constant(((0, 1, 0, 1, 0), (0, 0, 1, 1, 1),
(1, 1, 1, 1, 0), (0, 0, 0, 0, 1)))
y_pred = constant_op.constant(((0, 0, 1, 1, 0), (1, 1, 1, 1, 1),
(0, 1, 0, 1, 0), (1, 1, 1, 1, 1)))
update_op = mape_obj.update_state(y_true, y_pred)
self.evaluate(update_op)
result = mape_obj.result()
self.assertAllClose(35e7, result, atol=1e-5)
def test_weighted(self):
mape_obj = metrics.MeanAbsolutePercentageError()
self.evaluate(variables.variables_initializer(mape_obj.variables))
y_true = constant_op.constant(((0, 1, 0, 1, 0), (0, 0, 1, 1, 1),
(1, 1, 1, 1, 0), (0, 0, 0, 0, 1)))
y_pred = constant_op.constant(((0, 0, 1, 1, 0), (1, 1, 1, 1, 1),
(0, 1, 0, 1, 0), (1, 1, 1, 1, 1)))
sample_weight = constant_op.constant((1., 1.5, 2., 2.5))
result = mape_obj(y_true, y_pred, sample_weight=sample_weight)
self.assertAllClose(40e7, self.evaluate(result), atol=1e-5)
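# The huge expectations above (35e7, 40e7) are not typos: Keras MAPE divides
# by max(|y_true|, epsilon) with a backend epsilon of about 1e-7, so each
# mismatch against a zero label contributes roughly 1e9 percent. Unweighted
# case: 7 mismatches on zero labels plus 3 on one labels (100% each) over 20
# elements gives (7 * 1e9 + 3 * 100) / 20, i.e. approximately 35e7.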
@test_util.run_all_in_graph_and_eager_modes
class MeanSquaredErrorTest(test.TestCase):
def test_config(self):
mse_obj = metrics.MeanSquaredError(name='my_mse', dtype=dtypes.int32)
self.assertEqual(mse_obj.name, 'my_mse')
self.assertEqual(mse_obj._dtype, dtypes.int32)
# Check save and restore config
mse_obj2 = metrics.MeanSquaredError.from_config(mse_obj.get_config())
self.assertEqual(mse_obj2.name, 'my_mse')
self.assertEqual(mse_obj2._dtype, dtypes.int32)
def test_unweighted(self):
mse_obj = metrics.MeanSquaredError()
self.evaluate(variables.variables_initializer(mse_obj.variables))
y_true = constant_op.constant(((0, 1, 0, 1, 0), (0, 0, 1, 1, 1),
(1, 1, 1, 1, 0), (0, 0, 0, 0, 1)))
y_pred = constant_op.constant(((0, 0, 1, 1, 0), (1, 1, 1, 1, 1),
(0, 1, 0, 1, 0), (1, 1, 1, 1, 1)))
update_op = mse_obj.update_state(y_true, y_pred)
self.evaluate(update_op)
result = mse_obj.result()
self.assertAllClose(0.5, result, atol=1e-5)
def test_weighted(self):
mse_obj = metrics.MeanSquaredError()
self.evaluate(variables.variables_initializer(mse_obj.variables))
y_true = constant_op.constant(((0, 1, 0, 1, 0), (0, 0, 1, 1, 1),
(1, 1, 1, 1, 0), (0, 0, 0, 0, 1)))
y_pred = constant_op.constant(((0, 0, 1, 1, 0), (1, 1, 1, 1, 1),
(0, 1, 0, 1, 0), (1, 1, 1, 1, 1)))
sample_weight = constant_op.constant((1., 1.5, 2., 2.5))
result = mse_obj(y_true, y_pred, sample_weight=sample_weight)
self.assertAllClose(0.54285, self.evaluate(result), atol=1e-5)
@test_util.run_all_in_graph_and_eager_modes
class MeanSquaredLogarithmicErrorTest(test.TestCase):
def test_config(self):
msle_obj = metrics.MeanSquaredLogarithmicError(
name='my_msle', dtype=dtypes.int32)
self.assertEqual(msle_obj.name, 'my_msle')
self.assertEqual(msle_obj._dtype, dtypes.int32)
# Check save and restore config
msle_obj2 = metrics.MeanSquaredLogarithmicError.from_config(
msle_obj.get_config())
self.assertEqual(msle_obj2.name, 'my_msle')
self.assertEqual(msle_obj2._dtype, dtypes.int32)
def test_unweighted(self):
msle_obj = metrics.MeanSquaredLogarithmicError()
self.evaluate(variables.variables_initializer(msle_obj.variables))
y_true = constant_op.constant(((0, 1, 0, 1, 0), (0, 0, 1, 1, 1),
(1, 1, 1, 1, 0), (0, 0, 0, 0, 1)))
y_pred = constant_op.constant(((0, 0, 1, 1, 0), (1, 1, 1, 1, 1),
(0, 1, 0, 1, 0), (1, 1, 1, 1, 1)))
update_op = msle_obj.update_state(y_true, y_pred)
self.evaluate(update_op)
result = msle_obj.result()
self.assertAllClose(0.24022, result, atol=1e-5)
def test_weighted(self):
msle_obj = metrics.MeanSquaredLogarithmicError()
self.evaluate(variables.variables_initializer(msle_obj.variables))
y_true = constant_op.constant(((0, 1, 0, 1, 0), (0, 0, 1, 1, 1),
(1, 1, 1, 1, 0), (0, 0, 0, 0, 1)))
y_pred = constant_op.constant(((0, 0, 1, 1, 0), (1, 1, 1, 1, 1),
(0, 1, 0, 1, 0), (1, 1, 1, 1, 1)))
sample_weight = constant_op.constant((1., 1.5, 2., 2.5))
result = msle_obj(y_true, y_pred, sample_weight=sample_weight)
self.assertAllClose(0.26082, self.evaluate(result), atol=1e-5)
@test_util.run_all_in_graph_and_eager_modes
class HingeTest(test.TestCase):
def test_config(self):
hinge_obj = metrics.Hinge(name='hinge', dtype=dtypes.int32)
self.assertEqual(hinge_obj.name, 'hinge')
self.assertEqual(hinge_obj._dtype, dtypes.int32)
# Check save and restore config
hinge_obj2 = metrics.Hinge.from_config(hinge_obj.get_config())
self.assertEqual(hinge_obj2.name, 'hinge')
self.assertEqual(hinge_obj2._dtype, dtypes.int32)
def test_unweighted(self):
hinge_obj = metrics.Hinge()
self.evaluate(variables.variables_initializer(hinge_obj.variables))
y_true = constant_op.constant(((0, 1, 0, 1, 0), (0, 0, 1, 1, 1),
(1, 1, 1, 1, 0), (0, 0, 0, 0, 1)))
y_pred = constant_op.constant(((0, 0, 1, 1, 0), (1, 1, 1, 1, 1),
(0, 1, 0, 1, 0), (1, 1, 1, 1, 1)))
update_op = hinge_obj.update_state(y_true, y_pred)
self.evaluate(update_op)
result = hinge_obj.result()
self.assertAllClose(0.65, result, atol=1e-5)
def test_weighted(self):
hinge_obj = metrics.Hinge()
self.evaluate(variables.variables_initializer(hinge_obj.variables))
y_true = constant_op.constant(((0, 1, 0, 1, 0), (0, 0, 1, 1, 1),
(1, 1, 1, 1, 0), (0, 0, 0, 0, 1)))
y_pred = constant_op.constant(((0, 0, 1, 1, 0), (1, 1, 1, 1, 1),
(0, 1, 0, 1, 0), (1, 1, 1, 1, 1)))
sample_weight = constant_op.constant((1., 1.5, 2., 2.5))
result = hinge_obj(y_true, y_pred, sample_weight=sample_weight)
self.assertAllClose(0.65714, self.evaluate(result), atol=1e-5)
@test_util.run_all_in_graph_and_eager_modes
class SquaredHingeTest(test.TestCase):
def test_config(self):
sq_hinge_obj = metrics.SquaredHinge(name='sq_hinge', dtype=dtypes.int32)
self.assertEqual(sq_hinge_obj.name, 'sq_hinge')
self.assertEqual(sq_hinge_obj._dtype, dtypes.int32)
# Check save and restore config
sq_hinge_obj2 = metrics.SquaredHinge.from_config(sq_hinge_obj.get_config())
self.assertEqual(sq_hinge_obj2.name, 'sq_hinge')
self.assertEqual(sq_hinge_obj2._dtype, dtypes.int32)
def test_unweighted(self):
sq_hinge_obj = metrics.SquaredHinge()
self.evaluate(variables.variables_initializer(sq_hinge_obj.variables))
y_true = constant_op.constant(((0, 1, 0, 1, 0), (0, 0, 1, 1, 1),
(1, 1, 1, 1, 0), (0, 0, 0, 0, 1)))
y_pred = constant_op.constant(((0, 0, 1, 1, 0), (1, 1, 1, 1, 1),
(0, 1, 0, 1, 0), (1, 1, 1, 1, 1)))
update_op = sq_hinge_obj.update_state(y_true, y_pred)
self.evaluate(update_op)
result = sq_hinge_obj.result()
self.assertAllClose(0.65, result, atol=1e-5)
def test_weighted(self):
sq_hinge_obj = metrics.SquaredHinge()
self.evaluate(variables.variables_initializer(sq_hinge_obj.variables))
y_true = constant_op.constant(((0, 1, 0, 1, 0), (0, 0, 1, 1, 1),
(1, 1, 1, 1, 0), (0, 0, 0, 0, 1)))
y_pred = constant_op.constant(((0, 0, 1, 1, 0), (1, 1, 1, 1, 1),
(0, 1, 0, 1, 0), (1, 1, 1, 1, 1)))
sample_weight = constant_op.constant((1., 1.5, 2., 2.5))
result = sq_hinge_obj(y_true, y_pred, sample_weight=sample_weight)
self.assertAllClose(0.65714, self.evaluate(result), atol=1e-5)
@test_util.run_all_in_graph_and_eager_modes
class CategoricalHingeTest(test.TestCase):
def test_config(self):
cat_hinge_obj = metrics.CategoricalHinge(
name='cat_hinge', dtype=dtypes.int32)
self.assertEqual(cat_hinge_obj.name, 'cat_hinge')
self.assertEqual(cat_hinge_obj._dtype, dtypes.int32)
# Check save and restore config
cat_hinge_obj2 = metrics.CategoricalHinge.from_config(
cat_hinge_obj.get_config())
self.assertEqual(cat_hinge_obj2.name, 'cat_hinge')
self.assertEqual(cat_hinge_obj2._dtype, dtypes.int32)
def test_unweighted(self):
cat_hinge_obj = metrics.CategoricalHinge()
self.evaluate(variables.variables_initializer(cat_hinge_obj.variables))
y_true = constant_op.constant(((0, 1, 0, 1, 0), (0, 0, 1, 1, 1),
(1, 1, 1, 1, 0), (0, 0, 0, 0, 1)))
y_pred = constant_op.constant(((0, 0, 1, 1, 0), (1, 1, 1, 1, 1),
(0, 1, 0, 1, 0), (1, 1, 1, 1, 1)))
update_op = cat_hinge_obj.update_state(y_true, y_pred)
self.evaluate(update_op)
result = cat_hinge_obj.result()
self.assertAllClose(0.5, result, atol=1e-5)
def test_weighted(self):
cat_hinge_obj = metrics.CategoricalHinge()
self.evaluate(variables.variables_initializer(cat_hinge_obj.variables))
y_true = constant_op.constant(((0, 1, 0, 1, 0), (0, 0, 1, 1, 1),
(1, 1, 1, 1, 0), (0, 0, 0, 0, 1)))
y_pred = constant_op.constant(((0, 0, 1, 1, 0), (1, 1, 1, 1, 1),
(0, 1, 0, 1, 0), (1, 1, 1, 1, 1)))
sample_weight = constant_op.constant((1., 1.5, 2., 2.5))
result = cat_hinge_obj(y_true, y_pred, sample_weight=sample_weight)
self.assertAllClose(0.5, self.evaluate(result), atol=1e-5)
@test_util.run_all_in_graph_and_eager_modes
class RootMeanSquaredErrorTest(test.TestCase):
def test_config(self):
rmse_obj = metrics.RootMeanSquaredError(name='rmse', dtype=dtypes.int32)
self.assertEqual(rmse_obj.name, 'rmse')
self.assertEqual(rmse_obj._dtype, dtypes.int32)
rmse_obj2 = metrics.RootMeanSquaredError.from_config(rmse_obj.get_config())
self.assertEqual(rmse_obj2.name, 'rmse')
self.assertEqual(rmse_obj2._dtype, dtypes.int32)
def test_unweighted(self):
rmse_obj = metrics.RootMeanSquaredError()
self.evaluate(variables.variables_initializer(rmse_obj.variables))
y_true = constant_op.constant((2, 4, 6))
y_pred = constant_op.constant((1, 3, 2))
update_op = rmse_obj.update_state(y_true, y_pred)
self.evaluate(update_op)
result = rmse_obj.result()
# error = [-1, -1, -4], square(error) = [1, 1, 16], mean = 18/3 = 6
self.assertAllClose(math.sqrt(6), result, atol=1e-3)
def test_weighted(self):
rmse_obj = metrics.RootMeanSquaredError()
self.evaluate(variables.variables_initializer(rmse_obj.variables))
y_true = constant_op.constant((2, 4, 6, 8))
y_pred = constant_op.constant((1, 3, 2, 3))
sample_weight = constant_op.constant((0, 1, 0, 1))
result = rmse_obj(y_true, y_pred, sample_weight=sample_weight)
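    # error = [-1, -1, -4, -5], square(error) = [1, 1, 16, 25]
    # weights (0, 1, 0, 1) keep samples 2 and 4 -> mean = 26/2 = 13, rmse = sqrt(13)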
self.assertAllClose(math.sqrt(13), self.evaluate(result), atol=1e-3)
@test_util.run_all_in_graph_and_eager_modes
class TopKCategoricalAccuracyTest(test.TestCase):
def test_config(self):
a_obj = metrics.TopKCategoricalAccuracy(name='topkca', dtype=dtypes.int32)
self.assertEqual(a_obj.name, 'topkca')
self.assertEqual(a_obj._dtype, dtypes.int32)
a_obj2 = metrics.TopKCategoricalAccuracy.from_config(a_obj.get_config())
self.assertEqual(a_obj2.name, 'topkca')
self.assertEqual(a_obj2._dtype, dtypes.int32)
def test_correctness(self):
a_obj = metrics.TopKCategoricalAccuracy()
self.evaluate(variables.variables_initializer(a_obj.variables))
y_true = constant_op.constant([[0, 0, 1], [0, 1, 0]])
y_pred = constant_op.constant([[0.1, 0.9, 0.8], [0.05, 0.95, 0]])
result = a_obj(y_true, y_pred)
self.assertEqual(1, self.evaluate(result)) # both the samples match
# With `k` < 5.
a_obj = metrics.TopKCategoricalAccuracy(k=1)
self.evaluate(variables.variables_initializer(a_obj.variables))
result = a_obj(y_true, y_pred)
self.assertEqual(0.5, self.evaluate(result)) # only sample #2 matches
# With `k` > 5.
y_true = constant_op.constant([[0, 0, 1, 0, 0, 0, 0],
[0, 1, 0, 0, 0, 0, 0]])
y_pred = constant_op.constant([[0.5, 0.9, 0.1, 0.7, 0.6, 0.5, 0.4],
[0.05, 0.95, 0, 0, 0, 0, 0]])
a_obj = metrics.TopKCategoricalAccuracy(k=6)
self.evaluate(variables.variables_initializer(a_obj.variables))
result = a_obj(y_true, y_pred)
self.assertEqual(0.5, self.evaluate(result)) # only 1 sample matches.
@test_util.run_all_in_graph_and_eager_modes
class SparseTopKCategoricalAccuracyTest(test.TestCase):
def test_config(self):
a_obj = metrics.SparseTopKCategoricalAccuracy(
name='stopkca', dtype=dtypes.int32)
self.assertEqual(a_obj.name, 'stopkca')
self.assertEqual(a_obj._dtype, dtypes.int32)
a_obj2 = metrics.SparseTopKCategoricalAccuracy.from_config(
a_obj.get_config())
self.assertEqual(a_obj2.name, 'stopkca')
self.assertEqual(a_obj2._dtype, dtypes.int32)
def test_correctness(self):
a_obj = metrics.SparseTopKCategoricalAccuracy()
self.evaluate(variables.variables_initializer(a_obj.variables))
y_true = constant_op.constant([2, 1])
y_pred = constant_op.constant([[0.1, 0.9, 0.8], [0.05, 0.95, 0]])
result = a_obj(y_true, y_pred)
    self.assertEqual(1, self.evaluate(result))  # both samples match
# With `k` < 5.
a_obj = metrics.SparseTopKCategoricalAccuracy(k=1)
self.evaluate(variables.variables_initializer(a_obj.variables))
result = a_obj(y_true, y_pred)
self.assertEqual(0.5, self.evaluate(result)) # only sample #2 matches
# With `k` > 5.
y_pred = constant_op.constant([[0.5, 0.9, 0.1, 0.7, 0.6, 0.5, 0.4],
[0.05, 0.95, 0, 0, 0, 0, 0]])
a_obj = metrics.SparseTopKCategoricalAccuracy(k=6)
self.evaluate(variables.variables_initializer(a_obj.variables))
result = a_obj(y_true, y_pred)
self.assertEqual(0.5, self.evaluate(result)) # only 1 sample matches.
def _get_model(compile_metrics):
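  """Builds a small two-layer test model compiled with the given metrics."""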
model_layers = [
layers.Dense(3, activation='relu', kernel_initializer='ones'),
layers.Dense(1, activation='sigmoid', kernel_initializer='ones')]
model = testing_utils.get_model_from_layers(model_layers, input_shape=(4,))
model.compile(
loss='mae',
metrics=compile_metrics,
optimizer=RMSPropOptimizer(learning_rate=0.001),
run_eagerly=testing_utils.should_run_eagerly())
return model
@keras_parameterized.run_with_all_model_types
@keras_parameterized.run_all_keras_modes
class ResetStatesTest(keras_parameterized.TestCase):
def test_reset_states_false_positives(self):
fp_obj = metrics.FalsePositives()
model = _get_model([fp_obj])
x = np.ones((100, 4))
y = np.zeros((100, 1))
model.evaluate(x, y)
self.assertEqual(self.evaluate(fp_obj.accumulator), 100.)
model.evaluate(x, y)
self.assertEqual(self.evaluate(fp_obj.accumulator), 100.)
def test_reset_states_false_negatives(self):
fn_obj = metrics.FalseNegatives()
model = _get_model([fn_obj])
x = np.zeros((100, 4))
y = np.ones((100, 1))
model.evaluate(x, y)
self.assertEqual(self.evaluate(fn_obj.accumulator), 100.)
model.evaluate(x, y)
self.assertEqual(self.evaluate(fn_obj.accumulator), 100.)
def test_reset_states_true_negatives(self):
tn_obj = metrics.TrueNegatives()
model = _get_model([tn_obj])
x = np.zeros((100, 4))
y = np.zeros((100, 1))
model.evaluate(x, y)
self.assertEqual(self.evaluate(tn_obj.accumulator), 100.)
model.evaluate(x, y)
self.assertEqual(self.evaluate(tn_obj.accumulator), 100.)
def test_reset_states_true_positives(self):
tp_obj = metrics.TruePositives()
model = _get_model([tp_obj])
x = np.ones((100, 4))
y = np.ones((100, 1))
model.evaluate(x, y)
self.assertEqual(self.evaluate(tp_obj.accumulator), 100.)
model.evaluate(x, y)
self.assertEqual(self.evaluate(tp_obj.accumulator), 100.)
def test_reset_states_precision(self):
p_obj = metrics.Precision()
model = _get_model([p_obj])
x = np.concatenate((np.ones((50, 4)), np.ones((50, 4))))
y = np.concatenate((np.ones((50, 1)), np.zeros((50, 1))))
model.evaluate(x, y)
self.assertEqual(self.evaluate(p_obj.tp), 50.)
self.assertEqual(self.evaluate(p_obj.fp), 50.)
model.evaluate(x, y)
self.assertEqual(self.evaluate(p_obj.tp), 50.)
self.assertEqual(self.evaluate(p_obj.fp), 50.)
def test_reset_states_recall(self):
r_obj = metrics.Recall()
model = _get_model([r_obj])
x = np.concatenate((np.ones((50, 4)), np.zeros((50, 4))))
y = np.concatenate((np.ones((50, 1)), np.ones((50, 1))))
model.evaluate(x, y)
self.assertEqual(self.evaluate(r_obj.tp), 50.)
self.assertEqual(self.evaluate(r_obj.fn), 50.)
model.evaluate(x, y)
self.assertEqual(self.evaluate(r_obj.tp), 50.)
self.assertEqual(self.evaluate(r_obj.fn), 50.)
def test_reset_states_sensitivity_at_specificity(self):
s_obj = metrics.SensitivityAtSpecificity(0.5, num_thresholds=1)
model = _get_model([s_obj])
x = np.concatenate((np.ones((25, 4)), np.zeros((25, 4)), np.zeros((25, 4)),
np.ones((25, 4))))
y = np.concatenate((np.ones((25, 1)), np.zeros((25, 1)), np.ones((25, 1)),
np.zeros((25, 1))))
model.evaluate(x, y)
self.assertEqual(self.evaluate(s_obj.tp), 25.)
self.assertEqual(self.evaluate(s_obj.fp), 25.)
self.assertEqual(self.evaluate(s_obj.fn), 25.)
self.assertEqual(self.evaluate(s_obj.tn), 25.)
model.evaluate(x, y)
self.assertEqual(self.evaluate(s_obj.tp), 25.)
self.assertEqual(self.evaluate(s_obj.fp), 25.)
self.assertEqual(self.evaluate(s_obj.fn), 25.)
self.assertEqual(self.evaluate(s_obj.tn), 25.)
def test_reset_states_specificity_at_sensitivity(self):
s_obj = metrics.SpecificityAtSensitivity(0.5, num_thresholds=1)
model = _get_model([s_obj])
x = np.concatenate((np.ones((25, 4)), np.zeros((25, 4)), np.zeros((25, 4)),
np.ones((25, 4))))
y = np.concatenate((np.ones((25, 1)), np.zeros((25, 1)), np.ones((25, 1)),
np.zeros((25, 1))))
model.evaluate(x, y)
self.assertEqual(self.evaluate(s_obj.tp), 25.)
self.assertEqual(self.evaluate(s_obj.fp), 25.)
self.assertEqual(self.evaluate(s_obj.fn), 25.)
self.assertEqual(self.evaluate(s_obj.tn), 25.)
model.evaluate(x, y)
self.assertEqual(self.evaluate(s_obj.tp), 25.)
self.assertEqual(self.evaluate(s_obj.fp), 25.)
self.assertEqual(self.evaluate(s_obj.fn), 25.)
self.assertEqual(self.evaluate(s_obj.tn), 25.)
if __name__ == '__main__':
test.main()
| hfp/tensorflow-xsmm | tensorflow/python/keras/metrics_test.py | Python | apache-2.0 | 67,231 |
from . import foreach
from . import toiterable | dbrattli/RxPY | rx/linq/observable/blocking/__init__.py | Python | apache-2.0 | 46 |
#!/usr/bin/env python
#
# Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Munki models module tests."""
import datetime
import tests.appenginesdk
import mox
import stubout
from google.appengine.ext import testbed
from google.apputils import app
from google.apputils import basetest
from tests.simian.mac.common import test
from simian.mac.models import munki as models
class CatalogTest(mox.MoxTestBase):
"""Test Catalog class."""
def setUp(self):
mox.MoxTestBase.setUp(self)
self.stubs = stubout.StubOutForTesting()
def tearDown(self):
self.mox.UnsetStubs()
self.stubs.UnsetAll()
def _MockObtainLock(self, name, obtain=True):
if not hasattr(self, '_mock_obtain_lock'):
self.mox.StubOutWithMock(models.gae_util, 'ObtainLock')
self._mock_obtain_lock = True
models.gae_util.ObtainLock(name).AndReturn(obtain)
def _MockReleaseLock(self, name):
if not hasattr(self, '_mock_release_lock'):
self.mox.StubOutWithMock(models.gae_util, 'ReleaseLock')
self._mock_release_lock = True
models.gae_util.ReleaseLock(name).AndReturn(None)
def testGenerateAsync(self):
"""Tests calling Generate(delay=2)."""
name = 'catalogname'
utcnow = datetime.datetime(2010, 9, 2, 19, 30, 21, 377827)
self.mox.StubOutWithMock(datetime, 'datetime')
self.stubs.Set(models.deferred, 'defer', self.mox.CreateMockAnything())
deferred_name = 'create-catalog-%s-%s' % (
name, '2010-09-02-19-30-21-377827')
models.datetime.datetime.utcnow().AndReturn(utcnow)
models.deferred.defer(
models.Catalog.Generate, name, _name=deferred_name, _countdown=2)
self.mox.ReplayAll()
models.Catalog.Generate(name, delay=2)
self.mox.VerifyAll()
def testGenerateSuccess(self):
"""Tests the success path for Generate()."""
name = 'goodname'
plist1 = '<dict><key>foo</key><string>bar</string></dict>'
mock_plist1 = self.mox.CreateMockAnything()
pkg1 = test.GenericContainer(plist=mock_plist1, name='foo')
plist2 = '<dict><key>foo</key><string>bar</string></dict>'
mock_plist2 = self.mox.CreateMockAnything()
pkg2 = test.GenericContainer(plist=mock_plist2, name='bar')
self.mox.StubOutWithMock(models.Manifest, 'Generate')
self.mox.StubOutWithMock(models.PackageInfo, 'all')
self.mox.StubOutWithMock(models.Catalog, 'get_or_insert')
self.mox.StubOutWithMock(models.Catalog, 'DeleteMemcacheWrap')
self._MockObtainLock('catalog_lock_%s' % name)
mock_model = self.mox.CreateMockAnything()
models.PackageInfo.all().AndReturn(mock_model)
mock_model.filter('catalogs =', name).AndReturn(mock_model)
mock_model.fetch(None).AndReturn([pkg1, pkg2])
pkg1.plist.GetXmlContent(indent_num=1).AndReturn(plist1)
pkg2.plist.GetXmlContent(indent_num=1).AndReturn(plist2)
mock_catalog = self.mox.CreateMockAnything()
models.Catalog.get_or_insert(name).AndReturn(mock_catalog)
mock_catalog.put().AndReturn(None)
models.Catalog.DeleteMemcacheWrap(
name, prop_name='plist_xml').AndReturn(None)
models.Manifest.Generate(name, delay=1).AndReturn(None)
self._MockReleaseLock('catalog_lock_%s' % name)
self.mox.ReplayAll()
models.Catalog.Generate(name)
self.assertEqual(mock_catalog.name, name)
xml = '\n'.join([plist1, plist2])
expected_plist = models.constants.CATALOG_PLIST_XML % xml
self.assertEqual(expected_plist, mock_catalog.plist)
self.assertEqual(mock_catalog.package_names, ['foo', 'bar'])
self.mox.VerifyAll()
def testGenerateWithNoPkgsinfo(self):
"""Tests Catalog.Generate() where no coorresponding PackageInfo exist."""
name = 'emptyname'
self.mox.StubOutWithMock(models.Manifest, 'Generate')
self.mox.StubOutWithMock(models.PackageInfo, 'all')
self.mox.StubOutWithMock(models.Catalog, 'get_or_insert')
self.mox.StubOutWithMock(models.Catalog, 'DeleteMemcacheWrap')
self._MockObtainLock('catalog_lock_%s' % name)
mock_model = self.mox.CreateMockAnything()
models.PackageInfo.all().AndReturn(mock_model)
mock_model.filter('catalogs =', name).AndReturn(mock_model)
mock_model.fetch(None).AndReturn([])
mock_catalog = self.mox.CreateMockAnything()
models.Catalog.get_or_insert(name).AndReturn(mock_catalog)
mock_catalog.put().AndReturn(None)
models.Catalog.DeleteMemcacheWrap(
name, prop_name='plist_xml').AndReturn(None)
models.Manifest.Generate(name, delay=1).AndReturn(None)
self._MockReleaseLock('catalog_lock_%s' % name)
self.mox.ReplayAll()
models.Catalog.Generate(name)
self.assertEqual(mock_catalog.name, name)
expected_plist = models.constants.CATALOG_PLIST_XML % '\n'.join([])
self.assertEqual(expected_plist, mock_catalog.plist)
self.assertEqual(mock_catalog.package_names, [])
self.mox.VerifyAll()
def testGenerateWithPlistParseError(self):
"""Tests Generate() where plist.GetXmlDocument() raises plist.Error."""
name = 'goodname'
mock_plist1 = self.mox.CreateMockAnything()
pkg1 = test.GenericContainer(plist=mock_plist1, name='foo')
self._MockObtainLock('catalog_lock_%s' % name)
mock_model = self.mox.CreateMockAnything()
self.mox.StubOutWithMock(models.PackageInfo, 'all')
models.PackageInfo.all().AndReturn(mock_model)
mock_model.filter('catalogs =', name).AndReturn(mock_model)
mock_model.fetch(None).AndReturn([pkg1])
mock_plist1.GetXmlContent(indent_num=1).AndRaise(models.plist_lib.Error)
self._MockReleaseLock('catalog_lock_%s' % name)
self.mox.ReplayAll()
self.assertRaises(
models.plist_lib.Error, models.Catalog.Generate, name)
self.mox.VerifyAll()
def testGenerateWithDbError(self):
"""Tests Generate() where put() raises db.Error."""
name = 'goodname'
plist1 = '<plist><dict><key>foo</key><string>bar</string></dict></plist>'
mock_plist1 = self.mox.CreateMockAnything()
pkg1 = test.GenericContainer(plist=mock_plist1, name='foo')
plist2 = '<plist><dict><key>foo</key><string>bar</string></dict></plist>'
mock_plist2 = self.mox.CreateMockAnything()
pkg2 = test.GenericContainer(plist=mock_plist2, name='bar')
self._MockObtainLock('catalog_lock_%s' % name)
mock_model = self.mox.CreateMockAnything()
self.mox.StubOutWithMock(models.PackageInfo, 'all')
models.PackageInfo.all().AndReturn(mock_model)
mock_model.filter('catalogs =', name).AndReturn(mock_model)
mock_model.fetch(None).AndReturn([pkg1, pkg2])
mock_plist1.GetXmlContent(indent_num=1).AndReturn(plist1)
mock_plist2.GetXmlContent(indent_num=1).AndReturn(plist2)
mock_catalog = self.mox.CreateMockAnything()
self.mox.StubOutWithMock(models.Catalog, 'get_or_insert')
models.Catalog.get_or_insert(name).AndReturn(mock_catalog)
mock_catalog.put().AndRaise(models.db.Error)
self._MockReleaseLock('catalog_lock_%s' % name)
self.mox.ReplayAll()
self.assertRaises(
models.db.Error, models.Catalog.Generate, name)
self.mox.VerifyAll()
def testGenerateLocked(self):
"""Tests Generate() where name is locked."""
name = 'lockedname'
self._MockObtainLock('catalog_lock_%s' % name, obtain=False)
# here is where Generate calls itself; can't stub the method we're
# testing, so mock the calls that happen as a result.
utcnow = datetime.datetime(2010, 9, 2, 19, 30, 21, 377827)
self.mox.StubOutWithMock(datetime, 'datetime')
self.stubs.Set(models.deferred, 'defer', self.mox.CreateMockAnything())
deferred_name = 'create-catalog-%s-%s' % (
name, '2010-09-02-19-30-21-377827')
models.datetime.datetime.utcnow().AndReturn(utcnow)
models.deferred.defer(
models.Catalog.Generate, name, _name=deferred_name, _countdown=10)
self.mox.ReplayAll()
models.Catalog.Generate(name)
self.mox.VerifyAll()
class ManifestTest(mox.MoxTestBase):
"""Test Manifest class."""
def setUp(self):
mox.MoxTestBase.setUp(self)
self.stubs = stubout.StubOutForTesting()
def tearDown(self):
self.mox.UnsetStubs()
self.stubs.UnsetAll()
def _MockObtainLock(self, name, obtain=True):
if not hasattr(self, '_mock_obtain_lock'):
self.mox.StubOutWithMock(models.gae_util, 'ObtainLock')
self._mock_obtain_lock = True
models.gae_util.ObtainLock(name).AndReturn(obtain)
def _MockReleaseLock(self, name):
if not hasattr(self, '_mock_release_lock'):
self.mox.StubOutWithMock(models.gae_util, 'ReleaseLock')
self._mock_release_lock = True
models.gae_util.ReleaseLock(name).AndReturn(None)
def testGenerateAsync(self):
"""Tests calling Manifest.Generate(delay=2)."""
name = 'manifestname'
utcnow = datetime.datetime(2010, 9, 2, 19, 30, 21, 377827)
self.mox.StubOutWithMock(datetime, 'datetime')
self.stubs.Set(models.deferred, 'defer', self.mox.CreateMockAnything())
deferred_name = 'create-manifest-%s-%s' % (
name, '2010-09-02-19-30-21-377827')
models.datetime.datetime.utcnow().AndReturn(utcnow)
models.deferred.defer(
models.Manifest.Generate, name, _name=deferred_name, _countdown=2)
self.mox.ReplayAll()
models.Manifest.Generate(name, delay=2)
self.mox.VerifyAll()
def testGenerateSuccess(self):
"""Tests the success path for Manifest.Generate()."""
name = 'goodname'
pkg1 = test.GenericContainer(install_types=['footype1'], name='pkg1')
pkg2 = test.GenericContainer(
install_types=['footype1', 'footype2'], name='pkg2')
manifest_dict = {
'catalogs': [name, 'apple_update_metadata'],
pkg1.install_types[0]: [pkg1.name, pkg2.name],
pkg2.install_types[1]: [pkg2.name],
}
self._MockObtainLock('manifest_lock_%s' % name)
self.stubs.Set(
models.plist_lib,
'MunkiManifestPlist',
self.mox.CreateMock(models.plist_lib.MunkiManifestPlist))
mock_model = self.mox.CreateMockAnything()
self.mox.StubOutWithMock(models.PackageInfo, 'all')
models.PackageInfo.all().AndReturn(mock_model)
mock_model.filter('manifests =', name).AndReturn(mock_model)
mock_model.fetch(None).AndReturn([pkg1, pkg2])
mock_manifest = self.mox.CreateMockAnything()
mock_manifest.plist = self.mox.CreateMockAnything()
self.mox.StubOutWithMock(models.Manifest, 'get_or_insert')
models.Manifest.get_or_insert(name).AndReturn(mock_manifest)
mock_manifest.plist.SetContents(manifest_dict)
mock_manifest.put().AndReturn(None)
self.mox.StubOutWithMock(models.Manifest, 'DeleteMemcacheWrap')
models.Manifest.DeleteMemcacheWrap(name).AndReturn(None)
self._MockReleaseLock('manifest_lock_%s' % name)
self.mox.ReplayAll()
models.Manifest.Generate(name)
self.mox.VerifyAll()
def testGenerateDbError(self):
"""Tests Manifest.Generate() with db Error."""
name = 'goodname'
self._MockObtainLock('manifest_lock_%s' % name)
mock_model = self.mox.CreateMockAnything()
self.mox.StubOutWithMock(models.PackageInfo, 'all')
models.PackageInfo.all().AndReturn(mock_model)
mock_model.filter('manifests =', name).AndReturn(mock_model)
mock_model.fetch(None).AndRaise(models.db.Error)
self._MockReleaseLock('manifest_lock_%s' % name)
self.mox.ReplayAll()
self.assertRaises(models.db.Error, models.Manifest.Generate, name)
self.mox.VerifyAll()
def testGenerateWithNoPkgsinfo(self):
"""Tests Manifest.Generate() where no coorresponding PackageInfo exist."""
name = 'emptyname'
manifest_dict = {
'catalogs': [name, 'apple_update_metadata'],
}
self._MockObtainLock('manifest_lock_%s' % name)
self.stubs.Set(
models.plist_lib,
'MunkiManifestPlist',
self.mox.CreateMock(models.plist_lib.MunkiManifestPlist))
mock_model = self.mox.CreateMockAnything()
self.mox.StubOutWithMock(models.PackageInfo, 'all')
models.PackageInfo.all().AndReturn(mock_model)
mock_model.filter('manifests =', name).AndReturn(mock_model)
mock_model.fetch(None).AndReturn([])
mock_manifest = self.mox.CreateMockAnything()
mock_manifest.plist = self.mox.CreateMockAnything()
self.mox.StubOutWithMock(models.Manifest, 'get_or_insert')
models.Manifest.get_or_insert(name).AndReturn(mock_manifest)
mock_manifest.plist.SetContents(manifest_dict)
mock_manifest.put().AndReturn(None)
self.mox.StubOutWithMock(models.Manifest, 'DeleteMemcacheWrap')
models.Manifest.DeleteMemcacheWrap(name).AndReturn(None)
self._MockReleaseLock('manifest_lock_%s' % name)
self.mox.ReplayAll()
models.Manifest.Generate(name)
self.mox.VerifyAll()
def testGenerateLocked(self):
"""Tests Manifest.Generate() where name is locked."""
name = 'lockedname'
self._MockObtainLock('manifest_lock_%s' % name, obtain=False)
# here is where Manifest.Generate calls itself; can't stub the method we're
# testing, so mock the calls that happen as a result.
utcnow = datetime.datetime(2010, 9, 2, 19, 30, 21, 377827)
self.mox.StubOutWithMock(datetime, 'datetime')
self.stubs.Set(models.deferred, 'defer', self.mox.CreateMockAnything())
deferred_name = 'create-manifest-%s-%s' % (
name, '2010-09-02-19-30-21-377827')
models.datetime.datetime.utcnow().AndReturn(utcnow)
models.deferred.defer(
models.Manifest.Generate, name, _name=deferred_name, _countdown=5)
self.mox.ReplayAll()
models.Manifest.Generate(name)
self.mox.VerifyAll()
class PackageInfoTest(mox.MoxTestBase):
"""Test PackageInfo class."""
def setUp(self):
self.testbed = testbed.Testbed()
self.testbed.activate()
self.testbed.setup_env(
overwrite=True,
USER_EMAIL='[email protected]',
USER_ID='1337',
USER_IS_ADMIN='0')
self.testbed.init_user_stub()
mox.MoxTestBase.setUp(self)
self.stubs = stubout.StubOutForTesting()
def tearDown(self):
self.testbed.deactivate()
self.mox.UnsetStubs()
self.stubs.UnsetAll()
# TODO(user): create a base test class for non-handlers.
def _MockObtainLock(self, name, obtain=True, timeout=0):
if not hasattr(self, '_mock_obtain_lock'):
self.mox.StubOutWithMock(models.gae_util, 'ObtainLock')
self._mock_obtain_lock = True
models.gae_util.ObtainLock(name, timeout=timeout).AndReturn(obtain)
def _MockReleaseLock(self, name):
if not hasattr(self, '_mock_release_lock'):
self.mox.StubOutWithMock(models.gae_util, 'ReleaseLock')
self._mock_release_lock = True
models.gae_util.ReleaseLock(name).AndReturn(None)
def _GetTestPackageInfoPlist(self, d=None):
"""String concatenates a description and returns test plist xml."""
if not d:
d = {}
for k in ['desc', 'name', 'installer_item_hash', 'version']:
if k not in d:
d[k] = 'foo%s' % k
# Build xml array of catalogs.
if 'catalogs' not in d:
d['catalogs'] = ['unstable']
catalogs = []
for catalog in d['catalogs']:
catalogs.append('<string>%s</string>' % catalog)
d['catalogs'] = ''.join(catalogs)
return (
'<plist><dict><key>name</key><string>%(name)s</string>'
'<key>version</key><string>%(version)s</string>'
'<key>installer_item_hash</key><string>%(installer_item_hash)s</string>'
'<key>installer_item_location</key><string>filename.dmg</string>'
'<key>catalogs</key><array>%(catalogs)s</array>'
'<key>description</key><string>%(desc)s</string></dict></plist>' % d)
def testGetDescription(self):
"""Tests getting PackageInfo.description property."""
p = models.PackageInfo()
desc = 'basic'
p.plist = self._GetTestPackageInfoPlist({'desc': desc})
self.mox.ReplayAll()
self.assertEqual(desc, p._GetDescription())
self.mox.VerifyAll()
def testGetDescriptionEmptyStr(self):
"""Tests getting PackageInfo.description property, when desc is empty."""
p = models.PackageInfo()
desc = ''
p.plist = self._GetTestPackageInfoPlist({'desc': desc})
self.mox.ReplayAll()
self.assertEqual('', p._GetDescription())
self.mox.VerifyAll()
def testGetDescriptionWithAvgDurationText(self):
"""Tests PackageInfo.description property with avg duration text."""
p = models.PackageInfo()
basic_desc = 'basic'
avg_duration_text = models.PackageInfo.AVG_DURATION_TEXT % (25000, 41)
full_desc = '%s\n\n%s' % (basic_desc, avg_duration_text)
p.plist = self._GetTestPackageInfoPlist({'desc': full_desc})
self.mox.ReplayAll()
self.assertEqual(basic_desc, p._GetDescription())
self.mox.VerifyAll()
def testSetDescription(self):
"""Set PackageInfo.description property, sans avg duration text."""
p = models.PackageInfo()
desc = 'basic'
p.plist = self._GetTestPackageInfoPlist({'desc': desc})
self.mox.ReplayAll()
p._SetDescription(desc)
self.assertEqual(desc, p.description)
self.mox.VerifyAll()
def testSetDescriptionPreservingExistingAvgDurationText(self):
"""Set PackageInfo.description property, preserving avg duration text."""
p = models.PackageInfo()
basic_desc = 'basic'
avg_duration_text = models.PackageInfo.AVG_DURATION_TEXT % (25000, 41)
full_desc = 'ANYTHING_HERE\n\n%s' % avg_duration_text
p.plist = self._GetTestPackageInfoPlist({'desc': full_desc})
expected_new_desc = full_desc.replace('ANYTHING_HERE', basic_desc)
self.mox.ReplayAll()
p._SetDescription(basic_desc)
self.assertEqual(basic_desc, p.description)
self.assertEqual(expected_new_desc, p.plist['description'])
self.mox.VerifyAll()
def testSetDescriptionWithUpdatedAvgDurationText(self):
"""Set PackageInfo.description property, preserving avg duration text."""
p = models.PackageInfo()
avg_duration_text = models.PackageInfo.AVG_DURATION_TEXT % (25000, 41)
old_full_desc = 'NOT_BASIC\n\n%s' % avg_duration_text
p.plist = self._GetTestPackageInfoPlist({'desc': old_full_desc})
basic_desc = 'basic'
avg_duration_text = models.PackageInfo.AVG_DURATION_TEXT % (25555, 45)
new_full_desc = '%s\n\n%s' % (basic_desc, avg_duration_text)
self.mox.ReplayAll()
p._SetDescription(new_full_desc)
self.assertEqual(basic_desc, p.description)
self.assertEqual(new_full_desc, p.plist['description'])
self.mox.VerifyAll()
def testUpdateWithObtainLockFailure(self):
"""Test Update() with a failure obtaining the lock."""
p = models.PackageInfo()
p.filename = 'foofile.dmg'
self._MockObtainLock('pkgsinfo_%s' % p.filename, obtain=False, timeout=5.0)
self.mox.ReplayAll()
self.assertRaises(models.PackageInfoLockError, p.Update)
self.mox.VerifyAll()
def testMakeSafeToModifyWithoutProposals(self):
"""Test MakeSafeToModify() with out proposals and package in catalogs."""
p = models.PackageInfo()
p.catalogs = ['unstable', 'testing', 'stable']
p.manifests = ['unstable', 'testing', 'stable']
self.mox.StubOutWithMock(models.PackageInfo, 'approval_required')
models.PackageInfo.approval_required = False
self.mox.StubOutWithMock(models.PackageInfo, 'Update')
models.PackageInfo.Update(catalogs=[], manifests=[])
self.mox.ReplayAll()
p.MakeSafeToModify()
self.mox.VerifyAll()
def _UpdateTestHelper(
self, filename, pkginfo, plist_xml=None, create_new=False,
safe_to_modify=True, unsafe_properties_changed=False,
filename_exists=False, **kwargs):
"""Test helper for Update()."""
catalogs = kwargs.get('catalogs')
manifests = kwargs.get('manifests')
install_types = kwargs.get('install_types')
manifest_mod_access = kwargs.get('manifest_mod_access')
name = kwargs.get('name')
display_name = kwargs.get('display_name')
unattended_install = kwargs.get('unattended_install')
unattended_uninstall = kwargs.get('unattended_uninstall')
description = kwargs.get('description')
version = kwargs.get('version')
minimum_os_version = kwargs.get('minimum_os_version')
maximum_os_version = kwargs.get('maximum_os_version')
force_install_after_date = kwargs.get('force_install_after_date')
self.mox.StubOutWithMock(models.PackageInfo, 'approval_required')
models.PackageInfo.approval_required = False
self._MockObtainLock('pkgsinfo_%s' % filename, timeout=5.0)
self.mox.StubOutWithMock(models.PackageInfo, 'get_by_key_name')
if create_new:
pkginfo = self.mox.CreateMockAnything()
pkginfo.catalogs = []
pkginfo.manifests = []
pkginfo.install_types = []
pkginfo.manifest_mod_access = []
if filename_exists:
models.PackageInfo.get_by_key_name(filename).AndReturn(True)
self._MockReleaseLock('pkgsinfo_%s' % filename)
self.mox.ReplayAll()
models.PackageInfo.UpdateFromPlist(plist_xml, create_new=create_new)
else:
models.PackageInfo.get_by_key_name(filename).AndReturn(None)
self.mox.StubOutWithMock(models.PackageInfo, '_New')
models.PackageInfo._New(filename).AndReturn(pkginfo)
elif plist_xml:
models.PackageInfo.get_by_key_name(filename).AndReturn(pkginfo)
if create_new:
original_plist = None
else:
original_plist = pkginfo.plist.GetXml()
pkginfo.filename = filename
self.mox.StubOutWithMock(pkginfo, 'IsSafeToModify')
pkginfo.IsSafeToModify().AndReturn(safe_to_modify)
if not safe_to_modify:
if plist_xml or unsafe_properties_changed:
# If not safe to modify and plist_xml was passed, an exception will be
# raised after releasing the lock.
self._MockReleaseLock('pkgsinfo_%s' % filename)
self.mox.ReplayAll()
if plist_xml:
models.PackageInfo.UpdateFromPlist(plist_xml)
else:
pkginfo.Update(**kwargs)
return
self.mox.StubOutWithMock(pkginfo, 'VerifyPackageIsEligibleForNewCatalogs')
pkginfo.VerifyPackageIsEligibleForNewCatalogs(
mox.IsA(list)).AndReturn(None)
self.mox.StubOutWithMock(pkginfo, 'put')
pkginfo.put().AndReturn(None)
self._MockReleaseLock('pkgsinfo_%s' % filename)
self.mox.StubOutWithMock(models.Catalog, 'Generate')
if plist_xml:
pl = models.plist_lib.MunkiPackageInfoPlist(plist_xml)
pl.Parse()
if create_new:
new_catalogs = []
changed_catalogs = []
else:
new_catalogs = pl['catalogs']
changed_catalogs = pkginfo.catalogs + new_catalogs
else:
new_catalogs = catalogs or pkginfo.catalogs
if catalogs:
changed_catalogs = set(catalogs + pkginfo.catalogs)
else:
changed_catalogs = pkginfo.catalogs
for catalog in sorted(changed_catalogs, reverse=True):
models.Catalog.Generate(catalog, delay=1).AndReturn(None)
self.mox.StubOutWithMock(models.users, 'get_current_user')
mock_user = self.mox.CreateMockAnything()
models.users.get_current_user().AndReturn(mock_user)
mock_user.email().AndReturn('[email protected]')
self.mox.StubOutWithMock(models.base, 'AdminPackageLog')
mock_log = self.mox.CreateMockAnything()
if safe_to_modify:
models.base.AdminPackageLog(
user='[email protected]', action='pkginfo', filename=filename,
catalogs=new_catalogs or [], manifests=manifests or [],
original_plist=original_plist, install_types=install_types or [],
manifest_mod_access=manifest_mod_access or [],
).AndReturn(mock_log)
else:
# if not safe to modify, only catalogs/manifests can be changed.
models.base.AdminPackageLog(
user='[email protected]', action='pkginfo', filename=filename,
catalogs=new_catalogs or [], manifests=manifests or [],
original_plist=original_plist, install_types=[],
manifest_mod_access=[],
).AndReturn(mock_log)
mock_log.put().AndReturn(None)
self.mox.ReplayAll()
if plist_xml:
models.PackageInfo.UpdateFromPlist(plist_xml, create_new=create_new)
else:
pkginfo.Update(
catalogs=catalogs, manifests=manifests,
install_types=install_types, manifest_mod_access=manifest_mod_access,
name=name, display_name=display_name,
unattended_install=unattended_install, description=description,
version=version, minimum_os_version=minimum_os_version,
maximum_os_version=maximum_os_version,
force_install_after_date=force_install_after_date,
unattended_uninstall=unattended_uninstall)
# Verify that the pkginfo.plist property was set.
self.assertEqual(mock_log.plist, pkginfo.plist)
return pkginfo
def testUpdatePromoteToStable(self):
"""Test Update() when promoting a package to stable."""
p = models.PackageInfo()
p.plist = self._GetTestPackageInfoPlist({'desc': 'foodesc'})
p.catalogs = ['unstable', 'testing']
p.manifests = ['unstable', 'testing']
catalogs = ['unstable', 'testing', 'stable']
manifests = ['unstable', 'testing', 'stable']
pkginfo = self._UpdateTestHelper(
'fooname.dmg', p, catalogs=catalogs, manifests=manifests)
self.assertEqual(pkginfo.catalogs, catalogs)
self.mox.VerifyAll()
def testUpdateDemoteFromStable(self):
"""Test Update() when demoting a package from stable."""
p = models.PackageInfo()
p.plist = self._GetTestPackageInfoPlist({'desc': 'foodesc'})
p.catalogs = ['unstable', 'testing', 'stable']
p.manifests = ['unstable', 'testing', 'stable']
catalogs = ['unstable', 'testing']
manifests = ['unstable', 'testing']
pkginfo = self._UpdateTestHelper(
'zooooo.dmg', p, catalogs=catalogs, manifests=manifests)
self.assertEqual(pkginfo.catalogs, catalogs)
self.mox.VerifyAll()
def testUpdateWithMultipleNewProperties(self):
"""Tests Update() with several some new and some updated properties."""
p = models.PackageInfo()
p.plist = self._GetTestPackageInfoPlist({'desc': 'foodesc'})
p.catalogs = ['unstable']
p.install_types = ['should be changed']
# pkginfo.manifests purposefully not set.
# pkginfo.manifest_mod_access purposefully not set.
install_types = ['managed_updates', 'optional_installs']
manifests = ['unstable']
manifest_mod_access = ['support']
pkginfo = self._UpdateTestHelper(
'foo.dmg', p, install_types=install_types, manifests=manifests,
manifest_mod_access=manifest_mod_access)
self.assertEqual(install_types, pkginfo.install_types)
self.assertEqual(manifests, pkginfo.manifests)
self.assertEqual(manifest_mod_access, pkginfo.manifest_mod_access)
self.mox.VerifyAll()
def testUpdateWithNewPlistProperties(self):
"""Test Update() when passing params that change plist XML properties."""
p = models.PackageInfo()
orig_version = '9.0.0.0.1'
p.plist = self._GetTestPackageInfoPlist(
{'desc': 'foodesc', 'version': orig_version})
p.catalogs = ['unstable']
description = 'zomg new description!!!'
install_types = ['managed_updates', 'optional_installs']
manifests = ['unstable']
version = '10.0.0.0.1-gg1'
minimum_os_version = '10.5.8'
maximum_os_version = ''
force_install_after_date = datetime.datetime(2012, 2, 23, 13, 0, 0)
pkginfo = self._UpdateTestHelper(
'foo.dmg', p, install_types=install_types, manifests=manifests,
description=description, version=version,
minimum_os_version=minimum_os_version,
maximum_os_version=maximum_os_version,
force_install_after_date=force_install_after_date)
self.assertEqual(description, pkginfo.plist['description'])
self.assertEqual(install_types, pkginfo.install_types)
self.assertEqual(manifests, pkginfo.manifests)
self.assertEqual(version, pkginfo.plist['version'])
self.assertEqual(minimum_os_version, pkginfo.plist['minimum_os_version'])
self.assertTrue('maximum_os_version' not in pkginfo.plist)
self.assertEqual(
force_install_after_date, pkginfo.plist['force_install_after_date'])
self.mox.VerifyAll()
def testUpdateWithNewPropertiesButIsNotSafeToModifySuccess(self):
"""Test Update() when IsSafeToModify() is False, but only cats changed."""
p = models.PackageInfo()
orig_desc = 'orig_desc'
orig_install_types = ['managed_updates', 'managed_installs']
p.plist = self._GetTestPackageInfoPlist({'desc': orig_desc})
p.catalogs = ['unstable', 'testing', 'stable']
p.manifests = ['unstable', 'testing', 'stable']
p.plist['unattended_install'] = True
p.plist['unattended_uninstall'] = True
p.plist['install_types'] = orig_install_types
manifests = ['unstable']
catalogs = ['unstable']
pkginfo = self._UpdateTestHelper(
'foo.dmg', p, catalogs=catalogs, manifests=manifests,
safe_to_modify=False, unsafe_properties_changed=False)
self.assertEqual(catalogs, pkginfo.catalogs)
self.assertEqual(manifests, pkginfo.manifests)
self.assertEqual(orig_desc, pkginfo.description)
self.assertEqual(True, pkginfo.plist['unattended_install'])
self.assertEqual(True, pkginfo.plist['unattended_uninstall'])
self.assertEqual(orig_install_types, pkginfo.plist['install_types'])
self.mox.VerifyAll()
def testUpdateWithNewPropertiesButIsNotSafeToModifyFailure(self):
"""Test Update() when IsSafeToModify() is False and Update fails."""
p = models.PackageInfo()
orig_desc = 'orig_desc'
orig_name = 'fooname'
p.plist = self._GetTestPackageInfoPlist(
{'desc': orig_desc, 'name': orig_name})
p.catalogs = ['unstable', 'testing', 'stable']
p.manifests = ['unstable', 'testing', 'stable']
p.name = orig_name
description = 'zomg new description!!!'
manifests = ['unstable']
catalogs = ['unstable']
self.assertRaises(
models.PackageInfoUpdateError,
self._UpdateTestHelper, 'foo.dmg', p, catalogs=catalogs,
manifests=manifests, name='NEWWW', description=description,
safe_to_modify=False, unsafe_properties_changed=True)
self.mox.VerifyAll()
def testUpdateFromPlist(self):
"""Test UpdateFromPlist() with new plist values."""
p = models.PackageInfo()
p.plist = self._GetTestPackageInfoPlist({'desc': 'OLD', 'name': 'OLD'})
new_desc = 'NEW DESC!!!'
new_name = 'newname'
new_hash = 'zomgHASH'
new_catalogs = ['unstable', 'testing', 'stable']
xml = self._GetTestPackageInfoPlist(
{'desc': new_desc, 'name': new_name, 'installer_item_hash': new_hash,
'catalogs': new_catalogs})
pkginfo = self._UpdateTestHelper('filename.dmg', p, plist_xml=xml)
self.assertEqual(new_name, pkginfo.name)
self.assertEqual(new_name, pkginfo.plist['name'])
self.assertEqual(new_desc, pkginfo.plist['description'])
self.assertEqual(new_hash, pkginfo.plist['installer_item_hash'])
self.assertEqual(new_hash, pkginfo.pkgdata_sha256)
self.assertEqual(new_catalogs, pkginfo.catalogs)
self.assertEqual(new_catalogs, pkginfo.plist['catalogs'])
self.mox.VerifyAll()
def testUpdateFromPlistWithPkginfoNotSafeToModify(self):
"""Test UpdateFromPlist() when pkginfo is not safe to mod."""
p = models.PackageInfo()
p.plist = self._GetTestPackageInfoPlist({'desc': 'OLD', 'name': 'OLD'})
xml = self._GetTestPackageInfoPlist({'desc': 'NEW', 'name': 'NEW'})
self.assertRaises(
models.PackageInfoUpdateError,
self._UpdateTestHelper,
'filename.dmg', p, plist_xml=xml, safe_to_modify=False)
def testUpdateFromPlistWithInvalidPlistXml(self):
"""Test UpdateFromPlist() with an invalid."""
self.assertRaises(
models.PackageInfoUpdateError,
models.PackageInfo.UpdateFromPlist, '<plist>NOT VALID PLIST</plist>')
def testUpdateFromPlistCreateNewTrue(self):
"""Test UpdateFromPlist(create_new=True)."""
filename = 'filename.dmg'
name = 'foopkgname'
catalogs = ['unstable', 'testing']
pkgdata_sha256 = 'abcd1234'
xml = self._GetTestPackageInfoPlist(
{'filename': filename, 'name': name, 'catalogs': catalogs,
'installer_item_hash': pkgdata_sha256})
pkginfo = self._UpdateTestHelper(
filename, None, plist_xml=xml, create_new=True)
self.assertEqual(name, pkginfo.name)
self.assertEqual(filename, pkginfo.filename)
# Test that catalogs were ignored/wiped.
self.assertEqual([], pkginfo.catalogs)
self.assertEqual([], pkginfo.plist['catalogs'])
self.assertEqual(pkgdata_sha256, pkginfo.plist['installer_item_hash'])
self.assertEqual(pkgdata_sha256, pkginfo.pkgdata_sha256)
self.mox.VerifyAll()
def testUpdateFromPlistCreateNewTrueButPreexistingKeyName(self):
"""Test UpdateFromPlist(create_new=True) where the filename is in use."""
filename = 'filename.dmg'
name = 'foopkgname'
xml = self._GetTestPackageInfoPlist(
{'filename': filename, 'name': name})
self.assertRaises(
models.PackageInfoUpdateError,
self._UpdateTestHelper,
filename, None, plist_xml=xml, create_new=True, filename_exists=True)
self.mox.VerifyAll()
def testBuildProposalBodyUrlEncodesFileName(self):
p = models.PackageInfo()
pip = models.PackageInfoProposal._New(p)
body = pip._BuildProposalBody('foo.com', 'file name.dmg')
self.assertTrue('https://foo.com/admin/package/file%20name.dmg' in body)
def main(unused_argv):
basetest.main()
if __name__ == '__main__':
app.run()
| sillywilly42/simian | src/tests/simian/mac/models/munki_test.py | Python | apache-2.0 | 33,982 |
from database.model import Session, Grainbin, TemperatureCable, TemperatureSensor
from monitor.rrd.rrd import RRD
from .message import Message
class GrainbinInfo(Message):
def __init__(self, source, destination):
Message.__init__(self, source, destination)
self.command = 'rescan'
self.grainbins = []
return
def save_rescan(self):
for grainbin in self.grainbins:
grainbin.save_rescan()
return
class GrainbinUpdate(Message):
def __init__(self, source, destination):
Message.__init__(self, source, destination)
self.command = 'update'
self.grainbins = []
self.timestamp = None
return
def save_to_db(self):
for grainbin in self.grainbins:
grainbin.save_update(timestamp=self.timestamp)
return
class GrainbinModel(object):
def __init__(self):
self.grainbin_id = None
self.bus_number = None
self.cable_info = {}
self.cables = []
self.sensor_type = 'unknown'
self.average_temp = None
return
def save_rescan(self):
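        """Syncs reported cable info into the DB (skipped for user-configured bins)."""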
session = Session()
grainbin = session.query(Grainbin).filter_by(id=self.grainbin_id).first()
if grainbin:
if grainbin.user_configured:
# grainbin has already been configured. skip this bin.
session.close()
return
grainbin.sensor_type = self.sensor_type
# make sure all cables that are in the update are in the database
for cable_number in self.cable_info:
temp_cable = session.query(TemperatureCable) \
.filter_by(grainbin_id=self.grainbin_id,
bin_cable_number=cable_number) \
.first()
if temp_cable:
temp_cable.cable_type = self.sensor_type
temp_cable.sensor_count = self.cable_info[cable_number]
else:
new_cable = TemperatureCable(grainbin.id)
new_cable.bin_cable_number = cable_number
new_cable.sensor_count = self.cable_info[cable_number]
new_cable.cable_type = self.sensor_type
session.add(new_cable)
            # make sure all cables that are in the database are in the update
for cable in grainbin.cables:
if str(cable.bin_cable_number) not in self.cable_info:
session.delete(cable)
session.commit()
session.close()
return
def save_update(self, timestamp):
session = Session()
grainbin = session.query(Grainbin).filter_by(id=self.grainbin_id).first()
if grainbin:
if grainbin.user_configured:
grainbin.average_temp = self.average_temp
grainbin.total_updates += 1
for cable in self.cables:
db_cable = session.query(TemperatureCable)\
.filter_by(bin_cable_number=str(cable.cable_number),
grainbin_id=str(self.grainbin_id))\
.first()
values = {}
for sensor in cable.sensors:
db_sensor = session.query(TemperatureSensor)\
.filter_by(cable_id=db_cable.id,
templow=str(sensor.sensor_number))\
.first()
if db_sensor:
db_sensor.last_value = sensor.temperature
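                        # RRD data source per sensor, named "<cable_number>-<sensor_number>"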
                        source_name = str(cable.cable_number) + "-" + str(sensor.sensor_number)
                        values[source_name] = sensor.temperature
                    # only update RRD if there are values to update
if values:
rrd = RRD(grainbin.device_id, 'temperature_cable',
service_number=db_cable.id)
rrd.update(values, timestamp=timestamp)
else:
# grainbin is not configured yet, so just save rescan
self.save_rescan()
session.commit()
return
class CableModel(object):
def __init__(self):
self.cable_number = None
self.sensors = []
return
class SensorModel(object):
def __init__(self):
self.cable_number = None
self.sensor_number = None
self.temperature = None
return
| nstoik/sensor_monitor | message/grainbin_message.py | Python | apache-2.0 | 4,897 |
import math
def solution(n):
steps = int(math.ceil(math.log(n,2)))
if n & 1 == 1:
steps += 1
if n > 1:
if int(math.log(n+1,2)) == math.log(n+1,2):
steps -= 1
        if int(math.log(n-1,2)) == math.log(n-1,2):
steps -= 1
return steps
solutions = [ 0, 0, 1, 2, 2, 3, 3, 4, 3, 4, 4, 5, 4, 5, 5, 5, 4, 5, 5, 6, 5, 6, 6, 6, 5, 6, 6, 7, 6, 7, 6, 6, 5, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]
for i in range(1,41):
s = solution(i)
print("{0:4d}: {1:d} vs {2:d} {3:d}".format(i, s, solutions[i], s - solutions[i] ))
# solution(4) returns 2: 4 -> 2 -> 1
# solution(15) returns 5: 15 -> 16 -> 8 -> 4 -> 2 -> 1
#               0  1  2  3  4  5  6  7  8  9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34
# solutions = [ 0, 0, 1, 2, 2, 3, 3, 4, 3, 4, 4, 5, 4, 5, 5, 5, 4, 5, 5, 6, 5, 6, 6, 6, 5, 6, 6, 7, 6, 7, 6, 6, 5]
# solution( 0) returns 0
# solution( 1) returns 0
# solution( 2) returns 1: 2 -> 1
# solution( 3) returns 2: 3 -> 2 -> 1
# solution( 4) returns 2: 4 -> 2 -> 1
# solution( 5) returns 3: 5 -> 4 -> 2 -> 1
# solution( 6) returns 3: 6 -> 3 -> 2 -> 1
# 6 -> 5 -> 4 -> 2 -> 1
# 6 -> 7 -> 8 -> 4 -> 2 -> 1
# solution( 7) returns 4: 7 -> 6 -> 3 -> 2 -> 1
# 7 -> 8 -> 4 -> 2 -> 1
# solution( 8) returns 3: 8 -> 4 -> 2 -> 1
# solution( 9) returns 4: 9 -> 8 -> 4 -> 2 -> 1
# 9 -> 10 -> 5 -> 4 -> 2 -> 1
# solution(10) returns 4: 10 -> 5 -> 4 -> 2 -> 1
# 10 -> 9 -> 8 -> 4 -> 2 -> 1
# solution(11) returns 5: 11 -> 10 -> 5 -> 4 -> 2 -> 1
# 11 -> 12 -> 6 -> 3 -> 2 -> 1
# solution(12) returns 4: 12 -> 6 -> 3 -> 2 -> 1
# solution(13) returns 5: 13 -> 12 -> 6 -> 3 -> 2 -> 1
# 13 -> 14 -> 7 -> 8 -> 4 -> 2 -> 1
# 13 -> 14 -> 7 -> 6 -> 3 -> 2 -> 1
# solution(14) returns 5: 14 -> 7 -> 6 -> 3 -> 2 -> 1
# 14 -> 15 -> 16 -> 8 -> 4 -> 2 -> 1
# solution(15) returns 5: 15 -> 16 -> 8 -> 4 -> 2 -> 1
# 15 -> 14 -> 7 -> 6 -> 3 -> 2 -> 1
# solution(16) returns 4: 16 -> 8 -> 4 -> 2 -> 1
# solution(17) returns 5: 17 -> 16 -> 8 -> 4 -> 2 -> 1
# solution(18) returns 5: 18 -> 17 -> 16 -> 8 -> 4 -> 2 -> 1
# 18 -> 9 -> 8 -> 4 -> 2 -> 1
# solution(19) returns 6: 19 -> 18 -> 9 -> 8 -> 4 -> 2 -> 1
# 19 -> 20 -> 10 -> 5 -> 4 -> 2 -> 1
# solution(20) returns 5: 20 -> 10 -> 5 -> 4 -> 2 -> 1
# solution(21) returns 6: 21 -> 20 -> 10 -> 5 -> 4 -> 2 -> 1
# 21 -> 22 -> 11 -> 10 -> 5 -> 4 -> 2 -> 1
# solution(22) returns 6: 22 -> 11 -> 10 -> 5 -> 4 -> 2 -> 1
# solution(23) returns 6: 23 -> 24 -> 12 -> 6 -> 3 -> 2 -> 1
# 23 -> 22 -> 11 -> 10 -> 5 -> 4 -> 2 -> 1
# solution(24) returns 5: 24 -> 12 -> 6 -> 3 -> 2 -> 1
# solution(25) returns 6: 25 -> 24 -> 12 -> 6 -> 3 -> 2 -> 1
# 25 -> 26 -> 13 -> 12 -> 6 -> 3 -> 2 -> 1
# solution(26) returns 6: 26 -> 13 -> 12 -> 6 -> 3 -> 2 -> 1
# solution(27) returns 7: 27 -> 26 -> 13 -> 12 -> 6 -> 3 -> 2 -> 1
# 27 -> 28 -> 14 -> 7 -> 6 -> 3 -> 2 -> 1
# solution(28) returns 6: 28 -> 14 -> 7 -> 6 -> 3 -> 2 -> 1
# solution(29) returns 7: 29 -> 30 -> 15 -> 16 -> 8 -> 4 -> 2 -> 1
# 29 -> 28 -> 14 -> 7 -> 6 -> 3 -> 2 -> 1
# solution(30) returns 6: 30 -> 15 -> 16 -> 8 -> 4 -> 2 -> 1
# 30 -> 31 -> 32 -> 16 -> 8 -> 4 -> 2 -> 1
# solution(31) returns 6: 31 -> 32 -> 16 -> 8 -> 4 -> 2 -> 1
#
# solution(32) returns 5: 32 -> 16 -> 8 -> 4 -> 2 -> 1
# solution(33) returns 6: 33 -> 32 -> 16 -> 8 -> 4 -> 2 -> 1
# solution(34) returns 6: 34 -> 17 -> 16 -> 8 -> 4 -> 2 -> 1
# solution(35) returns 7: 35 -> 36 -> 18 -> 9 -> 8 -> 4 -> 2 -> 1
# 35 -> 34 -> 17 -> 16 -> 8 -> 4 -> 2 -> 1
# solution(36) returns 6:
# solution(37) returns 6:
# solution(38) returns 6:
# solution(39) returns 6:
# solution(40) returns 6:
# solution(41) returns 6:
# solution(42) returns 6:
# solution(43) returns 6:
#
# solution(91) returns 9: 91 -> 92 -> 46 -> 23 -> 24 -> 12 -> 6 -> 3 -> 2 -> 1
# 91 -> 90 -> 45 -> 44 -> 22 -> 11 -> 10 -> 5 -> 4 -> 2 -> 1
# 91 -> 90 -> 45 -> 46 -> 23 -> 24 -> 12 -> 6 -> 3 -> 2 -> 1
#
# solution(64) returns 6: 64 -> 32 -> 16 -> 8 -> 4 -> 2 -> 1
# solution(128) returns 7: 128 -> 64 -> 32 -> 16 -> 8 -> 4 -> 2 -> 1
# solution(256) returns 8: 256 -> 128 -> 64 -> 32 -> 16 -> 8 -> 4 -> 2 -> 1
# solution(2^N) returns N:
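# Hedged cross-check (not part of the original exploration): a brute-force BFS
# over the same move set -- halve when even, add one, subtract one -- that finds
# the true minimum step count, useful for validating the closed-form attempt in
# solution() above for small n.
from collections import deque
def solution_bfs(n):
    """Minimum halve/+1/-1 steps from n down to 1, found by BFS."""
    if n <= 1:
        return 0
    seen = {n}
    queue = deque([(n, 0)])
    while queue:
        value, steps = queue.popleft()
        if value == 1:
            return steps
        moves = [value + 1, value - 1]
        if value % 2 == 0:
            moves.append(value // 2)
        for nxt in moves:
            if nxt >= 1 and nxt not in seen:
                seen.add(nxt)
                queue.append((nxt, steps + 1))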
| perlygatekeeper/glowing-robot | google_test/fuel-injection-perfection/test_log_2.py | Python | artistic-2.0 | 4,655 |
# $language = "python"
# $interface = "1.0"
# for GCCSFRMs01-06, SCCSFRMs01-12
import os
import csv
import re
import sys
import msvcrt
int_total10g = 0
int_total1g = 0
def connect(str_hostname, str_username, str_password, csvws_10g, csvws_1g, csvws_pair):
"""
(str, str, str, csv.writer, csv.writer) -> None
Connects to str_hostname with credentials str_username and str_password, calls 'sh int desc',
and lists all avaliable 10g ports on csvws_10g document and 1g ports on csvws_1g document,
as well as recording the total number of available ports on each spreadsheet,
and for each pair on the csvws_pair spreadsheet
"""
# Declaring global variables to count total ports
global int_total10g
global int_total1g
# Resetting local variables that count avaliable ports for this host
int_port10g = 0
int_port1g = 0
# Connect to the session
str_cmd = "/SSH2 /L %s /PASSWORD %s /C 3DES /M SHA1 %s" % (str_username, str_password, str_hostname+".rns.fg.rbc.com")
crt.Session.Connect(str_cmd)
crt.Screen.Synchronous = True
# Write the hostname on the documents
csvws_10g.writerow([str_hostname])
csvws_1g.writerow([str_hostname])
crt.Screen.WaitForString("#")
# Send the initial command to run and wait for the first linefeed
crt.Screen.Send("sh int desc\r")
crt.Screen.WaitForString("\n")
# Create an array of strings to wait for.
list_str_waitfor = ["\n", str_hostname+"#"]
while True:
# Send a return to advance the chart
crt.Screen.Send(" ")
# Wait for the linefeed at the end of each line, or the shell
# prompt that indicates done.
int_result = crt.Screen.WaitForStrings( list_str_waitfor )
# If see a prompt, then we're done
if int_result == 2:
break
# Fetch current row and read the characters from the screen
int_screenrow = crt.Screen.CurrentRow - 1
str_readline = crt.Screen.Get(int_screenrow, 1, int_screenrow, 140)
# Split the line by whitespace, and only take the port name and its description
list_str_items = re.split(r'\s{2,}',str_readline.strip())
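        # Assumed 'sh int desc' row shape (port, type, speed, description),
        # so the description column lands at index 3 below.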
str_port = list_str_items[0]
try:
str_desc = list_str_items[3]
except IndexError:
str_desc = ''
# Match port names to differentiate between 10g and 1g ports
# Write the name of the port and its description to the document
if re.match(r'Eth\d{2}/',str_port) and re.match(r'V,', str_desc, flags=re.IGNORECASE):
csvws_10g.writerow([str_port,str_desc])
int_total10g += 1
int_port10g += 1
elif re.match(r'Eth1\d{2}/',str_port) and re.match(r'V,', str_desc, flags=re.IGNORECASE):
csvws_1g.writerow([str_port,str_desc])
int_total1g += 1
int_port1g += 1
# Write the number of available ports on the document
csvws_10g.writerow([str(int_port10g)+" ports available for "+str_hostname])
csvws_1g.writerow([str(int_port1g)+" ports available for "+str_hostname])
csvws_10g.writerow([])
csvws_1g.writerow([])
csvws_pair.writerow([str_hostname, str(int_port10g), str(int_port1g)])
# Disconnect from the session
crt.Screen.Synchronous = False
crt.Session.Disconnect()
# Main Function
def main():
# Get login information
str_usr = crt.Dialog.Prompt("Enter username: ", "Server Farm Login", "", False)
if str_usr == "": pass
str_pwd = crt.Dialog.Prompt("Enter password: ", "Server Farm Login", "", True)
if str_pwd == "": pass
# Create csv worksheets to record to
file_10g = open("intdesc_both_10g.csv", 'wb')
csvws_10g = csv.writer(file_10g)
file_1g = open("intdesc_both_1g.csv", 'wb')
csvws_1g = csv.writer(file_1g)
file_pair = open("ports_per_pair.csv", 'wb')
csvws_pair = csv.writer(file_pair)
csvws_pair.writerow(["Hostname", "10G ports available", "1G ports available"])
# Iterate through each of the hosts, recording the available ports
for i in range(1,7):
connect("GCCSFRMs"+str(i).zfill(2), str_usr, str_pwd, csvws_10g, csvws_1g, csvws_pair)
for i in range(1,13):
connect("SCCSFRMs"+str(i).zfill(2), str_usr, str_pwd, csvws_10g, csvws_1g, csvws_pair)
# Done, display total port availability
crt.Dialog.MessageBox("Results:\n--------\n10G: "+str(int_total10g)+" available ports\n1G: "+str(int_total1g)+" available ports", "Final Report", 64)
# And write those totals on the documents
file_10g.write("Total available ports: "+str(int_total10g))
file_1g.write("Total available ports: "+str(int_total1g))
csvws_pair.writerow(["Total", str(int_total10g), str(int_total1g)])
file_10g.close()
file_1g.close()
file_pair.close()
if __name__ == '__builtin__':
main()
elif __name__ == '__main__':
print "This program must be run in SecureCRT"
print "Open SecureCRT, go to Script > Run... , then select this file"
print "("+sys.argv[0]+")"
print
print "Press any key to exit",
msvcrt.getch()
| kelvinongtoronto/SecureCRT | sfrms_pairs.py | Python | artistic-2.0 | 4,716 |
from lib.Site import *
from lib.Page import *
from lib.Element import *
from lib.Forms import *
from lib.Render import *
from flask import Flask, request, render_template, redirect, send_file
from flask_basicauth import BasicAuth
from werkzeug import secure_filename
import json
import sys
import string
import html
if len( sys.argv) != 3:
print( 'usage: ' + sys.argv[0] + ' secrets_file site_name')
exit()
try:
secrets = open( sys.argv[1], 'r')
username = secrets.readline()
password = secrets.readline()
username = username.strip( '\n')
password = password.strip( '\n')
if len( username) == 0 or len( password) == 0:
raise
secrets.close()
except:
print( 'Could not parse ' + sys.argv[1] + '. Exiting')
exit()
if sys.argv[2] == 'lib' or sys.argv[2] == 'static' or sys.argv[2] == 'templates':
print( 'Site name is invalid. Exiting')
exit()
for char in sys.argv[2]:
if char not in string.ascii_letters + string.digits + '_':
print( 'Site name is invalid. Exiting')
exit()
site = Site( sys.argv[2])
site.name += '/'
app = Flask(__name__)
app.config['BASIC_AUTH_USERNAME'] = username
app.config['BASIC_AUTH_PASSWORD'] = password
basic_auth = BasicAuth( app)
ALLOWED_EXTENSIONS = set( [ 'png', 'jpg', 'jpeg', 'gif'])
def allowed_file( filename):
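    """Return True if filename carries an allowed image extension."""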
return '.' in filename and \
           filename.rsplit( '.', 1)[1] in ALLOWED_EXTENSIONS
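# Helpers below re-render the edit shell with a success or failure notice
# that links back to redir.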
def edit_success( redir):
return render_template( 'bedit.html', edits = render_template( 'edit_success.html', redir = redir))
def edit_failure( redir):
return render_template( 'bedit.html', edits = render_template( 'edit_failure.html', redir = redir))
@app.route( '/')
def route_index():
try:
links = Navbar( site, 'index')
eles = Body( site, 'index')
except KeyError:
return '404 Not Found.', 404
return render_template( 'base.html', title = 'Home', header = render_template( 'header.html', links = links.render(), body = render_template( 'body_index.html', editloc = site.name + 'index', eles = eles.render())))
@app.route( '/<path:page_name>/')
def route_everything( page_name):
try:
links = Navbar( site, page_name)
eles = Body( site, page_name)
except KeyError:
return '404 Not Found.', 404
return render_template( 'base.html', title = page_name.capitalize(), header = render_template( 'header.html', links = links.render(), body = render_template( 'body.html', editloc = site.name + page_name, eles = eles.render())))
@app.route( '/<path:page_name>/assets/<imagename>')
def route_assets( page_name, imagename):
try:
return send_file( site.name + page_name + '/assets/' + imagename)
except:
return '404 Not Found.', 404
@app.route( '/edit/', methods=['GET'])
@basic_auth.required
def edit_page():
if request.method == 'POST':
if request.form['to_edit']:
return redirect( '/edit/' + request.form['to_edit'])
if request.form['to_create']:
site.add( Page( site.name + request.form['to_create']))
page_create = PageCreateForm( request.form)
page_create.to_create.data = ''
page_edit = PageEditForm( request.form)
page_edit.choose( site.render().keys())
return render_template( 'bedit.html', edits = render_template( 'edit.html', page_create = page_create, page_edit = page_edit))
@app.route( '/edit/goto/', methods=['POST'])
@basic_auth.required
def edit_page_goto():
if request.form['to_edit'] == 'Choose a page to edit...':
return redirect( '/edit/')
return redirect( '/edit/' + request.form['to_edit'])
@app.route( '/edit/create/', methods=['POST'])
@basic_auth.required
def edit_page_create():
for char in request.form['to_create']:
if char not in string.ascii_letters + string.digits + ' ':
return edit_failure( '/edit/')
if request.form['to_create'] and 'edit' not in request.form['to_create'] and 'static' not in request.form['to_create']:
try:
site.add( Page( site.name + request.form['to_create'].lower()))
except:
return edit_failure( '/edit/')
return edit_success( '/edit/')
@app.route( '/edit/<path:page_name>/', methods=['POST', 'GET'])
@basic_auth.required
def edit_page_specific( page_name):
if page_name not in site.render().keys():
return edit_failure( '/edit/')
ele_add = ElementAddForm( request.form)
ele_add.to_add_title.data = ''
ele_add.to_add_content.data = ''
ele_add_image = ElementAddForm_Image( request.form)
ele_add_image.to_add_title.data = ''
ele_add_image.to_add_caption.data = ''
ele_add_event = ElementAddForm_Event( request.form)
ele_add_event.to_add_title.data = ''
ele_add_event.to_add_content.data = ''
ele_add_event.to_add_when.data = ''
ele_add_event.to_add_where.data = ''
ele_add_blogpost = ElementAddForm_Blogpost( request.form)
ele_add_blogpost.to_add_title.data = ''
ele_add_blogpost.to_add_content.data = ''
ele_add_blogpost.to_add_posttime.data = 'p' #placeholder
ele_add_blogpost.to_add_postdate.data = 'p' #placeholder
ele_change = ElementChangeForm( request.form)
ele_change.choose( site.render()[page_name].render())
if page_name == site.name + 'index':
return render_template( 'bedit.html', edits = render_template( 'edit_specific_indexpage.html', ele_change = ele_change, ele_add = ele_add, ele_add_image = ele_add_image, page_name = page_name))
if page_name == site.name + 'events':
return render_template( 'bedit.html', edits = render_template( 'edit_specific_eventspage.html', ele_change = ele_change, ele_add_event = ele_add_event, page_name = page_name))
if page_name == site.name + 'photos':
return render_template( 'bedit.html', edits = render_template( 'edit_specific_photospage.html', ele_change = ele_change, ele_add_image = ele_add_image, page_name = page_name))
if page_name == site.name + 'blog':
return render_template( 'bedit.html', edits = render_template( 'edit_specific_blogpage.html', ele_change = ele_change, ele_add_blogpost = ele_add_blogpost, page_name = page_name))
return render_template( 'bedit.html', edits = render_template( 'edit_specific.html', ele_change = ele_change, ele_add = ele_add, ele_add_image = ele_add_image, page_name = page_name))
@app.route( '/edit/goto/<path:page_name>/', methods=['POST'])
@basic_auth.required
def edit_page_specific_goto( page_name):
if request.form['to_change'] == '':
return redirect( '/edit/' + page_name)
return redirect( '/edit/' + page_name + '/id/' + request.form['to_change'])
@app.route( '/edit/delete/<path:page_name>/', methods=['POST'])
@basic_auth.required
def edit_page_specific_delete( page_name):
try:
site.remove( page_name)
except:
edit_failure( '/edit/')
return edit_success( '/edit/')
@app.route( '/edit/add/<path:page_name>/', methods=['POST'])
@basic_auth.required
def edit_page_specific_add( page_name):
try:
if 'to_add_content' not in request.form:
ele_to_add = Element( 'image')
elif 'to_add_when' and 'to_add_where' in request.form:
ele_to_add = Element( 'event')
elif 'to_add_postdate' and 'to_add_posttime' in request.form:
ele_to_add = Element( 'blogpost')
else:
ele_to_add = Element( 'text')
ele_to_add.title = request.form['to_add_title']
if ele_to_add.frmt == 'text':
ele_to_add.content = request.form['to_add_content'].replace( '\n', '<br>')
if not ele_to_add.content:
return edit_failure( '/edit/' + page_name)
elif ele_to_add.frmt == 'image':
image = request.files['to_add_image']
imagename = secure_filename( image.filename)
if not imagename:
return edit_failure( '/edit/' + page_name)
image.save( page_name + '/assets/' + imagename)
ele_to_add.content = imagename
ele_to_add.caption = request.form['to_add_caption'].replace( '\n', '<br>')
elif ele_to_add.frmt == 'event':
ele_to_add.content = request.form['to_add_content'].replace( '\n', '<br>')
ele_to_add.when = request.form['to_add_when'].replace( '\n', '<br>')
ele_to_add.where = request.form['to_add_where']
if not ele_to_add.title or not ele_to_add.content or not ele_to_add.when or not ele_to_add.where:
return edit_failure( '/edit/' + page_name)
elif ele_to_add.frmt == 'blogpost':
ele_to_add.content = request.form['to_add_content'].replace( '\n', '<br>')
if not ele_to_add.title or not ele_to_add.content:
return edit_failure( '/edit/' + page_name)
ele_to_add.location = 'body'
site.render()[page_name].add( ele_to_add)
except:
return edit_failure( '/edit/' + page_name)
return edit_success( '/edit/' + page_name)
@app.route( '/edit/remove/<path:page_name>/', methods=['POST'])
@basic_auth.required
def edit_page_specific_remove( page_name):
try:
if request.form['to_remove'] == '':
            return edit_failure( '/edit/' + page_name)
site.render()[page_name].remove( request.form['to_remove'])
except:
return edit_failure( '/edit/' + page_name)
return edit_success( '/edit/' + page_name)
@app.route( '/edit/<path:page_name>/id/<ele_id>/', methods=['POST', 'GET'])
@basic_auth.required
def edit_page_specific_element( page_name, ele_id):
if site.render()[page_name].retrieve( ele_id)['frmt'] == 'text':
ele_change = ElementAddForm( request.form)
ele_change.to_add_title.data = site.render()[page_name].render()[ele_id]['title']
ele_change.to_add_content.data = site.render()[page_name].render()[ele_id]['content'].replace( '<br>', '\n')
return render_template( 'bedit.html', edits = render_template( 'edit_specific_element.html', ele_change = ele_change, page_name = page_name, ele_id = ele_id))
elif site.render()[page_name].retrieve( ele_id)['frmt'] == 'image':
ele_change = ElementAddForm_Image( request.form)
ele_change.to_add_title.data = site.render()[page_name].render()[ele_id]['title']
ele_change.to_add_image.data = site.render()[page_name].render()[ele_id]['content']
ele_change.to_add_caption.data = site.render()[page_name].render()[ele_id]['caption'].replace( '<br>', '\n')
return render_template( 'bedit.html', edits = render_template( 'edit_specific_element_image.html', ele_change = ele_change, page_name = page_name, ele_id = ele_id, imagename = '/' + page_name[len( site.name):] + '/assets/' + site.render()[page_name].render()[ele_id]['content']))
elif site.render()[page_name].retrieve( ele_id)['frmt'] == 'event':
ele_change = ElementAddForm_Event( request.form)
ele_change.to_add_title.data = site.render()[page_name].render()[ele_id]['title']
ele_change.to_add_content.data = site.render()[page_name].render()[ele_id]['content'].replace( '<br>', '\n')
ele_change.to_add_when.data = site.render()[page_name].render()[ele_id]['when'].replace( '<br>', '\n')
ele_change.to_add_where.data = site.render()[page_name].render()[ele_id]['where']
return render_template( 'bedit.html', edits = render_template( 'edit_specific_element_event.html', ele_change = ele_change, page_name = page_name, ele_id = ele_id))
elif site.render()[page_name].retrieve( ele_id)['frmt'] == 'blogpost':
ele_change = ElementAddForm_Blogpost( request.form)
ele_change.to_add_title.data = site.render()[page_name].render()[ele_id]['title']
ele_change.to_add_content.data = site.render()[page_name].render()[ele_id]['content'].replace( '<br>', '\n')
return render_template( 'bedit.html', edits = render_template( 'edit_specific_element_blogpost.html', ele_change = ele_change, page_name = page_name, ele_id = ele_id))
@app.route( '/edit/<path:page_name>/id/change/<ele_id>/', methods=['POST'])
@basic_auth.required
def edit_page_specific_element_change( page_name, ele_id):
try:
tmp_ele = Element( site.render()[page_name].retrieve( ele_id)['frmt'])
tmp_ele.load( site.render()[page_name].retrieve( ele_id))
tmp_ele.title = request.form['to_add_title']
if tmp_ele.frmt == 'text':
tmp_ele.content = request.form['to_add_content']
elif tmp_ele.frmt == 'image':
image = request.files['to_add_image']
if image:
imagename = secure_filename( image.filename)
image.save( page_name + '/assets/' + imagename)
tmp_ele.content = imagename
tmp_ele.caption = request.form['to_add_caption']
elif tmp_ele.frmt == 'event':
tmp_ele.content = request.form['to_add_content']
tmp_ele.when = request.form['to_add_when']
tmp_ele.where = request.form['to_add_where']
elif tmp_ele.frmt == 'blogpost':
tmp_ele.content = request.form['to_add_content']
site.render()[page_name].remove( ele_id, skeleton = True)
site.render()[page_name].add( tmp_ele, old = True)
except:
return edit_failure( '/edit/' + page_name)
return edit_success( '/edit/' + page_name)
@app.route( '/edit/<path:page_name>/id/remove/<ele_id>/', methods=['POST'])
@basic_auth.required
def edit_page_specific_element_remove( page_name, ele_id):
try:
site.render()[page_name].remove( ele_id)
except:
return edit_failure( '/edit/' + page_name)
return edit_success( '/edit/' + page_name)
if __name__ == '__main__':
app.run( host = '0.0.0.0', debug = True)
| ayypot/dillys-place | app.py | Python | bsd-2-clause | 12,694 |
# coding: utf-8
"""
MINDBODY Public API
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) # noqa: E501
OpenAPI spec version: v6
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
class PaginationResponse(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'requested_limit': 'int',
'requested_offset': 'int',
'page_size': 'int',
'total_results': 'int'
}
attribute_map = {
'requested_limit': 'RequestedLimit',
'requested_offset': 'RequestedOffset',
'page_size': 'PageSize',
'total_results': 'TotalResults'
}
def __init__(self, requested_limit=None, requested_offset=None, page_size=None, total_results=None): # noqa: E501
"""PaginationResponse - a model defined in Swagger""" # noqa: E501
self._requested_limit = None
self._requested_offset = None
self._page_size = None
self._total_results = None
self.discriminator = None
if requested_limit is not None:
self.requested_limit = requested_limit
if requested_offset is not None:
self.requested_offset = requested_offset
if page_size is not None:
self.page_size = page_size
if total_results is not None:
self.total_results = total_results
@property
def requested_limit(self):
"""Gets the requested_limit of this PaginationResponse. # noqa: E501
Limit from pagination request # noqa: E501
:return: The requested_limit of this PaginationResponse. # noqa: E501
:rtype: int
"""
return self._requested_limit
@requested_limit.setter
def requested_limit(self, requested_limit):
"""Sets the requested_limit of this PaginationResponse.
Limit from pagination request # noqa: E501
:param requested_limit: The requested_limit of this PaginationResponse. # noqa: E501
:type: int
"""
self._requested_limit = requested_limit
@property
def requested_offset(self):
"""Gets the requested_offset of this PaginationResponse. # noqa: E501
Offset from pagination request # noqa: E501
:return: The requested_offset of this PaginationResponse. # noqa: E501
:rtype: int
"""
return self._requested_offset
@requested_offset.setter
def requested_offset(self, requested_offset):
"""Sets the requested_offset of this PaginationResponse.
Offset from pagination request # noqa: E501
:param requested_offset: The requested_offset of this PaginationResponse. # noqa: E501
:type: int
"""
self._requested_offset = requested_offset
@property
def page_size(self):
"""Gets the page_size of this PaginationResponse. # noqa: E501
Number of results returned in this response # noqa: E501
:return: The page_size of this PaginationResponse. # noqa: E501
:rtype: int
"""
return self._page_size
@page_size.setter
def page_size(self, page_size):
"""Sets the page_size of this PaginationResponse.
Number of results returned in this response # noqa: E501
:param page_size: The page_size of this PaginationResponse. # noqa: E501
:type: int
"""
self._page_size = page_size
@property
def total_results(self):
"""Gets the total_results of this PaginationResponse. # noqa: E501
Total number of results in dataset # noqa: E501
:return: The total_results of this PaginationResponse. # noqa: E501
:rtype: int
"""
return self._total_results
@total_results.setter
def total_results(self, total_results):
"""Sets the total_results of this PaginationResponse.
Total number of results in dataset # noqa: E501
:param total_results: The total_results of this PaginationResponse. # noqa: E501
:type: int
"""
self._total_results = total_results
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
if issubclass(PaginationResponse, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, PaginationResponse):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
| mindbody/API-Examples | SDKs/Python/swagger_client/models/pagination_response.py | Python | bsd-2-clause | 6,069 |
import os
import datetime
import lxml
import subprocess
import HTMLParser
from urlparse import urlparse, urlunparse
from rknfilter.conf import settings
from rknfilter.exceptions import VerifyDumpSigError
class DumpFiles(object):
def __init__(self, **kwargs):
self.xml_path = kwargs.get('xml_path', settings.DUMP_XML_PATH)
self.sig_path = kwargs.get('sig_path', settings.DUMP_SIG_PATH)
class DumpFilesHelper(DumpFiles):
def __init__(self, *args, **kwargs):
super(DumpFilesHelper, self).__init__(*args, **kwargs)
self.openssl_path = kwargs.get('openssl_path', settings.OPENSSL_PATH)
# TODO: move this methods to the target
def save_xml(self, content):
with open(self.xml_path, 'wb') as f:
f.write(content)
def save_sig(self, content):
with open(self.sig_path, 'wb') as f:
f.write(content)
def verify(self):
try:
with open(os.devnull, 'w') as fnull:
subprocess.check_call(
[
self.openssl_path,
'smime',
'-verify',
'-in',
self.sig_path,
'-noverify',
'-inform',
'der',
'-content',
self.xml_path,
],
stdout=fnull,
stderr=fnull
)
except subprocess.CalledProcessError:
raise VerifyDumpSigError('Dump signature check failed')
class DumpFilesParser(DumpFiles):
_include_time_format = '%Y-%m-%dT%H:%M:%S'
_decision_date_format = '%Y-%m-%d'
def get_data(self):
for event, element in lxml.etree.iterparse(self.xml_path, tag='content'):
yield self._parse(element)
element.clear()
def _url_fix(self, url):
'''
Fix some common url errors in the RKN blacklist:
1) delete fragment, e.g. http://repressive.very#repressive => http://repressive.very
2) delete dot in the end of the domain name, e.g. http://ya.ru./ to http://ya.ru/
3) delete dot in the end of path, e.g. http://site.com/. => http://site.com/
'''
url_parsed = urlparse(url)
return urlunparse((url_parsed.scheme, url_parsed.hostname.rstrip('.'), url_parsed.path.rstrip('.'), url_parsed.params, url_parsed.query, None))
def _domain_fix(self, domain):
'''
Fix some common domain errors in the RKN blacklist:
1) delete dot in the end of the domain name, e.g. stpdrkn.rkn. => rtpdrkn.rkn
'''
return domain.rstrip('.')
def _parse(self, c):
_content = dict(c.items())
content = dict(
rkn_id = int(_content['id']),
include_date = datetime.datetime.strptime(_content['includeTime'], self._include_time_format),
urgency_type = int(_content.get('urgencyType', 0)),
entry_type = int(_content['entryType']),
hash = _content.get('hash', None),
block_type = _content.get('blockType', 'default'),
)
_decision = dict(c.find('decision').items())
decision = dict(
decision_date = datetime.datetime.strptime(_decision['date'], self._decision_date_format),
decision_org = _decision['org'],
decision_num = _decision['number'],
)
domains = (self._domain_fix(i.text) for i in c.findall('domain'))
urls = (self._url_fix(i.text) for i in c.findall('url'))
ips = (i.text for i in c.findall('ip'))
ip_subnets = (i.text for i in c.findall('ipSubnet'))
return content, decision, domains, urls, ips, ip_subnets
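# A minimal usage sketch (assuming settings already point at downloaded
# dump/signature files; every name below comes from the classes above):
#
#     helper = DumpFilesHelper()
#     helper.verify()  # raises VerifyDumpSigError if the signature is bad
#     for content, decision, domains, urls, ips, subnets in DumpFilesParser().get_data():
#         print content['rkn_id'], decision['decision_num']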
| DmitryFillo/rknfilter | rknfilter/core/dump.py | Python | bsd-2-clause | 3,801 |
# encoding: utf-8
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Deleting model 'DistributionChannel'
db.delete_table('subscriptions_distributionchannel')
# Renaming field 'RssChannel.distributionchannel_ptr' as 'RssChannel.id'
db.rename_column('subscriptions_rsschannel', 'distributionchannel_ptr_id', 'id')
# Adding field 'RssChannel.recipient'
db.add_column('subscriptions_rsschannel', 'recipient', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['auth.User'], null=True), keep_default=False)
# Renaming field 'EmailChannel.distributionchannel_ptr' as 'EmailChannel.id'
db.rename_column('subscriptions_emailchannel', 'distributionchannel_ptr_id', 'id')
# Adding field 'EmailChannel.recipient'
db.add_column('subscriptions_emailchannel', 'recipient', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['auth.User'], null=True), keep_default=False)
        # Renaming field 'SmsChannel.distributionchannel_ptr' as 'SmsChannel.id'
db.rename_column('subscriptions_smschannel', 'distributionchannel_ptr_id', 'id')
# Adding field 'SmsChannel.recipient'
db.add_column('subscriptions_smschannel', 'recipient', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['auth.User'], null=True), keep_default=False)
# Changing field 'Subscription.channel'
db.alter_column('subscriptions_subscription', 'channel_id', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['subscriptions.EmailChannel'], null=True))
def backwards(self, orm):
# Adding model 'DistributionChannel'
db.create_table('subscriptions_distributionchannel', (
('recipient', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['auth.User'], null=True)),
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
))
db.send_create_signal('subscriptions', ['DistributionChannel'])
# Adding field 'RssChannel.distributionchannel_ptr'
db.add_column('subscriptions_rsschannel', 'distributionchannel_ptr', self.gf('django.db.models.fields.related.OneToOneField')(default=0, to=orm['subscriptions.DistributionChannel'], unique=True, primary_key=True), keep_default=False)
# Deleting field 'RssChannel.id'
db.delete_column('subscriptions_rsschannel', 'id')
# Deleting field 'RssChannel.recipient'
db.delete_column('subscriptions_rsschannel', 'recipient_id')
# Adding field 'EmailChannel.distributionchannel_ptr'
db.add_column('subscriptions_emailchannel', 'distributionchannel_ptr', self.gf('django.db.models.fields.related.OneToOneField')(default=0, to=orm['subscriptions.DistributionChannel'], unique=True, primary_key=True), keep_default=False)
# Deleting field 'EmailChannel.id'
db.delete_column('subscriptions_emailchannel', 'id')
# Deleting field 'EmailChannel.recipient'
db.delete_column('subscriptions_emailchannel', 'recipient_id')
# Adding field 'SmsChannel.distributionchannel_ptr'
db.add_column('subscriptions_smschannel', 'distributionchannel_ptr', self.gf('django.db.models.fields.related.OneToOneField')(default=0, to=orm['subscriptions.DistributionChannel'], unique=True, primary_key=True), keep_default=False)
# Deleting field 'SmsChannel.id'
db.delete_column('subscriptions_smschannel', 'id')
# Deleting field 'SmsChannel.recipient'
db.delete_column('subscriptions_smschannel', 'recipient_id')
# Changing field 'Subscription.channel'
db.alter_column('subscriptions_subscription', 'channel_id', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['subscriptions.DistributionChannel'], null=True))
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'subscriptions.emailchannel': {
'Meta': {'object_name': 'EmailChannel'},
'email': ('django.db.models.fields.CharField', [], {'max_length': '256'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'recipient': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True'})
},
'subscriptions.rsschannel': {
'Meta': {'object_name': 'RssChannel'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'recipient': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True'})
},
'subscriptions.searchsubscription': {
'Meta': {'object_name': 'SearchSubscription', '_ormbases': ['subscriptions.Subscription']},
'query': ('django.db.models.fields.TextField', [], {}),
'subscription_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['subscriptions.Subscription']", 'unique': 'True', 'primary_key': 'True'})
},
'subscriptions.smschannel': {
'Meta': {'object_name': 'SmsChannel'},
'carrier': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'number': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'recipient': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True'})
},
'subscriptions.subscription': {
'Meta': {'object_name': 'Subscription'},
'channel': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['subscriptions.EmailChannel']", 'null': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'last_sent': ('django.db.models.fields.DateTimeField', [], {})
}
}
complete_apps = ['subscriptions']
| mjumbewu/django-subscriptions | subscriptions/migrations/0004_auto__del_distributionchannel__del_field_rsschannel_distributionchanne.py | Python | bsd-2-clause | 9,074 |
## An example of the simple Schnorr sigma protocol
## to prove knowledge of x such that h = g^x, for a
## public generator g and public group element h.
from petlib.bn import Bn
from petlib.ec import EcGroup, EcPt
from hashlib import sha256
def challenge(elements):
"""Packages a challenge in a bijective way"""
elem = [len(elements)] + elements
elem_str = map(str, elem)
elem_len = map(lambda x: "%s||%s" % (len(x) , x), elem_str)
state = "|".join(elem_len)
H = sha256()
H.update(state.encode("utf8"))
return H.digest()
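# Why the length prefixes above matter: without them the packing would not
# be injective -- ["ab", "c"] and ["a", "bc"] would serialize identically
# and hash to the same challenge. With the "len||value" framing they differ:
#
#     assert challenge(["ab", "c"]) != challenge(["a", "bc"])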
def setup():
G = EcGroup(713)
g = G.generator()
o = G.order()
return G, g, o
def prove(params, h, g, x, m=""):
"""Schnorr proof of the statement ZK(x ; h = g^x)"""
assert x * g == h
G, _, o = params
w = o.random()
W = w * g
state = ['schnorr', G.nid(), g, h, m, W]
hash_c = challenge(state)
c = Bn.from_binary(hash_c) % o
r = (w - c * x) % o
return (c, r)
def verify(params, h, g, proof, m=""):
"""Verify the statement ZK(x ; h = g^x)"""
G, _, o = params
c, r = proof
W = (r * g + c * h)
state = ['schnorr', G.nid(), g, h, m, W]
hash_c = challenge(state)
c2 = Bn.from_binary(hash_c) % o
return c == c2
def test_zkp():
params = setup()
G, g, o = params
x = o.random()
h = x * g
## Use it as a Zk proof
proof = prove(params, h, g, x)
assert verify(params, h, g, proof)
assert not verify(params, g, h, proof)
## Use it as a signature scheme
proofm = prove(params, h, g, x, m = "Hello World!")
assert verify(params, h, g, proofm, m = "Hello World!")
assert not verify(params, h, g, proofm, m = "Other String")
| gdanezis/petlib | examples/zkp.py | Python | bsd-2-clause | 1,694 |
__version__ = '0.1.1'
from .OxCaptcha import OxCaptcha
| gbaydin/OxCaptcha | OxCaptcha/__init__.py | Python | bsd-2-clause | 56 |
#!/usr/bin/env python
"""
Copyright (c) 2013, Citrix Inc.
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
import md5
import os.path
import xml.dom.minidom
import ConfigParser
import xcp.version as version
import xcp.xmlunwrap as xmlunwrap
class Package:
pass
class BzippedPackage(Package):
def __init__(self, repository, label, size, md5sum, optional, fname, root):
(
self.repository,
self.label,
self.size,
self.md5sum,
self.optional,
self.filename,
self.destination
) = ( repository, label, long(size), md5sum, (optional==True), fname, root )
def __repr__(self):
return "<BzippedPackage '%s'>" % self.label
class RPMPackage(Package):
def __init__(self, repository, label, size, md5sum, optional, fname, options):
(
self.repository,
self.label,
self.size,
self.md5sum,
self.optional,
self.filename,
self.options
) = ( repository, label, long(size), md5sum, (optional==True), fname, options )
def __repr__(self):
return "<RPMPackage '%s'>" % self.label
class DriverRPMPackage(RPMPackage):
def __init__(self, repository, label, size, md5sum, fname, kernel, options):
(
self.repository,
self.label,
self.size,
self.md5sum,
self.filename,
self.kernel,
self.options
) = ( repository, label, long(size), md5sum, fname, kernel, options )
def __repr__(self):
return "<DriverRPMPackage '%s', kernel '%s'>" % (self.label, self.kernel)
class DriverPackage(Package):
def __init__(self, repository, label, size, md5sum, fname, root):
(
self.repository,
self.label,
self.size,
self.md5sum,
self.filename,
self.destination
) = ( repository, label, long(size), md5sum, fname, root )
def __repr__(self):
return "<DriverPackage '%s'>" % self.label
class FirmwarePackage(Package):
def __init__(self, repository, label, size, md5sum, fname):
(
self.repository,
self.label,
self.size,
self.md5sum,
self.filename
) = ( repository, label, long(size), md5sum, fname )
def __repr__(self):
return "<FirmwarePackage '%s'>" % self.label
class NoRepository(Exception):
pass
class RepoFormatError(Exception):
pass
class BaseRepository(object):
""" Represents a repository containing packages and associated meta data. """
def __init__(self, access, base = ""):
self.access = access
self.base = base
@classmethod
def findRepositories(cls, access):
repos = YumRepository.findRepositories(access)
try:
repos += Repository.findRepositories(access)
except RepoFormatError:
pass
return repos
@classmethod
def getRepoVer(cls, access):
access.start()
is_yum = YumRepository.isRepo(access, "")
access.finish()
if is_yum:
return YumRepository.getRepoVer(access)
return Repository.getRepoVer(access)
class YumRepository(BaseRepository):
""" Represents a Yum repository containing packages and associated meta data. """
REPOMD_FILENAME = "repodata/repomd.xml"
TREEINFO_FILENAME = ".treeinfo"
@classmethod
def findRepositories(cls, access):
access.start()
is_repo = cls.isRepo(access, "")
access.finish()
if not is_repo:
return []
return [ YumRepository(access, "") ]
def __init__(self, access, base = ""):
BaseRepository.__init__(self, access, base)
@classmethod
def isRepo(cls, access, base):
""" Return whether there is a repository at base address
'base' accessible using accessor."""
return False not in map(lambda x: access.access(os.path.join(base, x)),
[cls.TREEINFO_FILENAME, cls.REPOMD_FILENAME])
@classmethod
def getRepoVer(cls, access):
repo_ver = None
access.start()
try:
treeinfofp = access.openAddress(cls.TREEINFO_FILENAME)
treeinfo = ConfigParser.SafeConfigParser()
treeinfo.readfp(treeinfofp)
treeinfofp.close()
ver_str = treeinfo.get('platform', 'version')
repo_ver = version.Version.from_string(ver_str)
except Exception, e:
raise RepoFormatError, "Failed to open %s: %s" % (cls.TREEINFO_FILENAME, str(e))
access.finish()
return repo_ver
class Repository(BaseRepository):
""" Represents a XenSource repository containing packages and associated
meta data. """
REPOSITORY_FILENAME = "XS-REPOSITORY"
PKGDATA_FILENAME = "XS-PACKAGES"
REPOLIST_FILENAME = "XS-REPOSITORY-LIST"
XCP_MAIN_IDENT = "xcp:main"
XS_MAIN_IDENT = "xs:main"
OPER_MAP = {'eq': ' = ', 'ne': ' != ', 'lt': ' < ', 'gt': ' > ', 'le': ' <= ', 'ge': ' >= '}
@classmethod
def findRepositories(cls, access):
# Check known locations:
package_list = ['', 'packages', 'packages.main', 'packages.linux',
'packages.site']
repos = []
access.start()
try:
extra = access.openAddress(cls.REPOLIST_FILENAME)
if extra:
for line in extra:
package_list.append(line.strip())
extra.close()
except Exception, e:
raise RepoFormatError, "Failed to open %s: %s" % (cls.REPOLIST_FILENAME, str(e))
for loc in package_list:
if cls.isRepo(access, loc):
repos.append(Repository(access, loc))
access.finish()
return repos
def __init__(self, access, base, is_group = False):
BaseRepository.__init__(self, access, base)
self.is_group = is_group
self._md5 = md5.new()
self.requires = []
self.packages = []
access.start()
try:
repofile = access.openAddress(os.path.join(base, self.REPOSITORY_FILENAME))
except Exception, e:
access.finish()
raise NoRepository, e
self._parse_repofile(repofile)
repofile.close()
try:
pkgfile = access.openAddress(os.path.join(base, self.PKGDATA_FILENAME))
except Exception, e:
access.finish()
raise NoRepository, e
self._parse_packages(pkgfile)
pkgfile.close()
access.finish()
def __repr__(self):
return "<Repository '%s', version '%s'>" % (self.identifier, self.product_version)
def __str__(self):
out = "Repository '%s', version '%s'" % (self.identifier, self.product_version)
if len(self.requires) > 0:
out += ", Requires: %s" % str(self.requires)
if len(self.packages) > 0:
out += ", Packages: %s" % str(self.packages)
return out
def _parse_repofile(self, repofile):
""" Parse repository data -- get repository identifier and name. """
repofile_contents = repofile.read()
repofile.close()
# update md5sum for repo
self._md5.update(repofile_contents)
# build xml doc object
try:
xmldoc = xml.dom.minidom.parseString(repofile_contents)
except:
raise RepoFormatError, "%s not in XML" % self.REPOSITORY_FILENAME
try:
repo_node = xmlunwrap.getElementsByTagName(xmldoc, ['repository'], mandatory = True)
attrs = ('originator', 'name', 'product', 'version', 'build')
            optional_attrs = ('build',)
for attr in attrs:
self.__dict__[attr] = xmlunwrap.getStrAttribute(repo_node[0], [attr], default = None,
mandatory = (attr not in optional_attrs))
desc_node = xmlunwrap.getElementsByTagName(xmldoc, ['description'], mandatory = True)
self.description = xmlunwrap.getText(desc_node[0])
for req_node in xmlunwrap.getElementsByTagName(xmldoc, ['requires']):
req = {}
for attr in ['originator', 'name', 'test', 'version', 'build']:
req[attr] = xmlunwrap.getStrAttribute(req_node, [attr])
if req['build'] == '':
del req['build']
assert req['test'] in self.OPER_MAP
self.requires.append(req)
except:
raise RepoFormatError, "%s format error" % self.REPOSITORY_FILENAME
self.identifier = "%s:%s" % (self.originator, self.name)
ver_str = self.version
if self.build:
ver_str += '-'+self.build
self.product_version = version.Version.from_string(ver_str)
def _parse_packages(self, pkgfile):
pkgfile_contents = pkgfile.read()
pkgfile.close()
# update md5sum for repo
self._md5.update(pkgfile_contents)
# build xml doc object
try:
xmldoc = xml.dom.minidom.parseString(pkgfile_contents)
except:
raise RepoFormatError, "%s not in XML" % self.PKGDATA_FILENAME
for pkg_node in xmlunwrap.getElementsByTagName(xmldoc, ['package']):
pkg = self._create_package(pkg_node)
self.packages.append(pkg)
constructor_map = {
'tbz2': [ BzippedPackage, ( 'label', 'size', 'md5', 'optional', 'fname', 'root' ) ],
'rpm': [ RPMPackage, ( 'label', 'size', 'md5', 'optional', 'fname', 'options' ) ],
'driver-rpm': [ DriverRPMPackage, ( 'label', 'size', 'md5', 'fname', 'kernel', 'options' ) ],
# obsolete
'driver': [ DriverPackage, ( 'label', 'size', 'md5', 'fname', 'root' ) ],
'firmware': [ FirmwarePackage, ('label', 'size', 'md5', 'fname') ]
}
optional_attrs = ['optional', 'options']
def _create_package(self, node):
args = [ self ]
ptype = xmlunwrap.getStrAttribute(node, ['type'], mandatory = True)
for attr in self.constructor_map[ptype][1]:
if attr == 'fname':
args.append(xmlunwrap.getText(node))
else:
args.append(xmlunwrap.getStrAttribute(node, [attr], mandatory = attr not in self.optional_attrs))
return self.constructor_map[ptype][0](*args)
@classmethod
def isRepo(cls, access, base):
""" Return whether there is a repository at base address
'base' accessible using accessor."""
return False not in map(lambda x: access.access(os.path.join(base, x)),
[cls.REPOSITORY_FILENAME, cls.PKGDATA_FILENAME])
@classmethod
def getRepoVer(cls, access):
repo_ver = None
try:
repos = cls.findRepositories(access)
for r in repos:
if r.identifier == cls.XCP_MAIN_IDENT:
repo_ver = r.product_version
break
except:
pass
return repo_ver
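# Hypothetical usage sketch; the accessor object is created elsewhere in
# this package, and only the methods used above (start, finish, access,
# openAddress) are assumed:
#
#     for repo in BaseRepository.findRepositories(access):
#         print repo   # e.g. <Repository 'xcp:main', version '...'>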
| xenserver/python-libs | xcp/repository.py | Python | bsd-2-clause | 12,464 |
import unittest
from sklearn.naive_bayes import GaussianNB
from sklearn.svm import SVC
from skmultilearn.ensemble import RakelD
from skmultilearn.tests.classifier_basetest import ClassifierBaseTest
TEST_LABELSET_SIZE = 3
class RakelDTest(ClassifierBaseTest):
def get_rakeld_with_svc(self):
return RakelD(
base_classifier=SVC(probability=True),
base_classifier_require_dense=[False, True],
labelset_size=TEST_LABELSET_SIZE
)
def get_rakeld_with_nb(self):
return RakelD(
base_classifier=GaussianNB(),
base_classifier_require_dense=[True, True],
labelset_size=TEST_LABELSET_SIZE
)
def test_if_sparse_classification_works_on_non_dense_base_classifier(self):
classifier = self.get_rakeld_with_svc()
self.assertClassifierWorksWithSparsity(classifier, 'sparse')
self.assertClassifierPredictsProbabilities(classifier, 'sparse')
def test_if_dense_classification_works_on_non_dense_base_classifier(self):
classifier = self.get_rakeld_with_svc()
self.assertClassifierWorksWithSparsity(classifier, 'dense')
self.assertClassifierPredictsProbabilities(classifier, 'dense')
def test_if_sparse_classification_works_on_dense_base_classifier(self):
classifier = self.get_rakeld_with_nb()
self.assertClassifierWorksWithSparsity(classifier, 'sparse')
self.assertClassifierPredictsProbabilities(classifier, 'sparse')
def test_if_dense_classification_works_on_dense_base_classifier(self):
classifier = self.get_rakeld_with_nb()
self.assertClassifierWorksWithSparsity(classifier, 'dense')
self.assertClassifierPredictsProbabilities(classifier, 'dense')
def test_if_works_with_cross_validation(self):
classifier = self.get_rakeld_with_nb()
self.assertClassifierWorksWithCV(classifier)
if __name__ == '__main__':
unittest.main()
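# For reference, a hypothetical quick-start mirroring the fixtures above
# (X_train, y_train, X_test stand in for multi-label training data):
#
#     clf = RakelD(base_classifier=GaussianNB(), labelset_size=TEST_LABELSET_SIZE)
#     clf.fit(X_train, y_train)
#     predictions = clf.predict(X_test)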
| scikit-multilearn/scikit-multilearn | skmultilearn/ensemble/tests/test_rakeld.py | Python | bsd-2-clause | 1,966 |
__version__ = '0.4.4'
__url__ = 'https://github.com/imposeren/django-happenings'
| imposeren/django-happenings | happenings/__init__.py | Python | bsd-2-clause | 81 |
"""
@package mi.dataset.driver.wfp.ctdpfk.test.test_driver
@file marine-integrations/mi/dataset/driver/wfp/ctdpfk/test/test_driver.py
@author Bill French (template)
@author Roger Unwin
@brief Test cases for wfp/ctdpfk driver
USAGE:
Make tests verbose and provide stdout
* From the IDK
$ bin/test_driver
$ bin/test_driver -i [-t testname]
$ bin/test_driver -q [-t testname]
"""
__author__ = 'Roger Unwin'
__license__ = 'Apache 2.0'
import unittest
import gevent
import os
import time
import hashlib
from nose.plugins.attrib import attr
from mock import Mock
from mi.core.log import get_logger ; log = get_logger()
from exceptions import Exception
from mi.idk.dataset.unit_test import DataSetTestCase
from mi.idk.dataset.unit_test import DataSetTestConfig
from mi.idk.dataset.unit_test import DataSetUnitTestCase
from mi.idk.dataset.unit_test import DataSetIntegrationTestCase
from mi.idk.dataset.unit_test import DataSetQualificationTestCase
from mi.core.exceptions import ConfigurationException
from mi.core.exceptions import SampleException
from mi.core.exceptions import InstrumentParameterException
from mi.idk.exceptions import SampleTimeout
from mi.dataset.dataset_driver import DataSourceConfigKey, DataSetDriverConfigKeys
from mi.dataset.dataset_driver import DriverParameter, DriverStateKey
from mi.core.instrument.instrument_driver import DriverEvent
from mi.dataset.parser.nutnrb import NutnrbDataParticle
from mi.dataset.driver.issm.nutnrb.driver import IssmRiNUTNRBDataSetDriver
from pyon.agent.agent import ResourceAgentState
from interface.objects import CapabilityType
from interface.objects import AgentCapability
from interface.objects import ResourceAgentErrorEvent
from interface.objects import ResourceAgentConnectionLostErrorEvent
DataSetTestCase.initialize(
driver_module='mi.dataset.driver.issm.nutnrb.driver',
driver_class='IssmRiNUTNRBDataSetDriver',
agent_resource_id = '123xyz',
agent_name = 'Agent007',
agent_packet_config = IssmRiNUTNRBDataSetDriver.stream_config(),
startup_config = {
DataSourceConfigKey.HARVESTER:
{
DataSetDriverConfigKeys.DIRECTORY: '/tmp/dsatest',
DataSetDriverConfigKeys.STORAGE_DIRECTORY: '/tmp/stored_dsatest',
DataSetDriverConfigKeys.PATTERN: '*.log',
DataSetDriverConfigKeys.FREQUENCY: 1,
},
DataSourceConfigKey.PARSER: {}
}
)
SAMPLE_STREAM = 'nutnrb_parsed'
###############################################################################
# INT TESTS #
# Device specific integration tests are for #
# testing device specific capabilities #
###############################################################################
@unittest.skip('Parser is broken, need to fix timestamp')
@attr('INT', group='mi')
class IntegrationTest(DataSetIntegrationTestCase):
def setUp(self):
super(IntegrationTest, self).setUp()
def test_harvester_config_exception(self):
"""
Start the a driver with a bad configuration. Should raise
an exception.
"""
with self.assertRaises(ConfigurationException):
self.driver = IssmRiNUTNRBDataSetDriver({},
self.memento,
self.data_callback,
self.state_callback,
self.event_callback,
self.exception_callback)
def test_get(self):
"""
Test that we can get data from files. Verify that the driver sampling
can be started and stopped.
"""
self.clear_sample_data()
# Start sampling and watch for an exception
self.driver.start_sampling()
self.clear_async_data()
self.create_sample_data('test_data_1.log', "DATA001.log")
self.assert_data(NutnrbDataParticle, 'test_data_1.log.result.yml', count=1, timeout=10)
self.clear_async_data()
self.create_sample_data('test_data_3.log', "DATA002.log")
self.assert_data(NutnrbDataParticle, 'test_data_3.log.result.yml', count=8, timeout=10)
self.clear_async_data()
self.create_sample_data('20121213.nutnr.log', "DATA003.log")
self.assert_data(NutnrbDataParticle, count=435, timeout=20)
self.driver.stop_sampling()
self.driver.start_sampling()
self.clear_async_data()
self.create_sample_data('test_data_1.log', "DATA004.log")
self.assert_data(NutnrbDataParticle, count=1, timeout=10)
def test_stop_resume(self):
"""
Test the ability to stop and restart the process
"""
self.create_sample_data('test_data_1.log', "DATA001.log")
self.create_sample_data('test_data_3.log', "DATA002.log")
# get file metadata for use in state memento
startup_config = self._driver_config()['startup_config']
file_path_1 = os.path.join(startup_config[DataSourceConfigKey.HARVESTER].get(DataSetDriverConfigKeys.DIRECTORY),
"DATA001.log")
# need to reset file mod time since file is created again
mod_time_1 = os.path.getmtime(file_path_1)
file_size_1 = os.path.getsize(file_path_1)
with open(file_path_1) as filehandle:
md5_checksum_1 = hashlib.md5(filehandle.read()).hexdigest()
file_path_2 = os.path.join(startup_config[DataSourceConfigKey.HARVESTER].get(DataSetDriverConfigKeys.DIRECTORY),
"DATA002.log")
# need to reset file mod time since file is created again
mod_time_2 = os.path.getmtime(file_path_2)
file_size_2 = os.path.getsize(file_path_2)
with open(file_path_2) as filehandle:
md5_checksum_2 = hashlib.md5(filehandle.read()).hexdigest()
# Create and store the new driver state, after completed reading "DATA001.log"
# Note, since file "DATA001.log" is ingested, parser state is not looked at, in a real run there would be a state in there
self.memento = {"DATA001.log":{'ingested': True,
'file_mod_date': mod_time_1,
'file_checksum': md5_checksum_1,
'file_size': file_size_1,
'parser_state': {}
},
"DATA002.log":{'ingested': False,
'file_mod_date': mod_time_2,
'file_checksum': md5_checksum_2,
'file_size': file_size_2,
'parser_state': {'position': 2628}
}
}
self.driver = IssmRiNUTNRBDataSetDriver(
self._driver_config()['startup_config'],
self.memento,
self.data_callback,
self.state_callback,
self.event_callback,
self.exception_callback)
# create some data to parse
self.clear_async_data()
self.driver.start_sampling()
# verify data is produced
self.assert_data(NutnrbDataParticle, 'test_data_3.log.partial_results.yml', count=5, timeout=10)
###############################################################################
###############################################################################
# QUALIFICATION TESTS #
# Device specific qualification tests are for #
# testing device specific capabilities #
###############################################################################
@unittest.skip('Parser is broken, need to fix timestamp')
@attr('QUAL', group='mi')
class QualificationTest(DataSetQualificationTestCase):
def setUp(self):
super(QualificationTest, self).setUp()
def test_publish_path(self):
"""
Setup an agent/driver/harvester/parser and verify that data is
published out the agent
"""
self.create_sample_data('test_data_1.log', 'DATA001.log')
self.assert_initialize()
# Verify we get one sample
try:
result = self.data_subscribers.get_samples(SAMPLE_STREAM)
log.debug("RESULT: %s", result)
# Verify values
self.assert_data_values(result, 'test_data_1.log.result.yml')
except Exception as e:
log.error("Exception trapped: %s", e)
self.fail("Sample timeout.")
def test_large_import(self):
"""
There is a bug when activating an instrument go_active times out and
there was speculation this was due to blocking behavior in the agent.
https://jira.oceanobservatories.org/tasks/browse/OOIION-1284
"""
self.create_sample_data('20121213.nutnr.log', '20121213.nutnr.log')
self.assert_initialize()
result = self.get_samples(SAMPLE_STREAM, 433, 220)
def test_stop_start(self):
"""
Test the agents ability to start data flowing, stop, then restart
at the correct spot.
"""
self.create_sample_data('test_data_1.log', 'DATA001.log')
self.assert_initialize(final_state=ResourceAgentState.COMMAND)
# Slow down processing to 1 per second to give us time to stop
self.dataset_agent_client.set_resource({DriverParameter.RECORDS_PER_SECOND: 1})
self.assert_start_sampling()
# Verify we get one sample
try:
# Read the first file and verify the data
result = self.get_samples(SAMPLE_STREAM)
# Verify values
self.assert_data_values(result, 'test_data_1.log.result.yml')
self.assert_sample_queue_size(SAMPLE_STREAM, 0)
self.create_sample_data('test_data_3.log', 'DATA003.log')
# Now read the first three records of the second file then stop
result = self.get_samples(SAMPLE_STREAM, 3)
self.assert_stop_sampling()
self.assert_sample_queue_size(SAMPLE_STREAM, 0)
# Restart sampling and ensure we get the last 5 records of the file
self.assert_start_sampling()
result = self.get_samples(SAMPLE_STREAM, 5)
self.assert_data_values(result, 'test_data_3.log.partial_results.yml')
self.assert_sample_queue_size(SAMPLE_STREAM, 0)
except SampleTimeout as e:
log.error("Exception trapped: %s", e, exc_info=True)
self.fail("Sample timeout.")
def test_parser_exception(self):
"""
Test an exception raised after the driver is started during
record parsing.
"""
self.clear_sample_data()
self.create_sample_data('test_data_2.log', 'DATA002.log')
self.assert_initialize()
self.event_subscribers.clear_events()
result = self.get_samples(SAMPLE_STREAM, 9)
self.assert_sample_queue_size(SAMPLE_STREAM, 0)
# Verify an event was raised and we are in our retry state
self.assert_event_received(ResourceAgentErrorEvent, 10)
self.assert_state_change(ResourceAgentState.STREAMING, 10)
| ooici/marine-integrations | mi/dataset/driver/issm/nutnrb/test/test_driver.py | Python | bsd-2-clause | 11,328 |
# -*- coding: utf-8 -*-
import unittest
from romanizer import to_hepburn, to_kunrei
class HepbernTestCase(unittest.TestCase):
def test_hepburn(self):
self.assertEqual(u'', to_hepburn(u''))
self.assertEqual(u'aiueo', to_hepburn(u'あいうえお'))
self.assertEqual(u'gumma', to_hepburn(u'ぐんま'))
self.assertEqual(u'chimpui', to_hepburn(u'チンプイ'))
self.assertEqual(u'bampuobuchikin', to_hepburn(u'バンプオブチキン'))
self.assertEqual(u'kanda', to_hepburn(u'かんだ'))
self.assertEqual(u'bozuga jozuni byobuni bozuno eokaita', to_hepburn(u'ボウズガ ジョウズニ ビョウブニ ボウズノ エヲカイタ'))
self.assertEqual(u'beppu', to_hepburn(u'べっぷ'))
self.assertEqual(u'kotchi', to_hepburn(u'コッチ'))
self.assertEqual(u'nnnnnn', to_hepburn(u'んんんンンン'))
self.assertEqual(u'oi toru onishi', to_hepburn(u'おーい とおる おおにし'))
self.assertEqual(u"shinichi", to_hepburn(u'シンイチ'))
self.assertEqual(u'shonin', to_hepburn(u'しょーにん'))
self.assertEqual(u'joho', to_hepburn(u'じょうほう'))
def test_kunrei(self):
self.assertEqual(u'', to_kunrei(u''))
self.assertEqual(u'aiueo', to_kunrei(u'あいうえお'))
self.assertEqual(u'gunma', to_kunrei(u'ぐんま'))
self.assertEqual(u'tinpui', to_kunrei(u'チンプイ'))
self.assertEqual(u'banpuobutikin', to_kunrei(u'バンプオブチキン'))
self.assertEqual(u'kanda', to_kunrei(u'かんだ'))
self.assertEqual(u'bôzuga zyôzuni byôbuni bôzuno ewokaita', to_kunrei(u'ボウズガ ジョウズニ ビョウブニ ボウズノ エヲカイタ'))
self.assertEqual(u'beppu', to_kunrei(u'べっぷ'))
self.assertEqual(u'kotti', to_kunrei(u'コッチ'))
self.assertEqual(u'nnnnnn', to_kunrei(u'んんんンンン'))
self.assertEqual(u'ôi tôru ônisi', to_kunrei(u'おーい とおる おおにし'))
self.assertEqual(u"sin'iti", to_kunrei(u'シンイチ'))
self.assertEqual(u'syônin', to_kunrei(u'しょーにん'))
self.assertEqual(u'zyôhô', to_kunrei(u'じょうほう'))
if __name__ == '__main__':
unittest.main()
| kosugi/alfred.romanizer | test_romanizer.py | Python | bsd-2-clause | 2,289 |
import re
import struct
import string
import timepro
import logging
import uuid
import sys
# Import lzma this way so we get the built-in version for
# Python 3.3+ or the backported one otherwise. Don't just
# do a try/catch for import lzma because the older
# pyliblzma uses that package name, and we do not want
# to use it.
if sys.version_info[0:3] >= (3,3,0):
import lzma
else:
try:
from backports import lzma
except ImportError as e:
# On Fedora/OLPC we have a namespace conflict
# with another package, so we had to rename the
# namespace until we get it resolved upstream
from backportslzma import lzma
try:
# repose.lru does not help much in production
# web serving. It makes a HUGE difference in
# pre-processing. So we don't want to make it
# a hard dependency.
from repoze.lru import LRUCache
except ImportError as e:
class LRUCache(object):
def __init__(self, cache_size):
return
def get(self, key, value=None):
return value
def put(self, key, value):
return
def clear(self):
return
from StringIO import StringIO
logger = logging.getLogger(__name__)
HEADER_FORMAT = [
('I', 'magicNumber'),
('I', 'version'),
#('Q', 'uuidLow'),
#('Q', 'uuidHigh'),
('B', 'uuid0'),
('B', 'uuid1'),
('B', 'uuid2'),
('B', 'uuid3'),
('B', 'uuid4'),
('B', 'uuid5'),
('B', 'uuid6'),
('B', 'uuid7'),
('B', 'uuid8'),
('B', 'uuid9'),
('B', 'uuid10'),
('B', 'uuid11'),
('B', 'uuid12'),
('B', 'uuid13'),
('B', 'uuid14'),
('B', 'uuid15'),
('I', 'articleCount'),
('I', 'clusterCount'),
('Q', 'urlPtrPos'),
('Q', 'titlePtrPos'),
('Q', 'clusterPtrPos'),
('Q', 'mimeListPos'),
('I', 'mainPage'),
('I', 'layoutPage'),
('Q', 'checksumPos')
]
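# For reference, format_from_rich() below compiles this table into the
# little-endian struct string "<II" + "B" * 16 + "IIQQQQIIQ", which is the
# fixed 80-byte ZIM header.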
ARTICLE_ENTRY_FORMAT = [
('H', 'mimetype'),
('B', 'parameterLen'),
('c', 'namespace'),
('I', 'revision'),
('I', 'clusterNumber'),
('I', 'blobNumber')
# Null terminated url
# Null terminated title
# variable length parameter data
]
REDIRECT_ENTRY_FORMAT = [
('H', 'mimetype'),
('B', 'parameterLen'),
('c', 'namespace'),
('I', 'revision'),
('I', 'redirectIndex')
# Null terminated url
# Null terminated title
# variable length parameter data
]
CLUSTER_FORMAT = [
('B', 'compressionType')
]
# A null byte
NULL = struct.pack('B', 0)
def format_from_rich(rich_format):
return "<" + string.join([x[0] for x in rich_format], "")
@timepro.profile()
def read_null_terminated(f, encoding='utf-8'):
s = ""
while True:
b = f.read(1)
if b == NULL:
return s.decode(encoding)
s = s + b
def binary_search(f, t, min, max):
while 1:
if max < min:
return None
        m = (min + max) // 2
v = f(m)
if v < t:
min = m + 1
elif v > t:
max = m - 1
else:
return m
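# Sketch of how binary_search is used by get_entry_by_url further down:
# `f` maps a directory index to its sortable key, so the on-disk URL index
# can be searched without loading it into memory (names hypothetical):
#
#     idx = binary_search(lambda i: keys[i], target_key, 0, len(keys) - 1)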
def full_url(namespace, url):
return namespace + '/' + url
class Format(object):
def __init__(self, rich_format):
self.rich_fmt = rich_format
self.fmt = format_from_rich(rich_format)
self.compiled = struct.Struct(self.fmt)
self.size = self.compiled.size
@timepro.profile()
def unpack_format(self, buffer, offset=0):
fields = self.compiled.unpack_from(buffer, offset)
d = []
for field, entry in zip(fields, self.rich_fmt):
d.append((entry[1], field))
return d
@timepro.profile()
def unpack_format_from_file(self, f, seek=None):
if seek is not None:
timepro.start("seek")
f.seek(seek)
timepro.end("seek")
buf = f.read(self.size)
d = self.unpack_format(buf)
return d
@timepro.profile()
def unpack(self, buffer, offset=0):
"""Override this to get more complex behavior"""
return self.unpack_format(buffer, offset)
@timepro.profile()
def unpack_from_file(self, f, seek=None):
"""Override this to get more complex behavior"""
return self.unpack_format_from_file(f, seek)
class HeaderFormat(Format):
def __init__(self):
super(HeaderFormat, self).__init__(HEADER_FORMAT)
class ClusterFormat(Format):
def __init__(self):
super(ClusterFormat, self).__init__(CLUSTER_FORMAT)
class ClusterCache(object):
def __init__(self, cache_size):
self.lru = LRUCache(cache_size)
self.hits = 0
self.misses = 0
def get(self, file_buffer, ptr):
v = self.lru.get((file_buffer, ptr))
if v is not None:
self.hits += 1
return v
v = ClusterData(file_buffer, ptr)
self.lru.put((file_buffer, ptr), v)
self.misses += 1
return v
def clear(self):
logger.debug("CACHE HITS " + str(self.hits) + " VS MISSES " + str(self.misses))
self.lru.clear()
class ClusterData(object):
@timepro.profile()
def __init__(self, file_buffer, ptr):
cluster_info = dict(ClusterFormat().unpack_from_file(file_buffer, ptr))
self.compressed = cluster_info['compressionType'] == 4
self.file_buf = file_buffer
self.uncomp_buf = None
self.ptr = ptr
self.offsets = []
if self.compressed:
self._decompress()
self.read_offsets()
@timepro.profile()
def _decompress(self, chunk_size=32000):
"""Decompresses the cluster if compression flag was found. Stores
uncompressed results internally."""
if not self.compressed:
return
self.file_buf.seek(self.ptr + 1)
        # Buffer the decompressed cluster so blobs can later be read from memory
self.uncomp_buf = StringIO()
decomp = lzma.LZMADecompressor()
while not decomp.eof:
timepro.start("file_buf read")
comp_data = self.file_buf.read(chunk_size)
timepro.end("file_buf read")
timepro.start("decompress")
uncomp_data = decomp.decompress(comp_data)
timepro.end("decompress")
timepro.start("write")
self.uncomp_buf.write(uncomp_data)
timepro.end("write")
return self.uncomp_buf
def source_buffer(self):
"""Returns the buffer to read from, either the file buffer
passed or the uncompressed lzma data. Will seek to the
beginning of the cluster after the 1 byte compression flag"""
if self.compressed:
self.uncomp_buf.seek(0)
return self.uncomp_buf
else:
self.file_buf.seek(self.ptr + 1)
return self.file_buf
def unpack_blob_index(self, buf):
ptr = struct.unpack('I', buf)[0]
return ptr
@timepro.profile()
def read_offsets(self):
"""Reads the cluster header with the offsets of the blobs"""
src_buf = self.source_buffer()
raw = src_buf.read(4)
offset0 = self.unpack_blob_index(raw)
self.offsets.append(offset0)
nblob = offset0 / 4
for idx in xrange(nblob-1):
raw = src_buf.read(4)
offset = self.unpack_blob_index(raw)
self.offsets.append(offset)
return self.offsets
@timepro.profile()
def read_blob(self, blob_index):
"""Reads a blob from the cluster"""
if blob_index >= len(self.offsets) - 1:
raise IOError("Blob index exceeds number of blobs available: %s" % blob_index)
src_buf = self.source_buffer()
blob_size = self.offsets[blob_index+1] - self.offsets[blob_index]
# For uncompressed data, seek from beginning of file
# Otherwise seek the compressed data with just and offset
if not self.compressed:
seek_beg = self.ptr + 1
else:
seek_beg = 0
src_buf.seek(seek_beg + self.offsets[blob_index])
blob_data = src_buf.read(blob_size)
return blob_data
class ArticleEntryFormat(Format):
def __init__(self):
super(ArticleEntryFormat, self).__init__(ARTICLE_ENTRY_FORMAT)
def unpack(self, buffer, offset=0):
raise Exception("Unimplemented")
def unpack_from_file(self, f, seek=None):
d = super(ArticleEntryFormat, self).unpack_from_file(f, seek)
url = read_null_terminated(f)
title = read_null_terminated(f)
parameter = f.read(dict(d)['parameterLen'])
d.extend([('url', url),
('title', title),
('parameter', parameter)]
)
return d
class RedirectEntryFormat(Format):
def __init__(self):
super(RedirectEntryFormat, self).__init__(REDIRECT_ENTRY_FORMAT)
def unpack(self, buffer, offset=0):
raise Exception("Unimplemented")
def unpack_from_file(self, f, seek=None):
d = super(RedirectEntryFormat, self).unpack_from_file(f, seek)
url = read_null_terminated(f)
title = read_null_terminated(f)
parameter = f.read(dict(d)['parameterLen'])
d.extend([('url', url),
('title', title),
('parameter', parameter)]
)
return d
class MimeTypeListFormat(Format):
def __init__(self):
super(MimeTypeListFormat, self).__init__("")
def unpack(self, buffer, offset=0):
raise Exception("Unimplemented")
def unpack_from_file(self, f, seek=None):
if seek is not None:
f.seek(seek)
mimetypes = []
while True:
s = read_null_terminated(f)
if s == "":
return mimetypes
mimetypes.append(s)
class ZimFile(object):
def __init__(self, filename, cache_size=4):
self.filename = filename
self.redirectEntryFormat = RedirectEntryFormat()
self.articleEntryFormat = ArticleEntryFormat()
self.clusterFormat = ClusterFormat()
        # ZIM is a binary format; open in binary mode
        self.f = open(filename, "rb")
self.header = dict(HeaderFormat().unpack_from_file(self.f))
self.mimeTypeList = MimeTypeListFormat().unpack_from_file(self.f, self.header['mimeListPos'])
self.clusterCache = ClusterCache(cache_size=cache_size)
def close(self):
self.clusterCache.clear()
self.f.close()
def get_uuid(self):
"""Returns the UUID for this ZIM file"""
h = self.header
uuid_bytes = [h['uuid0'], h['uuid1'], h['uuid2'], h['uuid3'], h['uuid4'],
h['uuid5'], h['uuid6'], h['uuid7'], h['uuid8'], h['uuid9'],
h['uuid10'], h['uuid11'], h['uuid12'], h['uuid13'], h['uuid14'],
h['uuid15']]
s = string.join([chr(x) for x in uuid_bytes], "")
return uuid.UUID(bytes=s)
def get_kiwix_uuid(self):
"""Kiwix seems to have a bug in their library.xml which causes the
third UUID group to be repeated."""
u = self.get_uuid()
s = str(u).split("-")
return s[0] + "-" + s[1] + "-" + s[2] + "-" + s[2] + "-" + s[3] + s[4]
@timepro.profile()
def read_directory_entry(self, offset):
"""May return either a Redirect or Article entry depending on flag"""
timepro.start("seek")
self.f.seek(offset)
timepro.end("seek")
timepro.start("read(2)")
buf = self.f.read(2)
timepro.end("read(2)")
fields = struct.unpack('H', buf)
if fields[0] == 0xffff: # Then redirect
return dict(self.redirectEntryFormat.unpack_from_file(self.f, offset))
else:
return dict(self.articleEntryFormat.unpack_from_file(self.f, offset))
@timepro.profile()
def read_url_pointer(self, index):
self.f.seek(self.header['urlPtrPos'] + 8 * index)
buf = self.f.read(8)
fields = struct.unpack('Q', buf)
return fields[0]
def read_title_pointer(self, index):
self.f.seek(self.header['titlePtrPos'] + 4 * index)
buf = self.f.read(4)
        # Title pointers are 4 bytes; use 'I' (always 4 bytes) rather than
        # native 'L', which is 8 bytes on 64-bit builds and would fail here.
        fields = struct.unpack('I', buf)
return fields[0]
@timepro.profile()
def read_cluster_pointer(self, index):
"""Returns a pointer to the cluster"""
self.f.seek(self.header['clusterPtrPos'] + 8 * index)
buf = self.f.read(8)
fields = struct.unpack('Q', buf)
return fields[0]
@timepro.profile()
def read_directory_entry_by_index(self, index):
ptr = self.read_url_pointer(index)
d = self.read_directory_entry(ptr)
d['index'] = index
return d
@timepro.profile()
def read_blob(self, cluster_index, blob_index):
ptr = self.read_cluster_pointer(cluster_index)
cluster_data = self.clusterCache.get(self.f, ptr)
return cluster_data.read_blob(blob_index)
@timepro.profile()
def get_article_by_index(self, index, follow_redirect=True):
entry = self.read_directory_entry_by_index(index)
if 'redirectIndex' in entry.keys():
if follow_redirect:
logger.debug("REDIRECT TO " + str(entry['redirectIndex']))
return self.get_article_by_index(entry['redirectIndex'], follow_redirect)
else:
return None, entry['redirectIndex'], entry['namespace']
data = self.read_blob(entry['clusterNumber'], entry['blobNumber'])
mime = self.mimeTypeList[entry['mimetype']]
namespace = entry['namespace']
return data, mime, namespace
@timepro.profile()
def get_entry_by_url_linear(self, namespace, url):
for i in xrange(self.header['articleCount']):
entry = self.read_directory_entry_by_index(i)
if entry['url'] == url and entry['namespace'] == namespace:
return i
return None
@timepro.profile()
def get_entry_by_url(self, namespace, url):
nsurl = full_url(namespace, url)
def check(idx):
entry = self.read_directory_entry_by_index(idx)
return full_url(entry['namespace'], entry['url'])
m = binary_search(check, nsurl, 0, self.header['articleCount'])
if m is None:
return None, None
entry = self.read_directory_entry_by_index(m)
return entry, m
def get_article_by_url(self, namespace, url, follow_redirect=True):
entry, idx = self.get_entry_by_url(namespace, url)
if idx is None:
return None, None, None
return self.get_article_by_index(idx, follow_redirect=follow_redirect)
def get_main_page(self):
main_index = self.header['mainPage']
return self.get_article_by_index(main_index)
@timepro.profile()
def metadata(self):
metadata = {}
for i in xrange(self.header['articleCount'] - 1, -1, -1):
entry = self.read_directory_entry_by_index(i)
if entry['namespace'] == 'M':
m_name = entry['url']
# Lower case first letter to match kiwix-library names convention
m_name = re.sub(r'^([A-Z])', lambda pat: pat.group(1).lower(), m_name)
metadata[m_name] = self.get_article_by_index(i)[0]
else:
break
return metadata
def articles(self):
"""Generator which iterates through all articles"""
for i in xrange(self.header['articleCount']):
entry = self.read_directory_entry_by_index(i)
entry['fullUrl'] = full_url(entry['namespace'], entry['url'])
yield entry
def validate(self):
"""This is a mostly a self-test, but will validate various assumptions"""
# Test that URLs are properly ordered
last = None
for i in xrange(self.header['articleCount']):
entry = self.read_directory_entry_by_index(i)
assert entry is not None
nsurl = full_url(entry['namespace'], entry['url'])
if last is not None:
assert nsurl > last
last = nsurl
timepro.log_all()
timepro.reset()
# Test load by url performance
for i in xrange(0, self.header['articleCount'], 100):
entry = self.read_directory_entry_by_index(i)
entry2, idx = self.get_entry_by_url(entry['namespace'], entry['url'])
assert entry2 is not None
timepro.log_all()
timepro.reset()
# Test load of the last article
article, mime, ns = self.get_article_by_index(self.header['articleCount'] - 1)
entry = self.read_directory_entry_by_index(self.header['articleCount'] - 1)
entry2, idx = self.get_entry_by_url(entry['namespace'], entry['url'])
assert entry2 is not None
# Test load subset of all articles
for i in xrange(0, self.header['articleCount'], 100):
article, mime, ns = self.get_article_by_index(i)
if article is None: # Redirect
assert mime is not None
timepro.log_all()
timepro.reset()
def list_articles_by_url(self):
"""Mostly for testing"""
s = ""
for i in xrange(self.header['articleCount']):
entry = self.read_directory_entry_by_index(i)
s += full_url(entry['namespace'], entry['url']) + "\n"
return s
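# Usage sketch (an assumption, not part of the original module): 'ZimFile'
# stands for the reader class defined above and 'wikipedia.zim' for a local
# archive; both names are hypothetical.
if __name__ == '__main__':
    zf = ZimFile('wikipedia.zim')
    print zf.get_uuid()
    entry, idx = zf.get_entry_by_url('A', 'index.html')
    if idx is not None:
        data, mime, namespace = zf.get_article_by_index(idx)
        print mime, len(data)
    zf.close()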
| braddockcg/internet-in-a-box | iiab/zimpy.py | Python | bsd-2-clause | 17,389 |
import math
import numpy
import random
import operator
import types
import numpy as np
import h5py
import IPython as ipy
import os
import sys
def one_l_print(string, pad=20):
    # Pad to clear leftovers, then return the carriage so the next call
    # overwrites the same terminal line.
    string += ' ' * pad + '\r'
    sys.stdout.write(string)
    sys.stdout.flush()
# Define a context manager to suppress stdout
class suppress_stdout(object):
'''
A context manager for doing a "deep suppression" of stdout in
Python, i.e. will suppress all print, even if the print originates in a
compiled C/Fortran sub-function.
'''
def __init__(self):
# Open a null file
self.null_fds = os.open(os.devnull,os.O_RDWR)
# Save the actual stdout file descriptor
self.save_fds = os.dup(1)
def __enter__(self):
# Assign the null pointers to stdout
os.dup2(self.null_fds,1)
os.close(self.null_fds)
def __exit__(self, *_):
# Re-assign the real stdout back
os.dup2(self.save_fds,1)
# Close the null file
os.close(self.save_fds)
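# Brief demonstration of suppress_stdout (a sketch, not part of the original
# module): the first print is swallowed, the second appears normally.
def _demo_suppress_stdout():
    with suppress_stdout():
        print 'you should never see this'
    print 'stdout is restored'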
class Transform(object):
"""
Rotation and translation represented as 4 x 4 matrix
"""
def __init__(self, matrix):
self.matrix = numpy.array(matrix)
self.matrix_inv = None
self.zRot = False
    def inverse(self):
        """
        Returns transformation matrix that is the inverse of this one
        """
        # Must be 'is None': once matrix_inv holds a numpy array, '== None'
        # compares elementwise and the result cannot be truth-tested.
        if self.matrix_inv is None:
            self.matrix_inv = numpy.linalg.inv(self.matrix)
        return Transform(self.matrix_inv)
def __neg__(self):
return self.inverse()
def compose(self, trans):
"""
Returns composition of self and trans
"""
tr = Transform(numpy.dot(self.matrix, trans.matrix))
if self.zRot and trans.zRot:
return tr.pose()
else:
return tr
def __mul__(self, other):
return self.compose(other)
def pose(self, zthr = 0.01, fail = True):
"""
Convert to Pose
"""
if abs(1 - self.matrix[2][2]) < zthr:
theta = math.atan2(self.matrix[1][0], self.matrix[0][0])
return Pose(self.matrix[0][3], self.matrix[1][3], self.matrix[2][3], theta)
elif fail:
print self.matrix
raise Exception, "Not a valid 2.5D Pose"
else:
return None
def point(self):
return self.pose().point()
def applyToPoint(self, point):
"""
Transform a point into a new point.
"""
p = numpy.dot(self.matrix, point.matrix())
return Point(p[0], p[1], p[2], p[3])
def __call__(self, point):
return self.applyToPoint(point)
def __repr__(self):
return str(self.matrix)
def shortStr(self, trim = False):
return self.__repr__()
__str__ = __repr__
class Pose(Transform): # 2.5D transform
"""
Represent the x, y, z, theta pose of an object in 2.5D space
"""
def __init__(self, x, y, z, theta):
self.x = x
"""x coordinate"""
self.y = y
"""y coordinate"""
self.z = z
"""z coordinate"""
self.theta = fixAngle02Pi(theta)
"""rotation in radians"""
self.initTrans()
self.zRot = True
def initTrans(self):
cosTh = math.cos(self.theta)
sinTh = math.sin(self.theta)
self.reprString = None
Transform.__init__(self, [[cosTh, -sinTh, 0.0, self.x],
[sinTh, cosTh, 0.0, self.y],
[0.0, 0.0, 1.0, self.z],
[0, 0, 0, 1]])
def setX(self, x):
self.x = x
self.initTrans()
def setY(self, y):
self.y = y
self.initTrans()
def setZ(self, z):
self.z = z
self.initTrans()
def setTheta(self, theta):
self.theta = theta
self.initTrans()
def average(self, other, alpha):
"""
Weighted average of this pose and other
"""
return Pose(alpha * self.x + (1 - alpha) * other.x,
alpha * self.y + (1 - alpha) * other.y,
alpha * self.z + (1 - alpha) * other.z,
angleAverage(self.theta, other.theta, alpha))
def point(self):
"""
Return just the x, y, z parts represented as a C{Point}
"""
return Point(self.x, self.y, self.z)
def pose(self, fail = False):
return self
def near(self, pose, distEps, angleEps):
"""
Return True if pose is within distEps and angleEps of self
"""
return self.point().isNear(pose.point(), distEps) and \
nearAngle(self.theta, pose.pose().theta, angleEps)
def diff(self, pose):
"""
Return a pose that is the difference between self and pose (in
x, y, z, and theta)
"""
return Pose(self.x-pose.x,
self.y-pose.y,
self.z-pose.z,
fixAnglePlusMinusPi(self.theta-pose.theta))
def distance(self, pose):
"""
Return the distance between the x,y,z part of self and the x,y,z
part of pose.
"""
return self.point().distance(pose.point())
def totalDist(self, pose, angleScale = 1):
return self.distance(pose) + \
abs(fixAnglePlusMinusPi(self.theta-pose.theta)) * angleScale
def inverse(self):
"""
Return a transformation matrix that is the inverse of the
transform associated with this pose.
"""
return super(Pose, self).inverse().pose()
def xyztTuple(self):
"""
Representation of pose as a tuple of values
"""
return (self.x, self.y, self.z, self.theta)
def corrupt(self, e, eAng = None):
def corrupt(x, e):
return x + random.uniform(-e, e)
eAng = eAng or e
return Pose(corrupt(self.x, e), corrupt(self.y, e), corrupt(self.z, e),
fixAnglePlusMinusPi(corrupt(self.theta, eAng)))
def corruptGauss(self, mu, sigma, noZ = False):
def corrupt(x):
return x + random.gauss(mu, sigma)
return Pose(corrupt(self.x), corrupt(self.y),
self.z if noZ else corrupt(self.z),
fixAnglePlusMinusPi(corrupt(self.theta)))
def __repr__(self):
if not self.reprString:
# An attempt to make string equality useful
self.reprString = 'Pose[' + prettyString(self.x) + ', ' +\
prettyString(self.y) + ', ' +\
prettyString(self.z) + ', ' +\
(prettyString(self.theta) \
if self.theta <= 6.283 else prettyString(0.0))\
+ ']'
#self.reprString = 'Pose'+ prettyString(self.xyztTuple())
return self.reprString
def shortStr(self, trim = False):
return self.__repr__()
def __eq__(self, other):
return str(self) == str(other)
def __hash__(self):
return str(self).__hash__()
__str__ = __repr__
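# Small sketch (not part of the original module): compose a pose with a
# local offset, then recover the offset again through the inverse.
def _demo_pose_compose():
    base = Pose(1.0, 2.0, 0.0, math.pi / 2)
    offset = Pose(0.5, 0.0, 0.0, 0.0)
    combined = base.compose(offset)               # offset expressed in base's frame
    recovered = base.inverse().compose(combined)  # ~= offset again
    print combined, recovered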
class Point:
"""
Represent a point with its x, y, z values
"""
def __init__(self, x, y, z, w=1.0):
self.x = x
"""x coordinate"""
self.y = y
"""y coordinate"""
self.z = z
"""z coordinate"""
self.w = w
"""w coordinate"""
def matrix(self):
# recompute each time to allow changing coords... reconsider this later
return numpy.array([self.x, self.y, self.z, self.w])
def isNear(self, point, distEps):
"""
Return true if the distance between self and point is less
than distEps
"""
return self.distance(point) < distEps
def distance(self, point):
"""
Euclidean distance between two points
"""
dx = self.x - point.x
dy = self.y - point.y
dz = self.z - point.z
return math.sqrt(dx*dx + dy*dy + dz*dz)
def distanceXY(self, point):
"""
        Euclidean distance between two points, in the XY plane only
"""
return math.sqrt((self.x - point.x)**2 + (self.y - point.y)**2)
def distanceSq(self, point):
"""
Euclidean distance (squared) between two points
"""
dx = self.x - point.x
dy = self.y - point.y
dz = self.z - point.z
return dx*dx + dy*dy + dz*dz
def distanceSqXY(self, point):
"""
        Squared Euclidean distance between two points, in the XY plane only
"""
dx = self.x - point.x
dy = self.y - point.y
return dx*dx + dy*dy
def magnitude(self):
"""
Magnitude of this point, interpreted as a vector in 3-space
"""
return math.sqrt(self.x**2 + self.y**2 + self.z**2)
def xyzTuple(self):
"""
Return tuple of x, y, z values
"""
return (self.x, self.y, self.z)
def pose(self, angle = 0.0):
"""
Return a pose with the position of the point.
"""
return Pose(self.x, self.y, self.z, angle)
def point(self):
"""
Return a point, that is, self.
"""
return self
def __repr__(self):
if self.w == 1:
return 'Point'+ prettyString(self.xyzTuple())
if self.w == 0:
return 'Delta'+ prettyString(self.xyzTuple())
else:
return 'PointW'+ prettyString(self.xyzTuple()+(self.w,))
def shortStr(self, trim = False):
return self.__repr__()
def angleToXY(self, p):
"""
Return angle in radians of vector from self to p (in the xy projection)
"""
dx = p.x - self.x
dy = p.y - self.y
return math.atan2(dy, dx)
def add(self, point):
"""
Vector addition
"""
return Point(self.x + point.x, self.y + point.y, self.z + point.z)
def __add__(self, point):
return self.add(point)
def sub(self, point):
"""
Vector subtraction
"""
return Point(self.x - point.x, self.y - point.y, self.z - point.z)
def __sub__(self, point):
return self.sub(point)
def scale(self, s):
"""
Vector scaling
"""
return Point(self.x*s, self.y*s, self.z*s)
def __rmul__(self, s):
return self.scale(s)
def dot(self, p):
"""
Dot product
"""
return self.x*p.x + self.y*p.y + self.z*p.z
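# Sketch (not part of the original module): Point doubling as a 3-vector.
def _demo_point_ops():
    p = Point(1.0, 0.0, 0.0)
    q = Point(0.0, 1.0, 0.0)
    print (p + q).magnitude()   # sqrt(2)
    print p.dot(q)              # 0.0
    print p.angleToXY(q)        # 3*pi/4, the angle of the vector q - p in XY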
class LineXY:
"""
Line in 2D space
"""
def __init__(self, p1, p2):
"""
Initialize with two points that are on the line. Actually
store a normal and an offset from the origin
"""
self.theta = p1.angleToXY(p2)
"""normal angle"""
self.nx = -math.sin(self.theta)
"""x component of normal vector"""
self.ny = math.cos(self.theta)
"""y component of normal vector"""
self.off = p1.x * self.nx + p1.y * self.ny
"""offset along normal"""
def pointOnLine(self, p, eps):
"""
Return true if p is within eps of the line
"""
dist = abs(p.x*self.nx + p.y*self.ny - self.off)
return dist < eps
def __repr__(self):
return 'LineXY'+ prettyString((self.nx, self.ny, self.off))
def shortStr(self, trim = False):
return self.__repr__()
class LineSeg(LineXY):
"""
Line segment in 2D space
"""
def __init__(self, p1, p2):
"""
Initialize with two points that are on the line. Store one of
the points and the vector between them.
"""
self.B = p1
"""One point"""
self.C = p2
"""Other point"""
self.M = p2 - p1
"""Vector from the stored point to the other point"""
LineXY.__init__(self, p1, p2)
"""Initialize line attributes"""
def closestPoint(self, p):
"""
Return the point on the line that is closest to point p
"""
t0 = self.M.dot(p - self.B) / self.M.dot(self.M)
if t0 <= 0:
return self.B
elif t0 >= 1:
return self.B + self.M
else:
return self.B + t0 * self.M
def distToPoint(self, p):
"""
Shortest distance between point p and this line
"""
return p.distance(self.closestPoint(p))
def __repr__(self):
return 'LineSeg'+ prettyString((self.B, self.M))
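# Sketch (not part of the original module): closestPoint clamps to an
# endpoint whenever the projection parameter falls outside [0, 1].
def _demo_lineseg():
    seg = LineSeg(Point(0.0, 0.0, 0.0), Point(1.0, 0.0, 0.0))
    print seg.closestPoint(Point(0.5, 2.0, 0.0))   # interior: (0.5, 0, 0)
    print seg.closestPoint(Point(3.0, 0.0, 0.0))   # clamped: (1, 0, 0)
    print seg.distToPoint(Point(0.5, 2.0, 0.0))    # 2.0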
#####################
def localToGlobal(pose, point):
    # Transform exposes applyToPoint, not transformPoint.
    return pose.applyToPoint(point)
def localPoseToGlobalPose(pose1, pose2):
return pose1.compose(pose2)
# Given robot's pose in a global frame and a point in the global frame
# return coordinates of point in local frame
def globalToLocal(pose, point):
    return pose.inverse().applyToPoint(point)
def globalPoseToLocalPose(pose1, pose2):
return pose1.inverse().compose(pose2)
def sum(items):
"""
Defined to work on items other than numbers, which is not true for
the built-in sum.
"""
if len(items) == 0:
return 0
else:
result = items[0]
for item in items[1:]:
result += item
return result
def smash(lists):
return [item for sublist in lists for item in sublist]
def within(v1, v2, eps):
"""
    Return True if v1 is within eps of v2. All params are numbers
"""
return abs(v1 - v2) < eps
def nearAngle(a1,a2,eps):
"""
    Return True if angle a1 is within eps of angle a2. Don't use
    within() for this, because angles wrap around!
"""
return abs(fixAnglePlusMinusPi(a1-a2)) < eps
def nearlyEqual(x,y):
"""
Like within, but with the tolerance built in
"""
return abs(x-y)<.0001
def fixAnglePlusMinusPi(a):
"""
A is an angle in radians; return an equivalent angle between plus
and minus pi
"""
pi2 = 2.0* math.pi
while abs(a) > math.pi:
if a > math.pi:
a = a - pi2
elif a < -math.pi:
a = a + pi2
return a
def fixAngle02Pi(a):
"""
A is an angle in radians; return an equivalent angle between 0
and 2 pi
"""
pi2 = 2.0* math.pi
while a < 0 or a > pi2:
if a < 0:
a = a + pi2
elif a > pi2:
a = a - pi2
return a
def reverseCopy(items):
"""
Return a list that is a reversed copy of items
"""
itemCopy = items[:]
itemCopy.reverse()
return itemCopy
def dotProd(a, b):
"""
Return the dot product of two lists of numbers
"""
return sum([ai*bi for (ai,bi) in zip(a,b)])
def argmax(l, f):
"""
@param l: C{List} of items
@param f: C{Procedure} that maps an item into a numeric score
@returns: the element of C{l} that has the highest score
"""
vals = [f(x) for x in l]
return l[vals.index(max(vals))]
def argmaxWithVal(l, f):
"""
@param l: C{List} of items
@param f: C{Procedure} that maps an item into a numeric score
@returns: the element of C{l} that has the highest score and the score
"""
best = l[0]; bestScore = f(best)
for x in l:
xScore = f(x)
if xScore > bestScore:
best, bestScore = x, xScore
return (best, bestScore)
def argmaxIndex(l, f = lambda x: x):
"""
@param l: C{List} of items
@param f: C{Procedure} that maps an item into a numeric score
@returns: the index of C{l} that has the highest score
"""
best = 0; bestScore = f(l[best])
for i in range(len(l)):
xScore = f(l[i])
if xScore > bestScore:
best, bestScore = i, xScore
return (best, bestScore)
def argmaxIndexWithTies(l, f = lambda x: x):
"""
@param l: C{List} of items
@param f: C{Procedure} that maps an item into a numeric score
@returns: the index of C{l} that has the highest score
"""
best = []; bestScore = f(l[0])
for i in range(len(l)):
xScore = f(l[i])
if xScore > bestScore:
best, bestScore = [i], xScore
elif xScore == bestScore:
best, bestScore = best + [i], xScore
return (best, bestScore)
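# Sketch (not part of the original module): the argmax helpers side by side.
def _demo_argmax_helpers():
    xs = [3, 1, 4, 1, 5]
    print argmax(xs, lambda x: -x)           # 1 (the smallest value wins)
    print argmaxWithVal(xs, lambda x: x)     # (5, 5)
    print argmaxIndexWithTies([1, 2, 2])     # ([1, 2], 2)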
def randomMultinomial(dist):
"""
@param dist: List of positive numbers summing to 1 representing a
multinomial distribution over integers from 0 to C{len(dist)-1}.
@returns: random draw from that distribution
"""
r = random.random()
for i in range(len(dist)):
r = r - dist[i]
if r < 0.0:
return i
return "weird"
def clip(v, vMin, vMax):
"""
@param v: number
@param vMin: number (may be None, if no limit)
@param vMax: number greater than C{vMin} (may be None, if no limit)
@returns: If C{vMin <= v <= vMax}, then return C{v}; if C{v <
vMin} return C{vMin}; else return C{vMax}
"""
try:
return [clip(x, vMin, vMax) for x in v]
except TypeError:
        if vMin is None:
            if vMax is None:
                return v
            else:
                return min(v, vMax)
        else:
            if vMax is None:
return max(v, vMin)
else:
return max(min(v, vMax), vMin)
def flatten(M):
"""
basically a nice wrapper around reshape
@param M: matrix
@returns v: flattened matrix into a vector
"""
return np.reshape(M, (M.shape[0]*M.shape[1]))
def sign(x):
"""
Return 1, 0, or -1 depending on the sign of x
"""
if x > 0.0:
return 1
elif x == 0.0:
return 0
else:
return -1
def make2DArray(dim1, dim2, initValue):
"""
Return a list of lists representing a 2D array with dimensions
dim1 and dim2, filled with initialValue
"""
result = []
for i in range(dim1):
result = result + [makeVector(dim2, initValue)]
return result
def make2DArrayFill(dim1, dim2, initFun):
"""
Return a list of lists representing a 2D array with dimensions
dim1 and dim2, filled by calling initFun with every pair of
indices
"""
result = []
for i in range(dim1):
result = result + [makeVectorFill(dim2, lambda j: initFun(i, j))]
return result
def make3DArray(dim1, dim2, dim3, initValue):
"""
Return a list of lists of lists representing a 3D array with dimensions
dim1, dim2, and dim3 filled with initialValue
"""
result = []
for i in range(dim1):
result = result + [make2DArray(dim2, dim3, initValue)]
return result
def mapArray3D(array, f):
"""
Map a function over the whole array. Side effects the array. No
return value.
"""
for i in range(len(array)):
for j in range(len(array[0])):
for k in range(len(array[0][0])):
array[i][j][k] = f(array[i][j][k])
def makeVector(dim, initValue):
"""
Return a list of dim copies of initValue
"""
return [initValue]*dim
def makeVectorFill(dim, initFun):
"""
Return a list resulting from applying initFun to values from 0 to
dim-1
"""
return [initFun(i) for i in range(dim)]
def prettyString(struct):
"""
Make nicer looking strings for printing, mostly by truncating
floats
"""
if type(struct) == list:
return '[' + ', '.join([prettyString(item) for item in struct]) + ']'
elif type(struct) == tuple:
return '(' + ', '.join([prettyString(item) for item in struct]) + ')'
elif type(struct) == dict:
return '{' + ', '.join([str(item) + ':' + prettyString(struct[item]) \
for item in struct]) + '}'
elif type(struct) == float or type(struct) == numpy.float64:
struct = round(struct, 3)
if struct == 0: struct = 0 # catch stupid -0.0
return "%5.3f" % struct
else:
return str(struct)
def swapRange(x, y):
if x < y:
return range(x, y)
if x > y:
r = range(y, x)
r.reverse()
return r
return [x]
def avg(a, b):
if type(a) in (types.TupleType, types.ListType) and \
type(b) in (types.TupleType, types.ListType) and \
len(a) == len(b):
return tuple([avg(a[i], b[i]) for i in range(len(a))])
else:
return (a + b)/2.0
def recoverPath(volume, start, end):
if not volume:
return None
p = []
current = start
while current != end:
p.append(current)
successors = [[current[0] + i, current[1] + j] for (i,j) in [(1,0), (0, 1), (-1, 0), (0, -1)]]
        for v in volume:
            if list(v) in successors and not list(v) in p:
                current = list(v)
                break  # step to the first unvisited neighbour found
p.append(end)
return p
class SymbolGenerator:
"""
Generate new symbols guaranteed to be different from one another
Optionally, supply a prefix for mnemonic purposes
Call gensym("foo") to get a symbol like 'foo37'
"""
def __init__(self):
self.count = 0
def gensym(self, prefix = 'i'):
self.count += 1
return prefix + '_' + str(self.count)
gensym = SymbolGenerator().gensym
"""Call this function to get a new symbol"""
def logGaussian(x, mu, sigma):
"""
Log of the value of the gaussian distribution with mean mu and
stdev sigma at value x
"""
return -((x-mu)**2 / (2*sigma**2)) - math.log(sigma*math.sqrt(2*math.pi))
def gaussian(x, mu, sigma):
"""
Value of the gaussian distribution with mean mu and
stdev sigma at value x
"""
return math.exp(-((x-mu)**2 / (2*sigma**2))) /(sigma*math.sqrt(2*math.pi))
def lineIndices((i0, j0), (i1, j1)):
"""
Takes two cells in the grid (each described by a pair of integer
indices), and returns a list of the cells in the grid that are on the
line segment between the cells.
"""
ans = [(i0,j0)]
di = i1 - i0
dj = j1 - j0
t = 0.5
if abs(di) > abs(dj): # slope < 1
m = float(dj) / float(di) # compute slope
t += j0
if di < 0: di = -1
else: di = 1
m *= di
while (i0 != i1):
i0 += di
t += m
ans.append((i0, int(t)))
else:
if dj != 0: # slope >= 1
m = float(di) / float(dj) # compute slope
t += i0
if dj < 0: dj = -1
else: dj = 1
m *= dj
while j0 != j1:
j0 += dj
t += m
ans.append((int(t), j0))
return ans
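# Sketch (not part of the original module): grid cells visited between
# (0, 0) and (3, 2); a DDA-style walk that steps along the major axis.
def _demo_line_indices():
    print lineIndices((0, 0), (3, 2))   # [(0, 0), (1, 1), (2, 1), (3, 2)]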
def angleDiff(x, y):
twoPi = 2*math.pi
z = (x - y)%twoPi
if z > math.pi:
return z - twoPi
else:
return z
def inRange(v, r):
return r[0] <= v <= r[1]
def rangeOverlap(r1, r2):
return r2[0] <= r1[1] and r1[0] <= r2[1]
def rangeIntersect(r1, r2):
return (max(r1[0], r2[0]), min(r1[1], r2[1]))
def average(stuff):
return (1./float(len(stuff)))*sum(stuff)
def tuplify(x):
if isIterable(x):
return tuple([tuplify(y) for y in x])
else:
return x
def squash(listOfLists):
return reduce(operator.add, listOfLists)
# Average two angles
def angleAverage(th1, th2, alpha):
return math.atan2(alpha * math.sin(th1) + (1 - alpha) * math.sin(th2),
alpha * math.cos(th1) + (1 - alpha) * math.cos(th2))
def floatRange(lo, hi, stepsize):
"""
@returns: a list of numbers, starting with C{lo}, and increasing
by C{stepsize} each time, until C{hi} is equaled or exceeded.
C{lo} must be less than C{hi}; C{stepsize} must be greater than 0.
"""
    if stepsize == 0:
        raise ValueError('stepsize must be greater than 0 in floatRange')
result = []
v = lo
while v <= hi:
result.append(v)
v += stepsize
return result
def euclideanDistance(x, y):
return math.sqrt(sum([(xi - yi)**2 for (xi, yi) in zip(x, y)]))
def pop(x):
if isinstance(x, list):
if len(x) > 0:
return x.pop(0)
else:
return None
else:
try:
return x.next()
except StopIteration:
return None
def isIterable(x):
if type(x) in (str, unicode):
return False
try:
x_iter = iter(x)
return True
except:
return False
def tangentSpaceAdd(a, b):
res = a + b
for i in range(3, len(res), 4):
res[i, 0] = fixAnglePlusMinusPi(res[i, 0])
return res
def scalarMult(l, c):
return type(l)([i*c for i in l])
def componentAdd(a, b):
return type(a)([i + j for (i, j) in zip(a, b)])
def componentSubtract(a, b):
return componentAdd(a, [-1*i for i in b])
| dhadfieldmenell/bootstrapping-lfd | scripts/dhm_utils.py | Python | bsd-2-clause | 24,683 |
from fabric.api import settings, sudo, hide
def vhost_exists(vhost_name):
with settings(hide('running', 'stdout', 'stderr'), warn_only=True):
return sudo('rabbitmqctl list_vhosts | grep -c {0}'.format(vhost_name)) >= '1'
def user_exists(username):
with settings(hide('running', 'stdout', 'stderr'), warn_only=True):
return sudo('rabbitmqctl list_users | grep -c {0}'.format(username)) >= '1'
def create_vhost(vhost_name):
with settings(hide('running', 'stdout')):
sudo('rabbitmqctl add_vhost {vhost}'.format(vhost=vhost_name))
def create_user(username, password):
with settings(hide('running', 'stdout')):
sudo('rabbitmqctl add_user {username} {password}'.format(username=username, password=password))
def set_permissions(vhost, user):
with settings(hide('running', 'stdout')):
sudo('rabbitmqctl set_permissions -p {vhost} {user} ".*" ".*" ".*"'.format(vhost=vhost, user=user))
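def setup_vhost_user(vhost_name, username, password):
    """
    Convenience sketch (an assumption, not part of the original module):
    create the vhost and the user if they are missing, then grant the user
    full permissions on the vhost.
    """
    if not vhost_exists(vhost_name):
        create_vhost(vhost_name)
    if not user_exists(username):
        create_user(username, password)
    set_permissions(vhost_name, username)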
| sociateru/fabtools | fabtools/rabbitmq.py | Python | bsd-2-clause | 948 |
import matplotlib
from kid_readout.roach import baseband
matplotlib.use('agg')
import numpy as np
import time
import sys
from kid_readout.utils import data_file,sweeps
from kid_readout.analysis.resonator import Resonator
ri = baseband.RoachBasebandWide()
#ri.initialize(use_config=False)
#f0s = np.load('/home/gjones/workspace/apps/f8_fit_resonances.npy')
#f0s = np.load('/home/gjones/workspace/apps/first_pass_sc3x3_0813f9.npy')
#f0s = np.load('/home/gjones/workspace/apps/sc5x4_0813f10_first_pass.npy')#[:4]
#f0s = np.load('/home/gjones/workspace/readout/apps/sc3x3_0813f9_2014-02-11.npy')
f0s = np.load('/home/gjones/workspace/readout/apps/sc3x3_0813f5_2014-02-27.npy')
f0s.sort()
#f0s = f0s*(1-4e-5)
nf = len(f0s)
atonce = 4
if nf % atonce > 0:
print "extending list of resonators to make a multiple of ",atonce
f0s = np.concatenate((f0s,np.arange(1,1+atonce-(nf%atonce))+f0s.max()))
offsets = np.linspace(-4882.8125,4638.671875,20)#[5:15]
#offsets = np.concatenate(([-40e3,-20e3],offsets,[20e3,40e3]))/1e6
offsets = np.concatenate(([-40e3],offsets,[40e3]))/1e6
offsets = offsets*4
print f0s
print len(f0s)
start = time.time()
measured_freqs = sweeps.prepare_sweep(ri,f0s,offsets,nsamp=2**21)
print "loaded waveforms in", (time.time()-start),"seconds"
sys.stdout.flush()
time.sleep(1)
atten_list = [30]#np.linspace(27,34,8)#[30]#[35.5,33.5,46.5,43.5,40.5,37.5]
for atten in atten_list:
df = data_file.DataFile()
ri.set_dac_attenuator(atten)
sweep_data = sweeps.do_prepared_sweep(ri, nchan_per_step=atonce, reads_per_step=8)
df.add_sweep(sweep_data)
meas_cfs = []
idxs = []
for m in range(len(f0s)):
fr,s21,errors = sweep_data.select_by_freq(f0s[m])
thiscf = f0s[m]
res = Resonator(fr,s21,errors=errors)
fmin = fr[np.abs(s21).argmin()]
print "s21 fmin", fmin, "original guess",thiscf,"this fit", res.f_0
if abs(res.f_0 - thiscf) > 0.1:
if abs(fmin - thiscf) > 0.1:
print "using original guess"
meas_cfs.append(thiscf)
else:
print "using fmin"
meas_cfs.append(fmin)
else:
print "using this fit"
meas_cfs.append(res.f_0)
idx = np.unravel_index(abs(measured_freqs - meas_cfs[-1]).argmin(),measured_freqs.shape)
idxs.append(idx)
print meas_cfs
ri.add_tone_freqs(np.array(meas_cfs))
ri.select_bank(ri.tone_bins.shape[0]-1)
ri._sync()
time.sleep(0.5)
nsets = len(meas_cfs)/atonce
tsg = None
for iset in range(nsets):
selection = range(len(meas_cfs))[iset::nsets]
ri.select_fft_bins(selection)
ri._sync()
time.sleep(0.2)
dmod,addr = ri.get_data_seconds(4,demod=True)
print nsets,iset,tsg
tsg = df.add_timestream_data(dmod, ri, tsg=tsg)
df.sync()
df.log_hw_state(ri)
df.nc.sync()
df.nc.close()
print "completed in",((time.time()-start)/60.0),"minutes"
| ColumbiaCMB/kid_readout | apps/data_taking_scripts/old_scripts/fast_noise_sweep_downstairs.py | Python | bsd-2-clause | 2,991 |
from pyomo.environ import *
infinity = float('inf')
model = AbstractModel()
# Foods
model.F = Set()
# Nutrients
model.N = Set()
# Cost of each food
model.c = Param(model.F, within=PositiveReals)
# Amount of nutrient in each food
model.a = Param(model.F, model.N, within=NonNegativeReals)
# Lower and upper bound on each nutrient
model.Nmin = Param(model.N, within=NonNegativeReals, default=0.0)
model.Nmax = Param(model.N, within=NonNegativeReals, default=infinity)
# Volume per serving of food
model.V = Param(model.F, within=PositiveReals)
# Maximum volume of food consumed
model.Vmax = Param(within=PositiveReals)
# Number of servings consumed of each food
model.x = Var(model.F, within=NonNegativeIntegers)
# Minimize the cost of food that is consumed
def cost_rule(model):
return sum(model.c[i]*model.x[i] for i in model.F)
model.cost = Objective(rule=cost_rule)
# Limit nutrient consumption for each nutrient
def nutrient_rule(model, j):
value = sum(model.a[i,j]*model.x[i] for i in model.F)
return inequality(model.Nmin[j], value, model.Nmax[j])
model.nutrient_limit = Constraint(model.N, rule=nutrient_rule)
# Limit the volume of food consumed
def volume_rule(model):
return sum(model.V[i]*model.x[i] for i in model.F) <= model.Vmax
model.volume = Constraint(rule=volume_rule)
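# Usage sketch (assumes a data file 'diet.dat' supplying F, N, c, a, V and
# Vmax, plus an installed MIP solver such as GLPK; both are assumptions):
if __name__ == '__main__':
    from pyomo.opt import SolverFactory
    instance = model.create_instance('diet.dat')
    SolverFactory('glpk').solve(instance)
    instance.display()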
| Pyomo/PyomoGallery | diet/diet.py | Python | bsd-2-clause | 1,317 |
# ------------------------------------------------------------------------------
# Copyright (c) 2010-2014, EVEthing team
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
# Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
# INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
# NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY
# OF SUCH DAMAGE.
# ------------------------------------------------------------------------------
from datetime import timedelta, datetime
from django.db import models
from thing.models.planetarycolony import Colony
from thing.models.item import Item
class Pin(models.Model):
"""Planetary Pin"""
pin_id = models.BigIntegerField(db_index=True)
colony = models.ForeignKey(Colony, on_delete=models.DO_NOTHING)
type = models.ForeignKey(Item, related_name='+', on_delete=models.DO_NOTHING)
schematic = models.IntegerField()
cycle_time = models.IntegerField()
quantity_per_cycle = models.IntegerField()
installed = models.DateTimeField()
expires = models.DateTimeField()
last_launched = models.DateTimeField()
content_size = models.DecimalField(max_digits=16, decimal_places=4, default=0)
class Meta:
app_label = 'thing'
def __str__(self):
return '%s - %s' % (self.colony, self.type.name)
EXTRACTORS = [2848, 3060, 3061, 3062, 3063, 3064, 3067, 3068]
LAUNCHPADS = [2544, 2543, 2552, 2555, 2542, 2556, 2557, 2256]
STORAGE = [2541, 2536, 2257, 2558, 2535, 2560, 2561, 2562] + LAUNCHPADS
def get_capacity(self):
if self.type_id in self.LAUNCHPADS:
return 10000
elif self.type_id in self.STORAGE:
return 12000
return 0
def percent_full(self):
cap = self.get_capacity()
if cap > 0:
return (self.content_size / cap) * 100
else:
return 0
def alert_class(self):
diff = self.expires - datetime.now()
if diff >= timedelta(days=1):
return 'success'
elif diff > timedelta(hours=8):
return 'warning'
else:
return 'danger'
class PinContent(models.Model):
pin = models.ForeignKey(Pin, on_delete=models.DO_NOTHING)
item = models.ForeignKey(Item, related_name='+', on_delete=models.DO_NOTHING)
quantity = models.IntegerField()
class Meta:
app_label = 'thing'
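def nearly_full_pins(colony, threshold=90):
    """
    Query sketch (an assumption, not part of the original models): pins on
    `colony` whose storage is more than `threshold` percent full.
    """
    return [pin for pin in Pin.objects.filter(colony=colony)
            if pin.percent_full() > threshold]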
| cmptrgeekken/evething | thing/models/planetarypin.py | Python | bsd-2-clause | 3,456 |
import os
import uuid
from api.authentication.AuthenticatedHandler import AuthenticatedHandler
from api.Utils import authenticated
from api.model.models import User
class PUTHandler(AuthenticatedHandler):
@authenticated
def post(self, user_name):
session = self.settings['db']
allowed_types = [
{'type': 'image/jpeg', 'extension': '.jpg'},
{'type': 'image/png', 'extension': '.png'}
]
files = self.request.files
file_name = ""
# The user can only upload one avatar at a time.
if len(files) > 1:
            response = {'Error': 'Only one avatar at a time can be uploaded to the server.'}
self.set_status(413, "Error")
self.set_header("Access-Control-Allow-Origin", "*")
self.write(response)
return
for field_name, files in files.items():
for info in files:
filename, content_type = info['filename'], info['content_type']
filtered_type = [x for x in allowed_types if x['type'] == content_type]
                if len(filtered_type) != 1:
response = {'Error': 'Only JPEG and PNG files are allowed.'}
self.set_status(400, "Error")
self.set_header("Access-Control-Allow-Origin", "*")
self.write(response)
return
extension = filtered_type[0]['extension']
save_name = user_name + "_" + str(uuid.uuid4())
file_path = self.save_file(save_name, extension, info['body'])
# Save user avatar.
user = session.query(User).filter(User.username == user_name).one()
old_avatar_name = user.avatar
user.avatar = file_path
session.commit()
                if old_avatar_name:  # a brand-new user may have no previous avatar
                    self.delete_file(old_avatar_name)
response = {'path': file_path}
self.set_status(200, "Ok")
self.set_header("Access-Control-Allow-Origin", "*")
self.write(response)
return
@staticmethod
def save_file(name, extension, content):
filename = name + extension
current_dir = os.getcwd()
file_path = os.path.join(current_dir, "static/avatars/") + filename
with open(file_path, 'wb') as f:
f.write(content)
return filename
@staticmethod
def delete_file(filename):
current_dir = os.getcwd()
path = os.path.join(current_dir, "static/avatars/") + filename
os.remove(path)
def options(self, user_name):
response = {}
self.set_header("Content-Type", "application/json;charset=UTF-8")
self.set_header("Accept", "multipart/form-data, '*'")
self.set_header("Access-Control-Allow-Origin", "*")
self.set_header("Access-Control-Allow-Headers", "Authorization, Content-Type, X-Requested-With")
self.set_header("Access-Control-Allow-Methods ", "GET, POST, PUT, DELETE, OPTIONS")
self.set_status(200, "Ok")
self.write(response)
return
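# Client-side sketch (hypothetical: assumes the handler is routed at
# /users/<user_name>/avatar, that auth uses a bearer token, and that the
# 'requests' package is available; all of these are assumptions):
def upload_avatar(base_url, token, user_name, image_path):
    import requests
    with open(image_path, 'rb') as image:
        return requests.post(
            '%s/users/%s/avatar' % (base_url, user_name),
            headers={'Authorization': 'Bearer %s' % token},
            files={'avatar': image})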
| CrystalKoan/morpheus-api | api/routes/Uploads.py | Python | bsd-2-clause | 3,102 |
import logging
import os
import types
from io import BytesIO, IOBase
import pickle
import string
from collections import defaultdict
import archinfo
from archinfo.arch_soot import SootAddressDescriptor, ArchSoot
import cle
from .misc.ux import deprecated
l = logging.getLogger(name=__name__)
def load_shellcode(shellcode, arch, start_offset=0, load_address=0):
"""
Load a new project based on a string of raw bytecode.
:param shellcode: The data to load
:param arch: The name of the arch to use, or an archinfo class
:param start_offset: The offset into the data to start analysis (default 0)
:param load_address: The address to place the data in memory (default 0)
"""
return Project(
BytesIO(shellcode),
main_opts={
'backend': 'blob',
'arch': arch,
'entry_point': start_offset,
'base_addr': load_address,
}
)
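def _load_shellcode_example():  # pragma: no cover
    """
    Usage sketch (not part of the original module): lift two NOPs and a RET
    as AMD64 code and return the disassembled entry block.
    """
    proj = load_shellcode(b"\x90\x90\xc3", arch="AMD64")
    return proj.factory.block(proj.entry)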
class Project:
"""
This is the main class of the angr module. It is meant to contain a set of binaries and the relationships between
them, and perform analyses on them.
:param thing: The path to the main executable object to analyze, or a CLE Loader object.
The following parameters are optional.
:param default_analysis_mode: The mode of analysis to use by default. Defaults to 'symbolic'.
:param ignore_functions: A list of function names that, when imported from shared libraries, should
never be stepped into in analysis (calls will return an unconstrained value).
:param use_sim_procedures: Whether to replace resolved dependencies for which simprocedures are
available with said simprocedures.
:param exclude_sim_procedures_func: A function that, when passed a function name, returns whether or not to wrap
it with a simprocedure.
:param exclude_sim_procedures_list: A list of functions to *not* wrap with simprocedures.
:param arch: The target architecture (auto-detected otherwise).
:param simos: a SimOS class to use for this project.
:param bool translation_cache: If True, cache translated basic blocks rather than re-translating them.
:param support_selfmodifying_code: Whether we aggressively support self-modifying code. When enabled, emulation
will try to read code from the current state instead of the original memory,
regardless of the current memory protections.
:type support_selfmodifying_code: bool
:param store_function: A function that defines how the Project should be stored. Default to pickling.
:param load_function: A function that defines how the Project should be loaded. Default to unpickling.
:param analyses_preset: The plugin preset for the analyses provider (i.e. Analyses instance).
:type analyses_preset: angr.misc.PluginPreset
:param engines_preset: The plugin preset for the engines provider (i.e. EngineHub instance).
:type engines_preset: angr.misc.PluginPreset
Any additional keyword arguments passed will be passed onto ``cle.Loader``.
:ivar analyses: The available analyses.
:type analyses: angr.analysis.Analyses
:ivar entry: The program entrypoint.
:ivar factory: Provides access to important analysis elements such as path groups and symbolic execution results.
:type factory: AngrObjectFactory
:ivar filename: The filename of the executable.
:ivar loader: The program loader.
:type loader: cle.Loader
:ivar storage: Dictionary of things that should be loaded/stored with the Project.
:type storage: defaultdict(list)
"""
def __init__(self, thing,
default_analysis_mode=None,
ignore_functions=None,
use_sim_procedures=True,
exclude_sim_procedures_func=None,
exclude_sim_procedures_list=(),
arch=None, simos=None,
load_options=None,
translation_cache=True,
support_selfmodifying_code=False,
store_function=None,
load_function=None,
analyses_preset=None,
concrete_target=None,
engines_preset=None,
**kwargs):
# Step 1: Load the binary
if load_options is None: load_options = {}
load_options.update(kwargs)
if arch is not None:
load_options.update({'arch': arch})
if isinstance(thing, cle.Loader):
if load_options:
l.warning("You provided CLE options to angr but you also provided a completed cle.Loader object!")
self.loader = thing
self.filename = self.loader.main_object.binary
elif hasattr(thing, 'read') and hasattr(thing, 'seek'):
l.info("Loading binary from stream")
self.filename = None
self.loader = cle.Loader(thing, **load_options)
elif not isinstance(thing, str) or not os.path.exists(thing) or not os.path.isfile(thing):
raise Exception("Not a valid binary file: %s" % repr(thing))
else:
# use angr's loader, provided by cle
l.info("Loading binary %s", thing)
self.filename = thing
self.loader = cle.Loader(self.filename, concrete_target=concrete_target, **load_options)
# Step 2: determine its CPU architecture, ideally falling back to CLE's guess
if isinstance(arch, str):
self.arch = archinfo.arch_from_id(arch) # may raise ArchError, let the user see this
elif isinstance(arch, archinfo.Arch):
self.arch = arch
elif arch is None:
self.arch = self.loader.main_object.arch
else:
raise ValueError("Invalid arch specification.")
# Step 3: Set some defaults and set the public and private properties
if not default_analysis_mode:
default_analysis_mode = 'symbolic'
if not ignore_functions:
ignore_functions = []
if isinstance(exclude_sim_procedures_func, types.LambdaType):
l.warning("Passing a lambda type as the exclude_sim_procedures_func argument to "
"Project causes the resulting object to be un-serializable.")
self._sim_procedures = {}
self.concrete_target = concrete_target
        # auto_load_libs makes no sense when a concrete target is in use,
        # so warn the user about the conflict.
if self.concrete_target and load_options.get('auto_load_libs', None):
l.critical("Incompatible options selected for this project, please disable auto_load_libs if "
"you want to use a concrete target.")
raise Exception("Incompatible options for the project")
if self.concrete_target and self.arch.name not in ['X86', 'AMD64', 'ARMHF']:
l.critical("Concrete execution does not support yet the selected architecture. Aborting.")
raise Exception("Incompatible options for the project")
self._default_analysis_mode = default_analysis_mode
self._exclude_sim_procedures_func = exclude_sim_procedures_func
self._exclude_sim_procedures_list = exclude_sim_procedures_list
self._should_use_sim_procedures = use_sim_procedures
self._ignore_functions = ignore_functions
self._support_selfmodifying_code = support_selfmodifying_code
self._translation_cache = translation_cache
self._executing = False # this is a flag for the convenience API, exec() and terminate_execution() below
self._is_java_project = None
self._is_java_jni_project = None
if self._support_selfmodifying_code:
if self._translation_cache is True:
self._translation_cache = False
l.warning("Disabling IRSB translation cache because support for self-modifying code is enabled.")
self.entry = self.loader.main_object.entry
self.storage = defaultdict(list)
self.store_function = store_function or self._store
self.load_function = load_function or self._load
# Step 4: Set up the project's plugin hubs
# Step 4.1: Engines. Get the preset from the loader, from the arch, or use the default.
engines = EngineHub(self)
if engines_preset is not None:
engines.use_plugin_preset(engines_preset)
elif self.loader.main_object.engine_preset is not None:
try:
engines.use_plugin_preset(self.loader.main_object.engine_preset)
except AngrNoPluginError:
raise ValueError("The CLE loader asked to use a engine preset: %s" % \
self.loader.main_object.engine_preset)
else:
try:
engines.use_plugin_preset(self.arch.name)
except AngrNoPluginError:
engines.use_plugin_preset('default')
self.engines = engines
self.factory = AngrObjectFactory(self)
# Step 4.2: Analyses
self.analyses = AnalysesHub(self)
self.analyses.use_plugin_preset(analyses_preset if analyses_preset is not None else 'default')
# Step 4.3: ...etc
self.kb = KnowledgeBase(self)
# Step 5: determine the guest OS
if isinstance(simos, type) and issubclass(simos, SimOS):
self.simos = simos(self) #pylint:disable=invalid-name
elif isinstance(simos, str):
self.simos = os_mapping[simos](self)
elif simos is None:
self.simos = os_mapping[self.loader.main_object.os](self)
else:
raise ValueError("Invalid OS specification or non-matching architecture.")
# Step 6: Register simprocedures as appropriate for library functions
if isinstance(self.arch, ArchSoot) and self.simos.is_javavm_with_jni_support:
# If we execute a Java archive that includes native JNI libraries,
# we need to use the arch of the native simos for all (native) sim
# procedures.
sim_proc_arch = self.simos.native_arch
else:
sim_proc_arch = self.arch
for obj in self.loader.initial_load_objects:
self._register_object(obj, sim_proc_arch)
# Step 7: Run OS-specific configuration
self.simos.configure_project()
def _register_object(self, obj, sim_proc_arch):
"""
This scans through an objects imports and hooks them with simprocedures from our library whenever possible
"""
# Step 1: get the set of libraries we are allowed to use to resolve unresolved symbols
missing_libs = []
for lib_name in self.loader.missing_dependencies:
try:
missing_libs.append(SIM_LIBRARIES[lib_name])
except KeyError:
l.info("There are no simprocedures for missing library %s :(", lib_name)
# additionally provide libraries we _have_ loaded as a fallback fallback
# this helps in the case that e.g. CLE picked up a linux arm libc to satisfy an android arm binary
for lib in self.loader.all_objects:
if lib.provides in SIM_LIBRARIES:
simlib = SIM_LIBRARIES[lib.provides]
if simlib not in missing_libs:
missing_libs.append(simlib)
# Step 2: Categorize every "import" symbol in each object.
# If it's IGNORED, mark it for stubbing
# If it's blacklisted, don't process it
# If it matches a simprocedure we have, replace it
for reloc in obj.imports.values():
# Step 2.1: Quick filter on symbols we really don't care about
func = reloc.symbol
if func is None:
continue
if not func.is_function and func.type != cle.backends.symbol.SymbolType.TYPE_NONE:
continue
if not reloc.resolved:
# This is a hack, effectively to support Binary Ninja, which doesn't provide access to dependency
# library names. The backend creates the Relocation objects, but leaves them unresolved so that
# we can try to guess them here. Once the Binary Ninja API starts supplying the dependencies,
# The if/else, along with Project._guess_simprocedure() can be removed if it has no other utility,
# just leave behind the 'unresolved' debug statement from the else clause.
if reloc.owner.guess_simprocs:
l.debug("Looking for matching SimProcedure for unresolved %s from %s with hint %s",
func.name, reloc.owner, reloc.owner.guess_simprocs_hint)
self._guess_simprocedure(func, reloc.owner.guess_simprocs_hint)
else:
l.debug("Ignoring unresolved import '%s' from %s ...?", func.name, reloc.owner)
continue
export = reloc.resolvedby
if self.is_hooked(export.rebased_addr):
l.debug("Already hooked %s (%s)", export.name, export.owner)
continue
# Step 2.2: If this function has been resolved by a static dependency,
# check if we actually can and want to replace it with a SimProcedure.
# We opt out of this step if it is blacklisted by ignore_functions, which
# will cause it to be replaced by ReturnUnconstrained later.
if export.owner is not self.loader._extern_object and \
export.name not in self._ignore_functions:
if self._check_user_blacklists(export.name):
continue
owner_name = export.owner.provides
if isinstance(self.loader.main_object, cle.backends.pe.PE):
owner_name = owner_name.lower()
if owner_name not in SIM_LIBRARIES:
continue
sim_lib = SIM_LIBRARIES[owner_name]
if not sim_lib.has_implementation(export.name):
continue
l.info("Using builtin SimProcedure for %s from %s", export.name, sim_lib.name)
self.hook_symbol(export.rebased_addr, sim_lib.get(export.name, sim_proc_arch))
# Step 2.3: If 2.2 didn't work, check if the symbol wants to be resolved
# by a library we already know something about. Resolve it appropriately.
# Note that _check_user_blacklists also includes _ignore_functions.
# An important consideration is that even if we're stubbing a function out,
# we still want to try as hard as we can to figure out where it comes from
# so we can get the calling convention as close to right as possible.
elif reloc.resolvewith is not None and reloc.resolvewith in SIM_LIBRARIES:
sim_lib = SIM_LIBRARIES[reloc.resolvewith]
if self._check_user_blacklists(export.name):
if not func.is_weak:
l.info("Using stub SimProcedure for unresolved %s from %s", func.name, sim_lib.name)
self.hook_symbol(export.rebased_addr, sim_lib.get_stub(export.name, sim_proc_arch))
else:
l.info("Using builtin SimProcedure for unresolved %s from %s", export.name, sim_lib.name)
self.hook_symbol(export.rebased_addr, sim_lib.get(export.name, sim_proc_arch))
# Step 2.4: If 2.3 didn't work (the symbol didn't request a provider we know of), try
# looking through each of the SimLibraries we're using to resolve unresolved
# functions. If any of them know anything specifically about this function,
# resolve it with that. As a final fallback, just ask any old SimLibrary
# to resolve it.
elif missing_libs:
for sim_lib in missing_libs:
if sim_lib.has_metadata(export.name):
if self._check_user_blacklists(export.name):
if not func.is_weak:
l.info("Using stub SimProcedure for unresolved %s from %s", export.name, sim_lib.name)
self.hook_symbol(export.rebased_addr, sim_lib.get_stub(export.name, sim_proc_arch))
else:
l.info("Using builtin SimProcedure for unresolved %s from %s", export.name, sim_lib.name)
self.hook_symbol(export.rebased_addr, sim_lib.get(export.name, sim_proc_arch))
break
else:
if not func.is_weak:
l.info("Using stub SimProcedure for unresolved %s", export.name)
self.hook_symbol(export.rebased_addr, missing_libs[0].get(export.name, sim_proc_arch))
# Step 2.5: If 2.4 didn't work (we have NO SimLibraries to work with), just
# use the vanilla ReturnUnconstrained, assuming that this isn't a weak func
elif not func.is_weak:
l.info("Using stub SimProcedure for unresolved %s", export.name)
self.hook_symbol(export.rebased_addr, SIM_PROCEDURES['stubs']['ReturnUnconstrained'](display_name=export.name, is_stub=True))
def _guess_simprocedure(self, f, hint):
"""
Does symbol name `f` exist as a SIM_PROCEDURE? If so, return it, else return None.
Narrows down the set of libraries to search based on hint.
Part of the hack to enable Binary Ninja support. Remove if _register_objects() stops using it.
"""
# First, filter the SIM_LIBRARIES to a reasonable subset based on the hint
        if hint == "win":
            hinted_libs = filter(lambda lib: lib.endswith(".dll"), SIM_LIBRARIES)
        else:
            hinted_libs = filter(lambda lib: ".so" in lib, SIM_LIBRARIES)
for lib in hinted_libs:
if SIM_LIBRARIES[lib].has_implementation(f.name):
l.debug("Found implementation for %s in %s", f, lib)
self.hook_symbol(f.relative_addr, (SIM_LIBRARIES[lib].get(f.name, self.arch)))
break
else:
l.debug("Could not find matching SimProcedure for %s, ignoring.", f.name)
def _check_user_blacklists(self, f):
"""
Has symbol name `f` been marked for exclusion by any of the user
parameters?
"""
return not self._should_use_sim_procedures or \
f in self._exclude_sim_procedures_list or \
f in self._ignore_functions or \
(self._exclude_sim_procedures_func is not None and self._exclude_sim_procedures_func(f))
@staticmethod
def _addr_to_str(addr):
return "%s" % repr(addr) if isinstance(addr, SootAddressDescriptor) else "%#x" % addr
#
# Public methods
# They're all related to hooking!
#
# pylint: disable=inconsistent-return-statements
def hook(self, addr, hook=None, length=0, kwargs=None, replace=False):
"""
Hook a section of code with a custom function. This is used internally to provide symbolic
summaries of library functions, and can be used to instrument execution or to modify
control flow.
When hook is not specified, it returns a function decorator that allows easy hooking.
Usage::
# Assuming proj is an instance of angr.Project, we will add a custom hook at the entry
# point of the project.
@proj.hook(proj.entry)
def my_hook(state):
print("Welcome to execution!")
:param addr: The address to hook.
:param hook: A :class:`angr.project.Hook` describing a procedure to run at the
given address. You may also pass in a SimProcedure class or a function
directly and it will be wrapped in a Hook object for you.
:param length: If you provide a function for the hook, this is the number of bytes
that will be skipped by executing the hook by default.
:param kwargs: If you provide a SimProcedure for the hook, these are the keyword
arguments that will be passed to the procedure's `run` method
eventually.
:param replace: Control the behavior on finding that the address is already hooked. If
true, silently replace the hook. If false (default), warn and do not
replace the hook. If none, warn and replace the hook.
"""
if hook is None:
# if we haven't been passed a thing to hook with, assume we're being used as a decorator
return self._hook_decorator(addr, length=length, kwargs=kwargs)
if kwargs is None: kwargs = {}
l.debug('hooking %s with %s', self._addr_to_str(addr), str(hook))
if self.is_hooked(addr):
if replace is True:
pass
elif replace is False:
l.warning("Address is already hooked, during hook(%s, %s). Not re-hooking.", self._addr_to_str(addr), hook)
return
else:
l.warning("Address is already hooked, during hook(%s, %s). Re-hooking.", self._addr_to_str(addr), hook)
if isinstance(hook, type):
raise TypeError("Please instanciate your SimProcedure before hooking with it")
if callable(hook):
hook = SIM_PROCEDURES['stubs']['UserHook'](user_func=hook, length=length, **kwargs)
self._sim_procedures[addr] = hook
def is_hooked(self, addr):
"""
Returns True if `addr` is hooked.
:param addr: An address.
:returns: True if addr is hooked, False otherwise.
"""
return addr in self._sim_procedures
def hooked_by(self, addr):
"""
Returns the current hook for `addr`.
:param addr: An address.
:returns: None if the address is not hooked.
"""
if not self.is_hooked(addr):
l.warning("Address %s is not hooked", self._addr_to_str(addr))
return None
return self._sim_procedures[addr]
def unhook(self, addr):
"""
Remove a hook.
:param addr: The address of the hook.
"""
if not self.is_hooked(addr):
l.warning("Address %s not hooked", self._addr_to_str(addr))
return
del self._sim_procedures[addr]
def hook_symbol(self, symbol_name, simproc, kwargs=None, replace=None):
"""
Resolve a dependency in a binary. Looks up the address of the given symbol, and then hooks that
address. If the symbol was not available in the loaded libraries, this address may be provided
by the CLE externs object.
Additionally, if instead of a symbol name you provide an address, some secret functionality will
kick in and you will probably just hook that address, UNLESS you're on powerpc64 ABIv1 or some
yet-unknown scary ABI that has its function pointers point to something other than the actual
functions, in which case it'll do the right thing.
:param symbol_name: The name of the dependency to resolve.
:param simproc: The SimProcedure instance (or function) with which to hook the symbol
:param kwargs: If you provide a SimProcedure for the hook, these are the keyword
arguments that will be passed to the procedure's `run` method
eventually.
:param replace: Control the behavior on finding that the address is already hooked. If
true, silently replace the hook. If false, warn and do not replace the
hook. If none (default), warn and replace the hook.
:returns: The address of the new symbol.
:rtype: int
"""
if type(symbol_name) is not int:
sym = self.loader.find_symbol(symbol_name)
if sym is None:
# it could be a previously unresolved weak symbol..?
new_sym = None
for reloc in self.loader.find_relevant_relocations(symbol_name):
if not reloc.symbol.is_weak:
raise Exception("Symbol is strong but we couldn't find its resolution? Report to @rhelmot.")
if new_sym is None:
new_sym = self.loader.extern_object.make_extern(symbol_name)
reloc.resolve(new_sym)
reloc.relocate([])
if new_sym is None:
l.error("Could not find symbol %s", symbol_name)
return None
sym = new_sym
basic_addr = sym.rebased_addr
else:
basic_addr = symbol_name
symbol_name = None
hook_addr, _ = self.simos.prepare_function_symbol(symbol_name, basic_addr=basic_addr)
self.hook(hook_addr, simproc, kwargs=kwargs, replace=replace)
return hook_addr
def is_symbol_hooked(self, symbol_name):
"""
Check if a symbol is already hooked.
:param str symbol_name: Name of the symbol.
:return: True if the symbol can be resolved and is hooked, False otherwise.
:rtype: bool
"""
sym = self.loader.find_symbol(symbol_name)
if sym is None:
l.warning("Could not find symbol %s", symbol_name)
return False
hook_addr, _ = self.simos.prepare_function_symbol(symbol_name, basic_addr=sym.rebased_addr)
return self.is_hooked(hook_addr)
def unhook_symbol(self, symbol_name):
"""
Remove the hook on a symbol.
This function will fail if the symbol is provided by the extern object, as that would result in a state where
analysis would be unable to cope with a call to this symbol.
"""
sym = self.loader.find_symbol(symbol_name)
if sym is None:
l.warning("Could not find symbol %s", symbol_name)
return False
if sym.owner is self.loader._extern_object:
l.warning("Refusing to unhook external symbol %s, replace it with another hook if you want to change it",
symbol_name)
return False
hook_addr, _ = self.simos.prepare_function_symbol(symbol_name, basic_addr=sym.rebased_addr)
self.unhook(hook_addr)
return True
def rehook_symbol(self, new_address, symbol_name):
"""
Move the hook for a symbol to a specific address
:param new_address: the new address that will trigger the SimProc execution
:param symbol_name: the name of the symbol (f.i. strcmp )
:return: None
"""
new_sim_procedures = {}
for key_address, simproc_obj in self._sim_procedures.items():
if simproc_obj.display_name == symbol_name:
new_sim_procedures[new_address] = simproc_obj
else:
new_sim_procedures[key_address] = simproc_obj
self._sim_procedures = new_sim_procedures
#
# A convenience API (in the style of triton and manticore) for symbolic execution.
#
def execute(self, *args, **kwargs):
"""
This function is a symbolic execution helper in the simple style
supported by triton and manticore. It designed to be run after
setting up hooks (see Project.hook), in which the symbolic state
can be checked.
This function can be run in three different ways:
- When run with no parameters, this function begins symbolic execution
from the entrypoint.
- It can also be run with a "state" parameter specifying a SimState to
begin symbolic execution from.
- Finally, it can accept any arbitrary keyword arguments, which are all
passed to project.factory.full_init_state.
If symbolic execution finishes, this function returns the resulting
simulation manager.
"""
if args:
state = args[0]
else:
state = self.factory.full_init_state(**kwargs)
pg = self.factory.simulation_manager(state)
self._executing = True
return pg.run(until=lambda lpg: not self._executing)
def terminate_execution(self):
"""
Terminates a symbolic execution that was started with Project.execute().
"""
self._executing = False
#
# Private methods related to hooking
#
def _hook_decorator(self, addr, length=0, kwargs=None):
"""
Return a function decorator that allows easy hooking. Please refer to hook() for its usage.
:return: The function decorator.
"""
def hook_decorator(func):
self.hook(addr, func, length=length, kwargs=kwargs)
return func
return hook_decorator
#
# Pickling
#
def __getstate__(self):
try:
store_func, load_func = self.store_function, self.load_function
self.store_function, self.load_function = None, None
return dict(self.__dict__)
finally:
self.store_function, self.load_function = store_func, load_func
def __setstate__(self, s):
self.__dict__.update(s)
def _store(self, container):
# If container is a filename.
if isinstance(container, str):
with open(container, 'wb') as f:
try:
pickle.dump(self, f, pickle.HIGHEST_PROTOCOL)
except RuntimeError as e: # maximum recursion depth can be reached here
l.error("Unable to store Project: '%s' during pickling", e)
# If container is an open file.
elif isinstance(container, IOBase):
try:
pickle.dump(self, container, pickle.HIGHEST_PROTOCOL)
except RuntimeError as e: # maximum recursion depth can be reached here
l.error("Unable to store Project: '%s' during pickling", e)
# If container is just a variable.
else:
try:
container = pickle.dumps(self, pickle.HIGHEST_PROTOCOL)
except RuntimeError as e: # maximum recursion depth can be reached here
l.error("Unable to store Project: '%s' during pickling", e)
@staticmethod
def _load(container):
if isinstance(container, str):
# If container is a filename.
if all(c in string.printable for c in container) and os.path.exists(container):
with open(container, 'rb') as f:
return pickle.load(f)
# If container is a pickle string.
else:
return pickle.loads(container)
# If container is an open file
elif isinstance(container, IOBase):
return pickle.load(container)
# What else could it be?
else:
l.error("Cannot unpickle container of type %s", type(container))
return None
def __repr__(self):
return '<Project %s>' % (self.filename if self.filename is not None else 'loaded from stream')
#
# Properties
#
@property
def use_sim_procedures(self):
return self._should_use_sim_procedures
@property
def is_java_project(self):
"""
Indicates if the project's main binary is a Java Archive.
"""
if self._is_java_project is None:
self._is_java_project = isinstance(self.arch, ArchSoot)
return self._is_java_project
@property
def is_java_jni_project(self):
"""
Indicates if the project's main binary is a Java Archive, which
interacts during its execution with native libraries (via JNI).
"""
if self._is_java_jni_project is None:
self._is_java_jni_project = isinstance(self.arch, ArchSoot) and self.simos.is_javavm_with_jni_support
return self._is_java_jni_project
#
# Compatibility
#
@property
@deprecated(replacement='simos')
def _simos(self):
return self.simos
from .errors import AngrNoPluginError
from .factory import AngrObjectFactory
from angr.simos import SimOS, os_mapping
from .analyses.analysis import AnalysesHub
from .knowledge_base import KnowledgeBase
from .engines import EngineHub
from .procedures import SIM_PROCEDURES, SIM_LIBRARIES
| iamahuman/angr | angr/project.py | Python | bsd-2-clause | 33,203 |
import json
import re
from gernetHelpers import *
import operator
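# These helpers walk a gernet.json project description (topology, depends,
# args, emit/receive pins) and emit C source fragments -- mostly preprocessor
# macros such as <artifactId>_create(...) -- for the code generator.
# PROJECTS_ROOT_PATH, readJson and filterTypes_c come from gernetHelpers,
# imported above.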
def recurseDependencies(dependenciesDict):
newDependenciesDict = dict()
hasNewData = False
for k,v in dependenciesDict.items():
read_data = readJson(os.path.join(PROJECTS_ROOT_PATH,getPath(v["path"]),'gernet.json') )
if read_data == None:
continue
for v in read_data["topology"]+read_data["depends"]:
if not dependenciesDict.has_key(v["path"]) and not newDependenciesDict.has_key(v["path"]):
newDependenciesDict[v["path"]] = v
hasNewData = True
if hasNewData:
newDependenciesDict = recurseDependencies(newDependenciesDict)
i = len(dependenciesDict)
for k,v in newDependenciesDict.items():
if not dependenciesDict.has_key(v["path"]):
dependenciesDict[v["path"]] = v
dependenciesDict[v["path"]]['_order'] = i
i = i + 1
return dependenciesDict
def getDependenciesList(a):
dependenciesDict = dict()
i = len(dependenciesDict)
for v in a.read_data["topology"]+a.read_data["depends"]:
if not dependenciesDict.has_key(v["path"]):
dependenciesDict[v["path"]] = v
dependenciesDict[v["path"]]['_order'] = i
i = i + 1
dependenciesDict = recurseDependencies(dependenciesDict)
    # sort dependencies by the order in which they were discovered
dictToSort = dict()
for k,v in dependenciesDict.items():
dictToSort[v["_order"]] = v
return sorted(dictToSort.items(), key=operator.itemgetter(0))
def getReaderWriterArgumentsStrarrDel0(a):
readerWriterArgumentsStrArr = []
readerWriterArguments = a.rwArguments
if readerWriterArguments[0]["name"] != "gridId":
raise Exception("getReaderWriterArgumentsStrArr: readerWriterArguments[0][\"name\"]!=\"gridId\"")
for value in readerWriterArguments:
if value["type"] == "unsigned":
value["type"] = "int"
readerWriterArgumentsStrArr.append(value["type"]+" "+value["name"])
del readerWriterArgumentsStrArr[0]
return readerWriterArgumentsStrArr
def getReaderWriterArgumentsStr(a):
readerWriterArgumentsStrArr = ["_NAME_","_that"]
readerWriterArguments = a.rwArguments
if readerWriterArguments[0]["name"] != "gridId":
raise Exception("getReaderWriterArgumentsStrArr: readerWriterArguments[0][\"name\"]!=\"gridId\"")
for value in readerWriterArguments:
if value["type"] == "unsigned":
value["type"] = "int"
readerWriterArgumentsStrArr.append("_"+value["name"])
return ','.join(readerWriterArgumentsStrArr)
def getFieldsArrStr(a):
arr = []
props = []
if a.read_data.has_key("props"):
props = a.read_data["props"]
for v in a.read_data["args"]+props:
t, isObject, isArray, isSerializable = filterTypes_c(v["type"])
if v.has_key("size"):
if not isArray:
raise Exception("getFieldsArrStr: size of property "+str(v["name"])+" was specified but type is not array!")
# if isArray:
# if not v.has_key("size")
# arr.append(v["type"][:-2]+" _"+v["name"]+"_["+str(v["size"])+"]")
# else:
# raise Exception("prop "+v["type"]+" "+v["name"]+" is Array, but size was not specified")
# v["type"] = t
arr.append(artifactId(t)+" "+v["name"])
for i,v in enumerate(a.read_data["emit"]):
arr.append("writer w"+str(i))
for i,v in enumerate(a.read_data["receive"]):
arr.append("reader r"+str(i))
noSelectors = False
if a.read_data.has_key("noSelectors"):
noSelectors = a.read_data["noSelectors"]
if len(a.read_data["receive"]) > 1 and not noSelectors:
arr.append("reader rSelect")
arr.append("selector_cnets_osblinnikov_github_com readersSelector")
return arr
def getargsArrStrs(a):
arr = ["_NAME_"]
for v in a.read_data["args"]:
t, isObject, isArray, isSerializable = filterTypes_c(v["type"])
# v["type"] = t
arr.append("_"+v["name"])
for i,v in enumerate(a.read_data["emit"]):
arr.append("_w"+str(i))
for i,v in enumerate(a.read_data["receive"]):
arr.append("_r"+str(i))
return arr
def groupId(path):
path = path.split(".")
del path[-1]
return '.'.join(path)
def artifactId(path):
fullNameList = path.split('.')
return '_'.join(fullNameList)
def getPath(path):
path = path.split('.')
arr = []
arr.append(path[1]+"."+path[0])
to_delete = [0,1]
for offset, index in enumerate(to_delete):
index -= offset
del path[index]
return '/'.join(arr+path)
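# Examples for the three path helpers above (path string is illustrative):
#   groupId("com.github.example.cnets.buffer")    -> "com.github.example.cnets"
#   artifactId("com.github.example.cnets.buffer") -> "com_github_example_cnets_buffer"
#   getPath("com.github.example.cnets.buffer")    -> "github.com/example/cnets/buffer"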
def parsingGernet(a):
a.read_data = None
a.read_data = readJson(a.prefix)
fullName = a.read_data["path"]
a.fullName_ = artifactId(fullName)
# a.version = a.read_data["ver"]
fullNameList = fullName.split('.')
a.className = fullNameList[-1]
a.companyDomain = fullNameList[1]+'.'+fullNameList[0]
a.company = fullNameList[1]
del fullNameList[-1]
a.domainName = '.'.join(fullNameList)
fullNameList = fullName.split('.')
to_delete = [0,1]
for offset, index in enumerate(to_delete):
index -= offset
del fullNameList[index]
a.domainPath = a.companyDomain+'/'+('/'.join(fullNameList))
if not a.read_data.has_key("type") or a.read_data["type"]!="buffer":
if len(a.read_data["topology"])==0:
a.classImplements = "Runnable"
else:
a.classImplements = "" #GetRunnables
else:
a.classImplements = "readerWriterInterface"
a.defaulRwArguments = [{"name":"gridId","type":"unsigned"}]
a.rwArguments = [{"name":"gridId","type":"unsigned"}]
if a.read_data.has_key("rwArgs"):
a.rwArguments+=a.read_data["rwArgs"]
# a.arrDel0 = getReaderWriterArgumentsStrarrDel0(a.rwArguments)
a.rwArgumentsStr = getReaderWriterArgumentsStr(a)
def getProps(a):
fieldsArray = getFieldsArrStr(a)
out = " "+';'.join(fieldsArray)+';\n' if len(fieldsArray)>0 else ''
return out
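# getConstructor emits a C preprocessor macro <artifactId>_create(...) that
# declares the struct, copies constructor arguments, wires any selectable
# readers through a selector object, and finally expands the generated
# initializeBuffers/initializeKernels fragments; every emitted line ends in
# '\' so the whole body stays inside a single #define.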
def getConstructor(a):
out = ""
argsArray = getargsArrStrs(a)
out += "#define "+a.fullName_+"_create("+','.join(argsArray)+")"
out += "\\\n "+a.fullName_+" _NAME_;"
for value in a.read_data["args"]:
out += "\\\n _NAME_."+value["name"]+" = _"+value["name"]+";"
#SELECTABLE
selectableArgs = []
for i,v in enumerate(a.read_data["args"]):
if v.has_key("selectable") and v["selectable"] == True:
if v["type"] != 'reader[]':
raise Exception("every selectable argument should have reader[] type, but we have "+v["type"]+" "+v["name"])
selectableArgs.append(v)
noSelectors = False
if a.read_data.has_key("noSelectors"):
noSelectors = a.read_data["noSelectors"]
if not noSelectors and (len(a.read_data["receive"]) > 1 or len(selectableArgs)>0):
selectablesCount = str(len(a.read_data["receive"]))
for i,v in enumerate(selectableArgs):
selectablesCount += " + "+str(v["name"])+".length"
out += "\\\n arrayObject_create(_NAME_##_arrReaders_, reader, "+selectablesCount+")"
lastId = 0
for i,v in enumerate(a.read_data["receive"]):
out += "\\\n ((reader*)_NAME_##_arrReaders_.array)["+str(i)+"] = _r"+str(i)+";"
lastId = i
if len(selectableArgs)>0:
out += "\\\n int totalLength = "+str(lastId + 1)+";"
for i,v in enumerate(selectableArgs):
out += "\\\n for(int i=0;i<"+str(v["name"])+".length; i++){"
out += "\\\n ((reader*)_NAME_##_arrReaders_.array)[totalLength + i] = "+v["name"]+"[i];"
out += "\\\n }"
if i+1 != len(selectableArgs):
out += "\\\n totalLength += "+str(v["name"])+".length;"
out += "\\\n selector_cnets_osblinnikov_github_com_create(_NAME_##readersSelector, _NAME_##_arrReaders_);"
out += "\\\n _NAME_.readersSelector = _NAME_##readersSelector;"
out += "\\\n selector_cnets_osblinnikov_github_com_createReader(_NAME_##_rSelect_,&_NAME_.readersSelector,-1,0)"
out += "\\\n _NAME_.rSelect = _NAME_##_rSelect_;"
#END OF ARGS AND SELECTABLE ARG
out += "\\\n "+a.fullName_+"_onCreateMacro(_NAME_)"
if a.read_data.has_key("props"):
for value in a.read_data["props"]:
t, isObject, isArray, isSerializable = filterTypes_c(value["type"])
if value.has_key("value"):
out += "\\\n _NAME_."+value["name"]+" = "+value["value"]+";"
elif isArray:
arrItemType, itemIsObject, itemIsArray, itemisSerializable = filterTypes_c(value["type"][:-2])
if isinstance(value["size"], basestring):
value["size"] = "_"+value["size"]
out += "\\\n arrayObject_create(_NAME_##_"+value["name"]+"_, "+'_'.join(arrItemType.split('.'))+", "+str(value["size"])+")"
out += "\\\n _NAME_."+value["name"]+" = _NAME_##_"+value["name"]+"_;"
for i,v in enumerate(a.read_data["emit"]):
out += "\\\n _NAME_.w"+str(i)+" = _w"+str(i)+";"
for i,v in enumerate(a.read_data["receive"]):
out += "\\\n _NAME_.r"+str(i)+" = _r"+str(i)+";"
if a.read_data.has_key("props"):
for i,v in enumerate(a.read_data["props"]):
if v.has_key("value"):
out += "\\\n _NAME_."+v["name"]+" = "+v["value"]+";"
out += "\\\n "+a.fullName_+"_initialize(&_NAME_);"
out += initializeBuffers(a)
out += "\\\n "+a.fullName_+"_onKernels(&_NAME_);"
out += initializeKernels(a)
return out
def getContainerClass(a):
arrDel0 = getReaderWriterArgumentsStrarrDel0(a)
out = ""
if len(arrDel0)>0:
out += "\ntypedef struct "+a.fullName_+"_container{"
for rwArg in arrDel0:
out += "\n "+rwArg+";"
out += "\n}"+a.fullName_+"_container;"
return out
def getReaderWriter(a):
out = ""
out += "#define "+a.fullName_+"_createReader("+a.rwArgumentsStr+")"
if len(a.rwArguments) == 0:
raise Exception("len(a.rwArguments) == 0")
elif len(a.rwArguments) > 1:
out += "\\\n "+a.fullName_+"_container _NAME_##_container;"
for value in a.rwArguments:
if value['name'] != "gridId":
out += "\\\n _NAME_##_container."+value['name']+" = _"+value["name"]+";"
out += "\\\n reader _NAME_ = "+a.fullName_+"_getReader(_that,(void*)&_NAME_##_container,_gridId);"
else:
out += "\\\n reader _NAME_ = "+a.fullName_+"_getReader(_that,NULL,_gridId);"
out += "\n\n#define "+a.fullName_+"_createWriter("+a.rwArgumentsStr+")"
if len(a.rwArguments) == 0:
raise Exception("len(a.rwArguments) == 0")
elif len(a.rwArguments) > 1:
out += "\\\n "+a.fullName_+"_container _NAME_##_container;"
for value in a.rwArguments:
if value['name'] != "gridId":
out += "\\\n _NAME_##_container."+value['name']+" = _"+value["name"]+";"
out += "\\\n writer _NAME_ = "+a.fullName_+"_getWriter(_that,(void*)&_NAME_##_container,_gridId);"
else:
out += "\\\n writer _NAME_ = "+a.fullName_+"_getWriter(_that,NULL,_gridId);"
return out
def directoryFromBlockPath(path):
pathList = path.split('.')
domain = pathList[0]
del pathList[0]
domain = pathList[0]+"."+domain
del pathList[0]
fileName = pathList[-1]
# del pathList[-1]
return '/'.join([domain]+pathList+["c","include",fileName])
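# e.g. directoryFromBlockPath("com.github.example.cnets.buffer")
#   -> "github.com/example/cnets/buffer/c/include/buffer"  (path illustrative)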
def importBlocks(a):
dependenciesDict = dict()
for v in a.read_data["topology"]+a.read_data["depends"]:
dependenciesDict[v["path"]] = v
out = ""
for k,v in dependenciesDict.items():
out+="\n#include \""+directoryFromBlockPath(v["path"])+".h\""
return out
def declareBlocks(a):
out = ""
hasParallel = False
for v in a.read_data["topology"]:
pathList = v["path"].split('.')
if v.has_key("parallel"):
hasParallel = True
out += "_".join(pathList)+"* "+v["name"]+";"
else:
out += "_".join(pathList)+" "+v["name"]+";"
a.sizeRunnables = 0
for k,v in enumerate(a.read_data["topology"]):
if v.has_key("type") and v["type"] == "buffer":
continue
a.sizeRunnables += 1
if a.sizeRunnables > 0:
if hasParallel:
out += "\nrunnablesContainer_cnets_osblinnikov_github_com* arrContainers;"
else:
out += "\nrunnablesContainer_cnets_osblinnikov_github_com arrContainers["+str(a.sizeRunnables)+"];"
return out
def checkPinId(arrPins, pinId):
for i,pin in enumerate(arrPins):
if pin.has_key("gridId"):
gridId = pin["gridId"]
if gridId == pinId:
if pin.has_key("is_busy"):
return -1
pin["is_busy"] = True
return i
if len(arrPins)>pinId:
pin = arrPins[pinId]
if pin.has_key("is_busy"):
return -1
pin["is_busy"] = True
return pinId
else:
return -1
def getReadersWriters(a,v, curBlock):
arr = []
#set writer to the buffer
for i,w in enumerate(v["emit"]):
blockId = w["blockId"]
if blockId == "export":
if checkPinId(a.read_data["emit"], w["pinId"]) != -1:
arr.append("_NAME_.w"+str(w["pinId"]))
else:
raise Exception("pinId _NAME_.w."+str(w["pinId"])+" was not found in the exported connection")
elif blockId != "internal":
rblock = a.read_data["topology"][int(blockId)]
if rblock["type"] != "buffer":
raise Exception("Connection from the block allowed only to the block with type='buffer'")
# r = rblock["receive"]
if checkPinId(rblock["receive"], w["pinId"]) != -1:
arr.append("_NAME_##"+rblock["name"]+"w"+str(w["pinId"]))
else:
raise Exception("pinId w."+str(w["pinId"])+" was not found in the destination buffer")
#get reader from buffer
for i,r in enumerate(v["receive"]):
blockId = r["blockId"]
if blockId == "export":
if checkPinId(a.read_data["receive"], r["pinId"]) != -1:
arr.append("_NAME_.r"+str(r["pinId"]))
else:
raise Exception("pinId _NAME_.r."+str(r["pinId"])+" was not found in the exported connection")
elif blockId != "internal":
wblock = a.read_data["topology"][int(blockId)]
if wblock["type"] != "buffer":
raise Exception("Connection from the block allowed only to the block with type='buffer'")
# r = wblock["emit"]
if checkPinId(wblock["emit"], r["pinId"]) != -1:
arr.append("_NAME_##"+wblock["name"]+"r"+str(r["pinId"]))
else:
raise Exception("pinId r."+str(r["pinId"])+" was not found in the destination buffer")
return arr
def connectBufferToReader(a, blockNum, i, w):
blockId = w["blockId"]
if blockId == "export":
raise Exception("Export readerWriter from buffer is forbidden! only kernels can do it [block id = "+str(blockNum)+"]")
elif blockId != "internal":
wblock = a.read_data["topology"][int(blockId)]
if wblock.has_key("type") and wblock["type"] == "buffer":
raise Exception("Interconnections of buffers ["+str(blockNum)+" and "+str(blockId)+"] are forbidden")
arr_id = checkPinId(wblock["receive"],w["pinId"])
if arr_id == -1:
raise Exception("pinId w."+str(w["pinId"])+" was not found in the destination buffer")
if w["pinId"] != arr_id:
raise Exception("wrong parameter gridId!=pinId in the block "+str(blockNum)+", pin "+str(i))
pinObject = wblock["receive"][arr_id]
if pinObject.has_key("blockId") and pinObject.has_key("pinId") and pinObject["blockId"] != "export":
if int(pinObject["blockId"])!=blockNum or int(pinObject["pinId"])!=i:
raise Exception("Connection of block "+str(blockNum)+", pin "+str(i)+" with "+str(blockId)+", pin "+str(w["pinId"])+" failed because the last already connected to "+str(pinObject["blockId"])+", "+str(pinObject["pinId"]))
pinObject.update({"blockId":blockNum})
pinObject.update({"pinId":i})
def getRwArgs(i,w):
gridId = i
if w.has_key("gridId"):
gridId = w["gridId"]
rwArgs = []
if w.has_key("rwArgs"):
for arg in w["rwArgs"]:
if not arg.has_key("value"):
raise Exception("rwArgs is specified but `value` field was not set")
rwArgs.append(str(arg["value"]))
return [str(gridId)]+rwArgs
def searchPropertyAndArgName(a, propName):
props = []
if a.read_data.has_key("props"):
props = a.read_data["props"]
for v in a.read_data["args"]+props:
if v["name"] == propName:
return True
return False
def initializeBuffers(a):
out = ""
#buffers
for blockNum, v in enumerate(a.read_data["channels"]):
# if not v.has_key("type") or v["type"] != "buffer":
# continue
pathList = v["path"].split('.')
argsList = []
for d in v["args"]:
castType = ""
if d.has_key("type"):
t, isObject, isArray ,isSerializable = filterTypes_c(d["type"])
if t != "arrayObject":
castType = "("+t+")"
argValue = str(d["value"])
if searchPropertyAndArgName(a,d["value"]):
argValue = "_NAME_."+argValue
argsList.append(castType+argValue)
#create variables
out += "\\\n "+'_'.join(pathList)+"_create("+','.join([v["name"]]+argsList)+")"
out += "\\\n _NAME_."+v["name"]+" = "+v["name"]+";"
#get writer from buffer
for i,w in enumerate(v["emit"]):
out += "\\\n "+'_'.join(pathList)+"_createReader("+','.join([ "_NAME_##"+v["name"]+"r"+str(i), "&_NAME_."+v["name"]] + getRwArgs(i,w))+")"
connectBufferToReader(a, blockNum, i, w)
#get reader from buffer
for i,w in enumerate(v["receive"]):
out += "\\\n "+'_'.join(pathList)+"_createWriter("+','.join([ "_NAME_##"+v["name"]+"w"+str(i), "&_NAME_."+v["name"]] + getRwArgs(i,w))+")"
return out
def initializeKernels(a):
out = ""
#kernels
hasParallel = "0"
for i,v in enumerate(a.read_data["topology"]):
if v.has_key("type") and v["type"] == "buffer":
continue
pathList = v["path"].split('.')
argsList = []
for d in v["args"]:
castType = ""
if d.has_key("type"):
t, isObject, isArray, isSerializable = filterTypes_c(d["type"])
if t != "arrayObject":
castType = "("+t+")"
argValue = str(d["value"])
if searchPropertyAndArgName(a,d["value"]):
argValue = "_NAME_."+argValue
argsList.append(castType+argValue)
if v.has_key("parallel"):
prefixParallel = ""
if not isinstance(v["parallel"], int ):
prefixParallel = "_NAME_."
hasParallel += "+"+prefixParallel+str(v["parallel"])
out += "\\\n "+'_'.join(pathList)+" _NAME_##_"+v["name"]+str(i)+"_##Container["+prefixParallel+str(v["parallel"])+"];"
out += "\\\n _NAME_."+v["name"]+" = _NAME_##_"+v["name"]+str(i)+"_##Container;"
out += "\\\n int _NAME_##_"+v["name"]+"_##_i;"
out += "\\\n for(_NAME_##_"+v["name"]+"_##_i=0;_NAME_##_"+v["name"]+"_##_i<(int)"+prefixParallel+str(v["parallel"])+";_NAME_##_"+v["name"]+"_##_i++){"
out += "\\\n "+'_'.join(pathList)+"_create("+','.join([v["name"]]+argsList+getReadersWriters(a,v,i))+");"
out += "\\\n _NAME_."+v["name"]+"[_NAME_##_"+v["name"]+"_##_i] = "+v["name"]+";"
out += "\\\n }"
else:
out += "\\\n "+'_'.join(pathList)+"_create("+','.join([v["name"]]+argsList+getReadersWriters(a,v,i))+");"
out += "\\\n _NAME_."+v["name"]+" = "+v["name"]+";"
hasParallel += "+1"
if hasParallel != "0":
out += "\\\n runnablesContainer_cnets_osblinnikov_github_com _NAME_##arrContainers["+evalSize(hasParallel)+"];"
out += "\\\n _NAME_.arrContainers = _NAME_##arrContainers;"
return out
def runBlocks(a):
out = []
hasParallel = False
#kernels
for i,v in enumerate(a.read_data["topology"]):
if v.has_key("type") and v["type"] == "buffer":
continue
if v.has_key("parallel"):
prefixParallel = ""
if not isinstance(v["parallel"], int ):
prefixParallel = "that->"
if not hasParallel:
hasParallel = True
out.append(" int j;")
out.append(" for(j=0;j<(int)"+prefixParallel+str(v["parallel"])+";j++){")
out.append(" that->"+v["name"]+"[j].run(&that->"+v["name"]+");")
out.append(" }")
else:
out.append(" that->"+v["name"]+".run(&that->"+v["name"]+");")
if len(out) > 0:
return " "+a.fullName_+" *that = ("+a.fullName_+"*)t;\n"+'\n'.join(out)
return ''
def getDefaultRunParameters(a):
argsList = ["classObj"]
for v in a.read_data["args"]:
t, isObject, isArray, isSerializable = filterTypes_c(v["type"])
if v.has_key("value_java"):
argsList.append(str(v["value_java"]))
elif v.has_key("value"):
argsList.append(str(v["value"]))
elif isArray or isObject:
# # t = t[:-2]
# argsList.append("new arrayObject")
# elif isObject:
argsList.append("arrayObjectNULL()")
else:
argsList.append("0")
for v in a.read_data["emit"]:
argsList.append("writerNULL()")
for v in a.read_data["receive"]:
argsList.append("readerNULL()")
return ','.join(argsList)
def startRunnables(a):
typeOfBlock = "kernel"
if a.read_data.has_key("type"):
typeOfBlock = a.read_data["type"]
out = a.fullName_+"_create("+getDefaultRunParameters(a)+");"
if typeOfBlock == "kernel":
out += '''
runnablesContainer_cnets_osblinnikov_github_com runnables = classObj.getRunnables(&classObj);
runnables.launch(&runnables,TRUE);
'''
return out
def testRunnables(a):
typeOfBlock = "kernel"
if a.read_data.has_key("type"):
typeOfBlock = a.read_data["type"]
out = a.fullName_+"_create("+getDefaultRunParameters(a)+");"
if typeOfBlock == "kernel":
out += '''
runnablesContainer_cnets_osblinnikov_github_com runnables = classObj.getRunnables(&classObj);
runnables.launch(&runnables,FALSE);
runnables.stop(&runnables);
'''
return out
def evalSize(sizeRunnables):
try:
evaluated = str(eval(sizeRunnables))
except:
evaluated = sizeRunnables
return evaluated
def getRunnables(a):
sizeRunnables = "0"
out = "\n"
hasParallel = False
for blockNum, v in enumerate(a.read_data["topology"]):
if v.has_key("type") and v["type"] == "buffer":
continue
if v.has_key("parallel"):
prefixParallel = ""
if not isinstance(v["parallel"], int ):
prefixParallel = "that->"
if not hasParallel:
out += " int j;\n"
hasParallel = True
out += " for(j=0;j<(int)"+prefixParallel+str(v["parallel"])+";j++){\n"
out += " that->arrContainers["+str(evalSize(sizeRunnables))+"+j] = that->"+v["name"]+"[j].getRunnables(&that->"+v["name"]+"[j]);\n"
out += " }\n"
sizeRunnables += "+"+prefixParallel+str(v["parallel"])
else:
out += " that->arrContainers["+str(sizeRunnables)+"] = that->"+v["name"]+".getRunnables(&that->"+v["name"]+");\n"
sizeRunnables += "+1"
if sizeRunnables == "0":
if len(str(a.read_data["spawnMode"])) == 0:
a.read_data["spawnMode"] = 1
return '''
runnablesContainer_cnets_osblinnikov_github_com_create(runnables)
RunnableStoppable_create(runnableStoppableObj,that, '''+a.fullName_+'''_)
runnables.setCore(&runnables,runnableStoppableObj, dispatcherCollector_getNextLocalId(), '''+str(a.read_data["spawnMode"])+''');
return runnables;'''
else:
return '''
runnablesContainer_cnets_osblinnikov_github_com_create(runnables)
'''+out+'''
arrayObject arr;
arr.array = (void*)&that->arrContainers;
arr.length = '''+str(evalSize(sizeRunnables))+''';
arr.itemSize = sizeof(runnablesContainer_cnets_osblinnikov_github_com);
runnables.setContainers(&runnables,arr);
return runnables;'''
| osblinnikov/gernet | src/parsing_c_kernel.py | Python | bsd-2-clause | 23,127 |
#! /usr/bin/env python3
import setuptools
import os
pkgs = setuptools.find_packages()
setuptools.setup(
name = "PiCon Registration Agent",
version = "0.0.1",
author = "Team Kickass",
author_email = "[email protected]",
description = "PiCon registration agent for the PiCon console registry",
license = "BSD",
keywords = "RaspberryPi Terminal Server Console",
url = "http://nanog.org",
packages=pkgs,
install_requires = ['daemonize','pyroute2','ipaddress','netifaces'],
long_description="See PiCon README",
)
| piconsole/picon | agent/setup.py | Python | bsd-2-clause | 549 |
import errno
import os
import re
import requests
import sys
import signal
import webbrowser
from contextlib import closing
from time import sleep
from distutils.version import StrictVersion
from livestreamer import (Livestreamer, StreamError, PluginError,
NoPluginError)
from livestreamer.cache import Cache
from livestreamer.stream import StreamProcess
from .argparser import parser
from .compat import stdout, is_win32
from .console import ConsoleOutput
from .constants import CONFIG_FILE, PLUGINS_DIR, STREAM_SYNONYMS
from .output import FileOutput, PlayerOutput
from .utils import NamedPipe, HTTPServer, ignored, stream_to_url
ACCEPTABLE_ERRNO = (errno.EPIPE, errno.EINVAL, errno.ECONNRESET)
args = console = livestreamer = plugin = None
def check_file_output(filename, force):
"""Checks if file already exists and ask the user if it should
be overwritten if it does."""
console.logger.debug("Checking file output")
if os.path.isfile(filename) and not force:
answer = console.ask("File {0} already exists! Overwrite it? [y/N] ",
filename)
if answer.lower() != "y":
sys.exit()
return FileOutput(filename)
def create_output():
"""Decides where to write the stream.
Depending on arguments it can be one of these:
- The stdout pipe
- A subprocess' stdin pipe
- A named pipe that the subprocess reads from
- A regular file
"""
if args.output:
if args.output == "-":
out = FileOutput(fd=stdout)
else:
out = check_file_output(args.output, args.force)
elif args.stdout:
out = FileOutput(fd=stdout)
else:
http = namedpipe = None
if not args.player:
console.exit("The default player (VLC) does not seem to be "
"installed. You must specify the path to a player "
"executable with --player.")
if args.player_fifo:
pipename = "livestreamerpipe-{0}".format(os.getpid())
console.logger.info("Creating pipe {0}", pipename)
try:
namedpipe = NamedPipe(pipename)
except IOError as err:
console.exit("Failed to create pipe: {0}", err)
elif args.player_http:
http = create_http_server()
console.logger.info("Starting player: {0}", args.player)
out = PlayerOutput(args.player, args=args.player_args,
quiet=not args.verbose_player,
kill=not args.player_no_close,
namedpipe=namedpipe, http=http)
return out
def create_http_server():
"""Creates a HTTP server listening on a random port."""
try:
http = HTTPServer()
http.bind()
except OSError as err:
console.exit("Failed to create HTTP server: {0}", err)
return http
def iter_http_requests(server, player):
"""Accept HTTP connections while the player is running."""
while player.running:
try:
yield server.open(timeout=2.5)
except OSError:
continue
def output_stream_http(plugin, streams):
"""Continuously output the stream over HTTP."""
server = create_http_server()
if not args.player:
console.exit("The default player (VLC) does not seem to be "
"installed. You must specify the path to a player "
"executable with --player.")
player = PlayerOutput(args.player, args=args.player_args,
filename=server.url,
quiet=not args.verbose_player)
stream_names = [resolve_stream_name(streams, s) for s in args.stream]
try:
console.logger.info("Starting player: {0}", args.player)
player.open()
except OSError as err:
console.exit("Failed to start player: {0} ({1})",
args.player, err)
for req in iter_http_requests(server, player):
user_agent = req.headers.get("User-Agent") or "unknown player"
console.logger.info("Got HTTP request from {0}".format(user_agent))
stream = stream_fd = None
while not stream_fd:
if not player.running:
break
try:
streams = streams or fetch_streams(plugin)
for stream_name in stream_names:
stream = streams.get(stream_name)
if stream: break
else:
stream = None
except PluginError as err:
console.logger.error("Unable to fetch new streams: {0}",
err)
if not stream:
console.logger.info("Stream not available, will re-fetch "
"streams in 10 sec")
streams = None
sleep(10)
continue
try:
console.logger.info("Opening stream: {0}", stream_name)
stream_fd, prebuffer = open_stream(stream)
except StreamError as err:
console.logger.error("{0}", err)
stream = streams = None
else:
console.logger.debug("Writing stream to player")
read_stream(stream_fd, server, prebuffer)
server.close(True)
player.close()
server.close()
def output_stream_passthrough(stream):
"""Prepares a filename to be passed to the player."""
filename = '"{0}"'.format(stream_to_url(stream))
out = PlayerOutput(args.player, args=args.player_args,
filename=filename, call=True,
quiet=not args.verbose_player)
try:
console.logger.info("Starting player: {0}", args.player)
out.open()
except OSError as err:
console.exit("Failed to start player: {0} ({1})", args.player, err)
return False
return True
def open_stream(stream):
"""Opens a stream and reads 8192 bytes from it.
This is useful to check if a stream actually has data
before opening the output.
"""
# Attempts to open the stream
try:
stream_fd = stream.open()
except StreamError as err:
raise StreamError("Could not open stream: {0}".format(err))
# Read 8192 bytes before proceeding to check for errors.
# This is to avoid opening the output unnecessarily.
try:
console.logger.debug("Pre-buffering 8192 bytes")
prebuffer = stream_fd.read(8192)
except IOError as err:
raise StreamError("Failed to read data from stream: {0}".format(err))
if not prebuffer:
raise StreamError("No data returned from stream")
return stream_fd, prebuffer
def output_stream(stream):
"""Open stream, create output and finally write the stream to output."""
try:
stream_fd, prebuffer = open_stream(stream)
except StreamError as err:
console.logger.error("{0}", err)
return
output = create_output()
try:
output.open()
except (IOError, OSError) as err:
if isinstance(output, PlayerOutput):
console.exit("Failed to start player: {0} ({1})",
args.player, err)
else:
console.exit("Failed to open output: {0} ({1})",
args.output, err)
with closing(output):
console.logger.debug("Writing stream to output")
read_stream(stream_fd, output, prebuffer)
return True
def read_stream(stream, output, prebuffer):
"""Reads data from stream and then writes it to the output."""
is_player = isinstance(output, PlayerOutput)
is_http = isinstance(output, HTTPServer)
is_fifo = is_player and output.namedpipe
show_progress = isinstance(output, FileOutput) and output.fd is not stdout
written = 0
while True:
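        # The first iteration writes the pre-buffered bytes; 'prebuffer' is
        # cleared near the bottom of the loop so later passes read from the
        # stream instead.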
try:
data = prebuffer or stream.read(8192)
except IOError as err:
console.logger.error("Error when reading from stream: {0}",
str(err))
break
if len(data) == 0:
break
# We need to check if the player process still exists when
# using named pipes on Windows since the named pipe is not
# automatically closed by the player.
if is_win32 and is_fifo:
output.player.poll()
if output.player.returncode is not None:
console.logger.info("Player closed")
break
try:
output.write(data)
except IOError as err:
if is_player and err.errno in ACCEPTABLE_ERRNO:
console.logger.info("Player closed")
elif is_http and err.errno in ACCEPTABLE_ERRNO:
console.logger.info("HTTP connection closed")
else:
console.logger.error("Error when writing to output: {0}",
err)
break
written += len(data)
prebuffer = None
if show_progress:
console.msg_inplace("Written {0} bytes", written)
if show_progress and written > 0:
console.msg_inplace_end()
stream.close()
console.logger.info("Stream ended")
def handle_stream(plugin, streams, stream_name):
"""Decides what to do with the selected stream.
Depending on arguments it can be one of these:
- Output internal command-line
    - Output JSON representation
- Continuously output the stream over HTTP
- Output stream data to selected output
"""
stream_name = resolve_stream_name(streams, stream_name)
stream = streams[stream_name]
# Print internal command-line if this stream
# uses a subprocess.
if args.cmdline:
if isinstance(stream, StreamProcess):
try:
cmdline = stream.cmdline()
except StreamError as err:
console.exit("{0}", err)
console.msg("{0}", cmdline)
else:
console.exit("Stream does not use a command-line")
# Print JSON representation of the stream
elif console.json:
console.msg_json(stream)
# Continuously output the stream over HTTP
elif args.player_continuous_http and not (args.output or args.stdout):
output_stream_http(plugin, streams)
# Output the stream
else:
# Find any streams with a '_alt' suffix and attempt
# to use these in case the main stream is not usable.
alt_streams = list(filter(lambda k: stream_name + "_alt" in k,
sorted(streams.keys())))
for stream_name in [stream_name] + alt_streams:
console.logger.info("Opening stream: {0}", stream_name)
stream = streams[stream_name]
stream_type = type(stream).shortname()
if (stream_type in args.player_passthrough and
not (args.output or args.stdout)):
success = output_stream_passthrough(stream)
else:
success = output_stream(stream)
if success:
break
def fetch_streams(plugin):
"""Fetches streams using correct parameters."""
return plugin.get_streams(stream_types=args.stream_types,
sorting_excludes=args.stream_sorting_excludes)
def resolve_stream_name(streams, stream_name):
"""Returns the real stream name of a synonym."""
if stream_name in STREAM_SYNONYMS:
for name, stream in streams.items():
if stream is streams[stream_name] and name not in STREAM_SYNONYMS:
return name
return stream_name
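# For example, if "best" is a synonym entry pointing at the same stream
# object as "720p", this returns "720p" (stream names illustrative).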
def format_valid_streams(streams):
"""Formats a dict of streams.
Filters out synonyms and displays them next to
the stream they point to.
"""
delimiter = ", "
validstreams = []
for name, stream in sorted(streams.items()):
if name in STREAM_SYNONYMS:
continue
        synonymfilter = lambda n: stream is streams[n] and n != name
synonyms = list(filter(synonymfilter, streams.keys()))
if len(synonyms) > 0:
joined = delimiter.join(synonyms)
name = "{0} ({1})".format(name, joined)
validstreams.append(name)
return delimiter.join(validstreams)
def handle_url():
"""The URL handler.
Attempts to resolve the URL to a plugin and then attempts
to fetch a list of available streams.
    Proceeds to handle the stream if the user specified a valid one,
    otherwise outputs a list of valid streams.
"""
try:
plugin = livestreamer.resolve_url(args.url)
console.logger.info("Found matching plugin {0} for URL {1}",
plugin.module, args.url)
streams = fetch_streams(plugin)
except NoPluginError:
console.exit("No plugin can handle URL: {0}", args.url)
except PluginError as err:
console.exit("{0}", err)
if not streams:
console.exit("No streams found on this URL: {0}", args.url)
if args.stream:
for stream_name in args.stream:
if stream_name in streams:
handle_stream(plugin, streams, stream_name)
return
err = ("The specified stream(s) '{0}' could not be "
"found".format(", ".join(args.stream)))
if console.json:
console.msg_json(dict(streams=streams, plugin=plugin.module,
error=err))
else:
validstreams = format_valid_streams(streams)
console.exit("{0}.\n Available streams: {1}",
err, validstreams)
else:
if console.json:
console.msg_json(dict(streams=streams, plugin=plugin.module))
else:
validstreams = format_valid_streams(streams)
console.msg("Available streams: {0}", validstreams)
def print_plugins():
"""Outputs a list of all plugins Livestreamer has loaded."""
pluginlist = list(livestreamer.get_plugins().keys())
pluginlist_formatted = ", ".join(sorted(pluginlist))
if console.json:
console.msg_json(pluginlist)
else:
console.msg("Loaded plugins: {0}", pluginlist_formatted)
def authenticate_twitch_oauth():
"""Opens a web browser to allow the user to grant Livestreamer
access to their Twitch account."""
client_id = "ewvlchtxgqq88ru9gmfp1gmyt6h2b93"
redirect_uri = "http://livestreamer.tanuki.se/en/develop/twitch_oauth.html"
url = ("https://api.twitch.tv/kraken/oauth2/authorize/"
"?response_type=token&client_id={0}&redirect_uri="
"{1}&scope=user_read").format(client_id, redirect_uri)
console.msg("Attempting to open a browser to let you authenticate "
"Livestreamer with Twitch")
try:
if not webbrowser.open_new_tab(url):
raise webbrowser.Error
except webbrowser.Error:
console.exit("Unable to open a web browser, try accessing this URL "
"manually instead:\n{0}".format(url))
def load_plugins(dirs):
"""Attempts to load plugins from a list of directories."""
dirs = [os.path.expanduser(d) for d in dirs]
for directory in dirs:
if os.path.isdir(directory):
livestreamer.load_plugins(directory)
else:
console.logger.warning("Plugin path {0} does not exist or is not "
"a directory!", directory)
def setup_args():
"""Parses arguments."""
global args
arglist = sys.argv[1:]
# Load additional arguments from livestreamerrc
if os.path.exists(CONFIG_FILE):
arglist.insert(0, "@" + CONFIG_FILE)
args = parser.parse_args(arglist)
# Force lowercase to allow case-insensitive lookup
if args.stream:
args.stream = [stream.lower() for stream in args.stream]
def setup_console():
"""Console setup."""
global console
    # All console-related operations are handled via the ConsoleOutput class
console = ConsoleOutput(sys.stdout, livestreamer)
# Console output should be on stderr if we are outputting
# a stream to stdout.
if args.stdout or args.output == "-":
console.set_output(sys.stderr)
# We don't want log output when we are printing JSON or a command-line.
if not (args.json or args.cmdline or args.quiet):
console.set_level(args.loglevel)
if args.quiet_player:
console.logger.warning("The option --quiet-player is deprecated since "
"version 1.4.3 as hiding player output is now "
"the default.")
console.json = args.json
# Handle SIGTERM just like SIGINT
signal.signal(signal.SIGTERM, signal.default_int_handler)
def setup_proxies():
"""Sets the HTTP(S) proxies for this process."""
if args.http_proxy:
if not re.match("^http(s)?://", args.http_proxy):
args.http_proxy = "http://" + args.http_proxy
os.environ["http_proxy"] = args.http_proxy
if args.https_proxy:
if not re.match("^http(s)?://", args.https_proxy):
args.https_proxy = "https://" + args.https_proxy
os.environ["https_proxy"] = args.https_proxy
def setup_plugins():
"""Loads any additional plugins."""
if os.path.isdir(PLUGINS_DIR):
load_plugins([PLUGINS_DIR])
if args.plugin_dirs:
load_plugins(args.plugin_dirs)
def setup_livestreamer():
"""Creates the Livestreamer session."""
global livestreamer
livestreamer = Livestreamer()
def setup_options():
"""Sets Livestreamer options."""
livestreamer.set_option("errorlog", args.errorlog)
if args.rtmpdump:
livestreamer.set_option("rtmpdump", args.rtmpdump)
if args.rtmpdump_proxy:
livestreamer.set_option("rtmpdump-proxy", args.rtmpdump_proxy)
if args.hds_live_edge is not None:
livestreamer.set_option("hds-live-edge", args.hds_live_edge)
if args.hds_fragment_buffer is not None:
livestreamer.set_option("hds-fragment-buffer",
args.hds_fragment_buffer)
if args.ringbuffer_size:
livestreamer.set_option("ringbuffer-size", args.ringbuffer_size)
if args.jtv_cookie:
livestreamer.set_plugin_option("justintv", "cookie",
args.jtv_cookie)
livestreamer.set_plugin_option("twitch", "cookie",
args.jtv_cookie)
if args.jtv_password:
livestreamer.set_plugin_option("justintv", "password",
args.jtv_password)
livestreamer.set_plugin_option("twitch", "password",
args.jtv_password)
if args.twitch_oauth_token:
livestreamer.set_plugin_option("twitch", "oauth_token",
args.twitch_oauth_token)
if args.ustream_password:
livestreamer.set_plugin_option("ustreamtv", "password",
args.ustream_password)
if args.crunchyroll_username:
livestreamer.set_plugin_option("crunchyroll", "username",
args.crunchyroll_username)
if args.crunchyroll_username and not args.crunchyroll_password:
crunchyroll_password = console.askpass("Enter Crunchyroll password: ")
else:
crunchyroll_password = args.crunchyroll_password
if crunchyroll_password:
livestreamer.set_plugin_option("crunchyroll", "password",
crunchyroll_password)
if args.crunchyroll_purge_credentials:
livestreamer.set_plugin_option("crunchyroll", "purge_credentials",
args.crunchyroll_purge_credentials)
# Deprecated options
if args.jtv_legacy_names:
console.logger.warning("The option --jtv/twitch-legacy-names is "
"deprecated and will be removed in the future.")
if args.gomtv_username:
console.logger.warning("The option --gomtv-username is deprecated "
"and will be removed in the future.")
if args.gomtv_password:
console.logger.warning("The option --gomtv-password is deprecated "
"and will be removed in the future.")
if args.gomtv_cookie:
console.logger.warning("The option --gomtv-cookie is deprecated "
"and will be removed in the future.")
def check_root():
if hasattr(os, "getuid"):
if os.geteuid() == 0 and not args.yes_run_as_root:
print("livestreamer is not supposed to be run as root. "
"If you really must you can do it by passing "
"--yes-run-as-root.")
sys.exit(1)
def check_version():
cache = Cache(filename="cli.json")
latest_version = cache.get("latest_version")
if not latest_version:
res = requests.get("https://pypi.python.org/pypi/livestreamer/json")
data = res.json()
latest_version = data.get("info").get("version")
cache.set("latest_version", latest_version, (60 * 60 * 24))
installed_version = StrictVersion(livestreamer.version)
latest_version = StrictVersion(latest_version)
if latest_version > installed_version:
console.logger.info("A new version of Livestreamer ({0}) is "
"available!".format(latest_version))
def main():
setup_args()
check_root()
setup_livestreamer()
setup_console()
setup_proxies()
setup_plugins()
with ignored(Exception):
check_version()
if args.plugins:
print_plugins()
elif args.url:
with ignored(KeyboardInterrupt):
setup_options()
handle_url()
elif args.twitch_oauth_authenticate:
authenticate_twitch_oauth()
else:
parser.print_help()
| breunigs/livestreamer | src/livestreamer_cli/main.py | Python | bsd-2-clause | 22,087 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import json, sys, copy, os, re
import pprint
import locale
import csv
locale.setlocale(locale.LC_ALL, 'en_US')
from collections import defaultdict
from operator import itemgetter
pp = pprint.PrettyPrinter(indent=4)
# For MacPorts ... need to eliminate TODO
sys.path.append('/opt/local/Library/Frameworks/Python.framework/Versions/2.7/lib/python2.7/site-packages')
"""
Iterate a dictionary and generate a string buffer of tab-separated key/value
pairs, assuming numeric values. An optional second dictionary (d2) is treated
as a denominator (e.g. project counts, yielding a per-project average column).
"""
def dict_value_sort(d,d2=None):
hdr="\n\tUSD"
if d2!=None:
hdr+="\t#Projects\tUSD/Proj\n"
buf=""
for key in sorted(d, key=d.get, reverse=True):
buf += "%s\t%s" % (key, locale.format("%12d", d[key], grouping=True))
if d2 != None:
buf += "\t%s\t%s" % (locale.format("%6d", d2[key], grouping=True), locale.format("%6d", d[key]/d2[key], grouping=True))
buf += "\n"
return hdr+buf
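# Schematic output for d={"US": 1000, "GB": 500}, d2={"US": 4, "GB": 2}
# (values illustrative; real column widths come from locale.format):
#
#       USD     #Projects   USD/Proj
#   US  1,000       4          250
#   GB    500       2          250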
def read_usd_fx_table(usd_fx_csv_pathname):
fxusd = dict();
with open(usd_fx_csv_pathname, 'rb') as csvfile:
fxreader = csv.reader(csvfile, delimiter=',')
for row in fxreader:
if len(row)>0 and row[0]!='Currency':
fxusd[row[1]] = {
'Name': row[0],
'cur_buys_usd': float(row[2]),
'usd_buys_cur': float(row[3])
}
return fxusd
def prep_predicates(filters):
preds = []
for filter in filters:
(path,value) = re.split('\s*=\s*',filter)
path_els = path.split('/')
values = value.split(',')
preds.append({"path_els":path_els,"values":values})
return preds
def project_predicate_test(proj,predicates):
    match = False
    for pred in predicates:
        # restart from the project root for each predicate; without this,
        # the second predicate would descend from wherever the previous
        # walk finished
        v = proj
        for path_el in pred['path_els']:
            v = v[path_el]
        sv = str(v)
        if sv in pred['values']:
            match = True
            break
    return match
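# Filter strings have the form "<path>=<value[,value...]>", where <path> is a
# '/'-separated key path into a project dict, e.g.
#   state=successful
#   category/name=Comics,Games
# (values illustrative; see prep_predicates above).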
def main(wr_kickstarter_json_path,usd_fx_pathname,filter_predicates=[]):
predicates = prep_predicates(filter_predicates) if filter_predicates else []
fxusd = read_usd_fx_table(usd_fx_pathname)
report = gen_ks_report(wr_kickstarter_json_path,fxusd,predicates)
print report
def gen_ks_report(wr_kickstarter_json_path,fxusd,predicates=[]):
json_data = open(wr_kickstarter_json_path).read()
j = json.loads(json_data)
schema_tree = defaultdict(dict)
tots = defaultdict(dict)
corpus = []
template = {
"pled_ctry" : dict(),
"goal_ctry" : dict(),
"cnt_ctry" : dict(),
"pled_cat" : dict(),
"goal_cat" : dict(),
"cnt_cat" : dict(),
"pled_state" : 0,
"goal_state" : 0,
"cnt_state" : 0
}
"""
92562 failed
74635 successful
17296 canceled
6496 live
395 suspended
"""
cnt_all = 0
for block_of_projects in j:
proj_count = len(block_of_projects["projects"])
for i in range(proj_count):
proj = block_of_projects["projects"][i]
if predicates and not project_predicate_test(proj,predicates):
continue
# Grab project values
pled = proj["pledged"] * fxusd[proj["currency"]]["cur_buys_usd"]
#goal = proj["goal"] * fxusd[proj["currency"]]["cur_buys_usd"]
ctry = proj["country"]
cat = "%s (%s)" % (proj["category"]["name"],proj["category"]["id"])
state = proj["state"]
# Ingest descriptive text for TF-IDF
corpus.append( "%s %s" % (proj["blurb"].lower(), proj["name"].lower()) )
# Ensure accumulation skeleton exists
if state not in tots:
tots[state] = copy.deepcopy(template)
# Accumulate totals, increment counters
tots[state]["pled_ctry"][ctry] = tots[state]["pled_ctry"][ctry] + pled if ctry in tots[state]["pled_ctry"] else pled
#tots[state]["goal_ctry"][ctry] = tots[state]["goal_ctry"][ctry] + goal if ctry in tots[state]["goal_ctry"] else goal
tots[state]["cnt_ctry"][ctry] = tots[state]["cnt_ctry"][ctry] + 1 if ctry in tots[state]["cnt_ctry"] else 1
tots[state]["pled_cat"][cat] = tots[state]["pled_cat"][cat] + pled if cat in tots[state]["pled_cat"] else pled
#tots[state]["goal_cat"][cat] = tots[state]["goal_cat"][cat] + goal if cat in tots[state]["goal_cat"] else goal
tots[state]["cnt_cat"][cat] = tots[state]["cnt_cat"][cat] + 1 if cat in tots[state]["cnt_cat"] else 1
tots[state]["pled_state"] += pled
#tots[state]["goal_state"] += goal
tots[state]["cnt_state"] += 1
cnt_all += 1
# Generate the report
buf = ""
for state in tots:
buf += "Per country, %s: %s\n" % (state,dict_value_sort(tots[state]["pled_ctry"],tots[state]["cnt_ctry"]))
buf += "Per category, %s: %s\n" % (state,dict_value_sort(tots[state]["pled_cat"],tots[state]["cnt_cat"]))
buf += "Pledged overall for %s: %s\n" % (state,locale.format("%6d", tots[state]["pled_state"], grouping=True))
#buf += "Goal overall for %s: %s\n" % (state,locale.format("%6d", tots[state]["goal_state"], grouping=True))
buf += "Count overall for %s: %s\n" % (state,locale.format("%6d", tots[state]["cnt_state"], grouping=True))
buf += "Per project for %s: %s\n" % (state,locale.format("%6d", tots[state]["pled_state"]/tots[state]["cnt_state"], grouping=True))
buf += "'%s\n" % ("=" * 40)
buf += "Number of projects, overall: %d\n" % cnt_all
return buf
if __name__ == '__main__':
min_args = 3
if (len(sys.argv)<min_args) or (not os.path.exists(sys.argv[1]) or not os.path.exists(sys.argv[2])):
print "Usage: wr_ks_reader.py <webrobots_ks_data.json> <usd_fx_csv>"
print "e.g. ./wr_ks_reader.py sample-data/five_projects_from-2014-12-02.json sample-data/usd_all_2015-03-25.csv"
exit()
main(sys.argv[1],sys.argv[2],sys.argv[3:] if len(sys.argv)>min_args else None)
| Kevin-Prichard/webrobots-kickstarter-python | wr_ks_reader.py | Python | bsd-2-clause | 6,159 |
"""Parse MUV datasets.
MUV datasets are provided as supporting data to
http://jcheminf.springeropen.com/articles/10.1186/1758-2946-5-26
This script can also be used for the other datasets in the supporting data.
"""
import gflags as flags
import glob
import gzip
import logging
import os
import re
import sys
flags.DEFINE_string('root', None, 'Root directory containing datasets.')
flags.DEFINE_string('prefix', 'aid', 'Prefix to append to output filenames.')
FLAGS = flags.FLAGS
logging.getLogger().setLevel(logging.INFO)
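# Input files are expected to be named like cmp_list_MUV_466_actives.dat.gz
# or cmp_list_MUV_466_decoys.dat.gz (dataset id 466 is illustrative); the
# regex below extracts the id between the dataset name and the suffix.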
def extract_smiles(suffix):
for filename in glob.glob(os.path.join(FLAGS.root, '*_%s.dat.gz' % suffix)):
match = re.search('cmp_list_.*?_(.*?)_%s' % suffix, filename)
name = match.group(1)
logging.info('%s -> %s', filename, name)
output_filename = '%s%s-%s.smi' % (FLAGS.prefix, name, suffix)
with open(output_filename, 'wb') as outfile:
count = 0
with gzip.open(filename) as f:
for line in f:
if line.startswith('#'):
continue
source_id, data_id, smi = line.split()
outfile.write('%s\t%s\n' % (smi, data_id))
count += 1
logging.info('%s: %d', output_filename, count)
def main():
extract_smiles('actives')
extract_smiles('decoys')
if __name__ == '__main__':
flags.MarkFlagAsRequired('root')
FLAGS(sys.argv)
main()
| skearnes/color-features | paper/code/parse_muv.py | Python | bsd-3-clause | 1,468 |
from django.conf.urls import *
from corehq.apps.export.views import (
CreateFormExportView,
CreateCaseExportView,
CreateCustomFormExportView,
CreateCustomCaseExportView,
EditCustomFormExportView,
EditCustomCaseExportView,
DeleteCustomExportView,
)
urlpatterns = patterns(
'corehq.apps.export.views',
url(r"^create/form/$", CreateFormExportView.as_view(), name=CreateFormExportView.urlname),
url(r"^create/case/$", CreateCaseExportView.as_view(), name=CreateCaseExportView.urlname),
url(r"^customize/form/$", CreateCustomFormExportView.as_view(), name=CreateCustomFormExportView.urlname),
url(r"^customize/case/$", CreateCustomCaseExportView.as_view(), name=CreateCustomCaseExportView.urlname),
url(r"^custom/form/(?P<export_id>[\w\-]+)/edit/$", EditCustomFormExportView.as_view(),
name=EditCustomFormExportView.urlname),
url(r"^custom/case/(?P<export_id>[\w\-]+)/edit/$", EditCustomCaseExportView.as_view(),
name=EditCustomCaseExportView.urlname),
url(r"^custom/(?P<export_id>[\w\-]+)/delete/$", DeleteCustomExportView.as_view(), name=DeleteCustomExportView.urlname),
)
| puttarajubr/commcare-hq | corehq/apps/export/urls.py | Python | bsd-3-clause | 1,147 |
from django.test import TestCase
from corehq.apps.domain.shortcuts import create_domain
from corehq.apps.linked_domain.models import DomainLink
class DomainLinkUrlsTest(TestCase):
domain = 'domain-link-tests'
@classmethod
def setUpClass(cls):
super(DomainLinkUrlsTest, cls).setUpClass()
cls.downstream = create_domain('downstream-domain')
cls.upstream = create_domain('upstream-domain')
@classmethod
def tearDownClass(cls):
super(DomainLinkUrlsTest, cls).tearDownClass()
cls.downstream.delete()
cls.upstream.delete()
def test_upstream_url(self):
domain_link = DomainLink.link_domains(linked_domain=self.downstream.name, master_domain=self.upstream.name)
self.addCleanup(domain_link.delete)
expected_upstream_url = '/a/upstream-domain/settings/project/domain_links/'
self.assertEqual(expected_upstream_url, domain_link.upstream_url)
def test_remote_upstream_url(self):
domain_link = DomainLink.link_domains(linked_domain=self.downstream.name, master_domain=self.upstream.name)
domain_link.remote_base_url = 'test.base.url'
domain_link.save()
self.addCleanup(domain_link.delete)
expected_upstream_url = 'test.base.url/a/upstream-domain/'
self.assertEqual(expected_upstream_url, domain_link.upstream_url)
def test_downstream_url(self):
domain_link = DomainLink.link_domains(linked_domain=self.downstream.name, master_domain=self.upstream.name)
self.addCleanup(domain_link.delete)
expected_downstream_url = '/a/downstream-domain/settings/project/domain_links/'
self.assertEqual(expected_downstream_url, domain_link.downstream_url)
def test_remote_downstream_url(self):
domain_link = DomainLink.link_domains(linked_domain=self.downstream.name, master_domain=self.upstream.name)
domain_link.remote_base_url = 'test.base.url'
domain_link.save()
self.addCleanup(domain_link.delete)
expected_downstream_url = self.downstream.name # remote downstream urls are equal to the name
self.assertEqual(expected_downstream_url, domain_link.downstream_url)
| dimagi/commcare-hq | corehq/apps/linked_domain/tests/test_domain_link_methods.py | Python | bsd-3-clause | 2,197 |
# Copyright 2012 Dorival de Moraes Pedroso. All rights reserved.
# Use of this source code is governed by a BSD-style
# license that can be found in the LICENSE file.
bl_info = {
'name' : 'FEMmesh mesh generator',
'author' : 'Dorival Pedroso',
'version' : (1, 0),
    'blender' : (2, 63, 0),
'location' : 'View3D > Tools panel > FEMmesh mesh generator',
'warning' : '',
'description' : 'Generate V and C lists for FEMmesh',
'wiki_url' : '',
'tracker_url' : '',
'category' : '3D View'}
import bgl
import blf
import bpy
from bpy_extras import view3d_utils
import subprocess
def draw_callback_px(self, context):
wm = context.window_manager
sc = context.scene
if not wm.do_show_tags: return
ob = context.object
if not ob: return
if not ob.type == 'MESH': return
# status
font_id = 0
blf.position(font_id, 45, 45, 0)
#blf.size(font_id, 20, 48) # 20, 72)
blf.size(font_id, 15, 72)
blf.draw(font_id, "displaying tags")
# region
reg = bpy.context.region
r3d = bpy.context.space_data.region_3d
# transformation matrix (local co => global co)
T = ob.matrix_world.copy()
# vert tags
if len(ob.vtags)>0 and sc.pyfem_show_vtag:
blf.size(font_id, sc.pyfem_vert_font, 72)
r, g, b = sc.pyfem_vert_color
bgl.glColor4f(r, g, b, 1.0)
for v in ob.vtags.values():
if v.tag >= 0: continue
pm = ob.data.vertices[v.idx].co
co = view3d_utils.location_3d_to_region_2d(reg, r3d, T*pm)
blf.position(font_id, co[0], co[1], 0)
blf.draw(font_id, "%d"%v.tag)
# edge tags
if len(ob.etags)>0 and sc.pyfem_show_etag:
blf.size(font_id, sc.pyfem_edge_font, 72)
r, g, b = sc.pyfem_edge_color
bgl.glColor4f(r, g, b, 1.0)
for v in ob.etags.values():
if v.tag >= 0: continue
pa = ob.data.vertices[v.v0].co
pb = ob.data.vertices[v.v1].co
pm = (pa+pb)/2.0
co = view3d_utils.location_3d_to_region_2d(reg, r3d, T*pm)
blf.position(font_id, co[0], co[1], 0)
blf.draw(font_id, "%d"%v.tag)
# cell tags
if len(ob.ctags)>0 and sc.pyfem_show_ctag:
blf.size(font_id, sc.pyfem_cell_font, 72)
r, g, b = sc.pyfem_cell_color
bgl.glColor4f(r, g, b, 1.0)
for v in ob.ctags.values():
if v.tag >= 0: continue
c = ob.data.polygons[v.idx]
pm = ob.data.vertices[c.vertices[0]].co.copy()
for k in range(1, len(c.vertices)):
pm += ob.data.vertices[c.vertices[k]].co
pm /= float(len(c.vertices))
co = view3d_utils.location_3d_to_region_2d(reg, r3d, T*pm)
blf.position(font_id, co[0], co[1], 0)
blf.draw(font_id, "%d"%v.tag)
class FEMmeshDisplayTags(bpy.types.Operator):
bl_idname = "view3d.show_tags"
bl_label = "Show Tags"
bl_description = "Display tags on top of mesh"
last_activity = 'NONE'
_handle = None
def modal(self, context, event):
# redraw
if context.area:
context.area.tag_redraw()
# stop script
if not context.window_manager.do_show_tags:
context.region.callback_remove(self._handle)
return {'CANCELLED'}
return {'PASS_THROUGH'}
def cancel(self, context):
if context.window_manager.do_show_tags:
context.region.callback_remove(self._handle)
context.window_manager.do_show_tags = False
return {'CANCELLED'}
def invoke(self, context, event):
if context.area.type == 'VIEW_3D':
# operator is called for the first time, start everything
if context.window_manager.do_show_tags == False:
context.window_manager.do_show_tags = True
context.window_manager.modal_handler_add(self)
self._handle = context.region.callback_add(draw_callback_px, (self, context), 'POST_PIXEL')
return {'RUNNING_MODAL'}
# operator is called again, stop displaying
else:
context.window_manager.do_show_tags = False
return {'CANCELLED'}
else:
self.report({'WARNING'}, "View3D not found, can't run operator")
return {'CANCELLED'}
class SetVertexTag(bpy.types.Operator):
bl_idname = "pyfem.set_vert_tag"
bl_label = "Set vertex tag"
bl_description = "Set vertex tag (for selected vertices)"
@classmethod
def poll(cls, context):
return context.object and (context.object.type == 'MESH') and ('EDIT' in context.object.mode)
def execute(self, context):
bpy.ops.object.editmode_toggle()
sc = context.scene
ob = context.object
vids = [v.idx for v in ob.vtags.values()]
for v in ob.data.vertices:
if v.select == True: # vertex is selected
if v.index in vids: # update
ob.vtags[vids.index(v.index)].tag = sc.pyfem_default_vert_tag
else:
new = ob.vtags.add()
new.tag = sc.pyfem_default_vert_tag
new.idx = v.index
bpy.ops.object.editmode_toggle()
return {'FINISHED'}
class SetEdgeTag(bpy.types.Operator):
bl_idname = "pyfem.set_edge_tag"
bl_label = "Set edge tag"
bl_description = "Set edge tag (for selected edges)"
@classmethod
def poll(cls, context):
return context.object and (context.object.type == 'MESH') and ('EDIT' in context.object.mode)
def execute(self, context):
bpy.ops.object.editmode_toggle()
sc = context.scene
ob = context.object
ekeys = [(v.v0,v.v1) for v in ob.etags.values()]
for e in ob.data.edges:
if e.select == True: # edge is selected
if e.key in ekeys: # update
ob.etags[ekeys.index(e.key)].tag = sc.pyfem_default_edge_tag
else:
new = ob.etags.add()
new.tag = sc.pyfem_default_edge_tag
new.v0 = e.key[0]
new.v1 = e.key[1]
bpy.ops.object.editmode_toggle()
return {'FINISHED'}
class SetCellTag(bpy.types.Operator):
bl_idname = "pyfem.set_cell_tag"
bl_label = "Set cell tag"
bl_description = "Set cell tag (for selected faces)"
@classmethod
def poll(cls, context):
return context.object and (context.object.type == 'MESH') and ('EDIT' in context.object.mode)
def execute(self, context):
bpy.ops.object.editmode_toggle()
sc = context.scene
ob = context.object
cids = [v.idx for v in ob.ctags.values()]
for p in ob.data.polygons:
if p.select == True: # polygon is selected
if p.index in cids: # update
ob.ctags[cids.index(p.index)].tag = sc.pyfem_default_cell_tag
else:
new = ob.ctags.add()
new.tag = sc.pyfem_default_cell_tag
new.idx = p.index
bpy.ops.object.editmode_toggle()
return {'FINISHED'}
def write_mesh_to_file(filepath, context, drawmesh=False, ids=False, tags=True, tol=0.0001, flatten=False):
sc = context.scene
ob = context.object
me = ob.data
T = ob.matrix_world.copy()
vids = [v.idx for v in ob.vtags.values()]
ekeys = [(v.v0,v.v1) for v in ob.etags.values()]
cids = [v.idx for v in ob.ctags.values()]
errors = ''
# header
l = ''
if drawmesh:
l += 'from msys_drawmesh import DrawMesh\n'
# vertices
l += 'V=[\n'
for k, v in enumerate(me.vertices):
if flatten and abs(v.co[2]) > 0.0:
v.co[2] = 0.0
co = T * v.co
tg = ob.vtags[vids.index(v.index)].tag if (v.index in vids) else 0
l += ' [%d, %d, %.8f, %.8f]' % (k, tg, co[0], co[1])
if k<len(me.vertices)-1: l += ','
l += '\n'
# cells
nc = len(ob.data.polygons) # number of cells
l += ']\nC=[\n'
for i, p in enumerate(ob.data.polygons):
tg = ob.ctags[cids.index(p.index)].tag if (p.index in cids) else 0
n = p.normal
err = ''
        if abs(n[0]) > tol or abs(n[1]) > tol:
            err += 'Face has normal non-parallel to z. '
        if n[2] < tol:
            err += 'Face has wrong normal; vertices must be counter-clockwise. '
l += ' [%d, %d, [' % (i, tg)
et = {} # edge tags
nv = len(p.vertices) # number of vertices
for k in range(nv):
v0, v1 = ob.data.vertices[p.vertices[k]].index, ob.data.vertices[p.vertices[(k+1)%nv]].index
l += '%d' % v0
if k<nv-1: l += ','
else: l += ']'
ek = (v0,v1) if v0<v1 else (v1,v0) # edge key
if ek in ekeys:
if ob.etags[ekeys.index(ek)].tag >=0: continue
et[k] = ob.etags[ekeys.index(ek)].tag
if len(et)>0: l += ', {'
k = 0
for idx, tag in et.items():
l += '%d:%d' % (idx, tag)
if k<len(et)-1: l += ', '
else: l += '}'
k += 1
if i<nc-1: l += '],'
else: l += ']'
        if err != '':
            l += '# ' + err
            errors += err
l += '\n'
l += ']\n'
# footer
if drawmesh:
l += 'd = DrawMesh(V, C)\n'
l += 'd.draw(with_ids=%s, with_tags=%s)\n' % (str(ids), str(tags))
l += 'd.show()\n'
# write to file
    with open(filepath, 'w') as f:
        f.write(l)
return errors
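# For reference, the emitted file holds plain Python lists. A minimal
# hypothetical example -- one triangle with vertex 0 tagged -100, edge
# (0, 1) tagged -10 and the cell tagged -1 -- would read:
#
#   V=[
#     [0, -100, 0.00000000, 0.00000000],
#     [1, 0, 1.00000000, 0.00000000],
#     [2, 0, 0.00000000, 1.00000000]
#   ]
#   C=[
#     [0, -1, [0,1,2], {0:-10}]
#   ]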
class FEMmeshExporter(bpy.types.Operator):
bl_idname = "pyfem.export_mesh"
bl_label = "Export V and C lists"
bl_description = "Save file with V and C lists"
filepath = bpy.props.StringProperty(subtype='FILE_PATH',)
check_existing = bpy.props.BoolProperty(
name = "Check Existing",
description = "Check and warn on overwriting existing files",
default = True,
options = {'HIDDEN'},)
@classmethod
def poll(cls, context):
return context.object and (context.object.type == 'MESH')
def execute(self, context):
bpy.ops.object.editmode_toggle()
errors = write_mesh_to_file(self.filepath, context,
flatten=context.scene.pyfem_flatten)
if errors!='': self.report({'WARNING'}, errors)
bpy.ops.object.editmode_toggle()
return {'FINISHED'}
def invoke(self, context, event):
if not self.filepath:
self.filepath = bpy.path.ensure_ext(bpy.data.filepath, ".py")
wm = context.window_manager
wm.fileselect_add(self)
return {'RUNNING_MODAL'}
class FEMmeshMsysDrawmesh(bpy.types.Operator):
bl_idname = "pyfem.msys_drawmesh"
bl_label = "Draw 2D mesh using Matplotlib"
bl_description = "View current mesh with Pylab"
@classmethod
def poll(cls, context):
return context.object and (context.object.type == 'MESH')
def execute(self, context):
bpy.ops.object.editmode_toggle()
fn = bpy.app.tempdir + 'temporary.fem_blender_addon.py'
errors = write_mesh_to_file(fn, context, True,
context.scene.pyfem_msys_with_ids,
context.scene.pyfem_msys_with_tags,
flatten=context.scene.pyfem_flatten)
if errors=='':
try: subprocess.Popen(['python', fn])
except: self.report({'WARNING'}, 'calling external Python command failed')
else: self.report({'WARNING'}, errors)
bpy.ops.object.editmode_toggle()
return {'FINISHED'}
class ObjectVertTag(bpy.types.PropertyGroup):
tag = bpy.props.IntProperty()
idx = bpy.props.IntProperty()
class ObjectEdgeTag(bpy.types.PropertyGroup):
tag = bpy.props.IntProperty()
v0 = bpy.props.IntProperty()
v1 = bpy.props.IntProperty()
class ObjectCellTag(bpy.types.PropertyGroup):
tag = bpy.props.IntProperty()
idx = bpy.props.IntProperty()
def init_properties():
# object data
bpy.utils.register_class(ObjectVertTag)
bpy.utils.register_class(ObjectEdgeTag)
bpy.utils.register_class(ObjectCellTag)
bpy.types.Object.vtags = bpy.props.CollectionProperty(type=ObjectVertTag)
bpy.types.Object.etags = bpy.props.CollectionProperty(type=ObjectEdgeTag)
bpy.types.Object.ctags = bpy.props.CollectionProperty(type=ObjectCellTag)
# scene data
scene = bpy.types.Scene
scene.pyfem_default_edge_tag = bpy.props.IntProperty(
name ="E",
description = "Default Edge Tag",
default = -10,
min = -99,
max = 0)
scene.pyfem_default_vert_tag = bpy.props.IntProperty(
name ="V",
description = "Default Vertex Tag",
default = -100,
min = -1000,
max = 0)
scene.pyfem_default_cell_tag = bpy.props.IntProperty(
name ="C",
description = "Default Cell Tag",
default = -1,
min = -99,
max = 0)
# show tags
scene.pyfem_show_etag = bpy.props.BoolProperty(
name = "Edge",
description = "Display Edge Tags",
default = True)
scene.pyfem_show_vtag = bpy.props.BoolProperty(
name = "Vertex",
description = "Display Vertex Tags",
default = True)
scene.pyfem_show_ctag = bpy.props.BoolProperty(
name = "Cell",
description = "Display Cell Tags",
default = True)
# font sizes
scene.pyfem_vert_font = bpy.props.IntProperty(
name = "V",
description = "Vertex font size",
default = 12,
min = 6,
max = 100)
scene.pyfem_edge_font = bpy.props.IntProperty(
name = "E",
description = "Edge font size",
default = 12,
min = 6,
max = 100)
scene.pyfem_cell_font = bpy.props.IntProperty(
name = "C",
description = "Edge font size",
default = 20,
min = 6,
max = 100)
# font colors
scene.pyfem_vert_color = bpy.props.FloatVectorProperty(
name = "V",
description = "Vertex color",
default = (1.0, 0.805, 0.587),
min = 0,
max = 1,
subtype = 'COLOR')
scene.pyfem_edge_color = bpy.props.FloatVectorProperty(
name = "E",
description = "Edge color",
default = (0.934, 0.764, 1.0),
min = 0,
max = 1,
subtype = 'COLOR')
scene.pyfem_cell_color = bpy.props.FloatVectorProperty(
name = "C",
description = "Cell color",
default = (0.504, 0.786, 1.0),
min = 0,
max = 1,
subtype = 'COLOR')
# export data
scene.pyfem_flatten = bpy.props.BoolProperty(
name = "Project z back to 0",
description = "Project z coordinates back to 0 (flatten)",
default = False)
# view in pylab
scene.pyfem_msys_with_ids = bpy.props.BoolProperty(
name = "W. IDs",
description = "Display IDs in Matplotlib",
default = False)
scene.pyfem_msys_with_tags = bpy.props.BoolProperty(
name = "W. Tags",
description = "Display Tags in Matplotlib",
default = True)
# do_show_tags is initially always False and it is in the window manager, not the scene
wm = bpy.types.WindowManager
wm.do_show_tags = bpy.props.BoolProperty(default=False)
class FEMmeshPanel(bpy.types.Panel):
bl_label = "CIVL4250 FEMmesh"
bl_space_type = "VIEW_3D"
bl_region_type = "TOOL_PROPS"
def draw(self, context):
sc = context.scene
wm = context.window_manager
l = self.layout
l.label("Set tags:")
c = l.column(align=True)
r = c.row(align=True); r.prop(sc, "pyfem_default_vert_tag"); r.operator("pyfem.set_vert_tag")
r = c.row(align=True); r.prop(sc, "pyfem_default_edge_tag"); r.operator("pyfem.set_edge_tag")
r = c.row(align=True); r.prop(sc, "pyfem_default_cell_tag"); r.operator("pyfem.set_cell_tag")
l.label("Show/hide:")
c = l.column(align=True)
r = c.row(align=True)
r.prop(sc, "pyfem_show_vtag")
r.prop(sc, "pyfem_show_etag")
r.prop(sc, "pyfem_show_ctag")
if not wm.do_show_tags:
l.operator("view3d.show_tags", text="Start display", icon='PLAY')
else:
l.operator("view3d.show_tags", text="Stop display", icon='PAUSE')
l.label("Font size and colors:")
c = l.column(align=True)
r = c.row(align=True); r.prop(sc, "pyfem_vert_font"); r.prop(sc, "pyfem_vert_color", text="")
r = c.row(align=True); r.prop(sc, "pyfem_edge_font"); r.prop(sc, "pyfem_edge_color", text="")
r = c.row(align=True); r.prop(sc, "pyfem_cell_font"); r.prop(sc, "pyfem_cell_color", text="")
l.label("Export data:")
l.prop(sc, "pyfem_flatten")
l.operator("pyfem.export_mesh", text="Save .py File")
l.label("View in Matplotlib:")
c = l.column(align=True)
r = c.row(align=True)
r.prop(sc, "pyfem_msys_with_ids")
r.prop(sc, "pyfem_msys_with_tags")
l.operator("pyfem.msys_drawmesh", text="View with Pylab")
def register():
init_properties()
bpy.utils.register_module(__name__)
def unregister():
bpy.utils.unregister_module(__name__)
if __name__ == "__main__":
register()
| cpmech/tlfem | scripts/tlfem_blender_addon.py | Python | bsd-3-clause | 17,995 |
from __future__ import division, print_function, absolute_import
import numpy.testing as npt
import numpy as np
from scipy._lib.six import xrange
import pytest
from scipy import stats
from .common_tests import (check_normalization, check_moment, check_mean_expect,
check_var_expect, check_skew_expect,
check_kurt_expect, check_entropy,
check_private_entropy, check_edge_support,
check_named_args, check_random_state_property,
check_pickling, check_rvs_broadcast, check_freezing)
from scipy.stats._distr_params import distdiscrete
vals = ([1, 2, 3, 4], [0.1, 0.2, 0.3, 0.4])
distdiscrete += [[stats.rv_discrete(values=vals), ()]]
def cases_test_discrete_basic():
seen = set()
for distname, arg in distdiscrete:
yield distname, arg, distname not in seen
seen.add(distname)
@pytest.mark.parametrize('distname,arg,first_case', cases_test_discrete_basic())
def test_discrete_basic(distname, arg, first_case):
try:
distfn = getattr(stats, distname)
except TypeError:
distfn = distname
distname = 'sample distribution'
np.random.seed(9765456)
rvs = distfn.rvs(size=2000, *arg)
supp = np.unique(rvs)
m, v = distfn.stats(*arg)
check_cdf_ppf(distfn, arg, supp, distname + ' cdf_ppf')
check_pmf_cdf(distfn, arg, distname)
check_oth(distfn, arg, supp, distname + ' oth')
check_edge_support(distfn, arg)
alpha = 0.01
check_discrete_chisquare(distfn, arg, rvs, alpha,
distname + ' chisquare')
if first_case:
locscale_defaults = (0,)
meths = [distfn.pmf, distfn.logpmf, distfn.cdf, distfn.logcdf,
distfn.logsf]
# make sure arguments are within support
spec_k = {'randint': 11, 'hypergeom': 4, 'bernoulli': 0, }
k = spec_k.get(distname, 1)
check_named_args(distfn, k, arg, locscale_defaults, meths)
if distname != 'sample distribution':
check_scale_docstring(distfn)
check_random_state_property(distfn, arg)
check_pickling(distfn, arg)
check_freezing(distfn, arg)
# Entropy
check_entropy(distfn, arg, distname)
if distfn.__class__._entropy != stats.rv_discrete._entropy:
check_private_entropy(distfn, arg, stats.rv_discrete)
@pytest.mark.parametrize('distname,arg', distdiscrete)
def test_moments(distname, arg):
try:
distfn = getattr(stats, distname)
except TypeError:
distfn = distname
distname = 'sample distribution'
m, v, s, k = distfn.stats(*arg, moments='mvsk')
check_normalization(distfn, arg, distname)
# compare `stats` and `moment` methods
check_moment(distfn, arg, m, v, distname)
check_mean_expect(distfn, arg, m, distname)
check_var_expect(distfn, arg, m, v, distname)
check_skew_expect(distfn, arg, m, v, s, distname)
if distname not in ['zipf', 'yulesimon']:
check_kurt_expect(distfn, arg, m, v, k, distname)
# frozen distr moments
check_moment_frozen(distfn, arg, m, 1)
check_moment_frozen(distfn, arg, v+m*m, 2)
@pytest.mark.parametrize('dist,shape_args', distdiscrete)
def test_rvs_broadcast(dist, shape_args):
# If shape_only is True, it means the _rvs method of the
# distribution uses more than one random number to generate a random
# variate. That means the result of using rvs with broadcasting or
# with a nontrivial size will not necessarily be the same as using the
# numpy.vectorize'd version of rvs(), so we can only compare the shapes
# of the results, not the values.
# Whether or not a distribution is in the following list is an
# implementation detail of the distribution, not a requirement. If
# the implementation the rvs() method of a distribution changes, this
# test might also have to be changed.
shape_only = dist in ['betabinom', 'skellam', 'yulesimon', 'dlaplace']
try:
distfunc = getattr(stats, dist)
except TypeError:
distfunc = dist
dist = 'rv_discrete(values=(%r, %r))' % (dist.xk, dist.pk)
loc = np.zeros(2)
nargs = distfunc.numargs
allargs = []
bshape = []
# Generate shape parameter arguments...
for k in range(nargs):
shp = (k + 3,) + (1,)*(k + 1)
param_val = shape_args[k]
allargs.append(np.full(shp, param_val))
bshape.insert(0, shp[0])
allargs.append(loc)
bshape.append(loc.size)
# bshape holds the expected shape when loc, scale, and the shape
# parameters are all broadcast together.
check_rvs_broadcast(distfunc, dist, allargs, bshape, shape_only, [np.int_])
def check_cdf_ppf(distfn, arg, supp, msg):
# cdf is a step function, and ppf(q) = min{k : cdf(k) >= q, k integer}
npt.assert_array_equal(distfn.ppf(distfn.cdf(supp, *arg), *arg),
supp, msg + '-roundtrip')
npt.assert_array_equal(distfn.ppf(distfn.cdf(supp, *arg) - 1e-8, *arg),
supp, msg + '-roundtrip')
if not hasattr(distfn, 'xk'):
_a, _b = distfn.support(*arg)
supp1 = supp[supp < _b]
npt.assert_array_equal(distfn.ppf(distfn.cdf(supp1, *arg) + 1e-8, *arg),
supp1 + distfn.inc, msg + ' ppf-cdf-next')
# -1e-8 could cause an error if pmf < 1e-8
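# Worked example of the roundtrip above (illustrative, values rounded):
# for stats.poisson(3), cdf(1) ~ 0.1991 and cdf(2) ~ 0.4232, so
# ppf(0.42) == 2, the smallest integer k with cdf(k) >= 0.42. Hence
# ppf(cdf(2) - 1e-8) lands back on 2, while ppf(cdf(2) + 1e-8) moves to
# the next support point, 3.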
def check_pmf_cdf(distfn, arg, distname):
if hasattr(distfn, 'xk'):
index = distfn.xk
else:
startind = int(distfn.ppf(0.01, *arg) - 1)
index = list(range(startind, startind + 10))
cdfs = distfn.cdf(index, *arg)
pmfs_cum = distfn.pmf(index, *arg).cumsum()
atol, rtol = 1e-10, 1e-10
if distname == 'skellam': # ncx2 accuracy
atol, rtol = 1e-5, 1e-5
npt.assert_allclose(cdfs - cdfs[0], pmfs_cum - pmfs_cum[0],
atol=atol, rtol=rtol)
def check_moment_frozen(distfn, arg, m, k):
npt.assert_allclose(distfn(*arg).moment(k), m,
atol=1e-10, rtol=1e-10)
def check_oth(distfn, arg, supp, msg):
# checking other methods of distfn
npt.assert_allclose(distfn.sf(supp, *arg), 1. - distfn.cdf(supp, *arg),
atol=1e-10, rtol=1e-10)
q = np.linspace(0.01, 0.99, 20)
npt.assert_allclose(distfn.isf(q, *arg), distfn.ppf(1. - q, *arg),
atol=1e-10, rtol=1e-10)
median_sf = distfn.isf(0.5, *arg)
npt.assert_(distfn.sf(median_sf - 1, *arg) > 0.5)
npt.assert_(distfn.cdf(median_sf + 1, *arg) > 0.5)
def check_discrete_chisquare(distfn, arg, rvs, alpha, msg):
"""Perform chisquare test for random sample of a discrete distribution
Parameters
----------
distname : string
name of distribution function
arg : sequence
parameters of distribution
alpha : float
significance level, threshold for p-value
Returns
-------
result : bool
0 if test passes, 1 if test fails
"""
wsupp = 0.05
# construct intervals with minimum mass `wsupp`.
# intervals are left-half-open as in a cdf difference
_a, _b = distfn.support(*arg)
lo = int(max(_a, -1000))
high = int(min(_b, 1000)) + 1
distsupport = xrange(lo, high)
last = 0
distsupp = [lo]
distmass = []
for ii in distsupport:
current = distfn.cdf(ii, *arg)
if current - last >= wsupp - 1e-14:
distsupp.append(ii)
distmass.append(current - last)
last = current
if current > (1 - wsupp):
break
if distsupp[-1] < _b:
distsupp.append(_b)
distmass.append(1 - last)
distsupp = np.array(distsupp)
distmass = np.array(distmass)
# convert intervals to right-half-open as required by histogram
histsupp = distsupp + 1e-8
histsupp[0] = _a
# find sample frequencies and perform chisquare test
freq, hsupp = np.histogram(rvs, histsupp)
chis, pval = stats.chisquare(np.array(freq), len(rvs)*distmass)
npt.assert_(pval > alpha,
'chisquare - test for %s at arg = %s with pval = %s' %
(msg, str(arg), str(pval)))
def check_scale_docstring(distfn):
if distfn.__doc__ is not None:
# Docstrings can be stripped if interpreter is run with -OO
npt.assert_('scale' not in distfn.__doc__)
| jamestwebber/scipy | scipy/stats/tests/test_discrete_basic.py | Python | bsd-3-clause | 8,418 |
"""
pyshtools constants for Earth's Moon.
Each object is an astropy Constant that possesses the attributes name, value,
unit, uncertainty, and reference.
"""
import numpy as _np
from astropy.constants import Constant as _Constant
from astropy.constants import G as _G
gm = _Constant(
abbrev='gm_moon',
name='Gravitational constant times the mass of the Moon',
value=4902.80007e9,
unit='m3 / s2',
uncertainty=0.00014e9,
reference='Williams, J. G., A. S. Konopliv, D. H. Boggs, '
'R. S. Park, D.-N. Yuan, F. G. Lemoine, S. Goossens, E. Mazarico, '
'F. Nimmo, R. C. Weber, S. W. Asmar, H. J. Melosh, G. A. Neumann, '
'R. J. Phillips, D. E. Smith, S. C. Solomon, M. M. Watkins, M. A. '
'Wieczorek, J. C. Andrews-Hanna, J. W. Head, W. S. Kiefer, I. '
'Matsuyama, P. J. McGovern, G. J. Taylor, and M. T. Zuber (2014). '
'Lunar interior properties from the GRAIL mission, J. Geophys. Res. '
'Planets, 119, 1546-1578, doi:10.1002/2013JE004559.')
mass = _Constant(
abbrev='mass_moon',
name='Mass of the Moon',
value=gm.value / _G.value,
unit='kg',
uncertainty=_np.sqrt((gm.uncertainty / _G.value)**2 +
(gm.value * _G.uncertainty / _G.value**2)**2
),
reference='Derived from gm_moon and G.')
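# The uncertainty above is first-order error propagation applied to
# m = gm / G: sigma_m = sqrt((sigma_gm / G)**2 + (gm * sigma_G / G**2)**2),
# i.e. the partials dm/d(gm) = 1/G and dm/dG = -gm/G**2, each scaled by
# its uncertainty and combined in quadrature.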
r = _Constant(
abbrev='r_moon',
name='Mean radius of the Moon',
value=1737151.0,
unit='m',
uncertainty=0.0,
reference='LOLA2600p: Wieczorek, M. A. (2015). Gravity and topography '
'of the terrestrial planets. In T. Spohn & G. Schubert (Eds.), '
'Treatise on Geophysics, 2nd ed., Vol. 10, pp. 153-193). Oxford, '
'Elsevier-Pergamon, doi:10.1016/B978-0-444-53802-4.00169-X.')
density = _Constant(
abbrev='density_moon',
name='Mean density of the Moon',
value=3 * mass.value / (_np.pi * 4 * r.value**3),
unit='kg / m3',
uncertainty=_np.sqrt((3 * mass.uncertainty /
(_np.pi * 4 * r.value**3))**2
+ (3 * 3 * mass.value *
r.uncertainty /
(_np.pi * 4 * r.value**4))**2
),
reference='Derived from mass_moon and r_moon.')
g0 = _Constant(
abbrev='g0_moon',
name='Surface gravity of the Moon, ignoring rotation and tides',
value=gm.value / r.value**2,
unit='m / s2',
uncertainty=_np.sqrt((gm.uncertainty / r.value**2)**2
+ (2 * gm.value * r.uncertainty
/ r.value**3)**2
),
reference='Derived from gm_moon and r_moon.')
a_orbit = _Constant(
abbrev='a_orbit_moon',
name='Semimajor axis of the orbit of the Moon',
value=384399.014e3,
unit='m',
uncertainty=0.0,
reference='Williams, J. G., D. H. Boggs, and W. M. Folkner '
'(2013), DE430 lunar orbit, physical librations, and surface '
'coordinates, IOM 335-JW,DB, WF-20130722-016, July 22, 2013, '
'Jet Propul. Lab., Pasadena, Calif.')
omega = _Constant(
abbrev='omega_moon',
name='Angular spin rate of the Moon',
value=2 * _np.pi / (27.321582 * 24 * 60 * 60),
unit='rad / s',
uncertainty=0.0,
reference='Yoder, C. F. (1995). Astrometric and geodetic properties '
'of Earth and the solar system. In: Ahrens TJ (ed.) Global Earth '
'Physics: A Handbook of Physical Constants. AGU Reference Shelf, '
'vol. 1, pp. 1-31. American Geophysical Union.')
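# Sanity check of the value (illustrative arithmetic): a sidereal period of
# 27.321582 days is 27.321582 * 86400 ~ 2.3606e6 s, so
# omega = 2 * pi / 2.3606e6 ~ 2.6617e-6 rad / s.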
i_solid = _Constant(
abbrev='i_solid_moon',
name='Mean normalized moment of inertia of the solid portion of the '
'Moon using the mean radius',
value=0.393112,
unit='',
uncertainty=0.000012,
reference='Williams, J. G., A. S. Konopliv, D. H. Boggs, '
'R. S. Park, D.-N. Yuan, F. G. Lemoine, S. Goossens, E. Mazarico, '
'F. Nimmo, R. C. Weber, S. W. Asmar, H. J. Melosh, G. A. Neumann, '
'R. J. Phillips, D. E. Smith, S. C. Solomon, M. M. Watkins, M. A. '
'Wieczorek, J. C. Andrews-Hanna, J. W. Head, W. S. Kiefer, I. '
'Matsuyama, P. J. McGovern, G. J. Taylor, and M. T. Zuber (2014). '
'Lunar interior properties from the GRAIL mission, J. Geophys. Res. '
'Planets, 119, 1546-1578, doi:10.1002/2013JE004559.')
beta = _Constant(
abbrev='beta_moon',
name='Libration parameter (C-A)/B of the Moon',
value=631.0213e-6,
unit='',
uncertainty=0.0031e-6,
reference='Williams, J. G., A. S. Konopliv, D. H. Boggs, '
'R. S. Park, D.-N. Yuan, F. G. Lemoine, S. Goossens, E. Mazarico, '
'F. Nimmo, R. C. Weber, S. W. Asmar, H. J. Melosh, G. A. Neumann, '
'R. J. Phillips, D. E. Smith, S. C. Solomon, M. M. Watkins, M. A. '
'Wieczorek, J. C. Andrews-Hanna, J. W. Head, W. S. Kiefer, I. '
'Matsuyama, P. J. McGovern, G. J. Taylor, and M. T. Zuber (2014). '
'Lunar interior properties from the GRAIL mission, J. Geophys. Res. '
'Planets, 119, 1546-1578, doi:10.1002/2013JE004559.')
gamma = _Constant(
    abbrev='gamma_moon',
name='Libration parameter (B-A)/C of the Moon',
value=227.7317e-6,
unit='',
uncertainty=0.0042e-6,
reference='Williams, J. G., A. S. Konopliv, D. H. Boggs, '
'R. S. Park, D.-N. Yuan, F. G. Lemoine, S. Goossens, E. Mazarico, '
'F. Nimmo, R. C. Weber, S. W. Asmar, H. J. Melosh, G. A. Neumann, '
'R. J. Phillips, D. E. Smith, S. C. Solomon, M. M. Watkins, M. A. '
'Wieczorek, J. C. Andrews-Hanna, J. W. Head, W. S. Kiefer, I. '
'Matsuyama, P. J. McGovern, G. J. Taylor, and M. T. Zuber (2014). '
'Lunar interior properties from the GRAIL mission, J. Geophys. Res. '
'Planets, 119, 1546-1578, doi:10.1002/2013JE004559.')
__all__ = ['gm', 'mass', 'r', 'density', 'g0', 'a_orbit', 'omega', 'i_solid',
'beta', 'gamma']
| MarkWieczorek/SHTOOLS | pyshtools/constants/Moon.py | Python | bsd-3-clause | 5,749 |
"""
Copyright (c) 2013, Jurriaan Bremer
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
* Neither the name of the darm developer(s) nor the names of its
contributors may be used to endorse or promote products derived from this
software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.
"""
import darmtbl
import darmtbl2
import itertools
import sys
import textwrap
def instruction_name(x):
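    """Return the bare mnemonic of an instruction description string."""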
return x.split('{')[0].split('<')[0].split()[0]
def instruction_names(arr):
"""List of all unique instruction names."""
return ['INVLD'] + sorted(set(instruction_name(x) for x in arr))
def enum_table(name, arr):
"""Enumeration."""
text = '\n '.join(textwrap.wrap(', '.join(arr), 74))
return 'typedef enum _%s_t {\n %s\n} %s_t;\n' % (name, text, name)
def typed_table(typ, name, arr):
"""A table with a given type."""
text = '\n '.join(textwrap.wrap(', '.join(arr), 74))
# if it's not a pointer, append a space
if typ[-1] != '*':
typ += ' '
return '%s%s[] = {\n %s\n};\n' % (typ, name, text)
def string_table(name, arr):
"""A string table."""
return typed_table('const char *', name, ('"%s"' % x for x in arr))
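# For illustration (hypothetical input): string_table('foo', ['a', 'b'])
# produces the C fragment
#   const char *foo[] = {
#       "a", "b"
#   };
# via typed_table, which sees the trailing '*' and omits the extra space.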
def instruction_names_enum(arr):
"""Enumeration of all instruction names."""
return enum_table('darm_instr',
['I_%s' % x for x in instruction_names(arr)] +
['I_INSTRCNT'])
def instruction_names_table(arr):
"""Table of strings of all instructions."""
return string_table('darm_mnemonics', instruction_names(arr))
def instruction_types_table(arr, kind):
"""Lookup table for the types of instructions."""
arr = ['T_%s' % arr[x][1][1] if x in arr else 'T_INVLD'
for x in range(256)]
return typed_table('darm_enctype_t', '%s_instr_types' % kind, arr)
def instruction_names_index_table(arr, kind):
"""Lookup table for instruction label for each instruction index."""
arr = ['I_%s' % arr[x][0] if x in arr else 'I_INVLD'
for x in range(256)]
return typed_table('darm_instr_t', '%s_instr_labels' % kind, arr)
def type_lookup_table(name, *args):
"""Create a lookup table for a certain instruction type."""
arr = ('I_%s' % x.upper() if x else 'I_INVLD' for x in args)
return typed_table('darm_instr_t', '%s_instr_lookup' % name, arr)
def type_encoding_enum(enumname, arr):
text = []
for _, name, info, encodings, _, affects in arr:
text.append(
' // info:\n' +
' // %s\n //\n' % info +
' // encodings:\n // ' +
'\n // '.join(encodings) + '\n //\n' +
' // affects:\n // ' +
'\n // '.join(textwrap.wrap(', '.join(affects), 74)) + '\n' +
' T_%s,' % name)
return 'typedef enum _%s_t {\n%s\n} %s_t;\n' % (enumname,
'\n\n'.join(text),
enumname)
def type_encoding_table(tblname, arr):
"""Table of strings of all instructions."""
return string_table(tblname, (x[1] for x in arr))
def generate_format_strings(arr):
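    """Map each instruction mnemonic to its derived format string(s)."""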
ret = {}
# a set of rules to transform a string representation as given by the
# armv7 manual, into our own custom format string
rules = [
# if this instruction updates the condition flags, then an S is added
# to the end of the instruction
'{S}', 's',
# if this instruction is conditional, then the condition under which
# it executes is appended to the instruction
'<c>', 'c',
# memory address
'[<Rn>,#+/-<imm12>]', 'M',
'[<Rn>,+/-<Rm>{,<shift>}]', 'M',
# memory address with Rn as base register and an immediate or Rm
# operand as offset
'[<Rn>]', 'B',
'#+/-<imm12>', 'O',
'#+/-<imm8>', 'O',
'+/-<Rm>', 'O',
# various register operands
'<Rd>', 'd',
'<Rn>', 'n',
'<Rm>', 'm',
'<Ra>', 'a',
'<Rt>', 't',
'<Rt2>', '2',
'<RdHi>', 'h',
'<RdLo>', 'l',
# immediate values
'#<const>', 'i',
'#<imm4>', 'i',
'#<imm5>', 'i',
'#<imm16>', 'i',
'#<imm24>', 'i',
# immediate and register shift
'{,<shift>}', 'S',
'#<shift>', 'S',
'<type> <Rs>', 'S',
# some bit instructions take a lsb and width as operand
'#<lsb>', 'L',
'#<width>', 'w',
# for branch instructions
'<label>', 'b',
# option immediate for various obscure instructions
'#<option>', 'o',
# either a list of registers, reglist, or a single register
'<registers>', 'r',
# exclamation mark to specify the write-back bit
'{!}', '!',
# the SETEND instruction takes a one or zero as operand
'<endian_specifier>', 'e',
# some signed multiplication instructions take an X flag, which
# means that you can swap halfwords of the second operand
'{X}', 'x',
# certain signed multiplication instructions take these flags in
# order to swap halfwords that are being used
'<x><y>', 'X',
# rounding flag for various signed multiplication instructions
'{R}', 'R',
# rotation of operands
'{,<rotation>}', 'A',
# the PKH instruction has either a TB or BT postfix, specified by
# the T member of the darm object
'<T>', 'T',
]
for row in arr:
full = row[0]
instr = instruction_name(full)
# strip the instruction
full = full[len(instr):]
# apply all rules
for k, v in zip(*[iter(rules)]*2):
full = full.replace(k, v)
full = full.replace(',', '').replace(' ', '')
if instr not in ret:
ret[instr] = [full]
elif ret[instr][0] == full[:len(ret[instr][0])]:
ret[instr][0] = full
else:
ret[instr].append(full)
return ret
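# Worked example of the rules above, for an ARITH_IMM encoding such as
# 'ADD{S}<c> <Rd>,<Rn>,#<const>': the mnemonic 'ADD' is stripped first,
# then '{S}' -> 's', '<c>' -> 'c', '<Rd>' -> 'd', '<Rn>' -> 'n' and
# '#<const>' -> 'i'; removing commas and spaces leaves the format
# string 'scdni'.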
def magic_open(fname):
# python magic!
sys.stdout = open(fname, 'w')
# print the license
print('/*')
print(__doc__.strip())
print('*/')
d = darmtbl
d2 = darmtbl2
def notype(*x):
return (0,) + x
def armv7(*x):
return (1,) + x
def thumb(*x):
return (2,) + x
def thumb2(*x):
return (3,) + x
# we specify various instruction types
instr_types = [
notype('INVLD', 'Invalid or non-existent type',
['I_INVLD'], lambda x, y, z: False),
armv7('ADR', 'ADR Instruction, which is an optimization of ADD',
['ADR<c> <Rd>,<label>'], lambda x, y, z: y[:3] == 'ADR'),
armv7('UNCOND', 'All unconditional instructions',
['ins <endian_specifier>', 'ins [<Rn>,#+/-<imm12>]',
'ins [<Rn>,#<imm12>]', 'ins', 'ins #<option>', 'ins <label>'],
lambda x, y, z: False),
armv7('MUL', 'All multiplication instructions',
['ins{S}<c> <Rd>,<Rn>,<Rm>', 'ins{S}<c> <Rd>,<Rn>,<Rm>,<Ra>',
'ins{S}<c> <RdLo>,<RdHi>,<Rn>,<Rm>'],
lambda x, y, z: x[1:5] == (0,)*4 and x[-5:-1] == (1, 0, 0, 1)),
armv7('STACK0', 'Various STR and LDR instructions',
['ins<c> <Rt>,[<Rn>,#+/-<imm12>]', 'ins<c> <Rt>,[<Rn>],#+/-<imm12>',
'ins<c> <Rt>,[<Rn>],+/-<Rm>{,<shift>}'],
lambda x, y, z: x[1:3] == (0, 1) and not (x[3] == 1 == x[-2])),
armv7('STACK1', 'Various unprivileged STR and LDR instructions',
['ins<c> <Rt>,[<Rn>],+/-<Rm>', 'ins<c> <Rt>,[<Rn>]{,#+/-<imm8>}'],
lambda x, y, z: x[-5] == 1 and x[-2] == 1 and x[-4:-2] != (0, 0) and
x[1:5] == (0, 0, 0, 0) and x[7] == 1),
armv7('STACK2', 'Various other STR and LDR instructions',
['ins<c> <Rt>,<Rt2>,[<Rn>],+/-<Rm>',
'ins<c> <Rt>,[<Rn>],+/-<Rm>',
'ins<c> <Rt>,<Rt2>,[<Rn>],#+/-<imm8>',
'ins<c> <Rt>,<Rt2>,[<Rn>,#+/-<imm8>]',
'ins<c> <Rt>,[<Rn>,#+/-<imm8>]', ],
lambda x, y, z: x[1:4] == (0,)*3 and x[-2] == 1 and x[-5] == 1 and
x[-4:-2] != (0, 0) and not x[-1] in (0, 1) and
not (x[4] == 0 and x[7] == 1)),
armv7('ARITH_SHIFT',
'Arithmetic instructions which take a shift for the second source',
['ins{S}<c> <Rd>,<Rn>,<Rm>{,<shift>}',
'ins{S}<c> <Rd>,<Rn>,<Rm>,<type> <Rs>'],
lambda x, y, z: d.Rn in x and d.Rd in x and x[-3] == d.type_
and x[-1] == d.Rm),
armv7('ARITH_IMM',
'Arithmetic instructions which take an immediate as second source',
['ins{S}<c> <Rd>,<Rn>,#<const>'],
lambda x, y, z: d.Rn in x and d.Rd in x and d.imm12 in x),
armv7('BITS', 'Bit field magic',
[], lambda x, y, z: d.lsb in x),
armv7('BRNCHSC', 'Branch and System Call instructions',
['B(L)<c> <label>', 'SVC<c> #<imm24>'],
lambda x, y, z: x[-1] == d.imm24),
armv7('BRNCHMISC', 'Branch and Misc instructions',
['B(L)X(J)<c> <Rm>', 'BKPT #<imm16>', 'MSR<c> <spec_reg>,<Rn>'],
lambda x, y, z: x[1:9] == (0, 0, 0, 1, 0, 0, 1, 0) and
not y[0] == 'Q'),
armv7('MOV_IMM', 'Move immediate to a register (possibly negating it)',
['ins{S}<c> <Rd>,#<const>'],
lambda x, y, z: x[-1] == d.imm12 and x[-2] == d.Rd),
armv7('CMP_OP', 'Comparison instructions which take two operands',
['ins<c> <Rn>,<Rm>{,<shift>}', 'ins<c> <Rn>,<Rm>,<type> <Rs>'],
lambda x, y, z: x[-1] == d.Rm and x[-3] == d.type_ and
(x[-4] == d.imm5 and x[-8:-4] == (0, 0, 0, 0) or
x[-5] == d.Rs and x[-9:-5] == (0, 0, 0, 0))),
armv7('CMP_IMM', 'Comparison instructions which take an immediate',
['ins<c> <Rn>,#<const>'],
lambda x, y, z: x[-1] == d.imm12 and x[-6] == d.Rn),
armv7('OPLESS', 'Instructions which don\'t take any operands',
['ins<c>'],
lambda x, y, z: len(x) == 29),
armv7('DST_SRC', 'Manipulate and move a register to another register',
['ins{S}<c> <Rd>,<Rm>', 'ins{S}<c> <Rd>,<Rm>,#<imm>',
'ins{S}<c> <Rd>,<Rn>,<Rm>'],
lambda x, y, z: z == 26 or z == 27),
armv7('LDSTREGS', 'Load or store multiple registers at once',
['ins<c> <Rn>{!},<registers>'],
lambda x, y, z: x[-1] == d.register_list),
armv7('BITREV', 'Bit reverse instructions',
['ins<c> <Rd>,<Rm>'],
lambda x, y, z: x[-1] == d.Rm and x[-10] == d.Rd and
x[-11] != d.Rn),
armv7('MISC', 'Various miscellaneous instructions',
['ins{S}<c> <Rd>,<Rm>,<type> <Rs>', 'ins{S}<c> <Rd>,<Rm>{,<shift>}',
'ins<c> #<imm4>', 'ins<c> #<option>',
'ins<c> <Rd>,<Rn>,<Rm>{,<type> #<imm>}', 'ins<c> <Rd>,<Rn>,<Rm>'],
lambda x, y, z: instruction_name(y) in ('MVN', 'SMC', 'DBG', 'PKH',
'SEL')),
armv7('SM', 'Various signed multiply instructions', [],
lambda x, y, z: y[:2] == 'SM'),
armv7('PAS', 'Parallel signed and unsigned addition and subtraction',
['ins<c> <Rd>,<Rn>,<Rm>'],
lambda x, y, z: z in (97, 98, 99, 101, 102, 103)),
armv7('SAT', 'Saturating addition and subtraction instructions',
['ins<c> <Rd>,<Rn>,<Rm>'],
lambda x, y, z: y[0] == 'Q'),
armv7('SYNC', 'Synchronization primitives',
['ins{B}<c> <Rt>,<Rt2>,[<Rn>]', 'ins<c> <Rd>,<Rt>,[<Rn>]',
'ins<c> <Rt>,<Rt2>,[<Rn>]', 'ins<c> <Rt>,[<Rn>]'],
lambda x, y, z: x[1:5] == (0, 0, 0, 1) and
(x[-5:-1] == (1, 0, 0, 1) or x[-8:-4] == (1, 0, 0, 1))),
armv7('PUSR', 'Packing, unpacking, saturation, and reversal instructions',
['ins<c> <Rd>,#<imm>,<Rn>', 'ins<c> <Rd>,#<imm>,<Rn>{,<shift>}',
'ins<c> <Rd>,<Rn>,<Rm>{,<rotation>}',
'ins<c> <Rd>,<Rm>{,<rotation>}'],
lambda x, y, z: x[1:6] == (0, 1, 1, 0, 1)),
thumb('ONLY_IMM8', 'Instructions which only take an 8-byte immediate',
['ins<c> #<imm8>'],
lambda x, y, z: d2.imm8 in x and len(x) == 9),
]
if __name__ == '__main__':
armv7_table, thumb_table, thumb2_table = {}, {}, {}
# the last item (a list) will contain the instructions affected by this
# encoding type
instr_types = [list(x) + [[]] for x in instr_types]
# prepend the instruction set to the encoding types
insns_types = '', 'ARM_', 'THUMB_', 'THUMB2_'
instr_types = [[x[0]] + [insns_types[x[0]] + x[1]] + x[2:6]
for x in instr_types]
# list of encoding types which should not be emitted in the table (because
# they are handled somewhere else, in a somewhat hardcoded fashion)
type_ignore = 'ARM_MUL', 'ARM_STACK0', 'ARM_STACK1', 'ARM_STACK2', \
'ARM_SAT', 'ARM_SYNC', 'ARM_PUSR', 'ARM_ADR'
for description in darmtbl.ARMv7:
instr = description[0]
bits = description[1:]
identifier = []
remainder = []
for x in range(1 if bits[0] == darmtbl.cond else 4, len(bits)):
if isinstance(bits[x], int):
identifier.append(str(bits[x]))
elif len(identifier) + bits[x].bitsize > 8:
identifier += ['01'] * (8-len(identifier))
remainder = bits[x:]
else:
identifier += ['01'] * bits[x].bitsize
# first handle all unconditional instructions, i.e., whitelist those
# instructions that have already been implemented
if bits[:4] == (1, 1, 1, 1) and \
bits[4:7] in ((0, 0, 0), (0, 1, 0), (0, 1, 1), (1, 0, 1)):
# hardcoded index for the T_UNCOND type encoding
instr_types[2][-1].append(instr)
continue
for x in itertools.product(*identifier[:8]):
idx = sum(int(x[y])*2**(7-y) for y in range(8))
# for each conditional instruction, check which type of
# instruction this is
for y in instr_types:
if y[0] == 1 and bits[0] == d.cond and y[4](bits, instr, idx):
if not y[1] in type_ignore:
armv7_table[idx] = instruction_name(instr), y
y[-1].append(instr)
break
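    # Illustration of the expansion above (hypothetical bit layout): if the
    # first eight bits after the condition are the fixed bits 0, 0, 0, 1
    # followed by a 4-bit register field, `identifier` becomes
    # ['0', '0', '0', '1', '01', '01', '01', '01'] and itertools.product
    # enumerates all 2**4 = 16 concrete 8-bit prefixes, so the instruction
    # is registered at each of those 16 table indices.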
for description in darmtbl2.thumbs:
instr = description[0]
bits = description[1:]
bitcount = sum(1 if isinstance(x, int) else x.bitsize for x in bits)
if bitcount in (16, 17, 18, 19):
# TODO fix >16
identifier, remainder = [], []
for x in range(len(bits)):
if isinstance(bits[x], int):
identifier.append(str(bits[x]))
elif len(identifier) + bits[x].bitsize > 8:
identifier += ['01'] * (8-len(identifier))
remainder = bits[x:]
else:
identifier += ['01'] * bits[x].bitsize
for x in itertools.product(*identifier[:8]):
idx = sum(int(x[y])*2**(7-y) for y in range(8))
for y in (_ for _ in instr_types if _[0] == 2):
if y[4](bits, instr, 0):
thumb_table[idx] = instruction_name(instr), y
y[-1].append(instr)
# make a list of unique instructions affected by each encoding type,
# we remove the first item from the instruction names, as this is I_INVLD
instr_types = [x[:5] + [instruction_names(x[5])[1:]] for x in instr_types]
#
# darm-tbl.h
#
magic_open('darm-tbl.h')
fmtstrs = generate_format_strings(darmtbl.ARMv7)
# until we remove all unused instructions..
instrcnt = len(open('instructions.txt').readlines())
# print required headers
print('#ifndef __DARM_TBL__')
print('#define __DARM_TBL__')
print('#include <stdint.h>')
# print type info for each encoding type
print(type_encoding_enum('darm_enctype', instr_types))
# print all instruction labels
print(instruction_names_enum(open('instructions.txt')))
count = len(instruction_names(open('instructions.txt')))
print('extern const char *darm_mnemonics[%d];' % count)
print('extern const char *darm_enctypes[%d];' % len(instr_types))
print('extern const char *darm_registers[16];')
    # define constants b0 up to b11111111
for x in range(256):
print('#define %s %d' % (bin(x)[1:], x))
# define partial constants with leading zeroes, such as 0b0001
for x in range(2, 7):
for y in itertools.product('01', repeat=x):
num = ''.join(y)
print('#define b%s %d' % (num, int(num, 2)))
print('#endif')
#
# thumb-tbl.h
#
magic_open('thumb-tbl.h')
# print required headers
print('#ifndef __THUMB_TBL__')
print('#define __THUMB_TBL__')
print('#include <stdint.h>')
print('#include "darm-tbl.h"')
# print some required definitions
print('extern darm_enctype_t thumb_instr_types[256];')
print('extern darm_instr_t thumb_instr_labels[256];')
print('#endif')
#
# armv7-tbl.h
#
magic_open('armv7-tbl.h')
# print required headers
print('#ifndef __ARMV7_TBL__')
print('#define __ARMV7_TBL__')
print('#include <stdint.h>')
print('#include "darm-tbl.h"')
# print some required definitions
print('extern darm_enctype_t armv7_instr_types[256];')
print('extern darm_enctype_t thumb2_instr_types[256];')
def type_lut(name, bits):
print('darm_instr_t type_%s_instr_lookup[%d];' % (name, 2**bits))
print('extern darm_instr_t armv7_instr_labels[256];')
type_lut('shift', 4)
type_lut('brnchmisc', 4)
type_lut('opless', 3)
type_lut('uncond2', 3)
type_lut('mul', 3)
type_lut('stack0', 5)
type_lut('stack1', 3)
type_lut('stack2', 3)
type_lut('bits', 2)
type_lut('pas', 6)
type_lut('sat', 2)
type_lut('sync', 4)
type_lut('pusr', 4)
print('const char *armv7_format_strings[%d][3];' % instrcnt)
print('#endif')
#
# darm-tbl.c
#
magic_open('darm-tbl.c')
print('#include <stdio.h>')
print('#include <stdint.h>')
print('#include "darm-tbl.h"')
print(instruction_names_table(open('instructions.txt')))
print(type_encoding_table('darm_enctypes', instr_types))
reg = 'r0 r1 r2 r3 r4 r5 r6 r7 r8 r9 r10 FP IP SP LR PC'
print(string_table('darm_registers', reg.split()))
#
# thumb-tbl.c
#
magic_open('thumb-tbl.c')
print('#include <stdio.h>')
print('#include <stdint.h>')
print('#include "thumb-tbl.h"')
# print a table containing all the types of instructions
print(instruction_types_table(thumb_table, 'thumb'))
# print a table containing the instruction label for each entry
print(instruction_names_index_table(thumb_table, 'thumb'))
#
# armv7-tbl.c
#
magic_open('armv7-tbl.c')
print('#include <stdio.h>')
print('#include <stdint.h>')
print('#include "armv7-tbl.h"')
# print a table containing all the types of instructions
print(instruction_types_table(armv7_table, 'armv7'))
# print a table containing the instruction label for each entry
print(instruction_names_index_table(armv7_table, 'armv7'))
# print a lookup table for the shift type (which is a sub-type of
# the dst-src type), the None types represent instructions of the
# STR family, which we'll handle in the next handler, T_STR.
t_shift = {
0b0000: 'lsl',
0b0001: 'lsl',
0b0010: 'lsr',
0b0011: 'lsr',
0b0100: 'asr',
0b0101: 'asr',
0b0110: 'ror',
0b0111: 'ror',
0b1000: 'lsl',
0b1001: None,
0b1010: 'lsr',
0b1011: None,
0b1100: 'asr',
0b1101: None,
0b1110: 'ror',
0b1111: None}
print(type_lookup_table('type_shift',
*[t_shift[x] for x in range(16)]))
t4 = 'msr', 'bx', 'bxj', 'blx', None, 'qsub', None, 'bkpt', 'smlaw', \
None, 'smulw', None, 'smlaw', None, 'smulw', None
print(type_lookup_table('type_brnchmisc', *t4))
t_opless = 'nop', 'yield', 'wfe', 'wfi', 'sev', None, None, None
print(type_lookup_table('type_opless', *t_opless))
t_uncond2 = None, 'clrex', None, None, 'dsb', 'dmb', 'isb', None
print(type_lookup_table('type_uncond2', *t_uncond2))
t_mul = 'mul', 'mla', 'umaal', 'mls', 'umull', 'umlal', \
'smull', 'smlal'
print(type_lookup_table('type_mul', *t_mul))
t_stack0 = {
0b00000: 'str',
0b00001: 'ldr',
0b00010: 'strt',
0b00011: 'ldrt',
0b00100: 'strb',
0b00101: 'ldrb',
0b00110: 'strbt',
0b00111: 'ldrbt',
0b01000: 'str',
0b01001: 'ldr',
0b01010: 'strt',
0b01011: 'ldrt',
0b01100: 'strb',
0b01101: 'ldrb',
0b01110: 'strbt',
0b01111: 'ldrbt',
0b10000: 'str',
0b10001: 'ldr',
0b10010: 'str',
0b10011: 'ldr',
0b10100: 'strb',
0b10101: 'ldrb',
0b10110: 'strb',
0b10111: 'ldrb',
0b11000: 'str',
0b11001: 'ldr',
0b11010: 'str',
0b11011: 'ldr',
0b11100: 'strb',
0b11101: 'ldrb',
0b11110: 'strb',
0b11111: 'ldrb',
}
print(type_lookup_table('type_stack0',
*[t_stack0[x] for x in range(32)]))
t_stack1 = None, None, 'strht', 'ldrht', None, 'ldrsbt', \
None, 'ldrsht'
print(type_lookup_table('type_stack1', *t_stack1))
t_stack2 = None, None, 'strh', 'ldrh', 'ldrd', 'ldrsb', \
'strd', 'ldrsh'
print(type_lookup_table('type_stack2', *t_stack2))
print(type_lookup_table('type_bits', None, 'sbfx', 'bfi', 'ubfx'))
t_pas = {
0b000: 'add16',
0b001: 'asx',
0b010: 'sax',
0b011: 'sub16',
0b100: 'add8',
0b111: 'sub8',
}
t_pas_prefix = 's', 'q', 'sh', 'u', 'uq', 'uh'
t_pas = dict(((1 + (idx > 2) + idx) * 2**3 + k, x + v)
for idx, x in enumerate(t_pas_prefix)
for k, v in t_pas.items())
print(type_lookup_table('type_pas',
*[t_pas.get(x) for x in range(64)]))
print(type_lookup_table('type_sat', 'qadd', 'qsub', 'qdadd', 'qdsub'))
t_sync = 'swp', None, None, None, 'swpb', None, None, None, \
'strex', 'ldrex', 'strexd', 'ldrexd', 'strexb', 'ldrexb', \
'strexh', 'ldrexh'
print(type_lookup_table('type_sync', *t_sync))
t_pusr = 'sxtab16', 'sxtb16', None, None, 'sxtab', 'sxtb', \
'sxtah', 'sxth', 'uxtab16', 'uxtb16', None, None, \
'uxtab', 'uxtb', 'uxtah', 'uxth'
print(type_lookup_table('type_pusr', *t_pusr))
lines = []
for instr, fmtstr in fmtstrs.items():
fmtstr = ', '.join('"%s"' % x for x in set(fmtstr))
lines.append(' [I_%s] = {%s},' % (instr, fmtstr))
print('const char *armv7_format_strings[%d][3] = {' % instrcnt)
print('\n'.join(sorted(lines)))
print('};')
| radare/darm | darmgen.py | Python | bsd-3-clause | 24,307 |
"""
Various complex queries that have been problematic in the past.
"""
from __future__ import unicode_literals
import threading
from django.db import models
from django.utils import six
from django.utils.encoding import python_2_unicode_compatible
class DumbCategory(models.Model):
pass
class ProxyCategory(DumbCategory):
class Meta:
proxy = True
class NamedCategory(DumbCategory):
name = models.CharField(max_length=10)
@python_2_unicode_compatible
class Tag(models.Model):
name = models.CharField(max_length=10)
parent = models.ForeignKey('self', blank=True, null=True,
related_name='children')
category = models.ForeignKey(NamedCategory, null=True, default=None)
class Meta:
ordering = ['name']
def __str__(self):
return self.name
@python_2_unicode_compatible
class Note(models.Model):
note = models.CharField(max_length=100)
misc = models.CharField(max_length=10)
class Meta:
ordering = ['note']
def __str__(self):
return self.note
def __init__(self, *args, **kwargs):
super(Note, self).__init__(*args, **kwargs)
# Regression for #13227 -- having an attribute that
# is unpickleable doesn't stop you from cloning queries
# that use objects of that type as an argument.
self.lock = threading.Lock()
@python_2_unicode_compatible
class Annotation(models.Model):
name = models.CharField(max_length=10)
tag = models.ForeignKey(Tag)
notes = models.ManyToManyField(Note)
def __str__(self):
return self.name
@python_2_unicode_compatible
class ExtraInfo(models.Model):
info = models.CharField(max_length=100)
note = models.ForeignKey(Note)
value = models.IntegerField(null=True)
class Meta:
ordering = ['info']
def __str__(self):
return self.info
@python_2_unicode_compatible
class Author(models.Model):
name = models.CharField(max_length=10)
num = models.IntegerField(unique=True)
extra = models.ForeignKey(ExtraInfo)
class Meta:
ordering = ['name']
def __str__(self):
return self.name
@python_2_unicode_compatible
class Item(models.Model):
name = models.CharField(max_length=10)
created = models.DateTimeField()
modified = models.DateTimeField(blank=True, null=True)
tags = models.ManyToManyField(Tag, blank=True, null=True)
creator = models.ForeignKey(Author)
note = models.ForeignKey(Note)
class Meta:
ordering = ['-note', 'name']
def __str__(self):
return self.name
@python_2_unicode_compatible
class Report(models.Model):
name = models.CharField(max_length=10)
creator = models.ForeignKey(Author, to_field='num', null=True)
def __str__(self):
return self.name
@python_2_unicode_compatible
class Ranking(models.Model):
rank = models.IntegerField()
author = models.ForeignKey(Author)
class Meta:
# A complex ordering specification. Should stress the system a bit.
ordering = ('author__extra__note', 'author__name', 'rank')
def __str__(self):
return '%d: %s' % (self.rank, self.author.name)
@python_2_unicode_compatible
class Cover(models.Model):
title = models.CharField(max_length=50)
item = models.ForeignKey(Item)
class Meta:
ordering = ['item']
def __str__(self):
return self.title
@python_2_unicode_compatible
class Number(models.Model):
num = models.IntegerField()
def __str__(self):
return six.text_type(self.num)
# Symmetrical m2m field with a normal field using the reverse accessor name
# ("valid").
class Valid(models.Model):
valid = models.CharField(max_length=10)
parent = models.ManyToManyField('self')
class Meta:
ordering = ['valid']
# Some funky cross-linked models for testing a couple of infinite recursion
# cases.
class X(models.Model):
y = models.ForeignKey('Y')
class Y(models.Model):
x1 = models.ForeignKey(X, related_name='y1')
# Some models with a cycle in the default ordering. This would be bad if we
# didn't catch the infinite loop.
class LoopX(models.Model):
y = models.ForeignKey('LoopY')
class Meta:
ordering = ['y']
class LoopY(models.Model):
x = models.ForeignKey(LoopX)
class Meta:
ordering = ['x']
class LoopZ(models.Model):
z = models.ForeignKey('self')
class Meta:
ordering = ['z']
# A model and custom default manager combination.
class CustomManager(models.Manager):
def get_queryset(self):
qs = super(CustomManager, self).get_queryset()
return qs.filter(public=True, tag__name='t1')
@python_2_unicode_compatible
class ManagedModel(models.Model):
data = models.CharField(max_length=10)
tag = models.ForeignKey(Tag)
public = models.BooleanField(default=True)
objects = CustomManager()
normal_manager = models.Manager()
def __str__(self):
return self.data
# An inter-related setup with multiple paths from Child to Detail.
class Detail(models.Model):
data = models.CharField(max_length=10)
class MemberManager(models.Manager):
def get_queryset(self):
return super(MemberManager, self).get_queryset().select_related("details")
class Member(models.Model):
name = models.CharField(max_length=10)
details = models.OneToOneField(Detail, primary_key=True)
objects = MemberManager()
class Child(models.Model):
person = models.OneToOneField(Member, primary_key=True)
parent = models.ForeignKey(Member, related_name="children")
# Custom primary keys interfered with ordering in the past.
class CustomPk(models.Model):
name = models.CharField(max_length=10, primary_key=True)
extra = models.CharField(max_length=10)
class Meta:
ordering = ['name', 'extra']
class Related(models.Model):
custom = models.ForeignKey(CustomPk)
# An inter-related setup with a model subclass that has a nullable
# path to another model, and a return path from that model.
@python_2_unicode_compatible
class Celebrity(models.Model):
name = models.CharField("Name", max_length=20)
greatest_fan = models.ForeignKey("Fan", null=True, unique=True)
def __str__(self):
return self.name
class TvChef(Celebrity):
pass
class Fan(models.Model):
fan_of = models.ForeignKey(Celebrity)
# Multiple foreign keys
@python_2_unicode_compatible
class LeafA(models.Model):
data = models.CharField(max_length=10)
def __str__(self):
return self.data
class LeafB(models.Model):
data = models.CharField(max_length=10)
class Join(models.Model):
a = models.ForeignKey(LeafA)
b = models.ForeignKey(LeafB)
@python_2_unicode_compatible
class ReservedName(models.Model):
name = models.CharField(max_length=20)
order = models.IntegerField()
def __str__(self):
return self.name
# A simpler shared-foreign-key setup that can expose some problems.
@python_2_unicode_compatible
class SharedConnection(models.Model):
data = models.CharField(max_length=10)
def __str__(self):
return self.data
class PointerA(models.Model):
connection = models.ForeignKey(SharedConnection)
class PointerB(models.Model):
connection = models.ForeignKey(SharedConnection)
# Multi-layer ordering
@python_2_unicode_compatible
class SingleObject(models.Model):
name = models.CharField(max_length=10)
class Meta:
ordering = ['name']
def __str__(self):
return self.name
class RelatedObject(models.Model):
single = models.ForeignKey(SingleObject, null=True)
f = models.IntegerField(null=True)
class Meta:
ordering = ['single']
@python_2_unicode_compatible
class Plaything(models.Model):
name = models.CharField(max_length=10)
others = models.ForeignKey(RelatedObject, null=True)
class Meta:
ordering = ['others']
def __str__(self):
return self.name
class Article(models.Model):
name = models.CharField(max_length=20)
created = models.DateTimeField()
@python_2_unicode_compatible
class Food(models.Model):
name = models.CharField(max_length=20, unique=True)
def __str__(self):
return self.name
@python_2_unicode_compatible
class Eaten(models.Model):
food = models.ForeignKey(Food, to_field="name", null=True)
meal = models.CharField(max_length=20)
def __str__(self):
return "%s at %s" % (self.food, self.meal)
@python_2_unicode_compatible
class Node(models.Model):
num = models.IntegerField(unique=True)
parent = models.ForeignKey("self", to_field="num", null=True)
def __str__(self):
return "%s" % self.num
# Bug #12252
@python_2_unicode_compatible
class ObjectA(models.Model):
name = models.CharField(max_length=50)
def __str__(self):
return self.name
@python_2_unicode_compatible
class ObjectB(models.Model):
name = models.CharField(max_length=50)
objecta = models.ForeignKey(ObjectA)
num = models.PositiveSmallIntegerField()
def __str__(self):
return self.name
@python_2_unicode_compatible
class ObjectC(models.Model):
name = models.CharField(max_length=50)
objecta = models.ForeignKey(ObjectA)
objectb = models.ForeignKey(ObjectB)
def __str__(self):
return self.name
@python_2_unicode_compatible
class SimpleCategory(models.Model):
name = models.CharField(max_length=15)
def __str__(self):
return self.name
@python_2_unicode_compatible
class SpecialCategory(SimpleCategory):
special_name = models.CharField(max_length=15)
def __str__(self):
return self.name + " " + self.special_name
@python_2_unicode_compatible
class CategoryItem(models.Model):
category = models.ForeignKey(SimpleCategory)
def __str__(self):
return "category item: " + str(self.category)
@python_2_unicode_compatible
class OneToOneCategory(models.Model):
new_name = models.CharField(max_length=15)
category = models.OneToOneField(SimpleCategory)
def __str__(self):
return "one2one " + self.new_name
class NullableName(models.Model):
name = models.CharField(max_length=20, null=True)
class Meta:
ordering = ['id']
class ModelD(models.Model):
name = models.TextField()
class ModelC(models.Model):
name = models.TextField()
class ModelB(models.Model):
name = models.TextField()
c = models.ForeignKey(ModelC)
class ModelA(models.Model):
name = models.TextField()
b = models.ForeignKey(ModelB, null=True)
d = models.ForeignKey(ModelD)
@python_2_unicode_compatible
class Job(models.Model):
name = models.CharField(max_length=20, unique=True)
def __str__(self):
return self.name
class JobResponsibilities(models.Model):
job = models.ForeignKey(Job, to_field='name')
responsibility = models.ForeignKey('Responsibility', to_field='description')
@python_2_unicode_compatible
class Responsibility(models.Model):
description = models.CharField(max_length=20, unique=True)
jobs = models.ManyToManyField(Job, through=JobResponsibilities,
related_name='responsibilities')
def __str__(self):
return self.description
# Models for disjunction join promotion low level testing.
class FK1(models.Model):
f1 = models.TextField()
f2 = models.TextField()
class FK2(models.Model):
f1 = models.TextField()
f2 = models.TextField()
class FK3(models.Model):
f1 = models.TextField()
f2 = models.TextField()
class BaseA(models.Model):
a = models.ForeignKey(FK1, null=True)
b = models.ForeignKey(FK2, null=True)
c = models.ForeignKey(FK3, null=True)
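# A query of the kind these models exercise (sketch, not part of the test
# suite): OR-ing filters across two of the nullable relations, e.g.
# BaseA.objects.filter(models.Q(a__f1='x') | models.Q(b__f2='y')), makes
# the ORM promote both joins to LEFT OUTER JOINs so rows matching only
# one branch are not dropped.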
@python_2_unicode_compatible
class Identifier(models.Model):
name = models.CharField(max_length=100)
def __str__(self):
return self.name
class Program(models.Model):
identifier = models.OneToOneField(Identifier)
class Channel(models.Model):
programs = models.ManyToManyField(Program)
identifier = models.OneToOneField(Identifier)
class Book(models.Model):
title = models.TextField()
chapter = models.ForeignKey('Chapter')
class Chapter(models.Model):
title = models.TextField()
paragraph = models.ForeignKey('Paragraph')
class Paragraph(models.Model):
text = models.TextField()
page = models.ManyToManyField('Page')
class Page(models.Model):
text = models.TextField()
class MyObject(models.Model):
parent = models.ForeignKey('self', null=True, blank=True, related_name='children')
data = models.CharField(max_length=100)
created_at = models.DateTimeField(auto_now_add=True)
# Models for #17600 regressions
@python_2_unicode_compatible
class Order(models.Model):
id = models.IntegerField(primary_key=True)
class Meta:
ordering = ('pk', )
def __str__(self):
return '%s' % self.pk
@python_2_unicode_compatible
class OrderItem(models.Model):
order = models.ForeignKey(Order, related_name='items')
status = models.IntegerField()
class Meta:
ordering = ('pk', )
def __str__(self):
return '%s' % self.pk
class BaseUser(models.Model):
pass
@python_2_unicode_compatible
class Task(models.Model):
title = models.CharField(max_length=10)
owner = models.ForeignKey(BaseUser, related_name='owner')
creator = models.ForeignKey(BaseUser, related_name='creator')
def __str__(self):
return self.title
@python_2_unicode_compatible
class Staff(models.Model):
name = models.CharField(max_length=10)
def __str__(self):
return self.name
@python_2_unicode_compatible
class StaffUser(BaseUser):
staff = models.OneToOneField(Staff, related_name='user')
def __str__(self):
return self.staff
| denisenkom/django | tests/queries/models.py | Python | bsd-3-clause | 13,742 |
import json
import uuid
from datetime import datetime, timedelta
from django.conf import settings
from django.test import TestCase
from django.utils.http import urlencode
from casexml.apps.case.mock import CaseBlock
from dimagi.utils.parsing import json_format_datetime
from corehq.apps.api.resources import v0_4
from corehq.apps.hqcase.utils import submit_case_blocks
from corehq.apps.es.tests.utils import es_test
from corehq.elastic import get_es_new, send_to_elasticsearch
from corehq.form_processor.tests.utils import create_form_for_test
from corehq.apps.es.tests.utils import ElasticTestMixin
from corehq.pillows.mappings.xform_mapping import XFORM_INDEX_INFO
from corehq.pillows.reportxform import transform_xform_for_report_forms_index
from corehq.pillows.xform import transform_xform_for_elasticsearch
from corehq.util.elastic import reset_es_index
from pillowtop.es_utils import initialize_index_and_mapping
from .utils import APIResourceTest, FakeFormESView
@es_test
class TestXFormInstanceResource(APIResourceTest):
"""
Tests the XFormInstanceResource, currently only v0_4
TODO: Provide tests for each version, especially for those aspects
which differ between versions. They should call into reusable tests
for the functionality that is not different.
"""
resource = v0_4.XFormInstanceResource
def setUp(self):
self.es = get_es_new()
reset_es_index(XFORM_INDEX_INFO)
initialize_index_and_mapping(self.es, XFORM_INDEX_INFO)
def test_fetching_xform_cases(self):
# Create an xform that touches a case
case_id = uuid.uuid4().hex
form = submit_case_blocks(
CaseBlock.deprecated_init(
case_id=case_id,
create=True,
).as_text(),
self.domain.name
)[0]
send_to_elasticsearch('forms', transform_xform_for_elasticsearch(form.to_json()))
self.es.indices.refresh(XFORM_INDEX_INFO.index)
# Fetch the xform through the API
response = self._assert_auth_get_resource(self.single_endpoint(form.form_id) + "?cases__full=true")
self.assertEqual(response.status_code, 200)
cases = json.loads(response.content)['cases']
# Confirm that the case appears in the resource
self.assertEqual(len(cases), 1)
self.assertEqual(cases[0]['id'], case_id)
def _send_forms(self, forms):
# list of form tuples [(xmlns, received_on)]
to_ret = []
for xmlns, received_on in forms:
backend_form = create_form_for_test(
xmlns=xmlns or 'fake-xmlns',
domain=self.domain.name,
received_on=received_on or datetime.utcnow(),
edited_on=datetime.utcnow(),
form_data={
'#type': 'fake-type',
'@xmlns': xmlns or 'fake-xmlns',
'meta': {'userID': 'metadata-user-id'},
},
auth_context={
'user_id': 'auth-user-id',
'domain': self.domain.name,
'authenticated': True,
},
)
to_ret.append(backend_form)
send_to_elasticsearch('forms', transform_xform_for_elasticsearch(backend_form.to_json()))
self.es.indices.refresh(XFORM_INDEX_INFO.index)
return to_ret
def test_get_list(self):
"""
Any form in the appropriate domain should be in the list from the API.
"""
xmlns = 'http://xmlns1'
received_on = datetime(2019, 1, 2)
self._send_forms([(xmlns, received_on)])
response = self._assert_auth_get_resource(self.list_endpoint)
self.assertEqual(response.status_code, 200)
api_forms = json.loads(response.content)['objects']
self.assertEqual(len(api_forms), 1)
api_form = api_forms[0]
self.assertEqual(api_form['form']['@xmlns'], xmlns)
self.assertEqual(api_form['received_on'], json_format_datetime(received_on))
self.assertEqual(api_form['metadata']['userID'], 'metadata-user-id')
self.assertEqual(api_form['edited_by_user_id'], 'auth-user-id')
def test_get_by_xmlns(self):
xmlns1 = 'https://xmlns1'
self._send_forms([(xmlns1, None), ('https://xmlns2', None)])
response = self._assert_auth_get_resource(
'%s?%s' % (self.list_endpoint, urlencode({'xmlns': xmlns1})))
self.assertEqual(response.status_code, 200)
api_forms = json.loads(response.content)['objects']
self.assertEqual(len(api_forms), 1)
api_form = api_forms[0]
self.assertEqual(api_form['form']['@xmlns'], xmlns1)
def test_get_by_received_on(self):
date = datetime(2019, 1, 2)
xmlns1 = 'https://xmlns1'
self._send_forms([(xmlns1, date), (None, datetime(2019, 3, 1))])
params = {
'received_on_start': datetime(2019, 1, 1).strftime("%Y-%m-%dT%H:%M:%S"),
'received_on_end': datetime(2019, 1, 4).strftime("%Y-%m-%dT%H:%M:%S"),
}
response = self._assert_auth_get_resource(
'%s?%s' % (self.list_endpoint, urlencode(params)))
self.assertEqual(response.status_code, 200)
api_forms = json.loads(response.content)['objects']
self.assertEqual(len(api_forms), 1)
api_form = api_forms[0]
self.assertEqual(api_form['form']['@xmlns'], xmlns1)
def test_received_on_order(self):
date1 = datetime(2019, 1, 2)
xmlns1 = 'https://xmlns1'
date2 = datetime(2019, 1, 5)
xmlns2 = 'https://xmlns2'
self._send_forms([(xmlns1, date1), (xmlns2, date2)])
# test asc order
response = self._assert_auth_get_resource('%s?order_by=received_on' % self.list_endpoint)
self.assertEqual(response.status_code, 200)
api_forms = json.loads(response.content)['objects']
self.assertEqual(len(api_forms), 2)
api_form = api_forms[0]
self.assertEqual(api_form['form']['@xmlns'], xmlns1)
# test desc order
response = self._assert_auth_get_resource('%s?order_by=-received_on' % self.list_endpoint)
self.assertEqual(response.status_code, 200)
api_forms = json.loads(response.content)['objects']
self.assertEqual(len(api_forms), 2)
api_form = api_forms[0]
self.assertEqual(api_form['form']['@xmlns'], xmlns2)
def test_get_by_indexed_on(self):
date1 = datetime(2019, 1, 2)
xmlns = 'https://xmlns1'
date2 = datetime(2019, 1, 5)
self._send_forms([(xmlns, date1), (xmlns, date2)])
yesterday = (datetime.utcnow() - timedelta(days=1)).strftime("%Y-%m-%dT%H:%M:%S")
response = self._assert_auth_get_resource(
'%s?%s' % (self.list_endpoint, urlencode({'indexed_on_start': yesterday}))
)
self.assertEqual(response.status_code, 200)
self.assertEqual(len(json.loads(response.content)['objects']), 2)
response = self._assert_auth_get_resource(
'%s?%s' % (self.list_endpoint, urlencode({'indexed_on_end': yesterday}))
)
self.assertEqual(response.status_code, 200)
self.assertEqual(len(json.loads(response.content)['objects']), 0)
tomorrow = (datetime.utcnow() + timedelta(days=1)).strftime("%Y-%m-%dT%H:%M:%S")
response = self._assert_auth_get_resource(
'%s?%s' % (self.list_endpoint, urlencode({'indexed_on_start': tomorrow}))
)
self.assertEqual(response.status_code, 200)
self.assertEqual(len(json.loads(response.content)['objects']), 0)
response = self._assert_auth_get_resource(
'%s?%s' % (self.list_endpoint, urlencode({'indexed_on_end': tomorrow}))
)
self.assertEqual(response.status_code, 200)
self.assertEqual(len(json.loads(response.content)['objects']), 2)
def test_archived_forms(self):
xmlns1 = 'https://xmlns1'
xmlns2 = 'https://xmlns2'
forms = self._send_forms([(xmlns1, None), (xmlns2, None)])
# archive
update = forms[0].to_json()
update['doc_type'] = 'xformarchived'
send_to_elasticsearch('forms', transform_xform_for_elasticsearch(update))
self.es.indices.refresh(XFORM_INDEX_INFO.index)
# archived form should not be included by default
response = self._assert_auth_get_resource(self.list_endpoint)
self.assertEqual(response.status_code, 200)
api_forms = json.loads(response.content)['objects']
self.assertEqual(len(api_forms), 1)
api_form = api_forms[0]
self.assertEqual(api_form['form']['@xmlns'], xmlns2)
# archived form should be included
response = self._assert_auth_get_resource(
'%s?%s' % (self.list_endpoint, urlencode({'include_archived': 'true'})))
self.assertEqual(response.status_code, 200)
api_forms = json.loads(response.content)['objects']
self.assertEqual(len(api_forms), 2)
@es_test
class TestXFormInstanceResourceQueries(APIResourceTest, ElasticTestMixin):
"""
Tests that urlparameters get converted to expected ES queries.
"""
resource = v0_4.XFormInstanceResource
def _test_es_query(self, url_params, expected_query):
fake_xform_es = FakeFormESView()
prior_run_query = fake_xform_es.run_query
# A bit of a hack since none of Python's mocking libraries seem to do basic spies easily...
def mock_run_query(es_query):
actual = es_query['query']['bool']['filter']
self.checkQuery(actual, expected_query, is_raw_query=True)
return prior_run_query(es_query)
fake_xform_es.run_query = mock_run_query
v0_4.MOCK_XFORM_ES = fake_xform_es
response = self._assert_auth_get_resource('%s?%s' % (self.list_endpoint, urlencode(url_params)))
self.assertEqual(response.status_code, 200)
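    # A minimal sketch of the same spy using the mock library (assumptions:
    # the mock package is available here and FakeFormESView is the fake view
    # imported above). wraps= records calls while still delegating to the
    # real method, which is what the hand-rolled hook above does:
    #
    #     import mock
    #     fake_es = FakeFormESView()
    #     with mock.patch.object(fake_es, 'run_query',
    #                            wraps=fake_es.run_query) as spy:
    #         fake_es.run_query({'query': {}})
    #         assert spy.call_count == 1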
def test_get_list_xmlns(self):
expected = [
{'term': {'domain.exact': 'qwerty'}},
{'term': {'doc_type': 'xforminstance'}},
{'term': {'xmlns.exact': 'http://XMLNS'}},
{'match_all': {}}
]
self._test_es_query({'xmlns': 'http://XMLNS'}, expected)
def test_get_list_xmlns_exact(self):
expected = [
{'term': {'domain.exact': 'qwerty'}},
{'term': {'doc_type': 'xforminstance'}},
{'term': {'xmlns.exact': 'http://XMLNS'}},
{'match_all': {}}
]
self._test_es_query({'xmlns.exact': 'http://XMLNS'}, expected)
def test_get_list_received_on(self):
"""
        Forms can be filtered by passing ?received_on_start=<date>&received_on_end=<date>
        Since we are not testing ElasticSearch itself, we only test that the proper query is generated.
"""
start_date = datetime(1969, 6, 14)
end_date = datetime(2011, 1, 2)
expected = [
{'term': {'domain.exact': 'qwerty'}},
{'term': {'doc_type': 'xforminstance'}},
{'range': {'received_on': {'gte': start_date.isoformat(), 'lte': end_date.isoformat()}}},
{'match_all': {}}
]
params = {
'received_on_end': end_date.isoformat(),
'received_on_start': start_date.isoformat(),
}
self._test_es_query(params, expected)
def test_get_list_ordering(self):
'''
        Forms can be ordered ascending or descending on received_on; the
        default is ascending.
'''
fake_xform_es = FakeFormESView()
# A bit of a hack since none of Python's mocking libraries seem to do basic spies easily...
prior_run_query = fake_xform_es.run_query
prior_count_query = fake_xform_es.count_query
queries = []
def mock_run_query(es_query):
queries.append(es_query)
return prior_run_query(es_query)
def mock_count_query(es_query):
queries.append(es_query)
return prior_count_query(es_query)
fake_xform_es.run_query = mock_run_query
fake_xform_es.count_query = mock_count_query
v0_4.MOCK_XFORM_ES = fake_xform_es
# Runs *2* queries
response = self._assert_auth_get_resource('%s?order_by=received_on' % self.list_endpoint)
self.assertEqual(response.status_code, 200)
self.assertEqual(queries[0]['sort'], [{'received_on': {'missing': '_first', 'order': 'asc'}}])
# Runs *2* queries
response = self._assert_auth_get_resource('%s?order_by=-received_on' % self.list_endpoint)
self.assertEqual(response.status_code, 200)
self.assertEqual(queries[2]['sort'], [{'received_on': {'missing': '_last', 'order': 'desc'}}])
def test_get_list_archived(self):
expected = [
{
"term": {
"domain.exact": "qwerty"
}
},
{
"bool": {
"should": [
{"term": {"doc_type": "xforminstance"}},
{"term": {"doc_type": "xformarchived"}}
]
}
},
{
"match_all": {}
}
]
self._test_es_query({'include_archived': 'true'}, expected)
class TestReportPillow(TestCase):
def test_xformPillowTransform(self):
"""
        Test to make sure the report xform and regular xform pillows strip the
        appVersion dict to match the mappings
"""
transform_functions = [transform_xform_for_report_forms_index, transform_xform_for_elasticsearch]
bad_appVersion = {
"_id": "foo",
"domain": settings.ES_XFORM_FULL_INDEX_DOMAINS[0],
'received_on': "2013-09-20T01:33:12Z",
"form": {
"meta": {
"@xmlns": "http://openrosa.org/jr/xforms",
"username": "someuser",
"instanceID": "foo",
"userID": "some_user_id",
"timeEnd": "2013-09-20T01:33:12Z",
"appVersion": {
"@xmlns": "http://commcarehq.org/xforms",
"#text": "CCODK:\"2.5.1\"(11126). v236 CC2.5b[11126] on April-15-2013"
},
"timeStart": "2013-09-19T01:13:20Z",
"deviceID": "somedevice"
}
}
}
for fn in transform_functions:
cleaned = fn(bad_appVersion)
self.assertFalse(isinstance(cleaned['form']['meta']['appVersion'], dict))
self.assertTrue(isinstance(cleaned['form']['meta']['appVersion'], str))
            self.assertEqual(cleaned['form']['meta']['appVersion'], "CCODK:\"2.5.1\"(11126). v236 CC2.5b[11126] on April-15-2013")
| dimagi/commcare-hq | corehq/apps/api/tests/form_resources.py | Python | bsd-3-clause | 14,905 |
'''
test_custom_cache.py: Testing custom cache function
Copyright (c) 2016-2017, Vanessa Sochat. All rights reserved.
"Singularity" Copyright (c) 2016, The Regents of the University of California,
through Lawrence Berkeley National Laboratory (subject to receipt of any
required approvals from the U.S. Dept. of Energy). All rights reserved.
This software is licensed under a customized 3-clause BSD license. Please
consult LICENSE file distributed with the sources of this project regarding
your rights to use or distribute this software.
NOTICE. This Software was developed under funding from the U.S. Department of
Energy and the U.S. Government consequently retains certain rights. As such,
the U.S. Government has been granted for itself and others acting on its
behalf a paid-up, nonexclusive, irrevocable, worldwide license in the Software
to reproduce, distribute copies to the public, prepare derivative works, and
perform publicly and display publicly, and to permit others to do so.
'''
import os
import re
import sys
sys.path.append('..') # base_directory
import unittest
from unittest import TestCase
import shutil
import tempfile
VERSION = sys.version_info[0]
print("*** PYTHON VERSION %s CLIENT TESTING START ***" %(VERSION))
class TestCustomCache(TestCase):
def setUp(self):
self.tmpdir = tempfile.mkdtemp()
self.custom_cache = '%s/cache' %(self.tmpdir)
os.environ['SINGULARITY_CACHEDIR'] = self.custom_cache
os.environ['SINGULARITY_ROOTFS'] = self.tmpdir
print("\n---START----------------------------------------")
def tearDown(self):
shutil.rmtree(self.tmpdir)
print("---END------------------------------------------")
def test_get_cache_custom(self):
        '''test_get_cache_custom checks that the cache location honors
        the SINGULARITY_CACHEDIR environment variable set in setUp
        '''
print("Testing get_cache with environment set")
from defaults import SINGULARITY_CACHE
self.assertEqual(self.custom_cache,SINGULARITY_CACHE)
self.assertTrue(os.path.exists(SINGULARITY_CACHE))
if __name__ == '__main__':
unittest.main()
| Trophime/singularity | libexec/python/tests/test_custom_cache.py | Python | bsd-3-clause | 2,123 |
#!/usr/bin/env python
import os
import sys
from os.path import join
from distutils.sysconfig import get_python_inc
def configuration(parent_package='',top_path=None):
from numpy.distutils.misc_util import Configuration
config = Configuration('special', parent_package, top_path)
define_macros = []
if sys.platform=='win32':
# define_macros.append(('NOINFINITIES',None))
# define_macros.append(('NONANS',None))
define_macros.append(('_USE_MATH_DEFINES',None))
# C libraries
config.add_library('sc_c_misc',sources=[join('c_misc','*.c')])
config.add_library('sc_cephes',sources=[join('cephes','*.c')],
include_dirs=[get_python_inc()],
macros=define_macros)
# Fortran libraries
config.add_library('sc_mach',sources=[join('mach','*.f')],
config_fc={'noopt':(__file__,1)})
    config.add_library('sc_toms',sources=[join('toms','*.f')])
    config.add_library('sc_amos',sources=[join('amos','*.f')])
config.add_library('sc_cdf',sources=[join('cdflib','*.f')])
config.add_library('sc_specfun',sources=[join('specfun','*.f')])
# Extension _cephes
sources = ['_cephesmodule.c', 'amos_wrappers.c', 'specfun_wrappers.c',
'toms_wrappers.c','cdf_wrappers.c','ufunc_extras.c']
config.add_extension('_cephes', sources=sources,
libraries=['sc_amos','sc_toms','sc_c_misc','sc_cephes','sc_mach',
'sc_cdf', 'sc_specfun'],
depends=["ufunc_extras.h", "cephes.h",
"amos_wrappers.h", "toms_wrappers.h",
"cdf_wrappers.h", "specfun_wrappers.h",
"c_misc/misc.h", "cephes_doc.h",
"cephes/mconf.h", "cephes/cephes_names.h"],
define_macros = define_macros
)
# Extension specfun
config.add_extension('specfun',
sources=['specfun.pyf'],
f2py_options=['--no-wrap-functions'],
define_macros=[],
libraries=['sc_specfun'])
config.add_data_dir('tests')
return config
if __name__ == '__main__':
from numpy.distutils.core import setup
setup(**configuration(top_path='').todict())
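# Usage sketch (illustrative; exact flags depend on the local NumPy/SciPy
# build setup): this numpy.distutils configuration is typically built with
#     python setup.py build_ext --inplace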
| huard/scipy-work | scipy/special/setup.py | Python | bsd-3-clause | 2,409 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
""" Little utility that help automate tests and optimization with XDS.
"""
__version__ = "0.4.5"
__date__ = "12-06-2012"
__author__ = "Pierre Legrand (pierre.legrand _at_ synchrotron-soleil.fr)"
__copyright__ = "Copyright (c) 2004-2012 Pierre Legrand"
__license__ = "New BSD http://www.opensource.org/licenses/bsd-license.php"
import sys
import shutil
from xupy import saveLastVersion, xdsInp2Param, \
getProfilRefPar, run_xds, LP_names
if __name__ == '__main__':
xp = {}
#xp = xdsInp2Param()
#xp = XParam()
#xp.SPOT_RANGE = [2, 12],[44, 54]
#print xp.SPOT_RANGE
#xp.SPOT_RANGE = "2 12", "44 54"
#xp.DATA_RANGE = 8, 8
#xp.INCLUDE_RESOLUTION_RANGE= 70.0, 2.0
#xp.NBX = 3
#xp.NBY = 3
if "-a" in sys.argv:
sys.argv.remove('-a')
xp.update(getProfilRefPar())
xp["JOB"] = "DEFPIX", "INTEGRATE", "CORRECT"
xp["NUMBER_OF_PROFILE_GRID_POINTS_ALONG_ALPHA_BETA"] = 15
xp["NUMBER_OF_PROFILE_GRID_POINTS_ALONG_GAMMA"] = 15
xp["REFINE_INTEGRATE"] = "ORIENTATION", "BEAM", "CELL" #"DISTANCE",
shutil.copyfile("GXPARM.XDS","XPARM.XDS")
if "-p" in sys.argv:
sys.argv.remove('-p')
xp.update(getProfilRefPar())
xp["JOB"] = "DEFPIX", "INTEGRATE", "CORRECT"
xp["NUMBER_OF_PROFILE_GRID_POINTS_ALONG_ALPHA_BETA"] = 15
xp["NUMBER_OF_PROFILE_GRID_POINTS_ALONG_GAMMA"] = 15
xp["REFINE_INTEGRATE"] = "ORIENTATION", "BEAM", "CELL" #"DISTANCE",
if "-i" in sys.argv:
optid = sys.argv.index("-i")
_xds_input = sys.argv[optid+1]
xp.update(xdsInp2Param(inp_str=_xds_input))
sys.argv.remove('-i')
sys.argv.remove(_xds_input)
if "-norun" in sys.argv:
saveLastVersion(LP_names)
sys.exit()
else:
ARGV = sys.argv[1:]
while ARGV:
ARG = ARGV.pop()
xp.update(xdsInp2Param(inp_str=ARG))
run_xds(xp, inp_f="XDS.INP")
saveLastVersion(LP_names)
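# Example invocations (illustrative; the trailing keyword strings are
# ordinary XDS.INP-style assignments parsed by xdsInp2Param):
#     runxds.py -i XDS.INP "DATA_RANGE= 1 90"  # merge an input file, then override keywords
#     runxds.py -p                             # rerun integration with profile refinement parameters
#     runxds.py -norun                         # only archive the latest LP files, no XDS run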
| jsburg/xdsme | XDS/runxds.py | Python | bsd-3-clause | 2,046 |
import tempfile
from django import forms
from django.conf import settings
from django.contrib import admin
from django.core.exceptions import ValidationError
from django.forms import BaseInlineFormSet
from django.utils.safestring import mark_safe
from olympia.amo.storage_utils import copy_stored_file
from olympia.amo.utils import resize_image
from .models import (
PrimaryHero, SecondaryHeroModule,
PrimaryHeroImage)
class ImageChoiceField(forms.ModelChoiceField):
def label_from_instance(self, obj):
return mark_safe(
'<img class="select-image-preview" src="{}" />'.format(
obj.preview_url))
class PrimaryHeroInline(admin.StackedInline):
class Media:
css = {
'all': ('css/admin/discovery.css',)
}
model = PrimaryHero
fields = (
'description',
'promoted_addon',
'select_image',
'gradient_color',
'is_external',
'enabled')
view_on_site = False
can_delete = False
def formfield_for_foreignkey(self, db_field, request, **kwargs):
if db_field.name == 'select_image':
kwargs['required'] = False
kwargs['widget'] = forms.RadioSelect(attrs={
'class': 'inline',
'style': 'vertical-align: top'
})
kwargs['queryset'] = PrimaryHeroImage.objects
kwargs['empty_label'] = mark_safe("""
<div class="select-image-noimage">
No image selected
</div>
""")
return ImageChoiceField(**kwargs)
return super().formfield_for_foreignkey(
db_field, request, **kwargs)
class PrimaryHeroImageAdmin(admin.ModelAdmin):
class Media:
css = {
'all': ('css/admin/discovery.css',)
}
list_display = ('preview_image', 'custom_image')
actions = ['delete_selected']
readonly_fields = ('preview_image',)
def save_model(self, request, obj, form, change):
super().save_model(request, obj, form, change)
size_thumb = (150, 120)
size_full = (960, 640)
with tempfile.NamedTemporaryFile(dir=settings.TMP_PATH) as tmp:
resize_image(obj.custom_image.path, tmp.name, size_thumb, 'jpg')
copy_stored_file(tmp.name, obj.thumbnail_path)
with tempfile.NamedTemporaryFile(dir=settings.TMP_PATH) as tmp:
resize_image(obj.custom_image.path, tmp.name, size_full, 'jpg')
copy_stored_file(tmp.name, obj.custom_image.path)
def delete_queryset(self, request, queryset):
for obj in queryset:
obj.delete()
class HeroModuleInlineFormSet(BaseInlineFormSet):
def clean(self):
super().clean()
if len(self.forms) != 3:
raise ValidationError(
'There must be exactly 3 modules in this shelf.')
class SecondaryHeroModuleInline(admin.StackedInline):
model = SecondaryHeroModule
view_on_site = False
max_num = 3
min_num = 3
can_delete = False
formset = HeroModuleInlineFormSet
class SecondaryHeroAdmin(admin.ModelAdmin):
class Media:
css = {
'all': ('css/admin/discovery.css',)
}
list_display = ('headline', 'description', 'enabled')
inlines = [SecondaryHeroModuleInline]
view_on_site = False
def has_delete_permission(self, request, obj=None):
qs = self.get_queryset(request).filter(enabled=True)
if obj and list(qs) == [obj]:
return False
return super().has_delete_permission(request=request, obj=obj)
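# Registration sketch (hypothetical wiring; the actual admin site
# registration for these ModelAdmin classes lives elsewhere in the app):
#
#     admin.site.register(PrimaryHeroImage, PrimaryHeroImageAdmin)
#     admin.site.register(SecondaryHero, SecondaryHeroAdmin)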
| eviljeff/olympia | src/olympia/hero/admin.py | Python | bsd-3-clause | 3,617 |
"""
Copyright (c) 2015 Red Hat, Inc
All rights reserved.
This software may be modified and distributed under the terms
of the BSD license. See the LICENSE file for details.
Script for building docker image. This is expected to run inside container.
"""
import json
import logging
import tempfile
from atomic_reactor.build import InsideBuilder
from atomic_reactor.plugin import PostBuildPluginsRunner, PreBuildPluginsRunner, InputPluginsRunner, PrePublishPluginsRunner, \
ExitPluginsRunner, PluginFailedException
from atomic_reactor.source import get_source_instance_for
from atomic_reactor.util import ImageName
logger = logging.getLogger(__name__)
class BuildResults(object):
build_logs = None
dockerfile = None
built_img_inspect = None
built_img_info = None
base_img_inspect = None
base_img_info = None
base_plugins_output = None
built_img_plugins_output = None
container_id = None
return_code = None
class BuildResultsEncoder(json.JSONEncoder):
def default(self, obj):
if isinstance(obj, BuildResults):
return {
'build_logs': obj.build_logs,
'built_img_inspect': obj.built_img_inspect,
'built_img_info': obj.built_img_info,
'base_img_info': obj.base_img_info,
'base_plugins_output': obj.base_plugins_output,
'built_img_plugins_output': obj.built_img_plugins_output,
}
# Let the base class default method raise the TypeError
return json.JSONEncoder.default(self, obj)
class BuildResultsJSONDecoder(json.JSONDecoder):
def decode(self, obj):
d = super(BuildResultsJSONDecoder, self).decode(obj)
results = BuildResults()
results.built_img_inspect = d.get('built_img_inspect', None)
results.built_img_info = d.get('built_img_info', None)
results.base_img_info = d.get('base_img_info', None)
results.base_plugins_output = d.get('base_plugins_output', None)
results.built_img_plugins_output = d.get('built_img_plugins_output', None)
return results
class TagConf(object):
"""
confguration of image names and tags to be applied
"""
def __init__(self):
# list of ImageNames with predictable names
self._primary_images = []
# list if ImageName instances with unpredictable names
self._unique_images = []
@property
def primary_images(self):
"""
primary image names are predictable and should be used for layering
this is consumed by metadata plugin
:return: list of ImageName
"""
return self._primary_images
@property
def images(self):
"""
list of all ImageNames
:return: list of ImageName
"""
return self._primary_images + self._unique_images
@property
def unique_images(self):
"""
unique image names are unpredictable and should be used for tracking only
this is consumed by metadata plugin
:return: list of ImageName
"""
return self._unique_images
def add_primary_image(self, image):
"""
add new primary image
used by tag_by_labels plugin
:param image: str, name of image (e.g. "namespace/httpd:2.4")
:return: None
"""
self._primary_images.append(ImageName.parse(image))
def add_unique_image(self, image):
"""
add image with unpredictable name
used by tag_by_labels plugin
:param image: str, name of image (e.g. "namespace/httpd:2.4")
:return: None
"""
self._unique_images.append(ImageName.parse(image))
def add_primary_images(self, images):
"""
add new primary images in bulk
used by tag_by_labels plugin
:param images: list of str, list of image names
:return: None
"""
for image in images:
self.add_primary_image(image)
class Registry(object):
def __init__(self, uri, insecure=False):
"""
abstraction for all registry classes
:param uri: str, uri for pulling (in case of docker-registry, pushing too)
:param insecure: bool
"""
self.uri = uri
self.insecure = insecure
class PulpRegistry(Registry):
""" pulp & crane """
def __init__(self, name, crane_uri, insecure=False):
"""
:param name: str, pulp's rest api is specified in dockpulp's config, we refer only by name
:param crane_uri: str, read-only docker registry api access point
:param insecure: bool
"""
super(PulpRegistry, self).__init__(crane_uri, insecure=insecure)
self.name = name
class DockerRegistry(Registry):
""" v1 docker registry """
class PushConf(object):
"""
configuration of remote registries: docker-registry or pulp
"""
def __init__(self):
self._registries = {
"docker": [],
"pulp": [],
}
def add_docker_registry(self, registry_uri, insecure=False):
if registry_uri is None:
raise RuntimeError("registry URI cannot be None")
r = DockerRegistry(registry_uri, insecure=insecure)
self._registries["docker"].append(r)
def add_docker_registries(self, registry_uris, insecure=False):
for registry_uri in registry_uris:
self.add_docker_registry(registry_uri, insecure=insecure)
def add_pulp_registry(self, name, crane_uri):
if crane_uri is None:
raise RuntimeError("registry URI cannot be None")
r = PulpRegistry(name, crane_uri)
self._registries["pulp"].append(r)
@property
def has_some_docker_registry(self):
return len(self.docker_registries) > 0
@property
def docker_registries(self):
return self._registries["docker"]
@property
def pulp_registries(self):
return self._registries["pulp"]
@property
def all_registries(self):
return self.docker_registries + self.pulp_registries
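# A small usage sketch of the two configuration containers above (names and
# URIs are illustrative):
#
#     tag_conf = TagConf()
#     tag_conf.add_primary_image("namespace/httpd:2.4")
#     tag_conf.add_unique_image("namespace/httpd:unique-20150615")
#     push_conf = PushConf()
#     push_conf.add_docker_registry("registry.example.com:5000", insecure=True)
#     push_conf.add_pulp_registry("qa", "crane.example.com")
#     assert len(push_conf.all_registries) == 2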
class DockerBuildWorkflow(object):
"""
This class defines a workflow for building images:
1. pull image from registry
2. tag it properly if needed
3. obtain source
4. build image
5. tag it
6. push it to registries
"""
def __init__(self, source, image, target_registries=None, prebuild_plugins=None,
prepublish_plugins=None, postbuild_plugins=None, exit_plugins=None,
plugin_files=None, target_registries_insecure=False, **kwargs):
"""
:param source: dict, where/how to get source code to put in image
:param image: str, tag for built image ([registry/]image_name[:tag])
:param target_registries: list of str, list of registries to push image to (might change in future)
:param prebuild_plugins: dict, arguments for pre-build plugins
:param prepublish_plugins: dict, arguments for test-build plugins
:param postbuild_plugins: dict, arguments for post-build plugins
:param plugin_files: list of str, load plugins also from these files
:param target_registries_insecure: bool, allow connecting to target registries over plain http
"""
self.source = get_source_instance_for(source, tmpdir=tempfile.mkdtemp())
self.image = image
self.prebuild_plugins_conf = prebuild_plugins
self.prepublish_plugins_conf = prepublish_plugins
self.postbuild_plugins_conf = postbuild_plugins
self.exit_plugins_conf = exit_plugins
self.prebuild_results = {}
self.postbuild_results = {}
self.build_failed = False
self.plugin_failed = False
self.plugin_files = plugin_files
self.kwargs = kwargs
self.builder = None
self.build_logs = []
self.built_image_inspect = None
self._base_image_inspect = None
self.pulled_base_images = set()
# When an image is exported into tarball, it can then be processed by various plugins.
# Each plugin that transforms the image should save it as a new file and append it to
# the end of exported_image_sequence. Other plugins should then operate with last
# member of this structure. Example:
# [{'path': '/tmp/foo.tar', 'size': 12345678, 'md5sum': '<md5>', 'sha256sum': '<sha256>'}]
# You can use util.get_exported_image_metadata to create a dict to append to this list.
self.exported_image_sequence = []
self.tag_conf = TagConf()
self.push_conf = PushConf()
if target_registries:
self.push_conf.add_docker_registries(target_registries, insecure=target_registries_insecure)
# mapping of downloaded files; DON'T PUT ANYTHING BIG HERE!
# "path/to/file" -> "content"
self.files = {}
if kwargs:
logger.warning("unprocessed keyword arguments: %s", kwargs)
@property
def build_process_failed(self):
"""
Has any aspect of the build process failed?
"""
return self.build_failed or self.plugin_failed
# inspect base image lazily just before it's needed - pre plugins may change the base image
@property
def base_image_inspect(self):
if self._base_image_inspect is None:
self._base_image_inspect = self.builder.tasker.inspect_image(self.builder.base_image)
return self._base_image_inspect
def build_docker_image(self):
"""
build docker image
:return: BuildResults
"""
self.builder = InsideBuilder(self.source, self.image)
try:
# time to run pre-build plugins, so they can access cloned repo
logger.info("running pre-build plugins")
prebuild_runner = PreBuildPluginsRunner(self.builder.tasker, self, self.prebuild_plugins_conf,
plugin_files=self.plugin_files)
try:
prebuild_runner.run()
except PluginFailedException as ex:
logger.error("one or more prebuild plugins failed: %s", ex)
raise
build_result = self.builder.build()
self.build_logs = build_result.logs
self.build_failed = build_result.is_failed()
if not build_result.is_failed():
self.built_image_inspect = self.builder.inspect_built_image()
# run prepublish plugins
prepublish_runner = PrePublishPluginsRunner(self.builder.tasker, self, self.prepublish_plugins_conf,
plugin_files=self.plugin_files)
try:
prepublish_runner.run()
except PluginFailedException as ex:
logger.error("one or more prepublish plugins failed: %s", ex)
raise
if not build_result.is_failed():
for registry in self.push_conf.docker_registries:
self.builder.push_built_image(registry.uri,
insecure=registry.insecure)
postbuild_runner = PostBuildPluginsRunner(self.builder.tasker, self, self.postbuild_plugins_conf,
plugin_files=self.plugin_files)
try:
postbuild_runner.run()
except PluginFailedException as ex:
logger.error("one or more postbuild plugins failed: %s", ex)
raise
return build_result
finally:
self.source.remove_tmpdir()
exit_runner = ExitPluginsRunner(self.builder.tasker, self,
self.exit_plugins_conf,
plugin_files=self.plugin_files)
try:
exit_runner.run()
except PluginFailedException as ex:
logger.error("one or more exit plugins failed: %s", ex)
def build_inside(input, input_args=None, substitutions=None):
"""
use requested input plugin to load configuration and then initiate build
"""
def process_keyvals(keyvals):
""" ["key=val", "x=y"] -> {"key": "val", "x": "y"} """
keyvals = keyvals or []
processed_keyvals = {}
for arg in keyvals:
key, value = arg.split("=", 1)
processed_keyvals[key] = value
return processed_keyvals
if not input:
raise RuntimeError("No input method specified!")
else:
logger.debug("getting build json from input %s", input)
cleaned_input_args = process_keyvals(input_args)
cleaned_subs = process_keyvals(substitutions)
cleaned_input_args['substitutions'] = cleaned_subs
input_runner = InputPluginsRunner([{'name': input, 'args': cleaned_input_args}])
build_json = input_runner.run()[input]
logger.debug("build json: %s", build_json)
if not build_json:
raise RuntimeError("No valid build json!")
# TODO: validate json
dbw = DockerBuildWorkflow(**build_json)
build_result = dbw.build_docker_image()
if not build_result or build_result.is_failed():
raise RuntimeError("no image built")
else:
logger.info("build has finished successfully \o/")
| fatherlinux/atomic-reactor | atomic_reactor/inner.py | Python | bsd-3-clause | 13,409 |
import functools
import datetime
from .exceptions import EmitterValidationError
REDUCE_TYPES = set(['sum', 'count', 'min', 'max', 'sumsqr'])
class base_emitter(object):
fluff_emitter = ''
def __init__(self, reduce_type='sum'):
assert reduce_type in REDUCE_TYPES, 'Unknown reduce type'
self.reduce_type = reduce_type
def __call__(self, fn):
@functools.wraps(fn)
def wrapped_f(*args):
generator = fn(*args)
for v in generator:
if isinstance(v, dict):
if 'value' not in v:
v['value'] = 1
if v.get('group_by') is None:
v['group_by'] = None
elif isinstance(v['group_by'], tuple):
v['group_by'] = list(v['group_by'])
elif not isinstance(v['group_by'], list):
v['group_by'] = [v['group_by']]
elif isinstance(v, list):
v = dict(date=v[0], value=v[1], group_by=None)
else:
v = dict(date=v, value=1, group_by=None)
try:
self.validate(v)
except EmitterValidationError as e:
generator.throw(e)
yield v
wrapped_f._reduce_type = self.reduce_type
wrapped_f._fluff_emitter = self.fluff_emitter
return wrapped_f
def validate(self, value):
pass
class custom_date_emitter(base_emitter):
fluff_emitter = 'date'
def validate(self, value):
def validate_date(dateval):
if not isinstance(dateval, (datetime.date, datetime.datetime)):
raise EmitterValidationError(
'Emitted value must be '
'a date or datetime object: {}'.format(
dateval
)
)
validate_date(value.get('date'))
if isinstance(value['date'], datetime.datetime):
value['date'] = value['date'].date()
class custom_null_emitter(base_emitter):
fluff_emitter = 'null'
def validate(self, value):
if isinstance(value, dict):
if 'date' not in value:
value['date'] = None
else:
if value['date'] is not None:
raise EmitterValidationError(
'Emitted value must be None: {}'.format(value['date']))
date_emitter = custom_date_emitter()
null_emitter = custom_null_emitter()
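# Usage sketch: the decorators above wrap generator methods on a fluff
# calculator, normalizing every yielded value into a dict. A hypothetical
# calculator (doc fields are assumptions):
#
#     class VisitCalculator(object):
#         @date_emitter
#         def visits(self, doc):
#             # a bare date becomes {'date': ..., 'value': 1, 'group_by': None}
#             yield doc['visit_date']
#         @null_emitter
#         def total(self, doc):
#             # 'date' is filled in as None by custom_null_emitter.validate
#             yield {'value': 1}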
| qedsoftware/commcare-hq | corehq/ex-submodules/fluff/emitters.py | Python | bsd-3-clause | 2,557 |
from __future__ import absolute_import
import re
from django.conf.urls import patterns, include, url
from sentry.plugins import plugins
urlpatterns = patterns("")
for _plugin in plugins.all():
_plugin_url_module = _plugin.get_url_module()
if _plugin_url_module:
urlpatterns += (url("^%s/" % re.escape(_plugin.slug), include(_plugin_url_module)),)
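# For example, a plugin with slug "example" whose get_url_module() returns a
# urlconf would have that urlconf mounted under ^example/ (slug is
# hypothetical; re.escape above guards slugs containing regex metacharacters).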
| mvaled/sentry | src/sentry/plugins/base/urls.py | Python | bsd-3-clause | 368 |
#!/usr/bin/env python
# coding=utf-8
__author__ = 'bjorne'
"""Handles all commands regarding analysis, i.e. get_available_analysis_modules and perform_analysis.
Uses JSON objects to represent the analysismodules, which are returned from get_available_analysis_modules(username, dbHandler), on the form:
{
"analysismodules":
[
{
"name": "analysisModuleName",
"permission_level": "4",
"description": "blablabla",
"configParams":
[
{
"name": "exercise",
"description": "The exercise to use in the analysis",
"type": "enum",
"default": ["value"],
"range": "exercise_A, exercise_B",
"required": true/false,
"max_amount": 33, #negative value means arbitrary
"min_amount": 5 #negative value means arbitrary
}, {
"name": "good name",
"description": "descr...",
"type": "int",
"default": ["value"],
"range": "0-100",
"required": true/false,
"max_amount": 33, #negative value means arbitrary
"min_amount": 5 #negative value means arbitrary
}
]
}
]
}
The available types in configParams are:
enum, string, int, float, bool, exerciseID, dataID, deviceID, exerciseResultID, rehabilitationSetID, patientInformationID, patientConditionID, patientID, userID, userGropID, organizationID, analysisTaskID
The settings object that is used as input in perform_analysis(username, moduleName, settings, dbHandler) is also a JSON object, on the following form:
{
"configParams": {"exerciseID": ["y7tt347fyre", "ignfu4bh"],
"max_weight": [100]},
"notification": {"none": "[email protected]"},
"taskname": "The name of this task"
}
The analysis results produced by the analysis modules are all on the following form:
{
"results":
[
{
"name": "a name",
"type": "plot",
"data": [[1,3],[2,634],[3,33],[5,233]],
"priority": 5 #How important is this result?,
"subtype": "lines",
"legend": "Legend to use"
"plotID": "id of the plot to make it possible to plot many things in same plot"
},
{
"name": "a name",
"type": "plot",
"data": [[1,3],[2,634],[3,33],[5,233]],
"priority": 5 #How important is this result?,
"subtype": "points",
"legend": "Legend to use"
"plotID": "id of the plot to make it possible to plot many things in same plot"
},
{
"name": "a name",
"type": "text",
"data": "here is a result",
"priority": 2
},
{
"name": "a name",
"type": "html",
"data": "{htmlcode...}",
"priority": 6,
"subtype": "body"
}
],
"model": {...}
}
"""
import threading
import glob
import importlib
import time
import smtplib
import json
import inspect
import sys, traceback
import logging
"""
## Logging ##
- https://docs.python.org/2/howto/logging.html
- https://docs.python.org/2/howto/logging-cookbook.html
### Logging levels ###
- DEBUG
+ Detailed information, typically of interest only when diagnosing problems.
- INFO
+ Confirmation that things are working as expected.
- WARNING
+ An indication that something unexpected happened, or indicative of some problem in the near future (e.g. ‘disk space low’)
- ERROR
+ Due to a more serious problem, the software has not been able to perform some function.
- CRITICAL
+ A serious error, indicating that the program itself may be unable to continue running.
"""
# Logger functionality
logger = logging.getLogger('AnalysisHandler') # Get Logger
logger.setLevel(logging.DEBUG) # Set logging level
# Logging to file
logFileHandler = logging.FileHandler('analysishandler.log') # Handler to log to file
logFileHandler.setLevel(logging.DEBUG) # Set logging level
# Logging to Console
logConsoleHandler = logging.StreamHandler() # Handler to log to console
logConsoleHandler.setLevel(logging.DEBUG) # Set logging level
# Log formatter for handlers
logFileFormatter = logging.Formatter('%(asctime)s - %(filename)s on line %(lineno)-6d - %(levelname)-8s - %(message)s')
logConsoleFormatter = logging.Formatter('%(asctime)s - %(filename)s - %(levelname)-8s - %(message)s')
# Add log formatter to log handlers
logFileHandler.setFormatter(logFileFormatter)
logConsoleHandler.setFormatter(logConsoleFormatter)
# Add log handlers to logger
logger.addHandler(logFileHandler)
logger.addHandler(logConsoleHandler)
# ************************************************************** #
# The following section to overwrite console log formatter
#class MyStreamHandler(logging.StreamHandler):
# def handleError(self, record):
# raise
#console = MyStreamHandler()
#logger.addHandler(console)
# ************************************************************** #
# To log things use
# logger.debug('debug message')
# logger.info('info message')
# logger.warn('warn message')
# logger.error('error message')
# logger.critical('critical message')
# END OF LOGGER INIT
logger.debug("Debug: Logger Online")
logger.info("Info: Logger Online")
logger.warn("Warn: Logger Online")
logger.error("Error: Logger Online")
logger.critical("Critical: Logger Online")
# ************************************************************** #
modulesFolder = 'modules' #The name of the folder containing the analysismodules
threadList = []
analysisPermissionLevel = 7
analysisUsername = "Evalan_analysis_user" #This username is only used when calling methods regarding analysisTasks in the database
#Analysis task statuses
STATUS_RUNNING = "Running"
STATUS_READY = "Ready"
STATUS_FAILED = "Failed"
def _get_file_name_from_path(path):
"""Extracts the filename from the relative path
Assuming the path is on the form folder/fileName.extension
Args:
path (String): the path on the form folder/fileName.extension
Returns:
(String): The fileName without extension
"""
return path.split('/')[1].split('.')[0]
def _get_available_analysis_modules(username, dbHandler):
"""Finds all modules in the modulesFolder and their necessary configuration parameters.
    Only includes a module if the user has the required permission level.
Args:
username (String): The username.
dbHandler (DatabaseHandler): The databasehandler.
Returns:
modules (JSON): A JSON object on the form described at the top of this document.
"""
logger.debug("In _get_available_analysis_modules")
files = glob.glob(modulesFolder + "/*.py")
modules = []
for fileName in files:
if (fileName != modulesFolder + "/__init__.py"): #removes the __init__ file
name = _get_file_name_from_path(fileName)
try:
moduleClass = _get_analysis_module_class(name)
modulePermissionLevel = moduleClass.permission_level()
                userPermissionLevel = dbHandler.get_usergroup_by_username(username).permissionLevel
                if userPermissionLevel > modulePermissionLevel:
continue
description = moduleClass.description()
params = moduleClass.necessary_config_params()
except Exception as e:
msg = "There was an error in module: " + str(name) + str(e) + " - "
logger.error(msg)
continue
modules.append({"name": name, "description": description, "permission_level": modulePermissionLevel, "configParams": params})
return modules
def _get_analysis_module_class(moduleName): #In this version we're assuming that each module only contains one class
"""Imports the analysis module and returns the modules class with the same name.
Args:
moduleName (String): The name of the module.
Returns:
moduleClass (Class): The class of the module.
Raises:
        Exception if the module name doesn't exist or if the module doesn't contain a class with that name.
"""
logger.debug("in _get_analysis_module_class")
module = importlib.import_module(modulesFolder + "." + moduleName)
moduleClass = None
for name, obj in inspect.getmembers(module):
logger.debug(name)
if inspect.isclass(obj) and (name == moduleName): #Assumes that the module only contains one class
logger.debug("found a class-attr")
moduleClass = getattr(module, name)()
if moduleClass is None:
raise NameError
return moduleClass
def _start_analysis_thread(username, moduleName, moduleClass, settings, dbHandler):
"""The method that actually starts the analysisthread.
    Saves the thread in a list, threadList, so it can be accessed later; this is not used at the moment.
Args:
username (String): The username.
moduleName (String): The name of the module to use.
moduleClass (Class): The class of the module to use.
settings (String): JSON-formatted string with settings for the chosen analysismodule on the form specified at the top.
dbHandler (DatabaseHandler): The databasehandler.
Returns:
analysisTaskID (String): The ID of the created task.
Raises:
Exception if database is unable to create an analysisTask. Probably due to bad input.
"""
logger.debug("in _start_analysis_thread")
started = time.time()*1000
databaseMsg = ""
try:
logger.debug("Username: " + str(username))
databaseMsg = json.loads(dbHandler.create_analysistask(username=username, allowedOrganizations=dbHandler.get_usergroup_by_username(username).organizationID, taskname=settings["taskname"], analysisModule=moduleName, status=STATUS_RUNNING, notification=json.dumps(settings["notification"]), configurationParameters=json.dumps(settings["configParams"]), started=started))
logger.debug("Created analysisTask. Msg from database: " + str(databaseMsg))
analysisTaskID = databaseMsg["TaskID"]
except Exception, Argument:
Argument.message = Argument.message + ". Database msg: " + str(databaseMsg)
raise
t = threading.Thread(target=_analysis_thread, args=(username, moduleClass, settings, dbHandler, analysisTaskID,))
threadList.append(t) #To make the thread accessible
t.start()
logger.info("Analysisthread started")
return databaseMsg
def _analysis_thread(username, moduleClass, settings, dbHandler, analysisTaskID):
""" This is the thread that runs the analysis of the chosen module.
Args:
username (String): The username.
moduleClass (Class): The class of the module to use.
settings (String): JSON-formatted string with settings for the chosen analysismodule on the form specified at the top.
dbHandler (DatabaseHandler): The databasehandler.
analysisTaskID (String): The ID in the database of the created task.
"""
logger.debug("in _analysis_thread")
result = {}
try:
settings["configParams"]["thisAnalysisTaskID"] = analysisTaskID #Including taskID in settings to make it possible for the module to accessing itself, maybe for scheduling or other purposes
result = moduleClass.analyse(username, settings["configParams"], dbHandler, logger)
msg = "Status on '" + settings["taskname"] + "'' is: " + str(result.get("status")) + ".\n"
logger.info(msg)
ended = time.time()*1000
logger.info("analysisTaskID = " + analysisTaskID)
dbMessage = dbHandler.update_analysistask(username=username, analysisTaskID=analysisTaskID, field="analysisResult", value=json.dumps(result.get("results")))
logger.info("Update result: " + dbMessage)
dbMessage = dbHandler.update_analysistask(username=username, analysisTaskID=analysisTaskID, field="status", value=STATUS_READY)
logger.info("Update status: " + dbMessage)
dbMessage = dbHandler.update_analysistask(username=username, analysisTaskID=analysisTaskID, field="ended", value=ended)
logger.info("Update ended: " + dbMessage)
except Exception, Argument:
msg = traceback.format_exc()
logger.error(msg)
ended = time.time()*1000
dbMessage = dbHandler.update_analysistask(username=username, analysisTaskID=analysisTaskID, field="status", value=STATUS_FAILED)
logger.info("Update status: " + dbMessage)
dbMessage = dbHandler.update_analysistask(username=username, analysisTaskID=analysisTaskID, field="ended", value=ended)
logger.info("Update ended: " + dbMessage)
#notify user
sender = "[email protected]"
password = "eitrehabplatform"
for key in settings["notification"]: #should implement other notificationsmethod here, i.e. sms
if key == "email":
_email_user(sender,settings["notification"]["email"],password,"Analysisstatus", msg)
elif key == "none":
logger.info("No notification requested")
continue
else:
logger.info("notification-method " + key + " not implemented")
print "Result: " + str(result.get("results")) + "\n"
logger.info("Result: " + str(result.get("results")))
def _email_user(sender, receiver, password, subj, msg):
"""Sends an email to the user, e.g. with a notification that the analysis is ready and possibly where to find it.
Args:
sender (String): The email to send from.
receiver (String): The email to send to.
password (String): The password of the sender.
subj (String): The subject of the email.
msg (String): The message of the email.
Raises:
SMTPException: If email couldn't be sent.
"""
logger.debug("in _email_user")
receiver = [receiver]
message = 'Subject: %s\n\n%s' % (subj, msg)
try:
session = smtplib.SMTP('smtp.gmail.com',587)
session.ehlo()
session.starttls()
session.ehlo()
session.login(sender,password)
session.sendmail(sender,receiver,message)
session.quit()
logger.info("message sent!")
except smtplib.SMTPException:
logger.error("Could not send email.\n" + traceback.format_exc())
def get_available_analysis_modules(username, dbHandler):
"""Checks the modules folder for all available analysis modules.
    If the user is unauthorized to do this, an error message is returned.
Args:
username (String): The username.
dbHandler (DatabaseHandler): The databasehandler.
Returns:
(String): A JSON-formatted respone string. It is either
a) A status message 200 and all the available analysisModules.
b) A status message 401, meaning that the User doesn't have the right to see available analysisModules.
c) A status message 500, meaning something went wrong.
"""
try:
logger.debug("in get_available_analysis_modules")
logger.debug("username: " + str(username))
if dbHandler.__permissionLevel_by_username__(username) > analysisPermissionLevel:
return json.dumps({'status_code': '401', 'msg': 'User not allowed to do this.'})
analysisModules = _get_available_analysis_modules(username, dbHandler)
return json.dumps({'status_code': '200', 'analysisModules': analysisModules})
except Exception, Argument:
errorMsg = {'status_code': '500', 'msg': 'An error occurred', 'ErrorText':Argument.message}
msg = traceback.format_exc()
logger.error(msg)
return json.dumps(errorMsg)
def perform_analysis(username, moduleName, settings, dbHandler):
"""The method for starting an analysis.
Args:
username (String): The username.
moduleName (String): The name of the analysismodule that should perform the analysis.
settings (String): A JSON-formatted string with the settings for to use for the analysis.
dbHandler (DatabaseHandler): The database handler.
Returns:
msg (String): A JSON-formatted respone string. It is either
a) A status message 201 and the analysisTaskID of the created analysisTask
b) A status message 401, meaning that the User doesn't have the right to create this analysisTask.
c) A status message 500, meaning something went wrong. """
logger.debug("in perform_analysis")
logger.debug("username: " + str(username))
try:
permissionlevel = dbHandler.get_usergroup_by_username(username).permissionLevel
logger.debug("permissionlevel: " + str(permissionlevel))
moduleClass = _get_analysis_module_class(moduleName)
modulePermissionlevel = moduleClass.permission_level()
logger.debug("modulePermissionlevel: " + str(modulePermissionlevel))
if permissionlevel > modulePermissionlevel:
return json.dumps({'status_code': '401', 'msg': 'User not allowed to run this analysis.'})
databaseMsg = _start_analysis_thread(username, moduleName, moduleClass, settings, dbHandler)
return json.dumps(databaseMsg)
except Exception, Argument:
errorMsg = {'status_code': '500', 'msg': 'An error occurred', 'ErrorText':Argument.message}
msg = traceback.format_exc()
logger.error(msg)
return json.dumps(errorMsg)
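# A minimal sketch of starting an analysis with the settings schema from the
# module docstring (module name, IDs and email address are illustrative):
#
#     settings = {
#         "taskname": "Weekly exercise summary",
#         "notification": {"email": "[email protected]"},
#         "configParams": {"exerciseID": ["y7tt347fyre"], "max_weight": [100]},
#     }
#     response = perform_analysis("some_user", "ExampleModule", settings, dbHandler)
#     # -> JSON string with status_code 201 and the new TaskID on success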
| improve-project/platform | AnalysisPackage/analysishandler.py | Python | bsd-3-clause | 16,009 |
from django.test import TestCase
class InsertRowFuncionalTestCase(TestCase):
def setUp(self):
pass
def tearDown(self):
pass
| kula1922/kfusiontables | kfusiontables/tests/functional/test_insert_row.py | Python | bsd-3-clause | 151 |
class VirtualMachine:
def __init__(self, ram_size=512, executing=True):
self.data = {i: None for i in range(ram_size)}
self.stack = []
self.executing = executing
self.pc = 0
self.devices_start = 256
def push(self, value):
"""Push something onto the stack."""
self.stack += [value]
def pop(self):
"""Pop something from the stack. Crash if empty."""
return self.stack.pop()
def read_memory(self, index):
"""Read from memory, crashing if index is out of bounds."""
if isinstance(self.data[index], DeviceProxy):
return self.data[index].read(index)
else:
return self.data[index]
def write_memory(self, index, value):
"""Write to memory. Crash if index is out of bounds."""
if isinstance(self.data[index], DeviceProxy):
self.data[index].write(index, value)
else:
self.data[index] = value
def register_device(self, device, needed_addresses):
"""Given an instantiated device and the number of required addresses, registers it in memory"""
# If not enough addresses, just error out
if self.devices_start+needed_addresses > len(self.data):
raise Exception('Not enough addresses to allocate')
proxyed_device = DeviceProxy(device, self.devices_start)
for i in range(self.devices_start, self.devices_start+needed_addresses):
self.data[i] = proxyed_device
self.devices_start += needed_addresses
def run(self, bytecodes):
self.bytecodes = bytecodes
while self.executing:
increment = self.bytecodes[self.pc].autoincrement
self.bytecodes[self.pc].execute(self)
if increment:
self.pc += 1
class DeviceProxy:
"""Manages address translation between devices"""
def __init__(self, device, pos):
self.device = device
self.pos = pos
    def read(self, index):
        return self.device.read(index - self.pos)
    def write(self, index, value):
        self.device.write(index - self.pos, value)
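# Usage sketch (hypothetical device; a device only needs read/write methods
# that take a device-local offset, which DeviceProxy derives by subtracting
# the device's base address from the absolute VM address):
#
#     class EchoDevice(object):
#         def __init__(self):
#             self.cells = {}
#         def read(self, offset):
#             return self.cells.get(offset)
#         def write(self, offset, value):
#             self.cells[offset] = value
#
#     vm = VirtualMachine()
#     vm.register_device(EchoDevice(), needed_addresses=4)
#     vm.write_memory(256, 42)   # routed to the device at local offset 0
#     assert vm.read_memory(256) == 42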
| darbaga/simple_compiler | virtual_machine.py | Python | bsd-3-clause | 2,139 |
import matplotlib.pyplot as plt
import numpy as np
import numpy.testing as npt
import pandas as pd
import pandas.util.testing as pdt
import pytest
class TestModalityEstimator(object):
@pytest.fixture
def step(self):
return 1.
@pytest.fixture
def vmax(self):
return 20.
@pytest.fixture(params=[2, 3])
def logbf_thresh(self, request):
return request.param
@pytest.fixture
def estimator(self, logbf_thresh):
from anchor.bayesian import BayesianModalities, ONE_PARAMETER_MODELS, \
TWO_PARAMETER_MODELS
return BayesianModalities(
one_parameter_models=ONE_PARAMETER_MODELS,
two_parameter_models=TWO_PARAMETER_MODELS,
logbf_thresh=logbf_thresh)
def test_init(self, logbf_thresh):
from anchor import BayesianModalities, ModalityModel
from anchor.bayesian import ONE_PARAMETER_MODELS, \
TWO_PARAMETER_MODELS
estimator = BayesianModalities(
one_parameter_models=ONE_PARAMETER_MODELS,
two_parameter_models=TWO_PARAMETER_MODELS,
logbf_thresh=logbf_thresh)
true_one_param_models = {k: ModalityModel(**v)
for k, v in ONE_PARAMETER_MODELS.items()}
true_two_param_models = {k: ModalityModel(**v)
for k, v in TWO_PARAMETER_MODELS.items()}
npt.assert_equal(estimator.logbf_thresh, logbf_thresh)
pdt.assert_dict_equal(estimator.one_param_models,
true_one_param_models)
pdt.assert_dict_equal(estimator.two_param_models,
true_two_param_models)
@pytest.mark.xfail
def test_fit_transform_greater_than1(self, estimator):
nrows = 10
ncols = 5
data = pd.DataFrame(
np.abs(np.random.randn(nrows, ncols).reshape(nrows, ncols))+10)
estimator.fit(data)
@pytest.mark.xfail
def test_fit_transform_less_than1(self, estimator):
nrows = 10
ncols = 5
data = pd.DataFrame(
np.abs(np.random.randn(nrows, ncols).reshape(nrows, ncols))-10)
estimator.fit(data)
def test_positive_control(self, estimator, positive_control):
"""Make sure estimator correctly assigns modalities to known events"""
log2bf = estimator.fit(positive_control.copy())
test = estimator.predict(log2bf)
pdt.assert_numpy_array_equal(test.values, test.index.values)
def test_violinplot(self, estimator):
estimator.violinplot(n=100)
fig = plt.gcf()
assert len(fig.axes) == len(estimator.models)
plt.close('all')
| YeoLab/anchor | anchor/tests/test_bayesian.py | Python | bsd-3-clause | 2,694 |
# -*- coding: utf-8 -*-
import unittest
import os
from mongrey.utils import get_db_config
from mongrey.storage.sql import models
from ...base import BaseFlaskTestCase
from ...web.test_login import LoginTestCaseMixin
@unittest.skipIf(os.environ.get('MONGREY_STORAGE', "sql") != "sql", "Skip no sql tests")
class LoginTestCase(LoginTestCaseMixin, BaseFlaskTestCase):
CONFIG = "mongrey.tests.storage.sql.flask_settings.Test"
db_settings = {
'host': 'sqlite:///../mongrey_test.db',
}
def _create_app(self):
settings, storage = get_db_config(**self.db_settings)
models.configure_peewee(drop_before=True, **settings)
from mongrey.web import create_app
app = create_app(config=self.CONFIG)
return app
def setUp(self):
BaseFlaskTestCase.setUp(self)
self._views(self.flask_app)
def test_login_with_basic_auth(self):
self._test_login_with_basic_auth(models)
def test_login_api_key(self):
self._test_login_api_key(models)
def test_login_with_form(self):
self._test_login_with_form(models)
| radical-software/mongrey | mongrey/tests/storage/sql/test_login.py | Python | bsd-3-clause | 1,144 |
# -*- coding: utf-8 -*-
# Copyright 2017 GIG Technology NV
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# @@license_version:1.3@@
import time
from google.appengine.api import users
from google.appengine.ext import ndb
from framework.utils import now
from mcfw.rpc import returns, arguments
from plugins.rogerthat_api.exceptions import BusinessException
from plugins.tff_backend.models.payment import ThreeFoldTransaction, ThreeFoldPendingTransaction
from plugins.tff_backend.to.payment import WalletBalanceTO
def _get_balance_from_transactions(transactions, token):
# type: (list[ThreeFoldTransaction], unicode) -> WalletBalanceTO
available_balance = 0
total_balance = 0
total_description_details = []
# TODO set to minimum precision of all transactions when transactions have the 'precision' property
# (and multiply available / total amount depending on precision)
precision = 2
# for transaction in transactions:
# precision = max(transaction.precision, precision)
for transaction in transactions:
if transaction.token != token:
            raise BusinessException('Invalid transaction supplied to _get_balance_from_transactions. '
                                    'All transactions must have %s as token' % token)
amount_spent = transaction.amount - transaction.amount_left
unlocked_amount = 0
now_ = now()
for unlock_timestamp, unlock_amount in zip(transaction.unlock_timestamps, transaction.unlock_amounts):
if unlock_timestamp <= now_:
unlocked_amount += unlock_amount
else:
total_description_details.append((unlock_timestamp, unlock_amount))
spendable_amount = unlocked_amount - amount_spent
available_balance += spendable_amount
total_balance += transaction.amount_left
if total_description_details:
total_description = u"""## %(token)s Unlock times'
|Date|#%(token)s|
|---|---:|
""" % {'token': token}
for unlock_timestamp, unlock_amount in sorted(total_description_details, key=lambda tup: tup[0]):
date = time.strftime('%a %d %b %Y %H:%M:%S GMT', time.localtime(unlock_timestamp))
amount = u'{:0,.2f}'.format(unlock_amount / 100.0)
total_description += u'\n|%s|%s|' % (date, amount)
else:
total_description = None
return WalletBalanceTO(available=available_balance, total=total_balance, description=total_description, token=token,
precision=precision)
@returns([WalletBalanceTO])
@arguments(username=unicode)
def get_all_balances(username):
transactions = ThreeFoldTransaction.list_with_amount_left(username)
token_types = set(map(lambda transaction: transaction.token, transactions))
results = []
for token in token_types:
transactions_per_token = [trans for trans in transactions if trans.token == token]
results.append(_get_balance_from_transactions(transactions_per_token, token))
return results
@returns(tuple)
@arguments(username=unicode, page_size=(int, long), cursor=unicode)
def get_pending_transactions(username, page_size, cursor):
    # type: (unicode, long, unicode) -> tuple[list[ThreeFoldPendingTransaction], ndb.Cursor, bool]
return ThreeFoldPendingTransaction.list_by_user(username) \
.fetch_page(page_size, start_cursor=ndb.Cursor(urlsafe=cursor))
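# Balance semantics sketch: per token, amounts with unlock timestamps in the
# future count toward `total` but not `available`, and the pending unlocks
# are listed in the markdown `description` table. Illustrative numbers
# (token amounts are stored multiplied by 100, per the precision of 2):
#
#     # one 100-token transaction, half unlocked now, half next month:
#     # -> WalletBalanceTO(available=5000, total=10000, token='TFT', precision=2)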
| threefoldfoundation/app_backend | plugins/tff_backend/bizz/payment.py | Python | bsd-3-clause | 3,919 |
import re
from django.db import models
from django.core.urlresolvers import reverse
from django.utils.translation import ugettext_lazy as _
from cyndiloza.apps.news import managers
from cyndiloza.apps.places.models import Place
from cyndiloza.apps.people.models import Person
class Author(models.Model):
name = models.CharField(_("name"), max_length=250)
class Meta:
ordering = ["name"]
verbose_name = _("author")
verbose_name_plural = _("authors")
def __unicode__(self):
return u"%s" % (self.name)
class Section(models.Model):
name = models.CharField(_("name"), max_length=250, help_text=_("Name of the section under which the article appeared, e.g. Business, Features, etc."))
slug = models.SlugField(_("slug"), unique=True)
class Meta:
ordering = ["name"]
verbose_name = _("section")
verbose_name_plural = _("sections")
def __unicode__(self):
return u"%s" % (self.name)
class Edition(models.Model):
name = models.CharField(_("name"), max_length=250, help_text=_("Name of the edition under which the article appeared, e.g. Final, Evening, etc."))
class Meta:
ordering = ["name"]
verbose_name = _("edition")
verbose_name_plural = _("editions")
def __unicode__(self):
return u"%s" % (self.name)
class Publication(models.Model):
FREQUENCY_CHOICES = (
('Daily', 'Daily'),
('Weekly', 'Weekly'),
('Bi-weekly', 'Bi-weekly'),
('Monthly', 'Monthly'),
('Quarterly', 'Quarterly'),
('Semesterly', 'Semesterly'),
('Yearly', 'Yearly'),
)
FORMAT_CHOICES = (
('Broadsheet', 'Broadsheet'),
('Tabloid', 'Tabloid'),
('Compact', 'Compact'),
('Berliner', 'Berliner'),
('Newsletter', 'Newsletter'),
('Magazine', 'Magazine'),
('Website', 'Website'),
('Blog', 'Blog'),
)
name = models.CharField(_("name"), max_length=250)
slug = models.SlugField(_("slug"), unique=True)
frequency = models.CharField(_("frequency"), max_length=10, choices=FREQUENCY_CHOICES, blank=True)
format = models.CharField(_("format"), max_length=10, choices=FORMAT_CHOICES, blank=True)
circulation = models.PositiveIntegerField(_("circulation"), blank=True, null=True)
place = models.ForeignKey(Place)
description = models.TextField(_("description"), help_text=_("A few words about the publication. Please use <a href=\"http://en.wikipedia.org/wiki/Markdown\">Markdown styling</a>."))
url = models.URLField(_("URL"), default="http://", blank=True)
logo = models.ImageField(_("logo"), upload_to="news/logos/", blank=True, help_text=_("A large, clean version of the logo."))
icon = models.ImageField(_("icon"), upload_to="places/icons/", blank=True)
rank = models.PositiveIntegerField(_("rank"), unique=True, help_text=_("The rank of this publication by order of importance, with \"1\" as the most important."))
objects = managers.VisibilityManager()
# city = models.CharField(_("city"), max_length=50, blank=True)
# state = models.CharField(_("state"), choices=US_STATES, max_length=255, blank=True)
# latitude = models.DecimalField(_("latitude"), max_digits=8, decimal_places=6, blank=True, null=True)
# longitude = models.DecimalField(_("longitude"), max_digits=9, decimal_places=6, blank=True, null=True)
class Meta:
ordering = ["rank", "name"]
verbose_name = _("publication")
verbose_name_plural = _("publications")
def __unicode__(self):
return u"%s" % (self.name)
def get_absolute_url(self):
return reverse("news_publication_detail", args=[str(self.slug)])
def website(self):
        domain = re.sub(r"http://(?:www\.)?", "", self.url)
        pretty = re.sub(r"\/$", "", domain)
return u"%s" % (pretty)
def places(self):
publication_places = []
for article in self.article_set.all():
try:
                if article.place is not None and article.place not in publication_places:
                    publication_places.append(article.place)
            except Exception:
                pass
return publication_places
class Prom(models.Model):
theme = models.CharField(_("theme"), max_length=250)
slug = models.SlugField(_("slug"), unique=True)
school = models.ForeignKey(Place, related_name="school")
location = models.ForeignKey(Place, related_name="location")
distance = models.DecimalField(max_digits=2, decimal_places=1, help_text="Distance between locations in miles. Decimals are OK.")
start_time = models.DateTimeField(_("start time"))
end_time = models.TimeField(_("end time"))
color_one = models.CharField(_("color one"), blank=True, max_length=50)
color_one_hex = models.CharField(_("color one hex"), blank=True, max_length=6)
color_two = models.CharField(_("color two"), blank=True, max_length=50)
color_two_hex = models.CharField(_("color two hex"), blank=True, max_length=6)
color_three = models.CharField(_("color three"), blank=True, max_length=50)
color_three_hex = models.CharField(_("color three hex"), blank=True, max_length=6)
notes = models.TextField(_("notes"), blank=True)
published = models.BooleanField(_("published"), default=True)
class Meta:
ordering = ["school"]
verbose_name = _("prom")
verbose_name_plural = _("proms")
def __unicode__(self):
return u"%s" % (self.theme)
class Article(models.Model):
MAPTYPE_CHOICES = (
('G_NORMAL_MAP', 'Normal'),
('G_SATELLITE_MAP', 'Satellite'),
('G_HYBRID_MAP', 'Hybrid'),
('G_PHYSICAL_MAP', 'Terrain'),
)
headline = models.CharField(max_length=250)
slug = models.SlugField(_("slug"), unique=True)
date = models.DateField(help_text="Written in YYYY-MM-DD format")
author = models.ForeignKey(Author, default=1)
contributors = models.CharField(max_length=250, blank=True, help_text="For contributing authors, separated by commas.")
publication = models.ForeignKey(Publication, default=4)
section = models.ForeignKey(Section, blank=True, null=True)
edition = models.ForeignKey(Edition, blank=True, null=True)
page = models.CharField(max_length=4, blank=True)
place = models.ForeignKey(Place, null=True, blank=True, help_text="Select one place that sums up your article, if applicable. A map of the location will appear on the article page.")
maptype = models.CharField("Map type", max_length=15, blank=True, choices=MAPTYPE_CHOICES)
summary = models.TextField(blank=True, help_text="Optional, but it looks snappy with anything, even the lead of a story.")
body = models.TextField(help_text="Did you...<br />1. Change curly marks and apostrophes to <strong>straight</strong> marks and apostrophes?<br />2. Make a <strong>line break</strong> between all paragraphs?<br />3. Use <strong><a href=\"http://en.wikipedia.org/wiki/Markdown\">Markdown styling</a></strong> for headers (####), lists and links?")
note = models.TextField("Editor's note", blank=True, help_text="For special notes that precede a newspaper article, like those for a series.")
photo = models.ImageField(upload_to="news/photos/", blank=True, help_text="Did you...<br />1. Download the <strong>enlarged</strong> version of the photo?<br />2. Rename the photo with a meaningful file name?<br />3. Rename the file extension from “.jpeg” to “.jpg”?")
photo_url = models.URLField("Photo URL", blank=True, help_text="Most likely the originating Flickr photo page, but if an “official” photo, it could be the photo page on the publication's website.")
caption = models.TextField(blank=True, help_text="Did you replace curly marks with straight marks?")
photographer = models.CharField(max_length=250, blank=True)
photographer_url = models.URLField("Photographer's URL", blank=True, help_text="Could be a Flickr profile page, Facebook profile page, etc.")
sidebarheadline = models.CharField("Sidebar headline", max_length=250, blank=True)
sidebarcontent = models.TextField("Sidebar story", blank=True, help_text="Use <a href=\"http://en.wikipedia.org/wiki/Markdown\">Markdown</a> as necessary for headers (####), lists and links.")
document_title = models.CharField(max_length=250, blank=True, help_text="The title of a document related to the article.")
document = models.FileField(upload_to="news/documents/", blank=True, help_text="A document related to the article.")
dipity = models.CharField(max_length=255, blank=True, help_text="The <iframe> code for a <a href=\"http://www.dipity.com\">Dipity</a> timeline.")
people = models.ManyToManyField(Person, blank=True, null=True)
url = models.URLField("URL", blank=True, help_text="URL of the article on the publication's official website.")
published = models.BooleanField(default=True, help_text="Check this box if you want the article published live on the website.")
featured = models.BooleanField(default=False, help_text="Check this box if you want the article prominently featured on the website homepage.")
objects = managers.VisibilityManager()
class Meta:
ordering = ["-date", "headline"]
verbose_name = _("article")
verbose_name_plural = _("articles")
def __unicode__(self):
return u"%s" % (self.headline)
def get_absolute_url(self):
return reverse("news_article_detail", args=[str(self.slug)])
def get_previous_published(self):
return self.get_previous_by_date(published=True)
def get_next_published(self):
return self.get_next_by_date(published=True)
class FavoriteList(models.Model):
title = models.CharField(_("title"), max_length=250)
slug = models.SlugField(_("slug"), unique=True)
published = models.BooleanField(_("published"), default=True, help_text="Check this box if you want the list published live on the website.")
class Meta:
ordering = ["title"]
verbose_name = _("favorite list")
verbose_name_plural = _("favorite lists")
def __unicode__(self):
return u"%s" % (self.title)
class Favorite(models.Model):
rank = models.PositiveIntegerField(_("rank #"))
article = models.ForeignKey(Article)
favoritelist = models.ForeignKey(FavoriteList, verbose_name="Favorite list")
class Meta:
ordering = ["rank"]
verbose_name = _("favorite")
verbose_name_plural = _("favorites")
def __unicode__(self):
return u"%s" % (self.rank)
| richardcornish/cyndiloza | cyndiloza/apps/news/models.py | Python | bsd-3-clause | 10,632 |
"""
Define the interface for a plugin class.
"""
import abc
class PluginPrototype(object):
__metaclass__ = abc.ABCMeta
@abc.abstractmethod
def set_model(self, likelihood_model_instance):
"""
Set the model to be used in the joint minimization. Must be a LikelihoodModel instance.
"""
pass
@abc.abstractmethod
def get_name(self):
"""
Return a name for this data set (likely set during the constructor)
"""
pass
@abc.abstractmethod
def get_log_like(self):
"""
Return the value of the log-likelihood with the current values for the
parameters
"""
pass
@abc.abstractmethod
def get_nuisance_parameters(self):
"""
Return a list of nuisance parameters. Return an empty list if there
are no nuisance parameters
"""
pass
@abc.abstractmethod
def inner_fit(self):
"""
        This is used for the profile likelihood. Keeping all parameters in the
        LikelihoodModel fixed, this method minimizes the logLike over the remaining
        nuisance parameters, i.e., the parameters belonging only to the model for
        this particular detector. If there are no nuisance parameters, simply
        return the logLike value.
"""
pass
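# A minimal concrete subclass might look like the following (illustrative
# sketch only; "MyDataSet" and its fixed log-likelihood are hypothetical):
#
#     class MyDataSet(PluginPrototype):
#
#         def set_model(self, likelihood_model_instance):
#             self._model = likelihood_model_instance
#
#         def get_name(self):
#             return "my_data_set"
#
#         def get_log_like(self):
#             return -0.5  # evaluate the model against the data here
#
#         def get_nuisance_parameters(self):
#             return []
#
#         def inner_fit(self):
#             # no nuisance parameters, so simply return the current logLike
#             return self.get_log_like()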
| sybenzvi/3ML | threeML/plugin_prototype.py | Python | bsd-3-clause | 1,341 |
from setuptools import setup, find_packages
setup(
name='django-userprofile',
version='0.0.6',
description='Django user profile app.',
long_description = open('README.rst', 'r').read() + open('AUTHORS.rst', 'r').read() + open('CHANGELOG.rst', 'r').read(),
author='Praekelt Foundation',
author_email='[email protected]',
license='BSD',
url='http://github.com/praekelt/django-userprofile',
packages = find_packages(),
dependency_links = [
'http://github.com/praekelt/django-photologue/tarball/master#egg=django-photologue',
],
install_requires = [
'django-photologue',
'django-registration',
],
tests_require=[
'django-setuptest>=0.0.6',
],
test_suite="setuptest.SetupTestSuite",
include_package_data=True,
classifiers = [
"Programming Language :: Python",
"License :: OSI Approved :: BSD License",
"Development Status :: 4 - Beta",
"Operating System :: OS Independent",
"Framework :: Django",
"Intended Audience :: Developers",
"Topic :: Internet :: WWW/HTTP :: Dynamic Content",
],
zip_safe=False,
)
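# A typical local workflow for this package (illustrative commands):
#
#     pip install -e .
#     python setup.py test   # runs the setuptest suite configured above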
| praekelt/django-userprofile | setup.py | Python | bsd-3-clause | 1,168 |
from django.contrib.auth import get_user_model
from django.contrib.auth.models import Permission
UserModel = get_user_model()
class ModelBackend:
"""
Authenticates against settings.AUTH_USER_MODEL.
"""
def authenticate(self, request, username=None, password=None, **kwargs):
if username is None:
username = kwargs.get(UserModel.USERNAME_FIELD)
try:
user = UserModel._default_manager.get_by_natural_key(username)
except UserModel.DoesNotExist:
# Run the default password hasher once to reduce the timing
# difference between an existing and a nonexistent user (#20760).
UserModel().set_password(password)
else:
if user.check_password(password) and self.user_can_authenticate(user):
return user
def user_can_authenticate(self, user):
"""
Reject users with is_active=False. Custom user models that don't have
that attribute are allowed.
"""
is_active = getattr(user, 'is_active', None)
return is_active or is_active is None
def _get_user_permissions(self, user_obj):
return user_obj.user_permissions.all()
def _get_group_permissions(self, user_obj):
user_groups_field = get_user_model()._meta.get_field('groups')
user_groups_query = 'group__%s' % user_groups_field.related_query_name()
return Permission.objects.filter(**{user_groups_query: user_obj})
def _get_permissions(self, user_obj, obj, from_name):
"""
Return the permissions of `user_obj` from `from_name`. `from_name` can
be either "group" or "user" to return permissions from
`_get_group_permissions` or `_get_user_permissions` respectively.
"""
if not user_obj.is_active or user_obj.is_anonymous or obj is not None:
return set()
perm_cache_name = '_%s_perm_cache' % from_name
if not hasattr(user_obj, perm_cache_name):
if user_obj.is_superuser:
perms = Permission.objects.all()
else:
perms = getattr(self, '_get_%s_permissions' % from_name)(user_obj)
perms = perms.values_list('content_type__app_label', 'codename').order_by()
setattr(user_obj, perm_cache_name, {"%s.%s" % (ct, name) for ct, name in perms})
return getattr(user_obj, perm_cache_name)
def get_user_permissions(self, user_obj, obj=None):
"""
Return a set of permission strings the user `user_obj` has from their
`user_permissions`.
"""
return self._get_permissions(user_obj, obj, 'user')
def get_group_permissions(self, user_obj, obj=None):
"""
        Return a set of permission strings the user `user_obj` has from the
        groups they belong to.
"""
return self._get_permissions(user_obj, obj, 'group')
def get_all_permissions(self, user_obj, obj=None):
if not user_obj.is_active or user_obj.is_anonymous or obj is not None:
return set()
if not hasattr(user_obj, '_perm_cache'):
user_obj._perm_cache = {
*self.get_user_permissions(user_obj),
*self.get_group_permissions(user_obj),
}
return user_obj._perm_cache
def has_perm(self, user_obj, perm, obj=None):
if not user_obj.is_active:
return False
return perm in self.get_all_permissions(user_obj, obj)
def has_module_perms(self, user_obj, app_label):
"""
Return True if user_obj has any permissions in the given app_label.
"""
if not user_obj.is_active:
return False
for perm in self.get_all_permissions(user_obj):
if perm[:perm.index('.')] == app_label:
return True
return False
def get_user(self, user_id):
try:
user = UserModel._default_manager.get(pk=user_id)
except UserModel.DoesNotExist:
return None
return user if self.user_can_authenticate(user) else None
class AllowAllUsersModelBackend(ModelBackend):
def user_can_authenticate(self, user):
return True
class RemoteUserBackend(ModelBackend):
"""
This backend is to be used in conjunction with the ``RemoteUserMiddleware``
found in the middleware module of this package, and is used when the server
is handling authentication outside of Django.
By default, the ``authenticate`` method creates ``User`` objects for
usernames that don't already exist in the database. Subclasses can disable
this behavior by setting the ``create_unknown_user`` attribute to
``False``.
"""
# Create a User object if not already in the database?
create_unknown_user = True
def authenticate(self, request, remote_user):
"""
The username passed as ``remote_user`` is considered trusted. Return
the ``User`` object with the given username. Create a new ``User``
object if ``create_unknown_user`` is ``True``.
Return None if ``create_unknown_user`` is ``False`` and a ``User``
object with the given username is not found in the database.
"""
if not remote_user:
return
user = None
username = self.clean_username(remote_user)
# Note that this could be accomplished in one try-except clause, but
# instead we use get_or_create when creating unknown users since it has
# built-in safeguards for multiple threads.
if self.create_unknown_user:
user, created = UserModel._default_manager.get_or_create(**{
UserModel.USERNAME_FIELD: username
})
if created:
user = self.configure_user(user)
else:
try:
user = UserModel._default_manager.get_by_natural_key(username)
except UserModel.DoesNotExist:
pass
return user if self.user_can_authenticate(user) else None
def clean_username(self, username):
"""
Perform any cleaning on the "username" prior to using it to get or
create the user object. Return the cleaned username.
By default, return the username unchanged.
"""
return username
def configure_user(self, user):
"""
Configure a user after creation and return the updated user.
By default, return the user unmodified.
"""
return user
class AllowAllUsersRemoteUserBackend(RemoteUserBackend):
def user_can_authenticate(self, user):
return True
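# A sketch of how these backends are typically wired up (standard Django
# settings; the exact lists below are illustrative):
#
#     # settings.py -- username/password auth via ModelBackend
#     AUTHENTICATION_BACKENDS = ['django.contrib.auth.backends.ModelBackend']
#
#     # settings.py -- externally authenticated users via RemoteUserBackend,
#     # paired with the middleware mentioned in its docstring
#     MIDDLEWARE = [
#         # ...
#         'django.contrib.auth.middleware.RemoteUserMiddleware',
#     ]
#     AUTHENTICATION_BACKENDS = ['django.contrib.auth.backends.RemoteUserBackend']
#
# django.contrib.auth.authenticate() tries each configured backend in order
# and returns the first non-None user.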
| ebar0n/django | django/contrib/auth/backends.py | Python | bsd-3-clause | 6,648 |
# Copyright (c) 2016, the GPyOpt Authors
# Licensed under the BSD 3-clause license (see LICENSE.txt)
import numpy as np
from ..util.general import reshape
class alpine1:
'''
Alpine1 function
:param bounds: the box constraints to define the domain in which the function is optimized.
:param sd: standard deviation, to generate noisy evaluations of the function.
'''
    def __init__(self, input_dim, bounds=None, sd=None):
        if bounds is None:
            self.bounds = bounds = [(-10, 10)] * input_dim
        else:
            self.bounds = bounds
        self.min = [0] * input_dim
        self.fmin = 0
        self.input_dim = input_dim
        if sd is None:
            self.sd = 0
        else:
            self.sd = sd
def f(self,X):
X = reshape(X,self.input_dim)
n = X.shape[0]
fval = np.abs(X*np.sin(X) + 0.1*X).sum(axis=1)
        if self.sd == 0:
noise = np.zeros(n).reshape(n,1)
else:
noise = np.random.normal(0,self.sd,n)
return fval.reshape(n,1) + noise
class alpine2:
'''
Alpine2 function
:param bounds: the box constraints to define the domain in which the function is optimized.
:param sd: standard deviation, to generate noisy evaluations of the function.
'''
    def __init__(self, input_dim, bounds=None, sd=None):
        if bounds is None:
            self.bounds = bounds = [(1, 10)] * input_dim
        else:
            self.bounds = bounds
        self.min = [7.917] * input_dim
        self.fmin = -2.808**input_dim
        self.input_dim = input_dim
        if sd is None:
            self.sd = 0
        else:
            self.sd = sd
def f(self,X):
X = reshape(X,self.input_dim)
n = X.shape[0]
fval = np.cumprod(np.sqrt(X),axis=1)[:,self.input_dim-1]*np.cumprod(np.sin(X),axis=1)[:,self.input_dim-1]
        if self.sd == 0:
noise = np.zeros(n).reshape(n,1)
else:
noise = np.random.normal(0,self.sd,n).reshape(n,1)
return -fval.reshape(n,1) + noise
class gSobol:
'''
gSobol function
:param a: one-dimensional array containing the coefficients of the function.
:param sd: standard deviation, to generate noisy evaluations of the function.
'''
    def __init__(self, a, bounds=None, sd=None):
        self.a = a
        self.input_dim = len(self.a)
        if bounds is None:
            self.bounds = [(-4, 6)] * self.input_dim
        else:
            self.bounds = bounds
        if not (self.a > 0).all():
            raise ValueError('Wrong vector of coefficients; they should all be positive')
        self.S_coef = (1/(3*((1+self.a)**2))) / (np.prod(1+1/(3*((1+self.a)**2)))-1)
        if sd is None:
            self.sd = 0
        else:
            self.sd = sd
def f(self,X):
X = reshape(X,self.input_dim)
n = X.shape[0]
aux = (abs(4*X-2)+np.ones(n).reshape(n,1)*self.a)/(1+np.ones(n).reshape(n,1)*self.a)
fval = np.cumprod(aux,axis=1)[:,self.input_dim-1]
        if self.sd == 0:
noise = np.zeros(n).reshape(n,1)
else:
noise = np.random.normal(0,self.sd,n).reshape(n,1)
return fval.reshape(n,1) + noise
class ackley:
'''
Ackley function
:param sd: standard deviation, to generate noisy evaluations of the function.
'''
    def __init__(self, input_dim, bounds=None, sd=None):
        self.input_dim = input_dim
        if bounds is None:
            self.bounds = [(-32.768, 32.768)] * self.input_dim
        else:
            self.bounds = bounds
        self.min = [0.] * self.input_dim
        self.fmin = 0
        if sd is None:
            self.sd = 0
        else:
            self.sd = sd
    def f(self,X):
        X = reshape(X,self.input_dim)
        n = X.shape[0]
fval = (20+np.exp(1)-20*np.exp(-0.2*np.sqrt((X**2).sum(1)/self.input_dim))-np.exp(np.cos(2*np.pi*X).sum(1)/self.input_dim))
        if self.sd == 0:
noise = np.zeros(n).reshape(n,1)
else:
noise = np.random.normal(0,self.sd,n).reshape(n,1)
return fval.reshape(n,1) + noise
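# A minimal usage sketch (illustrative only; run as a script, it assumes the
# GPyOpt utilities imported above are available):
if __name__ == '__main__':
    func = alpine1(input_dim=2, sd=0)
    X_demo = np.array([[0.0, 0.0], [1.0, -1.0]])
    # f() returns an (n, 1) column of objective values; alpine1 is zero at the origin
    print(func.f(X_demo))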
| SheffieldML/GPyOpt | GPyOpt/objective_examples/experimentsNd.py | Python | bsd-3-clause | 4,131 |
from setuptools import setup, find_packages
install_requires = [
'South',
'django-piston',
'django-extensions',
'python-memcached',
'gunicorn',
'psycopg2',
]
setup(
name = "obudget",
version = "0.1",
url = 'http://github.com/hasadna/obudget',
description = "Bringing transperancy to the Israeli Knesset",
author = 'Benny Daon, Imri Goldberg and others',
packages = find_packages('src'),
package_dir = {'': 'src'},
install_requires = install_requires,
classifiers=['Development Status :: 2 - Pre-Alpha',
'Environment :: Web Environment',
'Framework :: Django',
'License :: OSI Approved :: BSD License',
'Natural Language :: Hebrew',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Programming Language :: JavaScript'],
)
| daonb/obudget | setup.py | Python | bsd-3-clause | 967 |
# Use stackless to bounce functions and avoid the stack.
def Bouncy(f):
try:
import stackless
def new_f(*args, **kwargs):
def wrap(channel, f, args, kwargs):
try:
result = f(*args, **kwargs)
channel.send(result)
except Exception as e:
channel.send(e)
channel = stackless.channel()
stackless.tasklet(wrap)(channel, f, args, kwargs)
result = channel.receive()
if isinstance(result, Exception):
raise result
else:
return result
new_f.__name__ = f.__name__
new_f.__doc__ = f.__doc__
return new_f
    except ImportError:
        return f
# Rename functions
def Rename(name):
def wrap(f):
f.__name__ = name
return f
return wrap
# Define syntax; it's up to the function to evaluate its arguments.
# f.Evaluate will be bound to Evaluate(exp, env, k)
# f.Environment will be bound to the current environment
# f.Continuation will be bound to the current continuation
def Syntax(f):
f.Syntax = True
return f
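# A small usage sketch (illustrative; `countdown` is hypothetical). With
# stackless installed, Bouncy runs each call in a fresh tasklet, so deep
# recursion does not exhaust the stack; without stackless it is a no-op:
#
#     @Bouncy
#     def countdown(n):
#         return 'done' if n == 0 else countdown(n - 1)
#
#     countdown(100000)  # returns 'done' instead of hitting the recursion limit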
| jpverkamp/schempy | Decorators.py | Python | bsd-3-clause | 960 |
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import collections
import page_sets
import re
from core import perf_benchmark
from telemetry.core import util
from telemetry.page import action_runner
from telemetry.page import page_test
from telemetry.timeline import async_slice as async_slice_module
from telemetry.timeline import slice as slice_module
from telemetry.value import scalar
from measurements import timeline_controller
from metrics import speedindex
class _ServiceWorkerTimelineMetric(object):
def AddResultsOfCounters(self, process, counter_regex_string, results):
counter_filter = re.compile(counter_regex_string)
for counter_name, counter in process.counters.iteritems():
if not counter_filter.search(counter_name):
continue
total = sum(counter.totals)
# Results objects cannot contain the '.' character, so remove that here.
sanitized_counter_name = counter_name.replace('.', '_')
results.AddValue(scalar.ScalarValue(
results.current_page, sanitized_counter_name, 'count', total))
results.AddValue(scalar.ScalarValue(
results.current_page, sanitized_counter_name + '_avg', 'count',
total / float(len(counter.totals))))
def AddResultsOfEvents(
self, process, thread_regex_string, event_regex_string, results):
thread_filter = re.compile(thread_regex_string)
event_filter = re.compile(event_regex_string)
for thread in process.threads.itervalues():
thread_name = thread.name.replace('/', '_')
if not thread_filter.search(thread_name):
continue
filtered_events = []
for event in thread.IterAllEvents():
event_name = event.name.replace('.', '_')
if event_filter.search(event_name):
filtered_events.append(event)
async_events_by_name = collections.defaultdict(list)
sync_events_by_name = collections.defaultdict(list)
for event in filtered_events:
if isinstance(event, async_slice_module.AsyncSlice):
async_events_by_name[event.name].append(event)
elif isinstance(event, slice_module.Slice):
sync_events_by_name[event.name].append(event)
for event_name, event_group in async_events_by_name.iteritems():
times = [e.duration for e in event_group]
self._AddResultOfEvent(thread_name, event_name, times, results)
for event_name, event_group in sync_events_by_name.iteritems():
times = [e.self_time for e in event_group]
self._AddResultOfEvent(thread_name, event_name, times, results)
def _AddResultOfEvent(self, thread_name, event_name, times, results):
total = sum(times)
biggest_jank = max(times)
# Results objects cannot contain the '.' character, so remove that here.
sanitized_event_name = event_name.replace('.', '_')
full_name = thread_name + '|' + sanitized_event_name
results.AddValue(scalar.ScalarValue(
results.current_page, full_name, 'ms', total))
results.AddValue(scalar.ScalarValue(
results.current_page, full_name + '_max', 'ms', biggest_jank))
results.AddValue(scalar.ScalarValue(
results.current_page, full_name + '_avg', 'ms', total / len(times)))
class _ServiceWorkerMeasurement(page_test.PageTest):
"""Measure Speed Index and TRACE_EVENTs"""
def __init__(self):
super(_ServiceWorkerMeasurement, self).__init__()
self._timeline_controller = timeline_controller.TimelineController()
self._speed_index = speedindex.SpeedIndexMetric()
self._page_open_times = collections.defaultdict(int)
def DidRunPage(self, platform):
if platform.tracing_controller.is_tracing_running:
platform.tracing_controller.StopTracing()
def WillNavigateToPage(self, page, tab):
self._timeline_controller.SetUp(page, tab)
self._timeline_controller.Start(tab)
self._speed_index.Start(page, tab)
def ValidateAndMeasurePage(self, page, tab, results):
runner = action_runner.ActionRunner(tab)
# timeline_controller requires creation of at least a single interaction
# record. service_worker should be refactored to follow the
# timeline_based_measurement or it should not re-use timeline_controller
# logic for start & stop tracing.
with runner.CreateInteraction('_DummyInteraction'):
pass
tab.WaitForDocumentReadyStateToBeComplete(40)
self._timeline_controller.Stop(tab, results)
# Retrieve TRACE_EVENTs
timeline_metric = _ServiceWorkerTimelineMetric()
browser_process = self._timeline_controller.model.browser_process
filter_text = '(RegisterServiceWorker|'\
'UnregisterServiceWorker|'\
'ProcessAllocate|'\
'FindRegistrationForDocument|'\
'DispatchFetchEvent)'
timeline_metric.AddResultsOfEvents(
browser_process, 'IOThread', filter_text, results)
# Record Speed Index
def SpeedIndexIsFinished():
return self._speed_index.IsFinished(tab)
util.WaitFor(SpeedIndexIsFinished, 60)
self._speed_index.Stop(page, tab)
# Distinguish the first and second load from the subsequent loads
url = str(page)
chart_prefix = 'page_load'
self._page_open_times[url] += 1
if self._page_open_times[url] == 1:
chart_prefix += '_1st'
elif self._page_open_times[url] == 2:
chart_prefix += '_2nd'
else:
chart_prefix += '_later'
self._speed_index.AddResults(tab, results, chart_prefix)
class _ServiceWorkerMicroBenchmarkMeasurement(page_test.PageTest):
"""Record results reported by the JS microbenchmark."""
def __init__(self):
super(_ServiceWorkerMicroBenchmarkMeasurement, self).__init__()
def ValidateAndMeasurePage(self, page, tab, results):
tab.WaitForJavaScriptExpression('window.done', 40)
json = tab.EvaluateJavaScript('window.results || {}')
for key, value in json.iteritems():
results.AddValue(scalar.ScalarValue(
results.current_page, key, value['units'], value['value']))
class ServiceWorkerPerfTest(perf_benchmark.PerfBenchmark):
"""Performance test of pages using ServiceWorker.
The page set contains pages like Trained to Thrill and svgomg.
Execution time of these pages will be shown as Speed Index, and TRACE_EVENTs
are subsidiary information to understand performance regressions in more
detail.
"""
test = _ServiceWorkerMeasurement
page_set = page_sets.ServiceWorkerPageSet
@classmethod
def Name(cls):
return 'service_worker.service_worker'
class ServiceWorkerMicroBenchmarkPerfTest(perf_benchmark.PerfBenchmark):
"""This test is a microbenchmark of service worker.
The page set is a benchmark page that generates many concurrent requests
handled by a service worker that does respondWith(new Response()). The test
result is the response times.
"""
test = _ServiceWorkerMicroBenchmarkMeasurement
page_set = page_sets.ServiceWorkerMicroBenchmarkPageSet
@classmethod
def Name(cls):
return 'service_worker.service_worker_micro_benchmark'
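# These benchmarks are normally launched through Telemetry's runner from a
# Chromium checkout (illustrative invocations; available flags vary):
#
#   tools/perf/run_benchmark service_worker.service_worker --browser=release
#   tools/perf/run_benchmark service_worker.service_worker_micro_benchmark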
| highweb-project/highweb-webcl-html5spec | tools/perf/benchmarks/service_worker.py | Python | bsd-3-clause | 7,142 |
#
# Copyright (c) 2009-2015, Jack Poulson
# All rights reserved.
#
# This file is part of Elemental and is under the BSD 2-Clause License,
# which can be found in the LICENSE file in the root directory, or at
# http://opensource.org/licenses/BSD-2-Clause
#
import El
m = 2000
n = 4000
k = 3000
testMehrotra = True
testIPF = False
manualInit = False
display = False
progress = True
worldRank = El.mpi.WorldRank()
worldSize = El.mpi.WorldSize()
# Make Q a sparse semidefinite matrix
def Semidefinite(height):
Q = El.DistSparseMatrix()
Q.Resize(height,height)
localHeight = Q.LocalHeight()
Q.Reserve(localHeight)
for sLoc in xrange(localHeight):
s = Q.GlobalRow(sLoc)
Q.QueueLocalUpdate( sLoc, s, 1 );
Q.ProcessQueues()
return Q
# Make a sparse matrix with the last column dense
def Rectang(height,width):
A = El.DistSparseMatrix()
A.Resize(height,width)
localHeight = A.LocalHeight()
A.Reserve(5*localHeight)
for sLoc in xrange(localHeight):
s = A.GlobalRow(sLoc)
if s < width:
A.QueueLocalUpdate( sLoc, s, 11 )
if s >= 1 and s-1 < width:
A.QueueLocalUpdate( sLoc, s-1, -1 )
if s+1 < width:
A.QueueLocalUpdate( sLoc, s+1, 2 )
if s >= height and s-height < width:
A.QueueLocalUpdate( sLoc, s-height, -3 )
if s+height < width:
A.QueueLocalUpdate( sLoc, s+height, 4 )
# The dense last column
A.QueueLocalUpdate( sLoc, width-1, -5/height );
A.ProcessQueues()
return A
Q = Semidefinite(n)
A = Rectang(m,n)
G = Rectang(k,n)
# Generate a (b,h) which implies a primal feasible (x,s)
# ======================================================
# b := A xGen
# -----------
xGen = El.DistMultiVec()
El.Uniform(xGen,n,1,0.5,0.5)
b = El.DistMultiVec()
El.Zeros( b, m, 1 )
El.Multiply( El.NORMAL, 1., A, xGen, 0., b )
# h := G xGen + sGen
# ------------------
sGen = El.DistMultiVec()
El.Uniform(sGen,k,1,0.5,0.5)
h = El.DistMultiVec()
El.Copy( sGen, h )
El.Multiply( El.NORMAL, 1., G, xGen, 1., h )
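# Since sGen is drawn from (0,1), h - G xGen = sGen > 0, so (xGen, sGen) is
# strictly feasible for the conic constraint G x + s = h, s >= 0.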
# Generate a c which implies a dual feasible (y,z)
# ================================================
yGen = El.DistMultiVec()
El.Gaussian(yGen,m,1)
zGen = El.DistMultiVec()
El.Uniform(zGen,k,1,0.5,0.5)
c = El.DistMultiVec()
El.Zeros(c,n,1)
El.Multiply( El.NORMAL, -1, Q, xGen, 1., c )
El.Multiply( El.TRANSPOSE, -1., A, yGen, 1., c )
El.Multiply( El.TRANSPOSE, -1., G, zGen, 1., c )
if display:
El.Display( Q, "Q" )
El.Display( A, "A" )
El.Display( G, "G" )
El.Display( b, "b" )
El.Display( c, "c" )
El.Display( h, "h" )
# Set up the control structure (and possibly initial guesses)
# ===========================================================
ctrl = El.QPAffineCtrl_d()
xOrig = El.DistMultiVec()
yOrig = El.DistMultiVec()
zOrig = El.DistMultiVec()
sOrig = El.DistMultiVec()
if manualInit:
El.Uniform(xOrig,n,1,0.5,0.4999)
El.Uniform(yOrig,m,1,0.5,0.4999)
El.Uniform(zOrig,k,1,0.5,0.4999)
El.Uniform(sOrig,k,1,0.5,0.4999)
x = El.DistMultiVec()
y = El.DistMultiVec()
z = El.DistMultiVec()
s = El.DistMultiVec()
if testMehrotra:
ctrl.approach = El.QP_MEHROTRA
ctrl.mehrotraCtrl.qsdCtrl.progress = progress
ctrl.mehrotraCtrl.primalInit = manualInit
ctrl.mehrotraCtrl.dualInit = manualInit
ctrl.mehrotraCtrl.progress = progress
ctrl.mehrotraCtrl.time = True
El.Copy( xOrig, x )
El.Copy( yOrig, y )
El.Copy( zOrig, z )
El.Copy( sOrig, s )
startMehrotra = El.mpi.Time()
El.QPAffine(Q,A,G,b,c,h,x,y,z,s,ctrl)
endMehrotra = El.mpi.Time()
if worldRank == 0:
print "Mehrotra time:", endMehrotra-startMehrotra
if display:
El.Display( x, "x Mehrotra" )
El.Display( y, "y Mehrotra" )
El.Display( z, "z Mehrotra" )
El.Display( s, "s Mehrotra" )
d = El.DistMultiVec()
El.Zeros( d, n, 1 )
El.Multiply( El.NORMAL, 1., Q, x, 0., d )
obj = El.Dot(x,d)/2 + El.Dot(c,x)
if worldRank == 0:
print "Mehrotra (1/2) x^T Q x + c^T x =", obj
if testIPF:
ctrl.approach = El.QP_IPF
ctrl.ipfCtrl.primalInit = manualInit
ctrl.ipfCtrl.dualInit = manualInit
ctrl.ipfCtrl.progress = progress
ctrl.ipfCtrl.lineSearchCtrl.progress = progress
El.Copy( xOrig, x )
El.Copy( yOrig, y )
El.Copy( zOrig, z )
El.Copy( sOrig, s )
startIPF = El.mpi.Time()
El.QPAffine(Q,A,G,b,c,h,x,y,z,s,ctrl)
endIPF = El.mpi.Time()
if worldRank == 0:
print "IPF time:", endIPF-startIPF
if display:
El.Display( x, "x IPF" )
El.Display( y, "y IPF" )
El.Display( z, "z IPF" )
El.Display( s, "s IPF" )
d = El.DistMultiVec()
El.Zeros( d, n, 1 )
El.Multiply( El.NORMAL, 1., Q, x, 0., d )
obj = El.Dot(x,d)/2 + El.Dot(c,x)
if worldRank == 0:
print "IPF (1/2) x^T Q x + c^T x =", obj
# Require the user to press a button before the figures are closed
El.Finalize()
if worldSize == 1:
raw_input('Press Enter to exit')
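# This example is MPI-aware; a typical multi-process launch (illustrative):
#
#   mpirun -np 4 python QPAffine.py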
| birm/Elemental | examples/interface/QPAffine.py | Python | bsd-3-clause | 4,826 |
import tests.model_control.test_ozone_custom_models_enabled as testmod
testmod.build_model( ['Quantization'] , ['Lag1Trend'] , ['Seasonal_WeekOfYear'] , ['ARX'] ); | antoinecarme/pyaf | tests/model_control/detailed/transf_Quantization/model_control_one_enabled_Quantization_Lag1Trend_Seasonal_WeekOfYear_ARX.py | Python | bsd-3-clause | 165 |
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding index on 'Comment', fields ['fetched']
db.create_index(u'vkontakte_board_comment', ['fetched'])
# Adding index on 'Topic', fields ['comments_count']
db.create_index(u'vkontakte_board_topic', ['comments_count'])
# Adding index on 'Topic', fields ['fetched']
db.create_index(u'vkontakte_board_topic', ['fetched'])
def backwards(self, orm):
# Removing index on 'Topic', fields ['fetched']
db.delete_index(u'vkontakte_board_topic', ['fetched'])
# Removing index on 'Topic', fields ['comments_count']
db.delete_index(u'vkontakte_board_topic', ['comments_count'])
# Removing index on 'Comment', fields ['fetched']
db.delete_index(u'vkontakte_board_comment', ['fetched'])
models = {
u'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
u'vkontakte_board.comment': {
'Meta': {'ordering': "['remote_id']", 'object_name': 'Comment'},
'author': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'topics_comments'", 'to': u"orm['vkontakte_users.User']"}),
'date': ('django.db.models.fields.DateTimeField', [], {'db_index': 'True'}),
'fetched': ('django.db.models.fields.DateTimeField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'remote_id': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': "'50'"}),
'text': ('django.db.models.fields.TextField', [], {}),
'topic': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'comments'", 'to': u"orm['vkontakte_board.Topic']"})
},
u'vkontakte_board.topic': {
'Meta': {'ordering': "['remote_id']", 'object_name': 'Topic'},
'comments_count': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0', 'db_index': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {'db_index': 'True'}),
'created_by': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'topics_created'", 'to': u"orm['vkontakte_users.User']"}),
'fetched': ('django.db.models.fields.DateTimeField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'first_comment': ('django.db.models.fields.TextField', [], {}),
'group': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'topics'", 'to': u"orm['vkontakte_groups.Group']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_closed': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_fixed': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_comment': ('django.db.models.fields.TextField', [], {}),
'remote_id': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': "'50'"}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '500'}),
'updated': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'db_index': 'True'}),
'updated_by': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'topics_updated'", 'to': u"orm['vkontakte_users.User']"})
},
u'vkontakte_groups.group': {
'Meta': {'ordering': "['name']", 'object_name': 'Group'},
'fetched': ('django.db.models.fields.DateTimeField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_admin': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_closed': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '800'}),
'photo': ('django.db.models.fields.URLField', [], {'max_length': '200'}),
'photo_big': ('django.db.models.fields.URLField', [], {'max_length': '200'}),
'photo_medium': ('django.db.models.fields.URLField', [], {'max_length': '200'}),
'remote_id': ('django.db.models.fields.BigIntegerField', [], {'unique': 'True'}),
'screen_name': ('django.db.models.fields.CharField', [], {'max_length': '50', 'db_index': 'True'}),
'type': ('django.db.models.fields.CharField', [], {'max_length': '10'}),
'users': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['vkontakte_users.User']", 'symmetrical': 'False'})
},
u'vkontakte_places.city': {
'Meta': {'ordering': "['name']", 'object_name': 'City'},
'area': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'country': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'cities'", 'null': 'True', 'to': u"orm['vkontakte_places.Country']"}),
'fetched': ('django.db.models.fields.DateTimeField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'region': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'remote_id': ('django.db.models.fields.BigIntegerField', [], {'unique': 'True'})
},
u'vkontakte_places.country': {
'Meta': {'ordering': "['name']", 'object_name': 'Country'},
'fetched': ('django.db.models.fields.DateTimeField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'remote_id': ('django.db.models.fields.BigIntegerField', [], {'unique': 'True'})
},
u'vkontakte_users.user': {
'Meta': {'ordering': "['remote_id']", 'object_name': 'User'},
'about': ('django.db.models.fields.TextField', [], {}),
'activity': ('django.db.models.fields.TextField', [], {}),
'albums': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'audios': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'bdate': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'books': ('django.db.models.fields.TextField', [], {}),
'city': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['vkontakte_places.City']", 'null': 'True', 'on_delete': 'models.SET_NULL'}),
'counters_updated': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}),
'country': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['vkontakte_places.Country']", 'null': 'True', 'on_delete': 'models.SET_NULL'}),
'facebook': ('django.db.models.fields.CharField', [], {'max_length': '500'}),
'facebook_name': ('django.db.models.fields.CharField', [], {'max_length': '500'}),
'faculty': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True'}),
'faculty_name': ('django.db.models.fields.CharField', [], {'max_length': '500'}),
'fetched': ('django.db.models.fields.DateTimeField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'followers': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'friends': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'friends_count': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'friends_users': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'followers_users'", 'symmetrical': 'False', 'to': u"orm['vkontakte_users.User']"}),
'games': ('django.db.models.fields.TextField', [], {}),
'graduation': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True'}),
'has_mobile': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'home_phone': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'interests': ('django.db.models.fields.TextField', [], {}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'livejournal': ('django.db.models.fields.CharField', [], {'max_length': '500'}),
'mobile_phone': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'movies': ('django.db.models.fields.TextField', [], {}),
'mutual_friends': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'notes': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'photo': ('django.db.models.fields.URLField', [], {'max_length': '200'}),
'photo_big': ('django.db.models.fields.URLField', [], {'max_length': '200'}),
'photo_medium': ('django.db.models.fields.URLField', [], {'max_length': '200'}),
'photo_medium_rec': ('django.db.models.fields.URLField', [], {'max_length': '200'}),
'photo_rec': ('django.db.models.fields.URLField', [], {'max_length': '200'}),
'rate': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True'}),
'relation': ('django.db.models.fields.SmallIntegerField', [], {'null': 'True'}),
'remote_id': ('django.db.models.fields.BigIntegerField', [], {'unique': 'True'}),
'screen_name': ('django.db.models.fields.CharField', [], {'max_length': '100', 'db_index': 'True'}),
'sex': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'skype': ('django.db.models.fields.CharField', [], {'max_length': '500'}),
'subscriptions': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'sum_counters': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'timezone': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'tv': ('django.db.models.fields.TextField', [], {}),
'twitter': ('django.db.models.fields.CharField', [], {'max_length': '500'}),
'university': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True'}),
'university_name': ('django.db.models.fields.CharField', [], {'max_length': '500'}),
'user_photos': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'user_videos': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'videos': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'wall_comments': ('django.db.models.fields.BooleanField', [], {'default': 'False'})
},
u'vkontakte_wall.comment': {
'Meta': {'ordering': "['post', '-date']", 'object_name': 'Comment'},
'author_content_type': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'comments'", 'to': u"orm['contenttypes.ContentType']"}),
'author_id': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'date': ('django.db.models.fields.DateTimeField', [], {'db_index': 'True'}),
'fetched': ('django.db.models.fields.DateTimeField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'from_id': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'like_users': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'like_comments'", 'symmetrical': 'False', 'to': u"orm['vkontakte_users.User']"}),
'likes': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0', 'db_index': 'True'}),
'post': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'wall_comments'", 'to': u"orm['vkontakte_wall.Post']"}),
'raw_html': ('django.db.models.fields.TextField', [], {}),
'raw_json': ('annoying.fields.JSONField', [], {'default': '{}', 'null': 'True'}),
'remote_id': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': "'20'"}),
'reply_for_content_type': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'replies'", 'null': 'True', 'to': u"orm['contenttypes.ContentType']"}),
'reply_for_id': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True', 'db_index': 'True'}),
'reply_to': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['vkontakte_wall.Comment']", 'null': 'True'}),
'text': ('django.db.models.fields.TextField', [], {}),
'wall_owner_content_type': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'vkontakte_wall_comments'", 'to': u"orm['contenttypes.ContentType']"}),
'wall_owner_id': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'})
},
u'vkontakte_wall.post': {
'Meta': {'ordering': "['wall_owner_id', '-date']", 'object_name': 'Post'},
'attachments': ('django.db.models.fields.TextField', [], {}),
'author_content_type': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'vkontakte_posts'", 'to': u"orm['contenttypes.ContentType']"}),
'author_id': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'comments': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0', 'db_index': 'True'}),
'copy_owner_content_type': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'vkontakte_wall_copy_posts'", 'null': 'True', 'to': u"orm['contenttypes.ContentType']"}),
'copy_owner_id': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True', 'db_index': 'True'}),
'copy_post': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['vkontakte_wall.Post']", 'null': 'True'}),
'copy_text': ('django.db.models.fields.TextField', [], {}),
'date': ('django.db.models.fields.DateTimeField', [], {'db_index': 'True'}),
'fetched': ('django.db.models.fields.DateTimeField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'geo': ('django.db.models.fields.TextField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'like_users': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'like_posts'", 'symmetrical': 'False', 'to': u"orm['vkontakte_users.User']"}),
'likes': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0', 'db_index': 'True'}),
'media': ('django.db.models.fields.TextField', [], {}),
'online': ('django.db.models.fields.PositiveSmallIntegerField', [], {'null': 'True'}),
'post_source': ('django.db.models.fields.TextField', [], {}),
'raw_html': ('django.db.models.fields.TextField', [], {}),
'raw_json': ('annoying.fields.JSONField', [], {'default': '{}', 'null': 'True'}),
'remote_id': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': "'20'"}),
'reply_count': ('django.db.models.fields.PositiveSmallIntegerField', [], {'null': 'True'}),
'repost_users': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'repost_posts'", 'symmetrical': 'False', 'to': u"orm['vkontakte_users.User']"}),
'reposts': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0', 'db_index': 'True'}),
'signer_id': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True'}),
'text': ('django.db.models.fields.TextField', [], {}),
'wall_owner_content_type': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'vkontakte_wall_posts'", 'to': u"orm['contenttypes.ContentType']"}),
'wall_owner_id': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'})
}
}
complete_apps = ['vkontakte_board'] | ramusus/django-vkontakte-board | vkontakte_board/migrations/0006_auto.py | Python | bsd-3-clause | 17,402 |
#! /usr/bin/env python
# -*- coding: utf-8 -*-
"""Test Fixtures
.. moduleauthor:: Timothy Helton <[email protected]>
"""
from chromalog.mark.objects import Mark
import testfixtures as tf
class ChromalogLogCapture(tf.LogCapture):
"""Class will remove color markings from chromalog logger entries.
    .. note:: The Chromalog package facilitates colored logging to the
        screen and removes the color tags when printed to a file.
        During unit testing the color tags are captured from stderr, which
        complicates verifying logging statements.
        This class provides an additional method to the base class
        testfixtures.LogCapture, which extracts the string from the
        chromalog object for the logger name and level.
"""
def __init__(self):
super().__init__()
def filter_records(self):
"""Remove chromalog color markings from LogCapture attributes."""
for (idx, record) in enumerate(self.records):
if isinstance(record.name, Mark):
self.records[idx].name = record.name.obj
if isinstance(record.levelname, Mark):
self.records[idx].levelname = record.levelname.obj
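# A usage sketch in a unit test (hypothetical test body; the context-manager
# and check() APIs come from testfixtures.LogCapture):
#
#     def test_logs_cleanly():
#         with ChromalogLogCapture() as capture:
#             run_code_that_logs()          # hypothetical function under test
#             capture.filter_records()      # strip chromalog color marks
#             capture.check(('my.logger', 'INFO', 'expected message'))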
| TimothyHelton/strumenti | strumenti/tests/fixtures.py | Python | bsd-3-clause | 1,215 |
"""
Put Canvas specific configuration here.
Note that this project has three different configuration files:
/var/canvas/website/settings.py
That is where we keep all Django specific settings.
/var/canvas/common/configuration.py
That is where we keep AWS and infrastructure related settings. Note that this one is outside the /website
package, which means that you need some pythonpath magic to use it inside canvas
/var/canvas/website/canvas/knobs.py
That is where we keep static vars that you can use around Canvas.
"""
from drawquest.knobs import *
# How many times a user gets to sticker before he or she is shown a sticker prompt.
LOGGED_OUT_STICKER_LIMIT = 4
EPIC_STICKER_COST_THRESHOLD = 5
# This allows you to override the default template filename for specific notifications.
OVERRIDE_NOTIFICATION_TEMPLATE = {
"EmailChannel": {
"newsletter": {
"body": "email/newsletter_final.html",
"subject": "email/newsletter_final_subject.txt"
}
}
}
FLAG_RATE_LIMITS = {
'm': (15, 2*60,),
'h': (50, 60*60,),
}
# The number of (#1) stickers users get when they visit every day. This is a retention award.
DAILY_FREE_STICKERS = 3
SIGNUP_FREE_STICKERS = 10
# The number of stickers required to reach each level.
STICKER_SCHEDULE = [5,10,15,20,25,30,40,50,60,70,80,90,100]
# The award (in #1 stickers) a user gets when she achieves a level.
STICKER_REWARDS = [3, 3, 3, 3, 3, 3, 3, 4, 4, 4, 4, 4, 5]
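# Reading the two tables together (illustrative arithmetic, assuming the
# schedule is per-level): reaching level 3 costs 5 + 10 + 15 = 30 stickers in
# total and pays out 3 + 3 + 3 = 9 bonus #1 stickers along the way.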
TWENTYFOUR_HOUR_EMAIL_COMMENT_COUNT = 9
TAGLINE = 'Share and play with images!'
FACEBOOK_SHARE_IMAGE_TYPE = ['small_column', 'stream', 'thumbnail']
# The max filesize in KB before we try the next smaller image type, from the list above.
FACEBOOK_SHARE_IMAGE_SIZE_CUTOFF = 60
VIEW_THREAD_PAGE_NUM_TOP = 8
COMMENTS_PER_PAGE = 50
DEFAULT_FOOTER_STICKER = 'smiley'
POST_TEXT_TRUNCATION_LENGTH = 140
FOOTER_UPDATE_ATTEMPTS = 3
POST_TITLE_MAX_LENGTH = 140
STICKER_MESSAGE_MAX_LENGTH = 140
# How many points are given for one of your posts being remixed.
REMIX_POINTS = 1
PUBLIC_API_RATE_LIMIT = 1000
PUBLIC_API_MAX_ITEMS = 100
PUBLIC_API_PAGINATION_SIZE = 100
FOLLOWING_MENU_ROWS = 15
FOLLOWING_MENU_COLUMNS = FOLLOWING_MENU_ROWS * 4
REMIX_IMAGES_STAFF_PICKS = [
# This is the abcde from http://example.com/p/abcde
'2hdv9',
'1km2r',
'2ypcj',
'1f1a9',
'25gna',
'1umn4',
'222zn',
'8wfp8',
'89bkc',
'qix8v',
'lakze',
'4uqym',
'4luij',
'42k6w',
'awg15',
'ocmpt',
'pkztj',
'2f6zm',
'21ypq',
'1ese3',
'221qd',
'1i8xo',
'6v79z',
'78ykf',
'u2zw9',
'qydyh',
'tif0q',
'rc328',
'piusb',
]
FEED_PROMOTION_STICKER_COST_THRESHOLD = 5
FEED_ITEMS_PER_PAGE = 50
FOLLOWED_TAGS_SHOWN = 100
FOLLOWED_TAGS_REALTIME_THRESHOLD = 10
ACTIVITY_STREAM_PER_PAGE = 20
SUGGESTED_USERS = [
'Enin',
'Tati5001',
'calhaus',
'RedmonBray',
'Jiakko',
'CyberTaco',
'Harbltron',
'lollajames',
'TmsT',
'Sunset',
'Xeno_Mezphy',
'AngelOsario',
'ravenunknown',
'abeeiamnot',
'Coutoon',
'nicepunk',
'GrogMalBlood',
'ZombieLincolnFP',
'TrueBlue',
'mradmack',
'jerm',
'the7thcolumn',
'BrettZki',
'francesco9001',
'sanamkan',
'Grga',
'nsbarr',
'dmauro',
'moobraz',
'dagfooyo',
'echapa',
'bhudapop',
'ChasM',
'metaknight',
'Photocopier',
'lukebn',
'Zoucas',
'AvengerOfBoredom',
'mikshaw',
'Anominous',
]
SUGGESTED_TOPICS = [
'abstract',
'art',
'canvas',
'cats',
'challenges',
'cute',
'drawing',
'exploitable',
'funny',
'games',
'gif_bin',
'glitch_art',
'photography',
'pop_culture',
'request',
'video_games',
]
OFFLINE_SUGGESTED_TOPICS = list(sorted([
'8bit','90s','bookmarklet','darker_side','dogs','drawfriends','fashion','food','glitch_art','minecraft','movies','music','nerdy','partyhard','politics','premium','random','scripts','technology','trololololo','wallpaper','wtf',
'abstract',
'art',
'canvas',
'cats',
'challenges',
'cute',
'drawing',
'exploitable',
'funny',
'games',
'gif_bin',
'glitch_art',
'photography',
'pop_culture',
'request',
'video_games',
]))
SUGGESTED_TOPIC_PREVIEWS = {
"abstract" : "cd12831f5c633ed00c4f483dc3006eb3c0cca345",
"art" : "bd457cc102df633df440c96dc2aaae107de3979a",
"canvas" : "41eb1025e73b62b297e48e7736098457da32d16c",
"cats" : "5c4279694ef21e9be365d6f9d7f6900e48edaba6",
"challenges" : "c28e1df3b622ec88203949620b23b82eeacfa6e5",
"cute" : "dd2871c89dec7e589425bdfc8b6de1e4b8eafa75",
"drawing" : "eddd46ab6992e867a7f45f3e56aa9e95122ae419",
"exploitable" : "853e684737772002f3dc99a628b14a60db133fa6",
"funny" : "9823b39e77698f7371071310094567d4542e82d0",
"games" : "5be3b62cae5538e5457bc24574849af46c02a009",
"gif_bin" : "14aba9e1d8a126a7dd2bfad5c9fbc803e0d314c6",
"glitch_art" : "bbf5af5e5580dbfb7db2bc73c5ae1172ad281a19",
"photography" : "b28d0a7931c11cc5909f05c1bf5e7368ea1bfb32",
"pop_culture" : "0d04b9d7ae641a31ea12e50b98e156912f2ad5ef",
"request" : "299071ee0d48065c76bd940caa252680d210183f",
"video_games" : "91096f74bc169f67c8c62279103eebf73babad0b",
}
SUGGESTED_USERS_TO_FOLLOW_COUNT = 3
| canvasnetworks/canvas | website/canvas/knobs.py | Python | bsd-3-clause | 5,468 |
# Licensed under a 3-clause BSD style license - see LICENSE.rst
# -*- coding: utf-8 -*-
"""
==============
desiutil.plots
==============
Module for code plots.
"""
import os
import warnings
from datetime import date
from types import MethodType
import numpy as np
import numpy.ma
import matplotlib.pyplot as plt
import matplotlib.dates
from matplotlib.cm import ScalarMappable
from matplotlib.collections import PolyCollection
from matplotlib.colors import Normalize, colorConverter
from matplotlib.patches import Ellipse
from astropy.coordinates import SkyCoord, HeliocentricTrueEcliptic, ICRS, Longitude
import astropy.units as u
from astropy.time import Time
from astropy.utils import iers
from .iers import freeze_iers
def plot_slices(x, y, x_lo, x_hi, y_cut, num_slices=5, min_count=100, axis=None,
set_ylim_from_stats=True, scatter=True):
"""Scatter plot with 68, 95 percentiles superimposed in slices.
Modified from code written by D. Kirkby
Requires that the matplotlib package is installed.
Parameters
----------
x : array of :class:`float`
X-coordinates to scatter plot. Points outside [x_lo, x_hi] are
not displayed.
y : array of :class:`float`
Y-coordinates to scatter plot. Y values are assumed to be roughly
symmetric about zero.
x_lo : :class:`float`
Minimum value of `x` to plot.
x_hi : :class:`float`
Maximum value of `x` to plot.
y_cut : :class:`float`
The target maximum value of :math:`|y|`. A dashed line at this value
is added to the plot, and the vertical axis is clipped at
:math:`|y|` = 1.25 * `y_cut` (but values outside this range are
included in the percentile statistics).
num_slices : :class:`int`, optional
Number of equally spaced slices to divide the interval [x_lo, x_hi]
into.
min_count : :class:`int`, optional
Do not use slices with fewer points for superimposed percentile
statistics.
axis : :class:`matplotlib.axes.Axes`, optional
Uses the current axis if this is not set.
set_ylim_from_stats : :class:`bool`, optional
Set ylim of plot from 95% stat.
    scatter : :class:`bool`, optional
        Show the data as a scatter plot; best to limit this to small datasets.
Returns
-------
:class:`matplotlib.axes.Axes`
The Axes object used in the plot.
"""
if axis is None:
axis = plt.gca()
x_bins = np.linspace(x_lo, x_hi, num_slices + 1)
x_i = np.digitize(x, x_bins) - 1
limits = []
counts = []
for s in range(num_slices):
# Calculate percentile statistics for ok fits.
y_slice = y[(x_i == s)]
counts.append(len(y_slice))
if counts[-1] > 0:
limits.append(np.percentile(y_slice, (2.5, 16, 50, 84, 97.5)))
else:
limits.append((0., 0., 0., 0., 0.))
limits = np.array(limits)
counts = np.array(counts)
# Plot points
if scatter:
axis.scatter(x, y, s=15, marker='.', lw=0, color='b', alpha=0.5, zorder=1)
# Plot quantiles in slices with enough fits.
    def stepify(y):
        return np.vstack([y, y]).transpose().flatten()
y_m2 = stepify(limits[:, 0])
y_m1 = stepify(limits[:, 1])
y_med = stepify(limits[:, 2])
y_p1 = stepify(limits[:, 3])
y_p2 = stepify(limits[:, 4])
xstack = stepify(x_bins)[1:-1]
max_yr, max_p2, min_m2 = 0., -1e9, 1e9
for i in range(num_slices):
s = slice(2 * i, 2 * i + 2)
if counts[i] >= min_count:
axis.fill_between(
xstack[s], y_m2[s], y_p2[s], alpha=0.15, color='red', zorder=10)
axis.fill_between(
xstack[s], y_m1[s], y_p1[s], alpha=0.25, color='red', zorder=10)
axis.plot(xstack[s], y_med[s], 'r-', lw=2.)
# For ylim
max_yr = max(max_yr, np.max(y_p2[s]-y_m2[s]))
max_p2 = max(max_p2, np.max(y_p2[s]))
min_m2 = min(min_m2, np.min(y_m2[s]))
# xlim
xmin, xmax = np.min(x), np.max(x)
axis.set_xlim(np.min(x)-(xmax-xmin)*0.02, np.max(x)+(xmax-xmin)*0.02)
# ylim
if set_ylim_from_stats:
axis.set_ylim(min_m2-max_yr/2., max_p2+max_yr/2.)
# Plot cut lines.
axis.axhline(+y_cut, ls=':', color='k')
axis.axhline(0., ls='-', color='k')
axis.axhline(-y_cut, ls=':', color='k')
return axis
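# A minimal usage sketch for plot_slices (illustrative values):
#
#     rng = np.random.RandomState(0)
#     x = rng.uniform(0., 10., 5000)
#     y = rng.normal(scale=0.5, size=5000)
#     axis = plot_slices(x, y, x_lo=0., x_hi=10., y_cut=1.0)
#     plt.show()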
class MaskedArrayWithLimits(numpy.ma.MaskedArray):
"""Masked array with additional `vmin`, `vmax` attributes.
This class accepts the same arguments as
:class:`~numpy.ma.MaskedArray`.
This is not a general-purpose subclass and is only intended to simplify
passing `vmin`, `vmax` limits from :func:`~desiutil.plots.prepare_data` to
the plotting utility methods defined in this module.
Parameters
----------
vmin : :class:`float`, optional
Minimum value when used for clipping or masking.
vmax : :class:`float`, optional
Maximum value when used for clipping or masking.
Attributes
----------
vmin : :class:`float`
Minimum value when used for clipping or masking.
vmax : :class:`float`
Maximum value when used for clipping or masking.
"""
def __new__(cls, *args, **kwargs):
try:
obj = super(MaskedArrayWithLimits, cls).__new__(cls, *args, **kwargs)
except TypeError:
# Numpy >= 1.20.0
trimmed_kwargs = kwargs.copy()
if 'vmin' in trimmed_kwargs:
del trimmed_kwargs['vmin']
if 'vmax' in trimmed_kwargs:
del trimmed_kwargs['vmax']
obj = super(MaskedArrayWithLimits, cls).__new__(cls, *args, **trimmed_kwargs)
if 'vmin' in kwargs:
obj._optinfo['vmin'] = kwargs['vmin']
# obj.vmin = kwargs['vmin']
# else:
# obj.vmin = None
if 'vmax' in kwargs:
obj._optinfo['vmax'] = kwargs['vmax']
# obj.vmax = kwargs['vmax']
# else:
# obj.vmax = None
return obj
@property
def vmin(self):
return self._optinfo.get('vmin', None)
@property
def vmax(self):
return self._optinfo.get('vmax', None)
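# A minimal sketch of the intended behavior; vmin/vmax are stored in the
# masked array's _optinfo dict, so they read back through the properties:
#
# ma = MaskedArrayWithLimits(np.arange(5.), vmin=1., vmax=3.)
# assert (ma.vmin, ma.vmax) == (1., 3.)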
def prepare_data(data, mask=None, clip_lo=None, clip_hi=None,
save_limits=False):
"""Prepare array data for color mapping.
Data is clipped and masked to be suitable for passing to matplotlib
routines that automatically assign colors based on input values.
Parameters
----------
data : array or masked array
Array of data values to assign colors for.
mask : array of bool or None
Array of bools with same shape as data, where True values indicate
values that should be ignored when assigning colors. When None, the
mask of a masked array will be used or all values of an unmasked
array will be used.
clip_lo : float or str
Data values below clip_lo will be clipped to the minimum color. If
clip_lo is a string, it should end with "%" and specify a percentile
of un-masked data to clip below.
clip_hi : float or str
Data values above clip_hi will be clipped to the maximum color. If
clip_hi is a string, it should end with "%" and specify a percentile
of un-masked data to clip above.
save_limits : bool
Save the calculated lo/hi clip values as attributes vmin, vmax of
the returned masked array. Use this flag to indicate that plotting
functions should use these vmin, vmax values when mapping the
returned data to colors.
Returns
-------
masked array
Masked numpy array with the same shape as the input data, with any
input mask applied (or copied from an input masked array) and values
clipped to [clip_lo, clip_hi].
Examples
--------
If no optional parameters are specified, the input data is returned
with only non-finite values masked:
>>> data = np.arange(5.)
>>> prepare_data(data)
masked_array(data = [0.0 1.0 2.0 3.0 4.0],
mask = [False False False False False],
fill_value = 1e+20)
<BLANKLINE>
Any mask selection is propagated to the output:
>>> prepare_data(data, data == 2)
masked_array(data = [0.0 1.0 -- 3.0 4.0],
mask = [False False True False False],
fill_value = 1e+20)
<BLANKLINE>
Values can be clipped by specifying any combination of percentiles
(specified as strings ending with "%") and numeric values:
>>> prepare_data(data, clip_lo='25%', clip_hi=3.5)
masked_array(data = [1.0 1.0 2.0 3.0 3.5],
mask = [False False False False False],
fill_value = 1e+20)
<BLANKLINE>
Clipped values are also masked when the clip value or percentile
is prefixed with "!":
>>> prepare_data(data, clip_lo='!25%', clip_hi=3.5)
masked_array(data = [-- 1.0 2.0 3.0 3.5],
mask = [ True False False False False],
fill_value = 1e+20)
<BLANKLINE>
An input masked array is passed through without any copying unless
clipping is requested:
>>> masked = numpy.ma.arange(5)
>>> masked is prepare_data(masked)
True
Use the save_limits option to store the clipping limits as vmin, vmax
attributes of the returned object:
>>> d = prepare_data(data, clip_lo=1, clip_hi=10, save_limits=True)
>>> d.vmin, d.vmax
(1.0, 10.0)
These attributes can then be used by plotting routines to fix the input
range used for colormapping, independently of the actual range of data.
"""
data = np.asanyarray(data)
if mask is None:
try:
# Use the mask associated with a MaskedArray.
cmask = data.mask
# If no clipping is requested, pass the input through.
if clip_lo is None and clip_hi is None:
return data
except AttributeError:
# Nothing is masked by default.
cmask = np.zeros_like(data, dtype=bool)
else:
#
# Make every effort to ensure that modifying the mask of the output
# does not modify the input mask.
#
cmask = np.array(mask)
if cmask.shape != data.shape:
raise ValueError('Invalid mask shape.')
# Mask any non-finite values.
cmask |= ~np.isfinite(data)
unmasked_data = data[~cmask]
# Convert percentile clip values to absolute values.
def get_clip(value):
clip_mask = False
if isinstance(value, str):
if value.startswith('!'):
clip_mask = True
value = value[1:]
if value.endswith('%'):
value = np.percentile(unmasked_data, float(value[:-1]))
return float(value), clip_mask
if clip_lo is None:
clip_lo, mask_lo = np.min(unmasked_data), False
else:
clip_lo, mask_lo = get_clip(clip_lo)
if clip_hi is None:
clip_hi, mask_hi = np.max(unmasked_data), False
else:
clip_hi, mask_hi = get_clip(clip_hi)
if save_limits:
clipped = MaskedArrayWithLimits(
np.clip(data, clip_lo, clip_hi), cmask, vmin=clip_lo, vmax=clip_hi)
else:
clipped = numpy.ma.MaskedArray(
np.clip(data, clip_lo, clip_hi), cmask)
# Mask values outside the clip range, if requested. The comparisons
# below might trigger warnings for non-finite data.
settings = np.seterr(all='ignore')
if mask_lo:
clipped.mask[data < clip_lo] = True
if mask_hi:
clipped.mask[data > clip_hi] = True
np.seterr(**settings)
return clipped
def init_sky(projection='mollweide', ra_center=120,
galactic_plane_color='red', ecliptic_plane_color='red',
ax=None):
"""Initialize matplotlib axes with a projection of the full sky.
Parameters
----------
projection : :class:`str`, optional
Projection to use. Defaults to 'mollweide'. To show the available projections,
call :func:`matplotlib.projections.get_projection_names`.
ra_center : :class:`float`, optional
Projection is centered at this RA in degrees. Default is +120°, which avoids splitting
the DESI northern and southern regions.
galactic_plane_color : color name, optional
Draw a solid curve representing the galactic plane using the specified color, or do
nothing when ``None``.
ecliptic_plane_color : color name, optional
Draw a dotted curve representing the ecliptic plane using the specified color, or do
nothing when ``None``.
ax : :class:`~matplotlib.axes.Axes`, optional
Axes to use for drawing this map, or create new axes if ``None``.
Returns
-------
:class:`~matplotlib.axes.Axes`
A matplotlib Axes object. Helper methods ``projection_ra()`` and ``projection_dec()``
are added to the object to facilitate conversion to projection coordinates.
Notes
-----
If requested, the ecliptic and galactic planes are plotted with ``zorder`` set to 20.
This keeps them above most other plotted objects, but legends should be set to
a ``zorder`` higher than this value, for example::
leg = ax.legend(ncol=2, loc=1)
leg.set_zorder(25)
"""
#
# Internal functions.
#
def projection_ra(self, ra):
r"""Shift `ra` to the origin of the Axes object and convert to radians.
Parameters
----------
ra : array-like
Right Ascension in degrees.
Returns
-------
array-like
`ra` converted to plot coordinates.
Notes
-----
In matplotlib, map projections expect longitude (RA), latitude (Dec)
in radians with limits :math:`[-\pi, \pi]`, :math:`[-\pi/2, \pi/2]`,
respectively.
"""
#
# Shift RA values.
#
r = np.remainder(ra + 360 - ra_center, 360)
#
# Scale conversion to [-180, 180].
#
r[r > 180] -= 360
#
# Reverse the scale: East to the left.
#
r = -r
return np.radians(r)
def projection_dec(self, dec):
"""Shift `dec` to the origin of the Axes object and convert to radians.
Parameters
----------
dec : array-like
Declination in degrees.
Returns
-------
array-like
`dec` converted to plot coordinates.
"""
return np.radians(dec)
#
# Create ax.
#
if ax is None:
fig = plt.figure(figsize=(10.0, 5.0), dpi=100)
ax = plt.subplot(111, projection=projection)
#
# Prepare labels.
#
base_tick_labels = np.array([150, 120, 90, 60, 30, 0, 330, 300, 270, 240, 210])
base_tick_labels = np.remainder(base_tick_labels+360+ra_center, 360)
tick_labels = np.array(['{0}°'.format(l) for l in base_tick_labels])
#
# Galactic plane.
#
if galactic_plane_color is not None:
galactic_l = np.linspace(0, 2 * np.pi, 1000)
galactic = SkyCoord(l=galactic_l*u.radian, b=np.zeros_like(galactic_l)*u.radian,
frame='galactic').transform_to(ICRS)
#
# Project to map coordinates and display. Use a scatter plot to
# avoid wrap-around complications.
#
paths = ax.scatter(projection_ra(0, galactic.ra.degree),
projection_dec(0, galactic.dec.degree),
marker='.', s=20, lw=0, alpha=0.75,
c=galactic_plane_color, zorder=20)
# Make sure the galactic plane stays above other displayed objects.
# paths.set_zorder(20)
#
# Ecliptic plane.
#
if ecliptic_plane_color is not None:
ecliptic_l = np.linspace(0, 2 * np.pi, 50)
ecliptic = SkyCoord(lon=ecliptic_l*u.radian, lat=np.zeros_like(ecliptic_l)*u.radian, distance=1 * u.Mpc,
frame='heliocentrictrueecliptic').transform_to(ICRS)
#
# Project to map coordinates and display. Use a scatter plot to
# avoid wrap-around complications.
#
paths = ax.scatter(projection_ra(0, ecliptic.ra.degree),
projection_dec(0, ecliptic.dec.degree),
marker='.', s=20, lw=0, alpha=0.75,
c=ecliptic_plane_color, zorder=20)
# paths.set_zorder(20)
#
# Set RA labels.
#
labels = ax.get_xticklabels()
for l, item in enumerate(labels):
item.set_text(tick_labels[l])
ax.set_xticklabels(labels)
#
# Set axis labels.
#
ax.set_xlabel('R.A. [deg]')
# ax.xaxis.label.set_fontsize(12)
ax.set_ylabel('Dec. [deg]')
# ax.yaxis.label.set_fontsize(12)
ax.grid(True)
#
# Attach helper methods.
#
if hasattr(ax, '_ra_center'):
warnings.warn("Attribute '_ra_center' detected. Will be overwritten!")
ax._ra_center = ra_center
if hasattr(ax, 'projection_ra'):
warnings.warn("Attribute 'projection_ra' detected. Will be overwritten!")
ax.projection_ra = MethodType(projection_ra, ax)
if hasattr(ax, 'projection_dec'):
warnings.warn("Attribute 'projection_dec' detected. Will be overwritten!")
ax.projection_dec = MethodType(projection_dec, ax)
return ax
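# A minimal usage sketch; the attached projection_ra/projection_dec helpers
# take arrays of degrees and return plot (radian) coordinates:
#
# ax = init_sky(projection='mollweide', ra_center=120)
# ax.scatter(ax.projection_ra(np.array([10., 240.])),
#            ax.projection_dec(np.array([-30., 45.])), marker='*')
# plt.show()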
def plot_healpix_map(data, nest=False, cmap='viridis', colorbar=True,
label=None, ax=None, **kwargs):
"""Plot a healpix map using an all-sky projection.
Pass the data array through :func:`prepare_data` to select a subset to plot
and clip the color map to specified values or percentiles.
This function is similar to :func:`plot_grid_map` but is generally slower
at high resolution and has less elegant handling of pixels that wrap around
in RA, which are not drawn.
Requires that matplotlib and healpy are installed.
Additional keyword parameters will be passed to :func:`init_sky`.
Parameters
----------
data : array or masked array
1D array of data associated with each healpix. Must have a size that
exactly matches the number of pixels for some NSIDE value. Use the
output of :func:`prepare_data` as a convenient way to specify
data cuts and color map clipping.
nest : :class:`bool`, optional
If ``True``, assume NESTED pixel ordering. Otherwise, assume RING pixel
ordering.
cmap : colormap name or object, optional
Matplotlib colormap to use for mapping data values to colors.
colorbar : :class:`bool`, optional
Draw a colorbar below the map when ``True``.
label : :class:`str`, optional
Label to display under the colorbar. Ignored unless colorbar is ``True``.
ax : :class:`~matplotlib.axes.Axes`, optional
Axes to use for drawing this map, or create default axes using
:func:`init_sky` when ``None``.
Returns
-------
:class:`~matplotlib.axes.Axes`
The axis object used for the plot.
"""
import healpy as hp
data = prepare_data(data)
if len(data.shape) != 1:
raise ValueError('Invalid data array, should be 1D.')
nside = hp.npix2nside(len(data))
#
# Create axes.
#
if ax is None:
ax = init_sky(**kwargs)
#
# Find the projection edge.
#
proj_edge = ax._ra_center - 180
while proj_edge < 0:
proj_edge += 360
#
# Get pixel boundaries as quadrilaterals.
#
corners = hp.boundaries(nside, np.arange(len(data)), step=1, nest=nest)
corner_theta, corner_phi = hp.vec2ang(corners.transpose(0, 2, 1))
corner_ra, corner_dec = (np.degrees(corner_phi),
np.degrees(np.pi/2-corner_theta))
#
# Convert sky coords to map coords.
#
x, y = ax.projection_ra(corner_ra), ax.projection_dec(corner_dec)
#
# Regroup into pixel corners.
#
verts = np.array([x.reshape(-1, 4), y.reshape(-1, 4)]).transpose(1, 2, 0)
#
# Find and mask any pixels that wrap around in RA.
#
uv_verts = np.array([corner_phi.reshape(-1, 4),
corner_theta.reshape(-1, 4)]).transpose(1, 2, 0)
theta_edge = np.unique(uv_verts[:, :, 1])
phi_edge = np.radians(proj_edge)
eps = 0.1 * np.sqrt(hp.nside2pixarea(nside))
wrapped1 = hp.ang2pix(nside, theta_edge, phi_edge - eps, nest=nest)
wrapped2 = hp.ang2pix(nside, theta_edge, phi_edge + eps, nest=nest)
wrapped = np.unique(np.hstack((wrapped1, wrapped2)))
data.mask[wrapped] = True
#
# Normalize the data using its vmin, vmax attributes, if present.
#
try:
norm = Normalize(vmin=data.vmin, vmax=data.vmax)
except AttributeError:
norm = None
#
# Make the collection and add it to the plot.
#
collection = PolyCollection(verts, array=data, cmap=cmap, norm=norm,
edgecolors='none')
ax.add_collection(collection)
ax.autoscale_view()
if colorbar:
bar = plt.colorbar(collection, ax=ax,
orientation='horizontal', spacing='proportional',
pad=0.11, fraction=0.05, aspect=50)
if label:
bar.set_label(label)
return ax
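# A minimal usage sketch (assumes healpy is installed):
#
# import healpy as hp
# nside = 16
# pixels = np.arange(hp.nside2npix(nside), dtype=float)
# ax = plot_healpix_map(prepare_data(pixels, clip_hi='95%'), label='pixel index')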
def plot_grid_map(data, ra_edges, dec_edges, cmap='viridis', colorbar=True,
label=None, ax=None, **kwargs):
r"""Plot an array of 2D values using an all-sky projection.
Pass the data array through :func:`prepare_data` to select a subset to plot
and clip the color map to specified values or percentiles.
This function is similar to :func:`plot_healpix_map` but is generally faster
and has better handling of RA wrap around artifacts.
Additional keyword parameters will be passed to :func:`init_sky`.
Parameters
----------
data : array or masked array
2D array of data associated with each grid cell, with shape
:math:`(N_{\mathrm{RA}}, N_{\mathrm{Dec}})`.
Use the output of :func:`prepare_data` as a convenient
way to specify data cuts and color map clipping.
ra_edges : array
1D array of :math:`N_{\mathrm{RA}} + 1` RA grid edge values in degrees,
which must span the full circle, *i.e.*,
``ra_edges[0] == ra_edges[-1] - 360``. The RA grid does not need to match
the edges of the projection, in which case any wrap-around cells will
be duplicated on both edges.
dec_edges : array
1D array of :math:`N_{\mathrm{Dec}} + 1` Dec grid edge values in degrees.
Values are not required to span the full range ``[-90, +90]``.
cmap : colormap name or object, optional
Matplotlib colormap to use for mapping data values to colors.
colorbar : :class:`bool`, optional
Draw a colorbar below the map when ``True``.
label : :class:`str`, optional
Label to display under the colorbar. Ignored unless colorbar is ``True``.
ax : :class:`~matplotlib.axes.Axes`, optional
Axes to use for drawing this map, or create default axes using
:func:`init_sky` when ``None``.
Returns
-------
:class:`~matplotlib.axes.Axes`
The axis object used for the plot.
"""
data = prepare_data(data)
if len(data.shape) != 2:
raise ValueError('Expected 2D data array.')
n_dec, n_ra = data.shape
#
# Normalize the data using its vmin, vmax attributes, if present.
# Need to do this before hstack-ing below, which drops vmin, vmax.
#
try:
norm = Normalize(vmin=data.vmin, vmax=data.vmax)
except AttributeError:
norm = None
#
# Silently flatten, sort, and remove duplicates from the edges arrays.
#
ra_edges = np.unique(ra_edges)
dec_edges = np.unique(dec_edges)
if len(ra_edges) != n_ra + 1:
raise ValueError('Invalid ra_edges.')
if len(dec_edges) != n_dec + 1:
raise ValueError('Invalid dec_edges.')
if ra_edges[0] != ra_edges[-1] - 360:
raise ValueError('Invalid ra_edges: they do not span 360 degrees.')
#
# Create axes.
#
if ax is None:
ax = init_sky(**kwargs)
#
# Find the projection edge.
#
proj_edge = ax._ra_center - 180
while proj_edge < 0:
proj_edge += 360
#
# Find the first RA gridline that fits within the map's right edge.
#
first = np.where(ra_edges >= proj_edge)[0][0]
if first > 0:
# Wrap the data beyond the left edge around to the right edge.
# Remember to use numpy.ma.hstack for the data to preserve the mask.
if ra_edges[first] > proj_edge:
# Split a wrap-around column into separate left and right columns.
ra_edges = np.hstack(([proj_edge], ra_edges[first:],
ra_edges[1:first] + 360, [proj_edge+360]))
data = numpy.ma.hstack((data[:, (first - 1):first],
data[:, first:],
data[:, :(first - 1)],
data[:, (first - 1):first]))
else:
# One of the ra_edge values is exactly on the edge.
ra_edges = np.hstack((ra_edges[first:], ra_edges[:(first + 1)] + 360))
data = numpy.ma.hstack((data[:, first:], data[:, :(first + 1)]))
# Transform to projection coordinates. By construction, the first value should be positive.
proj_ra = ax.projection_ra(ra_edges)
if proj_ra[0] < 0:
proj_ra[0] *= -1
# Build a 2D array of grid line intersections.
grid_ra, grid_dec = np.meshgrid(proj_ra, ax.projection_dec(dec_edges))
ax.grid(False)
mesh = ax.pcolormesh(grid_ra, grid_dec,
data, cmap=cmap, norm=norm, edgecolor='none', lw=0)
# grid turned off for pcolormesh; turn it back on.
ax.grid(True)
if colorbar:
bar = plt.colorbar(mesh, ax=ax,
orientation='horizontal', spacing='proportional',
pad=0.1, fraction=0.05, aspect=50)
if label:
bar.set_label(label)
return ax
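# A minimal usage sketch: a 4 (Dec) x 8 (RA) grid spanning the full sky,
# so ra_edges[0] == ra_edges[-1] - 360 as required above:
#
# ra_edges = np.linspace(0., 360., 9)
# dec_edges = np.linspace(-90., 90., 5)
# ax = plot_grid_map(np.arange(32.).reshape(4, 8), ra_edges, dec_edges,
#                    label='cell index')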
def plot_sky_circles(ra_center, dec_center, field_of_view=3.2, data=None,
cmap='viridis', facecolors='skyblue', edgecolor='none',
colorbar=True, colorbar_ticks=None, label=None,
ax=None, **kwargs):
"""Plot circles on an all-sky projection.
Pass the optional data array through :func:`prepare_data` to select a
subset to plot and clip the color map to specified values or percentiles.
Requires that matplotlib is installed.
Additional keyword parameters will be passed to :func:`init_sky`.
Parameters
----------
ra_center : array
1D array of RA in degrees at the centers of each circle to plot.
dec_center : array
1D array of DEC in degrees at the centers of each circle to plot.
field_of_view : :class:`float`
Full sky opening angle in degrees of the circles to plot. The default
is appropriate for a DESI tile.
data : array, optional
1D array of data associated with each circle, used to set its facecolor.
cmap : colormap name, optional
Matplotlib colormap to use for mapping data values to colors. Ignored
unless data is specified.
facecolors : matplotlib color or array of colors, optional
Ignored when data is specified. Either an array with one entry per
circle, or a single value to be used for all circles.
edgecolor : matplotlib color, optional
The edge color used for all circles. Use 'none' to hide edges.
colorbar : :class:`bool`, optional
Draw a colorbar below the map when ``True`` and data is provided.
colorbar_ticks : :class:`list`, optional
Use the specified colorbar ticks or determine them automatically
when None.
label : :class:`str`
Label to display under the colorbar. Ignored unless a colorbar is
displayed.
ax : :class:`~matplotlib.axes.Axes`, optional
Axes to use for drawing this map, or create default axes using
:func:`init_sky` when ``None``.
Returns
-------
:class:`~matplotlib.axes.Axes`
The axis object used for the plot.
"""
ra_center = np.asarray(ra_center)
dec_center = np.asarray(dec_center)
if len(ra_center.shape) != 1:
raise ValueError('Invalid ra_center, must be a 1D array.')
if len(dec_center.shape) != 1:
raise ValueError('Invalid dec_center, must be a 1D array.')
if len(ra_center) != len(dec_center):
raise ValueError('Arrays ra_center, dec_center must have same size.')
if data is not None:
data = prepare_data(data)
# Facecolors are determined by the data, when specified.
if data.shape != ra_center.shape:
raise ValueError('Invalid data shape, must match ra_center.')
# Colors associated with masked values in data will be ignored later.
try:
# Normalize the data using its vmin, vmax attributes, if present.
norm = Normalize(vmin=data.vmin, vmax=data.vmax)
except AttributeError:
# Otherwise use the data limits.
norm = Normalize(vmin=data.min(), vmax=data.max())
cmapper = ScalarMappable(norm, cmap)
facecolors = cmapper.to_rgba(data)
else:
colorbar = False
# Try to repeat a single fixed color for all circles.
try:
facecolors = np.tile(
[colorConverter.to_rgba(facecolors)],
(len(ra_center), 1))
except ValueError:
# Assume that facecolor is already an array.
facecolors = np.asarray(facecolors)
if len(facecolors) != len(ra_center):
raise ValueError('Invalid facecolor array.')
if ax is None:
ax = init_sky(**kwargs)
#
# Find the projection edge.
#
proj_edge = ax._ra_center - 180
while proj_edge < 0:
proj_edge += 360
#
# Convert field-of-view angle into dRA.
#
dRA = field_of_view / np.cos(np.radians(dec_center))
#
# Identify circles that wrap around the map edges in RA.
#
edge_dist = np.fabs(np.fmod(ra_center - proj_edge, 360))
wrapped = np.minimum(edge_dist, 360 - edge_dist) < 1.05 * 0.5 * dRA # convert dRA to radius.
#
# Loop over non-wrapped circles.
#
for ra, dec, dra, fc in zip(ax.projection_ra(ra_center[~wrapped]), ax.projection_dec(dec_center[~wrapped]),
dRA[~wrapped], facecolors[~wrapped]):
e = Ellipse((ra, dec), np.radians(dra), np.radians(field_of_view),
facecolor=fc, edgecolor=edgecolor)
ax.add_patch(e)
if colorbar:
mappable = ScalarMappable(norm=norm, cmap=cmap)
mappable.set_array(data)
bar = plt.colorbar(mappable, ax=ax,
orientation='horizontal', spacing='proportional',
pad=0.1, fraction=0.05, aspect=50,
ticks=colorbar_ticks)
if label:
bar.set_label(label)
return ax
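# A minimal usage sketch: three DESI-sized tiles colored by arbitrary values:
#
# ax = plot_sky_circles(np.array([10., 60., 185.]), np.array([-10., 25., 5.]),
#                       data=np.array([1., 2., 3.]), label='tile value')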
def plot_sky_binned(ra, dec, weights=None, data=None, plot_type='grid',
max_bin_area=5, clip_lo=None, clip_hi=None, verbose=False,
cmap='viridis', colorbar=True, label=None, ax=None,
return_grid_data=False, **kwargs):
"""Show objects on the sky using a binned plot.
Bin values either show object counts per unit sky area or, if an array
of associated data values is provided, mean data values within each bin.
Objects can have associated weights.
Requires that matplotlib is installed. When plot_type is
"healpix", healpy must also be installed.
Additional keyword parameters will be passed to :func:`init_sky`.
Parameters
----------
ra : array
Array of object RA values in degrees. Must have the same shape as
dec and will be flattened if necessary.
dec : array
Array of object Dec values in degrees. Must have the same shape as
ra and will be flattened if necessary.
weights : array, optional
Optional array of weights associated with each object. All objects are
assumed to have equal weight when this is None.
data : array, optional
Optional array of scalar values associated with each object. The
resulting plot shows the mean data value per bin when data is
specified. Otherwise, the plot shows counts per unit sky area.
plot_type : {'grid', 'healpix'}
Must be either 'grid' or 'healpix', and selects whether the data is
binned in HEALPix pixels or on a (sin(Dec), RA) grid.
max_bin_area : :class:`float`, optional
The bin size will be chosen automatically to be as close as
possible to this value but not exceeding it.
clip_lo : :class:`float` or :class:`str`, optional
Clipping is applied to the plot data calculated as counts / area
or the mean data value per bin. See :func:`prepare_data` for
details.
clip_hi : :class:`float` or :class:`str`, optional
Clipping is applied to the plot data calculated as counts / area
or the mean data value per bin. See :func:`prepare_data` for
details.
verbose : :class:`bool`, optional
Print information about the automatic bin size calculation.
cmap : colormap name or object, optional
Matplotlib colormap to use for mapping data values to colors.
colorbar : :class:`bool`, optional
Draw a colorbar below the map when True.
label : :class:`str`, optional
Label to display under the colorbar. Ignored unless colorbar is ``True``.
ax : :class:`~matplotlib.axes.Axes`, optional
Axes to use for drawing this map, or create default axes using
:func:`init_sky` when ``None``.
return_grid_data : :class:`bool`, optional
If ``True``, return (ax, grid_data) instead of just ax.
Returns
-------
:class:`~matplotlib.axes.Axes` or (ax, grid_data)
The axis object used for the plot, and the grid_data if
`return_grid_data` is ``True``.
"""
ra = np.asarray(ra).reshape(-1)
dec = np.asarray(dec).reshape(-1)
if len(ra) != len(dec):
raise ValueError('Arrays ra,dec must have same size.')
plot_types = ('grid', 'healpix',)
if plot_type not in plot_types:
raise ValueError('Invalid plot_type, should be one of {0}.'.format(', '.join(plot_types)))
if data is not None and weights is None:
weights = np.ones_like(data)
if plot_type == 'grid':
# Convert the maximum pixel area to steradians.
max_bin_area = max_bin_area * (np.pi / 180.) ** 2
# Pick the number of bins in cos(DEC) and RA to use.
n_cos_dec = int(np.ceil(2 / np.sqrt(max_bin_area)))
n_ra = int(np.ceil(4 * np.pi / max_bin_area / n_cos_dec))
# Calculate the actual pixel area in sq. degrees.
bin_area = 360 ** 2 / np.pi / (n_cos_dec * n_ra)
if verbose:
print('Using {0} x {1} grid in cos(DEC) x RA'.format(n_cos_dec, n_ra),
'with pixel area {:.3f} sq.deg.'.format(bin_area))
# Calculate the bin edges in degrees.
# ra_edges = np.linspace(-180., +180., n_ra + 1)
ra_edges = np.linspace(0.0, 360.0, n_ra + 1)
dec_edges = np.degrees(np.arcsin(np.linspace(-1., +1., n_cos_dec + 1)))
# Put RA values in the range [-180, 180).
# ra = np.fmod(ra, 360.)
# ra[ra >= 180.] -= 360.
# Histogram the input coordinates.
counts, _, _ = np.histogram2d(dec, ra, [dec_edges, ra_edges],
weights=weights)
if data is None:
grid_data = counts / bin_area
else:
sums, _, _ = np.histogram2d(dec, ra, [dec_edges, ra_edges],
weights=weights * data)
# This ratio might result in some nan (0/0) or inf (1/0) values,
# but these will be masked by prepare_data().
settings = np.seterr(all='ignore')
grid_data = sums / counts
np.seterr(**settings)
grid_data = prepare_data(grid_data, clip_lo=clip_lo, clip_hi=clip_hi)
ax = plot_grid_map(grid_data, ra_edges, dec_edges,
cmap=cmap, colorbar=colorbar, label=label,
ax=ax, **kwargs)
elif plot_type == 'healpix':
import healpy as hp
for n in range(1, 25):
nside = 2 ** n
bin_area = hp.nside2pixarea(nside, degrees=True)
if bin_area <= max_bin_area:
break
npix = hp.nside2npix(nside)
nest = False
if verbose:
print('Using healpix map with NSIDE={0}'.format(nside),
'and pixel area {:.3f} sq.deg.'.format(bin_area))
pixels = hp.ang2pix(nside, np.radians(90 - dec), np.radians(ra), nest)
counts = np.bincount(pixels, weights=weights, minlength=npix)
if data is None:
grid_data = counts / bin_area
else:
sums = np.bincount(pixels, weights=weights * data, minlength=npix)
grid_data = np.zeros_like(sums, dtype=float)
nonzero = counts > 0
grid_data[nonzero] = sums[nonzero] / counts[nonzero]
grid_data = prepare_data(grid_data, clip_lo=clip_lo, clip_hi=clip_hi)
ax = plot_healpix_map(grid_data, nest=nest,
cmap=cmap, colorbar=colorbar, label=label,
ax=ax, **kwargs)
if return_grid_data:
return (ax, grid_data)
else:
return ax
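# A minimal usage sketch: bin isotropic random points and show the resulting
# surface density in objects per square degree:
#
# rng = np.random.RandomState(123)
# ra = 360. * rng.uniform(size=10000)
# dec = np.degrees(np.arcsin(2. * rng.uniform(size=10000) - 1.))
# ax = plot_sky_binned(ra, dec, max_bin_area=25., label='objects / sq.deg.')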
def plot_iers(which='auto', num_points=500, save=None):
"""Plot IERS data from 2015-2025.
Plots the UT1-UTC time offset and polar x,y motions over a 10-year
period that includes the DESI survey, to demonstrate the time ranges
when different sources (IERS-A, IERS-B) are used and where values
are predicted then fixed.
This function is primarily intended to document and debug the
:func:`desiutil.iers.freeze_iers` function.
Requires that the matplotlib package is installed.
Parameters
----------
which : {'auto', 'A', 'B', 'frozen'}
Select which IERS table source to use. The default 'auto' matches
the internal astropy default. Use 'A' or 'B' to force the source
to be either the latest IERS-A table (which will be downloaded),
or the IERS-B table packaged with the current version of astropy.
The 'frozen' option calls :func:`~desiutil.iers.freeze_iers`.
num_points : :class:`int`, optional
The number of sample times covering 2015-2025 to calculate and plot.
save : :class:`str`, optional
Name of file where plot should be saved. Format is inferred from
the extension.
Returns
-------
:class:`tuple`
Tuple (figure, axes) returned by ``plt.subplots()``.
"""
#
# These values are copied from the desisurvey config.yaml file.
# They may or may not reflect the latest configuration.
#
# Survey nominally starts on night of this date. Format is YYYY-MM-DD.
first_day = date(2019, 12, 1)
# Survey nominally ends on morning of this date. Format is YYYY-MM-DD.
last_day = date(2024, 11, 30)
# Calculate UTC midnight timestamps covering 2015 - 2025
start = Time('2015-01-01 00:00')
stop = Time('2025-01-01 00:00')
mjd = np.linspace(start.mjd, stop.mjd, num_points)
times = Time(mjd, format='mjd')
t_lo = matplotlib.dates.date2num(start.datetime)
t_hi = matplotlib.dates.date2num(stop.datetime)
t = np.linspace(t_lo, t_hi, num_points)
t_start = matplotlib.dates.date2num(first_day)
t_stop = matplotlib.dates.date2num(last_day)
# Load the specified IERS table. Bind the table to a new name so that the
# astropy ``iers`` module imported above is not shadowed.
if which == 'auto':
iers_table = iers.IERS_Auto.open()
elif which == 'B':
iers_table = iers.IERS_B.open()
elif which == 'A':
# This requires network access to download the latest file.
iers_table = iers.IERS_A.open(iers.conf.iers_auto_url)
elif which == 'frozen':
freeze_iers()
iers_table = iers.IERS_Auto.open()
else:
raise ValueError('Invalid which option.')
# Calculate UT1-UTC using the IERS table.
dt, dt_status = iers_table.ut1_utc(times, return_status=True)
dt = dt.to(u.s).value
# Calculate polar x,y motion using the IERS table.
pmx, pmy, pm_status = iers_table.pm_xy(times, return_status=True)
pmx = pmx.to(u.arcsec).value
pmy = pmy.to(u.arcsec).value
assert np.all(dt_status == pm_status)
codes = np.unique(dt_status)
fig, ax = plt.subplots(2, 1, sharex=True, figsize=(8, 6))
labels = {-2: 'Fixed', 0: 'IERS-B', 1: 'IERS-A', 2: 'Predict'}
styles = {-2: 'r:', 0: 'b-', 1: 'go', 2: 'r--'}
ms = 3
for code in codes:
sel = dt_status == code
ax[0].plot(t[sel], dt[sel], styles[code], ms=ms, label=labels[code])
ax[1].plot(t[sel], pmx[sel], styles[code], ms=ms, label='Polar x')
ax[1].plot(t[sel], pmy[sel], styles[code], ms=ms, label='Polar y')
ax[0].legend(ncol=4)
ax[0].set_ylabel('UT1 - UTC [s]')
ax[1].set_ylabel('Polar x,y motion [arcsec]')
for i in range(2):
# Vertical lines showing the survey start / stop dates.
ax[i].axvline(t_start, ls='--', c='k')
ax[i].axvline(t_stop, ls='--', c='k')
# Use year labels for the horizontal axis.
xaxis = ax[1].xaxis
xaxis.set_major_locator(matplotlib.dates.YearLocator())
xaxis.set_major_formatter(matplotlib.dates.DateFormatter('%Y'))
ax[i].set_xlim(start.datetime, stop.datetime)
plt.tight_layout()
if save:
plt.savefig(save)
return fig, ax
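# A minimal usage sketch ('frozen' exercises desiutil.iers.freeze_iers, the
# case this plot is primarily meant to document):
#
# fig, ax = plot_iers(which='frozen', save='iers.png')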
| desihub/desiutil | py/desiutil/plots.py | Python | bsd-3-clause | 41,964 |
# Copyright 2014 The Chromium OS Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Script to remove unused gconv charset modules from a build."""
from __future__ import print_function
import ahocorasick
import glob
import lddtree
import operator
import os
import stat
from chromite.lib import commandline
from chromite.lib import cros_build_lib
from chromite.lib import osutils
# Path pattern to search for the gconv-modules file.
GCONV_MODULES_PATH = 'usr/*/gconv/gconv-modules'
# Sticky modules. These charsets modules are always included even if they
# aren't used. You can specify any charset name as supported by 'iconv_open',
# for example, 'LATIN1' or 'ISO-8859-1'.
STICKY_MODULES = ('UTF-16', 'UTF-32', 'UNICODE')
# List of function names (symbols) known to use a charset as a parameter.
GCONV_SYMBOLS = (
# glibc
'iconv_open',
'iconv',
# glib
'g_convert',
'g_convert_with_fallback',
'g_iconv',
'g_locale_to_utf8',
'g_get_charset',
)
class GconvModules(object):
"""Class to manipulate the gconv/gconv-modules file and referenced modules.
This class parses the contents of the gconv-modules file installed by glibc
which provides the definition of the charsets supported by iconv_open(3). It
allows one to load the current gconv-modules file and rewrite it to include
only a subset of the supported modules, removing the rest.
Each charset is involved in some transformation between that charset and an
internal representation. The transformation is defined in a .so file that is
loaded dynamically with dlopen(3) when the charset defined in this file is
requested via iconv_open(3).
See the comments on gconv-modules file for syntax details.
"""
def __init__(self, gconv_modules_file):
"""Initialize the class.
Args:
gconv_modules_file: Path to gconv/gconv-modules file.
"""
self._filename = gconv_modules_file
# An alias map of charsets. The key (fromcharset) is the alias name and
# the value (tocharset) is the real charset name. We also support a value
# that is an alias for another charset.
self._alias = {}
# The modules dict goes from charset to module names (the filenames without
# the .so extension). Since several transformations involving the same
# charset could be defined in different files, the values of this dict are
# a set of module names.
self._modules = {}
def Load(self):
"""Load the charsets from gconv-modules."""
for line in open(self._filename):
line = line.split('#', 1)[0].strip()
if not line: # Skip blank lines and comment-only lines.
continue
lst = line.split()
if lst[0] == 'module':
_, fromset, toset, filename = lst[:4]
for charset in (fromset, toset):
charset = charset.rstrip('/')
mods = self._modules.get(charset, set())
mods.add(filename)
self._modules[charset] = mods
elif lst[0] == 'alias':
_, fromset, toset = lst
fromset = fromset.rstrip('/')
toset = toset.rstrip('/')
# Warn if the same charset is defined as two different aliases.
if self._alias.get(fromset, toset) != toset:
cros_build_lib.Error('charset "%s" already defined as "%s".',
fromset, self._alias[fromset])
self._alias[fromset] = toset
else:
cros_build_lib.Die('Unknown line: %s', line)
cros_build_lib.Debug('Found %d modules and %d aliases in %s',
len(self._modules), len(self._alias), self._filename)
charsets = sorted(self._alias.keys() + self._modules.keys())
# Remove the 'INTERNAL' charset from the list, since it is not a charset
# but an internal representation used to convert to and from other charsets.
if 'INTERNAL' in charsets:
charsets.remove('INTERNAL')
return charsets
def Rewrite(self, used_charsets, dry_run=False):
"""Rewrite gconv-modules file with only the used charsets.
Args:
used_charsets: A list of used charsets. This should be a subset of the
list returned by Load().
dry_run: Whether this function should not change any file.
"""
# Compute the used modules.
used_modules = set()
for charset in used_charsets:
while charset in self._alias:
charset = self._alias[charset]
used_modules.update(self._modules[charset])
unused_modules = reduce(set.union, self._modules.values()) - used_modules
modules_dir = os.path.dirname(self._filename)
all_modules = set.union(used_modules, unused_modules)
# The list of charsets that depend on a given library. For example,
# libdeps['libCNS.so'] is the set of all the modules that require that
# library. These libraries live in the same directory as the modules.
libdeps = {}
for module in all_modules:
deps = lddtree.ParseELF(os.path.join(modules_dir, '%s.so' % module),
modules_dir, [])
if 'needed' not in deps:
continue
for lib in deps['needed']:
# Ignore the libs without a path defined (outside the modules_dir).
if deps['libs'][lib]['path']:
libdeps[lib] = libdeps.get(lib, set()).union([module])
used_libdeps = set(lib for lib, deps in libdeps.iteritems()
if deps.intersection(used_modules))
unused_libdeps = set(libdeps).difference(used_libdeps)
cros_build_lib.Debug('Used modules: %s', ', '.join(sorted(used_modules)))
cros_build_lib.Debug('Used dependency libs: %s',
', '.join(sorted(used_libdeps)))
unused_size = 0
for module in sorted(unused_modules):
module_path = os.path.join(modules_dir, '%s.so' % module)
unused_size += os.lstat(module_path).st_size
cros_build_lib.Debug('rm %s', module_path)
if not dry_run:
os.unlink(module_path)
unused_libdeps_size = 0
for lib in sorted(unused_libdeps):
lib_path = os.path.join(modules_dir, lib)
unused_libdeps_size += os.lstat(lib_path).st_size
cros_build_lib.Debug('rm %s', lib_path)
if not dry_run:
os.unlink(lib_path)
cros_build_lib.Info(
'Done. Using %d gconv modules. Removed %d unused modules'
' (%.1f KiB) and %d unused dependencies (%.1f KiB)',
len(used_modules), len(unused_modules), unused_size / 1024.,
len(unused_libdeps), unused_libdeps_size / 1024.)
# Recompute the gconv-modules file with only the included gconv modules.
result = []
for line in open(self._filename):
lst = line.split('#', 1)[0].strip().split()
if not lst:
result.append(line) # Keep comments and copyright headers.
elif lst[0] == 'module':
_, _, _, filename = lst[:4]
if filename in used_modules:
result.append(line) # Used module
elif lst[0] == 'alias':
_, charset, _ = lst
charset = charset.rstrip('/')
while charset in self._alias:
charset = self._alias[charset]
if used_modules.intersection(self._modules[charset]):
result.append(line) # Alias to an used module
else:
cros_build_lib.Die('Unknown line: %s', line)
if not dry_run:
osutils.WriteFile(self._filename, ''.join(result))
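# A minimal usage sketch (the rootfs path below is hypothetical):
#
# gmods = GconvModules('/build/board/usr/lib64/gconv/gconv-modules')
# charsets = gmods.Load()
# gmods.Rewrite(['UTF-16', 'UTF-32'], dry_run=True)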
def MultipleStringMatch(patterns, corpus):
"""Search a list of strings in a corpus string.
Args:
patterns: A list of strings.
corpus: The text where to search for the strings.
Returns:
A list of Booleans stating whether each pattern string was found in the
corpus or not.
"""
tree = ahocorasick.KeywordTree()
for word in patterns:
tree.add(word)
tree.make()
result = [False] * len(patterns)
for i, j in tree.findall(corpus):
match = corpus[i:j]
result[patterns.index(match)] = True
return result
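# A minimal check of the expected behavior:
#
# MultipleStringMatch(['foo', 'baz'], 'a foo walks into a bar')
# => [True, False]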
def GconvStrip(opts):
"""Process gconv-modules and remove unused modules.
Args:
opts: The command-line args passed to the script.
Returns:
The exit code number indicating whether the process succeeded.
"""
root_st = os.lstat(opts.root)
if not stat.S_ISDIR(root_st.st_mode):
cros_build_lib.Die('root (%s) must be a directory.' % opts.root)
# Detect the possible locations of the gconv-modules file.
gconv_modules_files = glob.glob(os.path.join(opts.root, GCONV_MODULES_PATH))
if not gconv_modules_files:
cros_build_lib.Warning('gconv-modules file not found.')
return 1
# Only one gconv-modules file should be present, either in /usr/lib or
# /usr/lib64, but not both.
if len(gconv_modules_files) > 1:
cros_build_lib.Die('Found several gconv-modules files.')
gconv_modules_file = gconv_modules_files[0]
cros_build_lib.Info('Searching for unused gconv files defined in %s',
gconv_modules_file)
gmods = GconvModules(gconv_modules_file)
charsets = gmods.Load()
# Use scanelf to search for all the binary files on the rootfs that require
# or define the symbol iconv_open. We also include the binaries that define
# it since there could be internal calls to it from other functions.
files = set()
for symbol in GCONV_SYMBOLS:
cmd = ['scanelf', '--mount', '--quiet', '--recursive', '--format', '#s%F',
'--symbol', symbol, opts.root]
result = cros_build_lib.RunCommand(cmd, redirect_stdout=True,
print_cmd=False)
symbol_files = result.output.splitlines()
cros_build_lib.Debug('Symbol %s found on %d files.',
symbol, len(symbol_files))
files.update(symbol_files)
# The charsets are represented as nul-terminated strings in the binary files,
# so we append the '\0' to each string. This prevents some false positives
# when the name of the charset is a substring of some other string. It doesn't
# prevent false positives when the charset name is the suffix of another
# string, for example a binary with the string "DON'T DO IT\0" will match the
# 'IT' charset. Empirical tests on Chrome OS images suggest that only 4
# charsets could fall into this category.
strings = [s + '\0' for s in charsets]
cros_build_lib.Info('Will search for %d strings in %d files',
len(strings), len(files))
# Charsets listed in STICKY_MODULES are initialized as used. Note that those
# strings should be listed in the gconv-modules file.
unknown_sticky_modules = set(STICKY_MODULES) - set(charsets)
if unknown_sticky_modules:
cros_build_lib.Warning(
'The following charsets were explicitly requested in STICKY_MODULES '
'even though they don\'t exist: %s',
', '.join(unknown_sticky_modules))
global_used = [charset in STICKY_MODULES for charset in charsets]
for filename in files:
used_filename = MultipleStringMatch(strings,
osutils.ReadFile(filename, mode='rb'))
global_used = map(operator.or_, global_used, used_filename)
# Check the debug flag to avoid running a useless loop.
if opts.debug and any(used_filename):
cros_build_lib.Debug('File %s:', filename)
for i in range(len(used_filename)):
if used_filename[i]:
cros_build_lib.Debug(' - %s', strings[i])
used_charsets = [cs for cs, used in zip(charsets, global_used) if used]
gmods.Rewrite(used_charsets, opts.dry_run)
return 0
def ParseArgs(argv):
"""Return parsed commandline arguments."""
parser = commandline.ArgumentParser()
parser.add_argument(
'--dry-run', action='store_true', default=False,
help='process but don\'t modify any file.')
parser.add_argument(
'root', type='path',
help='path to the directory where the rootfs is mounted.')
opts = parser.parse_args(argv)
opts.Freeze()
return opts
def main(argv):
"""Main function to start the script."""
opts = ParseArgs(argv)
cros_build_lib.Debug('Options are %s', opts)
return GconvStrip(opts)
| mxOBS/deb-pkg_trusty_chromium-browser | third_party/chromite/scripts/gconv_strip.py | Python | bsd-3-clause | 12,027 |
import os
import shutil
import pickle
from tests import add, TestCase
from mutagen.id3 import ID3FileType
from mutagen.easyid3 import EasyID3, error as ID3Error
from tempfile import mkstemp
class TEasyID3(TestCase):
def setUp(self):
fd, self.filename = mkstemp('.mp3')
os.close(fd)
empty = os.path.join('tests', 'data', 'emptyfile.mp3')
shutil.copy(empty, self.filename)
self.id3 = EasyID3()
def test_remember_ctr(self):
empty = os.path.join('tests', 'data', 'emptyfile.mp3')
mp3 = ID3FileType(empty, ID3=EasyID3)
self.failIf(mp3.tags)
mp3["artist"] = ["testing"]
self.failUnless(mp3.tags)
mp3.pprint()
self.failUnless(isinstance(mp3.tags, EasyID3))
def test_delete(self):
self.id3["artist"] = "foobar"
self.id3.save(self.filename)
self.failUnless(os.path.getsize(self.filename))
self.id3.delete(self.filename)
self.failIf(os.path.getsize(self.filename))
self.failIf(self.id3)
def test_pprint(self):
self.id3["artist"] = "baz"
self.id3.pprint()
def test_has_key(self):
self.failIf(self.id3.has_key("foo"))
def test_empty_file(self):
empty = os.path.join('tests', 'data', 'emptyfile.mp3')
self.assertRaises(ID3Error, EasyID3, filename=empty)
def test_nonexistent_file(self):
empty = os.path.join('tests', 'data', 'does', 'not', 'exist')
self.assertRaises(IOError, EasyID3, filename=empty)
def test_write_single(self):
for key in EasyID3.valid_keys:
if key == "date":
continue
elif key.startswith("replaygain_"):
continue
# Test creation
self.id3[key] = "a test value"
self.id3.save(self.filename)
id3 = EasyID3(self.filename)
self.failUnlessEqual(id3[key], ["a test value"])
self.failUnlessEqual(id3.keys(), [key])
# And non-creation setting.
self.id3[key] = "a test value"
self.id3.save(self.filename)
id3 = EasyID3(self.filename)
self.failUnlessEqual(id3[key], ["a test value"])
self.failUnlessEqual(id3.keys(), [key])
del(self.id3[key])
def test_write_double(self):
for key in EasyID3.valid_keys:
if key == "date":
continue
elif key.startswith("replaygain_"):
continue
elif key == "musicbrainz_trackid":
continue
self.id3[key] = ["a test", "value"]
self.id3.save(self.filename)
id3 = EasyID3(self.filename)
self.failUnlessEqual(id3.get(key), ["a test", "value"])
self.failUnlessEqual(id3.keys(), [key])
self.id3[key] = ["a test", "value"]
self.id3.save(self.filename)
id3 = EasyID3(self.filename)
self.failUnlessEqual(id3.get(key), ["a test", "value"])
self.failUnlessEqual(id3.keys(), [key])
del(self.id3[key])
def test_write_date(self):
self.id3["date"] = "2004"
self.id3.save(self.filename)
id3 = EasyID3(self.filename)
self.failUnlessEqual(id3["date"], ["2004"])
self.id3["date"] = "2004"
self.id3.save(self.filename)
id3 = EasyID3(self.filename)
self.failUnlessEqual(id3["date"], ["2004"])
def test_date_delete(self):
self.id3["date"] = "2004"
self.failUnlessEqual(self.id3["date"], ["2004"])
del(self.id3["date"])
self.failIf("date" in self.id3.keys())
def test_write_date_double(self):
self.id3["date"] = ["2004", "2005"]
self.id3.save(self.filename)
id3 = EasyID3(self.filename)
self.failUnlessEqual(id3["date"], ["2004", "2005"])
self.id3["date"] = ["2004", "2005"]
self.id3.save(self.filename)
id3 = EasyID3(self.filename)
self.failUnlessEqual(id3["date"], ["2004", "2005"])
def test_write_invalid(self):
self.failUnlessRaises(ValueError, self.id3.__getitem__, "notvalid")
self.failUnlessRaises(ValueError, self.id3.__delitem__, "notvalid")
self.failUnlessRaises(
ValueError, self.id3.__setitem__, "notvalid", "tests")
def test_performer(self):
self.id3["performer:coder"] = ["piman", "mu"]
self.id3.save(self.filename)
id3 = EasyID3(self.filename)
self.failUnlessEqual(id3["performer:coder"], ["piman", "mu"])
def test_no_performer(self):
self.failIf("performer:foo" in self.id3)
def test_performer_delete(self):
self.id3["performer:foo"] = "Joe"
self.id3["performer:bar"] = "Joe"
self.failUnless("performer:foo" in self.id3)
self.failUnless("performer:bar" in self.id3)
del(self.id3["performer:foo"])
self.failIf("performer:foo" in self.id3)
self.failUnless("performer:bar" in self.id3)
del(self.id3["performer:bar"])
self.failIf("performer:bar" in self.id3)
self.failIf("TMCL" in self.id3._EasyID3__id3)
def test_performer_delete_dne(self):
self.failUnlessRaises(KeyError, self.id3.__delitem__, "performer:bar")
self.id3["performer:foo"] = "Joe"
self.failUnlessRaises(KeyError, self.id3.__delitem__, "performer:bar")
def test_txxx_empty(self):
# http://code.google.com/p/mutagen/issues/detail?id=135
self.id3["asin"] = ""
def test_txxx_set_get(self):
self.failIf("asin" in self.id3.keys())
self.id3["asin"] = "Hello"
self.failUnless("asin" in self.id3.keys())
self.failUnlessEqual(self.id3["asin"], ["Hello"])
self.failUnless("TXXX:ASIN" in self.id3._EasyID3__id3)
def test_txxx_del_set_del(self):
self.failIf("asin" in self.id3.keys())
self.failUnlessRaises(KeyError, self.id3.__delitem__, "asin")
self.id3["asin"] = "Hello"
self.failUnless("asin" in self.id3.keys())
self.failUnlessEqual(self.id3["asin"], ["Hello"])
del(self.id3["asin"])
self.failIf("asin" in self.id3.keys())
self.failUnlessRaises(KeyError, self.id3.__delitem__, "asin")
def test_txxx_save(self):
self.id3["asin"] = "Hello"
self.id3.save(self.filename)
id3 = EasyID3(self.filename)
self.failUnlessEqual(id3["asin"], ["Hello"])
def test_txxx_unicode(self):
self.id3["asin"] = u"He\u1234llo"
self.failUnlessEqual(self.id3["asin"], [u"He\u1234llo"])
def test_bad_trackid(self):
self.failUnlessRaises(ValueError, self.id3.__setitem__,
"musicbrainz_trackid", ["a", "b"])
self.failIf(self.id3._EasyID3__id3.getall("RVA2"))
def test_gain_bad_key(self):
self.failIf("replaygain_foo_gain" in self.id3)
self.failIf(self.id3._EasyID3__id3.getall("RVA2"))
def test_gain_bad_value(self):
self.failUnlessRaises(
ValueError, self.id3.__setitem__, "replaygain_foo_gain", [])
self.failUnlessRaises(
ValueError, self.id3.__setitem__, "replaygain_foo_gain", ["foo"])
self.failUnlessRaises(
ValueError, self.id3.__setitem__, "replaygain_foo_gain", ["1", "2"])
self.failIf(self.id3._EasyID3__id3.getall("RVA2"))
def test_peak_bad_key(self):
self.failIf("replaygain_foo_peak" in self.id3)
self.failIf(self.id3._EasyID3__id3.getall("RVA2"))
def test_peak_bad_value(self):
self.failUnlessRaises(
ValueError, self.id3.__setitem__, "replaygain_foo_peak", [])
self.failUnlessRaises(
ValueError, self.id3.__setitem__, "replaygain_foo_peak", ["foo"])
self.failUnlessRaises(
ValueError, self.id3.__setitem__, "replaygain_foo_peak", ["1", "1"])
self.failUnlessRaises(
ValueError, self.id3.__setitem__, "replaygain_foo_peak", ["3"])
self.failIf(self.id3._EasyID3__id3.getall("RVA2"))
def test_gain_peak_get(self):
self.id3["replaygain_foo_gain"] = "+3.5 dB"
self.id3["replaygain_bar_peak"] = "0.5"
self.failUnlessEqual(
self.id3["replaygain_foo_gain"], ["+3.500000 dB"])
self.failUnlessEqual(self.id3["replaygain_foo_peak"], ["0.000000"])
self.failUnlessEqual(
self.id3["replaygain_bar_gain"], ["+0.000000 dB"])
self.failUnlessEqual(self.id3["replaygain_bar_peak"], ["0.500000"])
def test_gain_peak_set(self):
self.id3["replaygain_foo_gain"] = "+3.5 dB"
self.id3["replaygain_bar_peak"] = "0.5"
self.id3.save(self.filename)
id3 = EasyID3(self.filename)
self.failUnlessEqual(id3["replaygain_foo_gain"], ["+3.500000 dB"])
self.failUnlessEqual(id3["replaygain_foo_peak"], ["0.000000"])
self.failUnlessEqual(id3["replaygain_bar_gain"], ["+0.000000 dB"])
self.failUnlessEqual(id3["replaygain_bar_peak"], ["0.500000"])
def test_gain_peak_delete(self):
self.id3["replaygain_foo_gain"] = "+3.5 dB"
self.id3["replaygain_bar_peak"] = "0.5"
del(self.id3["replaygain_bar_gain"])
del(self.id3["replaygain_foo_peak"])
self.failUnless("replaygain_foo_gain" in self.id3.keys())
self.failUnless("replaygain_bar_gain" in self.id3.keys())
del(self.id3["replaygain_foo_gain"])
del(self.id3["replaygain_bar_peak"])
self.failIf("replaygain_foo_gain" in self.id3.keys())
self.failIf("replaygain_bar_gain" in self.id3.keys())
del(self.id3["replaygain_foo_gain"])
del(self.id3["replaygain_bar_peak"])
self.failIf("replaygain_foo_gain" in self.id3.keys())
self.failIf("replaygain_bar_gain" in self.id3.keys())
def test_pickle(self):
# http://code.google.com/p/mutagen/issues/detail?id=102
pickle.dumps(self.id3)
def test_get_fallback(self):
called = []
def get_func(id3, key):
id3.getall("")
self.failUnlessEqual(key, "nope")
called.append(1)
self.id3.GetFallback = get_func
self.id3["nope"]
self.failUnless(called)
def test_set_fallback(self):
called = []
def set_func(id3, key, value):
id3.getall("")
self.failUnlessEqual(key, "nope")
self.failUnlessEqual(value, ["foo"])
called.append(1)
self.id3.SetFallback = set_func
self.id3["nope"] = "foo"
self.failUnless(called)
def test_del_fallback(self):
called = []
def del_func(id3, key):
id3.getall("")
self.failUnlessEqual(key, "nope")
called.append(1)
self.id3.DeleteFallback = del_func
del self.id3["nope"]
self.failUnless(called)
def test_list_fallback(self):
def list_func(id3, key):
id3.getall("")
self.failIf(key)
return ["somekey"]
self.id3.ListFallback = list_func
self.failUnlessEqual(self.id3.keys(), ["somekey"])
def tearDown(self):
os.unlink(self.filename)
add(TEasyID3)
| hanvo/MusicCloud | Crawler/Install Files/mutagen-1.22/tests/test_easyid3.py | Python | bsd-3-clause | 11,253 |
from .models import Sample, SampleGroup
from files.models import File
from tasks.models import Task
import json
from tasks.tasks import annotate_vcf, run_qc
from django.shortcuts import render, redirect, get_object_or_404
from django.db.models import Q
from django.contrib import messages
from files.tasks import import_vcf
# from sample_tracking_system.users.models import User
from django.contrib.auth.models import User
# from django.core.paginator import Paginator, EmptyPage, PageNotAnInteger
from django.db.models import F, Count
from django.contrib.auth.decorators import login_required
import csv
from django.http import HttpResponseRedirect, HttpResponse
import io
from samples.forms import SampleGroupForm
from django.views.generic import ListView
from django.views.generic.edit import DeleteView
from django.urls import reverse_lazy
from django.views.generic.detail import DetailView
from django.views.generic.edit import UpdateView
# Create your views here.
@login_required
def index(request):
args = []
if request.method == 'POST':
action = request.POST['action']
samples = request.POST.getlist('samples')
# print('samples', samples)
request.session['samples'] = samples
return redirect('create_group')
else:
samples = Sample.objects.filter(*args).order_by('id')
context = {
'samples':samples,
}
return render(request, 'samples/index.html', context)
@login_required
def create_group(request):
if request.method == 'POST':
form = SampleGroupForm(request.POST)
if form.is_valid():
form.save()
return redirect('sample_index')
else:
samples = request.session['samples']
print('samples', samples)
form = SampleGroupForm(initial={'members': samples})
# form.members.initial = ['537']
return render(request, 'samples/create_group.html', {'form': form})
def group_detail(request, pk):
object = SampleGroup.objects.get(pk=pk)
n_samples = object.members.count()
context = {
'object':object,
'n_samples':n_samples
}
return render(request, 'samples/samplegroup_detail.html', context)
class SampleGroupList(ListView):
model = SampleGroup
class SampleGroupDelete(DeleteView):
model = SampleGroup
success_url = reverse_lazy('samplegroup-list')
class SampleDetailView(DetailView):
model = Sample
class SampleGroupUpdateView(UpdateView):
model = SampleGroup
fields = '__all__'
def sample_import_vcf(request, pk):
sample = Sample.objects.get(pk=pk)
messages.add_message(request, messages.SUCCESS, 'VCF from sample {} will be imported'.format(sample.name))
import_vcf.delay(pk)
return HttpResponseRedirect(request.META.get('HTTP_REFERER'))
@login_required
def bulk_action(request):
if request.method == 'POST':
samples = request.POST.getlist('samples')
action = request.POST['action']
print(action, samples)
for sample_id in samples:
if action == "import_vcf":
import_vcf.apply_async(args=(sample_id,)) # Celery expects positional args as a sequence.
return HttpResponseRedirect(request.META.get('HTTP_REFERER'))
| raonyguimaraes/mendelmd | samples/views.py | Python | bsd-3-clause | 3,269 |
from datetime import datetime
from decimal import Decimal
import six
from unittest import TestCase
from databuild import functional
class FunctionalTestCase(TestCase):
def test_compose(self):
f1 = lambda x: x + 1
f2 = lambda x: x + 2
f3 = lambda x: x + 3
fn = functional.compose(f1, f2, f3)
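# With these commutative increments the composition order is irrelevant:
# 0 + 1 + 2 + 3 == 6 either way.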
assert fn(0) == 6
def test_guess_type(self):
values = [
'Alice',
'Bob',
'Charlie',
'Daniel',
'Emily',
]
assert functional.guess_type(values) == six.text_type
values = [
'10939',
'157',
'49294',
'402',
'374',
]
assert functional.guess_type(values) == int
values = [
'1',
'2.5',
'3.43',
'4',
'5.7',
]
assert functional.guess_type(values) == float
values = [
'1.00',
'2.50',
'3.43',
'4.00',
'5.70',
]
assert functional.guess_type(values) == Decimal
values = [
'2014-01-01',
'2014-02-01',
'2014-03-01',
'2014-04-01',
'2014-05-01',
]
assert functional.guess_type(values) == datetime
values = [
'1',
'Bob',
'3.43',
'2014-04-01',
'5.70',
]
assert functional.guess_type(values) == six.text_type
values = [
1,
2,
3,
4,
5,
]
assert functional.guess_type(values) == int
| databuild/databuild | databuild/tests/test_functional.py | Python | bsd-3-clause | 1,696 |
# Copyright (c) 2008-2015 MetPy Developers.
# Distributed under the terms of the BSD 3-Clause License.
# SPDX-License-Identifier: BSD-3-Clause
"""Tests for `calc.tools` module."""
import numpy as np
from metpy.calc import find_intersections, nearest_intersection_idx, resample_nn_1d
from metpy.testing import assert_array_almost_equal, assert_array_equal
def test_resample_nn():
"""Test 1d nearest neighbor functionality."""
a = np.arange(5.)
b = np.array([2, 3.8])
truth = np.array([2, 4])
assert_array_equal(truth, resample_nn_1d(a, b))
def test_nearest_intersection_idx():
"""Test nearest index to intersection functionality."""
x = np.linspace(5, 30, 17)
y1 = 3 * x**2
y2 = 100 * x - 650
truth = np.array([2, 12])
assert_array_equal(truth, nearest_intersection_idx(y1, y2))
def test_find_intersections():
"""Test finding the intersection of two curves functionality."""
x = np.linspace(5, 30, 17)
y1 = 3 * x**2
y2 = 100 * x - 650
# Truth is what we will get with this sampling,
# not the mathematical intersection
truth = np.array([[8.88, 24.44],
[238.84, 1794.53]])
assert_array_almost_equal(truth, find_intersections(x, y1, y2), 2)
| ahill818/MetPy | metpy/calc/tests/test_tools.py | Python | bsd-3-clause | 1,247 |