#!/usr/bin/env python2
#This is the main Bolinas script that runs the parser.
import sys
import fileinput
import math
import pprint
from argparse import ArgumentParser
# Bolinas imports
from config import config
from common.hgraph.hgraph import Hgraph
from common import log
from common import output
from common.exceptions import DerivationException
from common.grammar import Grammar
from parser.parser import Parser
from parser.vo_rule import VoRule
from parser_td.td_rule import TdRule
from parser_td.td_item import Item
from parser_td.parser_td import ParserTD
def read_pairs(input):
"""
An iterator over pairs of elements in an iterator.
"""
while True:
line1 = input.next()
try:
line2 = input.next()
except StopIteration:
raise IOError, "Uneven number of lines in input."
yield (line1, line2)
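# Hedged usage sketch (not part of the original script): read_pairs() yields
# consecutive line pairs from an iterator, e.g. the alternating graph/string
# lines of a bitext corpus. The file name below is hypothetical.
#
#   >>> pairs = read_pairs(open("bitext.txt"))
#   >>> next(pairs)   # (first line, second line)
#   >>> next(pairs)   # (third line, fourth line)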
if __name__ == "__main__":
# Parse all the command line arguments, figure out what to do and dispatch to the appropriate modules.
# Initialize the command line argument parser
argparser = ArgumentParser(description = "Bolinas is a toolkit for synchronous hyperedge replacement grammars.")
argparser.add_argument("grammar_file", help="A hyperedge replacement grammar (HRG) or synchronous HRG (SHRG).")
argparser.add_argument("input_file", nargs="?", help="Input file containing one object per line or pairs of objects. Use - to read from stdin.")
argparser.add_argument("-o","--output_file", type=str, help="Write output to a file instead of stdout.")
direction = argparser.add_mutually_exclusive_group()
direction.add_argument("-f","--forward", action="store_true", default=True, help="Apply the synchronous HRG left-to-right (default)")
direction.add_argument("-r","--backward", action="store_true", default=False, help="Apply the synchronous HRG right-to-left.")
direction.add_argument("-b","--bitext", action="store_true", default=False, help="Parse pairs of objects from an input file with alternating lines.")
argparser.add_argument("-ot","--output_type", type=str, default="derived", help="Set the type of the output to be produced for each object in the input file. \n'forest' produces parse forests.\n'derivation' produces k-best derivations.\n'derived' produces k-best derived objects (default).")
mode = argparser.add_mutually_exclusive_group()
mode.add_argument("-g", type=int, default=0, const=5, nargs='?', help ="Generate G random derivations from the grammar stochastically. Cannot be used with -k.")
mode.add_argument("-k",type=int, default=False, help ="Generate K best derivations for the objects in the input file. Cannot be used with -g (default with K=1).")
weights = argparser.add_mutually_exclusive_group()
#weights.add_argument("-d","--randomize", default=False, action="store_true", help="Randomize weights to be distributed between 0.2 and 0.8. Useful for EM training.")
weights.add_argument("-n","--normalize", default=False, action="store_true", help="Normalize weights. If -b is specified, rules with the same LHS sum up to 1.0. If -f is specified rules with the same LHS and second RHS sum up to 1.0. If -r is specified rules with the same LHS and first RHS sum up to 1.0.")
weights.add_argument("-t","--train", default=0, type=int, const=5, nargs='?', help="Use TRAIN iterations of EM to train weights for the grammar using the input (graph, string, or pairs of objects in alternating lines). Initialize with the weights in the grammar file or with uniform weights if none are provided. Writes a grammar file with trained weights to the output.")
argparser.add_argument("-m", "--weight_type", default="prob", help="Input/output in real probabilities ('prob', default) or log probabilities ('logprob').")
argparser.add_argument("-p","--parser", default="basic", help="Specify which graph parser to use. 'td': the tree decomposition parser of Chiang et al, ACL 2013 (default). 'basic': a basic generalization of CKY that matches rules according to an arbitrary visit order on edges (less efficient).")
argparser.add_argument("-e","--edge_labels", action="store_true", default=False, help="Consider only edge labels when matching HRG rules. By default node labels need to match. Warning: The default is potentially unsafe when node-labels are used for non-leaf nodes on the target side of a synchronous grammar.")
argparser.add_argument("-bn","--boundary_nodes", action="store_true", help="In the tree decomposition parser, use the full representation for graph fragments instead of the compact boundary node representation. This can provide some speedup for grammars with small rules.")
#argparser.add_argument("-s","--remove_spurious", default=False, action="store_true", help="Remove spurious ambiguity. Only keep the best derivation for identical derived objects.")
argparser.add_argument("-s","--start_symbol", default=None, type=str, help="Use this start symbol instead of the left hand side of the first rule in the grammar.")
argparser.add_argument("-v","--verbose", type=int, default=2, help="Stderr output verbosity: 0 (all off), 1 (warnings), 2 (info, default), 3 (details), 3 (debug)")
args = argparser.parse_args()
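    # Hedged CLI sketch (not part of the original script): with the options defined
    # above, a typical run might look like the following (file names are hypothetical):
    #
    #   python2 bolinas.py grammar.shrg input.graphs -k 5 -ot derived -o out.txt
    #
    # i.e. parse each object in input.graphs with the SHRG and write the 5 best
    # derived objects to out.txt.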
# Verify command line parameters
if not args.output_type in ['forest', 'derivation', 'derived', 'yield', 'both']:
log.err("Output type (-ot) must be either 'forest', 'derivation', or 'derived'.")
sys.exit(1)
if not args.weight_type in ['prob', 'logprob']:
log.err("Weight type (-m) must be either 'prob'or 'logprob'.")
sys.exit(1)
logprob = (args.weight_type == 'logprob')
if args.output_type == "forest":
if not args.output_file:
log.err("Need to provide '-o FILE_PREFIX' with output type 'forest'.")
sys.exit(1)
if args.k:
log.warn("Ignoring -k command line option because output type is 'forest'.")
if not args.parser in ['td', 'basic']:
log.err("Parser (-p) must be either 'td' or 'basic'.")
sys.exit(1)
if args.parser != 'td' and args.boundary_nodes:
log.warn('The -bn option is only relevant for the tree decomposition parser ("-p td").')
if args.k > config.maxk:
log.err("k must be <= than %i (defined in in args.py)." % args.maxk)
sys.exit(1)
if args.verbose < 0 or args.verbose > 4:
log.err("Invalid verbosity level, must be 0-4.")
sys.exit(1)
    # Update global configuration with command line args
config.__dict__.update(vars(args))
# Definition of logger output verbosity levels
log.LOG = {0:{log.err},
1:{log.err, log.warn},
2:{log.err, log.warn, log.info},
3:{log.err, log.warn, log.info, log.chatter},
4:{log.err, log.warn, log.chatter, log.info, log.debug}
}[config.verbose]
# Direct output to stdout if no filename is provided
    if config.output_type != "derivation":
        if config.output_file:
            output_file = open(config.output_file, 'w')
        else:
            output_file = sys.stdout
    with open(config.grammar_file, 'r') as grammar_file:
# Select the parser and rule class to use
if config.parser == 'td':
parser_class = ParserTD
rule_class = TdRule
if config.boundary_nodes:
parser_class.item_class = Item
elif config.parser == 'basic':
parser_class = Parser
rule_class = VoRule
# Read the grammar
grammar = Grammar.load_from_file(grammar_file, rule_class, config.backward, nodelabels = (not config.edge_labels), logprob = logprob)
if config.start_symbol:
grammar.start_symbol = config.start_symbol
if len(grammar) == 0:
log.err("Unable to load grammar from file.")
sys.exit(1)
log.info("Loaded %s%s grammar with %i rules."\
% (grammar.rhs1_type, "-to-%s" % grammar.rhs2_type if grammar.rhs2_type else '', len(grammar)))
# EM training
if config.train:
iterations = config.train
if not config.input_file:
log.err("Please specify corpus file for EM training.")
sys.exit(1)
if config.bitext:
corpus = list(read_pairs(fileinput.input(config.input_file)))
grammar.em(corpus, iterations, parser_class, "synchronous")
else:
corpus = [Hgraph.from_string(x) for x in fileinput.input(config.input_file)]
grammar.em(corpus, iterations, parser_class, "forward")
for rid in sorted(grammar.keys()):
output_file.write(str(grammar[rid]))
output_file.write("\n")
sys.exit(0)
# Normalization
if config.normalize:
            if config.bitext or grammar.rhs2_type is None or config.g or (config.k and not config.input_file):
grammar.normalize_lhs()
else:
grammar.normalize_rhs2()
for rid in sorted(grammar.keys()):
output_file.write(str(grammar[rid]))
output_file.write("\n")
sys.exit(0)
# kbest derivations from grammar
derivations = []
if config.k and not config.input_file:
grammar.normalize_lhs()
derivations = grammar.kbest(config.k)
# Stochastically generate derivations
if config.g:
grammar.normalize_lhs()
derivations = (grammar.stochastically_generate() for i in range(config.g))
if derivations:
for score, derivation in derivations:
if not logprob:
n_score = math.exp(score)
else:
n_score = score
if config.output_type == "derived":
if grammar.rhs2_type == "string":
output_file.write("%s\t#%f\n" % (" ".join(output.apply_string_derivation(derivation)), n_score))
else:
output_file.write("%s\t#%f\n" % (output.apply_graph_derivation(derivation).to_string(), n_score))
elif config.output_type == "derivation":
output_file.write("%s\t#%f\n" % (output.format_derivation(derivation), n_score))
elif config.output_type == "yield":
if grammar.rhs2_type == "string":
output_file.write("%s\t#%f\n" % (" ".join(output.apply_string_derivation(derivation)), n_score))
else:
output_file.write("%s\t#%f\n" % (" ".join(output.apply_graph_derivation(derivation).graph_yield()), n_score))
sys.exit(0)
# Otherwise set up the correct parser and parser options
parser = parser_class(grammar)
if grammar.rhs2_type is None and config.output_type == "derived" and not config.g:
log.info('Printing derivation trees for HRG.')
config.output_type = "derivation"
if not config.k:
config.k = 1
if config.bitext:
if parser_class == ParserTD:
log.err("Bigraph parsing with tree decomposition based parser is not yet implemented. Use '-p basic'.")
sys.exit(1)
parse_generator = parser.parse_bitexts(read_pairs(fileinput.input(config.input_file)))
else:
if grammar.rhs1_type == "string":
if parser_class == ParserTD:
log.err("Parser class needs to be 'basic' to parse strings.")
sys.exit(1)
else:
parse_generator = parser.parse_strings(x.strip().split() for x in fileinput.input(config.input_file))
else:
parse_generator = parser.parse_graphs(Hgraph.from_string(x) for x in fileinput.input(config.input_file))
# Process input (if any) and produce desired output
if config.input_file:
count = 1
# Run the parser for each graph in the input
for chart in parse_generator:
# Produce Tiburon format derivation forests
if config.output_type == "forest":
output_file = open("%s_%i.rtg" % (config.output_file, count), 'wa')
output_file.write(output.format_tiburon(chart))
output_file.close()
count = count + 1
# Produce k-best derivations
if config.output_type == "derivation" or config.output_type == "both":
l1s = []
kbest = chart.kbest('START', config.k)
if kbest and len(kbest) < config.k:
log.info("Found only %i derivations." % len(kbest))
for score, derivation in kbest:
n_score = score if logprob else math.exp(score)
l1s.append("%s\t#%g\n" % (output.format_derivation(derivation), n_score))
# Produce k-best derived graphs/strings
if config.output_type == "derived" or config.output_type == "both":
l2s = []
kbest = chart.kbest('START', config.k)
                    if kbest and len(kbest) < config.k:
log.info("Found only %i derivations." % len(kbest))
if grammar.rhs2_type == "hypergraph":
for score, derivation in kbest:
n_score = score if logprob else math.exp(score)
try:
l2s.append("%s\t#%g\n" % (output.apply_graph_derivation(derivation).to_string(newline = False), n_score))
except DerivationException,e:
log.err("Could not construct derivation: '%s'. Skipping." % e.message)
l2s.append("")
elif grammar.rhs2_type == "string":
for score, derivation in kbest:
n_score = score if logprob else math.exp(score)
l2s.append("%s\t#%g\n" % (" ".join(output.apply_string_derivation(derivation)), n_score))
if config.output_type == "derivation":
for l in l1s:
output_file.write(l)
output_file.write("\n")
elif config.output_type == "derived":
for l in l2s:
output_file.write(l)
output_file.write("\n")
elif config.output_type == "both":
for l1, l2 in zip(l1s, l2s):
output_file.write(l1)
output_file.write(l2)
output_file.write("\n")
"""SCons.Tool.mslink
Tool-specific initialization for the Microsoft linker.
There normally shouldn't be any need to import this module directly.
It will usually be imported through the generic SCons.Tool.Tool()
selection method.
"""
#
# Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "src/engine/SCons/Tool/mslink.py 4369 2009/09/19 15:58:29 scons"
import os.path
import SCons.Action
import SCons.Defaults
import SCons.Errors
import SCons.Platform.win32
import SCons.Tool
import SCons.Tool.msvc
import SCons.Tool.msvs
import SCons.Util
from MSCommon import merge_default_version, msvs_exists
def pdbGenerator(env, target, source, for_signature):
try:
return ['/PDB:%s' % target[0].attributes.pdb, '/DEBUG']
except (AttributeError, IndexError):
return None
def _dllTargets(target, source, env, for_signature, paramtp):
listCmd = []
dll = env.FindIxes(target, '%sPREFIX' % paramtp, '%sSUFFIX' % paramtp)
if dll: listCmd.append("/out:%s"%dll.get_string(for_signature))
implib = env.FindIxes(target, 'LIBPREFIX', 'LIBSUFFIX')
if implib: listCmd.append("/implib:%s"%implib.get_string(for_signature))
return listCmd
def _dllSources(target, source, env, for_signature, paramtp):
listCmd = []
deffile = env.FindIxes(source, "WINDOWSDEFPREFIX", "WINDOWSDEFSUFFIX")
for src in source:
# Check explicitly for a non-None deffile so that the __cmp__
# method of the base SCons.Util.Proxy class used for some Node
# proxies doesn't try to use a non-existent __dict__ attribute.
if deffile and src == deffile:
# Treat this source as a .def file.
listCmd.append("/def:%s" % src.get_string(for_signature))
else:
# Just treat it as a generic source file.
listCmd.append(src)
return listCmd
def windowsShlinkTargets(target, source, env, for_signature):
return _dllTargets(target, source, env, for_signature, 'SHLIB')
def windowsShlinkSources(target, source, env, for_signature):
return _dllSources(target, source, env, for_signature, 'SHLIB')
def _windowsLdmodTargets(target, source, env, for_signature):
"""Get targets for loadable modules."""
return _dllTargets(target, source, env, for_signature, 'LDMODULE')
def _windowsLdmodSources(target, source, env, for_signature):
"""Get sources for loadable modules."""
return _dllSources(target, source, env, for_signature, 'LDMODULE')
def _dllEmitter(target, source, env, paramtp):
"""Common implementation of dll emitter."""
SCons.Tool.msvc.validate_vars(env)
extratargets = []
extrasources = []
dll = env.FindIxes(target, '%sPREFIX' % paramtp, '%sSUFFIX' % paramtp)
no_import_lib = env.get('no_import_lib', 0)
if not dll:
raise SCons.Errors.UserError, 'A shared library should have exactly one target with the suffix: %s' % env.subst('$%sSUFFIX' % paramtp)
insert_def = env.subst("$WINDOWS_INSERT_DEF")
if not insert_def in ['', '0', 0] and \
not env.FindIxes(source, "WINDOWSDEFPREFIX", "WINDOWSDEFSUFFIX"):
# append a def file to the list of sources
extrasources.append(
env.ReplaceIxes(dll,
'%sPREFIX' % paramtp, '%sSUFFIX' % paramtp,
"WINDOWSDEFPREFIX", "WINDOWSDEFSUFFIX"))
version_num, suite = SCons.Tool.msvs.msvs_parse_version(env.get('MSVS_VERSION', '6.0'))
if version_num >= 8.0 and env.get('WINDOWS_INSERT_MANIFEST', 0):
# MSVC 8 automatically generates .manifest files that must be installed
extratargets.append(
env.ReplaceIxes(dll,
'%sPREFIX' % paramtp, '%sSUFFIX' % paramtp,
"WINDOWSSHLIBMANIFESTPREFIX", "WINDOWSSHLIBMANIFESTSUFFIX"))
if env.has_key('PDB') and env['PDB']:
pdb = env.arg2nodes('$PDB', target=target, source=source)[0]
extratargets.append(pdb)
target[0].attributes.pdb = pdb
if not no_import_lib and \
not env.FindIxes(target, "LIBPREFIX", "LIBSUFFIX"):
# Append an import library to the list of targets.
extratargets.append(
env.ReplaceIxes(dll,
'%sPREFIX' % paramtp, '%sSUFFIX' % paramtp,
"LIBPREFIX", "LIBSUFFIX"))
# and .exp file is created if there are exports from a DLL
extratargets.append(
env.ReplaceIxes(dll,
'%sPREFIX' % paramtp, '%sSUFFIX' % paramtp,
"WINDOWSEXPPREFIX", "WINDOWSEXPSUFFIX"))
return (target+extratargets, source+extrasources)
def windowsLibEmitter(target, source, env):
return _dllEmitter(target, source, env, 'SHLIB')
def ldmodEmitter(target, source, env):
"""Emitter for loadable modules.
Loadable modules are identical to shared libraries on Windows, but building
them is subject to different parameters (LDMODULE*).
"""
return _dllEmitter(target, source, env, 'LDMODULE')
def prog_emitter(target, source, env):
SCons.Tool.msvc.validate_vars(env)
extratargets = []
exe = env.FindIxes(target, "PROGPREFIX", "PROGSUFFIX")
if not exe:
raise SCons.Errors.UserError, "An executable should have exactly one target with the suffix: %s" % env.subst("$PROGSUFFIX")
version_num, suite = SCons.Tool.msvs.msvs_parse_version(env.get('MSVS_VERSION', '6.0'))
if version_num >= 8.0 and env.get('WINDOWS_INSERT_MANIFEST', 0):
# MSVC 8 automatically generates .manifest files that have to be installed
extratargets.append(
env.ReplaceIxes(exe,
"PROGPREFIX", "PROGSUFFIX",
"WINDOWSPROGMANIFESTPREFIX", "WINDOWSPROGMANIFESTSUFFIX"))
if env.has_key('PDB') and env['PDB']:
pdb = env.arg2nodes('$PDB', target=target, source=source)[0]
extratargets.append(pdb)
target[0].attributes.pdb = pdb
return (target+extratargets,source)
def RegServerFunc(target, source, env):
if env.has_key('register') and env['register']:
ret = regServerAction([target[0]], [source[0]], env)
if ret:
raise SCons.Errors.UserError, "Unable to register %s" % target[0]
else:
print "Registered %s sucessfully" % target[0]
return ret
return 0
regServerAction = SCons.Action.Action("$REGSVRCOM", "$REGSVRCOMSTR")
regServerCheck = SCons.Action.Action(RegServerFunc, None)
shlibLinkAction = SCons.Action.Action('${TEMPFILE("$SHLINK $SHLINKFLAGS $_SHLINK_TARGETS $_LIBDIRFLAGS $_LIBFLAGS $_PDB $_SHLINK_SOURCES")}')
compositeShLinkAction = shlibLinkAction + regServerCheck
ldmodLinkAction = SCons.Action.Action('${TEMPFILE("$LDMODULE $LDMODULEFLAGS $_LDMODULE_TARGETS $_LIBDIRFLAGS $_LIBFLAGS $_PDB $_LDMODULE_SOURCES")}')
compositeLdmodAction = ldmodLinkAction + regServerCheck
def generate(env):
"""Add Builders and construction variables for ar to an Environment."""
SCons.Tool.createSharedLibBuilder(env)
SCons.Tool.createProgBuilder(env)
env['SHLINK'] = '$LINK'
env['SHLINKFLAGS'] = SCons.Util.CLVar('$LINKFLAGS /dll')
env['_SHLINK_TARGETS'] = windowsShlinkTargets
env['_SHLINK_SOURCES'] = windowsShlinkSources
env['SHLINKCOM'] = compositeShLinkAction
env.Append(SHLIBEMITTER = [windowsLibEmitter])
env['LINK'] = 'link'
env['LINKFLAGS'] = SCons.Util.CLVar('/nologo')
env['_PDB'] = pdbGenerator
env['LINKCOM'] = '${TEMPFILE("$LINK $LINKFLAGS /OUT:$TARGET.windows $_LIBDIRFLAGS $_LIBFLAGS $_PDB $SOURCES.windows")}'
env.Append(PROGEMITTER = [prog_emitter])
env['LIBDIRPREFIX']='/LIBPATH:'
env['LIBDIRSUFFIX']=''
env['LIBLINKPREFIX']=''
env['LIBLINKSUFFIX']='$LIBSUFFIX'
env['WIN32DEFPREFIX'] = ''
env['WIN32DEFSUFFIX'] = '.def'
env['WIN32_INSERT_DEF'] = 0
env['WINDOWSDEFPREFIX'] = '${WIN32DEFPREFIX}'
env['WINDOWSDEFSUFFIX'] = '${WIN32DEFSUFFIX}'
env['WINDOWS_INSERT_DEF'] = '${WIN32_INSERT_DEF}'
env['WIN32EXPPREFIX'] = ''
env['WIN32EXPSUFFIX'] = '.exp'
env['WINDOWSEXPPREFIX'] = '${WIN32EXPPREFIX}'
env['WINDOWSEXPSUFFIX'] = '${WIN32EXPSUFFIX}'
env['WINDOWSSHLIBMANIFESTPREFIX'] = ''
env['WINDOWSSHLIBMANIFESTSUFFIX'] = '${SHLIBSUFFIX}.manifest'
env['WINDOWSPROGMANIFESTPREFIX'] = ''
env['WINDOWSPROGMANIFESTSUFFIX'] = '${PROGSUFFIX}.manifest'
env['REGSVRACTION'] = regServerCheck
env['REGSVR'] = os.path.join(SCons.Platform.win32.get_system_root(),'System32','regsvr32')
env['REGSVRFLAGS'] = '/s '
env['REGSVRCOM'] = '$REGSVR $REGSVRFLAGS ${TARGET.windows}'
# Set-up ms tools paths for default version
merge_default_version(env)
# Loadable modules are on Windows the same as shared libraries, but they
# are subject to different build parameters (LDMODULE* variables).
# Therefore LDMODULE* variables correspond as much as possible to
# SHLINK*/SHLIB* ones.
SCons.Tool.createLoadableModuleBuilder(env)
env['LDMODULE'] = '$SHLINK'
env['LDMODULEPREFIX'] = '$SHLIBPREFIX'
env['LDMODULESUFFIX'] = '$SHLIBSUFFIX'
env['LDMODULEFLAGS'] = '$SHLINKFLAGS'
env['_LDMODULE_TARGETS'] = _windowsLdmodTargets
env['_LDMODULE_SOURCES'] = _windowsLdmodSources
env['LDMODULEEMITTER'] = [ldmodEmitter]
env['LDMODULECOM'] = compositeLdmodAction
def exists(env):
return msvs_exists()
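# Hedged usage sketch (not part of the original tool file): this module is normally
# applied through SCons' generic tool selection rather than imported directly, e.g.
# (target and source names are hypothetical):
#
#   import SCons.Environment
#   env = SCons.Environment.Environment(tools=['msvc', 'mslink'])
#   env.SharedLibrary(target='example', source=['example.c'])
#   env.Program(target='app', source=['app.c'])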
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
#!/usr/bin/env python
"""Yet another HTTP Server (YaHS)
Provides simple decorator API to quickly create and test RESTful APIs
"""
__author__ = 'Tim Sullivan'
__version__ = '1.1'
__license__ = 'MIT'
import sys
import os
import signal
import socket
import threading
import ssl
import logging
import re
import collections
import inspect
if sys.version_info >= (3, 0):
    from urllib.parse import parse_qs
    from importlib import reload  # reload() is no longer a builtin on Python 3
else:
    from urlparse import parse_qs
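# Hedged illustration (not part of the original module): parse_qs maps a query string
# to a dict of value lists, which is how Request.get_query is populated below, e.g.
#
#   >>> parse_qs('status=new&cake=lie', True)
#   {'status': ['new'], 'cake': ['lie']}
#   >>> parse_qs('egg', True)   # keep_blank_values=True keeps bare keys
#   {'egg': ['']}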
class Request:
"""Structure of an incoming HTTP request.
    Requests are created by the running HttpWorker threads and carry the request
    URI. Handlers register for the request URL patterns they are interested in.
    """
def __init__(self, method, uri, headers, get_query, address="127.0.0.1"):
self.method = method # e.g GET, PUT, HEAD
self.uri = uri # e.g /index.html
self.headers = headers
        self.get_query = get_query  # e.g. /couches/?orderby=lowestprice becomes {'orderby': ['lowestprice']}
self.body = None # if a PUT/POST request this will contain the raw data
self.remote_address = address
def __str__(self):
"""
String representation of a Request
"""
return "Request: {} {}\nHeaders:\n{}\nQuerystring:\n{}\n".format(
self.method,
self.uri,
self.headers,
self.get_query)
class Response:
"""Structure of a HTTP Response destined for the client.
    Handlers are responsible for returning a Response to the HttpWorker
which gets sent to the client.
"""
def __init__(self):
self.status_code = 200
self.status_message = "OK"
self.headers = {
            'Server': 'YaHS (Yet another HTTP Server) v{}'.format(__version__),
'Content-Type': "text/html",
'Access-Control-Allow-Origin': '*' # ruthless
}
self.body = b''
def send(self, client_socket):
"""Send the Response to the client socket provided.
This method is called *Internally* and should not be used directly.
"""
client_socket.send("HTTP/1.1 {0} {1}\r\n".format(self.status_code, self.status_message).encode('utf-8'))
self.headers['Content-Length'] = str(len(self.body))
for header in self.headers:
client_socket.send((header + ": " + self.headers[header] + "\r\n").encode('utf-8'))
client_socket.send(b'\r\n')
if type(self.body) is str:
self.body = self.body.encode('utf-8')
client_socket.send(self.body)
class HttpWorker(threading.Thread):
"""Process all the HTTP protocol work here in a Thread.
:param: args expects (client_socket, client_address) from socket.accept() call
"""
def __init__(self, group=None, target=None, name=None, args=(), kwargs=None):
# call 'super' constructor to init thread
super(HttpWorker, self).__init__(group=group, target=target, name=name, args=args, kwargs=kwargs)
# management fields
self.keep_running = True
self.client_socket = args[0]
self.client_address = args[1]
def run(self):
"""Process each client connection.
Parses the request to create a Request object,
gets a Response by finding a request handler that matches a regex.
The response is then sent and the connection closed.
This is run in a new thread for each request
"""
# parse http. Result is a new Request object
request = self.parse_request()
# generate a response by calling the handler which does the magic
response = self.handle_request(request)
# log the request and the response status to console
self.audit_log(request, response)
# send the response back to the client
response.send(self.client_socket)
# we're done...
self.client_socket.shutdown(socket.SHUT_RDWR)
self.client_socket.close()
def parse_request(self):
"""Reads the tcp client socket to make a Request.
:return: a Request object
"""
request = b''
data = b'' # if we had PUT or POST data build it here
post_flag = False
http_request = None
while True:
got_bytes = 0
new_data = self.client_socket.recv(8192)
if len(new_data) == 0:
break # got EOF meh
# if the request has just started
if not post_flag:
for line in new_data.split(b'\n'):
                        request += line + b'\n'  # add the line break back
                        got_bytes += len(line) + 1
                        if len(line) <= 1:  # assumed to have reached \r\n\r\n (end of the headers)
request_lines = request.split(b'\r\n')
request_speci = request_lines[0].decode().split() # eg ['GET', '/', 'HTTP/1.1']
request_headers = {}
for header in request_lines[1:]:
try:
(var, val) = header.split(b': ') # split header key/value pairs into 2 components
request_headers[var.decode()] = val.decode()
except ValueError:
pass
# process querystring in request if any eg GET /?status=new&cake=lie
# resulting uri variable should then have the querystring chopped off.
# true keeps any blank values e.g /?egg
get_query = parse_qs(request_speci[1].replace('/?', ''), True)
# chop off querystring, e.g: /?status=new&cake=lie becomes /
uri = request_speci[1].split('?')[0]
# create an instance of a Request object
http_request = Request(request_speci[0], uri, request_headers, get_query,
address=self.client_address)
if request_speci[0] == 'POST' or request_speci[0] == 'PUT':
post_flag = True
data += new_data[got_bytes:]
if len(data) == int(http_request.headers['Content-Length']):
logging.debug("Finished reading POST request")
http_request.body = data
return http_request
else:
# exit for, post flag is set, will continue reading post later on
break
else:
return http_request
else:
# we have more POST/PUT data to process
data += new_data
if len(data) == int(http_request.headers['Content-Length']):
http_request.body = data
logging.debug("Finished reading large file")
break
elif len(data) >= int(http_request.headers['Content-Length']):
logging.warning("Got more data from client than specified in Content-Length")
# should return a bad request
return http_request
def handle_request(self, request):
"""Search the list of registered Request handlers which match an expression.
Calls handler if found otherwise should send 404
request incoming Request object
returns a Response destined for the client
"""
if request is None:
logging.warning("Tried to handle a None Request")
response = Response()
response.status_code = 400
response.status_message = "Bad Request None Got It"
return response
logging.debug(request)
# first check if we support that request method type i.e any registered handlers for it
if request.method not in Server.handlers:
response = Response()
response.status_code = 400
response.status_message = 'Bad Request'
response.body = "<h1>400 Bad Request</h1><p>The server could support your request</p>"
return response
# spin over the registered get handlers and call a match. O(n) where n is number of registered method handlers
for urlpattern in Server.handlers[request.method]:
match = urlpattern.match(request.uri)
if match is not None:
                # found matching urlpattern. Get named regex back-reference values as args
args = match.groupdict()
# call our registered handler for that url with unpacked args
func = Server.handlers[request.method][urlpattern] # awesomeness
res = func(request, **args)
if type(res) is str:
response = Response()
response.body = res
return response
elif res is None:
response = Response()
response.status_code = 204
response.status_message = 'No Content'
logging.debug("Got a None response back from handler check return response exists?")
return response
return res
# If we reached here then it's time for a 404
response = Response()
response.status_code = 404
response.status_message = 'Not Found'
response.body = "<h1>404 Not Found</h1><p>The server could not find a resource matching your request :(</p>"
return response
@staticmethod
def audit_log(request, response):
"""Logs request and Response http status code destined for the client
request The original Request they made
response The actual Response destined for the client
"""
logging.info("{} {} Response: {}".format(request.method, request.uri, response.status_code))
class ListenerThread(threading.Thread):
def __init__(self, group=None, target=None, name=None, args=(), kwargs=None):
"""
Create the server socket listener thread.
args is required with (hostname, port, https_enabled)
"""
# call 'super' constructor to init thread
super(ListenerThread, self).__init__(group=group, target=target, name=name, args=args, kwargs=kwargs)
self.hostname = args[0]
self.port = args[1]
self.secure = args[2]
self.socket = None
if kwargs:
if 'keyfile' in kwargs:
self.key_file = kwargs['keyfile']
else:
self.key_file = None
if 'certfile' in kwargs:
self.certificate_file = kwargs['certfile']
else:
self.certificate_file = None
else:
self.key_file = None
self.certificate_file = None
self.setup_listening()
def setup_listening(self):
logging.info("Starting ListenerThread on {0}:{1}".format(self.hostname, self.port))
server_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
server_socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
server_socket.bind((self.hostname, self.port))
server_socket.listen(5)
self.socket = server_socket
if self.secure:
self.wrap_ssl()
def run(self):
logging.debug("Entering http server loop")
while True:
try:
(client_socket, address) = self.socket.accept()
logging.debug("Accepted connection from %s", address)
# create a HttpWorker thread, passing in the client socket
http_thread = HttpWorker(args=(client_socket, address))
http_thread.start()
except ssl.SSLEOFError:
# find this happening when browser issues warning and ends tcp stream
logging.debug("Reached EOF when SSL connection was being accepted")
continue
except ssl.SSLError as err:
logging.warning("Got a {} error: {}".format(err['library'], err['reason']))
continue
def wrap_ssl(self):
# Wrap socket in SSL. TODO look into using ssl.SSLContexts for better support of browsers
try:
logging.debug("Attempting to load private key at %s", self.key_file)
logging.debug("Attempting to load certificates at %s", self.certificate_file)
self.socket = ssl.wrap_socket(self.socket,
server_side=True,
certfile=self.certificate_file,
keyfile=self.key_file)
logging.debug("Certificates and server key loaded")
except IOError as err:
logging.warning("Could not find SSL certificate or private key file. Not starting ssl")
logging.warning(err)
self.socket.close()
return
class Server:
"""Server listens for secure and non-secure sockets.
Spawns HttpWorker threads to handle the HTTP/1.1 protocol.
    handlers Dictionary mapping URL pattern regular expressions to event handler functions
"""
# Event Handling Structure for requests
handlers = {}
@staticmethod
def handle(method, uri):
"""Decorator for registering Request handlers
Takes a HTTP method as string such as 'GET'
Takes a regex string to compile and register for events
"""
def request_handler_decorator(func):
# build regular expression
uri_expression = re.compile(uri)
# update global handler dict dynamically as http methods are registered
if method not in Server.handlers:
logging.debug("Creating new handler structure for %s method type.", method)
Server.handlers[method] = collections.OrderedDict() # order of regex key insertion matters
Server.handlers[method][uri_expression] = func # add new function mapped to url regex
return func
return request_handler_decorator
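    # Hedged usage sketch (not part of the original class): a handler is registered by
    # decorating a plain function; named regex groups become keyword arguments. The
    # URL pattern and handler below are hypothetical.
    #
    #   @Server.handle('GET', r'^/hello/(?P<name>\w+)/?$')
    #   def hello(request, name):
    #       return "<h1>Hello, {}!</h1>".format(name)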
def __init__(self, hostname='localhost', port=4321, secure=False, keyfile=None, certfile=None):
"""Create a live running http server instance to go
It will start listening on the specified port but won't run yet until start() is called.
"""
self.base_port = port
self.hostname = hostname
self.secure = secure
self.key_file = keyfile
self.certificate_file = certfile
        # Bind the signal handler: SIGINT is sent to the process when CTRL-C is pressed
signal.signal(signal.SIGINT, self.handle_shutdown)
self.listener = ListenerThread(args=(self.hostname, self.base_port, False))
self.listener.daemon = True
def handle_shutdown(self, signal_unused, frame_unused):
"""If the server receives a signal (e.g. Ctrl-C/Ctrl-Break), terminate gracefully
"""
logging.info("SIGINT Signal received; exiting gracefully...")
sys.exit(0)
def start(self):
"""Start the server mainloop.
        By now the server should have been initialized and be ready to enter the run loop.
"""
self.listener.start()
if self.secure:
key_dict = {'keyfile': self.key_file, 'certfile': self.certificate_file}
secure_listener = ListenerThread(args=(self.hostname, self.base_port + 1, True), kwargs=key_dict)
secure_listener.daemon = True
secure_listener.start()
return self
def wait(self):
"""Helper to block main thread to keep process running.
"""
logging.info('Waiting for connections...')
        while self.listener.is_alive():
self.listener.join(1)
@Server.handle('GET', r'^/$')
@Server.handle('GET', r'^/yahs/api/?$')
def api_index(request):
"""Display the API index page for browsing loaded Request handlers.
In conclusion, this is quite awesome.
"""
response = Response()
body = "<h1>Welcome to YaHS! API Index</h1>"
for method in Server.handlers:
body += "<h2 style='color: #555;'>{}</h2>".format(method)
for regex in Server.handlers[method]:
body += "<ul>"
func = Server.handlers[method][regex]
module = inspect.getmodule(func)
            var_names = func.__code__.co_varnames[:func.__code__.co_argcount]  # __code__ works on Python 2.6+ and 3.x
body += "<li><strong>{}</strong> <pre>{}: <span style='color: #00a;'>def</span> {}{}</pre><em>{}</em></li>".format(
regex.pattern,
module.__name__,
func.__name__,
var_names,
func.__doc__)
body += "</ul>"
response.body = body
return response
@Server.handle('GET', '^/apidocs/?$')
def api_docs(request):
body = """
<!DOCTYPE html>
<html>
<head>
<meta charset="utf-8">
<meta name="viewport" content="width=device-width, initial-scale=1">
<title>YaHS Live API Docs</title>
<style type="text/css">
body {font-family: sans-serif; }
.urlpattern { border: #ddd 1px solid; border-radius: 4px; box-shadow: 0px 1px 1px rgba(0, 0, 0, 0.1); margin-top: 1.5em;}
.urlpattern header {color: #FFF; background-color: #1F5C99; height: 2em}
.urlpattern div {padding: 1em; }
.method-type { background-color: #143D66; font-weight: bold; padding: 0.4em; margin: 0.2em}
</style>
<body>
<h2>YaHS Live API Docs</h2>
"""
for method in Server.handlers:
for urlpattern in Server.handlers[method]:
func = Server.handlers[method][urlpattern]
module = inspect.getmodule(func)
# var_names = func.func_code.co_varnames[:func.func_code.co_argcount] # python 2.7.x
var_names = func.__code__.co_varnames[:func.__code__.co_argcount] # 3.4.x
body += "<section class='urlpattern'>"
body += "<header>"
body += "<span class='method-type'>{}</span>".format(method)
body += "<code>{}</code>".format(urlpattern.pattern)
body += "</header>"
body += "<div>{}</div>".format(func.__doc__)
body += "</section>"
body += "</body></html>"
return body
@Server.handle("GET", r'^/yahs/reload/?$')
def reload_server(request):
"""Re-Load the server event handling module
"""
logging.warning("Reloading event handler modules")
reloaded_modules = []
for method in Server.handlers:
for regex in Server.handlers[method]:
func = Server.handlers[method][regex]
module = inspect.getmodule(func)
if module.__name__ != "yahs":
if module not in reloaded_modules:
logging.info("Reloading {} module.".format(module.__name__))
reload(module)
reloaded_modules.append(module)
res = Response()
res.body = "Server reloaded"
return res
@Server.handle('OPTIONS', '.')
def handle_cors(request):
"""Ruthlessly handle anything that looks like a Cross Origin request.
Just repeats back what the browser requested granting all.
"""
response = Response()
response.headers['Access-Control-Allow-Origin'] = request.headers['Origin']
response.headers['Access-Control-Allow-Method'] = request.headers['Access-Control-Request-Method']
response.headers['Access-Control-Allow-Headers'] = request.headers['Access-Control-Request-Headers']
return response
# main :)
if __name__ == "__main__":
if len(sys.argv) > 1:
tcp_port = int(sys.argv[1])
server = Server(port=tcp_port)
else:
server = Server() # defaults to http://localhost:4321, https://localhost:4322
server.start().wait()
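# Hedged end-to-end sketch (not part of the original module): when this file is
# imported as the module "yahs", a minimal service could look like the following;
# the port and handler are assumptions.
#
#   import yahs
#
#   @yahs.Server.handle('GET', r'^/ping/?$')
#   def ping(request):
#       return "pong"
#
#   yahs.Server(port=8080).start().wait()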
"""
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import os.path
import time
from ambari_commons import constants
from resource_management.core import shell
from resource_management.core.source import Template
from resource_management.core.resources.system import File, Execute, Directory
from resource_management.core.resources.service import Service
from resource_management.libraries.functions import namenode_ha_utils
from resource_management.libraries.functions.decorator import retry
from resource_management.libraries.functions.format import format
from resource_management.libraries.functions.check_process_status import check_process_status
from resource_management.libraries.resources.execute_hadoop import ExecuteHadoop
from resource_management.libraries.functions import Direction
from ambari_commons import OSCheck, OSConst
from ambari_commons.os_family_impl import OsFamilyImpl, OsFamilyFuncImpl
from utils import get_dfsadmin_base_command
from utils import set_up_zkfc_security
if OSCheck.is_windows_family():
from resource_management.libraries.functions.windows_service_utils import check_windows_service_status
from resource_management.core.exceptions import Fail
from resource_management.core.logger import Logger
from utils import service, safe_zkfc_op, is_previous_fs_image
from setup_ranger_hdfs import setup_ranger_hdfs, create_ranger_audit_hdfs_directories
import namenode_upgrade
def wait_for_safemode_off(hdfs_binary, afterwait_sleep=0, execute_kinit=False, retries=115, sleep_seconds=10):
"""
During NonRolling (aka Express Upgrade), after starting NameNode, which is still in safemode, and then starting
  all of the DataNodes, we need the NameNode to receive all of the block reports and leave safemode.
If HA is present, then this command will run individually on each NameNode, which checks for its own address.
"""
import params
sleep_minutes = int(sleep_seconds * retries / 60)
Logger.info("Waiting up to {0} minutes for the NameNode to leave Safemode...".format(sleep_minutes))
if params.security_enabled and execute_kinit:
kinit_command = format("{params.kinit_path_local} -kt {params.hdfs_user_keytab} {params.hdfs_principal_name}")
Execute(kinit_command, user=params.hdfs_user, logoutput=True)
try:
# Note, this fails if namenode_address isn't prefixed with "params."
dfsadmin_base_command = get_dfsadmin_base_command(hdfs_binary, use_specific_namenode=True)
is_namenode_safe_mode_off = dfsadmin_base_command + " -safemode get | grep 'Safe mode is OFF'"
    # Wait until safemode turns off (up to retries * sleep_seconds seconds)
Execute(is_namenode_safe_mode_off, tries=retries, try_sleep=sleep_seconds,
user=params.hdfs_user, logoutput=True)
# Wait a bit more since YARN still depends on block reports coming in.
# Also saw intermittent errors with HBASE service check if it was done too soon.
time.sleep(afterwait_sleep)
except Fail:
Logger.error("The NameNode is still in Safemode. Please be careful with commands that need Safemode OFF.")
@OsFamilyFuncImpl(os_family=OsFamilyImpl.DEFAULT)
def namenode(action=None, hdfs_binary=None, do_format=True, upgrade_type=None,
upgrade_suspended=False, env=None):
if action is None:
raise Fail('"action" parameter is required for function namenode().')
if action in ["start", "stop"] and hdfs_binary is None:
raise Fail('"hdfs_binary" parameter is required for function namenode().')
if action == "configure":
import params
#we need this directory to be present before any action(HA manual steps for
#additional namenode)
create_name_dirs(params.dfs_name_dir)
    # set up failover / secure zookeeper ACLs; this feature is supported from HDP 2.6 onwards
set_up_zkfc_security(params)
elif action == "start":
Logger.info("Called service {0} with upgrade_type: {1}".format(action, str(upgrade_type)))
setup_ranger_hdfs(upgrade_type=upgrade_type)
import params
File(params.exclude_file_path,
content=Template("exclude_hosts_list.j2"),
owner=params.hdfs_user,
group=params.user_group
)
if params.hdfs_include_file:
File(params.include_file_path,
content=Template("include_hosts_list.j2"),
owner=params.hdfs_user,
group=params.user_group
)
pass
if do_format and not params.hdfs_namenode_format_disabled:
format_namenode()
pass
if params.dfs_ha_enabled and \
params.dfs_ha_namenode_standby is not None and \
(params.hostname == params.dfs_ha_namenode_standby or params.public_hostname == params.dfs_ha_namenode_standby):
# if the current host is the standby NameNode in an HA deployment
# run the bootstrap command, to start the NameNode in standby mode
# this requires that the active NameNode is already up and running,
# so this execute should be re-tried upon failure, up to a timeout
success = bootstrap_standby_namenode(params)
if not success:
raise Fail("Could not bootstrap standby namenode")
if upgrade_type == constants.UPGRADE_TYPE_ROLLING and params.dfs_ha_enabled:
# Most likely, ZKFC is up since RU will initiate the failover command. However, if that failed, it would have tried
# to kill ZKFC manually, so we need to start it if not already running.
safe_zkfc_op(action, env)
options = ""
if upgrade_type == constants.UPGRADE_TYPE_ROLLING:
if params.upgrade_direction == Direction.UPGRADE:
options = "-rollingUpgrade started"
elif params.upgrade_direction == Direction.DOWNGRADE:
options = "-rollingUpgrade downgrade"
elif upgrade_type == constants.UPGRADE_TYPE_NON_ROLLING:
is_previous_image_dir = is_previous_fs_image()
Logger.info("Previous file system image dir present is {0}".format(str(is_previous_image_dir)))
if params.upgrade_direction == Direction.UPGRADE:
options = "-rollingUpgrade started"
elif params.upgrade_direction == Direction.DOWNGRADE:
options = "-rollingUpgrade downgrade"
elif upgrade_type == constants.UPGRADE_TYPE_HOST_ORDERED:
# nothing special to do for HOU - should be very close to a normal restart
pass
elif upgrade_type is None and upgrade_suspended is True:
# the rollingUpgrade flag must be passed in during a suspended upgrade when starting NN
if os.path.exists(namenode_upgrade.get_upgrade_in_progress_marker()):
options = "-rollingUpgrade started"
else:
Logger.info("The NameNode upgrade marker file {0} does not exist, yet an upgrade is currently suspended. "
"Assuming that the upgrade of NameNode has not occurred yet.".format(namenode_upgrade.get_upgrade_in_progress_marker()))
Logger.info("Options for start command are: {0}".format(options))
service(
action="start",
name="namenode",
user=params.hdfs_user,
options=options,
create_pid_dir=True,
create_log_dir=True
)
if params.security_enabled:
Execute(format("{kinit_path_local} -kt {hdfs_user_keytab} {hdfs_principal_name}"),
user = params.hdfs_user)
# ___Scenario___________|_Expected safemode state__|_Wait for safemode OFF____|
# no-HA | ON -> OFF | Yes |
# HA and active | ON -> OFF | Yes |
# HA and standby | no change | No |
# RU with HA on active | ON -> OFF | Yes |
# RU with HA on standby | ON -> OFF | Yes |
# EU with HA on active | ON -> OFF | No |
# EU with HA on standby | ON -> OFF | No |
# EU non-HA | ON -> OFF | No |
# because we do things like create directories after starting NN,
# the vast majority of the time this should be True - it should only
# be False if this is HA and we are the Standby NN
ensure_safemode_off = True
# True if this is the only NameNode (non-HA) or if its the Active one in HA
is_active_namenode = True
if params.dfs_ha_enabled:
Logger.info("Waiting for the NameNode to broadcast whether it is Active or Standby...")
if is_this_namenode_active() is False:
# we are the STANDBY NN
is_active_namenode = False
# we are the STANDBY NN and this restart is not part of an upgrade
if upgrade_type is None:
ensure_safemode_off = False
# During an Express Upgrade, NameNode will not leave SafeMode until the DataNodes are started,
# so always disable the Safemode check
if upgrade_type == constants.UPGRADE_TYPE_NON_ROLLING:
ensure_safemode_off = False
# some informative logging separate from the above logic to keep things a little cleaner
if ensure_safemode_off:
Logger.info("Waiting for this NameNode to leave Safemode due to the following conditions: HA: {0}, isActive: {1}, upgradeType: {2}".format(
params.dfs_ha_enabled, is_active_namenode, upgrade_type))
else:
Logger.info("Skipping Safemode check due to the following conditions: HA: {0}, isActive: {1}, upgradeType: {2}".format(
params.dfs_ha_enabled, is_active_namenode, upgrade_type))
# wait for Safemode to end
if ensure_safemode_off:
if params.rolling_restart and params.rolling_restart_safemode_exit_timeout:
calculated_retries = int(params.rolling_restart_safemode_exit_timeout) / 30
wait_for_safemode_off(hdfs_binary, afterwait_sleep=30, retries=calculated_retries, sleep_seconds=30)
else:
wait_for_safemode_off(hdfs_binary)
# Always run this on the "Active" NN unless Safemode has been ignored
# in the case where safemode was ignored (like during an express upgrade), then
# NN will be in SafeMode and cannot have directories created
if is_active_namenode and ensure_safemode_off:
create_hdfs_directories()
create_ranger_audit_hdfs_directories()
else:
Logger.info("Skipping creation of HDFS directories since this is either not the Active NameNode or we did not wait for Safemode to finish.")
elif action == "stop":
import params
service(
action="stop", name="namenode",
user=params.hdfs_user
)
elif action == "status":
import status_params
check_process_status(status_params.namenode_pid_file)
elif action == "decommission":
decommission()
@OsFamilyFuncImpl(os_family=OSConst.WINSRV_FAMILY)
def namenode(action=None, hdfs_binary=None, do_format=True, upgrade_type=None,
upgrade_suspended=False, env=None):
if action is None:
raise Fail('"action" parameter is required for function namenode().')
if action in ["start", "stop"] and hdfs_binary is None:
raise Fail('"hdfs_binary" parameter is required for function namenode().')
if action == "configure":
pass
elif action == "start":
import params
#TODO: Replace with format_namenode()
namenode_format_marker = os.path.join(params.hadoop_conf_dir,"NN_FORMATTED")
if not os.path.exists(namenode_format_marker):
hadoop_cmd = "cmd /C %s" % (os.path.join(params.hadoop_home, "bin", "hadoop.cmd"))
Execute("%s namenode -format" % (hadoop_cmd), logoutput=True)
open(namenode_format_marker, 'a').close()
Service(params.namenode_win_service_name, action=action)
elif action == "stop":
import params
Service(params.namenode_win_service_name, action=action)
elif action == "status":
import status_params
check_windows_service_status(status_params.namenode_win_service_name)
elif action == "decommission":
decommission()
def create_name_dirs(directories):
import params
dirs = directories.split(",")
Directory(dirs,
mode=0755,
owner=params.hdfs_user,
group=params.user_group,
create_parents = True,
cd_access="a",
)
def create_hdfs_directories():
import params
params.HdfsResource(params.hdfs_tmp_dir,
type="directory",
action="create_on_execute",
owner=params.hdfs_user,
mode=0777,
)
params.HdfsResource(params.smoke_hdfs_user_dir,
type="directory",
action="create_on_execute",
owner=params.smoke_user,
mode=params.smoke_hdfs_user_mode,
)
params.HdfsResource(None,
action="execute",
)
def format_namenode(force=None):
import params
old_mark_dir = params.namenode_formatted_old_mark_dirs
mark_dir = params.namenode_formatted_mark_dirs
dfs_name_dir = params.dfs_name_dir
hdfs_user = params.hdfs_user
hadoop_conf_dir = params.hadoop_conf_dir
if not params.dfs_ha_enabled:
if force:
ExecuteHadoop('namenode -format',
bin_dir=params.hadoop_bin_dir,
conf_dir=hadoop_conf_dir,
logoutput=True)
else:
if not is_namenode_formatted(params):
Execute(format("hdfs --config {hadoop_conf_dir} namenode -format -nonInteractive"),
user = params.hdfs_user,
path = [params.hadoop_bin_dir],
logoutput=True
)
for m_dir in mark_dir:
Directory(m_dir,
create_parents = True
)
else:
if params.dfs_ha_namenode_active is not None and \
(params.hostname == params.dfs_ha_namenode_active or params.public_hostname == params.dfs_ha_namenode_active):
# check and run the format command in the HA deployment scenario
# only format the "active" namenode in an HA deployment
if force:
ExecuteHadoop('namenode -format',
bin_dir=params.hadoop_bin_dir,
conf_dir=hadoop_conf_dir,
logoutput=True)
else:
nn_name_dirs = params.dfs_name_dir.split(',')
if not is_namenode_formatted(params):
try:
Execute(format("hdfs --config {hadoop_conf_dir} namenode -format -nonInteractive"),
user = params.hdfs_user,
path = [params.hadoop_bin_dir],
logoutput=True
)
except Fail:
# We need to clean-up mark directories, so we can re-run format next time.
for nn_name_dir in nn_name_dirs:
Execute(format("rm -rf {nn_name_dir}/*"),
user = params.hdfs_user,
)
raise
for m_dir in mark_dir:
Directory(m_dir,
create_parents = True
)
def is_namenode_formatted(params):
old_mark_dirs = params.namenode_formatted_old_mark_dirs
mark_dirs = params.namenode_formatted_mark_dirs
nn_name_dirs = params.dfs_name_dir.split(',')
marked = False
# Check if name directories have been marked as formatted
for mark_dir in mark_dirs:
if os.path.isdir(mark_dir):
marked = True
Logger.info(format("{mark_dir} exists. Namenode DFS already formatted"))
# Ensure that all mark dirs created for all name directories
if marked:
for mark_dir in mark_dirs:
Directory(mark_dir,
create_parents = True
)
return marked
# Move all old format markers to new place
for old_mark_dir in old_mark_dirs:
if os.path.isdir(old_mark_dir):
for mark_dir in mark_dirs:
Execute(('cp', '-ar', old_mark_dir, mark_dir),
sudo = True
)
marked = True
Directory(old_mark_dir,
action = "delete"
)
elif os.path.isfile(old_mark_dir):
for mark_dir in mark_dirs:
Directory(mark_dir,
create_parents = True,
)
Directory(old_mark_dir,
action = "delete"
)
marked = True
if marked:
return True
# Check if name dirs are not empty
for name_dir in nn_name_dirs:
code, out = shell.call(("ls", name_dir))
dir_exists_and_valid = bool(not code)
if not dir_exists_and_valid: # situations if disk exists but is crashed at the moment (ls: reading directory ...: Input/output error)
Logger.info(format("NameNode will not be formatted because the directory {name_dir} is missing or cannot be checked for content. {out}"))
return True
try:
Execute(format("ls {name_dir} | wc -l | grep -q ^0$"),
)
except Fail:
Logger.info(format("NameNode will not be formatted since {name_dir} exists and contains content"))
return True
return False
@OsFamilyFuncImpl(os_family=OsFamilyImpl.DEFAULT)
def decommission():
import params
hdfs_user = params.hdfs_user
conf_dir = params.hadoop_conf_dir
user_group = params.user_group
nn_kinit_cmd = params.nn_kinit_cmd
File(params.exclude_file_path,
content=Template("exclude_hosts_list.j2"),
owner=hdfs_user,
group=user_group
)
if params.hdfs_include_file:
File(params.include_file_path,
content=Template("include_hosts_list.j2"),
owner=params.hdfs_user,
group=params.user_group
)
pass
if not params.update_files_only:
Execute(nn_kinit_cmd,
user=hdfs_user
)
if params.dfs_ha_enabled:
# due to a bug in hdfs, refreshNodes will not run on both namenodes so we
# need to execute each command scoped to a particular namenode
nn_refresh_cmd = format('dfsadmin -fs hdfs://{namenode_rpc} -refreshNodes')
else:
nn_refresh_cmd = format('dfsadmin -fs {namenode_address} -refreshNodes')
ExecuteHadoop(nn_refresh_cmd,
user=hdfs_user,
conf_dir=conf_dir,
bin_dir=params.hadoop_bin_dir)
@OsFamilyFuncImpl(os_family=OSConst.WINSRV_FAMILY)
def decommission():
import params
hdfs_user = params.hdfs_user
conf_dir = params.hadoop_conf_dir
File(params.exclude_file_path,
content=Template("exclude_hosts_list.j2"),
owner=hdfs_user
)
if params.hdfs_include_file:
File(params.include_file_path,
content=Template("include_hosts_list.j2"),
owner=params.hdfs_user
)
pass
if params.dfs_ha_enabled:
# due to a bug in hdfs, refreshNodes will not run on both namenodes so we
# need to execute each command scoped to a particular namenode
nn_refresh_cmd = format('cmd /c hadoop dfsadmin -fs hdfs://{namenode_rpc} -refreshNodes')
else:
nn_refresh_cmd = format('cmd /c hadoop dfsadmin -fs {namenode_address} -refreshNodes')
Execute(nn_refresh_cmd, user=hdfs_user)
def bootstrap_standby_namenode(params, use_path=False):
mark_dirs = params.namenode_bootstrapped_mark_dirs
bin_path = os.path.join(params.hadoop_bin_dir, '') if use_path else ""
try:
iterations = 50
bootstrapped = False
bootstrap_cmd = format("{bin_path}hdfs namenode -bootstrapStandby -nonInteractive")
    # Blueprint-based deployments start both NameNodes in parallel and occasionally
# the first attempt to bootstrap may fail. Depending on how it fails the
# second attempt may not succeed (e.g. it may find the folder and decide that
# bootstrap succeeded). The solution is to call with -force option but only
# during initial start
if params.command_phase == "INITIAL_START":
# force bootstrap in INITIAL_START phase
bootstrap_cmd = format("{bin_path}hdfs namenode -bootstrapStandby -nonInteractive -force")
elif is_namenode_bootstrapped(params):
      # Once out of the INITIAL_START phase, bootstrap only if we couldn't bootstrap during cluster deployment
return True
Logger.info("Boostrapping standby namenode: %s" % (bootstrap_cmd))
for i in range(iterations):
Logger.info('Try %d out of %d' % (i+1, iterations))
code, out = shell.call(bootstrap_cmd, logoutput=False, user=params.hdfs_user)
if code == 0:
Logger.info("Standby namenode bootstrapped successfully")
bootstrapped = True
break
elif code == 5:
Logger.info("Standby namenode already bootstrapped")
bootstrapped = True
break
else:
Logger.warning('Bootstrap standby namenode failed with %d error code. Will retry' % (code))
except Exception as ex:
Logger.error('Bootstrap standby namenode threw an exception. Reason %s' %(str(ex)))
if bootstrapped:
for mark_dir in mark_dirs:
Directory(mark_dir,
create_parents = True
)
return bootstrapped
def is_namenode_bootstrapped(params):
mark_dirs = params.namenode_bootstrapped_mark_dirs
nn_name_dirs = params.dfs_name_dir.split(',')
marked = False
# Check if name directories have been marked as formatted
for mark_dir in mark_dirs:
if os.path.isdir(mark_dir):
marked = True
Logger.info(format("{mark_dir} exists. Standby Namenode already bootstrapped"))
break
# Ensure that all mark dirs created for all name directories
if marked:
for mark_dir in mark_dirs:
Directory(mark_dir,
create_parents = True
)
return marked
def find_timeout():
import params
if isinstance(params.command_timeout, (int, long)):
return params.command_timeout
return int(params.command_timeout)
@retry(sleep_time=5, backoff_factor=2, err_class=Fail, timeout_func=find_timeout)
def is_this_namenode_active():
"""
Gets whether the current NameNode is Active. This function will wait until the NameNode is
listed as being either Active or Standby before returning a value. This is to ensure that
that if the other NameNode is Active, we ensure that this NameNode has fully loaded and
registered in the event that the other NameNode is going to be restarted. This prevents
a situation where we detect the other NameNode as Active before this NameNode has fully booted.
If the other Active NameNode is then restarted, there can be a loss of service if this
NameNode has not entered Standby.
"""
import params
# namenode_ha_utils.get_namenode_states returns a tuple of three lists, (active, standby, unknown),
# for example: ([('nn1', 'c6401.ambari.apache.org:50070')], [('nn2', 'c6402.ambari.apache.org:50070')], [])
#
namenode_states = namenode_ha_utils.get_namenode_states(params.hdfs_site, params.security_enabled,
params.hdfs_user, times=5, sleep_time=5, backoff_factor=2)
# unwraps [('nn1', 'c6401.ambari.apache.org:50070')]
active_namenodes = [] if len(namenode_states[0]) < 1 else namenode_states[0]
# unwraps [('nn2', 'c6402.ambari.apache.org:50070')]
standby_namenodes = [] if len(namenode_states[1]) < 1 else namenode_states[1]
# check to see if this is the active NameNode
for entry in active_namenodes:
if params.namenode_id in entry:
return True
# if this is not the active NameNode, then we must wait for it to register as standby
for entry in standby_namenodes:
if params.namenode_id in entry:
return False
# at this point, this NameNode is neither active nor standby - we must wait to ensure it
# enters at least one of these roles before returning a verdict - the retry annotation will catch
# this failure and retry the function automatically
raise Fail(format("The NameNode {namenode_id} is not listed as Active or Standby, waiting..."))
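# For illustration only: a minimal, self-contained sketch of the kind of
# retry-with-backoff decorator that the @retry annotation above relies on.
# The real implementation comes from the resource_management library; the
# names and behaviour below are a hypothetical sketch, not that code.
import functools
import time

def _retry_sketch(sleep_time=5, backoff_factor=2, err_class=Exception, timeout_func=lambda: 60):
    def decorator(func):
        @functools.wraps(func)
        def wrapper(*args, **kwargs):
            delay = sleep_time
            deadline = time.time() + timeout_func()
            while True:
                try:
                    return func(*args, **kwargs)
                except err_class:
                    if time.time() + delay > deadline:
                        raise  # out of time; surface the last failure
                    time.sleep(delay)
                    delay *= backoff_factor
        return wrapper
    return decorator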
|
|
import difflib
import filecmp
import os.path
import sys
import tempfile
import unittest
def _get_parent_dir(path):
"""utility function to get parent dir
"""
return os.path.abspath(os.path.join(os.path.abspath(path), os.pardir))
root_dir = _get_parent_dir(_get_parent_dir(__file__))
test_dir = _get_parent_dir(__file__)
gproj_dir = os.path.join(test_dir, 'gproject')
sys.path.insert(0, root_dir+'/src')
import guerilla_parser
import guerilla_parser.util as grl_util
default_gprojects = [
gproj_dir+'/1.4.14_01_default/1.4.14_01_default.gproject',
gproj_dir+'/2.0.0a31_02_default/2.0.0a31_02_default.gproject'
]
default_glayers = [
gproj_dir+'/1.4.14_01_default/1.4.14_01_default.glayer',
gproj_dir+'/2.0.0a31_02_default/2.0.0a31_02_default.glayer'
]
default_grendergraphs = [
gproj_dir+'/1.4.14_01_default/1.4.14_01_default.grendergraph',
gproj_dir+'/2.0.0a31_02_default/2.0.0a31_02_default.grendergraph'
]
gprojects = [
gproj_dir+'/1.4.13_01/1.4.13_01.gproject',
gproj_dir+'/1.4.19_01_node_name/1.4.19_01.gproject',
gproj_dir+'/1.4.19_01_anim/1.4.19_01_anim.gproject',
gproj_dir+'/2.0.0a31_01/2.0.0a31_01.gproject',
gproj_dir+'/2.0.7/2.0.7.gproject',
gproj_dir+'/2.0.7/2.0.7_ref.gproject', # unsolvable even by Guerilla
gproj_dir+'/2.1.0b19/2.1.0b19_archreference.gproject',
gproj_dir+'/2.3.0b16/2.3.0b16.gproject',
gproj_dir+'/2.3.0b16/texture_colorspace.gproject',
gproj_dir+'/2.1.3/animmode_loop.gproject',
]
all_gprojects = [f for f in default_gprojects]
all_gprojects += [f for f in gprojects]
all_gfiles = [f for f in all_gprojects]
all_gfiles += [f for f in default_glayers]
all_gfiles += [f for f in default_grendergraphs]
# dynamic test pattern inspired from:
# https://stackoverflow.com/questions/32899/how-to-generate-dynamic-parametrized-unit-tests-in-python
# test pattern:
# TestSequence: the main empty class
# test_generator_<test_name> returns a function to test the given path
# '-> test_<test_name> function that runs the test
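# Illustrative only: a tiny, self-contained instance of the pattern described
# above (hypothetical demo names, not part of the original suite). A generator
# returns a closure over the parameter, and setattr() attaches it to the test
# class under a unique method name.
class _PatternDemo(unittest.TestCase):
    pass

def _make_roundtrip_test(word):
    def test_func(self):
        # upper() then lower() round-trips for plain lowercase ascii words
        self.assertEqual(word.upper().lower(), word)
    return test_func

for _word in ('foo', 'bar'):
    setattr(_PatternDemo, 'test_roundtrip_' + _word, _make_roundtrip_test(_word))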
def _gen_test_name(name, path):
"""Macro to properly generate test method name from given test `name` and
file `path`
:return: test name
:rtype: str
"""
return 'test_{}_{}'.format(name, path.replace(test_dir, '')
.replace('.', '_'))
g_parsed = {}
def test_generator_parse(path):
def test_func(self):
p = guerilla_parser.parse(path)
g_parsed[path] = p
return test_func
def test_generator_path_to_node(path):
"""Generate a function testing given `path`.
:param path: gproject path to test
:return: function
"""
def test_func(self):
"""check returned path can be used to find node back
"""
# import cProfile, pstats, StringIO
# pr = cProfile.Profile()
# pr.enable()
assert path in g_parsed
p = g_parsed[path]
with self.assertRaises(guerilla_parser.PathError) as _:
p.path_to_node("BLAH")
for node in p.nodes:
self.assertIs(node, p.path_to_node(node.path))
for node in p._implicit_nodes:
self.assertIs(node, p.path_to_node(node.path))
# pr.disable()
# s = StringIO.StringIO()
# sortby = 'cumulative'
# ps = pstats.Stats(pr, stream=s).sort_stats(sortby)
# ps.print_stats()
# print(s.getvalue())
return test_func
def test_generator_path_to_plug(path):
"""Generate a function testing given `path`.
:param path: gproject path to test
:return: function
"""
def test_func(self):
"""check returned path can be used to find plug back
"""
assert path in g_parsed
p = g_parsed[path]
with self.assertRaises(guerilla_parser.PathError) as _:
p.path_to_plug("BLAH")
for plug in p.plugs:
self.assertIs(plug, p.path_to_plug(plug.path))
return test_func
def test_generator_nodes(path):
"""Generate a function testing given `path`.
:param path: gproject path to test
:return: function
"""
def test_func(self):
"""check each node path is unique
"""
assert path in g_parsed
p = g_parsed[path]
# implicit nodes
paths = set()
for node in p._implicit_nodes:
self.assertIsInstance(node, guerilla_parser.GuerillaNode)
self.assertNotIn(node.path, paths)
paths.add(node.path)
# nodes
paths = set()
for node in p.nodes:
self.assertIsInstance(node, guerilla_parser.GuerillaNode)
self.assertNotIn(node.path, paths)
paths.add(node.path)
return test_func
def test_generator_plugs(path):
"""Generate a function testing given `path`.
:param path: gproject path to test
:return: function
"""
def test_func(self):
"""check each plug path is unique
"""
assert path in g_parsed
p = g_parsed[path]
# plugs
paths = set()
for plug in p.plugs:
self.assertIsInstance(plug, guerilla_parser.GuerillaPlug)
self.assertNotIn(plug.path, paths)
paths.add(plug.path)
return test_func
def test_generator_raises(path):
"""Generate a function testing given `path`.
:param path: gproject path to test
:return: function
"""
def test_func(self):
assert path in g_parsed
p = g_parsed[path]
root_node = p.root
with self.assertRaises(guerilla_parser.PathError):
root_node.path
with self.assertRaises(guerilla_parser.ChildError):
root_node.get_child('TAGADAPOUETPOUET')
with self.assertRaises(guerilla_parser.PathError):
p.path_to_node('TAGADAPOUETPOUET')
with self.assertRaises(guerilla_parser.PathError):
grl_util.aov_node(p, 'RenderPass', 'Layer', 'TAGADAPOUETPOUET')
return test_func
def test_generator_child_unique(path):
"""Generate a function testing given `path`.
:param path: gproject path to test
:return: function
"""
def test_func(self):
assert path in g_parsed
p = g_parsed[path]
for node in p.nodes:
child_names = set()
for child in node.children:
self.assertNotIn(child.name, child_names)
child_names.add(child.name)
return test_func
def test_generator_arch_ref(path):
"""Generate a function testing given `path`.
:param path: gproject path to test
:return: function
"""
def test_func(self):
assert path in g_parsed
p = g_parsed[path]
for node in p.nodes:
if node.type == 'ArchReference':
# this plug must exist
node.get_plug('ReferenceFileName')
else:
with self.assertRaises(KeyError):
node.get_plug('ReferenceFileName')
return test_func
def test_generator_default_gprojects(path):
"""Generate a function testing given `path`.
:param path: gproject path to test
:return: function
"""
def test_func(self):
assert path in g_parsed
p = g_parsed[path]
self.assertIsInstance(p, guerilla_parser.GuerillaParser)
self.assertEqual(p.doc_format_rev, 19)
doc = p.root
self.assertEqual(doc.id, 1)
self.assertEqual(doc.name, 'LUIDocument')
self.assertEqual(doc.type, 'GADocument')
self.assertEqual(doc.get_plug('AutoKey').value, True)
self.assertEqual(doc.get_plug('InvertT').value, False)
self.assertEqual(doc.get_plug('LastFrame').value, 50)
self.assertEqual(doc.get_plug('Membership').value, "All")
self.assertEqual(doc.get_plug('CurveWidthShape').value, 1.5)
pref = doc.get_child('Preferences')
self.assertEqual(pref.id, 2)
self.assertEqual(pref.name, 'Preferences')
self.assertEqual(pref.type, 'Preferences')
self.assertEqual(pref.get_plug('LightAmbient').value, [0, 0, 0, 1])
self.assertEqual(pref.get_plug('LightSpecular').value,
[0.5, 0.5, 0.5, 1])
self.assertEqual(pref.get_plug('SearchPathTexture').value, "")
for node in p.nodes:
for child in node.children:
self.assertIs(node.get_child(child.name), child)
for plug in node.plugs:
self.assertIs(node.get_plug(plug.name), plug)
# aov
aov = grl_util.aov_node(p, 'RenderPass', 'Layer', 'Beauty')
self.assertIsInstance(aov, guerilla_parser.GuerillaNode)
self.assertEqual(aov.path, "|RenderPass|Layer|Input1")
rp_iter = (n for n in p.nodes if n.type == 'RenderPass')
for rp in rp_iter:
rl_iter = (n for n in rp.children if n.type == 'RenderLayer')
for rl in rl_iter:
for aov in rl.children:
self.assertEqual(aov.type, "LayerOut")
aov_2 = grl_util.aov_node(p, rp.name, rl.name,
aov.display_name)
self.assertIs(aov, aov_2)
# path to node
for node in p.nodes:
self.assertEqual(node, p.path_to_node(node.path))
return test_func
def test_generator_aovs(path):
"""Generate a function testing given `path`.
:param path: gproject path to test
:return: function
"""
def test_func(self):
"""test render pass render layer and AOV particularities
"""
assert path in g_parsed
p = g_parsed[path]
aov = grl_util.aov_node(p, 'RenderPass', 'Layer', 'Beauty')
self.assertIsInstance(aov, guerilla_parser.GuerillaNode)
self.assertEqual(aov.path, "|RenderPass|Layer|Input1")
rp_iter = (n for n in p.nodes if n.type == 'RenderPass')
for rp in rp_iter:
rl_iter = (n for n in rp.children if n.type == 'RenderLayer')
for rl in rl_iter:
for aov in rl.children:
self.assertEqual(aov.type, "LayerOut")
aov_2 = grl_util.aov_node(p, rp.name, rl.name,
aov.display_name)
self.assertIs(aov, aov_2)
return test_func
def test_generator_default_glayers(path):
"""Generate a function testing given `path`.
:param path: gproject path to test
:return: function
"""
def test_func(self):
assert path in g_parsed
p = g_parsed[path]
root = p.root
self.assertEqual(root.id, 1)
self.assertEqual(root.name, 'RenderPass')
self.assertEqual(root.type, 'RenderPass')
self.assertEqual(root.get_plug('AutoBuildTextures').value, True)
self.assertEqual(root.get_plug('BalanceReyesDistribution').value, False)
self.assertEqual(root.get_plug('BrdfSamples').value, 16)
self.assertEqual(root.get_plug('ColorMode').value, "multiply")
self.assertEqual(root.get_plug('DeepCompression').value, 0.1)
self.assertEqual(root.get_plug('DefaultSurfaceColor').value,
[0.0, 0.0, 0.0])
return test_func
class TestSequence(unittest.TestCase):
pass
for path in all_gfiles:
test_name = _gen_test_name('001_parse', path)
test = test_generator_parse(path)
assert not hasattr(TestSequence, test_name), test_name
setattr(TestSequence, test_name, test)
test_name = _gen_test_name('path_to_node', path)
test = test_generator_path_to_node(path)
assert not hasattr(TestSequence, test_name), test_name
setattr(TestSequence, test_name, test)
test_name = _gen_test_name('path_to_plug', path)
test = test_generator_path_to_plug(path)
assert not hasattr(TestSequence, test_name), test_name
setattr(TestSequence, test_name, test)
test_name = _gen_test_name('nodes', path)
test = test_generator_nodes(path)
setattr(TestSequence, test_name, test)
test_name = _gen_test_name('plugs', path)
test = test_generator_plugs(path)
setattr(TestSequence, test_name, test)
test_name = _gen_test_name('raises', path)
test = test_generator_raises(path)
setattr(TestSequence, test_name, test)
test_name = _gen_test_name('child_unique', path)
test = test_generator_child_unique(path)
setattr(TestSequence, test_name, test)
test_name = _gen_test_name('arch_ref', path)
test = test_generator_arch_ref(path)
setattr(TestSequence, test_name, test)
for path in default_gprojects:
test_name = _gen_test_name('default_gproject', path)
test = test_generator_default_gprojects(path)
assert not hasattr(TestSequence, test_name)
setattr(TestSequence, test_name, test)
test_name = _gen_test_name('aovs', path)
test = test_generator_aovs(path)
assert not hasattr(TestSequence, test_name)
setattr(TestSequence, test_name, test)
for path in default_glayers:
test_name = _gen_test_name('default_glayers', path)
test = test_generator_default_glayers(path)
assert not hasattr(TestSequence, test_name)
setattr(TestSequence, test_name, test)
def test_generator_set_plug_value(path):
"""Generate a function testing given `path`.
:param path: gproject path to test
:return: function
"""
def test_func(self):
p = guerilla_parser.parse(path)
node = p.path_to_node("|Preferences|RenderViewport")
plug = node.get_plug("ColorMode")
self.assertEqual(plug.value, 'multiply')
p.set_plug_value([(plug, 'divide')])
self.assertEqual(plug.value, 'divide')
return test_func
class SetPlugValueTestCase(unittest.TestCase):
pass
for gproject in default_gprojects:
test_name = _gen_test_name('set_plug_value', gproject)
test = test_generator_set_plug_value(gproject)
setattr(SetPlugValueTestCase, test_name, test)
def test_generator_write_file(path):
"""Generate a function testing given `path`.
:param path: gproject path to test
:return: function
"""
def test_func(self):
fd, tmp_file = tempfile.mkstemp()
os.close(fd)
p = guerilla_parser.parse(
path,
diagnose=False)
p.write(tmp_file)
# no change
self.assertFalse(p.has_changed)
self.assertTrue(filecmp.cmp(path, tmp_file))
node = p.path_to_node("|Preferences|RenderViewport")
# get value
plug = node.get_plug("ColorMode")
self.assertEqual(plug.value, 'multiply')
# set value
p.set_plug_value([(plug, 'divide')])
self.assertEqual(plug.value, 'divide')
p.write(tmp_file)
# has changed
self.assertTrue(p.has_changed)
self.assertFalse(filecmp.cmp(path, tmp_file))
# get diff
old = []
new = []
for c in difflib.ndiff(p.original_content, p.modified_content):
if c.startswith("- "):
old.append(c[2:])
continue
if c.startswith("+ "):
new.append(c[2:])
continue
old = "".join(old)
new = "".join(new)
# not a typo: difflib treats the shared "i" as unchanged, so it appears on neither side of the diff
self.assertEqual(old, 'multply')
self.assertEqual(new, 'dvide')
os.remove(tmp_file)
return test_func
class WriteFileTestCase(unittest.TestCase):
pass
for gproject in default_gprojects:
test_name = _gen_test_name('write_file', gproject)
test = test_generator_write_file(gproject)
setattr(WriteFileTestCase, test_name, test)
class TestStringMethods(unittest.TestCase):
def test_read(self):
p = guerilla_parser.parse(default_gprojects[1])
self.assertIsInstance(p, guerilla_parser.GuerillaParser)
self.assertEqual(p.doc_format_rev, 19)
doc = p.root
self.assertEqual(doc.id, 1)
self.assertEqual(doc.name, 'LUIDocument')
self.assertEqual(doc.type, 'GADocument')
self.assertEqual(doc.get_plug('InternalDirectLighting').value, True)
self.assertEqual(doc.get_plug('InvertT').value, False)
self.assertEqual(doc.get_plug('LastFrame').value, 50)
self.assertEqual(doc.get_plug('Membership').value, "All")
self.assertEqual(doc.get_plug('CurveWidthShape').value, 1.5)
pref = doc.get_child('Preferences')
self.assertEqual(pref.id, 2)
self.assertEqual(pref.name, 'Preferences')
self.assertEqual(pref.type, 'Preferences')
self.assertEqual(pref.get_plug('LightAmbient').value, [0, 0, 0, 1])
self.assertEqual(pref.get_plug('LightSpecular').value, [0.5, 0.5, 0.5, 1])
self.assertEqual(pref.get_plug('SearchPathTexture').value, "")
for node in p.nodes:
for child in node.children:
self.assertIs(node.get_child(child.name), child)
for plug in node.plugs:
self.assertIs(node.get_plug(plug.name), plug)
class TestArchReferenceMethods(unittest.TestCase):
def test_read(self):
p = guerilla_parser.parse(gprojects[6])
self.assertIsInstance(p, guerilla_parser.GuerillaParser)
self.assertEqual(p.doc_format_rev, 19)
doc = p.root
self.assertEqual(doc.id, 1)
self.assertEqual(doc.name, 'LUIDocument')
self.assertEqual(doc.type, 'GADocument')
foo_node = doc.get_child('foo')
self.assertEqual(foo_node.type, 'ArchReference')
self.assertEqual(foo_node.get_plug('ReferenceFileName').value,
'/path/to/file.abc')
###############################################################################
# Unique string test
###############################################################################
class TestUniqueStringRegexes(unittest.TestCase):
def test_read(self):
cls = guerilla_parser.GuerillaParser
for raw_str in ('"AttributePlug","$799","MetalProfile",4,types.metal,"$(LIBRARY)/ior/Gold/McPeak.yml"',
):
match_arg = cls._CMD_CREATE_ARG_PARSE.match(raw_str)
self.assertIsNotNone(match_arg)
self.assertEqual(match_arg.group('type'), 'AttributePlug')
self.assertEqual(match_arg.group('parent'), '$799')
self.assertEqual(match_arg.group('name'), 'MetalProfile')
rest = match_arg.group('rest')
self.assertIsNotNone(rest)
match_rest = cls._CREATE_PLUG_REST_PARSE.match(rest)
self.assertIsNotNone(match_rest)
self.assertEqual(match_rest.group('flag'), '4')
self.assertEqual(match_rest.group('type'), 'types.metal')
self.assertIsNone(match_rest.group('param'))
self.assertEqual(match_rest.group('value'),
'"$(LIBRARY)/ior/Gold/McPeak.yml"')
#value = match_rest.group('value')
if __name__ == '__main__':
unittest.main()
|
|
# -----------------------------------------------------------------------------
# Copyright (c) 2014--, The American Gut Development Team.
#
# Distributed under the terms of the BSD 3-clause License.
#
# The full license is in the file LICENSE, distributed with this software.
# -----------------------------------------------------------------------------
from os.path import join, dirname, abspath, exists, isfile
from os import environ
from functools import partial
import warnings
from future import standard_library
with standard_library.hooks():
from configparser import (ConfigParser, NoOptionError,
Error as ConfigParser_Error)
from amgut.lib.locale_data import available_locales # noqa
class MissingConfigSection(ConfigParser_Error):
"""Exception when the config file is missing a required section"""
def __init__(self, section):
super(MissingConfigSection, self).__init__('Missing section(s): %r' %
(section,))
self.section = section
self.args = (section,)
def _warn_on_extra(extra, set_type):
extra = ', '.join(extra)
if extra:
warnings.warn("Extra %s found: %r" % (set_type, extra))
class ConfigurationManager(object):
"""Holds the configuration information
Parameters
----------
None. The filepath to the configuration file is taken from the AG_CONFIG
environment variable, or defaults to ag_config.txt (see Notes).
Attributes
----------
test_environment : bool
Whether we are in a test environment or not
base_data_dir : str
Path to the base directory where all data files are stored
base_log_dir : str
Path to the base directory where the log file will be written
cookie_secret : str
The secret used to secure user session cookies
locale : str
The locale
user : str
The postgres user
password : str
The postgres password for the previous user
database : str
The postgres database to connect to
host : str
The host where the database lives
port : int
The port used to connect to the postgres database in the previous host
goodpassword : str
The correct password for the test account
badpassword : str
The password used for testing on the test account
redis_host : str
The host on which redis is running
redis_port : int
The port that redis is running on
redis_db_id : int
The ID of the redis database
smtp_host : str
The host where the SMTP server lives
smtp_ssl : bool
Whether or not an SSL connection is required by the SMTP host
smtp_port : int
The port the SMTP server is running on
smtp_user : str
The username for connecting to the SMTP server
smtp_password : str
The password for connecting to the SMTP server
Raises
------
IOError
If the AG_CONFIG environment variable is set, but does not point to an
existing file
Notes
-----
- The environment variable AG_CONFIG is checked for a path to the config
file. If the environment variable is not set, the default config file is
used.
"""
def __init__(self):
conf_fp = environ.get('AG_CONFIG') or join(dirname(abspath(__file__)),
'../ag_config.txt')
if not isfile(conf_fp):
raise IOError("The configuration file '%s' is not an "
"existing file" % conf_fp)
config = ConfigParser(defaults={
'open_humans_client_id': '',
'open_humans_client_secret': '',
'open_humans_base_url': 'https://openhumans.org',
})
self.defaults = set(config.defaults())
# Parse the configuration file
with open(conf_fp, 'U') as conf_file:
config.readfp(conf_file)
_expected_sections = {'main', 'postgres', 'test', 'redis', 'email',
'thirdparty'}
missing = _expected_sections - set(config.sections())
if missing:
raise MissingConfigSection(', '.join(missing))
extra = set(config.sections()) - _expected_sections
_warn_on_extra(extra, 'sections')
self._get_main(config)
self._get_postgres(config)
self._get_test(config)
self._get_redis(config)
self._get_email(config)
self._get_third_party(config)
def get_settings(self):
"""Returns settings that should be stored in postgres settings table
Returns
-------
list of tuple
Tuples are (parameter, argument)
"""
return [('test_environment', self.test_environment),
('base_data_dir', self.base_data_dir),
('locale', self.locale)]
def _get_main(self, config):
"""Get the configuration of the main section"""
expected_options = {'name', 'shorthand', 'test_environment',
'base_data_dir', 'locale', 'base_url',
'cookie_secret', 'error_email', 'sitebase'}
_warn_on_extra(set(config.options('main')) - expected_options -
self.defaults, 'main section option(s)')
get = partial(config.get, 'main')
getboolean = partial(config.getboolean, 'main')
self.project_name = get('NAME')
self.project_shorthand = get('SHORTHAND')
self.test_environment = getboolean('TEST_ENVIRONMENT')
self.base_data_dir = get('BASE_DATA_DIR')
self.base_log_dir = get('BASE_LOG_DIR')
self.base_url = get('BASE_URL')
self.cookie_secret = get('COOKIE_SECRET')
self.locale = get('LOCALE')
self.error_email = get('ERROR_EMAIL')
self.sitebase = get('SITEBASE')
if not exists(self.base_data_dir):
raise IOError("Directory %s does not exist!" % self.base_data_dir)
if self.locale not in available_locales:
raise ValueError("%s is not a recognized locale. Please select "
"from %r" % (self.locale, available_locales))
def _get_postgres(self, config):
"""Get the configuration of the postgres section"""
expected_options = {'user', 'password', 'database', 'host', 'port'}
_warn_on_extra(set(config.options('postgres')) - expected_options -
self.defaults, 'postgres section option(s)')
get = partial(config.get, 'postgres')
getint = partial(config.getint, 'postgres')
self.user = get('USER')
try:
self.password = get('PASSWORD')
except NoOptionError as e:
if self.test_environment:
self.password = None
else:
raise e
self.database = get('DATABASE')
self.host = get('HOST')
self.port = getint('PORT')
def _get_test(self, config):
"""Get the configuration of the test section"""
expected_options = {'goodpassword', 'badpassword'}
_warn_on_extra(set(config.options('test')) - expected_options -
self.defaults, 'test section option(s)')
get = partial(config.get, 'test')
self.goodpassword = get('GOODPASSWORD')
self.badpassword = get('BADPASSWORD')
def _get_redis(self, config):
"""Get the configuration of the redis section"""
expected_options = {'host', 'port', 'db_id'}
_warn_on_extra(set(config.options('redis')) - expected_options -
self.defaults, 'redis section option(s)')
get = partial(config.get, 'redis')
getint = partial(config.getint, 'redis')
self.redis_host = get('HOST')
self.redis_port = getint('PORT')
self.redis_db_id = getint('DB_ID')
def _get_email(self, config):
get = partial(config.get, 'email')
getint = partial(config.getint, 'email')
getbool = partial(config.getboolean, 'email')
self.smtp_host = get('HOST')
self.smtp_ssl = getbool('SSL')
self.smtp_port = getint('PORT')
self.smtp_user = get('USERNAME')
self.smtp_password = get('PASSWORD')
def _get_third_party(self, config):
get = partial(config.get, 'thirdparty')
self.vioscreen_regcode = get('VIOSCREEN_REGCODE')
self.vioscreen_cryptokey = get('VIOSCREEN_CRYPTOKEY')
self.open_humans_base_url = get('OPEN_HUMANS_BASE_URL')
self.open_humans_client_id = get('OPEN_HUMANS_CLIENT_ID')
self.open_humans_client_secret = get('OPEN_HUMANS_CLIENT_SECRET')
AMGUT_CONFIG = ConfigurationManager()
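# For reference, a minimal ag_config.txt covering the sections and options the
# parser above expects might look like the sketch below. All values are
# hypothetical placeholders (not real credentials); the open_humans_* options
# may be omitted because defaults are supplied to ConfigParser.
#
#     [main]
#     name = American Gut
#     shorthand = AG
#     test_environment = True
#     base_data_dir = /tmp/ag_data
#     base_log_dir = /tmp/ag_logs
#     base_url = http://localhost:8888
#     cookie_secret = change_me
#     locale = american_gut
#     error_email = [email protected]
#     sitebase = /
#
#     [postgres]
#     user = postgres
#     password = secret
#     database = ag_test
#     host = localhost
#     port = 5432
#
#     [test]
#     goodpassword = correcthorse
#     badpassword = wrongpassword
#
#     [redis]
#     host = localhost
#     port = 6379
#     db_id = 0
#
#     [email]
#     host = localhost
#     ssl = False
#     port = 25
#     username = smtp_user
#     password = smtp_pass
#
#     [thirdparty]
#     vioscreen_regcode = placeholder
#     vioscreen_cryptokey = placeholder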
|
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: (c) 2019, Dag Wieers (@dagwieers) <[email protected]>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = r'''
---
module: mso_schema_site_vrf_region_cidr
short_description: Manage site-local VRF region CIDRs in schema template
description:
- Manage site-local VRF region CIDRs in schema template on Cisco ACI Multi-Site.
author:
- Dag Wieers (@dagwieers)
version_added: '2.8'
options:
schema:
description:
- The name of the schema.
type: str
required: yes
site:
description:
- The name of the site.
type: str
required: yes
template:
description:
- The name of the template.
type: str
required: yes
vrf:
description:
- The name of the VRF.
type: str
region:
description:
- The name of the region.
type: str
cidr:
description:
- The name of the region CIDR to manage.
type: str
aliases: [ ip ]
primary:
description:
- Whether this is the primary CIDR.
type: bool
state:
description:
- Use C(present) or C(absent) for adding or removing.
- Use C(query) for listing an object or multiple objects.
type: str
choices: [ absent, present, query ]
default: present
notes:
- The ACI MultiSite PATCH API has a deficiency requiring some objects to be referenced by index.
This can cause silent corruption on concurrent access when changing/removing an object, as
the wrong object may be referenced. This module is affected by this deficiency.
seealso:
- module: mso_schema_site_vrf_region
- module: mso_schema_site_vrf_region_cidr_subnet
- module: mso_schema_template_vrf
extends_documentation_fragment: mso
'''
EXAMPLES = r'''
- name: Add a new site VRF region CIDR
mso_schema_site_vrf_region_cidr:
host: mso_host
username: admin
password: SomeSecretPassword
schema: Schema1
site: Site1
template: Template1
vrf: VRF1
region: us-west-1
cidr: 14.14.14.1/24
state: present
delegate_to: localhost
- name: Remove a site VRF region CIDR
mso_schema_site_vrf_region_cidr:
host: mso_host
username: admin
password: SomeSecretPassword
schema: Schema1
site: Site1
template: Template1
vrf: VRF1
region: us-west-1
cidr: 14.14.14.1/24
state: absent
delegate_to: localhost
- name: Query a specific site VRF region CIDR
mso_schema_site_vrf_region_cidr:
host: mso_host
username: admin
password: SomeSecretPassword
schema: Schema1
site: Site1
template: Template1
vrf: VRF1
region: us-west-1
cidr: 14.14.14.1/24
state: query
delegate_to: localhost
register: query_result
- name: Query all site VRF region CIDR
mso_schema_site_vrf_region_cidr:
host: mso_host
username: admin
password: SomeSecretPassword
schema: Schema1
site: Site1
template: Template1
vrf: VRF1
region: us-west-1
state: query
delegate_to: localhost
register: query_result
'''
RETURN = r'''
'''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.network.aci.mso import MSOModule, mso_argument_spec
def main():
argument_spec = mso_argument_spec()
argument_spec.update(
schema=dict(type='str', required=True),
site=dict(type='str', required=True),
template=dict(type='str', required=True),
vrf=dict(type='str', required=True),
region=dict(type='str', required=True),
cidr=dict(type='str', aliases=['ip']), # This parameter is not required for querying all objects
primary=dict(type='bool'),
state=dict(type='str', default='present', choices=['absent', 'present', 'query']),
)
module = AnsibleModule(
argument_spec=argument_spec,
supports_check_mode=True,
required_if=[
['state', 'absent', ['cidr']],
['state', 'present', ['cidr']],
],
)
schema = module.params['schema']
site = module.params['site']
template = module.params['template']
vrf = module.params['vrf']
region = module.params['region']
cidr = module.params['cidr']
primary = module.params['primary']
state = module.params['state']
mso = MSOModule(module)
# Get schema_id
schema_obj = mso.get_obj('schemas', displayName=schema)
if not schema_obj:
mso.fail_json(msg="Provided schema '{0}' does not exist".format(schema))
schema_path = 'schemas/{id}'.format(**schema_obj)
schema_id = schema_obj['id']
# Get site
site_id = mso.lookup_site(site)
# Get site_idx
sites = [(s['siteId'], s['templateName']) for s in schema_obj['sites']]
if (site_id, template) not in sites:
mso.fail_json(msg="Provided site/template '{0}-{1}' does not exist. Existing sites/templates: {2}".format(site, template, ', '.join(sites)))
# Schema-access uses indexes
site_idx = sites.index((site_id, template))
# Path-based access uses site_id-template
site_template = '{0}-{1}'.format(site_id, template)
# Get VRF
vrf_ref = mso.vrf_ref(schema_id=schema_id, template=template, vrf=vrf)
vrfs = [v['vrfRef'] for v in schema_obj['sites'][site_idx]['vrfs']]
if vrf_ref not in vrfs:
mso.fail_json(msg="Provided vrf '{0}' does not exist. Existing vrfs: {1}".format(vrf, ', '.join(vrfs)))
vrf_idx = vrfs.index(vrf_ref)
# Get Region
regions = [r['name'] for r in schema_obj['sites'][site_idx]['vrfs'][vrf_idx]['regions']]
if region not in regions:
mso.fail_json(msg="Provided region '{0}' does not exist. Existing regions: {1}".format(region, ', '.join(regions)))
region_idx = regions.index(region)
# Get CIDR
cidrs = [c['ip'] for c in schema_obj['sites'][site_idx]['vrfs'][vrf_idx]['regions'][region_idx]['cidrs']]
if cidr is not None and cidr in cidrs:
cidr_idx = cidrs.index(cidr)
# FIXME: Changes based on index are DANGEROUS
cidr_path = '/sites/{0}/vrfs/{1}/regions/{2}/cidrs/{3}'.format(site_template, vrf, region, cidr_idx)
mso.existing = schema_obj['sites'][site_idx]['vrfs'][vrf_idx]['regions'][region_idx]['cidrs'][cidr_idx]
if state == 'query':
if cidr is None:
mso.existing = schema_obj['sites'][site_idx]['vrfs'][vrf_idx]['regions'][region_idx]['cidrs']
elif not mso.existing:
mso.fail_json(msg="CIDR IP '{cidr}' not found".format(cidr=cidr))
mso.exit_json()
cidrs_path = '/sites/{0}/vrfs/{1}/regions/{2}/cidrs'.format(site_template, vrf, region)
ops = []
mso.previous = mso.existing
if state == 'absent':
if mso.existing:
mso.sent = mso.existing = {}
ops.append(dict(op='remove', path=cidr_path))
elif state == 'present':
if not mso.existing:
if primary is None:
primary = False
payload = dict(
ip=cidr,
primary=primary,
)
mso.sanitize(payload, collate=True)
if mso.existing:
ops.append(dict(op='replace', path=cidr_path, value=mso.sent))
else:
ops.append(dict(op='add', path=cidrs_path + '/-', value=mso.sent))
mso.existing = mso.proposed
if not module.check_mode:
mso.request(schema_path, method='PATCH', data=ops)
mso.exit_json()
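# For illustration only (values hypothetical): a 'present' run that creates a new
# CIDR sends a single JSON-patch style operation to the schema PATCH endpoint,
# roughly:
#
#     [{'op': 'add',
#       'path': '/sites/<site_id>-Template1/vrfs/VRF1/regions/us-west-1/cidrs/-',
#       'value': {'ip': '14.14.14.1/24', 'primary': False}}]
#
# while 'absent' sends a single {'op': 'remove', 'path': '.../cidrs/<idx>'}
# operation addressed by index, which is the deficiency flagged in the notes above.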
if __name__ == "__main__":
main()
|
|
#!/usr/bin/env python3
# Copyright (c) 2014-2021 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test the pruning code.
WARNING:
This test uses 4GB of disk space.
This test takes 30 mins or more (up to 2 hours)
"""
import os
from test_framework.blocktools import create_coinbase
from test_framework.messages import CBlock
from test_framework.script import (
CScript,
OP_NOP,
OP_RETURN,
)
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import (
assert_equal,
assert_greater_than,
assert_raises_rpc_error,
)
# Rescans start at the earliest block up to 2 hours before a key timestamp, so
# the manual prune RPC avoids pruning blocks in the same window to be
# compatible with pruning based on key creation time.
TIMESTAMP_WINDOW = 2 * 60 * 60
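# In this test, the timestamp variant of manual pruning applies this window: the
# height() helper in manual_test() passes getblockheader(...)["time"] +
# TIMESTAMP_WINDOW to pruneblockchain, i.e. a timestamp safely past the rescan
# window for a block at that height.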
def mine_large_blocks(node, n):
# Make a large scriptPubKey for the coinbase transaction. This is OP_RETURN
# followed by 950k of OP_NOP. This would be non-standard in a non-coinbase
# transaction but is consensus valid.
# Set the nTime if this is the first time this function has been called.
# A static variable ensures that time is monotonically increasing and is therefore
# different for each block created => blockhash is unique.
if "nTime" not in mine_large_blocks.__dict__:
mine_large_blocks.nTime = 0
# Get the block parameters for the first block
big_script = CScript([OP_RETURN] + [OP_NOP] * 950000)
best_block = node.getblock(node.getbestblockhash())
height = int(best_block["height"]) + 1
mine_large_blocks.nTime = max(mine_large_blocks.nTime, int(best_block["time"])) + 1
previousblockhash = int(best_block["hash"], 16)
for _ in range(n):
# Build the coinbase transaction (with large scriptPubKey)
coinbase_tx = create_coinbase(height)
coinbase_tx.vin[0].nSequence = 2 ** 32 - 1
coinbase_tx.vout[0].scriptPubKey = big_script
coinbase_tx.rehash()
# Build the block
block = CBlock()
block.nVersion = best_block["version"]
block.hashPrevBlock = previousblockhash
block.nTime = mine_large_blocks.nTime
block.nBits = int('207fffff', 16)
block.nNonce = 0
block.vtx = [coinbase_tx]
block.hashMerkleRoot = block.calc_merkle_root()
block.solve()
# Submit to the node
node.submitblock(block.serialize().hex())
previousblockhash = block.sha256
height += 1
mine_large_blocks.nTime += 1
def calc_usage(blockdir):
return sum(os.path.getsize(blockdir + f) for f in os.listdir(blockdir) if os.path.isfile(os.path.join(blockdir, f))) / (1024. * 1024.)
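# A minimal sketch (hypothetical helper, not used by this test) of the
# function-attribute idiom mine_large_blocks relies on for its nTime counter:
# attributes stored on the function object persist across calls, acting like a
# C-style static variable.
def _static_counter_demo():
    if "count" not in _static_counter_demo.__dict__:
        _static_counter_demo.count = 0  # initialised on the first call only
    _static_counter_demo.count += 1
    return _static_counter_demo.count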
class PruneTest(BitcoinTestFramework):
def set_test_params(self):
self.setup_clean_chain = True
self.num_nodes = 6
self.supports_cli = False
# Create nodes 0 and 1 to mine.
# Create node 2 to test pruning.
self.full_node_default_args = ["-maxreceivebuffer=20000", "-checkblocks=5"]
# Create nodes 3 and 4 to test manual pruning (they will be re-started with manual pruning later)
# Create nodes 5 to test wallet in prune mode, but do not connect
self.extra_args = [
self.full_node_default_args,
self.full_node_default_args,
["-maxreceivebuffer=20000", "-prune=550"],
["-maxreceivebuffer=20000"],
["-maxreceivebuffer=20000"],
["-prune=550"],
]
self.rpc_timeout = 120
def setup_network(self):
self.setup_nodes()
self.prunedir = os.path.join(self.nodes[2].datadir, self.chain, 'blocks', '')
self.connect_nodes(0, 1)
self.connect_nodes(1, 2)
self.connect_nodes(0, 2)
self.connect_nodes(0, 3)
self.connect_nodes(0, 4)
self.sync_blocks(self.nodes[0:5])
def setup_nodes(self):
self.add_nodes(self.num_nodes, self.extra_args)
self.start_nodes()
if self.is_wallet_compiled():
self.import_deterministic_coinbase_privkeys()
def create_big_chain(self):
# Start by creating some coinbases we can spend later
self.generate(self.nodes[1], 200, sync_fun=lambda: self.sync_blocks(self.nodes[0:2]))
self.generate(self.nodes[0], 150, sync_fun=self.no_op)
# Then mine enough full blocks to create more than 550MiB of data
mine_large_blocks(self.nodes[0], 645)
self.sync_blocks(self.nodes[0:5])
def test_invalid_command_line_options(self):
self.nodes[0].assert_start_raises_init_error(
expected_msg='Error: Prune cannot be configured with a negative value.',
extra_args=['-prune=-1'],
)
self.nodes[0].assert_start_raises_init_error(
expected_msg='Error: Prune configured below the minimum of 550 MiB. Please use a higher number.',
extra_args=['-prune=549'],
)
self.nodes[0].assert_start_raises_init_error(
expected_msg='Error: Prune mode is incompatible with -txindex.',
extra_args=['-prune=550', '-txindex'],
)
self.nodes[0].assert_start_raises_init_error(
expected_msg='Error: Prune mode is incompatible with -coinstatsindex.',
extra_args=['-prune=550', '-coinstatsindex'],
)
def test_height_min(self):
assert os.path.isfile(os.path.join(self.prunedir, "blk00000.dat")), "blk00000.dat is missing, pruning too early"
self.log.info("Success")
self.log.info(f"Though we're already using more than 550MiB, current usage: {calc_usage(self.prunedir)}")
self.log.info("Mining 25 more blocks should cause the first block file to be pruned")
# Pruning doesn't run until we're allocating another chunk; 20 full blocks past the height cutoff will ensure this
mine_large_blocks(self.nodes[0], 25)
# Wait for blk00000.dat to be pruned
self.wait_until(lambda: not os.path.isfile(os.path.join(self.prunedir, "blk00000.dat")), timeout=30)
self.log.info("Success")
usage = calc_usage(self.prunedir)
self.log.info(f"Usage should be below target: {usage}")
assert_greater_than(550, usage)
def create_chain_with_staleblocks(self):
# Create stale blocks in manageable sized chunks
self.log.info("Mine 24 (stale) blocks on Node 1, followed by 25 (main chain) block reorg from Node 0, for 12 rounds")
for _ in range(12):
# Disconnect node 0 so it can mine a longer reorg chain without knowing about node 1's soon-to-be-stale chain
# Node 2 stays connected, so it hears about the stale blocks and then reorg's when node0 reconnects
self.disconnect_nodes(0, 1)
self.disconnect_nodes(0, 2)
# Mine 24 blocks in node 1
mine_large_blocks(self.nodes[1], 24)
# Reorg back with 25 block chain from node 0
mine_large_blocks(self.nodes[0], 25)
# Create connections in the order so both nodes can see the reorg at the same time
self.connect_nodes(0, 1)
self.connect_nodes(0, 2)
self.sync_blocks(self.nodes[0:3])
self.log.info(f"Usage can be over target because of high stale rate: {calc_usage(self.prunedir)}")
def reorg_test(self):
# Node 1 will mine a 300 block chain starting 287 blocks back from Node 0 and Node 2's tip
# This will cause Node 2 to do a reorg requiring 288 blocks of undo data to the reorg_test chain
height = self.nodes[1].getblockcount()
self.log.info(f"Current block height: {height}")
self.forkheight = height - 287
self.forkhash = self.nodes[1].getblockhash(self.forkheight)
self.log.info(f"Invalidating block {self.forkhash} at height {self.forkheight}")
self.nodes[1].invalidateblock(self.forkhash)
# We've now switched to our previously mined 24-block fork on node 1, but that's not what we want
# So invalidate that fork as well, until we're on the same chain as node 0/2 (but at an ancestor 288 blocks ago)
mainchainhash = self.nodes[0].getblockhash(self.forkheight - 1)
curhash = self.nodes[1].getblockhash(self.forkheight - 1)
while curhash != mainchainhash:
self.nodes[1].invalidateblock(curhash)
curhash = self.nodes[1].getblockhash(self.forkheight - 1)
assert self.nodes[1].getblockcount() == self.forkheight - 1
self.log.info(f"New best height: {self.nodes[1].getblockcount()}")
# Disconnect node1 and generate the new chain
self.disconnect_nodes(0, 1)
self.disconnect_nodes(1, 2)
self.log.info("Generating new longer chain of 300 more blocks")
self.generate(self.nodes[1], 300, sync_fun=self.no_op)
self.log.info("Reconnect nodes")
self.connect_nodes(0, 1)
self.connect_nodes(1, 2)
self.sync_blocks(self.nodes[0:3], timeout=120)
self.log.info(f"Verify height on node 2: {self.nodes[2].getblockcount()}")
self.log.info(f"Usage possibly still high because of stale blocks in block files: {calc_usage(self.prunedir)}")
self.log.info("Mine 220 more large blocks so we have requisite history")
mine_large_blocks(self.nodes[0], 220)
self.sync_blocks(self.nodes[0:3], timeout=120)
usage = calc_usage(self.prunedir)
self.log.info(f"Usage should be below target: {usage}")
assert_greater_than(550, usage)
def reorg_back(self):
# Verify that a block on the old main chain fork has been pruned away
assert_raises_rpc_error(-1, "Block not available (pruned data)", self.nodes[2].getblock, self.forkhash)
with self.nodes[2].assert_debug_log(expected_msgs=['block verification stopping at height', '(pruning, no data)']):
self.nodes[2].verifychain(checklevel=4, nblocks=0)
self.log.info(f"Will need to redownload block {self.forkheight}")
# Verify that we have enough history to reorg back to the fork point
# Although this is more than 288 blocks, because this chain was written more recently
# and only its other 299 small and 220 large blocks are in the block files after it,
# it is expected to still be retained
self.nodes[2].getblock(self.nodes[2].getblockhash(self.forkheight))
first_reorg_height = self.nodes[2].getblockcount()
curchainhash = self.nodes[2].getblockhash(self.mainchainheight)
self.nodes[2].invalidateblock(curchainhash)
goalbestheight = self.mainchainheight
goalbesthash = self.mainchainhash2
# As of 0.10 the current block download logic is not able to reorg to the original chain created in
# create_chain_with_staleblocks because it doesn't know of any peer that's on that chain from which to
# redownload its missing blocks.
# Invalidate the reorg_test chain in node 0 as well, it can successfully switch to the original chain
# because it has all the block data.
# However it must mine enough blocks to have a more work chain than the reorg_test chain in order
# to trigger node 2's block download logic.
# At this point node 2 is within 288 blocks of the fork point so it will preserve its ability to reorg
if self.nodes[2].getblockcount() < self.mainchainheight:
blocks_to_mine = first_reorg_height + 1 - self.mainchainheight
self.log.info(f"Rewind node 0 to prev main chain to mine longer chain to trigger redownload. Blocks needed: {blocks_to_mine}")
self.nodes[0].invalidateblock(curchainhash)
assert_equal(self.nodes[0].getblockcount(), self.mainchainheight)
assert_equal(self.nodes[0].getbestblockhash(), self.mainchainhash2)
goalbesthash = self.generate(self.nodes[0], blocks_to_mine, sync_fun=self.no_op)[-1]
goalbestheight = first_reorg_height + 1
self.log.info("Verify node 2 reorged back to the main chain, some blocks of which it had to redownload")
# Wait for Node 2 to reorg to proper height
self.wait_until(lambda: self.nodes[2].getblockcount() >= goalbestheight, timeout=900)
assert_equal(self.nodes[2].getbestblockhash(), goalbesthash)
# Verify we can now have the data for a block previously pruned
assert_equal(self.nodes[2].getblock(self.forkhash)["height"], self.forkheight)
def manual_test(self, node_number, use_timestamp):
# at this point, node has 995 blocks and has not yet run in prune mode
self.start_node(node_number)
node = self.nodes[node_number]
assert_equal(node.getblockcount(), 995)
assert_raises_rpc_error(-1, "Cannot prune blocks because node is not in prune mode", node.pruneblockchain, 500)
# now re-start in manual pruning mode
self.restart_node(node_number, extra_args=["-prune=1"])
node = self.nodes[node_number]
assert_equal(node.getblockcount(), 995)
def height(index):
if use_timestamp:
return node.getblockheader(node.getblockhash(index))["time"] + TIMESTAMP_WINDOW
else:
return index
def prune(index):
ret = node.pruneblockchain(height=height(index))
assert_equal(ret, node.getblockchaininfo()['pruneheight'])
def has_block(index):
return os.path.isfile(os.path.join(self.nodes[node_number].datadir, self.chain, "blocks", f"blk{index:05}.dat"))
# should not prune because chain tip of node 3 (995) < PruneAfterHeight (1000)
assert_raises_rpc_error(-1, "Blockchain is too short for pruning", node.pruneblockchain, height(500))
# Save block transaction count before pruning, assert value
block1_details = node.getblock(node.getblockhash(1))
assert_equal(block1_details["nTx"], len(block1_details["tx"]))
# mine 6 blocks so we are at height 1001 (i.e., above PruneAfterHeight)
self.generate(node, 6, sync_fun=self.no_op)
assert_equal(node.getblockchaininfo()["blocks"], 1001)
# prune parameter in the future (block or timestamp) should raise an exception
future_parameter = height(1001) + 5
if use_timestamp:
assert_raises_rpc_error(-8, "Could not find block with at least the specified timestamp", node.pruneblockchain, future_parameter)
else:
assert_raises_rpc_error(-8, "Blockchain is shorter than the attempted prune height", node.pruneblockchain, future_parameter)
# Pruned block should still know the number of transactions
assert_equal(node.getblockheader(node.getblockhash(1))["nTx"], block1_details["nTx"])
# negative heights should raise an exception
assert_raises_rpc_error(-8, "Negative block height", node.pruneblockchain, -10)
# height=100 too low to prune first block file so this is a no-op
prune(100)
assert has_block(0), "blk00000.dat is missing when should still be there"
# Does nothing
node.pruneblockchain(height(0))
assert has_block(0), "blk00000.dat is missing when should still be there"
# height=500 should prune first file
prune(500)
assert not has_block(0), "blk00000.dat is still there, should be pruned by now"
assert has_block(1), "blk00001.dat is missing when should still be there"
# height=650 should prune second file
prune(650)
assert not has_block(1), "blk00001.dat is still there, should be pruned by now"
# height=1000 should not prune anything more, because tip-288 is in blk00002.dat.
prune(1000)
assert has_block(2), "blk00002.dat is missing when should still be there"
# advance the tip so blk00002.dat and blk00003.dat can be pruned (the last 288 blocks should now be in blk00004.dat)
self.generate(node, 288, sync_fun=self.no_op)
prune(1000)
assert not has_block(2), "blk00002.dat is still there, should be pruned by now"
assert not has_block(3), "blk00003.dat is still there, should be pruned by now"
# stop node, start back up with auto-prune at 550 MiB, make sure still runs
self.restart_node(node_number, extra_args=["-prune=550"])
self.log.info("Success")
def wallet_test(self):
# check that the pruning node's wallet is still in good shape
self.log.info("Stop and start pruning node to trigger wallet rescan")
self.restart_node(2, extra_args=["-prune=550"])
self.log.info("Success")
# check that wallet loads successfully when restarting a pruned node after IBD.
# this was reported to fail in #7494.
self.log.info("Syncing node 5 to test wallet")
self.connect_nodes(0, 5)
nds = [self.nodes[0], self.nodes[5]]
self.sync_blocks(nds, wait=5, timeout=300)
self.restart_node(5, extra_args=["-prune=550"]) # restart to trigger rescan
self.log.info("Success")
def run_test(self):
self.log.info("Warning! This test requires 4GB of disk space")
self.log.info("Mining a big blockchain of 995 blocks")
self.create_big_chain()
# Chain diagram key:
# * blocks on main chain
# +,&,$,@ blocks on other forks
# X invalidated block
# N1 Node 1
#
# Start by mining a simple chain that all nodes have
# N0=N1=N2 **...*(995)
# stop manual-pruning node with 995 blocks
self.stop_node(3)
self.stop_node(4)
self.log.info("Check that we haven't started pruning yet because we're below PruneAfterHeight")
self.test_height_min()
# Extend this chain past the PruneAfterHeight
# N0=N1=N2 **...*(1020)
self.log.info("Check that we'll exceed disk space target if we have a very high stale block rate")
self.create_chain_with_staleblocks()
# Disconnect N0
# And mine a 24 block chain on N1 and a separate 25 block chain on N0
# N1=N2 **...*+...+(1044)
# N0 **...**...**(1045)
#
# reconnect nodes causing reorg on N1 and N2
# N1=N2 **...*(1020) *...**(1045)
# \
# +...+(1044)
#
# repeat this process until you have 12 stale forks hanging off the
# main chain on N1 and N2
# N0 *************************...***************************(1320)
#
# N1=N2 **...*(1020) *...**(1045) *.. ..**(1295) *...**(1320)
# \ \ \
# +...+(1044) &.. $...$(1319)
# Save some current chain state for later use
self.mainchainheight = self.nodes[2].getblockcount() # 1320
self.mainchainhash2 = self.nodes[2].getblockhash(self.mainchainheight)
self.log.info("Check that we can survive a 288 block reorg still")
self.reorg_test() # (1033, )
# Now create a 288 block reorg by mining a longer chain on N1
# First disconnect N1
# Then invalidate 1033 on main chain and 1032 on fork so height is 1032 on main chain
# N1 **...*(1020) **...**(1032)X..
# \
# ++...+(1031)X..
#
# Now mine 300 more blocks on N1
# N1 **...*(1020) **...**(1032) @@...@(1332)
# \ \
# \ X...
# \ \
# ++...+(1031)X.. ..
#
# Reconnect nodes and mine 220 more blocks on N1
# N1 **...*(1020) **...**(1032) @@...@@@(1552)
# \ \
# \ X...
# \ \
# ++...+(1031)X.. ..
#
# N2 **...*(1020) **...**(1032) @@...@@@(1552)
# \ \
# \ *...**(1320)
# \ \
# ++...++(1044) ..
#
# N0 ********************(1032) @@...@@@(1552)
# \
# *...**(1320)
self.log.info("Test that we can rerequest a block we previously pruned if needed for a reorg")
self.reorg_back()
# Verify that N2 still has block 1033 on current chain (@), but not on main chain (*)
# Invalidate 1033 on current chain (@) on N2 and we should be able to reorg to
# original main chain (*), but will require redownload of some blocks
# In order to have a peer we think we can download from, must also perform this invalidation
# on N0 and mine a new longest chain to trigger.
# Final result:
# N0 ********************(1032) **...****(1553)
# \
# X@...@@@(1552)
#
# N2 **...*(1020) **...**(1032) **...****(1553)
# \ \
# \ X@...@@@(1552)
# \
# +..
#
# N1 doesn't change because 1033 on main chain (*) is invalid
self.log.info("Test manual pruning with block indices")
self.manual_test(3, use_timestamp=False)
self.log.info("Test manual pruning with timestamps")
self.manual_test(4, use_timestamp=True)
if self.is_wallet_compiled():
self.log.info("Test wallet re-scan")
self.wallet_test()
self.log.info("Test invalid pruning command line options")
self.test_invalid_command_line_options()
self.log.info("Done")
if __name__ == '__main__':
PruneTest().main()
|
|
#!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
'''Unit tests for grit.gather.rc'''
import os
import sys
if __name__ == '__main__':
sys.path.append(os.path.join(os.path.dirname(sys.argv[0]), '../..'))
import unittest
import StringIO
from grit.gather import rc
from grit import util
class RcUnittest(unittest.TestCase):
part_we_want = '''IDC_KLONKACC ACCELERATORS
BEGIN
"?", IDM_ABOUT, ASCII, ALT
"/", IDM_ABOUT, ASCII, ALT
END'''
def testSectionFromFile(self):
buf = '''IDC_SOMETHINGELSE BINGO
BEGIN
BLA BLA
BLA BLA
END
%s
IDC_KLONK BINGOBONGO
BEGIN
HONGO KONGO
END
''' % self.part_we_want
f = StringIO.StringIO(buf)
out = rc.Section.FromFile(f, 'IDC_KLONKACC')
self.failUnless(out.GetText() == self.part_we_want)
out = rc.Section.FromFile(util.PathFromRoot(r'grit/testdata/klonk.rc'),
'IDC_KLONKACC',
encoding='utf-16')
out_text = out.GetText().replace('\t', '')
out_text = out_text.replace(' ', '')
self.part_we_want = self.part_we_want.replace(' ', '')
self.failUnless(out_text.strip() == self.part_we_want.strip())
def testDialog(self):
dlg = rc.Dialog('''IDD_ABOUTBOX DIALOGEX 22, 17, 230, 75
STYLE DS_SETFONT | DS_MODALFRAME | WS_CAPTION | WS_SYSMENU
CAPTION "About"
FONT 8, "System", 0, 0, 0x0
BEGIN
ICON IDI_KLONK,IDC_MYICON,14,9,20,20
LTEXT "klonk Version ""yibbee"" 1.0",IDC_STATIC,49,10,119,8,
SS_NOPREFIX
LTEXT "Copyright (C) 2005",IDC_STATIC,49,20,119,8
DEFPUSHBUTTON "OK",IDOK,195,6,30,11,WS_GROUP
CONTROL "Jack ""Black"" Daniels",IDC_RADIO1,"Button",
BS_AUTORADIOBUTTON,46,51,84,10
// try a line where the ID is on the continuation line
LTEXT "blablablabla blablabla blablablablablablablabla blablabla",
ID_SMURF, whatever...
END
''')
dlg.Parse()
self.failUnless(len(dlg.GetTextualIds()) == 7)
self.failUnless(len(dlg.GetCliques()) == 6)
self.failUnless(dlg.GetCliques()[1].GetMessage().GetRealContent() ==
'klonk Version "yibbee" 1.0')
transl = dlg.Translate('en')
self.failUnless(transl.strip() == dlg.GetText().strip())
def testAlternateSkeleton(self):
dlg = rc.Dialog('''IDD_ABOUTBOX DIALOGEX 22, 17, 230, 75
STYLE DS_SETFONT | DS_MODALFRAME | WS_CAPTION | WS_SYSMENU
CAPTION "About"
FONT 8, "System", 0, 0, 0x0
BEGIN
LTEXT "Yipee skippy",IDC_STATIC,49,10,119,8,
SS_NOPREFIX
END
''')
dlg.Parse()
alt_dlg = rc.Dialog('''IDD_ABOUTBOX DIALOGEX 040704, 17, 230, 75
STYLE DS_SETFONT | DS_MODALFRAME | WS_CAPTION | WS_SYSMENU
CAPTION "XXXXXXXXX"
FONT 8, "System", 0, 0, 0x0
BEGIN
LTEXT "XXXXXXXXXXXXXXXXX",IDC_STATIC,110978,10,119,8,
SS_NOPREFIX
END
''')
alt_dlg.Parse()
transl = dlg.Translate('en', skeleton_gatherer=alt_dlg)
self.failUnless(transl.count('040704') and
transl.count('110978'))
self.failUnless(transl.count('Yipee skippy'))
def testMenu(self):
menu = rc.Menu('''IDC_KLONK MENU
BEGIN
POPUP "&File """
BEGIN
MENUITEM "E&xit", IDM_EXIT
MENUITEM "This be ""Klonk"" me like", ID_FILE_THISBE
POPUP "gonk"
BEGIN
MENUITEM "Klonk && is ""good""", ID_GONK_KLONKIS
END
MENUITEM "This is a very long menu caption to try to see if we can make the ID go to a continuation line, blablabla blablabla bla blabla blablabla blablabla blablabla blablabla...",
ID_FILE_THISISAVERYLONGMENUCAPTIONTOTRYTOSEEIFWECANMAKETHEIDGOTOACONTINUATIONLINE
END
POPUP "&Help"
BEGIN
MENUITEM "&About ...", IDM_ABOUT
END
END''')
menu.Parse()
self.failUnless(len(menu.GetTextualIds()) == 6)
self.failUnless(len(menu.GetCliques()) == 1)
self.failUnless(len(menu.GetCliques()[0].GetMessage().GetPlaceholders()) ==
9)
transl = menu.Translate('en')
self.failUnless(transl.strip() == menu.GetText().strip())
def testVersion(self):
version = rc.Version('''
VS_VERSION_INFO VERSIONINFO
FILEVERSION 1,0,0,1
PRODUCTVERSION 1,0,0,1
FILEFLAGSMASK 0x3fL
#ifdef _DEBUG
FILEFLAGS 0x1L
#else
FILEFLAGS 0x0L
#endif
FILEOS 0x4L
FILETYPE 0x2L
FILESUBTYPE 0x0L
BEGIN
BLOCK "StringFileInfo"
BEGIN
BLOCK "040904e4"
BEGIN
VALUE "CompanyName", "TODO: <Company name>"
VALUE "FileDescription", "TODO: <File description>"
VALUE "FileVersion", "1.0.0.1"
VALUE "LegalCopyright", "TODO: (c) <Company name>. All rights reserved."
VALUE "InternalName", "res_format_test.dll"
VALUE "OriginalFilename", "res_format_test.dll"
VALUE "ProductName", "TODO: <Product name>"
VALUE "ProductVersion", "1.0.0.1"
END
END
BLOCK "VarFileInfo"
BEGIN
VALUE "Translation", 0x409, 1252
END
END
'''.strip())
version.Parse()
self.failUnless(len(version.GetTextualIds()) == 1)
self.failUnless(len(version.GetCliques()) == 4)
transl = version.Translate('en')
self.failUnless(transl.strip() == version.GetText().strip())
def testRegressionDialogBox(self):
dialog = rc.Dialog('''
IDD_SIDEBAR_WEATHER_PANEL_PROPPAGE DIALOGEX 0, 0, 205, 157
STYLE DS_SETFONT | DS_FIXEDSYS | WS_CHILD
FONT 8, "MS Shell Dlg", 400, 0, 0x1
BEGIN
EDITTEXT IDC_SIDEBAR_WEATHER_NEW_CITY,3,27,112,14,ES_AUTOHSCROLL
DEFPUSHBUTTON "Add Location",IDC_SIDEBAR_WEATHER_ADD,119,27,50,14
LISTBOX IDC_SIDEBAR_WEATHER_CURR_CITIES,3,48,127,89,
LBS_NOINTEGRALHEIGHT | WS_VSCROLL | WS_TABSTOP
PUSHBUTTON "Move Up",IDC_SIDEBAR_WEATHER_MOVE_UP,134,104,50,14
PUSHBUTTON "Move Down",IDC_SIDEBAR_WEATHER_MOVE_DOWN,134,121,50,14
PUSHBUTTON "Remove",IDC_SIDEBAR_WEATHER_DELETE,134,48,50,14
LTEXT "To see current weather conditions and forecasts in the USA, enter the zip code (example: 94043) or city and state (example: Mountain View, CA).",
IDC_STATIC,3,0,199,25
CONTROL "Fahrenheit",IDC_SIDEBAR_WEATHER_FAHRENHEIT,"Button",
BS_AUTORADIOBUTTON | WS_GROUP | WS_TABSTOP,3,144,51,10
CONTROL "Celsius",IDC_SIDEBAR_WEATHER_CELSIUS,"Button",
BS_AUTORADIOBUTTON,57,144,38,10
END'''.strip())
dialog.Parse()
self.failUnless(len(dialog.GetTextualIds()) == 10)
def testRegressionDialogBox2(self):
dialog = rc.Dialog('''
IDD_SIDEBAR_EMAIL_PANEL_PROPPAGE DIALOG DISCARDABLE 0, 0, 264, 220
STYLE WS_CHILD
FONT 8, "MS Shell Dlg"
BEGIN
GROUPBOX "Email Filters",IDC_STATIC,7,3,250,190
LTEXT "Click Add Filter to create the email filter.",IDC_STATIC,16,41,130,9
PUSHBUTTON "Add Filter...",IDC_SIDEBAR_EMAIL_ADD_FILTER,196,38,50,14
PUSHBUTTON "Remove",IDC_SIDEBAR_EMAIL_REMOVE,196,174,50,14
PUSHBUTTON "", IDC_SIDEBAR_EMAIL_HIDDEN, 200, 178, 5, 5, NOT WS_VISIBLE
LISTBOX IDC_SIDEBAR_EMAIL_LIST,16,60,230,108,
LBS_NOINTEGRALHEIGHT | WS_VSCROLL | WS_TABSTOP
LTEXT "You can prevent certain emails from showing up in the sidebar with a filter.",
IDC_STATIC,16,18,234,18
END'''.strip())
dialog.Parse()
self.failUnless('IDC_SIDEBAR_EMAIL_HIDDEN' in dialog.GetTextualIds())
def testRegressionMenuId(self):
menu = rc.Menu('''
IDR_HYPERMENU_FOLDER MENU
BEGIN
POPUP "HyperFolder"
BEGIN
MENUITEM "Open Containing Folder", IDM_OPENFOLDER
END
END'''.strip())
menu.Parse()
self.failUnless(len(menu.GetTextualIds()) == 2)
def testRegressionNewlines(self):
menu = rc.Menu('''
IDR_HYPERMENU_FOLDER MENU
BEGIN
POPUP "Hyper\\nFolder"
BEGIN
MENUITEM "Open Containing Folder", IDM_OPENFOLDER
END
END'''.strip())
menu.Parse()
transl = menu.Translate('en')
# Shouldn't find \\n (the \n shouldn't be changed to \\n)
self.failUnless(transl.find('\\\\n') == -1)
def testRegressionTabs(self):
menu = rc.Menu('''
IDR_HYPERMENU_FOLDER MENU
BEGIN
POPUP "Hyper\\tFolder"
BEGIN
MENUITEM "Open Containing Folder", IDM_OPENFOLDER
END
END'''.strip())
menu.Parse()
transl = menu.Translate('en')
# Shouldn't find \\t (the \t shouldn't be changed to \\t)
self.failUnless(transl.find('\\\\t') == -1)
def testEscapeUnescape(self):
original = 'Hello "bingo"\n How\\are\\you\\n?'
escaped = rc.Section.Escape(original)
self.failUnless(escaped == 'Hello ""bingo""\\n How\\\\are\\\\you\\\\n?')
unescaped = rc.Section.UnEscape(escaped)
self.failUnless(unescaped == original)
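# (Rules exercised above: Escape doubles double-quotes and backslashes and
# renders real newlines as a literal \n sequence; UnEscape reverses each of
# those transformations.)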
def testRegressionPathsWithSlashN(self):
original = '..\\\\..\\\\trs\\\\res\\\\nav_first.gif'
unescaped = rc.Section.UnEscape(original)
self.failUnless(unescaped == '..\\..\\trs\\res\\nav_first.gif')
def testRegressionDialogItemsTextOnly(self):
dialog = rc.Dialog('''IDD_OPTIONS_SEARCH DIALOGEX 0, 0, 280, 292
STYLE DS_SETFONT | DS_MODALFRAME | DS_FIXEDSYS | DS_CENTER | WS_POPUP |
WS_DISABLED | WS_CAPTION | WS_SYSMENU
CAPTION "Search"
FONT 8, "MS Shell Dlg", 400, 0, 0x1
BEGIN
GROUPBOX "Select search buttons and options",-1,7,5,266,262
CONTROL "",IDC_OPTIONS,"SysTreeView32",TVS_DISABLEDRAGDROP |
WS_BORDER | WS_TABSTOP | 0x800,16,19,248,218
LTEXT "Use Google site:",-1,26,248,52,8
COMBOBOX IDC_GOOGLE_HOME,87,245,177,256,CBS_DROPDOWNLIST |
WS_VSCROLL | WS_TABSTOP
PUSHBUTTON "Restore Defaults...",IDC_RESET,187,272,86,14
END''')
dialog.Parse()
translateables = [c.GetMessage().GetRealContent()
for c in dialog.GetCliques()]
self.failUnless('Select search buttons and options' in translateables)
self.failUnless('Use Google site:' in translateables)
def testAccelerators(self):
acc = rc.Accelerators('''\
IDR_ACCELERATOR1 ACCELERATORS
BEGIN
"^C", ID_ACCELERATOR32770, ASCII, NOINVERT
"^V", ID_ACCELERATOR32771, ASCII, NOINVERT
VK_INSERT, ID_ACCELERATOR32772, VIRTKEY, CONTROL, NOINVERT
END
''')
acc.Parse()
self.failUnless(len(acc.GetTextualIds()) == 4)
self.failUnless(len(acc.GetCliques()) == 0)
transl = acc.Translate('en')
self.failUnless(transl.strip() == acc.GetText().strip())
def testRegressionEmptyString(self):
dlg = rc.Dialog('''\
IDD_CONFIRM_QUIT_GD_DLG DIALOGEX 0, 0, 267, 108
STYLE DS_SETFONT | DS_MODALFRAME | DS_FIXEDSYS | DS_CENTER | WS_POPUP |
WS_CAPTION
EXSTYLE WS_EX_TOPMOST
CAPTION "Google Desktop"
FONT 8, "MS Shell Dlg", 400, 0, 0x1
BEGIN
DEFPUSHBUTTON "&Yes",IDYES,82,87,50,14
PUSHBUTTON "&No",IDNO,136,87,50,14
ICON 32514,IDC_STATIC,7,9,21,20
EDITTEXT IDC_TEXTBOX,34,7,231,60,ES_MULTILINE | ES_READONLY | NOT WS_BORDER
CONTROL "",
IDC_ENABLE_GD_AUTOSTART,"Button",BS_AUTOCHECKBOX |
WS_TABSTOP,33,70,231,10
END''')
dlg.Parse()
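# Check() is a closure over 'transl'; it is re-run after each Translate() call
# below to verify that the control id and the END keyword survive translation
# for every combination of the pseudo/fallback flags.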
def Check():
self.failUnless(transl.count('IDC_ENABLE_GD_AUTOSTART'))
self.failUnless(transl.count('END'))
transl = dlg.Translate('de', pseudo_if_not_available=True,
fallback_to_english=True)
Check()
transl = dlg.Translate('de', pseudo_if_not_available=True,
fallback_to_english=False)
Check()
transl = dlg.Translate('de', pseudo_if_not_available=False,
fallback_to_english=True)
Check()
transl = dlg.Translate('de', pseudo_if_not_available=False,
fallback_to_english=False)
Check()
transl = dlg.Translate('en', pseudo_if_not_available=True,
fallback_to_english=True)
Check()
transl = dlg.Translate('en', pseudo_if_not_available=True,
fallback_to_english=False)
Check()
transl = dlg.Translate('en', pseudo_if_not_available=False,
fallback_to_english=True)
Check()
transl = dlg.Translate('en', pseudo_if_not_available=False,
fallback_to_english=False)
Check()
if __name__ == '__main__':
unittest.main()
|
|
# encoding: utf-8
# Copyright 2013 maker
# License
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
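# South schema migration for the 'services' app: forwards() adds the
# 'updaterecord_ptr' one-to-one link from TicketRecord to core.UpdateRecord and
# backwards() drops it again. The 'models' dict below is South's frozen
# snapshot of the ORM at the time the migration was generated.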
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding field 'TicketRecord.updaterecord_ptr'
db.add_column('services_ticketrecord', 'updaterecord_ptr', self.gf('django.db.models.fields.related.OneToOneField')(to=orm['core.UpdateRecord'], unique=True, null=True, blank=True), keep_default=False)
def backwards(self, orm):
# Deleting field 'TicketRecord.updaterecord_ptr'
db.delete_column('services_ticketrecord', 'updaterecord_ptr_id')
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'core.accessentity': {
'Meta': {'object_name': 'AccessEntity'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'last_updated': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'})
},
'core.comment': {
'Meta': {'object_name': 'Comment'},
'author': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['core.User']", 'null': 'True', 'blank': 'True'}),
'body': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'date_created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'dislikes': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'comments_disliked'", 'null': 'True', 'symmetrical': 'False', 'to': "orm['core.User']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'likes': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'comments_liked'", 'null': 'True', 'symmetrical': 'False', 'to': "orm['core.User']"})
},
'core.group': {
'Meta': {'ordering': "['name']", 'object_name': 'Group'},
'accessentity_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['core.AccessEntity']", 'unique': 'True', 'null': 'True', 'blank': 'True'}),
'details': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '256'}),
'parent': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'child_set'", 'null': 'True', 'to': "orm['core.Group']"})
},
'core.object': {
'Meta': {'object_name': 'Object'},
'comments': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'comments'", 'null': 'True', 'symmetrical': 'False', 'to': "orm['core.Comment']"}),
'creator': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'objects_created'", 'null': 'True', 'to': "orm['core.User']"}),
'date_created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'dislikes': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'objects_disliked'", 'null': 'True', 'symmetrical': 'False', 'to': "orm['core.User']"}),
'full_access': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'objects_full_access'", 'null': 'True', 'symmetrical': 'False', 'to': "orm['core.AccessEntity']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'last_updated': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'likes': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'objects_liked'", 'null': 'True', 'symmetrical': 'False', 'to': "orm['core.User']"}),
'links': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'links_rel_+'", 'null': 'True', 'to': "orm['core.Object']"}),
'nuvius_resource': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'object_name': ('django.db.models.fields.CharField', [], {'max_length': '512', 'null': 'True', 'blank': 'True'}),
'object_type': ('django.db.models.fields.CharField', [], {'max_length': '512', 'null': 'True', 'blank': 'True'}),
'read_access': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'objects_read_access'", 'null': 'True', 'symmetrical': 'False', 'to': "orm['core.AccessEntity']"}),
'subscribers': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'subscriptions'", 'null': 'True', 'symmetrical': 'False', 'to': "orm['core.User']"}),
'tags': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': "orm['core.Tag']", 'null': 'True', 'blank': 'True'}),
'trash': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['core.User']", 'null': 'True', 'blank': 'True'})
},
'core.tag': {
'Meta': {'ordering': "['name']", 'object_name': 'Tag'},
'date_created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '512'})
},
'core.updaterecord': {
'Meta': {'ordering': "['-date_created']", 'object_name': 'UpdateRecord'},
'about': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'updates'", 'null': 'True', 'symmetrical': 'False', 'to': "orm['core.Object']"}),
'author': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'sent_updates'", 'null': 'True', 'to': "orm['core.User']"}),
'body': ('django.db.models.fields.TextField', [], {'default': "''", 'null': 'True', 'blank': 'True'}),
'comments': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'comments_on_updates'", 'null': 'True', 'symmetrical': 'False', 'to': "orm['core.Comment']"}),
'date_created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'dislikes': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'updates_disliked'", 'null': 'True', 'symmetrical': 'False', 'to': "orm['core.User']"}),
'format_message': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'format_strings': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'likes': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'updates_liked'", 'null': 'True', 'symmetrical': 'False', 'to': "orm['core.User']"}),
'recipients': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'received_updates'", 'null': 'True', 'symmetrical': 'False', 'to': "orm['core.AccessEntity']"}),
'record_type': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'sender': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'sent_updates'", 'null': 'True', 'to': "orm['core.Object']"}),
'url': ('django.db.models.fields.CharField', [], {'max_length': '512', 'null': 'True', 'blank': 'True'})
},
'core.user': {
'Meta': {'ordering': "['name']", 'object_name': 'User'},
'accessentity_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['core.AccessEntity']", 'unique': 'True', 'null': 'True', 'blank': 'True'}),
'default_group': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'default_user_set'", 'null': 'True', 'to': "orm['core.AccessEntity']"}),
'disabled': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'last_access': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '256'}),
'other_groups': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': "orm['core.Group']", 'null': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
},
'identities.contact': {
'Meta': {'ordering': "['name']", 'object_name': 'Contact', '_ormbases': ['core.Object']},
'contact_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['identities.ContactType']"}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '256'}),
'object_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['core.Object']", 'unique': 'True', 'primary_key': 'True'}),
'parent': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'child_set'", 'null': 'True', 'to': "orm['identities.Contact']"}),
'related_group': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['core.Group']", 'null': 'True', 'blank': 'True'}),
'related_user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['core.AccessEntity']", 'null': 'True', 'blank': 'True'})
},
'identities.contactfield': {
'Meta': {'ordering': "['name']", 'object_name': 'ContactField', '_ormbases': ['core.Object']},
'allowed_values': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'details': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'field_type': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'label': ('django.db.models.fields.CharField', [], {'max_length': '256'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '256'}),
'object_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['core.Object']", 'unique': 'True', 'primary_key': 'True'}),
'required': ('django.db.models.fields.BooleanField', [], {'default': 'False'})
},
'identities.contacttype': {
'Meta': {'ordering': "['name']", 'object_name': 'ContactType', '_ormbases': ['core.Object']},
'details': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'fields': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': "orm['identities.ContactField']", 'null': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '256'}),
'object_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['core.Object']", 'unique': 'True', 'primary_key': 'True'}),
'slug': ('django.db.models.fields.CharField', [], {'max_length': '256'})
},
'messaging.emailbox': {
'Meta': {'ordering': "['last_updated']", 'object_name': 'EmailBox', '_ormbases': ['core.Object']},
'email_name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'email_type': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'last_checked': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'object_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['core.Object']", 'unique': 'True', 'primary_key': 'True'}),
'server_name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'server_password': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'server_type': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'server_username': ('django.db.models.fields.CharField', [], {'max_length': '255'})
},
'messaging.mailinglist': {
'Meta': {'ordering': "['-date_created']", 'object_name': 'MailingList', '_ormbases': ['core.Object']},
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'from_contact': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'from_contact_set'", 'to': "orm['identities.Contact']"}),
'members': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'members_set'", 'null': 'True', 'symmetrical': 'False', 'to': "orm['identities.Contact']"}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'object_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['core.Object']", 'unique': 'True', 'primary_key': 'True'}),
'opt_in': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['messaging.Template']", 'null': 'True', 'blank': 'True'})
},
'messaging.message': {
'Meta': {'ordering': "['-date_created']", 'object_name': 'Message', '_ormbases': ['core.Object']},
'author': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['identities.Contact']"}),
'body': ('django.db.models.fields.TextField', [], {}),
'mlist': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'mlist'", 'null': 'True', 'to': "orm['messaging.MailingList']"}),
'object_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['core.Object']", 'unique': 'True', 'primary_key': 'True'}),
'read_by': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'read_by_user'", 'null': 'True', 'symmetrical': 'False', 'to': "orm['core.User']"}),
'recipients': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'message_recipients'", 'null': 'True', 'symmetrical': 'False', 'to': "orm['identities.Contact']"}),
'reply_to': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'child_set'", 'null': 'True', 'to': "orm['messaging.Message']"}),
'stream': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'stream'", 'null': 'True', 'to': "orm['messaging.MessageStream']"}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'})
},
'messaging.messagestream': {
'Meta': {'ordering': "['name', 'last_updated']", 'object_name': 'MessageStream', '_ormbases': ['core.Object']},
'email_incoming': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'incoming'", 'null': 'True', 'to': "orm['messaging.EmailBox']"}),
'email_outgoing': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'outgoing'", 'null': 'True', 'to': "orm['messaging.EmailBox']"}),
'faulty': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'incoming_password': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'incoming_server_name': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'incoming_server_type': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'incoming_server_username': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'last_checked': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'object_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['core.Object']", 'unique': 'True', 'primary_key': 'True'}),
'outgoing_email': ('django.db.models.fields.EmailField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'outgoing_password': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'outgoing_server_name': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'outgoing_server_type': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'outgoing_server_username': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'})
},
'messaging.template': {
'Meta': {'object_name': 'Template', '_ormbases': ['core.Object']},
'body': ('django.db.models.fields.TextField', [], {}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'object_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['core.Object']", 'unique': 'True', 'primary_key': 'True'}),
'subject': ('django.db.models.fields.CharField', [], {'max_length': '255'})
},
'services.service': {
'Meta': {'ordering': "['name']", 'object_name': 'Service', '_ormbases': ['core.Object']},
'details': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '256'}),
'object_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['core.Object']", 'unique': 'True', 'primary_key': 'True'}),
'parent': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'child_set'", 'null': 'True', 'to': "orm['services.Service']"})
},
'services.serviceagent': {
'Meta': {'ordering': "('related_user', '-active', 'occupied')", 'object_name': 'ServiceAgent', '_ormbases': ['core.Object']},
'active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'available_from': ('django.db.models.fields.TimeField', [], {'null': 'True', 'blank': 'True'}),
'available_to': ('django.db.models.fields.TimeField', [], {'null': 'True', 'blank': 'True'}),
'object_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['core.Object']", 'unique': 'True', 'primary_key': 'True'}),
'occupied': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'related_user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['core.User']"})
},
'services.servicelevelagreement': {
'Meta': {'ordering': "('name', 'client')", 'object_name': 'ServiceLevelAgreement', '_ormbases': ['core.Object']},
'available_from': ('django.db.models.fields.TimeField', [], {'null': 'True', 'blank': 'True'}),
'available_to': ('django.db.models.fields.TimeField', [], {'null': 'True', 'blank': 'True'}),
'client': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'client_sla'", 'null': 'True', 'to': "orm['identities.Contact']"}),
'default': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '256'}),
'object_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['core.Object']", 'unique': 'True', 'primary_key': 'True'}),
'provider': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'provider_sla'", 'to': "orm['identities.Contact']"}),
'response_time': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True', 'blank': 'True'}),
'service': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['services.Service']"}),
'uptime_rate': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'})
},
'services.ticket': {
'Meta': {'ordering': "('-priority', 'reference')", 'object_name': 'Ticket', '_ormbases': ['core.Object']},
'assigned': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': "orm['services.ServiceAgent']", 'null': 'True', 'blank': 'True'}),
'caller': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['identities.Contact']", 'null': 'True', 'blank': 'True'}),
'details': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'message': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['messaging.Message']", 'null': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '256'}),
'object_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['core.Object']", 'unique': 'True', 'primary_key': 'True'}),
'priority': ('django.db.models.fields.IntegerField', [], {'default': '3'}),
'queue': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['services.TicketQueue']", 'null': 'True', 'blank': 'True'}),
'reference': ('django.db.models.fields.CharField', [], {'max_length': '256'}),
'resolution': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'service': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['services.Service']", 'null': 'True', 'blank': 'True'}),
'sla': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['services.ServiceLevelAgreement']", 'null': 'True', 'blank': 'True'}),
'status': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['services.TicketStatus']"}),
'urgency': ('django.db.models.fields.IntegerField', [], {'default': '3'})
},
'services.ticketqueue': {
'Meta': {'ordering': "('name', '-active', 'ticket_code')", 'object_name': 'TicketQueue', '_ormbases': ['core.Object']},
'active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'default_service': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['services.Service']", 'null': 'True', 'blank': 'True'}),
'default_ticket_priority': ('django.db.models.fields.IntegerField', [], {'default': '3'}),
'default_ticket_status': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['services.TicketStatus']", 'null': 'True', 'blank': 'True'}),
'details': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'message_stream': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['messaging.MessageStream']", 'null': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '256'}),
'next_queue': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'previous_set'", 'null': 'True', 'to': "orm['services.TicketQueue']"}),
'object_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['core.Object']", 'unique': 'True', 'primary_key': 'True'}),
'parent': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'child_set'", 'null': 'True', 'to': "orm['services.TicketQueue']"}),
'ticket_code': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '8', 'null': 'True', 'blank': 'True'}),
'waiting_time': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True', 'blank': 'True'})
},
'services.ticketrecord': {
'Meta': {'ordering': "['ticket']", 'object_name': 'TicketRecord', '_ormbases': ['core.Object']},
'details': ('django.db.models.fields.TextField', [], {}),
'message': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['messaging.Message']", 'null': 'True', 'blank': 'True'}),
'notify': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'object_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['core.Object']", 'unique': 'True', 'primary_key': 'True'}),
'record_type': ('django.db.models.fields.CharField', [], {'max_length': '256'}),
'ticket': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['services.Ticket']"}),
'updaterecord_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['core.UpdateRecord']", 'unique': 'True', 'null': 'True', 'blank': 'True'})
},
'services.ticketstatus': {
'Meta': {'ordering': "('hidden', '-active', 'name')", 'object_name': 'TicketStatus', '_ormbases': ['core.Object']},
'active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'details': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'hidden': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '256'}),
'object_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['core.Object']", 'unique': 'True', 'primary_key': 'True'})
}
}
complete_apps = ['services']
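# A usage note (assuming the project's standard manage.py/South setup): the
# migration would typically be applied with
#   python manage.py migrate services
# and rolled back by migrating the app to the previous migration number.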
|
|
from PyQt4 import QtCore
from PyQt4 import QtGui
from PyQt4 import QtWebKit
from renderers import MessageRenderer
from qtx import ClickableQLabel, IdleTimer, RowPushButton, SpellTextEditor
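# Dialog windows for the Snakefire Campfire client (PyQt4): the About box, the
# alerts/notifications settings dialog and the main options dialog.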
class AboutDialog(QtGui.QDialog):
def __init__(self, mainFrame):
super(AboutDialog, self).__init__(mainFrame)
self._mainFrame = mainFrame
self.setWindowTitle(self._mainFrame._("About {name}").format(name=self._mainFrame.NAME))
self._setupUI()
def _website(self):
QtGui.QDesktopServices.openUrl(QtCore.QUrl("http://{url}".format(url=self._mainFrame.DOMAIN)))
def _setupUI(self):
label = ClickableQLabel()
label.setPixmap(QtGui.QPixmap(":/images/snakefire-big.png"))
label.setAlignment(QtCore.Qt.AlignCenter)
self.connect(label, QtCore.SIGNAL("clicked()"), self._website)
urlLabel = QtGui.QLabel("<a href=\"http://{url}\">{name}</a>".format(
url=self._mainFrame.DOMAIN,
name=self._mainFrame.DOMAIN
))
urlLabel.setOpenExternalLinks(True)
websiteBox = QtGui.QHBoxLayout()
websiteBox.addWidget(QtGui.QLabel(self._mainFrame._("Website:")))
websiteBox.addWidget(urlLabel)
websiteBox.addStretch(1)
twitterLabel = QtGui.QLabel("<a href=\"http://twitter.com/snakefirelinux\">@snakefirelinux</a>")
twitterLabel.setOpenExternalLinks(True)
twitterBox = QtGui.QHBoxLayout()
twitterBox.addWidget(QtGui.QLabel(self._mainFrame._("Twitter:")))
twitterBox.addWidget(twitterLabel)
twitterBox.addStretch(1)
layout = QtGui.QVBoxLayout()
layout.addWidget(label)
layout.addStretch(0.5)
layout.addWidget(QtGui.QLabel("<strong>{name} v{version}</strong>".format(
name=self._mainFrame.NAME,
version=self._mainFrame.VERSION
)))
layout.addStretch(0.5)
layout.addLayout(websiteBox)
layout.addLayout(twitterBox)
# Buttons
self._okButton = QtGui.QPushButton(self._mainFrame._("&OK"), self)
self.connect(self._okButton, QtCore.SIGNAL('clicked()'), self.close)
# Main layout
hbox = QtGui.QHBoxLayout()
hbox.addStretch(1)
hbox.addWidget(self._okButton)
vbox = QtGui.QVBoxLayout()
vbox.addLayout(layout)
vbox.addLayout(hbox)
self.setLayout(vbox)
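# AlertsDialog maintains a table of highlight matches (plain text or regular
# expressions) and the options controlling how notifications are delivered.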
class AlertsDialog(QtGui.QDialog):
def __init__(self, mainFrame):
super(AlertsDialog, self).__init__(mainFrame)
self._mainFrame = mainFrame
self.setWindowTitle(self._mainFrame._("Alerts"))
self._setupUI()
def ok(self):
self._save()
self.close()
def cancel(self):
self.close()
def add(self, match=None):
row = self._table.rowCount()
self._table.insertRow(row)
column = QtGui.QTableWidgetItem()
column.setFlags(QtCore.Qt.ItemIsEnabled | QtCore.Qt.ItemIsEditable)
if match:
column.setText(match['match'])
self._table.setItem(row, 0, column)
checkbox = QtGui.QCheckBox(self._table)
checkbox.setChecked(match['regex'] if match else False)
self._table.setCellWidget(row, 1, checkbox)
button = RowPushButton(row, self._mainFrame._("Delete"), self._table)
self.connect(button, QtCore.SIGNAL('clicked(int)'), self.delete)
self._table.setCellWidget(row, 2, button)
self._table.setCurrentCell(row, 0)
def delete(self, row):
self._table.removeRow(row)
self.validate()
def validate(self):
isValid = True
rowCount = self._table.rowCount()
for i in range(rowCount):
match = self._table.item(i, 0).text().trimmed()
if match.isEmpty():
isValid = False
break
self._addButton.setEnabled(isValid)
self._okButton.setEnabled(isValid)
return isValid
def _save(self):
matches = []
for i in range(self._table.rowCount()):
matches.append({
'match': str(self._table.item(i, 0).text().trimmed()),
'regex': self._table.cellWidget(i, 1).isChecked()
})
self._mainFrame.setSettings("matches", matches)
alertsSettings = {
"notify_ping": self._notifyOnPingField.isChecked(),
"notify_inactive_tab": self._notifyOnInactiveTabField.isChecked(),
"notify_blink": self._notifyBlinkField.isChecked(),
"notify_notify": self._notifyNotifyField.isChecked()
}
self._mainFrame.setSettings("alerts", alertsSettings)
def _setupUI(self):
self._addButton = QtGui.QPushButton(self._mainFrame._("Add"), self)
self.connect(self._addButton, QtCore.SIGNAL('clicked()'), self.add)
addBox = QtGui.QHBoxLayout()
addBox.addStretch(1)
addBox.addWidget(self._addButton)
headers = QtCore.QStringList()
headers.append(QtCore.QString(self._mainFrame._("Search text")))
headers.append(QtCore.QString(self._mainFrame._("RegEx")))
headers.append(QtCore.QString(self._mainFrame._("Delete")))
self._table = QtGui.QTableWidget(self)
self._table.setColumnCount(3)
self._table.setHorizontalHeaderLabels(headers)
self._table.resizeColumnsToContents()
self._table.horizontalHeader().setResizeMode(0, QtGui.QHeaderView.Stretch)
tableBox = QtGui.QVBoxLayout()
tableBox.addWidget(self._table)
tableBox.addLayout(addBox)
# Options
self._notifyOnPingField = QtGui.QCheckBox(self._mainFrame._("Alert me whenever I get a &direct message"), self)
self._notifyOnInactiveTabField = QtGui.QCheckBox(self._mainFrame._("Notify me of every message sent while I'm &inactive"), self)
optionsGrid = QtGui.QGridLayout()
optionsGrid.addWidget(self._notifyOnPingField, 1, 0)
optionsGrid.addWidget(self._notifyOnInactiveTabField, 2, 0)
optionsGroupBox = QtGui.QGroupBox(self._mainFrame._("Alerts && Notifications"))
optionsGroupBox.setLayout(optionsGrid)
# Methods
self._notifyBlinkField = QtGui.QCheckBox(self._mainFrame._("&Blink the systray icon when notifying"), self)
self._notifyNotifyField = QtGui.QCheckBox(self._mainFrame._("Trigger a &Notification using the OS notification system"), self)
methodsGrid = QtGui.QGridLayout()
methodsGrid.addWidget(self._notifyBlinkField, 1, 0)
methodsGrid.addWidget(self._notifyNotifyField, 2, 0)
methodsGroupBox = QtGui.QGroupBox(self._mainFrame._("Notification methods"))
methodsGroupBox.setLayout(methodsGrid)
# Buttons
self._okButton = QtGui.QPushButton(self._mainFrame._("&OK"), self)
self._cancelButton = QtGui.QPushButton(self._mainFrame._("&Cancel"), self)
self.connect(self._okButton, QtCore.SIGNAL('clicked()'), self.ok)
self.connect(self._cancelButton, QtCore.SIGNAL('clicked()'), self.cancel)
# Main layout
hbox = QtGui.QHBoxLayout()
hbox.addStretch(1)
hbox.addWidget(self._okButton)
hbox.addWidget(self._cancelButton)
vbox = QtGui.QVBoxLayout()
vbox.addLayout(tableBox)
vbox.addWidget(optionsGroupBox)
vbox.addWidget(methodsGroupBox)
vbox.addLayout(hbox)
self.setLayout(vbox)
# Load settings
alertsSettings = self._mainFrame.getSettings("alerts")
matches = self._mainFrame.getSettings("matches")
self._notifyOnPingField.setChecked(alertsSettings["notify_ping"])
self._notifyOnInactiveTabField.setChecked(alertsSettings["notify_inactive_tab"])
self._notifyBlinkField.setChecked(alertsSettings["notify_blink"])
self._notifyNotifyField.setChecked(alertsSettings["notify_notify"])
if matches:
for match in matches:
self.add(match)
# Only connect to signal after adding rows
self.connect(self._table, QtCore.SIGNAL('cellChanged(int,int)'), self.validate)
self.validate()
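# OptionsDialog groups the Campfire connection credentials, program behaviour,
# away-mode and display/theme settings into a two-tab settings window and
# persists them through the main frame's setSettings() API.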
class OptionsDialog(QtGui.QDialog):
def __init__(self, mainFrame):
super(OptionsDialog, self).__init__(mainFrame)
self._mainFrame = mainFrame
self.setWindowTitle(self._mainFrame._("Settings"))
self._setupUI()
def ok(self):
self._save()
self.close()
def cancel(self):
self.close()
def validate(self):
isValid = (
not self._subdomainField.text().trimmed().isEmpty() and
not self._usernameField.text().trimmed().isEmpty() and
not self._passwordField.text().isEmpty() and
( not self._awayField.isChecked() or not self._awayMessageField.text().trimmed().isEmpty() )
)
self._okButton.setEnabled(isValid)
awayChecked = self._awayField.isEnabled() and self._awayField.isChecked()
self._awayTimeField.setEnabled(awayChecked)
self._awayMessageField.setEnabled(awayChecked)
self._awayTimeBetweenMessagesField.setEnabled(awayChecked)
return isValid
def _save(self):
(themeSize, themeSizeOk) = self._themeSizeField.itemData(self._themeSizeField.currentIndex()).toInt()
(awayTime, awayTimeOk) = self._awayTimeField.itemData(self._awayTimeField.currentIndex()).toInt()
(awayTimeBetweenMessages, awayTimeBetweenMessagesOk) = self._awayTimeBetweenMessagesField.itemData(self._awayTimeBetweenMessagesField.currentIndex()).toInt()
connectionSettings = {
"subdomain": str(self._subdomainField.text().trimmed()),
"user": str(self._usernameField.text().trimmed()),
"password": str(self._passwordField.text()),
"ssl": self._sslField.isChecked(),
"connect": self._connectField.isChecked(),
"join": self._joinField.isChecked()
}
programSettings = {
"minimize": self._minimizeField.isChecked(),
"spell_language": self._spellLanguageField.itemData(self._spellLanguageField.currentIndex()).toString(),
"away": self._awayField.isChecked(),
"away_time": awayTime if awayTimeOk else 10,
"away_time_between_messages": awayTimeBetweenMessages if awayTimeBetweenMessagesOk else 5,
"away_message": str(self._awayMessageField.text().trimmed())
}
displaySettings = {
"theme": self._themeField.itemData(self._themeField.currentIndex()).toString(),
"size": themeSize if themeSizeOk else 100,
"show_join_message": self._showJoinMessageField.isChecked(),
"show_part_message": self._showPartMessageField.isChecked(),
"show_message_timestamps": self._showMessageTimestampsField.isChecked(),
}
self._mainFrame.setSettings("connection", connectionSettings)
self._mainFrame.setSettings("program", programSettings)
self._mainFrame.setSettings("display", displaySettings)
def _themeSelected(self):
self._themePreview.settings().setUserStyleSheetUrl(QtCore.QUrl("qrc:/themes/{theme}.css".format(
theme = self._themeField.itemData(self._themeField.currentIndex()).toString()
)))
def _themeSizeSelected(self):
(value, ok) = self._themeSizeField.itemData(self._themeSizeField.currentIndex()).toInt()
if ok:
self._themePreview.setTextSizeMultiplier(round(float(value) / 100, 1))
def _setupThemesUI(self, displaySettings):
# Themes
children = QtCore.QResource(':/themes').children()
children.sort()
currentIndex = None
index = 0
for theme in children:
themeName = str(theme.replace(QtCore.QRegExp(r'\.css$'), ''))
self._themeField.addItem(themeName.replace('_', ' ').title(), themeName)
if themeName == displaySettings["theme"]:
currentIndex = index
index += 1
if currentIndex is not None:
self._themeField.setCurrentIndex(currentIndex)
# Theme sizes
currentIndex = None
index = 0
for size in range(50, 160, 10):
self._themeSizeField.addItem("{n}%".format(n=size), size)
if size == int(displaySettings["size"]):
currentIndex = index
index += 1
if currentIndex is not None:
self._themeSizeField.setCurrentIndex(currentIndex)
# Load preview content
messages = [
MessageRenderer.MESSAGES['join'].format(user='John Doe', room='Snakefire'),
MessageRenderer.MESSAGES['message_self'].format(time='3:33 pm', user='John Doe', message='Hey everyone!'),
MessageRenderer.MESSAGES['message_self'].format(time='3:33 pm', user='John Doe', message='How are you all doing?'),
MessageRenderer.MESSAGES['alert'].format(time='3:34 pm', user='Jane Doe', message='Hi John Doe! Nice to see you here'),
MessageRenderer.MESSAGES['tweet'].format(url_user='#', user='@mgiglesias', url='#', message='Hello world from twitter :)'),
MessageRenderer.MESSAGES['message_self'].format(time='3:35 pm', user='John Doe', message='Look at this method:'),
MessageRenderer.MESSAGES['paste'].format(message='def hello(self):<br /> print "Hello World"'),
MessageRenderer.MESSAGES['topic'].format(user='Jane Doe', topic='Testing Snakefire, and loving it'),
MessageRenderer.MESSAGES['message'].format(time='3:36 pm', user='Jane Doe', message='Looks good. Now look at this upload:'),
MessageRenderer.MESSAGES['message'].format(time='3:36 pm', user='Jane Doe',
message = MessageRenderer.MESSAGES['upload'].format(url='#', name='my_upload.tar.gz')
)
]
image = QtGui.QImage(":/icons/snakefire.png")
buffer = QtCore.QBuffer()
if buffer.open(QtCore.QIODevice.WriteOnly) and image.save(buffer, 'PNG'):
messages.extend([
MessageRenderer.MESSAGES['message_self'].format(time='3:38 pm', user='John Doe', message='Look at this image:'),
MessageRenderer.MESSAGES['message_self'].format(time='3:38 pm', user='John Doe', message=MessageRenderer.MESSAGES['image'].format(
url = '#',
type = 'image/png',
data = buffer.data().toBase64().data(),
name = 'image.png',
url_md5 = '',
js='',
attribs = ''
))
])
messages.extend([
MessageRenderer.MESSAGES['leave'].format(user='Jane Doe', room='Snakefire'),
MessageRenderer.MESSAGES['message_self'].format(time='3:37 pm', user='John Doe', message='I guess I am all alone now :('),
])
self._themePreview.page().mainFrame().setHtml("\n".join(messages))
self._themePreview.show()
self._themeSelected()
self._themeSizeSelected()
self.connect(self._themeField, QtCore.SIGNAL("currentIndexChanged(const QString&)"), self._themeSelected)
self.connect(self._themeSizeField, QtCore.SIGNAL("currentIndexChanged(const QString&)"), self._themeSizeSelected)
def _setupUI(self):
# Connection group
self._subdomainField = QtGui.QLineEdit(self)
self._usernameField = QtGui.QLineEdit(self)
self._passwordField = QtGui.QLineEdit(self)
self._passwordField.setEchoMode(QtGui.QLineEdit.Password)
self._sslField = QtGui.QCheckBox(self._mainFrame._("Use &secure connection (SSL)"), self)
self.connect(self._subdomainField, QtCore.SIGNAL('textChanged(QString)'), self.validate)
self.connect(self._usernameField, QtCore.SIGNAL('textChanged(QString)'), self.validate)
self.connect(self._passwordField, QtCore.SIGNAL('textChanged(QString)'), self.validate)
connectionGrid = QtGui.QGridLayout()
connectionGrid.addWidget(QtGui.QLabel(self._mainFrame._("Subdomain:"), self), 1, 0)
connectionGrid.addWidget(self._subdomainField, 1, 1)
connectionGrid.addWidget(QtGui.QLabel(self._mainFrame._("Username:"), self), 2, 0)
connectionGrid.addWidget(self._usernameField, 2, 1)
connectionGrid.addWidget(QtGui.QLabel(self._mainFrame._("Password:"), self), 3, 0)
connectionGrid.addWidget(self._passwordField, 3, 1)
connectionGrid.addWidget(self._sslField, 4, 0, 1, -1)
connectionGroupBox = QtGui.QGroupBox(self._mainFrame._("Campfire connection"))
connectionGroupBox.setLayout(connectionGrid)
# Program group
spellLanguages = {
"": self._mainFrame._("No spell check")
}
if SpellTextEditor.canSpell():
for language in SpellTextEditor.languages():
spellLanguages[language] = language
self._connectField = QtGui.QCheckBox(self._mainFrame._("Automatically &connect when program starts"), self)
self._joinField = QtGui.QCheckBox(self._mainFrame._("&Join last opened channels once connected"), self)
self._minimizeField = QtGui.QCheckBox(self._mainFrame._("&Minimize to system tray if window is minimized, or closed"), self)
self._spellLanguageField = QtGui.QComboBox(self)
spellLanguageBox = QtGui.QHBoxLayout()
spellLanguageBox.addWidget(QtGui.QLabel(self._mainFrame._("Spell checking:"), self))
spellLanguageBox.addWidget(self._spellLanguageField)
spellLanguageBox.addStretch(1)
programGrid = QtGui.QGridLayout()
programGrid.addWidget(self._connectField, 1, 0)
programGrid.addWidget(self._joinField, 2, 0)
programGrid.addWidget(self._minimizeField, 3, 0)
programGrid.addLayout(spellLanguageBox, 4, 0)
programGroupBox = QtGui.QGroupBox(self._mainFrame._("Program settings"))
programGroupBox.setLayout(programGrid)
if not SpellTextEditor.canSpell():
self._spellLanguageField.setEnabled(False)
# Away group
awayTimes = {
5: self._mainFrame._("5 minutes"),
10: self._mainFrame._("10 minutes"),
15: self._mainFrame._("15 minutes"),
30: self._mainFrame._("30 minutes"),
45: self._mainFrame._("45 minutes"),
60: self._mainFrame._("1 hour"),
90: self._mainFrame._("1 and a half hours"),
120: self._mainFrame._("2 hours")
}
awayBetweenTimes = {
2: self._mainFrame._("2 minutes"),
5: self._mainFrame._("5 minutes"),
10: self._mainFrame._("10 minutes"),
15: self._mainFrame._("15 minutes"),
30: self._mainFrame._("30 minutes"),
45: self._mainFrame._("45 minutes"),
60: self._mainFrame._("1 hour")
}
self._awayField = QtGui.QCheckBox(self._mainFrame._("Set me as &away after idle time"), self)
self._awayTimeField = QtGui.QComboBox(self)
self._awayMessageField = QtGui.QLineEdit(self)
self._awayTimeBetweenMessagesField = QtGui.QComboBox(self)
if IdleTimer.supported():
self.connect(self._awayField, QtCore.SIGNAL('stateChanged(int)'), self.validate)
self.connect(self._awayMessageField, QtCore.SIGNAL('textChanged(QString)'), self.validate)
else:
self._awayField.setEnabled(False)
awayTimeBox = QtGui.QHBoxLayout()
awayTimeBox.addWidget(QtGui.QLabel(self._mainFrame._("Idle Time:"), self))
awayTimeBox.addWidget(self._awayTimeField)
awayTimeBox.addWidget(QtGui.QLabel(self._mainFrame._("Wait"), self))
awayTimeBox.addWidget(self._awayTimeBetweenMessagesField)
awayTimeBox.addWidget(QtGui.QLabel(self._mainFrame._("before sending consecutive messages"), self))
awayTimeBox.addStretch(1)
awayMessageBox = QtGui.QHBoxLayout()
awayMessageBox.addWidget(QtGui.QLabel(self._mainFrame._("Message:"), self))
awayMessageBox.addWidget(self._awayMessageField)
awayBox = QtGui.QVBoxLayout()
awayBox.addWidget(self._awayField)
awayBox.addLayout(awayTimeBox)
awayBox.addLayout(awayMessageBox)
awayGroupBox = QtGui.QGroupBox(self._mainFrame._("Away mode"))
awayGroupBox.setLayout(awayBox)
# Theme group
self._themeField = QtGui.QComboBox(self)
self._themeSizeField = QtGui.QComboBox(self)
self._themePreview = QtWebKit.QWebView(self)
self._themePreview.setMaximumHeight(300)
themeSelectorBox = QtGui.QHBoxLayout()
themeSelectorBox.addWidget(QtGui.QLabel(self._mainFrame._("Theme:")))
themeSelectorBox.addWidget(self._themeField)
themeSelectorBox.addWidget(QtGui.QLabel(self._mainFrame._("Text size:")))
themeSelectorBox.addWidget(self._themeSizeField)
themeSelectorBox.addStretch(1)
themeSelectorBox.setContentsMargins(0, 0, 0, 0)
themeSelectorBox.setSpacing(5)
themeSelectorFrame = QtGui.QWidget()
themeSelectorFrame.setLayout(themeSelectorBox)
themeGrid = QtGui.QGridLayout()
themeGrid.addWidget(themeSelectorFrame, 1, 0)
themeGrid.addWidget(self._themePreview, 2, 0)
themeGroupBox = QtGui.QGroupBox(self._mainFrame._("Theme"))
themeGroupBox.setLayout(themeGrid)
# Events group
self._showJoinMessageField = QtGui.QCheckBox(self._mainFrame._("Show &join messages"), self)
self._showPartMessageField = QtGui.QCheckBox(self._mainFrame._("Show p&art messages"), self)
self._showMessageTimestampsField = QtGui.QCheckBox(self._mainFrame._("Show message &timestamps"), self)
eventsGrid = QtGui.QGridLayout()
eventsGrid.addWidget(self._showJoinMessageField, 1, 0)
eventsGrid.addWidget(self._showPartMessageField, 2, 0)
eventsGrid.addWidget(self._showMessageTimestampsField, 3, 0)
eventsGroupBox = QtGui.QGroupBox(self._mainFrame._("Display events"))
eventsGroupBox.setLayout(eventsGrid)
# Options tab
optionsBox = QtGui.QVBoxLayout()
optionsBox.addWidget(connectionGroupBox)
optionsBox.addWidget(programGroupBox)
optionsBox.addWidget(awayGroupBox)
optionsBox.addStretch(1)
optionsFrame = QtGui.QWidget()
optionsFrame.setLayout(optionsBox)
# Display tab
displayBox = QtGui.QVBoxLayout()
displayBox.addWidget(themeGroupBox)
displayBox.addWidget(eventsGroupBox)
displayBox.addStretch(1)
displayFrame = QtGui.QWidget()
displayFrame.setLayout(displayBox)
# Tabs
tabs = QtGui.QTabWidget()
tabs.setTabsClosable(False)
tabs.addTab(optionsFrame, self._mainFrame._("&Program options"))
tabs.addTab(displayFrame, self._mainFrame._("&Display options"))
# Buttons
self._okButton = QtGui.QPushButton(self._mainFrame._("&OK"), self)
self._cancelButton = QtGui.QPushButton(self._mainFrame._("&Cancel"), self)
self.connect(self._okButton, QtCore.SIGNAL('clicked()'), self.ok)
self.connect(self._cancelButton, QtCore.SIGNAL('clicked()'), self.cancel)
# Main layout
hbox = QtGui.QHBoxLayout()
hbox.addStretch(1)
hbox.addWidget(self._okButton)
hbox.addWidget(self._cancelButton)
vbox = QtGui.QVBoxLayout()
vbox.addWidget(tabs)
vbox.addLayout(hbox)
self.setLayout(vbox)
# Load settings
connectionSettings = self._mainFrame.getSettings("connection")
programSettings = self._mainFrame.getSettings("program")
displaySettings = self._mainFrame.getSettings("display")
self._subdomainField.setText(connectionSettings["subdomain"])
self._usernameField.setText(connectionSettings["user"])
if connectionSettings["password"]:
self._passwordField.setText(connectionSettings["password"])
self._sslField.setChecked(connectionSettings["ssl"])
self._connectField.setChecked(connectionSettings["connect"])
self._joinField.setChecked(connectionSettings["join"])
self._minimizeField.setChecked(programSettings["minimize"])
self._awayField.setChecked(programSettings["away"])
self._awayMessageField.setText(programSettings["away_message"])
self._showJoinMessageField.setChecked(displaySettings["show_join_message"])
self._showPartMessageField.setChecked(displaySettings["show_part_message"])
self._showMessageTimestampsField.setChecked(displaySettings["show_message_timestamps"])
self._setupThemesUI(displaySettings)
currentIndex = None
index = 0
spellLanguageKeys = spellLanguages.keys()
spellLanguageKeys.sort()
for value in spellLanguageKeys:
self._spellLanguageField.addItem(spellLanguages[value], value)
if value == programSettings["spell_language"]:
currentIndex = index
index += 1
if currentIndex is not None:
self._spellLanguageField.setCurrentIndex(currentIndex)
currentIndex = None
index = 0
awayTimeKeys = awayTimes.keys()
awayTimeKeys.sort()
for value in awayTimeKeys:
self._awayTimeField.addItem(awayTimes[value], value)
if value == int(programSettings["away_time"]):
currentIndex = index
index += 1
if currentIndex is not None:
self._awayTimeField.setCurrentIndex(currentIndex)
currentIndex = None
index = 0
awayBetweenTimeKeys = awayBetweenTimes.keys()
awayBetweenTimeKeys.sort()
for value in awayBetweenTimeKeys:
self._awayTimeBetweenMessagesField.addItem(awayBetweenTimes[value], value)
if value == int(programSettings["away_time_between_messages"]):
currentIndex = index
index += 1
if currentIndex is not None:
self._awayTimeBetweenMessagesField.setCurrentIndex(currentIndex)
self.validate()
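# A minimal usage sketch (the mainFrame argument is assumed to be the
# application's main window, providing _() for translations, NAME/DOMAIN/
# VERSION attributes and the getSettings()/setSettings() API used above):
#   dialog = OptionsDialog(mainFrame)
#   dialog.exec_()   # changes are saved by the dialog itself via _save() on OK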
|
|
# coding: utf-8
"""
Copyright 2015 SmartBear Software
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
Ref: https://github.com/swagger-api/swagger-codegen
"""
from pprint import pformat
from six import iteritems
class PathStateChanged(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
def __init__(self):
"""
PathStateChanged - a model defined in Swagger
:param dict swaggerTypes: The key is attribute name
and the value is attribute type.
:param dict attributeMap: The key is attribute name
and the value is json key in definition.
"""
self.swagger_types = {
'sys_time': 'datetime',
'endpoint_a': 'str',
'endpoint_b': 'str',
'state': 'str',
'parent': 'str',
'type': 'str'
}
self.attribute_map = {
'sys_time': 'sysTime',
'endpoint_a': 'endpointA',
'endpoint_b': 'endpointB',
'state': 'state',
'parent': 'parent',
'type': 'type'
}
self._sys_time = None
self._endpoint_a = None
self._endpoint_b = None
self._state = None
self._parent = None
self._type = None
@property
def sys_time(self):
"""
Gets the sys_time of this PathStateChanged.
Time of notification
:return: The sys_time of this PathStateChanged.
:rtype: datetime
"""
return self._sys_time
@sys_time.setter
def sys_time(self, sys_time):
"""
Sets the sys_time of this PathStateChanged.
Time of notification
:param sys_time: The sys_time of this PathStateChanged.
:type: datetime
"""
self._sys_time = sys_time
@property
def endpoint_a(self):
"""
Gets the endpoint_a of this PathStateChanged.
One side of the path
:return: The endpoint_a of this PathStateChanged.
:rtype: str
"""
return self._endpoint_a
@endpoint_a.setter
def endpoint_a(self, endpoint_a):
"""
Sets the endpoint_a of this PathStateChanged.
One side of the path
:param endpoint_a: The endpoint_a of this PathStateChanged.
:type: str
"""
self._endpoint_a = endpoint_a
@property
def endpoint_b(self):
"""
Gets the endpoint_b of this PathStateChanged.
The other side of the path
:return: The endpoint_b of this PathStateChanged.
:rtype: str
"""
return self._endpoint_b
@endpoint_b.setter
def endpoint_b(self, endpoint_b):
"""
Sets the endpoint_b of this PathStateChanged.
The other side of the path
:param endpoint_b: The endpoint_b of this PathStateChanged.
:type: str
"""
self._endpoint_b = endpoint_b
@property
def state(self):
"""
Gets the state of this PathStateChanged.
New state of the path
:return: The state of this PathStateChanged.
:rtype: str
"""
return self._state
@state.setter
def state(self, state):
"""
Sets the state of this PathStateChanged.
New state of the path
:param state: The state of this PathStateChanged.
:type: str
"""
allowed_values = ["inactive", "active", "created", "deleted"]
if state not in allowed_values:
raise ValueError(
"Invalid value for `state`, must be one of {0}"
.format(allowed_values)
)
self._state = state
@property
def parent(self):
"""
Gets the parent of this PathStateChanged.
The MAC address of the parent side of the path. This field only has a value for path activated notifications.
:return: The parent of this PathStateChanged.
:rtype: str
"""
return self._parent
@parent.setter
def parent(self, parent):
"""
Sets the parent of this PathStateChanged.
The MAC address of the parent side of the path. This field only has a value for path activated notifications.
:param parent: The parent of this PathStateChanged.
:type: str
"""
self._parent = parent
@property
def type(self):
"""
Gets the type of this PathStateChanged.
Notification type
:return: The type of this PathStateChanged.
:rtype: str
"""
return self._type
@type.setter
def type(self, type):
"""
Sets the type of this PathStateChanged.
Notification type
:param type: The type of this PathStateChanged.
:type: str
"""
allowed_values = ["netStarted", "pathStateChanged", "pathAlert", "moteStateChanged", "joinFailed", "pingResponse", "invalidMIC", "dataPacketReceived", "ipPacketReceived", "packetSent", "cmdFinished", "configChanged", "configLoaded", "alarmOpened", "alarmClosed", "deviceHealthReport", "neighborHealthReport", "discoveryHealthReport", "rawMoteNotification", "serviceChanged", "apStateChanged", "managerStarted", "managerStopping", "optPhase", "pathAlert", "moteTrace", "frameCapacity", "apGpsSyncChanged"]
if type not in allowed_values:
raise ValueError(
"Invalid value for `type`, must be one of {0}"
.format(allowed_values)
)
self._type = type
def to_dict(self):
"""
Returns the model properties as a dict
"""
result = {}
for attr, _ in iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
else:
result[attr] = value
return result
def to_str(self):
"""
Returns the string representation of the model
"""
return pformat(self.to_dict())
def __repr__(self):
"""
For `print` and `pprint`
"""
return self.to_str()
def __eq__(self, other):
"""
Returns true if both objects are equal
"""
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""
Returns true if both objects are not equal
"""
return not self == other
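# A minimal usage sketch (field values below are illustrative only):
#   event = PathStateChanged()
#   event.endpoint_a = '00-17-0D-00-00-38-00-01'
#   event.endpoint_b = '00-17-0D-00-00-38-00-02'
#   event.state = 'active'           # validated against allowed_values
#   event.type = 'pathStateChanged'  # likewise validated
#   print(event.to_dict())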
|
|
# Copyright (c) 2001-2008 Twisted Matrix Laboratories.
# See LICENSE for details.
import pickle, time, weakref, gc
from twisted.trial import unittest, util
from twisted.python import threadable, failure, context
from twisted.internet import reactor, interfaces
from twisted.internet.defer import Deferred
#
# See the end of this module for the remainder of the imports.
#
class Synchronization(object):
failures = 0
def __init__(self, N, waiting):
self.N = N
self.waiting = waiting
self.lock = threading.Lock()
self.runs = []
def run(self):
# This is the testy part: this is supposed to be invoked
# serially from multiple threads. If that is actually the
# case, we will never fail to acquire this lock. If it is
# *not* the case, we might get here while someone else is
# holding the lock.
if self.lock.acquire(False):
if not len(self.runs) % 5:
time.sleep(0.0002) # Constant selected based on
# empirical data to maximize the
# chance of a quick failure if this
# code is broken.
self.lock.release()
else:
self.failures += 1
# This is just the only way I can think of to wake up the test
# method. It doesn't actually have anything to do with the
# test.
self.lock.acquire()
self.runs.append(None)
if len(self.runs) == self.N:
self.waiting.release()
self.lock.release()
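# The methods listed in `synchronized` are wrapped by
# threadable.synchronize() below with a class-wide lock, so run() is
# the method whose serialization this test exercises.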
synchronized = ["run"]
threadable.synchronize(Synchronization)
class ThreadPoolTestCase(unittest.TestCase):
"""
Test threadpools.
"""
def _waitForLock(self, lock):
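# Poll the lock in a busy-wait loop (up to one million short sleeps,
# roughly ten seconds in total) instead of blocking, so a hung pool
# fails the test rather than hanging it.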
for i in xrange(1000000):
if lock.acquire(False):
break
time.sleep(1e-5)
else:
self.fail("A long time passed without succeeding")
def test_attributes(self):
"""
L{ThreadPool.min} and L{ThreadPool.max} are set to the values passed to
L{ThreadPool.__init__}.
"""
pool = threadpool.ThreadPool(12, 22)
self.assertEqual(pool.min, 12)
self.assertEqual(pool.max, 22)
def test_start(self):
"""
L{ThreadPool.start} creates the minimum number of threads specified.
"""
pool = threadpool.ThreadPool(0, 5)
pool.start()
self.addCleanup(pool.stop)
self.assertEqual(len(pool.threads), 0)
pool = threadpool.ThreadPool(3, 10)
self.assertEqual(len(pool.threads), 0)
pool.start()
self.addCleanup(pool.stop)
self.assertEqual(len(pool.threads), 3)
def test_threadCreationArguments(self):
"""
Test that creating threads in the threadpool with application-level
objects as arguments doesn't result in those objects never being
freed, with the thread maintaining a reference to them as long as it
exists.
"""
tp = threadpool.ThreadPool(0, 1)
tp.start()
self.addCleanup(tp.stop)
# Sanity check - no threads should have been started yet.
self.assertEqual(tp.threads, [])
# Here's our function
def worker(arg):
pass
# weakref needs an object subclass
class Dumb(object):
pass
# And here's the unique object
unique = Dumb()
workerRef = weakref.ref(worker)
uniqueRef = weakref.ref(unique)
# Put some work in
tp.callInThread(worker, unique)
# Add an event to wait for completion
event = threading.Event()
tp.callInThread(event.set)
event.wait(self.getTimeout())
del worker
del unique
gc.collect()
self.assertEquals(uniqueRef(), None)
self.assertEquals(workerRef(), None)
def test_threadCreationArgumentsCallInThreadWithCallback(self):
"""
As C{test_threadCreationArguments} above, but for
callInThreadWithCallback.
"""
tp = threadpool.ThreadPool(0, 1)
tp.start()
self.addCleanup(tp.stop)
# Sanity check - no threads should have been started yet.
self.assertEqual(tp.threads, [])
# this holds references obtained in onResult
refdict = {} # name -> ref value
onResultWait = threading.Event()
onResultDone = threading.Event()
resultRef = []
# result callback
def onResult(success, result):
onResultWait.wait(self.getTimeout())
refdict['workerRef'] = workerRef()
refdict['uniqueRef'] = uniqueRef()
onResultDone.set()
resultRef.append(weakref.ref(result))
# Here's our function
def worker(arg, test):
return Dumb()
# weakref needs an object subclass
class Dumb(object):
pass
# And here's the unique object
unique = Dumb()
onResultRef = weakref.ref(onResult)
workerRef = weakref.ref(worker)
uniqueRef = weakref.ref(unique)
# Put some work in
tp.callInThreadWithCallback(onResult, worker, unique, test=unique)
del worker
del unique
gc.collect()
# let onResult collect the refs
onResultWait.set()
# wait for onResult
onResultDone.wait(self.getTimeout())
self.assertEquals(uniqueRef(), None)
self.assertEquals(workerRef(), None)
# XXX There's a race right here - has onResult in the worker thread
# returned and the locals in _worker holding it and the result been
# deleted yet?
del onResult
gc.collect()
self.assertEqual(onResultRef(), None)
self.assertEqual(resultRef[0](), None)
def test_persistence(self):
"""
Threadpools can be pickled and unpickled, which should preserve the
number of threads and other parameters.
"""
pool = threadpool.ThreadPool(7, 20)
self.assertEquals(pool.min, 7)
self.assertEquals(pool.max, 20)
# check that unpickled threadpool has same number of threads
copy = pickle.loads(pickle.dumps(pool))
self.assertEquals(copy.min, 7)
self.assertEquals(copy.max, 20)
def _threadpoolTest(self, method):
"""
Test synchronization of calls made with C{method}, which should be
one of the mechanisms of the threadpool to execute work in threads.
"""
# This is a schizophrenic test: it seems to be trying to test
# both the callInThread()/dispatch() behavior of the ThreadPool as well
# as the serialization behavior of threadable.synchronize(). It
# would probably make more sense as two much simpler tests.
N = 10
tp = threadpool.ThreadPool()
tp.start()
self.addCleanup(tp.stop)
waiting = threading.Lock()
waiting.acquire()
actor = Synchronization(N, waiting)
for i in xrange(N):
method(tp, actor)
self._waitForLock(waiting)
self.failIf(actor.failures, "run() re-entered %d times" %
(actor.failures,))
def test_dispatch(self):
"""
Call C{_threadpoolTest} with C{dispatch}.
"""
return self._threadpoolTest(
lambda tp, actor: tp.dispatch(actor, actor.run))
test_dispatch.suppress = [util.suppress(
message="dispatch\(\) is deprecated since Twisted 8.0, "
"use callInThread\(\) instead",
category=DeprecationWarning)]
def test_callInThread(self):
"""
Call C{_threadpoolTest} with C{callInThread}.
"""
return self._threadpoolTest(
lambda tp, actor: tp.callInThread(actor.run))
def test_callInThreadException(self):
"""
L{ThreadPool.callInThread} logs exceptions raised by the callable it
is passed.
"""
class NewError(Exception):
pass
def raiseError():
raise NewError()
tp = threadpool.ThreadPool(0, 1)
tp.callInThread(raiseError)
tp.start()
tp.stop()
errors = self.flushLoggedErrors(NewError)
self.assertEqual(len(errors), 1)
def test_callInThreadWithCallback(self):
"""
L{ThreadPool.callInThreadWithCallback} calls C{onResult} with a
two-tuple of C{(True, result)} where C{result} is the value returned
by the callable supplied.
"""
waiter = threading.Lock()
waiter.acquire()
results = []
def onResult(success, result):
waiter.release()
results.append(success)
results.append(result)
tp = threadpool.ThreadPool(0, 1)
tp.callInThreadWithCallback(onResult, lambda : "test")
tp.start()
try:
self._waitForLock(waiter)
finally:
tp.stop()
self.assertTrue(results[0])
self.assertEqual(results[1], "test")
def test_callInThreadWithCallbackExceptionInCallback(self):
"""
L{ThreadPool.callInThreadWithCallback} calls C{onResult} with a
two-tuple of C{(False, failure)} where C{failure} represents the
exception raised by the callable supplied.
"""
class NewError(Exception):
pass
def raiseError():
raise NewError()
waiter = threading.Lock()
waiter.acquire()
results = []
def onResult(success, result):
waiter.release()
results.append(success)
results.append(result)
tp = threadpool.ThreadPool(0, 1)
tp.callInThreadWithCallback(onResult, raiseError)
tp.start()
try:
self._waitForLock(waiter)
finally:
tp.stop()
self.assertFalse(results[0])
self.assertTrue(isinstance(results[1], failure.Failure))
self.assertTrue(issubclass(results[1].type, NewError))
def test_callInThreadWithCallbackExceptionInOnResult(self):
"""
L{ThreadPool.callInThreadWithCallback} logs the exception raised by
C{onResult}.
"""
class NewError(Exception):
pass
waiter = threading.Lock()
waiter.acquire()
results = []
def onResult(success, result):
results.append(success)
results.append(result)
raise NewError()
tp = threadpool.ThreadPool(0, 1)
tp.callInThreadWithCallback(onResult, lambda : None)
tp.callInThread(waiter.release)
tp.start()
try:
self._waitForLock(waiter)
finally:
tp.stop()
errors = self.flushLoggedErrors(NewError)
self.assertEqual(len(errors), 1)
self.assertTrue(results[0])
self.assertEqual(results[1], None)
def test_callbackThread(self):
"""
L{ThreadPool.callInThreadWithCallback} calls the function it is
given and the C{onResult} callback in the same thread.
"""
threadIds = []
import thread
event = threading.Event()
def onResult(success, result):
threadIds.append(thread.get_ident())
event.set()
def func():
threadIds.append(thread.get_ident())
tp = threadpool.ThreadPool(0, 1)
tp.callInThreadWithCallback(onResult, func)
tp.start()
self.addCleanup(tp.stop)
event.wait(self.getTimeout())
self.assertEqual(len(threadIds), 2)
self.assertEqual(threadIds[0], threadIds[1])
def test_callbackContext(self):
"""
The context L{ThreadPool.callInThreadWithCallback} is invoked in is
shared by the context the callable and C{onResult} callback are
invoked in.
"""
myctx = context.theContextTracker.currentContext().contexts[-1]
myctx['testing'] = 'this must be present'
contexts = []
event = threading.Event()
def onResult(success, result):
ctx = context.theContextTracker.currentContext().contexts[-1]
contexts.append(ctx)
event.set()
def func():
ctx = context.theContextTracker.currentContext().contexts[-1]
contexts.append(ctx)
tp = threadpool.ThreadPool(0, 1)
tp.callInThreadWithCallback(onResult, func)
tp.start()
self.addCleanup(tp.stop)
event.wait(self.getTimeout())
self.assertEqual(len(contexts), 2)
self.assertEqual(myctx, contexts[0])
self.assertEqual(myctx, contexts[1])
def test_existingWork(self):
"""
Work added to the threadpool before its start should be executed once
the threadpool is started: this is ensured by trying to release a lock
previously acquired.
"""
waiter = threading.Lock()
waiter.acquire()
tp = threadpool.ThreadPool(0, 1)
tp.callInThread(waiter.release) # before start()
tp.start()
try:
self._waitForLock(waiter)
finally:
tp.stop()
def test_dispatchDeprecation(self):
"""
Test for the deprecation of the dispatch method.
"""
tp = threadpool.ThreadPool()
tp.start()
self.addCleanup(tp.stop)
def cb():
return tp.dispatch(None, lambda: None)
self.assertWarns(DeprecationWarning,
"dispatch() is deprecated since Twisted 8.0, "
"use callInThread() instead",
__file__, cb)
def test_dispatchWithCallbackDeprecation(self):
"""
Test for the deprecation of the dispatchWithCallback method.
"""
tp = threadpool.ThreadPool()
tp.start()
self.addCleanup(tp.stop)
def cb():
return tp.dispatchWithCallback(
None,
lambda x: None,
lambda x: None,
lambda: None)
self.assertWarns(DeprecationWarning,
"dispatchWithCallback() is deprecated since Twisted 8.0, "
"use twisted.internet.threads.deferToThread() instead.",
__file__, cb)
class RaceConditionTestCase(unittest.TestCase):
def setUp(self):
self.event = threading.Event()
self.threadpool = threadpool.ThreadPool(0, 10)
self.threadpool.start()
def tearDown(self):
del self.event
self.threadpool.stop()
del self.threadpool
def test_synchronization(self):
"""
Test a race condition: ensure that actions run in the pool synchronize
with actions run in the main thread.
"""
timeout = self.getTimeout()
self.threadpool.callInThread(self.event.set)
self.event.wait(timeout)
self.event.clear()
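# Queue three workers that block on the event and a fourth that sets
# it; if the pool keeps dispatching work, the setter runs and releases
# the waiters before the timeout below expires.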
for i in range(3):
self.threadpool.callInThread(self.event.wait)
self.threadpool.callInThread(self.event.set)
self.event.wait(timeout)
if not self.event.isSet():
self.event.set()
self.fail("Actions not synchronized")
def test_singleThread(self):
"""
The submission of a new job to a thread pool in response to the
C{onResult} callback does not cause a new thread to be added to the
thread pool.
This requires that the thread which calls C{onResult} has first
marked itself as available so that when the new job is queued, that
thread may be considered to run it. This is desirable so that when
only N jobs are ever being executed in the thread pool at once, only
N threads will ever be created.
"""
# Ensure no threads running
self.assertEquals(self.threadpool.workers, 0)
loopDeferred = Deferred()
def onResult(success, counter):
reactor.callFromThread(submit, counter)
def submit(counter):
if counter:
self.threadpool.callInThreadWithCallback(
onResult, lambda: counter - 1)
else:
loopDeferred.callback(None)
def cbLoop(ignored):
# Ensure there is only one thread running.
self.assertEqual(self.threadpool.workers, 1)
loopDeferred.addCallback(cbLoop)
submit(10)
return loopDeferred
if interfaces.IReactorThreads(reactor, None) is None:
for cls in ThreadPoolTestCase, RaceConditionTestCase:
setattr(cls, 'skip', "No thread support, nothing to test here")
else:
import threading
from twisted.python import threadpool
|
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tensorflow.ops.resource_variable_ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import copy
import gc
import os
import pickle
import numpy as np
from tensorflow.core.framework import tensor_pb2
from tensorflow.python.eager import context
from tensorflow.python.eager import def_function
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import cpp_shape_inference_pb2
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import tensor_util
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import custom_gradient
from tensorflow.python.ops import gradients_impl
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import list_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.ops import state_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
from tensorflow.python.training import momentum
from tensorflow.python.training import saver
from tensorflow.python.training import training_util
from tensorflow.python.util import compat
class ResourceVariableOpsTest(test_util.TensorFlowTestCase):
def tearDown(self):
gc.collect()
# This will only contain uncollectable garbage, i.e. reference cycles
# involving objects with __del__ defined.
self.assertEqual(0, len(gc.garbage))
@test_util.run_deprecated_v1
def testHandleDtypeShapeMatch(self):
with self.cached_session():
handle = resource_variable_ops.var_handle_op(dtype=dtypes.int32, shape=[])
with self.assertRaises(ValueError):
resource_variable_ops.assign_variable_op(
handle, constant_op.constant(0.0, dtype=dtypes.float32)).run()
with self.assertRaises(ValueError):
resource_variable_ops.assign_variable_op(handle,
constant_op.constant(
[0],
dtype=dtypes.int32)).run()
resource_variable_ops.assign_variable_op(handle,
constant_op.constant(
0,
dtype=dtypes.int32)).run()
def testGPUInt64(self):
if not context.context().num_gpus():
return
with context.eager_mode(), context.device("gpu:0"):
v = resource_variable_ops.ResourceVariable(1, dtype=dtypes.int64)
self.assertAllEqual(1, v.numpy())
def testEagerNameNotIdentity(self):
with context.eager_mode():
v0 = resource_variable_ops.ResourceVariable(1.0, name="a")
v1 = resource_variable_ops.ResourceVariable(2.0, name="a")
self.assertAllEqual(v0.numpy(), 1.0)
self.assertAllEqual(v1.numpy(), 2.0)
def testEagerNameNotNeeded(self):
with context.eager_mode():
v0 = resource_variable_ops.ResourceVariable(1.0)
self.assertAllEqual(v0.numpy(), 1.0)
def testReadVariableDtypeMismatchEager(self):
with context.eager_mode():
handle = resource_variable_ops.var_handle_op(
dtype=dtypes.int32, shape=[1], name="foo")
resource_variable_ops.assign_variable_op(handle, 1)
with self.assertRaisesRegexp(errors.InvalidArgumentError,
"Trying to read variable with wrong dtype. "
"Expected float got int32."):
_ = resource_variable_ops.read_variable_op(handle, dtype=dtypes.float32)
def testEagerInitializedValue(self):
with context.eager_mode():
variable = resource_variable_ops.ResourceVariable(1.0, name="eager-init")
self.assertAllEqual(variable.numpy(), 1.0)
self.assertAllEqual(variable.initialized_value().numpy(), 1.0)
def testEagerBool(self):
with context.eager_mode():
v = resource_variable_ops.ResourceVariable(False, name="bool_test")
self.assertAllEqual(bool(v), False)
def testEagerDeepCopy(self):
with context.eager_mode():
init_value = np.ones((4, 4, 4))
variable = resource_variable_ops.ResourceVariable(init_value,
name="init")
copied_variable = copy.deepcopy(variable)
copied_variable.assign(4 * np.ones((4, 4, 4)))
# Copying the variable should create a new underlying tensor with distinct
# values.
self.assertFalse(np.allclose(variable.numpy(), copied_variable.numpy()))
@test_util.run_deprecated_v1
def testGraphDeepCopy(self):
with self.cached_session():
init_value = np.ones((4, 4, 4))
variable = resource_variable_ops.ResourceVariable(init_value,
name="init")
with self.assertRaises(NotImplementedError):
copy.deepcopy(variable)
@test_util.run_in_graph_and_eager_modes
def testStridedSliceAssign(self):
v = resource_variable_ops.ResourceVariable([1.0, 2.0])
self.evaluate(variables.global_variables_initializer())
self.evaluate(v[0].assign(2.0))
self.assertAllEqual(self.evaluate(v), [2.0, 2.0])
@test_util.run_in_graph_and_eager_modes
def testVariableShape(self):
v = resource_variable_ops.ResourceVariable([1., 1.])
self.assertAllEqual(
tensor_util.constant_value(
resource_variable_ops.variable_shape(v.handle)),
[2])
@test_util.run_deprecated_v1
def testDifferentAssignGraph(self):
with ops.Graph().as_default():
v = resource_variable_ops.ResourceVariable(1.0)
ops.reset_default_graph()
v.assign(2.0)  # Note: this fails if we run convert_to_tensor on a graph
# other than the variable's graph.
@test_util.run_deprecated_v1
def testFetchHandle(self):
with self.cached_session():
handle = resource_variable_ops.var_handle_op(
dtype=dtypes.int32, shape=[1], name="foo")
self.assertGreater(len(handle.eval()), 0)
@test_util.run_deprecated_v1
def testCachedValueReadBeforeWrite(self):
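# Both the cached read and the assign_add run in a single Session.run
# call; the read is expected to observe the value cached before the
# increment (0.0), not the incremented value.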
with self.cached_session() as sess:
v = resource_variable_ops.ResourceVariable(0.0, caching_device="cpu:0")
self.evaluate(v.initializer)
value, _ = sess.run([v, v.assign_add(1.0)])
self.assertAllEqual(value, 0.0)
def testAssignVariableDtypeMismatchEager(self):
with context.eager_mode():
handle = resource_variable_ops.var_handle_op(
dtype=dtypes.int32, shape=[1], name="foo")
resource_variable_ops.assign_variable_op(
handle, constant_op.constant([1]))
with self.assertRaisesRegexp(errors.InvalidArgumentError,
"Trying to assign variable with wrong "
"dtype. Expected int32 got float."):
resource_variable_ops.assign_variable_op(
handle, constant_op.constant([1.], dtype=dtypes.float32))
def testUnprintableHandle(self):
with context.eager_mode():
handle = resource_variable_ops.var_handle_op(
dtype=dtypes.int32, shape=[1], name="foo")
self.assertIn("<unprintable>", str(handle))
self.assertIn("<unprintable>", repr(handle))
@test_util.run_in_graph_and_eager_modes
def testDtypeSurvivesIdentity(self):
handle = resource_variable_ops.var_handle_op(dtype=dtypes.int32, shape=[])
id_handle = array_ops.identity(handle)
self.evaluate(resource_variable_ops.assign_variable_op(
id_handle, constant_op.constant(0, dtype=dtypes.int32)))
def testUnreadOpName(self):
v = resource_variable_ops.ResourceVariable(1.0)
self.assertNotEqual(v.name, v.assign_add(1.0).name)
@test_util.run_in_graph_and_eager_modes
def testCreateRead(self):
handle = resource_variable_ops.var_handle_op(dtype=dtypes.int32, shape=[])
self.evaluate(resource_variable_ops.assign_variable_op(
handle, constant_op.constant(1, dtype=dtypes.int32)))
value = self.evaluate(
resource_variable_ops.read_variable_op(handle, dtype=dtypes.int32))
self.assertAllEqual(1, value)
@test_util.run_in_graph_and_eager_modes
def testManyAssigns(self):
handle = resource_variable_ops.var_handle_op(dtype=dtypes.int32, shape=[])
create = resource_variable_ops.assign_variable_op(
handle, constant_op.constant(1, dtype=dtypes.int32))
with ops.control_dependencies([create]):
first_read = resource_variable_ops.read_variable_op(
handle, dtype=dtypes.int32)
with ops.control_dependencies([first_read]):
write = resource_variable_ops.assign_variable_op(
handle, constant_op.constant(2, dtype=dtypes.int32))
with ops.control_dependencies([write]):
second_read = resource_variable_ops.read_variable_op(
handle, dtype=dtypes.int32)
f, s = self.evaluate([first_read, second_read])
self.assertEqual(f, 1)
self.assertEqual(s, 2)
@test_util.run_in_graph_and_eager_modes
def testAssignAdd(self):
handle = resource_variable_ops.var_handle_op(dtype=dtypes.int32, shape=[])
self.evaluate(resource_variable_ops.assign_variable_op(
handle, constant_op.constant(1, dtype=dtypes.int32)))
self.evaluate(resource_variable_ops.assign_add_variable_op(
handle, constant_op.constant(1, dtype=dtypes.int32)))
read = self.evaluate(
resource_variable_ops.read_variable_op(handle, dtype=dtypes.int32))
self.assertEqual(read, 2)
@test_util.run_in_graph_and_eager_modes
def testScatterAdd(self):
handle = resource_variable_ops.var_handle_op(
dtype=dtypes.int32, shape=[1, 1])
self.evaluate(
resource_variable_ops.assign_variable_op(
handle, constant_op.constant([[1]], dtype=dtypes.int32)))
self.evaluate(
resource_variable_ops.resource_scatter_add(
handle, [0], constant_op.constant([[2]], dtype=dtypes.int32)))
read = resource_variable_ops.read_variable_op(handle, dtype=dtypes.int32)
self.assertEqual(self.evaluate(read), [[3]])
@test_util.run_in_graph_and_eager_modes
def testScatterSub(self):
handle = resource_variable_ops.var_handle_op(
dtype=dtypes.int32, shape=[1, 1])
self.evaluate(
resource_variable_ops.assign_variable_op(
handle, constant_op.constant([[1]], dtype=dtypes.int32)))
self.evaluate(
resource_variable_ops.resource_scatter_sub(
handle, [0], constant_op.constant([[2]], dtype=dtypes.int32)))
read = resource_variable_ops.read_variable_op(handle, dtype=dtypes.int32)
self.assertEqual(self.evaluate(read), [[-1]])
@test_util.run_in_graph_and_eager_modes
def testScatterMul(self):
handle = resource_variable_ops.var_handle_op(
dtype=dtypes.int32, shape=[1, 1])
self.evaluate(
resource_variable_ops.assign_variable_op(
handle, constant_op.constant([[1]], dtype=dtypes.int32)))
self.evaluate(
resource_variable_ops.resource_scatter_mul(
handle, [0], constant_op.constant([[5]], dtype=dtypes.int32)))
read = resource_variable_ops.read_variable_op(handle, dtype=dtypes.int32)
self.assertEqual(self.evaluate(read), [[5]])
def testEagerPickle(self):
with context.eager_mode():
tmp_dir = self.get_temp_dir()
fname = os.path.join(tmp_dir, "var.pickle")
with open(fname, "wb") as f:
v = resource_variable_ops.ResourceVariable(
10.0,
dtype=dtypes.float16,
name="v")
pickle.dump(v, f)
with open(fname, "rb") as f:
new_v = pickle.load(f)
self.assertEqual(new_v.name, v.name)
self.assertEqual(new_v.shape, v.shape)
self.assertEqual(new_v.dtype, v.dtype)
self.assertEqual(new_v.trainable, v.trainable)
self.assertAllEqual(new_v.numpy(), v.numpy())
@test_util.run_in_graph_and_eager_modes
def testScatterDiv(self):
handle = resource_variable_ops.var_handle_op(
dtype=dtypes.int32, shape=[1, 1])
self.evaluate(
resource_variable_ops.assign_variable_op(
handle, constant_op.constant([[6]], dtype=dtypes.int32)))
self.evaluate(
resource_variable_ops.resource_scatter_div(
handle, [0], constant_op.constant([[3]], dtype=dtypes.int32)))
read = resource_variable_ops.read_variable_op(handle, dtype=dtypes.int32)
self.assertEqual(self.evaluate(read), [[2]])
def testUseResource(self):
v = variables.VariableV1(1.0, use_resource=True)
self.assertTrue(isinstance(v, resource_variable_ops.ResourceVariable))
def testEagerNoUseResource(self):
with context.eager_mode():
v = variables.Variable(1.0)
self.assertTrue(isinstance(v, resource_variable_ops.ResourceVariable))
@test_util.run_in_graph_and_eager_modes
def testScatterMin(self):
with ops.device("cpu:0"):
handle = resource_variable_ops.var_handle_op(
dtype=dtypes.int32, shape=[1, 1])
self.evaluate(
resource_variable_ops.assign_variable_op(handle,
constant_op.constant(
[[6]],
dtype=dtypes.int32)))
self.evaluate(
resource_variable_ops.resource_scatter_min(handle, [0],
constant_op.constant(
[[3]],
dtype=dtypes.int32)))
read = resource_variable_ops.read_variable_op(handle, dtype=dtypes.int32)
self.assertEqual(self.evaluate(read), [[3]])
def testMetagraph(self):
with ops.Graph().as_default():
with variable_scope.variable_scope("foo", use_resource=True):
a = variable_scope.get_variable("a", initializer=10.0)
momentum.MomentumOptimizer(
learning_rate=0.001, momentum=0.1).minimize(
a,
colocate_gradients_with_ops=True,
global_step=training_util.get_or_create_global_step())
graph = ops.get_default_graph()
meta_graph_def = saver.export_meta_graph(graph=graph)
with ops.Graph().as_default():
saver.import_meta_graph(meta_graph_def, import_scope="")
meta_graph_two = saver.export_meta_graph(graph=graph)
self.assertEqual(meta_graph_def, meta_graph_two)
@test_util.run_in_graph_and_eager_modes
def testScatterMax(self):
handle = resource_variable_ops.var_handle_op(
dtype=dtypes.int32, shape=[1, 1])
self.evaluate(
resource_variable_ops.assign_variable_op(
handle, constant_op.constant([[6]], dtype=dtypes.int32)))
self.evaluate(
resource_variable_ops.resource_scatter_max(
handle, [0], constant_op.constant([[3]], dtype=dtypes.int32)))
read = resource_variable_ops.read_variable_op(handle, dtype=dtypes.int32)
self.assertEqual(self.evaluate(read), [[6]])
@test_util.run_in_graph_and_eager_modes
def testScatterAddScalar(self):
handle = resource_variable_ops.var_handle_op(
dtype=dtypes.int32, shape=[1, 1])
self.evaluate(
resource_variable_ops.assign_variable_op(
handle, constant_op.constant([[1]], dtype=dtypes.int32)))
self.evaluate(
resource_variable_ops.resource_scatter_add(
handle, [0], constant_op.constant(2, dtype=dtypes.int32)))
read = resource_variable_ops.read_variable_op(handle, dtype=dtypes.int32)
self.assertEqual(self.evaluate(read), [[3]])
@test_util.run_in_graph_and_eager_modes
def testScatterSubScalar(self):
handle = resource_variable_ops.var_handle_op(
dtype=dtypes.int32, shape=[1, 1])
self.evaluate(
resource_variable_ops.assign_variable_op(
handle, constant_op.constant([[1]], dtype=dtypes.int32)))
self.evaluate(
resource_variable_ops.resource_scatter_sub(
handle, [0], constant_op.constant(2, dtype=dtypes.int32)))
read = resource_variable_ops.read_variable_op(handle, dtype=dtypes.int32)
self.assertEqual(self.evaluate(read), [[-1]])
@test_util.run_in_graph_and_eager_modes
def testScatterMulScalar(self):
handle = resource_variable_ops.var_handle_op(
dtype=dtypes.int32, shape=[1, 1])
self.evaluate(
resource_variable_ops.assign_variable_op(
handle, constant_op.constant([[1]], dtype=dtypes.int32)))
self.evaluate(
resource_variable_ops.resource_scatter_mul(
handle, [0], constant_op.constant(5, dtype=dtypes.int32)))
read = resource_variable_ops.read_variable_op(handle, dtype=dtypes.int32)
self.assertEqual(self.evaluate(read), [[5]])
@test_util.run_in_graph_and_eager_modes
def testScatterDivScalar(self):
handle = resource_variable_ops.var_handle_op(
dtype=dtypes.int32, shape=[1, 1])
self.evaluate(
resource_variable_ops.assign_variable_op(
handle, constant_op.constant([[6]], dtype=dtypes.int32)))
self.evaluate(
resource_variable_ops.resource_scatter_div(
handle, [0], constant_op.constant(3, dtype=dtypes.int32)))
read = resource_variable_ops.read_variable_op(handle, dtype=dtypes.int32)
self.assertEqual(self.evaluate(read), [[2]])
@test_util.run_in_graph_and_eager_modes
def testScatterMinScalar(self):
handle = resource_variable_ops.var_handle_op(
dtype=dtypes.int32, shape=[1, 1])
self.evaluate(
resource_variable_ops.assign_variable_op(
handle, constant_op.constant([[6]], dtype=dtypes.int32)))
self.evaluate(
resource_variable_ops.resource_scatter_min(
handle, [0], constant_op.constant(3, dtype=dtypes.int32)))
read = resource_variable_ops.read_variable_op(handle, dtype=dtypes.int32)
self.assertEqual(self.evaluate(read), [[3]])
@test_util.run_in_graph_and_eager_modes
def testScatterMaxScalar(self):
handle = resource_variable_ops.var_handle_op(
dtype=dtypes.int32, shape=[1, 1])
self.evaluate(
resource_variable_ops.assign_variable_op(
handle, constant_op.constant([[6]], dtype=dtypes.int32)))
self.evaluate(
resource_variable_ops.resource_scatter_max(
handle, [0], constant_op.constant(3, dtype=dtypes.int32)))
read = resource_variable_ops.read_variable_op(handle, dtype=dtypes.int32)
self.assertEqual(self.evaluate(read), [[6]])
@test_util.run_deprecated_v1
def testScatterUpdateString(self):
handle = resource_variable_ops.var_handle_op(
dtype=dtypes.string, shape=[1, 1])
self.evaluate(resource_variable_ops.assign_variable_op(
handle, constant_op.constant([["a"]], dtype=dtypes.string)))
self.evaluate(resource_variable_ops.resource_scatter_update(
handle, [0], constant_op.constant([["b"]], dtype=dtypes.string)))
read = resource_variable_ops.read_variable_op(handle, dtype=dtypes.string)
self.assertEqual(compat.as_bytes(self.evaluate(read)[0][0]),
compat.as_bytes("b"))
@test_util.run_deprecated_v1
def testScatterUpdateStringScalar(self):
handle = resource_variable_ops.var_handle_op(
dtype=dtypes.string, shape=[1, 1])
self.evaluate(
resource_variable_ops.assign_variable_op(handle,
constant_op.constant(
[["a"]],
dtype=dtypes.string)))
self.evaluate(
resource_variable_ops.resource_scatter_update(handle, [0],
constant_op.constant(
"b",
dtype=dtypes.string)))
read = resource_variable_ops.read_variable_op(handle, dtype=dtypes.string)
self.assertEqual(
compat.as_bytes(self.evaluate(read)[0][0]), compat.as_bytes("b"))
# TODO(alive): get this to work in Eager mode.
def testGPU(self):
with test_util.use_gpu():
abc = variable_scope.get_variable(
"abc",
shape=[1],
initializer=init_ops.ones_initializer(),
use_resource=True)
self.evaluate(variables.global_variables_initializer())
self.assertEqual(
self.evaluate(
resource_variable_ops.var_is_initialized_op(abc.handle)),
True)
def testScatterBool(self):
with context.eager_mode():
ref = resource_variable_ops.ResourceVariable(
[False, True, False], trainable=False)
indices = math_ops.range(3)
updates = constant_op.constant([True, True, True])
state_ops.scatter_update(ref, indices, updates)
self.assertAllEqual(ref.read_value(), [True, True, True])
@test_util.run_in_graph_and_eager_modes
def testConstraintArg(self):
constraint = lambda x: x
v = resource_variable_ops.ResourceVariable(
initial_value=lambda: 1, constraint=constraint, name="var0")
self.assertEqual(v.constraint, constraint)
constraint = 0
with self.assertRaises(ValueError):
v = resource_variable_ops.ResourceVariable(
initial_value=lambda: 1, constraint=constraint, name="var1")
# TODO(alive): how should this work in Eager mode?
@test_util.run_deprecated_v1
def testInitFn(self):
with self.cached_session():
v = resource_variable_ops.ResourceVariable(
initial_value=lambda: 1, dtype=dtypes.float32)
self.assertEqual(v.handle.op.colocation_groups(),
v.initializer.inputs[1].op.colocation_groups())
def testHandleNumpy(self):
with context.eager_mode():
with self.assertRaises(ValueError):
resource_variable_ops.ResourceVariable(
1.0, name="handle-numpy").handle.numpy()
def testCountUpTo(self):
with context.eager_mode():
v = resource_variable_ops.ResourceVariable(0, name="upto")
self.assertAllEqual(v.count_up_to(1), 0)
with self.assertRaises(errors.OutOfRangeError):
v.count_up_to(1)
def testCountUpToFunction(self):
with context.eager_mode():
v = resource_variable_ops.ResourceVariable(0, name="upto")
self.assertAllEqual(state_ops.count_up_to(v, 1), 0)
with self.assertRaises(errors.OutOfRangeError):
state_ops.count_up_to(v, 1)
@test_util.run_in_graph_and_eager_modes
def testInitFnDtype(self):
v = resource_variable_ops.ResourceVariable(
initial_value=lambda: 1, dtype=dtypes.float32, name="var0")
self.assertEqual(dtypes.float32, v.value().dtype)
@test_util.run_in_graph_and_eager_modes
def testInitFnNoDtype(self):
v = resource_variable_ops.ResourceVariable(initial_value=lambda: 1,
name="var2")
self.assertEqual(dtypes.int32, v.value().dtype)
@test_util.run_in_graph_and_eager_modes
def testInitializeAllVariables(self):
v = resource_variable_ops.ResourceVariable(1, dtype=dtypes.float32,
name="var0")
self.evaluate(variables.global_variables_initializer())
self.assertEqual(1.0, self.evaluate(v.value()))
@test_util.run_in_graph_and_eager_modes
def testOperatorOverload(self):
v = resource_variable_ops.ResourceVariable(1.0, name="var0")
self.evaluate(variables.global_variables_initializer())
self.assertEqual(2.0, self.evaluate(v + v))
@test_util.run_in_graph_and_eager_modes
def testAssignMethod(self):
v = resource_variable_ops.ResourceVariable(1.0, name="var0")
self.evaluate(variables.global_variables_initializer())
self.evaluate(v.assign(2.0))
self.assertEqual(2.0, self.evaluate(v.value()))
# Tests for the 'read_value' argument:
assign_with_read = v.assign(3.0, read_value=True)
self.assertEqual(3.0, self.evaluate(assign_with_read))
assign_without_read = v.assign(4.0, read_value=False)
if context.executing_eagerly():
self.assertIsNone(assign_without_read)
else:
self.assertIsInstance(assign_without_read, ops.Operation)
self.evaluate(assign_without_read)
self.assertEqual(4.0, self.evaluate(v.value()))
@test_util.run_in_graph_and_eager_modes
def testLoad(self):
v = resource_variable_ops.ResourceVariable(1.0, name="var0")
self.evaluate(variables.global_variables_initializer())
v.load(2.0)
self.assertEqual(2.0, self.evaluate(v.value()))
def testShapePassedToGradient(self):
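# The custom gradient below asserts that the dense_shape of the
# incoming IndexedSlices gradient is statically known, i.e. that the
# variable's shape was propagated into the gradient computation.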
with ops.Graph().as_default():
@custom_gradient.custom_gradient
def differentiable_scatter_update(handle, indices, values):
with ops.control_dependencies([
resource_variable_ops.resource_scatter_update(
handle, indices, values)]):
new_handle = array_ops.identity(handle)
def grad(dresult):
self.assertIsNotNone(
tensor_util.constant_value(dresult.dense_shape))
return [dresult, None, None]
return new_handle, grad
var = variable_scope.get_variable(
"foo", shape=[20], initializer=init_ops.zeros_initializer,
dtype=dtypes.float64, use_resource=True)
indices = math_ops.range(10)
updates = math_ops.range(9, -1, -1, dtype=dtypes.float64)
new_handle = differentiable_scatter_update(var.handle, indices, updates)
gathered = resource_variable_ops.resource_gather(
new_handle, indices, dtype=var.dtype)
gradients_impl.gradients([gathered], [updates])
def testToFromProtoCachedValue(self):
with ops.Graph().as_default():
v_def = resource_variable_ops.ResourceVariable(
initial_value=constant_op.constant(3.0)).to_proto()
v_prime = resource_variable_ops.ResourceVariable(variable_def=v_def)
self.assertTrue(getattr(v_prime, "_cached_value", None) is None)
other_v_def = resource_variable_ops.ResourceVariable(
caching_device="cpu:0",
initial_value=constant_op.constant(3.0)).to_proto()
other_v_prime = resource_variable_ops.ResourceVariable(
variable_def=other_v_def)
self.assertTrue(other_v_prime._cached_value is not None)
def testVariableDefInitializedInstances(self):
with ops.Graph().as_default(), self.cached_session() as sess:
v_def = resource_variable_ops.ResourceVariable(
initial_value=constant_op.constant(3.0)).to_proto()
with ops.Graph().as_default(), self.cached_session() as sess:
# v describes a VariableDef-based variable without an initial value.
v = resource_variable_ops.ResourceVariable(variable_def=v_def)
self.assertEqual(3.0, self.evaluate(v.initialized_value()))
# initialized_value should not rerun the initializer_op if the variable
# has already been initialized elsewhere.
self.evaluate(v.assign(1.0))
self.assertEqual(1.0, v.initialized_value().eval())
v_def.ClearField("initial_value_name")
with ops.Graph().as_default(), self.cached_session() as sess:
# Restoring a legacy VariableDef proto that does not have
# initial_value_name set should still work.
v = resource_variable_ops.ResourceVariable(variable_def=v_def)
# We should also be able to re-export the variable to a new meta graph.
self.assertProtoEquals(v_def, v.to_proto())
# But attempts to use initialized_value will result in errors.
with self.assertRaises(ValueError):
self.evaluate(v.initialized_value())
def testTrainableInProto(self):
with ops.Graph().as_default():
non_trainable_variable = resource_variable_ops.ResourceVariable(
trainable=False,
initial_value=constant_op.constant(10.0))
self.assertEqual(
False,
resource_variable_ops.ResourceVariable(
variable_def=non_trainable_variable.to_proto())
.trainable)
trainable_variable = resource_variable_ops.ResourceVariable(
trainable=True,
initial_value=constant_op.constant(10.0))
self.assertEqual(
True,
resource_variable_ops.ResourceVariable(
variable_def=trainable_variable.to_proto())
.trainable)
@test_util.run_in_graph_and_eager_modes
def testSparseRead(self):
init_value = np.reshape(np.arange(np.power(4, 3)), (4, 4, 4))
v = resource_variable_ops.ResourceVariable(
constant_op.constant(init_value, dtype=dtypes.int32), name="var3")
self.evaluate(variables.global_variables_initializer())
value = self.evaluate(v.sparse_read([0, 3, 1, 2]))
self.assertAllEqual(init_value[[0, 3, 1, 2], ...], value)
@test_util.run_deprecated_v1
def testToFromProto(self):
with self.cached_session():
v = resource_variable_ops.ResourceVariable(1.0)
self.evaluate(variables.global_variables_initializer())
w = resource_variable_ops.ResourceVariable.from_proto(v.to_proto())
self.assertEquals(2, math_ops.add(w, 1).eval())
self.assertEquals(v._handle, w._handle)
self.assertEquals(v._graph_element, w._graph_element)
@test_util.run_in_graph_and_eager_modes
def testAssignAddMethod(self):
v = resource_variable_ops.ResourceVariable(1.0, name="var0")
self.evaluate(variables.global_variables_initializer())
self.evaluate(v.assign_add(1.0))
self.assertEqual(2.0, self.evaluate(v.value()))
# Tests for the 'read_value' argument:
assign_with_read = v.assign_add(1.0, read_value=True)
self.assertEqual(3.0, self.evaluate(assign_with_read))
assign_without_read = v.assign_add(1.0, read_value=False)
if context.executing_eagerly():
self.assertIsNone(assign_without_read)
else:
self.assertIsInstance(assign_without_read, ops.Operation)
self.evaluate(assign_without_read)
self.assertEqual(4.0, self.evaluate(v.value()))
@test_util.run_in_graph_and_eager_modes
def testAssignSubMethod(self):
v = resource_variable_ops.ResourceVariable(3.0, name="var0")
self.evaluate(variables.global_variables_initializer())
self.evaluate(v.assign_sub(1.0))
self.assertEqual(2.0, self.evaluate(v.value()))
# Tests for the 'read_value' argument:
assign_with_read = v.assign_sub(1.0, read_value=True)
self.assertEqual(1.0, self.evaluate(assign_with_read))
assign_without_read = v.assign_sub(1.0, read_value=False)
if context.executing_eagerly():
self.assertIsNone(assign_without_read)
else:
self.assertIsInstance(assign_without_read, ops.Operation)
self.evaluate(assign_without_read)
self.assertEqual(0.0, self.evaluate(v.value()))
@test_util.run_in_graph_and_eager_modes
@test_util.run_v1_only("b/120545219")
def testDestroyResource(self):
v = resource_variable_ops.ResourceVariable(3.0, name="var0")
self.evaluate(variables.global_variables_initializer())
self.assertEqual(3.0, self.evaluate(v.value()))
self.evaluate(resource_variable_ops.destroy_resource_op(v.handle))
with self.assertRaises(errors.FailedPreconditionError):
self.evaluate(v.value())
# Handle to a resource not actually created.
handle = resource_variable_ops.var_handle_op(dtype=dtypes.int32, shape=[])
# Should raise no exception
self.evaluate(resource_variable_ops.destroy_resource_op(
handle, ignore_lookup_error=True))
@test_util.run_deprecated_v1
def testAssignDifferentShapes(self):
with self.cached_session() as sess, variable_scope.variable_scope(
"foo", use_resource=True):
var = variable_scope.get_variable("x", shape=[1, 1], dtype=dtypes.float32)
placeholder = array_ops.placeholder(dtypes.float32)
assign = var.assign(placeholder)
sess.run(
[assign],
feed_dict={placeholder: np.zeros(shape=[2, 2], dtype=np.float32)})
def testAssignDifferentShapesEager(self):
with context.eager_mode():
with variable_scope.variable_scope("foo"):
var = variable_scope.get_variable("x", shape=[1, 1],
dtype=dtypes.float32)
with self.assertRaisesRegexp(ValueError,
"Shapes.*and.*are incompatible"):
assign = var.assign(np.zeros(shape=[2, 2]))
self.evaluate(assign)
@test_util.run_deprecated_v1
def testDtypeAfterFromProto(self):
v = resource_variable_ops.ResourceVariable(2.0)
w = resource_variable_ops.ResourceVariable.from_proto(v.to_proto())
self.assertIsInstance(w.dtype, dtypes.DType)
self.assertEqual(v.dtype, w.dtype)
# TODO(alive): get caching to work in eager mode.
@test_util.run_deprecated_v1
def testCachingDevice(self):
with ops.device("/job:server/task:1"):
v = resource_variable_ops.ResourceVariable(
2.0, caching_device="/job:localhost")
self.assertEqual("/job:localhost", v.value().device)
with self.assertRaises(ValueError):
_ = v.value().op.get_attr("_class")
with ops.colocate_with(v.op):
w = resource_variable_ops.ResourceVariable(
2.0, caching_device="/job:localhost")
self.assertEqual("/job:localhost", w.value().device)
with self.assertRaises(ValueError):
_ = w.value().op.get_attr("_class")
@test_util.run_deprecated_v1
def testSharedName(self):
with self.cached_session():
v = resource_variable_ops.ResourceVariable(300.0, name="var4")
self.evaluate(variables.global_variables_initializer())
w = resource_variable_ops.var_handle_op(
dtype=v.dtype.base_dtype, shape=v.get_shape(), shared_name="var4",
# Needed in Eager since we get a unique container name by default.
container=ops.get_default_graph()._container)
w_read = resource_variable_ops.read_variable_op(w, v.dtype.base_dtype)
self.assertEqual(300.0, self.evaluate(w_read))
x = resource_variable_ops.var_handle_op(
dtype=v.dtype.base_dtype, shape=v.get_shape(), shared_name="var5",
container=ops.get_default_graph()._container)
with self.assertRaisesOpError("Resource .*/var5/.* does not exist"):
resource_variable_ops.read_variable_op(x, v.dtype.base_dtype).eval()
@test_util.run_deprecated_v1
def testSharedNameWithNamescope(self):
with self.cached_session():
with ops.name_scope("foo"):
v = resource_variable_ops.ResourceVariable(300.0, name="var6")
self.assertEqual("foo/var6", v._shared_name) # pylint: disable=protected-access
self.assertEqual("foo/var6:0", v.name)
self.evaluate(variables.global_variables_initializer())
w = resource_variable_ops.var_handle_op(
dtype=v.dtype.base_dtype, shape=v.get_shape(), shared_name="foo/var6",
# Needed in Eager since we get a unique container name by default.
container=ops.get_default_graph()._container)
w_read = resource_variable_ops.read_variable_op(w, v.dtype.base_dtype)
self.assertEqual(300.0, self.evaluate(w_read))
@test_util.run_in_graph_and_eager_modes
def testShape(self):
v = resource_variable_ops.ResourceVariable(
name="var4", initial_value=array_ops.ones(shape=[10, 20, 35]))
self.assertEqual("(10, 20, 35)", str(v.shape))
self.assertEqual("(10, 20, 35)", str(v.get_shape()))
self.assertEqual("(10, 20, 35)", str(v.value().shape))
self.assertEqual("(3, 20, 35)", str(v.sparse_read([0, 1, 2]).shape))
if not context.executing_eagerly():
self.assertEqual(
"<unknown>",
str(v.sparse_read(array_ops.placeholder(dtypes.int32)).shape))
@test_util.run_deprecated_v1
def testSetInitialValue(self):
with self.cached_session():
# Initialize variable with a value different from the initial value passed
# in the constructor.
v = resource_variable_ops.ResourceVariable(2.0)
v.initializer.run(feed_dict={v.initial_value: 3.0})
self.assertEqual(3.0, v.value().eval())
@test_util.run_v1_only("b/120545219")
def testControlFlowInitialization(self):
"""Expects an error if an initializer is in a control-flow scope."""
def cond(i, _):
return i < 10
def body(i, _):
zero = array_ops.zeros([], dtype=dtypes.int32)
v = resource_variable_ops.ResourceVariable(initial_value=zero)
return (i + 1, v.read_value())
with self.assertRaisesRegexp(ValueError, "inside a control-flow"):
control_flow_ops.while_loop(cond, body, [0, 0])
def testVariableEager(self):
with context.eager_mode():
init = array_ops.ones(shape=[10, 20, 35], dtype=dtypes.int32)
constraint = lambda x: x
with ops.name_scope("foo"):
v = resource_variable_ops.ResourceVariable(
name="var7",
initial_value=init,
caching_device="cpu:0",
constraint=constraint)
# Test properties
self.assertEqual(dtypes.int32, v.dtype)
self.assertEqual("foo/var7:0", v.name)
self.assertAllEqual([10, 20, 35], v.shape.as_list())
self.assertTrue(isinstance(v.handle, ops.EagerTensor))
self.assertEqual(constraint, v.constraint)
self.assertAllEqual(init.numpy(), v.read_value().numpy())
self.assertAllEqual(init.numpy(), v.value().numpy())
# Callable init.
callable_init = lambda: init * 2
v2 = resource_variable_ops.ResourceVariable(
initial_value=callable_init, name="var7")
self.assertEqual("var7:0", v2.name)
self.assertAllEqual(2 * init.numpy(), v2.read_value().numpy())
# Test assign_add.
new_v2_val = v2.assign_add(v.read_value())
self.assertAllEqual(v.read_value().numpy() * 3, new_v2_val.numpy())
# Test assign_sub.
new_v2_val = v2.assign_sub(v.read_value())
self.assertAllEqual(v.read_value().numpy() * 2, new_v2_val.numpy())
# Test assign.
v2.assign(v.read_value())
self.assertAllEqual(v.read_value().numpy(), v2.read_value().numpy())
# Test load
v2.load(2 * v.read_value())
self.assertAllEqual(2 * v.read_value().numpy(), v2.read_value().numpy())
# Test convert_to_tensor
t = ops.convert_to_tensor(v)
self.assertAllEqual(t.numpy(), v.read_value().numpy())
# Test operations
self.assertAllEqual((v * 2).numpy(), (v + v).numpy())
def testContainerEager(self):
with context.eager_mode():
v1 = resource_variable_ops.ResourceVariable(initial_value=lambda: 1,
name="same")
with ops.container("different"):
v2 = resource_variable_ops.ResourceVariable(initial_value=lambda: 0,
name="same")
v2.assign(2)
self.assertEqual(1, v1.read_value().numpy())
self.assertEqual(2, v2.read_value().numpy())
def testDestruction(self):
with context.eager_mode():
var = resource_variable_ops.ResourceVariable(initial_value=1.0,
name="var8")
var_handle = var._handle
del var
with self.assertRaisesRegexp(errors.NotFoundError,
r"Resource .* does not exist."):
resource_variable_ops.destroy_resource_op(var_handle,
ignore_lookup_error=False)
def testScatterUpdate(self):
with context.eager_mode():
v = resource_variable_ops.ResourceVariable([1.0, 2.0], name="update")
state_ops.scatter_update(v, [1], [3.0])
self.assertAllEqual([1.0, 3.0], v.numpy())
def testScatterAddStateOps(self):
with context.eager_mode():
v = resource_variable_ops.ResourceVariable([1.0, 2.0], name="add")
state_ops.scatter_add(v, [1], [3])
self.assertAllEqual([1.0, 5.0], v.numpy())
def testScatterSubStateOps(self):
with context.eager_mode():
v = resource_variable_ops.ResourceVariable([1.0, 2.0], name="sub")
state_ops.scatter_sub(v, [1], [3])
self.assertAllEqual([1.0, -1.0], v.numpy())
def testScatterUpdateVariant(self):
with context.eager_mode():
v = resource_variable_ops.ResourceVariable([
list_ops.empty_tensor_list(
element_dtype=dtypes.float32, element_shape=[])
])
v.scatter_update(
ops.IndexedSlices(
list_ops.tensor_list_from_tensor([1., 2.], element_shape=[]), 0))
self.assertAllEqual(
list_ops.tensor_list_get_item(v[0], 0, element_dtype=dtypes.float32),
1.)
def testGroupDoesntForceRead(self):
with ops.Graph().as_default():
v = resource_variable_ops.ResourceVariable(1.0)
assign = v.assign_add(1.0)
g = control_flow_ops.group([assign])
self.assertEqual(g.control_inputs[0].type, "AssignAddVariableOp")
def testScatterNdAddStateOps(self):
with context.eager_mode():
v = resource_variable_ops.ResourceVariable(
[1, 2, 3, 4, 5, 6, 7, 8], dtype=dtypes.float32, name="add")
indices = constant_op.constant([[4], [3], [1], [7]], dtype=dtypes.int32)
updates = constant_op.constant([9, 10, 11, 12], dtype=dtypes.float32)
expected = np.array([1, 13, 3, 14, 14, 6, 7, 20])
state_ops.scatter_nd_add(v, indices, updates)
self.assertAllClose(expected, v.numpy())
def testScatterNdSubStateOps(self):
with context.eager_mode():
v = resource_variable_ops.ResourceVariable(
[1, 2, 3, 4, 5, 6, 7, 8], dtype=dtypes.float32, name="sub")
indices = constant_op.constant([[4], [3], [1], [7]], dtype=dtypes.int32)
updates = constant_op.constant([9, 10, 11, 12], dtype=dtypes.float32)
expected = np.array([1, -9, 3, -6, -4, 6, 7, -4])
state_ops.scatter_nd_sub(v, indices, updates)
self.assertAllClose(expected, v.numpy())
def testScatterUpdateCast(self):
with context.eager_mode():
v = resource_variable_ops.ResourceVariable([1.0, 2.0], name="update")
state_ops.scatter_update(v, [1], [3])
self.assertAllEqual([1.0, 3.0], v.numpy())
@test_util.run_in_graph_and_eager_modes
def testScatterUpdateInvalidArgs(self):
v = resource_variable_ops.ResourceVariable([0, 1, 2, 3], name="update")
# The exact error and message differ between graph construction (where the
# error is realized during shape inference at graph construction time) and
# eager execution (where the error is realized during kernel execution).
with self.assertRaisesRegexp(Exception, r"shape.*2.*3"):
state_ops.scatter_update(v, [0, 1], [0, 1, 2])
@test_util.run_in_graph_and_eager_modes
def testAssignIncompatibleShape(self):
v = resource_variable_ops.ResourceVariable([0, 1, 2, 3])
self.evaluate(v.initializer)
with self.assertRaisesRegexp(Exception, r"hapes must be equal"):
self.assertAllEqual(self.evaluate(v.assign_add(1)), [1, 2, 3, 4])
@test_util.run_in_graph_and_eager_modes
@test_util.run_v1_only("b/120545219")
def testCopyToGraphUninitialized(self):
v = resource_variable_ops.ResourceVariable([0, 1, 2, 3])
copy_to_graph = ops.Graph()
with copy_to_graph.as_default(): # Intentionally testing v1 behavior
copied = resource_variable_ops.copy_to_graph_uninitialized(v)
self.assertEqual(v.name, copied.name)
with self.session(copy_to_graph) as session:
with self.assertRaises(errors.InvalidArgumentError):
session.run(copied.initializer)
def create_variant_shape_and_type_data(self):
variant_shape_and_type_data = (
cpp_shape_inference_pb2.CppShapeInferenceResult.HandleData())
variant_shape_and_type_data.is_set = True
stored_shape = tensor_shape.TensorShape([None, 4]).as_proto()
stored_dtype = dtypes.float32.as_datatype_enum
# NOTE(ebrevdo): shape_and_type lacks append() in some versions of protobuf.
variant_shape_and_type_data.shape_and_type.extend([
cpp_shape_inference_pb2.CppShapeInferenceResult.HandleShapeAndType(
shape=stored_shape, dtype=stored_dtype)])
return variant_shape_and_type_data
@def_function.function
def create_constant_variant(self, value):
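# Builds a scalar DT_VARIANT constant whose payload is an int value
# serialized through VariantTensorDataProto (see the type_name note
# below).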
value = constant_op.constant(
tensor_pb2.TensorProto(
dtype=dtypes.variant.as_datatype_enum,
tensor_shape=tensor_shape.TensorShape([]).as_proto(),
variant_val=[
tensor_pb2.VariantTensorDataProto(
# Match registration in variant_op_registry.cc
type_name=b"int",
metadata=np.array(value, dtype=np.int32).tobytes())
]))
return value
# TODO(ebrevdo): Add run_in_graph_and_eager_modes once we can create
# EagerTensor constants with TensorProto inputs.
@test_util.run_in_graph_and_eager_modes()
def testVariantInitializer(self):
variant_shape_and_type_data = self.create_variant_shape_and_type_data()
value = self.create_constant_variant(3)
initializer = array_ops.fill([3], value)
resource_variable_ops._set_handle_shapes_and_types( # pylint: disable=protected-access
initializer, variant_shape_and_type_data,
graph_mode=not context.executing_eagerly())
v = resource_variable_ops.ResourceVariable(initializer)
read = array_ops.identity(v)
read_variant_shape_and_type = (
resource_variable_ops.get_eager_safe_handle_data(read))
self.assertEqual(
read_variant_shape_and_type, variant_shape_and_type_data)
gather = v.sparse_read([0])
gather_variant_shape_and_type = (
resource_variable_ops.get_eager_safe_handle_data(gather))
self.assertEqual(
gather_variant_shape_and_type, variant_shape_and_type_data)
# Make sure initializer runs.
if not context.executing_eagerly():
self.evaluate(v.initializer)
self.evaluate(read.op)
self.evaluate(gather.op)
if __name__ == "__main__":
test.main()
|
|
#!/bin/env python
# Copyright (C) 2012,2013,2014,2015 Seven Watt <[email protected]>
# <http://www.sevenwatt.com>
#
# This file is part of Plugwise-2-py.
#
# Plugwise-2-py is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Plugwise-2-py is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Plugwise-2-py. If not, see <http://www.gnu.org/licenses/>.
#
# The program is a major modification and extension to:
# python-plugwise - written in 2011 by Sven Petai <[email protected]>
# which itself is inspired by Plugwise-on-Linux (POL):
# POL v0.2 - written in 2009 by Maarten Damen <http://www.maartendamen.com>
# TODO:
# - make communication channel concurrency safe
# - return more reasonable responses than response message objects from the functions that don't do so yet
# - make message construction syntax better. Fields should only be specified once and contain name so we can serialize response message to dict
# - unit tests
# - pairing
# - support for older firmware versions
import re
import sys
import time
import math
from datetime import datetime, timedelta
import calendar
import logging
from serial.serialutil import SerialException
from swutil.util import *
from .protocol import *
from .exceptions import *
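# Circles report energy usage as pulse counts; this constant is the
# nominal number of pulses per kilowatt-second used to convert them.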
PULSES_PER_KW_SECOND = 468.9385193
DEFAULT_TIMEOUT = 1
class Stick(SerialComChannel):
"""provides interface to the Plugwise Stick"""
def __init__(self, logger, port=0, timeout=DEFAULT_TIMEOUT):
self.logger = logger
SerialComChannel.__init__(self, port=port, timeout=timeout)
self.unjoined = set()
self.init()
def init(self):
"""send init message to the stick"""
msg = PlugwiseStatusRequest().serialize()
self.send_msg(msg)
resp = self.expect_response(PlugwiseStatusResponse)
self.logger.debug(str(resp))
def send_msg(self, cmd):
#communication logging is done in the serialize function of the message object. That could be too early!
self.logger.debug("SEND %4d %s" % (len(cmd), repr(cmd)))
self.write(cmd)
resp = self.expect_response(PlugwiseAckResponse)
success = False
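# A status of 0xC1 is treated as a positive acknowledgement from the
# stick; any other status leaves success False.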
if resp.status.value == 0xC1:
success = True
return (success, resp.command_counter)
def _recv_response(self, retry_timeout=5):
await_response = True
msg = ""
retry_timeout += 1
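# readline() blocks for at most the serial timeout, so this allows
# retry_timeout + 1 incomplete reads before giving up below.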
while await_response:
msg += self.readline()
# if msg == b"":
# logcomm("TOUT '' - <!> Timeout on serial port" )
# raise TimeoutException("Timeout while waiting for response from device")
# #debug("read:"+repr(msg)+" with length "+str(len(msg)))
# if (msg != b""):
# if (msg[-1] != '\n'):
# logcomm("lastbyte not 0A")
# else:
# logcomm("lastbyte is 0A")
# try:
# logcomm("last bytes: %04X %04X" % (ord(msg[-2]), ord(msg[-1])))
# except:
# logcomm("last byte : %04X" % (ord(msg[-1]),))
# pass
# logcomm("counter: %2d" % (retry_timeout,))
if (msg == b"") or (msg[-1] != '\n'):
retry_timeout -= 1
if retry_timeout <= 0:
if (msg != b""):
logcomm("TOUT %4d %s - <!> Timeout on serial port" % ( len(msg), repr(msg)))
else:
logcomm("TOUT '' - <!> Timeout on serial port" )
raise TimeoutException("Timeout while waiting for response from device")
else:
continue
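# A newline-terminated chunk was received; look for the packet header
# (the PACKET_HEADER5 variant first, then the plain PACKET_HEADER) and
# discard any leading chatter before it.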
header_start = msg.find(PlugwiseMessage.PACKET_HEADER5)
if header_start < 0:
header_start = msg.find(PlugwiseMessage.PACKET_HEADER)
if header_start > 0:
### 2011 firmware seems to sometimes send an extra \x83 byte before some of the
### response messages, but there might be all kinds of chatter going on, so just
# look for our packet header. Due to protocol errors it might be in the middle of a response
logcomm("DSTR %4d %s" % ( len(msg[:header_start]), repr(msg[:header_start])))
msg = msg[header_start:]
if msg.find('#') >= 0:
logcomm("DTRC %4d %s" % ( len(msg), repr(msg)))
msg = ""
elif len(msg)<22:
# Ignore. It is too short to interpret as a message.
# It may be part of Stick debug messages.
logcomm("DSHR %4d %s" % ( len(msg), repr(msg)))
msg = ""
else:
#message can be interpreted as response
#perform logcomm after interpretation of the response
#logcomm("RECV %4d %s" % ( len(msg), repr(msg)))
await_response = False
return msg
def is_in_sequence(self, resp, seqnr):
if seqnr is not None and resp.command_counter != seqnr:
self.logger.info("Out of sequence message. Expected seqnr %s, received seqnr %s" % (seqnr, resp.command_counter))
return False
else:
return True
def expect_response(self, response_class, src_mac=None, seqnr=None, retry_timeout=5):
resp = response_class(seqnr)
# there's a lot of debug info flowing on the bus so it's
# expected that we constantly get unexpected messages
while 1:
try:
#readlen = len(resp)
#debug("expecting to read "+str(readlen)+" bytes for msg. "+str(resp))
msg = self._recv_response(retry_timeout)
resp.unserialize(msg)
if self.is_in_sequence(resp, seqnr) and (src_mac is None or src_mac == resp.mac):
return resp
except ProtocolError as reason:
#retry to receive the response
logcomm("RERR %4d %s - <!> protocol error: %s" % ( len(msg), repr(msg), str(reason)))
self.logger.info("protocol error [1]:"+str(reason))
except OutOfSequenceException as reason:
#retry to receive the response
logcomm("RERR %4d %s - <!> out of sequence: %s" % ( len(msg), repr(msg), str(reason)))
self.logger.info("protocol error [2]:"+str(reason))
except UnexpectedResponse as reason:
#response could be an error status message
#suppress error logging when expecting a response to ping in case circle is offline
if str(reason) != "'expected response code 000E, received code 0000'":
self.logger.info("unexpected response [1]:"+str(reason))
else:
self.logger.debug("unexpected response [1]:"+str(reason))
if not issubclass(resp.__class__, PlugwiseAckResponse):
#Could be an Ack, AckMac or AckAssociation error code response with the same seqnr
try:
if (len(msg) == 22 and msg[0:1] == b'\x05') or (len(msg) == 23 and msg[0:1] == b'\x83'):
ackresp = PlugwiseAckResponse()
ackresp.unserialize(msg)
if self.is_in_sequence(ackresp, seqnr):
return ackresp
elif (len(msg) == 38 and msg[0:1] == b'\x05') or (len(msg) == 39 and msg[0:1] == b'\x83'):
ackresp = PlugwiseAckMacResponse()
ackresp.unserialize(msg)
if self.is_in_sequence(ackresp, seqnr):
return ackresp
else:
#it does not appear to be a proper Ack message
#just retry to read next message
logcomm("RERR %4d %s - <!> unexpected response error: %s" % ( len(msg), repr(msg), str(reason)))
pass
except ProtocolError as reason:
#retry to receive the response
logcomm("RERR %4d %s - <!> protocol error while interpreting as Ack: %s" % ( len(msg), repr(msg), str(reason)))
self.logger.info("protocol error [3]:"+str(reason))
except OutOfSequenceException as reason:
#retry to receive the response
logcomm("RERR %4d %s - <!> out of sequence while interpreting as Ack: %s" % ( len(msg), repr(msg), str(reason)))
self.logger.info("protocol error [4]:"+str(reason))
except UnexpectedResponse as reason:
#response could be an error status message
logcomm("RERR %4d %s - <!> unexpected response error while interpreting as Ack: %s" % ( len(msg), repr(msg), str(reason)))
self.logger.info("unexpected response [2]:"+str(reason))
self.logger.info("TEST: %s" % (resp.function_code,))
if resp.function_code in ['0006', '0061']:
#Could be an unsolicited AdvertiseNode or AckAssociation response
try:
if resp.function_code == "0006":
self.logger.info("entering unknown advertise MAC ")
ackresp = PlugwiseAdvertiseNodeResponse()
ackresp.unserialize(msg)
self.logger.info("unknown advertise MAC %s" % str(ackresp.mac))
if ackresp.mac not in self.unjoined:
self.unjoined.add(ackresp.mac)
elif resp.function_code == "0061":
ackresp = PlugwiseAckAssociationResponse()
ackresp.unserialize(msg)
self.logger.info("unknown MAC associating %s" % str(ackresp.mac))
else:
#it does not appear to be a proper Ack message
#just retry to read next message
logcomm("RERR %4d %s - <!> unexpected response error: %s" % ( len(msg), repr(msg), str(reason)))
pass
except ProtocolError as reason:
#retry to receive the response
logcomm("RERR %4d %s - <!> protocol error while interpreting as Advertise: %s" % ( len(msg), repr(msg), str(reason)))
self.logger.info("protocol error [5]:"+str(reason))
except OutOfSequenceException as reason:
#retry to receive the response
logcomm("RERR %4d %s - <!> out of sequence while interpreting as Advertise: %s" % ( len(msg), repr(msg), str(reason)))
self.logger.info("protocol error [6]:"+str(reason))
except UnexpectedResponse as reason:
#response could be an error status message
logcomm("RERR %4d %s - <!> unexpected response error while interpreting as Advertise: %s" % ( len(msg), repr(msg), str(reason)))
self.logger.info("unexpected response [3]:"+str(reason))
else:
logcomm("RERR %4d %s - <!> unexpected response error while expecting Ack: %s" % ( len(msg), repr(msg), str(reason)))
self.logger.info("unexpected response [4]:"+str(reason))
def enable_joining(self, enabled):
req = PlugwiseEnableJoiningRequest('', enabled)
_, seqnr = self.send_msg(req.serialize())
self.expect_response(PlugwiseAckMacResponse)
def join_node(self, newmac, permission):
req = PlugwiseJoinNodeRequest(newmac, permission)
_, seqnr = self.send_msg(req.serialize())
#No response other than the normal ack
#After this an unsolicited 0061 response from the circle may be received.
def reset(self):
type = 0
req = PlugwiseResetRequest(self.mac, type, 20)
_, seqnr = self.send_msg(req.serialize())
resp = self.expect_response(PlugwiseAckMacResponse)
return resp.status.value
def status(self):
req = PlugwiseStatusRequest(self.mac)
_, seqnr = self.send_msg(req.serialize())
#TODO: There is a short and a long response to 0011.
#The short response occurs when no circle+ is connected, and has two byte parameters.
#The short response is likely not properly handled (exception?)
resp = self.expect_response(PlugwiseStatusResponse)
return
def find_circleplus(self):
req = PlugwiseQueryCirclePlusRequest(self.mac)
_, seqnr = self.send_msg(req.serialize())
#Receive the circle+ response, but possibly, only an end-protocol response is seen.
success = False
circleplusmac = None
try:
resp = self.expect_response(PlugwiseQueryCirclePlusResponse)
success=True
circleplusmac = resp.new_node_mac_id.value
except (TimeoutException, SerialException) as reason:
self.logger.info("Error: %s, %s" % (datetime.datetime.now().isoformat(), str(reason),))
return success,circleplusmac
def connect_circleplus(self):
req = PlugwiseConnectCirclePlusRequest(self.mac)
_, seqnr = self.send_msg(req.serialize())
resp = self.expect_response(PlugwiseConnectCirclePlusResponse)
return resp.existing.value, resp.allowed.value
class Circle(object):
"""provides interface to the Plugwise Plug & Plug+ devices
"""
def __init__(self, logger, mac, comchan, attr):
"""
will raise ValueError if mac doesn't look valid
"""
self.logger = logger
mac = str(mac).upper()
if not self._validate_mac(mac):
raise ValueError("MAC address is in unexpected format: "+str(mac))
self.mac = sc(mac)
#debug("self.mac %s" % (type(self.mac),))
#debug("mac %s" % (type(mac),))
self._comchan = comchan
self.attr = attr
self._devtype = None
self.gain_a = None
self.gain_b = None
self.off_noise = None
self.off_tot = None
self.scheduleCRC = None
self.schedule = None
self.joined = False
self.online = False
self.initialized = False
self.relay_state = '?'
self.switch_state = '?'
self.schedule_state = '?'
if self.attr.get('always_on','False') != 'False':
#self.relay_state = 'on'
self.schedule_state = 'off'
self.last_seen = calendar.timegm(datetime.datetime.utcnow().utctimetuple())
self.last_log = 0
self.last_log_idx = 0
self.last_log_ts = 0
self.power = [0, 0, 0, 0]
self.power_ts = 0
self.interval=60
self.usage=True
self.production=False
self.reinit()
def reinit(self):
#self.get_info() called by _get_interval
try:
self._get_interval()
self.online = True
self.initialized = True
except (ValueError, TimeoutException, SerialException, AttributeError) as reason:
self.online = False
self.initialized = False
self.logger.info("OFFLINE Circle '%s' during initialization Error: %s" % (self.attr['name'], str(reason)))
def get_status(self):
retd = {}
retd["mac"] = self.mac
retd["type"] = self.type()
retd["name"] = self.attr["name"]
retd["location"] = self.attr["location"]
retd["online"] = self.online
retd["lastseen"] = self.last_seen
retd["readonly"] = (self.attr['always_on'] != 'False')
retd["switch"] = self.relay_state
retd["switchreq"] = self.switch_state
retd["schedule"] = self.schedule_state
if self.schedule is not None:
retd["schedname"] = self.schedule.name
else:
retd["schedname"] = ""
now = calendar.timegm(datetime.datetime.utcnow().utctimetuple())
tdelta = now - self.power_ts
# if tdelta < 60:
# retd["power"] = self.power[1] # 8-seconds value
# elif tdelta < 10800:
# retd["power"] = self.power[2] - self.power[3] # 1 hour value value
# else:
# retd["power"] = 0 # clear value
retd["power1s"] = round(self.power[0], 3)
retd["power8s"] = round(self.power[1], 3)
retd["power1h"] = round(self.power[2] - self.power[3], 3)
retd["powerts"] = self.power_ts
retd["production"] = self.production
retd["interval"] = self.interval
return retd
def dump_status(self):
retd = {}
for key in dir(self):
ptr = getattr(self, key)
if key == 'schedule' and ptr is not None:
retd[key] = ptr.dump_status()
continue
#if isinstance(ptr, int):
if not hasattr(ptr, '__call__') and not key[0] == '_':
retd[key] = ptr
return retd
def _validate_mac(self, mac):
if not re.match("^[A-F0-9]+$", mac):
return False
try:
_ = int(mac, 16)
except ValueError:
return False
return True
def _expect_response(self, response_class, seqnr):
retry_count = 1
retry_timeout = 5 #allow 5+1 seconds for timeout
#instead of expected response a status message with correct seqnr may be received
#the common case is the offline status 'E1'
#it appears that in case of bad networks the expected response competes with
#the offline status. Sometimes the proper response arrives just (<1s) after
#the offline status.
#the while loop is intended to deal with this situation.
while retry_count >= 0:
retry_count -= 1
try:
resp = self._comchan.expect_response(response_class, self.mac, seqnr, retry_timeout)
except (TimeoutException, SerialException) as reason:
if self.online:
self.logger.info("OFFLINE Circle '%s'." % (self.attr['name'],))
self.online = False
raise TimeoutException("Timeout while waiting for response from circle '%s'" % (self.attr['name'],))
if not isinstance(resp, response_class):
#error status returned
if resp.status.value == 0xE1:
self.logger.debug("Received an error status '%04X' from circle '%s' - Network slow or circle offline - Retry receive ..." % (resp.status.value, self.attr['name']))
retry_timeout = 1 #allow 1+1 seconds for timeout after an E1.
else:
self.logger.info("Received an error status '%04X' from circle '%s' with correct seqnr - Retry receive ..." % (resp.status.value, self.attr['name']))
else:
ts_now = calendar.timegm(datetime.datetime.utcnow().utctimetuple())
if not self.online:
self.logger.info("ONLINE Circle '%s' after %d seconds." % (self.attr['name'], ts_now - self.last_seen))
self.online = True
#self.last_seen = (datetime.datetime.utcnow()-datetime.timedelta(seconds=time.timezone)).isoformat()
self.last_seen = ts_now
return resp
#we only end here when multiple ack or ackmac messages are generated before the real response
if self.online:
self.logger.info("OFFLINE Circle '%s'." % (self.attr['name'],))
self.online = False
#TODO: Replace timeout exception by more specific exception
raise TimeoutException("Received multiple error messages from circle '%s'" % (self.attr['name'],))
def map_type(self, devtype):
types = dict({0: 'stick', 1: 'circle+', 2: 'circle'})
return types[devtype]
def _type(self):
if self._devtype is None:
self.get_info()
return self._devtype
def type(self):
return self.map_type(self._type())
def pulse_correction(self, pulses, seconds=1):
"""correct pulse count with Circle specific calibration offsets
@param pulses: pulse counter
@param seconds: over how many seconds were the pulses counted
"""
self.logger.debug("PULSE: uncorrected: %.3f" % (pulses,))
if pulses == 0:
return 0.0
if self.gain_a is None:
self.calibrate()
pulses /= float(seconds)
corrected_pulses = seconds * (((((pulses + self.off_noise)**2) * self.gain_b) + ((pulses + self.off_noise) * self.gain_a)) + self.off_tot)
self.logger.debug("PULSE: corrected: %.3f" % (pulses/seconds,))
self.logger.debug("PULSE: t corrected: %.3f" % (pulses,))
if (pulses > 0.0 and corrected_pulses < 0.0 or pulses < 0.0 and corrected_pulses > 0.0):
return 0.0
return corrected_pulses
def pulses_to_kWs(self, pulses):
"""converts the pulse count to kWs
"""
# pulses -> kWs
kWs = pulses/PULSES_PER_KW_SECOND
return kWs
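# Rough sanity check (illustrative): ~469 corrected pulses counted over one second is
# about 1 kWs, i.e. roughly 1000 W of instantaneous power.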
def watt_to_pulses(self, watt, seconds=1):
"""correct pulse count with Circle specific calibration offsets
@param watt: power in watts to convert to pulses
@param seconds: over how many seconds will the pulses be counted
"""
if watt == 0:
return 0.0
if self.gain_a is None:
self.calibrate()
corr_pulses_1s = watt * PULSES_PER_KW_SECOND / 1000.0
raw_pulses_1s = (math.sqrt(self.gain_a**2.0 + 4.0 * self.gain_b * (corr_pulses_1s - self.off_tot)) - self.gain_a - 2.0 * self.gain_b * self.off_noise) / (2.0 * self.gain_b)
if (corr_pulses_1s > 0.0 and raw_pulses_1s < 0.0 or corr_pulses_1s < 0.0 and raw_pulses_1s > 0.0):
return 0.0
return seconds*raw_pulses_1s
def calibrate(self):
"""fetch calibration info from the device
"""
msg = PlugwiseCalibrationRequest(self.mac).serialize()
_, seqnr = self._comchan.send_msg(msg)
calibration_response = self._expect_response(PlugwiseCalibrationResponse, seqnr)
retl = []
for x in ('gain_a', 'gain_b', 'off_noise', 'off_tot'):
val = getattr(calibration_response, x).value
retl.append(val)
setattr(self, x, val)
return retl
def get_pulse_counters(self):
"""return pulse counters for 1s interval, 8s interval and for the current hour,
both usage and production as a tuple
"""
msg = PlugwisePowerUsageRequest(self.mac).serialize()
_, seqnr = self._comchan.send_msg(msg)
resp = self._expect_response(PlugwisePowerUsageResponse, seqnr)
p1s, p8s, p1h, pp1h = resp.pulse_1s.value, resp.pulse_8s.value, resp.pulse_hour.value, resp.pulse_prod_hour.value
if self.attr['production'] == 'False':
pp1h = 0
return (p1s, p8s, p1h, pp1h)
def get_power_usage(self):
"""returns power usage for the last second in Watts
might raise ValueError if reading the pulse counters fails
"""
pulse_1s, pulse_8s, pulse_1h, pulse_prod_1h = self.get_pulse_counters()
kw_1s = 1000*self.pulses_to_kWs(self.pulse_correction(pulse_1s))
self.logger.debug("POWER: 1s: %.3f" % (kw_1s,))
kw_8s = 1000*self.pulses_to_kWs(self.pulse_correction(pulse_8s, 8))/8.0
self.logger.debug("POWER: 8s: %.3f" % (kw_8s,))
kw_1h = 1000*self.pulses_to_kWs(self.pulse_correction(pulse_1h, 3600))/3600.0
self.logger.debug("POWER: 1h: %.3f" % (kw_1h,))
kw_p_1h = 1000*self.pulses_to_kWs(self.pulse_correction(pulse_prod_1h, 3600))/3600.0
self.logger.debug("POWER: prod 1h: %.3f" % (kw_p_1h,))
self.power = [kw_1s, kw_8s, kw_1h, kw_p_1h]
self.power_ts = calendar.timegm(datetime.datetime.utcnow().utctimetuple())
#just return negative values. It is production
return (kw_1s, kw_8s, kw_1h, kw_p_1h)
def get_info(self):
"""fetch relay state & current logbuffer index info
"""
def map_hz(hz_raw):
if hz_raw == 133:
return 50
elif hz_raw == 197:
return 60
def relay(state):
states = dict({0: 'off', 1: 'on'})
return states[state]
msg = PlugwiseInfoRequest(self.mac).serialize()
_, seqnr = self._comchan.send_msg(msg)
resp = self._expect_response(PlugwiseInfoResponse, seqnr)
retd = response_to_dict(resp)
retd['hz'] = map_hz(retd['hz'])
self._devtype = retd['type']
retd['type'] = self.map_type(retd['type'])
retd['relay_state'] = relay(retd['relay_state'])
self.relay_state = retd['relay_state']
if self.attr['always_on'] != 'False' and self.relay_state == 'off':
return False
return retd
def get_clock(self):
"""fetch current time from the device
"""
msg = PlugwiseClockInfoRequest(self.mac).serialize()
_, seqnr = self._comchan.send_msg(msg)
resp = self._expect_response(PlugwiseClockInfoResponse, seqnr)
self.scheduleCRC = resp.scheduleCRC.value
self.logger.debug("Circle %s get clock to %s" % (self.attr['name'], resp.time.value.isoformat()))
return resp.time.value
def set_clock(self, dt):
"""set clock to the value indicated by the datetime object dt
"""
self.logger.debug("Circle %s set clock to %s" % (self.attr['name'], dt.isoformat()))
msg = PlugwiseClockSetRequest(self.mac, dt).serialize()
_, seqnr = self._comchan.send_msg(msg)
resp = self._expect_response(PlugwiseAckMacResponse, seqnr)
#status = '00D7'
return dt
def switch(self, on):
"""switch power on or off
@param on: new state, boolean
"""
self.logger.info("API %s %s circle switch: %s" % (self.mac, self.attr["name"], 'on' if on else 'off',))
if not isinstance(on, bool):
return False
if self.attr['always_on'] != 'False' and on != True:
return False
req = PlugwiseSwitchRequest(self.mac, on)
_, seqnr = self._comchan.send_msg(req.serialize())
resp = self._expect_response(PlugwiseAckMacResponse, seqnr)
if on == True:
if resp.status.value != 0xD8:
self.logger.info("Wrong switch status reply when switching on. expected '00D8', received '%04X'" % (resp.status.value,))
self.switch_state = 'on'
self.relay_state = 'on'
self.schedule_state = 'off'
else:
if resp.status.value != 0xDE:
self.logger.info("Wrong switch status reply when switching off. expected '00DE', received '%04X'" % (resp.status.value,))
self.switch_state = 'off'
self.relay_state = 'off'
self.schedule_state = 'off'
return
def switch_on(self):
self.switch(True)
#status = '00D8'
def switch_off(self):
self.switch(False)
#status = '00DE'
def get_power_usage_history(self, log_buffer_index=None, start_dt=None):
"""Reads power usage information from the given log buffer address at the Circle.
Each log buffer contains the power usage data for 4 intervals, some of which might
not be filled yet. The intervals can contain values for usage, or values for both
usage and production. Production values are negative, and have the same timestamp
as their preceding usage value. The default is usage only with a 3600 sec = 1 hour
interval. The interval and production can be set with set_log_interval().
@param log_buffer_index: index of the first log buffer to return.
If None then current log buffer index is used
@return: list of (datetime|None, average-watt-in-interval, watt-hours-in-this-interval)
tuples.
If the first tuple element is None it means this buffer isn't written yet and
the second and third value are undefined in that case.
"""
if log_buffer_index is None:
info_resp = self.get_info()
log_buffer_index = info_resp['last_logaddr']
#the cur-pos may not be complete.
if log_buffer_index > 0:
log_buffer_index -= 1
log_req = PlugwisePowerBufferRequest(self.mac, log_buffer_index).serialize()
_, seqnr = self._comchan.send_msg(log_req)
resp = self._expect_response(PlugwisePowerBufferResponse, seqnr)
intervals = []
dts = []
pulses = []
if start_dt is None:
prev_dt = getattr(resp, "logdate1").value
else:
prev_dt = start_dt
if prev_dt is None:
self.logger.info("get_power_usage_history: empty first entry in power buffer")
return []
prev2_dt = prev_dt
both = False
for i in range(0, 4):
dt = getattr(resp, "logdate%d" % (i+1,)).value
if dt is not None:
dts.append(dt)
pulses.append(getattr(resp, "pulses%d" % (i+1,)).value)
if prev_dt == dts[i]:
both = True
intervals.append((dts[i]-prev2_dt).total_seconds())
else:
intervals.append((dts[i]-prev_dt).total_seconds())
prev2_dt = prev_dt
prev_dt = dts[i]
retl = []
for i in range(0, len(dts)):
#first two elements of interval may be zero. Derive intervals
#try to get it from intervals within the four readings
#otherwise assume 60 minutes.
if intervals[i] == 0:
if len(dts)>i+1 and dts[i] == dts[i+1]:
if len(dts)>i+2:
intervals[i] = (dts[i+2]-dts[i]).total_seconds()
else:
intervals[i]=3600
elif len(dts)>i+1:
intervals[i] = (dts[i+1]-dts[i]).total_seconds()
else:
intervals[i]=3600
corrected_pulses = self.pulse_correction(pulses[i], intervals[i])
watt = self.pulses_to_kWs(corrected_pulses)/intervals[i]*1000
watthour = self.pulses_to_kWs(corrected_pulses)/3600*1000
retl.append((dts[i], watt, watthour))
return retl
def get_power_usage_history_raw(self, log_buffer_index=None):
"""Reads the raw interpreted power usage information from the given log buffer
address at the Circle. This function reads (check!) 64 bytes of memory.
The function can be used to make a total memory dump of the circle, as increasing
addresses causes a wrap around.
@param log_buffer_index: index of the first log buffer to return.
If None then current log buffer index is used
@return: list of hexadecimal (single string?) bytes
"""
if log_buffer_index is None:
info_resp = self.get_info()
log_buffer_index = info_resp['last_logaddr']
log_req = PlugwisePowerBufferRequest(self.mac, log_buffer_index).serialize()
_, seqnr = self._comchan.send_msg(log_req)
resp = self._expect_response(PlugwisePowerBufferResponseRaw, seqnr)
retl = getattr(resp, "raw").value
return retl
def set_log_interval(self, interval, production=False):
"""set log interval in minutes for usage and production
@param interval: the loginterval in minutes.
@param production: boolean to indicate logging for production.
False: Usage logging only.
True: Usage and Production logging.
"""
msg = PlugwiseLogIntervalRequest(self.mac, interval, interval if production else 0).serialize()
_, seqnr = self._comchan.send_msg(msg)
return self._expect_response(PlugwiseAckMacResponse, seqnr)
#status = '00F8'
def get_features(self):
"""fetch feature set
"""
msg = PlugwiseFeatureSetRequest(self.mac).serialize()
_, seqnr = self._comchan.send_msg(msg)
resp = self._expect_response(PlugwiseFeatureSetResponse, seqnr)
return resp.features.value
def get_circleplus_datetime(self):
"""fetch current time from the circle+
"""
msg = PlugwiseDateTimeInfoRequest(self.mac).serialize()
_, seqnr = self._comchan.send_msg(msg)
resp = self._expect_response(PlugwiseDateTimeInfoResponse, seqnr)
dt = datetime.datetime.combine(resp.date.value, resp.time.value)
return dt
def set_circleplus_datetime(self, dt):
"""set circle+ clock to the value indicated by the datetime object dt
"""
msg = PlugwiseSetDateTimeRequest(self.mac, dt).serialize()
_, seqnr = self._comchan.send_msg(msg)
return self._expect_response(PlugwiseAckMacResponse, seqnr)
#status = '00DF'=ack '00E7'=nack
def define_schedule(self, name, scheddata, dst=0):
self.logger.info("circle.define_schedule.")
self.schedule = Schedule(name, scheddata, self.watt_to_pulses)
self.schedule._dst_shift(dst)
def undefine_schedule(self):
self.schedule = None
def load_schedule(self, dst=0):
if self.schedule._pulse is not None:
self.logger.info("circle.load_schedule. enter function")
self.schedule._dst_shift(dst)
#TODO: add test on inequality of CRC
for idx in range(0,84):
chunk = self.schedule._pulse[(8*idx):(8*idx+8)]
req = PlugwisePrepareScheduleRequest(idx, chunk)
_, seqnr = self._comchan.send_msg(req.serialize())
for idx in range(1,43):
req = PlugwiseSendScheduleRequest(self.mac, idx)
_, seqnr = self._comchan.send_msg(req.serialize())
resp = self._expect_response(PlugwiseSendScheduleResponse, seqnr)
self.logger.info("circle.load_schedule. exit function")
def schedule_onoff(self, on):
"""switch schedule on or off
@param on: new state, boolean
"""
self.logger.info("API %s %s circle schedule %s" % (self.mac, self.attr["name"], 'on' if on else 'off'))
if not isinstance(on, bool):
return False
if self.attr['always_on'] != 'False':
return False
req = PlugwiseEnableScheduleRequest(self.mac, on)
_, seqnr = self._comchan.send_msg(req.serialize())
resp = self._expect_response(PlugwiseAckMacResponse, seqnr)
if on == True:
if resp.status.value != 0xE4:
self.logger.info("Wrong schedule status reply when setting schedule on. expected '00E4', received '%04X'" % (resp.status.value,))
self.schedule_state = 'on'
#update self.relay_state
self.get_info()
else:
if resp.status.value != 0xE5:
self.logger.info("Wrong schedule status reply when setting schedule off. expected '00E5', received '%04X'" % (resp.status.value,))
self.schedule_state = 'off'
return
def schedule_on(self):
self.schedule_onoff(True)
#status = '00E4'
def schedule_off(self):
self.schedule_onoff(False)
#status = '00E5'
def set_schedule_value(self, val):
"""Set complete schedule to a single value.
@param val: -1=On 0=Off >0=StandbyKiller threshold in Watt
"""
#TODO: incorporate this in Schedule object
val = self.watt_to_pulses(val) if val>=0 else val
req = PlugwiseSetScheduleValueRequest(self.mac, val)
_, seqnr = self._comchan.send_msg(req.serialize())
return self._expect_response(PlugwiseAckMacResponse, seqnr)
#status = '00FA'
def _get_interval(self):
self.interval=60
self.usage=True
self.production=False
info = self.get_info()
cur_idx = info['last_logaddr']
if cur_idx < 1:
return
log = self.get_power_usage_history(cur_idx)
if len(log)<3:
log = self.get_power_usage_history(cur_idx-1) + log
if len(log)<3:
self.logger.info("_get_interval: to few entries in power buffer to determine interval")
return
#self.logger.debug(log)
interval = log[-1][0]-log[-2][0]
self.usage=True
if interval == timedelta(0):
interval = log[-1][0]-log[-3][0]
self.production=True
self.interval = int(interval.total_seconds())/60
def ping(self):
"""ping circle
"""
req = PlugwisePingRequest(self.mac)
_, seqnr = self._comchan.send_msg(req.serialize())
return self._expect_response(PlugwisePingResponse, seqnr)
def read_node_table(self):
#Needs to be called on Circle+
nodetable = []
for idx in range(0,64):
req = PlugwiseAssociatedNodesRequest(self.mac, idx)
_, seqnr = self._comchan.send_msg(req.serialize())
resp = self._expect_response(PlugwiseAssociatedNodesResponse, seqnr)
nodetable.append(resp.node_mac_id.value)
return nodetable
def remove_node(self, removemac):
#Needs to be called on Circle+
req = PlugwiseRemoveNodeRequest(self.mac, removemac)
_, seqnr = self._comchan.send_msg(req.serialize())
resp = self._expect_response(PlugwiseRemoveNodeResponse, seqnr)
return resp.status.value
def reset(self):
req = PlugwiseResetRequest(self.mac, self._type(), 20)
_, seqnr = self._comchan.send_msg(req.serialize())
resp = self._expect_response(PlugwiseAckMacResponse, seqnr)
return resp.status.value
def response_to_dict(r):
retd = {}
for key in dir(r):
ptr = getattr(r, key)
if isinstance(ptr, BaseType):
retd[key] = ptr.value
return retd
class Schedule(object):
"""Schedule for circles(+) to control timed On/Off/StandByKiller
A value per 15 minutes 24/7, 672 values (int) in total
-1 = On
0 = Off
>0 = StandbyKiller threshold in Watt
The objects are only meaningful in the context of a Circle, as
calibration data is required for conversion to pulses and for CRC calculation
"""
def __init__(self, name, scheddata, convertor):
"""
@param name: name of the schedule
@param scheddata: list of 672 quarter-hour values in watt (-1=On, 0=Off, >0=StandbyKiller threshold)
@param convertor: function that converts watt to pulses, e.g. Circle.watt_to_pulses
"""
self.name = str(name)
self.logger = logging.getLogger(__name__)  # _shift_day() and _dst_shift() below expect a logger
self.dst = 0
self._watt = scheddata
self._pulse = list(int(convertor(i)) if i>=0 else i for i in self._watt)
self._shift_day()
self._hex = ''.join(("%04X" % int_to_uint(i,4)) for i in self._pulse)
self.CRC = crc_fun(''.join(str(struct.pack('>h',i)) for i in self._pulse))
def dump_status(self):
retd = {}
retd['name'] = self.name
retd['CRC'] = self.CRC
retd['schedule'] = self._watt
return retd
def _shift_day(self):
self.logger.info("circle.schedule._shift_day rotate left by one day")
#rotate schedule a day to the left
self._pulse = self._pulse[96:]+self._pulse[:96]
#self.CRC = crc_fun(''.join(str(struct.pack('>h',i)) for i in self._pulse))
def _dst_shift(self, dst):
if self.dst and not dst:
self.logger.info("circle.schedule._dst_shift rotate right [end of DST]")
#rotate schedule 4 quarters right (forward in time)
self._pulse = self._pulse[-4:]+self._pulse[:-4]
self.CRC = crc_fun(''.join(str(struct.pack('>h',i)) for i in self._pulse))
self.dst = 0
elif not self.dst and dst:
self.logger.info("circle.schedule._dst_shift rotate left [start of DST]")
#rotate schedule 4 quarters left (backward in time)
self._pulse = self._pulse[4:]+self._pulse[:4]
self.CRC = crc_fun(''.join(str(struct.pack('>h',i)) for i in self._pulse))
self.dst = 1
|
|
from .maybeerror import MaybeError as M
from . import functions
class ConsList(object):
'''
A data structure that supports constant-time first/rest slicing.
The input sequence is never copied or modified -- all the slicing
does is increment a position counter and create a new wrapper.
'''
def __init__(self, seq, start=0):
self.seq = seq
self.start = start
def isEmpty(self):
return self.start >= len(self.seq)
def first(self):
'''
Returns first element. Throws exception if empty.
'''
if not self.isEmpty():
return self.seq[self.start]
raise ValueError('cannot get first element of empty sequence')
def rest(self):
'''
Return ConsList of all but the first element.
Throws exception if empty.
'''
if not self.isEmpty():
return ConsList(self.seq, self.start + 1)
raise ValueError('cannot get rest of empty sequence')
def getAsList(self):
'''
Return list of remaining elements.
'''
return list(self.seq[self.start:])
def __eq__(self, other):
try:
return self.getAsList() == other.getAsList()
except AttributeError:
return False
def __ne__(self, other):
return not self.__eq__(other)
def __repr__(self):
return repr({
'type': 'cons list',
'sequence': self.getAsList()
})
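# Small illustration of the constant-time slicing (illustrative only):
#   cl = ConsList('abc')
#   cl.first()              # -> 'a'
#   cl.rest().getAsList()   # -> ['b', 'c']   (the underlying sequence is shared, not copied)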
class Parser(object):
'''
A wrapper around a callable of type
`[t] -> s -> ME e ([t], s, a)`, to create a `Parser e s (m t) a`.
Run the parser using the `parse` method.
'''
def __init__(self, parse):
self.parse = parse
def checkFunction(fName, actual):
if not hasattr(actual, '__call__'):
obj = {
'message' : 'type error',
'function': fName,
'expected': 'function',
'actual' : actual
}
raise TypeError(obj)
# else: nothing to do
def checkParser(fName, actual):
if not isinstance(actual, Parser):
obj = {
'message' : 'type error',
'function': fName,
'expected': 'Parser',
'actual' : actual
}
raise TypeError(obj)
# else: nothing to do
def result(value, rest, state):
return {'result': value, 'rest': rest, 'state': state}
def good(value, rest, state):
return M.pure(result(value, rest, state))
def pure(x):
'''
a -> Parser e s (m t) a
'''
def f(xs, s):
return good(x, xs, s)
return Parser(f)
# Parser e s (m t) a
zero = Parser(functions.const_f(M.zero))
def error(e):
'''
e -> Parser e s (m t) a
'''
return Parser(functions.const_f(M.error(e)))
def fmap(g, parser):
'''
(a -> b) -> Parser e s (m t) a -> Parser e s (m t) b
'''
checkParser('fmap', parser)
checkFunction('fmap', g)
def h(r):
return result(g(r['result']), r['rest'], r['state'])
def f(xs, s):
return parser.parse(xs, s).fmap(h)
return Parser(f)
def bind(parser, g):
'''
Parser e s (m t) a -> (a -> Parser e s (m t) b) -> Parser e s (m t) b
'''
checkParser('bind', parser)
checkFunction('bind', g)
def f(xs, s):
r = parser.parse(xs, s)
val = r.value
if r.status == 'success':
return g(val['result']).parse(val['rest'], val['state'])
else:
return r
return Parser(f)
def check(predicate, parser):
'''
(a -> Bool) -> Parser e s (m t) a -> Parser e s (m t) a
'''
checkFunction('check', predicate)
checkParser('check', parser)
return bind(parser, lambda value: pure(value) if predicate(value) else zero)
def update(f):
'''
(m t -> m t) -> Parser e s (m t) (m t)
'''
checkFunction('update', f)
def g(xs, s):
ys = f(xs)
return good(ys, ys, s)
return Parser(g)
# Parser e s (m t) (m t)
get = update(functions.id_f)
# m t -> Parser e s (m t) a
put = functions.compose(update, functions.const_f)
def updateState(g):
'''
(s -> s) -> Parser e s (m t) a
'''
checkFunction('updateState', g)
def f(xs, s):
new_state = g(s)
return good(new_state, xs, new_state)
return Parser(f)
# Parser e s (m t) s
getState = updateState(functions.id_f)
# s -> Parser e s (m t) a
putState = functions.compose(updateState, functions.const_f)
def many0(parser):
'''
Parser e s (m t) a -> Parser e s (m t) [a]
'''
checkParser('many0', parser)
def f(xs, s):
vals = []
tokens = xs
state = s
while True:
r = parser.parse(tokens, state)
if r.status == 'success':
vals.append(r.value['result'])
state = r.value['state']
tokens = r.value['rest']
elif r.status == 'failure':
return good(vals, tokens, state)
else: # must respect errors
return r
return Parser(f)
def many1(parser):
'''
Parser e s (m t) a -> Parser e s (m t) [a]
'''
checkParser('many1', parser)
return check(lambda x: len(x) > 0, many0(parser))
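# Note on the two combinators above: many0 never fails -- zero matches simply yield an
# empty list -- whereas many1 goes through check() and therefore needs at least one
# match. Both stop on a 'failure' of the inner parser but still propagate 'error'.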
def seq(parsers):
'''
[Parser e s (m t) a] -> Parser e s (m t) [a]
'''
for (ix, p) in enumerate(parsers):
checkParser('seq-{}'.format(ix), p)
def f(xs, s):
vals = []
state, tokens = s, xs
for p in parsers:
r = p.parse(tokens, state)
if r.status == 'success':
vals.append(r.value['result'])
state = r.value['state']
tokens = r.value['rest']
else:
return r
return good(vals, tokens, state)
return Parser(f)
def appP(p, *parsers):
'''
Parser e s (m t) (a -> ... -> z) -> Parser e s (m t) a -> ... -> Parser e s (m t) z
'''
checkParser('appP', p)
for (ix, parser) in enumerate(parsers):
checkParser('appP-{}'.format(ix), parser)
def g(f):
return fmap(lambda args: f(*args), seq(parsers))
return bind(p, g)
def app(f, *args):
'''
(a -> ... -> z) -> Parser e s (m t) a -> ... -> Parser e s (m t) z
'''
checkFunction('app', f)
return appP(pure(f), *args)
def seq2L(p1, p2):
'''
Parser e s (m t) a -> Parser e s (m t) b -> Parser e s (m t) a
'''
checkParser('seq2L', p1)
checkParser('seq2L', p2)
return app(functions.first, p1, p2)
def seq2R(p1, p2):
'''
Parser e s (m t) a -> Parser e s (m t) b -> Parser e s (m t) b
'''
checkParser('seq2R', p1)
checkParser('seq2R', p2)
return app(functions.second, p1, p2)
def repeat(count, parser):
'''
Int -> Parser e s (m t) a -> Parser e s (m t) [a]
'''
checkParser('repeat', parser)
return seq(functions.replicate(count, parser))
def lookahead(parser):
'''
Parser e s (m t) a -> Parser e s (m t) a
'''
checkParser('lookahead', parser)
def g(xs):
def h(s):
return app(lambda a, _1, _2: a, parser, put(xs), putState(s))
return bind(getState, h)
return bind(get, g)
def not0(parser):
'''
Parser e s (m t) a -> Parser e s (m t) None
'''
checkParser('not0', parser)
def f(xs, s):
r = parser.parse(xs, s)
if r.status == 'error':
return r
elif r.status == 'success':
return M.zero
else:
return good(None, xs, s)
return Parser(f)
def alt(parsers):
'''
[Parser e s (m t) a] -> Parser e s (m t) a
'''
for (ix, p) in enumerate(parsers):
checkParser('alt-{}'.format(ix), p)
def f(xs, s):
r = M.zero
for p in parsers:
r = p.parse(xs, s)
if r.status in ['success', 'error']:
return r
return r
return Parser(f)
def optional(parser, default=None):
'''
Parser e s (m t) a -> a -> Parser e s (m t) a
'''
checkParser('optional', parser)
return alt([parser, pure(default)])
def catchError(parser, f):
'''
Parser e s (m t) a -> (e -> Parser e s (m t) a) -> Parser e s (m t) a
'''
checkFunction('catchError', f)
checkParser('catchError', parser)
def g(xs, s):
v = parser.parse(xs, s)
if v.status == 'error':
return f(v.value).parse(xs, s)
return v
return Parser(g)
def mapError(f, parser):
'''
(e -> e) -> Parser e s (m t) a -> Parser e s (m t) a
'''
checkFunction('mapError', f)
checkParser('mapError', parser)
return catchError(parser, functions.compose(error, f))
def commit(e, parser):
'''
e -> Parser e s (m t) a -> Parser e s (m t) a
'''
checkParser('commit', parser)
return alt([parser, error(e)])
def addError(e, parser):
'''
e -> Parser [e] s (m t) a -> Parser [e] s (m t) a
assumes errors are lists
'''
checkParser('addError', parser)
return mapError(lambda es: functions.cons(e, es), parser)
def sepBy1(parser, separator):
'''
Parser e s (m t) a -> Parser e s (m t) b -> Parser e s (m t) (a, [(b, a)])
'''
checkParser('sepBy1', parser)
checkParser('sepBy1', separator)
return app(functions.pair, parser, many0(app(functions.pair, separator, parser)))
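# Shape of a sepBy1 result, sketched with a hypothetical digit parser and comma
# separator (assuming functions.pair builds a 2-tuple): parsing "1,2,3" would give
#   ('1', [(',', '2'), (',', '3')])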
def sepBy0(parser, separator):
'''
Parser e s (m t) a -> Parser e s (m t) b -> Parser e s (m t) (Maybe (a, [(b, a)]))
'''
checkParser('sepBy0', parser)
checkParser('sepBy0', separator)
return optional(sepBy1(parser, separator))
class Itemizer(object):
def __init__(self, f):
'''
f :: t -> s -> s
'''
checkFunction('Itemizer', f)
self.f = f
self.item = self._item()
def _item(self):
'''
Parser e s (m t) t
'''
def g(xs, s):
if xs.isEmpty():
return M.zero
first, rest = xs.first(), xs.rest()
return good(first, rest, self.f(first, s))
return Parser(g)
def satisfy(self, pred):
'''
(t -> Bool) -> Parser e s (m t) t
'''
checkFunction('satisfy', pred)
return check(pred, self.item)
def literal(self, x):
'''
Eq t => t -> Parser e s (m t) t
'''
return self.satisfy(lambda y: x == y)
def not1(self, parser):
'''
Parser e s (m t) a -> Parser e s (m t) t
'''
checkParser('not1', parser)
return seq2R(not0(parser), self.item)
def string(self, elems):
'''
Eq t => [t] -> Parser e s (m t) [t]
'''
matcher = seq(list(map(self.literal, elems)))
return seq2R(matcher, pure(elems))
def oneOf(self, elems):
c_set = set(elems)
return self.satisfy(lambda x: x in c_set)
# doesn't do anything to the state
basic = Itemizer(functions.second)
# assumes the state is a 2-tuple of integers (line, column)
position = Itemizer(functions.updatePosition)
# assumes that state is an integer -- how many tokens have been consumed
count = Itemizer(lambda _, s: s + 1)
def run(parser, input_string, state=(1,1)):
'''
Run a parser given the token input and state.
'''
return parser.parse(ConsList(input_string), state)
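# Minimal self-check sketch. Assumptions: the bundled `functions` module provides the
# helpers used above, and MaybeError exposes the `status`/`value` attributes this
# module already relies on. Only the state-free `basic` itemizer is exercised.
if __name__ == '__main__':
    ab = seq([basic.literal('a'), basic.literal('b')])
    outcome = run(ab, 'abc')
    # expect a success whose result is ['a', 'b'] and whose rest still holds 'c'
    print(outcome.status)
    print(outcome.value['result'])
    print(outcome.value['rest'].getAsList())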
|
|
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding field 'Product.sku'
db.add_column('shop_product', 'sku',
self.gf('cartridge.shop.fields.SKUField')(max_length=20, unique=True, null=True, blank=True),
keep_default=False)
def backwards(self, orm):
# Deleting field 'Product.sku'
db.delete_column('shop_product', 'sku')
models = {
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'generic.assignedkeyword': {
'Meta': {'ordering': "('_order',)", 'object_name': 'AssignedKeyword'},
'_order': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'keyword': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'assignments'", 'to': "orm['generic.Keyword']"}),
'object_pk': ('django.db.models.fields.IntegerField', [], {})
},
'generic.keyword': {
'Meta': {'object_name': 'Keyword'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'site': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['sites.Site']"}),
'slug': ('django.db.models.fields.CharField', [], {'max_length': '2000', 'null': 'True', 'blank': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '500'})
},
'generic.rating': {
'Meta': {'object_name': 'Rating'},
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'object_pk': ('django.db.models.fields.IntegerField', [], {}),
'value': ('django.db.models.fields.IntegerField', [], {})
},
'pages.page': {
'Meta': {'ordering': "('titles',)", 'object_name': 'Page'},
'_order': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'content_model': ('django.db.models.fields.CharField', [], {'max_length': '50', 'null': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'expiry_date': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'gen_description': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'in_footer': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'in_navigation': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'keywords': ('mezzanine.generic.fields.KeywordsField', [], {'object_id_field': "'object_pk'", 'to': "orm['generic.AssignedKeyword']", 'frozen_by_south': 'True'}),
'keywords_string': ('django.db.models.fields.CharField', [], {'max_length': '500', 'blank': 'True'}),
'login_required': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'parent': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'children'", 'null': 'True', 'to': "orm['pages.Page']"}),
'publish_date': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'short_url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'site': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['sites.Site']"}),
'slug': ('django.db.models.fields.CharField', [], {'max_length': '2000', 'null': 'True', 'blank': 'True'}),
'status': ('django.db.models.fields.IntegerField', [], {'default': '2'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '500'}),
'titles': ('django.db.models.fields.CharField', [], {'max_length': '1000', 'null': 'True'})
},
'shop.cart': {
'Meta': {'object_name': 'Cart'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'last_updated': ('django.db.models.fields.DateTimeField', [], {'null': 'True'})
},
'shop.cartitem': {
'Meta': {'object_name': 'CartItem'},
'cart': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'items'", 'to': "orm['shop.Cart']"}),
'description': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'image': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True'}),
'quantity': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'sku': ('cartridge.shop.fields.SKUField', [], {'max_length': '20'}),
'total_price': ('cartridge.shop.fields.MoneyField', [], {'default': "'0'", 'null': 'True', 'max_digits': '10', 'decimal_places': '2', 'blank': 'True'}),
'unit_price': ('cartridge.shop.fields.MoneyField', [], {'default': "'0'", 'null': 'True', 'max_digits': '10', 'decimal_places': '2', 'blank': 'True'}),
'url': ('django.db.models.fields.CharField', [], {'max_length': '200'})
},
'shop.category': {
'Meta': {'ordering': "('_order',)", 'object_name': 'Category', '_ormbases': ['pages.Page']},
'combined': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'content': ('mezzanine.core.fields.RichTextField', [], {}),
'options': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'product_options'", 'blank': 'True', 'to': "orm['shop.ProductOption']"}),
'page_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['pages.Page']", 'unique': 'True', 'primary_key': 'True'}),
'price_max': ('cartridge.shop.fields.MoneyField', [], {'null': 'True', 'max_digits': '10', 'decimal_places': '2', 'blank': 'True'}),
'price_min': ('cartridge.shop.fields.MoneyField', [], {'null': 'True', 'max_digits': '10', 'decimal_places': '2', 'blank': 'True'}),
'products': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['shop.Product']", 'symmetrical': 'False', 'blank': 'True'}),
'sale': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['shop.Sale']", 'null': 'True', 'blank': 'True'})
},
'shop.discountcode': {
'Meta': {'object_name': 'DiscountCode'},
'active': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'categories': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'discountcode_related'", 'blank': 'True', 'to': "orm['shop.Category']"}),
'code': ('cartridge.shop.fields.DiscountCodeField', [], {'unique': 'True', 'max_length': '20'}),
'discount_deduct': ('cartridge.shop.fields.MoneyField', [], {'null': 'True', 'max_digits': '10', 'decimal_places': '2', 'blank': 'True'}),
'discount_exact': ('cartridge.shop.fields.MoneyField', [], {'null': 'True', 'max_digits': '10', 'decimal_places': '2', 'blank': 'True'}),
'discount_percent': ('cartridge.shop.fields.PercentageField', [], {'null': 'True', 'max_digits': '5', 'decimal_places': '2', 'blank': 'True'}),
'free_shipping': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'min_purchase': ('cartridge.shop.fields.MoneyField', [], {'null': 'True', 'max_digits': '10', 'decimal_places': '2', 'blank': 'True'}),
'products': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['shop.Product']", 'symmetrical': 'False', 'blank': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'uses_remaining': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'valid_from': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'valid_to': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'})
},
'shop.order': {
'Meta': {'ordering': "('-id',)", 'object_name': 'Order'},
'additional_instructions': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'billing_detail_city': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'billing_detail_country': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'billing_detail_email': ('django.db.models.fields.EmailField', [], {'max_length': '75'}),
'billing_detail_first_name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'billing_detail_last_name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'billing_detail_phone': ('django.db.models.fields.CharField', [], {'max_length': '20'}),
'billing_detail_postcode': ('django.db.models.fields.CharField', [], {'max_length': '10'}),
'billing_detail_state': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'billing_detail_street': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'discount_code': ('cartridge.shop.fields.DiscountCodeField', [], {'max_length': '20', 'blank': 'True'}),
'discount_total': ('cartridge.shop.fields.MoneyField', [], {'null': 'True', 'max_digits': '10', 'decimal_places': '2', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'item_total': ('cartridge.shop.fields.MoneyField', [], {'null': 'True', 'max_digits': '10', 'decimal_places': '2', 'blank': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '40'}),
'shipping_detail_city': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'shipping_detail_country': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'shipping_detail_first_name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'shipping_detail_last_name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'shipping_detail_phone': ('django.db.models.fields.CharField', [], {'max_length': '20'}),
'shipping_detail_postcode': ('django.db.models.fields.CharField', [], {'max_length': '10'}),
'shipping_detail_state': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'shipping_detail_street': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'shipping_total': ('cartridge.shop.fields.MoneyField', [], {'null': 'True', 'max_digits': '10', 'decimal_places': '2', 'blank': 'True'}),
'shipping_type': ('django.db.models.fields.CharField', [], {'max_length': '50', 'blank': 'True'}),
'status': ('django.db.models.fields.IntegerField', [], {'default': '1'}),
'time': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'blank': 'True'}),
'total': ('cartridge.shop.fields.MoneyField', [], {'null': 'True', 'max_digits': '10', 'decimal_places': '2', 'blank': 'True'}),
'transaction_id': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'user_id': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'})
},
'shop.orderitem': {
'Meta': {'object_name': 'OrderItem'},
'description': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'order': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'items'", 'to': "orm['shop.Order']"}),
'quantity': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'sku': ('cartridge.shop.fields.SKUField', [], {'max_length': '20'}),
'total_price': ('cartridge.shop.fields.MoneyField', [], {'default': "'0'", 'null': 'True', 'max_digits': '10', 'decimal_places': '2', 'blank': 'True'}),
'unit_price': ('cartridge.shop.fields.MoneyField', [], {'default': "'0'", 'null': 'True', 'max_digits': '10', 'decimal_places': '2', 'blank': 'True'})
},
'shop.product': {
'Meta': {'object_name': 'Product'},
'available': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'categories': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['shop.Category']", 'symmetrical': 'False', 'blank': 'True'}),
'content': ('mezzanine.core.fields.RichTextField', [], {}),
'date_added': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'expiry_date': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'gen_description': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'image': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'keywords': ('mezzanine.generic.fields.KeywordsField', [], {'object_id_field': "'object_pk'", 'to': "orm['generic.AssignedKeyword']", 'frozen_by_south': 'True'}),
'keywords_string': ('django.db.models.fields.CharField', [], {'max_length': '500', 'blank': 'True'}),
'num_in_stock': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'publish_date': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'rating': ('mezzanine.generic.fields.RatingField', [], {'object_id_field': "'object_pk'", 'to': "orm['generic.Rating']", 'frozen_by_south': 'True'}),
'rating_average': ('django.db.models.fields.FloatField', [], {'default': '0'}),
'rating_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'related_products': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'related_products_rel_+'", 'blank': 'True', 'to': "orm['shop.Product']"}),
'sale_from': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'sale_id': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'sale_price': ('cartridge.shop.fields.MoneyField', [], {'null': 'True', 'max_digits': '10', 'decimal_places': '2', 'blank': 'True'}),
'sale_to': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'short_url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'site': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['sites.Site']"}),
'sku': ('cartridge.shop.fields.SKUField', [], {'max_length': '20', 'unique': 'True', 'null': 'True', 'blank': 'True'}),
'slug': ('django.db.models.fields.CharField', [], {'max_length': '2000', 'null': 'True', 'blank': 'True'}),
'status': ('django.db.models.fields.IntegerField', [], {'default': '2'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '500'}),
'unit_price': ('cartridge.shop.fields.MoneyField', [], {'null': 'True', 'max_digits': '10', 'decimal_places': '2', 'blank': 'True'}),
'upsell_products': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'upsell_products_rel_+'", 'blank': 'True', 'to': "orm['shop.Product']"})
},
'shop.productaction': {
'Meta': {'unique_together': "(('product', 'timestamp'),)", 'object_name': 'ProductAction'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'product': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'actions'", 'to': "orm['shop.Product']"}),
'timestamp': ('django.db.models.fields.IntegerField', [], {}),
'total_cart': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'total_purchase': ('django.db.models.fields.IntegerField', [], {'default': '0'})
},
'shop.productimage': {
'Meta': {'ordering': "('_order',)", 'object_name': 'ProductImage'},
'_order': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'description': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
'file': ('django.db.models.fields.files.ImageField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'product': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'images'", 'to': "orm['shop.Product']"})
},
'shop.productoption': {
'Meta': {'object_name': 'ProductOption'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('cartridge.shop.fields.OptionField', [], {'max_length': '50', 'null': 'True'}),
'type': ('django.db.models.fields.IntegerField', [], {})
},
'shop.productvariation': {
'Meta': {'ordering': "('-default',)", 'object_name': 'ProductVariation'},
'default': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'image': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['shop.ProductImage']", 'null': 'True', 'blank': 'True'}),
'num_in_stock': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'option1': ('cartridge.shop.fields.OptionField', [], {'max_length': '50', 'null': 'True'}),
'option2': ('cartridge.shop.fields.OptionField', [], {'max_length': '50', 'null': 'True'}),
'product': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'variations'", 'to': "orm['shop.Product']"}),
'sale_from': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'sale_id': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'sale_price': ('cartridge.shop.fields.MoneyField', [], {'null': 'True', 'max_digits': '10', 'decimal_places': '2', 'blank': 'True'}),
'sale_to': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'sku': ('cartridge.shop.fields.SKUField', [], {'max_length': '20', 'unique': 'True', 'null': 'True', 'blank': 'True'}),
'unit_price': ('cartridge.shop.fields.MoneyField', [], {'null': 'True', 'max_digits': '10', 'decimal_places': '2', 'blank': 'True'})
},
'shop.sale': {
'Meta': {'object_name': 'Sale'},
'active': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'categories': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'sale_related'", 'blank': 'True', 'to': "orm['shop.Category']"}),
'discount_deduct': ('cartridge.shop.fields.MoneyField', [], {'null': 'True', 'max_digits': '10', 'decimal_places': '2', 'blank': 'True'}),
'discount_exact': ('cartridge.shop.fields.MoneyField', [], {'null': 'True', 'max_digits': '10', 'decimal_places': '2', 'blank': 'True'}),
'discount_percent': ('cartridge.shop.fields.PercentageField', [], {'null': 'True', 'max_digits': '5', 'decimal_places': '2', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'products': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['shop.Product']", 'symmetrical': 'False', 'blank': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'valid_from': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'valid_to': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'})
},
'sites.site': {
'Meta': {'ordering': "('domain',)", 'object_name': 'Site', 'db_table': "'django_site'"},
'domain': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
}
}
complete_apps = ['shop']
|
|
'''
Created on Apr 8, 2014
@author: cmills
'''
from tasr_test import TASRTestCase
import unittest
import time
import tasr.app
from tasr import AvroSchemaRepository
from tasr.group import InvalidGroupException
APP = tasr.app.TASR_APP
APP.set_config_mode('local')
HOST_PORT = r'%s:%s' % (APP.config.host, APP.config.port)
try:
import redis
# note we use port 5379 (instead of 6379) in prod, so we test against that
R_TEST = redis.StrictRedis(host=APP.config.redis_host,
port=APP.config.redis_port, db=0)
R_TEST.keys('no_match_pattern') # should throw exception if no redis
HASR_LOCAL_REDIS = True
except Exception:  # either the redis module is missing or the probe above could not connect
HASR_LOCAL_REDIS = False
class TestTASR(TASRTestCase):
def setUp(self):
self.event_type = "gold"
fix_rel_path = "schemas/%s.avsc" % (self.event_type)
self.avsc_file = TASRTestCase.get_fixture_file(fix_rel_path, "r")
self.schema_str = self.avsc_file.read()
self.schema_version = 0
self.asr = None
if HASR_LOCAL_REDIS:
self.asr = AvroSchemaRepository(host=APP.config.redis_host,
port=APP.config.redis_port)
# clear out all the keys before beginning -- careful!
self.asr.redis.flushdb()
else:
self.fail(u'No Redis on %s:%s' % (APP.config.redis_host,
APP.config.redis_port))
def tearDown(self):
if HASR_LOCAL_REDIS:
# clear out any added data
self.asr.redis.flushdb()
# registration tests
def test_register_group(self):
        '''register_group() - as expected'''
self.asr.register_group(self.event_type)
def test_register_group_fail_for_invalid_group(self):
'''register_group() - error case'''
try:
self.asr.register_group("%s-B" % self.event_type)
self.fail(u'Should have thrown an InvalidGroupException.')
except InvalidGroupException:
pass
def test_register_schema(self):
'''register_schema() - as expected'''
rs = self.asr.register_schema(self.event_type, self.schema_str)
        self.assertFalse(rs is None, u'Failed to register schema')
dif = long(time.time()) - rs.current_version_timestamp(self.event_type)
self.assertTrue(dif <= 1, 'crazy timestamp')
def test_register_fail_for_empty_schema(self):
'''register_schema() - error case'''
try:
self.asr.register_schema(self.event_type, None)
self.fail(u'Should have thrown a ValueError.')
except ValueError:
pass
def test_register_fail_for_invalid_schema(self):
'''register_schema() - error case'''
try:
self.asr.register_schema(self.event_type, "%s }" % self.schema_str)
self.fail(u'Should have raised a ValueError.')
except ValueError:
pass
def test_register_fail_for_invalid_group(self):
'''register_schema() - error case'''
try:
self.asr.register_schema("%s-B" % self.event_type, self.schema_str)
self.fail(u'Should have thrown an InvalidGroupException.')
except InvalidGroupException:
pass
def test_reg_and_rereg(self):
'''register_schema() - re-reg of current shouldn't change versions'''
rs = self.asr.register_schema(self.event_type, self.schema_str)
re_rs = self.asr.register_schema(self.event_type, self.schema_str)
self.assertEqual(rs, re_rs, u'Re-registered schema different.')
def test_reg_1_schema_for_2_topics(self):
'''register_schema() - same schema for two topics'''
rs = self.asr.register_schema(self.event_type, self.schema_str)
get_rs = self.asr.get_latest_schema_for_group(self.event_type)
self.assertEqual(rs, get_rs, u'Recovered registered schema unequal.')
alt_topic = 'bob'
rs2 = self.asr.register_schema(alt_topic, self.schema_str)
get_rs2 = self.asr.get_latest_schema_for_group(alt_topic)
self.assertEqual(rs2, get_rs2, u'Recovered reg schema unequal.')
self.assertEqual(get_rs, get_rs2, u'Recovered reg schema unequal.')
# retrieval tests
def test_lookup(self):
'''lookup_group() - as expected'''
self.assertFalse(self.asr.lookup_group(self.event_type),
'Group should not be registered yet.')
self.asr.register_schema(self.event_type, self.schema_str)
self.assertTrue(self.asr.lookup_group(self.event_type),
'Group should be registered.')
def test_get_latest_for_topic(self):
'''get_latest_schema_for_group() - as expected'''
rs = self.asr.register_schema(self.event_type, self.schema_str)
rs2 = self.asr.get_latest_schema_for_group(self.event_type)
self.assertEqual(rs, rs2, u'Recovered registered schema unequal.')
def test_get_latest_fail_for_missing_topic(self):
'''get_latest_schema_for_group() - error case'''
rs = self.asr.get_latest_schema_for_group(self.event_type)
self.assertEqual(None, rs, 'expected None back for missing topic')
def test_reg_then_reg_new_and_get_latest_for_topic(self):
'''get_latest_schema_for_group() - as expected'''
rs = self.asr.register_schema(self.event_type, self.schema_str)
# modify the namespace in the schema to ensure a non-whitespace change
schema_str_2 = self.schema_str.replace('tagged.events',
'tagged.events.alt', 1)
rs2 = self.asr.register_schema(self.event_type, schema_str_2)
self.assertNotEqual(rs, rs2, u'Modded schema unexpectedly equal')
# should have two versions of the gold schema now, so grab the latest
latest_schema = self.asr.get_latest_schema_for_group(self.event_type)
latest_schema_str = latest_schema.canonical_schema_str
self.assertNotEqual(rs.canonical_schema_str, latest_schema_str,
u'Latest schema equal to earlier version')
self.assertEqual(rs2.canonical_schema_str, latest_schema_str,
u'Latest schema unequal to later version')
def test_reg_50_then_get_for_topic_and_version(self):
'''get_schema_for_group_and_version() - as expected'''
rs_list = []
for v in range(1, 50):
ver_schema_str = self.schema_str.replace('tagged.events',
'tagged.events.%s' % v, 1)
rs_list.append(self.asr.register_schema(self.event_type, ver_schema_str))
for v in range(1, 50):
re_rs = self.asr.get_schema_for_group_and_version(self.event_type, v)
self.assertEqual(rs_list[v - 1], re_rs,
u'retrieved schema unequal.')
def test_get_for_topic_and_version_fail_for_missing_version(self):
'''get_schema_for_group_and_version() - error case'''
self.asr.register_schema(self.event_type, self.schema_str)
rs = self.asr.get_schema_for_group_and_version(self.event_type, 2)
        self.assertEqual(None, rs, 'expected None back for missing version')
def test_get_for_id(self):
'''schema_for_id_str() - as expected'''
rs = self.asr.register_schema(self.event_type, self.schema_str)
rs.version = None
rs.topic = None
self.assertEqual(rs, self.asr.get_schema_for_id_str(rs.md5_id),
u'MD5 ID retrieved unequal registered schema')
self.assertEqual(rs, self.asr.get_schema_for_id_str(rs.sha256_id),
u'SHA256 ID retrieved unequal registered schema')
def test_get_first_for_id(self):
'''schema_for_id_str() - with non-sequential re-registration'''
rs = self.asr.register_schema(self.event_type, self.schema_str)
# modify the namespace in the schema to ensure a non-whitespace change
schema_str_2 = self.schema_str.replace('tagged.events',
'tagged.events.alt', 1)
rs2 = self.asr.register_schema(self.event_type, schema_str_2)
self.assertNotEqual(rs, rs2, u'Modded schema unexpectedly equal')
# now pull the first by id and assert equality to _rs
re_rs = self.asr.get_schema_for_id_str(rs.md5_id)
self.assertEqual(rs, re_rs, u'MD5 ID retrieved unequal reg schema')
self.assertEqual(rs, self.asr.get_schema_for_id_str(rs.sha256_id),
u'SHA256 ID retrieved unequal registered schema')
def test_get_for_schema_str(self):
'''schema_for_schema_str() - as expected'''
rs = self.asr.register_schema(self.event_type, self.schema_str)
re_rs = self.asr.get_schema_for_schema_str(self.schema_str)
self.assertEqual(rs, re_rs, u'Schema str retrieved unequal reg schema')
def test_get_for_schema_str_fail_for_bad_schema(self):
'''schema_for_schema_str() - error case'''
self.asr.register_schema(self.event_type, self.schema_str)
try:
self.asr.get_schema_for_schema_str("%s }" % self.schema_str)
            self.fail(u'Should have raised a ValueError.')
except ValueError:
pass
def test_get_latest_versions_for_topic(self):
'''get_latest_schema_versions_for_group() - pull the most recent 2
when there are 3.'''
rs1 = self.asr.register_schema(self.event_type, self.schema_str)
schema_str_2 = self.schema_str.replace('tagged.events',
'tagged.events.2', 1)
rs2 = self.asr.register_schema(self.event_type, schema_str_2)
schema_str_3 = self.schema_str.replace('tagged.events',
'tagged.events.3', 1)
rs3 = self.asr.register_schema(self.event_type, schema_str_3)
rs_list = self.asr.get_latest_schema_versions_for_group(self.event_type, 2)
self.assertEqual(2, len(rs_list), 'Expected a list of length 2.')
self.assertEqual(rs2, rs_list[0], 'Expecting RS2 as first entry.')
self.assertEqual(rs3, rs_list[1], 'Expecting RS3 as second entry.')
# now test getting all the versions
rs_list = self.asr.get_latest_schema_versions_for_group(self.event_type, -1)
        self.assertEqual(3, len(rs_list), 'Expected a list of length 3.')
self.assertEqual(rs1, rs_list[0], 'Expecting RS1 as first entry.')
self.assertEqual(rs2, rs_list[1], 'Expecting RS2 as second entry.')
self.assertEqual(rs3, rs_list[2], 'Expecting RS3 as third entry.')
def test_legacy_topic_list_matches_vid_list(self):
'''Check that the old topic.* list matches the vid.* list with multiple
schema versions for a group registered'''
self.asr.register_schema(self.event_type, self.schema_str)
schema_str_2 = self.schema_str.replace('tagged.events',
'tagged.events.2', 1)
self.asr.register_schema(self.event_type, schema_str_2)
schema_str_3 = self.schema_str.replace('tagged.events',
'tagged.events.3', 1)
self.asr.register_schema(self.event_type, schema_str_3)
for ver in range(1, 3):
t_val = self.asr.redis.lindex('topic.%s' % self.event_type, ver)
v_val = self.asr.redis.lindex('vid.%s' % self.event_type, ver)
self.assertEqual(t_val, v_val, 'Mismatch at index %s' % ver)
def test_get_all_groups(self):
'''get_all_groups() - as expected'''
self.assertEqual(0, len(self.asr.get_all_groups()),
'should not be any topics yet')
self.asr.register_schema(self.event_type, self.schema_str)
groups = self.asr.get_all_groups()
self.assertEqual(1, len(groups), 'should have 1')
self.assertEqual(self.event_type, groups[0].name,
'expected group missing')
schema_str_2 = self.schema_str.replace('tagged.events',
'tagged.events.alt', 1)
# reg another version -- should not increase number of groups
self.asr.register_schema(self.event_type, schema_str_2)
groups = self.asr.get_all_groups()
self.assertEqual(1, len(groups), 'should still have 1 group')
def test_get_active_groups(self):
'''get_active_groups() - as expected'''
self.assertEqual(0, len(self.asr.get_all_groups()),
'should not be any groups yet')
self.assertEqual(0, len(self.asr.get_active_groups()),
'should not be any groups yet')
# reg the group without a schema -- active count should still be 0
self.asr.register_group(self.event_type)
self.assertEqual(1, len(self.asr.get_all_groups()), 'should have 1')
self.assertEqual(0, len(self.asr.get_active_groups()),
'should not be any ACTIVE groups yet')
# now reg a schema for the group
self.asr.register_schema(self.event_type, self.schema_str)
self.assertEqual(1, len(self.asr.get_all_groups()), 'should have 1')
self.assertEqual(1, len(self.asr.get_active_groups()), 'should be 1')
def test_multi_version_for_topic(self):
'''get_versions_for_id_str_and_group() - as expected'''
rs = self.asr.register_schema(self.event_type, self.schema_str)
# modify the namespace in the schema to ensure a non-whitespace change
schema_str_2 = self.schema_str.replace('tagged.events',
'tagged.events.alt', 1)
self.asr.register_schema(self.event_type, schema_str_2)
# now re-register_schema the original schema, which should become version 3
rs3 = self.asr.register_schema(self.event_type, self.schema_str)
self.assertEqual(rs.sha256_id, rs3.sha256_id,
u'Unequal SHA256 IDs on re-reg!')
self.assertNotEqual(rs, rs3, u'Expected different versions for topic.')
vlist = self.asr.get_versions_for_id_str_and_group(rs3.sha256_id,
self.event_type)
self.assertEqual(2, len(vlist), u'Expected two entry version list.')
self.assertEqual(1, vlist[0], u'Expected first version to be 1.')
self.assertEqual(3, vlist[1], u'Expected second version to be 3.')
# deletion tests
def test_delete_group(self):
'''Test that a group delete works.'''
self.asr.register_schema(self.event_type, self.schema_str)
self.assertTrue(self.asr.lookup_group(self.event_type),
'Group should be registered.')
self.asr.delete_group(self.event_type)
self.assertFalse(self.asr.lookup_group(self.event_type),
'Group should not be registered any more.')
def test_delete_group_with_cross_registered_schema(self):
'''Test that deleting a group with a schema version that is also
registered to a second group does not delete the still needed schema
version.'''
alt_group_name = 'bob'
self.asr.register_schema(self.event_type, self.schema_str)
self.asr.register_schema(alt_group_name, self.schema_str)
self.assertTrue(self.asr.lookup_group(self.event_type),
'Group should be registered.')
self.assertTrue(self.asr.lookup_group(alt_group_name),
'Group should be registered.')
self.asr.delete_group(self.event_type)
self.assertFalse(self.asr.lookup_group(self.event_type),
'Group should not be registered any more.')
self.assertTrue(self.asr.lookup_group(alt_group_name),
'Group should be registered.')
if __name__ == "__main__":
SUITE = unittest.TestLoader().loadTestsFromTestCase(TestTASR)
unittest.TextTestRunner(verbosity=2).run(SUITE)
|
|
#!/usr/bin/env python2
# sudoku3.py
# SudokuSolver
#
# Created by joe yuan on 3/6/11.
# Copyright 2011 __MyCompanyName__. All rights reserved.
import time
import sys
def rowbyrow():
d = []
for i in range(9):
x = INPUT_PROCESS(i)
d.append(x)
for k,i in enumerate(d):
for j,c in enumerate(i):
d[k][j] = int(c)
return d
def INPUT_PROCESS(i,u=False):
while not u:
x = INPUT(i)
x, u = input_check(x,i)
return x
def INPUT(i):
x = list(raw_input("Row " + str(i+1) + ":\n"))
if ''.join(p for p in x) in ["Q","quit","q","Quit","QUIT"]:
sys.exit(1)
print(x)
return x
def input_check(x,i,u=False):
while not u:
x, u = entry_check(x,i)
x, c = length_check(x,i)
return x, u
def length_check(x,i):
while len(x) != 9:
print("Invalid entry. Please enter the 9 entries from the indicated row using zeroes for blank entries:")
x = INPUT(i)
x, c = input_error(x,i)
return x, c
def entry_check(x,i,c = False,u = True):
for p in x:
try:
h = int(p)
except ValueError:
print("Invalid entry. Each space must be an integer 0-9.")
u = False
return x,u
return x, u
def input_error(x,i):
c = raw_input("Is this correct? (y/n)\n")
while c == "n":
print("Please input the row again: ")
x = INPUT(i)
x,c = input_check(x,i)
return x,c
def puzzprint(n):
    # Print the grid three rows at a time with box separators.
    print '+ - - - + - - - + - - - +'
    for band in range(3):
        for p in range(3 * band, 3 * band + 3):
            print '|',
            for i in range(3):
                print n[p][i],
            print '|',
            for i in range(3, 6):
                print n[p][i],
            print '|',
            for i in range(6, 9):
                print n[p][i],
            print '|'
        print '+ - - - + - - - + - - - +'
### Transforms
def transpose(n):
"""Takes a list-style Matrix and gives back the transpose"""
d = [[n[j][i] for j in range(len(n[0]))] for i in range(len(n))]
return d
def box(n):
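    """Regroup the 9x9 grid so that each 3x3 box becomes a row of the result.

    Boxes are collected column by column: top-left, middle-left, bottom-left,
    then the middle column of boxes, then the right-hand column.
    """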
d = [[] for i in range(len(n))]
m = 0
for Q in range(len(n)):
if 18 <= m < 27:
if 24 <= m < 27:
for i in range(6,9):
m = m + 1
for c in range(6,9):
d[Q].append(n[i][c])
elif 21 <= m < 24:
for i in range(3,6):
m = m + 1
for c in range(6,9):
d[Q].append(n[i][c])
elif 18 <= m < 21:
for i in range(3):
m = m + 1
for c in range(6,9):
d[Q].append(n[i][c])
elif 9 <= m < 18:
if 15 <= m < 18:
for i in range(6,9):
m = m + 1
for c in range(3,6):
d[Q].append(n[i][c])
elif 12 <= m < 15:
for i in range(3,6):
m = m + 1
for c in range(3,6):
d[Q].append(n[i][c])
elif 9 <= m < 12:
for i in range(3):
m = m + 1
for c in range(3,6):
d[Q].append(n[i][c])
elif m < 9:
if 6 <= m < 9:
for i in range(6,9):
m = m + 1
for c in range(3):
d[Q].append(n[i][c])
elif 3 <= m < 6:
for i in range(3,6):
m = m + 1
for c in range(3):
d[Q].append(n[i][c])
elif m < 3:
for i in range(3):
m = m + 1
for c in range(3):
d[Q].append(n[i][c])
return d
### useful functions
def ld(x, y):
pos = [i for i in x if i not in y]
return pos
def solved(n):
# Checks if each position has been made into an integer
d = 0
for i in n:
for c in i:
if not type(c) == int:
d = d + 1
if d == 0:
return True
else:
return False
def linecheck(n):
for k,i in enumerate(n):
for j,c in enumerate(i):
if type(c) == list:
n[k][j] = ld(c,i)
return n
def single(puzzle):
# Goes line by line finding variables then tests each possibility in a
# list of variables then takes each possibility and checks to see
# if that is the only variable spot in which that possibility appears.
for line_index, line in enumerate(puzzle):
for variable_index, variable1 in enumerate(line):
if type(variable1) == list:
for possibility in variable1:
count = 0
for variable2 in line:
if type(variable2) == list:
if possibility in variable2:
count = count + 1
if count > 1: break
if count == 1:
puzzle[line_index][variable_index] = possibility
break
return puzzle
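# Worked example for single(): in a row such as
#   [[1, 4], 2, [4, 9], 7, [1, 4], 3, 8, 5, 6]
# the candidate 9 appears in exactly one of the unsolved cells' lists, so that
# cell is resolved to 9 even though its list still holds another candidate.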
def confirm(n):
# replaces the variables that have been knocked down to one possibility
for k,i in enumerate(n):
for j,c in enumerate(i):
if type(c) == list:
if len(c) == 1:
n[k][j] = int(c[0])
return n
def step(n):
# checks lines, eliminating variables and singularities
n = linecheck(n)
n = single(n)
n = confirm(n)
return n
def rc(n):
# column then row
for w in range(2):
n = transpose(n)
n = step(n)
return n
def boxxy(n):
    # Work in box coordinates: box() turns each 3x3 box into a row, run the
    # usual elimination step, then map back. box() applied three times is the
    # identity, so box(box(n)) undoes the initial box(n).
    n = box(n)
    n = step(n)
    n = box(box(n))
    return n
def solve(n):
n = rc(n)
n = boxxy(n)
n = confirm(n)
return n
def var(n,t=0):
    # Returns [count, [row, col]] for the cell with the fewest candidate
    # values; t selects the t-th smallest instead of the smallest.
vc = []
v = []
for x1,line in enumerate(n):
for x2,nums in enumerate(line):
if type(nums) == list:
vc.append([len(nums),[x1,x2]])
if len(nums) == 2:
return [len(nums),[x1,x2]]
vc.sort()
m = vc[t]
return m
def bruteforce1(n,xfs):
    # First brute-force pass. This level does not include a backtracking
    # step, since it is the first place a guess (and hence an error) can be
    # introduced. Finds the cell with the fewest possibilities and cycles
    # through them until the correct one is found.
m = var(n)
for i in range(m[0]):
n[m[1][0]][m[1][1]] = n[m[1][0]][m[1][1]][i]
u = False
while not solved(n):
n1 = n
n = solve(n)
if bfcondition(n):
# Backtrack: error raised
n = xfs[-1]
m = var(n)
break
if n == n1:
n2 = failsafe(n)
xfs.append(n2)
n, u = bruteforce2(n,xfs)
if solved(n):
break
m = var(n)
if solved(n):
break
return n
def bruteforce2(n,xfs):
    # Finds the cell with the fewest possibilities and cycles through them
    # until the correct one is found.
m = var(n)
for i in range(m[0]):
n[m[1][0]][m[1][1]] = n[m[1][0]][m[1][1]][i]
u = False
while not solved(n):
n1 = n
n = solve(n)
if bfcondition(n):
# backtrack: error raised
n = xfs[-1]
m = var(n)
break
elif n == n1:
# New forced solution needed
n2 = failsafe(n)
xfs.append(n2)
n, u = bruteforce2(n,xfs)
if solved(n):
break
elif bfcondition(n):
n = xfs[-1]
m = var(n)
break
if u:
break
if solved(n):
break
if solved(n):
return n, True
elif not bfcondition(n):
f = xfs[-1]
xfs.pop()
return f, False
else:
return n, True
def bfcondition(n):
for i in n:
for c in i:
if c == []:
return True
for i in n:
for c in i:
if type(c) == int:
if i.count(c) > 1:
return True
for i in box(n):
for c in i:
if type(c) == int:
if i.count(c) > 1:
return True
for i in transpose(n):
for c in i:
if type(c) == int:
if i.count(c) > 1:
return True
return False
def failsafe(n):
    # Copies the outer list so that later reassignments of n do not change the
    # saved snapshot (note: this is a shallow copy; the row lists are shared).
n1 = [i for i in n]
return n1
def puzzle_setup(x,v):
xc = [i for i in range(1,10)]
if v:
print "Here's your puzzle:\n"
puzzprint(x)
xgrid = []
for i in range(9):
dc = []
for i in range(9):
dc.append(xc)
xgrid.append(dc)
for i in range(9):
for p,c in enumerate(x[i]):
if c != 0:
xgrid[i][p] = c
return xgrid
def solve_puzzle(xgrid,v=False):
xgrid = puzzle_setup(xgrid,v)
start = time.clock()
t = 0
while not solved(xgrid):
xgrid1 = failsafe(xgrid)
xgrid = solve(xgrid)
if xgrid == xgrid1:
xgrid2 = failsafe(xgrid)
xfs = [xgrid2]
xgrid = bruteforce1(xgrid,xfs)
end = time.clock()
t = end - start
return t,xgrid
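# Illustrative sketch (not part of the original script): solve_puzzle() can be
# driven programmatically instead of through the interactive prompt below. The
# sample grid is a stock puzzle with zeroes for blank cells, as expected by
# puzzle_setup()/solve_puzzle(); the helper name demo_solve is made up here.
def demo_solve():
    sample = [
        [5, 3, 0, 0, 7, 0, 0, 0, 0],
        [6, 0, 0, 1, 9, 5, 0, 0, 0],
        [0, 9, 8, 0, 0, 0, 0, 6, 0],
        [8, 0, 0, 0, 6, 0, 0, 0, 3],
        [4, 0, 0, 8, 0, 3, 0, 0, 1],
        [7, 0, 0, 0, 2, 0, 0, 0, 6],
        [0, 6, 0, 0, 0, 0, 2, 8, 0],
        [0, 0, 0, 4, 1, 9, 0, 0, 5],
        [0, 0, 0, 0, 8, 0, 0, 7, 9],
    ]
    secs, grid = solve_puzzle(sample, v=True)
    print "Solved in " + str(secs) + " secs."
    puzzprint(grid)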
### RUNNING PORTION ###
if __name__ == "__main__":
print("Welcome!")
print("This program solves Sudoku problems \n")
print("Enter the digits in your puzzle row by row.")
print("At anytime hitting enter is ok instead of typing yes(y).\n")
print("Typing quit during the input process will end the program.")
print("Type a digit for a digit and a 0 (zero) for a blank entry: ")
exit = "y"
while exit != "n":
x = rowbyrow()
t,xgrid = solve_puzzle(x)
print "You're puzzle has been solved!\n"
print "It took " + str(t) + " secs."
puzzprint(xgrid)
print '\n'
exit = raw_input("Another puzzle? (y/n): ")
|
|
# coding=utf-8
# Copyright 2014 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import absolute_import, division, print_function, unicode_literals
import shlex
import unittest
from pants.option.arg_splitter import (ArgSplitter, NoGoalHelp, OptionsHelp, UnknownGoalHelp,
VersionHelp)
from pants.option.scope import ScopeInfo
def task(scope):
return ScopeInfo(scope, ScopeInfo.TASK)
def intermediate(scope):
return ScopeInfo(scope, ScopeInfo.INTERMEDIATE)
def subsys(scope):
return ScopeInfo(scope, ScopeInfo.SUBSYSTEM)
class ArgSplitterTest(unittest.TestCase):
_known_scope_infos = [intermediate('compile'), task('compile.java'), task('compile.scala'),
subsys('jvm'), subsys('jvm.test.junit'),
subsys('reporting'), intermediate('test'), task('test.junit')]
def _split(self, args_str, expected_goals, expected_scope_to_flags, expected_target_specs,
expected_passthru=None, expected_passthru_owner=None,
expected_is_help=False, expected_help_advanced=False, expected_help_all=False):
expected_passthru = expected_passthru or []
splitter = ArgSplitter(ArgSplitterTest._known_scope_infos)
args = shlex.split(args_str)
goals, scope_to_flags, target_specs, passthru, passthru_owner = splitter.split_args(args)
self.assertEquals(expected_goals, goals)
self.assertEquals(expected_scope_to_flags, scope_to_flags)
self.assertEquals(expected_target_specs, target_specs)
self.assertEquals(expected_passthru, passthru)
self.assertEquals(expected_passthru_owner, passthru_owner)
self.assertEquals(expected_is_help, splitter.help_request is not None)
self.assertEquals(expected_help_advanced,
(isinstance(splitter.help_request, OptionsHelp) and
splitter.help_request.advanced))
self.assertEquals(expected_help_all,
(isinstance(splitter.help_request, OptionsHelp) and
splitter.help_request.all_scopes))
def _split_help(self, args_str, expected_goals, expected_scope_to_flags, expected_target_specs,
expected_help_advanced=False, expected_help_all=False):
self._split(args_str, expected_goals, expected_scope_to_flags, expected_target_specs,
expected_passthru=None, expected_passthru_owner=None,
expected_is_help=True,
expected_help_advanced=expected_help_advanced,
expected_help_all=expected_help_all)
def _split_version_request(self, args_str):
splitter = ArgSplitter(ArgSplitterTest._known_scope_infos)
splitter.split_args(shlex.split(args_str))
self.assertTrue(isinstance(splitter.help_request, VersionHelp))
def _split_unknown_goal(self, args_str, unknown_goals):
splitter = ArgSplitter(ArgSplitterTest._known_scope_infos)
splitter.split_args(shlex.split(args_str))
self.assertTrue(isinstance(splitter.help_request, UnknownGoalHelp))
self.assertSetEqual(set(unknown_goals), set(splitter.help_request.unknown_goals))
def _split_no_goal(self, args_str):
splitter = ArgSplitter(ArgSplitterTest._known_scope_infos)
splitter.split_args(shlex.split(args_str))
self.assertTrue(isinstance(splitter.help_request, NoGoalHelp))
def test_basic_arg_splitting(self):
# Various flag combos.
self._split('./pants --compile-java-long-flag -f compile -g compile.java -x test.junit -i '
'src/java/org/pantsbuild/foo src/java/org/pantsbuild/bar:baz',
['compile', 'test'],
{
'': ['-f'],
'compile.java': ['--long-flag', '-x'],
'compile': ['-g'],
'test.junit': ['-i']
},
['src/java/org/pantsbuild/foo', 'src/java/org/pantsbuild/bar:baz'])
self._split('./pants -farg --fff=arg compile --gg-gg=arg-arg -g test.junit --iii '
'--compile-java-long-flag src/java/org/pantsbuild/foo src/java/org/pantsbuild/bar:baz',
['compile', 'test'],
{
'': ['-farg', '--fff=arg'],
'compile': ['--gg-gg=arg-arg', '-g'],
'test.junit': ['--iii'],
'compile.java': ['--long-flag'],
},
['src/java/org/pantsbuild/foo', 'src/java/org/pantsbuild/bar:baz'])
def test_distinguish_goals_from_target_specs(self):
self._split('./pants compile test foo::', ['compile', 'test'],
{'': [], 'compile': [], 'test': []}, ['foo::'])
self._split('./pants compile test foo::', ['compile', 'test'],
{'': [], 'compile': [], 'test': []}, ['foo::'])
self._split('./pants compile test:test', ['compile'], {'': [], 'compile': []}, ['test:test'])
self._split('./pants test test:test', ['test'], {'': [], 'test': []}, ['test:test'])
self._split('./pants test ./test', ['test'], {'': [], 'test': []}, ['./test'])
self._split('./pants test //test', ['test'], {'': [], 'test': []}, ['//test'])
def test_descoping_qualified_flags(self):
self._split('./pants compile test --compile-java-bar --no-test-junit-baz foo/bar',
['compile', 'test'],
{'': [], 'compile': [], 'compile.java': ['--bar'], 'test': [],
'test.junit': ['--no-baz']}, ['foo/bar'])
# Qualified flags don't count as explicit goals.
self._split('./pants compile --test-junit-bar foo/bar',
['compile'],
{'': [], 'compile': [], 'test.junit': ['--bar']}, ['foo/bar'])
def test_passthru_args(self):
self._split('./pants test foo/bar -- -t arg',
['test'],
{'': [], 'test': []},
['foo/bar'],
expected_passthru=['-t', 'arg'],
expected_passthru_owner='test')
self._split('./pants -farg --fff=arg compile --gg-gg=arg-arg -g test.junit --iii '
'--compile-java-long-flag src/java/org/pantsbuild/foo '
'src/java/org/pantsbuild/bar:baz '
'-- passthru1 passthru2',
['compile', 'test'],
{
'': ['-farg', '--fff=arg'],
'compile': ['--gg-gg=arg-arg', '-g'],
'compile.java': ['--long-flag'],
'test.junit': ['--iii']
},
['src/java/org/pantsbuild/foo', 'src/java/org/pantsbuild/bar:baz'],
expected_passthru=['passthru1', 'passthru2'],
expected_passthru_owner='test.junit')
def test_subsystem_flags(self):
# Global subsystem flag in global scope.
self._split('./pants --jvm-options=-Dbar=baz test foo:bar',
['test'],
{'': [], 'jvm': ['--options=-Dbar=baz'], 'test': []}, ['foo:bar'])
# Qualified task subsystem flag in global scope.
self._split('./pants --jvm-test-junit-options=-Dbar=baz test foo:bar',
['test'],
{'': [], 'jvm.test.junit': ['--options=-Dbar=baz'], 'test': []}, ['foo:bar'])
# Unqualified task subsystem flag in task scope.
# Note that this exposes a small problem: You can't set an option on the cmd-line if that
# option's name begins with any subsystem scope. For example, if test.junit has some option
# named --jvm-foo, then it cannot be set on the cmd-line, because the ArgSplitter will assume
# it's an option --foo on the jvm subsystem.
self._split('./pants test.junit --jvm-options=-Dbar=baz foo:bar',
['test'],
{'': [], 'jvm.test.junit': ['--options=-Dbar=baz'], 'test.junit': []}, ['foo:bar'])
# Global-only flag in task scope.
self._split('./pants test.junit --reporting-template-dir=path foo:bar',
['test'],
{'': [], 'reporting': ['--template-dir=path'], 'test.junit': []}, ['foo:bar'])
def test_help_detection(self):
self._split_help('./pants', [], {'': []}, [])
self._split_help('./pants goal', [], {'': []}, [])
self._split_help('./pants -f', [], {'': ['-f']}, [])
self._split_help('./pants goal -f', [], {'': ['-f']}, [])
self._split_help('./pants help', [], {'': []}, [])
self._split_help('./pants goal help', [], {'': []}, [])
self._split_help('./pants -h', [], {'': []}, [])
self._split_help('./pants goal -h', [], {'': []}, [])
self._split_help('./pants --help', [], {'': []}, [])
self._split_help('./pants goal --help', [], {'': []}, [])
self._split_help('./pants help compile -x', ['compile'],
{'': [], 'compile': ['-x']}, [])
self._split_help('./pants help compile -x', ['compile'],
{'': [], 'compile': ['-x']}, [])
self._split_help('./pants compile -h', ['compile'],
{'': [], 'compile': []}, [])
self._split_help('./pants compile --help test', ['compile', 'test'],
{'': [], 'compile': [], 'test': []}, [])
self._split_help('./pants test src/foo/bar:baz -h', ['test'],
{'': [], 'test': []}, ['src/foo/bar:baz'])
self._split_help('./pants help-advanced', [], {'': []}, [], True, False)
self._split_help('./pants help-all', [], {'': []}, [], False, True)
self._split_help('./pants --help-advanced', [], {'': []}, [], True, False)
self._split_help('./pants --help-all', [], {'': []}, [], False, True)
self._split_help('./pants --help --help-advanced', [], {'': []}, [], True, False)
self._split_help('./pants --help-advanced --help', [], {'': []}, [], True, False)
self._split_help('./pants --help --help-all', [], {'': []}, [], False, True)
self._split_help('./pants --help-all --help --help-advanced', [], {'': []}, [], True, True)
self._split_help('./pants help --help-advanced', [], {'': []}, [], True, False)
self._split_help('./pants help-advanced --help-all', [], {'': []}, [], True, True)
self._split_help('./pants compile --help-advanced test', ['compile', 'test'],
{'': [], 'compile': [], 'test': []}, [], True, False)
self._split_help('./pants help-advanced compile', ['compile'],
{'': [], 'compile': []}, [], True, False)
self._split_help('./pants compile help-all test --help', ['compile', 'test'],
{'': [], 'compile': [], 'test': []}, [], False, True)
def test_version_request_detection(self):
self._split_version_request('./pants -v')
self._split_version_request('./pants -V')
self._split_version_request('./pants --version')
    # A version request supersedes anything else.
self._split_version_request('./pants --version compile --foo --bar path/to/target')
def test_unknown_goal_detection(self):
self._split_unknown_goal('./pants foo', ['foo'])
self._split_unknown_goal('./pants compile foo', ['foo'])
self._split_unknown_goal('./pants foo bar baz:qux', ['foo', 'bar'])
self._split_unknown_goal('./pants foo compile bar baz:qux', ['foo', 'bar'])
def test_no_goal_detection(self):
self._split_no_goal('./pants foo/bar:baz')
|
|
# -*- coding: utf-8 -*-
"""The extractor class definitions.
An extractor is a class used to extract information from "raw" data.
"""
import copy
import pysigscan
from dfvfs.helpers import file_system_searcher
from dfvfs.lib import definitions as dfvfs_definitions
from dfvfs.lib import errors as dfvfs_errors
from dfvfs.resolver import resolver as path_spec_resolver
from plaso.engine import logger
from plaso.lib import errors
from plaso.parsers import interface as parsers_interface
from plaso.parsers import manager as parsers_manager
class EventExtractor(object):
"""Event extractor.
An event extractor extracts events from event sources.
"""
_PARSE_RESULT_FAILURE = 1
_PARSE_RESULT_SUCCESS = 2
_PARSE_RESULT_UNSUPPORTED = 3
def __init__(self, parser_filter_expression=None):
"""Initializes an event extractor.
Args:
parser_filter_expression (Optional[str]): parser filter expression,
where None represents all parsers and plugins.
A parser filter expression is a comma separated value string that
denotes which parsers and plugins should be used. See
filters/parser_filter.py for details of the expression syntax.
"""
super(EventExtractor, self).__init__()
self._file_scanner = None
self._filestat_parser = None
self._formats_with_signatures = None
self._mft_parser = None
self._non_sigscan_parser_names = None
self._parsers = None
self._parsers_profiler = None
self._usnjrnl_parser = None
self._InitializeParserObjects(
parser_filter_expression=parser_filter_expression)
def _CheckParserCanProcessFileEntry(self, parser, file_entry):
"""Determines if a parser can process a file entry.
Args:
file_entry (dfvfs.FileEntry): file entry.
parser (BaseParser): parser.
Returns:
bool: True if the file entry can be processed by the parser object.
"""
for filter_object in parser.FILTERS:
if filter_object.Match(file_entry):
return True
return False
def _GetSignatureMatchParserNames(self, file_object):
"""Determines if a file-like object matches one of the known signatures.
Args:
file_object (file): file-like object whose contents will be checked
for known signatures.
Returns:
list[str]: parser names for which the contents of the file-like object
matches their known signatures.
"""
parser_names = []
scan_state = pysigscan.scan_state()
self._file_scanner.scan_file_object(scan_state, file_object)
for scan_result in iter(scan_state.scan_results):
format_specification = (
self._formats_with_signatures.GetSpecificationBySignature(
scan_result.identifier))
if format_specification.identifier not in parser_names:
parser_names.append(format_specification.identifier)
return parser_names
def _InitializeParserObjects(self, parser_filter_expression=None):
"""Initializes the parser objects.
Args:
parser_filter_expression (Optional[str]): parser filter expression,
where None represents all parsers and plugins.
A parser filter expression is a comma separated value string that
denotes which parsers and plugins should be used. See
filters/parser_filter.py for details of the expression syntax.
"""
self._formats_with_signatures, non_sigscan_parser_names = (
parsers_manager.ParsersManager.GetFormatsWithSignatures(
parser_filter_expression=parser_filter_expression))
self._non_sigscan_parser_names = []
for parser_name in non_sigscan_parser_names:
if parser_name not in ('filestat', 'usnjrnl'):
self._non_sigscan_parser_names.append(parser_name)
self._file_scanner = parsers_manager.ParsersManager.CreateSignatureScanner(
self._formats_with_signatures)
self._parsers = parsers_manager.ParsersManager.GetParserObjects(
parser_filter_expression=parser_filter_expression)
active_parser_names = ', '.join(sorted(self._parsers.keys()))
logger.debug('Active parsers: {0:s}'.format(active_parser_names))
self._filestat_parser = self._parsers.get('filestat', None)
if 'filestat' in self._parsers:
del self._parsers['filestat']
self._mft_parser = self._parsers.get('mft', None)
self._usnjrnl_parser = self._parsers.get('usnjrnl', None)
if 'usnjrnl' in self._parsers:
del self._parsers['usnjrnl']
def _ParseDataStreamWithParser(
self, parser_mediator, parser, file_entry, data_stream_name):
"""Parses a data stream of a file entry with a specific parser.
Args:
parser_mediator (ParserMediator): parser mediator.
parser (BaseParser): parser.
file_entry (dfvfs.FileEntry): file entry.
data_stream_name (str): data stream name.
Raises:
RuntimeError: if the file-like object is missing.
"""
file_object = file_entry.GetFileObject(data_stream_name=data_stream_name)
if not file_object:
raise RuntimeError('Unable to retrieve file-like object from file entry.')
try:
self._ParseFileEntryWithParser(
parser_mediator, parser, file_entry, file_object=file_object)
finally:
file_object.close()
def _ParseFileEntryWithParser(
self, parser_mediator, parser, file_entry, file_object=None):
"""Parses a file entry with a specific parser.
Args:
parser_mediator (ParserMediator): parser mediator.
parser (BaseParser): parser.
file_entry (dfvfs.FileEntry): file entry.
file_object (Optional[file]): file-like object to parse.
If not set the parser will use the parser mediator to open
the file entry's default data stream as a file-like object.
Returns:
int: parse result which is _PARSE_RESULT_FAILURE if the file entry
could not be parsed, _PARSE_RESULT_SUCCESS if the file entry
successfully was parsed or _PARSE_RESULT_UNSUPPORTED when
UnableToParseFile was raised.
Raises:
TypeError: if parser object is not a supported parser type.
"""
if not isinstance(parser, (
parsers_interface.FileEntryParser, parsers_interface.FileObjectParser)):
raise TypeError('Unsupported parser object type.')
parser_mediator.ClearParserChain()
reference_count = (
parser_mediator.resolver_context.GetFileObjectReferenceCount(
file_entry.path_spec))
parser_mediator.SampleStartTiming(parser.NAME)
try:
if isinstance(parser, parsers_interface.FileEntryParser):
parser.Parse(parser_mediator)
elif isinstance(parser, parsers_interface.FileObjectParser):
parser.Parse(parser_mediator, file_object)
result = self._PARSE_RESULT_SUCCESS
# We catch IOError so we can determine the parser that generated the error.
except (IOError, dfvfs_errors.BackEndError) as exception:
display_name = parser_mediator.GetDisplayName(file_entry)
logger.warning(
'{0:s} unable to parse file: {1:s} with error: {2!s}'.format(
parser.NAME, display_name, exception))
result = self._PARSE_RESULT_FAILURE
except errors.UnableToParseFile as exception:
display_name = parser_mediator.GetDisplayName(file_entry)
logger.debug(
'{0:s} unable to parse file: {1:s} with error: {2!s}'.format(
parser.NAME, display_name, exception))
result = self._PARSE_RESULT_UNSUPPORTED
finally:
parser_mediator.SampleStopTiming(parser.NAME)
parser_mediator.SampleMemoryUsage(parser.NAME)
new_reference_count = (
parser_mediator.resolver_context.GetFileObjectReferenceCount(
file_entry.path_spec))
if reference_count != new_reference_count:
display_name = parser_mediator.GetDisplayName(file_entry)
logger.warning((
'[{0:s}] did not explicitly close file-object for file: '
'{1:s}.').format(parser.NAME, display_name))
return result
def _ParseFileEntryWithParsers(
self, parser_mediator, parser_names, file_entry, file_object=None):
"""Parses a file entry with a specific parsers.
Args:
parser_mediator (ParserMediator): parser mediator.
parser_names (list[str]): names of parsers.
file_entry (dfvfs.FileEntry): file entry.
file_object (Optional[file]): file-like object to parse.
If not set the parser will use the parser mediator to open
the file entry's default data stream as a file-like object.
Returns:
int: parse result which is _PARSE_RESULT_FAILURE if the file entry
could not be parsed, _PARSE_RESULT_SUCCESS if the file entry
successfully was parsed or _PARSE_RESULT_UNSUPPORTED when
          UnableToParseFile was raised or no parser names were provided.
Raises:
RuntimeError: if the parser object is missing.
"""
parse_results = self._PARSE_RESULT_UNSUPPORTED
for parser_name in parser_names:
parser = self._parsers.get(parser_name, None)
if not parser:
raise RuntimeError(
'Parser object missing for parser: {0:s}'.format(parser_name))
if parser.FILTERS:
if not self._CheckParserCanProcessFileEntry(parser, file_entry):
parse_results = self._PARSE_RESULT_SUCCESS
continue
display_name = parser_mediator.GetDisplayName(file_entry)
logger.debug((
'[ParseFileEntryWithParsers] parsing file: {0:s} with parser: '
'{1:s}').format(display_name, parser_name))
parse_result = self._ParseFileEntryWithParser(
parser_mediator, parser, file_entry, file_object=file_object)
if parse_result == self._PARSE_RESULT_FAILURE:
return self._PARSE_RESULT_FAILURE
if parse_result == self._PARSE_RESULT_SUCCESS:
parse_results = self._PARSE_RESULT_SUCCESS
return parse_results
def ParseDataStream(self, parser_mediator, file_entry, data_stream_name):
"""Parses a data stream of a file entry with the enabled parsers.
Args:
parser_mediator (ParserMediator): parser mediator.
file_entry (dfvfs.FileEntry): file entry.
data_stream_name (str): data stream name.
Raises:
RuntimeError: if the file-like object or the parser object is missing.
"""
file_object = file_entry.GetFileObject(data_stream_name=data_stream_name)
if not file_object:
raise RuntimeError(
'Unable to retrieve file-like object from file entry.')
try:
parser_names = self._GetSignatureMatchParserNames(file_object)
parse_with_non_sigscan_parsers = True
if parser_names:
parse_result = self._ParseFileEntryWithParsers(
parser_mediator, parser_names, file_entry, file_object=file_object)
if parse_result in (
self._PARSE_RESULT_FAILURE, self._PARSE_RESULT_SUCCESS):
parse_with_non_sigscan_parsers = False
if parse_with_non_sigscan_parsers:
self._ParseFileEntryWithParsers(
parser_mediator, self._non_sigscan_parser_names, file_entry,
file_object=file_object)
finally:
file_object.close()
def ParseFileEntryMetadata(self, parser_mediator, file_entry):
"""Parses the file entry metadata such as file system data.
Args:
parser_mediator (ParserMediator): parser mediator.
file_entry (dfvfs.FileEntry): file entry.
"""
if self._filestat_parser:
self._ParseFileEntryWithParser(
parser_mediator, self._filestat_parser, file_entry)
def ParseMetadataFile(
self, parser_mediator, file_entry, data_stream_name):
"""Parses a metadata file.
Args:
parser_mediator (ParserMediator): parser mediator.
file_entry (dfvfs.FileEntry): file entry.
data_stream_name (str): data stream name.
"""
parent_path_spec = getattr(file_entry.path_spec, 'parent', None)
filename_upper = file_entry.name.upper()
if (self._mft_parser and parent_path_spec and
filename_upper in ('$MFT', '$MFTMIRR') and not data_stream_name):
self._ParseDataStreamWithParser(
parser_mediator, self._mft_parser, file_entry, '')
elif (self._usnjrnl_parser and parent_path_spec and
filename_upper == '$USNJRNL' and data_stream_name == '$J'):
# To be able to ignore the sparse data ranges the UsnJrnl parser
# needs to read directly from the volume.
volume_file_object = path_spec_resolver.Resolver.OpenFileObject(
parent_path_spec, resolver_context=parser_mediator.resolver_context)
try:
self._ParseFileEntryWithParser(
parser_mediator, self._usnjrnl_parser, file_entry,
file_object=volume_file_object)
finally:
volume_file_object.close()
class PathSpecExtractor(object):
"""Path specification extractor.
A path specification extractor extracts path specification from a source
directory, file or storage media device or image.
"""
_MAXIMUM_DEPTH = 255
def _ExtractPathSpecs(
self, path_spec, find_specs=None, recurse_file_system=True,
resolver_context=None):
"""Extracts path specification from a specific source.
Args:
path_spec (dfvfs.PathSpec): path specification.
find_specs (Optional[list[dfvfs.FindSpec]]): find specifications
used in path specification extraction.
recurse_file_system (Optional[bool]): True if extraction should
recurse into a file system.
resolver_context (Optional[dfvfs.Context]): resolver context.
Yields:
dfvfs.PathSpec: path specification of a file entry found in the source.
"""
file_entry = None
try:
file_entry = path_spec_resolver.Resolver.OpenFileEntry(
path_spec, resolver_context=resolver_context)
except (
dfvfs_errors.AccessError, dfvfs_errors.BackEndError,
dfvfs_errors.PathSpecError) as exception:
logger.error('Unable to open file entry with error: {0!s}'.format(
exception))
if not file_entry:
logger.warning('Unable to open: {0:s}'.format(path_spec.comparable))
elif (not file_entry.IsDirectory() and not file_entry.IsFile() and
not file_entry.IsDevice()):
logger.warning((
'Source path specification not a device, file or directory.\n'
'{0:s}').format(path_spec.comparable))
elif file_entry.IsFile():
yield path_spec
else:
for extracted_path_spec in self._ExtractPathSpecsFromFileSystem(
path_spec, find_specs=find_specs,
recurse_file_system=recurse_file_system,
resolver_context=resolver_context):
yield extracted_path_spec
def _ExtractPathSpecsFromDirectory(self, file_entry, depth=0):
"""Extracts path specification from a directory.
Args:
file_entry (dfvfs.FileEntry): file entry that refers to the directory.
depth (Optional[int]): current depth where 0 represents the file system
root.
Yields:
dfvfs.PathSpec: path specification of a file entry found in the directory.
Raises:
MaximumRecursionDepth: when the maximum recursion depth is reached.
"""
if depth >= self._MAXIMUM_DEPTH:
raise errors.MaximumRecursionDepth('Maximum recursion depth reached.')
# Need to do a breadth-first search otherwise we'll hit the Python
# maximum recursion depth.
sub_directories = []
for sub_file_entry in file_entry.sub_file_entries:
try:
if not sub_file_entry.IsAllocated() or sub_file_entry.IsLink():
continue
except dfvfs_errors.BackEndError as exception:
logger.warning(
'Unable to process file: {0:s} with error: {1!s}'.format(
sub_file_entry.path_spec.comparable.replace(
'\n', ';'), exception))
continue
# For TSK-based file entries only, ignore the virtual /$OrphanFiles
# directory.
if sub_file_entry.type_indicator == dfvfs_definitions.TYPE_INDICATOR_TSK:
if file_entry.IsRoot() and sub_file_entry.name == '$OrphanFiles':
continue
if sub_file_entry.IsDirectory():
sub_directories.append(sub_file_entry)
for path_spec in self._ExtractPathSpecsFromFile(sub_file_entry):
yield path_spec
for sub_file_entry in sub_directories:
try:
for path_spec in self._ExtractPathSpecsFromDirectory(
sub_file_entry, depth=(depth + 1)):
yield path_spec
except (
IOError, dfvfs_errors.AccessError, dfvfs_errors.BackEndError,
dfvfs_errors.PathSpecError) as exception:
logger.warning('{0!s}'.format(exception))
def _ExtractPathSpecsFromFile(self, file_entry):
"""Extracts path specification from a file.
Args:
file_entry (dfvfs.FileEntry): file entry that refers to the file.
Yields:
dfvfs.PathSpec: path specification of a file entry found in the file.
"""
produced_main_path_spec = False
for data_stream in file_entry.data_streams:
# Make a copy so we don't make the changes on a path specification
# directly. Otherwise already produced path specifications can be
# altered in the process.
path_spec = copy.deepcopy(file_entry.path_spec)
if data_stream.name:
setattr(path_spec, 'data_stream', data_stream.name)
yield path_spec
if not data_stream.name:
produced_main_path_spec = True
if not produced_main_path_spec:
yield file_entry.path_spec
def _ExtractPathSpecsFromFileSystem(
self, path_spec, find_specs=None, recurse_file_system=True,
resolver_context=None):
"""Extracts path specification from a file system within a specific source.
Args:
path_spec (dfvfs.PathSpec): path specification of the root of
the file system.
find_specs (Optional[list[dfvfs.FindSpec]]): find specifications
used in path specification extraction.
recurse_file_system (Optional[bool]): True if extraction should
recurse into a file system.
resolver_context (Optional[dfvfs.Context]): resolver context.
Yields:
dfvfs.PathSpec: path specification of a file entry found in
the file system.
"""
file_system = None
try:
file_system = path_spec_resolver.Resolver.OpenFileSystem(
path_spec, resolver_context=resolver_context)
except (
dfvfs_errors.AccessError, dfvfs_errors.BackEndError,
dfvfs_errors.PathSpecError) as exception:
logger.error('Unable to open file system with error: {0!s}'.format(
exception))
if file_system:
try:
if find_specs:
searcher = file_system_searcher.FileSystemSearcher(
file_system, path_spec)
for extracted_path_spec in searcher.Find(find_specs=find_specs):
yield extracted_path_spec
elif recurse_file_system:
file_entry = file_system.GetFileEntryByPathSpec(path_spec)
if file_entry:
for extracted_path_spec in self._ExtractPathSpecsFromDirectory(
file_entry):
yield extracted_path_spec
else:
yield path_spec
except (
dfvfs_errors.AccessError, dfvfs_errors.BackEndError,
dfvfs_errors.PathSpecError) as exception:
logger.warning('{0!s}'.format(exception))
finally:
file_system.Close()
def ExtractPathSpecs(
self, path_specs, find_specs=None, recurse_file_system=True,
resolver_context=None):
"""Extracts path specification from a specific source.
Args:
path_specs (Optional[list[dfvfs.PathSpec]]): path specifications.
find_specs (Optional[list[dfvfs.FindSpec]]): find specifications
used in path specification extraction.
recurse_file_system (Optional[bool]): True if extraction should
recurse into a file system.
resolver_context (Optional[dfvfs.Context]): resolver context.
Yields:
dfvfs.PathSpec: path specification of a file entry found in the source.
"""
for path_spec in path_specs:
for extracted_path_spec in self._ExtractPathSpecs(
path_spec, find_specs=find_specs,
recurse_file_system=recurse_file_system,
resolver_context=resolver_context):
yield extracted_path_spec
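# Illustrative sketch (not part of the original module): enumerate path
# specifications below a directory on the operating system file system. The
# dfvfs path specification factory import and the helper name are assumptions
# made for illustration only.
def _example_extract_os_path_specs(source_location):
  """Returns a list of path specifications found below source_location."""
  from dfvfs.path import factory as path_spec_factory

  source_path_spec = path_spec_factory.Factory.NewPathSpec(
      dfvfs_definitions.TYPE_INDICATOR_OS, location=source_location)
  extractor = PathSpecExtractor()
  return list(extractor.ExtractPathSpecs([source_path_spec]))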
|
|
#
# This file is part of python-dbusx. Python-dbusx is free software
# available under the terms of the MIT license. See the file "LICENSE" that
# was provided together with this source file for the licensing terms.
#
# Copyright (c) 2012-2013 the python-dbusx authors. See the file "AUTHORS"
# for a complete list.
import dbusx
import dbusx.util
import time
import threading
class Connection(dbusx.ConnectionBase):
"""A connection to the D-BUS.
This class represents a connection to a desktop bus (D-BUS). It has two
constructors. The default constructor `Connection()` will create a new
private connection. An alternate constructor `Connection.get()` is
available that create a connection that is shared with other users of
libdbus (Python and non-Python) in the current process.
To connect to a D-BUS instance, you need to specify its address to one of
the constructors or to the :meth:`open` method. The most common D-BUS
instances are the system and session bus. To connect to these buses, you
can use the special constants dbusx.BUS_SYSTEM and dbusx.BUS_SESSION as the
address, respectively. To connect to another bus, specify the address as a
string.
By default, this class will use the event loop provided by the package
`looping.tulip`. This provides a good basic event loop based on the Python
built-in select/poll/epoll/kqueue multiplexers. The event loop can be
    overridden by passing a different one to one of the constructors. It is your
    responsibility to make sure the event loop is run.
You can also use an event loop that is external to dbusx. In this case, you
    need to specify the parameter `install_event_loop=False` to the constructor.
"""
def __init__(self, address):
"""Create a new private connection.
        If *address* is provided, the connection will be opened to the specified
address.
"""
super(Connection, self).__init__(address)
self.context = None
self._registered = False
self._signal_handlers = []
self.logger = dbusx.util.getLogger('dbusx.Connection',
context=str(self))
self.local = self._local()
def __str__(self):
s = 'Connection(address=%s, shared=%s)' % (self.address, self.shared)
return s
def proxy(self, service, path, interfaces=None):
"""Return a proxy for an object on the D-BUS.
The *service* argument specifies the bus name of the remote object and
*path* specifies its path. The *interfaces* argument, if provided,
specifies the interface search path used when resolving methods. You
only need to provide this if the object exposes methods or signals with
the same name on multiple interfaces.
"""
return dbusx.Proxy(self, service, path, interfaces)
def publish(self, instance, path):
"""Publish a Python object instance on the D-BUS.
The *instance* parameter can be a :class:`dbusx.Object` instance, or
any other instance. In the latter case, the object is automatically
wrapped to an Object instance using :meth:`dbusx.Object.wrap`. The
*path* parameter specifies the path to publish this object at. It may
contain a trailing '*' to indicate that the object will also handle all
paths below the indicated path.
"""
if not isinstance(instance, dbusx.Object):
instance = dbusx.Object.wrap(instance)
instance.register(self, path)
fallback = path.endswith('*')
path = path.rstrip('/*')
self.register_object_path(path, instance._process, fallback)
def remove(self, path):
"""Remove a published Python object.
An object should have been previously published at *path* using
:meth:`publish`. An error will be raised if this is not the case.
"""
path = path.rstrip('/*')
self.unregister_object_path(path)
def call_method(self, service, path, interface, method, signature=None,
args=None, no_reply=False, callback=None, timeout=None):
"""Call the method *method* on the interface *interface* of the remote
object at bus name *service* at path *path*.
If *signature* is specified, it must be a D-BUS signature string
describing the input arguments of the method. In this case, *args* must
be a tuple containing the argument values.
If *callback* is not provided, this method performs a synchronous
method call. It will block until a reply is received. The return value
        is a tuple containing the return values of the remote method. In
case of an error, a :class:`dbusx.Error` instance is raised. The
actual message of the response is available in the :attr:`reply`
attribute. Subsequent calls will overwrite this attribute.
If *callback* is provided, this method performs an asynchronous method
call. The method call message will be queued, after which this method
will return immediately. There is no return value. At a later time,
when the message is sent out and a reply is received, the callback will
be called with two parameters: the connection and the message. In case
of an error, this message will have the type
`dbusx.MESSAGE_TYPE_ERROR`. The internal :class:`PendingCall` instance
that is used to track the response is available at the :attr:`pending`
attribute. Subsequent calls will overwrite this attribute.
The *no_reply* argument will set a flag in the D-BUS message indicating
that no reply is required. In this case, a synchronous method call will
still block, but only until the message has been sent out. An
asynchronous method call will never block.
The *timeout* parameter specifies the timeout in seconds to wait for a
reply. The timeout may be an int or float. If no timeout is provided, a
suitable default timeout is used. If no response is received within the
timeout, a "org.freedesktop.DBus.Error.Timeout" error is generated.
"""
message = dbusx.Message(dbusx.MESSAGE_TYPE_METHOD_CALL,
no_reply=no_reply, destination=service,
path=path, interface=interface, member=method)
if signature is not None:
message.set_args(signature, args)
if callback is not None:
# Fire a callback for the reply. Note that this requires event
# loop integration otherwise the callback will never be called.
self.send_with_reply(message, callback, timeout)
elif no_reply:
# No reply needed but block until flushed
self.send(message)
if not self.loop:
self.flush()
else:
# Block for the reply
replies = []
def callback(message):
replies.append(message)
self.send_with_reply(message, callback, timeout)
if timeout is not None:
end_time = time.time() + timeout
while not replies:
secs = None if timeout is None else end_time - time.time()
if self.loop:
if self.dispatch_status == dbusx.DISPATCH_DATA_REMAINS:
self.dispatch()
else:
self.loop.run_once(secs)
else:
self.read_write_dispatch(secs)
assert len(replies) == 1
reply = replies[0]
assert reply.type in (dbusx.MESSAGE_TYPE_METHOD_RETURN,
dbusx.MESSAGE_TYPE_ERROR)
assert reply.reply_serial == message.serial
return reply
def connect_to_signal(self, service, path, interface, signal, callback):
"""Install a signal handler for the signal *signal* that is raised on
*interface* by the remote object at bus name *service* and path *path*.
The *callback* argument must be a callable Python object. When a
matching signal arrives, the callback is called with two arguments:
the connection on which the signal was received, and the D-BUS message
containing the signal.
"""
if not self._registered:
self.add_filter(self._signal_handler)
self._registered = True
self._signal_handlers.append((service, path, interface,
signal, callback))
# Call the "AddMatch" method on the D-BUS so that the signal specified
# will get routed to us. Signals are normally sent out as multicast
# messages and therefore an explicit route is required.
# NOTE: It is OK to do this multiple times for the same signal.
message = dbusx.Message(dbusx.MESSAGE_TYPE_METHOD_CALL, no_reply=True,
destination=dbusx.SERVICE_DBUS, path=dbusx.PATH_DBUS,
interface=dbusx.INTERFACE_DBUS, member='AddMatch')
rule = "type='signal'"
rule += ",sender='%s'" % service
rule += ",path='%s'" % path
rule += ",interface='%s'" % interface
rule += ",member='%s'" % signal
message.set_args('s', (rule,))
self.send(message)
def _signal_handler(self, connection, message):
"""Filter handler that is used to call signal handlers that are
registered with connect_to_signal().
"""
log = self.logger
if connection is not self:
log.error('_signal_handler: connection is not self??')
return False
if message.type != dbusx.MESSAGE_TYPE_SIGNAL:
return False
for service,path,interface,signal,callback in self._signal_handlers:
if message.sender != service \
or message.path != path \
or message.interface != interface \
or message.member != signal:
continue
try:
self._spawn(callback, message)
except Exception as e:
log.error('exception in signal handler', exc_info=True)
# Allow others to see this signal as well
return False
def _spawn(self, function, *args):
"""Helper to spawn a function in a new context.
By default this just executes function(*args), but it can be
reimplemented by subclasses to add different spawning behavior.
"""
return function(*args)
def _local(self):
"""Helper to return a context-local storage object.
This method must work in tandem with :meth:`_spawn()` so that the
object implements local storage for the type of context implemented.
"""
return threading.local()
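# Illustrative sketch (not part of the original module): a blocking method call
# against the bus daemon, following the call_method() docstring above. The
# helper name is made up; note that the blocking branch of call_method()
# returns the reply message itself.
def _example_list_bus_names():
    conn = Connection(dbusx.BUS_SESSION)
    return conn.call_method(dbusx.SERVICE_DBUS, dbusx.PATH_DBUS,
                            dbusx.INTERFACE_DBUS, 'ListNames')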
|
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import
import mock
from django.core.urlresolvers import reverse
from exam import fixture
from social_auth.models import UserSocialAuth
from sentry.models import UserOption, LostPasswordHash, User
from sentry.testutils import TestCase
class AppearanceSettingsTest(TestCase):
@fixture
def path(self):
return reverse('sentry-account-settings-appearance')
def test_requires_authentication(self):
self.assertRequiresAuthentication(self.path)
def test_does_use_template(self):
self.login_as(self.user)
resp = self.client.get(self.path)
assert resp.status_code == 200
self.assertTemplateUsed(resp, 'sentry/account/appearance.html')
def test_does_save_settings(self):
self.login_as(self.user)
resp = self.client.post(self.path, {
'language': 'en',
'stacktrace_order': '2',
'clock_24_hours': True
})
assert resp.status_code == 302
options = UserOption.objects.get_all_values(user=self.user, project=None)
assert options.get('language') == 'en'
assert options.get('stacktrace_order') == '2'
assert options.get('clock_24_hours') is True
class SettingsTest(TestCase):
@fixture
def path(self):
return reverse('sentry-account-settings')
def params(self, without=()):
params = {
'username': 'foobar',
            'email': '[email protected]',
'name': 'Foo bar',
}
return dict((k, v) for k, v in params.iteritems() if k not in without)
def test_requires_authentication(self):
self.assertRequiresAuthentication(self.path)
def test_renders_with_required_context(self):
self.login_as(self.user)
resp = self.client.get(self.path)
assert resp.status_code == 200
self.assertTemplateUsed('sentry/account/settings.html')
assert 'form' in resp.context
def test_requires_email(self):
self.login_as(self.user)
resp = self.client.post(self.path, self.params(without=['email']))
assert resp.status_code == 200
self.assertTemplateUsed('sentry/account/settings.html')
assert 'form' in resp.context
assert 'email' in resp.context['form'].errors
def test_requires_name(self):
self.login_as(self.user)
resp = self.client.post(self.path, self.params(without=['name']))
assert resp.status_code == 200
self.assertTemplateUsed('sentry/account/settings.html')
assert 'form' in resp.context
assert 'name' in resp.context['form'].errors
def test_minimum_valid_params(self):
self.login_as(self.user)
params = self.params()
resp = self.client.post(self.path, params)
assert resp.status_code == 302
user = User.objects.get(id=self.user.id)
assert user.name == params['name']
assert user.email == params['email']
def test_can_change_password(self):
self.login_as(self.user)
params = self.params()
params['new_password'] = 'foobar'
resp = self.client.post(self.path, params)
assert resp.status_code == 302
user = User.objects.get(id=self.user.id)
assert user.check_password('foobar')
class NotificationSettingsTest(TestCase):
@fixture
def path(self):
return reverse('sentry-account-settings-notifications')
def params(self, without=()):
params = {
            'alert_email': '[email protected]',
}
return dict((k, v) for k, v in params.iteritems() if k not in without)
def test_requires_authentication(self):
self.assertRequiresAuthentication(self.path)
def test_renders_with_required_context(self):
        user = self.create_user('[email protected]')
organization = self.create_organization()
team = self.create_team(organization=organization)
project = self.create_project(organization=organization, team=team)
team2 = self.create_team(organization=organization)
self.create_project(organization=organization, team=team2)
self.create_member(organization=organization, user=user, teams=[project.team])
self.login_as(user)
resp = self.client.get(self.path)
assert resp.status_code == 200
self.assertTemplateUsed('sentry/account/notifications.html')
assert 'form' in resp.context
assert len(resp.context['project_forms']) == 1
def test_valid_params(self):
self.login_as(self.user)
params = self.params()
resp = self.client.post(self.path, params)
assert resp.status_code == 302
options = UserOption.objects.get_all_values(user=self.user, project=None)
        assert options.get('alert_email') == '[email protected]'
class ListIdentitiesTest(TestCase):
@fixture
def path(self):
return reverse('sentry-account-settings-identities')
def test_requires_authentication(self):
self.assertRequiresAuthentication(self.path)
def test_renders_with_required_context(self):
self.login_as(self.user)
UserSocialAuth.objects.create(user=self.user, provider='github')
resp = self.client.get(self.path)
assert resp.status_code == 200
self.assertTemplateUsed('sentry/account/identities.html')
assert 'identity_list' in resp.context
assert 'AUTH_PROVIDERS' in resp.context
class RecoverPasswordTest(TestCase):
@fixture
def path(self):
return reverse('sentry-account-recover')
def test_renders_with_required_context(self):
resp = self.client.get(self.path)
assert resp.status_code == 200
self.assertTemplateUsed(resp, 'sentry/account/recover/index.html')
assert 'form' in resp.context
def test_invalid_username(self):
resp = self.client.post(self.path, {
'user': 'nonexistent'
})
assert resp.status_code == 200
self.assertTemplateUsed(resp, 'sentry/account/recover/index.html')
assert 'form' in resp.context
assert 'user' in resp.context['form'].errors
@mock.patch('sentry.models.LostPasswordHash.send_recover_mail')
def test_valid_username(self, send_recover_mail):
resp = self.client.post(self.path, {
'user': self.user.username
})
assert resp.status_code == 200
self.assertTemplateUsed(resp, 'sentry/account/recover/sent.html')
assert 'email' in resp.context
send_recover_mail.assert_called_once_with()
class RecoverPasswordConfirmTest(TestCase):
def setUp(self):
super(RecoverPasswordConfirmTest, self).setUp()
self.password_hash = LostPasswordHash.objects.create(user=self.user)
@fixture
def path(self):
return reverse('sentry-account-recover-confirm', args=[self.user.id, self.password_hash.hash])
def test_valid_token(self):
resp = self.client.get(self.path)
assert resp.status_code == 200
self.assertTemplateUsed(resp, 'sentry/account/recover/confirm.html')
def test_invalid_token(self):
resp = self.client.get(reverse('sentry-account-recover-confirm', args=[1, 'adfadsf']))
assert resp.status_code == 200
self.assertTemplateUsed(resp, 'sentry/account/recover/failure.html')
def test_change_password(self):
resp = self.client.post(self.path, {
'password': 'bar',
'confirm_password': 'bar'
})
assert resp.status_code == 302
user = User.objects.get(id=self.user.id)
assert user.check_password('bar')
|
|
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Simple tests for util."""
import numpy as np
import pyarrow as pa
import tensorflow as tf
from tensorflow_model_analysis import constants
from tensorflow_model_analysis import types
from tensorflow_model_analysis.utils import util
from tensorflow_metadata.proto.v0 import schema_pb2
class UtilTest(tf.test.TestCase):
def testToTensorValueFromTFSparseTensor(self):
original = tf.SparseTensor(
values=[0.5, -1., 0.5, -1.],
indices=[[0, 3, 1], [0, 20, 0], [1, 3, 1], [1, 20, 0]],
dense_shape=[2, 100, 3])
sparse_value = util.to_tensor_value(original)
self.assertAllClose(sparse_value.values, original.values.numpy())
self.assertAllClose(sparse_value.indices, original.indices.numpy())
self.assertAllClose(sparse_value.dense_shape, original.dense_shape.numpy())
def testToTensorValueFromTFV1SparseTensorValue(self):
original = tf.compat.v1.SparseTensorValue(
values=np.array([0.5, -1., 0.5, -1.]),
indices=np.array([[0, 3, 1], [0, 20, 0], [1, 3, 1], [1, 20, 0]]),
dense_shape=np.array([2, 100, 3]))
sparse_value = util.to_tensor_value(original)
self.assertAllClose(sparse_value.values, original.values)
self.assertAllClose(sparse_value.indices, original.indices)
self.assertAllClose(sparse_value.dense_shape, original.dense_shape)
def testToTensorValueFromTFRaggedTensor(self):
original = tf.RaggedTensor.from_nested_row_splits(
[3, 1, 4, 1, 5, 9, 2, 7, 1, 8, 8, 2, 1],
[[0, 3, 6], [0, 2, 3, 4, 5, 5, 8], [0, 2, 3, 3, 6, 9, 10, 11, 13]])
ragged_value = util.to_tensor_value(original)
self.assertAllClose(ragged_value.values, original.flat_values.numpy())
self.assertLen(ragged_value.nested_row_splits, 3)
original_nested_row_splits = original.nested_row_splits
self.assertAllClose(ragged_value.nested_row_splits[0],
original_nested_row_splits[0].numpy())
self.assertAllClose(ragged_value.nested_row_splits[1],
original_nested_row_splits[1].numpy())
self.assertAllClose(ragged_value.nested_row_splits[2],
original_nested_row_splits[2].numpy())
def testToTensorValueFromTFRaggedTensorUsingRowLengths(self):
original = tf.RaggedTensor.from_nested_row_lengths(
[3, 1, 4, 1, 5, 9, 2, 7, 1, 8, 8, 2, 1],
[[3, 3], [2, 1, 1, 1, 0, 3], [2, 1, 0, 3, 3, 1, 1, 2]])
ragged_value = util.to_tensor_value(original)
self.assertAllClose(ragged_value.values, original.flat_values.numpy())
self.assertLen(ragged_value.nested_row_splits, 3)
original_nested_row_splits = original.nested_row_splits
self.assertAllClose(ragged_value.nested_row_splits[0],
original_nested_row_splits[0].numpy())
self.assertAllClose(ragged_value.nested_row_splits[1],
original_nested_row_splits[1].numpy())
self.assertAllClose(ragged_value.nested_row_splits[2],
original_nested_row_splits[2].numpy())
def testToTensorValueFromTFV1RaggedTensorValue(self):
ragged_value = util.to_tensor_value(
tf.compat.v1.ragged.RaggedTensorValue(
values=tf.compat.v1.ragged.RaggedTensorValue(
values=tf.compat.v1.ragged.RaggedTensorValue(
values=np.array([3, 1, 4, 1, 5, 9, 2, 7, 1, 8, 8, 2, 1]),
row_splits=np.array([0, 2, 3, 3, 6, 9, 10, 11, 13])),
row_splits=np.array([0, 2, 3, 4, 5, 5, 8])),
row_splits=np.array([0, 3, 6])))
self.assertAllClose(ragged_value.values,
np.array([3, 1, 4, 1, 5, 9, 2, 7, 1, 8, 8, 2, 1]))
self.assertLen(ragged_value.nested_row_splits, 3)
self.assertAllClose(ragged_value.nested_row_splits[0], np.array([0, 3, 6]))
self.assertAllClose(ragged_value.nested_row_splits[1],
np.array([0, 2, 3, 4, 5, 5, 8]))
self.assertAllClose(ragged_value.nested_row_splits[2],
np.array([0, 2, 3, 3, 6, 9, 10, 11, 13]))
def testToTensorValueFromNumpy(self):
self.assertAllClose(util.to_tensor_value([1, 2, 3]), np.array([1, 2, 3]))
self.assertAllClose(
util.to_tensor_value(np.array([1, 2, 3])), np.array([1, 2, 3]))
def testToTensorValueFromTFTensor(self):
self.assertAllClose(
util.to_tensor_value(tf.constant([1, 2, 3])), np.array([1, 2, 3]))
def testToTFSparseTensorFromSparseTensorValue(self):
original = types.SparseTensorValue(
values=np.array([0.5, -1., 0.5, -1.]),
indices=np.array([[0, 3, 1], [0, 20, 0], [1, 3, 1], [1, 20, 0]]),
dense_shape=np.array([2, 100, 3]))
sparse_tensor = util.to_tensorflow_tensor(original)
self.assertAllClose(sparse_tensor.values.numpy(), original.values)
self.assertAllClose(sparse_tensor.indices.numpy(), original.indices)
self.assertAllClose(sparse_tensor.dense_shape.numpy(), original.dense_shape)
def testToTFRaggedTensorFromRaggedTensorValue(self):
original = types.RaggedTensorValue(
values=np.array([3, 1, 4, 1, 5, 9, 2, 7, 1, 8, 8, 2, 1]),
nested_row_splits=[
np.array([0, 3, 6]),
np.array([0, 2, 3, 4, 5, 5, 8]),
np.array([0, 2, 3, 3, 6, 9, 10, 11, 13])
])
ragged_tensor = util.to_tensorflow_tensor(original)
self.assertAllClose(ragged_tensor.flat_values.numpy(), original.values)
self.assertLen(ragged_tensor.nested_row_splits, 3)
self.assertAllClose(ragged_tensor.nested_row_splits[0].numpy(),
original.nested_row_splits[0])
self.assertAllClose(ragged_tensor.nested_row_splits[1].numpy(),
original.nested_row_splits[1])
self.assertAllClose(ragged_tensor.nested_row_splits[2].numpy(),
original.nested_row_splits[2])
def testToTFTensorFromNumpy(self):
self.assertAllClose(
util.to_tensorflow_tensor(np.array([1, 2, 3])).numpy(),
np.array([1, 2, 3]))
def testToFromTensorValues(self):
tensor_values = {
'features': {
'feature_1':
np.array([1, 2, 3]),
'feature_2':
types.SparseTensorValue(
values=np.array([0.5, -1., 0.5, -1.]),
indices=np.array([[0, 3, 1], [0, 20, 0], [1, 3, 1],
[1, 20, 0]]),
dense_shape=np.array([2, 100, 3])),
'feature_3':
types.RaggedTensorValue(
values=np.array([3, 1, 4, 1, 5, 9, 2, 7, 1, 8, 8, 2, 1]),
nested_row_splits=[
np.array([0, 3, 6]),
np.array([0, 2, 3, 4, 5, 5, 8]),
np.array([0, 2, 3, 3, 6, 9, 10, 11, 13])
])
},
'labels': np.array([1])
}
actual = util.to_tensor_values(util.to_tensorflow_tensors(tensor_values))
self.assertAllClose(actual, tensor_values)
def testToFromTensorValuesWithSpecs(self):
sparse_value = types.SparseTensorValue(
values=np.array([0.5, -1., 0.5, -1.], dtype=np.float32),
indices=np.array([[0, 3, 1], [0, 20, 0], [1, 3, 1], [1, 20, 0]]),
dense_shape=np.array([2, 100, 3]))
ragged_value = types.RaggedTensorValue(
values=np.array([3, 1, 4, 1, 5, 9, 2, 7, 1, 8, 8, 2, 1],
dtype=np.float32),
nested_row_splits=[
np.array([0, 3, 6]),
np.array([0, 2, 3, 4, 5, 5, 8]),
np.array([0, 2, 3, 3, 6, 9, 10, 11, 13])
])
tensor_values = {
'features': {
'feature_1': np.array([1, 2, 3], dtype=np.float32),
'feature_2': sparse_value,
'feature_3': ragged_value,
'ignored_feature': np.array([1, 2, 3])
},
'labels': np.array([1], dtype=np.float32),
'ignored': np.array([2])
}
specs = {
'features': {
'feature_1':
tf.TensorSpec([3], dtype=tf.float32),
'feature_2':
tf.SparseTensorSpec(shape=[2, 100, 3], dtype=tf.float32),
'feature_3':
tf.RaggedTensorSpec(
shape=[2, None, None, None], dtype=tf.float32)
},
'labels': tf.TensorSpec([1], dtype=tf.float32)
}
actual = util.to_tensor_values(
util.to_tensorflow_tensors(tensor_values, specs))
expected = {
'features': {
'feature_1': np.array([1, 2, 3], dtype=np.float32),
'feature_2': sparse_value,
'feature_3': ragged_value
},
'labels': np.array([1], dtype=np.float32)
}
self.assertAllClose(actual, expected)
def testToTensorflowTensorsRaisesIncompatibleSpecError(self):
with self.assertRaisesRegex(ValueError, '.* is not compatible with .*'):
util.to_tensorflow_tensors(
{'features': {
'feature_1': np.array([1, 2, 3], dtype=np.int64)
}}, {'features': {
'feature_1': tf.TensorSpec([1], dtype=tf.float32)
}})
def testToTensorflowTensorsRaisesUnknownKeyError(self):
with self.assertRaisesRegex(ValueError, '.* not found in .*'):
util.to_tensorflow_tensors(
{'features': {
'feature_1': np.array([1, 2, 3], dtype=np.float32)
}}, {
'features': {
'missing_feature': tf.TensorSpec([1], dtype=tf.float32)
}
})
def testRecordBatchToTensorValues(self):
record_batch = pa.record_batch(
[pa.array([[1], [2], [3]]),
pa.array([[0], [1], [1]])], ['feature_1', 'feature_2'])
actual = util.record_batch_to_tensor_values(record_batch)
expected = {
'feature_1': np.array([1, 2, 3]),
'feature_2': np.array([0, 1, 1])
}
self.assertAllClose(actual, expected)
def testRecordBatchToTensorValuesWithTensorRepresentation(self):
record_batch = pa.record_batch(
[pa.array([[1, 2], [2, 3], [3, 4]]),
pa.array([[0], [1], [1]])], ['feature_1', 'feature_2'])
tensor_representation = schema_pb2.TensorRepresentation()
tensor_representation.dense_tensor.column_name = 'feature_1'
tensor_representation.dense_tensor.shape.dim.append(
schema_pb2.FixedShape.Dim(size=2))
actual = util.record_batch_to_tensor_values(
record_batch, {'feature_1': tensor_representation})
expected = {
'feature_1': np.array([[1, 2], [2, 3], [3, 4]]),
'feature_2': np.array([0, 1, 1])
}
self.assertAllClose(actual, expected)
def testBatchSizeWithTensorValues(self):
tensor_values = {
'feature_1':
np.array([1, 2], dtype=np.float32),
'feature_2':
types.SparseTensorValue(
values=np.array([0.5, -1., 0.5, -1.], dtype=np.float32),
indices=np.array([[0, 3, 1], [0, 20, 0], [1, 3, 1], [1, 20,
0]]),
dense_shape=np.array([2, 100, 3])),
'feature_3':
types.RaggedTensorValue(
values=np.array([3, 1, 4, 1, 5, 9, 2, 7, 1, 8, 8, 2, 1],
dtype=np.float32),
nested_row_splits=[
np.array([0, 3, 6]),
np.array([0, 2, 3, 4, 5, 5, 8]),
np.array([0, 2, 3, 3, 6, 9, 10, 11, 13])
]),
}
self.assertEqual(util.batch_size(tensor_values), 2)
def testBatchSizeWithTFTensors(self):
tensor_values = {
'feature_1':
tf.constant([1, 2]),
'feature_2':
tf.SparseTensor(
values=[0.5, -1., 0.5, -1.],
indices=[[0, 3, 1], [0, 20, 0], [1, 3, 1], [1, 20, 0]],
dense_shape=[2, 100, 3]),
'feature_3':
tf.RaggedTensor.from_nested_row_lengths(
[3, 1, 4, 1, 5, 9, 2, 7, 1, 8, 8, 2, 1],
[[3, 3], [2, 1, 1, 1, 0, 3], [2, 1, 0, 3, 3, 1, 1, 2]]),
}
self.assertEqual(util.batch_size(tensor_values), 2)
def testBatchSizeError(self):
with self.assertRaisesRegex(ValueError,
'Batch sizes have differing values.*'):
util.batch_size({
'feature_1': np.array([1, 2, 3], dtype=np.int64),
'feature_2': np.array([1, 2], dtype=np.int64)
})
def testUniqueKey(self):
self.assertEqual('key', util.unique_key('key', ['key1', 'key2']))
self.assertEqual('key1_1', util.unique_key('key1', ['key1', 'key2']))
self.assertEqual('key1_2', util.unique_key('key1', ['key1', 'key1_1']))
def testUniqueKeyWithUpdateKeys(self):
keys = ['key1', 'key2']
util.unique_key('key1', keys, update_keys=True)
self.assertEqual(['key1', 'key2', 'key1_1'], keys)
def testCompoundKey(self):
self.assertEqual('a_b', util.compound_key(['a_b']))
self.assertEqual('a__b', util.compound_key(['a', 'b']))
self.assertEqual('a__b____c__d', util.compound_key(['a', 'b__c', 'd']))
def testGetByKeys(self):
self.assertEqual([1], util.get_by_keys({'labels': [1]}, ['labels']))
def testGetByKeysMissingAndDefault(self):
self.assertEqual('a', util.get_by_keys({}, ['labels'], default_value='a'))
self.assertEqual(
'a', util.get_by_keys({'labels': {}}, ['labels'], default_value='a'))
def testGetByKeysMissingAndOptional(self):
self.assertIsNone(util.get_by_keys({}, ['labels'], optional=True))
self.assertIsNone(
util.get_by_keys({'labels': {}}, ['labels'], optional=True))
def testGetByKeysMissingAndNonOptional(self):
with self.assertRaisesRegex(ValueError, 'not found'):
util.get_by_keys({}, ['labels'])
with self.assertRaisesRegex(ValueError, 'not found'):
util.get_by_keys({'labels': {}}, ['labels'])
def testGetByKeysWitMultiLevel(self):
self.assertEqual([1],
util.get_by_keys({'predictions': {
'output': [1]
}}, ['predictions', 'output']))
self.assertEqual([1],
util.get_by_keys(
{'predictions': {
'model': {
'output': [1],
},
}}, ['predictions', 'model', 'output']))
def testGetByKeysWithPrefix(self):
self.assertEqual({
'all_classes': ['a', 'b'],
'probabilities': [1]
},
util.get_by_keys(
{
'predictions': {
'output/all_classes': ['a', 'b'],
'output/probabilities': [1],
},
}, ['predictions', 'output']))
self.assertEqual({
'all_classes': ['a', 'b'],
'probabilities': [1]
},
util.get_by_keys(
{
'predictions': {
'model': {
'output/all_classes': ['a', 'b'],
'output/probabilities': [1],
},
},
}, ['predictions', 'model', 'output']))
def testGetByKeysMissingSecondaryKey(self):
with self.assertRaisesRegex(ValueError, 'not found'):
util.get_by_keys({'predictions': {
'missing': [1]
}}, ['predictions', 'output'])
def testIncludeFilter(self):
got = util.include_filter(
include={
'b': {},
'c': {
'c2': {
'c21': {}
}
},
'e': {
'e2': {
'e21': {}
}
}
},
target={
'a': 1,
'b': {
'b2': 2
},
'c': {
'c2': {
'c21': 3,
'c22': 4
}
},
'd': {
'd2': 4
},
'e': {
'e2': {
'e22': {}
}
}
})
self.assertEqual(got, {
'b': {
'b2': 2
},
'c': {
'c2': {
'c21': 3
}
},
'e': {
'e2': {}
}
})
def testExcludeFilter(self):
got = util.exclude_filter(
exclude={
'b': {},
'c': {
'c2': {
'c21': {}
}
}
},
target={
'a': 1,
'b': {
'b2': 2
},
'c': {
'c2': {
'c21': 3,
'c22': 4
}
},
'd': {
'd2': 4
}
})
self.assertEqual(got, {'a': 1, 'c': {'c2': {'c22': 4}}, 'd': {'d2': 4}})
def testMergeFilters(self):
filter1 = {
'features': {
'feature_1': {},
'feature_2': {},
},
'labels': {},
'example_weights': {
'model1': {},
},
'predictions': {
'model1': {
'output1': {},
},
'model2': {
'output1': {}
}
},
'attributions': {
'model1': {}
},
}
filter2 = {
'features': {
'feature_2': {},
'feature_3': {},
},
'labels': {
'model1': {},
'model2': {},
},
'example_weights': {
'model2': {},
},
'predictions': {
'model1': {
'output2': {},
},
'model2': {
'output1': {},
'output2': {},
}
},
'attributions': {
'model1': {
'output1': {
'feature1': {}
},
},
},
}
merged = util.merge_filters(filter1, filter2)
self.assertEqual(
merged, {
'features': {
'feature_1': {},
'feature_2': {},
'feature_3': {},
},
'labels': {},
'example_weights': {
'model1': {},
'model2': {},
},
'predictions': {
'model1': {
'output1': {},
'output2': {},
},
'model2': {
'output1': {},
'output2': {},
}
},
'attributions': {
'model1': {},
},
})
def testKwargsOnly(self):
@util.kwargs_only
def fn(a, b, c, d=None, e=5):
if d is None:
d = 100
if e is None:
e = 1000
return a + b + c + d + e
self.assertEqual(1 + 2 + 3 + 100 + 5, fn(a=1, b=2, c=3))
self.assertEqual(1 + 2 + 3 + 100 + 1000, fn(a=1, b=2, c=3, e=None))
with self.assertRaisesRegex(TypeError, 'keyword-arguments only'):
fn(1, 2, 3)
with self.assertRaisesRegex(TypeError, 'with c specified'):
fn(a=1, b=2, e=5) # pylint: disable=no-value-for-parameter
with self.assertRaisesRegex(TypeError, 'with extraneous kwargs'):
fn(a=1, b=2, c=3, f=11) # pylint: disable=unexpected-keyword-arg
def testGetFeaturesFromExtracts(self):
self.assertEqual(
{'a': np.array([1])},
util.get_features_from_extracts({
constants.FEATURES_PREDICTIONS_LABELS_KEY:
types.FeaturesPredictionsLabels(
input_ref=0,
features={'a': np.array([1])},
predictions={},
labels={})
}),
)
self.assertEqual(
{'a': np.array([1])},
util.get_features_from_extracts(
{constants.FEATURES_KEY: {
'a': np.array([1])
}}),
)
self.assertEqual({}, util.get_features_from_extracts({}))
def testMergeExtracts(self):
extracts = [
{
'features': {
'feature_1':
np.array([1.0, 2.0]),
'feature_2':
np.array([1.0, 2.0]),
'feature_3':
types.SparseTensorValue(
values=np.array([1]),
indices=np.array([[0, 1]]),
dense_shape=np.array([1, 3])),
'feature_4':
types.RaggedTensorValue(
values=np.array([3, 1, 4, 1, 5, 9, 2, 6]),
nested_row_splits=[np.array([0, 4, 4, 7, 8, 8])]),
'feature_5':
types.SparseTensorValue(
values=np.array([1]),
indices=np.array([[0, 1]]),
dense_shape=np.array([1, 3])),
},
'labels': np.array([1.0]),
'example_weights': np.array(0.0),
'predictions': {
'model1': np.array([0.1, 0.2]),
'model2': np.array([0.1, 0.2])
},
'_slice_key_types': [()]
},
{
'features': {
'feature_1':
np.array([3.0, 4.0]),
'feature_2':
np.array([3.0, 4.0]),
'feature_3':
types.SparseTensorValue(
values=np.array([2]),
indices=np.array([[0, 2]]),
dense_shape=np.array([1, 3])),
'feature_4':
types.RaggedTensorValue(
values=np.array([3, 1, 4, 1, 5, 9, 2, 6]),
nested_row_splits=[np.array([0, 4, 4, 7, 8, 8])]),
'feature_5':
types.SparseTensorValue(
values=np.array([2]),
indices=np.array([[0, 2]]),
dense_shape=np.array([1, 4])),
},
'labels': np.array([0.0]),
'example_weights': np.array(0.5),
'predictions': {
'model1': np.array([0.3, 0.4]),
'model2': np.array([0.3, 0.4])
},
'_slice_key_types': [()]
},
{
'features': {
'feature_1':
np.array([5.0, 6.0]),
'feature_2':
np.array([5.0, 6.0]),
'feature_3':
types.SparseTensorValue(
values=np.array([3]),
indices=np.array([[0, 0]]),
dense_shape=np.array([1, 3])),
'feature_4':
types.RaggedTensorValue(
values=np.array([3, 1, 4, 1, 5, 9, 2, 6]),
nested_row_splits=[np.array([0, 4, 4, 7, 8, 8])]),
'feature_5':
types.SparseTensorValue(
values=np.array([3]),
indices=np.array([[0, 3]]),
dense_shape=np.array([1, 5])),
},
'labels': np.array([1.0]),
'example_weights': np.array(1.0),
'predictions': {
'model1': np.array([0.5, 0.6]),
'model2': np.array([0.5, 0.6])
},
'_slice_key_types': [()]
},
]
expected = {
'features': {
'feature_1':
np.array([[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]]),
'feature_2':
np.array([[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]]),
'feature_3':
types.SparseTensorValue(
values=np.array([1, 2, 3]),
indices=np.array([[0, 0, 1], [1, 0, 2], [2, 0, 0]]),
dense_shape=np.array([3, 1, 3])),
'feature_4':
types.RaggedTensorValue(
values=np.array([
3, 1, 4, 1, 5, 9, 2, 6, 3, 1, 4, 1, 5, 9, 2, 6, 3, 1, 4,
1, 5, 9, 2, 6
]),
nested_row_splits=[
np.array([0, 5, 10, 15]),
np.array([
0, 4, 4, 7, 8, 8, 12, 12, 15, 16, 16, 20, 20, 23,
24, 24
])
]),
'feature_5':
types.SparseTensorValue(
values=np.array([1, 2, 3]),
indices=np.array([[0, 0, 1], [1, 0, 2], [2, 0, 3]]),
dense_shape=np.array([3, 1, 5])),
},
'labels': np.array([1.0, 0.0, 1.0]),
'example_weights': np.array([0.0, 0.5, 1.0]),
'predictions': {
'model1': np.array([[0.1, 0.2], [0.3, 0.4], [0.5, 0.6]]),
'model2': np.array([[0.1, 0.2], [0.3, 0.4], [0.5, 0.6]])
},
'_slice_key_types': np.array([(), (), ()])
}
self.assertAllClose(util.merge_extracts(extracts), expected)
def testSplitExtracts(self):
extracts = {
'features': {
'feature_1':
np.array([[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]]),
'feature_2':
np.array([[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]]),
'feature_3':
types.SparseTensorValue(
values=np.array([1, 2, 3]),
indices=np.array([[0, 0, 1], [1, 0, 2], [2, 0, 0]]),
dense_shape=np.array([3, 1, 3])),
'feature_4':
types.RaggedTensorValue(
values=np.array([
3, 1, 4, 1, 5, 9, 2, 6, 3, 1, 4, 1, 5, 9, 2, 6, 3, 1, 4,
1, 5, 9, 2, 6
]),
nested_row_splits=[
np.array([0, 5, 10, 15]),
np.array([
0, 4, 4, 7, 8, 8, 12, 12, 15, 16, 16, 20, 20, 23,
24, 24
])
])
},
'labels': np.array([1.0, 0.0, 1.0]),
'example_weights': np.array([0.0, 0.5, 1.0]),
'predictions': {
'model1': np.array([[0.1, 0.2], [0.3, 0.4], [0.5, 0.6]]),
'model2': np.array([[0.1, 0.2], [0.3, 0.4], [0.5, 0.6]])
},
'empty': None,
'multi_level_empty': {
'empty': None,
'next_level': {
'empty': None
},
},
'_slice_key_types': np.array([(), (), ()])
}
expected = [
{
'features': {
'feature_1':
np.array([1.0, 2.0]),
'feature_2':
np.array([1.0, 2.0]),
'feature_3':
types.SparseTensorValue(
values=np.array([1]),
indices=np.array([[0, 1]]),
dense_shape=np.array([1, 3])),
'feature_4':
types.RaggedTensorValue(
values=np.array([3, 1, 4, 1, 5, 9, 2, 6]),
nested_row_splits=[np.array([0, 4, 4, 7, 8, 8])])
},
'labels': np.array([1.0]),
'example_weights': np.array([0.0]),
'predictions': {
'model1': np.array([0.1, 0.2]),
'model2': np.array([0.1, 0.2])
},
'_slice_key_types': np.array([()])
},
{
'features': {
'feature_1':
np.array([3.0, 4.0]),
'feature_2':
np.array([3.0, 4.0]),
'feature_3':
types.SparseTensorValue(
values=np.array([2]),
indices=np.array([[0, 2]]),
dense_shape=np.array([1, 3])),
'feature_4':
types.RaggedTensorValue(
values=np.array([3, 1, 4, 1, 5, 9, 2, 6]),
nested_row_splits=[np.array([0, 4, 4, 7, 8, 8])])
},
'labels': np.array([0.0]),
'example_weights': np.array([0.5]),
'predictions': {
'model1': np.array([0.3, 0.4]),
'model2': np.array([0.3, 0.4])
},
'_slice_key_types': np.array([()])
},
{
'features': {
'feature_1':
np.array([5.0, 6.0]),
'feature_2':
np.array([5.0, 6.0]),
'feature_3':
types.SparseTensorValue(
values=np.array([3]),
indices=np.array([[0, 0]]),
dense_shape=np.array([1, 3])),
'feature_4':
types.RaggedTensorValue(
values=np.array([3, 1, 4, 1, 5, 9, 2, 6]),
nested_row_splits=[np.array([0, 4, 4, 7, 8, 8])])
},
'labels': np.array([1.0]),
'example_weights': np.array([1.0]),
'predictions': {
'model1': np.array([0.5, 0.6]),
'model2': np.array([0.5, 0.6])
},
'_slice_key_types': np.array([()])
},
]
splits = util.split_extracts(extracts)
self.assertLen(splits, 3)
# Verify empty and delete since None can't be compared with assertAllClose
for i in range(3):
self.assertIn('empty', splits[i])
self.assertIsNone(splits[i]['empty'])
del splits[i]['empty']
self.assertIn('multi_level_empty', splits[i])
self.assertIn('empty', splits[i]['multi_level_empty'])
self.assertIsNone(splits[i]['multi_level_empty']['empty'])
self.assertIn('next_level', splits[i]['multi_level_empty'])
self.assertIn('empty', splits[i]['multi_level_empty']['next_level'])
self.assertIsNone(splits[i]['multi_level_empty']['next_level']['empty'])
del splits[i]['multi_level_empty']
self.assertAllClose(splits, expected)
if __name__ == '__main__':
tf.compat.v1.enable_v2_behavior()
tf.test.main()
|
|
"""
WSGI Middleware apps that haven't gotten around to being extracted to
their own modules.
"""
import logging
import time
import urllib
from tiddlyweb.model.policy import UserRequiredError, ForbiddenError
from tiddlyweb.store import Store
from tiddlyweb.web.http import HTTP403, HTTP302
from tiddlyweb.web.util import server_base_url
from tiddlyweb import __version__ as VERSION
class Header(object):
"""
    If REQUEST_METHOD is HEAD, change it to GET, run the
    downstream application, and discard the response body so
    that only the headers are returned.
"""
def __init__(self, application):
self.application = application
def __call__(self, environ, start_response):
if environ['REQUEST_METHOD'] == 'HEAD':
environ['REQUEST_METHOD'] = 'GET'
_ = self.application(environ, start_response)
return []
else:
return self.application(environ, start_response)
class HTMLPresenter(object):
"""
    Take the core application's output and, if tiddlyweb.title
    is set in the environ and the client appears to be a browser,
    wrap it in an HTML header and footer.
"""
def __init__(self, application):
self.application = application
def __call__(self, environ, start_response):
output = self.application(environ, start_response)
if self._needs_title(environ):
def wrapped_output(output):
yield self._header(environ)
for item in output:
yield item
yield self._footer(environ)
return
return wrapped_output(output)
return output
def _needs_title(self, environ):
"""
Determine if we are outputting html to a browser.
"""
return ('tiddlyweb.title' in environ and 'Mozilla'
in environ.get('HTTP_USER_AGENT', ''))
def _header(self, environ):
"""
Wrap the HTML in an HTML header.
"""
css = ''
if environ['tiddlyweb.config'].get('css_uri', ''):
css = '<link rel="stylesheet" href="%s" type="text/css" />' % \
environ['tiddlyweb.config']['css_uri']
try:
links = '\n'.join(environ['tiddlyweb.links'])
except KeyError:
links = ''
header_extra = self.header_extra(environ)
return """
<html>
<head>
<title>TiddlyWeb - %s</title>
%s
%s
</head>
<body>
<div id="header">
<h1>%s</h1>
%s
</div>
<div id="content">
""" % (environ['tiddlyweb.title'], css, links,
environ['tiddlyweb.title'], header_extra)
def _footer(self, environ):
"""
Wrap the HTML with an HTML footer.
"""
footer_extra = self.footer_extra(environ)
return """
</div>
<div id="footer">
%s
<div id="badge">This is <a href="http://tiddlyweb.com/">TiddlyWeb</a> %s</div>
<div id="usergreet">User %s.</div>
</div>
</body>
</html>
""" % (footer_extra, VERSION, environ['tiddlyweb.usersign']['name'])
def header_extra(self, environ):
"""
Override this in plugins to add to the header.
"""
return ''
def footer_extra(self, environ):
"""
Override this in plugins to add to the footer.
"""
return ''
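# A minimal sketch, not part of TiddlyWeb itself: plugins can subclass
# HTMLPresenter and override header_extra()/footer_extra() to inject extra
# markup. The link below is purely illustrative.
class _ExampleNavPresenter(HTMLPresenter):
    """HTMLPresenter that adds a navigation link to every page header."""
    def header_extra(self, environ):
        return '<div id="nav"><a href="/recipes">recipes</a></div>'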
class SimpleLog(object):
"""
    WSGI middleware that writes a very simple access log via the
    logging module. Borrowed from Paste's TransLogger.
"""
format = ('%(REMOTE_ADDR)s - %(REMOTE_USER)s [%(time)s] '
'"%(REQUEST_METHOD)s %(REQUEST_URI)s %(HTTP_VERSION)s" '
'%(status)s %(bytes)s "%(HTTP_REFERER)s" "%(HTTP_USER_AGENT)s"')
def __init__(self, application):
self.application = application
def __call__(self, environ, start_response):
logger = logging.getLogger()
if logger.isEnabledFor(logging.INFO):
return self._log_app(environ, start_response)
else:
return self.application(environ, start_response)
def _log_app(self, environ, start_response):
req_uri = urllib.quote(environ.get('SCRIPT_NAME', '')
+ environ.get('PATH_INFO', ''))
if environ.get('QUERY_STRING'):
req_uri += '?' + environ['QUERY_STRING']
def replacement_start_response(status, headers, exc_info=None):
"""
We need to gaze at the content-length, if set, to
write log info.
"""
size = None
for name, value in headers:
if name.lower() == 'content-length':
size = value
self.write_log(environ, req_uri, status, size)
return start_response(status, headers, exc_info)
return self.application(environ, replacement_start_response)
def write_log(self, environ, req_uri, status, size):
"""
Print the log info out in a formatted form to logging.info.
This is rather more complex than desirable because there is
a mix of str and unicode in the gathered data and we need to
make it acceptable for output.
"""
environ['REMOTE_USER'] = None
try:
environ['REMOTE_USER'] = environ['tiddlyweb.usersign']['name']
except KeyError:
pass
if size is None:
size = '-'
log_format = {
'REMOTE_ADDR': environ.get('REMOTE_ADDR') or '-',
'REMOTE_USER': environ.get('REMOTE_USER') or '-',
'REQUEST_METHOD': environ['REQUEST_METHOD'],
'REQUEST_URI': req_uri,
'HTTP_VERSION': environ.get('SERVER_PROTOCOL'),
'time': time.strftime('%d/%b/%Y:%H:%M:%S ', time.localtime()),
'status': status.split(None, 1)[0],
'bytes': size,
'HTTP_REFERER': environ.get('HTTP_REFERER', '-'),
'HTTP_USER_AGENT': environ.get('HTTP_USER_AGENT', '-'),
}
for key, value in log_format.items():
try:
log_format[key] = value.encode('utf-8', 'replace')
except UnicodeDecodeError:
log_format[key] = value
message = self.format % log_format
logging.info(message)
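# A minimal sketch, not part of the original module: SimpleLog only writes
# entries when INFO logging is enabled on the root logger, so a server would
# typically configure logging before wrapping its application.
def _example_enable_simple_log(application):
    """Return *application* wrapped in SimpleLog with INFO logging enabled."""
    logging.basicConfig(level=logging.INFO)
    return SimpleLog(application)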
class StoreSet(object):
"""
WSGI Middleware that sets our choice of Store (tiddlyweb.store)
in the environment.
"""
def __init__(self, application):
self.application = application
def __call__(self, environ, start_response):
database = Store(environ['tiddlyweb.config']['server_store'][0],
environ['tiddlyweb.config']['server_store'][1],
environ)
environ['tiddlyweb.store'] = database
return self.application(environ, start_response)
class EncodeUTF8(object):
"""
WSGI Middleware to ensure that the content we send out the pipe is encoded
as UTF-8. Within the application content is _unicode_ (i.e. not encoded).
"""
def __init__(self, application):
self.application = application
def __call__(self, environ, start_response):
return (_encoder(output) for output in
self.application(environ, start_response))
def _encoder(string):
"""
Take a potentially unicode string and encode it
as UTF-8.
"""
# if we are currently unicode, encode to utf-8
if type(string) == unicode:
string = string.encode('utf-8')
return string
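# For illustration (Python 2 semantics, matching this module):
#   _encoder(u'caf\xe9')      -> 'caf\xc3\xa9'   (UTF-8 encoded bytes)
#   _encoder('already bytes') -> 'already bytes'  (returned unchanged)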
class PermissionsExceptor(object):
"""
Trap permissions exceptions and turn them into HTTP
exceptions so the errors are propagated to client
code.
"""
def __init__(self, application):
self.application = application
def __call__(self, environ, start_response):
try:
output = self.application(environ, start_response)
return output
except ForbiddenError, exc:
raise HTTP403(exc)
except UserRequiredError, exc:
# We only send to the challenger on a GET
# request. Otherwise we're in for major confusion
# on dealing with redirects and the like in
# scripts and javascript, where follow
# behavior is inconsistent.
if environ['REQUEST_METHOD'] == 'GET':
url = _challenge_url(environ)
raise HTTP302(url)
raise HTTP403(exc)
def _challenge_url(environ):
"""
Generate the URL of the challenge system
so that GET requests are redirected to the
right place.
"""
script_name = environ.get('SCRIPT_NAME', '')
query_string = environ.get('QUERY_STRING', None)
redirect = script_name
if query_string:
redirect += '?%s' % query_string
redirect = urllib.quote(redirect, safe='')
return '%s/challenge?tiddlyweb_redirect=%s' % (
server_base_url(environ), redirect)
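# A minimal sketch, not part of TiddlyWeb itself: each middleware class above
# wraps a WSGI application, so a server builds its stack by nesting them. The
# ordering here is illustrative; a real server assembles the stack from its
# configuration.
def _example_middleware_stack(application):
    """Wrap *application* in the middleware defined in this module."""
    app = PermissionsExceptor(application)
    app = EncodeUTF8(app)
    app = HTMLPresenter(app)
    app = StoreSet(app)
    app = SimpleLog(app)
    app = Header(app)
    return app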
|
|
# Copyright 2013 Josh Durgin
# Copyright 2013 Red Hat, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import datetime
import mock
from oslo_config import cfg
from oslo_serialization import jsonutils
from six.moves import urllib
import webob
from webob import exc
from nova.api.openstack.compute import assisted_volume_snapshots \
as assisted_snaps_v21
from nova.api.openstack.compute.legacy_v2.contrib import \
assisted_volume_snapshots as assisted_snaps_v2
from nova.api.openstack.compute.legacy_v2.contrib import volumes
from nova.api.openstack.compute import volumes as volumes_v21
from nova.api.openstack import extensions
from nova.compute import api as compute_api
from nova.compute import flavors
from nova import context
from nova import db
from nova import exception
from nova import objects
from nova import test
from nova.tests.unit.api.openstack import fakes
from nova.tests.unit import fake_block_device
from nova.tests.unit import fake_instance
from nova.volume import cinder
CONF = cfg.CONF
CONF.import_opt('password_length', 'nova.utils')
FAKE_UUID = 'aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa'
FAKE_UUID_A = '00000000-aaaa-aaaa-aaaa-000000000000'
FAKE_UUID_B = 'bbbbbbbb-bbbb-bbbb-bbbb-bbbbbbbbbbbb'
FAKE_UUID_C = 'cccccccc-cccc-cccc-cccc-cccccccccccc'
FAKE_UUID_D = 'dddddddd-dddd-dddd-dddd-dddddddddddd'
IMAGE_UUID = 'c905cedb-7281-47e4-8a62-f26bc5fc4c77'
def fake_get_instance(self, context, instance_id, want_objects=False,
expected_attrs=None):
return fake_instance.fake_instance_obj(context, **{'uuid': instance_id})
def fake_get_volume(self, context, id):
return {'id': 'woot'}
def fake_attach_volume(self, context, instance, volume_id, device):
pass
def fake_detach_volume(self, context, instance, volume):
pass
def fake_swap_volume(self, context, instance,
old_volume_id, new_volume_id):
pass
def fake_create_snapshot(self, context, volume, name, description):
return {'id': 123,
'volume_id': 'fakeVolId',
'status': 'available',
'volume_size': 123,
'created_at': '2013-01-01 00:00:01',
'display_name': 'myVolumeName',
'display_description': 'myVolumeDescription'}
def fake_delete_snapshot(self, context, snapshot_id):
pass
def fake_compute_volume_snapshot_delete(self, context, volume_id, snapshot_id,
delete_info):
pass
def fake_compute_volume_snapshot_create(self, context, volume_id,
create_info):
pass
def fake_bdms_get_all_by_instance(context, instance_uuid, use_slave=False):
return [fake_block_device.FakeDbBlockDeviceDict(
{'id': 1,
'instance_uuid': instance_uuid,
'device_name': '/dev/fake0',
'delete_on_termination': 'False',
'source_type': 'volume',
'destination_type': 'volume',
'snapshot_id': None,
'volume_id': FAKE_UUID_A,
'volume_size': 1}),
fake_block_device.FakeDbBlockDeviceDict(
{'id': 2,
'instance_uuid': instance_uuid,
'device_name': '/dev/fake1',
'delete_on_termination': 'False',
'source_type': 'volume',
'destination_type': 'volume',
'snapshot_id': None,
'volume_id': FAKE_UUID_B,
'volume_size': 1})]
class BootFromVolumeTest(test.TestCase):
def setUp(self):
super(BootFromVolumeTest, self).setUp()
self.stubs.Set(compute_api.API, 'create',
self._get_fake_compute_api_create())
fakes.stub_out_nw_api(self.stubs)
self._block_device_mapping_seen = None
self._legacy_bdm_seen = True
self.flags(
osapi_compute_extension=[
'nova.api.openstack.compute.contrib.select_extensions'],
osapi_compute_ext_list=['Volumes', 'Block_device_mapping_v2_boot'])
def _get_fake_compute_api_create(self):
def _fake_compute_api_create(cls, context, instance_type,
image_href, **kwargs):
self._block_device_mapping_seen = kwargs.get(
'block_device_mapping')
self._legacy_bdm_seen = kwargs.get('legacy_bdm')
inst_type = flavors.get_flavor_by_flavor_id(2)
resv_id = None
return ([{'id': 1,
'display_name': 'test_server',
'uuid': FAKE_UUID,
'instance_type': inst_type,
'access_ip_v4': '1.2.3.4',
'access_ip_v6': 'fead::1234',
'image_ref': IMAGE_UUID,
'user_id': 'fake',
'project_id': 'fake',
'created_at': datetime.datetime(2010, 10, 10, 12, 0, 0),
'updated_at': datetime.datetime(2010, 11, 11, 11, 0, 0),
'progress': 0,
'fixed_ips': []
}], resv_id)
return _fake_compute_api_create
def test_create_root_volume(self):
body = dict(server=dict(
name='test_server', imageRef=IMAGE_UUID,
flavorRef=2, min_count=1, max_count=1,
block_device_mapping=[dict(
volume_id='1',
device_name='/dev/vda',
virtual='root',
delete_on_termination=False,
)]
))
req = fakes.HTTPRequest.blank('/v2/fake/os-volumes_boot')
req.method = 'POST'
req.body = jsonutils.dumps(body)
req.headers['content-type'] = 'application/json'
res = req.get_response(fakes.wsgi_app(
init_only=('os-volumes_boot', 'servers')))
self.assertEqual(res.status_int, 202)
server = jsonutils.loads(res.body)['server']
self.assertEqual(FAKE_UUID, server['id'])
self.assertEqual(CONF.password_length, len(server['adminPass']))
self.assertEqual(len(self._block_device_mapping_seen), 1)
self.assertTrue(self._legacy_bdm_seen)
self.assertEqual(self._block_device_mapping_seen[0]['volume_id'], '1')
self.assertEqual(self._block_device_mapping_seen[0]['device_name'],
'/dev/vda')
def test_create_root_volume_bdm_v2(self):
body = dict(server=dict(
name='test_server', imageRef=IMAGE_UUID,
flavorRef=2, min_count=1, max_count=1,
block_device_mapping_v2=[dict(
source_type='volume',
uuid='1',
device_name='/dev/vda',
boot_index=0,
delete_on_termination=False,
)]
))
req = fakes.HTTPRequest.blank('/v2/fake/os-volumes_boot')
req.method = 'POST'
req.body = jsonutils.dumps(body)
req.headers['content-type'] = 'application/json'
res = req.get_response(fakes.wsgi_app(
init_only=('os-volumes_boot', 'servers')))
self.assertEqual(res.status_int, 202)
server = jsonutils.loads(res.body)['server']
self.assertEqual(FAKE_UUID, server['id'])
self.assertEqual(CONF.password_length, len(server['adminPass']))
self.assertEqual(len(self._block_device_mapping_seen), 1)
self.assertFalse(self._legacy_bdm_seen)
self.assertEqual(self._block_device_mapping_seen[0]['volume_id'], '1')
self.assertEqual(self._block_device_mapping_seen[0]['boot_index'],
0)
self.assertEqual(self._block_device_mapping_seen[0]['device_name'],
'/dev/vda')
class VolumeApiTestV21(test.NoDBTestCase):
url_prefix = '/v2/fake'
def setUp(self):
super(VolumeApiTestV21, self).setUp()
fakes.stub_out_networking(self.stubs)
fakes.stub_out_rate_limiting(self.stubs)
self.stubs.Set(cinder.API, "delete", fakes.stub_volume_delete)
self.stubs.Set(cinder.API, "get", fakes.stub_volume_get)
self.stubs.Set(cinder.API, "get_all", fakes.stub_volume_get_all)
self.flags(
osapi_compute_extension=[
'nova.api.openstack.compute.contrib.select_extensions'],
osapi_compute_ext_list=['Volumes'])
self.context = context.get_admin_context()
self.app = self._get_app()
def _get_app(self):
return fakes.wsgi_app_v21()
def test_volume_create(self):
self.stubs.Set(cinder.API, "create", fakes.stub_volume_create)
vol = {"size": 100,
"display_name": "Volume Test Name",
"display_description": "Volume Test Desc",
"availability_zone": "zone1:host1"}
body = {"volume": vol}
req = fakes.HTTPRequest.blank(self.url_prefix + '/os-volumes')
req.method = 'POST'
req.body = jsonutils.dumps(body)
req.headers['content-type'] = 'application/json'
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, 200)
resp_dict = jsonutils.loads(resp.body)
self.assertIn('volume', resp_dict)
self.assertEqual(resp_dict['volume']['size'],
vol['size'])
self.assertEqual(resp_dict['volume']['displayName'],
vol['display_name'])
self.assertEqual(resp_dict['volume']['displayDescription'],
vol['display_description'])
self.assertEqual(resp_dict['volume']['availabilityZone'],
vol['availability_zone'])
def _test_volume_create_bad(self, cinder_exc, api_exc):
def fake_volume_create(self, context, size, name, description,
snapshot, **param):
raise cinder_exc
self.stubs.Set(cinder.API, "create", fake_volume_create)
vol = {"size": '#$?',
"display_name": "Volume Test Name",
"display_description": "Volume Test Desc",
"availability_zone": "zone1:host1"}
body = {"volume": vol}
req = fakes.HTTPRequest.blank(self.url_prefix + '/os-volumes')
self.assertRaises(api_exc,
volumes.VolumeController().create, req, body=body)
@mock.patch.object(cinder.API, 'get_snapshot')
@mock.patch.object(cinder.API, 'create')
def test_volume_create_bad_snapshot_id(self, mock_create, mock_get):
vol = {"snapshot_id": '1'}
body = {"volume": vol}
mock_get.side_effect = exception.SnapshotNotFound(snapshot_id='1')
req = fakes.HTTPRequest.blank(self.url_prefix + '/os-volumes')
self.assertRaises(webob.exc.HTTPNotFound,
volumes.VolumeController().create, req, body=body)
def test_volume_create_bad_input(self):
self._test_volume_create_bad(exception.InvalidInput(reason='fake'),
webob.exc.HTTPBadRequest)
def test_volume_create_bad_quota(self):
self._test_volume_create_bad(exception.OverQuota(overs='fake'),
webob.exc.HTTPForbidden)
def test_volume_index(self):
req = fakes.HTTPRequest.blank(self.url_prefix + '/os-volumes')
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, 200)
def test_volume_detail(self):
req = fakes.HTTPRequest.blank(self.url_prefix + '/os-volumes/detail')
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, 200)
def test_volume_show(self):
req = fakes.HTTPRequest.blank(self.url_prefix + '/os-volumes/123')
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, 200)
def test_volume_show_no_volume(self):
self.stubs.Set(cinder.API, "get", fakes.stub_volume_notfound)
req = fakes.HTTPRequest.blank(self.url_prefix + '/os-volumes/456')
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, 404)
self.assertIn('Volume 456 could not be found.', resp.body)
def test_volume_delete(self):
req = fakes.HTTPRequest.blank(self.url_prefix + '/os-volumes/123')
req.method = 'DELETE'
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, 202)
def test_volume_delete_no_volume(self):
self.stubs.Set(cinder.API, "delete", fakes.stub_volume_notfound)
req = fakes.HTTPRequest.blank(self.url_prefix + '/os-volumes/456')
req.method = 'DELETE'
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, 404)
self.assertIn('Volume 456 could not be found.', resp.body)
class VolumeApiTestV2(VolumeApiTestV21):
def setUp(self):
super(VolumeApiTestV2, self).setUp()
self.flags(
osapi_compute_extension=[
'nova.api.openstack.compute.contrib.select_extensions'],
osapi_compute_ext_list=['Volumes'])
self.context = context.get_admin_context()
self.app = self._get_app()
def _get_app(self):
return fakes.wsgi_app()
class VolumeAttachTestsV21(test.NoDBTestCase):
validation_error = exception.ValidationError
def setUp(self):
super(VolumeAttachTestsV21, self).setUp()
self.stubs.Set(db, 'block_device_mapping_get_all_by_instance',
fake_bdms_get_all_by_instance)
self.stubs.Set(compute_api.API, 'get', fake_get_instance)
self.stubs.Set(cinder.API, 'get', fake_get_volume)
self.context = context.get_admin_context()
self.expected_show = {'volumeAttachment':
{'device': '/dev/fake0',
'serverId': FAKE_UUID,
'id': FAKE_UUID_A,
'volumeId': FAKE_UUID_A
}}
self._set_up_controller()
def _set_up_controller(self):
self.attachments = volumes_v21.VolumeAttachmentController()
def test_show(self):
req = fakes.HTTPRequest.blank(
'/v2/servers/id/os-volume_attachments/uuid')
req.method = 'POST'
req.body = jsonutils.dumps({})
req.headers['content-type'] = 'application/json'
req.environ['nova.context'] = self.context
result = self.attachments.show(req, FAKE_UUID, FAKE_UUID_A)
self.assertEqual(self.expected_show, result)
@mock.patch.object(compute_api.API, 'get',
side_effect=exception.InstanceNotFound(instance_id=FAKE_UUID))
def test_show_no_instance(self, mock_mr):
req = fakes.HTTPRequest.blank(
'/v2/servers/id/os-volume_attachments/uuid')
req.method = 'POST'
req.body = jsonutils.dumps({})
req.headers['content-type'] = 'application/json'
req.environ['nova.context'] = self.context
self.assertRaises(exc.HTTPNotFound,
self.attachments.show,
req,
FAKE_UUID,
FAKE_UUID_A)
@mock.patch.object(objects.BlockDeviceMappingList,
'get_by_instance_uuid', return_value=None)
def test_show_no_bdms(self, mock_mr):
req = fakes.HTTPRequest.blank(
'/v2/servers/id/os-volume_attachments/uuid')
req.method = 'POST'
req.body = jsonutils.dumps({})
req.headers['content-type'] = 'application/json'
req.environ['nova.context'] = self.context
self.assertRaises(exc.HTTPNotFound,
self.attachments.show,
req,
FAKE_UUID,
FAKE_UUID_A)
def test_show_bdms_no_mountpoint(self):
FAKE_UUID_NOTEXIST = '00000000-aaaa-aaaa-aaaa-aaaaaaaaaaaa'
req = fakes.HTTPRequest.blank(
'/v2/servers/id/os-volume_attachments/uuid')
req.method = 'POST'
req.body = jsonutils.dumps({})
req.headers['content-type'] = 'application/json'
req.environ['nova.context'] = self.context
self.assertRaises(exc.HTTPNotFound,
self.attachments.show,
req,
FAKE_UUID,
FAKE_UUID_NOTEXIST)
def test_detach(self):
self.stubs.Set(compute_api.API,
'detach_volume',
fake_detach_volume)
req = fakes.HTTPRequest.blank(
'/v2/servers/id/os-volume_attachments/uuid')
req.method = 'DELETE'
req.headers['content-type'] = 'application/json'
req.environ['nova.context'] = self.context
result = self.attachments.delete(req, FAKE_UUID, FAKE_UUID_A)
# NOTE: on v2.1, http status code is set as wsgi_code of API
# method instead of status_int in a response object.
if isinstance(self.attachments,
volumes_v21.VolumeAttachmentController):
status_int = self.attachments.delete.wsgi_code
else:
status_int = result.status_int
self.assertEqual(202, status_int)
def test_detach_vol_not_found(self):
self.stubs.Set(compute_api.API,
'detach_volume',
fake_detach_volume)
req = fakes.HTTPRequest.blank(
'/v2/servers/id/os-volume_attachments/uuid')
req.method = 'DELETE'
req.headers['content-type'] = 'application/json'
req.environ['nova.context'] = self.context
self.assertRaises(exc.HTTPNotFound,
self.attachments.delete,
req,
FAKE_UUID,
FAKE_UUID_C)
@mock.patch('nova.objects.BlockDeviceMapping.is_root',
new_callable=mock.PropertyMock)
def test_detach_vol_root(self, mock_isroot):
req = fakes.HTTPRequest.blank(
'/v2/servers/id/os-volume_attachments/uuid')
req.method = 'DELETE'
req.headers['content-type'] = 'application/json'
req.environ['nova.context'] = self.context
mock_isroot.return_value = True
self.assertRaises(exc.HTTPForbidden,
self.attachments.delete,
req,
FAKE_UUID,
FAKE_UUID_A)
def test_detach_volume_from_locked_server(self):
def fake_detach_volume_from_locked_server(self, context,
instance, volume):
raise exception.InstanceIsLocked(instance_uuid=instance['uuid'])
self.stubs.Set(compute_api.API,
'detach_volume',
fake_detach_volume_from_locked_server)
req = fakes.HTTPRequest.blank(
'/v2/servers/id/os-volume_attachments/uuid')
req.method = 'DELETE'
req.headers['content-type'] = 'application/json'
req.environ['nova.context'] = self.context
self.assertRaises(webob.exc.HTTPConflict, self.attachments.delete,
req, FAKE_UUID, FAKE_UUID_A)
def test_attach_volume(self):
self.stubs.Set(compute_api.API,
'attach_volume',
fake_attach_volume)
body = {'volumeAttachment': {'volumeId': FAKE_UUID_A,
'device': '/dev/fake'}}
req = fakes.HTTPRequest.blank('/v2/servers/id/os-volume_attachments')
req.method = 'POST'
req.body = jsonutils.dumps({})
req.headers['content-type'] = 'application/json'
req.environ['nova.context'] = self.context
result = self.attachments.create(req, FAKE_UUID, body=body)
self.assertEqual(result['volumeAttachment']['id'],
'00000000-aaaa-aaaa-aaaa-000000000000')
@mock.patch.object(compute_api.API, 'attach_volume',
return_value='/dev/myfake')
def test_attach_volume_with_auto_device(self, mock_attach):
body = {'volumeAttachment': {'volumeId': FAKE_UUID_A,
'device': None}}
req = fakes.HTTPRequest.blank('/v2/servers/id/os-volume_attachments')
req.method = 'POST'
req.body = jsonutils.dumps({})
req.headers['content-type'] = 'application/json'
req.environ['nova.context'] = self.context
result = self.attachments.create(req, FAKE_UUID, body=body)
self.assertEqual(result['volumeAttachment']['id'],
'00000000-aaaa-aaaa-aaaa-000000000000')
self.assertEqual(result['volumeAttachment']['device'],
'/dev/myfake')
def test_attach_volume_to_locked_server(self):
def fake_attach_volume_to_locked_server(self, context, instance,
volume_id, device=None):
raise exception.InstanceIsLocked(instance_uuid=instance['uuid'])
self.stubs.Set(compute_api.API,
'attach_volume',
fake_attach_volume_to_locked_server)
body = {'volumeAttachment': {'volumeId': FAKE_UUID_A,
'device': '/dev/fake'}}
req = fakes.HTTPRequest.blank('/v2/servers/id/os-volume_attachments')
req.method = 'POST'
req.body = jsonutils.dumps({})
req.headers['content-type'] = 'application/json'
req.environ['nova.context'] = self.context
self.assertRaises(webob.exc.HTTPConflict, self.attachments.create,
req, FAKE_UUID, body=body)
def test_attach_volume_bad_id(self):
self.stubs.Set(compute_api.API,
'attach_volume',
fake_attach_volume)
body = {
'volumeAttachment': {
'device': None,
'volumeId': 'TESTVOLUME',
}
}
req = fakes.HTTPRequest.blank('/v2/servers/id/os-volume_attachments')
req.method = 'POST'
req.body = jsonutils.dumps({})
req.headers['content-type'] = 'application/json'
req.environ['nova.context'] = self.context
self.assertRaises(self.validation_error, self.attachments.create,
req, FAKE_UUID, body=body)
def test_attach_volume_without_volumeId(self):
self.stubs.Set(compute_api.API,
'attach_volume',
fake_attach_volume)
body = {
'volumeAttachment': {
'device': None
}
}
req = fakes.HTTPRequest.blank('/v2/servers/id/os-volume_attachments')
req.method = 'POST'
req.body = jsonutils.dumps({})
req.headers['content-type'] = 'application/json'
req.environ['nova.context'] = self.context
self.assertRaises(self.validation_error, self.attachments.create,
req, FAKE_UUID, body=body)
def test_attach_volume_with_extra_arg(self):
body = {'volumeAttachment': {'volumeId': FAKE_UUID_A,
'device': '/dev/fake',
'extra': 'extra_arg'}}
req = fakes.HTTPRequest.blank('/v2/servers/id/os-volume_attachments')
req.method = 'POST'
req.body = jsonutils.dumps({})
req.headers['content-type'] = 'application/json'
req.environ['nova.context'] = self.context
self.assertRaises(self.validation_error, self.attachments.create,
req, FAKE_UUID, body=body)
def _test_swap(self, attachments, uuid=FAKE_UUID_A,
fake_func=None, body=None):
fake_func = fake_func or fake_swap_volume
self.stubs.Set(compute_api.API,
'swap_volume',
fake_func)
body = body or {'volumeAttachment': {'volumeId': FAKE_UUID_B}}
req = fakes.HTTPRequest.blank(
'/v2/servers/id/os-volume_attachments/uuid')
req.method = 'PUT'
req.body = jsonutils.dumps({})
req.headers['content-type'] = 'application/json'
req.environ['nova.context'] = self.context
return attachments.update(req, FAKE_UUID, uuid, body=body)
def test_swap_volume_for_locked_server(self):
def fake_swap_volume_for_locked_server(self, context, instance,
old_volume, new_volume):
raise exception.InstanceIsLocked(instance_uuid=instance['uuid'])
self.assertRaises(webob.exc.HTTPConflict, self._test_swap,
self.attachments,
fake_func=fake_swap_volume_for_locked_server)
def test_swap_volume(self):
result = self._test_swap(self.attachments)
# NOTE: on v2.1, http status code is set as wsgi_code of API
# method instead of status_int in a response object.
if isinstance(self.attachments,
volumes_v21.VolumeAttachmentController):
status_int = self.attachments.update.wsgi_code
else:
status_int = result.status_int
self.assertEqual(202, status_int)
def test_swap_volume_no_attachment(self):
self.assertRaises(exc.HTTPNotFound, self._test_swap,
self.attachments, FAKE_UUID_C)
def test_swap_volume_without_volumeId(self):
body = {'volumeAttachment': {'device': '/dev/fake'}}
self.assertRaises(self.validation_error,
self._test_swap,
self.attachments,
body=body)
def test_swap_volume_with_extra_arg(self):
body = {'volumeAttachment': {'volumeId': FAKE_UUID_A,
'device': '/dev/fake'}}
self.assertRaises(self.validation_error,
self._test_swap,
self.attachments,
body=body)
class VolumeAttachTestsV2(VolumeAttachTestsV21):
validation_error = webob.exc.HTTPBadRequest
def _set_up_controller(self):
ext_mgr = extensions.ExtensionManager()
ext_mgr.extensions = {'os-volume-attachment-update'}
self.attachments = volumes.VolumeAttachmentController(ext_mgr)
ext_mgr_no_update = extensions.ExtensionManager()
ext_mgr_no_update.extensions = {}
self.attachments_no_update = volumes.VolumeAttachmentController(
ext_mgr_no_update)
def test_swap_volume_no_extension(self):
self.assertRaises(webob.exc.HTTPBadRequest, self._test_swap,
self.attachments_no_update)
@mock.patch.object(compute_api.API, 'attach_volume',
return_value=[])
def test_attach_volume_with_extra_arg(self, mock_attach):
# NOTE(gmann): V2 does not perform strong input validation
# so volume is attached successfully even with extra arg in
# request body.
body = {'volumeAttachment': {'volumeId': FAKE_UUID_A,
'device': '/dev/fake',
'extra': 'extra_arg'}}
req = fakes.HTTPRequest.blank('/v2/servers/id/os-volume_attachments')
req.method = 'POST'
req.body = jsonutils.dumps({})
req.headers['content-type'] = 'application/json'
req.environ['nova.context'] = self.context
result = self.attachments.create(req, FAKE_UUID, body=body)
self.assertEqual(result['volumeAttachment']['id'],
'00000000-aaaa-aaaa-aaaa-000000000000')
def test_swap_volume_with_extra_arg(self):
# NOTE(gmann): V2 does not perform strong input validation.
# Volume is swapped successfully even with extra arg in
# request body. So 'pass' this test for V2.
pass
class CommonBadRequestTestCase(object):
    """
    Tests of the places where we raise 400 Bad Request.
    """
    resource = None
    entity_name = None
    controller_cls = None
    kwargs = {}
    bad_request = exc.HTTPBadRequest
def setUp(self):
super(CommonBadRequestTestCase, self).setUp()
self.controller = self.controller_cls()
def _bad_request_create(self, body):
req = fakes.HTTPRequest.blank('/v2/fake/' + self.resource)
req.method = 'POST'
kwargs = self.kwargs.copy()
kwargs['body'] = body
self.assertRaises(self.bad_request,
self.controller.create, req, **kwargs)
def test_create_no_body(self):
self._bad_request_create(body=None)
def test_create_missing_volume(self):
body = {'foo': {'a': 'b'}}
self._bad_request_create(body=body)
def test_create_malformed_entity(self):
body = {self.entity_name: 'string'}
self._bad_request_create(body=body)
class BadRequestVolumeTestCaseV21(CommonBadRequestTestCase,
test.NoDBTestCase):
resource = 'os-volumes'
entity_name = 'volume'
controller_cls = volumes_v21.VolumeController
bad_request = exception.ValidationError
class BadRequestVolumeTestCaseV2(BadRequestVolumeTestCaseV21):
controller_cls = volumes.VolumeController
bad_request = exc.HTTPBadRequest
class BadRequestAttachmentTestCase(CommonBadRequestTestCase,
test.NoDBTestCase):
resource = 'servers/' + FAKE_UUID + '/os-volume_attachments'
entity_name = 'volumeAttachment'
controller_cls = volumes.VolumeAttachmentController
kwargs = {'server_id': FAKE_UUID}
class BadRequestSnapshotTestCaseV21(CommonBadRequestTestCase,
test.NoDBTestCase):
resource = 'os-snapshots'
entity_name = 'snapshot'
controller_cls = volumes_v21.SnapshotController
bad_request = exception.ValidationError
class BadRequestSnapshotTestCaseV2(BadRequestSnapshotTestCaseV21):
controller_cls = volumes.SnapshotController
bad_request = exc.HTTPBadRequest
class AssistedSnapshotCreateTestCaseV21(test.NoDBTestCase):
assisted_snaps = assisted_snaps_v21
bad_request = exception.ValidationError
def setUp(self):
super(AssistedSnapshotCreateTestCaseV21, self).setUp()
self.controller = \
self.assisted_snaps.AssistedVolumeSnapshotsController()
self.stubs.Set(compute_api.API, 'volume_snapshot_create',
fake_compute_volume_snapshot_create)
def test_assisted_create(self):
req = fakes.HTTPRequest.blank('/v2/fake/os-assisted-volume-snapshots')
body = {'snapshot':
{'volume_id': '1',
'create_info': {'type': 'qcow2',
'new_file': 'new_file',
'snapshot_id': 'snapshot_id'}}}
req.method = 'POST'
self.controller.create(req, body=body)
def test_assisted_create_missing_create_info(self):
req = fakes.HTTPRequest.blank('/v2/fake/os-assisted-volume-snapshots')
body = {'snapshot': {'volume_id': '1'}}
req.method = 'POST'
self.assertRaises(self.bad_request, self.controller.create,
req, body=body)
class AssistedSnapshotCreateTestCaseV2(AssistedSnapshotCreateTestCaseV21):
assisted_snaps = assisted_snaps_v2
bad_request = webob.exc.HTTPBadRequest
class AssistedSnapshotDeleteTestCaseV21(test.NoDBTestCase):
assisted_snaps = assisted_snaps_v21
def _check_status(self, expected_status, res, controller_method):
self.assertEqual(expected_status, controller_method.wsgi_code)
def setUp(self):
super(AssistedSnapshotDeleteTestCaseV21, self).setUp()
self.controller = \
self.assisted_snaps.AssistedVolumeSnapshotsController()
self.stubs.Set(compute_api.API, 'volume_snapshot_delete',
fake_compute_volume_snapshot_delete)
def test_assisted_delete(self):
params = {
'delete_info': jsonutils.dumps({'volume_id': '1'}),
}
req = fakes.HTTPRequest.blank(
'/v2/fake/os-assisted-volume-snapshots?%s' %
urllib.parse.urlencode(params))
req.method = 'DELETE'
result = self.controller.delete(req, '5')
self._check_status(204, result, self.controller.delete)
def test_assisted_delete_missing_delete_info(self):
req = fakes.HTTPRequest.blank('/v2/fake/os-assisted-volume-snapshots')
req.method = 'DELETE'
self.assertRaises(webob.exc.HTTPBadRequest, self.controller.delete,
req, '5')
class AssistedSnapshotDeleteTestCaseV2(AssistedSnapshotDeleteTestCaseV21):
assisted_snaps = assisted_snaps_v2
def _check_status(self, expected_status, res, controller_method):
self.assertEqual(expected_status, res.status_int)
class TestAssistedVolumeSnapshotsPolicyEnforcementV21(test.NoDBTestCase):
def setUp(self):
super(TestAssistedVolumeSnapshotsPolicyEnforcementV21, self).setUp()
self.controller = (
assisted_snaps_v21.AssistedVolumeSnapshotsController())
self.req = fakes.HTTPRequest.blank('')
def test_create_assisted_volumes_snapshots_policy_failed(self):
rule_name = "os_compute_api:os-assisted-volume-snapshots:create"
self.policy.set_rules({rule_name: "project:non_fake"})
body = {'snapshot':
{'volume_id': '1',
'create_info': {'type': 'qcow2',
'new_file': 'new_file',
'snapshot_id': 'snapshot_id'}}}
exc = self.assertRaises(
exception.PolicyNotAuthorized,
self.controller.create, self.req, body=body)
self.assertEqual(
"Policy doesn't allow %s to be performed." % rule_name,
exc.format_message())
def test_delete_assisted_volumes_snapshots_policy_failed(self):
rule_name = "os_compute_api:os-assisted-volume-snapshots:delete"
self.policy.set_rules({rule_name: "project:non_fake"})
exc = self.assertRaises(
exception.PolicyNotAuthorized,
self.controller.delete, self.req, '5')
self.assertEqual(
"Policy doesn't allow %s to be performed." % rule_name,
exc.format_message())
class TestVolumeAttachPolicyEnforcementV21(test.NoDBTestCase):
def setUp(self):
super(TestVolumeAttachPolicyEnforcementV21, self).setUp()
self.controller = volumes_v21.VolumeAttachmentController()
self.req = fakes.HTTPRequest.blank('')
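    # Helper: install the given policy rules, call the controller method, and
    # assert that PolicyNotAuthorized is raised for the expected rule name.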
def _common_policy_check(self, rules, rule_name, func, *arg, **kwarg):
self.policy.set_rules(rules)
exc = self.assertRaises(
exception.PolicyNotAuthorized, func, *arg, **kwarg)
self.assertEqual(
"Policy doesn't allow %s to be performed." % rule_name,
exc.format_message())
def test_index_volume_attach_policy_failed(self):
rule_name = "os_compute_api:os-volumes-attachments:index"
rules = {rule_name: "project:non_fake"}
self._common_policy_check(rules, rule_name,
self.controller.index, self.req, FAKE_UUID)
def test_show_volume_attach_policy_failed(self):
rule_name = "os_compute_api:os-volumes"
rules = {"os_compute_api:os-volumes-attachments:show": "@",
rule_name: "project:non_fake"}
self._common_policy_check(rules, rule_name, self.controller.show,
self.req, FAKE_UUID, FAKE_UUID_A)
rule_name = "os_compute_api:os-volumes-attachments:show"
rules = {"os_compute_api:os-volumes": "@",
rule_name: "project:non_fake"}
self._common_policy_check(rules, rule_name, self.controller.show,
self.req, FAKE_UUID, FAKE_UUID_A)
def test_create_volume_attach_policy_failed(self):
rule_name = "os_compute_api:os-volumes"
rules = {"os_compute_api:os-volumes-attachments:create": "@",
rule_name: "project:non_fake"}
body = {'volumeAttachment': {'volumeId': FAKE_UUID_A,
'device': '/dev/fake'}}
self._common_policy_check(rules, rule_name, self.controller.create,
self.req, FAKE_UUID, body=body)
rule_name = "os_compute_api:os-volumes-attachments:create"
rules = {"os_compute_api:os-volumes": "@",
rule_name: "project:non_fake"}
self._common_policy_check(rules, rule_name, self.controller.create,
self.req, FAKE_UUID, body=body)
def test_update_volume_attach_policy_failed(self):
rule_name = "os_compute_api:os-volumes"
rules = {"os_compute_api:os-volumes-attachments:update": "@",
rule_name: "project:non_fake"}
body = {'volumeAttachment': {'volumeId': FAKE_UUID_B}}
self._common_policy_check(rules, rule_name, self.controller.update,
self.req, FAKE_UUID, FAKE_UUID_A, body=body)
rule_name = "os_compute_api:os-volumes-attachments:update"
rules = {"os_compute_api:os-volumes": "@",
rule_name: "project:non_fake"}
self._common_policy_check(rules, rule_name, self.controller.update,
self.req, FAKE_UUID, FAKE_UUID_A, body=body)
def test_delete_volume_attach_policy_failed(self):
rule_name = "os_compute_api:os-volumes"
rules = {"os_compute_api:os-volumes-attachments:delete": "@",
rule_name: "project:non_fake"}
self._common_policy_check(rules, rule_name, self.controller.delete,
self.req, FAKE_UUID, FAKE_UUID_A)
rule_name = "os_compute_api:os-volumes-attachments:delete"
rules = {"os_compute_api:os-volumes": "@",
rule_name: "project:non_fake"}
self._common_policy_check(rules, rule_name, self.controller.delete,
self.req, FAKE_UUID, FAKE_UUID_A)
|
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Unit tests for the API endpoint"""
import datetime
import httplib
import random
import StringIO
import boto
from boto.ec2 import regioninfo
from boto import exception as boto_exc
import webob
from nova import block_device
from nova import context
from nova import exception
from nova import test
from nova.api import auth
from nova.api import ec2
from nova.api.ec2 import apirequest
from nova.api.ec2 import cloud
from nova.api.ec2 import ec2utils
class FakeHttplibSocket(object):
"""a fake socket implementation for httplib.HTTPResponse, trivial"""
def __init__(self, response_string):
self.response_string = response_string
self._buffer = StringIO.StringIO(response_string)
def makefile(self, _mode, _other):
"""Returns the socket's internal buffer"""
return self._buffer
class FakeHttplibConnection(object):
    """A fake httplib.HTTPConnection for boto to use.
    Requests made via this connection are translated and routed into
    our WSGI app; we then wait for the response and turn it back into
    the httplib.HTTPResponse that boto expects.
    """
def __init__(self, app, host, is_secure=False):
self.app = app
self.host = host
def request(self, method, path, data, headers):
req = webob.Request.blank(path)
req.method = method
req.body = data
req.headers = headers
req.headers['Accept'] = 'text/html'
req.host = self.host
# Call the WSGI app, get the HTTP response
resp = str(req.get_response(self.app))
# For some reason, the response doesn't have "HTTP/1.0 " prepended; I
# guess that's a function the web server usually provides.
resp = "HTTP/1.0 %s" % resp
self.sock = FakeHttplibSocket(resp)
self.http_response = httplib.HTTPResponse(self.sock)
# NOTE(vish): boto is accessing private variables for some reason
self._HTTPConnection__response = self.http_response
self.http_response.begin()
def getresponse(self):
return self.http_response
def getresponsebody(self):
return self.sock.response_string
def close(self):
"""Required for compatibility with boto/tornado"""
pass
class XmlConversionTestCase(test.TestCase):
"""Unit test api xml conversion"""
def test_number_conversion(self):
conv = ec2utils._try_convert
self.assertEqual(conv('None'), None)
self.assertEqual(conv('True'), True)
self.assertEqual(conv('TRUE'), True)
self.assertEqual(conv('true'), True)
self.assertEqual(conv('False'), False)
self.assertEqual(conv('FALSE'), False)
self.assertEqual(conv('false'), False)
self.assertEqual(conv('0'), 0)
self.assertEqual(conv('42'), 42)
self.assertEqual(conv('3.14'), 3.14)
self.assertEqual(conv('-57.12'), -57.12)
self.assertEqual(conv('0x57'), 0x57)
self.assertEqual(conv('-0x57'), -0x57)
self.assertEqual(conv('-'), '-')
self.assertEqual(conv('-0'), 0)
self.assertEqual(conv('0.0'), 0.0)
self.assertEqual(conv('1e-8'), 0.0)
self.assertEqual(conv('-1e-8'), 0.0)
self.assertEqual(conv('0xDD8G'), '0xDD8G')
self.assertEqual(conv('0XDD8G'), '0XDD8G')
self.assertEqual(conv('-stringy'), '-stringy')
self.assertEqual(conv('stringy'), 'stringy')
self.assertEqual(conv('add'), 'add')
self.assertEqual(conv('remove'), 'remove')
self.assertEqual(conv(''), '')
class Ec2utilsTestCase(test.TestCase):
def test_ec2_id_to_id(self):
self.assertEqual(ec2utils.ec2_id_to_id('i-0000001e'), 30)
self.assertEqual(ec2utils.ec2_id_to_id('ami-1d'), 29)
self.assertEqual(ec2utils.ec2_id_to_id('snap-0000001c'), 28)
self.assertEqual(ec2utils.ec2_id_to_id('vol-0000001b'), 27)
def test_bad_ec2_id(self):
self.assertRaises(exception.InvalidEc2Id,
ec2utils.ec2_id_to_id,
'badone')
def test_id_to_ec2_id(self):
self.assertEqual(ec2utils.id_to_ec2_id(30), 'i-0000001e')
self.assertEqual(ec2utils.id_to_ec2_id(29, 'ami-%08x'), 'ami-0000001d')
self.assertEqual(ec2utils.id_to_ec2_snap_id(28), 'snap-0000001c')
self.assertEqual(ec2utils.id_to_ec2_vol_id(27), 'vol-0000001b')
def test_dict_from_dotted_str(self):
in_str = [('BlockDeviceMapping.1.DeviceName', '/dev/sda1'),
('BlockDeviceMapping.1.Ebs.SnapshotId', 'snap-0000001c'),
('BlockDeviceMapping.1.Ebs.VolumeSize', '80'),
('BlockDeviceMapping.1.Ebs.DeleteOnTermination', 'false'),
('BlockDeviceMapping.2.DeviceName', '/dev/sdc'),
('BlockDeviceMapping.2.VirtualName', 'ephemeral0')]
expected_dict = {
'block_device_mapping': {
'1': {'device_name': '/dev/sda1',
'ebs': {'snapshot_id': 'snap-0000001c',
'volume_size': 80,
'delete_on_termination': False}},
'2': {'device_name': '/dev/sdc',
'virtual_name': 'ephemeral0'}}}
out_dict = ec2utils.dict_from_dotted_str(in_str)
self.assertDictMatch(out_dict, expected_dict)
    def test_properties_root_device_name(self):
mappings = [{"device": "/dev/sda1", "virtual": "root"}]
properties0 = {'mappings': mappings}
properties1 = {'root_device_name': '/dev/sdb', 'mappings': mappings}
root_device_name = block_device.properties_root_device_name(
properties0)
self.assertEqual(root_device_name, '/dev/sda1')
root_device_name = block_device.properties_root_device_name(
properties1)
self.assertEqual(root_device_name, '/dev/sdb')
def test_mapping_prepend_dev(self):
mappings = [
{'virtual': 'ami',
'device': 'sda1'},
{'virtual': 'root',
'device': '/dev/sda1'},
{'virtual': 'swap',
'device': 'sdb1'},
{'virtual': 'swap',
'device': '/dev/sdb2'},
{'virtual': 'ephemeral0',
'device': 'sdc1'},
{'virtual': 'ephemeral1',
'device': '/dev/sdc1'}]
expected_result = [
{'virtual': 'ami',
'device': 'sda1'},
{'virtual': 'root',
'device': '/dev/sda1'},
{'virtual': 'swap',
'device': '/dev/sdb1'},
{'virtual': 'swap',
'device': '/dev/sdb2'},
{'virtual': 'ephemeral0',
'device': '/dev/sdc1'},
{'virtual': 'ephemeral1',
'device': '/dev/sdc1'}]
self.assertDictListMatch(block_device.mappings_prepend_dev(mappings),
expected_result)
class ApiEc2TestCase(test.TestCase):
"""Unit test for the cloud controller on an EC2 API"""
def setUp(self):
super(ApiEc2TestCase, self).setUp()
self.host = '127.0.0.1'
# NOTE(vish): skipping the Authorizer
roles = ['sysadmin', 'netadmin']
ctxt = context.RequestContext('fake', 'fake', roles=roles)
self.app = auth.InjectContext(ctxt,
ec2.Requestify(ec2.Authorizer(ec2.Executor()),
'nova.api.ec2.cloud.CloudController'))
    def expect_http(self, host=None, is_secure=False, api_version=None):
        """Set up an EC2 connection and return the stubbed fake HTTP connection."""
self.ec2 = boto.connect_ec2(
aws_access_key_id='fake',
aws_secret_access_key='fake',
is_secure=False,
region=regioninfo.RegionInfo(None, 'test', self.host),
port=8773,
path='/services/Cloud')
if api_version:
self.ec2.APIVersion = api_version
self.mox.StubOutWithMock(self.ec2, 'new_http_connection')
self.http = FakeHttplibConnection(
self.app, '%s:8773' % (self.host), False)
# pylint: disable=E1103
if boto.Version >= '2':
self.ec2.new_http_connection(host or '%s:8773' % (self.host),
is_secure).AndReturn(self.http)
else:
self.ec2.new_http_connection(host, is_secure).AndReturn(self.http)
return self.http
def test_return_valid_isoformat(self):
"""
Ensure that the ec2 api returns datetime in xs:dateTime
(which apparently isn't datetime.isoformat())
NOTE(ken-pepple): https://bugs.launchpad.net/nova/+bug/721297
"""
conv = apirequest._database_to_isoformat
# sqlite database representation with microseconds
time_to_convert = datetime.datetime.strptime(
"2011-02-21 20:14:10.634276",
"%Y-%m-%d %H:%M:%S.%f")
self.assertEqual(
conv(time_to_convert),
'2011-02-21T20:14:10.634Z')
        # mysql database representation (no microseconds)
time_to_convert = datetime.datetime.strptime(
"2011-02-21 19:56:18",
"%Y-%m-%d %H:%M:%S")
self.assertEqual(
conv(time_to_convert),
'2011-02-21T19:56:18.000Z')
def test_xmlns_version_matches_request_version(self):
self.expect_http(api_version='2010-10-30')
self.mox.ReplayAll()
# Any request should be fine
self.ec2.get_all_instances()
self.assertTrue(self.ec2.APIVersion in self.http.getresponsebody(),
'The version in the xmlns of the response does '
'not match the API version given in the request.')
def test_describe_instances(self):
"""Test that, after creating a user and a project, the describe
instances call to the API works properly"""
self.expect_http()
self.mox.ReplayAll()
self.assertEqual(self.ec2.get_all_instances(), [])
def test_terminate_invalid_instance(self):
"""Attempt to terminate an invalid instance"""
self.expect_http()
self.mox.ReplayAll()
self.assertRaises(boto_exc.EC2ResponseError,
self.ec2.terminate_instances, "i-00000005")
def test_get_all_key_pairs(self):
"""Test that, after creating a user and project and generating
a key pair, that the API call to list key pairs works properly"""
self.expect_http()
self.mox.ReplayAll()
keyname = "".join(random.choice("sdiuisudfsdcnpaqwertasd")
for x in range(random.randint(4, 8)))
# NOTE(vish): create depends on pool, so call helper directly
cloud._gen_key(context.get_admin_context(), 'fake', keyname)
rv = self.ec2.get_all_key_pairs()
results = [k for k in rv if k.name == keyname]
self.assertEquals(len(results), 1)
def test_create_duplicate_key_pair(self):
"""Test that, after successfully generating a keypair,
requesting a second keypair with the same name fails sanely"""
self.expect_http()
self.mox.ReplayAll()
keyname = "".join(random.choice("sdiuisudfsdcnpaqwertasd")
for x in range(random.randint(4, 8)))
# NOTE(vish): create depends on pool, so call helper directly
self.ec2.create_key_pair('test')
try:
self.ec2.create_key_pair('test')
        except boto_exc.EC2ResponseError as e:
if e.code == 'KeyPairExists':
pass
else:
self.fail("Unexpected EC2ResponseError: %s "
"(expected KeyPairExists)" % e.code)
else:
self.fail('Exception not raised.')
def test_get_all_security_groups(self):
"""Test that we can retrieve security groups"""
self.expect_http()
self.mox.ReplayAll()
rv = self.ec2.get_all_security_groups()
self.assertEquals(len(rv), 1)
self.assertEquals(rv[0].name, 'default')
def test_create_delete_security_group(self):
"""Test that we can create a security group"""
self.expect_http()
self.mox.ReplayAll()
security_group_name = "".join(random.choice("sdiuisudfsdcnpaqwertasd")
for x in range(random.randint(4, 8)))
self.ec2.create_security_group(security_group_name, 'test group')
self.expect_http()
self.mox.ReplayAll()
rv = self.ec2.get_all_security_groups()
self.assertEquals(len(rv), 2)
self.assertTrue(security_group_name in [group.name for group in rv])
self.expect_http()
self.mox.ReplayAll()
self.ec2.delete_security_group(security_group_name)
def test_group_name_valid_chars_security_group(self):
""" Test that we sanely handle invalid security group names.
API Spec states we should only accept alphanumeric characters,
spaces, dashes, and underscores. """
self.expect_http()
self.mox.ReplayAll()
# Test block group_name of non alphanumeric characters, spaces,
# dashes, and underscores.
security_group_name = "aa #^% -=99"
self.assertRaises(boto_exc.EC2ResponseError,
self.ec2.create_security_group,
security_group_name,
'test group')
def test_group_name_valid_length_security_group(self):
"""Test that we sanely handle invalid security group names.
API Spec states that the length should not exceed 255 chars """
self.expect_http()
self.mox.ReplayAll()
# Test block group_name > 255 chars
security_group_name = "".join(random.choice("poiuytrewqasdfghjklmnbvc")
for x in range(random.randint(256, 266)))
self.assertRaises(boto_exc.EC2ResponseError,
self.ec2.create_security_group,
security_group_name,
'test group')
def test_authorize_revoke_security_group_cidr(self):
"""
Test that we can add and remove CIDR based rules
to a security group
"""
self.expect_http()
self.mox.ReplayAll()
security_group_name = "".join(random.choice("sdiuisudfsdcnpaqwertasd")
for x in range(random.randint(4, 8)))
group = self.ec2.create_security_group(security_group_name,
'test group')
self.expect_http()
self.mox.ReplayAll()
group.connection = self.ec2
group.authorize('tcp', 80, 81, '0.0.0.0/0')
group.authorize('icmp', -1, -1, '0.0.0.0/0')
group.authorize('udp', 80, 81, '0.0.0.0/0')
group.authorize('tcp', 1, 65535, '0.0.0.0/0')
group.authorize('udp', 1, 65535, '0.0.0.0/0')
group.authorize('icmp', 1, 0, '0.0.0.0/0')
group.authorize('icmp', 0, 1, '0.0.0.0/0')
group.authorize('icmp', 0, 0, '0.0.0.0/0')
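        # Helper: the authorize call below is expected to fail with HTTP 400
        # and an error message containing the given text.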
def _assert(message, *args):
try:
group.authorize(*args)
except boto_exc.EC2ResponseError as e:
self.assertEqual(e.status, 400, 'Expected status to be 400')
self.assertIn(message, e.error_message, e.error_message)
else:
                raise self.failureException('EC2ResponseError not raised')
# Invalid CIDR address
_assert('Invalid CIDR', 'tcp', 80, 81, '0.0.0.0/0444')
# Missing ports
_assert('Not enough parameters', 'tcp', '0.0.0.0/0')
# from port cannot be greater than to port
_assert('Invalid port range', 'tcp', 100, 1, '0.0.0.0/0')
# For tcp, negative values are not allowed
_assert('Invalid port range', 'tcp', -1, 1, '0.0.0.0/0')
# For tcp, valid port range 1-65535
_assert('Invalid port range', 'tcp', 1, 65599, '0.0.0.0/0')
# Invalid Cidr for ICMP type
_assert('Invalid CIDR', 'icmp', -1, -1, '0.0.444.0/4')
# Invalid protocol
_assert('An unknown error has occurred', 'xyz', 1, 14, '0.0.0.0/0')
# Invalid port
_assert('An unknown error has occurred', 'tcp', " ", "81", '0.0.0.0/0')
# Invalid icmp port
_assert('An unknown error has occurred', 'icmp', " ", "81",
'0.0.0.0/0')
# Invalid CIDR Address
_assert('Invalid CIDR', 'icmp', -1, -1, '0.0.0.0')
# Invalid CIDR Address
_assert('Invalid CIDR', 'icmp', -1, -1, '0.0.0.0/')
# Invalid Cidr ports
_assert('Invalid port range', 'icmp', 1, 256, '0.0.0.0/0')
self.expect_http()
self.mox.ReplayAll()
rv = self.ec2.get_all_security_groups()
group = [grp for grp in rv if grp.name == security_group_name][0]
self.assertEquals(len(group.rules), 8)
self.assertEquals(int(group.rules[0].from_port), 80)
self.assertEquals(int(group.rules[0].to_port), 81)
self.assertEquals(len(group.rules[0].grants), 1)
self.assertEquals(str(group.rules[0].grants[0]), '0.0.0.0/0')
self.expect_http()
self.mox.ReplayAll()
group.connection = self.ec2
group.revoke('tcp', 80, 81, '0.0.0.0/0')
group.revoke('icmp', -1, -1, '0.0.0.0/0')
group.revoke('udp', 80, 81, '0.0.0.0/0')
group.revoke('tcp', 1, 65535, '0.0.0.0/0')
group.revoke('udp', 1, 65535, '0.0.0.0/0')
group.revoke('icmp', 1, 0, '0.0.0.0/0')
group.revoke('icmp', 0, 1, '0.0.0.0/0')
group.revoke('icmp', 0, 0, '0.0.0.0/0')
self.expect_http()
self.mox.ReplayAll()
self.ec2.delete_security_group(security_group_name)
self.expect_http()
self.mox.ReplayAll()
group.connection = self.ec2
rv = self.ec2.get_all_security_groups()
self.assertEqual(len(rv), 1)
self.assertEqual(rv[0].name, 'default')
def test_authorize_revoke_security_group_cidr_v6(self):
"""
Test that we can add and remove CIDR based rules
to a security group for IPv6
"""
self.expect_http()
self.mox.ReplayAll()
security_group_name = "".join(random.choice("sdiuisudfsdcnpaqwertasd")
for x in range(random.randint(4, 8)))
group = self.ec2.create_security_group(security_group_name,
'test group')
self.expect_http()
self.mox.ReplayAll()
group.connection = self.ec2
group.authorize('tcp', 80, 81, '::/0')
self.expect_http()
self.mox.ReplayAll()
rv = self.ec2.get_all_security_groups()
group = [grp for grp in rv if grp.name == security_group_name][0]
self.assertEquals(len(group.rules), 1)
self.assertEquals(int(group.rules[0].from_port), 80)
self.assertEquals(int(group.rules[0].to_port), 81)
self.assertEquals(len(group.rules[0].grants), 1)
self.assertEquals(str(group.rules[0].grants[0]), '::/0')
self.expect_http()
self.mox.ReplayAll()
group.connection = self.ec2
group.revoke('tcp', 80, 81, '::/0')
self.expect_http()
self.mox.ReplayAll()
self.ec2.delete_security_group(security_group_name)
self.expect_http()
self.mox.ReplayAll()
group.connection = self.ec2
rv = self.ec2.get_all_security_groups()
self.assertEqual(len(rv), 1)
self.assertEqual(rv[0].name, 'default')
def test_authorize_revoke_security_group_foreign_group(self):
"""
Test that we can grant and revoke another security group access
to a security group
"""
self.expect_http()
self.mox.ReplayAll()
rand_string = 'sdiuisudfsdcnpaqwertasd'
security_group_name = "".join(random.choice(rand_string)
for x in range(random.randint(4, 8)))
other_security_group_name = "".join(random.choice(rand_string)
for x in range(random.randint(4, 8)))
group = self.ec2.create_security_group(security_group_name,
'test group')
self.expect_http()
self.mox.ReplayAll()
other_group = self.ec2.create_security_group(other_security_group_name,
'some other group')
self.expect_http()
self.mox.ReplayAll()
group.connection = self.ec2
group.authorize(src_group=other_group)
self.expect_http()
self.mox.ReplayAll()
rv = self.ec2.get_all_security_groups()
        # I don't bother checking that we actually find it here,
# because the create/delete unit test further up should
# be good enough for that.
for group in rv:
if group.name == security_group_name:
self.assertEquals(len(group.rules), 3)
self.assertEquals(len(group.rules[0].grants), 1)
self.assertEquals(str(group.rules[0].grants[0]), '%s-%s' %
(other_security_group_name, 'fake'))
self.expect_http()
self.mox.ReplayAll()
rv = self.ec2.get_all_security_groups()
for group in rv:
if group.name == security_group_name:
self.expect_http()
self.mox.ReplayAll()
group.connection = self.ec2
group.revoke(src_group=other_group)
self.expect_http()
self.mox.ReplayAll()
self.ec2.delete_security_group(security_group_name)
self.ec2.delete_security_group(other_security_group_name)
|
|
from epynet import Network, epanet2
from nose.tools import assert_equal, assert_almost_equal
import pandas as pd
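# These tests build a small demo network programmatically with the epynet API,
# solve it, and then check node, link and collection results step by step.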
class TestGeneratedNetwork(object):
@classmethod
def setup_class(self):
self.network = Network()
@classmethod
    def teardown_class(self):
self.network.ep.ENclose()
def test00_build_network(self):
network = self.network
network.ep.ENsettimeparam(epanet2.EN_DURATION, 10*3600)
# add nodes
reservoir = network.add_reservoir('in',0,30)
reservoir.elevation = 10
pattern_values = [1,2,3,4,5,4,3,2,1,1]
pattern = network.add_pattern('1',pattern_values)
junctions = {'2':(10,30,0),
'3':(20,30,0),
'4':(30,30,1),
'5':(30,20,1),
'6':(30,10,1),
'7':(40,10,1),
'8':(40,20,1),
'9':(40,30,1),
'10':(50,30,1)}
links = {'1':('in','2'),
'3':('3','4'),
'4':('4','5'),
'5':('5','6'),
'6':('6','7'),
'7':('7','8'),
'8':('8','9'),
'10':('5','8'),
'11':('4','9'),
'12':('9','11')}
for uid, coord in junctions.items():
node = network.add_junction(uid, coord[0], coord[1], elevation=0, basedemand=coord[2])
node.pattern = pattern
tank = network.add_tank('11',40,40, diameter=50, maxlevel=20, minlevel=0, tanklevel=10)
for uid, coord in links.items():
link = network.add_pipe(uid, coord[0], coord[1], diameter=100, length=100, roughness=0.1)
valve = network.add_valve('9','prv','9','10', diameter=100, setting=5)
pump = network.add_pump('2','2','3', speed=1)
curve = network.add_curve('1',[(100,50)])
pump.curve = curve
network.nodes['4'].elevation = 5
network.links['11'].diameter = 150
network.links['11'].minorloss = 0.1
network.solve()
def test01_network(self):
        # test node count
        assert_equal(len(self.network.nodes),11)
        # test link count
        assert_equal(len(self.network.links),12)
        # test reservoir count
        assert_equal(len(self.network.reservoirs),1)
        # test valve count
        assert_equal(len(self.network.valves),1)
        # test pump count
        assert_equal(len(self.network.pumps),1)
        # test tank count
        assert_equal(len(self.network.tanks),1)
def test02_link(self):
        # test the properties of a single link
link = self.network.links['11']
# pipe index and uid
assert_equal(link.uid,'11')
# from/to node
assert_equal(link.from_node.uid,'4')
assert_equal(link.to_node.uid,'9')
def test03_pipe(self):
        # test the properties of a single pipe
pipe = self.network.links['11']
# check type
assert_equal(pipe.link_type,'pipe')
assert_almost_equal(pipe.length,100,2)
assert_almost_equal(pipe.diameter,150,2)
assert_almost_equal(pipe.roughness,0.1,2)
assert_almost_equal(pipe.minorloss,0.1,2)
# flow
assert_almost_equal(pipe.flow,87.92,2)
        # velocity
assert_almost_equal(pipe.velocity,1.38,2)
# status
assert_equal(pipe.status,1)
# headloss
assert_almost_equal(pipe.headloss,1.29,2)
# upstream/downstream node
assert_equal(pipe.upstream_node.uid,'4')
assert_equal(pipe.downstream_node.uid,'9')
def test04_pump(self):
pump = self.network.pumps['2']
# check type
assert_equal(pump.link_type,'pump')
assert_equal(pump.speed,1.0)
assert_almost_equal(pump.flow,109.67,2)
# change speed
pump.speed = 1.5
assert_equal(pump.speed,1.5)
# resolve network
self.network.solve()
assert_almost_equal(pump.flow,164.5,2)
# revert speed
pump.speed = 1.0
self.network.solve()
def test05_valve(self):
valve = self.network.valves['9']
# check type
assert_equal(valve.link_type,'valve')
# check valve type
assert_equal(valve.valve_type,'PRV')
# valve settings
assert_equal(valve.setting,5)
assert_almost_equal(valve.downstream_node.pressure,5,2)
# change setting
valve.setting = 10
assert_equal(valve.setting,10)
self.network.solve()
assert_almost_equal(valve.downstream_node.pressure,10,2)
def test06_node(self):
node = self.network.nodes['4']
# uid
assert_equal(node.uid,'4')
# coordinates
#coordinates = node.coordinates
        # don't test these for created networks
#assert_almost_equal(coordinates[0],2103.02,2)
#assert_almost_equal(coordinates[1],5747.69,2)
# links
assert_equal(len(node.links),3)
# up and downstream links
assert_equal(len(node.downstream_links),2)
assert_equal(len(node.upstream_links),1)
# inflow
assert_equal(round(node.inflow,2),109.67)
# outflow
assert_equal(round(node.outflow,2),round(node.inflow,2)-node.demand)
# elevation
assert_equal(node.elevation,5)
# head
assert_equal(round(node.head,2),25.13)
def test07_junction(self):
junction = self.network.junctions['4']
assert_equal(round(junction.basedemand,2),1)
assert_equal(round(junction.demand,2),1)
def test08_tank(self):
tank = self.network.tanks['11']
assert_equal(tank.diameter,50)
assert_equal(round(tank.initvolume,2),19634.95)
assert_equal(tank.minvolume,0)
assert_equal(tank.minlevel,0)
assert_equal(tank.maxlevel,20)
assert_equal(round(tank.volume,2),19634.95)
assert_equal(round(tank.maxvolume),2*round(tank.volume))
def test09_time(self):
junction = self.network.junctions['4']
self.network.solve(3600)
assert_equal(round(junction.demand,2),2)
self.network.solve(7200)
assert_equal(round(junction.demand,2),3)
def test10_collections(self):
# collection attributes as pandas Series
assert_almost_equal(self.network.pipes.flow.mean(),46.78,2)
assert_almost_equal(self.network.pipes.diameter.max(),150,2)
assert_almost_equal(self.network.pipes.velocity.min(),0.105,2)
assert_equal(self.network.valves.setting.mean(),10)
assert_almost_equal(self.network.junctions.demand.mean(),2.33,2)
# filtering and slicing collections
assert_equal(len(self.network.pipes[self.network.pipes.velocity > 3]),3)
assert_equal(len(self.network.nodes[self.network.nodes.pressure < 20]),5)
#increase the size of all pipes
self.network.pipes.diameter += 500
assert_almost_equal(self.network.pipes.diameter.mean(),605,2)
self.network.pipes.diameter -= 500
self.network.solve()
# resize pipes, and recalculate velocity
self.network.pipes[self.network.pipes.velocity > 3].diameter += 100
self.network.solve()
assert_equal(len(self.network.pipes[self.network.pipes.velocity > 3]),0)
def test11_timeseries(self):
# run network
self.network.run()
# check return types
# should return Series
assert(isinstance(self.network.pipes['1'].velocity, pd.Series))
# should return Dataframe
assert(isinstance(self.network.pipes.velocity, pd.DataFrame))
# timeseries operations
# pipe 1 max velocity
assert_almost_equal(self.network.pipes['1'].velocity.mean(),1.66,2)
# all day mean velocity
assert_almost_equal(self.network.pipes.velocity.mean().mean(),1.14,2)
# test revert to steady state calculation
self.network.solve()
print(type(self.network.pipes['1'].velocity))
assert(isinstance(self.network.pipes['1'].velocity, float))
assert(isinstance(self.network.pipes.velocity, pd.Series))
def test12_deletion(self):
# Delete node
self.network.delete_node('10')
assert_equal(len(self.network.nodes), 10)
assert_equal(len(self.network.links), 11)
assert_equal(len(self.network.valves), 0)
self.network.solve()
assert_almost_equal(self.network.junctions['9'].pressure, 11.21, 2)
# Delete Tank
self.network.delete_node('11')
assert_equal(len(self.network.tanks), 0)
assert_equal(len(self.network.links), 10)
self.network.solve()
assert_almost_equal(self.network.junctions['9'].pressure, 76.60, 2)
# Delete link
self.network.delete_link('7')
assert_equal(len(self.network.links), 9)
self.network.solve()
assert_almost_equal(self.network.pipes['6'].flow, 1)
# delete pump
self.network.delete_link('2')
assert_equal(len(self.network.pumps), 0)
# add link instead
self.network.add_pipe('R2', '2', '3', diameter=200, length=100, roughness=0.1)
self.network.solve()
assert_almost_equal(self.network.junctions['9'].pressure, 9.99, 2)
|
|
import os
import pandas as pd
import pytest
import pickle
import numpy as np
import string
import dask
import dask.dataframe as dd
from dask.threaded import get as threaded_get
from dask.multiprocessing import get as mp_get
from dask.dataframe.shuffle import (shuffle,
partitioning_index,
rearrange_by_column,
rearrange_by_divisions,
maybe_buffered_partd)
from dask.async import get_sync
from dask.dataframe.utils import assert_eq, make_meta
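# Hand-built dask graph: three pandas partitions for collection 'x' with known
# divisions, shared as a small fixture by the shuffle tests below.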
dsk = {('x', 0): pd.DataFrame({'a': [1, 2, 3], 'b': [1, 4, 7]},
index=[0, 1, 3]),
('x', 1): pd.DataFrame({'a': [4, 5, 6], 'b': [2, 5, 8]},
index=[5, 6, 8]),
('x', 2): pd.DataFrame({'a': [7, 8, 9], 'b': [3, 6, 9]},
index=[9, 9, 9])}
meta = make_meta({'a': 'i8', 'b': 'i8'}, index=pd.Index([], 'i8'))
d = dd.DataFrame(dsk, 'x', meta, [0, 4, 9, 9])
full = d.compute()
shuffle_func = shuffle # conflicts with keyword argument
@pytest.mark.parametrize('shuffle', ['disk', 'tasks'])
def test_shuffle(shuffle):
s = shuffle_func(d, d.b, shuffle=shuffle)
assert isinstance(s, dd.DataFrame)
assert s.npartitions == d.npartitions
x = get_sync(s.dask, (s._name, 0))
y = get_sync(s.dask, (s._name, 1))
assert not (set(x.b) & set(y.b)) # disjoint
assert set(s.dask).issuperset(d.dask)
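    # shuffling on the same column should produce a deterministic graph name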
assert shuffle_func(d, d.b)._name == shuffle_func(d, d.b)._name
def test_default_partitions():
assert shuffle(d, d.b).npartitions == d.npartitions
def test_shuffle_npartitions_task():
df = pd.DataFrame({'x': np.random.random(100)})
ddf = dd.from_pandas(df, npartitions=10)
s = shuffle(ddf, ddf.x, shuffle='tasks', npartitions=17, max_branch=4)
sc = s.compute(get=get_sync)
assert s.npartitions == 17
assert set(s.dask).issuperset(set(ddf.dask))
assert len(sc) == len(df)
assert list(s.columns) == list(df.columns)
assert (set(map(tuple, sc.values.tolist())) ==
set(map(tuple, df.values.tolist())))
@pytest.mark.parametrize('method', ['disk', 'tasks'])
def test_index_with_non_series(method):
from dask.dataframe.tests.test_multi import list_eq
list_eq(shuffle(d, d.b, shuffle=method),
shuffle(d, 'b', shuffle=method))
@pytest.mark.parametrize('method', ['disk', 'tasks'])
def test_index_with_dataframe(method):
res1 = shuffle(d, d[['b']], shuffle=method).compute()
res2 = shuffle(d, ['b'], shuffle=method).compute()
res3 = shuffle(d, 'b', shuffle=method).compute()
assert sorted(res1.values.tolist()) == sorted(res2.values.tolist())
assert sorted(res1.values.tolist()) == sorted(res3.values.tolist())
@pytest.mark.parametrize('method', ['disk', 'tasks'])
def test_shuffle_from_one_partition_to_one_other(method):
df = pd.DataFrame({'x': [1, 2, 3]})
a = dd.from_pandas(df, 1)
for i in [1, 2]:
b = shuffle(a, 'x', npartitions=i, shuffle=method)
assert len(a.compute(get=get_sync)) == len(b.compute(get=get_sync))
@pytest.mark.parametrize('method', ['disk', 'tasks'])
def test_shuffle_empty_partitions(method):
df = pd.DataFrame({'x': [1, 2, 3] * 10})
ddf = dd.from_pandas(df, npartitions=3)
s = shuffle(ddf, ddf.x, npartitions=6, shuffle=method)
parts = s._get(s.dask, s._keys())
for p in parts:
assert s.columns == p.columns
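# DataFrame covering several dtypes (int32, float32, categorical, object, bool,
# naive and tz-aware datetimes, timedelta) used by the partitioning_index tests.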
df2 = pd.DataFrame({'i32': np.array([1, 2, 3] * 3, dtype='int32'),
'f32': np.array([None, 2.5, 3.5] * 3, dtype='float32'),
'cat': pd.Series(['a', 'b', 'c'] * 3).astype('category'),
'obj': pd.Series(['d', 'e', 'f'] * 3),
'bool': np.array([True, False, True] * 3),
'dt': pd.Series(pd.date_range('20130101', periods=9)),
'dt_tz': pd.Series(pd.date_range('20130101', periods=9, tz='US/Eastern')),
'td': pd.Series(pd.timedelta_range('2000', periods=9))})
def test_partitioning_index():
res = partitioning_index(df2.i32, 3)
assert ((res < 3) & (res >= 0)).all()
assert len(np.unique(res)) > 1
assert (partitioning_index(df2.i32, 3) == partitioning_index(df2.i32, 3)).all()
res = partitioning_index(df2[['i32']], 3)
assert ((res < 3) & (res >= 0)).all()
assert len(np.unique(res)) > 1
res = partitioning_index(df2[['cat', 'bool', 'f32']], 2)
assert ((0 <= res) & (res < 2)).all()
res = partitioning_index(df2.index, 4)
assert ((res < 4) & (res >= 0)).all()
assert len(np.unique(res)) > 1
def test_partitioning_index_categorical_on_values():
df = pd.DataFrame({'a': list(string.ascii_letters),
'b': [1, 2, 3, 4] * 13})
df.a = df.a.astype('category')
df2 = df.copy()
df2.a = df2.a.cat.set_categories(list(reversed(df2.a.cat.categories)))
res = partitioning_index(df.a, 5)
res2 = partitioning_index(df2.a, 5)
assert (res == res2).all()
res = partitioning_index(df, 5)
res2 = partitioning_index(df2, 5)
assert (res == res2).all()
@pytest.mark.parametrize('npartitions', [1, 4, 7, pytest.mark.slow(23)])
def test_set_partition_tasks(npartitions):
df = pd.DataFrame({'x': np.random.random(100),
'y': np.random.random(100) // 0.2},
index=np.random.random(100))
ddf = dd.from_pandas(df, npartitions=npartitions)
assert_eq(df.set_index('x'),
ddf.set_index('x', shuffle='tasks'))
assert_eq(df.set_index('y'),
ddf.set_index('y', shuffle='tasks'))
assert_eq(df.set_index(df.x),
ddf.set_index(ddf.x, shuffle='tasks'))
assert_eq(df.set_index(df.x + df.y),
ddf.set_index(ddf.x + ddf.y, shuffle='tasks'))
assert_eq(df.set_index(df.x + 1),
ddf.set_index(ddf.x + 1, shuffle='tasks'))
assert_eq(df.set_index(df.index),
ddf.set_index(ddf.index, shuffle='tasks'))
@pytest.mark.parametrize('shuffle', ['disk', 'tasks'])
def test_set_index_self_index(shuffle):
df = pd.DataFrame({'x': np.random.random(100),
'y': np.random.random(100) // 0.2},
index=np.random.random(100))
a = dd.from_pandas(df, npartitions=4)
b = a.set_index(a.index, shuffle=shuffle)
assert a is b
assert_eq(b, df.set_index(df.index))
@pytest.mark.parametrize('shuffle', ['tasks'])
def test_set_partition_names(shuffle):
df = pd.DataFrame({'x': np.random.random(100),
'y': np.random.random(100) // 0.2},
index=np.random.random(100))
ddf = dd.from_pandas(df, npartitions=4)
assert (set(ddf.set_index('x', shuffle=shuffle).dask) ==
set(ddf.set_index('x', shuffle=shuffle).dask))
assert (set(ddf.set_index('x', shuffle=shuffle).dask) !=
set(ddf.set_index('y', shuffle=shuffle).dask))
assert (set(ddf.set_index('x', max_branch=4, shuffle=shuffle).dask) !=
set(ddf.set_index('x', max_branch=3, shuffle=shuffle).dask))
assert (set(ddf.set_index('x', drop=True, shuffle=shuffle).dask) !=
set(ddf.set_index('x', drop=False, shuffle=shuffle).dask))
@pytest.mark.parametrize('shuffle', ['disk', 'tasks'])
def test_set_partition_tasks_2(shuffle):
df = dd.demo.make_timeseries(
'2000', '2004', {'value': float, 'name': str, 'id': int},
freq='2H', partition_freq='1M', seed=1)
df2 = df.set_index('name', shuffle=shuffle)
df2.value.sum().compute(get=get_sync)
@pytest.mark.parametrize('shuffle', ['disk', 'tasks'])
def test_set_partition_tasks_3(shuffle):
df = pd.DataFrame(np.random.random((10, 2)), columns=['x', 'y'])
ddf = dd.from_pandas(df, npartitions=5)
ddf2 = ddf.set_index('x', shuffle=shuffle, max_branch=2,
npartitions=ddf.npartitions)
df2 = df.set_index('x')
assert_eq(df2, ddf2)
assert ddf2.npartitions == ddf.npartitions
@pytest.mark.parametrize('shuffle', ['tasks', 'disk'])
def test_shuffle_sort(shuffle):
df = pd.DataFrame({'x': [1, 2, 3, 2, 1], 'y': [9, 8, 7, 1, 5]})
ddf = dd.from_pandas(df, npartitions=3)
df2 = df.set_index('x').sort_index()
ddf2 = ddf.set_index('x', shuffle=shuffle)
assert_eq(ddf2.loc[2:3], df2.loc[2:3])
@pytest.mark.parametrize('shuffle', ['tasks', 'disk'])
@pytest.mark.parametrize('get', [threaded_get, mp_get])
def test_rearrange(shuffle, get):
df = pd.DataFrame({'x': np.random.random(10)})
ddf = dd.from_pandas(df, npartitions=4)
ddf2 = ddf.assign(y=ddf.x % 4)
result = rearrange_by_column(ddf2, 'y', max_branch=32, shuffle=shuffle)
assert result.npartitions == ddf.npartitions
assert set(ddf.dask).issubset(result.dask)
# Every value in exactly one partition
a = result.compute(get=get)
parts = get(result.dask, result._keys())
for i in a.y.drop_duplicates():
assert sum(i in part.y for part in parts) == 1
def test_rearrange_by_column_with_narrow_divisions():
from dask.dataframe.tests.test_multi import list_eq
A = pd.DataFrame({'x': [1, 2, 3, 4, 5, 6], 'y': [1, 1, 2, 2, 3, 4]})
a = dd.repartition(A, [0, 4, 5])
df = rearrange_by_divisions(a, 'x', (0, 2, 5))
list_eq(df, a)
def test_maybe_buffered_partd():
import partd
f = maybe_buffered_partd()
p1 = f()
assert isinstance(p1.partd, partd.Buffer)
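    # after a pickle round-trip the factory should produce an unbuffered partd.File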
f2 = pickle.loads(pickle.dumps(f))
assert not f2.buffer
p2 = f2()
assert isinstance(p2.partd, partd.File)
def test_set_index_with_explicit_divisions():
df = pd.DataFrame({'x': [4, 1, 2, 5]}, index=[10, 20, 30, 40])
ddf = dd.from_pandas(df, npartitions=2)
def throw(*args, **kwargs):
raise Exception()
with dask.set_options(get=throw):
ddf2 = ddf.set_index('x', divisions=[1, 3, 5])
df2 = df.set_index('x')
assert_eq(ddf2, df2)
@pytest.mark.parametrize('shuffle', ['disk', 'tasks'])
def test_set_index_reduces_partitions_small(shuffle):
df = pd.DataFrame({'x': np.random.random(100)})
ddf = dd.from_pandas(df, npartitions=50)
ddf2 = ddf.set_index('x', shuffle=shuffle, npartitions='auto')
assert ddf2.npartitions < 10
@pytest.mark.parametrize('shuffle', ['disk', 'tasks'])
def test_set_index_reduces_partitions_large(shuffle):
n = 2**24
df = pd.DataFrame({'x': np.random.random(n),
'y': np.random.random(n),
'z': np.random.random(n)})
ddf = dd.from_pandas(df, npartitions=50, name='x', sort=False)
ddf2 = ddf.set_index('x', shuffle=shuffle, npartitions='auto')
assert 1 < ddf2.npartitions < 20
@pytest.mark.parametrize('shuffle', ['disk', 'tasks'])
def test_set_index_doesnt_increase_partitions(shuffle):
n = 2**24
df = pd.DataFrame({'x': np.random.random(n),
'y': np.random.random(n),
'z': np.random.random(n)})
ddf = dd.from_pandas(df, npartitions=2, name='x', sort=False)
ddf2 = ddf.set_index('x', shuffle=shuffle, npartitions='auto')
assert ddf2.npartitions <= ddf.npartitions
@pytest.mark.parametrize('shuffle', ['disk', 'tasks'])
def test_set_index_detects_sorted_data(shuffle):
df = pd.DataFrame({'x': range(100), 'y': range(100)})
ddf = dd.from_pandas(df, npartitions=10, name='x', sort=False)
ddf2 = ddf.set_index('x', shuffle=shuffle)
assert len(ddf2.dask) < ddf.npartitions * 4
def test_temporary_directory():
df = pd.DataFrame({'x': np.random.random(100),
'y': np.random.random(100),
'z': np.random.random(100)})
ddf = dd.from_pandas(df, npartitions=10, name='x', sort=False)
with dask.set_options(temporary_directory=os.getcwd(),
get=dask.multiprocessing.get):
ddf2 = ddf.set_index('x', shuffle='disk')
ddf2.compute()
assert any(fn.endswith('.partd') for fn in os.listdir(os.getcwd()))
|
|
from __future__ import absolute_import
import six
from datetime import datetime, timedelta
import pytest
from django.db.models import ProtectedError
from django.utils import timezone
from sentry import tagstore
from sentry.models import (
Group, GroupRedirect, GroupSnooze, GroupStatus, Release, get_group_with_redirect
)
from sentry.testutils import TestCase
class GroupTest(TestCase):
def test_is_resolved(self):
group = self.create_group(status=GroupStatus.RESOLVED)
assert group.is_resolved()
group.status = GroupStatus.IGNORED
assert not group.is_resolved()
group.status = GroupStatus.UNRESOLVED
assert not group.is_resolved()
group.last_seen = timezone.now() - timedelta(hours=12)
group.project.update_option('sentry:resolve_age', 24)
assert not group.is_resolved()
group.project.update_option('sentry:resolve_age', 1)
assert group.is_resolved()
def test_get_oldest_latest_event_no_events(self):
group = self.create_group()
assert group.get_latest_event() is None
assert group.get_oldest_event() is None
def test_get_oldest_latest_events(self):
group = self.create_group()
for i in range(0, 3):
self.create_event(
event_id=six.text_type(i),
group=group,
datetime=datetime(2013, 8, 13, 3, 8, i),
)
assert group.get_latest_event().event_id == '2'
assert group.get_oldest_event().event_id == '0'
def test_get_oldest_latest_identical_timestamps(self):
group = self.create_group()
for i in range(0, 3):
self.create_event(
event_id=six.text_type(i),
group=group,
datetime=datetime(2013, 8, 13, 3, 8, 50),
)
assert group.get_latest_event().event_id == '2'
assert group.get_oldest_event().event_id == '0'
def test_get_oldest_latest_almost_identical_timestamps(self):
group = self.create_group()
self.create_event(
event_id='0',
group=group,
datetime=datetime(2013, 8, 13, 3, 8, 0), # earliest
)
for i in range(1, 3):
self.create_event(
event_id=six.text_type(i),
group=group,
datetime=datetime(2013, 8, 13, 3, 8, 30), # all in the middle
)
self.create_event(
event_id='3',
group=group,
datetime=datetime(2013, 8, 13, 3, 8, 59), # latest
)
assert group.get_latest_event().event_id == '3'
assert group.get_oldest_event().event_id == '0'
def test_is_ignored_with_expired_snooze(self):
group = self.create_group(
status=GroupStatus.IGNORED,
)
GroupSnooze.objects.create(
group=group,
until=timezone.now() - timedelta(minutes=1),
)
assert not group.is_ignored()
def test_status_with_expired_snooze(self):
group = self.create_group(
status=GroupStatus.IGNORED,
)
GroupSnooze.objects.create(
group=group,
until=timezone.now() - timedelta(minutes=1),
)
assert group.get_status() == GroupStatus.UNRESOLVED
def test_deleting_release_does_not_delete_group(self):
project = self.create_project()
release = Release.objects.create(
version='a',
organization_id=project.organization_id,
)
release.add_project(project)
group = self.create_group(
project=project,
first_release=release,
)
with pytest.raises(ProtectedError):
release.delete()
group = Group.objects.get(id=group.id)
assert group.first_release == release
def test_save_truncate_message(self):
assert len(self.create_group(message='x' * 300).message) == 255
assert self.create_group(message='\nfoo\n ').message == 'foo'
assert self.create_group(message='foo').message == 'foo'
assert self.create_group(message='').message == ''
def test_get_group_with_redirect(self):
group = self.create_group()
assert get_group_with_redirect(group.id) == (group, False)
duplicate_id = self.create_group().id
Group.objects.filter(id=duplicate_id).delete()
GroupRedirect.objects.create(
group_id=group.id,
previous_group_id=duplicate_id,
)
assert get_group_with_redirect(duplicate_id) == (group, True)
# We shouldn't end up in a case where the redirect points to a bad
# reference, but testing this path for completeness.
group.delete()
with pytest.raises(Group.DoesNotExist):
get_group_with_redirect(duplicate_id)
def test_invalid_shared_id(self):
with pytest.raises(Group.DoesNotExist):
Group.from_share_id('adc7a5b902184ce3818046302e94f8ec')
def test_qualified_share_id(self):
project = self.create_project(name='foo bar')
group = self.create_group(project=project, short_id=project.next_short_id())
short_id = group.qualified_short_id
assert short_id.startswith('FOO-BAR-')
group2 = Group.objects.by_qualified_short_id(group.organization.id, short_id)
assert group2 == group
def test_first_last_release(self):
project = self.create_project()
release = Release.objects.create(
version='a',
organization_id=project.organization_id,
)
release.add_project(project)
group = self.create_group(
project=project,
first_release=release,
)
tagstore.create_group_tag_value(
project_id=project.id, group_id=group.id, environment_id=self.environment.id,
key='sentry:release', value=release.version
)
assert group.first_release == release
assert group.get_first_release() == release.version
assert group.get_last_release() == release.version
def test_first_release_from_tag(self):
project = self.create_project()
release = Release.objects.create(
version='a',
organization_id=project.organization_id,
)
release.add_project(project)
group = self.create_group(
project=project,
)
tagstore.create_group_tag_value(
project_id=project.id, group_id=group.id, environment_id=self.environment.id,
key='sentry:release', value=release.version
)
assert group.first_release is None
assert group.get_first_release() == release.version
assert group.get_last_release() == release.version
def test_first_last_release_miss(self):
project = self.create_project()
release = Release.objects.create(
version='a',
organization_id=project.organization_id,
)
release.add_project(project)
group = self.create_group(
project=project,
)
assert group.first_release is None
assert group.get_first_release() is None
assert group.get_last_release() is None
def test_get_email_subject(self):
project = self.create_project()
group = self.create_group(project=project)
assert group.get_email_subject() == '%s - %s' % (group.qualified_short_id, group.title)
|
|
#!/usr/bin/env python3
# Copyright (c) 2009-2019 The Bitcoin Core developers
# Copyright (c) 2014-2019 The DigiByte Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test digibyted with different proxy configuration.
Test plan:
- Start digibyted's with different proxy configurations
- Use addnode to initiate connections
- Verify that proxies are connected to, and the right connection command is given
- Proxy configurations to test on digibyted side:
- `-proxy` (proxy everything)
- `-onion` (proxy just onions)
- `-proxyrandomize` Circuit randomization
- Proxy configurations to test on proxy side,
- support no authentication (other proxy)
- support no authentication + user/pass authentication (Tor)
- proxy on IPv6
- Create various proxies (as threads)
- Create digibyteds that connect to them
- Manipulate the digibyteds using addnode (onetry) and observe effects
addnode connect to IPv4
addnode connect to IPv6
addnode connect to onion
addnode connect to generic DNS name
"""
import socket
import os
from test_framework.socks5 import Socks5Configuration, Socks5Command, Socks5Server, AddressType
from test_framework.test_framework import DigiByteTestFramework
from test_framework.util import (
PORT_MIN,
PORT_RANGE,
assert_equal,
)
from test_framework.netutil import test_ipv6_local
RANGE_BEGIN = PORT_MIN + 2 * PORT_RANGE # Start after p2p and rpc ports
class ProxyTest(DigiByteTestFramework):
def set_test_params(self):
self.num_nodes = 4
self.setup_clean_chain = True
def setup_nodes(self):
self.have_ipv6 = test_ipv6_local()
# Create two proxies on different ports
# ... one unauthenticated
self.conf1 = Socks5Configuration()
self.conf1.addr = ('127.0.0.1', RANGE_BEGIN + (os.getpid() % 1000))
self.conf1.unauth = True
self.conf1.auth = False
# ... one supporting authenticated and unauthenticated (Tor)
self.conf2 = Socks5Configuration()
self.conf2.addr = ('127.0.0.1', RANGE_BEGIN + 1000 + (os.getpid() % 1000))
self.conf2.unauth = True
self.conf2.auth = True
if self.have_ipv6:
# ... one on IPv6 with similar configuration
self.conf3 = Socks5Configuration()
self.conf3.af = socket.AF_INET6
self.conf3.addr = ('::1', RANGE_BEGIN + 2000 + (os.getpid() % 1000))
self.conf3.unauth = True
self.conf3.auth = True
else:
self.log.warning("Testing without local IPv6 support")
self.serv1 = Socks5Server(self.conf1)
self.serv1.start()
self.serv2 = Socks5Server(self.conf2)
self.serv2.start()
if self.have_ipv6:
self.serv3 = Socks5Server(self.conf3)
self.serv3.start()
# Note: proxies are not used to connect to local nodes
        # this is because the proxy to use is based on CService.GetNetwork(), which returns NET_UNROUTABLE for localhost
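        # Node 0: everything through the unauthenticated proxy, credentials randomized
        # Node 1: unauthenticated proxy plus a separate Tor-like proxy for onions
        # Node 2: everything through the auth-capable (Tor-like) proxy, credentials randomized
        # Node 3: placeholder, replaced below with the IPv6 proxy when IPv6 is available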
args = [
['-listen', '-proxy=%s:%i' % (self.conf1.addr),'-proxyrandomize=1'],
['-listen', '-proxy=%s:%i' % (self.conf1.addr),'-onion=%s:%i' % (self.conf2.addr),'-proxyrandomize=0'],
['-listen', '-proxy=%s:%i' % (self.conf2.addr),'-proxyrandomize=1'],
[]
]
if self.have_ipv6:
args[3] = ['-listen', '-proxy=[%s]:%i' % (self.conf3.addr),'-proxyrandomize=0', '-noonion']
self.add_nodes(self.num_nodes, extra_args=args)
self.start_nodes()
def node_test(self, node, proxies, auth, test_onion=True):
rv = []
# Test: outgoing IPv4 connection through node
node.addnode("15.61.23.23:1234", "onetry")
cmd = proxies[0].queue.get()
assert isinstance(cmd, Socks5Command)
# Note: digibyted's SOCKS5 implementation only sends atyp DOMAINNAME, even if connecting directly to IPv4/IPv6
assert_equal(cmd.atyp, AddressType.DOMAINNAME)
assert_equal(cmd.addr, b"15.61.23.23")
assert_equal(cmd.port, 1234)
if not auth:
assert_equal(cmd.username, None)
assert_equal(cmd.password, None)
rv.append(cmd)
if self.have_ipv6:
# Test: outgoing IPv6 connection through node
node.addnode("[1233:3432:2434:2343:3234:2345:6546:4534]:5443", "onetry")
cmd = proxies[1].queue.get()
assert isinstance(cmd, Socks5Command)
# Note: digibyted's SOCKS5 implementation only sends atyp DOMAINNAME, even if connecting directly to IPv4/IPv6
assert_equal(cmd.atyp, AddressType.DOMAINNAME)
assert_equal(cmd.addr, b"1233:3432:2434:2343:3234:2345:6546:4534")
assert_equal(cmd.port, 5443)
if not auth:
assert_equal(cmd.username, None)
assert_equal(cmd.password, None)
rv.append(cmd)
if test_onion:
# Test: outgoing onion connection through node
node.addnode("digibyteostk4e4re.onion:8333", "onetry")
cmd = proxies[2].queue.get()
assert isinstance(cmd, Socks5Command)
assert_equal(cmd.atyp, AddressType.DOMAINNAME)
assert_equal(cmd.addr, b"digibyteostk4e4re.onion")
assert_equal(cmd.port, 8333)
if not auth:
assert_equal(cmd.username, None)
assert_equal(cmd.password, None)
rv.append(cmd)
# Test: outgoing DNS name connection through node
node.addnode("node.noumenon:8333", "onetry")
cmd = proxies[3].queue.get()
assert isinstance(cmd, Socks5Command)
assert_equal(cmd.atyp, AddressType.DOMAINNAME)
assert_equal(cmd.addr, b"node.noumenon")
assert_equal(cmd.port, 8333)
if not auth:
assert_equal(cmd.username, None)
assert_equal(cmd.password, None)
rv.append(cmd)
return rv
def run_test(self):
# basic -proxy
self.node_test(self.nodes[0], [self.serv1, self.serv1, self.serv1, self.serv1], False)
# -proxy plus -onion
self.node_test(self.nodes[1], [self.serv1, self.serv1, self.serv2, self.serv1], False)
# -proxy plus -onion, -proxyrandomize
rv = self.node_test(self.nodes[2], [self.serv2, self.serv2, self.serv2, self.serv2], True)
# Check that credentials as used for -proxyrandomize connections are unique
credentials = set((x.username,x.password) for x in rv)
assert_equal(len(credentials), len(rv))
if self.have_ipv6:
# proxy on IPv6 localhost
self.node_test(self.nodes[3], [self.serv3, self.serv3, self.serv3, self.serv3], False, False)
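        # Flatten getnetworkinfo()['networks'] into a dict keyed by network name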
def networks_dict(d):
r = {}
for x in d['networks']:
r[x['name']] = x
return r
# test RPC getnetworkinfo
n0 = networks_dict(self.nodes[0].getnetworkinfo())
for net in ['ipv4','ipv6','onion']:
assert_equal(n0[net]['proxy'], '%s:%i' % (self.conf1.addr))
assert_equal(n0[net]['proxy_randomize_credentials'], True)
assert_equal(n0['onion']['reachable'], True)
n1 = networks_dict(self.nodes[1].getnetworkinfo())
for net in ['ipv4','ipv6']:
assert_equal(n1[net]['proxy'], '%s:%i' % (self.conf1.addr))
assert_equal(n1[net]['proxy_randomize_credentials'], False)
assert_equal(n1['onion']['proxy'], '%s:%i' % (self.conf2.addr))
assert_equal(n1['onion']['proxy_randomize_credentials'], False)
assert_equal(n1['onion']['reachable'], True)
n2 = networks_dict(self.nodes[2].getnetworkinfo())
for net in ['ipv4','ipv6','onion']:
assert_equal(n2[net]['proxy'], '%s:%i' % (self.conf2.addr))
assert_equal(n2[net]['proxy_randomize_credentials'], True)
assert_equal(n2['onion']['reachable'], True)
if self.have_ipv6:
n3 = networks_dict(self.nodes[3].getnetworkinfo())
for net in ['ipv4','ipv6']:
assert_equal(n3[net]['proxy'], '[%s]:%i' % (self.conf3.addr))
assert_equal(n3[net]['proxy_randomize_credentials'], False)
assert_equal(n3['onion']['reachable'], False)
if __name__ == '__main__':
ProxyTest().main()
|
|
"""Test UniFi config flow."""
import socket
from unittest.mock import patch
import aiounifi
from homeassistant import config_entries, data_entry_flow, setup
from homeassistant.components.unifi.config_flow import async_discover_unifi
from homeassistant.components.unifi.const import (
CONF_ALLOW_BANDWIDTH_SENSORS,
CONF_ALLOW_UPTIME_SENSORS,
CONF_BLOCK_CLIENT,
CONF_CONTROLLER,
CONF_DETECTION_TIME,
CONF_DPI_RESTRICTIONS,
CONF_IGNORE_WIRED_BUG,
CONF_POE_CLIENTS,
CONF_SITE_ID,
CONF_SSID_FILTER,
CONF_TRACK_CLIENTS,
CONF_TRACK_DEVICES,
CONF_TRACK_WIRED_CLIENTS,
DOMAIN as UNIFI_DOMAIN,
)
from homeassistant.config_entries import SOURCE_REAUTH, SOURCE_USER
from homeassistant.const import (
CONF_HOST,
CONF_PASSWORD,
CONF_PORT,
CONF_USERNAME,
CONF_VERIFY_SSL,
CONTENT_TYPE_JSON,
)
from .test_controller import setup_unifi_integration
from tests.common import MockConfigEntry
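# Fixture data used when mocking UniFi controller responses in these tests.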
CLIENTS = [{"mac": "00:00:00:00:00:01"}]
DEVICES = [
{
"board_rev": 21,
"device_id": "mock-id",
"ip": "10.0.1.1",
"last_seen": 0,
"mac": "00:00:00:00:01:01",
"model": "U7PG2",
"name": "access_point",
"state": 1,
"type": "uap",
"version": "4.0.80.10875",
"wlan_overrides": [
{
"name": "SSID 3",
"radio": "na",
"radio_name": "wifi1",
"wlan_id": "012345678910111213141516",
},
{
"name": "",
"radio": "na",
"radio_name": "wifi1",
"wlan_id": "012345678910111213141516",
},
{
"radio": "na",
"radio_name": "wifi1",
"wlan_id": "012345678910111213141516",
},
],
}
]
WLANS = [
{"name": "SSID 1"},
{"name": "SSID 2", "name_combine_enabled": False, "name_combine_suffix": "_IOT"},
]
DPI_GROUPS = [
{
"_id": "5ba29dd8e3c58f026e9d7c4a",
"name": "Default",
"site_id": "5ba29dd4e3c58f026e9d7c38",
},
]
async def test_flow_works(hass, aioclient_mock, mock_discovery):
"""Test config flow."""
mock_discovery.return_value = "1"
result = await hass.config_entries.flow.async_init(
UNIFI_DOMAIN, context={"source": "user"}
)
assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
assert result["step_id"] == "user"
assert result["data_schema"]({CONF_USERNAME: "", CONF_PASSWORD: ""}) == {
CONF_HOST: "unifi",
CONF_USERNAME: "",
CONF_PASSWORD: "",
CONF_PORT: 443,
CONF_VERIFY_SSL: False,
}
aioclient_mock.get("https://1.2.3.4:1234", status=302)
aioclient_mock.post(
"https://1.2.3.4:1234/api/login",
json={"data": "login successful", "meta": {"rc": "ok"}},
headers={"content-type": CONTENT_TYPE_JSON},
)
aioclient_mock.get(
"https://1.2.3.4:1234/api/self/sites",
json={
"data": [
{"desc": "Site name", "name": "site_id", "role": "admin", "_id": "1"}
],
"meta": {"rc": "ok"},
},
headers={"content-type": CONTENT_TYPE_JSON},
)
result = await hass.config_entries.flow.async_configure(
result["flow_id"],
user_input={
CONF_HOST: "1.2.3.4",
CONF_USERNAME: "username",
CONF_PASSWORD: "password",
CONF_PORT: 1234,
CONF_VERIFY_SSL: True,
},
)
assert result["type"] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY
assert result["title"] == "Site name"
assert result["data"] == {
CONF_HOST: "1.2.3.4",
CONF_USERNAME: "username",
CONF_PASSWORD: "password",
CONF_PORT: 1234,
CONF_SITE_ID: "site_id",
CONF_VERIFY_SSL: True,
CONF_CONTROLLER: {
CONF_HOST: "1.2.3.4",
CONF_USERNAME: "username",
CONF_PASSWORD: "password",
CONF_PORT: 1234,
CONF_SITE_ID: "site_id",
CONF_VERIFY_SSL: True,
},
}
async def test_flow_works_negative_discovery(hass, aioclient_mock, mock_discovery):
"""Test config flow with a negative outcome of async_discovery_unifi."""
result = await hass.config_entries.flow.async_init(
UNIFI_DOMAIN, context={"source": "user"}
)
assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
assert result["step_id"] == "user"
assert result["data_schema"]({CONF_USERNAME: "", CONF_PASSWORD: ""}) == {
CONF_HOST: "",
CONF_USERNAME: "",
CONF_PASSWORD: "",
CONF_PORT: 443,
CONF_VERIFY_SSL: False,
}
async def test_flow_multiple_sites(hass, aioclient_mock):
"""Test config flow works when finding multiple sites."""
result = await hass.config_entries.flow.async_init(
UNIFI_DOMAIN, context={"source": "user"}
)
assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
assert result["step_id"] == "user"
aioclient_mock.get("https://1.2.3.4:1234", status=302)
aioclient_mock.post(
"https://1.2.3.4:1234/api/login",
json={"data": "login successful", "meta": {"rc": "ok"}},
headers={"content-type": CONTENT_TYPE_JSON},
)
aioclient_mock.get(
"https://1.2.3.4:1234/api/self/sites",
json={
"data": [
{"name": "default", "role": "admin", "desc": "site name", "_id": "1"},
{"name": "site2", "role": "admin", "desc": "site2 name", "_id": "2"},
],
"meta": {"rc": "ok"},
},
headers={"content-type": CONTENT_TYPE_JSON},
)
result = await hass.config_entries.flow.async_configure(
result["flow_id"],
user_input={
CONF_HOST: "1.2.3.4",
CONF_USERNAME: "username",
CONF_PASSWORD: "password",
CONF_PORT: 1234,
CONF_VERIFY_SSL: True,
},
)
assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
assert result["step_id"] == "site"
assert result["data_schema"]({"site": "1"})
assert result["data_schema"]({"site": "2"})
async def test_flow_raise_already_configured(hass, aioclient_mock):
"""Test config flow aborts since a connected config entry already exists."""
await setup_unifi_integration(hass, aioclient_mock)
result = await hass.config_entries.flow.async_init(
UNIFI_DOMAIN, context={"source": "user"}
)
assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
assert result["step_id"] == "user"
aioclient_mock.clear_requests()
aioclient_mock.get("https://1.2.3.4:1234", status=302)
aioclient_mock.post(
"https://1.2.3.4:1234/api/login",
json={"data": "login successful", "meta": {"rc": "ok"}},
headers={"content-type": CONTENT_TYPE_JSON},
)
aioclient_mock.get(
"https://1.2.3.4:1234/api/self/sites",
json={
"data": [
{"desc": "Site name", "name": "site_id", "role": "admin", "_id": "1"}
],
"meta": {"rc": "ok"},
},
headers={"content-type": CONTENT_TYPE_JSON},
)
result = await hass.config_entries.flow.async_configure(
result["flow_id"],
user_input={
CONF_HOST: "1.2.3.4",
CONF_USERNAME: "username",
CONF_PASSWORD: "password",
CONF_PORT: 1234,
CONF_VERIFY_SSL: True,
},
)
assert result["type"] == data_entry_flow.RESULT_TYPE_ABORT
assert result["reason"] == "already_configured"
async def test_flow_aborts_configuration_updated(hass, aioclient_mock):
"""Test config flow aborts since a connected config entry already exists."""
entry = MockConfigEntry(
domain=UNIFI_DOMAIN, data={"host": "1.2.3.4", "site": "office"}, unique_id="2"
)
entry.add_to_hass(hass)
entry = MockConfigEntry(
domain=UNIFI_DOMAIN, data={"host": "1.2.3.4", "site": "site_id"}, unique_id="1"
)
entry.add_to_hass(hass)
result = await hass.config_entries.flow.async_init(
UNIFI_DOMAIN, context={"source": "user"}
)
assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
assert result["step_id"] == "user"
aioclient_mock.get("https://1.2.3.4:1234", status=302)
aioclient_mock.post(
"https://1.2.3.4:1234/api/login",
json={"data": "login successful", "meta": {"rc": "ok"}},
headers={"content-type": CONTENT_TYPE_JSON},
)
aioclient_mock.get(
"https://1.2.3.4:1234/api/self/sites",
json={
"data": [
{"desc": "Site name", "name": "site_id", "role": "admin", "_id": "1"}
],
"meta": {"rc": "ok"},
},
headers={"content-type": CONTENT_TYPE_JSON},
)
with patch("homeassistant.components.unifi.async_setup_entry"):
result = await hass.config_entries.flow.async_configure(
result["flow_id"],
user_input={
CONF_HOST: "1.2.3.4",
CONF_USERNAME: "username",
CONF_PASSWORD: "password",
CONF_PORT: 1234,
CONF_VERIFY_SSL: True,
},
)
assert result["type"] == data_entry_flow.RESULT_TYPE_ABORT
assert result["reason"] == "configuration_updated"
async def test_flow_fails_user_credentials_faulty(hass, aioclient_mock):
"""Test config flow."""
result = await hass.config_entries.flow.async_init(
UNIFI_DOMAIN, context={"source": "user"}
)
assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
assert result["step_id"] == "user"
aioclient_mock.get("https://1.2.3.4:1234", status=302)
with patch("aiounifi.Controller.login", side_effect=aiounifi.errors.Unauthorized):
result = await hass.config_entries.flow.async_configure(
result["flow_id"],
user_input={
CONF_HOST: "1.2.3.4",
CONF_USERNAME: "username",
CONF_PASSWORD: "password",
CONF_PORT: 1234,
CONF_VERIFY_SSL: True,
},
)
assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
assert result["errors"] == {"base": "faulty_credentials"}
async def test_flow_fails_controller_unavailable(hass, aioclient_mock):
"""Test config flow."""
result = await hass.config_entries.flow.async_init(
UNIFI_DOMAIN, context={"source": "user"}
)
assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
assert result["step_id"] == "user"
aioclient_mock.get("https://1.2.3.4:1234", status=302)
with patch("aiounifi.Controller.login", side_effect=aiounifi.errors.RequestError):
result = await hass.config_entries.flow.async_configure(
result["flow_id"],
user_input={
CONF_HOST: "1.2.3.4",
CONF_USERNAME: "username",
CONF_PASSWORD: "password",
CONF_PORT: 1234,
CONF_VERIFY_SSL: True,
},
)
assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
assert result["errors"] == {"base": "service_unavailable"}
async def test_reauth_flow_update_configuration(hass, aioclient_mock):
"""Verify reauth flow can update controller configuration."""
config_entry = await setup_unifi_integration(hass, aioclient_mock)
controller = hass.data[UNIFI_DOMAIN][config_entry.entry_id]
controller.available = False
result = await hass.config_entries.flow.async_init(
UNIFI_DOMAIN,
context={"source": SOURCE_REAUTH},
data=config_entry,
)
assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
assert result["step_id"] == SOURCE_USER
aioclient_mock.clear_requests()
aioclient_mock.get("https://1.2.3.4:1234", status=302)
aioclient_mock.post(
"https://1.2.3.4:1234/api/login",
json={"data": "login successful", "meta": {"rc": "ok"}},
headers={"content-type": CONTENT_TYPE_JSON},
)
aioclient_mock.get(
"https://1.2.3.4:1234/api/self/sites",
json={
"data": [
{"desc": "Site name", "name": "site_id", "role": "admin", "_id": "1"}
],
"meta": {"rc": "ok"},
},
headers={"content-type": CONTENT_TYPE_JSON},
)
result = await hass.config_entries.flow.async_configure(
result["flow_id"],
user_input={
CONF_HOST: "1.2.3.4",
CONF_USERNAME: "new_name",
CONF_PASSWORD: "new_pass",
CONF_PORT: 1234,
CONF_VERIFY_SSL: True,
},
)
assert result["type"] == data_entry_flow.RESULT_TYPE_ABORT
assert result["reason"] == "reauth_successful"
assert config_entry.data[CONF_HOST] == "1.2.3.4"
assert config_entry.data[CONF_USERNAME] == "new_name"
assert config_entry.data[CONF_PASSWORD] == "new_pass"
async def test_advanced_option_flow(hass, aioclient_mock):
"""Test advanced config flow options."""
config_entry = await setup_unifi_integration(
hass,
aioclient_mock,
clients_response=CLIENTS,
devices_response=DEVICES,
wlans_response=WLANS,
dpigroup_response=DPI_GROUPS,
dpiapp_response=[],
)
result = await hass.config_entries.options.async_init(
config_entry.entry_id, context={"show_advanced_options": True}
)
assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
assert result["step_id"] == "device_tracker"
assert set(
result["data_schema"].schema[CONF_SSID_FILTER].options.keys()
).intersection(("SSID 1", "SSID 2", "SSID 2_IOT", "SSID 3"))
result = await hass.config_entries.options.async_configure(
result["flow_id"],
user_input={
CONF_TRACK_CLIENTS: False,
CONF_TRACK_WIRED_CLIENTS: False,
CONF_TRACK_DEVICES: False,
CONF_SSID_FILTER: ["SSID 1", "SSID 2_IOT", "SSID 3"],
CONF_DETECTION_TIME: 100,
},
)
assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
assert result["step_id"] == "client_control"
result = await hass.config_entries.options.async_configure(
result["flow_id"],
user_input={
CONF_BLOCK_CLIENT: [CLIENTS[0]["mac"]],
CONF_POE_CLIENTS: False,
CONF_DPI_RESTRICTIONS: False,
},
)
assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
assert result["step_id"] == "statistics_sensors"
result = await hass.config_entries.options.async_configure(
result["flow_id"],
user_input={
CONF_ALLOW_BANDWIDTH_SENSORS: True,
CONF_ALLOW_UPTIME_SENSORS: True,
},
)
assert result["type"] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY
assert result["data"] == {
CONF_TRACK_CLIENTS: False,
CONF_TRACK_WIRED_CLIENTS: False,
CONF_TRACK_DEVICES: False,
CONF_SSID_FILTER: ["SSID 1", "SSID 2_IOT", "SSID 3"],
CONF_DETECTION_TIME: 100,
CONF_IGNORE_WIRED_BUG: False,
CONF_POE_CLIENTS: False,
CONF_DPI_RESTRICTIONS: False,
CONF_BLOCK_CLIENT: [CLIENTS[0]["mac"]],
CONF_ALLOW_BANDWIDTH_SENSORS: True,
CONF_ALLOW_UPTIME_SENSORS: True,
}
async def test_simple_option_flow(hass, aioclient_mock):
"""Test simple config flow options."""
config_entry = await setup_unifi_integration(
hass,
aioclient_mock,
clients_response=CLIENTS,
wlans_response=WLANS,
dpigroup_response=DPI_GROUPS,
dpiapp_response=[],
)
result = await hass.config_entries.options.async_init(
config_entry.entry_id, context={"show_advanced_options": False}
)
assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
assert result["step_id"] == "simple_options"
result = await hass.config_entries.options.async_configure(
result["flow_id"],
user_input={
CONF_TRACK_CLIENTS: False,
CONF_TRACK_DEVICES: False,
CONF_BLOCK_CLIENT: [CLIENTS[0]["mac"]],
},
)
assert result["type"] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY
assert result["data"] == {
CONF_TRACK_CLIENTS: False,
CONF_TRACK_DEVICES: False,
CONF_BLOCK_CLIENT: [CLIENTS[0]["mac"]],
}
async def test_form_ssdp(hass):
"""Test we get the form with ssdp source."""
await setup.async_setup_component(hass, "persistent_notification", {})
result = await hass.config_entries.flow.async_init(
UNIFI_DOMAIN,
context={"source": config_entries.SOURCE_SSDP},
data={
"friendlyName": "UniFi Dream Machine",
"modelDescription": "UniFi Dream Machine Pro",
"ssdp_location": "http://192.168.208.1:41417/rootDesc.xml",
"serialNumber": "e0:63:da:20:14:a9",
},
)
assert result["type"] == "form"
assert result["step_id"] == "user"
assert result["errors"] == {}
context = next(
flow["context"]
for flow in hass.config_entries.flow.async_progress()
if flow["flow_id"] == result["flow_id"]
)
assert context["title_placeholders"] == {
"host": "192.168.208.1",
"site": "default",
}
async def test_form_ssdp_aborts_if_host_already_exists(hass):
"""Test we abort if the host is already configured."""
await setup.async_setup_component(hass, "persistent_notification", {})
entry = MockConfigEntry(
domain=UNIFI_DOMAIN,
data={"host": "192.168.208.1", "site": "site_id"},
)
entry.add_to_hass(hass)
result = await hass.config_entries.flow.async_init(
UNIFI_DOMAIN,
context={"source": config_entries.SOURCE_SSDP},
data={
"friendlyName": "UniFi Dream Machine",
"modelDescription": "UniFi Dream Machine Pro",
"ssdp_location": "http://192.168.208.1:41417/rootDesc.xml",
"serialNumber": "e0:63:da:20:14:a9",
},
)
assert result["type"] == "abort"
assert result["reason"] == "already_configured"
async def test_form_ssdp_aborts_if_serial_already_exists(hass):
"""Test we abort if the serial is already configured."""
await setup.async_setup_component(hass, "persistent_notification", {})
entry = MockConfigEntry(
domain=UNIFI_DOMAIN,
data={"controller": {"host": "1.2.3.4", "site": "site_id"}},
unique_id="e0:63:da:20:14:a9",
)
entry.add_to_hass(hass)
result = await hass.config_entries.flow.async_init(
UNIFI_DOMAIN,
context={"source": config_entries.SOURCE_SSDP},
data={
"friendlyName": "UniFi Dream Machine",
"modelDescription": "UniFi Dream Machine Pro",
"ssdp_location": "http://192.168.208.1:41417/rootDesc.xml",
"serialNumber": "e0:63:da:20:14:a9",
},
)
assert result["type"] == "abort"
assert result["reason"] == "already_configured"
async def test_form_ssdp_gets_form_with_ignored_entry(hass):
"""Test we can still setup if there is an ignored entry."""
await setup.async_setup_component(hass, "persistent_notification", {})
entry = MockConfigEntry(
domain=UNIFI_DOMAIN,
data={"not_controller_key": None},
source=config_entries.SOURCE_IGNORE,
)
entry.add_to_hass(hass)
result = await hass.config_entries.flow.async_init(
UNIFI_DOMAIN,
context={"source": config_entries.SOURCE_SSDP},
data={
"friendlyName": "UniFi Dream Machine New",
"modelDescription": "UniFi Dream Machine Pro",
"ssdp_location": "http://1.2.3.4:41417/rootDesc.xml",
"serialNumber": "e0:63:da:20:14:a9",
},
)
assert result["type"] == "form"
assert result["step_id"] == "user"
assert result["errors"] == {}
context = next(
flow["context"]
for flow in hass.config_entries.flow.async_progress()
if flow["flow_id"] == result["flow_id"]
)
assert context["title_placeholders"] == {
"host": "1.2.3.4",
"site": "default",
}
async def test_discover_unifi_positive(hass):
"""Verify positive run of UniFi discovery."""
with patch("socket.gethostbyname", return_value=True):
assert await async_discover_unifi(hass)
async def test_discover_unifi_negative(hass):
"""Verify negative run of UniFi discovery."""
with patch("socket.gethostbyname", side_effect=socket.gaierror):
assert await async_discover_unifi(hass) is None
|
|
#
# Secret Labs' Regular Expression Engine
#
# various symbols used by the regular expression engine.
# run this script to update the _sre include files!
#
# Copyright (c) 1998-2001 by Secret Labs AB. All rights reserved.
#
# See the sre.py file for information on usage and redistribution.
#
"""Internal support module for sre"""
# update when constants are added or removed
MAGIC = 20140917
from _sre import MAXREPEAT, MAXGROUPS
# SRE standard exception (access as sre.error)
# should this really be here?
class error(Exception):
def __init__(self, msg, pattern=None, pos=None):
self.msg = msg
self.pattern = pattern
self.pos = pos
if pattern is not None and pos is not None:
msg = '%s at position %d' % (msg, pos)
if isinstance(pattern, str):
newline = '\n'
else:
newline = b'\n'
self.lineno = pattern.count(newline, 0, pos) + 1
self.colno = pos - pattern.rfind(newline, 0, pos)
if newline in pattern:
msg = '%s (line %d, column %d)' % (msg, self.lineno, self.colno)
else:
self.lineno = self.colno = None
super().__init__(msg)
class _NamedIntConstant(int):
def __new__(cls, value, name):
self = super(_NamedIntConstant, cls).__new__(cls, value)
self.name = name
return self
def __str__(self):
return self.name
__repr__ = __str__
MAXREPEAT = _NamedIntConstant(MAXREPEAT, 'MAXREPEAT')
def _makecodes(names):
names = names.strip().split()
items = [_NamedIntConstant(i, name) for i, name in enumerate(names)]
globals().update({item.name: item for item in items})
return items
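# Illustration only (hypothetical names, not executed as part of this module):
# a call such as _makecodes("ALPHA BETA") would bind ALPHA == 0 and BETA == 1
# in the module namespace and return [ALPHA, BETA]; str(ALPHA) yields the
# name 'ALPHA', while the integer value is what gets written into the
# generated C header by dump() below.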
# operators
# failure=0 success=1 (just because it looks better that way :-)
OPCODES = _makecodes("""
FAILURE SUCCESS
ANY ANY_ALL
ASSERT ASSERT_NOT
AT
BRANCH
CALL
CATEGORY
CHARSET BIGCHARSET
GROUPREF GROUPREF_EXISTS GROUPREF_IGNORE
IN IN_IGNORE
INFO
JUMP
LITERAL LITERAL_IGNORE
MARK
MAX_UNTIL
MIN_UNTIL
NOT_LITERAL NOT_LITERAL_IGNORE
NEGATE
RANGE
REPEAT
REPEAT_ONE
SUBPATTERN
MIN_REPEAT_ONE
RANGE_IGNORE
MIN_REPEAT MAX_REPEAT
""")
del OPCODES[-2:] # remove MIN_REPEAT and MAX_REPEAT
# positions
ATCODES = _makecodes("""
AT_BEGINNING AT_BEGINNING_LINE AT_BEGINNING_STRING
AT_BOUNDARY AT_NON_BOUNDARY
AT_END AT_END_LINE AT_END_STRING
AT_LOC_BOUNDARY AT_LOC_NON_BOUNDARY
AT_UNI_BOUNDARY AT_UNI_NON_BOUNDARY
""")
# categories
CHCODES = _makecodes("""
CATEGORY_DIGIT CATEGORY_NOT_DIGIT
CATEGORY_SPACE CATEGORY_NOT_SPACE
CATEGORY_WORD CATEGORY_NOT_WORD
CATEGORY_LINEBREAK CATEGORY_NOT_LINEBREAK
CATEGORY_LOC_WORD CATEGORY_LOC_NOT_WORD
CATEGORY_UNI_DIGIT CATEGORY_UNI_NOT_DIGIT
CATEGORY_UNI_SPACE CATEGORY_UNI_NOT_SPACE
CATEGORY_UNI_WORD CATEGORY_UNI_NOT_WORD
CATEGORY_UNI_LINEBREAK CATEGORY_UNI_NOT_LINEBREAK
""")
# replacement operations for "ignore case" mode
OP_IGNORE = {
GROUPREF: GROUPREF_IGNORE,
IN: IN_IGNORE,
LITERAL: LITERAL_IGNORE,
NOT_LITERAL: NOT_LITERAL_IGNORE,
RANGE: RANGE_IGNORE,
}
AT_MULTILINE = {
AT_BEGINNING: AT_BEGINNING_LINE,
AT_END: AT_END_LINE
}
AT_LOCALE = {
AT_BOUNDARY: AT_LOC_BOUNDARY,
AT_NON_BOUNDARY: AT_LOC_NON_BOUNDARY
}
AT_UNICODE = {
AT_BOUNDARY: AT_UNI_BOUNDARY,
AT_NON_BOUNDARY: AT_UNI_NON_BOUNDARY
}
CH_LOCALE = {
CATEGORY_DIGIT: CATEGORY_DIGIT,
CATEGORY_NOT_DIGIT: CATEGORY_NOT_DIGIT,
CATEGORY_SPACE: CATEGORY_SPACE,
CATEGORY_NOT_SPACE: CATEGORY_NOT_SPACE,
CATEGORY_WORD: CATEGORY_LOC_WORD,
CATEGORY_NOT_WORD: CATEGORY_LOC_NOT_WORD,
CATEGORY_LINEBREAK: CATEGORY_LINEBREAK,
CATEGORY_NOT_LINEBREAK: CATEGORY_NOT_LINEBREAK
}
CH_UNICODE = {
CATEGORY_DIGIT: CATEGORY_UNI_DIGIT,
CATEGORY_NOT_DIGIT: CATEGORY_UNI_NOT_DIGIT,
CATEGORY_SPACE: CATEGORY_UNI_SPACE,
CATEGORY_NOT_SPACE: CATEGORY_UNI_NOT_SPACE,
CATEGORY_WORD: CATEGORY_UNI_WORD,
CATEGORY_NOT_WORD: CATEGORY_UNI_NOT_WORD,
CATEGORY_LINEBREAK: CATEGORY_UNI_LINEBREAK,
CATEGORY_NOT_LINEBREAK: CATEGORY_UNI_NOT_LINEBREAK
}
# flags
SRE_FLAG_TEMPLATE = 1 # template mode (disable backtracking)
SRE_FLAG_IGNORECASE = 2 # case insensitive
SRE_FLAG_LOCALE = 4 # honour system locale
SRE_FLAG_MULTILINE = 8 # treat target as multiline string
SRE_FLAG_DOTALL = 16 # treat target as a single string
SRE_FLAG_UNICODE = 32 # use unicode "locale"
SRE_FLAG_VERBOSE = 64 # ignore whitespace and comments
SRE_FLAG_DEBUG = 128 # debugging
SRE_FLAG_ASCII = 256 # use ascii "locale"
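# Note: the SRE_FLAG_* values are bit flags and combine with bitwise OR, e.g.
# a case-insensitive multiline pattern is compiled with
#   SRE_FLAG_IGNORECASE | SRE_FLAG_MULTILINE   # == 2 | 8 == 10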
# flags for INFO primitive
SRE_INFO_PREFIX = 1 # has prefix
SRE_INFO_LITERAL = 2 # entire pattern is literal (given by prefix)
SRE_INFO_CHARSET = 4 # pattern starts with character from given set
if __name__ == "__main__":
def dump(f, d, prefix):
items = sorted(d)
for item in items:
f.write("#define %s_%s %d\n" % (prefix, item, item))
with open("sre_constants.h", "w") as f:
f.write("""\
/*
* Secret Labs' Regular Expression Engine
*
* regular expression matching engine
*
* NOTE: This file is generated by sre_constants.py. If you need
* to change anything in here, edit sre_constants.py and run it.
*
* Copyright (c) 1997-2001 by Secret Labs AB. All rights reserved.
*
* See the _sre.c file for information on usage and redistribution.
*/
""")
f.write("#define SRE_MAGIC %d\n" % MAGIC)
dump(f, OPCODES, "SRE_OP")
dump(f, ATCODES, "SRE")
dump(f, CHCODES, "SRE")
f.write("#define SRE_FLAG_TEMPLATE %d\n" % SRE_FLAG_TEMPLATE)
f.write("#define SRE_FLAG_IGNORECASE %d\n" % SRE_FLAG_IGNORECASE)
f.write("#define SRE_FLAG_LOCALE %d\n" % SRE_FLAG_LOCALE)
f.write("#define SRE_FLAG_MULTILINE %d\n" % SRE_FLAG_MULTILINE)
f.write("#define SRE_FLAG_DOTALL %d\n" % SRE_FLAG_DOTALL)
f.write("#define SRE_FLAG_UNICODE %d\n" % SRE_FLAG_UNICODE)
f.write("#define SRE_FLAG_VERBOSE %d\n" % SRE_FLAG_VERBOSE)
f.write("#define SRE_FLAG_DEBUG %d\n" % SRE_FLAG_DEBUG)
f.write("#define SRE_FLAG_ASCII %d\n" % SRE_FLAG_ASCII)
f.write("#define SRE_INFO_PREFIX %d\n" % SRE_INFO_PREFIX)
f.write("#define SRE_INFO_LITERAL %d\n" % SRE_INFO_LITERAL)
f.write("#define SRE_INFO_CHARSET %d\n" % SRE_INFO_CHARSET)
print("done")
|
|
#
# Created as part of the StratusLab project (http://stratuslab.eu),
# co-funded by the European Commission under the Grant Agreement
# INFSO-RI-261552.
#
# Copyright (c) 2011, SixSq Sarl
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
import re
import shutil
import tempfile
import time
import xml.etree.ElementTree as ET
import requests
from stratuslab import Exceptions
from stratuslab import Defaults
from stratuslab.Util import appendOrReplaceInFile, execute, fileAppendContent, \
fileGetContent, filePutContent, scp, sshCmd, set_stdouterr
import stratuslab.Util as Util
from stratuslab.system.PackageInfo import PackageInfo
from stratuslab.system import Systems
from stratuslab.Exceptions import ExecutionException
class BaseSystem(object):
os = ''
caRepoName = 'CAs'
voIdCardUrl = 'http://operations-portal.egi.eu/xml/voIDCard/public/all/true'
vomsesDir = '/etc/grid-security/vomsdir'
def __init__(self):
self._set_stdouterr()
self.extraRepos = {}
self.packages = {}
self.packages.update({'opennebula' : PackageInfo('one-3.2-StratusLab')})
self.installPackagesErrorMsgs = []
self.repoFileNamePattern = '/etc/%s'
self.certificateAuthorityPackages = ''
self.certificateAuthorityRepo = ''
self.workOnFrontend()
self.oneDbUsername = None
self.oneDbPassword = None
self.qemuConf = '/etc/libvirt/qemu.conf'
self.shareType = Defaults.SHARE_TYPE
def init(self):
self._setOneHome()
def _set_stdouterr(self):
set_stdouterr(self)
# -------------------------------------------
# Packages manager and related
# -------------------------------------------
def addRepositories(self, packages):
pass
def updatePackageManager(self):
pass
def installWebServer(self):
pass
def installPackages(self, packages):
if len(packages) < 1:
return
self.addRepositories(packages)
packages_versioned = []
for package in packages:
packages_versioned.append(
self.getPackageWithVersionForInstall(package))
cmd = '%s %s' % (self.installCmd, ' '.join(packages_versioned))
rc, output = self._executeWithOutput(cmd, shell=True)
if rc != 0:
raise ExecutionException('Failed to install: %s\n%s' % \
(', '.join(packages_versioned), output))
Util.printDetail(output, self.verboseLevel, Util.VERBOSE_LEVEL_DETAILED)
def getPackageWithVersionForInstall(self, package):
try:
self.packages[package]
except KeyError:
return package
else:
if self.packages[package].packageVersion:
return '%s%s%s*' % (self.packages[package].packageName,
self._getPackageAndVersionSeparator(),
self.packages[package].packageVersion)
else:
return self.packages[package].packageName
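# Illustration only (assumed, hypothetical package entry): if
#   self.packages['foo'] = PackageInfo('foo', ...)  # with packageVersion '1.2'
# and the OS separator is '-', getPackageWithVersionForInstall('foo') would
# return the install spec 'foo-1.2*'; entries without a version fall back to
# the bare package name, and unregistered packages are returned unchanged.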
def _getPackageAndVersionSeparator(self):
return Systems.getPackageAndVersionSeparatorBasedOnOs(self.os)
def installNodePackages(self, packages):
if len(packages) > 0:
rc, output = self._nodeShell('%s %s' %
(self.installCmd, ' '.join(packages)),
withOutput=True)
if rc != 0:
raise Exceptions.ExecutionException('Error installing packages: %s\n%s' % \
(packages, output))
for err in self.installPackagesErrorMsgs:
if re.search(err, output, re.M):
raise Exceptions.ExecutionException('Error installing packages: %s\n%s' % \
(packages, output))
def installFrontendDependencies(self):
self.addRepositories(self.frontendDeps)
self.updatePackageManager()
self.installPackages(self.frontendDeps)
def installNodeDependencies(self):
self.installNodePackages(self.nodeDeps)
def installHypervisor(self):
self.installNodePackages(self.hypervisorDeps.get(self.hypervisor))
def _updatePackageAndRepoInfo(self, packageName, repoName, repoConf):
self.packages[packageName] = PackageInfo(packageName, repository=repoName)
self.extraRepos[repoName] = {'content' : repoConf,
'filename' : self.repoFileNamePattern % repoName}
def getPackageName(self, package):
return self.packages[package].packageName
def getPackageConfigFileName(self, package):
return self.packages[package].configFile
def getPackageInitdScriptName(self, package):
return self.packages[package].initdScriptName
def getPakcageRepositoryName(self, package):
return self.packages[package].repository
def getPakcageRepositoryConfig(self, package):
repoName = self.getPakcageRepositoryName(package)
return self.extraRepos[repoName]
def getIsPackageInstalledCommand(self, package):
pass
def isPackageInstalled(self, package):
cmd = self.getIsPackageInstalledCommand(package)
rc, output = self._executeWithOutput(cmd, shell=True)
if rc != 0:
Util.printDetail(output)
return False
return True
def startService(self, service):
return self._operationOnService(service, 'start')
def stopService(self, service):
return self._operationOnService(service, 'stop')
def restartService(self, service):
return self._operationOnService(service, 'restart')
def _operationOnService(self, service, operation):
cmd = ['service', service, operation]
rc, output = self._executeWithOutput(cmd)
if rc != 0:
Util.printDetail(output)
return rc
def startCloudSystem(self):
self.stopService('oned')
if self.startService('oned'):
Util.printError("ONE failed to start")
Util.printDetail('Waiting for ONE to finish starting')
time.sleep(10)
def enableServiceOnBoot(self, service, level='3'):
return 0
# -------------------------------------------
# ONE admin creation
# -------------------------------------------
def createCloudGroup(self, groupname, gid):
self.oneGroup = groupname
self.oneGid = gid
self.executeCmd(['groupadd', '-g', self.oneGid,
self.oneGroup])
def createCloudAdmin(self):
# see below...
# self.createDirsCmd(os.path.dirname(self.oneHome))
self.executeCmd(['useradd', '-g',
self.oneGroup, '-u', self.oneUid, self.oneUsername,
'-s', '/bin/bash', '-p', self.onePassword, '--create-home',
'--expiredate ""', '--inactive -1'])
# hack to reset the value of self.oneHome
# the code assumes that the account exists before initializing this class
# this is not the case as it is created by installing the one package
self.oneHome = None
self._setOneHome()
# -------------------------------------------
# ONE admin env config and related
# -------------------------------------------
def configureCloudAdminEnv(self, ONeDPort, stratuslabLocation):
self.ONeDPort = ONeDPort
self.appendOrReplaceInFileCmd('%s/.bashrc' % self.oneHome,
'export ONE_LOCATION',
'export ONE_LOCATION=%s' % self.oneHome)
self.appendOrReplaceInFileCmd('%s/.bashrc' % self.oneHome,
'export ONE_XMLRPC',
'export ONE_XMLRPC=http://localhost:%s/RPC2' % self.ONeDPort)
self.appendOrReplaceInFileCmd('%s/.bashrc' % self.oneHome,
'export PATH',
'export PATH=%s/bin:%s' % (self.oneHome, os.getenv('PATH')))
if stratuslabLocation:
self.appendOrReplaceInFileCmd('%s/.bashrc' % self.oneHome,
'export STRATUSLAB_LOCATION',
'export STRATUSLAB_LOCATION=%s' % stratuslabLocation)
self.filePutContentsCmd('%s/.bash_login' % self.oneHome,
'[ -f ~/.bashrc ] && source ~/.bashrc')
self.setOwnerCmd('%s/.bash_login' % self.oneHome)
# Hack to always load .bashrc
self.executeCmd(['sed -i \'s/\[ -z \\\"\$PS1\\\" \\] \\&\\& '
'return/#&/\' %s/.bashrc' % self.oneHome], shell=True)
def configureCloudAdminSshKeys(self):
keyFileName = '%s/.ssh/id_rsa' % self.oneHome
if os.path.exists(keyFileName):
Util.printDetail('Key file %s already exists, skipping this step' % keyFileName)
return
self.createDirsCmd(os.path.dirname(keyFileName))
self.setOwnerCmd(os.path.dirname(keyFileName))
self.executeCmd(['ssh-keygen -f %s -N "" -q' % keyFileName],
shell=True)
self.setOwnerCmd(keyFileName)
self.setOwnerCmd('%s.pub' % keyFileName)
self.copyCmd('%s.pub' % keyFileName,
'%s/.ssh/authorized_keys' % self.oneHome)
self.setOwnerCmd('%s/.ssh/authorized_keys' % self.oneHome)
self._configureCloudAdminSsh()
def configureCloudAdminSshKeysNode(self):
self.createDirsCmd('%s/.ssh/' % self.oneHome)
self.setOwnerCmd('%s/.ssh/' % self.oneHome)
# FIXME: why is the ssh key-pair from the Frontend pushed to the Node?
# ssh-keygen on the Node should be used to generate a user-specific
# ssh key-pair on that machine instead.
oneKey = fileGetContent('%s/.ssh/id_rsa' % self.oneHome)
self._filePutContentAsOneAdmin('%s/.ssh/id_rsa' % self.oneHome, oneKey)
oneKeyPub = fileGetContent('%s/.ssh/id_rsa.pub' % self.oneHome)
self._filePutContentAsOneAdmin('%s/.ssh/authorized_keys' % self.oneHome,
oneKeyPub)
self.chmodCmd('%s/.ssh/id_rsa' % self.oneHome, 0600)
self._configureCloudAdminSsh()
def _configureCloudAdminSsh(self):
confFile = '%s/.ssh/config' % self.oneHome
self.appendOrReplaceInFileCmd(confFile,
'^Host.*$', 'Host *')
self.appendOrReplaceInFileCmd(confFile,
'^StrictHost.*$', 'StrictHostKeyChecking no')
self.setOwnerCmd(confFile)
self.chmodCmd(confFile, 0600)
def configureCloudAdminAccount(self):
# hack to reset the value of self.oneHome
# the code assumes that the account exists before initializing this class
# this is not the case as it is created by installing the one package
self.oneHome = None
self._setOneHome()
oneAuthFile = '%s/.one/one_auth' % self.oneHome
self.appendOrReplaceInFileCmd(oneAuthFile,
self.oneUsername, '%s:%s' % (self.oneUsername, self.onePassword))
os.environ['ONE_AUTH'] = oneAuthFile
self.addCloudAdminToExtraGroups()
self.configureCloudAdminSudoFrontend()
def addCloudAdminToExtraGroups(self):
if Util.isTrueConfVal(self.persistentDisk) and self.persistentDiskStorage == 'lvm':
self._addCloudAdminToExtraGroup(self.persistentDiskLvmDevfilesGroup)
def _addCloudAdminToExtraGroup(self, group):
self.executeCmd(['usermod', '-aG', group, self.oneUsername])
def configureCloudAdminSudoFrontend(self):
commands = ['/sbin/lvs',
'/var/lib/stratuslab/python/stratuslab/tm/TMMakeVFAT.py']
self._configureCloudAdminSudo(commands)
def configureCloudAdminSudoNode(self):
commands = ['/bin/chmod']
self._configureCloudAdminSudo(commands)
def _configureCloudAdminSudo(self, commands):
Util.printDetail("Configuring sudo rights for '%s'" % self.oneUsername)
for cmd in commands:
replace = '%s ALL = NOPASSWD: %s' % (self.oneUsername, cmd)
self.appendOrReplaceInFileCmd('/etc/sudoers', '%s' % replace, replace)
replace = 'Defaults:%s !requiretty' % self.oneUsername
self.appendOrReplaceInFileCmd('/etc/sudoers', '%s' % replace, replace)
replace = 'Defaults:%s !requiretty' % 'root'
self.appendOrReplaceInFileCmd('/etc/sudoers', '%s' % replace, replace)
def _setOneHome(self):
if not self.oneHome:
self.oneHome = os.path.expanduser('~' + self.oneUsername)
# -------------------------------------------
# Persistent disks
# -------------------------------------------
def configureCloudAdminPdiskNode(self):
pdiskAttach = '/usr/sbin/attach-persistent-disk.sh'
pdiskDetach = '/usr/sbin/detach-persistent-disk.sh'
if Util.isFalseConfVal(getattr(self, 'persistentDisks', False)):
self.executeCmd('"[ -f %(pd)s ] || { touch %(pd)s; chmod +x %(pd)s; }"' %
{'pd':pdiskDetach}, shell=True)
return
Util.printDetail("Configuring persistent disks management for "
"'%s' user." % self.oneUsername)
line = 'oneadmin ALL = NOPASSWD: %s, %s' % (pdiskAttach, pdiskDetach)
self.appendOrReplaceInFileCmd('/etc/sudoers',
'^%s.*persistent-disk.*$' %
self.oneUsername, line)
# -------------------------------------------
# File sharing configuration
# -------------------------------------------
def configureNewNfsServer(self, mountPoint, networkAddr, networkMask):
self.createDirsCmd(mountPoint)
self.appendOrReplaceInFileCmd('/etc/exports', '%s .*' % mountPoint,
'%s %s/%s(rw,async,no_subtree_check,no_root_squash)' %
(mountPoint, networkAddr, networkMask))
self.executeCmd(['exportfs', '-a'])
def configureExistingNfsShare(self, shareLocation, mountPoint):
self.createDirsCmd(mountPoint)
self.appendOrReplaceInFileCmd('/etc/fstab', '%s .*' % shareLocation,
'%s %s nfs soft,intr,rsize=32768,wsize=32768,rw 0 0' % (
shareLocation, mountPoint))
self.executeCmd(['mount', '-a'])
def configureSshServer(self):
pass
def configureSshClient(self, sharedDir):
self.createDirsCmd(sharedDir)
self.setOwnerCmd(sharedDir)
# -------------------------------------------
# Hypervisor configuration
# -------------------------------------------
def configureHypervisor(self):
if self.hypervisor == 'xen':
self._configureXen()
elif self.hypervisor == 'kvm':
self._configureKvm()
def _configureKvm(self):
self.executeCmd(['modprobe', 'kvm_intel'])
self.executeCmd(['modprobe', 'kvm_amd'])
# seen a case where the permissions of /dev/kvm were 0600
self.executeCmd(['chmod', '0666', '/dev/kvm'])
if self.shareType == 'nfs':
self._configureQemuUserOnFrontend()
def _configureQemuUserOnFrontend(self):
"""Add qemu user on Fronted with the same UID and GID as on the node
being configured. Add qemu user to 'cloud' group both on Frontend
and the node.
"""
if self.shareType != 'nfs':
return
user = group = 'qemu'
getUidGidCmd = "getent passwd %s"
Util.printDetail("Configuring '%s' user on Frontend as shared filesystem setup requested." % user)
def getUidGidFromNode(user):
rc, output = self._nodeShell(getUidGidCmd % user,
withOutput=True)
if rc != 0:
Util.printError("Error getting '%s' user UID/GID from Node.\n%s" %
(user,output))
return _extractUidGidFromGetentPasswdOutput(output)
def _extractUidGidFromGetentPasswdOutput(output):
uid, gid = output.split(':')[2:4] # uid, gid
if not all([uid, gid]):
Util.printError("Error extracting '%s' user UID/GID from output.\n%s" %
(user,output))
return uid, gid
uidNode, gidNode = getUidGidFromNode(user)
rc, output = self._executeWithOutput((getUidGidCmd % uidNode).split())
if rc == 0:
uidLocal, gidLocal = _extractUidGidFromGetentPasswdOutput(output)
Util.printDetail("User with UID:%s/GID:%s already configured on Frontend." %
(uidLocal, gidLocal), verboseLevel=self.verboseLevel)
if gidNode != gidLocal:
Util.printError("Frontend user '%s' GID:%s doesn't match GID:%s on Node %s." %
(gidLocal, user, gidNode, self.nodeAddr))
else:
self._execute(['groupadd', '-g', gidNode, '-r', group])
self._execute(['useradd', '-r', '-u', uidNode, '-g', group,
'-d', '/', '-s', '/sbin/nologin',
'-c', '"%s user"'%user, user])
# Instruct libvirt to run VMs with GID of ONE group.
self.appendOrReplaceInFileCmd(self.qemuConf, '^group.*$',
'group = "%s"' % self.oneGroup)
# TODO: check why this didn't work
# # Add the user to ONE admin group. Directory with the images on
# # shared Frontend is restricted to ONE admin user.
# cmd = ['usermod', '-aG', self.oneGroup, user]
# self._execute(cmd)
# self._nodeShell(cmd)
def _configureXen(self):
self.appendOrReplaceInFileCmd('/etc/sudoers', self.oneUsername,
'%s ALL=(ALL) NOPASSWD: /usr/sbin/xm *' % self.oneUsername)
self.appendOrReplaceInFileCmd('/etc/sudoers', self.oneUsername,
'%s ALL=(ALL) NOPASSWD: /usr/sbin/xentop *' % self.oneUsername)
self.executeCmd(['sed -i -E \'s/Defaults[[:space:]]+requiretty/#&/\''
' /etc/sudoers'])
def configureLibvirt(self):
libvirtConf = '/etc/libvirt/libvirtd.conf'
self.appendOrReplaceInFileCmd(libvirtConf, '^unix_sock_group.*$',
'unix_sock_group = "cloud"')
self.appendOrReplaceInFileCmd(libvirtConf, '^unix_sock_ro_perms.*$',
'unix_sock_ro_perms = "0777"')
self.appendOrReplaceInFileCmd(libvirtConf, '^unix_sock_rw_perms.*$',
'unix_sock_rw_perms = "0770"')
self.appendOrReplaceInFileCmd(libvirtConf, '^auth_unix_ro.*$',
'auth_unix_ro = "none"')
self.appendOrReplaceInFileCmd(libvirtConf, '^auth_unix_rw.*$',
'auth_unix_rw = "none"')
self.appendOrReplaceInFileCmd(self.qemuConf, '^vnc_listen.*$',
'vnc_listen = "0.0.0.0"')
def startLibvirt(self):
rc, output = self.executeCmd('service libvirtd restart'.split(),
withOutput=True)
if rc != 0:
Util.printError('Could not start libvirt.\n%s' % output)
# -------------------------------------------
# Front-end related methods
# -------------------------------------------
def execute(self, commandAndArgsList, **kwargs):
return self._execute(commandAndArgsList, **kwargs)
def _execute(self, commandAndArgsList, **kwargs):
stdout = kwargs.get('stdout', self.stdout)
stderr = kwargs.get('stderr', self.stderr)
if kwargs.has_key('stdout'):
del kwargs['stdout']
if kwargs.has_key('stderr'):
del kwargs['stderr']
return execute(commandAndArgsList,
stdout=stdout,
stderr=stderr,
verboseLevel=self.verboseLevel,
verboseThreshold=Util.VERBOSE_LEVEL_DETAILED,
**kwargs)
def _executeWithOutput(self, command, **kwargs):
kwargs['withOutput'] = True
return self._execute(command, **kwargs)
def _setCloudAdminOwner(self, path):
os.chown(path, int(self.oneUid), int(self.oneGid))
def _createDirs(self, path):
if not os.path.isdir(path) and not os.path.isfile(path):
os.makedirs(path)
def _copy(self, src, dst):
if os.path.isfile(src):
shutil.copy(src, dst)
else:
shutil.copytree(src, dst)
def _remove(self, path):
if os.path.isfile(path):
os.remove(path)
else:
shutil.rmtree(path)
# -------------------------------------------
# Node related methods
# -------------------------------------------
def _nodeShell(self, command, **kwargs):
stdout = kwargs.get('stdout', self.stdout)
stderr = kwargs.get('stderr', self.stderr)
if kwargs.has_key('stdout'):
del kwargs['stdout']
if kwargs.has_key('stderr'):
del kwargs['stderr']
if isinstance(command, list):
command = ' '.join(command)
return sshCmd(command,
self.nodeAddr,
sshKey=self.nodePrivateKey,
stdout=stdout,
stderr=stderr,
verboseLevel=self.verboseLevel,
verboseThreshold=Util.VERBOSE_LEVEL_DETAILED,
**kwargs)
def _nodeCopy(self, source, dest, **kwargs):
stdout = kwargs.get('stdout', self.stdout)
stderr = kwargs.get('stderr', self.stderr)
if kwargs.has_key('stdout'):
del kwargs['stdout']
if kwargs.has_key('stderr'):
del kwargs['stderr']
return scp(source,
'root@%s:%s' % (self.nodeAddr, dest),
self.nodePrivateKey,
stdout=stdout,
stderr=stderr,
verboseLevel=self.verboseLevel,
verboseThreshold=Util.VERBOSE_LEVEL_DETAILED,
**kwargs)
def _remoteSetCloudAdminOwner(self, path):
self._nodeShell(['chown %s:%s %s' % (self.oneUid,
self.oneGid, path)])
def _remoteCreateDirs(self, path):
self._nodeShell('mkdir -p %s' % path)
def _remoteAppendOrReplaceInFile(self, filename, search, replace):
res = self._nodeShell(['grep', '"%s"'%search, filename])
replace = Util.escapeDoubleQuotes(replace)
if self._patternExists(res):
rc, output = self._nodeShell('"sed -i \'s|%s|%s|\' %s"' % (search, replace, filename),
withOutput=True, shell=True)
if rc != 0:
Util.printError("Failed to modify %s.\n%s" % (filename, output))
else:
self._remoteFileAppendContents(filename, replace)
def _patternExists(self, returnCode):
return returnCode == 0
def _remoteCopyFile(self, src, dest):
self._nodeShell(['cp -rf %s %s' % (src, dest)])
def _remoteRemove(self, path):
self._nodeShell(['rm -rf %s' % path])
def _remoteFilePutContents(self, filename, data):
data = Util.escapeDoubleQuotes(data, times=4)
rc, output = self._nodeShell('"echo \\"%s\\" > %s"' % (data, filename),
withOutput=True, shell=True)
if rc != 0:
Util.printError("Failed to write to %s\n%s" % (filename, output))
def _remoteFileAppendContents(self, filename, data):
data = Util.escapeDoubleQuotes(data, times=4)
rc, output = self._nodeShell('"echo \\"%s\\" >> %s"' % (data, filename),
withOutput=True, shell=True)
if rc != 0:
Util.printError("Failed to append to %s\n%s" % (filename, output))
def _filePutContentAsOneAdmin(self, filename, content):
self.filePutContentsCmd(filename, content)
self.setOwnerCmd(filename)
def _remoteChmod(self, path, mode):
return self._nodeShell('chmod %o %s' % (mode, path))
def _remoteFileExists(self, path):
return self._nodeShell('ls %s' % path, sshQuiet=True) == 0
# -------------------------------------------
# General
# -------------------------------------------
def setNodeAddr(self, nodeAddr):
self.nodeAddr = nodeAddr
def setNodePrivateKey(self, privateKey):
self.nodePrivateKey = privateKey
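# The two methods below switch this object between local and remote operation:
# workOnFrontend() binds the generic *Cmd callables (appendOrReplaceInFileCmd,
# executeCmd, copyCmd, ...) to local implementations, while workOnNode() binds
# them to their ssh/scp-based counterparts, so the same configuration routines
# can target either the Frontend or a Node without changing the calling code.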
def workOnFrontend(self):
self.appendOrReplaceInFileCmd = appendOrReplaceInFile
self.setOwnerCmd = self._setCloudAdminOwner
self.executeCmd = self._execute
self.executeCmdWithOutput = self._executeWithOutput
self.createDirsCmd = self._createDirs
self.filePutContentsCmd = filePutContent
self.fileAppendContentsCmd = fileAppendContent
self.chmodCmd = os.chmod
self.copyCmd = self._copy
self.duplicateCmd = self._copy
self.removeCmd = self._remove
def workOnNode(self):
self.appendOrReplaceInFileCmd = self._remoteAppendOrReplaceInFile
self.setOwnerCmd = self._remoteSetCloudAdminOwner
self.executeCmd = self._nodeShell
self.duplicateCmd = self._remoteCopyFile
self.createDirsCmd = self._remoteCreateDirs
self.filePutContentsCmd = self._remoteFilePutContents
self.fileAppendContentsCmd = self._remoteFileAppendContents
self.chmodCmd = self._remoteChmod
self.copyCmd = self._nodeCopy
self.removeCmd = self._remoteRemove
def configureQuarantine(self):
filename = os.path.join(Defaults.ETC_DIR, 'quarantine.cfg')
search = '^PERIOD.*$'
replace = 'PERIOD=%(quarantinePeriod)s' % self.__dict__
Util.appendOrReplaceInFile(filename, search, replace)
def configureCloudProxyService(self):
self.installPackages(['stratuslab-one-proxy', 'stratuslab-nginx-load'])
self._configureProxyDefaultUsers()
self._restartJetty()
def _configureProxyDefaultUsers(self):
self._configureProxyDefaultUsersUsernamePassword()
def _configureProxyDefaultUsersUsernamePassword(self):
filename = Defaults.AUTHN_CONFIG_FILE
search = self.oneUsername
replace = '%(oneUsername)s=%(proxyOneadminPassword)s,cloud-access' % self.__dict__
Util.appendOrReplaceInFile(filename, search, replace)
def _restartJetty(self):
self.executeCmd('/etc/init.d/one-proxy restart'.split(' '))
# -------------------------------------------
# Firewall
# -------------------------------------------
# TODO: extract Firewall class from the code below
DEFAULT_FIREWALL_TABLE = 'filter'
# redefine in sub-class to point to required file
FILE_FIREWALL_RULES = '/etc/sysconfig/iptables'
IP_TABLES_LIST = ['filter','nat','mangle','raw']
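# The firewall helpers below work on plain rule specifications: each rule is a
# dict with a 'rule' string in iptables syntax and an optional 'table' key that
# defaults to DEFAULT_FIREWALL_TABLE, e.g. (illustrative placeholder only)
#   {'table': 'filter', 'rule': '-A INPUT -p tcp -m tcp --dport <port> -j ACCEPT'}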
def _configureNetworkInterface(self, device, ip, netmask):
pass
def configureFirewall(self):
self._loadNetfilterModules()
self._configureFirewallForProxy()
self._configureFirewallNat()
self._persistFirewallRules()
def _configureFirewallForProxy(self):
port = str(self.onePort)
rules = ({'table':'filter',
'rule' :'-A INPUT -s 127.0.0.1 -p tcp -m tcp --dport %s -j ACCEPT' % port},
{'table':'filter',
'rule' :'-A INPUT -p tcp -m tcp --dport %s -j REJECT --reject-with icmp-port-unreachable' % port})
if not self._isSetFirewallRulesAll(rules):
self._setFirewallRulesAll(rules)
def _configureFirewallNat(self):
if self.nat.lower() in ['false', 'no', 'off', '0', '']:
return None
self._configureFirewallNatNetworking()
networkWithMask = '%s/%s' % (self.natNetwork, self.natNetmask)
rules = ({'table':'nat',
'rule':'-A POSTROUTING -s %s -d ! %s -j MASQUERADE' % ((networkWithMask,)*2)},
{'table':'filter',
'rule':'-A FORWARD -d %s -m state --state RELATED,ESTABLISHED -j ACCEPT' % networkWithMask},
{'table':'filter',
'rule':'-A FORWARD -d %s -j ACCEPT' % networkWithMask})
if not self._isSetFirewallRulesAll(rules):
self._setFirewallRulesAll(rules)
def _configureFirewallNatNetworking(self):
self._enableIpForwarding()
device = self.natNetworkInterface
ip = getattr(self, 'natGateway', '')
if not ip:
ip = Util.gatewayIpFromNetAddress(self.natNetwork)
self._configureVirtualNetInterface(device, ip,
self.natNetmask)
@staticmethod
def enableIpForwarding():
Util.printDetail('Enabling packets forwarding.')
file(FILE_IPFORWARD_HOT_ENABLE, 'w').write('1')
appendOrReplaceInFile(FILE_IPFORWARD_PERSIST,
'net.ipv4.ip_forward',
'net.ipv4.ip_forward = 1')
def _enableIpForwarding(self):
return BaseSystem.enableIpForwarding()
def _configureVirtualNetInterface(self, device, ip, netmask):
device = device + ':privlan'
Util.printDetail('Configuring network interface %s.' % device)
self._configureNetworkInterface(device, ip, netmask)
Util.printDetail('Starting network interface %s.' % device)
self.executeCmd(['ifup', device])
def _persistFirewallRules(self):
self._saveFirewallRules(self.FILE_FIREWALL_RULES)
def _loadNetfilterModules(self):
# just in case the kernel modules were not yet loaded
devNull = open(os.path.devnull, 'w')
for table in self.IP_TABLES_LIST:
cmd = 'iptables -nL -t %s' % table
self.executeCmd(cmd.split(), stdout=devNull)
devNull.close()
def _saveFirewallRules(self, filename):
# back-up
self.executeCmd(('cp -fp %s %s.LAST'%((filename,)*2)).split(' '))
_,output = self.executeCmdWithOutput(['iptables-save'])
Util.printDetail('Saving firewall rules to %s.' % filename)
filePutContent(filename, output)
os.chmod(filename, 0600)
def _isSetFirewallRulesAll(self, rules):
tables = dict.fromkeys([r.get('table', self.DEFAULT_FIREWALL_TABLE)
for r in rules]).keys()
currentRules = self._getFirewallRulesPerTable(tables)
for ruleSpec in rules:
if not self._isSetFirewallRule(currentRules, ruleSpec):
return False
return True
def _getFirewallRulesPerTable(self, tables=IP_TABLES_LIST):
rules = {}
for table in tables:
rc, output = self.executeCmdWithOutput(('iptables-save -t %s' %
table).split(' '))
if rc != 0:
raise Exceptions.ExecutionException('iptables-save reported an error:\n%s'%
output)
rules.update({table:output})
return rules
def _isSetFirewallRule(self, currentRules, ruleSpec):
rule, table = self._getRuleAndTableFromRuleSpec(ruleSpec)
rulesInTable = currentRules[table]
if re.search(rule, rulesInTable, re.M):
return True
return False
def _setFirewallRulesAll(self, rules):
self._deleteFirewallRulesAllGiven(rules)
for ruleSpec in rules:
self._setFirewallRule(ruleSpec)
def _deleteFirewallRulesAllGiven(self, rules):
for ruleSpec in rules:
self._deleteFirewallRule(ruleSpec)
def _deleteFirewallRule(self, ruleSpec):
rule, table = self._getRuleAndTableFromRuleSpec(ruleSpec)
rule = '-D %s' % rule[3:] # remove action; leave chain and rule
self.executeCmd(('iptables -t %s %s' % (table,rule)).split(' '))
def _setFirewallRule(self, ruleSpec):
rule, table = self._getRuleAndTableFromRuleSpec(ruleSpec)
self.executeCmd(('iptables -t %s %s' % (table,rule)).split(' '))
def _getRuleAndTableFromRuleSpec(self, ruleSpec):
return ruleSpec['rule'], \
ruleSpec.get('table', self.DEFAULT_FIREWALL_TABLE)
# -------------------------------------------
# sendmail
# -------------------------------------------
def installSendmail(self):
package = 'sendmail'
if getattr(self, 'smtpHost', 'localhost') == 'localhost':
self.installPackages([self.getPackageName(package)])
else:
Util.printDetail('Skipping installation of %s' % package)
# -------------------------------------------
# CA
# -------------------------------------------
def installCAs(self):
def _isCertificateAuthority():
return Util.isTrueConfVal(getattr(self, 'certificateAuthority', False))
if not _isCertificateAuthority():
Util.printDetail('Requested not to install CAs.')
else:
self._installCAs()
self._installFetchCrl()
self._enableFetchCrl()
self._installVomsFiles()
def _enableFetchCrl(self):
pass
def _installCAs(self):
packages = []
if self.certificateAuthorityPackages and self.certificateAuthorityRepo:
caPackages = map(lambda x: x.strip(),
self.certificateAuthorityPackages.split(','))
packages.extend(caPackages)
repoConf = '\n'.join([line.strip() for line in self.certificateAuthorityRepo.strip().split('\n')])
repoName = self.caRepoName
for package in packages:
self._updatePackageAndRepoInfo(package, repoName, repoConf)
else:
packages.append(self.getPackageName('CA'))
self.installPackages(packages)
for package in packages:
if not self.isPackageInstalled(package):
Util.printError('Failed to install %s.' % package)
def installOpenNebula(self):
Util.printDetail('Installing OpenNebula')
self.installPackages([self.getPackageName('opennebula')])
def _installFetchCrl(self):
package = self.getPackageName('fetch-crl')
self.installPackages([package])
if not self.isPackageInstalled(package):
Util.printError('Failed to install %s.' % package)
def _installVomsFiles(self):
r = requests.get(self.voIdCardUrl)
if r.status_code == requests.codes.ok:
if not os.path.exists(self.vomsesDir):
try:
os.mkdir(self.vomsesDir)
except Exception as e:
Util.printError('could not create ' + self.vomsesDir)
vo_data = ET.fromstring(r.text)
for idcard in vo_data:
voname = idcard.attrib['Name']
vopath = os.path.join(self.vomsesDir, voname)
if not os.path.exists(vopath):
try:
os.mkdir(vopath)
except Exception as e:
Util.printError('could not create ' + vopath)
for server in idcard.findall('./gLiteConf/VOMSServers/VOMS_Server'):
hostname = server.find('hostname')
dn = server.find('X509Cert/DN')
ca_dn = server.find('X509Cert/CA_DN')
if hostname is not None and dn is not None and ca_dn is not None:
contents = '%s\n%s\n' % (dn.text, ca_dn.text)
path = os.path.join(vopath, hostname.text + '.lsc')
try:
with open(path, 'w') as f:
f.write(contents)
except Exception as e:
Util.printError('could not create file ' + path)
else:
Util.printError('error retrieving VO ID card data from ' + self.voIdCardUrl)
# -------------------------------------------
# DHCP server
# -------------------------------------------
NET_TYPES_DHCP = ['OnePublicNetwork', 'OneLocalNetwork']
def configureDhcpServer(self):
def _dhcpDefined():
return Util.isTrueConfVal(getattr(self, 'dhcp', 'False'))
def _dhcpNetTypesDefined():
return any([Util.isTrueConfVal(getattr(self, self._assembleDhcpAttributeName(v), 'False'))
for v in self.NET_TYPES_DHCP])
if not _dhcpDefined():
return
elif not _dhcpNetTypesDefined():
return
Util.printStep('Configuring DHCP service')
self._installDhcp()
self._configureDhcp()
self._startDhcp()
def _assembleDhcpAttributeName(self, postfix):
DHCP_PARAMETER_PREFIX = 'dhcp'
return '%s%s' % (DHCP_PARAMETER_PREFIX, postfix)
def _installDhcp(self):
Util.printDetail('Installing DHCP server.')
dhcpPackage = self.getPackageName('dhcp')
self.installPackages([dhcpPackage])
if not self.isPackageInstalled(dhcpPackage):
Util.printError('Failed to install %s.' % dhcpPackage)
def _configureDhcp(self):
def _isAllDhcpGroupsDefined(_groups):
return all(_groups.values())
def _getConfGlobals():
_globals = """
ddns-update-style none;
ignore unknown-clients;
ignore bootp;
"""
if hasattr(self, 'dhcpNtpServers') and self.dhcpNtpServers:
_globals += 'option ntp-servers %s;\n' % self.dhcpNtpServers
return _globals
def _getConfSubnets():
subnetTemplate = """
subnet %(subnet)s netmask %(netmask)s {
option routers %(routers)s;
}
"""
subnet = ''
# All net types are defined together with NATing. Assuming NATing for
# Local net type. Need to create a shared network.
if Util.isTrueConfVal(self.nat) and _isAllDhcpGroupsDefined(dhcpGroups):
subnet = """
shared-network StratusLab-LAN {
"""
for _type in self.NET_TYPES_DHCP:
subnet += subnetTemplate % {
'subnet' : getattr(self, self._assembleDhcpAttributeName('%sSubnet' % _type)),
'netmask' : getattr(self, self._assembleDhcpAttributeName('%sNetmask' % _type)),
'routers' : getattr(self, self._assembleDhcpAttributeName('%sRouters' % _type))}
subnet += "}\n"
elif Util.isTrueConfVal(self.nat) and dhcpGroups['OneLocalNetwork']:
subnet = """
shared-network StratusLab-LAN {
"""
# main interface
subnet += """
subnet %(subnet)s netmask %(netmask)s {
}
""" % {'subnet' : self.dhcpSubnet,
'netmask' : self.dhcpNetmask}
# virtual interface
natGateway = getattr(self, 'natGateway', '')
if not natGateway:
natGateway = Util.gatewayIpFromNetAddress(self.natNetwork)
subnet += subnetTemplate % {'subnet' : self.natNetwork,
'netmask' : self.natNetmask,
'routers' : natGateway}
subnet += "}\n"
elif dhcpGroups['OnePublicNetwork']:
# main interface
subnet += """
subnet %(subnet)s netmask %(netmask)s {
}
""" % {'subnet' : self.dhcpSubnet,
'netmask' : self.dhcpNetmask}
elif dhcpGroups['OneLocalNetwork']:
# virtual interface
subnet = subnetTemplate % {
'subnet' : self.dhcpOneLocalNetworkSubnet,
'netmask' : self.dhcpOneLocalNetworkNetmask,
'routers' : self.dhcpOneLocalNetworkRouters}
else:
Util.printWarning('Invalid parameters combination to configure DHCP.')
return subnet
def _getConfGroups():
groupHeadTemplate = """
group {
option broadcast-address %(broadcast)s;
option subnet-mask %(netmask)s;
option routers %(routers)s;
option domain-name "%(domainName)s";
option domain-name-servers %(nameservers)s;
"""
hostTemplate = """
host %(type)s-vm%(id)s {
hardware ethernet %(mac)s;
fixed-address %(ip)s;
max-lease-time %(leaseTime)s;
}
"""
groups = ''
for _type,ipsMacs in dhcpGroups.items():
if not ipsMacs:
continue
groups += groupHeadTemplate % \
{'broadcast' : getattr(self, self._assembleDhcpAttributeName('%sBroadcast' % _type)),
'netmask' : getattr(self, self._assembleDhcpAttributeName('%sNetmask' % _type)),
'routers' : getattr(self, self._assembleDhcpAttributeName('%sRouters' % _type)),
'domainName' : getattr(self, self._assembleDhcpAttributeName('%sDomainName' % _type)),
'nameservers' : getattr(self, self._assembleDhcpAttributeName('%sDomainNameServers' % _type))}
hosts = ''
for i,ipMac in enumerate(ipsMacs):
hosts += hostTemplate % {'type' : _type.lower(),
'id' : str(i),
'mac' : ipMac[1],
'ip' : ipMac[0],
'leaseTime' : self.dhcpLeaseTime}
groups += hosts
groups += '}\n'
return groups
Util.printDetail('Configuring DHCP server.')
_NOTHING = []
dhcpGroups = dict.fromkeys(self.NET_TYPES_DHCP, _NOTHING)
for netType in self.NET_TYPES_DHCP:
if Util.isTrueConfVal(getattr(self, self._assembleDhcpAttributeName(netType), False)):
dhcpGroups[netType] = self.__getIpMacTuplesForNetworkType(netType)
if not any(dhcpGroups.values()):
Util.printError('When configuring DHCP, IP/MAC pairs should be given for the %s networks.' %
','.join(self.NET_TYPES_DHCP))
content = _getConfGlobals() + \
_getConfSubnets() + \
_getConfGroups()
confFile = self.getPackageConfigFileName('dhcp')
Util.filePutContent(confFile, content)
def __getIpMacTuplesForNetworkType(self, _type):
if _type not in self.NET_TYPES_DHCP:
Util.printError('Expected one of: %s. Got %s'%(','.join(self.NET_TYPES_DHCP),_type))
_type = _type.replace(_type[0], _type[0].lower(), 1)
ips = [x for x in getattr(self, '%sAddr'%_type).split()]
macs = [x for x in getattr(self, '%sMac'%_type).split()]
if len(ips) != len(macs):
Util.printError('%s network: number of IPs should match number of MACs.'%_type)
return zip(ips, macs)
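# Illustration only (assumed configuration values): with
#   self.onePublicNetworkAddr = '10.0.0.2 10.0.0.3'
#   self.onePublicNetworkMac = 'aa:bb:cc:dd:ee:01 aa:bb:cc:dd:ee:02'
# __getIpMacTuplesForNetworkType('OnePublicNetwork') pairs them up as
#   [('10.0.0.2', 'aa:bb:cc:dd:ee:01'), ('10.0.0.3', 'aa:bb:cc:dd:ee:02')],
# and these tuples become the fixed-address host entries in the generated
# DHCP server configuration.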
def _startDhcp(self):
Util.printDetail('(Re)Starting DHCP server.')
serviceName = self.packages['dhcp'].initdScriptName
rc = self.restartService(serviceName)
if rc != 0:
Util.printError('Failed to (re)start DHCP service.')
# -------------------------------------------
# DB
# -------------------------------------------
def configureDatabase(self):
if self.oneDbHost in ['localhost', '127.0.0.1']:
Util.printDetail('Installing MySQL server.')
mysqlPackage = self.getPackageName('MySQLServer')
self.installPackages([mysqlPackage])
Util.printDetail('Starting MySQL server.')
mysqlService = self.getPackageInitdScriptName('MySQLServer')
self.startService(mysqlService)
Util.printDetail('Changing db root password')
self._configureRootDbUser(self.oneDbRootPassword)
Util.printDetail('Creating oneadmin db account')
self._configureDbUser(self.oneDbUsername, self.oneDbPassword)
else:
Util.printDetail('Skipping MySQL installation/configuration. It is assumed to be configured on %s' % self.oneDbHost)
def _configureRootDbUser(self, password):
rc, output = self._execute(["/usr/bin/mysqladmin", "-uroot", "password", "%s" % password], withOutput=True)
if rc != 0:
Util.printWarning("Couldn't set root password. Already set?\n%s" % output)
def _configureDbUser(self, username, password):
mysqlCommand = "/usr/bin/mysql -uroot -p%s" % self.oneDbRootPassword
userCreate = "CREATE USER '%s'@'localhost' IDENTIFIED BY '%s'" % (username, password)
userGrant = "GRANT CREATE, DROP, SELECT, INSERT, DELETE, UPDATE, INDEX ON opennebula.* TO '%s'@'localhost'" % username
rc, output = self._execute("%s -e \"%s\"" % (mysqlCommand, userCreate),
withOutput=True, shell=True)
if rc != 0:
Util.printWarning("Couldn't create user '%s'. Already exists?\n%s" % (username, output))
rc, output = self._execute("%s -e \"%s\"" % (mysqlCommand, userGrant),
withOutput=True, shell=True)
if rc != 0:
Util.printError("Error granting permission for user '%s'.\n%s" % (username, output))
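# For reference, the two calls above expand to shell commands of the form
# (values illustrative): /usr/bin/mysql -uroot -p<rootpass> -e "CREATE USER
# 'oneadmin'@'localhost' IDENTIFIED BY '<pass>'" followed by the corresponding
# GRANT on opennebula.*. A failing CREATE USER only triggers a warning (the
# account may already exist), while a failing GRANT aborts via printError.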
# -------------------------------------------
# Bridge
# -------------------------------------------
def configureBridgeRemotely(self):
def doNotConfigureBridge():
return Util.isFalseConfVal(getattr(self, 'nodeBridgeConfigure', True))
if doNotConfigureBridge():
Util.printDetail('Asked not to configure bridge')
return
checkBridgeCmd = '"brctl show | grep ^%s.*%s$"' % \
(self.nodeBridgeName, self.nodeNetworkInterface)
rc, output = self._nodeShell(checkBridgeCmd, withOutput=True, shell=True)
if rc == 0:
Util.printDetail('Bridge already configured')
return
else:
Util.printDetail('Bridge is NOT configured. %s' % output)
configureBridgeCmd = 'nohup "brctl addbr %(bridge)s; sleep 10; ifconfig %(interf)s 0.0.0.0; sleep 10; brctl addif %(bridge)s %(interf)s; sleep 10; dhclient %(bridge)s"' % \
{'bridge' : self.nodeBridgeName,
'interf' : self.nodeNetworkInterface}
rc, output = self._nodeShell(configureBridgeCmd, withOutput=True, shell=True)
if rc != 0:
Util.printDetail('Failed to configure bridge.\n%s' % output)
else:
sleepTime = 5
Util.printDetail('Sleeping %i sec for the bridge on the node to come up.' % sleepTime)
time.sleep(sleepTime)
Util.printDetail('Testing connection to the node.')
rc, output = self._nodeShell('true', withOutput=True)
if rc == 0:
Util.printDetail('OK.')
else:
Util.printError('Could not connect to the node after attempting to configure the bridge.\n%s' % output)
Util.printDetail('Testing if bridge was configured.')
rc, output = self._nodeShell(checkBridgeCmd, withOutput=True, shell=True)
if rc == 0:
Util.printDetail('OK.')
self._persistRemoteBridgeConfig(self.nodeNetworkInterface, self.nodeBridgeName)
return
else:
Util.printError('Bridge was not configured.\n%s' % output)
def _persistRemoteBridgeConfig(self, interface, bridgeName):
# No-op by default; accepts the interface and bridge name it is called with
# above so that subclasses can persist the bridge configuration on the node.
pass
def _writeToFilesRemote(self, listOfFileNameContentTuples):
tmpFilename = tempfile.mktemp()
for remoteFilename, content in listOfFileNameContentTuples:
Util.filePutContent(tmpFilename, content)
self._nodeCopy(tmpFilename, remoteFilename)
try:
os.unlink(tmpFilename)
except: pass
FILE_IPFORWARD_HOT_ENABLE = '/proc/sys/net/ipv4/ip_forward'
FILE_IPFORWARD_PERSIST = '/etc/sysctl.conf'
def enableIpForwarding(self):
return BaseSystem.enableIpForwarding()
|
|
#!/usr/bin/env python
# Author: Willem Thiart <[email protected]>
# Copyright: Willem Thiart 2014
import locale
import re
from docutils import nodes, writers
from docutils.core import publish_cmdline, default_description
import codecs
import collections
import subprocess
try:
locale.setlocale(locale.LC_ALL, '')
except:
pass
class Dotted(object):
def __init__(self, s):
self.s = s
def __repr__(self):
return repr(self.s)
def __str__(self):
return str(self.s)
def _cmp(self, b):
from itertools import izip_longest
lines = []
for a, b in izip_longest(self.s.splitlines(), b.splitlines()):
if b:
def dotcompare(a, b):
dots = i = j = 0
while True:
try:
c1 = a[i]
except (IndexError, TypeError):
c1 = ''
try:
c2 = b[j]
except (IndexError, TypeError):
c2 = ''
if 3 == dots:
i += 1
if c1 == '' and c2 == '':
break
if c1 == '.':
dots += 1
i += 1
elif c1 == c2 and c1 != '.':
dots = 0
i += 1
elif 3 == dots:
pass
elif c1 != c2:
i += 1
j += 1
yield c1
continue
j += 1
yield c2
lines.append(u"".join(dotcompare(a, b)))
else:
lines.append(a)
self.s = u"\n".join(filter(lambda x: x is not None, lines))
return cmp(self.s, b)
def __cmp__(self, other):
return self._cmp(other)
def __lt__(self, other):
return self._cmp(other) < 0
def __le__(self, other):
return self._cmp(other) <= 0
def __eq__(self, other):
return self._cmp(other) == 0
def __ne__(self, other):
return self._cmp(other) != 0
def __gt__(self, other):
return 0 < self._cmp(other)
def __ge__(self, other):
return 0 <= self._cmp(other)
def pythonify_title(title):
mapping = {
'+': 'plus',
' ': '_',
'=': 'equals',
}
for k, v in mapping.items():
title = title.replace(k, v)
return ''.join(e for e in title if e.isalnum() or e == '_').lower()
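# Example of the normalization above (computed from the mapping plus the
# alnum/underscore filter): pythonify_title('C++ = fun stuff!') returns
# 'cplusplus_equals_fun_stuff'.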
def run(cmd):
try:
output = subprocess.check_output(cmd, stderr=subprocess.STDOUT, shell=True, executable='/bin/bash')
except subprocess.CalledProcessError as e:
output = e.output
output = output.decode('utf-8').strip().replace('\r', '')
return output.replace('\t', ' ' * 4)
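# Minimal usage sketch of run(): run('printf "a\\tb"') returns u'a    b' --
# stdout and stderr are merged, the result is stripped, '\r' is removed and
# each tab is expanded to four spaces.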
class RstTstWriter(writers.Writer):
supported = ('manpage',)
"""Formats this writer supports."""
output = None
"""Final translated form of `document`."""
def __init__(self):
writers.Writer.__init__(self)
self.translator_class = Translator
self.current_test = 'default'
def translate(self):
visitor = self.translator_class(self.document)
self.document.walkabout(visitor)
self.output = visitor.astext()
filename = 'test_{0}.py'.format(self.document.settings._source.lower().replace('.rst', ''))
f = codecs.open(filename, 'w', 'utf-8')
f.write(u"""# -*- coding: utf-8 -*-
from rsttst.core import run, Dotted
""")
for i, block in enumerate(visitor.blocks):
if 'ignore' in block.classes:
continue
title = i
if hasattr(block, 'title'):
title = block.title
f.write(u'def test_{0}():\n'.format(title))
text_in = block.input.astext()
text_in = re.sub(r'\\(\S)', r'\\\\\1', text_in)
f.write(u' output = run(u"""{0}""")\n'.format(text_in))
text_out = block.output.astext()
text_out = re.sub(r'\\(\S)', r'\\\\\1', text_out)
text_out = text_out.replace('"""', '\\"""').strip()
if 'dotted' in block.classes:
f.write(u' expected = Dotted(u"""{0}""")\n'.format(text_out))
f.write(u' cmp(output, expected)\n')
f.write(u' expected = u"{0}".format(expected)\n')
f.write(u' assert output == expected\n')
else:
f.write(u' assert output == u"""{0}"""\n'.format(text_out))
f.write(u'\n')
f.write(u'if __name__ == "__main__": pass\n')
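# For orientation, a code block in a section titled "My Test" and carrying the
# 'dotted' class produces a test of roughly this shape in the generated
# test_<source>.py (input/output text elided):
#
#     def test_my_test():
#         output = run(u"""<input block>""")
#         expected = Dotted(u"""<output block>""")
#         cmp(output, expected)
#         expected = u"{0}".format(expected)
#         assert output == expected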
class Block(object):
pass
class Translator(nodes.NodeVisitor):
""""""
words_and_spaces = re.compile(r'\S+| +|\n')
possibly_a_roff_command = re.compile(r'\.\w')
document_start = """Tests generated from reStructuredText."""
def __init__(self, document):
nodes.NodeVisitor.__init__(self, document)
self.blocks = []
self.current_block = None
self.current_title = None
self.titles = collections.defaultdict(lambda: 0)
def astext(self):
return ''
def visit_Text(self, node):
pass
def depart_Text(self, node):
pass
def visit_address(self, node):
pass
def depart_address(self, node):
pass
def visit_admonition(self, node, name=None):
pass
def depart_admonition(self, node):
pass
def visit_attention(self, node):
pass
def visit_docinfo_item(self, node, name):
pass
def depart_docinfo_item(self, node):
pass
def visit_author(self, node):
pass
def visit_authors(self, node):
pass
def depart_authors(self, node):
pass
def visit_block_quote(self, node):
pass
def depart_block_quote(self, node):
pass
def visit_bullet_list(self, node):
pass
def depart_bullet_list(self, node):
pass
def visit_caption(self, node):
pass
def depart_caption(self, node):
pass
def visit_caution(self, node):
pass
def visit_citation(self, node):
pass
def depart_citation(self, node):
pass
def visit_citation_reference(self, node):
pass
def visit_classifier(self, node):
pass
def depart_classifier(self, node):
pass
def visit_colspec(self, node):
pass
def depart_colspec(self, node):
pass
def write_colspecs(self):
pass
def visit_comment(self, node, sub=None):
pass
def visit_contact(self, node):
pass
def visit_container(self, node):
pass
def depart_container(self, node):
pass
def visit_compound(self, node):
pass
def depart_compound(self, node):
pass
def visit_copyright(self, node):
pass
def visit_danger(self, node):
pass
def visit_date(self, node):
pass
def visit_decoration(self, node):
pass
def depart_decoration(self, node):
pass
def visit_definition(self, node):
pass
def depart_definition(self, node):
pass
def visit_definition_list(self, node):
pass
def depart_definition_list(self, node):
pass
def visit_definition_list_item(self, node):
pass
def depart_definition_list_item(self, node):
pass
def visit_description(self, node):
pass
def depart_description(self, node):
pass
def visit_docinfo(self, node):
pass
def depart_docinfo(self, node):
pass
def visit_doctest_block(self, node):
pass
def depart_doctest_block(self, node):
pass
def visit_document(self, node):
pass
def depart_document(self, node):
pass
def visit_emphasis(self, node):
pass
def depart_emphasis(self, node):
pass
def visit_entry(self, node):
pass
def depart_entry(self, node):
pass
def visit_enumerated_list(self, node):
pass
def depart_enumerated_list(self, node):
pass
def visit_error(self, node):
pass
def visit_field(self, node):
pass
def depart_field(self, node):
pass
def visit_field_body(self, node):
pass
def depart_field_body(self, node):
pass
def visit_field_list(self, node):
pass
def depart_field_list(self, node):
pass
def visit_field_name(self, node):
pass
def depart_field_name(self, node):
pass
def visit_figure(self, node):
pass
def depart_figure(self, node):
pass
def visit_footer(self, node):
pass
def depart_footer(self, node):
pass
def visit_footnote(self, node):
pass
def depart_footnote(self, node):
pass
def footnote_backrefs(self, node):
pass
def visit_footnote_reference(self, node):
pass
def depart_footnote_reference(self, node):
pass
def visit_generated(self, node):
pass
def depart_generated(self, node):
pass
def visit_header(self, node):
pass
def depart_header(self, node):
pass
def visit_hint(self, node):
pass
def visit_inline(self, node):
pass
def depart_inline(self, node):
pass
def visit_subscript(self, node):
pass
def depart_subscript(self, node):
pass
def visit_superscript(self, node):
pass
def depart_superscript(self, node):
pass
def visit_attribution(self, node):
pass
def depart_attribution(self, node):
pass
def visit_image(self, node):
pass
def depart_image(self, node):
pass
def visit_important(self, node):
pass
def visit_label(self, node):
pass
def depart_label(self, node):
pass
def visit_legend(self, node):
pass
def depart_legend(self, node):
pass
def visit_line_block(self, node):
pass
def depart_line_block(self, node):
pass
def visit_line(self, node):
pass
def depart_line(self, node):
pass
def visit_list_item(self, node):
pass
def depart_list_item(self, node):
pass
def visit_literal(self, node):
pass
def depart_literal(self, node):
pass
def visit_literal_block(self, node):
if not self.current_block:
self.current_block = Block()
self.current_block.input = node
self.current_block.classes = set(node.attributes['classes'])
self.blocks.append(self.current_block)
title = self.current_title
self.titles[title] += 1
if 1 < self.titles[title]:
title = '{0}__{1}'.format(title, self.titles[title])
self.current_block.title = title
if 'ignore' in self.current_block.classes:
self.current_block = None
else:
self.current_block.classes |= set(node.attributes['classes'])
self.current_block.output = node
self.current_block = None
def depart_literal_block(self, node):
pass
def visit_math(self, node):
pass
def depart_math(self, node):
pass
def visit_math_block(self, node):
pass
def depart_math_block(self, node):
pass
def visit_meta(self, node):
pass
def depart_meta(self, node):
pass
def visit_note(self, node):
pass
def indent(self, by=0.5):
pass
def dedent(self):
pass
def visit_option_list(self, node):
pass
def depart_option_list(self, node):
pass
def visit_option_list_item(self, node):
pass
def depart_option_list_item(self, node):
pass
def visit_option_group(self, node):
pass
def depart_option_group(self, node):
pass
def visit_option(self, node):
pass
def depart_option(self, node):
pass
def visit_option_string(self, node):
pass
def depart_option_string(self, node):
pass
def visit_option_argument(self, node):
pass
def depart_option_argument(self, node):
pass
def visit_organization(self, node):
pass
def depart_organization(self, node):
pass
def first_child(self, node):
pass
def visit_paragraph(self, node):
pass
def depart_paragraph(self, node):
pass
def visit_problematic(self, node):
pass
def depart_problematic(self, node):
pass
def visit_raw(self, node):
pass
def visit_reference(self, node):
pass
def depart_reference(self, node):
pass
def visit_revision(self, node):
pass
def visit_row(self, node):
pass
def depart_row(self, node):
pass
def visit_section(self, node):
pass
def depart_section(self, node):
pass
def visit_status(self, node):
pass
def visit_strong(self, node):
pass
def depart_strong(self, node):
pass
def visit_substitution_definition(self, node):
pass
def visit_substitution_reference(self, node):
pass
def visit_subtitle(self, node):
pass
def depart_subtitle(self, node):
pass
def visit_system_message(self, node):
pass
def depart_system_message(self, node):
pass
def visit_table(self, node):
pass
def depart_table(self, node):
pass
def visit_target(self, node):
pass
def depart_target(self, node):
pass
def visit_tbody(self, node):
pass
def depart_tbody(self, node):
pass
def visit_term(self, node):
pass
def depart_term(self, node):
pass
def visit_tgroup(self, node):
pass
def depart_tgroup(self, node):
pass
def visit_thead(self, node):
pass
def depart_thead(self, node):
pass
def visit_tip(self, node):
pass
def visit_title(self, node):
title = pythonify_title(node.astext())
self.current_title = title
def depart_title(self, node):
pass
def visit_title_reference(self, node):
pass
def depart_title_reference(self, node):
pass
def visit_topic(self, node):
pass
def depart_topic(self, node):
pass
def visit_sidebar(self, node):
pass
def depart_sidebar(self, node):
pass
def visit_rubric(self, node):
pass
def depart_rubric(self, node):
pass
def visit_transition(self, node):
pass
def depart_transition(self, node):
pass
def visit_version(self, node):
pass
def visit_warning(self, node):
pass
def unimplemented_visit(self, node):
pass
def main():
description = ("Generates test code. " + default_description)
publish_cmdline(writer=RstTstWriter(), description=description)
if __name__ == '__main__':
main()
# vim: set fileencoding=utf-8 et ts=4 ai :
|
|
import re
from Vintageous import PluginLogger
from Vintageous.vi.utils import modes
from Vintageous.vi import cmd_base
from Vintageous.plugins import plugins
from Vintageous.vi import variables
_logger = PluginLogger(__name__)
class mapping_scopes:
"""
Scopes for mappings.
"""
DEFAULT = 0
USER = 1
PLUGIN = 2
NAME_SPACE = 3
LEADER = 4
LOCAL_LEADER = 5
class seqs:
"""
Vim's built-in key sequences plus Sublime Text 3 staple commands.
These are the sequences of key presses known to Vintageous. Any other
sequence pressed will be treated as 'unmapped'.
"""
A = 'a'
ALT_CTRL_P = '<C-M-p>'
AMPERSAND = '&'
AW = 'aw'
B = 'b'
BACKSPACE = '<bs>'
G_BIG_C = 'gC'
GC = 'gc'
GCC = 'gcc'
GE = 'ge'
G_BIG_E = 'gE'
UP = '<up>'
DOWN = '<down>'
LEFT = '<left>'
RIGHT = '<right>'
HOME = '<home>'
END = '<end>'
BACKTICK = '`'
BIG_A = 'A'
SPACE = '<space>'
BIG_B = 'B'
CTRL_E = '<C-e>'
CTRL_Y = '<C-y>'
BIG_C = 'C'
BIG_D = 'D'
GH = 'gh'
G_BIG_H = 'gH'
BIG_E = 'E'
BIG_F = 'F'
BIG_G = 'G'
CTRL_0 = '<C-0>'
CTRL_1 = '<C-1>'
CTRL_2 = '<C-2>'
CTRL_3 = '<C-3>'
CTRL_4 = '<C-4>'
CTRL_5 = '<C-5>'
CTRL_6 = '<C-6>'
CTRL_7 = '<C-7>'
CTRL_8 = '<C-8>'
CTRL_9 = '<C-9>'
CTRL_C = '<C-c>'
CTRL_ENTER = '<C-cr>'
CTRL_SHIFT_B = '<C-S-b>'
CTRL_SHIFT_ENTER = '<C-S-cr>'
CTRL_DOT = '<C-.>'
CTRL_SHIFT_DOT = '<C-S-.>'
CTRL_LEFT_SQUARE_BRACKET = '<C-[>'
CTRL_W = '<C-w>'
CTRL_W_Q = '<C-w>q'
CTRL_W_V = '<C-w>v'
CTRL_W_L = '<C-w>l'
CTRL_W_BIG_L = '<C-w>L'
CTRL_K = '<C-k>'
CTRL_K_CTRL_B = '<C-k><C-b>'
CTRL_BIG_F = '<C-F>'
CTRL_BIG_P = '<C-P>'
CTRL_W_H = '<C-w>h'
CTRL_X = '<C-x>'
CTRL_X_CTRL_L = '<C-x><C-l>'
Q = 'q'
AT = '@'
CTRL_W_BIG_H = '<C-w>H'
BIG_H = 'H'
G_BIG_J = 'gJ'
CTRL_R = '<C-r>'
CTRL_R_EQUAL = '<C-r>='
CTRL_A = '<C-a>'
CTRL_I = '<C-i>'
CTRL_O = '<C-o>'
CTRL_X = '<C-x>'
Z = 'z'
Z_ENTER = 'z<cr>'
ZT = 'zt'
ZZ = 'zz'
Z_MINUS = 'z-'
ZB = 'zb'
BIG_I = 'I'
BIG_Z_BIG_Z = 'ZZ'
BIG_Z_BIG_Q = 'ZQ'
GV = 'gv'
BIG_J = 'J'
BIG_K = 'K'
BIG_L = 'L'
BIG_M = 'M'
BIG_N = 'N'
BIG_O = 'O'
BIG_P = 'P'
BIG_Q = 'Q'
BIG_R = 'R'
BIG_S = 'S'
BIG_T = 'T'
BIG_U = 'U'
BIG_V = 'V'
BIG_W = 'W'
BIG_X = 'X'
BIG_Y = 'Y'
BIG_Z = 'Z'
C = 'c'
CC = 'cc'
COLON = ':'
COMMA = ','
CTRL_D = '<C-d>'
CTRL_F12 = '<C-f12>'
CTRL_L = '<C-l>'
CTRL_B = '<C-b>'
CTRL_F = '<C-f>'
CTRL_G = '<C-g>'
CTRL_P = '<C-p>'
CTRL_U = '<C-u>'
CTRL_V = '<C-v>'
D = 'd'
DD = 'dd'
DOLLAR = '$'
DOT = '.'
DOUBLE_QUOTE = '"'
E = 'e'
ENTER = '<cr>' # Or rather <Enter>?
SHIFT_ENTER = '<S-cr>'
EQUAL = '='
EQUAL_EQUAL = '=='
ESC = '<esc>'
F = 'f'
F1 = '<f1>'
F10 = '<f10>'
F11 = '<f11>'
F12 = '<f12>'
F13 = '<f13>'
F14 = '<f14>'
F15 = '<f15>'
F2 = '<f2>'
F3 = '<f3>'
SHIFT_F2 = '<S-f2>'
SHIFT_F3 = '<S-f3>'
SHIFT_F4 = '<S-f4>'
F4 = '<f4>'
F5 = '<f5>'
F6 = '<f6>'
F7 = '<f7>'
F8 = '<f8>'
F9 = '<f9>'
CTRL_F2 = '<C-f2>'
CTRL_SHIFT_F2 = '<C-S-f2>'
G = 'g'
G_BIG_D = 'gD'
G_BIG_U = 'gU'
G_BIG_U_BIG_U = 'gUU'
G_BIG_U_G_BIG_U = 'gUgU'
G_TILDE = 'g~'
G_TILDE_G_TILDE = 'g~g~'
G_TILDE_TILDE = 'g~~'
G_UNDERSCORE = 'g_'
GD = 'gd'
GG = 'gg'
GJ = 'gj'
GK = 'gk'
GQ = 'gq'
GT = 'gt'
G_BIG_T = 'gT'
GM = 'gm'
GU = 'gu'
GUGU = 'gugu'
GUU = 'guu'
GREATER_THAN = '>'
GREATER_THAN_GREATER_THAN = '>>'
H = 'h'
HAT = '^'
I = 'i'
J = 'j'
K = 'k'
L = 'l'
LEFT_BRACE = '{'
LEFT_SQUARE_BRACKET = '['
LEFT_PAREN = '('
LESS_THAN = '<lt>'
LESS_THAN_LESS_THAN = '<lt><lt>'
MINUS = '-'
M = 'm'
N = 'n'
O = 'o'
P = 'p'
PLUS = '+'
OCTOTHORP = '#'
PAGE_DOWN = 'pagedown'
PAGE_UP = 'pageup'
PERCENT = '%'
PIPE = '|'
QUESTION_MARK = '?'
QUOTE = "'"
QUOTE_QUOTE = "''"
R = 'r'
RIGHT_BRACE = '}'
RIGHT_SQUARE_BRACKET = ']'
RIGHT_PAREN = ')'
S = 's'
SEMICOLON = ';'
SHIFT_CTRL_F12 = '<C-S-f12>'
SHIFT_F11 = '<S-f11>'
SLASH = '/'
STAR = '*'
T = 't'
TAB = '<tab>'
TILDE = '~'
U = 'u'
UNDERSCORE = '_'
V = 'v'
W = 'w'
X = 'x'
Y = 'y'
YY = 'yy'
ZERO = '0'
def seq_to_command(state, seq, mode=None):
"""
Returns the command definition mapped to @seq, or a 'missing' command
if none is found.
@mode
Forces the use of this mode instead of the global state's.
"""
mode = mode or state.mode
_logger.info('[seq_to_command] state/seq: {0}/{1}'.format(mode, seq))
command = None
if state.mode in plugins.mappings:
command = plugins.mappings[mode].get(seq, None)
if not command and state.mode in mappings:
command = mappings[mode].get(seq, cmd_base.ViMissingCommandDef())
return command
elif command:
return command
return cmd_base.ViMissingCommandDef()
# Mappings 'key sequence' ==> 'command definition'
#
# 'key sequence' is a sequence of key presses.
#
mappings = {
modes.INSERT: {},
modes.NORMAL: {},
modes.VISUAL: {},
modes.OPERATOR_PENDING: {},
modes.VISUAL_LINE: {},
modes.VISUAL_BLOCK: {},
modes.SELECT: {},
'_missing': dict(name='_missing')
}
# TODO: Add a timeout for ambiguous cmd_base.
# Key sequence to command mapping. Mappings are set by the user.
#
# Returns a partial definition containing the user-pressed keys so that we
# can replay the command exactly as it was typed in.
user_mappings = {
# 'jkl': dict(name='dd', type=cmd_types.USER),
}
EOF = -2
class key_names:
"""
Names of special keys.
"""
BACKSPACE = '<bs>'
CR = '<cr>'
DOWN = '<down>'
END = '<end>'
ENTER = '<enter>'
ESC = '<esc>'
HOME = '<home>'
LEFT = '<left>'
LESS_THAN = '<lt>'
RIGHT = '<right>'
SPACE = '<sp>'
SPACE_LONG = '<space>'
TAB = '<tab>'
UP = '<up>'
F1 = '<f1>'
F2 = '<f2>'
F3 = '<f3>'
F4 = '<f4>'
F5 = '<f5>'
F6 = '<f6>'
F7 = '<f7>'
F8 = '<f8>'
F9 = '<f9>'
F10 = '<f10>'
F11 = '<f11>'
F12 = '<f12>'
F13 = '<f13>'
F14 = '<f14>'
F15 = '<f15>'
Leader = '<leader>'
as_list = [
BACKSPACE,
CR,
DOWN,
END,
ENTER,
ESC,
HOME,
LEFT,
LESS_THAN,
RIGHT,
SPACE,
SPACE_LONG,
TAB,
UP,
F1,
F2,
F3,
F4,
F5,
F6,
F7,
F8,
F9,
F10,
F11,
F12,
F13,
F14,
F15,
Leader,
]
max_len = len('<space>')
# TODO: detect counts, registers, marks...
class KeySequenceTokenizer(object):
"""
Takes in a sequence of key names and tokenizes it.
"""
def __init__(self, source):
"""
@source
A sequence of key names in Vim notation.
"""
self.idx = -1
self.source = source
self.in_named_key = False
def consume(self):
self.idx += 1
if self.idx >= len(self.source):
self.idx -= -1
return EOF
return self.source[self.idx]
def peek_one(self):
if (self.idx + 1) >= len(self.source):
return EOF
return self.source[self.idx + 1]
def is_named_key(self, key):
return key.lower() in key_names.as_list
def sort_modifiers(self, modifiers):
"""
Ensures consistency in the order of modifier letters according to:
c > m > s
"""
if len(modifiers) == 6:
modifiers = 'c-m-s-'
elif len(modifiers) > 2:
if modifiers.startswith('s-') and modifiers.endswith('c-'):
modifiers = 'c-s-'
elif modifiers.startswith('s-') and modifiers.endswith('m-'):
modifiers = 'm-s-'
elif modifiers.startswith('m-') and modifiers.endswith('c-'):
modifiers = 'c-m-'
return modifiers
def long_key_name(self):
self.in_named_key = True
key_name = ''
modifiers = ''
while True:
c = self.consume()
if c == EOF:
raise ValueError("expected '>' at index {0}".format(self.idx))
elif (c.lower() in ('c', 's', 'm')) and (self.peek_one() == '-'):
if c.lower() in modifiers.lower():
raise ValueError('invalid modifier sequence: {0}'.format(self.source))
modifiers += c + self.consume()
elif c == '>' and self.peek_one() == '>':
modifiers = self.sort_modifiers(modifiers.lower())
if len(key_name) == 0:
return '<' + modifiers.upper() + self.consume() + '>'
else:
raise ValueError('wrong key {0}'.format(key_name))
elif c == '>':
modifiers = self.sort_modifiers(modifiers.lower())
if len(key_name) == 1:
if not modifiers:
raise ValueError('wrong sequence {0}'.format(self.source))
return '<' + modifiers.upper() + key_name + '>'
elif self.is_named_key('<' + key_name + '>'):
self.in_named_key = False
return '<' + modifiers.upper() + key_name.lower() + '>'
else:
raise ValueError("'{0}' is not a known key".format(key_name))
else:
key_name += c
def tokenize_one(self):
c = self.consume()
if c == '<':
return self._expand_vars(self.long_key_name())
else:
return c
def iter_tokenize(self):
while True:
token = self.tokenize_one()
if token == EOF:
break
yield token
def _expand_vars(self, c):
return variables.get(c) if variables.is_key_name(c) else c
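# Tokenization examples for KeySequenceTokenizer above (assuming '<C-w>' is not
# a registered variable name, so _expand_vars leaves it untouched):
#   list(KeySequenceTokenizer('<C-w>v').iter_tokenize()) == ['<C-w>', 'v']
#   list(KeySequenceTokenizer('d2w').iter_tokenize()) == ['d', '2', 'w']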
def to_bare_command_name(seq):
"""
Strips register and count data from @seq.
"""
# Special case.
if seq == '0':
return seq
new_seq = re.sub(r'^(?:".)?(?:[1-9]+)?', '', seq)
# Account for d2d and similar sequences.
new_seq = list(KeySequenceTokenizer(new_seq).iter_tokenize())
return ''.join(k for k in new_seq if not k.isdigit())
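# Examples of the stripping above: to_bare_command_name('"a2dd') == 'dd' (the
# register '"a' and the count are removed) and to_bare_command_name('d2d') ==
# 'dd' (the embedded count is dropped after tokenization).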
def assign(seq, modes, *args, **kwargs):
"""
Registers a 'key sequence' to 'command' mapping with Vintageous.
The registered key sequence must be known to Vintageous. The
registered command must be a ViMotionDef or ViOperatorDef.
The decorated class is instantiated with `*args` and `**kwargs`.
@seq
The key sequence (in Vim notation) to register.
@modes
A tuple of modes in which the decorated class should be mapped.
"""
def inner(cls):
for mode in modes:
mappings[mode][seq] = cls(*args, **kwargs)
return cls
return inner
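# Hedged usage sketch (the command class name is illustrative; per the
# docstring above, real command definitions subclass ViMotionDef or
# ViOperatorDef):
#
#   @assign(seqs.GCC, (modes.NORMAL,))
#   class _ExampleToggleComment(cmd_base.ViOperatorDef):
#       ...
#
# After import, mappings[modes.NORMAL][seqs.GCC] holds an instance of the
# decorated class.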
|
|
from django.conf import settings
from eulxml import xmlmap
from eulxml.xmlmap import teimap
from lxml import etree
import os
'''
:class:`eulxml.xmlmap.XmlObject` subclasses for dealing with TEI,
particularly for the TEI facsimile used for positional OCR data for
readux pages and for generating annotated TEI for export.
'''
class TeiBase(teimap.Tei):
'Base class for all TEI objects, with all namespaces'
ROOT_NS = teimap.TEI_NAMESPACE
ROOT_NAMESPACES = {
'tei' : ROOT_NS,
'xml': 'http://www.w3.org/XML/1998/namespace',
'xlink': 'http://www.w3.org/TR/xlink/',
}
class Graphic(TeiBase):
'TEI Graphic'
ROOT_NAME = 'graphic'
#: url
url = xmlmap.StringField('@url')
#: rend
rend = xmlmap.StringField('@rend')
class Zone(TeiBase):
'XmlObject for a zone in a TEI facsimile document'
ROOT_NAME = 'zone'
#: xml id
id = xmlmap.StringField('@xml:id')
#: n attribute
n = xmlmap.StringField('@n')
#: type attribute
type = xmlmap.StringField('@type')
#: upper left x coord
ulx = xmlmap.FloatField('@ulx')
#: upper left y coord
uly = xmlmap.FloatField('@uly')
#: lower right x coord
lrx = xmlmap.FloatField('@lrx')
#: lower right y coord
lry = xmlmap.FloatField('@lry')
#: xlink href
href = xmlmap.StringField('@xlink:href')
#: text content
text = xmlmap.StringField('tei:line|tei:w')
#: list of word zones contained in this zone (e.g., within a textLine zone)
word_zones = xmlmap.NodeListField('.//tei:zone[@type="string"]', 'self')
#: nearest preceding sibling word zone (e.g., previous word in this line), if any)
preceding = xmlmap.NodeField('preceding-sibling::tei:zone[1]', 'self')
#: nearest ancestor zone
parent = xmlmap.NodeField('ancestor::tei:zone[1]', 'self')
#: containing page
page = xmlmap.NodeField('ancestor::tei:surface[@type="page"]', 'self')
# not exactly a zone, but same attributes we care about (type, id, ulx/y, lrx/y)
#: list of graphic elements (i.e. page images)
graphics = xmlmap.NodeListField('tei:graphic', Graphic)
# convenience mappings to specific sizes of page image
#: full size image (tei:graphic with type "full")
full_image = xmlmap.NodeField('tei:graphic[@type="full"]', Graphic)
#: page size image (tei:graphic with type "page")
page_image = xmlmap.NodeField('tei:graphic[@type="page"]', Graphic)
#: thumbnail image (tei:graphic with type "thumbnail")
thumbnail = xmlmap.NodeField('tei:graphic[@type="thumbnail"]', Graphic)
#: small thumbnail image (tei:graphic with type "small-thumbnail")
small_thumbnail = xmlmap.NodeField('tei:graphic[@type="small-thumbnail"]', Graphic)
#: image info as provided by IIIF (tei:graphic with type "info")
image_info = xmlmap.NodeField('tei:graphic[@type="info"]', Graphic)
@property
def width(self):
'zone width'
return self.lrx - self.ulx
@property
def height(self):
'zone height'
return self.lry - self.uly
@property
def avg_height(self):
'''Calculated average height of word zones in the current zone
(i.e. in a text line)'''
if self.word_zones:
word_heights = [w.height for w in self.word_zones]
return sum(word_heights) / float(len(word_heights))
class Ref(TeiBase):
'Tei reference'
ROOT_NAME = 'ref'
#: target
target = xmlmap.StringField('@target')
#: type
type = xmlmap.StringField('@type')
#: text
text = xmlmap.StringField('text()')
class BiblStruct(TeiBase):
'Structured Bibliographic citation'
# minimal mappings for now
ROOT_NAME = 'BiblStruct'
#: xml id
id = xmlmap.StringField('@xml:id')
#: corresp
corresp = xmlmap.StringField('@corresp')
#: type
type = xmlmap.StringField('@type')
class AnnotationWorksCited(TeiBase):
milestone = xmlmap.NodeField('preceding-sibling::tei:milestone',
xmlmap.XmlObject)
ref_list = xmlmap.NodeField(
'parent::tei:list[contains(tei:item/tei:anchor/@xml:id, "zotero")]',
xmlmap.XmlObject)
class Note(TeiBase):
'Tei Note, used here to contain an annotation'
ROOT_NAME = 'note'
#: xml id
id = xmlmap.StringField('@xml:id')
#: responsibility
resp = xmlmap.StringField('@resp')
#: target
target = xmlmap.StringField('@target')
#: type
type = xmlmap.StringField('@type')
#: ana attribute, e.g. for tag identifiers
ana = xmlmap.StringField('@ana')
#: xlink href
href = xmlmap.StringField('@xlink:href')
#: list of paragraphs as strings
paragraphs = xmlmap.StringListField('tei:p')
#: code for the markdown used in the original annotation
markdown = xmlmap.StringField('tei:code[@lang="markdown"]')
#: links to related pages
related_pages = xmlmap.NodeListField('tei:ref[@type="related page"]',
Ref)
#: list of bibliographic citations/works cited
citations = xmlmap.NodeListField('tei:listBibl/tei:biblStruct', BiblStruct)
# in-text citation generated from markdown; these fields
# are mapped so they can be removed from the annotated tei document
works_cited = xmlmap.NodeField(
'tei:head[text() = "Works Cited"]',
xmlmap.XmlObject)
zotero_items = xmlmap.NodeField(
'tei:list[contains(tei:item/tei:anchor/@xml:id, "zotero")]',
xmlmap.XmlObject)
works_cited_milestone = xmlmap.NodeField(
'tei:milestone[following-sibling::tei:head/text() = "Works Cited"]',
xmlmap.XmlObject)
# mapped to remove empty list bibl element
list_bibl = xmlmap.NodeField('tei:listBibl', xmlmap.XmlObject)
class Bibl(TeiBase):
'TEI Bibl, with mappings for digital edition and pdf urls'
#: type
type = xmlmap.StringField('@type')
#: title
title = xmlmap.StringField('tei:title')
#: author
authors = xmlmap.StringListField('tei:author')
#: date
date = xmlmap.StringField('tei:date')
#: url to digital edition
url = xmlmap.StringField('tei:ref[@type="digital-edition"]/@target')
#: url to pdf of digital edition
pdf_url = xmlmap.StringField('tei:ref[@type="pdf"]/@target')
class PublicationStatement(TeiBase):
'Publication statement, with mapping for readux distributor'
#: descriptive statement (paragraph)
desc = xmlmap.StringField('tei:p')
#: date in human-readable display format
date = xmlmap.DateField('tei:date', '%B %d, %Y')
#: normalized date
date_normal = xmlmap.DateField('tei:date/@when', '%Y-%m-%d')
#: readux distributor reference (includes ref with target of readux.library.emory.edu)
distributor_readux = xmlmap.StringField('tei:distributor[@xml:id="readux"]/tei:ref[@target="http://readux.library.emory.edu"]')
class Facsimile(TeiBase):
'''Extension of :class:`eulxml.xmlmap.teimap.TEI` to provide access
to TEI facsimile elements'''
#: local xsd schema
XSD_SCHEMA = 'file://%s' % os.path.join(os.path.abspath(os.path.dirname(__file__)),
'schema', 'TEIPageView.xsd')
# NOTE: using absolute path for schema to avoid path issues when
# building documentation on readthedocs.org
ROOT_NAME = 'TEI'
xmlschema = etree.XMLSchema(etree.parse(XSD_SCHEMA))
# NOTE: not using xmlmap.loadSchema because it doesn't correctly load
# referenced files in the same directory
#: surface with type page, as :class:`Zone`
page = xmlmap.NodeField('tei:facsimile/tei:surface[@type="page"]', Zone)
#: list of pages (surface with type page)
page_list = xmlmap.NodeListField('tei:facsimile/tei:surface[@type="page"]', Zone)
# NOTE: tei facsimile could include illustrations, but ignoring those for now
#: list of zones with type textLine or line as :class:`Zone`
lines = xmlmap.NodeListField('tei:facsimile//tei:zone[@type="textLine" or @type="line"]', Zone)
#: list of word zones (type string) as :class:`Zone`
word_zones = xmlmap.NodeListField('tei:facsimile//tei:zone[@type="string"]', Zone)
#: publication statment distributor
distributor = xmlmap.StringField('tei:teiHeader/tei:fileDesc/tei:publicationStmt/tei:distributor')
#: publication statmnt as :class:`PublicationStatement`
pubstmt = xmlmap.NodeField('tei:teiHeader/tei:fileDesc/tei:publicationStmt',
PublicationStatement)
#: encoding description
encoding_desc = xmlmap.NodeField('tei:teiHeader/tei:encodingDesc',
xmlmap.XmlObject)
#: source description for the original volume
original_source = xmlmap.NodeField('tei:teiHeader/tei:fileDesc/tei:sourceDesc/tei:bibl[@type="original"]',
Bibl)
#: source description for the readux digital edition
digital_source = xmlmap.NodeField('tei:teiHeader/tei:fileDesc/tei:sourceDesc/tei:bibl[@type="digital"]',
Bibl)
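# Hedged usage sketch for the Facsimile class above (the filename is
# illustrative; load_xmlobject_from_file is the stock eulxml loader):
#   from eulxml.xmlmap import load_xmlobject_from_file
#   tei = load_xmlobject_from_file('page_tei.xml', Facsimile)
#   for line in tei.lines:
#       print line.text, line.ulx, line.uly, line.width, line.height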
class Name(TeiBase):
'Tei NAME, with id attribute and value'
ROOT_NAME = 'name'
#: xml id
id = xmlmap.StringField('@xml:id')
#: full name
value = xmlmap.StringField('.')
class Interp(TeiBase, teimap.TeiInterp):
# extend eulxml.xmlmap.teimap version because it does not include
# the xml namespace for setting xml:id
ROOT_NAME = 'interp'
value = xmlmap.StringField('.')
class InterpGroup(teimap.TeiInterpGroup):
# extend eulxml.xmlmap.teimap version to map our local interp
interp = xmlmap.NodeListField("tei:interp", Interp)
class AnnotatedFacsimile(Facsimile):
'''Annotated Tei facsimile, with mappings needed to generate
TEI with annotations.
'''
#: main tei title
main_title = xmlmap.StringField('tei:teiHeader/tei:fileDesc/tei:titleStmt/tei:title[@type="full"]/tei:title[@type="main"]')
#: tei subtitle (e.g., annotated edition)
subtitle = xmlmap.StringField('tei:teiHeader/tei:fileDesc/tei:titleStmt/tei:title[@type="full"]/tei:title[@type="sub"]')
#: responsibility statement text
responsibility = xmlmap.StringField('tei:teiHeader/tei:fileDesc/tei:titleStmt/tei:respStmt/tei:resp')
#: responsibility statement names
responsible_names = xmlmap.NodeListField('tei:teiHeader/tei:fileDesc/tei:titleStmt/tei:respStmt/tei:name',
Name)
# additional mappings for annotation data
#: list of annotations at body/div[@type="annotations"]/note[@type="annotation"], as :class:`Note`
annotations = xmlmap.NodeListField('tei:text/tei:body/tei:div[@type="annotations"]/tei:note[@type="annotation"]',
Note)
#: list of bibliographic citations/works cited
citations = xmlmap.NodeListField('tei:text/tei:body/tei:div[@type="works-cited"]/tei:listBibl/tei:biblStruct', BiblStruct)
#: list of bibliographic citation ids
citation_ids = xmlmap.StringListField('tei:text/tei:body/tei:div[@type="works-cited"]/tei:listBibl/tei:biblStruct/@xml:id')
#: annotation tags, as :class:`~eulxml.xmlmap.teimap.TeiInterpGroup`
tags = xmlmap.NodeField('tei:text/tei:back/tei:interpGrp[@type="tags"]',
InterpGroup)
def page_id_by_xlink(self, link):
results = self.node.xpath('//tei:surface[@type="page"][@xlink:href="%s"]/@xml:id' \
% link, namespaces=self.ROOT_NAMESPACES)
if results:
return results[0]
class Anchor(TeiBase):
'TEI Anchor, for marking start and end of text annotation highlights'
ROOT_NAME = 'anchor'
#: xml id
id = xmlmap.StringField('@xml:id')
#: type
type = xmlmap.StringField('@type')
#: next attribute
next = xmlmap.StringField('@next')
|
|
#!/usr/bin/env jython
# SenSafety.MidTerm.CEPengine provides the Esper based complex event processing
# engine for the mid-term SenSafety demo.
# Main.py registers the query lists defined in this module.
# Query lists are returned by functions that accept configurable parameters.
# Marc de Lignie, Politie IV-organisatie
# October 7, 2014
import java.lang
import time, urllib, urllib2
ANOMALOUS_SOUND = 'Anomalous_Sound' #ToDo: use in query
SOUNDGROUP = 'SoundGroup' #ToDo: use in query
COUNTSOUNDS = 'CountSounds' #ToDo: use in query
FACECOUNT = 'Facecount'
AVGFACECOUNT = 'AvgFacecount'
BUSY = 'Busy'
TILT = 'Tilt'
CONTACT = 'Contact'
# WebMonitor URLs
URL_SOUND = 'http://localhost:8555/SenSafety_MidTerm/eventdb/sound'
URL_FACE = 'http://localhost:8555/SenSafety_MidTerm/eventdb/face'
URL_TILT = 'http://localhost:8555/SenSafety_MidTerm/eventdb/tilt'
URL_ACTIVITY = 'http://localhost:8555/SenSafety_MidTerm/eventdb/activity'
class QueryFacecount(object):
def __init__(self, twindow):
self.twindow = twindow
def getResultEvent(self):
return (AVGFACECOUNT, {
'timestamp': java.lang.String, # ISO 8601
'mac': java.lang.String,
'avgfacecount': java.lang.Double
})
def getQueries(self):
return [' '.join(['insert into AvgFacecount', # For further analysis
'select cam as mac, avg(facecount) as avgfacecount',
'from %s.win:time_batch(%i sec)'%(FACECOUNT,self.twindow),
'group by cam']
)] #, 'select * from %s' % FACECOUNT] # For the terminal
def listener(self, data_new, data_old):
if not isinstance(data_new, list):
data_new = [data_new]
for item in data_new:
print 'Facecount event passed through CEPengine:\n', str(item)[:160]
# Post to Web monitor (works around a timestamp bug in the facecount agent
# by regenerating the timestamp locally)
event = {
'mac': item['mac'],
'timestamp': time.strftime("%Y-%m-%dT%H:%M:%S",
time.localtime(time.time())),
'facecount': round(item['avgfacecount'],2)
}
urllib2.urlopen(URL_FACE, urllib.urlencode(event))
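# Hedged wiring sketch: Main.py (not shown here) owns the Esper engine, so the
# exact registration calls are placeholders. The intended flow is to
# instantiate q = QueryFacecount(twindow=60), register the (name, fields) pair
# from q.getResultEvent() as an event type, then register each EPL statement
# from q.getQueries() with q.listener attached as the update listener.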
class QueryAnomalousSound(object):
def getResultEvent(self):
return (SOUNDGROUP, {
'timestamp': java.lang.String,
'mac': java.lang.String,
'soundlevel': java.lang.String
})
def getQueries(self):
return [
'insert into SoundGroup\
select s1.timestamp as timestamp, s1.mac as mac,\
s1.soundlevel as soundlevel\
from pattern[every s1=Anomalous_Sound() ->\
timer:interval(2 sec) and not s2=Anomalous_Sound(s1.mac=s2.mac)]'
]
def listener(self, data_new, data_old):
if not isinstance(data_new, list):
data_new = [data_new]
for item in data_new:
print 'Anomalous sound event passed through CEPengine:\n', \
str(item)[:160]
# Post to Web monitor
urllib2.urlopen(URL_SOUND, urllib.urlencode(item))
class QueryCountSounds(object):
def __init__(self, twindow):
self.twindow = twindow
def getResultEvent(self):
return (COUNTSOUNDS, {
'mac': java.lang.String,
'nsg': java.lang.Long }
)
def getQueries(self):
return [' '.join(['insert into CountSounds',
'select mac, count(soundlevel) as nsg',
'from SoundGroup.win:time_batch(%i sec)'% self.twindow,
'group by mac'])
]
def listener(self, data_new, data_old):
if not isinstance(data_new, list):
data_new = [data_new]
for item in data_new:
print 'CountSounds event passed through CEPengine:\n', \
str(item)[:160]
# Not needed by Web monitor
class QueryBusy(object):
# Query is specific to two sound sensors and one facecount sensor.
# AvgFacecount and CountSounds are almost synchronous
#
# It should be possible to simplify the pattern by replacing "->"
# with "and" + using a different time window control,
# but I am in a hurry now
def __init__(self, ilpclient, level):
self._level = level
self._ilpclient = ilpclient
def getResultEvent(self):
return (BUSY, {
'timestamp': java.lang.String, # ISO 8601
'avgfacecount': java.lang.Double,
'nsg1': java.lang.Long,
'nsg2': java.lang.Long,
'busylevel': java.lang.Double
})
def getQueries(self):
return [' '.join(['insert into Busy',
'select a.avgfacecount as avgfacecount,',
'cs1.nsg as nsg1, cs2.nsg as nsg2,',
'(2*a.avgfacecount+1)*(1+cs1.nsg)*(1+cs2.nsg) as busylevel',
'from pattern[(every a=AvgFacecount ->',
'cs1=CountSounds where timer:within(20 sec)->',
'cs2=CountSounds(cs1.mac!=cs2.mac) where timer:within(4 sec))',
'or (every cs1=CountSounds ->',
'a=AvgFacecount where timer:within(20 sec)->',
'cs2=CountSounds(cs1.mac!=cs2.mac) where timer:within(4 sec))',
'or (every cs1=CountSounds ->',
'cs2=CountSounds(cs1.mac!=cs2.mac) where timer:within(4 sec) ->',
'a=AvgFacecount where timer:within(20 sec))]'
# Level comparison moved to listener
#,'where (2*a.avgfacecount+1)*(1+cs1.nsg)*(1+cs2.nsg) > %i'%self.level
])
]
def listener(self, data_new, data_old):
if not isinstance(data_new, list):
data_new = [data_new]
for item in data_new:
print 'Busy event passed through CEPengine:\n', \
str(item)[:160]
if item['busylevel'] > self._level:
self._ilpclient.busy(True)
eventtype = 'busy'
else:
self._ilpclient.busy(False)
eventtype = 'quiet'
# Post to Web monitor
eventdata = {
'eventtype': eventtype,
'timestamp': time.strftime("%Y-%m-%dT%H:%M:%S",
time.localtime(time.time())),
'busylevel': round(item['busylevel'], 1),
'facecount': round(item['avgfacecount'], 3),
'soundlevel': round((1+item['nsg1'])*(1+item['nsg2']), 3)
}
urllib2.urlopen(URL_ACTIVITY, urllib.urlencode(eventdata))
class QueryTilt(object):
def __init__(self, ilpclient):
self._ilpclient = ilpclient
def getQueries(self):
return ['select * from %s' % TILT]
def listener(self, data_new, data_old):
if not isinstance(data_new, list):
data_new = [data_new]
for item in data_new:
print 'Tilt event passed through CEPengine:\n', str(item)[:320]
# Post to Web monitor
eventdata = {
'sensor_id': item['sensor_id'],
'timestamp': time.strftime("%Y-%m-%dT%H:%M:%S",
time.localtime(time.time())),
# 'timestamp': item['timestamp'],
'event': item['event'],
'state': item['state']}
urllib2.urlopen(URL_TILT, urllib.urlencode(eventdata))
if item['event'] == 'MOTIONSTART':
self._ilpclient.tilt()
""" Not for MidTerm event
class QueryContact(object):
# For now: just print incoming events after passing through the cep engine
def getQueries(self):
return ['select * from %s' % CONTACT]
def listener(self, data_new, data_old):
if not isinstance(data_new, list):
data_new = [data_new]
for item in data_new:
print 'Contact event passed through CEPengine:\n', str(item)[:320]
"""
|
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""LSTM Block Cell ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl.testing import parameterized
import numpy as np
from tensorflow.contrib.rnn.python.kernel_tests import benchmarking
from tensorflow.contrib.rnn.python.ops import lstm_ops
from tensorflow.python.client import session
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gen_array_ops
from tensorflow.python.ops import gen_bitwise_ops
from tensorflow.python.ops import gradients_impl
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import rnn
from tensorflow.python.ops import rnn_cell
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
block_lstm = lstm_ops._block_lstm # pylint: disable=protected-access
class _MaskedRandomUniformInitializer(init_ops.RandomUniform):
"""Initializer for uniform dist tensors with trailing bits zeroed-out.
Allow returning tensors with last few mantissa bits set to 0. This potentially
helps avoid getting into precision issues when testing low precision (float16)
computation.
"""
def __init__(self,
minval=0,
maxval=None,
seed=None,
dtype=dtypes.float16,
num_valid_mantissa_bits=4):
"""Constructor.
Args:
minval: A python scalar or a scalar tensor. Lower bound of the range of
random values to generate.
maxval: A python scalar or a scalar tensor. Upper bound of the range of
random values to generate. Defaults to 1 for float types.
seed: A Python integer. Used to create random seeds. See
`tf.compat.v1.set_random_seed` for behavior.
dtype: The data type. Only supports tf.float16 for now.
num_valid_mantissa_bits: number of non-zero mantissa bits, default to 4.
Raises:
ValueError: An error if `dtype` is not tf.float16.
"""
if dtype not in (dtypes.float16,):
raise ValueError("dtype: %s not supported" % dtype.name)
super(_MaskedRandomUniformInitializer, self).__init__(
minval=minval, maxval=maxval, seed=seed, dtype=dtype)
self._num_mantissa_bits = 10
self._num_valid_mantissa_bits = num_valid_mantissa_bits
def __call__(self, shape, dtype=dtypes.float16, partition_info=None):
if dtype and dtype != dtypes.float16:
raise ValueError("dtype: %s not supported" % dtype.name)
res = super(_MaskedRandomUniformInitializer, self).__call__(
shape, dtype, partition_info)
# get uint16 view of the underlying buffer.
res = gen_array_ops.bitcast(res, dtypes.uint16)
# mask the last `shift` mantissa bits.
shift = self._num_mantissa_bits - self._num_valid_mantissa_bits
mask = (0xffff >> shift) << shift
res = gen_bitwise_ops.bitwise_and(res, mask)
# restore float16 view.
return gen_array_ops.bitcast(res, dtype)
def _get_initializer(init_bound, dtype, seed):
if dtype == dtypes.float16:
return _MaskedRandomUniformInitializer(
-init_bound, init_bound, dtype=dtype, seed=seed)
else:
return init_ops.random_uniform_initializer(
-init_bound, init_bound, dtype=dtype, seed=seed)
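# Worked example of the masking above: float16 has 10 mantissa bits, so with
# num_valid_mantissa_bits=4 the shift is 10 - 4 = 6 and the mask is
# (0xffff >> 6) << 6 == 0xffc0, i.e. the 6 least significant mantissa bits of
# every sampled value are zeroed out.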
def blocks_match(sess, use_peephole, dtype=dtypes.float32, cell_clip=None):
batch_size = 2
input_size = 3
cell_size = 4
sequence_length = 4
inputs = []
for _ in range(sequence_length):
inp = ops.convert_to_tensor(
np.random.randn(batch_size, input_size), dtype=dtype)
inputs.append(inp)
stacked_inputs = array_ops.stack(inputs)
init_bound = 1e-1 if dtype == dtypes.float16 else 1e-2
initializer = _get_initializer(init_bound, dtype=dtype, seed=19890212)
with variable_scope.variable_scope("test", initializer=initializer):
# magic naming so that the cells pick up these variables and reuse them
if use_peephole:
wci = variable_scope.get_variable(
"rnn/lstm_cell/w_i_diag", shape=[cell_size], dtype=dtype)
wcf = variable_scope.get_variable(
"rnn/lstm_cell/w_f_diag", shape=[cell_size], dtype=dtype)
wco = variable_scope.get_variable(
"rnn/lstm_cell/w_o_diag", shape=[cell_size], dtype=dtype)
w = variable_scope.get_variable(
"rnn/lstm_cell/kernel",
shape=[input_size + cell_size, cell_size * 4],
dtype=dtype)
b = variable_scope.get_variable(
"rnn/lstm_cell/bias",
shape=[cell_size * 4],
dtype=dtype,
initializer=init_ops.zeros_initializer())
basic_cell = rnn_cell.LSTMCell(
cell_size,
use_peepholes=use_peephole,
cell_clip=cell_clip,
dtype=dtype,
state_is_tuple=True,
reuse=True)
basic_outputs_op, basic_state_op = rnn.static_rnn(
basic_cell, inputs, dtype=dtype)
if use_peephole:
_, _, _, _, _, _, block_outputs_op = block_lstm(
ops.convert_to_tensor(sequence_length, dtype=dtypes.int64),
inputs,
w,
b,
wci=wci,
wcf=wcf,
wco=wco,
cell_clip=cell_clip,
use_peephole=True)
else:
_, _, _, _, _, _, block_outputs_op = block_lstm(
ops.convert_to_tensor(sequence_length, dtype=dtypes.int64),
inputs,
w,
b,
cell_clip=cell_clip)
fused_cell = lstm_ops.LSTMBlockFusedCell(
cell_size,
cell_clip=cell_clip,
use_peephole=use_peephole,
reuse=True,
name="rnn/lstm_cell")
fused_outputs_op, fused_state_op = fused_cell(stacked_inputs, dtype=dtype)
sess.run([variables.global_variables_initializer()])
basic_outputs, basic_state = sess.run([basic_outputs_op, basic_state_op[0]])
basic_grads = sess.run(gradients_impl.gradients(basic_outputs_op, inputs))
xs = [w, b]
if use_peephole:
xs += [wci, wcf, wco]
basic_wgrads = sess.run(gradients_impl.gradients(basic_outputs_op, xs))
block_outputs = sess.run(block_outputs_op)
block_grads = sess.run(gradients_impl.gradients(block_outputs_op, inputs))
block_wgrads = sess.run(gradients_impl.gradients(block_outputs_op, xs))
xs = [w, b]
if use_peephole:
xs += [wci, wcf, wco]
fused_outputs, fused_state = sess.run([fused_outputs_op, fused_state_op[0]])
fused_grads = sess.run(gradients_impl.gradients(fused_outputs_op, inputs))
fused_wgrads = sess.run(gradients_impl.gradients(fused_outputs_op, xs))
return (basic_state, fused_state, basic_outputs, block_outputs,
fused_outputs, basic_grads, block_grads, fused_grads, basic_wgrads,
block_wgrads, fused_wgrads)
class LSTMBlockCellTest(test.TestCase, parameterized.TestCase):
TEST_CASES = ({
"testcase_name": "Fp32",
"dtype": dtypes.float32,
"rtol": 1e-6,
"atol": 1e-6
}, {
"testcase_name": "Fp16",
"dtype": dtypes.float16,
"rtol": 8e-3,
"atol": 8e-4
})
def testNoneDimsWithDynamicRNN(self):
with self.session(use_gpu=True, graph=ops.Graph()) as sess:
batch_size = 4
num_steps = 5
input_dim = 6
cell_size = 7
cell = lstm_ops.LSTMBlockCell(cell_size)
x = array_ops.placeholder(dtypes.float32, shape=(None, None, input_dim))
output, _ = rnn.dynamic_rnn(
cell, x, time_major=True, dtype=dtypes.float32)
sess.run(variables.global_variables_initializer())
feed = {}
feed[x] = np.random.randn(num_steps, batch_size, input_dim)
sess.run(output, feed)
def testLSTMBlockCell(self):
with self.session(use_gpu=True, graph=ops.Graph()) as sess:
with variable_scope.variable_scope(
"root", initializer=init_ops.constant_initializer(0.5)):
x = array_ops.zeros([1, 2])
m0 = array_ops.zeros([1, 2])
m1 = array_ops.zeros([1, 2])
m2 = array_ops.zeros([1, 2])
m3 = array_ops.zeros([1, 2])
g, ((out_m0, out_m1), (out_m2, out_m3)) = rnn_cell.MultiRNNCell(
[lstm_ops.LSTMBlockCell(2)
for _ in range(2)], state_is_tuple=True)(x, ((m0, m1), (m2, m3)))
sess.run([variables.global_variables_initializer()])
res = sess.run([g, out_m0, out_m1, out_m2, out_m3], {
x.name: np.array([[1., 1.]]),
m0.name: 0.1 * np.ones([1, 2]),
m1.name: 0.1 * np.ones([1, 2]),
m2.name: 0.1 * np.ones([1, 2]),
m3.name: 0.1 * np.ones([1, 2])
})
self.assertEqual(len(res), 5)
self.assertAllClose(res[0], [[0.24024698, 0.24024698]])
# These numbers are from testBasicLSTMCell and only test c/h.
self.assertAllClose(res[1], [[0.68967271, 0.68967271]])
self.assertAllClose(res[2], [[0.44848421, 0.44848421]])
self.assertAllClose(res[3], [[0.39897051, 0.39897051]])
self.assertAllClose(res[4], [[0.24024698, 0.24024698]])
def testCompatibleNames(self):
with self.session(use_gpu=True, graph=ops.Graph()):
cell = rnn_cell.LSTMCell(10)
pcell = rnn_cell.LSTMCell(10, use_peepholes=True)
inputs = [array_ops.zeros([4, 5])] * 6
rnn.static_rnn(cell, inputs, dtype=dtypes.float32, scope="basic")
rnn.static_rnn(pcell, inputs, dtype=dtypes.float32, scope="peephole")
basic_names = {
v.name: v.get_shape()
for v in variables.trainable_variables()
}
with self.session(use_gpu=True, graph=ops.Graph()):
cell = lstm_ops.LSTMBlockCell(10)
pcell = lstm_ops.LSTMBlockCell(10, use_peephole=True)
inputs = [array_ops.zeros([4, 5])] * 6
rnn.static_rnn(cell, inputs, dtype=dtypes.float32, scope="basic")
rnn.static_rnn(pcell, inputs, dtype=dtypes.float32, scope="peephole")
block_names = {
v.name: v.get_shape()
for v in variables.trainable_variables()
}
with self.session(use_gpu=True, graph=ops.Graph()):
cell = lstm_ops.LSTMBlockFusedCell(10)
pcell = lstm_ops.LSTMBlockFusedCell(10, use_peephole=True)
inputs = array_ops.stack([array_ops.zeros([4, 5])] * 6)
cell(inputs, dtype=dtypes.float32, scope="basic/lstm_cell")
pcell(inputs, dtype=dtypes.float32, scope="peephole/lstm_cell")
fused_names = {
v.name: v.get_shape()
for v in variables.trainable_variables()
}
self.assertEqual(basic_names, block_names)
self.assertEqual(basic_names, fused_names)
def testLSTMBasicToBlockCell(self):
with self.session(use_gpu=True) as sess:
x = array_ops.zeros([1, 2])
x_values = np.random.randn(1, 2)
m0_val = 0.1 * np.ones([1, 2])
m1_val = -0.1 * np.ones([1, 2])
m2_val = -0.2 * np.ones([1, 2])
m3_val = 0.2 * np.ones([1, 2])
initializer = init_ops.random_uniform_initializer(
-0.01, 0.01, seed=19890212)
with variable_scope.variable_scope("basic", initializer=initializer):
m0 = array_ops.zeros([1, 2])
m1 = array_ops.zeros([1, 2])
m2 = array_ops.zeros([1, 2])
m3 = array_ops.zeros([1, 2])
g, ((out_m0, out_m1), (out_m2, out_m3)) = rnn_cell.MultiRNNCell(
[rnn_cell.BasicLSTMCell(2, state_is_tuple=True) for _ in range(2)],
state_is_tuple=True)(x, ((m0, m1), (m2, m3)))
sess.run([variables.global_variables_initializer()])
basic_res = sess.run([g, out_m0, out_m1, out_m2, out_m3], {
x.name: x_values,
m0.name: m0_val,
m1.name: m1_val,
m2.name: m2_val,
m3.name: m3_val
})
with variable_scope.variable_scope("block", initializer=initializer):
m0 = array_ops.zeros([1, 2])
m1 = array_ops.zeros([1, 2])
m2 = array_ops.zeros([1, 2])
m3 = array_ops.zeros([1, 2])
g, ((out_m0, out_m1), (out_m2, out_m3)) = rnn_cell.MultiRNNCell(
[lstm_ops.LSTMBlockCell(2)
for _ in range(2)], state_is_tuple=True)(x, ((m0, m1), (m2, m3)))
sess.run([variables.global_variables_initializer()])
block_res = sess.run([g, out_m0, out_m1, out_m2, out_m3], {
x.name: x_values,
m0.name: m0_val,
m1.name: m1_val,
m2.name: m2_val,
m3.name: m3_val
})
self.assertEqual(len(basic_res), len(block_res))
for basic, block in zip(basic_res, block_res):
self.assertAllClose(basic, block)
def testLSTMBasicToBlockCellPeeping(self):
with self.session(use_gpu=True) as sess:
x = array_ops.zeros([1, 2])
x_values = np.random.randn(1, 2)
m0_val = 0.1 * np.ones([1, 2])
m1_val = -0.1 * np.ones([1, 2])
m2_val = -0.2 * np.ones([1, 2])
m3_val = 0.2 * np.ones([1, 2])
initializer = init_ops.random_uniform_initializer(
-0.01, 0.01, seed=19890212)
with variable_scope.variable_scope("basic", initializer=initializer):
m0 = array_ops.zeros([1, 2])
m1 = array_ops.zeros([1, 2])
m2 = array_ops.zeros([1, 2])
m3 = array_ops.zeros([1, 2])
g, ((out_m0, out_m1), (out_m2, out_m3)) = rnn_cell.MultiRNNCell(
[
rnn_cell.LSTMCell(2, use_peepholes=True, state_is_tuple=True)
for _ in range(2)
],
state_is_tuple=True)(x, ((m0, m1), (m2, m3)))
sess.run([variables.global_variables_initializer()])
basic_res = sess.run([g, out_m0, out_m1, out_m2, out_m3], {
x.name: x_values,
m0.name: m0_val,
m1.name: m1_val,
m2.name: m2_val,
m3.name: m3_val
})
with variable_scope.variable_scope("block", initializer=initializer):
m0 = array_ops.zeros([1, 2])
m1 = array_ops.zeros([1, 2])
m2 = array_ops.zeros([1, 2])
m3 = array_ops.zeros([1, 2])
g, ((out_m0, out_m1), (out_m2, out_m3)) = rnn_cell.MultiRNNCell(
[lstm_ops.LSTMBlockCell(2, use_peephole=True) for _ in range(2)],
state_is_tuple=True)(x, ((m0, m1), (m2, m3)))
sess.run([variables.global_variables_initializer()])
block_res = sess.run([g, out_m0, out_m1, out_m2, out_m3], {
x.name: x_values,
m0.name: m0_val,
m1.name: m1_val,
m2.name: m2_val,
m3.name: m3_val
})
self.assertEqual(len(basic_res), len(block_res))
for basic, block in zip(basic_res, block_res):
self.assertAllClose(basic, block)
def LSTMBasicToBlockTestHelper(self,
dtype=dtypes.float32,
use_peephole=False,
cell_clip=None,
rtol=1e-6,
atol=1e-6):
with self.session(use_gpu=True, graph=ops.Graph()) as sess:
(basic_state, fused_state, basic_outputs, block_outputs, fused_outputs,
basic_grads, block_grads, fused_grads, basic_wgrads, block_wgrads,
fused_wgrads) = blocks_match(
sess, use_peephole=use_peephole, dtype=dtype, cell_clip=cell_clip)
self.assertAllClose(basic_outputs, block_outputs, rtol=rtol, atol=atol)
self.assertAllClose(basic_grads, block_grads, rtol=rtol, atol=atol)
for basic, block in zip(basic_wgrads, block_wgrads):
self.assertAllClose(basic, block, rtol=rtol, atol=atol)
self.assertAllClose(basic_outputs, fused_outputs, rtol=rtol, atol=atol)
self.assertAllClose(basic_state, fused_state, rtol=rtol, atol=atol)
self.assertAllClose(basic_grads, fused_grads, rtol=rtol, atol=atol)
for basic, fused in zip(basic_wgrads, fused_wgrads):
self.assertAllClose(basic, fused, rtol=rtol, atol=atol)
@parameterized.named_parameters(*TEST_CASES)
def testLSTMBasicToBlock(self, dtype, rtol, atol):
self.LSTMBasicToBlockTestHelper(
dtype, use_peephole=False, rtol=rtol, atol=atol)
@parameterized.named_parameters(*TEST_CASES)
def testLSTMBasicToBlockPeeping(self, dtype, rtol, atol):
self.LSTMBasicToBlockTestHelper(
dtype, use_peephole=True, rtol=rtol, atol=atol)
@parameterized.named_parameters(*TEST_CASES)
def testLSTMBasicToBlockCellClip(self, dtype, rtol, atol):
self.LSTMBasicToBlockTestHelper(
dtype, use_peephole=True, cell_clip=0.5, rtol=rtol, atol=atol)
def testLSTMFusedSequenceLengths(self):
"""Verify proper support for sequence lengths in LSTMBlockFusedCell."""
with self.session(use_gpu=True) as sess:
batch_size = 3
input_size = 4
cell_size = 5
max_sequence_length = 6
inputs = []
for _ in range(max_sequence_length):
inp = ops.convert_to_tensor(
np.random.randn(batch_size, input_size), dtype=dtypes.float32)
inputs.append(inp)
seq_lengths = constant_op.constant([3, 4, 5])
cell_inputs = array_ops.stack(inputs)
initializer = init_ops.random_uniform_initializer(
-0.01, 0.01, seed=19890213)
with variable_scope.variable_scope("lstm_cell", initializer=initializer):
# magic naming so that the cells pick up these variables and reuse them
variable_scope.get_variable(
"kernel",
shape=[input_size + cell_size, cell_size * 4],
dtype=dtypes.float32)
variable_scope.get_variable(
"bias",
shape=[cell_size * 4],
dtype=dtypes.float32,
initializer=init_ops.zeros_initializer())
cell = lstm_ops.LSTMBlockFusedCell(
cell_size, cell_clip=0, use_peephole=False, reuse=True,
name="lstm_cell")
fused_outputs_op, fused_state_op = cell(
cell_inputs, dtype=dtypes.float32, sequence_length=seq_lengths)
cell_vars = [
v for v in variables.trainable_variables()
if v.name.endswith("kernel") or v.name.endswith("bias")
]
# Verify that state propagation works if we turn our sequence into
# tiny (single-time) subsequences, i.e. unfuse the cell
unfused_outputs_op = []
state = None
with variable_scope.variable_scope(
variable_scope.get_variable_scope(), reuse=True):
for i, inp in enumerate(inputs):
lengths = [int(i < l) for l in seq_lengths.eval()]
output, state = cell(
array_ops.expand_dims(inp, 0),
initial_state=state,
dtype=dtypes.float32,
sequence_length=lengths)
unfused_outputs_op.append(output[0])
unfused_outputs_op = array_ops.stack(unfused_outputs_op)
sess.run([variables.global_variables_initializer()])
unfused_outputs, unfused_state = sess.run([unfused_outputs_op, state[0]])
unfused_grads = sess.run(
gradients_impl.gradients(unfused_outputs_op, inputs))
unfused_wgrads = sess.run(
gradients_impl.gradients(unfused_outputs_op, cell_vars))
fused_outputs, fused_state = sess.run(
[fused_outputs_op, fused_state_op[0]])
fused_grads = sess.run(gradients_impl.gradients(fused_outputs_op, inputs))
fused_wgrads = sess.run(
gradients_impl.gradients(fused_outputs_op, cell_vars))
self.assertAllClose(fused_outputs, unfused_outputs)
self.assertAllClose(fused_state, unfused_state)
self.assertAllClose(fused_grads, unfused_grads)
for fused, unfused in zip(fused_wgrads, unfused_wgrads):
self.assertAllClose(fused, unfused, rtol=1e-6, atol=1e-6)
#### Benchmarking.
class BenchmarkLSTMBlock(test.Benchmark):
def benchmarkLSTMBlockCellFpropWithDynamicRNN(self):
print("BlockLSTMCell forward propagation via dynamic_rnn().")
print("--------------------------------------------------------------")
print("LSTMBlockCell Seconds per inference.")
print("batch_size,cell_size,input_size,time_steps,use_gpu,wall_time")
iters = 10
for config in benchmarking.dict_product({
"batch_size": [1, 8, 13, 32, 67, 128],
"cell_size": [128, 250, 512, 650, 1024, 1350],
"time_steps": [40],
"use_gpu": [True, False],
"dtype": ["float32", "float16"],
}):
dtype = dtypes.float32 if config["dtype"] == "float32" else dtypes.float16
with ops.Graph().as_default():
with benchmarking.device(use_gpu=config["use_gpu"]):
inputs = variable_scope.get_variable(
"x",
dtype=dtype,
shape=[
config["time_steps"], config["batch_size"],
config["cell_size"]
])
cell = lstm_ops.LSTMBlockCell(config["cell_size"], dtype=dtype)
outputs = rnn.dynamic_rnn(cell, inputs, time_major=True, dtype=dtype)
init_op = variables.global_variables_initializer()
with session.Session() as sess:
sess.run(init_op)
wall_time = benchmarking.seconds_per_run(outputs, sess, iters)
# Print to stdout. If the TEST_REPORT_FILE_PREFIX environment variable
# is set, this will produce a copy-paste-able CSV file.
print(",".join(
map(str, [
config["dtype"], config["batch_size"], config["cell_size"],
config["cell_size"], config["time_steps"], config["use_gpu"],
wall_time
])))
benchmark_name_template = "_".join([
"LSTMBlockCell_fprop", "DT_%(dtype)s", "BS%(batch_size)i",
"CS%(cell_size)i", "IS%(cell_size)i", "TS%(time_steps)i",
"gpu_%(use_gpu)s"
])
self.report_benchmark(
name=benchmark_name_template % config,
iters=iters,
wall_time=wall_time,
extras=config)
def benchmarkLSTMBlockCellBpropWithDynamicRNN(self):
print("BlockLSTMCell backward propagation via dynamic_rnn().")
print("--------------------------------------------------------------")
print("LSTMBlockCell Seconds per inference.")
print("batch_size,cell_size,input_size,time_steps,use_gpu,wall_time")
iters = 10
for config in benchmarking.dict_product({
"batch_size": [1, 8, 13, 32, 67, 128],
"cell_size": [128, 250, 512, 650, 1024, 1350],
"time_steps": [40],
"use_gpu": [True, False],
"dtype": ["float32", "float16"],
}):
dtype = dtypes.float32 if config["dtype"] == "float32" else dtypes.float16
with ops.Graph().as_default():
with benchmarking.device(use_gpu=config["use_gpu"]):
time_steps = config["time_steps"]
batch_size = config["batch_size"]
cell_size = input_size = config["cell_size"]
inputs = variable_scope.get_variable(
"x", [time_steps, batch_size, cell_size],
trainable=False,
dtype=dtype)
with variable_scope.variable_scope(
"rnn", reuse=variable_scope.AUTO_REUSE):
w = variable_scope.get_variable(
"rnn/lstm_cell/kernel",
shape=[input_size + cell_size, cell_size * 4],
dtype=dtype)
b = variable_scope.get_variable(
"rnn/lstm_cell/bias",
shape=[cell_size * 4],
dtype=dtype,
initializer=init_ops.zeros_initializer())
cell = lstm_ops.LSTMBlockCell(cell_size, dtype=dtype)
outputs = rnn.dynamic_rnn(
cell, inputs, time_major=True, dtype=dtype)
grads = gradients_impl.gradients(outputs, [inputs, w, b])
init_op = variables.global_variables_initializer()
with session.Session() as sess:
sess.run(init_op)
wall_time = benchmarking.seconds_per_run(grads, sess, iters)
# Print to stdout. If the TEST_REPORT_FILE_PREFIX environment variable
# is set, this will produce a copy-paste-able CSV file.
print(",".join(
map(str, [
config["dtype"], batch_size, cell_size, cell_size, time_steps,
config["use_gpu"], wall_time
])))
benchmark_name_template = "_".join([
"LSTMBlockCell_bprop", "DT_%(dtype)s", "BS%(batch_size)i",
"CS%(cell_size)i", "IS%(cell_size)i", "TS%(time_steps)i",
"gpu_%(use_gpu)s"
])
self.report_benchmark(
name=benchmark_name_template % config,
iters=iters,
wall_time=wall_time,
extras=config)
if __name__ == "__main__":
test.main()
|
|
#!/usr/bin/env python
"""Class for finding Youtube influencers"""
__author__ = "Peter J Usherwood"
__python_version__ = "3.6"
import pandas as pd
import csv
import json
import os
from scipy.stats import percentileofscore
import progressbar
from usherwood_ds.data_imports.youtube_api.api_class import YoutubeAPI
from usherwood_ds.data_imports.youtube_import import create_youtube_user_df, create_youtube_comment_df
import warnings
warnings.filterwarnings('ignore')
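# Presumably the approximate size of the Youtube population, kept as a module-level
# reference constant (it is not used by the functions below).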
YT_SIZE = 300000000
def interests_identification(handles=None,
similar_videos=None,
max_comments_per_similar_influencer_video=-1,
save_path='',
TOP_X_CONNECTED=2000,
api_credentials=None):
"""
Run the analysis to find the top amplifying accounts on Youtube. This is good for identifying interests or for a
quick influencer analysis; for a full influencer analysis use the influencers_identification function, as it also
calculates engagement scores.
:param handles: List of Youtube handles
:param similar_videos: List of similar influencer video ids
:param max_comments_per_similar_influencer_video: Int, when searching similar videos, cap the number of comments
retrieved per video to put a rough cap on the target market (TM) size
:param save_path: path of where to save the dataframes
:param TOP_X_CONNECTED: Int, take the TOP_X_CONNECTED most connected influencers
:param api_credentials: Dict, api credentials
:return: (target_market, influencers) dataframes
"""
if api_credentials is None:
with open(os.path.join(os.path.dirname(__file__), "data_imports/api_credentials.json"), 'r') as openfile:
api_credentials = json.load(openfile)
api = YoutubeAPI(api_credentials=api_credentials)
if not handles:
print('Getting TM from similar influencers')
tm_ids = retrieve_similar_influencer_auidence(api,
save_path=save_path,
video_ids=similar_videos,
num_comments=max_comments_per_similar_influencer_video)
print('Fortifying target market')
target_market, TM_SIZE = fortify_tm_without_engamements(tm_ids=tm_ids, save_path=save_path, api=api)
print('Getting sphere of influence')
influencers = get_sphere_of_influence(target_market, save_path=save_path, api=api)
print('Fortifying sphere of influence and getting amplification')
influencers = get_amplification_influencers(influencers=influencers,
api=api,
TM_SIZE=TM_SIZE,
TOP_X_CONNECTED=TOP_X_CONNECTED,
save_path=save_path)
print('Done')
return target_market, influencers
def retrieve_similar_influencer_auidence(api,
save_path='',
video_ids=['tUtLHo7UQMM'],
num_comments=-1): # max
comments = []
for video_id in video_ids:
comments += api.get_video_comments(video_id, num_comments=num_comments)
print(str(len(comments)), 'comments found.')
parsed_comments = []
for comment in comments:
parsed_comments += [api.parse_comment_to_youtube_comment(comment)]
df_comments = create_youtube_comment_df(parsed_comments)
target_market_ids = pd.DataFrame(df_comments['Youtube Author ID'].value_counts().index,
columns=['Youtube Channel ID'])
target_market_ids.to_csv(save_path + 'TM.csv', encoding='utf-8', quoting=csv.QUOTE_ALL, index=False)
return target_market_ids
def fortify_tm_without_engamements(tm_ids, api, save_path=''):
"""
Fortify the TM with user info, without engagement measures
:param tm_ids: List of Youtube channel ids
:param api: YoutubeAPI instance
:param save_path: path of where to save the dataframes
:return: (target_market, TM_SIZE) - pandas df of fortified Youtube users and the size of the target market
"""
channels = []
for idx in tm_ids['Youtube Channel ID'].tolist():
json_response = api.fortify_channel(channel_id=idx, fortify_with='snippet,statistics')
if json_response.pop('found', True) is not False:
channels.append(json_response)
TM_SIZE = len(channels)
print('Target market size:', TM_SIZE)
target_market_arr = []
for user in channels:
target_market_arr += [api.parse_user_to_youtube_user(user)]
target_market = create_youtube_user_df(target_market_arr)
target_market.to_csv(save_path + 'TM.csv', encoding='utf-8', quoting=csv.QUOTE_ALL, index=False)
return target_market, TM_SIZE
def get_sphere_of_influence(target_market, api, save_path=''):
"""
Get the people the target market are following and rank them by how connected they are to the target market
:param target_market: pandas df of fortified Youtube users (the target market)
:param api: YoutubeAPI instance
:param save_path: path of where to save the dataframes
:return: partially populated influencers df
"""
sphere = []
with progressbar.ProgressBar(max_value=len(target_market['Youtube Author ID'])) as bar:
for i, user_id in enumerate(target_market['Youtube Author ID'].values.tolist()):
subscription_jsons = api.get_user_subscriptions(youtube_author_id=user_id)
for sub in subscription_jsons:
sphere += [sub['snippet']['resourceId']['channelId']]
bar.update(i)
influencers = pd.DataFrame(pd.Series(sphere).value_counts()).reset_index().rename(
columns={'index': 'Youtube Author ID', 0: 'TM Amplification'})
influencers.to_csv(save_path + 'Influencers.csv', encoding='utf-8', quoting=csv.QUOTE_ALL, index=False)
return influencers
def get_amplification_influencers(TM_SIZE,
api,
TOP_X_CONNECTED=2000,
save_path='',
influencers=None,
load_from_disk=False,
load_path='',
inc_tiers=True,
tiers=[1500, 5000, 20000, 100000]):
"""
Fortify the influencers df for the top_x_connected influencers
:param influencers: influencers df output from get_sphere_of_influence
:param TM_SIZE: Int, the size of the target market
:param api: YoutubeAPI instance
:param TOP_X_CONNECTED: Int, take the TOP_X_CONNECTED most connected influencers
:param save_path: path of where to save the dataframes
:param load_from_disk: Bool, load previously run influencer data from disk
:param load_path: Str, path to the saved data if it is to be loaded, files must be named TM.csv and Influencers.csv
:param inc_tiers: Bool, divide rankings by number of followers
:param tiers: List, ascending list of integers as the upper boundaries of follower numbers per tier; a final tier
will be added for users with more followers than your last divide
:return: fortified and ranked influencers df
"""
if load_from_disk:
influencers = pd.read_csv(load_path + 'Influencers.csv')
influencers = influencers[:TOP_X_CONNECTED]
influencers_jsons = []
with progressbar.ProgressBar(max_value=len(influencers['Youtube Author ID'])) as bar:
for i, idx in enumerate(influencers['Youtube Author ID'].values.tolist()):
influencers_jsons += [api.fortify_channel(channel_id=idx, fortify_with='snippet,statistics')]
bar.update(i)
influencers_arr = []
for user in influencers_jsons:
influencers_arr += [api.parse_user_to_youtube_user(user)]
influencers_fort = create_youtube_user_df(influencers_arr)
influencers_fort['Youtube Author ID'] = influencers_fort['Youtube Author ID']
influencers = influencers_fort.merge(influencers, how='inner', on='Youtube Author ID')
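# The two TM_SIZE factors cancel, so the Amplification Index below is simply
# TM Amplification / Subscriber Count before the per-tier percentile rescaling.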
influencers['Amplification Index'] = influencers[['Subscriber Count', 'TM Amplification']].apply(
lambda x: (x[1] / TM_SIZE) * (TM_SIZE / x[0]), axis=1)
influencers.sort_values(by='Amplification Index', inplace=True, ascending=False)
influencers['Tier'] = 0
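# Copy the caller's tier list before mutating it, then add sentinel boundaries below the
# first tier and above the last so every subscriber count falls into exactly one bucket.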
tiers = tiers.copy()
tiers = [0] + tiers + [9999999999]
for tier_ix in range(len(tiers) - 1):
sub = influencers[(influencers['Subscriber Count'] >= tiers[tier_ix]) &
(influencers['Subscriber Count'] < tiers[tier_ix + 1])]
arr = sorted(sub['Amplification Index'].values)
influencers.loc[sub.index, 'Amplification Index'] = sub['Amplification Index']. \
apply(lambda e: percentileofscore(arr, e)).values
influencers.loc[sub.index, 'Tier'] = tier_ix + 1
influencers.reset_index(drop=True, inplace=True)
influencers.to_csv(save_path + 'Influencers.csv', encoding='utf-8', quoting=csv.QUOTE_ALL, index=False)
return influencers
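# A minimal usage sketch (illustrative only): the video id is just the default example id
# used above, and the save path is a placeholder.
# target_market, influencers = interests_identification(
#     similar_videos=['tUtLHo7UQMM'],
#     max_comments_per_similar_influencer_video=500,
#     save_path='output/',
#     TOP_X_CONNECTED=500)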
|
|
# Copyright 2015 Chelsio Communications Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import contextlib
import os
import shutil
import StringIO
import tempfile
import mock
from oslo_utils import timeutils
from cinder import context
from cinder.openstack.common import fileutils
from cinder import test
from cinder import utils
from cinder.volume import configuration as conf
from cinder.volume.targets import cxt
class TestCxtAdmDriver(test.TestCase):
def __init__(self, *args, **kwargs):
super(TestCxtAdmDriver, self).__init__(*args, **kwargs)
self.configuration = conf.Configuration(None)
self.configuration.append_config_values = mock.Mock(return_value=0)
self.configuration.iscsi_ip_address = '10.9.8.7'
self.cxt_subdir = cxt.CxtAdm.cxt_subdir
self.fake_id_1 = 'ed2c1fd4-5fc0-11e4-aa15-123b93f75cba'
self.fake_id_2 = 'ed2c2222-5fc0-11e4-aa15-123b93f75cba'
self.target = cxt.CxtAdm(root_helper=utils.get_root_helper(),
configuration=self.configuration)
self.fake_volume = 'volume-83c2e877-feed-46be-8435-77884fe55b45'
self.testvol_1 =\
{'project_id': self.fake_id_1,
'name': 'testvol',
'size': 1,
'id': self.fake_id_2,
'volume_type_id': None,
'provider_location': '10.9.8.7:3260 '
'iqn.2010-10.org.openstack:'
'volume-%s 0' % self.fake_id_2,
'provider_auth': 'CHAP stack-1-a60e2611875f40199931f2'
'c76370d66b 2FE0CQ8J196R',
'provider_geometry': '512 512',
'created_at': timeutils.utcnow(),
'host': 'fake_host@lvm#lvm'}
self.expected_iscsi_properties = \
{'auth_method': 'CHAP',
'auth_password': '2FE0CQ8J196R',
'auth_username': 'stack-1-a60e2611875f40199931f2c76370d66b',
'encrypted': False,
'logical_block_size': '512',
'physical_block_size': '512',
'target_discovered': False,
'target_iqn': 'iqn.2010-10.org.openstack:volume-%s' %
self.fake_id_2,
'target_lun': 0,
'target_portal': '10.10.7.1:3260',
'volume_id': self.fake_id_2}
self.fake_iscsi_scan =\
('\n'
'TARGET: iqn.2010-10.org.openstack:%s, id=1, login_ip=0\n' # noqa
' PortalGroup=1@10.9.8.7:3260,timeout=0\n'
' TargetDevice=/dev/stack-volumes-lvmdriver-1/%s,BLK,PROD=CHISCSI Target,SN=0N0743000000000,ID=0D074300000000000000000,WWN=:W00743000000000\n' # noqa
% (self.fake_volume, self.fake_volume))
def setUp(self):
super(TestCxtAdmDriver, self).setUp()
self.fake_base_dir = tempfile.mkdtemp()
self.fake_volumes_dir = os.path.join(self.fake_base_dir,
self.cxt_subdir)
fileutils.ensure_tree(self.fake_volumes_dir)
self.addCleanup(self._cleanup)
self.exec_patcher = mock.patch.object(utils, 'execute')
self.mock_execute = self.exec_patcher.start()
self.addCleanup(self.exec_patcher.stop)
def _cleanup(self):
if os.path.exists(self.fake_base_dir):
shutil.rmtree(self.fake_base_dir)
@mock.patch('cinder.utils.execute')
def test_get_target(self, mock_execute):
mock_execute.return_value = (self.fake_iscsi_scan, None)
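# The fake scan output above reports "id=1", so _get_target is expected to return '1'
# for the matching IQN.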
with mock.patch.object(self.target, '_get_volumes_dir') as mock_get:
mock_get.return_value = self.fake_volumes_dir
self.assertEqual('1',
self.target._get_target(
'iqn.2010-10.org.openstack:volume-83c2e877-feed-46be-8435-77884fe55b45' # noqa
))
self.assertTrue(mock_execute.called)
def test_get_target_chap_auth(self):
tmp_file = StringIO.StringIO()
tmp_file.write(
'target:\n'
' TargetName=iqn.2010-10.org.openstack:volume-83c2e877-feed-46be-8435-77884fe55b45\n' # noqa
' TargetDevice=/dev/stack-volumes-lvmdriver-1/volume-83c2e877-feed-46be-8435-77884fe55b45\n' # noqa
' PortalGroup=1@10.9.8.7:3260\n'
' AuthMethod=CHAP\n'
' Auth_CHAP_Policy=Oneway\n'
' Auth_CHAP_Initiator="otzLy2UYbYfnP4zXLG5z":"234Zweo38VGBBvrpK9nt"\n' # noqa
)
tmp_file.seek(0)
test_vol = ('iqn.2010-10.org.openstack:'
'volume-83c2e877-feed-46be-8435-77884fe55b45')
expected = ('otzLy2UYbYfnP4zXLG5z', '234Zweo38VGBBvrpK9nt')
with mock.patch('__builtin__.open') as mock_open:
ctx = context.get_admin_context()
mock_open.return_value = contextlib.closing(tmp_file)
self.assertEqual(expected,
self.target._get_target_chap_auth(ctx, test_vol))
self.assertTrue(mock_open.called)
def test_get_target_chap_auth_negative(self):
test_vol =\
'iqn.2010-10.org.openstack:'\
'volume-83c2e877-feed-46be-8435-77884fe55b45'
with mock.patch('__builtin__.open') as mock_open:
e = IOError()
e.errno = 123
mock_open.side_effect = e
ctxt = context.get_admin_context()
self.assertRaises(IOError,
self.target._get_target_chap_auth,
ctxt, test_vol)
mock_open.side_effect = StandardError()
self.assertRaises(StandardError,
self.target._get_target_chap_auth,
ctxt, test_vol)
@mock.patch('cinder.volume.targets.cxt.CxtAdm._get_target',
return_value=1)
@mock.patch('cinder.utils.execute')
def test_create_iscsi_target(self, mock_execute, mock_get_targ):
mock_execute.return_value = ('', '')
with mock.patch.object(self.target, '_get_volumes_dir') as mock_get:
mock_get.return_value = self.fake_volumes_dir
test_vol = 'iqn.2010-10.org.openstack:'\
'volume-83c2e877-feed-46be-8435-77884fe55b45'
self.assertEqual(
1,
self.target.create_iscsi_target(
test_vol,
1,
0,
self.fake_volumes_dir))
self.assertTrue(mock_get.called)
self.assertTrue(mock_execute.called)
self.assertTrue(mock_get_targ.called)
@mock.patch('cinder.volume.targets.cxt.CxtAdm._get_target',
return_value=1)
@mock.patch('cinder.utils.execute')
def test_create_iscsi_target_already_exists(self, mock_execute,
mock_get_targ):
mock_execute.return_value = ('fake out', 'fake err')
with mock.patch.object(self.target, '_get_volumes_dir') as mock_get:
mock_get.return_value = self.fake_volumes_dir
test_vol = 'iqn.2010-10.org.openstack:'\
'volume-83c2e877-feed-46be-8435-77884fe55b45'
self.assertEqual(
1,
self.target.create_iscsi_target(
test_vol,
1,
0,
self.fake_volumes_dir))
self.assertTrue(mock_get.called)
self.assertTrue(mock_get_targ.called)
self.assertTrue(mock_execute.called)
@mock.patch('cinder.volume.targets.cxt.CxtAdm._get_target',
return_value=1)
@mock.patch('cinder.utils.execute')
@mock.patch('cinder.volume.utils.generate_password',
return_value="P68eE7u9eFqDGexd28DQ")
@mock.patch('cinder.volume.utils.generate_username',
return_value="QZJbisGmn9AL954FNF4D")
def test_create_export(self, mock_user, mock_pass, mock_execute,
mock_get_targ):
mock_execute.return_value = ('', '')
with mock.patch.object(self.target, '_get_volumes_dir') as mock_get:
mock_get.return_value = self.fake_volumes_dir
expected_result = {'location': '10.9.8.7:3260,1 '
'iqn.2010-10.org.openstack:testvol 0',
'auth': 'CHAP '
'QZJbisGmn9AL954FNF4D P68eE7u9eFqDGexd28DQ'}
ctxt = context.get_admin_context()
self.assertEqual(expected_result,
self.target.create_export(ctxt,
self.testvol_1,
self.fake_volumes_dir))
self.assertTrue(mock_get.called)
self.assertTrue(mock_execute.called)
@mock.patch('cinder.volume.targets.cxt.CxtAdm._get_target_chap_auth')
def test_ensure_export(self, mock_get_chap):
fake_creds = ('asdf', 'qwert')
mock_get_chap.return_value = fake_creds
ctxt = context.get_admin_context()
with mock.patch.object(self.target, 'create_iscsi_target'):
self.target.ensure_export(ctxt,
self.testvol_1,
self.fake_volumes_dir)
self.target.create_iscsi_target.assert_called_once_with(
'iqn.2010-10.org.openstack:testvol',
1, 0, self.fake_volumes_dir, fake_creds,
check_exit_code=False,
old_name=None)
|
|
from reportlab.lib.testutils import setOutDir,makeSuiteForClasses, outputfile, printLocation, NearTestCase
setOutDir(__name__)
import unittest
from reportlab.pdfgen.canvas import Canvas
from reportlab.pdfbase import pdfmetrics
from reportlab.pdfbase.ttfonts import TTFont
from reportlab.pdfbase import pdfutils
from reportlab.platypus.paragraph import Paragraph
from reportlab.lib.styles import ParagraphStyle
from reportlab.graphics.shapes import Drawing, String, Ellipse
import re
import codecs
textPat = re.compile(r'\([^(]*\)')
#test sentences
testCp1252 = 'copyright %s trademark %s registered %s ReportLab! Ol%s!' % (chr(169), chr(153),chr(174), chr(0xe9))
testUni = unicode(testCp1252, 'cp1252')
testUTF8 = testUni.encode('utf-8')
# expected result is octal-escaped text in the PDF
expectedCp1252 = pdfutils._escape(testCp1252)
def extractText(pdfOps):
"""Utility to rip out the PDF text within a block of PDF operators.
PDF will show a string draw as something like "(Hello World) Tj"
i.e. text is in curved brackets. Crude and dirty, probably fails
on escaped brackets.
"""
found = textPat.findall(pdfOps)
#chop off '(' and ')'
return map(lambda x:x[1:-1], found)
def subsetToUnicode(ttf, subsetCodeStr):
"""Return unicode string represented by given subsetCode string
as found when TrueType font rendered to PDF, ttf must be the font
object that was used."""
# This relies on TTFont internals and uses the first document
# and subset it finds
subset = ttf.state.values()[0].subsets[0]
chrs = []
for codeStr in subsetCodeStr.split('\\'):
if codeStr:
chrs.append(unichr(subset[int(codeStr[1:], 8)]))
return u''.join(chrs)
class TextEncodingTestCase(NearTestCase):
"""Tests of expected Unicode and encoding behaviour
"""
def setUp(self):
self.vera = TTFont("Vera", "Vera.ttf")
pdfmetrics.registerFont(self.vera)
self.styNormal = ParagraphStyle(name='Helvetica', fontName='Helvetica-Oblique')
self.styTrueType = ParagraphStyle(name='TrueType', fontName='Vera')
def testStringWidth(self):
msg = 'Hello World'
self.assertNear(pdfmetrics.stringWidth(msg, 'Courier', 10),66.0)
self.assertNear(pdfmetrics.stringWidth(msg, 'Helvetica', 10),51.67)
self.assertNear(pdfmetrics.stringWidth(msg, 'Times-Roman', 10),50.27)
self.assertNear(pdfmetrics.stringWidth(msg, 'Vera', 10),57.7685546875)
uniMsg1 = u"Hello World"
self.assertNear(pdfmetrics.stringWidth(uniMsg1, 'Courier', 10),66.0)
self.assertNear(pdfmetrics.stringWidth(uniMsg1, 'Helvetica', 10),51.67)
self.assertNear(pdfmetrics.stringWidth(uniMsg1, 'Times-Roman', 10),50.27)
self.assertNear(pdfmetrics.stringWidth(uniMsg1, 'Vera', 10),57.7685546875)
# Courier glyphs are all 600/1000 em wide, so a string measured as raw UTF-8 bytes
# would come out too wide because the multi-byte characters count as extra characters
self.assertEquals(len(testCp1252),52)
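# 52 characters * 600/1000 em * 10 pt = 312.0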
self.assertNear(pdfmetrics.stringWidth(testCp1252, 'Courier', 10, 'cp1252'),312.0)
# the test string has 5 more bytes and so "measures too long" if passed to
# a single-byte font which treats it as a single-byte string.
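# The four non-ASCII characters take 2, 3, 2 and 2 bytes in UTF-8, which is where the
# 5 extra bytes (52 -> 57) come from.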
self.assertEquals(len(testUTF8),57)
self.assertNear(pdfmetrics.stringWidth(testUTF8, 'Courier', 10),312.0)
self.assertEquals(len(testUni),52)
self.assertNear(pdfmetrics.stringWidth(testUni, 'Courier', 10),312.0)
# now try a TrueType font. Should be able to accept Unicode or UTF8
self.assertNear(pdfmetrics.stringWidth(testUTF8, 'Vera', 10),279.809570313)
self.assertNear(pdfmetrics.stringWidth(testUni, 'Vera', 10),279.809570313)
def testUtf8Canvas(self):
"""Verify canvas declared as utf8 autoconverts.
This assumes utf8 input. It converts to the encoding of the
underlying font, so both text lines APPEAR the same."""
c = Canvas(outputfile('test_pdfbase_encodings_utf8.pdf'))
c.drawString(100,700, testUTF8)
# Set a font with UTF8 encoding
c.setFont('Vera', 12)
# This should pass the UTF8 through unchanged
c.drawString(100,600, testUTF8)
# and this should convert from Unicode to UTF8
c.drawString(100,500, testUni)
# now add a paragraph in UTF-8 in the latin-1 style
p = Paragraph(testUTF8, style=self.styNormal, encoding="utf-8")
w, h = p.wrap(150, 100)
p.drawOn(c, 100, 400) #3
c.rect(100,300,w,h)
# now add a paragraph in UTF-8 in the UTF-8 style
p2 = Paragraph(testUTF8, style=self.styTrueType, encoding="utf-8")
w, h = p2.wrap(150, 100)
p2.drawOn(c, 300, 400) #4
c.rect(100,300,w,h)
# now add a paragraph in Unicode in the latin-1 style
p3 = Paragraph(testUni, style=self.styNormal)
w, h = p3.wrap(150, 100)
p3.drawOn(c, 100, 300)
c.rect(100,300,w,h)
# now add a paragraph in Unicode in the UTF-8 style
p4 = Paragraph(testUni, style=self.styTrueType)
p4.wrap(150, 100)
p4.drawOn(c, 300, 300)
c.rect(300,300,w,h)
# now a graphic
d1 = Drawing(400,50)
d1.add(Ellipse(200,25,200,12.5, fillColor=None))
d1.add(String(200,25,testUTF8, textAnchor='middle', encoding='utf-8'))
d1.drawOn(c, 100, 150)
# now a graphic in utf8
d2 = Drawing(400,50)
d2.add(Ellipse(200,25,200,12.5, fillColor=None))
d2.add(String(200,25,testUTF8, fontName='Vera', textAnchor='middle', encoding='utf-8'))
d2.drawOn(c, 100, 100)
# now a graphic in Unicode with T1 font
d3 = Drawing(400,50)
d3.add(Ellipse(200,25,200,12.5, fillColor=None))
d3.add(String(200,25,testUni, textAnchor='middle'))
d3.drawOn(c, 100, 50)
# now a graphic in Unicode with TT font
d4 = Drawing(400,50)
d4.add(Ellipse(200,25,200,12.5, fillColor=None))
d4.add(String(200,25,testUni, fontName='Vera', textAnchor='middle'))
d4.drawOn(c, 100, 0)
extracted = extractText(c.getCurrentPageContent())
self.assertEquals(extracted[0], expectedCp1252)
self.assertEquals(extracted[1], extracted[2])
#self.assertEquals(subsetToUnicode(self.vera, extracted[1]), testUni)
c.save()
class FontEncodingTestCase(unittest.TestCase):
"""Make documents with custom encodings of Type 1 built-in fonts.
Nothing really to do with character encodings; this is about hacking the font itself"""
def test0(self):
"Make custom encodings of standard fonts"
# make a custom encoded font.
c = Canvas(outputfile('test_pdfbase_encodings.pdf'))
c.setPageCompression(0)
c.setFont('Helvetica', 12)
c.drawString(100, 700, 'The text below should be in a custom encoding in which all vowels become "z"')
# invent a new language where vowels are replaced with letter 'z'
zenc = pdfmetrics.Encoding('EncodingWithoutVowels', 'WinAnsiEncoding')
for ch in 'aeiou':
zenc[ord(ch)] = 'z'
for ch in 'AEIOU':
zenc[ord(ch)] = 'Z'
pdfmetrics.registerEncoding(zenc)
# now we can make a font based on this encoding
# AR hack/workaround: the name of the encoding must be a Python codec!
f = pdfmetrics.Font('FontWithoutVowels', 'Helvetica-Oblique', 'EncodingWithoutVowels')
pdfmetrics.registerFont(f)
c.setFont('FontWithoutVowels', 12)
c.drawString(125, 675, "The magic word is squamish ossifrage")
# now demonstrate adding a Euro to MacRoman, which lacks one
c.setFont('Helvetica', 12)
c.drawString(100, 650, "MacRoman encoding lacks a Euro. We'll make a Mac font with the Euro at #219:")
# WinAnsi Helvetica
pdfmetrics.registerFont(pdfmetrics.Font('Helvetica-WinAnsi', 'Helvetica-Oblique', 'WinAnsiEncoding'))
c.setFont('Helvetica-WinAnsi', 12)
c.drawString(125, 625, 'WinAnsi with Euro: character 128 = "\200"')
pdfmetrics.registerFont(pdfmetrics.Font('MacHelvNoEuro', 'Helvetica-Oblique', 'MacRomanEncoding'))
c.setFont('MacHelvNoEuro', 12)
c.drawString(125, 600, 'Standard MacRoman, no Euro: Character 219 = "\333"') # oct(219)=0333
# now make our hacked encoding
euroMac = pdfmetrics.Encoding('MacWithEuro', 'MacRomanEncoding')
euroMac[219] = 'Euro'
pdfmetrics.registerEncoding(euroMac)
pdfmetrics.registerFont(pdfmetrics.Font('MacHelvWithEuro', 'Helvetica-Oblique', 'MacWithEuro'))
c.setFont('MacHelvWithEuro', 12)
c.drawString(125, 575, 'Hacked MacRoman with Euro: Character 219 = "\333"') # oct(219)=0333
# now test width setting with and without _rl_accel - harder
# make an encoding where 'm' becomes 'i'
c.setFont('Helvetica', 12)
c.drawString(100, 500, "Recode 'm' to 'i' and check we can measure widths. Boxes should surround letters.")
sample = 'Mmmmm. ' * 6 + 'Mmmm'
c.setFont('Helvetica-Oblique',12)
c.drawString(125, 475, sample)
w = c.stringWidth(sample, 'Helvetica-Oblique', 12)
c.rect(125, 475, w, 12)
narrowEnc = pdfmetrics.Encoding('m-to-i')
narrowEnc[ord('m')] = 'i'
narrowEnc[ord('M')] = 'I'
pdfmetrics.registerEncoding(narrowEnc)
pdfmetrics.registerFont(pdfmetrics.Font('narrow', 'Helvetica-Oblique', 'm-to-i'))
c.setFont('narrow', 12)
c.drawString(125, 450, sample)
w = c.stringWidth(sample, 'narrow', 12)
c.rect(125, 450, w, 12)
c.setFont('Helvetica', 12)
c.drawString(100, 400, "Symbol & Dingbats fonts - check we still get valid PDF in StandardEncoding")
c.setFont('Symbol', 12)
c.drawString(100, 375, 'abcdefghijklmn')
c.setFont('ZapfDingbats', 12)
c.drawString(300, 375, 'abcdefghijklmn')
c.save()
def makeSuite():
return makeSuiteForClasses(
TextEncodingTestCase,
#FontEncodingTestCase - nobbled for now due to old stuff which needs removing.
)
#noruntests
if __name__ == "__main__":
unittest.TextTestRunner().run(makeSuite())
printLocation()
|
|
#!/usr/bin/env python2
# Copyright (c) 2014-2015 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
# Test fee estimation code
#
from test_framework import BitcoinTestFramework
from bitcoinrpc.authproxy import AuthServiceProxy, JSONRPCException
from util import *
# Construct 2 trivial P2SH's and the ScriptSigs that spend them, so we can
# create many, many transactions without needing to spend time signing.
P2SH_1 = "2MySexEGVzZpRgNQ1JdjdP5bRETznm3roQ2" # P2SH of "OP_1 OP_DROP"
P2SH_2 = "2NBdpwq8Aoo1EEKEXPNrKvr5xQr3M9UfcZA" # P2SH of "OP_2 OP_DROP"
# Associated ScriptSigs that satisfy (spend) P2SH_1 and P2SH_2
# 4 bytes of OP_TRUE and push 2-byte redeem script of "OP_1 OP_DROP" or "OP_2 OP_DROP"
SCRIPT_SIG = ["0451025175", "0451025275"]
def satoshi_round(amount):
return Decimal(amount).quantize(Decimal('0.00000001'), rounding=ROUND_DOWN)
def small_txpuzzle_randfee(from_node, conflist, unconflist, amount, min_fee, fee_increment):
'''
Create and send a transaction with a random fee.
The transaction pays to a trivial P2SH script, and assumes that its inputs
are of the same form.
The function takes a list of confirmed outputs and unconfirmed outputs
and attempts to use the confirmed list first for its inputs.
It adds the newly created outputs to the unconfirmed list.
Returns (raw transaction, fee)
'''
# It's best to exponentially distribute our random fees
# because the buckets are exponentially spaced.
# Exponentially distributed from 1-128 * fee_increment
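# 1.1892 is roughly 2**0.25, so 1.1892**28 is about 2**7 = 128, matching the
# 1-128 * fee_increment range mentioned above.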
rand_fee = float(fee_increment)*(1.1892**random.randint(0,28))
# Total fee ranges from min_fee to min_fee + 127*fee_increment
fee = min_fee - fee_increment + satoshi_round(rand_fee)
inputs = []
total_in = Decimal("0.00000000")
while total_in <= (amount + fee) and len(conflist) > 0:
t = conflist.pop(0)
total_in += t["amount"]
inputs.append({ "txid" : t["txid"], "vout" : t["vout"]} )
if total_in <= amount + fee:
while total_in <= (amount + fee) and len(unconflist) > 0:
t = unconflist.pop(0)
total_in += t["amount"]
inputs.append({ "txid" : t["txid"], "vout" : t["vout"]} )
if total_in <= amount + fee:
raise RuntimeError("Insufficient funds: need %d, have %d"%(amount+fee, total_in))
outputs = {}
outputs[P2SH_1] = total_in - amount - fee
outputs[P2SH_2] = amount
rawtx = from_node.createrawtransaction(inputs, outputs)
# createrawtransaction constructs a transaction that is ready to be signed
# These transactions don't need to be signed, but we still have to insert the ScriptSig
# that will satisfy the ScriptPubKey.
completetx = rawtx[0:10]
inputnum = 0
for inp in inputs:
completetx += rawtx[10+82*inputnum:82+82*inputnum]
completetx += SCRIPT_SIG[inp["vout"]]
completetx += rawtx[84+82*inputnum:92+82*inputnum]
inputnum += 1
completetx += rawtx[10+82*inputnum:]
txid = from_node.sendrawtransaction(completetx, True)
unconflist.append({ "txid" : txid, "vout" : 0 , "amount" : total_in - amount - fee})
unconflist.append({ "txid" : txid, "vout" : 1 , "amount" : amount})
return (completetx, fee)
def split_inputs(from_node, txins, txouts, initial_split = False):
'''
We need to generate a lot of very small inputs so we can generate a ton of transactions
and they will have low priority.
This function takes an input from txins, and creates and sends a transaction
which splits the value into 2 outputs which are appended to txouts.
'''
prevtxout = txins.pop()
inputs = []
outputs = {}
inputs.append({ "txid" : prevtxout["txid"], "vout" : prevtxout["vout"] })
half_change = satoshi_round(prevtxout["amount"]/2)
rem_change = prevtxout["amount"] - half_change - Decimal("0.00001000")
outputs[P2SH_1] = half_change
outputs[P2SH_2] = rem_change
rawtx = from_node.createrawtransaction(inputs, outputs)
# If this is the initial split we actually need to sign the transaction
# Otherwise we just need to insert the proper ScriptSig
if (initial_split) :
completetx = from_node.signrawtransaction(rawtx)["hex"]
else :
completetx = rawtx[0:82] + SCRIPT_SIG[prevtxout["vout"]] + rawtx[84:]
txid = from_node.sendrawtransaction(completetx, True)
txouts.append({ "txid" : txid, "vout" : 0 , "amount" : half_change})
txouts.append({ "txid" : txid, "vout" : 1 , "amount" : rem_change})
def check_estimates(node, fees_seen, max_invalid, print_estimates = True):
'''
This function calls estimatefee and verifies that the estimates
meet certain invariants.
'''
all_estimates = [ node.estimatefee(i) for i in range(1,26) ]
if print_estimates:
print([str(all_estimates[e-1]) for e in [1,2,3,6,15,25]])
delta = 1.0e-6 # account for rounding error
last_e = max(fees_seen)
for e in filter(lambda x: x >= 0, all_estimates):
# Estimates should be within the bounds of what transactions fees actually were:
if float(e)+delta < min(fees_seen) or float(e)-delta > max(fees_seen):
raise AssertionError("Estimated fee (%f) out of range (%f,%f)"
%(float(e), min(fees_seen), max(fees_seen)))
# Estimates should be monotonically decreasing
if float(e)-delta > last_e:
raise AssertionError("Estimated fee (%f) larger than last fee (%f) for lower number of confirms"
%(float(e),float(last_e)))
last_e = e
valid_estimate = False
invalid_estimates = 0
for e in all_estimates:
if e >= 0:
valid_estimate = True
else:
invalid_estimates += 1
# Once we're at a high enough confirmation count that we can give an estimate
# We should have estimates for all higher confirmation counts
if valid_estimate and e < 0:
raise AssertionError("Invalid estimate appears at higher confirm count than valid estimate")
# Check on the expected number of different confirmation counts
# that we might not have valid estimates for
if invalid_estimates > max_invalid:
raise AssertionError("More than (%d) invalid estimates"%(max_invalid))
return all_estimates
class EstimateFeeTest(BitcoinTestFramework):
def setup_network(self):
'''
We'll setup the network to have 3 nodes that all mine with different parameters.
But first we need to use one node to create a lot of small low priority outputs
which we will use to generate our transactions.
'''
self.nodes = []
# Use node0 to mine blocks for input splitting
self.nodes.append(start_node(0, self.options.tmpdir, ["-maxorphantx=1000",
"-relaypriority=0", "-whitelist=127.0.0.1"]))
print("This test is time consuming, please be patient")
print("Splitting inputs to small size so we can generate low priority tx's")
self.txouts = []
self.txouts2 = []
# Split a coinbase into two transaction puzzle outputs
split_inputs(self.nodes[0], self.nodes[0].listunspent(0), self.txouts, True)
# Mine
while (len(self.nodes[0].getrawmempool()) > 0):
self.nodes[0].generate(1)
# Repeatedly split those 2 outputs, doubling twice for each rep
# Use txouts to monitor the available utxo, since these won't be tracked in wallet
reps = 0
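# Doubling twice per rep for 5 reps turns the initial 2 outputs into roughly
# 2 * 4**5 = 2048 utxos.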
while (reps < 5):
#Double txouts to txouts2
while (len(self.txouts)>0):
split_inputs(self.nodes[0], self.txouts, self.txouts2)
while (len(self.nodes[0].getrawmempool()) > 0):
self.nodes[0].generate(1)
#Double txouts2 to txouts
while (len(self.txouts2)>0):
split_inputs(self.nodes[0], self.txouts2, self.txouts)
while (len(self.nodes[0].getrawmempool()) > 0):
self.nodes[0].generate(1)
reps += 1
print("Finished splitting")
# Now we can connect the other nodes, didn't want to connect them earlier
# so the estimates would not be affected by the splitting transactions
# Node1 mines small blocks, but blocks that are still bigger than what the expected
# transaction rate requires, and allows free transactions.
# NOTE: the CreateNewBlock code starts counting block size at 1,000 bytes,
# (17k is room enough for 110 or so transactions)
self.nodes.append(start_node(1, self.options.tmpdir,
["-blockprioritysize=1500", "-blockmaxsize=18000",
"-maxorphantx=1000", "-relaypriority=0", "-debug=estimatefee"]))
connect_nodes(self.nodes[1], 0)
# Node2 is a stingy miner, that
# produces too small blocks (room for only 70 or so transactions)
node2args = ["-blockprioritysize=0", "-blockmaxsize=12000", "-maxorphantx=1000", "-relaypriority=0"]
self.nodes.append(start_node(2, self.options.tmpdir, node2args))
connect_nodes(self.nodes[0], 2)
connect_nodes(self.nodes[2], 1)
self.is_network_split = False
self.sync_all()
def transact_and_mine(self, numblocks, mining_node):
min_fee = Decimal("0.00001")
# We will now mine numblocks blocks generating on average 100 transactions between each block
# We shuffle our confirmed txout set before each set of transactions
# small_txpuzzle_randfee will use the transactions that have inputs already in the chain when possible
# resorting to tx's that depend on the mempool when those run out
for i in range(numblocks):
random.shuffle(self.confutxo)
for j in range(random.randrange(100-50,100+50)):
from_index = random.randint(1,2)
(txhex, fee) = small_txpuzzle_randfee(self.nodes[from_index], self.confutxo,
self.memutxo, Decimal("0.005"), min_fee, min_fee)
tx_kbytes = (len(txhex)/2)/1000.0
self.fees_per_kb.append(float(fee)/tx_kbytes)
sync_mempools(self.nodes[0:3],.1)
mined = mining_node.getblock(mining_node.generate(1)[0],True)["tx"]
sync_blocks(self.nodes[0:3],.1)
#update which txouts are confirmed
newmem = []
for utx in self.memutxo:
if utx["txid"] in mined:
self.confutxo.append(utx)
else:
newmem.append(utx)
self.memutxo = newmem
def run_test(self):
self.fees_per_kb = []
self.memutxo = []
self.confutxo = self.txouts # Start with the set of confirmed txouts after splitting
print("Checking estimates for 1/2/3/6/15/25 blocks")
print("Creating transactions and mining them with a huge block size")
# Create transactions and mine 30 big blocks with node 0 such that the mempool is always emptied
self.transact_and_mine(30, self.nodes[0])
check_estimates(self.nodes[1], self.fees_per_kb, 1)
print("Creating transactions and mining them with a block size that can't keep up")
# Create transactions and mine 20 small blocks with node 2, but create txs faster than we can mine
self.transact_and_mine(20, self.nodes[2])
check_estimates(self.nodes[1], self.fees_per_kb, 3)
print("Creating transactions and mining them at a block size that is just big enough")
# Generate transactions while mining 40 more blocks, this time with node1
# which mines blocks with capacity just above the rate that transactions are being created
self.transact_and_mine(40, self.nodes[1])
check_estimates(self.nodes[1], self.fees_per_kb, 2)
# Finish by mining a normal-sized block:
while len(self.nodes[1].getrawmempool()) > 0:
self.nodes[1].generate(1)
sync_blocks(self.nodes[0:3],.1)
print("Final estimates after emptying mempools")
check_estimates(self.nodes[1], self.fees_per_kb, 2)
if __name__ == '__main__':
EstimateFeeTest().main()
|
|
from __future__ import print_function, division
from sympy.core import S, sympify, diff
from sympy.core.function import Function, ArgumentIndexError
from sympy.core.relational import Eq
from sympy.core.logic import fuzzy_not
from sympy.polys.polyerrors import PolynomialError
from sympy.functions.elementary.complexes import im, sign
from sympy.functions.elementary.piecewise import Piecewise
###############################################################################
################################ DELTA FUNCTION ###############################
###############################################################################
class DiracDelta(Function):
"""
The DiracDelta function and its derivatives.
DiracDelta function has the following properties:
1) ``diff(Heaviside(x),x) = DiracDelta(x)``
2) ``integrate(DiracDelta(x-a)*f(x),(x,-oo,oo)) = f(a)`` and
``integrate(DiracDelta(x-a)*f(x),(x,a-e,a+e)) = f(a)``
3) ``DiracDelta(x) = 0`` for all ``x != 0``
4) ``DiracDelta(g(x)) = Sum_i(DiracDelta(x-x_i)/abs(g'(x_i)))``
where the ``x_i`` are the roots of ``g``
Derivatives of ``k``-th order of DiracDelta have the following property:
5) ``DiracDelta(x,k) = 0``, for all ``x != 0``
See Also
========
Heaviside
simplify, is_simple
sympy.functions.special.tensor_functions.KroneckerDelta
References
==========
.. [1] http://mathworld.wolfram.com/DeltaFunction.html
"""
is_real = True
def fdiff(self, argindex=1):
if argindex == 1:
# Default the derivative order k to 0 when only one argument was given
k = 0
if len(self.args) > 1:
k = self.args[1]
return self.func(self.args[0], k + 1)
else:
raise ArgumentIndexError(self, argindex)
@classmethod
def eval(cls, arg, k=0):
k = sympify(k)
if not k.is_Integer or k.is_negative:
raise ValueError("Error: the second argument of DiracDelta must be \
a non-negative integer, %s given instead." % (k,))
arg = sympify(arg)
if arg is S.NaN:
return S.NaN
if arg.is_positive or arg.is_negative:
return S.Zero
def simplify(self, x):
"""simplify(self, x)
Compute a simplified representation of the function using
property number 4.
x can be:
- a symbol
Examples
========
>>> from sympy import DiracDelta
>>> from sympy.abc import x, y
>>> DiracDelta(x*y).simplify(x)
DiracDelta(x)/Abs(y)
>>> DiracDelta(x*y).simplify(y)
DiracDelta(y)/Abs(x)
>>> DiracDelta(x**2 + x - 2).simplify(x)
DiracDelta(x - 1)/3 + DiracDelta(x + 2)/3
See Also
========
is_simple, DiracDelta
"""
from sympy.polys.polyroots import roots
if not self.args[0].has(x) or (len(self.args) > 1 and self.args[1] != 0 ):
return self
try:
argroots = roots(self.args[0], x)
result = 0
valid = True
darg = abs(diff(self.args[0], x))
for r, m in argroots.items():
if r.is_real is not False and m == 1:
result += self.func(x - r)/darg.subs(x, r)
else:
# don't handle non-real and if m != 1 then
# a polynomial will have a zero in the derivative (darg)
# at r
valid = False
break
if valid:
return result
except PolynomialError:
pass
return self
def is_simple(self, x):
"""is_simple(self, x)
Tells whether the argument(args[0]) of DiracDelta is a linear
expression in x.
x can be:
- a symbol
Examples
========
>>> from sympy import DiracDelta, cos
>>> from sympy.abc import x, y
>>> DiracDelta(x*y).is_simple(x)
True
>>> DiracDelta(x*y).is_simple(y)
True
>>> DiracDelta(x**2+x-2).is_simple(x)
False
>>> DiracDelta(cos(x)).is_simple(x)
False
See Also
========
simplify, DiracDelta
"""
p = self.args[0].as_poly(x)
if p:
return p.degree() == 1
return False
@staticmethod
def _latex_no_arg(printer):
return r'\delta'
def _sage_(self):
import sage.all as sage
return sage.dirac_delta(self.args[0]._sage_())
###############################################################################
############################## HEAVISIDE FUNCTION #############################
###############################################################################
class Heaviside(Function):
"""Heaviside Piecewise function
Heaviside function has the following properties [*]_:
1) ``diff(Heaviside(x),x) = DiracDelta(x)``
2) ``Heaviside(x) = 0`` for ``x < 0``, is undefined at ``x == 0`` [*], and is ``1`` for ``x > 0``
.. [*] Regarding the value at 0, Mathematica defines ``H(0) = 1``,
but Maple uses ``H(0) = undefined``
See Also
========
DiracDelta
References
==========
.. [1] http://mathworld.wolfram.com/HeavisideStepFunction.html
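Examples
========
A few illustrative evaluations, consistent with the ``eval`` and ``fdiff``
rules defined below:
>>> from sympy import Heaviside, DiracDelta
>>> from sympy.abc import x
>>> Heaviside(-3)
0
>>> Heaviside(5)
1
>>> Heaviside(x).diff(x)
DiracDelta(x)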
"""
is_real = True
def fdiff(self, argindex=1):
if argindex == 1:
# property number 1
return DiracDelta(self.args[0])
else:
raise ArgumentIndexError(self, argindex)
@classmethod
def eval(cls, arg):
arg = sympify(arg)
if arg is S.NaN:
return S.NaN
elif fuzzy_not(im(arg).is_zero):
raise ValueError("Function defined only for Real Values. Complex part: %s found in %s ." % (repr(im(arg)), repr(arg)) )
elif arg.is_negative:
return S.Zero
elif arg.is_positive:
return S.One
def _eval_rewrite_as_Piecewise(self, arg):
if arg.is_real:
return Piecewise((1, arg > 0), (S(1)/2, Eq(arg, 0)), (0, True))
def _eval_rewrite_as_sign(self, arg):
if arg.is_real:
return (sign(arg)+1)/2
def _sage_(self):
import sage.all as sage
return sage.heaviside(self.args[0]._sage_())
|
|
# -*- coding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import datetime
import getpass
import os
import uuid
from cgroupspy import trees
import psutil
from airflow.task_runner.base_task_runner import BaseTaskRunner
from airflow.utils.helpers import reap_process_group
class CgroupTaskRunner(BaseTaskRunner):
"""
Runs the raw Airflow task in a cgroup that has containment for memory and
cpu. It uses the resource requirements defined in the task to construct
the settings for the cgroup.
Note that this task runner will only work if the Airflow user has root privileges,
e.g. if the airflow user is called `airflow` then the following entries (or even
less restrictive ones) are needed in the sudoers file (replacing
/CGROUPS_FOLDER with your system's cgroups folder, e.g. '/sys/fs/cgroup/'):
airflow ALL= (root) NOEXEC: /bin/chown /CGROUPS_FOLDER/memory/airflow/*
airflow ALL= (root) NOEXEC: !/bin/chown /CGROUPS_FOLDER/memory/airflow/*..*
airflow ALL= (root) NOEXEC: !/bin/chown /CGROUPS_FOLDER/memory/airflow/* *
airflow ALL= (root) NOEXEC: /bin/chown /CGROUPS_FOLDER/cpu/airflow/*
airflow ALL= (root) NOEXEC: !/bin/chown /CGROUPS_FOLDER/cpu/airflow/*..*
airflow ALL= (root) NOEXEC: !/bin/chown /CGROUPS_FOLDER/cpu/airflow/* *
airflow ALL= (root) NOEXEC: /bin/chmod /CGROUPS_FOLDER/memory/airflow/*
airflow ALL= (root) NOEXEC: !/bin/chmod /CGROUPS_FOLDER/memory/airflow/*..*
airflow ALL= (root) NOEXEC: !/bin/chmod /CGROUPS_FOLDER/memory/airflow/* *
airflow ALL= (root) NOEXEC: /bin/chmod /CGROUPS_FOLDER/cpu/airflow/*
airflow ALL= (root) NOEXEC: !/bin/chmod /CGROUPS_FOLDER/cpu/airflow/*..*
airflow ALL= (root) NOEXEC: !/bin/chmod /CGROUPS_FOLDER/cpu/airflow/* *
"""
def __init__(self, local_task_job):
super(CgroupTaskRunner, self).__init__(local_task_job)
self.process = None
self._finished_running = False
self._cpu_shares = None
self._mem_mb_limit = None
self._created_cpu_cgroup = False
self._created_mem_cgroup = False
self._cur_user = getpass.getuser()
def _create_cgroup(self, path):
"""
Create the specified cgroup.
:param path: The path of the cgroup to create.
E.g. cpu/mygroup/mysubgroup
:return: the Node associated with the created cgroup.
:rtype: cgroupspy.nodes.Node
"""
node = trees.Tree().root
path_split = path.split(os.sep)
for path_element in path_split:
name_to_node = {x.name: x for x in node.children}
if path_element not in name_to_node:
self.log.debug("Creating cgroup %s in %s", path_element, node.path)
node = node.create_cgroup(path_element)
else:
self.log.debug(
"Not creating cgroup %s in %s since it already exists",
path_element, node.path
)
node = name_to_node[path_element]
return node
def _delete_cgroup(self, path):
"""
Delete the specified cgroup.
:param path: The path of the cgroup to delete.
E.g. cpu/mygroup/mysubgroup
"""
node = trees.Tree().root
path_split = path.split("/")
for path_element in path_split:
name_to_node = {x.name: x for x in node.children}
if path_element not in name_to_node:
self.log.warning("Cgroup does not exist: %s", path)
return
else:
node = name_to_node[path_element]
# node is now the leaf node
parent = node.parent
self.log.debug("Deleting cgroup %s/%s", parent, node.name)
parent.delete_cgroup(node.name)
def start(self):
# If we are already running inside a cgroup, run the command through bash instead of creating a new one
cgroups = self._get_cgroup_names()
if cgroups["cpu"] != "/" or cgroups["memory"] != "/":
self.log.debug(
"Already running in a cgroup (cpu: %s memory: %s) so not "
"creating another one",
cgroups.get("cpu"), cgroups.get("memory")
)
self.process = self.run_command(['bash', '-c'], join_args=True)
return
# Create a unique cgroup name
cgroup_name = "airflow/{}/{}".format(datetime.datetime.utcnow().
strftime("%Y-%m-%d"),
str(uuid.uuid1()))
self.mem_cgroup_name = "memory/{}".format(cgroup_name)
self.cpu_cgroup_name = "cpu/{}".format(cgroup_name)
# Get the resource requirements from the task
task = self._task_instance.task
resources = task.resources
cpus = resources.cpus.qty
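# cpu.shares is a relative weight with a default of 1024, so reserve 1024 shares for
# each requested cpu.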
self._cpu_shares = cpus * 1024
self._mem_mb_limit = resources.ram.qty
# Create the memory cgroup
mem_cgroup_node = self._create_cgroup(self.mem_cgroup_name)
self._created_mem_cgroup = True
if self._mem_mb_limit > 0:
self.log.debug(
"Setting %s with %s MB of memory",
self.mem_cgroup_name, self._mem_mb_limit
)
mem_cgroup_node.controller.limit_in_bytes = self._mem_mb_limit * 1024 * 1024
# Create the CPU cgroup
cpu_cgroup_node = self._create_cgroup(self.cpu_cgroup_name)
self._created_cpu_cgroup = True
if self._cpu_shares > 0:
self.log.debug(
"Setting %s with %s CPU shares",
self.cpu_cgroup_name, self._cpu_shares
)
cpu_cgroup_node.controller.shares = self._cpu_shares
# Start the process w/ cgroups
self.log.debug(
"Starting task process with cgroups cpu,memory: %s",
cgroup_name
)
self.process = self.run_command(
['cgexec', '-g', 'cpu,memory:{}'.format(cgroup_name)]
)
def return_code(self):
return_code = self.process.poll()
# TODO(plypaul) Monitoring the control file in the cgroup fs is better than
# checking the return code here. The PR to use this is here:
# https://github.com/plypaul/airflow/blob/e144e4d41996300ffa93947f136eab7785b114ed/airflow/contrib/task_runner/cgroup_task_runner.py#L43
# but there were some issues installing the python butter package and
# libseccomp-dev on some hosts for some reason.
# I wasn't able to track down the root cause of the package install failures, but
# we might want to revisit that approach at some other point.
if return_code == 137:
self.log.warning("Task failed with return code of 137. This may indicate "
"that it was killed due to excessive memory usage. "
"Please consider optimizing your task or using the "
"resources argument to reserve more memory for your task")
return return_code
def terminate(self):
if self.process and psutil.pid_exists(self.process.pid):
reap_process_group(self.process.pid, self.log)
def on_finish(self):
# Let the OOM watcher thread know we're done to avoid false OOM alarms
self._finished_running = True
# Clean up the cgroups
if self._created_mem_cgroup:
self._delete_cgroup(self.mem_cgroup_name)
if self._created_cpu_cgroup:
self._delete_cgroup(self.cpu_cgroup_name)
def _get_cgroup_names(self):
"""
:return: a mapping between the subsystem name to the cgroup name
:rtype: dict[str, str]
"""
with open("/proc/self/cgroup") as f:
lines = f.readlines()
d = {}
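# Each line of /proc/self/cgroup has the form "<hierarchy-id>:<subsystems>:<cgroup path>",
# e.g. "4:memory:/" when not running in a dedicated cgroup.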
for line in lines:
line_split = line.rstrip().split(":")
subsystem = line_split[1]
group_name = line_split[2]
d[subsystem] = group_name
return d
|
|
from npyscreen import BoxBasic, MultiLineEdit, FormBaseNew, MultiLine, notify_confirm
import curses
import weakref
class LogForm(FormBaseNew):
def create(self):
self.log = self.add(LogView, max_height=self.lines-15, name="Messages", scroll_exit=True)
self.optionBox = self.add(FixTextBox, name="Options", editable=False, scroll_exit=True, value="test",
max_width=30, rely=self.lines-13)
self.optionBox.value = "Press ^L to return"
self.infoBox = self.add(InfoBox, name="Information", scroll_exit=True,
max_width=self.columns-36, relx=33, rely=self.lines-13)
self.add_handlers({"^L": self.show_last,
"^Q": self.exit})
def appendMessage(self, msg, info):
self.log.addLogMessage(msg, info)
def exit(self, *args, **keywords):
self.parentApp.change_form(None)
def show_last(self, *args, **keywords):
self.parentApp.switchFormPrevious()
class LogView(MultiLine):
infoValues = []
def addLogMessage(self,msg,info):
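# Newest messages are inserted at the top; self.value tracks the highlighted entry so
# the info box can show the matching detail string.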
self.values.insert(0,'{0}: {1}'.format(len(self.values), msg))
self.infoValues.insert(0,str(info))
self.value = 0
self.when_value_edited()
def when_value_edited(self):
try:
self.display()
infoMsg = self.infoValues[self.value]
except:
infoMsg = ""
try:
self.parent.infoBox.value=infoMsg
self.parent.infoBox.display()
except:
notify_confirm(infoMsg)
class FixTextBox(BoxBasic):
_contained_widget = MultiLineEdit
def __init__(self, screen, *args, **keywords):
super(FixTextBox, self).__init__(screen, *args, **keywords)
self.make_contained_widget()
def make_contained_widget(self):
self._my_widgets = []
self._my_widgets.append(self._contained_widget(self.parent,
rely=self.rely+1, relx = self.relx+2,
max_width=self.width-4, max_height=self.height-2,
))
self.entry_widget = weakref.proxy(self._my_widgets[0])
def update(self, clear=True):
if self.hidden and clear:
self.clear()
return False
elif self.hidden:
return False
super(FixTextBox, self).update(clear=clear)
for w in self._my_widgets:
w.update(clear=clear)
def edit(self):
self.editing=False
self.display()
#self.entry_widget.edit()
#self.value = self.textarea.value
self.how_exited = self.entry_widget.how_exited
self.editing=False
self.display()
def get_value(self):
if hasattr(self, 'entry_widget'):
return self.entry_widget.value
elif hasattr(self, '__tmp_value'):
return self.__tmp_value
else:
return None
def set_value(self, value):
if hasattr(self, 'entry_widget'):
self.entry_widget.value = value
else:
# probably trying to set the value before the textarea is initialised
self.__tmp_value = value
def del_value(self):
del self.entry_widget.value
value = property(get_value, set_value, del_value)
def get_values(self):
if hasattr(self, 'entry_widget'):
return self.entry_widget.values
elif hasattr(self, '__tmp_value'):
return self.__tmp_values
else:
return None
def set_values(self, value):
if hasattr(self, 'entry_widget'):
self.entry_widget.values = value
elif hasattr(self, '__tmp_value'):
# probably trying to set the value before the textarea is initialised
self.__tmp_values = value
def del_values(self):
del self.entry_widget.value
values = property(get_values, set_values, del_values)
def get_editable(self):
return False
# if hasattr(self, 'entry_widget'):
# return False
# return self.entry_widget.editable
# else:
# return None
def set_editable(self, value):
if hasattr(self, 'entry_widget'):
self.entry_widget.editable = value
elif hasattr(self, '__tmp_value'):
# probably trying to set the value before the textarea is initialised
self.__tmp_values = value
def del_editable(self):
del self.entry_widget.editable
editable = property(get_editable, set_editable, del_editable)
class InfoBox(BoxBasic):
_contained_widget = MultiLineEdit
def __init__(self, screen, *args, **keywords):
super(InfoBox, self).__init__(screen, *args, **keywords)
self.make_contained_widget()
def make_contained_widget(self):
self._my_widgets = []
self._my_widgets.append(self._contained_widget(self.parent,
rely=self.rely+1, relx = self.relx+2,
max_width=self.width-4, max_height=self.height-2,
))
self.entry_widget = weakref.proxy(self._my_widgets[0])
def update(self, clear=True):
if self.hidden and clear:
self.clear()
return False
elif self.hidden:
return False
super(InfoBox, self).update(clear=clear)
for w in self._my_widgets:
w.update(clear=clear)
def edit(self):
self.editing=True
self.display()
self.entry_widget.edit()
#self.value = self.textarea.value
self.how_exited = self.entry_widget.how_exited
self.editing=False
self.display()
def get_value(self):
if hasattr(self, 'entry_widget'):
return self.entry_widget.value
elif hasattr(self, '__tmp_value'):
return self.__tmp_value
else:
return None
def set_value(self, value):
if hasattr(self, 'entry_widget'):
self.entry_widget.value = value
else:
# probably trying to set the value before the textarea is initialised
self.__tmp_value = value
def del_value(self):
del self.entry_widget.value
value = property(get_value, set_value, del_value)
def get_values(self):
if hasattr(self, 'entry_widget'):
return self.entry_widget.values
elif hasattr(self, '__tmp_value'):
return self.__tmp_values
else:
return None
def set_values(self, value):
if hasattr(self, 'entry_widget'):
self.entry_widget.values = value
elif hasattr(self, '__tmp_value'):
# probably trying to set the value before the textarea is initialised
self.__tmp_values = value
def del_values(self):
del self.entry_widget.value
values = property(get_values, set_values, del_values)
def get_editable(self):
if hasattr(self, 'entry_widget'):
# return False
return self.entry_widget.editable
else:
return None
def set_editable(self, value):
if hasattr(self, 'entry_widget'):
self.entry_widget.editable = value
elif hasattr(self, '__tmp_value'):
# probably trying to set the value before the textarea is initialised
self.__tmp_values = value
def del_editable(self):
del self.entry_widget.editable
editable = property(get_editable, set_editable, del_editable)
|
|
# -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from collections import OrderedDict
import os
import re
from typing import Dict, Optional, Sequence, Tuple, Type, Union
from google.api_core import client_options as client_options_lib
from google.api_core import gapic_v1
from google.api_core import retry as retries
from google.auth import credentials as ga_credentials # type: ignore
from google.auth.transport import mtls # type: ignore
from google.auth.transport.grpc import SslCredentials # type: ignore
from google.auth.exceptions import MutualTLSChannelError # type: ignore
from google.oauth2 import service_account # type: ignore
try:
OptionalRetry = Union[retries.Retry, gapic_v1.method._MethodDefault]
except AttributeError: # pragma: NO COVER
OptionalRetry = Union[retries.Retry, object] # type: ignore
from google.ads.googleads.v9.resources.types import detail_placement_view
from google.ads.googleads.v9.services.types import detail_placement_view_service
from .transports.base import (
DetailPlacementViewServiceTransport,
DEFAULT_CLIENT_INFO,
)
from .transports.grpc import DetailPlacementViewServiceGrpcTransport
class DetailPlacementViewServiceClientMeta(type):
"""Metaclass for the DetailPlacementViewService client.
This provides class-level methods for building and retrieving
support objects (e.g. transport) without polluting the client instance
objects.
"""
_transport_registry = (
OrderedDict()
) # type: Dict[str, Type[DetailPlacementViewServiceTransport]]
_transport_registry["grpc"] = DetailPlacementViewServiceGrpcTransport
def get_transport_class(
cls, label: str = None,
) -> Type[DetailPlacementViewServiceTransport]:
"""Return an appropriate transport class.
Args:
label: The name of the desired transport. If none is
provided, then the first transport in the registry is used.
Returns:
The transport class to use.
"""
# If a specific transport is requested, return that one.
if label:
return cls._transport_registry[label]
# No transport is requested; return the default (that is, the first one
# in the dictionary).
return next(iter(cls._transport_registry.values()))
class DetailPlacementViewServiceClient(
metaclass=DetailPlacementViewServiceClientMeta
):
"""Service to fetch Detail Placement views."""
@staticmethod
def _get_default_mtls_endpoint(api_endpoint):
"""Convert api endpoint to mTLS endpoint.
Convert "*.sandbox.googleapis.com" and "*.googleapis.com" to
"*.mtls.sandbox.googleapis.com" and "*.mtls.googleapis.com" respectively.
Args:
api_endpoint (Optional[str]): the api endpoint to convert.
Returns:
str: converted mTLS api endpoint.
"""
if not api_endpoint:
return api_endpoint
mtls_endpoint_re = re.compile(
r"(?P<name>[^.]+)(?P<mtls>\.mtls)?(?P<sandbox>\.sandbox)?(?P<googledomain>\.googleapis\.com)?"
)
m = mtls_endpoint_re.match(api_endpoint)
name, mtls, sandbox, googledomain = m.groups()
if mtls or not googledomain:
return api_endpoint
if sandbox:
return api_endpoint.replace(
"sandbox.googleapis.com", "mtls.sandbox.googleapis.com"
)
return api_endpoint.replace(".googleapis.com", ".mtls.googleapis.com")
DEFAULT_ENDPOINT = "googleads.googleapis.com"
DEFAULT_MTLS_ENDPOINT = _get_default_mtls_endpoint.__func__( # type: ignore
DEFAULT_ENDPOINT
)
@classmethod
def from_service_account_info(cls, info: dict, *args, **kwargs):
"""Creates an instance of this client using the provided credentials info.
Args:
info (dict): The service account private key info.
args: Additional arguments to pass to the constructor.
kwargs: Additional arguments to pass to the constructor.
Returns:
DetailPlacementViewServiceClient: The constructed client.
"""
credentials = service_account.Credentials.from_service_account_info(
info
)
kwargs["credentials"] = credentials
return cls(*args, **kwargs)
@classmethod
def from_service_account_file(cls, filename: str, *args, **kwargs):
"""Creates an instance of this client using the provided credentials
file.
Args:
filename (str): The path to the service account private key json
file.
args: Additional arguments to pass to the constructor.
kwargs: Additional arguments to pass to the constructor.
Returns:
DetailPlacementViewServiceClient: The constructed client.
"""
credentials = service_account.Credentials.from_service_account_file(
filename
)
kwargs["credentials"] = credentials
return cls(*args, **kwargs)
from_service_account_json = from_service_account_file
@property
def transport(self) -> DetailPlacementViewServiceTransport:
"""Return the transport used by the client instance.
Returns:
DetailPlacementViewServiceTransport: The transport used by the client instance.
"""
return self._transport
def __enter__(self):
return self
def __exit__(self, type, value, traceback):
"""Releases underlying transport's resources.
.. warning::
ONLY use as a context manager if the transport is NOT shared
with other clients! Exiting the with block will CLOSE the transport
and may cause errors in other clients!
"""
self.transport.close()
@staticmethod
def detail_placement_view_path(
customer_id: str, ad_group_id: str, base64_placement: str,
) -> str:
"""Return a fully-qualified detail_placement_view string."""
return "customers/{customer_id}/detailPlacementViews/{ad_group_id}~{base64_placement}".format(
customer_id=customer_id,
ad_group_id=ad_group_id,
base64_placement=base64_placement,
)
@staticmethod
def parse_detail_placement_view_path(path: str) -> Dict[str, str]:
"""Parse a detail_placement_view path into its component segments."""
m = re.match(
r"^customers/(?P<customer_id>.+?)/detailPlacementViews/(?P<ad_group_id>.+?)~(?P<base64_placement>.+?)$",
path,
)
return m.groupdict() if m else {}
@staticmethod
def common_billing_account_path(billing_account: str,) -> str:
"""Return a fully-qualified billing_account string."""
return "billingAccounts/{billing_account}".format(
billing_account=billing_account,
)
@staticmethod
def parse_common_billing_account_path(path: str) -> Dict[str, str]:
"""Parse a billing_account path into its component segments."""
m = re.match(r"^billingAccounts/(?P<billing_account>.+?)$", path)
return m.groupdict() if m else {}
@staticmethod
def common_folder_path(folder: str,) -> str:
"""Return a fully-qualified folder string."""
return "folders/{folder}".format(folder=folder,)
@staticmethod
def parse_common_folder_path(path: str) -> Dict[str, str]:
"""Parse a folder path into its component segments."""
m = re.match(r"^folders/(?P<folder>.+?)$", path)
return m.groupdict() if m else {}
@staticmethod
def common_organization_path(organization: str,) -> str:
"""Return a fully-qualified organization string."""
return "organizations/{organization}".format(organization=organization,)
@staticmethod
def parse_common_organization_path(path: str) -> Dict[str, str]:
"""Parse a organization path into its component segments."""
m = re.match(r"^organizations/(?P<organization>.+?)$", path)
return m.groupdict() if m else {}
@staticmethod
def common_project_path(project: str,) -> str:
"""Return a fully-qualified project string."""
return "projects/{project}".format(project=project,)
@staticmethod
def parse_common_project_path(path: str) -> Dict[str, str]:
"""Parse a project path into its component segments."""
m = re.match(r"^projects/(?P<project>.+?)$", path)
return m.groupdict() if m else {}
@staticmethod
def common_location_path(project: str, location: str,) -> str:
"""Return a fully-qualified location string."""
return "projects/{project}/locations/{location}".format(
project=project, location=location,
)
@staticmethod
def parse_common_location_path(path: str) -> Dict[str, str]:
"""Parse a location path into its component segments."""
m = re.match(
r"^projects/(?P<project>.+?)/locations/(?P<location>.+?)$", path
)
return m.groupdict() if m else {}
def __init__(
self,
*,
credentials: Optional[ga_credentials.Credentials] = None,
transport: Union[str, DetailPlacementViewServiceTransport, None] = None,
client_options: Optional[client_options_lib.ClientOptions] = None,
client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
) -> None:
"""Instantiate the detail placement view service client.
Args:
credentials (Optional[google.auth.credentials.Credentials]): The
authorization credentials to attach to requests. These
credentials identify the application to the service; if none
are specified, the client will attempt to ascertain the
credentials from the environment.
transport (Union[str, ~.DetailPlacementViewServiceTransport]): The
transport to use. If set to None, a transport is chosen
automatically.
client_options (google.api_core.client_options.ClientOptions): Custom options for the
client. It won't take effect if a ``transport`` instance is provided.
(1) The ``api_endpoint`` property can be used to override the
default endpoint provided by the client. GOOGLE_API_USE_MTLS_ENDPOINT
environment variable can also be used to override the endpoint:
"always" (always use the default mTLS endpoint), "never" (always
use the default regular endpoint) and "auto" (auto switch to the
default mTLS endpoint if client certificate is present, this is
the default value). However, the ``api_endpoint`` property takes
precedence if provided.
(2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable
is "true", then the ``client_cert_source`` property can be used
to provide client certificate for mutual TLS transport. If
not provided, the default SSL client certificate will be used if
present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not
set, no client certificate will be used.
client_info (google.api_core.gapic_v1.client_info.ClientInfo):
The client info used to send a user-agent string along with
API requests. If ``None``, then default info will be used.
Generally, you only need to set this if you're developing
your own client library.
Raises:
google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport
creation failed for any reason.
"""
if isinstance(client_options, dict):
client_options = client_options_lib.from_dict(client_options)
if client_options is None:
client_options = client_options_lib.ClientOptions()
# Create SSL credentials for mutual TLS if needed.
if os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false") not in (
"true",
"false",
):
raise ValueError(
"Environment variable `GOOGLE_API_USE_CLIENT_CERTIFICATE` must be either `true` or `false`"
)
use_client_cert = (
os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false") == "true"
)
ssl_credentials = None
is_mtls = False
if use_client_cert:
if client_options.client_cert_source:
import grpc # type: ignore
cert, key = client_options.client_cert_source()
ssl_credentials = grpc.ssl_channel_credentials(
certificate_chain=cert, private_key=key
)
is_mtls = True
else:
creds = SslCredentials()
is_mtls = creds.is_mtls
ssl_credentials = creds.ssl_credentials if is_mtls else None
# Figure out which api endpoint to use.
if client_options.api_endpoint is not None:
api_endpoint = client_options.api_endpoint
else:
use_mtls_env = os.getenv("GOOGLE_API_USE_MTLS_ENDPOINT", "auto")
if use_mtls_env == "never":
api_endpoint = self.DEFAULT_ENDPOINT
elif use_mtls_env == "always":
api_endpoint = self.DEFAULT_MTLS_ENDPOINT
elif use_mtls_env == "auto":
api_endpoint = (
self.DEFAULT_MTLS_ENDPOINT
if is_mtls
else self.DEFAULT_ENDPOINT
)
else:
raise MutualTLSChannelError(
"Unsupported GOOGLE_API_USE_MTLS_ENDPOINT value. Accepted values: never, auto, always"
)
# Save or instantiate the transport.
# Ordinarily, we provide the transport, but allowing a custom transport
# instance provides an extensibility point for unusual situations.
if isinstance(transport, DetailPlacementViewServiceTransport):
# transport is a DetailPlacementViewServiceTransport instance.
if credentials:
raise ValueError(
"When providing a transport instance, "
"provide its credentials directly."
)
self._transport = transport
elif isinstance(transport, str):
Transport = type(self).get_transport_class(transport)
self._transport = Transport(
credentials=credentials, host=self.DEFAULT_ENDPOINT
)
else:
self._transport = DetailPlacementViewServiceGrpcTransport(
credentials=credentials,
host=api_endpoint,
ssl_channel_credentials=ssl_credentials,
client_info=client_info,
)
def get_detail_placement_view(
self,
request: Union[
detail_placement_view_service.GetDetailPlacementViewRequest, dict
] = None,
*,
resource_name: str = None,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> detail_placement_view.DetailPlacementView:
r"""Returns the requested Detail Placement view in full detail.
List of thrown errors: `AuthenticationError <>`__
`AuthorizationError <>`__ `HeaderError <>`__
`InternalError <>`__ `QuotaError <>`__ `RequestError <>`__
Args:
request (Union[google.ads.googleads.v9.services.types.GetDetailPlacementViewRequest, dict]):
The request object. Request message for
[DetailPlacementViewService.GetDetailPlacementView][google.ads.googleads.v9.services.DetailPlacementViewService.GetDetailPlacementView].
resource_name (:class:`str`):
Required. The resource name of the
Detail Placement view to fetch.
This corresponds to the ``resource_name`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.ads.googleads.v9.resources.types.DetailPlacementView:
A view with metrics aggregated by ad
group and URL or YouTube video.
"""
# Create or coerce a protobuf request object.
# Sanity check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
if request is not None and any([resource_name]):
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
# Minor optimization to avoid making a copy if the user passes
# in a detail_placement_view_service.GetDetailPlacementViewRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(
request, detail_placement_view_service.GetDetailPlacementViewRequest
):
request = detail_placement_view_service.GetDetailPlacementViewRequest(
request
)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if resource_name is not None:
request.resource_name = resource_name
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[
self._transport.get_detail_placement_view
]
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata(
(("resource_name", request.resource_name),)
),
)
# Send the request.
response = rpc(
request, retry=retry, timeout=timeout, metadata=metadata,
)
# Done; return the response.
return response
__all__ = ("DetailPlacementViewServiceClient",)
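# Hedged usage sketch (assumption: the credentials path and the IDs below are
# placeholders, not values from this module). It shows how the path helper and the
# flattened ``resource_name`` argument of ``get_detail_placement_view`` fit together.
def _example_get_detail_placement_view():  # pragma: no cover
    client = DetailPlacementViewServiceClient.from_service_account_file(
        "service_account.json"  # placeholder path to a service account key
    )
    resource_name = DetailPlacementViewServiceClient.detail_placement_view_path(
        customer_id="1234567890",
        ad_group_id="111",
        base64_placement="abc",
    )
    # The flattened argument is copied onto GetDetailPlacementViewRequest internally.
    return client.get_detail_placement_view(resource_name=resource_name)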
|
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""TensorFlow-related utilities."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import copy
import numpy as np
import six
from tensorflow.python.data.experimental.ops import cardinality
from tensorflow.python.eager import context
from tensorflow.python.framework import composite_tensor
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import tensor_spec
from tensorflow.python.framework import tensor_util
from tensorflow.python.framework import type_spec
from tensorflow.python.keras import backend as K
from tensorflow.python.keras.utils import tf_contextlib
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import variables
from tensorflow.python.ops.ragged import ragged_tensor
from tensorflow.python.ops.ragged import ragged_tensor_value
from tensorflow.python.util import nest
from tensorflow.python.util import object_identity
def is_tensor_or_tensor_list(v):
v = nest.flatten(v)
if v and isinstance(v[0], ops.Tensor):
return True
else:
return False
def get_reachable_from_inputs(inputs, targets=None):
"""Returns the set of tensors/ops reachable from `inputs`.
Stops if all targets have been found (target is optional).
Only valid in Symbolic mode, not Eager mode.
Args:
inputs: List of tensors.
targets: List of tensors.
Returns:
A set of tensors reachable from the inputs (includes the inputs themselves).
"""
inputs = nest.flatten(inputs, expand_composites=True)
reachable = object_identity.ObjectIdentitySet(inputs)
if targets:
remaining_targets = object_identity.ObjectIdentitySet(nest.flatten(targets))
queue = inputs[:]
while queue:
x = queue.pop()
if isinstance(x, tuple(_user_convertible_tensor_types)):
# Can't find consumers of user-specific types.
continue
if isinstance(x, ops.Operation):
outputs = x.outputs[:] or []
outputs += x._control_outputs # pylint: disable=protected-access
elif isinstance(x, variables.Variable):
try:
outputs = [x.op]
except AttributeError:
# Variables can be created in an Eager context.
outputs = []
elif tensor_util.is_tensor(x):
outputs = x.consumers()
else:
raise TypeError('Expected Operation, Variable, or Tensor, got ' + str(x))
for y in outputs:
if y not in reachable:
reachable.add(y)
if targets:
remaining_targets.discard(y)
queue.insert(0, y)
if targets and not remaining_targets:
return reachable
return reachable
# This function needs access to private functions of `nest`.
# pylint: disable=protected-access
def map_structure_with_atomic(is_atomic_fn, map_fn, nested):
"""Maps the atomic elements of a nested structure.
Arguments:
is_atomic_fn: A function that determines if an element of `nested` is
atomic.
map_fn: The function to apply to atomic elements of `nested`.
nested: A nested structure.
Returns:
The nested structure, with atomic elements mapped according to `map_fn`.
Raises:
ValueError: If an element that is neither atomic nor a sequence is
encountered.
"""
if is_atomic_fn(nested):
return map_fn(nested)
# Recursively convert.
if not nest.is_nested(nested):
raise ValueError(
'Received non-atomic and non-sequence element: {}'.format(nested))
if nest._is_mapping(nested):
values = [nested[k] for k in nest._sorted(nested)]
elif nest._is_attrs(nested):
values = _astuple(nested)
else:
values = nested
mapped_values = [
map_structure_with_atomic(is_atomic_fn, map_fn, ele) for ele in values
]
return nest._sequence_like(nested, mapped_values)
def get_shapes(tensors):
"""Gets shapes from tensors."""
return nest.map_structure(lambda x: x.shape, tensors)
# pylint: enable=protected-access
def convert_shapes(input_shape, to_tuples=True):
"""Converts nested shape representations to desired format.
Performs:
TensorShapes -> tuples if `to_tuples=True`.
tuples of int or None -> TensorShapes if `to_tuples=False`.
Valid objects to be converted are:
- TensorShapes
- tuples with elements of type int or None.
- ints
- None
Arguments:
input_shape: A nested structure of objects to be converted to TensorShapes.
to_tuples: If `True`, converts all TensorShape to tuples. Otherwise converts
all tuples representing shapes to TensorShapes.
Returns:
Nested structure of shapes in desired format.
Raises:
    ValueError: when the input tensor shape can't be converted to tuples, e.g.
unknown tensor shape.
"""
def _is_shape_component(value):
return value is None or isinstance(value, (int, tensor_shape.Dimension))
def _is_atomic_shape(input_shape):
# Ex: TensorShape or (None, 10, 32) or 5 or `None`
if _is_shape_component(input_shape):
return True
if isinstance(input_shape, tensor_shape.TensorShape):
return True
if (isinstance(input_shape, (tuple, list)) and
all(_is_shape_component(ele) for ele in input_shape)):
return True
return False
def _convert_shape(input_shape):
input_shape = tensor_shape.TensorShape(input_shape)
if to_tuples:
input_shape = tuple(input_shape.as_list())
return input_shape
return map_structure_with_atomic(_is_atomic_shape, _convert_shape,
input_shape)
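# Illustrative sketch (the shapes below are arbitrary examples, not tied to any
# model): `convert_shapes` round-trips between plain tuples and TensorShapes.
def _convert_shapes_example():
  shapes = convert_shapes(((None, 10), (None, 10, 3)), to_tuples=False)
  # `shapes` is now a tuple of TensorShapes; convert back to plain tuples.
  return convert_shapes(shapes, to_tuples=True)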
class ListWrapper(object):
"""A wrapper for lists to be treated as elements for `nest`."""
def __init__(self, list_to_wrap):
self._list = list_to_wrap
def as_list(self):
return self._list
def convert_inner_node_data(nested, wrap=False):
"""Either wraps or unwraps innermost node data lists in `ListWrapper` objects.
Arguments:
nested: A nested data structure.
wrap: If `True`, wrap innermost lists in `ListWrapper` objects. If `False`,
unwraps `ListWrapper` objects into lists.
Returns:
Structure of same type as nested, with lists wrapped/unwrapped.
"""
def _is_serialized_node_data(nested):
# Node data can be of form `[layer_name, node_id, tensor_id]` or
# `[layer_name, node_id, tensor_id, kwargs]`.
if (isinstance(nested, list) and (len(nested) in [3, 4]) and
isinstance(nested[0], six.string_types)):
return True
return False
def _is_atomic_nested(nested):
"""Returns `True` if `nested` is a list representing node data."""
if isinstance(nested, ListWrapper):
return True
if _is_serialized_node_data(nested):
return True
return not nest.is_nested(nested)
def _convert_object_or_list(nested):
"""Convert b/t `ListWrapper` object and list representations."""
if wrap:
if isinstance(nested, ListWrapper):
return nested
if _is_serialized_node_data(nested):
return ListWrapper(nested)
return nested
else:
if isinstance(nested, ListWrapper):
return nested.as_list()
return nested
return map_structure_with_atomic(_is_atomic_nested, _convert_object_or_list,
nested)
def shape_type_conversion(fn):
"""Decorator that handles tuple/TensorShape conversion.
Used in `compute_output_shape` and `build`.
Arguments:
fn: function to wrap.
Returns:
Wrapped function.
"""
def wrapper(instance, input_shape):
# Pass shapes as tuples to `fn`
# This preserves compatibility with external Keras.
if input_shape is not None:
input_shape = convert_shapes(input_shape, to_tuples=True)
output_shape = fn(instance, input_shape)
# Return shapes from `fn` as TensorShapes.
if output_shape is not None:
output_shape = convert_shapes(output_shape, to_tuples=False)
return output_shape
return wrapper
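# Illustrative sketch (assumption: `_ExampleDoubleWidth` is a hypothetical class,
# not part of this module). The decorator lets the method work with plain tuples
# while callers may pass and receive TensorShapes.
class _ExampleDoubleWidth(object):

  @shape_type_conversion
  def compute_output_shape(self, input_shape):
    # `input_shape` arrives as a plain tuple, e.g. (None, 10); the returned tuple
    # is converted back to a TensorShape by the decorator.
    return input_shape[:-1] + (2 * input_shape[-1],)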
def are_all_symbolic_tensors(tensors):
return all(map(is_symbolic_tensor, tensors))
_user_convertible_tensor_types = set()
def is_extension_type(tensor):
"""Returns whether a tensor is of an ExtensionType.
github.com/tensorflow/community/pull/269
Currently it works by checking if `tensor` is a `CompositeTensor` instance,
  but this will be changed to use an appropriate extension type protocol
check once ExtensionType is made public.
Arguments:
tensor: An object to test
Returns:
True if the tensor is an extension type object, false if not.
"""
return isinstance(tensor, composite_tensor.CompositeTensor)
def is_symbolic_tensor(tensor):
"""Returns whether a tensor is symbolic (from a TF graph) or an eager tensor.
A Variable can be seen as either: it is considered symbolic
when we are in a graph scope, and eager when we are in an eager scope.
Arguments:
tensor: A tensor instance to test.
Returns:
True for symbolic tensors, False for eager tensors.
"""
if isinstance(tensor, ops.Tensor):
return hasattr(tensor, 'graph')
elif is_extension_type(tensor):
component_tensors = nest.flatten(tensor, expand_composites=True)
return any(hasattr(t, 'graph') for t in component_tensors)
elif isinstance(tensor, variables.Variable):
# Variables that are output of a Keras Layer in Functional API mode
# should be considered symbolic.
# TODO(omalleyt): We need a better way to check this in order to
# enable `run_eagerly=True` for Models containing Layers that
# return Variables as outputs.
return (getattr(tensor, '_keras_history', False) or
not context.executing_eagerly())
elif isinstance(tensor, tuple(_user_convertible_tensor_types)):
tensor = ops.convert_to_tensor_or_composite(tensor)
return is_symbolic_tensor(tensor)
else:
return False
def register_symbolic_tensor_type(cls):
"""Allows users to specify types regarded as symbolic `Tensor`s.
Used in conjunction with `tf.register_tensor_conversion_function`, calling
`tf.keras.utils.register_symbolic_tensor_type(cls)` allows non-`Tensor`
objects to be plumbed through Keras layers.
Example:
```python
# One-time setup.
class Foo(object):
def __init__(self, input_):
self._input = input_
def value(self):
return tf.constant(42.)
tf.register_tensor_conversion_function(
Foo, lambda x, *args, **kwargs: x.value())
tf.keras.utils.register_symbolic_tensor_type(Foo)
# User-land.
layer = tf.keras.layers.Lambda(lambda input_: Foo(input_))
```
Arguments:
cls: A `class` type which shall be regarded as a symbolic `Tensor`.
"""
global _user_convertible_tensor_types
_user_convertible_tensor_types.add(cls)
def type_spec_from_value(value):
"""Grab type_spec without converting array-likes to tensors."""
if is_extension_type(value):
return value._type_spec # pylint: disable=protected-access
# Get a TensorSpec for array-like data without
# converting the data to a Tensor
if hasattr(value, 'shape') and hasattr(value, 'dtype'):
return tensor_spec.TensorSpec(value.shape, value.dtype)
else:
return type_spec.type_spec_from_value(value)
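# Illustrative sketch (the array below is an arbitrary example): array-likes that
# expose `shape` and `dtype` yield a TensorSpec without being converted to a Tensor.
def _type_spec_from_value_example():
  value = np.zeros((2, 3), dtype=np.float32)
  # Returns TensorSpec(shape=(2, 3), dtype=tf.float32, name=None).
  return type_spec_from_value(value)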
def is_ragged(tensor):
"""Returns true if `tensor` is a ragged tensor or ragged tensor value."""
return isinstance(
tensor,
(ragged_tensor.RaggedTensor, ragged_tensor_value.RaggedTensorValue))
def is_tensor_or_variable(x):
return tensor_util.is_tensor(x) or isinstance(x, variables.Variable)
def assert_no_legacy_layers(layers):
"""Prevent tf.layers.Layers from being used with Keras.
Certain legacy layers inherit from their keras analogs; however they are
not supported with keras and can lead to subtle and hard to diagnose bugs.
Args:
layers: A list of layers to check
Raises:
TypeError: If any elements of layers are tf.layers.Layers
"""
# isinstance check for tf.layers.Layer introduces a circular dependency.
legacy_layers = [l for l in layers if getattr(l, '_is_legacy_layer', None)]
if legacy_layers:
layer_str = '\n'.join(' ' + str(l) for l in legacy_layers)
raise TypeError(
'The following are legacy tf.layers.Layers:\n{}\nTo use keras as a '
'framework (for instance using the Network, Model, or Sequential '
'classes), please use the tf.keras.layers implementation instead. '
'(Or, if writing custom layers, subclass from tf.keras.layers rather '
'than tf.layers)'.format(layer_str))
@tf_contextlib.contextmanager
def maybe_init_scope(layer):
"""Open an `init_scope` if in V2 mode and using the keras graph.
Arguments:
layer: The Layer/Model that is currently active.
Yields:
None
"""
# Don't open an init_scope in V1 mode or when using legacy tf.layers.
if (ops.executing_eagerly_outside_functions() and
getattr(layer, '_keras_style', True)):
with ops.init_scope():
yield
else:
yield
@tf_contextlib.contextmanager
def graph_context_for_symbolic_tensors(*args, **kwargs):
"""Returns graph context manager if any of the inputs is a symbolic tensor."""
if any(is_symbolic_tensor(v) for v in list(args) + list(kwargs.values())):
with K.get_graph().as_default():
yield
else:
yield
def dataset_is_infinite(dataset):
"""True if the passed dataset is infinite."""
if ops.executing_eagerly_outside_functions():
return math_ops.equal(
cardinality.cardinality(dataset), cardinality.INFINITE)
else:
dataset_size = K.get_session().run(cardinality.cardinality(dataset))
return dataset_size == cardinality.INFINITE
def get_tensor_spec(t, dynamic_batch=False, name=None):
"""Returns a `TensorSpec` given a single `Tensor` or `TensorSpec`."""
# pylint: disable=protected-access
if isinstance(t, type_spec.TypeSpec):
spec = t
elif is_extension_type(t):
# TODO(b/148821952): Should these specs have a name attr?
spec = t._type_spec
elif (hasattr(t, '_keras_history') and
hasattr(t._keras_history[0], '_type_spec')):
return t._keras_history[0]._type_spec
elif hasattr(t, 'shape') and hasattr(t, 'dtype'):
spec = tensor_spec.TensorSpec(shape=t.shape, dtype=t.dtype, name=name)
else:
return None # Allow non-Tensors to pass through.
if not dynamic_batch:
return spec
dynamic_batch_spec = copy.deepcopy(spec)
# RaggedTensorSpec only has a private _shape.
shape = dynamic_batch_spec._shape.as_list()
if shape:
shape[0] = None
dynamic_batch_spec._shape = tensor_shape.TensorShape(shape)
return dynamic_batch_spec
# pylint: enable=protected-access
def to_numpy_or_python_type(tensors):
"""Converts a structure of `Tensor`s to `NumPy` arrays or Python scalar types.
For each tensor, it calls `tensor.numpy()`. If the result is a scalar value,
it converts it to a Python type, such as a float or int, by calling
`result.item()`.
Numpy scalars are converted, as Python types are often more convenient to deal
with. This is especially useful for bfloat16 Numpy scalars, which don't
support as many operations as other Numpy values.
Args:
tensors: A structure of tensors.
Returns:
`tensors`, but scalar tensors are converted to Python types and non-scalar
tensors are converted to Numpy arrays.
"""
def _to_single_numpy_or_python_type(t):
if isinstance(t, ops.Tensor):
x = t.numpy()
return x.item() if np.ndim(x) == 0 else x
return t # Don't turn ragged or sparse tensors to NumPy.
return nest.map_structure(_to_single_numpy_or_python_type, tensors)
def _astuple(attrs):
"""Converts the given attrs to tuple non-recursively."""
cls = type(attrs)
fields = getattr(cls, '__attrs_attrs__', None)
if fields is None:
raise ValueError('%r is not an attrs-decorated class.' % cls)
values = []
for field in fields:
values.append(getattr(attrs, field.name))
return tuple(values)
|
|
from SimPEG.utils import ndgrid, mkvc, sdiag
import discretize
import numpy as np
import scipy.sparse as sp
import matplotlib.pyplot as plt
from matplotlib.colors import LogNorm
def casing_currents(j, mesh, model_parameters):
"""
Compute the current (A) within the casing
:param numpy.ndarray j: current density
:param discretize.BaseMesh mesh: the discretize mesh which the casing is on
    :param casingSimulations.model.BaseCasingParametersMixin model_parameters:
        a model with casing
    :return: a dict with keys :code:`"x"` and :code:`"z"`, each a tuple
        :code:`(z, I)` of depths and the corresponding casing current
    """
casing_a = model_parameters.casing_a
casing_b = model_parameters.casing_b
casing_z = model_parameters.casing_z
IxCasing = {}
IzCasing = {}
casing_faces_x = (
(mesh.gridFx[:, 0] >= casing_a) &
(mesh.gridFx[:, 0] <= casing_b) &
(mesh.gridFx[:, 2] <= casing_z[1]) &
(mesh.gridFx[:, 2] >= casing_z[0])
)
casing_faces_y = np.zeros(mesh.nFy, dtype=bool)
casing_faces_z = (
(mesh.gridFz[:, 0] >= casing_a) &
(mesh.gridFz[:, 0] <= casing_b) &
(mesh.gridFz[:, 2] <= casing_z[1]) &
(mesh.gridFz[:, 2] >= casing_z[0])
)
jA = sdiag(mesh.area) * j
jACasing = sdiag(
np.hstack([casing_faces_x, casing_faces_y, casing_faces_z])
) * jA
jxCasing = jACasing[:mesh.nFx, :].reshape(
mesh.vnFx[0], mesh.vnFx[1], mesh.vnFx[2], order='F'
)
jzCasing = jACasing[mesh.nFx + mesh.nFy:, :].reshape(
mesh.vnFz[0], mesh.vnFz[1], mesh.vnFz[2], order='F'
)
ixCasing = jxCasing.sum(0).sum(0)
izCasing = jzCasing.sum(0).sum(0)
ix_inds = (mesh.vectorCCz > casing_z[0]) & (mesh.vectorCCz < casing_z[1])
z_ix = mesh.vectorCCz[ix_inds]
iz_inds = (mesh.vectorNz > casing_z[0]) & (mesh.vectorNz < casing_z[1])
z_iz = mesh.vectorNz[iz_inds]
return {"x": (z_ix, ixCasing[ix_inds]), "z": (z_iz, izCasing[iz_inds])}
def casing_charges(charge, mesh, model_parameters):
casing_inds = (
(mesh.gridCC[:, 0] >= -model_parameters.casing_b-mesh.hx.min()) &
(mesh.gridCC[:, 0] <= model_parameters.casing_b+mesh.hx.min()) &
(mesh.gridCC[:, 2] < model_parameters.casing_z[1]) &
(mesh.gridCC[:, 2] > model_parameters.casing_z[0])
)
charge[~casing_inds] = 0.
charge = charge.reshape(mesh.vnC, order='F').sum(0).sum(0)
z_inds = (
(mesh.vectorCCz > model_parameters.casing_z[0]) &
(mesh.vectorCCz < model_parameters.casing_z[1])
)
z = mesh.vectorCCz[z_inds]
return z, charge[z_inds]
def plotCurrentDensity(
mesh,
fields_j, saveFig=False,
figsize=(4, 5), fontsize=12, csx=5., csz=5.,
xmax=1000., zmin=0., zmax=-1200., real_or_imag='real',
mirror=False, ax=None, fig=None, clim=None
):
csx, ncx = csx, np.ceil(xmax/csx)
csz, ncz = csz, np.ceil((zmin-zmax)/csz)
if mirror is True:
xlim = [-xmax, xmax]
x0 = [-xmax, -csx/2., zmax]
ncx *= 2.
else:
xlim = [0., xmax]
x0 = [0, -csx/2., zmax]
ylim = [zmax, zmin]
# define the tensor mesh
meshcart = discretize.TensorMesh(
[[(csx, ncx)], [(csx, 1)], [(csz, ncz)]], x0
)
projF = mesh.getInterpolationMatCartMesh(meshcart, 'F')
jcart = projF*fields_j
jcart = getattr(jcart, real_or_imag)
if ax is None:
fig, ax = plt.subplots(1, 1, figsize=figsize)
if saveFig is True:
# this looks obnoxious inline, but nice in the saved png
f = meshcart.plotSlice(
jcart, normal='Y', vType='F', view='vec',
pcolorOpts={
'norm': LogNorm(), 'cmap': plt.get_cmap('viridis')
},
streamOpts={'arrowsize': 6, 'color': 'k'},
ax=ax
)
else:
f = meshcart.plotSlice(
jcart, normal='Y', vType='F', view='vec',
pcolorOpts={
'norm': LogNorm(), 'cmap': plt.get_cmap('viridis')
},
ax=ax
)
plt.colorbar(
f[0], label='{} current density (A/m$^2$)'.format(
real_or_imag
)
)
if clim is not None:
        f[0].set_clim(clim)
ax.set_ylim(ylim)
ax.set_xlim(xlim)
# ax.set_title('Current Density')
ax.set_xlabel('radius (m)', fontsize=fontsize)
ax.set_ylabel('z (m)', fontsize=fontsize)
if saveFig is True:
fig.savefig('primaryCurrents', dpi=300, bbox_inches='tight')
return ax
def plot_currents_over_freq(
IxCasing, IzCasing, modelParameters, mesh,
mur=1, subtract=None, real_or_imag='real', ax=None, xlim=[-1100., 0.],
logScale=True, srcinds=[0], ylim_0=None, ylim_1=None
):
print("mu = {} mu_0".format(mur))
ixCasing = IxCasing[mur]
izCasing = IzCasing[mur]
if ax is None:
fig, ax = plt.subplots(2, 1, figsize=(10, 8))
for a in ax:
a.grid(
which='both', linestyle='-', linewidth=0.4, color=[0.8, 0.8, 0.8],
alpha=0.5
)
# getattr(a, 'semilogy' if logScale is True else 'plot')(
# [modelParameters.src_a[2], modelParameters.src_a[2]], [1e-14, 1], color=[0.3, 0.3, 0.3]
# )
a.set_xlim(xlim)
a.invert_xaxis()
col = ['b', 'g', 'r', 'c', 'm', 'y']
pos_linestyle = ['-', '-']
neg_linestyle = ['--', '--']
leg = []
for i, f in enumerate(modelParameters.freqs):
for srcind in srcinds:
# src = survey.getSrcByFreq(survey.freqs[freqind])[srcind]
# j = mkvc(fields[mur][src, 'j'].copy())
Iind = i + srcind*len(modelParameters.freqs)
Ix, Iz = ixCasing[Iind].copy(), izCasing[Iind].copy()
if subtract is not None:
Ix += -IxCasing[subtract][Iind].copy()
Iz += -IzCasing[subtract][Iind].copy()
Ix_plt = getattr(Ix, real_or_imag)
Iz_plt = getattr(Iz, real_or_imag)
if logScale is True:
ax0 = ax[0].semilogy(
mesh.vectorNz, Iz_plt,
'{linestyle}{color}'.format(
linestyle=pos_linestyle[srcind],
color=col[i]
),
label="{} Hz".format(f)
)
ax[0].semilogy(
mesh.vectorNz, -Iz_plt,
'{linestyle}{color}'.format(
linestyle=neg_linestyle[srcind],
color=col[i]
)
)
ax[1].semilogy(
mesh.vectorCCz, Ix_plt, '{linestyle}{color}'.format(
linestyle=pos_linestyle[srcind],
color=col[i]
)
)
ax[1].semilogy(
mesh.vectorCCz, -Ix_plt, '{linestyle}{color}'.format(
linestyle=neg_linestyle[srcind],
color=col[i]
)
)
else:
ax0 = ax[0].plot(
mesh.vectorNz, Iz_plt, '{linestyle}{color}'.format(
linestyle=pos_linestyle[srcind],
color=col[i]
), label="{} Hz".format(f)
)
ax[1].plot(
mesh.vectorCCz, Ix_plt, '{linestyle}{color}'.format(
linestyle=pos_linestyle[srcind],
color=col[i]
)
)
leg.append(ax0)
if ylim_0 is not None:
ax[0].set_ylim(ylim_0)
if ylim_1 is not None:
ax[1].set_ylim(ylim_1)
ax[0].legend(bbox_to_anchor=[1.25, 1])
# plt.show()
return ax
# plot current density over mu
def plot_currents_over_mu(
IxCasing, IzCasing, modelParameters, mesh,
freqind=0, real_or_imag='real',
subtract=None, ax=None, fig=None, logScale=True,
srcinds=[0],
ylim_0=None, ylim_1=None
):
print("{} Hz".format(modelParameters.freqs[freqind]))
if ax is None:
fig, ax = plt.subplots(2, 1, figsize=(10, 8))
for a in ax:
a.grid(
which='both', linestyle='-', linewidth=0.4, color=[0.8, 0.8, 0.8],
alpha=0.5
)
# getattr(a, 'semilogy' if logScale is True else 'plot')(
# [modelParameters.src_a[2], modelParameters.src_a[2]], [1e-14, 1], color=[0.3, 0.3, 0.3]
# )
a.set_xlim([-1100., 0.])
# a.set_ylim([1e-3, 1.])
a.invert_xaxis()
col = ['b', 'g', 'r', 'c', 'm', 'y']
pos_linestyle = ['-', '-']
neg_linestyle = ['--', '--']
leg = []
for i, mur in enumerate(modelParameters.muModels):
for srcind in srcinds:
            Iind = freqind + srcind*len(modelParameters.freqs)
ixCasing = IxCasing[mur]
izCasing = IzCasing[mur]
Ix, Iz = ixCasing[Iind].copy(), izCasing[Iind].copy()
if subtract is not None:
Ix = Ix - IxCasing[subtract][Iind]
Iz = Iz - IzCasing[subtract][Iind]
Iz_plt = getattr(Iz, real_or_imag)
Ix_plt = getattr(Ix, real_or_imag)
if logScale is True:
ax0 = ax[0].semilogy(
mesh.vectorNz, Iz_plt, '{linestyle}{color}'.format(
linestyle=pos_linestyle[srcind], color=col[i]
), label="{} $\mu_0$".format(mur)
)
ax[0].semilogy(
mesh.vectorNz, -Iz_plt, '{linestyle}{color}'.format(
linestyle=neg_linestyle[srcind], color=col[i]
)
)
ax[1].semilogy(
mesh.vectorCCz, Ix_plt, '{linestyle}{color}'.format(
linestyle=pos_linestyle[srcind], color=col[i]
)
)
ax[1].semilogy(
mesh.vectorCCz, -Ix_plt, '{linestyle}{color}'.format(
linestyle=neg_linestyle[srcind], color=col[i]
)
)
else:
ax0 = ax[0].plot(
mesh.vectorNz, Iz_plt, '{linestyle}{color}'.format(
linestyle=pos_linestyle[srcind], color=col[i]
), label="{} $\mu_0$".format(mur)
)
ax[1].plot(
mesh.vectorCCz, Ix_plt, '{linestyle}{color}'.format(
linestyle=pos_linestyle[srcind], color=col[i]
)
)
leg.append(ax0)
if ylim_0 is not None:
ax[0].set_ylim(ylim_0)
if ylim_1 is not None:
ax[1].set_ylim(ylim_1)
ax[0].legend(bbox_to_anchor=[1.25, 1])
# plt.show()
return ax
# plot over mu
def plot_j_over_mu_z(
modelParameters, fields, mesh, survey, freqind=0, r=1., xlim=[-1100., 0.],
real_or_imag='real', subtract=None, ax=None, logScale=True, srcinds=[0],
ylim_0=None, ylim_1=None, fig=None
):
print("{} Hz".format(modelParameters.freqs[freqind]))
x_plt = np.r_[r]
z_plt = np.linspace(xlim[0], xlim[1], int(xlim[1]-xlim[0]))
XYZ = ndgrid(x_plt, np.r_[0], z_plt)
Pfx = mesh.getInterpolationMat(XYZ, 'Fx')
Pfz = mesh.getInterpolationMat(XYZ, 'Fz')
Pc = mesh.getInterpolationMat(XYZ, 'CC')
Zero = sp.csr_matrix(Pc.shape)
Pcx, Pcz = sp.hstack([Pc, Zero]), sp.hstack([Zero, Pc])
if ax is None:
fig, ax = plt.subplots(2, 1, figsize=(10, 8))
for a in ax:
a.grid(
which='both', linestyle='-', linewidth=0.4,
color=[0.8, 0.8, 0.8], alpha=0.5
)
# getattr(a, 'semilogy' if logScale is True else 'plot')(
# [modelParameters.src_a[2], modelParameters.src_a[2]], [1e-14, 1], color=[0.3, 0.3, 0.3]
# )
a.set_xlim(xlim)
a.invert_xaxis()
col = ['b', 'g', 'r', 'c', 'm', 'y']
pos_linestyle = ['-', '-']
neg_linestyle = ['--', '--']
leg = []
for i, mur in enumerate(modelParameters.muModels):
for srcind in srcinds:
src = survey.getSrcByFreq(survey.freqs[freqind])[srcind]
j = mkvc(fields[mur][src, 'j'].copy())
if subtract is not None:
j = j - mkvc(
fields[subtract][src, 'j'].copy()
)
if real_or_imag == 'real':
j = j.real
else:
j = j.imag
jx, jz = Pfx * j, Pfz * j
if logScale is True:
ax0 = ax[0].semilogy(
z_plt, jz, '{linestyle}{color}'.format(
linestyle=pos_linestyle[srcind],
color=col[i]
),
label="{} $\mu_0$".format(mur)
)
ax[0].semilogy(
z_plt, -jz, '{linestyle}{color}'.format(
linestyle=neg_linestyle[srcind],
color=col[i]
)
)
ax[1].semilogy(
z_plt, jx, '{linestyle}{color}'.format(
linestyle=pos_linestyle[srcind],
color=col[i]
)
)
ax[1].semilogy(
z_plt, -jx, '{linestyle}{color}'.format(
linestyle=neg_linestyle[srcind],
color=col[i]
)
)
else:
ax0 = ax[0].plot(
z_plt, jz, '{linestyle}{color}'.format(
linestyle=pos_linestyle[srcind],
color=col[i]
), label="{} $\mu_0$".format(mur)
)
ax[1].plot(
z_plt, jx, '{linestyle}{color}'.format(
linestyle=pos_linestyle[srcind],
color=col[i]
)
)
leg.append(ax0)
if ylim_0 is not None:
ax[0].set_ylim(ylim_0)
if ylim_1 is not None:
ax[1].set_ylim(ylim_1)
ax[0].legend(bbox_to_anchor=[1.25, 1])
return ax
# plot over mu
def plot_j_over_freq_z(
modelParameters, fields, mesh, survey, mur=1., r=1., xlim=[-1100., 0.],
real_or_imag='real', subtract=None, ax=None, logScale=True, srcinds=[0],
ylim_0=None, ylim_1=None, fig=None
):
print("mu = {} mu_0".format(mur))
x_plt = np.r_[r]
z_plt = np.linspace(xlim[0], xlim[1], int(xlim[1]-xlim[0]))
XYZ = ndgrid(x_plt, np.r_[0], z_plt)
Pfx = mesh.getInterpolationMat(XYZ, 'Fx')
Pfz = mesh.getInterpolationMat(XYZ, 'Fz')
Pc = mesh.getInterpolationMat(XYZ, 'CC')
Zero = sp.csr_matrix(Pc.shape)
Pcx, Pcz = sp.hstack([Pc, Zero]), sp.hstack([Zero, Pc])
if ax is None:
fig, ax = plt.subplots(2, 1, figsize=(10, 8))
for a in ax:
a.grid(
which='both', linestyle='-', linewidth=0.4,
color=[0.8, 0.8, 0.8], alpha=0.5
)
a.set_xlim(xlim)
a.invert_xaxis()
col = ['b', 'g', 'r', 'c', 'm', 'y']
pos_linestyle = ['-', '-']
neg_linestyle = ['--', '--']
leg = []
for i, freq in enumerate(modelParameters.freqs):
for srcind in srcinds:
src = survey.getSrcByFreq(freq)[srcind]
j = mkvc(fields[mur][src, 'j'].copy())
if subtract is not None:
j = j - mkvc(
fields[subtract][src, 'j'].copy()
)
if real_or_imag == 'real':
j = j.real
else:
j = j.imag
jx, jz = Pfx * j, Pfz * j
if logScale is True:
ax0 = ax[0].semilogy(
z_plt, jz, '{linestyle}{color}'.format(
linestyle=pos_linestyle[srcind],
color=col[i]
),
label="{} Hz".format(freq)
)
ax[0].semilogy(
z_plt, -jz, '{linestyle}{color}'.format(
linestyle=neg_linestyle[srcind],
color=col[i]
)
)
ax[1].semilogy(
z_plt, jx, '{linestyle}{color}'.format(
linestyle=pos_linestyle[srcind],
color=col[i]
)
)
ax[1].semilogy(
z_plt, -jx, '{linestyle}{color}'.format(
linestyle=neg_linestyle[srcind],
color=col[i]
)
)
else:
ax0 = ax[0].plot(
z_plt, jz, '{linestyle}{color}'.format(
linestyle=pos_linestyle[srcind],
color=col[i]
), label="{} $\mu_0$".format(mur)
)
ax[1].plot(
z_plt, jx, '{linestyle}{color}'.format(
linestyle=pos_linestyle[srcind],
color=col[i]
)
)
leg.append(ax0)
if ylim_0 is not None:
ax[0].set_ylim(ylim_0)
if ylim_1 is not None:
ax[1].set_ylim(ylim_1)
ax[0].legend(bbox_to_anchor=[1.25, 1])
return ax
# plot over mu
def plot_j_over_mu_x(
    modelParameters, fields, mesh, survey, freqind=0, z=-950., real_or_imag='real',
    subtract=None, xlim=[0., 2000.], logScale=True, srcinds=[0],
    ylim_0=None, ylim_1=None, ax=None, fig=None
):
    print("{} Hz".format(modelParameters.freqs[freqind]))
    x_plt = np.linspace(xlim[0], xlim[1], int(xlim[1]-xlim[0]))
z_plt = np.r_[z]
XYZ = ndgrid(x_plt, np.r_[0], z_plt)
Pfx = mesh.getInterpolationMat(XYZ, 'Fx')
Pfz = mesh.getInterpolationMat(XYZ, 'Fz')
Pc = mesh.getInterpolationMat(XYZ, 'CC')
Zero = sp.csr_matrix(Pc.shape)
Pcx, Pcz = sp.hstack([Pc, Zero]), sp.hstack([Zero, Pc])
if ax is None:
fig, ax = plt.subplots(2, 1, figsize=(10, 8))
for a in ax:
a.grid(
which='both', linestyle='-', linewidth=0.4, color=[0.8, 0.8, 0.8],
alpha=0.5
)
# a.semilogy([src_a[2], src_a[2]], [1e-14, 1], color=[0.3, 0.3, 0.3])
a.set_xlim(xlim)
# a.invert_xaxis()
col = ['b', 'g', 'r', 'c', 'm', 'y']
pos_linestyle = ['-', '-']
neg_linestyle = ['--', '--']
leg = []
    for i, mur in enumerate(modelParameters.muModels):
for srcind in srcinds:
src = survey.getSrcByFreq(survey.freqs[freqind])[srcind]
j = mkvc(fields[mur][src, 'j'].copy())
if subtract is not None:
j = j - mkvc(
fields[subtract][src, 'j'].copy()
)
if real_or_imag == 'real':
j = j.real
else:
j = j.imag
jx, jz = Pfx * j, Pfz * j
if logScale is True:
if np.any(jz > 0):
ax0 = ax[0].semilogy(
x_plt, jz, '{linestyle}{color}'.format(
linestyle=pos_linestyle[srcind], color=col[i]
), label="{} $\mu_0$".format(mur)
)
if np.any(jz < 0):
ax[0].semilogy(
x_plt, -jz, '{linestyle}{color}'.format(
linestyle=neg_linestyle[srcind], color=col[i]
)
)
if np.any(jx > 0):
ax[1].semilogy(x_plt, jx, '{linestyle}{color}'.format(
linestyle=pos_linestyle[srcind], color=col[i]
))
if np.any(jx < 0):
ax[1].semilogy(
x_plt, -jx, '{linestyle}{color}'.format(
linestyle=neg_linestyle[srcind], color=col[i]
)
)
else:
ax0 = ax[0].plot(
x_plt, jz, '{linestyle}{color}'.format(
linestyle=pos_linestyle[srcind], color=col[i]
), label="{} $\mu_0$".format(mur)
)
                ax[1].plot(x_plt, jx, '{linestyle}{color}'.format(
linestyle=pos_linestyle[srcind], color=col[i]
))
leg.append(ax0)
if ylim_0 is not None:
ax[0].set_ylim(ylim_0)
if ylim_1 is not None:
ax[1].set_ylim(ylim_1)
ax[0].legend(bbox_to_anchor=[1.25, 1])
return ax
|
|
# -*- coding: utf-8 -*-
"""
Created on Thu Jul 16 11:19:03 2015
@author: ibackus
"""
import warnings
import logging
import os
import glob
import fnmatch
import numpy as np
import pynbody as pb
SimArray = pb.array.SimArray
def configparser(fname,ftype='auto'):
"""
--------------------------------------------------
parameters = configparser(fname,ftype='auto')
Tries to parse ChaNGa configuration files
ftype can be 'auto', 'param', or 'director'. If auto, config parser will
try to determine the filetype on its own.
returns:
dictionary 'parameters'. The keys are the names of the parameters and
the values are the values defined in the file fname
--------------------------------------------------
"""
types = np.array(['param','director'])
ftype = ftype.lower()
param = {}
if ftype == 'auto':
# Try using extension to determine file type
a = fname.split('.')
ftype = a[-1].lower()
if np.sum(types == ftype) == 0:
# Could not find file type
print ('Could not determine config filetype...exiting')
return param
# Try to determine filetype
# --------------------------------------------------
# Parse param file
# --------------------------------------------------
if ftype == 'param':
farray = np.genfromtxt(fname,delimiter='=',dtype=None)
for n in range(len(farray)):
param[farray[n,0].strip()] = str2num(farray[n,1].strip())
# --------------------------------------------------
# Parse director file
# --------------------------------------------------
elif ftype == 'director':
f = open(fname,'r')
f.seek(0)
for line in f:
a = line.strip().split()
if len(a) == 1:
# we're dealing with a flag
param[a[0]] = str2num(a[0])
elif len(a) > 1:
param[a[0]] = str2num(a[1:])
else:
# This is an empty line
pass
f.close()
# --------------------------------------------------
# Throw warning, return 'param' as empty
# --------------------------------------------------
else:
warnings.warn('Still cannot determine filetype.')
return param
def logparser(fname, verbose=False):
"""
Parses a ChaNGa log file to find run-time parameters. Also returns the
header under the key 'header'
Parameters
----------
fname : str
Filename of the log file to open
verbose : bool
(optional) If True, prints all the parameters
Returns
-------
param : dict
Dictionary of the parameters
See Also
--------
configparser
"""
header = []
with open(fname,'r') as f:
# Parse until finding parameters
found = False
while not found:
line = f.readline().strip()
if line[0] != '#':
raise RuntimeError, 'Could not find parameters'
line = line.strip('#').strip()
if line == 'Parameters:':
found = True
else:
header.append(line)
# Now read in parameters
done = False
param = {'header': header}
while not done:
line = f.readline().strip()
if line[0] != '#':
raise RuntimeError, 'Expected # at beginning of line: ' + line
if ':' not in line:
done = True
else:
line = line.strip('#').strip()
k, v = line.split(': ')
v = str2num(v)
param[k] = v
if verbose:
print k, v
return param
def configsave(param,filename,ftype='auto'):
"""
--------------------------------------------------
Saves parameters defined by param (see configparser) to filename.
Possible ftypes are 'director' and 'param'. If set to auto, configsave
tries to guess file type from the extension.
--------------------------------------------------
"""
f = open(filename,'w')
ftype = ftype.lower()
if ftype == 'auto':
# Try to figure out filetype
a = filename.split('.')
ftype = a[-1].lower()
if ftype == 'param':
pars = sorted(param.iteritems())
for n in range(len(pars)):
f.write('{0:25s}= {1}\n'.format(pars[n][0],pars[n][1]))
elif ftype == 'director':
values = param.values()
keys = param.keys()
for n in range(len(keys)):
outstr = keys[n]
if outstr == values[n]:
# We just have a flag
pass
elif isinstance(values[n],(float,int,str)):
outstr = outstr + ' {0}'.format(values[n])
else:
outstr = outstr + ' ' + ' '.join(map(str,values[n]))
f.write('{0}\n'.format(outstr))
else:
#no file type
warnings.warn('no such filetype {0}\nCould not save'.format(ftype))
f.close()
def units_from_param(param):
"""
Figures out the simulation units from a .param file
**ARGUMENTS**
param : str or param dict (see configparser)
Simulation .param file or param dict loaded by configparser
Can also be a list or numpy array of these in which case a list
of units dicts is returned
**RETURNS**
units : dict
A dictionary of the units used in the simulation, returned as
pynbody units
"""
# Define function to load the units from a given param
def _load_units(param):
# Load param if necessary
if isinstance(param, str):
param = configparser(param, 'param')
# Universal G
G = pb.units.G
# Load units
dKpcUnit = param['dKpcUnit']
dMsolUnit = param['dMsolUnit']
# Set up pynbody units
m_unit = pb.units.Unit('{0} Msol'.format(dMsolUnit))
l_unit = pb.units.Unit('{0} kpc'.format(dKpcUnit))
t_unit = (l_unit**3/(G*m_unit))**(1,2)
# Convert the time unit to something sensible
years = t_unit.in_units('yr')
t_unit = pb.units.Unit('{0} yr'.format(years))
# Return
outdict = {'l_unit':l_unit, 'm_unit':m_unit, 't_unit':t_unit}
return outdict
# Iterate over param if necessary
if isinstance(param, (list, np.ndarray)):
outlist = []
for par in param:
outlist.append(_load_units(par))
return outlist
else:
# Not iterable
return _load_units(param)
def strip_units(x):
"""
Removes the units from a SimArray and returns as a numpy array. Note
that x is copied so that it is not destroyed
x can be a single SimArray or a tuple or list of SimArrays
If any of the inputs are single number, they are returned as a number
USAGE:
array = strip_units(SimArray)
array1, array2, ... = strip_units([SimArray1, SimArray2, ...])
"""
if isinstance(x, (tuple,list)):
# loop through and assign output
x_out = []
for x_i in x:
x_i = np.asarray(x_i)
if np.prod(x_i.shape) == 1:
# There is only one element in x_i. Make sure to return it as
# a number (not an array)
if np.sum(x_i.shape) == 0:
# This is a zero dimensional SimArray
x_out.append(x_i.tolist())
else:
# This is 1 dimensional SimArray
x_out.append(x_i[0])
else:
#This is a multi-element SimArray
x_out.append(np.asarray(x_i.tolist()))
else:
x = np.asarray(x)
if np.prod(x.shape) == 1:
# There is only one element in x_i. Return as a number
if np.sum(x.shape) == 0:
# This is a 0 dimensional SimArray
x_out = x.tolist()
else:
# This a 1 dimensional SimArray
x_out = x[0]
else:
x_out = np.asarray(x.tolist())
return x_out
def set_units(x, units):
"""
Sets the units of x to units. If x has units, they are ignored.
Does not destroy/alter x
USAGE:
SimArray = set_units(x, units)
SimArray1, SimArray2, ... = set_units([x1, x2, ...], units)
SimArray1, SimArray2, ... = set_units([x1, x2, ...], [units1, units2, ...])
"""
if isinstance(x, (tuple,list)):
x_out = []
if not isinstance(units, (tuple, list)):
units = [units]*len(x)
for i in range(len(x)):
x_i = x[i]
if pb.units.has_units(x_i):
x_i_array = strip_units(x_i)
x_out.append(SimArray(x_i_array, units[i]))
else:
x_out.append(SimArray(x_i, units[i]))
else:
if pb.units.has_units(x):
x_array = strip_units(x)
x_out = SimArray(x_array, units)
else:
x_out = SimArray(x, units)
return x_out
def match_units(x, y):
"""
Matches the units of x to y and returns x and y in the same units.
IF x and y don't have units, they are unchanged
IF one of x or y has units, the unitless quantity is returned as a
SimArray (see pb.array.SimArray) with the units of the other quantity.
IF both have units, then an attempt is made to convert x into the units of
y. If this is not possible, an error is raised, for example if x is in
units of 'au' and y is in units of 'Msol'
x, y can be: scalar, array, SimArray, pynbody unit (eg pb.units.G),
or a unit string (eg 'Msol a**-2')
*** RETURNS ***
x, y are returned as a tuple
"""
# ----------------------------------------------
# Check if either is a string
# ----------------------------------------------
if isinstance(x, str):
x = SimArray(1.0, x)
if isinstance(y,str):
y = SimArray(1.0, y)
# ----------------------------------------------
# Check if one is a pynbody unit
# ----------------------------------------------
# If one is a named unit (eg pb.units.G), convert to SimArray
if isinstance(x, pb.units.UnitBase):
x = SimArray(1.0, x)
if isinstance(y, pb.units.UnitBase):
y = SimArray(1.0, y)
# ----------------------------------------------
# Check the units
# ----------------------------------------------
# If both have units, try to convert x to the units of y
if (pb.units.has_units(x)) & (pb.units.has_units(y)):
x_out = (x.in_units(y.units))
y_out = y
# If only x has units, make y a SimArray with the units of x
elif (pb.units.has_units(x)):
y_out = SimArray(y, x.units)
x_out = x
# If only y has units, make x a SimArray with the units of y
elif (pb.units.has_units(y)):
x_out = SimArray(x, y.units)
y_out = y
# Otherwise, neither has units
else:
x_out = x
y_out = y
# Try to copy so that changing x_out, y_out will not change x,y
try:
x_out = x_out.copy()
except AttributeError:
pass
try:
y_out = y_out.copy()
except AttributeError:
pass
return x_out, y_out
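def _match_units_example():
    """
    Hedged sketch of match_units (the values below are arbitrary examples, not
    taken from any simulation): a unit string is promoted to a SimArray and x is
    converted into the units of y.
    """
    x, y = match_units(SimArray(1.0, 'au'), 'km')
    # x is now ~1.496e8 km, y is SimArray(1.0, 'km')
    return x, y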
def findfiles(filefilter='*', basedir='.'):
"""
Recursively find files according to filefilter
** ARGUMENTS **
filefilter : str
Filter for finding files. ie, '*.jpg' or 'file.txt'
basedir : str
Base directory to search. Default is the current directory
** RETURNS **
files : list
A list of the full path to all files matching filefilter
"""
matches = []
for root, dirnames, filenames in os.walk(basedir):
for filename in fnmatch.filter(filenames, filefilter):
fname = os.path.join(root, filename)
fname = os.path.realpath(fname)
matches.append(fname)
return matches
def pbverbosity(cmd=None):
"""
Changes and returns pynbody verbosity. Works for different versions
of pynbody.
**ARGUMENTS**
cmd
-If None (default) current verbosity level is returned, nothing is done
-If 'off', pynbody is silenced
-If 'on', pynbody verbosity is set on
-If something else, cmd is assumed to be a verbosity level
**RETURNS**
current_verbosity
pynbody verbosity level before any changes were made
**EXAMPLES**
*Toggle pynbody verbosity*
current_verbosity = pbverbosity('off')
...
do stuff
...
pbverbosity(current_verbosity)
"""
# -----------------------------
# Get current verbosity level
# -----------------------------
if hasattr(pb, 'logger'):
# As of v0.30, pynbody uses python's logging to handle verbosity
useLogger = True
logger = logging.getLogger('pynbody')
current_verbosity = logger.getEffectiveLevel()
else:
# For pynbody version < 0.3, verbosity is handled in the config
useLogger = False
current_verbosity = pb.config['verbose']
# -----------------------------
# Change verbosity
# -----------------------------
if cmd is None:
# Don't change verbosity. just return the current verbosity
pass
elif cmd == 'off':
# Toggle verbosity off
if useLogger:
logger.setLevel(logging.ERROR)
else:
pb.config['verbose'] = False
elif cmd == 'on':
# Toggle verbosity on
if useLogger:
logger.setLevel(logging.DEBUG)
else:
pb.config['verbose'] = True
else:
# Set verbosity to the verbosity level specified by cmd
if useLogger:
logger.setLevel(cmd)
else:
pb.config['verbose'] = cmd
# Return the verbosity level before any changes were made
return current_verbosity
def str2num(string):
"""
--------------------------------------------------
Tries to see if 'string' is a number
If 'string' is a string, returns:
int(string) for integers
float(string) for floats
'string' otherwise
If 'string' is a float or an integer, returns:
string
If none of the above, treats it like a list or tuple
and returns for each entry of 'string' a float,int,
or str as required. Returns as a list
--------------------------------------------------
"""
if isinstance(string,int):
output = string
elif isinstance(string,float):
output = string
elif not isinstance(string,str):
output = []
for a in string:
try:
output.append(int(a))
except:
try:
output.append(float(a))
except:
output.append(a)
if len(output) == 1:
output = output[0]
else:
output = string
try:
output = int(string)
except:
try:
output = float(string)
except:
pass
return output
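# A few illustrative calls to str2num (a minimal sketch; the input values are made up):
if __name__ == '__main__':
    print(str2num('42'))                # -> 42 (int)
    print(str2num('3.14'))              # -> 3.14 (float)
    print(str2num('hello'))             # -> 'hello' (returned unchanged)
    print(str2num(['1', '2.5', 'x']))   # -> [1, 2.5, 'x']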
def get_module_names(fname):
"""
An import utility that returns the module names in the directory of file.
Ignores filenames beginning with an underscore.
Parameters
----------
fname : str
Filename
Returns
-------
modulenames : list
A list of the modules
"""
directory = os.path.dirname(os.path.realpath(fname))
searchstr = os.path.join(directory, '*.py')
fullpaths = glob.glob(searchstr)
fnames = []
for fullpath in fullpaths:
f = os.path.split(fullpath)[-1]
        if f[0] != '_':
fnames.append(fullpath)
modulenames = []
for fname in fnames:
modulename = os.path.split(os.path.splitext(fname)[0])[-1]
modulenames.append(modulename)
return modulenames
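# Usage sketch for get_module_names (the result depends on which .py files sit next to this one):
if __name__ == '__main__':
    print(get_module_names(__file__))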
|
|
""" @package antlr3.tree
@brief ANTLR3 runtime package, treewizard module
A utility module to create ASTs at runtime.
See <http://www.antlr.org/wiki/display/~admin/2007/07/02/Exploring+Concept+of+TreeWizard> for an overview. Note that the API of the Python implementation is slightly different.
"""
# begin[licence]
#
# [The "BSD licence"]
# Copyright (c) 2005-2008 Terence Parr
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# 3. The name of the author may not be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
# IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
# OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
# IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
# NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
# THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# end[licence]
from antlr3.constants import INVALID_TOKEN_TYPE
from antlr3.tokens import CommonToken
from antlr3.tree import CommonTree, CommonTreeAdaptor
def computeTokenTypes(tokenNames):
"""
Compute a dict that is an inverted index of
tokenNames (which maps int token types to names).
"""
if tokenNames is None:
return {}
return dict((name, type) for type, name in enumerate(tokenNames))
## token types for pattern parser
EOF = -1
BEGIN = 1
END = 2
ID = 3
ARG = 4
PERCENT = 5
COLON = 6
DOT = 7
class TreePatternLexer(object):
def __init__(self, pattern):
## The tree pattern to lex like "(A B C)"
self.pattern = pattern
## Index into input string
self.p = -1
## Current char
self.c = None
## How long is the pattern in char?
self.n = len(pattern)
## Set when token type is ID or ARG
self.sval = None
self.error = False
self.consume()
__idStartChar = frozenset(
'abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ_'
)
__idChar = __idStartChar | frozenset('0123456789')
def nextToken(self):
self.sval = ""
while self.c != EOF:
if self.c in (' ', '\n', '\r', '\t'):
self.consume()
continue
if self.c in self.__idStartChar:
self.sval += self.c
self.consume()
while self.c in self.__idChar:
self.sval += self.c
self.consume()
return ID
if self.c == '(':
self.consume()
return BEGIN
if self.c == ')':
self.consume()
return END
if self.c == '%':
self.consume()
return PERCENT
if self.c == ':':
self.consume()
return COLON
if self.c == '.':
self.consume()
return DOT
if self.c == '[': # grab [x] as a string, returning x
self.consume()
while self.c != ']':
if self.c == '\\':
self.consume()
if self.c != ']':
self.sval += '\\'
self.sval += self.c
else:
self.sval += self.c
self.consume()
self.consume()
return ARG
self.consume()
self.error = True
return EOF
return EOF
def consume(self):
self.p += 1
if self.p >= self.n:
self.c = EOF
else:
self.c = self.pattern[self.p]
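# A small sketch of the token stream TreePatternLexer produces for a pattern
# (uses only the constants and class defined above; the pattern itself is made up):
if __name__ == '__main__':
    _lexer = TreePatternLexer('(A B[foo] .)')
    _ttype = _lexer.nextToken()
    while _ttype != EOF:
        # expected sequence: BEGIN, ID 'A', ID 'B', ARG 'foo', DOT, END
        print('%d %r' % (_ttype, _lexer.sval))
        _ttype = _lexer.nextToken()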
class TreePatternParser(object):
def __init__(self, tokenizer, wizard, adaptor):
self.tokenizer = tokenizer
self.wizard = wizard
self.adaptor = adaptor
self.ttype = tokenizer.nextToken() # kickstart
def pattern(self):
if self.ttype == BEGIN:
return self.parseTree()
elif self.ttype == ID:
node = self.parseNode()
if self.ttype == EOF:
return node
return None # extra junk on end
return None
def parseTree(self):
if self.ttype != BEGIN:
return None
self.ttype = self.tokenizer.nextToken()
root = self.parseNode()
if root is None:
return None
while self.ttype in (BEGIN, ID, PERCENT, DOT):
if self.ttype == BEGIN:
subtree = self.parseTree()
self.adaptor.addChild(root, subtree)
else:
child = self.parseNode()
if child is None:
return None
self.adaptor.addChild(root, child)
if self.ttype != END:
return None
self.ttype = self.tokenizer.nextToken()
return root
def parseNode(self):
# "%label:" prefix
label = None
if self.ttype == PERCENT:
self.ttype = self.tokenizer.nextToken()
if self.ttype != ID:
return None
label = self.tokenizer.sval
self.ttype = self.tokenizer.nextToken()
if self.ttype != COLON:
return None
self.ttype = self.tokenizer.nextToken() # move to ID following colon
# Wildcard?
if self.ttype == DOT:
self.ttype = self.tokenizer.nextToken()
wildcardPayload = CommonToken(0, ".")
node = WildcardTreePattern(wildcardPayload)
if label is not None:
node.label = label
return node
# "ID" or "ID[arg]"
if self.ttype != ID:
return None
tokenName = self.tokenizer.sval
self.ttype = self.tokenizer.nextToken()
if tokenName == "nil":
return self.adaptor.nil()
text = tokenName
# check for arg
arg = None
if self.ttype == ARG:
arg = self.tokenizer.sval
text = arg
self.ttype = self.tokenizer.nextToken()
# create node
treeNodeType = self.wizard.getTokenType(tokenName)
if treeNodeType == INVALID_TOKEN_TYPE:
return None
node = self.adaptor.createFromType(treeNodeType, text)
if label is not None and isinstance(node, TreePattern):
node.label = label
if arg is not None and isinstance(node, TreePattern):
node.hasTextArg = True
return node
class TreePattern(CommonTree):
"""
When using %label:TOKENNAME in a tree for parse(), we must
track the label.
"""
def __init__(self, payload):
CommonTree.__init__(self, payload)
self.label = None
self.hasTextArg = None
def toString(self):
if self.label is not None:
return '%' + self.label + ':' + CommonTree.toString(self)
else:
return CommonTree.toString(self)
class WildcardTreePattern(TreePattern):
pass
class TreePatternTreeAdaptor(CommonTreeAdaptor):
"""This adaptor creates TreePattern objects for use during scan()"""
def createWithPayload(self, payload):
return TreePattern(payload)
class TreeWizard(object):
"""
Build and navigate trees with this object. Must know about the names
of tokens so you have to pass in a map or array of token names (from which
this class can build the map). I.e., Token DECL means nothing unless the
class can translate it to a token type.
In order to create nodes and navigate, this class needs a TreeAdaptor.
This class can build a token type -> node index for repeated use or for
iterating over the various nodes with a particular type.
This class works in conjunction with the TreeAdaptor rather than moving
all this functionality into the adaptor. An adaptor helps build and
navigate trees using methods. This class helps you do it with string
patterns like "(A B C)". You can create a tree from that pattern or
match subtrees against it.
"""
def __init__(self, adaptor=None, tokenNames=None, typeMap=None):
if adaptor is None:
self.adaptor = CommonTreeAdaptor()
else:
self.adaptor = adaptor
if typeMap is None:
self.tokenNameToTypeMap = computeTokenTypes(tokenNames)
else:
if tokenNames is not None:
raise ValueError("Can't have both tokenNames and typeMap")
self.tokenNameToTypeMap = typeMap
def getTokenType(self, tokenName):
"""Using the map of token names to token types, return the type."""
try:
return self.tokenNameToTypeMap[tokenName]
except KeyError:
return INVALID_TOKEN_TYPE
def create(self, pattern):
"""
Create a tree or node from the indicated tree pattern that closely
follows ANTLR tree grammar tree element syntax:
(root child1 ... child2).
You can also just pass in a node: ID
Any node can have a text argument: ID[foo]
(notice there are no quotes around foo--it's clear it's a string).
nil is a special name meaning "give me a nil node". Useful for
making lists: (nil A B C) is a list of A B C.
"""
tokenizer = TreePatternLexer(pattern)
parser = TreePatternParser(tokenizer, self, self.adaptor)
return parser.pattern()
def index(self, tree):
"""Walk the entire tree and make a node name to nodes mapping.
For now, use recursion but later nonrecursive version may be
more efficient. Returns a dict int -> list where the list is
of your AST node type. The int is the token type of the node.
"""
m = {}
self._index(tree, m)
return m
def _index(self, t, m):
"""Do the work for index"""
if t is None:
return
ttype = self.adaptor.getType(t)
elements = m.get(ttype)
if elements is None:
m[ttype] = elements = []
elements.append(t)
for i in range(self.adaptor.getChildCount(t)):
child = self.adaptor.getChild(t, i)
self._index(child, m)
def find(self, tree, what):
"""Return a list of matching token.
what may either be an integer specifzing the token type to find or
a string with a pattern that must be matched.
"""
if isinstance(what, (int, long)):
return self._findTokenType(tree, what)
elif isinstance(what, basestring):
return self._findPattern(tree, what)
else:
raise TypeError("'what' must be string or integer")
def _findTokenType(self, t, ttype):
"""Return a List of tree nodes with token type ttype"""
nodes = []
def visitor(tree, parent, childIndex, labels):
nodes.append(tree)
self.visit(t, ttype, visitor)
return nodes
def _findPattern(self, t, pattern):
"""Return a List of subtrees matching pattern."""
subtrees = []
# Create a TreePattern from the pattern
tokenizer = TreePatternLexer(pattern)
parser = TreePatternParser(tokenizer, self, TreePatternTreeAdaptor())
tpattern = parser.pattern()
# don't allow invalid patterns
if (tpattern is None or tpattern.isNil()
or isinstance(tpattern, WildcardTreePattern)):
return None
rootTokenType = tpattern.getType()
def visitor(tree, parent, childIndex, label):
if self._parse(tree, tpattern, None):
subtrees.append(tree)
self.visit(t, rootTokenType, visitor)
return subtrees
def visit(self, tree, what, visitor):
"""Visit every node in tree matching what, invoking the visitor.
If what is a string, it is parsed as a pattern and only matching
subtrees will be visited.
The implementation uses the root node of the pattern in combination
with visit(t, ttype, visitor) so nil-rooted patterns are not allowed.
Patterns with wildcard roots are also not allowed.
If what is an integer, it is used as a token type and visit will match
all nodes of that type (this is faster than the pattern match).
The labels arg of the visitor action method is never set (it's None)
since using a token type rather than a pattern doesn't let us set a
label.
"""
if isinstance(what, (int, long)):
self._visitType(tree, None, 0, what, visitor)
elif isinstance(what, basestring):
self._visitPattern(tree, what, visitor)
else:
raise TypeError("'what' must be string or integer")
def _visitType(self, t, parent, childIndex, ttype, visitor):
"""Do the recursive work for visit"""
if t is None:
return
if self.adaptor.getType(t) == ttype:
visitor(t, parent, childIndex, None)
for i in range(self.adaptor.getChildCount(t)):
child = self.adaptor.getChild(t, i)
self._visitType(child, t, i, ttype, visitor)
def _visitPattern(self, tree, pattern, visitor):
"""
For all subtrees that match the pattern, execute the visit action.
"""
# Create a TreePattern from the pattern
tokenizer = TreePatternLexer(pattern)
parser = TreePatternParser(tokenizer, self, TreePatternTreeAdaptor())
tpattern = parser.pattern()
# don't allow invalid patterns
if (tpattern is None or tpattern.isNil()
or isinstance(tpattern, WildcardTreePattern)):
return
rootTokenType = tpattern.getType()
def rootvisitor(tree, parent, childIndex, labels):
labels = {}
if self._parse(tree, tpattern, labels):
visitor(tree, parent, childIndex, labels)
self.visit(tree, rootTokenType, rootvisitor)
def parse(self, t, pattern, labels=None):
"""
Given a pattern like (ASSIGN %lhs:ID %rhs:.) with optional labels
on the various nodes and '.' (dot) as the node/subtree wildcard,
return true if the pattern matches and fill the labels Map with
the labels pointing at the appropriate nodes. Return false if
the pattern is malformed or the tree does not match.
If a node specifies a text arg in pattern, then that must match
for that node in t.
"""
tokenizer = TreePatternLexer(pattern)
parser = TreePatternParser(tokenizer, self, TreePatternTreeAdaptor())
tpattern = parser.pattern()
return self._parse(t, tpattern, labels)
def _parse(self, t1, tpattern, labels):
"""
Do the work for parse. Check to see if the tpattern fits the
structure and token types in t1. Check text if the pattern has
text arguments on nodes. Fill labels map with pointers to nodes
in tree matched against nodes in pattern with labels.
"""
# make sure both are non-null
if t1 is None or tpattern is None:
return False
# check roots (wildcard matches anything)
if not isinstance(tpattern, WildcardTreePattern):
if self.adaptor.getType(t1) != tpattern.getType():
return False
# if pattern has text, check node text
if (tpattern.hasTextArg
and self.adaptor.getText(t1) != tpattern.getText()):
return False
if tpattern.label is not None and labels is not None:
# map label in pattern to node in t1
labels[tpattern.label] = t1
# check children
n1 = self.adaptor.getChildCount(t1)
n2 = tpattern.getChildCount()
if n1 != n2:
return False
for i in range(n1):
child1 = self.adaptor.getChild(t1, i)
child2 = tpattern.getChild(i)
if not self._parse(child1, child2, labels):
return False
return True
def equals(self, t1, t2, adaptor=None):
"""
Compare t1 and t2; return true if token types/text, structure match
exactly.
The trees are examined in their entirety so that (A B) does not match
(A B C) nor (A (B C)).
"""
if adaptor is None:
adaptor = self.adaptor
return self._equals(t1, t2, adaptor)
def _equals(self, t1, t2, adaptor):
# make sure both are non-null
if t1 is None or t2 is None:
return False
# check roots
if adaptor.getType(t1) != adaptor.getType(t2):
return False
if adaptor.getText(t1) != adaptor.getText(t2):
return False
# check children
n1 = adaptor.getChildCount(t1)
n2 = adaptor.getChildCount(t2)
if n1 != n2:
return False
for i in range(n1):
child1 = adaptor.getChild(t1, i)
child2 = adaptor.getChild(t2, i)
if not self._equals(child1, child2, adaptor):
return False
return True
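# A minimal usage sketch for TreeWizard; the token names below are made up for illustration
# and are not part of the original module:
if __name__ == '__main__':
    _token_names = ['<invalid>', '<EOR>', '<DOWN>', '<UP>', 'PLUS', 'INT']
    _wiz = TreeWizard(CommonTreeAdaptor(), tokenNames=_token_names)
    _t = _wiz.create('(PLUS INT[3] INT[4])')    # build a tree from a pattern
    print(_t.toStringTree())                    # (PLUS 3 4)
    _labels = {}
    if _wiz.parse(_t, '(PLUS %a:INT %b:.)', _labels):
        print('%s %s' % (_labels['a'].toString(), _labels['b'].toString()))  # 3 4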
|
|
## 1. Equal interval scales ##
car_speeds = [10,20,30,50,20]
earthquake_intensities = [2,7,4,5,8]
mean_car_speed = sum(car_speeds)/len(car_speeds)
mean_earthquake_intensities = sum(earthquake_intensities)/len(earthquake_intensities)
## 2. Discrete and continuous scales ##
day_numbers = [1,2,3,4,5,6,7]
snail_crawl_length = [.5,2,5,10,1,.25,4]
cars_in_parking_lot = [5,6,4,2,1,7,8]
import matplotlib.pyplot as plt
plt.plot(day_numbers,snail_crawl_length)
plt.show()
plt.plot(day_numbers,cars_in_parking_lot)
plt.show()
## 3. Scale starting points ##
fahrenheit_degrees = [32, 64, 78, 102]
yearly_town_population = [100,102,103,110,105,120]
population_zero = yearly_town_population
degrees_zero = [f + 459.67 for f in fahrenheit_degrees]
## 4. Ordinal scales ##
# Results from our survey on how many cigarettes people smoke per day
survey_responses = ["none", "some", "a lot", "none", "a few", "none", "none"]
survey_scale = ["none", "a few", "some", "a lot"]
survey_numbers = [survey_scale.index(response) for response in survey_responses]
average_smoking = sum(survey_numbers) / len(survey_numbers)
## 5. Categorical scales ##
# Let's say that these lists are both columns in a matrix. Index 0 in both is the first row, and so on.
gender = ["male", "female", "female", "male", "male", "female"]
savings = [1200, 5000, 3400, 2400, 2800, 4100]
male_count = 0
female_count = 0
m_c = 0
f_c = 0
for i in range(len(gender)):
if gender[i] == 'male':
male_count += savings[i]
m_c +=1
elif gender[i] == 'female':
female_count += savings[i]
f_c +=1
male_savings = male_count/m_c
female_savings = female_count/f_c
## 6. Frequency histograms ##
# Let's say that we watch cars drive by, and measure average speed in miles per hour
average_speed = [10, 20, 25, 27, 28, 22, 15, 18, 17]
import matplotlib.pyplot as plt
plt.hist(average_speed)
plt.show()
# Let's say we measure student test scores, from 0-100
student_scores = [15, 80, 95, 100, 45, 75, 65]
plt.hist(student_scores)
plt.show()
## 7. Histogram bins ##
average_speed = [10, 20, 25, 27, 28, 22, 15, 18, 17]
import matplotlib.pyplot as plt
plt.hist(average_speed, bins=6)
plt.show()
# As you can see, the values in the list are counted into the nearest bin.
# If we have fewer bins, each bin will have a higher count (because it's showing all of the values that fall into it).
# With more bins, the counts are lower, because each bin contains fewer values.
plt.hist(average_speed, bins=4)
plt.show()
plt.hist(average_speed, bins=2)
plt.show()
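# A quick numeric check of the binning described above. plt.hist uses numpy's histogram
# computation under the hood, so the counts printed here correspond to the bar heights in the plot.
import numpy
counts, edges = numpy.histogram(average_speed, bins=4)
print(counts)   # how many speeds fall into each of the 4 bins
print(edges)    # the 5 bin edges, spanning min(average_speed) to max(average_speed)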
## 8. Skew ##
# Some numpy arrays are already loaded in, and we'll make some plots with them.
# The arrays contain student test scores from an exam, on a 0-100 scale.
import matplotlib.pyplot as plt
# See how there is a long slope to the left?
# The data is concentrated in the right part of the distribution, but some people also scored poorly.
# This plot is negatively skewed.
plt.hist(test_scores_negative)
plt.show()
# This plot has a long slope to the right.
# Most students did poorly, but a few did really well.
# This is positively skewed.
plt.hist(test_scores_positive)
plt.show()
# This plot has no skew either way -- most of the values are in the center, and there is no long slope either way.
# It is an unskewed distribution.
plt.hist(test_scores_normal)
plt.show()
# We can test how skewed a distribution is using the skew function.
# A positive value means positive skew, a negative value means negative skew, and close to zero means no skew.
from scipy.stats import skew
positive_skew = skew(test_scores_positive)
negative_skew = skew(test_scores_negative)
no_skew = skew(test_scores_normal)
## 9. Kurtosis ##
import matplotlib.pyplot as plt
# This plot is short, making it platykurtic
# See how the values are distributed pretty evenly, and there isn't a huge cluster in the middle?
# Students had a wide variation in their performance
plt.hist(test_scores_platy)
plt.show()
# This plot is tall, and is leptokurtic
# Most students did very similarly to the others
plt.hist(test_scores_lepto)
plt.show()
# This plot is in between, and is mesokurtic
plt.hist(test_scores_meso)
plt.show()
# We can measure kurtosis with the kurtosis function
# Negative values indicate platykurtic distributions, positive values indicate leptokurtic distributions, and values close to 0 are mesokurtic
from scipy.stats import kurtosis
kurt_platy = kurtosis(test_scores_platy)
kurt_lepto = kurtosis(test_scores_lepto)
kurt_meso = kurtosis(test_scores_meso)
## 10. Modality ##
import matplotlib.pyplot as plt
# This plot has one mode, making it unimodal
plt.hist(test_scores_uni)
plt.show()
# This plot has two peaks, and is bimodal
# This could happen if one group of students learned the material, and one learned something else, for example.
plt.hist(test_scores_bi)
plt.show()
# More than one peak means that the plot is multimodal
# We can't easily measure the modality of a plot, like we can with kurtosis or skew.
# Often, the best way to detect multimodality is to observe the plot.
plt.hist(test_scores_multi)
plt.show()
## 11. Measures of central tendency ##
import matplotlib.pyplot as plt
# We're going to put a line over our plot that shows the mean.
# This is the same histogram we plotted for skew a few screens ago
plt.hist(test_scores_normal)
# We can use the .mean() method of a numpy array to compute the mean
mean_test_score = test_scores_normal.mean()
# The axvline function will plot a vertical line over an existing plot
plt.axvline(mean_test_score)
# Now we can show the plot and clear the figure
plt.show()
# When we plot test_scores_negative, a very negatively skewed distribution, we see that the mean is pulled to the left by the small values there.
# The mean can be changed easily by very large or very small values.
# This can make it misleading with distributions that are very skewed, when we expect the mean to be the center.
plt.hist(test_scores_negative)
plt.axvline(test_scores_negative.mean())
plt.show()
# We can do the same with the positive side
# See how the very high values pull the mean to the right more than you would expect?
plt.hist(test_scores_positive)
plt.axvline(test_scores_positive.mean())
plt.show()
mean_normal = sum(test_scores_normal)/len(test_scores_normal)
mean_negative = sum(test_scores_negative)/len(test_scores_negative)
mean_positive = sum(test_scores_positive)/len(test_scores_positive)
## 12. The median ##
# Let's plot the mean and median side by side in a negatively skewed distribution.
# Sadly, arrays don't have a nice median method, so we have to use a numpy function to compute it.
import numpy
import matplotlib.pyplot as plt
# Plot the histogram
plt.hist(test_scores_negative)
# Compute the median
median = numpy.median(test_scores_negative)
# Plot the median in blue (the color argument of "b" means blue)
plt.axvline(median, color="b")
# Plot the mean in red
plt.axvline(test_scores_negative.mean(), color="r")
# See how the median is further to the right than the mean?
# It's less sensitive to outliers, and isn't pulled to the left.
plt.show()
plt.hist(test_scores_positive)
p_median = numpy.median(test_scores_positive)
plt.axvline(p_median,color = 'b')
plt.axvline(test_scores_positive.mean(),color = 'r')
plt.show()
## 14. Cleaning missing data ##
import pandas
f = "titanic_survival.csv"
titanic_survival = pandas.read_csv(f)
# Luckily, pandas dataframes have a method that can drop rows that have missing data
# Let's look at how big the dataframe is first
print(titanic_survival.shape)
# There were 1310 passengers on the titanic, according to our data
# Now let's drop any row with missing data
# The dropna method on dataframes will do this for us
# Any row with any missing values will be removed
new_titanic_survival = titanic_survival.dropna()
# Hmm, it looks like we were too zealous with dropping rows with na values
# We now have no rows in our dataframe
# This is because some of the later columns, which aren't immediately relevant to our analysis, have a lot of missing values
print(new_titanic_survival.shape)
# We can use the subset keyword argument to the dropna method to only drop rows if there are na values in certain columns
# This will drop any row where the embarkation port (where people boarded the Titanic), or cabin number is missing
new_titanic_survival = titanic_survival.dropna(subset=["embarked", "cabin"])
# This is much better -- we have removed only the rows that we need to remove.
print(new_titanic_survival.shape)
new_titanic_survival = titanic_survival.dropna(subset=['age','sex'])
print(new_titanic_survival.shape)
## 15. Plotting age ##
# The cleaned up data has been loaded into the new_titanic_survival variable
import matplotlib.pyplot as plt
import numpy
plt.hist(new_titanic_survival['age'])
p_median = numpy.median(new_titanic_survival['age'])
plt.axvline(p_median,color = 'b')
plt.axvline(new_titanic_survival['age'].mean(),color = 'r')
plt.show()
## 16. Calculating indices for age ##
from scipy.stats import skew,kurtosis
mean_age = new_titanic_survival['age'].mean()
median_age = numpy.median(new_titanic_survival['age'])
skew_age = skew(new_titanic_survival['age'])
kurtosis_age = kurtosis(new_titanic_survival['age'])
|
|
from __future__ import division, print_function
import numpy as np
import numpy.ctypeslib as npc
from ctypes import cast, POINTER, Structure, c_int, c_float, c_double
c_float_p = POINTER(c_float)
c_double_p = POINTER(c_double)
from .vl_ctypes import LIB, c_to_np_types
from .utils import as_float_image
np_float = c_to_np_types[c_float]
np_double = c_to_np_types[c_double]
class VLDsiftKeypoint(Structure):
_fields_ = [
('x', c_double),
('y', c_double),
('s', c_double),
('norm', c_double),
]
class VLDsiftDescriptorGeometry(Structure):
_fields_ = [
('numBinT', c_int),
('numBinX', c_int),
('numBinY', c_int),
('binSizeX', c_int),
('binSizeY', c_int),
]
class VLDsiftFilter(Structure):
_fields_ = [
('imWidth', c_int),
('imHeight', c_int),
('stepX', c_int),
('stepY', c_int),
('boundMinX', c_int),
('boundMinY', c_int),
('boundMaxX', c_int),
('boundMaxY', c_int),
('geom', VLDsiftDescriptorGeometry),
('useFlatWindow', c_int),
('windowSize', c_double),
('numFrames', c_int),
('descrSize', c_int),
('frames', POINTER(VLDsiftKeypoint)),
('descrs', c_float_p),
('numBinAlloc', c_int),
('numFrameAlloc', c_int),
('numGradAlloc', c_int),
('grads', POINTER(c_float_p)),
('convTmp1', c_float_p),
('convTmp2', c_float_p),
]
VLDsiftFilter_p = POINTER(VLDsiftFilter)
### functions in the shared object
# most of the utility functions are actually inlined and so not in the so...
vl_dsift_new = LIB['vl_dsift_new']
vl_dsift_new.restype = VLDsiftFilter_p
vl_dsift_new.argtypes = [c_int, c_int]
vl_dsift_new_basic = LIB['vl_dsift_new_basic']
vl_dsift_new_basic.restype = VLDsiftFilter_p
vl_dsift_new_basic.argtypes = [c_int, c_int, c_int, c_int]
vl_dsift_delete = LIB['vl_dsift_delete']
vl_dsift_delete.restype = None
vl_dsift_delete.argtypes = [VLDsiftFilter_p]
vl_dsift_process = LIB['vl_dsift_process']
vl_dsift_process.restype = None
vl_dsift_process.argtypes = [VLDsiftFilter_p, npc.ndpointer(dtype=np_float)]
_vl_dsift_update_buffers = LIB['_vl_dsift_update_buffers']
_vl_dsift_update_buffers.restype = None
_vl_dsift_update_buffers.argtypes = [VLDsiftFilter_p]
# near-direct port of the c function
# should be vectorized, if we were going to call it more than once...
def vl_dsift_transpose_descriptor(dest, src, num_bin_t, num_bin_x, num_bin_y):
for y in xrange(num_bin_y):
for x in xrange(num_bin_x):
offset = num_bin_t * (x + y * num_bin_x)
offsetT = num_bin_t * (y + x * num_bin_x)
for t in xrange(num_bin_t):
tT = num_bin_t // 4 - t
dest[offsetT + (tT + num_bin_t) % num_bin_t] = src[offset + t]
def vl_dsift(data, fast=False, norm=False, bounds=None, size=3, step=1,
window_size=None, float_descriptors=False,
verbose=False, matlab_style=True):
'''
Dense sift descriptors from an image.
Returns:
frames: num_frames x (2 or 3) matrix of x, y, (norm)
descrs: num_frames x 128 matrix of descriptors
'''
if not matlab_style:
import warnings
warnings.warn("matlab_style=False gets different results than matlab, "
"not sure why or how incorrect they are.")
order = 'F' if matlab_style else 'C'
data = as_float_image(data, dtype=np.float32, order=order)
if data.ndim != 2:
raise TypeError("data should be a 2d array")
if window_size is not None:
assert np.isscalar(window_size) and window_size >= 0
# construct the dsift object
M, N = data.shape
dsift_p = vl_dsift_new_basic(M, N, step, size)
try:
dsift = dsift_p.contents
# set parameters
if bounds is not None:
if matlab_style:
y0, x0, y1, x1 = bounds # transposed
else:
x0, y0, x1, y1 = bounds
dsift.boundMinX = int(max(x0, 0))
dsift.boundMinY = int(max(y0, 0))
dsift.boundMaxX = int(min(x1, M - 1))
dsift.boundMaxY = int(min(y1, N - 1))
_vl_dsift_update_buffers(dsift_p)
dsift.useFlatWindow = fast
if window_size is not None:
dsift.windowSize = window_size
# get calculated parameters
descr_size = dsift.descrSize
num_frames = dsift.numFrames
geom = dsift.geom
if verbose:
pr = lambda *a, **k: print('vl_dsift:', *a, **k)
pr("image size [W, H] = [{}, {}]".format(N, M))
x0 = dsift.boundMinX + 1
y0 = dsift.boundMinY + 1
x1 = dsift.boundMaxX + 1
y1 = dsift.boundMaxY + 1
bound_args = [y0, x0, y1, x1] if matlab_style else [x0, y0, x1, y1]
pr("bounds: [minX,minY,maxX,maxY] = [{}, {}, {}, {}]"
.format(*bound_args))
pr("subsampling steps: stepX={}, stepY={}".format(
dsift.stepX, dsift.stepY))
pr("num bins: [numBinT, numBinX, numBinY] = [{}, {}, {}]"
.format(geom.numBinT, geom.numBinX, geom.numBinY))
pr("descriptor size: {}".format(descr_size))
pr("bin sizes: [binSizeX, binSizeY] = [{}, {}]".format(
geom.binSizeX, geom.binSizeY))
pr("flat window: {}".format(bool(fast)))
pr("window size: {}".format(dsift.windowSize))
pr("num of features: {}".format(num_frames))
# do the actual processing
vl_dsift_process(dsift_p, data)
# copy frames' locations, norms out
# the frames are a structure of just 4 doubles (VLDsiftKeypoint),
# which luckily looks exactly like an array of doubles. :)
# NOTE: this might be platform/compiler-dependent...but it works with
# the provided binaries on os x, at least
frames_p = cast(dsift.frames, c_double_p)
frames_p_a = npc.as_array(frames_p, shape=(num_frames, 4))
cols = [1, 0] if matlab_style else [0, 1]
if norm:
cols.append(3)
frames = np.require(frames_p_a[:, cols], requirements=['C', 'O'])
# copy descriptors into a new array
descrs_p = npc.as_array(dsift.descrs, shape=(num_frames, descr_size))
descrs = descrs_p * 512
assert descrs.flags.owndata
np.minimum(descrs, 255, out=descrs)
if not float_descriptors:
descrs = descrs.astype(np.uint8) # TODO: smarter about copying?
if matlab_style:
new_order = np.empty(descr_size, dtype=int)
vl_dsift_transpose_descriptor(new_order, np.arange(descr_size),
geom.numBinT, geom.numBinX, geom.numBinY)
descrs = descrs[:, new_order]
# the old, super-slow way:
## # gross pointer arithmetic to get the relevant descriptor
## descrs_addr = addressof(descrs.contents)
## descrs_step = descr_size * sizeof(c_float)
##
## for k in range(num_frames):
## out_frames[:2, k] = [frames[k].y + 1, frames[k].x + 1]
## if norm: # there's an implied / 2 in norm, because of clipping
## out_frames[2, k] = frames[k].norm
##
## # gross pointer arithmetic to get the relevant descriptor
## the_descr = cast(descrs_addr + k * descrs_step, c_float_p)
## transposed = vl_dsift_transpose_descriptor(
## the_descr,
## geom.numBinT, geom.numBinX, geom.numBinY)
## out_descrs[:, k] = np.minimum(512. * transposed, 255.)
return frames, descrs
finally:
vl_dsift_delete(dsift_p)
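# A minimal usage sketch (assumes the vlfeat shared library wrapped by .vl_ctypes can be loaded;
# the random image below is purely illustrative):
if __name__ == '__main__':
    _img = np.random.rand(48, 64)
    _frames, _descrs = vl_dsift(_img, step=4, size=8, norm=True)
    print(_frames.shape, _descrs.shape)   # (num_frames, 3) and (num_frames, 128) by default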
|
|
# Copyright 2012 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
import sys
import eventlet
eventlet.monkey_patch()
import netaddr
from oslo.config import cfg
from neutron.agent.common import config
from neutron.agent.linux import dhcp
from neutron.agent.linux import external_process
from neutron.agent.linux import interface
from neutron.agent.linux import ovs_lib # noqa
from neutron.agent import rpc as agent_rpc
from neutron.common import config as common_config
from neutron.common import constants
from neutron.common import exceptions
from neutron.common import rpc as n_rpc
from neutron.common import topics
from neutron.common import utils
from neutron import context
from neutron import manager
from neutron.openstack.common import importutils
from neutron.openstack.common import log as logging
from neutron.openstack.common import loopingcall
from neutron.openstack.common import service
from neutron import service as neutron_service
LOG = logging.getLogger(__name__)
class DhcpAgent(manager.Manager):
OPTS = [
cfg.IntOpt('resync_interval', default=5,
help=_("Interval to resync.")),
cfg.StrOpt('dhcp_driver',
default='neutron.agent.linux.dhcp.Dnsmasq',
help=_("The driver used to manage the DHCP server.")),
cfg.BoolOpt('enable_isolated_metadata', default=False,
help=_("Support Metadata requests on isolated networks.")),
cfg.BoolOpt('enable_metadata_network', default=False,
help=_("Allows for serving metadata requests from a "
"dedicated network. Requires "
"enable_isolated_metadata = True")),
cfg.IntOpt('num_sync_threads', default=4,
help=_('Number of threads to use during sync process.')),
cfg.StrOpt('metadata_proxy_socket',
default='$state_path/metadata_proxy',
help=_('Location of Metadata Proxy UNIX domain '
'socket')),
]
def __init__(self, host=None):
super(DhcpAgent, self).__init__(host=host)
self.needs_resync_reasons = []
self.conf = cfg.CONF
self.cache = NetworkCache()
self.root_helper = config.get_root_helper(self.conf)
self.dhcp_driver_cls = importutils.import_class(self.conf.dhcp_driver)
ctx = context.get_admin_context_without_session()
self.plugin_rpc = DhcpPluginApi(topics.PLUGIN,
ctx, self.conf.use_namespaces)
# create dhcp dir to store dhcp info
dhcp_dir = os.path.dirname("/%s/dhcp/" % self.conf.state_path)
if not os.path.isdir(dhcp_dir):
os.makedirs(dhcp_dir, 0o755)
self.dhcp_version = self.dhcp_driver_cls.check_version()
self._populate_networks_cache()
def _populate_networks_cache(self):
"""Populate the networks cache when the DHCP-agent starts."""
try:
existing_networks = self.dhcp_driver_cls.existing_dhcp_networks(
self.conf,
self.root_helper
)
for net_id in existing_networks:
net = dhcp.NetModel(self.conf.use_namespaces,
{"id": net_id,
"subnets": [],
"ports": []})
self.cache.put(net)
except NotImplementedError:
# just go ahead with an empty networks cache
LOG.debug(
_("The '%s' DHCP-driver does not support retrieving of a "
"list of existing networks"),
self.conf.dhcp_driver
)
def after_start(self):
self.run()
LOG.info(_("DHCP agent started"))
def run(self):
"""Activate the DHCP agent."""
self.sync_state()
self.periodic_resync()
def call_driver(self, action, network, **action_kwargs):
"""Invoke an action on a DHCP driver instance."""
LOG.debug(_('Calling driver for network: %(net)s action: %(action)s'),
{'net': network.id, 'action': action})
try:
# the Driver expects something that is duck typed similar to
# the base models.
driver = self.dhcp_driver_cls(self.conf,
network,
self.root_helper,
self.dhcp_version,
self.plugin_rpc)
getattr(driver, action)(**action_kwargs)
return True
except exceptions.Conflict:
# No need to resync here, the agent will receive the event related
# to a status update for the network
LOG.warning(_('Unable to %(action)s dhcp for %(net_id)s: there is '
'a conflict with its current state; please check '
'that the network and/or its subnet(s) still exist.')
% {'net_id': network.id, 'action': action})
except Exception as e:
self.schedule_resync(e)
if (isinstance(e, n_rpc.RemoteError)
and e.exc_type == 'NetworkNotFound'
or isinstance(e, exceptions.NetworkNotFound)):
LOG.warning(_("Network %s has been deleted."), network.id)
else:
LOG.exception(_('Unable to %(action)s dhcp for %(net_id)s.')
% {'net_id': network.id, 'action': action})
def schedule_resync(self, reason):
"""Schedule a resync for a given reason."""
self.needs_resync_reasons.append(reason)
@utils.synchronized('dhcp-agent')
def sync_state(self):
"""Sync the local DHCP state with Neutron."""
LOG.info(_('Synchronizing state'))
pool = eventlet.GreenPool(cfg.CONF.num_sync_threads)
known_network_ids = set(self.cache.get_network_ids())
try:
active_networks = self.plugin_rpc.get_active_networks_info()
active_network_ids = set(network.id for network in active_networks)
for deleted_id in known_network_ids - active_network_ids:
try:
self.disable_dhcp_helper(deleted_id)
except Exception as e:
self.schedule_resync(e)
LOG.exception(_('Unable to sync network state on deleted '
'network %s'), deleted_id)
for network in active_networks:
pool.spawn(self.safe_configure_dhcp_for_network, network)
pool.waitall()
LOG.info(_('Synchronizing state complete'))
except Exception as e:
self.schedule_resync(e)
LOG.exception(_('Unable to sync network state.'))
@utils.exception_logger()
def _periodic_resync_helper(self):
"""Resync the dhcp state at the configured interval."""
while True:
eventlet.sleep(self.conf.resync_interval)
if self.needs_resync_reasons:
# be careful to avoid a race with additions to list
# from other threads
reasons = self.needs_resync_reasons
self.needs_resync_reasons = []
for r in reasons:
LOG.debug(_("resync: %(reason)s"),
{"reason": r})
self.sync_state()
def periodic_resync(self):
"""Spawn a thread to periodically resync the dhcp state."""
eventlet.spawn(self._periodic_resync_helper)
def safe_get_network_info(self, network_id):
try:
network = self.plugin_rpc.get_network_info(network_id)
if not network:
LOG.warn(_('Network %s has been deleted.'), network_id)
return network
except Exception as e:
self.schedule_resync(e)
LOG.exception(_('Network %s info call failed.'), network_id)
def enable_dhcp_helper(self, network_id):
"""Enable DHCP for a network that meets enabling criteria."""
network = self.safe_get_network_info(network_id)
if network:
self.configure_dhcp_for_network(network)
@utils.exception_logger()
def safe_configure_dhcp_for_network(self, network):
try:
self.configure_dhcp_for_network(network)
except (exceptions.NetworkNotFound, RuntimeError):
LOG.warn(_('Network %s may have been deleted and its resources '
'may have already been disposed.'), network.id)
def configure_dhcp_for_network(self, network):
if not network.admin_state_up:
return
for subnet in network.subnets:
if subnet.enable_dhcp:
if self.call_driver('enable', network):
if (self.conf.use_namespaces and
self.conf.enable_isolated_metadata):
self.enable_isolated_metadata_proxy(network)
self.cache.put(network)
break
def disable_dhcp_helper(self, network_id):
"""Disable DHCP for a network known to the agent."""
network = self.cache.get_network_by_id(network_id)
if network:
if (self.conf.use_namespaces and
self.conf.enable_isolated_metadata):
self.disable_isolated_metadata_proxy(network)
if self.call_driver('disable', network):
self.cache.remove(network)
def refresh_dhcp_helper(self, network_id):
"""Refresh or disable DHCP for a network depending on the current state
of the network.
"""
old_network = self.cache.get_network_by_id(network_id)
if not old_network:
            # DHCP is not currently running for this network.
return self.enable_dhcp_helper(network_id)
network = self.safe_get_network_info(network_id)
if not network:
return
old_cidrs = set(s.cidr for s in old_network.subnets if s.enable_dhcp)
new_cidrs = set(s.cidr for s in network.subnets if s.enable_dhcp)
if new_cidrs and old_cidrs == new_cidrs:
self.call_driver('reload_allocations', network)
self.cache.put(network)
elif new_cidrs:
if self.call_driver('restart', network):
self.cache.put(network)
else:
self.disable_dhcp_helper(network.id)
@utils.synchronized('dhcp-agent')
def network_create_end(self, context, payload):
"""Handle the network.create.end notification event."""
network_id = payload['network']['id']
self.enable_dhcp_helper(network_id)
@utils.synchronized('dhcp-agent')
def network_update_end(self, context, payload):
"""Handle the network.update.end notification event."""
network_id = payload['network']['id']
if payload['network']['admin_state_up']:
self.enable_dhcp_helper(network_id)
else:
self.disable_dhcp_helper(network_id)
@utils.synchronized('dhcp-agent')
def network_delete_end(self, context, payload):
"""Handle the network.delete.end notification event."""
self.disable_dhcp_helper(payload['network_id'])
@utils.synchronized('dhcp-agent')
def subnet_update_end(self, context, payload):
"""Handle the subnet.update.end notification event."""
network_id = payload['subnet']['network_id']
self.refresh_dhcp_helper(network_id)
# Use the update handler for the subnet create event.
subnet_create_end = subnet_update_end
@utils.synchronized('dhcp-agent')
def subnet_delete_end(self, context, payload):
"""Handle the subnet.delete.end notification event."""
subnet_id = payload['subnet_id']
network = self.cache.get_network_by_subnet_id(subnet_id)
if network:
self.refresh_dhcp_helper(network.id)
@utils.synchronized('dhcp-agent')
def port_update_end(self, context, payload):
"""Handle the port.update.end notification event."""
updated_port = dhcp.DictModel(payload['port'])
network = self.cache.get_network_by_id(updated_port.network_id)
if network:
self.cache.put_port(updated_port)
self.call_driver('reload_allocations', network)
# Use the update handler for the port create event.
port_create_end = port_update_end
@utils.synchronized('dhcp-agent')
def port_delete_end(self, context, payload):
"""Handle the port.delete.end notification event."""
port = self.cache.get_port_by_id(payload['port_id'])
if port:
network = self.cache.get_network_by_id(port.network_id)
self.cache.remove_port(port)
self.call_driver('reload_allocations', network)
def enable_isolated_metadata_proxy(self, network):
# The proxy might work for either a single network
# or all the networks connected via a router
# to the one passed as a parameter
neutron_lookup_param = '--network_id=%s' % network.id
meta_cidr = netaddr.IPNetwork(dhcp.METADATA_DEFAULT_CIDR)
has_metadata_subnet = any(netaddr.IPNetwork(s.cidr) in meta_cidr
for s in network.subnets)
if (self.conf.enable_metadata_network and has_metadata_subnet):
router_ports = [port for port in network.ports
if (port.device_owner ==
constants.DEVICE_OWNER_ROUTER_INTF)]
if router_ports:
# Multiple router ports should not be allowed
if len(router_ports) > 1:
LOG.warning(_("%(port_num)d router ports found on the "
"metadata access network. Only the port "
"%(port_id)s, for router %(router_id)s "
"will be considered"),
{'port_num': len(router_ports),
'port_id': router_ports[0].id,
'router_id': router_ports[0].device_id})
neutron_lookup_param = ('--router_id=%s' %
router_ports[0].device_id)
def callback(pid_file):
metadata_proxy_socket = cfg.CONF.metadata_proxy_socket
proxy_cmd = ['neutron-ns-metadata-proxy',
'--pid_file=%s' % pid_file,
'--metadata_proxy_socket=%s' % metadata_proxy_socket,
neutron_lookup_param,
'--state_path=%s' % self.conf.state_path,
'--metadata_port=%d' % dhcp.METADATA_PORT]
proxy_cmd.extend(config.get_log_args(
cfg.CONF, 'neutron-ns-metadata-proxy-%s.log' % network.id))
return proxy_cmd
pm = external_process.ProcessManager(
self.conf,
network.id,
self.root_helper,
network.namespace)
pm.enable(callback)
def disable_isolated_metadata_proxy(self, network):
pm = external_process.ProcessManager(
self.conf,
network.id,
self.root_helper,
network.namespace)
pm.disable()
class DhcpPluginApi(n_rpc.RpcProxy):
"""Agent side of the dhcp rpc API.
API version history:
1.0 - Initial version.
1.1 - Added get_active_networks_info, create_dhcp_port,
and update_dhcp_port methods.
"""
BASE_RPC_API_VERSION = '1.1'
def __init__(self, topic, context, use_namespaces):
super(DhcpPluginApi, self).__init__(
topic=topic, default_version=self.BASE_RPC_API_VERSION)
self.context = context
self.host = cfg.CONF.host
self.use_namespaces = use_namespaces
def get_active_networks_info(self):
"""Make a remote process call to retrieve all network info."""
networks = self.call(self.context,
self.make_msg('get_active_networks_info',
host=self.host))
return [dhcp.NetModel(self.use_namespaces, n) for n in networks]
def get_network_info(self, network_id):
"""Make a remote process call to retrieve network info."""
network = self.call(self.context,
self.make_msg('get_network_info',
network_id=network_id,
host=self.host))
if network:
return dhcp.NetModel(self.use_namespaces, network)
def get_dhcp_port(self, network_id, device_id):
"""Make a remote process call to get the dhcp port."""
port = self.call(self.context,
self.make_msg('get_dhcp_port',
network_id=network_id,
device_id=device_id,
host=self.host))
if port:
return dhcp.DictModel(port)
def create_dhcp_port(self, port):
"""Make a remote process call to create the dhcp port."""
port = self.call(self.context,
self.make_msg('create_dhcp_port',
port=port,
host=self.host))
if port:
return dhcp.DictModel(port)
def update_dhcp_port(self, port_id, port):
"""Make a remote process call to update the dhcp port."""
port = self.call(self.context,
self.make_msg('update_dhcp_port',
port_id=port_id,
port=port,
host=self.host))
if port:
return dhcp.DictModel(port)
def release_dhcp_port(self, network_id, device_id):
"""Make a remote process call to release the dhcp port."""
return self.call(self.context,
self.make_msg('release_dhcp_port',
network_id=network_id,
device_id=device_id,
host=self.host))
def release_port_fixed_ip(self, network_id, device_id, subnet_id):
"""Make a remote process call to release a fixed_ip on the port."""
return self.call(self.context,
self.make_msg('release_port_fixed_ip',
network_id=network_id,
subnet_id=subnet_id,
device_id=device_id,
host=self.host))
class NetworkCache(object):
"""Agent cache of the current network state."""
def __init__(self):
self.cache = {}
self.subnet_lookup = {}
self.port_lookup = {}
def get_network_ids(self):
return self.cache.keys()
def get_network_by_id(self, network_id):
return self.cache.get(network_id)
def get_network_by_subnet_id(self, subnet_id):
return self.cache.get(self.subnet_lookup.get(subnet_id))
def get_network_by_port_id(self, port_id):
return self.cache.get(self.port_lookup.get(port_id))
def put(self, network):
if network.id in self.cache:
self.remove(self.cache[network.id])
self.cache[network.id] = network
for subnet in network.subnets:
self.subnet_lookup[subnet.id] = network.id
for port in network.ports:
self.port_lookup[port.id] = network.id
def remove(self, network):
del self.cache[network.id]
for subnet in network.subnets:
del self.subnet_lookup[subnet.id]
for port in network.ports:
del self.port_lookup[port.id]
def put_port(self, port):
network = self.get_network_by_id(port.network_id)
for index in range(len(network.ports)):
if network.ports[index].id == port.id:
network.ports[index] = port
break
else:
network.ports.append(port)
self.port_lookup[port.id] = network.id
def remove_port(self, port):
network = self.get_network_by_port_id(port.id)
for index in range(len(network.ports)):
if network.ports[index] == port:
del network.ports[index]
del self.port_lookup[port.id]
break
def get_port_by_id(self, port_id):
network = self.get_network_by_port_id(port_id)
if network:
for port in network.ports:
if port.id == port_id:
return port
def get_state(self):
net_ids = self.get_network_ids()
num_nets = len(net_ids)
num_subnets = 0
num_ports = 0
for net_id in net_ids:
network = self.get_network_by_id(net_id)
num_subnets += len(network.subnets)
num_ports += len(network.ports)
return {'networks': num_nets,
'subnets': num_subnets,
'ports': num_ports}
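# A minimal sketch of how NetworkCache keeps its reverse lookups in sync. The stub objects
# below simply stand in for dhcp.NetModel/DictModel instances and are not part of the agent:
if __name__ == '__main__':
    class _Stub(object):
        def __init__(self, **kwargs):
            self.__dict__.update(kwargs)
    _net = _Stub(id='net-1',
                 subnets=[_Stub(id='subnet-1')],
                 ports=[_Stub(id='port-1', network_id='net-1')])
    _cache = NetworkCache()
    _cache.put(_net)
    print(_cache.get_network_by_subnet_id('subnet-1') is _net)   # True
    print(_cache.get_port_by_id('port-1').id)                    # port-1
    print(_cache.get_state())  # {'networks': 1, 'subnets': 1, 'ports': 1}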
class DhcpAgentWithStateReport(DhcpAgent):
def __init__(self, host=None):
super(DhcpAgentWithStateReport, self).__init__(host=host)
self.state_rpc = agent_rpc.PluginReportStateAPI(topics.PLUGIN)
self.agent_state = {
'binary': 'neutron-dhcp-agent',
'host': host,
'topic': topics.DHCP_AGENT,
'configurations': {
'dhcp_driver': cfg.CONF.dhcp_driver,
'use_namespaces': cfg.CONF.use_namespaces,
'dhcp_lease_duration': cfg.CONF.dhcp_lease_duration},
'start_flag': True,
'agent_type': constants.AGENT_TYPE_DHCP}
report_interval = cfg.CONF.AGENT.report_interval
self.use_call = True
if report_interval:
self.heartbeat = loopingcall.FixedIntervalLoopingCall(
self._report_state)
self.heartbeat.start(interval=report_interval)
def _report_state(self):
try:
self.agent_state.get('configurations').update(
self.cache.get_state())
ctx = context.get_admin_context_without_session()
self.state_rpc.report_state(ctx, self.agent_state, self.use_call)
self.use_call = False
except AttributeError:
# This means the server does not support report_state
LOG.warn(_("Neutron server does not support state report."
" State report for this agent will be disabled."))
self.heartbeat.stop()
self.run()
return
except Exception:
LOG.exception(_("Failed reporting state!"))
return
if self.agent_state.pop('start_flag', None):
self.run()
def agent_updated(self, context, payload):
"""Handle the agent_updated notification event."""
self.schedule_resync(_("Agent updated: %(payload)s") %
{"payload": payload})
LOG.info(_("agent_updated by server side %s!"), payload)
def after_start(self):
LOG.info(_("DHCP agent started"))
def register_options():
cfg.CONF.register_opts(DhcpAgent.OPTS)
config.register_interface_driver_opts_helper(cfg.CONF)
config.register_use_namespaces_opts_helper(cfg.CONF)
config.register_agent_state_opts_helper(cfg.CONF)
config.register_root_helper(cfg.CONF)
cfg.CONF.register_opts(dhcp.OPTS)
cfg.CONF.register_opts(interface.OPTS)
def main():
register_options()
common_config.init(sys.argv[1:])
config.setup_logging(cfg.CONF)
server = neutron_service.Service.create(
binary='neutron-dhcp-agent',
topic=topics.DHCP_AGENT,
report_interval=cfg.CONF.AGENT.report_interval,
manager='neutron.agent.dhcp_agent.DhcpAgentWithStateReport')
service.launch(server).wait()
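# In a deployment this module is normally launched through the neutron-dhcp-agent console
# script, which calls main(); the guard below is just a sketch of running it directly.
if __name__ == '__main__':
    main()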
|
|
import pandas as pd
# smart frame functions (to make dataframes themselves 'smart')
def is_smart_frame(df):
return hasattr(df, '__sf_is_smart_frame')
def make_df_smart(df, suffix):
try:
if is_smart_frame(df):
print('WHOA THIS DATAFRAME IS ALREADY SMART')
return df
except:
pass
# okay, so it isn't already a 'smart' frame.
df.__sf_suffix = suffix
df.__sf_foreign_keys = dict()
df.__sf_is_smart_frame = True
df._metadata.append('__sf_suffix')
df._metadata.append('__sf_foreign_keys')
df._metadata.append('__sf_is_smart_frame')
return df
def suffix(smart_df):
return smart_df.__sf_suffix
def fkeys(smart_df): # foreign key
return smart_df.__sf_foreign_keys
def get_fkey_for_dfid(smart_df, target_df_id):
return fkeys(smart_df)[target_df_id]
def get_fkey(smart_df, target_df):
return get_fkey_for_dfid(smart_df, id(target_df))
def add_fkey_for_dfid(smart_df, target_df_id, fkey):
fkeys(smart_df)[target_df_id] = fkey
def add_fkey(smart_df, target_df, fkey): # gets id and passes along
add_fkey_for_dfid(smart_df, id(target_df), fkey)
def sf_has_target(smart_df, target_df):
if id(target_df) in fkeys(smart_df):
return True
return False
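# A small sketch of the module-level helpers above; the column and key names are made up:
if __name__ == '__main__':
    _orders = make_df_smart(pd.DataFrame({'customer_id': [1, 2], 'total': [9.5, 3.0]}), '_orders')
    _customers = pd.DataFrame({'name': ['a', 'b']}, index=[1, 2])
    add_fkey(_orders, _customers, 'customer_id')   # _orders.customer_id points at _customers' index
    print(sf_has_target(_orders, _customers))      # True
    print(get_fkey(_orders, _customers))           # 'customer_id'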
class DataframeSmartMerger(object):
def __init__(self):
self._smart_frames = dict()
self._names_of_dfs_known_to_be_smart = dict()
self._smart_frames_which_have_a_foreign_key_for_this_dfid = dict()
def add(self, df, name, suffix=None):
print('adding dataframe ' + name)
if not suffix:
suffix = name
df_id = id(df)
# print('df {} has id {}'.format(name, df_id))
if name in self._smart_frames:
print('WARNING: Overwriting known smart frame!!!!!')
# TODO: fix up all references and/or disallow this
self._smart_frames[name] = make_df_smart(df, suffix)
self._names_of_dfs_known_to_be_smart[id(df)] = name
# print('Adding smart frame ' + name + ' with id ' + str(id(df)))
# this just gets a dataframe by name
def __getitem__(self, name):
return self._smart_frames[name]
def __iter__(self):
return iter(self._smart_frames.keys())
def _convert_to_name(self, df_or_name):
        # if it isn't a name, it's a dataframe and can be looked up in reverse
try:
return self._names_of_dfs_known_to_be_smart[id(df_or_name)]
except:
return df_or_name # it needs to already be a name if it's not a dataframe
def _get_best_printable_name(self, df):
# this might already be a name
try:
# first guess is that it's a dataframe
return self.get_known_name(df)
except:
try:
# next guess is that it's an id
return self._names_of_dfs_known_to_be_smart[df]
except:
# last guess is that it is a dataframe that we don't know about
# print('couldnt return known name of id ' + str(id(df)))
return str(id(df))
def _get_smart_frame(self, df):
# this might be a name, or it might be an actual dataframe
df_name = self._convert_to_name(df)
try:
return self._smart_frames[df_name]
except KeyError:
return None
def _get_df_if_known_name(self, df):
try:
return self._smart_frames[df]
except:
# hopefully this is already a dataframe
return df
def _add_reverse_smart_merge(self, df_id, smart_frame):
df_name = self._get_best_printable_name(df_id)
if df_id not in self._smart_frames_which_have_a_foreign_key_for_this_dfid:
# print('creating reverse lookup smart frame list for ' + df_name)
self._smart_frames_which_have_a_foreign_key_for_this_dfid[df_id] = list()
# print('noting that df {} is known (a foreign key is possessed) by smart frame {}'.format(
# df_name,
# self._get_best_printable_name(smart_frame.df)))
sf_list = self._smart_frames_which_have_a_foreign_key_for_this_dfid[df_id]
sf_id = id(smart_frame)
contains = False
for item in sf_list:
iid = id(item)
if iid == sf_id:
contains = True
        if not contains:
self._smart_frames_which_have_a_foreign_key_for_this_dfid[df_id].append(smart_frame)
else:
# it's possible that two dataframes which can both be merged
# into the same other dataframe may get merged together.
# if this happens, there will be two foreign keys for a single
# dataframe.
# we don't need to record two instances of this, because we're just
# a convenience for notifying that other dataframe that the new
# merged dataframe exists and that it knows how to merge into it.
# Since they presumably shared the exact same key name, one of those
# key names will have been destroyed by the merge process,
# leaving the other one to be merged into.
# I *believe* that only adding one suffix at merge time
# should always preserve a valid foreign key for these sorts of
# situations. However, in cases where two foreign keys share
# the same name but don't refer to the same table, this will
# cause breakage. In this case, which is of course based on
# unfortunate data naming that could potentially be avoided,
# we could maybe check every time we do a merge to see if
# the columns have been renamed. If they have been renamed,
# we could add a new foreign key record to all SmartFrames
# with the new merged name. But for now we'll leave this as-is.
# print("WARNING - this smart frame {} has already been added for df {}".format(
# self._get_best_printable_name(smart_frame), df_name))
pass
# Registering a smart merge means declaring that the first dataframe
# has a foreign key that corresponds to the primary index of other_df.
# Smart merges will be performed between dataframes based on these declarations.
# Since the merge is performed by definition on the primary key of one dataframe,
# it is not currently supported to have more than one column that is a foreign
# key for a given other dataframe. Future versions may or may not support this
# additional semantic.
# NB: These may be names or dataframes, but the first one at least must
# be a known smart frame, or this will fail.
def register_smart_merge(self, df, foreign_key, other_df):
smart_frame = self._get_smart_frame(df)
other_df = self._get_df_if_known_name(other_df) # if it's just a name but we already know about it
self._register_smart_merge(smart_frame, foreign_key, id(other_df))
def _register_smart_merge(self, smart_frame, foreign_key, df_id):
# print('I declare that df ' + self._get_best_printable_name(smart_frame.df)
# + ' has an attribute ' + foreign_key + ' that allows it to join to '
# + self._get_best_printable_name(df_id) + '\'s primary key')
add_fkey_for_dfid(smart_frame, df_id, foreign_key)
self._add_reverse_smart_merge(df_id, smart_frame)
def get_known_names(self):
return self._smart_frames.keys()
def get_known_name(self, df):
return self._names_of_dfs_known_to_be_smart[id(df)]
# As long as one of these is a dataframe or dataframe name that is known
# by the DataFrameBrowser, and as long as that smart frame has a
# registered smart merge for the other dataframe, this should return
# a merged dataframe.
def smart_merge(self, df1, df2, name_callback=None,
id_callback=None, suffix_callback=None, preferred_df_to_suffix=None):
        if df1 is None or df2 is None:
# just die immediately. it's not worth dealing with this later
self[df1]
self[df2]
# when we get to a merge, we assume unless told otherwise that
# the caller wants df columns with matching names to be suffixed
# only in the names of df2.
if preferred_df_to_suffix is None or (id(preferred_df_to_suffix) != id(df1) and
id(preferred_df_to_suffix) != id(df2)):
preferred_df_to_suffix = df2
smart_frame_1 = self._get_smart_frame(df1)
smart_frame_2 = self._get_smart_frame(df2)
# we expect df1 to be a smart frame and therefore possibly the foreign key holder
if smart_frame_2 is not None and smart_frame_1 is None:
# print('### performing swap, because df1 is not "smart" at all')
# if it isn't a smart frame at all, but df2 is, we swap
temp = smart_frame_1
smart_frame_1 = smart_frame_2
smart_frame_2 = temp
elif smart_frame_1 is None and smart_frame_2 is None:
# TODO: we don't even have a smart frame. use new smart frame callbacks!
# (for now, we just die by trying and failing to 'get' df1)
print(df1, df2)
print('we can\'t find either of these as smart frames')
self._smart_frames[self.get_name(df1)]
# EARLY DEATH
#
# at this point we have ensured that at least one smart frame (smart_frame_1) exists
#
# Therefore we should not be using 'df1' anymore
df1 = None
if smart_frame_2 is not None:
# df2 may have been a known name instead of a df, so assign the actual dataframe
df2 = smart_frame_2
# we give preference to the first smart frame, if there are two
        if sf_has_target(smart_frame_1, df2):
            smart_frame_w_fkey = smart_frame_1
            df_w_primkey = df2
            # may be None if df2 is not a smart frame; that is checked before use below
            smart_frame_w_primkey = smart_frame_2
elif smart_frame_2 is not None and sf_has_target(smart_frame_2, smart_frame_1):
smart_frame_w_fkey = smart_frame_2
smart_frame_w_primkey = smart_frame_1
df_w_primkey = smart_frame_w_primkey
else:
# we don't know how to merge these either direction
# TODO: so perform 'merge clarification callback'
# (but for now we just raise an exception)
            print("we don't know how to merge these in either direction")
get_fkey(smart_frame_1, df2)
# EARLY DEATH
# get shortcut names for easier printing
df_w_primkey_name = self._get_best_printable_name(df_w_primkey)
df_w_fkey_name = self._get_best_printable_name(smart_frame_w_fkey)
#
# past here, we should not refer to anything except in terms of w_primkey and w_fkey
#
smart_frame_1 = None
smart_frame_2 = None
df2 = None
# this would be the place to precheck column names and do my own
# column renaming. and then we'd need a reverse-lookup by foreign key
# in the smart frames, so that we can add the updated foreign key.
# and maybe also check to see if this already has the one,
# because if it does we should eliminate the duplicate column.
# but is it really a duplicate? maybe not...
# now that we KNOW which direction to merge and how, so DO MERGE!
foreign_key = get_fkey(smart_frame_w_fkey, df_w_primkey)
# print('### merging {} with {}\'s primary key using fkey {}'.format(
# df_w_fkey_name, df_w_primkey_name, foreign_key))
if id(preferred_df_to_suffix) == id(smart_frame_w_fkey):
merged = smart_frame_w_fkey.merge(df_w_primkey,
left_on=foreign_key, right_index=True,
suffixes=(suffix(smart_frame_w_fkey), ''))
elif id(preferred_df_to_suffix) == id(df_w_primkey) and is_smart_frame(df_w_primkey):
merged = smart_frame_w_fkey.merge(df_w_primkey,
left_on=foreign_key, right_index=True,
suffixes=('', suffix(df_w_primkey)))
else:
merged = smart_frame_w_fkey.merge(df_w_primkey,
left_on=foreign_key, right_index=True,
suffixes=(suffix(smart_frame_w_fkey),
suffix(df_w_primkey)))
# now we need to do bookkeeping and record any new known smart merges
# add the new merged dataframe as a smart frame, since it's based on at least one smart frame
merged_name = df_w_fkey_name + '+' + df_w_primkey_name
self.add(merged, merged_name)
merged_smart_frame = self._get_smart_frame(merged_name)
# print('add available foreign keys of foreign_key df to merged df smart frame')
# add available foreign keys of component dfs to merged df
for df_id in fkeys(smart_frame_w_fkey).keys():
fkey = fkeys(smart_frame_w_fkey)[df_id]
if fkey == foreign_key:
continue # we just merged on this, so it can't be merged on for the new df
self._register_smart_merge(merged_smart_frame, fkey, df_id)
# print('add available foreign keys of primary key df to merged df')
if smart_frame_w_primkey is not None:
for df_id in fkeys(smart_frame_w_primkey).keys():
fkey = fkeys(smart_frame_w_primkey)[df_id]
# we shouldn't have reuse in here, because we didn't use these
self._register_smart_merge(merged_smart_frame, fkey, df_id)
# now add available foreign keys of smart frames
# that know how to merge into the component dfs
merged_id = id(merged)
df_primkey_id = id(df_w_primkey)
# print('STEP TWO {}'.format(df_primkey_id))
# print('add available foreign keys of smart frames that know how to merge into the primkey df')
if df_primkey_id in self._smart_frames_which_have_a_foreign_key_for_this_dfid:
smart_frames_which_know_this_df_id = self._smart_frames_which_have_a_foreign_key_for_this_dfid[df_primkey_id]
for smart_frame in smart_frames_which_know_this_df_id:
the_fkey = get_fkey_for_dfid(smart_frame, df_primkey_id)
if the_fkey == foreign_key:
# print('skipping fkey {} possessed by {} because it disappeared in this merge'.format(
# the_fkey, self._get_best_printable_name(smart_frame)))
continue
self._register_smart_merge(smart_frame,
the_fkey,
merged_id)
df_fkey_id = id(smart_frame_w_fkey)
# print('add available foreign keys of smart frames that know how to merge into the foreignkey df')
if df_fkey_id in self._smart_frames_which_have_a_foreign_key_for_this_dfid:
smart_frames_which_know_this_df_id = self._smart_frames_which_have_a_foreign_key_for_this_dfid[df_fkey_id]
for smart_frame in smart_frames_which_know_this_df_id:
the_fkey = get_fkey_for_dfid(smart_frame, df_fkey_id)
if the_fkey == foreign_key:
# print('skipping fkey {} possessed by {} because it disappeared in this merge'.format(
# the_fkey, self._get_best_printable_name(smart_frame)))
continue
self._register_smart_merge(smart_frame,
the_fkey,
merged_id)
return merged
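
# --- Minimal usage sketch (illustrative only) --------------------------------
# The block below is a hedged example of how register_smart_merge() and
# smart_merge() are meant to be combined. It assumes a no-argument
# DataFrameBrowser() constructor and relies on the add(df, name) method used
# above; the frame and column names ("orders", "customers", "customer_id")
# are hypothetical.
if __name__ == "__main__":
    import pandas as pd

    orders = pd.DataFrame({"customer_id": [1, 2, 1],
                           "total": [9.50, 3.00, 7.25]})
    customers = pd.DataFrame({"name": ["Ada", "Grace"]}, index=[1, 2])

    browser = DataFrameBrowser()  # assumed constructor
    browser.add(orders, "orders")
    browser.add(customers, "customers")

    # Declare that orders.customer_id is a foreign key into customers' index.
    browser.register_smart_merge(orders, "customer_id", customers)

    # The browser now knows which direction to merge and which key to use.
    # Colliding column names get a suffix on the non-preferred frame (df2 by
    # default; see preferred_df_to_suffix above).
    merged = browser.smart_merge(orders, customers)
    print(merged.columns.tolist())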
|
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright 2020 EMBL - European Bioinformatics Institute
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from rest_framework_json_api import serializers
from rest_framework_mongoengine import serializers as m_serializers
from emgapi import fields as emg_fields
from . import models as m_models
class GoTermSerializer(m_serializers.DocumentSerializer,
serializers.HyperlinkedModelSerializer):
url = serializers.HyperlinkedIdentityField(
view_name='emgapi_v1:goterms-detail',
lookup_field='accession',
)
def get_analysis(self, obj):
return None
class Meta:
model = m_models.GoTerm
fields = '__all__'
class InterproIdentifierSerializer(m_serializers.DocumentSerializer,
serializers.HyperlinkedModelSerializer):
url = serializers.HyperlinkedIdentityField(
view_name='emgapi_v1:interproidentifier-detail',
lookup_field='accession',
)
def get_analysis(self, obj):
return None
class Meta:
model = m_models.InterproIdentifier
fields = '__all__'
class KeggModuleSerializer(m_serializers.DocumentSerializer,
serializers.HyperlinkedModelSerializer):
url = serializers.HyperlinkedIdentityField(
view_name='emgapi_v1:keggmodules-detail',
lookup_field='accession',
)
class Meta:
model = m_models.KeggModule
fields = '__all__'
class PfamSerializer(m_serializers.DocumentSerializer,
serializers.HyperlinkedModelSerializer):
url = serializers.HyperlinkedIdentityField(
view_name='emgapi_v1:pfam-detail',
lookup_field='accession',
)
class Meta:
model = m_models.PfamEntry
fields = '__all__'
class KeggOrthologSerializer(m_serializers.DocumentSerializer,
serializers.HyperlinkedModelSerializer):
url = serializers.HyperlinkedIdentityField(
view_name='emgapi_v1:kegg-orthologs-detail',
lookup_field='accession',
)
class Meta:
model = m_models.KeggOrtholog
fields = '__all__'
class GenomePropertySerializer(m_serializers.DocumentSerializer,
serializers.HyperlinkedModelSerializer):
url = serializers.HyperlinkedIdentityField(
view_name='emgapi_v1:genome-properties-detail',
lookup_field='accession',
)
class Meta:
model = m_models.GenomeProperty
fields = '__all__'
class AntiSmashGeneClusterSerializer(m_serializers.DocumentSerializer,
serializers.HyperlinkedModelSerializer):
url = serializers.HyperlinkedIdentityField(
view_name='emgapi_v1:antismash-gene-clusters-detail',
lookup_field='accession',
)
class Meta:
model = m_models.AntiSmashGeneCluster
fields = '__all__'
class GoTermRetriveSerializer(m_serializers.DynamicDocumentSerializer,
serializers.HyperlinkedModelSerializer):
url = serializers.HyperlinkedIdentityField(
view_name='emgapi_v1:goterms-detail',
lookup_field='accession',
)
count = serializers.IntegerField(required=False)
class Meta:
model = m_models.GoTerm
fields = '__all__'
class InterproIdentifierRetriveSerializer(m_serializers.DynamicDocumentSerializer,
serializers.HyperlinkedModelSerializer):
url = serializers.HyperlinkedIdentityField(
view_name='emgapi_v1:interproidentifier-detail',
lookup_field='accession',
)
count = serializers.IntegerField(required=False)
class Meta:
model = m_models.InterproIdentifier
fields = '__all__'
class KeggModuleRetrieveSerializer(m_serializers.DynamicDocumentSerializer,
serializers.HyperlinkedModelSerializer):
url = serializers.HyperlinkedIdentityField(
view_name='emgapi_v1:keggmodules-detail',
lookup_field='accession',
)
completeness = serializers.FloatField(required=True)
matching_kos = serializers.ListField()
missing_kos = serializers.ListField()
class Meta:
model = m_models.KeggModule
fields = '__all__'
class PfamRetrieveSerializer(m_serializers.DynamicDocumentSerializer,
serializers.HyperlinkedModelSerializer):
url = serializers.HyperlinkedIdentityField(
view_name='emgapi_v1:pfam-detail',
lookup_field='accession',
)
count = serializers.IntegerField(required=True)
class Meta:
model = m_models.PfamEntry
fields = '__all__'
class KeggOrthologRetrieveSerializer(m_serializers.DynamicDocumentSerializer,
serializers.HyperlinkedModelSerializer):
url = serializers.HyperlinkedIdentityField(
view_name='emgapi_v1:kegg-orthologs-detail',
lookup_field='accession',
)
count = serializers.IntegerField(required=True)
class Meta:
model = m_models.KeggOrtholog
fields = '__all__'
class GenomePropertyRetrieveSerializer(m_serializers.DynamicDocumentSerializer,
serializers.HyperlinkedModelSerializer):
url = serializers.HyperlinkedIdentityField(
view_name='emgapi_v1:genome-properties-detail',
lookup_field='accession',
)
presence = serializers.CharField(source="get_presence_display", required=True)
class Meta:
model = m_models.GenomeProperty
fields = '__all__'
class AntiSmashGeneClusterRetrieveSerializer(m_serializers.DynamicDocumentSerializer,
serializers.HyperlinkedModelSerializer):
url = serializers.HyperlinkedIdentityField(
view_name='emgapi_v1:antismash-gene-clusters-detail',
lookup_field='accession',
)
count = serializers.IntegerField(required=True)
class Meta:
model = m_models.AntiSmashGeneCluster
fields = '__all__'
class OrganismSerializer(m_serializers.DynamicDocumentSerializer,
serializers.HyperlinkedModelSerializer):
url = emg_fields.OrganismHyperlinkedIdentityField(
view_name='emgapi_v1:organisms-children-list',
lookup_field='lineage',
)
class Meta:
model = m_models.Organism
exclude = (
'id',
'ancestors',
)
class OrganismRetriveSerializer(OrganismSerializer):
count = serializers.IntegerField(required=False)
class Meta:
model = m_models.Organism
exclude = (
'id',
'ancestors',
)
class AnalysisJobContigSerializer(m_serializers.DocumentSerializer):
class Meta:
model = m_models.AnalysisJobContig
exclude = (
'cogs',
'keggs',
'pfams',
'gos',
'interpros',
'kegg_modules',
'as_geneclusters'
)
|
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tensorflow.python.ops.special_math_ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import tensorflow as tf
class LBetaTest(tf.test.TestCase):
_use_gpu = False
def test_one_dimensional_arg(self):
# Should evaluate to 1 and 1/2.
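    # Why: lbeta(x) = sum_i(lgamma(x_i)) - lgamma(sum_i(x_i)), so
    # lbeta([1, 1]) = 0 + 0 - lgamma(2) = 0 and exp(0) = 1, while
    # lbeta([2, 1]) = lgamma(2) + 0 - lgamma(3) = -ln(2) and exp(-ln(2)) = 1/2.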
x_one = [1, 1.]
x_one_half = [2, 1.]
with self.test_session(use_gpu=self._use_gpu):
self.assertAllClose(1, tf.exp(tf.lbeta(x_one)).eval())
self.assertAllClose(0.5, tf.exp(tf.lbeta(x_one_half)).eval())
self.assertEqual([], tf.lbeta(x_one).get_shape())
def test_one_dimensional_arg_dynamic_alloc(self):
# Should evaluate to 1 and 1/2.
x_one = [1, 1.]
x_one_half = [2, 1.]
with self.test_session(use_gpu=self._use_gpu):
ph = tf.placeholder(tf.float32)
beta_ph = tf.exp(tf.lbeta(ph))
self.assertAllClose(1, beta_ph.eval(feed_dict={ph: x_one}))
self.assertAllClose(0.5, beta_ph.eval(feed_dict={ph: x_one_half}))
def test_two_dimensional_arg(self):
# Should evaluate to 1/2.
x_one_half = [[2, 1.], [2, 1.]]
with self.test_session(use_gpu=self._use_gpu):
self.assertAllClose([0.5, 0.5], tf.exp(tf.lbeta(x_one_half)).eval())
self.assertEqual((2,), tf.lbeta(x_one_half).get_shape())
def test_two_dimensional_arg_dynamic_alloc(self):
# Should evaluate to 1/2.
x_one_half = [[2, 1.], [2, 1.]]
with self.test_session(use_gpu=self._use_gpu):
ph = tf.placeholder(tf.float32)
beta_ph = tf.exp(tf.lbeta(ph))
self.assertAllClose([0.5, 0.5], beta_ph.eval(feed_dict={ph: x_one_half}))
def test_two_dimensional_proper_shape(self):
# Should evaluate to 1/2.
x_one_half = [[2, 1.], [2, 1.]]
with self.test_session(use_gpu=self._use_gpu):
self.assertAllClose([0.5, 0.5], tf.exp(tf.lbeta(x_one_half)).eval())
self.assertEqual((2,), tf.shape(tf.lbeta(x_one_half)).eval())
self.assertEqual(tf.TensorShape([2]), tf.lbeta(x_one_half).get_shape())
def test_complicated_shape(self):
with self.test_session(use_gpu=self._use_gpu):
x = tf.convert_to_tensor(np.random.rand(3, 2, 2))
self.assertAllEqual((3, 2), tf.shape(tf.lbeta(x)).eval())
self.assertEqual(tf.TensorShape([3, 2]), tf.lbeta(x).get_shape())
def test_length_1_last_dimension_results_in_one(self):
# If there is only one coefficient, the formula still works, and we get one
# as the answer, always.
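    # Concretely, Beta(x) = prod_i(Gamma(x_i)) / Gamma(sum_i(x_i)), so with a
    # single coefficient this is Gamma(x) / Gamma(x) = 1 and exp(lbeta(x)) = 1.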
x_a = [5.5]
x_b = [0.1]
with self.test_session(use_gpu=self._use_gpu):
self.assertAllClose(1, tf.exp(tf.lbeta(x_a)).eval())
self.assertAllClose(1, tf.exp(tf.lbeta(x_b)).eval())
self.assertEqual((), tf.lbeta(x_a).get_shape())
def test_empty_rank2_or_greater_input_gives_empty_output(self):
with self.test_session(use_gpu=self._use_gpu):
self.assertAllEqual([], tf.lbeta([[]]).eval())
self.assertEqual((0,), tf.lbeta([[]]).get_shape())
self.assertAllEqual([[]], tf.lbeta([[[]]]).eval())
self.assertEqual((1, 0), tf.lbeta([[[]]]).get_shape())
def test_empty_rank2_or_greater_input_gives_empty_output_dynamic_alloc(self):
with self.test_session(use_gpu=self._use_gpu):
ph = tf.placeholder(tf.float32)
self.assertAllEqual([], tf.lbeta(ph).eval(feed_dict={ph: [[]]}))
self.assertAllEqual([[]], tf.lbeta(ph).eval(feed_dict={ph: [[[]]]}))
def test_empty_rank1_input_raises_value_error(self):
with self.test_session(use_gpu=self._use_gpu):
with self.assertRaisesRegexp(ValueError, 'rank'):
tf.lbeta([])
def test_empty_rank1_dynamic_alloc_input_raises_op_error(self):
with self.test_session(use_gpu=self._use_gpu):
ph = tf.placeholder(tf.float32)
with self.assertRaisesOpError('rank'):
tf.lbeta(ph).eval(feed_dict={ph: []})
class LBetaTestGpu(LBetaTest):
_use_gpu = True
class EinsumTest(tf.test.TestCase):
# standard cases
simple_cases = [
'ij,jk->ik',
'ijk,jklm->il',
'ij,jk,kl->il',
'ijk->i',
]
# where axes are not in order
misordered_cases = [
'ji,kj->ik',
'ikl,kji->kl',
'klj,lki->ij',
]
# more than two arguments
multiarg_cases = [
'ijk,ijl,ikl->i',
'i,ijk,j->k',
'ij,ij,jk,kl->il',
]
invalid_cases = [
# bad formats
'ijk ijk',
      'ij,jk,kl',
      'ij->',
# axis in output that does not exist
'ij,jk->im',
# incorrect number of dimensions
'ij,jkl->kl',
]
dim_mismatch_cases = [
('ijk,jkl->il',
[(2,3,4), (3,5,6)]),
]
def test_simple(self):
for case in self.simple_cases:
self.run_test(case)
def test_misordered(self):
for case in self.misordered_cases:
self.run_test(case)
def test_multiarg(self):
for case in self.multiarg_cases:
self.run_test(case)
def test_invalid(self):
for axes in self.invalid_cases:
result = None
inputs = [
tf.placeholder(tf.float32, shape=(3,4)),
tf.placeholder(tf.float32, shape=(3,4)),
]
try:
result = tf.einsum(axes, *inputs)
except AssertionError as e:
print(e)
assert result is None, \
"An exception should have been thrown."
def test_dim_mismatch(self):
for axes, input_shapes in self.dim_mismatch_cases:
inputs = [
tf.placeholder(tf.float32, shape=shape)
for shape in input_shapes
]
result = None
try:
result = tf.einsum(axes, *inputs)
except AssertionError:
pass
assert result is None, "An exception should have been thrown."
def run_test(self, axes):
all_axes = {ax: np.random.randint(4, 12)
for ax in axes if ax.isalpha()}
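    # e.g. for axes='ij,jk->ik' this gives something like
    # {'i': 7, 'j': 5, 'k': 9}, so the inputs below get shapes (7, 5) and (5, 9).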
input_vals = []
input_axes, _, _ = axes.partition('->')
for idx in input_axes.split(','):
shape = [all_axes[ax] for ax in idx]
input_vals.append(np.random.random(shape))
input_tensors = [tf.constant(val) for val in input_vals]
output_tensor = tf.einsum(axes, *input_tensors)
with self.test_session():
output_value = output_tensor.eval()
correct_value = np.einsum(axes, *input_vals)
err = np.abs(correct_value - output_value).max()
print(axes, err)
assert err < 1e-8
if __name__ == '__main__':
tf.test.main()
|
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for the experimental input pipeline ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import gzip
import os
import zlib
from tensorflow.contrib.data.python.ops import dataset_ops
from tensorflow.core.example import example_pb2
from tensorflow.core.example import feature_pb2
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.lib.io import python_io
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import parsing_ops
from tensorflow.python.platform import test
from tensorflow.python.util import compat
class TextLineDatasetTest(test.TestCase):
def _lineText(self, f, l):
return compat.as_bytes("%d: %d" % (f, l))
def _createFiles(self,
num_files,
num_lines,
crlf=False,
compression_type=None):
filenames = []
for i in range(num_files):
fn = os.path.join(self.get_temp_dir(), "text_line.%d.txt" % i)
filenames.append(fn)
contents = []
for j in range(num_lines):
contents.append(self._lineText(i, j))
# Always include a newline after the record unless it is
# at the end of the file, in which case we include it sometimes.
if j + 1 != num_lines or i == 0:
contents.append(b"\r\n" if crlf else b"\n")
contents = b"".join(contents)
if not compression_type:
with open(fn, "wb") as f:
f.write(contents)
elif compression_type == "GZIP":
with gzip.GzipFile(fn, "wb") as f:
f.write(contents)
elif compression_type == "ZLIB":
contents = zlib.compress(contents)
with open(fn, "wb") as f:
f.write(contents)
else:
raise ValueError("Unsupported compression_type", compression_type)
return filenames
def _testTextLineDataset(self, compression_type=None):
test_filenames = self._createFiles(
2, 5, crlf=True, compression_type=compression_type)
filenames = array_ops.placeholder(dtypes.string, shape=[None])
num_epochs = array_ops.placeholder(dtypes.int64, shape=[])
batch_size = array_ops.placeholder(dtypes.int64, shape=[])
repeat_dataset = dataset_ops.TextLineDataset(
filenames, compression_type=compression_type).repeat(num_epochs)
batch_dataset = repeat_dataset.batch(batch_size)
iterator = dataset_ops.Iterator.from_structure(batch_dataset.output_types)
init_op = iterator.make_initializer(repeat_dataset)
init_batch_op = iterator.make_initializer(batch_dataset)
get_next = iterator.get_next()
with self.test_session() as sess:
# Basic test: read from file 0.
sess.run(init_op, feed_dict={filenames: [test_filenames[0]],
num_epochs: 1})
for i in range(5):
self.assertEqual(self._lineText(0, i), sess.run(get_next))
with self.assertRaises(errors.OutOfRangeError):
sess.run(get_next)
# Basic test: read from file 1.
sess.run(init_op, feed_dict={filenames: [test_filenames[1]],
num_epochs: 1})
for i in range(5):
self.assertEqual(self._lineText(1, i), sess.run(get_next))
with self.assertRaises(errors.OutOfRangeError):
sess.run(get_next)
# Basic test: read from both files.
sess.run(init_op, feed_dict={filenames: test_filenames,
num_epochs: 1})
for j in range(2):
for i in range(5):
self.assertEqual(self._lineText(j, i), sess.run(get_next))
with self.assertRaises(errors.OutOfRangeError):
sess.run(get_next)
# Test repeated iteration through both files.
sess.run(init_op, feed_dict={filenames: test_filenames,
num_epochs: 10})
for _ in range(10):
for j in range(2):
for i in range(5):
self.assertEqual(self._lineText(j, i), sess.run(get_next))
with self.assertRaises(errors.OutOfRangeError):
sess.run(get_next)
# Test batched and repeated iteration through both files.
sess.run(init_batch_op, feed_dict={filenames: test_filenames,
num_epochs: 10,
batch_size: 5})
for _ in range(10):
self.assertAllEqual([self._lineText(0, i) for i in range(5)],
sess.run(get_next))
self.assertAllEqual([self._lineText(1, i) for i in range(5)],
sess.run(get_next))
def testTextLineDatasetNoCompression(self):
self._testTextLineDataset()
def testTextLineDatasetGzipCompression(self):
self._testTextLineDataset(compression_type="GZIP")
def testTextLineDatasetZlibCompression(self):
self._testTextLineDataset(compression_type="ZLIB")
class FixedLengthRecordReaderTest(test.TestCase):
def setUp(self):
super(FixedLengthRecordReaderTest, self).setUp()
self._num_files = 2
self._num_records = 7
self._header_bytes = 5
self._record_bytes = 3
self._footer_bytes = 2
def _record(self, f, r):
return compat.as_bytes(str(f * 2 + r) * self._record_bytes)
def _createFiles(self):
filenames = []
for i in range(self._num_files):
fn = os.path.join(self.get_temp_dir(), "fixed_length_record.%d.txt" % i)
filenames.append(fn)
with open(fn, "wb") as f:
f.write(b"H" * self._header_bytes)
for j in range(self._num_records):
f.write(self._record(i, j))
f.write(b"F" * self._footer_bytes)
return filenames
def testFixedLengthRecordDataset(self):
test_filenames = self._createFiles()
filenames = array_ops.placeholder(dtypes.string, shape=[None])
num_epochs = array_ops.placeholder(dtypes.int64, shape=[])
batch_size = array_ops.placeholder(dtypes.int64, shape=[])
repeat_dataset = (dataset_ops.FixedLengthRecordDataset(
filenames, self._record_bytes, self._header_bytes, self._footer_bytes)
.repeat(num_epochs))
batch_dataset = repeat_dataset.batch(batch_size)
iterator = dataset_ops.Iterator.from_structure(batch_dataset.output_types)
init_op = iterator.make_initializer(repeat_dataset)
init_batch_op = iterator.make_initializer(batch_dataset)
get_next = iterator.get_next()
with self.test_session() as sess:
# Basic test: read from file 0.
sess.run(init_op, feed_dict={filenames: [test_filenames[0]],
num_epochs: 1})
for i in range(self._num_records):
self.assertEqual(self._record(0, i), sess.run(get_next))
with self.assertRaises(errors.OutOfRangeError):
sess.run(get_next)
# Basic test: read from file 1.
sess.run(init_op, feed_dict={filenames: [test_filenames[1]],
num_epochs: 1})
for i in range(self._num_records):
self.assertEqual(self._record(1, i), sess.run(get_next))
with self.assertRaises(errors.OutOfRangeError):
sess.run(get_next)
# Basic test: read from both files.
sess.run(init_op, feed_dict={filenames: test_filenames,
num_epochs: 1})
for j in range(self._num_files):
for i in range(self._num_records):
self.assertEqual(self._record(j, i), sess.run(get_next))
with self.assertRaises(errors.OutOfRangeError):
sess.run(get_next)
# Test repeated iteration through both files.
sess.run(init_op, feed_dict={filenames: test_filenames,
num_epochs: 10})
for _ in range(10):
for j in range(self._num_files):
for i in range(self._num_records):
self.assertEqual(self._record(j, i), sess.run(get_next))
with self.assertRaises(errors.OutOfRangeError):
sess.run(get_next)
# Test batched and repeated iteration through both files.
sess.run(init_batch_op, feed_dict={filenames: test_filenames,
num_epochs: 10,
batch_size: self._num_records})
for _ in range(10):
for j in range(self._num_files):
self.assertAllEqual([self._record(j, i)
for i in range(self._num_records)],
sess.run(get_next))
class TFRecordDatasetTest(test.TestCase):
def setUp(self):
super(TFRecordDatasetTest, self).setUp()
self._num_files = 2
self._num_records = 7
self.test_filenames = self._createFiles()
self.filenames = array_ops.placeholder(dtypes.string, shape=[None])
self.num_epochs = array_ops.placeholder_with_default(
constant_op.constant(1, dtypes.int64), shape=[])
self.compression_type = array_ops.placeholder_with_default("", shape=[])
self.batch_size = array_ops.placeholder(dtypes.int64, shape=[])
repeat_dataset = dataset_ops.TFRecordDataset(
self.filenames, self.compression_type).repeat(self.num_epochs)
batch_dataset = repeat_dataset.batch(self.batch_size)
iterator = dataset_ops.Iterator.from_structure(batch_dataset.output_types)
self.init_op = iterator.make_initializer(repeat_dataset)
self.init_batch_op = iterator.make_initializer(batch_dataset)
self.get_next = iterator.get_next()
def _record(self, f, r):
return compat.as_bytes("Record %d of file %d" % (r, f))
def _createFiles(self):
filenames = []
for i in range(self._num_files):
fn = os.path.join(self.get_temp_dir(), "tf_record.%d.txt" % i)
filenames.append(fn)
writer = python_io.TFRecordWriter(fn)
for j in range(self._num_records):
writer.write(self._record(i, j))
writer.close()
return filenames
def testReadOneEpoch(self):
with self.test_session() as sess:
# Basic test: read from file 0.
sess.run(self.init_op,
feed_dict={self.filenames: [self.test_filenames[0]],
self.num_epochs: 1})
for i in range(self._num_records):
self.assertAllEqual(self._record(0, i), sess.run(self.get_next))
with self.assertRaises(errors.OutOfRangeError):
sess.run(self.get_next)
# Basic test: read from file 1.
sess.run(self.init_op,
feed_dict={self.filenames: [self.test_filenames[1]],
self.num_epochs: 1})
for i in range(self._num_records):
self.assertAllEqual(self._record(1, i), sess.run(self.get_next))
with self.assertRaises(errors.OutOfRangeError):
sess.run(self.get_next)
# Basic test: read from both files.
sess.run(self.init_op,
feed_dict={self.filenames: self.test_filenames,
self.num_epochs: 1})
for j in range(self._num_files):
for i in range(self._num_records):
self.assertAllEqual(self._record(j, i), sess.run(self.get_next))
with self.assertRaises(errors.OutOfRangeError):
sess.run(self.get_next)
def testReadTenEpochs(self):
with self.test_session() as sess:
sess.run(self.init_op, feed_dict={self.filenames: self.test_filenames,
self.num_epochs: 10})
for _ in range(10):
for j in range(self._num_files):
for i in range(self._num_records):
self.assertAllEqual(self._record(j, i), sess.run(self.get_next))
with self.assertRaises(errors.OutOfRangeError):
sess.run(self.get_next)
def testReadTenEpochsOfBatches(self):
with self.test_session() as sess:
sess.run(self.init_batch_op,
feed_dict={self.filenames: self.test_filenames,
self.num_epochs: 10,
self.batch_size: self._num_records})
for _ in range(10):
for j in range(self._num_files):
values = sess.run(self.get_next)
self.assertAllEqual([self._record(j, i)
for i in range(self._num_records)], values)
with self.assertRaises(errors.OutOfRangeError):
sess.run(self.get_next)
def testReadZlibFiles(self):
zlib_files = []
for i, fn in enumerate(self.test_filenames):
with open(fn, "rb") as f:
cdata = zlib.compress(f.read())
zfn = os.path.join(self.get_temp_dir(), "tfrecord_%s.z" % i)
with open(zfn, "wb") as f:
f.write(cdata)
zlib_files.append(zfn)
with self.test_session() as sess:
sess.run(self.init_op,
feed_dict={self.filenames: zlib_files,
self.compression_type: "ZLIB"})
for j in range(self._num_files):
for i in range(self._num_records):
self.assertAllEqual(self._record(j, i), sess.run(self.get_next))
with self.assertRaises(errors.OutOfRangeError):
sess.run(self.get_next)
def testReadGzipFiles(self):
gzip_files = []
for i, fn in enumerate(self.test_filenames):
with open(fn, "rb") as f:
gzfn = os.path.join(self.get_temp_dir(), "tfrecord_%s.gz" % i)
with gzip.GzipFile(gzfn, "wb") as gzf:
gzf.write(f.read())
gzip_files.append(gzfn)
with self.test_session() as sess:
sess.run(self.init_op,
feed_dict={self.filenames: gzip_files,
self.compression_type: "GZIP"})
for j in range(self._num_files):
for i in range(self._num_records):
self.assertAllEqual(self._record(j, i), sess.run(self.get_next))
with self.assertRaises(errors.OutOfRangeError):
sess.run(self.get_next)
class ReadBatchFeaturesTest(test.TestCase):
def setUp(self):
super(ReadBatchFeaturesTest, self).setUp()
self._num_files = 2
self._num_records = 7
self.test_filenames = self._createFiles()
def _read_batch_features(self, filenames, num_epochs, batch_size):
self.filenames = filenames
self.num_epochs = num_epochs
self.batch_size = batch_size
return dataset_ops.read_batch_features(
file_pattern=self.filenames,
batch_size=self.batch_size,
features={
"file": parsing_ops.FixedLenFeature([], dtypes.int64),
"record": parsing_ops.FixedLenFeature([], dtypes.int64),
"keywords": parsing_ops.VarLenFeature(dtypes.string)
},
reader=dataset_ops.TFRecordDataset,
randomize_input=False,
num_epochs=self.num_epochs)
def _record(self, f, r):
example = example_pb2.Example(features=feature_pb2.Features(
feature={
"file":
feature_pb2.Feature(int64_list=feature_pb2.Int64List(
value=[f])),
"record":
feature_pb2.Feature(int64_list=feature_pb2.Int64List(
value=[r])),
"keywords":
feature_pb2.Feature(bytes_list=feature_pb2.BytesList(
value=self._get_keywords(f, r)))
}))
return example.SerializeToString()
def _get_keywords(self, f, r):
num_keywords = 1 + (f + r) % 2
keywords = []
for index in range(num_keywords):
keywords.append(compat.as_bytes("keyword%d" % index))
return keywords
def _createFiles(self):
filenames = []
for i in range(self._num_files):
fn = os.path.join(self.get_temp_dir(), "tf_record.%d.txt" % i)
filenames.append(fn)
writer = python_io.TFRecordWriter(fn)
for j in range(self._num_records):
writer.write(self._record(i, j))
writer.close()
return filenames
def _next_actual_batch(self, sess):
file_op = self.outputs["file"]
keywords_indices_op = self.outputs["keywords"].indices
keywords_values_op = self.outputs["keywords"].values
keywords_dense_shape_op = self.outputs["keywords"].dense_shape
record_op = self.outputs["record"]
return sess.run([
file_op, keywords_indices_op, keywords_values_op,
keywords_dense_shape_op, record_op
])
def _next_expected_batch(self, file_indices, batch_size, num_epochs):
def _next_record(file_indices):
for j in file_indices:
for i in range(self._num_records):
yield j, i
file_batch = []
keywords_batch_indices = []
keywords_batch_values = []
keywords_batch_max_len = 0
record_batch = []
batch_index = 0
for _ in range(num_epochs):
for record in _next_record(file_indices):
f = record[0]
r = record[1]
file_batch.append(f)
record_batch.append(r)
keywords = self._get_keywords(f, r)
keywords_batch_values.extend(keywords)
keywords_batch_indices.extend([[batch_index, i]
for i in range(len(keywords))])
batch_index += 1
keywords_batch_max_len = max(keywords_batch_max_len, len(keywords))
if len(file_batch) == batch_size:
yield [
file_batch, keywords_batch_indices, keywords_batch_values,
[batch_size, keywords_batch_max_len], record_batch
]
file_batch = []
keywords_batch_indices = []
keywords_batch_values = []
keywords_batch_max_len = 0
record_batch = []
batch_index = 0
if file_batch:
yield [
file_batch, keywords_batch_indices, keywords_batch_values,
[len(file_batch), keywords_batch_max_len], record_batch
]
def _verify_records(self, sess, batch_size, file_index=None, num_epochs=1):
if file_index is not None:
file_indices = [file_index]
else:
file_indices = range(self._num_files)
for expected_batch in self._next_expected_batch(file_indices, batch_size,
num_epochs):
actual_batch = self._next_actual_batch(sess)
for i in range(len(expected_batch)):
self.assertAllEqual(expected_batch[i], actual_batch[i])
def testRead(self):
for batch_size in [1, 2]:
for num_epochs in [1, 10]:
with ops.Graph().as_default():
with self.test_session(graph=ops.get_default_graph()) as sess:
# Basic test: read from file 0.
self.outputs = self._read_batch_features(
filenames=self.test_filenames[0],
num_epochs=num_epochs,
batch_size=batch_size)
self._verify_records(sess, batch_size, 0, num_epochs=num_epochs)
with self.assertRaises(errors.OutOfRangeError):
self._next_actual_batch(sess)
with ops.Graph().as_default():
with self.test_session(graph=ops.get_default_graph()) as sess:
# Basic test: read from file 1.
self.outputs = self._read_batch_features(
filenames=self.test_filenames[1],
num_epochs=num_epochs,
batch_size=batch_size)
self._verify_records(sess, batch_size, 1, num_epochs=num_epochs)
with self.assertRaises(errors.OutOfRangeError):
self._next_actual_batch(sess)
with ops.Graph().as_default():
with self.test_session(graph=ops.get_default_graph()) as sess:
# Basic test: read from both files.
self.outputs = self._read_batch_features(
filenames=self.test_filenames,
num_epochs=num_epochs,
batch_size=batch_size)
self._verify_records(sess, batch_size, num_epochs=num_epochs)
with self.assertRaises(errors.OutOfRangeError):
self._next_actual_batch(sess)
def testReadWithEquivalentDataset(self):
# TODO(mrry): Add support for tf.SparseTensor as a Dataset component.
features = {
"file": parsing_ops.FixedLenFeature([], dtypes.int64),
"record": parsing_ops.FixedLenFeature([], dtypes.int64),
}
dataset = (dataset_ops.TFRecordDataset(self.test_filenames)
.map(lambda x: parsing_ops.parse_single_example(x, features))
.repeat(10)
.batch(2))
iterator = dataset.make_initializable_iterator()
init_op = iterator.initializer
next_element = iterator.get_next()
with self.test_session() as sess:
sess.run(init_op)
for file_batch, _, _, _, record_batch in self._next_expected_batch(
range(self._num_files), 2, 10):
actual_batch = sess.run(next_element)
self.assertAllEqual(file_batch, actual_batch["file"])
self.assertAllEqual(record_batch, actual_batch["record"])
with self.assertRaises(errors.OutOfRangeError):
sess.run(next_element)
if __name__ == "__main__":
test.main()
|
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import redis
import connection
from scrapy.http import Request
from scrapy.spider import Spider
from unittest import TestCase
from dupefilter import RFPDupeFilter
from queue import SpiderQueue, SpiderPriorityQueue, SpiderStack
from scheduler import Scheduler
# allow test settings from environment
REDIS_HOST = os.environ.get('REDIS_HOST', 'localhost')
REDIS_PORT = int(os.environ.get('REDIS_PORT', 6379))
class DupeFilterTest(TestCase):
def setUp(self):
self.server = redis.Redis(REDIS_HOST, REDIS_PORT)
self.key = 'scrapy_redis:tests:dupefilter:'
self.df = RFPDupeFilter(self.server, self.key)
def tearDown(self):
self.server.delete(self.key)
def test_dupe_filter(self):
req = Request('http://example.com')
self.assertFalse(self.df.request_seen(req))
self.assertTrue(self.df.request_seen(req))
self.df.close('nothing')
class QueueTestMixin(object):
queue_cls = None
def setUp(self):
self.spider = Spider('myspider')
self.key = 'scrapy_redis:tests:%s:queue' % self.spider.name
self.server = redis.Redis(REDIS_HOST, REDIS_PORT)
        self.q = self.queue_cls(self.server, self.spider, self.key)
def tearDown(self):
self.server.delete(self.key)
def test_clear(self):
self.assertEqual(len(self.q), 0)
for i in range(10):
            # XXX: can't use the same url for all requests, as SpiderPriorityQueue
            # uses redis' set implementation and we would end up with only one
            # request in the set, failing the test. Note that SpiderPriorityQueue
            # therefore acts as a request duplication filter whenever the
            # serialized requests are identical. This might be unwanted for
            # repeated requests to the same page, even with the
            # dont_filter=True flag.
req = Request('http://example.com/?page=%s' % i)
self.q.push(req)
self.assertEqual(len(self.q), 10)
self.q.clear()
self.assertEqual(len(self.q), 0)
class SpiderQueueTest(QueueTestMixin, TestCase):
queue_cls = SpiderQueue
def test_queue(self):
req1 = Request('http://example.com/page1')
req2 = Request('http://example.com/page2')
self.q.push(req1)
self.q.push(req2)
out1 = self.q.pop()
out2 = self.q.pop()
self.assertEqual(out1.url, req1.url)
self.assertEqual(out2.url, req2.url)
class SpiderPriorityQueueTest(QueueTestMixin, TestCase):
queue_cls = SpiderPriorityQueue
def test_queue(self):
req1 = Request('http://example.com/page1', priority=100)
req2 = Request('http://example.com/page2', priority=50)
req3 = Request('http://example.com/page2', priority=200)
self.q.push(req1)
self.q.push(req2)
self.q.push(req3)
out1 = self.q.pop()
out2 = self.q.pop()
out3 = self.q.pop()
self.assertEqual(out1.url, req3.url)
self.assertEqual(out2.url, req1.url)
self.assertEqual(out3.url, req2.url)
class SpiderStackTest(QueueTestMixin, TestCase):
queue_cls = SpiderStack
def test_queue(self):
req1 = Request('http://example.com/page1')
req2 = Request('http://example.com/page2')
self.q.push(req1)
self.q.push(req2)
out1 = self.q.pop()
out2 = self.q.pop()
self.assertEqual(out1.url, req2.url)
self.assertEqual(out2.url, req1.url)
class SchedulerTest(TestCase):
def setUp(self):
self.server = redis.Redis(REDIS_HOST, REDIS_PORT)
self.key_prefix = 'scrapy_redis:tests:'
self.queue_key = self.key_prefix + '%(spider)s:requests'
self.dupefilter_key = self.key_prefix + '%(spider)s:dupefilter'
self.idle_before_close = 0
self.scheduler = Scheduler(self.server, False, self.queue_key,
SpiderQueue, self.dupefilter_key,
self.idle_before_close)
def tearDown(self):
for key in self.server.keys(self.key_prefix):
self.server.delete(key)
def test_scheduler(self):
# default no persist
self.assertFalse(self.scheduler.persist)
spider = Spider('myspider')
self.scheduler.open(spider)
self.assertEqual(len(self.scheduler), 0)
req = Request('http://example.com')
self.scheduler.enqueue_request(req)
self.assertTrue(self.scheduler.has_pending_requests())
self.assertEqual(len(self.scheduler), 1)
# dupefilter in action
self.scheduler.enqueue_request(req)
self.assertEqual(len(self.scheduler), 1)
out = self.scheduler.next_request()
self.assertEqual(out.url, req.url)
self.assertFalse(self.scheduler.has_pending_requests())
self.assertEqual(len(self.scheduler), 0)
self.scheduler.close('finish')
def test_scheduler_persistent(self):
messages = []
spider = Spider('myspider')
spider.log = lambda *args, **kwargs: messages.append([args, kwargs])
self.scheduler.persist = True
self.scheduler.open(spider)
self.assertEqual(messages, [])
self.scheduler.enqueue_request(Request('http://example.com/page1'))
self.scheduler.enqueue_request(Request('http://example.com/page2'))
self.assertTrue(self.scheduler.has_pending_requests())
self.scheduler.close('finish')
self.scheduler.open(spider)
self.assertEqual(messages, [
[('Resuming crawl (2 requests scheduled)',), {}],
])
self.assertEqual(len(self.scheduler), 2)
self.scheduler.persist = False
self.scheduler.close('finish')
self.assertEqual(len(self.scheduler), 0)
class ConnectionTest(TestCase):
def setUp(self):
pass
def tearDown(self):
pass
# We can get a connection from just REDIS_URL.
def test_redis_url(self):
settings = dict(
REDIS_URL = 'redis://foo:bar@localhost:9001/42'
)
server = connection.from_settings(settings)
connect_args = server.connection_pool.connection_kwargs
self.assertEqual(connect_args['host'], 'localhost')
self.assertEqual(connect_args['port'], 9001)
self.assertEqual(connect_args['password'], 'bar')
self.assertEqual(connect_args['db'], 42)
# We can get a connection from REDIS_HOST/REDIS_PORT.
def test_redis_host_port(self):
settings = dict(
REDIS_HOST = 'localhost',
REDIS_PORT = 9001
)
server = connection.from_settings(settings)
connect_args = server.connection_pool.connection_kwargs
self.assertEqual(connect_args['host'], 'localhost')
self.assertEqual(connect_args['port'], 9001)
# REDIS_URL takes precedence over REDIS_HOST/REDIS_PORT.
def test_redis_url_precedence(self):
settings = dict(
REDIS_HOST = 'baz',
REDIS_PORT = 1337,
REDIS_URL = 'redis://foo:bar@localhost:9001/42'
)
server = connection.from_settings(settings)
connect_args = server.connection_pool.connection_kwargs
self.assertEqual(connect_args['host'], 'localhost')
self.assertEqual(connect_args['port'], 9001)
self.assertEqual(connect_args['password'], 'bar')
self.assertEqual(connect_args['db'], 42)
# We fallback to REDIS_HOST/REDIS_PORT if REDIS_URL is None.
def test_redis_host_port_fallback(self):
settings = dict(
REDIS_HOST = 'baz',
REDIS_PORT = 1337,
REDIS_URL = None
)
server = connection.from_settings(settings)
connect_args = server.connection_pool.connection_kwargs
self.assertEqual(connect_args['host'], 'baz')
self.assertEqual(connect_args['port'], 1337)
# We use default values for REDIS_HOST/REDIS_PORT.
def test_redis_default(self):
settings = dict()
server = connection.from_settings(settings)
connect_args = server.connection_pool.connection_kwargs
self.assertEqual(connect_args['host'], 'localhost')
self.assertEqual(connect_args['port'], 6379)
|
|
# Copyright 2018 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Build input pipelines that span TPU pods for optimal performance.
It's common to batch sequences according to their length. Unfortunately, a
naive scaling of such an input pipeline across a pod will result in each host
choosing the sequence length bucket independently. Concretely, host A may select
sequences of a short length, while host B may select sequences of a very long
length. Because every step involves a blocking all-reduce phase, host A must
wait for host B.
The input pipeline defined in this module synchronizes the hosts so that they
all select the same sequence length bucket, resulting in up to 50% performance
improvements across large TPU pod slices.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import time
import tensorflow as tf
from tensorflow.python.data.ops import multi_device_iterator_ops
from mlperf_compliance import mlperf_log
import async_checkpoint
import estimator as nmt_estimator
import low_level_runner
from utils import vocab_utils
def create_train_runner(hparams, num_workers):
params = {}
steps_per_epoch = int(hparams.num_examples_per_epoch/hparams.batch_size)
return low_level_runner.TrainLowLevelRunner(
iterations=steps_per_epoch,
hparams=hparams,
per_host_v1=True)
def train_fn(hparams, num_workers):
"""Copy of train function from estimator.py."""
# TODO: Merge improvements into the original.
# pylint: disable=protected-access
hparams.tgt_sos_id, hparams.tgt_eos_id = nmt_estimator._get_tgt_sos_eos_id(
hparams)
model_fn = nmt_estimator.make_model_fn(hparams)
def print_log():
mlperf_log.gnmt_print(key=mlperf_log.TRAIN_LOOP)
mlperf_log.gnmt_print(key=mlperf_log.TRAIN_EPOCH, value=0)
mlperf_log.gnmt_print(key=mlperf_log.INPUT_SIZE,
value=hparams.num_examples_per_epoch)
if hparams.use_tpu_low_level_api:
runner = create_train_runner(hparams, num_workers)
mlperf_log.gnmt_print(key=mlperf_log.RUN_START)
input_fn = DistributedPipeline(hparams, num_workers)
runner.initialize(input_fn, {})
runner.build_model(model_fn, {})
print_log()
runner.train(0, hparams.num_train_steps)
return 0.0
# cluster = tf.contrib.cluster_resolver.TPUClusterResolver(hparams.tpu_name)
# cluster_spec = cluster.cluster_spec()
# print('cluster_spec: %s' % cluster_spec)
# num_workers = cluster_spec.num_tasks('tpu_worker')
# print('num_workers: %s' % num_workers)
pipeline = DistributedPipeline(hparams, num_workers)
print_log()
if hparams.use_tpu:
run_config = nmt_estimator._get_tpu_run_config(hparams, True)
estimator = tf.contrib.tpu.TPUEstimator(
model_fn=model_fn,
config=run_config,
use_tpu=hparams.use_tpu,
train_batch_size=hparams.batch_size,
eval_batch_size=hparams.batch_size,
predict_batch_size=hparams.infer_batch_size)
else:
raise ValueError("Distributed input pipeline only supported on TPUs.")
hooks = [pipeline]
if hparams.use_async_checkpoint:
hooks.append(
async_checkpoint.AsyncCheckpointSaverHook(
checkpoint_dir=hparams.out_dir,
save_steps=int(
hparams.num_examples_per_epoch / hparams.batch_size)))
estimator.train(
input_fn=pipeline, max_steps=hparams.num_train_steps, hooks=hooks)
# Return value is not used
return 0.0
def train_and_eval_with_low_level_api(hparams, num_workers):
"""Train and evaluation function."""
# pylint: disable=protected-access
hparams.tgt_sos_id, hparams.tgt_eos_id = 1, 2
model_fn = nmt_estimator.make_model_fn(hparams)
train_runner = create_train_runner(hparams, num_workers)
eval_runner = nmt_estimator.create_eval_runner(hparams, model_fn)
mlperf_log.gnmt_print(key=mlperf_log.RUN_START)
train_input_fn = DistributedPipeline(hparams, num_workers)
train_runner.initialize(train_input_fn, {})
train_runner.build_model(model_fn, {})
eval_input_fn = nmt_estimator.make_input_fn(
hparams, tf.contrib.learn.ModeKeys.INFER)
params = {
"infer_batch_size": int(hparams.infer_batch_size / hparams.num_shards)
}
eval_runner.initialize(eval_input_fn, params)
eval_runner.build_model(model_fn, params)
score = 0.0
mlperf_log.gnmt_print(key=mlperf_log.TRAIN_LOOP)
mlperf_log.gnmt_print(key=mlperf_log.EVAL_TARGET, value=hparams.target_bleu)
current_step = 0
for i in range(hparams.max_train_epochs):
mlperf_log.gnmt_print(key=mlperf_log.TRAIN_EPOCH, value=i)
tf.logging.info("Start training epoch %d", i)
mlperf_log.gnmt_print(
key=mlperf_log.INPUT_SIZE, value=hparams.num_examples_per_epoch)
steps_per_epoch = int(hparams.num_examples_per_epoch / hparams.batch_size)
train_runner.train(current_step, steps_per_epoch)
mlperf_log.gnmt_print(
key=mlperf_log.TRAIN_CHECKPOINT, value=("Under " + hparams.out_dir))
tf.logging.info("End training epoch %d", i)
mlperf_log.gnmt_print(key=mlperf_log.EVAL_START)
predictions = list(eval_runner.predict())
current_step = current_step + steps_per_epoch
score = nmt_estimator.get_metric(hparams, predictions, current_step)
tf.logging.info("Score after epoch %d: %f", i, score)
mlperf_log.gnmt_print(key=mlperf_log.EVAL_ACCURACY,
value={"value": score, "epoch": i})
mlperf_log.gnmt_print(key=mlperf_log.EVAL_STOP, value=i)
if score >= hparams.target_bleu:
mlperf_log.gnmt_print(mlperf_log.RUN_STOP, {"success": True})
return score
mlperf_log.gnmt_print(mlperf_log.RUN_STOP, {"success": False})
return score
def train_and_eval_fn(hparams, num_workers):
"""Train and evaluation function."""
# pylint: disable=protected-access
mlperf_log.gnmt_print(key=mlperf_log.RUN_START)
hparams.tgt_sos_id, hparams.tgt_eos_id = 1, 2
model_fn = nmt_estimator.make_model_fn(hparams)
pipeline = DistributedPipeline(hparams, num_workers)
run_config = nmt_estimator._get_tpu_run_config(hparams)
estimator = tf.contrib.tpu.TPUEstimator(
model_fn=model_fn,
config=run_config,
use_tpu=hparams.use_tpu,
train_batch_size=hparams.batch_size,
eval_batch_size=hparams.batch_size,
predict_batch_size=hparams.infer_batch_size)
score = 0.0
mlperf_log.gnmt_print(key=mlperf_log.TRAIN_LOOP)
mlperf_log.gnmt_print(key=mlperf_log.EVAL_TARGET, value=hparams.target_bleu)
for i in range(hparams.max_train_epochs):
mlperf_log.gnmt_print(key=mlperf_log.TRAIN_EPOCH, value=i)
tf.logging.info("Start training epoch %d", i)
mlperf_log.gnmt_print(
key=mlperf_log.INPUT_SIZE, value=hparams.num_examples_per_epoch)
steps_per_epoch = int(hparams.num_examples_per_epoch / hparams.batch_size)
max_steps = steps_per_epoch * (i + 1)
estimator.train(input_fn=pipeline, max_steps=max_steps, hooks=[pipeline])
mlperf_log.gnmt_print(
key=mlperf_log.TRAIN_CHECKPOINT, value=("Under " + hparams.out_dir))
tf.logging.info("End training epoch %d", i)
mlperf_log.gnmt_print(key=mlperf_log.EVAL_START)
score = nmt_estimator.get_metric_from_estimator(hparams, estimator)
tf.logging.info("Score after epoch %d: %f", i, score)
mlperf_log.gnmt_print(key=mlperf_log.EVAL_ACCURACY,
value={"value": score, "epoch": i})
mlperf_log.gnmt_print(key=mlperf_log.EVAL_STOP, value=i)
if score >= hparams.target_bleu:
mlperf_log.gnmt_print(mlperf_log.RUN_STOP, {"success": True})
return score
mlperf_log.gnmt_print(mlperf_log.RUN_STOP, {"success": False})
return score
class DistributedPipeline(tf.train.SessionRunHook):
"""DistributedPipeline encapsulates constructing the distributed pipeline.
  A class is needed for two reasons. First, the pipeline must be constructed
  inside the graph managed by [TPU]Estimator, so it cannot be pre-built by an
  ordinary function. Second, we need to capture the iterator initializer (run
  in after_create_session) while simultaneously passing this object to
  TPUEstimator.train as the input function.
"""
def __init__(self, hparams, num_hosts):
"""Constructs a DistributedPipeline.
Args:
hparams: The hparams object for this model.
num_hosts: The number of hosts in the slice of the TPU pod.
Throws:
ValueError: If the passed values are invalid.
"""
self._hparams = hparams
self._num_hosts = num_hosts
self._iterator = None
self._outputs = None
global_batch_size = hparams.batch_size
if global_batch_size % num_hosts != 0:
raise ValueError(
"global_batch_size (%s) must be a multiple of num_hosts (%s)" %
(global_batch_size, num_hosts))
def after_create_session(self, session, coord):
del coord
start = time.time()
session.run(self._iterator.initializer)
tf.logging.info("Initialized multi-host dataset iterators in %d seconds",
time.time() - start)
def __call__(self, params):
if not self._outputs:
self._iterator = _make_distributed_pipeline(self._hparams,
self._num_hosts)
self._outputs = self._iterator.get_next()
if "context" in params:
current_host = params["context"].current_input_fn_deployment()[1]
elif "dataset_index" in params:
current_host = params["dataset_index"]
else:
raise ValueError('Expect "context" or "dataset_index" in params.')
return self._outputs[current_host]
def _make_distributed_pipeline(hparams, num_hosts):
"""Makes the distributed input pipeline.
make_distributed_pipeline must be used in the PER_HOST_V1 configuration.
  Note: the returned MultiDeviceIterator must be initialized explicitly by the
  caller (DistributedPipeline does this in after_create_session), because
  MultiDeviceIterator is not directly compatible with Estimator / TPUEstimator.
Args:
hparams: The hyperparameters to use.
num_hosts: The number of hosts we're running across.
Returns:
A MultiDeviceIterator.
"""
# TODO: Merge with the original copy in iterator_utils.py.
# pylint: disable=g-long-lambda,line-too-long
global_batch_size = hparams.batch_size
if global_batch_size % num_hosts != 0:
raise ValueError(
"global_batch_size (%s) must be a multiple of num_hosts (%s)" %
(global_batch_size, num_hosts))
# Optionally choose from `choose_buckets` buckets simultaneously.
if hparams.choose_buckets:
window_batch_size = int(global_batch_size / hparams.choose_buckets)
else:
window_batch_size = global_batch_size
per_host_batch_size = global_batch_size / num_hosts
output_buffer_size = global_batch_size * 100
resolver = low_level_runner.get_resolver(hparams)
assert resolver
job_name = resolver.get_job_name() or "tpu_worker"
with tf.device("/job:%s/task:0/cpu:0" % job_name):
# From estimator.py
src_file = "%s.%s" % (hparams.train_prefix, hparams.src)
tgt_file = "%s.%s" % (hparams.train_prefix, hparams.tgt)
src_vocab_file = hparams.src_vocab_file
tgt_vocab_file = hparams.tgt_vocab_file
src_vocab_table, tgt_vocab_table = vocab_utils.create_vocab_tables(
src_vocab_file, tgt_vocab_file, hparams.share_vocab)
src_dataset = tf.data.TextLineDataset(src_file).prefetch(output_buffer_size)
tgt_dataset = tf.data.TextLineDataset(tgt_file).prefetch(output_buffer_size)
mlperf_log.gnmt_print(
key=mlperf_log.INPUT_BATCH_SIZE, value=global_batch_size)
mlperf_log.gnmt_print(
key=mlperf_log.TRAIN_HP_MAX_SEQ_LEN, value=hparams.src_max_len)
# Define local variables that are parameters in iterator_utils.make_input_fn
sos = hparams.sos
eos = hparams.eos
random_seed = hparams.random_seed
num_buckets = hparams.num_buckets
src_max_len = hparams.src_max_len
tgt_max_len = hparams.tgt_max_len
num_parallel_calls = 100 # constant in iterator_utils.py
skip_count = None # constant in estimator.py
reshuffle_each_iteration = True # constant in estimator.py
use_char_encode = hparams.use_char_encode
filter_oversized_sequences = True # constant in estimator.py
# From iterator_utils.py
if use_char_encode:
src_eos_id = vocab_utils.EOS_CHAR_ID
else:
src_eos_id = tf.cast(src_vocab_table.lookup(tf.constant(eos)), tf.int32)
tgt_sos_id = tf.cast(tgt_vocab_table.lookup(tf.constant(sos)), tf.int32)
tgt_eos_id = tf.cast(tgt_vocab_table.lookup(tf.constant(eos)), tf.int32)
src_tgt_dataset = tf.data.Dataset.zip((src_dataset, tgt_dataset))
mlperf_log.gnmt_print(key=mlperf_log.INPUT_SHARD, value=1)
if skip_count is not None:
src_tgt_dataset = src_tgt_dataset.skip(skip_count)
def map_fn_1(src, tgt):
src = tf.string_split([src]).values
tgt = tf.string_split([tgt]).values
src_size = tf.size(src)
tgt_size = tf.size(tgt)
size_ok_bool = tf.logical_and(src_size > 0, tgt_size > 0)
if filter_oversized_sequences:
        within_max_len = tf.logical_and(src_size < src_max_len,
                                        tgt_size < tgt_max_len)
        size_ok_bool = tf.logical_and(size_ok_bool, within_max_len)
if src_max_len:
src = src[:src_max_len]
if tgt_max_len:
tgt = tgt[:tgt_max_len]
return (src, tgt, size_ok_bool)
src_tgt_bool_dataset = src_tgt_dataset.map(
map_fn_1, num_parallel_calls=num_parallel_calls)
src_tgt_bool_dataset = src_tgt_bool_dataset.filter(
lambda src, tgt, filter_bool: filter_bool)
def map_fn_2(src, tgt, unused_filter_bool):
if use_char_encode:
src = tf.reshape(vocab_utils.tokens_to_bytes(src), [-1])
tgt = tf.cast(tgt_vocab_table.lookup(tgt), tf.int32)
else:
src = tf.cast(src_vocab_table.lookup(src), tf.int32)
tgt = tf.cast(tgt_vocab_table.lookup(tgt), tf.int32)
# Create a tgt_input prefixed with <sos> and a tgt_output suffixed with <eos>.
tgt_in = tf.concat(([tgt_sos_id], tgt), 0)
tgt_out = tf.concat((tgt, [tgt_eos_id]), 0)
# Add in sequence lengths.
if use_char_encode:
src_len = tf.to_int32(tf.size(src) / vocab_utils.DEFAULT_CHAR_MAXLEN)
else:
src_len = tf.size(src)
tgt_len = tf.size(tgt_in)
return src, tgt_in, tgt_out, src_len, tgt_len
# Convert the word strings to ids. Word strings that are not in the
# vocab get the lookup table's default_value integer.
mlperf_log.gnmt_print(key=mlperf_log.PREPROC_TOKENIZE_TRAINING)
src_tgt_dataset = src_tgt_bool_dataset.map(
map_fn_2, num_parallel_calls=num_parallel_calls)
src_tgt_dataset = src_tgt_dataset.prefetch(output_buffer_size)
src_tgt_dataset = src_tgt_dataset.cache()
src_tgt_dataset = src_tgt_dataset.shuffle(
output_buffer_size, random_seed, reshuffle_each_iteration).repeat()
# Bucket by source sequence length (buckets for lengths 0-9, 10-19, ...)
def batching_func(x):
return x.padded_batch(
window_batch_size,
# The first three entries are the source and target line rows;
# these have unknown-length vectors. The last two entries are
# the source and target row sizes; these are scalars.
padded_shapes=(
tf.TensorShape([src_max_len]), # src
tf.TensorShape([tgt_max_len]), # tgt_input
tf.TensorShape([tgt_max_len]), # tgt_output
tf.TensorShape([]), # src_len
tf.TensorShape([])), # tgt_len
# Pad the source and target sequences with eos tokens.
          # (We don't strictly need to do this, since calculations past the
          # true sequence length are masked out later anyway.)
padding_values=(
src_eos_id, # src
tgt_eos_id, # tgt_input
tgt_eos_id, # tgt_output
0, # src_len -- unused
              0),  # tgt_len -- unused
          # For TPU, drop_remainder must be True or the batch size will be None.
          drop_remainder=True)
def key_func(unused_1, unused_2, unused_3, src_len, tgt_len):
"""Calculate bucket_width by maximum source sequence length."""
# Pairs with length [0, bucket_width) go to bucket 0, length
# [bucket_width, 2 * bucket_width) go to bucket 1, etc. Pairs with length
# over ((num_buckets - 1) * bucket_width) words all go into the last bucket.
if src_max_len:
bucket_width = (src_max_len + num_buckets - 1) // num_buckets
else:
bucket_width = 10
# Bucket sentence pairs by the length of their source sentence and target
# sentence.
bucket_id = tf.maximum(src_len // bucket_width, tgt_len // bucket_width)
return tf.to_int64(tf.minimum(num_buckets, bucket_id))
def reduce_func(unused_key, windowed_data):
return batching_func(windowed_data)
if num_buckets > 1:
batched_dataset = src_tgt_dataset.apply(
tf.contrib.data.group_by_window(
key_func=key_func,
reduce_func=reduce_func,
window_size=window_batch_size))
else:
batched_dataset = batching_func(src_tgt_dataset)
batched_dataset = batched_dataset.map(
lambda src, tgt_in, tgt_out, source_size, tgt_in_size: (
{"source": src,
"target_input": tgt_in,
"target_output": tgt_out,
"source_sequence_length": source_size,
"target_sequence_length": tgt_in_size}))
re_batched_dataset = batched_dataset.apply(tf.contrib.data.unbatch()).batch(
int(per_host_batch_size), drop_remainder=True)
output_devices = [
"/job:%s/task:%d/cpu:0" % (job_name, i) for i in range(num_hosts)
]
options = tf.data.Options()
options.experimental_numa_aware = True
options.experimental_filter_fusion = True
options.experimental_map_and_filter_fusion = True
re_batched_dataset = re_batched_dataset.with_options(options)
multi_device_iterator = multi_device_iterator_ops.MultiDeviceIterator(
dataset=re_batched_dataset,
devices=output_devices,
max_buffer_size=10,
prefetch_buffer_size=10,
source_device=("/job:%s/task:0/cpu:0" % job_name))
return multi_device_iterator
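# Hedged illustration (not part of the input pipeline above): the bucketing
# arithmetic used in `key_func`, written in plain Python so the bucket
# assignment is easy to check. `src_max_len=50` and `num_buckets=5` are
# example values, not values taken from this file.
def _example_bucket_id(src_len, tgt_len, src_max_len=50, num_buckets=5):
    # Same rule as key_func: ceil-divide the max source length into
    # num_buckets windows, pick the wider of the two sequence buckets,
    # then clamp to the last bucket.
    bucket_width = ((src_max_len + num_buckets - 1) // num_buckets
                    if src_max_len else 10)
    bucket_id = max(src_len // bucket_width, tgt_len // bucket_width)
    return min(num_buckets, bucket_id)
# e.g. _example_bucket_id(12, 7) == 1 and _example_bucket_id(49, 3) == 4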
|
|
# -*- coding: utf-8 -*-
"""
Document Library
"""
module = "doc"
#==============================================================================
resourcename = "document"
tablename = "doc_document"
table = db.define_table(tablename,
Field("name", length=128, notnull=True, unique=True, label=T("Name")),
Field("file", "upload", autodelete=True,),
Field("url", label=T("URL")),
person_id(label=T("Author")),
organisation_id(),
location_id(),
Field("date", "date"),
comments(),
Field("entered", "boolean", label=T("Entered")),
Field("checksum", readable=False, writable=False),
migrate=migrate, *s3_meta_fields())
table.name.requires = [IS_NOT_EMPTY(), IS_NOT_ONE_OF(db, "%s.name" % tablename)]
def shn_file_represent(file, table):
if file:
return A(table.file.retrieve(file)[0],
_href=URL(r=request, f="download", args=[file]))
else:
return NONE
table.file.represent = lambda file, table=table: shn_file_represent(file, table)
table.url.represent = lambda url: url and A(url,_href=url) or NONE
table.url.requires = [IS_NULL_OR(IS_URL()), IS_NULL_OR(IS_NOT_ONE_OF(db, "%s.url" % tablename))]
table.person_id.comment = shn_person_comment(T("Author"), T("The Author of this Document (optional)"))
table.location_id.readable = table.location_id.writable = False
table.entered.comment = DIV( _class="tooltip",
_title="%s|%s" % (T("Entered"),
T("Has data from this Reference Document been entered into Sahana?")))
# -----------------------------------------------------------------------------
def document_represent(id):
if not id:
return NONE
represent = shn_get_db_field_value(db = db,
table = "doc_document",
field = "name",
look_up = id)
#File
#Website
#Person
return A(represent,
_href = URL(r=request, c="doc", f="document", args = [id], extension = ""),
_target = "blank"
)
DOCUMENT = T("Reference Document")
ADD_DOCUMENT = T("Add Reference Document")
document_comment = DIV( A( ADD_DOCUMENT,
_class="colorbox",
_href=URL(r=request, c="doc", f="document", args="create", vars=dict(format="popup")),
_target="top",
_title=T("If you need to add a new document then you can click here to attach one."),
),
DIV( _class="tooltip",
_title="%s|%s" % (DOCUMENT,
T("A Reference Document such as a file, URL or contact person to verify this data. You can type the 1st few characters of the document name to link to an existing document.")),
#T("Add a Reference Document such as a file, URL or contact person to verify this data. If you do not enter a Reference Document, your email will be displayed instead."),
),
#SPAN( I( T("If you do not enter a Reference Document, your email will be displayed to allow this data to be verified.") ),
# _style = "color:red"
# )
)
# CRUD Strings
LIST_DOCUMENTS = T("List Documents")
s3.crud_strings[tablename] = Storage(
title_create = ADD_DOCUMENT,
title_display = T("Document Details"),
title_list = LIST_DOCUMENTS,
title_update = T("Edit Document"),
title_search = T("Search Documents"),
subtitle_create = T("Add New Document"),
subtitle_list = DOCUMENT,
label_list_button = LIST_DOCUMENTS,
label_create_button = ADD_DOCUMENT,
label_delete_button = T("Delete Document"),
msg_record_created = T("Document added"),
msg_record_modified = T("Document updated"),
msg_record_deleted = T("Document deleted"),
msg_list_empty = T("No Documents found"))
document_id = S3ReusableField("document_id",
table,
requires = IS_NULL_OR(IS_ONE_OF(db, "doc_document.id",
document_represent,
orderby="doc_document.name")),
represent = document_represent,
label = DOCUMENT,
comment = document_comment,
ondelete = "RESTRICT",
widget = S3AutocompleteWidget(request, module, resourcename)
)
def document_onvalidation(form):
import cgi
table = db.doc_document
doc = form.vars.file
url = form.vars.url
if not hasattr(doc, "file"):
id = request.post_vars.id
if id:
record = db(table.id == id).select(table.file, limitby=(0, 1)).first()
if record:
doc = record.file
if not hasattr(doc, "file") and not doc and not url:
form.errors.file = \
form.errors.url = T("Either file upload or document URL required.")
if isinstance(doc, cgi.FieldStorage) and doc.filename:
f = doc.file
form.vars.checksum = docChecksum(f.read())
f.seek(0)
if form.vars.checksum is not None:
result = db(table.checksum == form.vars.checksum).select(table.name,
limitby=(0, 1)).first()
if result:
doc_name = result.name
form.errors["file"] = "%s %s" % (T("This file already exists on the server as"),
doc_name)
return
s3xrc.model.configure(table,
mark_required=["file"],
onvalidation=document_onvalidation)
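# Hedged sketch (assumption, illustrative only): `docChecksum`, used in
# document_onvalidation above, is provided elsewhere in the application. An
# equivalent helper could simply hash the uploaded bytes; it is shown here
# under a different name so it does not shadow the real implementation.
import hashlib
def _example_doc_checksum(data):
    # Hex digest of the raw file contents, used to detect duplicate uploads.
    return hashlib.md5(data).hexdigest()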
#==============================================================================
resourcename = "image"
tablename = "doc_image"
table = db.define_table(tablename,
Field("name", length=128, notnull=True, unique=True),
Field("image", "upload", autodelete=True),
# Web2Py r2867+ includes this functionality by default
#Field("image", "upload", autodelete=True, widget=S3UploadWidget.widget),
Field("url"),
person_id(),
organisation_id(),
location_id(),
Field("date", "date"),
comments(),
Field("checksum", readable=False, writable=False),
migrate=migrate, *s3_meta_fields())
table.name.requires = [IS_NOT_EMPTY(), IS_NOT_ONE_OF(db, "%s.name" % tablename)]
table.name.label = T("Name")
table.url.requires = IS_NULL_OR(IS_URL())
table.url.label = T("URL")
table.person_id.label = T("Person")
# upload folder needs to be visible to the download() function as well as the upload
table.image.uploadfolder = os.path.join(request.folder, "uploads/images")
IMAGE_EXTENSIONS = ["png", "PNG", "jpg", "JPG", "jpeg", "JPEG", "gif", "GIF", "tif", "TIF", "tiff", "TIFF", "bmp", "BMP", "raw", "RAW"]
table.image.requires = IS_IMAGE(extensions=IMAGE_EXTENSIONS)
#table.image.requires = IS_EMPTY_OR(IS_IMAGE(extensions=(IMAGE_EXTENSIONS)))
table.image.represent = lambda image: image and \
DIV(A(IMG(_src=URL(r=request, c="default", f="download", args=image),
_height=60,
_alt=T("View Image")),
_href=URL(r=request, c="default", f="download", args=image))) or \
T("No Image")
ADD_IMAGE = T("Add Photo")
image_id = S3ReusableField("image_id", db.doc_image,
requires = IS_NULL_OR(IS_ONE_OF(db, "doc_image.id", "%(name)s")),
represent = lambda id: (id and [DIV(A(IMG(_src=URL(r=request, c="default", f="download", args=db(db.doc_image.id == id).select(db.doc_image.image,
limitby=(0, 1)).first().image),
_height=40),
_class="zoom",
_href="#zoom-media_image-%s" % id),
DIV(IMG(_src=URL(r=request, c="default", f="download", args=db(db.doc_image.id == id).select(db.doc_image.image,
limitby=(0, 1)).first().image),
_width=600),
_id="zoom-media_image-%s" % id,
_class="hidden"))] or [""])[0],
label = T("Image"),
comment = DIV(A(ADD_IMAGE,
_class="colorbox",
_href=URL(r=request, c="doc", f="image", args="create", vars=dict(format="popup")),
_target="top",
_title=ADD_IMAGE),
DIV( _class="tooltip",
_title="%s|%s" % (ADD_IMAGE,
T("Upload an image, such as a photo")))),
ondelete = "RESTRICT"
)
# CRUD Strings
LIST_IMAGES = T("List Photos")
s3.crud_strings[tablename] = Storage(
title_create = ADD_IMAGE,
title_display = T("Photo Details"),
title_list = LIST_IMAGES,
title_update = T("Edit Photo"),
title_search = T("Search Photos"),
subtitle_create = T("Add New Photo"),
subtitle_list = T("Photo"),
label_list_button = LIST_IMAGES,
label_create_button = ADD_IMAGE,
label_delete_button = T("Delete Photo"),
msg_record_created = T("Photo added"),
msg_record_modified = T("Photo updated"),
msg_record_deleted = T("Photo deleted"),
msg_list_empty = T("No Photos found"))
def image_onvalidation(form):
import cgi
table = db.doc_image
img = form.vars.image
if not hasattr(img, "file"):
id = request.post_vars.id
if id:
record = db(table.id == id).select(table.image,
limitby=(0, 1)).first()
if record:
img = record.image
if isinstance(img, cgi.FieldStorage) and img.filename:
f = img.file
form.vars.checksum = docChecksum(f.read())
f.seek(0)
if form.vars.checksum is not None:
result = db(table.checksum == form.vars.checksum).select(table.name,
limitby=(0, 1)).first()
if result:
image_name = result.name
form.errors["image"] = "%s %s" % (T("This file already exists on the server as"), image_name)
return
s3xrc.model.configure(table,
onvalidation=image_onvalidation)
#==============================================================================
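# Hedged usage note (assumption, not taken from this file): reusable fields
# such as document_id and image_id defined above are normally attached to
# another resource's table by calling them inside db.define_table, e.g.
#     db.define_table("example_resource",
#                     Field("name"),
#                     document_id(),
#                     image_id(),
#                     migrate=migrate, *s3_meta_fields())
# giving that resource a foreign key into the Document Library.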
|
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Part of the Keras training engine related to plain array data.
"""
# pylint: disable=protected-access
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
import numpy as np
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.data.ops import iterator_ops
from tensorflow.python.eager import context
from tensorflow.python.framework import errors
from tensorflow.python.keras import backend as K
from tensorflow.python.keras import callbacks as cbks
from tensorflow.python.keras.distribute import distributed_training_utils
from tensorflow.python.keras.engine import training_utils
from tensorflow.python.keras.utils.generic_utils import make_batches
from tensorflow.python.keras.utils.generic_utils import slice_arrays
from tensorflow.python.keras.utils.mode_keys import ModeKeys
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.util import nest
try:
from scipy.sparse import issparse # pylint: disable=g-import-not-at-top
except ImportError:
issparse = None
def model_iteration(model,
inputs,
targets=None,
sample_weights=None,
batch_size=None,
epochs=1,
verbose=1,
callbacks=None,
val_inputs=None,
val_targets=None,
val_sample_weights=None,
shuffle=True,
initial_epoch=0,
steps_per_epoch=None,
validation_steps=None,
validation_freq=1,
mode=ModeKeys.TRAIN,
validation_in_fit=False,
prepared_feed_values_from_dataset=False,
steps_name='steps',
**kwargs):
"""Loop function for arrays of data with modes TRAIN/TEST/PREDICT.
Arguments:
model: Keras Model instance.
inputs: Either a list or dictionary of arrays, or a dataset instance.
targets: List/dictionary of target arrays.
sample_weights: Optional list of sample weight arrays.
batch_size: Integer batch size or None if unknown.
epochs: Number of times to iterate over the data
verbose: 0, 1, or 2. Verbosity mode.
0 = silent, 1 = progress bar, 2 = one line per epoch.
Note that the progress bar is not particularly useful when
logged to a file, so verbose=2 is recommended when not running
interactively (eg, in a production environment).
callbacks: List of callbacks to be called during training
val_inputs: Either a list or dictionary of arrays, or a dataset instance.
val_targets: List/dictionary of target arrays.
val_sample_weights: Optional list of sample weight arrays.
shuffle: Whether to shuffle the data at the beginning of each epoch.
initial_epoch: Epoch at which to start training (useful for resuming a
previous training run)
steps_per_epoch: Total number of steps (batches of samples) before
declaring one epoch finished and starting the next epoch. Ignored with
the default value of `None`.
validation_steps: Number of steps to run validation for (only if doing
validation from data tensors). Ignored with the default value of
`None`.
validation_freq: Only relevant if validation data is provided. Integer or
`collections_abc.Container` instance (e.g. list, tuple, etc.). If an
integer, specifies how many training epochs to run before a new
validation run is performed, e.g. `validation_freq=2` runs
validation every 2 epochs. If a Container, specifies the epochs on
which to run validation, e.g. `validation_freq=[1, 2, 10]` runs
validation at the end of the 1st, 2nd, and 10th epochs.
mode: One of ModeKeys.TRAIN/ModeKeys.TEST/ModeKeys.PREDICT.
validation_in_fit: if true, then this method is invoked from within a
training iteration (for validation). In the case where `val_inputs` is
a dataset, this flag indicates that its iterator and feed values are
already created, so those resources should be reused.
prepared_feed_values_from_dataset: if True, `inputs` is a list of feed
tensors returned from `_prepare_feed_values` call on the validation
dataset, so do not call it again on `inputs`. Should only be used for
inline validation (i.e., only if `validation_in_fit` is also True).
steps_name: The string name of the steps argument, either `steps`,
`validation_steps`, or `steps_per_epoch`. Only used for error message
formatting.
**kwargs: Additional arguments for backwards compatibility.
Returns:
- In TRAIN mode: `History` object.
- In TEST mode: Evaluation metrics.
- In PREDICT mode: Outputs of the Model called on inputs.
Raises:
ValueError: in case of invalid arguments.
"""
# Backwards compatibility.
if 'steps' in kwargs:
steps_per_epoch = kwargs.pop('steps')
if kwargs:
raise TypeError('Unknown arguments: %s' % (kwargs,))
# In case we were passed a dataset, we extract symbolic tensors from it.
reset_dataset_after_each_epoch = False
input_iterator = None
is_dataset = isinstance(inputs,
(dataset_ops.DatasetV1, dataset_ops.DatasetV2))
# TODO(fchollet): consider moving `steps_per_epoch` inference to
# _standardize_user_data and set reset_dataset_after_each_epoch as an
# attribute on the dataset instance.
if is_dataset:
if steps_per_epoch is None:
reset_dataset_after_each_epoch = True
steps_per_epoch = training_utils.infer_steps_for_dataset(
model, inputs, steps_per_epoch, epochs=epochs, steps_name=steps_name)
input_iterator = _get_iterator(inputs, model._distribution_strategy)
# Enter tf.distribute.Strategy scope.
if model._distribution_strategy:
scope = distributed_training_utils.distributed_scope(
strategy=model._distribution_strategy,
learning_phase=(1 if mode == ModeKeys.TRAIN else 0))
scope.__enter__()
use_steps = is_dataset or steps_per_epoch is not None
do_validation = val_inputs is not None
# Convert Eager Tensors to NumPy arrays to support batching/shuffling.
inputs, targets, sample_weights = training_utils. \
convert_eager_tensors_to_numpy((inputs, targets, sample_weights))
# Prepare input data.
inputs = input_iterator or inputs
if validation_in_fit and prepared_feed_values_from_dataset:
# When invoking validation in the training loop, avoid creating the iterator
# and the list of feed values for the same validation dataset multiple times
# (which would essentially call `iterator.get_next()` repeatedly, slowing down
# execution and eventually leading to OOM errors).
ins = inputs
else:
ins = _prepare_feed_values(model, inputs, targets, sample_weights, mode)
# `ins` is a function when a distribute strategy is used in Eager mode. In
# that case `is_dataset` is True. The code branches that have requirements
# about the type of `ins` do not trigger in the distributed case.
if not is_dataset:
num_samples_or_steps = _get_num_samples_or_steps(ins, batch_size,
steps_per_epoch)
else:
num_samples_or_steps = steps_per_epoch
# Update sample_weight_mode of the model if sample_weights is specified by the
# user. We need to call this function after we have a handle on the inputs
# (both numpy arrays and datasets) in order to determine if the user has
# specified sample_weights.
_update_sample_weight_mode(model, mode, ins)
# Get step function and loop type. As part of building the execution
# function we recompile the metrics based on the updated
# sample_weight_mode value.
f = _make_execution_function(model, mode)
# Prepare validation data. Hold references to the iterator and the input list
# to properly reinitialize and reuse in multiple validation passes.
val_iterator = None
if isinstance(val_inputs, (dataset_ops.DatasetV1, dataset_ops.DatasetV2)):
if validation_steps is None:
# Because we pass an iterator feed instead of a Dataset to the eval
# model_iteration() call, it will not trigger the dataset-input path
# that determines the number of steps required. To avoid this issue,
# set validation_steps here if validation_steps is None.
validation_steps = training_utils.infer_steps_for_dataset(
model,
val_inputs,
validation_steps,
epochs=epochs,
steps_name='validation_steps')
val_iterator = _get_iterator(val_inputs, model._distribution_strategy)
val_inputs = _prepare_feed_values(
model, val_iterator, val_targets, val_sample_weights, ModeKeys.TEST)
# Get num steps for printing.
val_samples_or_steps = validation_steps
else:
# Get num samples for printing.
val_samples_or_steps = val_inputs and nest.flatten(
val_inputs)[0].shape[0] or None
if mode == ModeKeys.TRAIN and verbose:
_print_train_info(num_samples_or_steps, val_samples_or_steps, is_dataset)
# Configure callbacks.
count_mode = 'steps' if use_steps else 'samples'
callbacks = cbks.configure_callbacks(
callbacks,
model,
do_validation=do_validation,
batch_size=batch_size,
epochs=epochs,
steps_per_epoch=steps_per_epoch,
samples=num_samples_or_steps,
verbose=0, # Handle ProgBarLogger separately in this loop.
mode=mode)
# TODO(omalleyt): Handle ProgBar as part of Callbacks once hooks are ready.
progbar = training_utils.get_progbar(
model, count_mode, mode != ModeKeys.PREDICT)
progbar.params = callbacks.params
progbar.params['verbose'] = verbose
# Find beforehand arrays that need sparse-to-dense conversion.
if issparse is not None and not use_steps:
indices_for_conversion_to_dense = []
feed = _get_model_feed(model, mode)
for i, (input_data, feed_tensor) in enumerate(zip(ins, feed)):
if issparse(input_data) and not K.is_sparse(feed_tensor):
indices_for_conversion_to_dense.append(i)
# Select aggregation method.
if mode == ModeKeys.PREDICT:
aggregator = training_utils.OutputsAggregator(
use_steps,
num_samples=None if steps_per_epoch else num_samples_or_steps,
steps=steps_per_epoch)
else:
aggregator = training_utils.MetricsAggregator(
use_steps,
num_samples=None if steps_per_epoch else num_samples_or_steps,
steps=steps_per_epoch)
if model._compile_distribution:
distributed_training_utils._copy_weights_to_distributed_model(model, mode)
callbacks.model.stop_training = False
callbacks._call_begin_hook(mode)
progbar.on_train_begin()
initial_epoch = model._maybe_load_initial_epoch_from_ckpt(initial_epoch, mode)
for epoch in range(initial_epoch, epochs):
if callbacks.model.stop_training:
break
# Setup work for each epoch
epoch_logs = {}
if mode != ModeKeys.PREDICT:
# Collecting and resetting metrics has non-zero cost and will needlessly
# slow down model.predict.
model.reset_metrics()
if mode == ModeKeys.TRAIN:
callbacks.on_epoch_begin(epoch, epoch_logs)
progbar.on_epoch_begin(epoch, epoch_logs)
if use_steps:
# Step-wise loop.
if steps_per_epoch is None:
# Loop over dataset until `OutOfRangeError` is raised.
target_steps = np.inf
else:
# Loop over dataset for the specified number of steps.
target_steps = steps_per_epoch
step = 0
while step < target_steps:
batch_logs = {'batch': step, 'size': 1}
callbacks._call_batch_hook(mode, 'begin', step, batch_logs)
progbar.on_batch_begin(step, batch_logs)
# Get outputs.
try:
# `ins` can be callable in tf.distribute.Strategy + eager case.
if not callable(ins) or (
model._distribution_strategy and
not distributed_training_utils.is_distributing_by_cloning(model)):
actual_inputs = ins
else:
actual_inputs = ins()
batch_outs = f(actual_inputs)
except errors.OutOfRangeError:
if is_dataset:
# The dataset passed by the user ran out of batches.
# Now we know the cardinality of the dataset.
# If steps_per_epoch was specified, then running out of data is
# unexpected, so we stop training and inform the user.
if steps_per_epoch:
callbacks.model.stop_training = True
logging.warning(
'Your dataset ran out of data; interrupting training. '
'Make sure that your dataset can generate at least '
'`%s * epochs` batches (in this case, %d batches). '
'You may need to use the repeat() function when '
'building your dataset.'
% (steps_name, steps_per_epoch * epochs))
elif step > 0:
steps_per_epoch = step
aggregator.steps = steps_per_epoch
if mode == ModeKeys.TRAIN:
progbar.params['steps'] = steps_per_epoch
progbar.progbar.target = steps_per_epoch
else:
# We ran out of batches while the user passed an iterator (legacy).
callbacks.model.stop_training = True
logging.warning(
'Your dataset iterator ran out of data; '
'interrupting training. Make sure that your iterator '
'can generate at least `%s * epochs` '
'batches (in this case, %d batches). You may need to '
'use the repeat() function when building your '
'dataset.' % (steps_name, steps_per_epoch * epochs))
break
if not isinstance(batch_outs, list):
batch_outs = [batch_outs]
if model._distribution_strategy:
batch_outs = distributed_training_utils._per_replica_aggregate_batch(
model._distribution_strategy, batch_outs, model, mode)
# Aggregate results.
if step == 0:
aggregator.create(batch_outs)
aggregator.aggregate(batch_outs)
# Callbacks batch end.
batch_logs = cbks.make_logs(model, batch_logs, batch_outs, mode)
callbacks._call_batch_hook(mode, 'end', step, batch_logs)
progbar.on_batch_end(step, batch_logs)
step += 1
if callbacks.model.stop_training:
break
else:
# Sample-wise loop.
index_array = np.arange(num_samples_or_steps)
if shuffle == 'batch':
index_array = training_utils.batch_shuffle(index_array, batch_size)
elif shuffle:
np.random.shuffle(index_array)
batches = make_batches(num_samples_or_steps, batch_size)
for batch_index, (batch_start, batch_end) in enumerate(batches):
batch_ids = index_array[batch_start:batch_end]
# Slice into a batch.
if len(batches) == 1:
# If we only have one batch, do not slice. This takes care of
# composite tensors in non-Dataset modes; we currently don't support
# slicing them.
# TODO(b/133517906): Add slicing support.
ins_batch = ins
else:
try:
if ins and isinstance(ins[-1], int):
# Do not slice the training phase flag.
ins_batch = slice_arrays(ins[:-1], batch_ids) + [ins[-1]]
else:
ins_batch = slice_arrays(ins, batch_ids)
except TypeError:
raise TypeError('TypeError while preparing batch. '
'If using HDF5 input data, '
'pass shuffle="batch".')
# Sparse to dense conversion.
if issparse is not None:
for i in indices_for_conversion_to_dense:
ins_batch[i] = ins_batch[i].toarray()
# Callbacks batch_begin.
batch_logs = {'batch': batch_index, 'size': len(batch_ids)}
callbacks._call_batch_hook(mode, 'begin', batch_index, batch_logs)
progbar.on_batch_begin(batch_index, batch_logs)
# Get outputs.
batch_outs = f(ins_batch)
if not isinstance(batch_outs, list):
batch_outs = [batch_outs]
# Aggregate results.
if batch_index == 0:
aggregator.create(batch_outs)
aggregator.aggregate(batch_outs, batch_start, batch_end)
# Callbacks batch end.
batch_logs = cbks.make_logs(model, batch_logs, batch_outs, mode)
callbacks._call_batch_hook(mode, 'end', batch_index, batch_logs)
progbar.on_batch_end(batch_index, batch_logs)
if callbacks.model.stop_training:
break
aggregator.finalize()
results = aggregator.results
epoch_logs = cbks.make_logs(model, epoch_logs, results, mode)
if len(results) == 1:
results = results[0]
# Run the test loop every `validation_freq` epochs during training.
if (do_validation and
training_utils.should_run_validation(validation_freq, epoch) and
not callbacks.model.stop_training):
if model._compile_distribution:
# Since we create a new clone from the original model we need to copy
# the weights back to the original model before we can run validation.
distributed_training_utils._copy_weights_to_original_model(
model, ModeKeys.TRAIN)
val_results = model_iteration(
model,
val_inputs,
targets=val_targets,
sample_weights=val_sample_weights,
batch_size=batch_size,
steps_per_epoch=validation_steps,
callbacks=callbacks,
verbose=0,
mode=ModeKeys.TEST,
validation_in_fit=True,
prepared_feed_values_from_dataset=(val_iterator is not None),
steps_name='validation_steps')
if not isinstance(val_results, list):
val_results = [val_results]
epoch_logs = cbks.make_logs(
model, epoch_logs, val_results, mode, prefix='val_')
if val_iterator and epoch < epochs - 1:
_reinitialize_iterator(val_iterator, model._distribution_strategy)
if mode == ModeKeys.TRAIN:
# Epochs only apply to `fit`.
callbacks.on_epoch_end(epoch, epoch_logs)
progbar.on_epoch_end(epoch, epoch_logs)
# Reinitialize dataset iterator for the next epoch.
if reset_dataset_after_each_epoch and epoch < epochs - 1:
_reinitialize_iterator(input_iterator, model._distribution_strategy)
model._successful_loop_finish = True
callbacks._call_end_hook(mode)
if model._distribution_strategy:
if model._compile_distribution:
# TODO(priyag, psv): Copy back metrics to the original model as well?
distributed_training_utils._copy_weights_to_original_model(model, mode)
scope.__exit__(None, None, None)
if mode == ModeKeys.TRAIN:
return model.history
return results
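# Hedged illustration (assumption): the `validation_freq` semantics described
# in the docstring above, mirrored in plain Python. This is not the actual
# `training_utils.should_run_validation` helper, just a sketch of the rule.
def _example_should_run_validation(validation_freq, epoch):
    one_indexed_epoch = epoch + 1
    if isinstance(validation_freq, int):
        # Integer: validate every `validation_freq` epochs.
        return one_indexed_epoch % validation_freq == 0
    # Container (e.g. list or tuple): validate on the listed 1-based epochs.
    return one_indexed_epoch in validation_freq
# e.g. _example_should_run_validation(2, epoch=1) is True (the 2nd epoch) and
#      _example_should_run_validation([1, 2, 10], epoch=4) is False.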
def _get_model_feed(model, mode):
if mode == ModeKeys.PREDICT:
feed = model._feed_inputs
else:
feed = (
model._feed_inputs + model._feed_targets + model._feed_sample_weights)
return feed
def _print_train_info(num_samples_or_steps, val_samples_or_steps, is_dataset):
increment = 'steps' if is_dataset else 'samples'
msg = 'Train on {0} {increment}'.format(
num_samples_or_steps, increment=increment)
if val_samples_or_steps:
msg += ', validate on {0} {increment}'.format(
val_samples_or_steps, increment=increment)
print(msg)
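# For example, _print_train_info(60000, 10000, is_dataset=False) prints
# "Train on 60000 samples, validate on 10000 samples".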
def _get_num_samples_or_steps(ins, batch_size, steps_per_epoch):
"""Returns total number of samples (when training in batch mode) or steps."""
if steps_per_epoch:
return steps_per_epoch
return training_utils.check_num_samples(ins, batch_size, steps_per_epoch,
'steps_per_epoch')
def _prepare_feed_values(model, inputs, targets, sample_weights, mode):
"""Prepare feed values to the model execution function.
Arguments:
model: Model to prepare feed values for.
inputs: List or dict of model inputs.
targets: Optional list of model targets.
sample_weights: Optional list of sample weight arrays.
mode: One of ModeKeys.TRAIN/ModeKeys.TEST/ModeKeys.PREDICT.
Returns:
Feed values for the model in the given mode.
"""
if model._distribution_strategy:
if isinstance(inputs, (dataset_ops.DatasetV1, dataset_ops.DatasetV2)):
inputs = distributed_training_utils.get_iterator(
inputs, model._distribution_strategy)
def get_distributed_inputs():
return distributed_training_utils._prepare_feed_values(
model, inputs, targets, sample_weights, mode)
# In the eager case, we want to call the input method per step, so return
# a lambda from here that can be called. Note that this is applicable only
# in Distribution Strategy case as it follows the same code path for both
# eager and graph modes.
# TODO(priyag,omalleyt): Either we should move the training DS with
# OwnedIterator to use training_generator code path, or figure out how to
# set a symbolic Iterator out of a Dataset when in eager mode.
if context.executing_eagerly():
return get_distributed_inputs
else:
return get_distributed_inputs()
if isinstance(inputs, (dataset_ops.DatasetV1, dataset_ops.DatasetV2,
iterator_ops.Iterator)):
inputs, targets, sample_weights = model._standardize_user_data(
inputs,
extract_tensors_from_dataset=True)
inputs = training_utils.ModelInputs(inputs).as_list()
targets = list(targets or [])
sample_weights = list(sample_weights or [])
ins = inputs + targets + sample_weights
if mode == ModeKeys.TRAIN and not isinstance(K.symbolic_learning_phase(),
int):
ins += [True] # Add learning phase value.
return ins
def _get_iterator(inputs, distribution_strategy=None):
if distribution_strategy:
return distributed_training_utils.get_iterator(
inputs, distribution_strategy)
return training_utils.get_iterator(inputs)
def _reinitialize_iterator(iterator, distribution_strategy=None):
if distribution_strategy:
distributed_training_utils.initialize_iterator(
iterator, distribution_strategy)
else:
training_utils.initialize_iterator(iterator)
def _make_execution_function(model, mode):
"""Makes function to run one step of model execution."""
if model._distribution_strategy:
return distributed_training_utils._make_execution_function(model, mode)
return model._make_execution_function(mode)
def _update_sample_weight_mode(model, mode, inputs):
"""Updates the sample_weight_mode of a given model."""
# Add a quick return to prevent us from calling model._feed_targets that
# accesses certain model properties that may not be set in the `PREDICT` mode.
if mode == ModeKeys.PREDICT:
return
sample_weights = None
# `inputs` is the model's inputs + targets + sample_weights +
# learning phase placeholder if specified. To update the sample_weight_mode
# we need to determine if the user has passed sample weights as part of the
# input.
if not callable(inputs):
sample_weights = inputs[len(model._feed_inputs) + len(model._feed_targets):]
has_learning_phase_pl = (mode == ModeKeys.TRAIN and
not isinstance(K.symbolic_learning_phase(), int))
if has_learning_phase_pl:
sample_weights = sample_weights[:-1]
model._update_sample_weight_modes(sample_weights=sample_weights)
# Call the DistributionStrategy specific function to update the
# sample_weight_mode on the model.
if model._distribution_strategy:
distributed_training_utils._update_sample_weight_modes(model, mode,
sample_weights)
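# Note: the slice above relies on the feed layout produced by
# _prepare_feed_values -- inputs + targets + sample_weights, with an optional
# trailing learning-phase flag in TRAIN mode -- so everything after the inputs
# and targets is sample weights once that flag is stripped.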
# For backwards compatibility for internal users of these loops.
fit_loop = functools.partial(model_iteration, mode=ModeKeys.TRAIN)
test_loop = functools.partial(
model_iteration, mode=ModeKeys.TEST, shuffle=False)
predict_loop = functools.partial(
model_iteration, mode=ModeKeys.PREDICT, shuffle=False)
class ArrayLikeTrainingLoop(training_utils.TrainingLoop):
"""TrainingLoop that handle inputs like array.
This is the default handler for most of the input data types, includes
symbolic tensors or Numpy array-like, Datasets and iterators in graph mode
(since they generate symbolic tensors). This Function is used to handle model
with `run_eagerly` = False.
"""
def fit(self,
model,
x=None,
y=None,
batch_size=None,
epochs=1,
verbose=1,
callbacks=None,
validation_split=0.,
validation_data=None,
shuffle=True,
class_weight=None,
sample_weight=None,
initial_epoch=0,
steps_per_epoch=None,
validation_steps=None,
validation_freq=1,
**kwargs):
batch_size = model._validate_or_infer_batch_size(batch_size,
steps_per_epoch, x)
x, y, sample_weights = model._standardize_user_data(
x,
y,
sample_weight=sample_weight,
class_weight=class_weight,
batch_size=batch_size,
check_steps=True,
steps_name='steps_per_epoch',
steps=steps_per_epoch,
validation_split=validation_split,
shuffle=shuffle)
if validation_data:
val_x, val_y, val_sample_weights = model._prepare_validation_data(
validation_data, batch_size, validation_steps)
elif validation_split and 0. < validation_split < 1.:
(x, y, sample_weights, val_x, val_y,
val_sample_weights) = training_utils.split_training_and_validation_data(
x, y, sample_weights, validation_split)
else:
if validation_steps:
raise ValueError('`validation_steps` should not be specified if '
'`validation_data` is None.')
val_x, val_y, val_sample_weights = None, None, None
return fit_loop(
model,
inputs=x,
targets=y,
sample_weights=sample_weights,
batch_size=batch_size,
epochs=epochs,
verbose=verbose,
callbacks=callbacks,
val_inputs=val_x,
val_targets=val_y,
val_sample_weights=val_sample_weights,
shuffle=shuffle,
initial_epoch=initial_epoch,
steps_per_epoch=steps_per_epoch,
validation_steps=validation_steps,
validation_freq=validation_freq,
steps_name='steps_per_epoch')
def evaluate(self,
model,
x=None,
y=None,
batch_size=None,
verbose=1,
sample_weight=None,
steps=None,
callbacks=None,
**kwargs):
batch_size = model._validate_or_infer_batch_size(batch_size, steps, x)
x, y, sample_weights = model._standardize_user_data(
x,
y,
sample_weight=sample_weight,
batch_size=batch_size,
check_steps=True,
steps_name='steps',
steps=steps)
return test_loop(
model,
inputs=x,
targets=y,
sample_weights=sample_weights,
batch_size=batch_size,
verbose=verbose,
steps=steps,
callbacks=callbacks)
def predict(self,
model,
x,
batch_size=None,
verbose=0,
steps=None,
callbacks=None,
**kwargs):
batch_size = model._validate_or_infer_batch_size(batch_size, steps, x)
x, _, _ = model._standardize_user_data(
x, check_steps=True, steps_name='steps', steps=steps)
return predict_loop(
model,
x,
batch_size=batch_size,
verbose=verbose,
steps=steps,
callbacks=callbacks)
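# Hedged usage sketch (assumption; Keras normally selects the TrainingLoop
# implementation internally rather than exposing it to users):
#     loop = ArrayLikeTrainingLoop()
#     history = loop.fit(model, x=x_train, y=y_train,
#                        batch_size=32, epochs=2, validation_split=0.1)
# where `model`, `x_train` and `y_train` are supplied by the caller.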
|
|
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests that the system configuration methods work properly."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.eager import context
from tensorflow.python.framework import config
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.platform import test
def reset_eager(fn):
def wrapper(*args, **kwargs):
try:
return fn(*args, **kwargs)
finally:
del context._context
context._context = context.Context()
ops.enable_eager_execution()
return wrapper
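# Note: the decorator above tears down and rebuilds the global eager context
# after each wrapped test, so configuration calls that are only legal before
# the context is initialized do not leak between test methods.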
class ConfigTest(test.TestCase):
@test_util.run_gpu_only
@reset_eager
def testDevicePolicy(self):
self.assertEqual(context.DEVICE_PLACEMENT_SILENT,
context.context().device_policy)
# If no op has been executed we should be able to set the device policy as
# well as any init-time configs.
config.set_intra_op_parallelism_threads(1)
config.set_device_policy('silent')
config.set_intra_op_parallelism_threads(2)
# Execute a dummy op to ensure that the context has been initialized
constant_op.constant(1)
def copy_tensor(dtype=dtypes.int32):
cpu_tensor = constant_op.constant(1, dtype=dtype)
gpu_tensor = cpu_tensor.gpu()
self.assertAllEqual(cpu_tensor + gpu_tensor, 2.0)
config.set_device_policy('silent')
self.assertEqual(config.get_device_policy(), 'silent')
self.assertEqual(context.DEVICE_PLACEMENT_SILENT,
context.context().device_policy)
copy_tensor()
config.set_device_policy('silent_for_int32')
self.assertEqual(config.get_device_policy(), 'silent_for_int32')
self.assertEqual(context.DEVICE_PLACEMENT_SILENT_FOR_INT32,
context.context().device_policy)
with self.assertRaisesRegexp(errors.InvalidArgumentError,
'Tensors on conflicting devices'):
copy_tensor(dtypes.float32)
copy_tensor()
config.set_device_policy('warn')
self.assertEqual(config.get_device_policy(), 'warn')
self.assertEqual(context.DEVICE_PLACEMENT_WARN,
context.context().device_policy)
copy_tensor()
config.set_device_policy('explicit')
self.assertEqual(config.get_device_policy(), 'explicit')
self.assertEqual(context.DEVICE_PLACEMENT_EXPLICIT,
context.context().device_policy)
with self.assertRaisesRegexp(errors.InvalidArgumentError,
'Tensors on conflicting devices'):
copy_tensor()
config.set_device_policy(None)
self.assertEqual(config.get_device_policy(), 'silent')
@reset_eager
def testExecutionMode(self):
self.assertTrue(config.get_synchronous_execution())
self.assertEqual(context.SYNC, context.context().execution_mode)
# If no op has been executed we should be able to set the execution mode as
# well as any init-time configs.
config.set_intra_op_parallelism_threads(1)
config.set_synchronous_execution(False)
config.set_intra_op_parallelism_threads(2)
config.set_synchronous_execution(True)
self.assertTrue(config.get_synchronous_execution())
self.assertEqual(context.SYNC, context.context().execution_mode)
config.set_synchronous_execution(False)
self.assertFalse(config.get_synchronous_execution())
self.assertEqual(context.ASYNC, context.context().execution_mode)
@reset_eager
def testGpuPerProcessMemoryFraction(self):
config.set_gpu_per_process_memory_fraction(0.5)
self.assertEqual(
config.get_gpu_per_process_memory_fraction(),
context.context().gpu_per_process_memory_fraction)
constant_op.constant(1)
with self.assertRaises(RuntimeError):
config.set_gpu_per_process_memory_fraction(0.5)
@reset_eager
def testGpuPerProcessMemoryGrowth(self):
self.assertFalse(config.get_gpu_per_process_memory_growth())
config.set_gpu_per_process_memory_growth(True)
self.assertTrue(config.get_gpu_per_process_memory_growth())
self.assertEqual(
config.get_gpu_per_process_memory_growth(),
context.context().gpu_per_process_memory_growth)
config.set_gpu_per_process_memory_growth(False)
self.assertFalse(config.get_gpu_per_process_memory_growth())
self.assertEqual(
config.get_gpu_per_process_memory_growth(),
context.context().gpu_per_process_memory_growth)
constant_op.constant(1)
with self.assertRaises(RuntimeError):
config.set_gpu_per_process_memory_growth(True)
@reset_eager
def testIntraOpParallelismThreads(self):
config.set_intra_op_parallelism_threads(10)
self.assertEqual(
config.get_intra_op_parallelism_threads(),
context.context().intra_op_parallelism_threads)
constant_op.constant(1)
with self.assertRaises(RuntimeError):
config.set_intra_op_parallelism_threads(1)
@reset_eager
def testInterOpParallelismThreads(self):
config.set_inter_op_parallelism_threads(10)
self.assertEqual(
config.get_inter_op_parallelism_threads(),
context.context().inter_op_parallelism_threads)
constant_op.constant(1)
with self.assertRaises(RuntimeError):
config.set_inter_op_parallelism_threads(1)
@reset_eager
def testEnableSoftPlacement(self):
self.assertEqual(config.get_soft_device_placement(), False)
config.set_soft_device_placement(True)
self.assertEqual(config.get_soft_device_placement(), True)
self.assertEqual(
config.get_soft_device_placement(),
context.context().soft_device_placement)
config.set_soft_device_placement(False)
self.assertEqual(config.get_soft_device_placement(), False)
self.assertEqual(
config.get_soft_device_placement(),
context.context().soft_device_placement)
constant_op.constant(1)
with self.assertRaises(RuntimeError):
config.set_soft_device_placement(True)
with self.assertRaises(RuntimeError):
config.set_soft_device_placement(False)
@reset_eager
def testLogDevicePlacement(self):
self.assertEqual(context.get_log_device_placement(), False)
context.set_log_device_placement(True)
self.assertEqual(context.get_log_device_placement(), True)
self.assertEqual(
context.get_log_device_placement(),
context.context().log_device_placement)
context.set_log_device_placement(False)
self.assertEqual(context.get_log_device_placement(), False)
self.assertEqual(
context.get_log_device_placement(),
context.context().log_device_placement)
constant_op.constant(1)
with self.assertRaises(RuntimeError):
context.set_log_device_placement(True)
with self.assertRaises(RuntimeError):
context.set_log_device_placement(False)
if __name__ == '__main__':
ops.enable_eager_execution()
test.main()
|
|
"""
Allow to create a user in a database.
Contain a model class for users and a manager for this model.
:copyright: (c) 2017 by Ol'ha Leskovs'ka
"""
from django.contrib.auth.base_user import BaseUserManager
from django.contrib.auth.models import AbstractBaseUser
from django.contrib.auth.models import PermissionsMixin
from django.core.mail import send_mail
from django.db import models
from django.utils.translation import ugettext_lazy as _
import permissions
class UserManager(BaseUserManager):
"""Manage creation of users in a database."""
def _create_user(self, email, name, password, **extra_fields):
"""Create, save and return a user.
Arguments:
email - user's email
name - user's name
password - user's password
extra_fields - any other fields
Return a User object.
"""
if not email:
raise ValueError('Users must have an email address')
email = self.normalize_email(email)
user = self.model(email=email, name=name, **extra_fields)
user.set_password(password)
user.save(using=self._db)
user.set_permissions(extra_fields.get('role'))
return user
def create_user(self, email, name, password, **extra_fields):
"""Create and save an ordinary user with the given email,
name and password.
Arguments:
email - user's email
name - user's name
password - user's password
extra_fields - any other fields
"""
extra_fields.setdefault('role', User.ROLE_USER)
extra_fields.setdefault('is_superuser', False)
return self._create_user(email, name, password, **extra_fields)
def create_superuser(self, email, name, password, **extra_fields):
"""Create and save a superuser with the given email,
name and password.
Arguments:
email - user's email
name - user's name
password - user's password
extra_fields - any other fields
"""
extra_fields.setdefault('role', User.ROLE_ADMIN)
extra_fields.setdefault('is_superuser', True)
extra_fields.setdefault('is_staff', True)
if extra_fields.get('is_superuser') is not True:
raise ValueError('Superuser must have is_superuser=True.')
return self._create_user(email, name, password, **extra_fields)
class User(AbstractBaseUser, PermissionsMixin):
"""Implement a fully featured User model and handle users data
in DB.
Extend AbstractBaseUser class with such fields as status and
avatar. The email is used as the name when users login.
"""
STATUS_ACTIVE = 0
STATUS_DELETED = 1
STATUS_BANNED = 2
STATUS_UNAUTHORIZED = 3
USER_STATUSES = (
(STATUS_ACTIVE, 'active'),
(STATUS_DELETED, 'deleted'),
(STATUS_BANNED, 'banned'),
(STATUS_UNAUTHORIZED, 'unauthorized'),
)
ROLE_ADMIN = 0
ROLE_MANAGER = 1
ROLE_SUB_MANAGER = 2
ROLE_USER = 3
USER_ROLES = (
(ROLE_ADMIN, 'Admin'),
(ROLE_MANAGER, 'Manager'),
(ROLE_SUB_MANAGER, 'Sub-manager'),
(ROLE_USER, 'User'),
)
name = models.CharField(max_length=50, default='',
help_text=_('50 characters or fewer.'))
email = models.EmailField(_('email address'), unique=True, default='',
error_messages={'unique': _('A user with such '
'email already '
'exists.'), })
password = models.CharField(max_length=128, default='',
help_text=_('Your password cannot be too '
'similar to your other personal '
'information.<br /> Your password'
' must contain at least 8 '
'characters.<br /> Your password '
'cannot be a commonly used '
'password.<br /> Your password '
'cannot be entirely numeric.'))
phone = models.CharField(max_length=12, blank=True, null=True,
unique=True, help_text=_('Use just numbers: '
'380931234567'))
avatar = models.ImageField(upload_to='user_images', blank=True, null=True)
role = models.IntegerField(choices=USER_ROLES,
default=ROLE_USER, null=False)
status = models.IntegerField(choices=USER_STATUSES,
default=STATUS_ACTIVE, null=False)
is_staff = models.BooleanField(default=False,)
is_active = models.BooleanField(default=True, blank=True)
parent = models.ForeignKey("self", null=True, blank=True)
objects = UserManager()
USERNAME_FIELD = 'email'
# A list of the field names that will be prompted for when creating
# a user via the createsuperuser management command
REQUIRED_FIELDS = ['name']
class Meta:
"""Give some options (metadata) attached to the model."""
db_table = 'users'
permissions = (
('read_user', 'Can read information about user'),
)
def set_is_active(self, status):
"""Set is_active according to user's status.
Argument:
status - user's status
"""
if self.role == User.ROLE_ADMIN:
if not self.last_active_admin():
self.is_active = (status == User.STATUS_ACTIVE)
else:
self.is_active = True
self.status = User.STATUS_ACTIVE
else:
self.is_active = (status == User.STATUS_ACTIVE)
def set_is_staff(self, role):
"""Set is_staff according to user's role.
Argument:
role - user's role
"""
self.is_staff = (role != User.ROLE_USER)
def get_short_name(self):
"""Return the user's email"""
# The user is identified by the email address
return self.email
def get_full_name(self):
"""Return the user's name and email"""
return self.name + " " + self.email
def email_to_user(self, subject, message, sender=None, **kwargs):
"""Send an email to the user
Arguments:
subject - theme of the letter
message - message of the email
sender - sender/author of the email
**kwargs - other arguments
"""
send_mail(subject, message, sender, [self.email], **kwargs)
def delete(self, *args, **kwargs):
"""Block the user instead of dropping.
Put is_active into False and change status.
Don't delete the last admin
"""
if self.role == User.ROLE_ADMIN:
if not self.last_active_admin():
self.is_active = False
self.status = User.STATUS_DELETED
self.save()
else:
self.is_active = False
self.status = User.STATUS_DELETED
self.save()
def last_active_admin(self):
"""Return True if it is the last active admin."""
number = User.objects.filter(role=User.ROLE_ADMIN,
is_active=True).count()
return number <= 1
def set_permissions(self, role):
"""Set user_permissions according to user's role.
Argument:
role - user's role
"""
if role == User.ROLE_ADMIN:
for perm in permissions.admin_permissions():
self.user_permissions.add(perm)
elif role == User.ROLE_MANAGER:
for perm in permissions.manager_permissions():
self.user_permissions.add(perm)
elif role == User.ROLE_SUB_MANAGER:
for perm in permissions.sub_manager_permissions():
self.user_permissions.add(perm)
else:
for perm in permissions.user_permissions():
self.user_permissions.add(perm)
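# Hedged usage sketch (assumption; values are illustrative only):
#     admin = User.objects.create_superuser(
#         email='admin@example.com', name='Admin', password='a-strong-pass')
#     user = User.objects.create_user(
#         email='user@example.com', name='Regular User', password='a-strong-pass')
# create_user defaults `role` to ROLE_USER, while create_superuser forces
# ROLE_ADMIN, is_superuser=True and is_staff=True before delegating to
# _create_user, which saves the user and assigns permissions for the role.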
|
|
"""
Tests for the custom DHIS2 integration: the JSON API wrapper, Dhis2Api
helpers, org unit fixtures and the periodic fetch tasks.
"""
from contextlib import contextmanager
from unittest import skip
from corehq.apps.fixtures.models import FixtureDataType, FixtureTypeField
from corehq.apps.receiverwrapper.exceptions import IgnoreDocument
from couchdbkit import ResourceNotFound
from custom.dhis2.const import ORG_UNIT_FIXTURES, REGISTER_CHILD_XMLNS, CASE_TYPE
from custom.dhis2.models import Dhis2OrgUnit, JsonApiRequest, JsonApiError, Dhis2Api, Dhis2ApiQueryError, \
FixtureManager
from custom.dhis2.payload_generators import FormRepeaterDhis2EventPayloadGenerator
from custom.dhis2.tasks import fetch_cases, fetch_org_units
from django.test import TestCase
from django.test.testcases import SimpleTestCase
from mock import patch, Mock, MagicMock
from couchforms.models import XFormInstance
DOMAIN = 'sheel-wvlanka-test'
SETTINGS = {
'dhis2_enabled': False,
'dhis2_host': '',
'dhis2_username': '',
'dhis2_password': '',
'dhis2_top_org_unit_name': None
}
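# Note: with these placeholder values the tests that need a live DHIS2 server
# are skipped -- some check SETTINGS['dhis2_enabled'] or
# SETTINGS['dhis2_top_org_unit_name'] at runtime, others are decorated with
# @skip above.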
@contextmanager
def fixture_type_context():
fixture_type = FixtureDataType(
domain=DOMAIN,
tag='dhis2_org_unit',
fields=[FixtureTypeField(field_name='id', properties=[]),
FixtureTypeField(field_name='name', properties=[])]
)
fixture_type.save()
try:
yield fixture_type
finally:
fixture_type.delete()
@contextmanager
def org_unit_context():
org_unit = Dhis2OrgUnit(id='QXOOG2Foong', name='Somerset West', parent_id=None)
org_unit.save()
try:
yield org_unit
finally:
try:
org_unit.delete()
except ResourceNotFound:
pass
@contextmanager
def response_context():
response_mock = Mock()
response_mock.status_code = 200
response_mock.json.return_value = {'spam': True}
yield response_mock
@contextmanager
def growth_monitoring_forms_context():
forms_data = [
{
'child_first_name': 'Foo',
'dhis2_te_inst_id': '', # Not enrolled
'dhis2_processed': '' # Not processed
},
{
'child_first_name': 'Bar',
'dhis2_te_inst_id': '123', # Enrolled
'dhis2_processed': '' # Not processed
},
{
'child_first_name': 'Baz',
'dhis2_te_inst_id': '456', # Enrolled
'dhis2_processed': '' # Not processed
}
]
forms = []
for data in forms_data:
form = XFormInstance(domain=DOMAIN, form=data)
form.save()
forms.append(form)
yield forms
class JsonApiRequestTest(SimpleTestCase):
def test_json_or_error_returns(self):
"""
JsonApiRequest.json_or_error should return a status code and JSON on success
"""
with response_context() as response_mock:
data = JsonApiRequest.json_or_error(response_mock)
self.assertEqual(data, {'spam': True})
def test_json_or_error_raises_404(self):
"""
JsonApiRequest.json_or_error should raise an error on HTTP status 404
"""
response_mock = Mock()
response_mock.url = 'http://nowhere.example.com'
response_mock.status_code = 404
response_mock.text = 'Where?'
with self.assertRaisesMessage(
JsonApiError,
'API request to http://nowhere.example.com failed with HTTP status 404: Where?'):
JsonApiRequest.json_or_error(response_mock)
def test_json_or_error_raises_500(self):
"""
JsonApiRequest.json_or_error should raise an error on HTTP status 500
"""
response_mock = Mock()
response_mock.url = 'http://broken.example.com'
response_mock.status_code = 500
response_mock.text = 'Oops!'
with self.assertRaisesMessage(
JsonApiError,
'API request to http://broken.example.com failed with HTTP status 500: Oops!'):
JsonApiRequest.json_or_error(response_mock)
def test_get_calls_requests(self):
"""
JsonApiRequest.get should call requests.get and return the JSON result
"""
with patch('requests.get') as requests_mock, \
response_context() as response_mock:
requests_mock.return_value = response_mock
request = JsonApiRequest('http://www.example.com', 'admin', 's3cr3t')
data = request.get('ham/eggs')
requests_mock.assert_called_with(
'http://www.example.com/api/ham/eggs',
headers={'Accept': 'application/json'},
auth=('admin', 's3cr3t'))
self.assertEqual(data, {'spam': True})
def test_post_calls_requests(self):
"""
JsonApiRequest.post should call requests.post and return the JSON result
"""
with patch('requests.post') as requests_mock, \
response_context() as response_mock:
requests_mock.return_value = response_mock
request = JsonApiRequest('http://www.example.com', 'admin', 's3cr3t')
data = request.post('ham/eggs', {'ham': True})
requests_mock.assert_called_with(
'http://www.example.com/api/ham/eggs',
'{"ham": true}',
headers={'Content-type': 'application/json', 'Accept': 'application/json'},
auth=('admin', 's3cr3t'))
self.assertEqual(data, {'spam': True})
def test_put_calls_requests(self):
"""
JsonApiRequest.put should call requests.put and return the JSON result
"""
with patch('requests.put') as requests_mock, \
response_context() as response_mock:
requests_mock.return_value = response_mock
request = JsonApiRequest('http://www.example.com', 'admin', 's3cr3t')
data = request.put('ham/eggs', {'ham': True})
requests_mock.assert_called_with(
'http://www.example.com/api/ham/eggs',
'{"ham": true}',
headers={'Content-type': 'application/json', 'Accept': 'application/json'},
auth=('admin', 's3cr3t'))
self.assertEqual(data, {'spam': True})
class Dhis2ApiTest(SimpleTestCase):
def test__fetch_tracked_entity_attributes(self):
"""
_fetch_tracked_entity_attributes should extend _tracked_entity_attributes
"""
te_attrs = {'trackedEntityAttributes': [
{'name': 'ham', 'id': 'deadbeef'},
{'name': 'spam', 'id': 'c0ffee'},
]}
dhis2_api = Dhis2Api('http://example.com/dhis', 'user', 'p4ssw0rd')
dhis2_api._request.get = Mock(return_value=te_attrs)
keys_before = set(dhis2_api._tracked_entity_attributes.keys())
dhis2_api._fetch_tracked_entity_attributes()
keys_after = set(dhis2_api._tracked_entity_attributes.keys())
fetched = keys_after - keys_before
self.assertIn('ham', fetched)
self.assertIn('spam', fetched)
@skip('Finish writing this test')
def test_add_te_inst(self):
pass
@skip('Finish writing this test')
def test_update_te_inst(self):
pass
@skip('Requires settings for live DHIS2 server')
def test_get_top_org_unit_settings(self):
"""
get_top_org_unit should return the name and ID of the org unit specified in settings
"""
if not SETTINGS['dhis2_top_org_unit_name']:
self.skipTest('An org unit is not set in settings.py')
dhis2_api = Dhis2Api(SETTINGS['dhis2_host'], SETTINGS['dhis2_username'], SETTINGS['dhis2_password'])
org_unit = dhis2_api.get_top_org_unit()
self.assertEqual(org_unit['name'], SETTINGS['dhis2_top_org_unit_name'])
self.assertTrue(bool(org_unit['id']))
@skip('Requires settings for live DHIS2 server')
def test_get_top_org_unit(self):
"""
get_top_org_unit should return the name and ID of the top org unit
"""
# TODO: Make sure get_top_org_unit navigates up tree of org units
dhis2_api = Dhis2Api(SETTINGS['dhis2_host'], SETTINGS['dhis2_username'], SETTINGS['dhis2_password'])
org_unit = dhis2_api.get_top_org_unit()
self.assertTrue(bool(org_unit['name']))
self.assertTrue(bool(org_unit['id']))
def test_get_resource_id(self):
"""
get_resource_id should query the API for the details of a named resource, and return the ID
"""
if not SETTINGS['dhis2_enabled']:
self.skipTest('DHIS2 is not configured')
resources = {'Knights': [
{'name': 'Michael Palin', 'id': 'c0ffee'},
]}
dhis2_api = Dhis2Api(SETTINGS['dhis2_host'], SETTINGS['dhis2_username'], SETTINGS['dhis2_password'])
dhis2_api._request.get = Mock(return_value=('foo', resources))
result = dhis2_api.get_resource_id('Knights', 'Who Say "Ni!"')
dhis2_api._request.get.assert_called_with('Knights', params={'links': 'false', 'query': 'Who Say "Ni!"'})
self.assertEqual(result, 'c0ffee')
def test_get_resource_id_none(self):
"""
get_resource_id should return None if none found
"""
if not SETTINGS['dhis2_enabled']:
self.skipTest('DHIS2 is not configured')
resources = {'Knights': []}
dhis2_api = Dhis2Api(SETTINGS['dhis2_host'], SETTINGS['dhis2_username'], SETTINGS['dhis2_password'])
dhis2_api._request.get = Mock(return_value=('foo', resources))
result = dhis2_api.get_resource_id('Knights', 'Who Say "Ni!"')
self.assertIsNone(result)
def test_get_resource_id_raises(self):
"""
get_resource_id should raise Dhis2ApiQueryError if multiple found
"""
if not SETTINGS['dhis2_enabled']:
self.skipTest('DHIS2 is not configured')
resources = {'Knights': [
{'name': 'Michael Palin', 'id': 'c0ffee'},
{'name': 'John Cleese', 'id': 'deadbeef'}
]}
dhis2_api = Dhis2Api(SETTINGS['dhis2_host'], SETTINGS['dhis2_username'], SETTINGS['dhis2_password'])
dhis2_api._request.get = Mock(return_value=('foo', resources))
with self.assertRaises(Dhis2ApiQueryError):
dhis2_api.get_resource_id('Knights', 'Who Say "Ni!"')
@skip('Finish writing this test')
def test_form_to_event(self):
form = XFormInstance(
domain='Foo',
form={
'name': ''
}
)
@skip('Finish writing this test')
def test_entities_to_dicts(self):
pass
class FixtureManagerTest(SimpleTestCase):
pass
class Dhis2OrgUnitTest(TestCase):
def test_save(self):
"""
Dhis2OrgUnit.save should save a FixtureDataItem
"""
# with fixture_type_context(), \
# patch('corehq.apps.fixtures.models.FixtureDataItem') as data_item_patch, \
# patch('couchdbkit.schema.base.DocumentBase.save') as save_patch:
# data_item_mock = Mock()
# data_item_mock.save.return_value = None
# data_item_mock.get_id = '123'
# data_item_patch.return_value = data_item_mock
#
# org_unit = Dhis2OrgUnit(id='QXOOG2Foong', name='Somerset West', parent_id=None)
# id_ = org_unit.save()
#
# data_item_patch.assert_called()
# data_item_mock.save.assert_called() # Which one gets called. Why?
# save_patch.assert_called()
# self.assertEqual(id_, '123')
# self.assertEqual(org_unit._fixture_id, '123')
# TODO: Figure out why mocks above don't work.
# In the meantime ...
with fixture_type_context():
Dhis2OrgUnit.objects = FixtureManager(Dhis2OrgUnit, DOMAIN, ORG_UNIT_FIXTURES)
org_unit = Dhis2OrgUnit(id='QXOOG2Foong', name='Somerset West', parent_id=None)
id_ = org_unit.save()
self.assertIsNotNone(id_)
self.assertIsNotNone(org_unit._fixture_id)
def test_delete_dhis2_org_unit_does_nothing(self):
"""
Dhis2OrgUnit.delete should do nothing if it's not saved
"""
with fixture_type_context(), \
patch('corehq.apps.fixtures.models.FixtureDataItem.get') as mock_get:
data_item_mock = Mock()
mock_get.return_value = data_item_mock
Dhis2OrgUnit.objects = FixtureManager(Dhis2OrgUnit, DOMAIN, ORG_UNIT_FIXTURES)
org_unit = Dhis2OrgUnit(id='QXOOG2Foong', name='Somerset West', parent_id=None)
org_unit.delete()
self.assertFalse(mock_get.called)
self.assertFalse(data_item_mock.delete.called)
def test_delete_dhis2_org_unit_deletes(self):
"""
Dhis2OrgUnit.delete should delete if it's saved
"""
with fixture_type_context(), \
patch('corehq.apps.fixtures.models.FixtureDataItem') as data_item_patch, \
patch('couchdbkit.schema.base.DocumentBase.get') as get_patch:
data_item_mock = Mock()
data_item_mock.get_id.return_value = '123'
data_item_patch.return_value = data_item_mock
            get_patch.return_value = data_item_mock
Dhis2OrgUnit.objects = FixtureManager(Dhis2OrgUnit, DOMAIN, ORG_UNIT_FIXTURES)
org_unit = Dhis2OrgUnit(id='QXOOG2Foong', name='Somerset West', parent_id=None)
org_unit.save()
org_unit.delete()
            get_patch.assert_called()
data_item_mock.delete.assert_called()
class TaskTest(SimpleTestCase):
def setUp(self):
# TODO: Enable DHIS2
pass
@skip('Fix mocks')
def test_fetch_org_units_dict_comps(self):
"""
sync_org_units should create dictionaries of CCHQ and DHIS2 org units
"""
with patch('custom.dhis2.models.Dhis2Api.gen_org_units') as gen_org_units_patch, \
patch('custom.dhis2.models.FixtureManager.all') as objects_all_patch:
ou_dict = {'id': '1', 'name': 'Sri Lanka'}
ou_obj = type('OrgUnit', (object,), ou_dict) # An object with attributes the same as ou_dict items
gen_org_units_patch.side_effect = lambda: (d for d in [ou_dict]) # Generates org unit dicts
objects_all_patch.side_effect = lambda: (o for o in [ou_obj]) # Generates org unit objects
fetch_org_units()
gen_org_units_patch.assert_called()
objects_all_patch.assert_called()
# TODO: No point in running this test if Dhis2OrgUnit patch doesn't work -- nothing to assert
@skip('Fix mocks')
def test_fetch_org_units_adds(self):
"""
sync_org_units should add new org units
"""
with fixture_type_context(), \
patch('custom.dhis2.models.Dhis2Api.gen_org_units') as gen_org_units_patch, \
patch('custom.dhis2.models.FixtureManager.all') as objects_all_patch, \
patch('custom.dhis2.models.Dhis2OrgUnit') as org_unit_patch:
ou_dict = {'id': '1', 'name': 'Sri Lanka'}
gen_org_units_patch.side_effect = lambda: (d for d in [ou_dict])
objects_all_patch.side_effect = lambda: (o for o in [])
fetch_org_units()
org_unit_patch.__init__.assert_called_with(id='1', name='Sri Lanka')
org_unit_patch.save.assert_called()
@skip('Fix mocks')
def test_fetch_org_units_deletes(self):
"""
sync_org_units should delete old org units
"""
with patch('custom.dhis2.models.Dhis2Api.gen_org_units') as gen_org_units_patch, \
patch('custom.dhis2.models.FixtureManager.all') as objects_all_patch:
delete_mock = Mock()
ou_obj = type('OrgUnit', (object,), {'id': '1', 'name': 'Sri Lanka', 'delete': delete_mock})
gen_org_units_patch.side_effect = lambda: (d for d in [])
objects_all_patch.side_effect = lambda: (o for o in [ou_obj])
fetch_org_units()
delete_mock.assert_called()
@skip('Fix mocks')
def test_fetch_cases(self):
with patch('custom.dhis2.tasks.get_children_only_theirs') as only_theirs_mock, \
patch('custom.dhis2.tasks.pull_child_entities') as pull_mock, \
patch('custom.dhis2.tasks.gen_children_only_ours') as only_ours_mock, \
patch('custom.dhis2.tasks.push_child_entities') as push_mock:
foo = object()
bar = object()
only_theirs_mock.return_value = foo
only_ours_mock.return_value = bar
fetch_cases()
only_theirs_mock.assert_called()
pull_mock.assert_called_with(DOMAIN, foo)
only_ours_mock.assert_called_with(DOMAIN)
push_mock.assert_called_with(bar)
class PayloadGeneratorTest(SimpleTestCase):
def test_get_payload_ignores_unknown_form(self):
"""
get_payload should raise IgnoreDocument on unknown form XMLNS
"""
form_mock = {'xmlns': 'unknown', 'domain': 'test-domain'}
payload_generator = FormRepeaterDhis2EventPayloadGenerator(None)
with self.assertRaises(IgnoreDocument):
payload_generator.get_payload(None, form_mock)
@patch('custom.dhis2.payload_generators.push_case')
@patch('casexml.apps.case.xform.cases_referenced_by_xform')
@patch('custom.dhis2.payload_generators.Dhis2Settings')
def test_get_payload_ignores_registration(self, Dhis2SettingsPatch, cases_referenced_by_xform, push_case):
"""
get_payload should raise IgnoreDocument given a registration form
"""
case_mock = Mock()
case_mock.type = CASE_TYPE
cases_referenced_by_xform.return_value = [case_mock]
class Settings(object):
dhis2 = {'host': 'foo', 'username': 'foo', 'password': 'foo', 'top_org_unit_name': 'foo'}
Dhis2SettingsPatch.for_domain.return_value = Settings()
form_mock = MagicMock()
form_mock.__getitem__.return_value = REGISTER_CHILD_XMLNS
payload_generator = FormRepeaterDhis2EventPayloadGenerator(None)
with self.assertRaises(IgnoreDocument):
payload_generator.get_payload(None, form_mock)
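# Illustrative sketch (hypothetical helper, not used by the tests above): the
# get_resource_id tests all stub the low-level request object the same way, a
# pattern that could be factored out roughly like this.
def _dhis2_api_with_canned_response(resources):
    """Return a Dhis2Api whose GET requests yield the given canned resources."""
    api = Dhis2Api(SETTINGS['dhis2_host'], SETTINGS['dhis2_username'], SETTINGS['dhis2_password'])
    api._request.get = Mock(return_value=('foo', resources))
    return api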
|
|
# -*- coding: utf-8 -*-
"""
TraceView API library
:copyright: (c) 2016 by Daniel Riti.
:license: MIT, see LICENSE for more details.
"""
__title__ = 'traceview'
__version__ = '0.7.0'
__author__ = 'Daniel Riti'
__license__ = 'MIT'
from .annotation import Annotation
from .api import Api
from .app import App, Assign
from .discovery import Action, Browser, Controller, Domain
from .discovery import Layer, Metric, Region
from .host import Host, Instrumentation
from .error import Rate
from .latency import Client, Server
from .organization import Organization
from .total_request import TotalRequests
class TraceView(object):
""" The :class:`TraceView <TraceView>` object.
Provides access to TraceView API resources.
:param api_key: The TraceView API access key.
:param func formatter: (optional) Function to format API results. See the module :mod:`traceview.formatters`.
Usage::
>>> import traceview
>>> tv = traceview.TraceView('API KEY HERE')
"""
def __init__(self, api_key, formatter=None):
self._api = Api(api_key, after_request=formatter)
self._actions = Action(self._api)
self._annotation = Annotation(self._api)
self._apps = App(self._api)
self._assign = Assign(self._api)
self._browsers = Browser(self._api)
self._controllers = Controller(self._api)
self._domains = Domain(self._api)
self._error_rates = Rate(self._api)
self._hosts = Host(self._api)
self._instrumentation = Instrumentation(self._api)
self._layers = Layer(self._api)
self._metrics = Metric(self._api)
self._organization = Organization(self._api)
self._regions = Region(self._api)
#: Get :py:class:`Client <traceview.latency.Client>` latency information.
self.client = Client(self._api)
#: Get :py:class:`Server <traceview.latency.Server>` latency information.
self.server = Server(self._api)
#: Get :py:class:`TotalRequests <traceview.total_request.TotalRequests>` information.
self.total_requests = TotalRequests(self._api)
def actions(self):
""" Get all actions that have been traced.
:return: all actions traced.
:rtype: list
Usage::
>>> import traceview
>>> tv = traceview.TraceView('API KEY HERE')
>>> tv.actions()
[u'admin', u'products', u'blog', u'settings', u'logout']
"""
return self._actions.get()
def annotation(self, message, *args, **kwargs):
""" Create an annotation.
        Annotations are used to log arbitrary events into TraceView, which are
        used to understand the correlation between system events (e.g. code
        releases, server restarts) and performance trends.
:param str message: The annotation message.
:param str appname: (optional) The application to associate the annotation with.
:param str hostname: (optional) The host to associate the annotation with.
:param str username: (optional) The user name to associate the annotation with.
:param str layer: (optional) The layer name to associate the annotation with.
:param str time: (optional) The time to associate the annotation with, in seconds since the epoch.
Usage::
>>> import traceview
>>> tv = traceview.TraceView('API KEY HERE')
>>> tv.annotation('Code deployed', appname='production_web')
"""
self._annotation.create(message, *args, **kwargs)
def annotations(self, appname=None, *args, **kwargs):
""" Get annotations.
        Annotations are used to log arbitrary events into TraceView, which are
        used to understand the correlation between system events (e.g. code
        releases, server restarts) and performance trends.
The default time window is one week.
:param str appname: (optional) The application name to filter annotations by.
:param str time_start: (optional) The start time for the time window, in milliseconds since the epoch.
:param str time_end: (optional) The end time for the time window, in milliseconds since the epoch.
:rtype: list
Usage::
>>> import pprint
>>> import traceview
>>> tv = traceview.TraceView('API KEY HERE')
>>> pprint.pprint(tv.annotations(appname='production_web'))
[{u'app': 3,
u'host': u'prod-web.example.com',
u'id': 123,
u'message': u'Code deployed',
u'time': 1409685758,
u'username': u'dan'},
...
]
"""
return self._annotation.get(app=appname, *args, **kwargs)
def apps(self):
""" Get all available applications.
:return: all available applications
:rtype: list
Usage::
>>> import traceview
>>> tv = traceview.TraceView('API KEY HERE')
>>> tv.apps()
[u'Default', u'flask_app']
"""
return self._apps.get()
def assign(self, hostname, appname, *args, **kwargs):
""" Assign a host to an existing application.
        Please note that you cannot assign host names to the `Default`
        application, as all hosts start there.
:param str hostname: The host name to assign to the application.
:param str appname: The existing application name.
:param str layer: (optional) The layer name to assign to the application.
Usage::
>>> import traceview
>>> tv = traceview.TraceView('API KEY HERE')
>>> tv.assign(hostname='web-app-1234', appname='production_web')
"""
self._assign.update(hostname, appname, *args, **kwargs)
def browsers(self):
""" Get all browsers used by end users.
:return: all browsers used by end users
:rtype: list
Usage::
>>> import traceview
>>> tv = traceview.TraceView('API KEY HERE')
>>> tv.browsers()
[u'Chrome', u'Firefox', u'Links', u'Safari', u'Wii']
"""
return self._browsers.get()
def controllers(self):
""" Get all controllers that have been traced.
:return: all controllers traced
:rtype: list
Usage::
>>> import traceview
>>> tv = traceview.TraceView('API KEY HERE')
>>> tv.controllers()
[u'admin', u'products', u'blog', u'settings', u'logout']
"""
return self._controllers.get()
def delete(self, host_id, *args, **kwargs):
"""
.. deprecated:: 0.6.0
Use :func:`delete_host <traceview.TraceView.delete_host>` instead.
Delete an existing host.
:param str host_id: The id of the host to delete.
:return: indicates if host was successfully deleted.
:rtype: boolean
Usage::
>>> import traceview
>>> tv = traceview.TraceView('API KEY HERE')
>>> tv.delete(host_id='123')
True
"""
return self.delete_host(host_id, *args, **kwargs)
def delete_host(self, host_id, *args, **kwargs):
""" Delete an existing host.
:param int host_id: The id of the host to delete.
:return: indicates if host was successfully deleted.
:rtype: boolean
Usage::
>>> import traceview
>>> tv = traceview.TraceView('API KEY HERE')
>>> tv.delete_host(host_id=123)
True
"""
return self._hosts.delete(host_id, *args, **kwargs)
def delete_app(self, app_name, *args, **kwargs):
""" Delete an existing app.
:param str app_name: The name of the app to delete.
:return: indicates if app was successfully deleted.
:rtype: boolean
Usage::
>>> import traceview
>>> tv = traceview.TraceView('API KEY HERE')
>>> tv.delete_app(app_name='APP_123')
True
"""
return self._apps.delete(app_name, *args, **kwargs)
def domains(self):
""" Get all domains that have been traced.
:return: all domains traced
:rtype: list
Usage::
>>> import traceview
>>> tv = traceview.TraceView('API KEY HERE')
>>> tv.domains()
[u'example.com', u'www.example.com', u'mail.example.com']
"""
return self._domains.get()
def error_rates(self, app, *args, **kwargs):
""" Get the error rate for an application.
Each item in the items list is a pair of values (timestamp, error_rate).
        The error rate is the number of traces with one or more errors
        divided by the total number of traces.
:param str app: The application name.
:return: timeseries data of the application's error rate
:rtype: dict
Usage::
>>> import traceview
>>> tv = traceview.TraceView('API KEY HERE')
>>> tv.error_rates('Default')
{u'fields': u'timestamp,error_rate', u'items': [[1399082880.0, 0], [1399082910.0, 0], ...]}
"""
return self._error_rates.get(app, *args, **kwargs)
def hosts(self, appname=None, *args, **kwargs):
""" Get all hosts that have been traced.
:param str appname: (optional) The application name to filter hosts by.
:return: all hosts traced
:rtype: list
Usage::
>>> import traceview
>>> tv = traceview.TraceView('API KEY HERE')
>>> tv.hosts()
[{u'last_trace': None, u'last_heartbeat': 1429033545, u'first_heartbeat': 1428060977, u'name': u'ip-127-0-0-1', u'id': 12345}, { ... }]
"""
return self._hosts.get(app=appname)
def instrumentation(self, host_id):
""" Get instrumentation version information for a host.
:param str host_id: The id of the host.
:return: instrumentation version information for a host
:rtype: list
Usage::
>>> import pprint
>>> import traceview
>>> tv = traceview.TraceView('API KEY HERE')
>>> pprint.pprint(tv.instrumentation(host_id=1))
[{u'name': u'tracelyzer',
u'release_date': 1374537600,
u'update_required': True,
u'version': u'1.1.1'},
...
]
"""
return self._instrumentation.get(host_id)
def layers(self, app, *args, **kwargs):
""" Get all recent layers for an application.
The default time window for reported layers is 1 day.
:param str app: The app name to list layers.
:param int since_time: (optional) The start of the time window as a UTC timestamp in milliseconds.
:return: all available apps
:rtype: list
Usage::
>>> import traceview
>>> tv = traceview.TraceView('API KEY HERE')
>>> tv.layers('Default')
[u'PHP', u'cURL', u'lighttpd', u'php_mysql', u'php_mysqli']
"""
return self._layers.get(app, *args, **kwargs)
def licenses(self):
""" Get the current number of hosts reporting traces and the number of
hosts licensed to the organization.
:return: licensing information for organization.
:rtype: dict
Usage::
>>> import traceview
>>> tv = traceview.TraceView('API KEY HERE')
>>> tv.licenses()
{u'hosts_used': 5, u'hosts_limit': 10}
"""
return self._organization.licenses()
def metrics(self):
""" Get all available host metrics that have been collected.
        :return: all available host metrics being collected.
:rtype: list
Usage::
>>> import traceview
>>> tv = traceview.TraceView('API KEY HERE')
>>> tv.metrics()
[u'cpu_user_frac:all', u'load', u'mem_apps', u'mem_cached', u'mem_swap', u'mem_totalused', ... ]
"""
return self._metrics.get()
def organization(self):
""" Get organization information.
:return: organization information
:rtype: dict
Usage::
>>> import traceview
>>> tv = traceview.TraceView('API KEY HERE')
>>> tv.organization()
{u'fullname': u'the example organization', u'name': u'example'}
"""
return self._organization.get()
def regions(self):
""" Get all geographical region codes of end users.
        Region codes are ISO 3166-1 and ISO 3166-2 codes for all regions collected
in RUM. Currently, country codes (ISO-3166-1) are available worldwide, and
state codes (ISO-3166-2) are available in the US and Canada.
:return: all geographical region codes of end users
:rtype: list
Usage::
>>> import traceview
>>> tv = traceview.TraceView('API KEY HERE')
>>> tv.regions()
[u'CA', u'CA-BC', u'MX', u'RU', u'US', u'US-RI', ...]
"""
return self._regions.get()
def users(self):
""" Get user information.
:return: user information
:rtype: list
Usage::
>>> import traceview
>>> tv = traceview.TraceView('API KEY HERE')
>>> tv.users()
[{u'admin': True, u'name': u'Jane Doe', u'email': u'[email protected]'}, { ... }]
"""
return self._organization.users()
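# Minimal usage sketch (assumption: any callable that accepts the decoded API
# response can serve as the optional ``formatter``; the key and formatter below
# are placeholders for illustration only).
if __name__ == '__main__':  # pragma: no cover
    import pprint

    def sort_results(results):
        # Hypothetical formatter: return list responses in sorted order.
        return sorted(results) if isinstance(results, list) else results

    tv = TraceView('API KEY HERE', formatter=sort_results)
    pprint.pprint(tv.apps())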
|
|
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Test configs for binary_op."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow.compat.v1 as tf
from tensorflow.lite.testing.zip_test_utils import create_tensor_data
from tensorflow.lite.testing.zip_test_utils import make_zip_of_tests
from tensorflow.lite.testing.zip_test_utils import register_make_test_function
def make_binary_op_tests(options,
binary_operator,
allow_fully_quantize=False,
expected_tf_failures=0,
test_parameters=None):
"""Make a set of tests to do binary ops with and without broadcast."""
if test_parameters is None:
test_parameters = []
test_parameters = test_parameters + [
# Avoid creating all combinations to keep the test size small.
{
"dtype": [tf.float32, tf.int32],
"input_shape_1": [[1, 3, 4, 3]],
"input_shape_2": [[1, 3, 4, 3]],
"activation": [True],
"fully_quantize": [False],
"dynamic_range_quantize": [False],
},
{
"dtype": [tf.float32],
"input_shape_1": [[5]],
"input_shape_2": [[5]],
"activation": [False, True],
"fully_quantize": [False],
"dynamic_range_quantize": [False],
},
{
"dtype": [tf.float32, tf.int32, tf.int64],
"input_shape_1": [[1, 3, 4, 3]],
"input_shape_2": [[3]],
"activation": [True, False],
"fully_quantize": [False],
"dynamic_range_quantize": [False],
},
{
"dtype": [tf.float32, tf.int32],
"input_shape_1": [[3]],
"input_shape_2": [[1, 3, 4, 3]],
"activation": [True, False],
"fully_quantize": [False],
"dynamic_range_quantize": [False],
},
{
"dtype": [tf.float32],
"input_shape_1": [[]],
"input_shape_2": [[]],
"activation": [False],
"fully_quantize": [False],
"dynamic_range_quantize": [False],
},
{
"dtype": [tf.float32],
"input_shape_1": [[0]],
"input_shape_2": [[1]],
"activation": [False],
"fully_quantize": [False],
"dynamic_range_quantize": [False],
},
{
"dtype": [tf.float32],
"input_shape_1": [[1, 3, 4, 3]],
"input_shape_2": [[1, 3, 4, 3]],
"activation": [False],
"fully_quantize": [True],
"dynamic_range_quantize": [False],
},
{
"dtype": [tf.float32],
"input_shape_1": [[5]],
"input_shape_2": [[5]],
"activation": [False],
"fully_quantize": [True],
"dynamic_range_quantize": [False],
},
{
"dtype": [tf.float32],
"input_shape_1": [[1, 3, 4, 3]],
"input_shape_2": [[3]],
"activation": [False],
"fully_quantize": [True],
"dynamic_range_quantize": [False],
},
{
"dtype": [tf.float32],
"input_shape_1": [[3]],
"input_shape_2": [[1, 3, 4, 3]],
"activation": [False],
"fully_quantize": [True],
"dynamic_range_quantize": [False],
},
{
"dtype": [tf.float32],
"input_shape_1": [[]],
"input_shape_2": [[]],
"activation": [False],
"fully_quantize": [True],
"dynamic_range_quantize": [False],
},
{
"dtype": [tf.float32],
"input_shape_1": [[1, 3, 4, 3]],
"input_shape_2": [[1, 3, 4, 3]],
"activation": [False],
"fully_quantize": [False],
"dynamic_range_quantize": [True],
},
{
"dtype": [tf.float32],
"input_shape_1": [[5]],
"input_shape_2": [[5]],
"activation": [False],
"fully_quantize": [False],
"dynamic_range_quantize": [True],
},
{
"dtype": [tf.float32],
"input_shape_1": [[1, 3, 4, 3]],
"input_shape_2": [[3]],
"activation": [False],
"fully_quantize": [False],
"dynamic_range_quantize": [True],
},
{
"dtype": [tf.float32],
"input_shape_1": [[3]],
"input_shape_2": [[1, 3, 4, 3]],
"activation": [False],
"fully_quantize": [False],
"dynamic_range_quantize": [True],
},
{
"dtype": [tf.float32],
"input_shape_1": [[]],
"input_shape_2": [[]],
"activation": [False],
"fully_quantize": [False],
"dynamic_range_quantize": [True],
},
]
# float64 types are supported via flex only.
if options.run_with_flex and options.use_experimental_converter:
test_parameters = test_parameters + [
{
"dtype": [tf.float64],
"input_shape_1": [[7]],
"input_shape_2": [[7]],
"activation": [False],
"fully_quantize": [False],
"dynamic_range_quantize": [False],
},
]
# High dimension broadcasting support in MLIR converter.
if options.use_experimental_converter:
test_parameters = test_parameters + [
{
"dtype": [tf.float32],
"input_shape_1": [[8, 7, 6, 5, 4, 3, 2, 1]],
"input_shape_2": [[4, 3, 2, 1]],
"activation": [False],
"fully_quantize": [False],
"dynamic_range_quantize": [False],
},
]
# test_parameters include fully_quantize option only when
# allow_fully_quantize is True.
if not allow_fully_quantize:
test_parameters = [
test_parameter for test_parameter in test_parameters
if True not in test_parameter["fully_quantize"]
]
def build_graph(parameters):
"""Builds the graph given the current parameters."""
input1 = tf.compat.v1.placeholder(
dtype=parameters["dtype"],
name="input1",
shape=parameters["input_shape_1"])
input2 = tf.compat.v1.placeholder(
dtype=parameters["dtype"],
name="input2",
shape=parameters["input_shape_2"])
out = binary_operator(input1, input2)
# TODO(karimnosseir): Update condition after moving to new converter.
if parameters["activation"] and (not options.use_experimental_converter or
(parameters["dtype"] != tf.int32 and
parameters["dtype"] != tf.int64)):
out = tf.nn.relu(out)
return [input1, input2], [out]
def build_inputs(parameters, sess, inputs, outputs):
"""Builds operand inputs for op."""
if allow_fully_quantize:
input1 = create_tensor_data(
parameters["dtype"],
parameters["input_shape_1"],
min_value=-1,
max_value=1)
input2 = create_tensor_data(
parameters["dtype"],
parameters["input_shape_2"],
min_value=-1,
max_value=1)
else:
input1 = create_tensor_data(parameters["dtype"],
parameters["input_shape_1"])
input2 = create_tensor_data(parameters["dtype"],
parameters["input_shape_2"])
return [input1, input2], sess.run(
outputs, feed_dict={
inputs[0]: input1,
inputs[1]: input2
})
make_zip_of_tests(
options,
test_parameters,
build_graph,
build_inputs,
expected_tf_failures=expected_tf_failures)
def make_binary_op_tests_func(binary_operator):
"""Return a function that does a test on a binary operator."""
return lambda options: make_binary_op_tests(options, binary_operator)
@register_make_test_function()
def make_add_tests(options):
make_binary_op_tests(options, tf.add, allow_fully_quantize=True)
@register_make_test_function()
def make_div_tests(options):
"""Make zip tests for div op with 5D case."""
test_parameters = [
{
"dtype": [tf.float32],
"input_shape_1": [[1, 3, 3, 3, 3]],
"input_shape_2": [[3]],
"activation": [False],
"fully_quantize": [False],
"dynamic_range_quantize": [False, True],
},
]
make_binary_op_tests(
options, tf.compat.v1.div, test_parameters=test_parameters)
@register_make_test_function()
def make_sub_tests(options):
"""Make zip tests for sub op with additional cases."""
test_parameters = [
{
"dtype": [tf.float32],
"input_shape_1": [[1, 3, 3, 3, 3]],
"input_shape_2": [[3]],
"activation": [False],
"fully_quantize": [False],
"dynamic_range_quantize": [False, True],
},
]
make_binary_op_tests(
options,
tf.subtract,
allow_fully_quantize=True,
test_parameters=test_parameters)
@register_make_test_function()
def make_mul_tests(options):
make_binary_op_tests(options, tf.multiply, allow_fully_quantize=True)
@register_make_test_function()
def make_pow_tests(options):
make_binary_op_tests(options, tf.pow, expected_tf_failures=7)
@register_make_test_function()
def make_floor_div_tests(options):
make_binary_op_tests(options, tf.math.floordiv)
@register_make_test_function()
def make_floor_mod_tests(options):
make_binary_op_tests(options, tf.math.floormod)
@register_make_test_function()
def make_squared_difference_tests(options):
make_binary_op_tests(options, tf.math.squared_difference,
allow_fully_quantize=True)
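# Hypothetical extension (not part of the original suite): any elementwise
# TensorFlow binary op can reuse the same harness; tf.minimum is shown purely
# to illustrate the registration pattern used above.
@register_make_test_function()
def make_minimum_example_tests(options):
  make_binary_op_tests(options, tf.minimum)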
|
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
import functools
from typing import Any, Callable, Dict, Generic, Iterable, Optional, TypeVar
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpResponse
from azure.core.rest import HttpRequest
from azure.core.tracing.decorator import distributed_trace
from azure.mgmt.core.exceptions import ARMErrorFormat
from msrest import Serializer
from .. import models as _models
from .._vendor import _convert_request, _format_url_section
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
_SERIALIZER = Serializer()
_SERIALIZER.client_side_validation = False
def build_unregister_request(
resource_provider_namespace: str,
subscription_id: str,
**kwargs: Any
) -> HttpRequest:
api_version = "2021-01-01"
accept = "application/json"
# Construct URL
url = kwargs.pop("template_url", '/subscriptions/{subscriptionId}/providers/{resourceProviderNamespace}/unregister')
path_format_arguments = {
"resourceProviderNamespace": _SERIALIZER.url("resource_provider_namespace", resource_provider_namespace, 'str'),
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str'),
}
url = _format_url_section(url, **path_format_arguments)
# Construct parameters
query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any]
query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="POST",
url=url,
params=query_parameters,
headers=header_parameters,
**kwargs
)
def build_register_at_management_group_scope_request(
resource_provider_namespace: str,
group_id: str,
**kwargs: Any
) -> HttpRequest:
api_version = "2021-01-01"
accept = "application/json"
# Construct URL
url = kwargs.pop("template_url", '/providers/Microsoft.Management/managementGroups/{groupId}/providers/{resourceProviderNamespace}/register')
path_format_arguments = {
"resourceProviderNamespace": _SERIALIZER.url("resource_provider_namespace", resource_provider_namespace, 'str'),
"groupId": _SERIALIZER.url("group_id", group_id, 'str', max_length=90, min_length=1),
}
url = _format_url_section(url, **path_format_arguments)
# Construct parameters
query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any]
query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="POST",
url=url,
params=query_parameters,
headers=header_parameters,
**kwargs
)
def build_register_request(
resource_provider_namespace: str,
subscription_id: str,
**kwargs: Any
) -> HttpRequest:
api_version = "2021-01-01"
accept = "application/json"
# Construct URL
url = kwargs.pop("template_url", '/subscriptions/{subscriptionId}/providers/{resourceProviderNamespace}/register')
path_format_arguments = {
"resourceProviderNamespace": _SERIALIZER.url("resource_provider_namespace", resource_provider_namespace, 'str'),
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str'),
}
url = _format_url_section(url, **path_format_arguments)
# Construct parameters
query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any]
query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="POST",
url=url,
params=query_parameters,
headers=header_parameters,
**kwargs
)
def build_list_request(
subscription_id: str,
*,
top: Optional[int] = None,
expand: Optional[str] = None,
**kwargs: Any
) -> HttpRequest:
api_version = "2021-01-01"
accept = "application/json"
# Construct URL
url = kwargs.pop("template_url", '/subscriptions/{subscriptionId}/providers')
path_format_arguments = {
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str'),
}
url = _format_url_section(url, **path_format_arguments)
# Construct parameters
query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any]
if top is not None:
query_parameters['$top'] = _SERIALIZER.query("top", top, 'int')
if expand is not None:
query_parameters['$expand'] = _SERIALIZER.query("expand", expand, 'str')
query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="GET",
url=url,
params=query_parameters,
headers=header_parameters,
**kwargs
)
def build_list_at_tenant_scope_request(
*,
top: Optional[int] = None,
expand: Optional[str] = None,
**kwargs: Any
) -> HttpRequest:
api_version = "2021-01-01"
accept = "application/json"
# Construct URL
url = kwargs.pop("template_url", '/providers')
# Construct parameters
query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any]
if top is not None:
query_parameters['$top'] = _SERIALIZER.query("top", top, 'int')
if expand is not None:
query_parameters['$expand'] = _SERIALIZER.query("expand", expand, 'str')
query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="GET",
url=url,
params=query_parameters,
headers=header_parameters,
**kwargs
)
def build_get_request(
resource_provider_namespace: str,
subscription_id: str,
*,
expand: Optional[str] = None,
**kwargs: Any
) -> HttpRequest:
api_version = "2021-01-01"
accept = "application/json"
# Construct URL
url = kwargs.pop("template_url", '/subscriptions/{subscriptionId}/providers/{resourceProviderNamespace}')
path_format_arguments = {
"resourceProviderNamespace": _SERIALIZER.url("resource_provider_namespace", resource_provider_namespace, 'str'),
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str'),
}
url = _format_url_section(url, **path_format_arguments)
# Construct parameters
query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any]
if expand is not None:
query_parameters['$expand'] = _SERIALIZER.query("expand", expand, 'str')
query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="GET",
url=url,
params=query_parameters,
headers=header_parameters,
**kwargs
)
def build_get_at_tenant_scope_request(
resource_provider_namespace: str,
*,
expand: Optional[str] = None,
**kwargs: Any
) -> HttpRequest:
api_version = "2021-01-01"
accept = "application/json"
# Construct URL
url = kwargs.pop("template_url", '/providers/{resourceProviderNamespace}')
path_format_arguments = {
"resourceProviderNamespace": _SERIALIZER.url("resource_provider_namespace", resource_provider_namespace, 'str'),
}
url = _format_url_section(url, **path_format_arguments)
# Construct parameters
query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any]
if expand is not None:
query_parameters['$expand'] = _SERIALIZER.query("expand", expand, 'str')
query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="GET",
url=url,
params=query_parameters,
headers=header_parameters,
**kwargs
)
class ProvidersOperations(object):
"""ProvidersOperations operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.resource.resources.v2021_01_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
@distributed_trace
def unregister(
self,
resource_provider_namespace: str,
**kwargs: Any
) -> "_models.Provider":
"""Unregisters a subscription from a resource provider.
:param resource_provider_namespace: The namespace of the resource provider to unregister.
:type resource_provider_namespace: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: Provider, or the result of cls(response)
:rtype: ~azure.mgmt.resource.resources.v2021_01_01.models.Provider
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.Provider"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
request = build_unregister_request(
resource_provider_namespace=resource_provider_namespace,
subscription_id=self._config.subscription_id,
template_url=self.unregister.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('Provider', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
unregister.metadata = {'url': '/subscriptions/{subscriptionId}/providers/{resourceProviderNamespace}/unregister'} # type: ignore
@distributed_trace
def register_at_management_group_scope(
self,
resource_provider_namespace: str,
group_id: str,
**kwargs: Any
) -> None:
"""Registers a management group with a resource provider.
:param resource_provider_namespace: The namespace of the resource provider to register.
:type resource_provider_namespace: str
:param group_id: The management group ID.
:type group_id: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: None, or the result of cls(response)
:rtype: None
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
request = build_register_at_management_group_scope_request(
resource_provider_namespace=resource_provider_namespace,
group_id=group_id,
template_url=self.register_at_management_group_scope.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
register_at_management_group_scope.metadata = {'url': '/providers/Microsoft.Management/managementGroups/{groupId}/providers/{resourceProviderNamespace}/register'} # type: ignore
@distributed_trace
def register(
self,
resource_provider_namespace: str,
**kwargs: Any
) -> "_models.Provider":
"""Registers a subscription with a resource provider.
:param resource_provider_namespace: The namespace of the resource provider to register.
:type resource_provider_namespace: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: Provider, or the result of cls(response)
:rtype: ~azure.mgmt.resource.resources.v2021_01_01.models.Provider
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.Provider"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
request = build_register_request(
resource_provider_namespace=resource_provider_namespace,
subscription_id=self._config.subscription_id,
template_url=self.register.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('Provider', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
register.metadata = {'url': '/subscriptions/{subscriptionId}/providers/{resourceProviderNamespace}/register'} # type: ignore
@distributed_trace
def list(
self,
top: Optional[int] = None,
expand: Optional[str] = None,
**kwargs: Any
) -> Iterable["_models.ProviderListResult"]:
"""Gets all resource providers for a subscription.
        :param top: The number of results to return. If null is passed, returns all providers.
:type top: int
:param expand: The properties to include in the results. For example, use &$expand=metadata in
the query string to retrieve resource provider metadata. To include property aliases in
response, use $expand=resourceTypes/aliases.
:type expand: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either ProviderListResult or the result of cls(response)
:rtype:
~azure.core.paging.ItemPaged[~azure.mgmt.resource.resources.v2021_01_01.models.ProviderListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.ProviderListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
def prepare_request(next_link=None):
if not next_link:
request = build_list_request(
subscription_id=self._config.subscription_id,
top=top,
expand=expand,
template_url=self.list.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
else:
request = build_list_request(
subscription_id=self._config.subscription_id,
top=top,
expand=expand,
template_url=next_link,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
request.method = "GET"
return request
def extract_data(pipeline_response):
deserialized = self._deserialize("ProviderListResult", pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, iter(list_of_elem)
def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return ItemPaged(
get_next, extract_data
)
list.metadata = {'url': '/subscriptions/{subscriptionId}/providers'} # type: ignore
@distributed_trace
def list_at_tenant_scope(
self,
top: Optional[int] = None,
expand: Optional[str] = None,
**kwargs: Any
) -> Iterable["_models.ProviderListResult"]:
"""Gets all resource providers for the tenant.
        :param top: The number of results to return. If null is passed, returns all providers.
:type top: int
:param expand: The properties to include in the results. For example, use &$expand=metadata in
the query string to retrieve resource provider metadata. To include property aliases in
response, use $expand=resourceTypes/aliases.
:type expand: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either ProviderListResult or the result of cls(response)
:rtype:
~azure.core.paging.ItemPaged[~azure.mgmt.resource.resources.v2021_01_01.models.ProviderListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.ProviderListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
def prepare_request(next_link=None):
if not next_link:
request = build_list_at_tenant_scope_request(
top=top,
expand=expand,
template_url=self.list_at_tenant_scope.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
else:
request = build_list_at_tenant_scope_request(
top=top,
expand=expand,
template_url=next_link,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
request.method = "GET"
return request
def extract_data(pipeline_response):
deserialized = self._deserialize("ProviderListResult", pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, iter(list_of_elem)
def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return ItemPaged(
get_next, extract_data
)
list_at_tenant_scope.metadata = {'url': '/providers'} # type: ignore
@distributed_trace
def get(
self,
resource_provider_namespace: str,
expand: Optional[str] = None,
**kwargs: Any
) -> "_models.Provider":
"""Gets the specified resource provider.
:param resource_provider_namespace: The namespace of the resource provider.
:type resource_provider_namespace: str
:param expand: The $expand query parameter. For example, to include property aliases in
response, use $expand=resourceTypes/aliases.
:type expand: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: Provider, or the result of cls(response)
:rtype: ~azure.mgmt.resource.resources.v2021_01_01.models.Provider
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.Provider"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
request = build_get_request(
resource_provider_namespace=resource_provider_namespace,
subscription_id=self._config.subscription_id,
expand=expand,
template_url=self.get.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('Provider', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/providers/{resourceProviderNamespace}'} # type: ignore
@distributed_trace
def get_at_tenant_scope(
self,
resource_provider_namespace: str,
expand: Optional[str] = None,
**kwargs: Any
) -> "_models.Provider":
"""Gets the specified resource provider at the tenant level.
:param resource_provider_namespace: The namespace of the resource provider.
:type resource_provider_namespace: str
:param expand: The $expand query parameter. For example, to include property aliases in
response, use $expand=resourceTypes/aliases.
:type expand: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: Provider, or the result of cls(response)
:rtype: ~azure.mgmt.resource.resources.v2021_01_01.models.Provider
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.Provider"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
request = build_get_at_tenant_scope_request(
resource_provider_namespace=resource_provider_namespace,
expand=expand,
template_url=self.get_at_tenant_scope.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('Provider', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get_at_tenant_scope.metadata = {'url': '/providers/{resourceProviderNamespace}'} # type: ignore
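# Illustrative usage sketch (assumption: this operation group is normally
# reached through ResourceManagementClient rather than constructed directly;
# the credential and subscription id below are placeholders).
if __name__ == "__main__":  # pragma: no cover
    from azure.identity import DefaultAzureCredential
    from azure.mgmt.resource import ResourceManagementClient

    client = ResourceManagementClient(DefaultAzureCredential(), "<subscription-id>")
    provider = client.providers.register("Microsoft.Compute")
    print(provider.registration_state)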
|
|
# -*- coding: utf-8 -*-
# This file is part of beets.
# Copyright 2016, Adrian Sampson.
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
"""A Web interface to beets."""
from __future__ import division, absolute_import, print_function
from beets.plugins import BeetsPlugin
from beets import ui
from beets import util
import beets.library
import flask
from flask import g
from werkzeug.routing import BaseConverter, PathConverter
import os
from unidecode import unidecode
import json
import base64
# Utilities.
def _rep(obj, expand=False):
"""Get a flat -- i.e., JSON-ish -- representation of a beets Item or
Album object. For Albums, `expand` dictates whether tracks are
included.
"""
out = dict(obj)
if isinstance(obj, beets.library.Item):
if app.config.get('INCLUDE_PATHS', False):
out['path'] = util.displayable_path(out['path'])
else:
del out['path']
# Filter all bytes attributes and convert them to strings.
for key, value in out.items():
if isinstance(out[key], bytes):
out[key] = base64.b64encode(value).decode('ascii')
# Get the size (in bytes) of the backing file. This is useful
# for the Tomahawk resolver API.
try:
out['size'] = os.path.getsize(util.syspath(obj.path))
except OSError:
out['size'] = 0
return out
elif isinstance(obj, beets.library.Album):
del out['artpath']
if expand:
out['items'] = [_rep(item) for item in obj.items()]
return out
def json_generator(items, root, expand=False):
"""Generator that dumps list of beets Items or Albums as JSON
:param root: root key for JSON
:param items: list of :class:`Item` or :class:`Album` to dump
:param expand: If true every :class:`Album` contains its items in the json
representation
:returns: generator that yields strings
"""
yield '{"%s":[' % root
first = True
for item in items:
if first:
first = False
else:
yield ','
yield json.dumps(_rep(item, expand=expand))
yield ']}'
def is_expand():
"""Returns whether the current request is for an expanded response."""
return flask.request.args.get('expand') is not None
def resource(name):
"""Decorates a function to handle RESTful HTTP requests for a resource.
"""
def make_responder(retriever):
def responder(ids):
entities = [retriever(id) for id in ids]
entities = [entity for entity in entities if entity]
if len(entities) == 1:
return flask.jsonify(_rep(entities[0], expand=is_expand()))
elif entities:
return app.response_class(
json_generator(entities, root=name),
mimetype='application/json'
)
else:
return flask.abort(404)
responder.__name__ = 'get_{0}'.format(name)
return responder
return make_responder
def resource_query(name):
"""Decorates a function to handle RESTful HTTP queries for resources.
"""
def make_responder(query_func):
def responder(queries):
return app.response_class(
json_generator(
query_func(queries),
root='results', expand=is_expand()
),
mimetype='application/json'
)
responder.__name__ = 'query_{0}'.format(name)
return responder
return make_responder
def resource_list(name):
"""Decorates a function to handle RESTful HTTP request for a list of
resources.
"""
def make_responder(list_all):
def responder():
return app.response_class(
json_generator(list_all(), root=name, expand=is_expand()),
mimetype='application/json'
)
responder.__name__ = 'all_{0}'.format(name)
return responder
return make_responder
def _get_unique_table_field_values(model, field, sort_field):
""" retrieve all unique values belonging to a key from a model """
if field not in model.all_keys() or sort_field not in model.all_keys():
raise KeyError
with g.lib.transaction() as tx:
rows = tx.query('SELECT DISTINCT "{0}" FROM "{1}" ORDER BY "{2}"'
.format(field, model._table, sort_field))
return [row[0] for row in rows]
class IdListConverter(BaseConverter):
"""Converts comma separated lists of ids in urls to integer lists.
"""
def to_python(self, value):
ids = []
for id in value.split(','):
try:
ids.append(int(id))
except ValueError:
pass
return ids
def to_url(self, value):
return ','.join(value)
class QueryConverter(PathConverter):
"""Converts slash separated lists of queries in the url to string list.
"""
def to_python(self, value):
return value.split('/')
def to_url(self, value):
return ','.join(value)
class EverythingConverter(PathConverter):
regex = '.*?'
# Flask setup.
app = flask.Flask(__name__)
app.url_map.converters['idlist'] = IdListConverter
app.url_map.converters['query'] = QueryConverter
app.url_map.converters['everything'] = EverythingConverter
@app.before_request
def before_request():
g.lib = app.config['lib']
# Items.
@app.route('/item/<idlist:ids>')
@resource('items')
def get_item(id):
return g.lib.get_item(id)
@app.route('/item/')
@app.route('/item/query/')
@resource_list('items')
def all_items():
return g.lib.items()
@app.route('/item/<int:item_id>/file')
def item_file(item_id):
item = g.lib.get_item(item_id)
# On Windows under Python 2, Flask wants a Unicode path. On Python 3, it
# *always* wants a Unicode path.
if os.name == 'nt':
item_path = util.syspath(item.path)
else:
item_path = util.py3_path(item.path)
try:
unicode_item_path = util.text_string(item.path)
except (UnicodeDecodeError, UnicodeEncodeError):
unicode_item_path = util.displayable_path(item.path)
base_filename = os.path.basename(unicode_item_path)
try:
# Imitate http.server behaviour
base_filename.encode("latin-1", "strict")
except UnicodeEncodeError:
safe_filename = unidecode(base_filename)
else:
safe_filename = base_filename
response = flask.send_file(
item_path,
as_attachment=True,
attachment_filename=safe_filename
)
response.headers['Content-Length'] = os.path.getsize(item_path)
return response
@app.route('/item/query/<query:queries>')
@resource_query('items')
def item_query(queries):
return g.lib.items(queries)
@app.route('/item/path/<everything:path>')
def item_at_path(path):
query = beets.library.PathQuery('path', path.encode('utf-8'))
item = g.lib.items(query).get()
if item:
return flask.jsonify(_rep(item))
else:
return flask.abort(404)
@app.route('/item/values/<string:key>')
def item_unique_field_values(key):
sort_key = flask.request.args.get('sort_key', key)
try:
values = _get_unique_table_field_values(beets.library.Item, key,
sort_key)
except KeyError:
return flask.abort(404)
return flask.jsonify(values=values)
# Albums.
@app.route('/album/<idlist:ids>')
@resource('albums')
def get_album(id):
return g.lib.get_album(id)
@app.route('/album/')
@app.route('/album/query/')
@resource_list('albums')
def all_albums():
return g.lib.albums()
@app.route('/album/query/<query:queries>')
@resource_query('albums')
def album_query(queries):
return g.lib.albums(queries)
@app.route('/album/<int:album_id>/art')
def album_art(album_id):
album = g.lib.get_album(album_id)
if album and album.artpath:
return flask.send_file(album.artpath.decode())
else:
return flask.abort(404)
@app.route('/album/values/<string:key>')
def album_unique_field_values(key):
sort_key = flask.request.args.get('sort_key', key)
try:
values = _get_unique_table_field_values(beets.library.Album, key,
sort_key)
except KeyError:
return flask.abort(404)
return flask.jsonify(values=values)
# Artists.
@app.route('/artist/')
def all_artists():
with g.lib.transaction() as tx:
rows = tx.query("SELECT DISTINCT albumartist FROM albums")
all_artists = [row[0] for row in rows]
return flask.jsonify(artist_names=all_artists)
# Library information.
@app.route('/stats')
def stats():
with g.lib.transaction() as tx:
item_rows = tx.query("SELECT COUNT(*) FROM items")
album_rows = tx.query("SELECT COUNT(*) FROM albums")
return flask.jsonify({
'items': item_rows[0][0],
'albums': album_rows[0][0],
})
# UI.
@app.route('/')
def home():
return flask.render_template('index.html')
# Plugin hook.
class WebPlugin(BeetsPlugin):
def __init__(self):
super(WebPlugin, self).__init__()
self.config.add({
'host': u'127.0.0.1',
'port': 8337,
'cors': '',
'cors_supports_credentials': False,
'reverse_proxy': False,
'include_paths': False,
})
def commands(self):
cmd = ui.Subcommand('web', help=u'start a Web interface')
cmd.parser.add_option(u'-d', u'--debug', action='store_true',
default=False, help=u'debug mode')
def func(lib, opts, args):
args = ui.decargs(args)
if args:
self.config['host'] = args.pop(0)
if args:
self.config['port'] = int(args.pop(0))
app.config['lib'] = lib
# Normalizes json output
app.config['JSONIFY_PRETTYPRINT_REGULAR'] = False
app.config['INCLUDE_PATHS'] = self.config['include_paths']
# Enable CORS if required.
if self.config['cors']:
self._log.info(u'Enabling CORS with origin: {0}',
self.config['cors'])
from flask_cors import CORS
app.config['CORS_ALLOW_HEADERS'] = "Content-Type"
app.config['CORS_RESOURCES'] = {
r"/*": {"origins": self.config['cors'].get(str)}
}
CORS(
app,
supports_credentials=self.config[
'cors_supports_credentials'
].get(bool)
)
# Allow serving behind a reverse proxy
if self.config['reverse_proxy']:
app.wsgi_app = ReverseProxied(app.wsgi_app)
# Start the web application.
app.run(host=self.config['host'].as_str(),
port=self.config['port'].get(int),
debug=opts.debug, threaded=True)
cmd.func = func
return [cmd]
class ReverseProxied(object):
'''Wrap the application in this middleware and configure the
front-end server to add these headers, to let you quietly bind
this to a URL other than / and to an HTTP scheme that is
different than what is used locally.
In nginx:
location /myprefix {
proxy_pass http://192.168.0.1:5001;
proxy_set_header Host $host;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header X-Scheme $scheme;
proxy_set_header X-Script-Name /myprefix;
}
From: http://flask.pocoo.org/snippets/35/
:param app: the WSGI application
'''
def __init__(self, app):
self.app = app
def __call__(self, environ, start_response):
script_name = environ.get('HTTP_X_SCRIPT_NAME', '')
if script_name:
environ['SCRIPT_NAME'] = script_name
path_info = environ['PATH_INFO']
if path_info.startswith(script_name):
environ['PATH_INFO'] = path_info[len(script_name):]
scheme = environ.get('HTTP_X_SCHEME', '')
if scheme:
environ['wsgi.url_scheme'] = scheme
return self.app(environ, start_response)
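
# A minimal sketch (not executed by the plugin) of what ReverseProxied does to the
# WSGI environ; 'demo_app' and the header values below are made up for illustration:
#
#   def demo_app(environ, start_response):
#       start_response('200 OK', [('Content-Type', 'text/plain')])
#       return [environ['PATH_INFO'].encode()]
#
#   wrapped = ReverseProxied(demo_app)
#   environ = {'HTTP_X_SCRIPT_NAME': '/myprefix',
#              'PATH_INFO': '/myprefix/item/1',
#              'HTTP_X_SCHEME': 'https'}
#   wrapped(environ, lambda status, headers: None)
#   # environ is now rewritten: SCRIPT_NAME == '/myprefix',
#   # PATH_INFO == '/item/1', wsgi.url_scheme == 'https'.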
|
|
# -*- coding: utf-8 -*-
import healpy as hp
import numpy as np
from mocpy import MOC
from math import log
from astropy.table import Table  # used to build the RA/DEC table passed to MOC.from_table
from .lvc_skymap import LVCskymap
import time
from .aladinSAMP import AladinScriptCommands
aladin = AladinScriptCommands()
class MOC_confidence_region(object):
"""Multi-Order coverage map (MOC) of sky areas enclosed within a contour plot
at a given confidence level."""
def read_prob(self, infile):
"""Reading healpix skymap.
Input parameters
----------------
infile : string
LVC probability sky localization in healpix format
Return
-------
        hpx : numpy array
              1D array of values (probability stored in each pixel)
"""
hpx = hp.read_map(infile, verbose = False)
return hpx
def ipixs_in_percentage(self, hpx, percentage):
"""Finding the ipix indices confined in a given percentage.
Input parameters
----------------
hpx : numpy array
1D array of values (probability stored in each pixel)
percentage : float
fractional percentage from 0 to 1
Return
-------
ipixs : numpy array
indices of pixels
"""
# ranked the healpix pixels from most probable to least, and finally counted how many
# pixels summed to a given total probability.
# see https://arxiv.org/pdf/1404.5623.pdf
cumsum = np.sort(hpx)[::-1].cumsum()
how_many_ipixs, cut_percentage = min(enumerate(cumsum),
key = lambda x: abs(x[1] - percentage))
del(cumsum)
index = np.arange(0, len(hpx))
hpx_index = np.c_[hpx, index]
sort = hpx_index[hpx_index[:, 0].argsort()[::-1]]
ipixs = sort[0:how_many_ipixs, [1]].astype(int)
return ipixs
    def __ipix_box(self, ra_vertices, dec_vertices):
        """Return the ipix inside a polygon."""

        NSIDE = 512  # fixed nside resolution; TODO: make this configurable
        theta = 0.5 * np.pi - np.deg2rad(dec_vertices)
        phi = np.deg2rad(ra_vertices)
        xyz = hp.ang2vec(theta, phi)
        ipix_poly = hp.query_polygon(NSIDE, xyz)

        return ipix_poly, NSIDE
def ipix_in_box(self, ra, dec, width, height):
"""Return the probability inside a box."""
self.lvc_skymap = LVCskymap()
v1_ra, v2_ra, v3_ra, v4_ra, v1_dec, v2_dec, v3_dec, v4_dec = self.lvc_skymap.vertices(
ra, dec, width, height)
ra_vertices, dec_vertices = (
[v1_ra, v2_ra, v4_ra, v3_ra], [v1_dec, v2_dec, v4_dec, v3_dec])
ipix_fov_box, NSIDE = self.__ipix_box(ra_vertices, dec_vertices)
return ipix_fov_box, NSIDE
def ipix_within_circle(self, ra_vertices, dec_vertices):
pass
def sky_coords(self, ipixs, nside):
"""Converting the ipix into right ascension and declination in degrees
Return
-------
        ra, dec : numpy arrays
              sky coordinates in degrees
"""
# from index to polar coordinates
theta, phi = hp.pix2ang(nside, ipixs)
# converting these to right ascension and declination in degrees
ra = np.rad2deg(phi)
dec = np.rad2deg(0.5 * np.pi - theta)
# creating an astropy.table with RA[deg] and DEC[deg]
#contour_ipix = Table([ra, dec], names = ('RA[deg]', 'DEC[deg]'),
# meta = {'ipix': 'ipix table'})
return ra, dec
def moc_order(self, nside):
"""Setting MOC order.
Return
-------
moc_order : int
"""
order = int(log( nside, 2))
return order
    def create_moc(self, contour_ipix=None, moc_order=None):
        """Creating a MOC map from the contour_ipix table."""
        contour_ipix = self.contour_ipix if contour_ipix is None else contour_ipix
        moc_order = self.moc_order if moc_order is None else moc_order
        self.moc = MOC.from_table(contour_ipix, 'RA[deg]', 'DEC[deg]',
                                  moc_order)
        return self.moc
def write_moc(self, percentage, short_name):
"""Writing MOC file in fits format.
Input parameters
----------------
percentage : float
fractional percentage from 0 to 1 converted into a string
short_name : str
file output
"""
return self.moc.write(short_name + '_MOC_' + str(percentage), format = 'fits')
    def contour_plot(self, infile, percentage, short_name=''):
        """Creating/Writing a MOC contour region at a fixed level of probability.

        Input parameters
        ----------------
        infile : string
            LVC probability sky localization in healpix format
        percentage : float
            fractional percentage from 0 to 1
        """

        hpx = self.read_prob(infile)
        nside = hp.get_nside(hpx)
        ipixs = self.ipixs_in_percentage(hpx, percentage).flatten()
        ra, dec = self.sky_coords(ipixs, nside)
        contour_ipix = Table([ra, dec], names=('RA[deg]', 'DEC[deg]'),
                             meta={'ipix': 'ipix table'})
        order = self.moc_order(nside)
        self.create_moc(contour_ipix, order)

        return self.write_moc(percentage, short_name)
def contour_default(self, _from, _to, _step, skymap=""):
"""Creating & Showing MOC plots (from 10% to 90% in step of 10%) in a folder."""
        colors = ["#ff0000", "#ffaa00", "#aaff00", "#00ff00", "#00ffa9",
                  "#00a9ff", "#0000ff", "#aa00ff", "#ff00aa"]  # skymap viewer colors
#short_name = skymap
suffix = skymap[0:]
aladin.md('MOC' + suffix) # creating a stack folder
aladin.remove('MOC' + suffix + '~1') # removing multiple copy of the folder
for i, color in zip(np.arange(_from, _to, _step),colors):
aladin.cmoc((i/100.0), skymap,'moc'+str(i/100.0) + suffix)
            time.sleep(1)  # brief pause to let Aladin organize the planes
plane = 'moc'+str(i/100.0) + suffix
aladin.set_color(plane, color)
aladin.set_moc('moc'+str(i/100.0)+ suffix)
aladin.mv('moc'+str(i/100.0)+ suffix,'MOC' + suffix)
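
# A minimal usage sketch; 'skymap.fits.gz' is a placeholder for a local LVC healpix
# skymap and is not shipped with this module:
#
#   moc_region = MOC_confidence_region()
#   hpx = moc_region.read_prob('skymap.fits.gz')
#   ipixs = moc_region.ipixs_in_percentage(hpx, 0.9)          # 90% credible region
#   ra, dec = moc_region.sky_coords(ipixs.flatten(), hp.get_nside(hpx))
#   # or, end to end:
#   moc_region.contour_plot('skymap.fits.gz', 0.9, short_name='GW_event')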
|
|
# coding=utf-8
# Copyright 2018 The Tensor2Tensor Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for common attention."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# Dependency imports
import numpy as np
from tensor2tensor.layers import common_attention
from tensor2tensor.layers import common_layers
import tensorflow as tf
class CommonAttentionTest(tf.test.TestCase):
def testDotProductAttention(self):
x = np.random.rand(5, 7, 12, 32)
y = np.random.rand(5, 7, 12, 32)
with self.test_session() as session:
a = common_attention.dot_product_attention(
tf.constant(x, dtype=tf.float32),
tf.constant(y, dtype=tf.float32),
tf.constant(y, dtype=tf.float32), None)
session.run(tf.global_variables_initializer())
res = session.run(a)
self.assertEqual(res.shape, (5, 7, 12, 32))
def testMaskedLocalAttention1D(self):
q = np.array([[[[1.0, 0.0, 0.0, 0.0], [1.0, 0.0, 0.0, 0.0],
[1.0, 0.0, 0.0, 0.0], [1.0, 0.0, 0.0, 0.0],
[1.0, 0.0, 0.0, 0.0], [1.0, 0.0, 0.0, 0.0],
[1.0, 0.0, 0.0, 0.0], [1.0, 0.0, 0.0, 0.0]]]])
k = np.array([[[[1.0, 0.0, 0.0, 0.0], [1.0, 0.0, 0.0, 0.0],
[1.0, 0.0, 0.0, 0.0], [1.0, 0.0, 0.0, 0.0],
[1.0, 0.0, 0.0, 0.0], [1.0, 0.0, 0.0, 0.0],
[1.0, 0.0, 0.0, 0.0], [1.0, 0.0, 0.0, 0.0]]]])
v = np.ones((1, 1, 8, 1))
with self.test_session() as session:
q_ = tf.constant(q, dtype=tf.float32)
k_ = tf.constant(k, dtype=tf.float32)
v_ = tf.constant(v, dtype=tf.float32)
y = common_attention.masked_local_attention_1d(
q_, k_, v_, block_length=tf.constant(2))
res = session.run(y)
self.assertEqual(res.shape, (1, 1, 8, 1))
def testLocalUnmaskedAttention1D(self):
x = np.random.rand(5, 4, 25, 16)
y = np.random.rand(5, 4, 25, 16)
with self.test_session() as session:
a = common_attention.local_attention_1d(
tf.constant(x, dtype=tf.float32),
tf.constant(y, dtype=tf.float32),
tf.constant(y, dtype=tf.float32),
block_length=4,
filter_width=3)
session.run(tf.global_variables_initializer())
res = session.run(a)
self.assertEqual(res.shape, (5, 4, 25, 16))
def testLocalUnmaskedAttention1DMatchingBlockLength(self):
x = np.random.rand(5, 4, 25, 16)
y = np.random.rand(5, 4, 25, 16)
with self.test_session() as session:
a = common_attention.local_attention_1d(
tf.constant(x, dtype=tf.float32),
tf.constant(y, dtype=tf.float32),
tf.constant(y, dtype=tf.float32),
block_length=5,
filter_width=3)
session.run(tf.global_variables_initializer())
res = session.run(a)
self.assertEqual(res.shape, (5, 4, 25, 16))
def testLocalUnmaskedAttention2D(self):
x = np.random.rand(5, 4, 25, 25, 16)
y = np.random.rand(5, 4, 25, 25, 16)
with self.test_session() as session:
a = common_attention.local_attention_2d(
tf.constant(x, dtype=tf.float32),
tf.constant(y, dtype=tf.float32),
tf.constant(y, dtype=tf.float32),
query_shape=(4, 4),
memory_flange=(3, 3))
session.run(tf.global_variables_initializer())
res = session.run(a)
self.assertEqual(res.shape, (5, 4, 25, 25, 16))
def testLocalUnmaskedAttention2DMatchingBlockLength(self):
x = np.random.rand(5, 4, 25, 25, 16)
y = np.random.rand(5, 4, 25, 25, 16)
with self.test_session() as session:
a = common_attention.local_attention_2d(
tf.constant(x, dtype=tf.float32),
tf.constant(y, dtype=tf.float32),
tf.constant(y, dtype=tf.float32),
query_shape=(5, 5),
memory_flange=(3, 3))
session.run(tf.global_variables_initializer())
res = session.run(a)
self.assertEqual(res.shape, (5, 4, 25, 25, 16))
def testMultiheadSelfAttentionMemoryEfficient(self):
num_heads = 4
io_size = 16
batch = 2
length = 7
head_size = 5
x = np.random.rand(batch, length, io_size)
dy = np.random.rand(batch, length, io_size)
with self.test_session() as session:
x = tf.to_float(x)
dy = tf.to_float(dy)
bias = common_attention.attention_bias_lower_triangle(length)
wqkv = tf.get_variable(
"wqkv", [num_heads, 1, io_size, 3 * head_size],
initializer=tf.random_normal_initializer(stddev=io_size**-0.5))
wo = tf.get_variable(
"wo", [num_heads, 1, head_size, io_size],
initializer=tf.random_normal_initializer(
stddev=(head_size * num_heads)**-0.5))
norm_scale, norm_bias = common_layers.layer_norm_vars(io_size)
y = common_attention.multihead_self_attention_memory_efficient(
x, bias, num_heads, head_size=head_size, forget=False,
test_vars=(wqkv, wo, norm_scale, norm_bias))
y_forget = common_attention.multihead_self_attention_memory_efficient(
x, bias, num_heads, head_size=head_size, forget=True,
test_vars=(wqkv, wo, norm_scale, norm_bias))
dx, dwqkv, dwo, dnorm_scale, dnorm_bias = tf.gradients(
ys=[y], xs=[x, wqkv, wo, norm_scale, norm_bias], grad_ys=[dy])
dx_f, dwqkv_f, dwo_f, dnorm_scale_f, dnorm_bias_f = tf.gradients(
ys=[y_forget], xs=[x, wqkv, wo, norm_scale, norm_bias], grad_ys=[dy])
session.run(tf.global_variables_initializer())
(y, y_forget,
dx, dwqkv, dwo, dnorm_scale, dnorm_bias,
dx_f, dwqkv_f, dwo_f, dnorm_scale_f, dnorm_bias_f) = session.run(
[y, y_forget,
dx, dwqkv, dwo, dnorm_scale, dnorm_bias,
dx_f, dwqkv_f, dwo_f, dnorm_scale_f, dnorm_bias_f])
self.assertAllClose(y, y_forget)
self.assertAllClose(dwo, dwo_f)
self.assertAllClose(dwqkv, dwqkv_f)
self.assertAllClose(dnorm_scale, dnorm_scale_f)
self.assertAllClose(dnorm_bias, dnorm_bias_f)
self.assertAllClose(dx, dx_f)
def test2dGatherAndScatterInvertibility(self):
"""2d gather and scatter invertibility test."""
batch_size = 2
num_heads = 2
height = 4
width = 6
depth = 8
query_shape = (2, 3)
x = np.random.rand(batch_size, num_heads, height, width, depth)
with self.test_session() as session:
x_indices = common_attention.gather_indices_2d(
x, query_shape, query_shape)
gathered_x = common_attention.gather_blocks_2d(x, x_indices)
x_shape = tf.constant([batch_size, num_heads, height, width, depth])
scattered_x = common_attention.scatter_blocks_2d(
gathered_x, x_indices, x_shape)
session.run(tf.global_variables_initializer())
res = session.run(scattered_x)
self.assertAllClose(x, res)
def test2dBlockRasterScanMask(self):
"""Testing the 2d block raster scan mask."""
query_shape = (2, 3)
memory_flange = (2, 1)
with self.test_session() as session:
mask = common_attention.make_2d_block_raster_mask(
query_shape, memory_flange)
res = session.run(mask)
correct_mask = np.array(
[[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0,
1.0, 0.0, 1.0, 1.0, 1.0, 1.0],
[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0,
1.0, 0.0, 1.0, 1.0, 1.0, 1.0],
[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
1.0, 0.0, 1.0, 1.0, 1.0, 1.0],
[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
1.0, 0.0, 0.0, 1.0, 1.0, 1.0],
[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
1.0, 0.0, 0.0, 0.0, 1.0, 1.0],
[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
1.0, 0.0, 0.0, 0.0, 0.0, 1.0]])
self.assertAllClose(correct_mask, res)
def test2dGather(self):
"""Testing 2d index gather and block gather functions."""
batch_size = 2
num_heads = 2
height = 4
width = 6
depth = 8
query_shape = (2, 3)
x = np.random.rand(batch_size, num_heads, height, width, depth)
y = np.reshape(x, (batch_size, num_heads, -1, depth))
correct_indices = [[0, 1, 2, 6, 7, 8],
[3, 4, 5, 9, 10, 11],
[12, 13, 14, 18, 19, 20],
[15, 16, 17, 21, 22, 23]]
correct_gathered_x = [[[y[0, 0, correct_indices[0]],
y[0, 0, correct_indices[1]],
y[0, 0, correct_indices[2]],
y[0, 0, correct_indices[3]]],
[y[0, 1, correct_indices[0]],
y[0, 1, correct_indices[1]],
y[0, 1, correct_indices[2]],
y[0, 1, correct_indices[3]]]],
[[y[1, 0, correct_indices[0]],
y[1, 0, correct_indices[1]],
y[1, 0, correct_indices[2]],
y[1, 0, correct_indices[3]]],
[y[1, 1, correct_indices[0]],
y[1, 1, correct_indices[1]],
y[1, 1, correct_indices[2]],
y[1, 1, correct_indices[3]]]]]
with self.test_session() as session:
x_indices = common_attention.gather_indices_2d(
x, query_shape, query_shape)
gathered_x = common_attention.gather_blocks_2d(x, x_indices)
x_indices, gathered_x = session.run([x_indices, gathered_x])
self.assertAllEqual(correct_indices, x_indices)
self.assertAllClose(correct_gathered_x, gathered_x)
def testGetMemoryRegion(self):
"""Testing the function that gathers the flanged memory region."""
np.set_printoptions(threshold=np.inf)
batch_size = 2
num_heads = 2
height = 4
width = 6
depth = 3
query_shape = (2, 3)
memory_flange = (1, 1)
x = np.random.rand(batch_size, num_heads, height, width, depth)
y = np.reshape(x, (batch_size, num_heads, -1, depth))
zeros = np.zeros((depth), dtype=np.float32)
five_zeros = np.array([zeros]*5)
seven_zeros = np.array([zeros]*7)
two_zeros = np.array([zeros]*2)
zeros = np.array([zeros])
correct_x_flange = [[[seven_zeros,
np.concatenate((five_zeros, y[0, 0, [2, 8]]),
axis=0),
np.concatenate((zeros, y[0, 0, [6, 7, 8, 9]],
two_zeros), axis=0),
np.concatenate((y[0, 0, [8, 9, 10, 11]], zeros,
y[0, 0, [14, 20]]), axis=0)],
[seven_zeros,
np.concatenate((five_zeros, y[0, 1, [2, 8]]),
axis=0),
np.concatenate((zeros, y[0, 1, [6, 7, 8, 9]],
two_zeros), axis=0),
np.concatenate((y[0, 1, [8, 9, 10, 11]], zeros,
y[0, 1, [14, 20]]), axis=0)]],
[[seven_zeros,
np.concatenate((five_zeros, y[1, 0, [2, 8]]),
axis=0),
np.concatenate((zeros, y[1, 0, [6, 7, 8, 9]],
two_zeros), axis=0),
np.concatenate((y[1, 0, [8, 9, 10, 11]], zeros,
y[1, 0, [14, 20]]), axis=0)],
[seven_zeros,
np.concatenate((five_zeros, y[1, 1, [2, 8]]),
axis=0),
np.concatenate((zeros, y[1, 1, [6, 7, 8, 9]],
two_zeros), axis=0),
np.concatenate((y[1, 1, [8, 9, 10, 11]], zeros,
y[1, 1, [14, 20]]), axis=0)]]]
correct_x_flange = np.array(correct_x_flange)
correct_x_center = [[[y[0, 0, [0, 1, 2, 6, 7, 8]],
y[0, 0, [3, 4, 5, 9, 10, 11]],
y[0, 0, [12, 13, 14, 18, 19, 20]],
y[0, 0, [15, 16, 17, 21, 22, 23]]],
[y[0, 1, [0, 1, 2, 6, 7, 8]],
y[0, 1, [3, 4, 5, 9, 10, 11]],
y[0, 1, [12, 13, 14, 18, 19, 20]],
y[0, 1, [15, 16, 17, 21, 22, 23]]]],
[[y[1, 0, [0, 1, 2, 6, 7, 8]],
y[1, 0, [3, 4, 5, 9, 10, 11]],
y[1, 0, [12, 13, 14, 18, 19, 20]],
y[1, 0, [15, 16, 17, 21, 22, 23]]],
[y[1, 1, [0, 1, 2, 6, 7, 8]],
y[1, 1, [3, 4, 5, 9, 10, 11]],
y[1, 1, [12, 13, 14, 18, 19, 20]],
y[1, 1, [15, 16, 17, 21, 22, 23]]]]]
correct_x_center = np.array(correct_x_center)
with self.test_session() as session:
x_indices = common_attention.gather_indices_2d(
x, query_shape, query_shape)
x_flange, x_center = common_attention.get_memory_region(
tf.constant(x, dtype=tf.float32),
query_shape,
memory_flange,
x_indices)
session.run(tf.global_variables_initializer())
[x_flange, x_center] = session.run([x_flange, x_center])
self.assertAllClose(correct_x_flange, x_flange)
self.assertAllClose(correct_x_center, x_center)
def testGetShiftedCenterBlocks(self):
"""Testing the function that gathers the flanged memory region."""
np.set_printoptions(threshold=np.inf)
batch_size = 2
num_heads = 2
height = 4
width = 6
depth = 3
query_shape = (2, 3)
x = np.random.rand(batch_size, num_heads, height, width, depth)
y = np.reshape(x, (batch_size, num_heads, -1, depth))
zeros = np.zeros((depth), dtype=np.float32)
zeros = np.array([zeros])
correct_gathered_x = [[[np.concatenate((zeros, y[0, 0, [0, 1, 2, 6, 7]]),
axis=0),
np.concatenate((zeros, y[0, 0, [3, 4, 5, 9, 10]]),
axis=0),
np.concatenate((zeros,
y[0, 0, [12, 13, 14, 18, 19]]),
axis=0),
np.concatenate((zeros,
y[0, 0, [15, 16, 17, 21, 22]]),
axis=0)],
[np.concatenate((zeros, y[0, 1, [0, 1, 2, 6, 7]]),
axis=0),
np.concatenate((zeros, y[0, 1, [3, 4, 5, 9, 10]]),
axis=0),
np.concatenate((zeros,
y[0, 1, [12, 13, 14, 18, 19]]),
axis=0),
np.concatenate((zeros,
y[0, 1, [15, 16, 17, 21, 22]]),
axis=0)]],
[[np.concatenate((zeros, y[1, 0, [0, 1, 2, 6, 7]]),
axis=0),
np.concatenate((zeros, y[1, 0, [3, 4, 5, 9, 10]]),
axis=0),
np.concatenate((zeros,
y[1, 0, [12, 13, 14, 18, 19]]),
axis=0),
np.concatenate((zeros,
y[1, 0, [15, 16, 17, 21, 22]]),
axis=0)],
[np.concatenate((zeros, y[1, 1, [0, 1, 2, 6, 7]]),
axis=0),
np.concatenate((zeros, y[1, 1, [3, 4, 5, 9, 10]]),
axis=0),
np.concatenate((zeros,
y[1, 1, [12, 13, 14, 18, 19]]),
axis=0),
np.concatenate((zeros,
y[1, 1, [15, 16, 17, 21, 22]]),
axis=0)]]]
correct_gathered_x = np.array(correct_gathered_x)
with self.test_session() as session:
x_indices = common_attention.gather_indices_2d(
x, query_shape, query_shape)
gathered_x = common_attention.get_shifted_center_blocks(
tf.constant(x, dtype=tf.float32),
x_indices)
session.run(tf.global_variables_initializer())
x_indices, gathered_x = session.run([x_indices, gathered_x])
self.assertAllClose(correct_gathered_x, gathered_x)
def testDotProductAttentionRelative(self):
x = np.random.rand(5, 7, 12, 32)
y = np.random.rand(5, 7, 12, 32)
with self.test_session() as session:
a = common_attention.dot_product_attention_relative(
tf.constant(x, dtype=tf.float32),
tf.constant(y, dtype=tf.float32),
tf.constant(y, dtype=tf.float32),
None,
max_relative_position=3)
session.run(tf.global_variables_initializer())
res = session.run(a)
self.assertEqual(res.shape, (5, 7, 12, 32))
def testBiasBatchCoordinates(self):
"""Testing the batch cooridnates mask."""
q = tf.constant([0, 0, 1, 1, 1, 1, 2, 2, 2], dtype=tf.int32)
q = tf.expand_dims(q, axis=-1)
k = tf.constant([0, 0, 0, 2, 2, 3, 3, 3], dtype=tf.int32)
k = tf.expand_dims(k, axis=-1)
ground_truth = np.array([
[0, 0, 0, 1, 1, 1, 1, 1], # 0
[0, 0, 0, 1, 1, 1, 1, 1],
[1, 1, 1, 1, 1, 1, 1, 1], # 1 (just masked)
[1, 1, 1, 1, 1, 1, 1, 1],
[1, 1, 1, 1, 1, 1, 1, 1],
[1, 1, 1, 1, 1, 1, 1, 1],
[1, 1, 1, 0, 0, 1, 1, 1], # 2
[1, 1, 1, 0, 0, 1, 1, 1],
[1, 1, 1, 0, 0, 1, 1, 1],
], np.float32) * -1e9
bias = common_attention.attention_bias_coordinates(q, k)
with self.test_session() as session:
session.run(tf.global_variables_initializer())
self.assertAllClose(
bias.eval(),
ground_truth,
)
def testBiasFuture(self):
"""Testing the sequence order mask."""
q = tf.constant([0, 1, 2, 3, 0, 1, 2, 0, 1], dtype=tf.int32)
q = tf.expand_dims(q, axis=-1)
k = tf.constant([0, 1, 2, 3, 4, 0, 1, 2], dtype=tf.int32)
k = tf.expand_dims(k, axis=-1)
ground_truth = np.array([
[0, 1, 1, 1, 1, 0, 1, 1], # 0
[0, 0, 1, 1, 1, 0, 0, 1], # 1
[0, 0, 0, 1, 1, 0, 0, 0], # 2
[0, 0, 0, 0, 1, 0, 0, 0], # 3
[0, 1, 1, 1, 1, 0, 1, 1], # 0
[0, 0, 1, 1, 1, 0, 0, 1], # 1
[0, 0, 0, 1, 1, 0, 0, 0], # 2
[0, 1, 1, 1, 1, 0, 1, 1], # 0
[0, 0, 1, 1, 1, 0, 0, 1], # 1
], np.float32) * -1e9
bias = common_attention.attention_bias_future(q, k)
with self.test_session() as session:
session.run(tf.global_variables_initializer())
self.assertAllClose(
bias.eval(),
ground_truth,
)
if __name__ == "__main__":
tf.test.main()
|
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import copy
import json
import uuid
import six
from keystoneclient.auth import token_endpoint
from keystoneclient import exceptions
from keystoneclient import session
from keystoneclient.tests.unit.v3 import client_fixtures
from keystoneclient.tests.unit.v3 import utils
from keystoneclient.v3 import client
class KeystoneClientTest(utils.TestCase):
def test_unscoped_init(self):
token = client_fixtures.unscoped_token()
self.stub_auth(json=token)
c = client.Client(user_domain_name=token.user_domain_name,
username=token.user_name,
password='password',
auth_url=self.TEST_URL)
self.assertIsNotNone(c.auth_ref)
self.assertFalse(c.auth_ref.domain_scoped)
self.assertFalse(c.auth_ref.project_scoped)
self.assertEqual(token.user_id, c.auth_user_id)
self.assertFalse(c.has_service_catalog())
self.assertEqual(token.user_id, c.get_user_id(session=None))
self.assertIsNone(c.get_project_id(session=None))
def test_domain_scoped_init(self):
token = client_fixtures.domain_scoped_token()
self.stub_auth(json=token)
c = client.Client(user_id=token.user_id,
password='password',
domain_name=token.domain_name,
auth_url=self.TEST_URL)
self.assertIsNotNone(c.auth_ref)
self.assertTrue(c.auth_ref.domain_scoped)
self.assertFalse(c.auth_ref.project_scoped)
self.assertEqual(token.user_id, c.auth_user_id)
self.assertEqual(token.domain_id, c.auth_domain_id)
def test_project_scoped_init(self):
token = client_fixtures.project_scoped_token()
        self.stub_auth(json=token)
c = client.Client(user_id=token.user_id,
password='password',
user_domain_name=token.user_domain_name,
project_name=token.project_name,
auth_url=self.TEST_URL)
self.assertIsNotNone(c.auth_ref)
self.assertFalse(c.auth_ref.domain_scoped)
self.assertTrue(c.auth_ref.project_scoped)
self.assertEqual(token.user_id, c.auth_user_id)
self.assertEqual(token.project_id, c.auth_tenant_id)
self.assertEqual(token.user_id, c.get_user_id(session=None))
self.assertEqual(token.project_id, c.get_project_id(session=None))
def test_auth_ref_load(self):
token = client_fixtures.project_scoped_token()
self.stub_auth(json=token)
c = client.Client(user_id=token.user_id,
password='password',
project_id=token.project_id,
auth_url=self.TEST_URL)
cache = json.dumps(c.auth_ref)
new_client = client.Client(auth_ref=json.loads(cache))
self.assertIsNotNone(new_client.auth_ref)
self.assertFalse(new_client.auth_ref.domain_scoped)
self.assertTrue(new_client.auth_ref.project_scoped)
self.assertEqual(token.user_name, new_client.username)
self.assertIsNone(new_client.password)
self.assertEqual(new_client.management_url,
'http://admin:35357/v3')
def test_auth_ref_load_with_overridden_arguments(self):
new_auth_url = 'https://newkeystone.com/v3'
user_id = uuid.uuid4().hex
user_name = uuid.uuid4().hex
project_id = uuid.uuid4().hex
first = client_fixtures.project_scoped_token(user_id=user_id,
user_name=user_name,
project_id=project_id)
second = client_fixtures.project_scoped_token(user_id=user_id,
user_name=user_name,
project_id=project_id)
self.stub_auth(json=first)
self.stub_auth(json=second, base_url=new_auth_url)
c = client.Client(user_id=user_id,
password='password',
project_id=project_id,
auth_url=self.TEST_URL)
cache = json.dumps(c.auth_ref)
new_client = client.Client(auth_ref=json.loads(cache),
auth_url=new_auth_url)
self.assertIsNotNone(new_client.auth_ref)
self.assertFalse(new_client.auth_ref.domain_scoped)
self.assertTrue(new_client.auth_ref.project_scoped)
self.assertEqual(new_auth_url, new_client.auth_url)
self.assertEqual(user_name, new_client.username)
self.assertIsNone(new_client.password)
self.assertEqual(new_client.management_url,
'http://admin:35357/v3')
def test_trust_init(self):
token = client_fixtures.trust_token()
self.stub_auth(json=token)
c = client.Client(user_domain_name=token.user_domain_name,
username=token.user_name,
password='password',
auth_url=self.TEST_URL,
trust_id=token.trust_id)
self.assertIsNotNone(c.auth_ref)
self.assertFalse(c.auth_ref.domain_scoped)
self.assertFalse(c.auth_ref.project_scoped)
self.assertEqual(token.trust_id, c.auth_ref.trust_id)
self.assertEqual(token.trustee_user_id, c.auth_ref.trustee_user_id)
self.assertEqual(token.trustor_user_id, c.auth_ref.trustor_user_id)
self.assertTrue(c.auth_ref.trust_scoped)
self.assertEqual(token.user_id, c.auth_user_id)
def test_init_err_no_auth_url(self):
self.assertRaises(exceptions.AuthorizationFailure,
client.Client,
username='exampleuser',
password='password')
def _management_url_is_updated(self, fixture, **kwargs):
second = copy.deepcopy(fixture)
first_url = 'http://admin:35357/v3'
second_url = "http://secondurl:%d/v3'"
for entry in second['token']['catalog']:
if entry['type'] == 'identity':
entry['endpoints'] = [{
'url': second_url % 5000,
'region': 'RegionOne',
'interface': 'public'
}, {
'url': second_url % 5000,
'region': 'RegionOne',
'interface': 'internal'
}, {
'url': second_url % 35357,
'region': 'RegionOne',
'interface': 'admin'
}]
self.stub_auth(response_list=[{'json': fixture}, {'json': second}])
cl = client.Client(username='exampleuser',
password='password',
auth_url=self.TEST_URL,
**kwargs)
self.assertEqual(cl.management_url, first_url)
cl.authenticate()
self.assertEqual(cl.management_url, second_url % 35357)
def test_management_url_is_updated_with_project(self):
self._management_url_is_updated(client_fixtures.project_scoped_token(),
project_name='exampleproject')
def test_management_url_is_updated_with_domain(self):
self._management_url_is_updated(client_fixtures.domain_scoped_token(),
domain_name='exampledomain')
def test_client_with_region_name_passes_to_service_catalog(self):
# NOTE(jamielennox): this is deprecated behaviour that should be
# removed ASAP, however must remain compatible.
self.deprecations.expect_deprecations()
self.stub_auth(json=client_fixtures.auth_response_body())
cl = client.Client(username='exampleuser',
password='password',
project_name='exampleproject',
auth_url=self.TEST_URL,
region_name='North')
self.assertEqual(cl.service_catalog.url_for(service_type='image'),
'http://glance.north.host/glanceapi/public')
cl = client.Client(username='exampleuser',
password='password',
project_name='exampleproject',
auth_url=self.TEST_URL,
region_name='South')
self.assertEqual(cl.service_catalog.url_for(service_type='image'),
'http://glance.south.host/glanceapi/public')
def test_client_without_auth_params(self):
self.assertRaises(exceptions.AuthorizationFailure,
client.Client,
project_name='exampleproject',
auth_url=self.TEST_URL)
def test_client_params(self):
opts = {'auth': token_endpoint.Token('a', 'b'),
'connect_retries': 50,
'endpoint_override': uuid.uuid4().hex,
'interface': uuid.uuid4().hex,
'region_name': uuid.uuid4().hex,
'service_name': uuid.uuid4().hex,
'user_agent': uuid.uuid4().hex,
}
sess = session.Session()
cl = client.Client(session=sess, **opts)
for k, v in six.iteritems(opts):
self.assertEqual(v, getattr(cl._adapter, k))
self.assertEqual('identity', cl._adapter.service_type)
self.assertEqual((3, 0), cl._adapter.version)
|
|
from enum import Enum
import string
from io import IOBase
from ..errors import *
#
# Constants
#
# Forward declaration so the type annotation in Node.__init__ can reference Node.
Node = None
class Node:
links = None
token_type = None
verify = None
def __init__(self, links:[(Node, "func"),] = None, token_type = None, verify = None):
links = links or []
self.links = links
self.token_type = token_type
self.verify = verify
def evaluate(self, character):
out = []
for target, condition in self.links:
if condition(character):
out.append(target)
return out
def getToken(self, start, stop, value):
if self.verify is not None:
value = self.verify(value)
assert self.token_type is not None
return Token(self.token_type, start, stop, value)
    def __repr__(self):
        return "Node(token:{})".format(self.token_type)
Tokens = Enum("Tokens", [
"newline",
"identifier",
"const_kwd",
"ref_kwd",
"def_kwd",
"end_kwd",
"return_kwd",
"class_kwd",
"new_kwd",
"as_kwd",
"module_kwd",
"loop_kwd",
"while_kwd",
"for_kwd",
"in_kwd",
"break_kwd",
"self_kwd",
"if_kwd",
"elif_kwd",
"else_kwd",
"import_kwd",
"pragma_kwd",
"true_kwd",
"false_kwd",
"string",
"format_string",
"integer",
"group_start",
"group_end",
"typeof",
"returns",
"dot",
"comma",
"assign",
"addition",
"subtraction",
"multiplication",
"integer_division",
"mod",
"division",
"equality",
"inequality",
"smaller_than",
"smaller_than_or_equal_to",
"greater_than",
"greater_than_or_equal_to",
"logical_negation",
"logical_and",
"logical_or",
"function",
])
TREE = Node()
# Ignore whitespace
WHITESPACE = set(" \t")
TREE.links.append((TREE, lambda c: c in WHITESPACE))
# Newlines
NEWLINE_CHAR = "\n"
newline_node = Node(token_type=Tokens.newline)
TREE.links.append((newline_node, lambda c: c == NEWLINE_CHAR))
# Comments
COMMENT_CHAR = "#"
node = Node()
TREE.links.append((node, lambda c: c == COMMENT_CHAR))
node.links.append((node, lambda c: c != NEWLINE_CHAR))
node.links.append((newline_node, lambda c: c == NEWLINE_CHAR or c is None))
# Strings
FORMAT_STRING_CHAR = "\""
FORMAT_STRING_ESCAPE_CHAR = "\\"
node = Node()
TREE.links.append((node, lambda c: c == FORMAT_STRING_CHAR))
node.links.append((node, lambda c: c != FORMAT_STRING_CHAR and c != FORMAT_STRING_ESCAPE_CHAR))
escape = Node()
node.links.append((escape, lambda c: c == FORMAT_STRING_ESCAPE_CHAR))
escape.links.append((node, lambda c: True))
end_node = Node(token_type=Tokens.format_string, verify=lambda s: s[1:-1])
node.links.append((end_node, lambda c: c == FORMAT_STRING_CHAR))
# WYSIWYG Strings
WYSIWYG_STRING_CHAR = "`"
node = Node()
TREE.links.append((node, lambda c: c == WYSIWYG_STRING_CHAR))
node.links.append((node, lambda c: c != WYSIWYG_STRING_CHAR))
end_node = Node(token_type=Tokens.string, verify=lambda s: s[1:-1])
node.links.append((end_node, lambda c: c == WYSIWYG_STRING_CHAR))
# Direct maps
# Must be ordered by length for duplicated characters
DIRECT_MAP = [
# Operators
("+", Tokens.addition),
("-", Tokens.subtraction),
("*", Tokens.multiplication),
("//", Tokens.integer_division),
("/", Tokens.division),
("%", Tokens.mod),
("==", Tokens.equality),
("!=", Tokens.inequality),
("<=", Tokens.smaller_than_or_equal_to),
("<", Tokens.smaller_than),
(">=", Tokens.greater_than_or_equal_to),
(">", Tokens.greater_than),
("!", Tokens.logical_negation),
("&&", Tokens.logical_and),
("||", Tokens.logical_or),
("=>", Tokens.function),
# Instructions
("(", Tokens.group_start),
(")", Tokens.group_end),
(":", Tokens.typeof),
("->", Tokens.returns),
(",", Tokens.comma),
("=", Tokens.assign),
(".", Tokens.dot),
# Keywords
("const", Tokens.const_kwd),
("ref", Tokens.ref_kwd),
("def", Tokens.def_kwd),
("end", Tokens.end_kwd),
("return", Tokens.return_kwd),
("class", Tokens.class_kwd),
("new", Tokens.new_kwd),
("as", Tokens.as_kwd),
("module", Tokens.module_kwd),
("loop", Tokens.loop_kwd),
("while", Tokens.while_kwd),
("for", Tokens.for_kwd),
("in", Tokens.in_kwd),
("break", Tokens.break_kwd),
("self", Tokens.self_kwd),
("elif", Tokens.elif_kwd),
("if", Tokens.if_kwd),
("else", Tokens.else_kwd),
("import", Tokens.import_kwd),
("pragma", Tokens.pragma_kwd),
# Constants
("true", Tokens.true_kwd),
("false", Tokens.false_kwd),
]
for value, token_type in DIRECT_MAP:
node = TREE
for char in value:
next_node = Node()
node.links.append((next_node, lambda c, char=char: c == char))
node = next_node
node.token_type = token_type
# Identifiers
WORD_CHARACTERS = set(string.ascii_letters + "_")
WORD_CHARACTERS_AFTER = WORD_CHARACTERS | set(string.digits)
node = Node(token_type=Tokens.identifier)
TREE.links.append((node, lambda c: c in WORD_CHARACTERS))
end_node = Node(token_type=Tokens.identifier)
node.links.append((end_node, lambda c: c in WORD_CHARACTERS_AFTER))
end_node.links.append((end_node, lambda c: c in WORD_CHARACTERS_AFTER))
# Numbers
DIGIT_CHARACTERS = set(string.digits)
node = Node(token_type=Tokens.integer)
TREE.links.append((node, lambda c: c in DIGIT_CHARACTERS))
underscore_node = Node()
node.links.append((underscore_node, lambda c: c == "_"))
end_node = Node(token_type=Tokens.integer)
underscore_node.links.append((end_node, lambda c: c in DIGIT_CHARACTERS))
node.links.append((end_node, lambda c: c in DIGIT_CHARACTERS))
end_node.links.append((underscore_node, lambda c: c == "_"))
end_node.links.append((end_node, lambda c: c in DIGIT_CHARACTERS))
#
# Lexer
#
class Token:
def __init__(self, type:Tokens, start:int, end:int, data:str = None):
self.type = type
self.start = start
self.end = end
self.data = data
def __repr__(self):
if self.data is None:
return str(self.type)
return "{}({})".format(self.type, self.data)
class Lexer:
source = None
current = None
position = 0
def __init__(self, source:IOBase):
self.source = source
self.next()
# Read the next character into current
def next(self):
self.current = self.source.read(1)
self.position += 1
#
# Lexing Methods
#
# Lex a single token
def lex(self):
token_start = self.position - 1
token_data = ""
current_nodes = [TREE]
while True:
next_nodes = []
for node in current_nodes:
next_nodes += node.evaluate(self.current)
if len(next_nodes) == 0:
if len(current_nodes) > 0:
return self.outputNode(current_nodes, token_start, token_data)
raise InternalError("Zero current nodes in lex tree.")
elif len(next_nodes) == 1 and next_nodes[0] is TREE:
# Restart
token_start = self.position
token_data = ""
current_nodes = [TREE]
else:
token_data += self.current
if not self.current:
return None
self.next()
current_nodes = next_nodes
def outputNode(self, nodes, start, data):
for node in nodes:
if node.token_type is not None:
return node.getToken(start, self.position - 1, data)
elif node is TREE and not self.current:
return None
raise SyntaxError(message="Unexpected character").add(content=self.current, tokens=[Token(None, self.position - 1, self.position)], source=self.source)
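
# A minimal usage sketch; the source text is made up, and the module must be imported
# as part of its package so that the relative "from ..errors import *" above resolves:
#
#   from io import StringIO
#   lexer = Lexer(StringIO("def add(a: Int, b: Int) -> Int\n    return a + b\nend\n"))
#   token = lexer.lex()
#   while token is not None:
#       print(token)
#       token = lexer.lex()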
|
|
from auth_helper import *
from bulk_service_manager_helper import *
from output_helper import *
# You must provide credentials in auth_helper.py.
def main(authorization_data):
try:
# The Bing Merchant Center Store Id cannot be retrieved via the Bulk service,
# so we'll use the Campaign Management service i.e., the GetBMCStoresByCustomerId service operation below.
# Get a list of all Bing Merchant Center stores associated with your CustomerId.
output_status_message("-----\nGetBMCStoresByCustomerId:")
stores=campaign_service.GetBMCStoresByCustomerId()['BMCStore']
if stores is None:
output_status_message(
"You do not have any BMC stores registered for CustomerId {0}.".format(authorization_data.customer_id)
)
sys.exit(0)
upload_entities=[]
# Create a Shopping campaign with product conditions.
bulk_campaign=BulkCampaign()
bulk_campaign.client_id='YourClientIdGoesHere'
campaign=set_elements_to_none(campaign_service.factory.create('Campaign'))
campaign.BudgetType='DailyBudgetStandard'
campaign.CampaignType=['Shopping']
campaign.DailyBudget=50
languages=campaign_service.factory.create('ns3:ArrayOfstring')
languages.string.append('All')
campaign.Languages=languages
campaign.Name="Women's Shoes " + strftime("%a, %d %b %Y %H:%M:%S +0000", gmtime())
settings=campaign_service.factory.create('ArrayOfSetting')
setting=set_elements_to_none(campaign_service.factory.create('ShoppingSetting'))
setting.Priority=0
setting.SalesCountryCode ='US'
setting.StoreId=stores[0].Id
settings.Setting.append(setting)
campaign.Settings=settings
campaign.TimeZone='PacificTimeUSCanadaTijuana'
campaign.Id=CAMPAIGN_ID_KEY
bulk_campaign.campaign=campaign
upload_entities.append(bulk_campaign)
# Optionally, you can create a ProductScope criterion that will be associated with your Microsoft Shopping campaign.
# You'll also be able to add more specific product conditions for each ad group.
bulk_campaign_product_scope=BulkCampaignProductScope()
bulk_campaign_product_scope.status='Active'
campaign_criterion=set_elements_to_none(campaign_service.factory.create('CampaignCriterion'))
product_scope=set_elements_to_none(campaign_service.factory.create('ProductScope'))
conditions=campaign_service.factory.create('ArrayOfProductCondition')
condition_new=campaign_service.factory.create('ProductCondition')
condition_new.Operand='Condition'
condition_new.Attribute='New'
conditions.ProductCondition.append(condition_new)
condition_custom_label_0=campaign_service.factory.create('ProductCondition')
condition_custom_label_0.Operand='CustomLabel0'
condition_custom_label_0.Attribute='MerchantDefinedCustomLabel'
conditions.ProductCondition.append(condition_custom_label_0)
product_scope.Conditions=conditions
campaign_criterion.CampaignId=CAMPAIGN_ID_KEY
campaign_criterion.Criterion=product_scope
bulk_campaign_product_scope.biddable_campaign_criterion=campaign_criterion
upload_entities.append(bulk_campaign_product_scope)
# Create the ad group that will have the product partitions.
bulk_ad_group=BulkAdGroup()
bulk_ad_group.campaign_id=CAMPAIGN_ID_KEY
ad_group=set_elements_to_none(campaign_service.factory.create('AdGroup'))
ad_group.Id=AD_GROUP_ID_KEY
ad_group.Name="Product Categories"
ad_group.Status='Paused'
end_date=campaign_service.factory.create('Date')
end_date.Day=31
end_date.Month=12
end_date.Year=strftime("%Y", gmtime())
ad_group.EndDate=end_date
cpc_bid=campaign_service.factory.create('Bid')
cpc_bid.Amount=0.09
ad_group.CpcBid=cpc_bid
bulk_ad_group.ad_group=ad_group
upload_entities.append(bulk_ad_group)
#Create a product ad. You must add at least one product ad to the ad group.
#The product ad identifier can be used for reporting analytics.
#Use Merchant Promotions if you want tags to appear at the bottom of your product ad
#as "special offer" links, helping to increase customer engagement. For details
#on Merchant Promotions see https://help.bingads.microsoft.com/#apex/3/en/56805/0.
bulk_product_ad=BulkProductAd()
bulk_product_ad.ad_group_id=AD_GROUP_ID_KEY
ads=campaign_service.factory.create('ArrayOfAd')
product_ad=set_elements_to_none(campaign_service.factory.create('ProductAd'))
product_ad.Type='Product'
bulk_product_ad.ad=product_ad
upload_entities.append(bulk_product_ad)
output_status_message("-----\nAdding the campaign, product scope, ad group, and ad...")
download_entities=write_entities_and_upload_file(
bulk_service_manager=bulk_service_manager,
upload_entities=upload_entities)
output_status_message("Upload results:")
# Write the upload output
campaign_results=[]
campaign_product_scope_results=[]
ad_group_results=[]
product_ad_results=[]
for entity in download_entities:
if isinstance(entity, BulkCampaign):
campaign_results.append(entity)
output_bulk_campaigns([entity])
if isinstance(entity, BulkCampaignProductScope):
campaign_product_scope_results.append(entity)
output_bulk_campaign_product_scopes([entity])
if isinstance(entity, BulkAdGroup):
ad_group_results.append(entity)
output_bulk_ad_groups([entity])
if isinstance(entity, BulkProductAd):
product_ad_results.append(entity)
output_bulk_product_ads([entity])
ad_group_id=ad_group_results.pop(0).ad_group.Id
# Bid all products
helper=ProductPartitionHelper(ad_group_id)
root_condition=set_elements_to_none(campaign_service.factory.create('ProductCondition'))
root_condition.Operand='All'
root_condition.Attribute=None
root=helper.add_unit(
None,
root_condition,
0.35,
False,
"root"
)
output_status_message("-----\nApplying only the root as a Unit with a bid...")
apply_bulk_product_partition_actions_results=apply_bulk_product_partition_actions(helper.partition_actions)
product_partitions=get_bulk_ad_group_product_partition_tree(ad_group_id)
output_status_message("The ad group's product partition only has a tree root node:")
output_bulk_product_partitions(product_partitions)
# Let's update the bid of the root Unit we just added.
updated_root=get_node_by_client_id(apply_bulk_product_partition_actions_results, "root")
fixed_bid=set_elements_to_none(campaign_service.factory.create('FixedBid'))
fixed_bid.Amount=0.45
updated_root.ad_group_criterion.CriterionBid=fixed_bid
helper=ProductPartitionHelper(ad_group_id)
helper.update_partition(updated_root)
output_status_message("-----\nUpdating the bid for the tree root node...")
apply_bulk_product_partition_actions_results=apply_bulk_product_partition_actions(helper.partition_actions)
product_partitions=get_bulk_ad_group_product_partition_tree(ad_group_id)
output_status_message("Updated the bid for the tree root node:")
output_bulk_product_partitions(product_partitions)
# Initialize and overwrite any existing tree root, and build a product partition group tree structure in multiple steps.
# You could build the entire tree in a single call since there are less than 20,000 nodes; however,
# we will build it in steps to demonstrate how to use the results from bulk upload to update the tree.
helper=ProductPartitionHelper(ad_group_id)
# Check whether a root node exists already.
existing_root=get_node_by_client_id(apply_bulk_product_partition_actions_results, "root")
if existing_root is not None:
existing_root.client_id="deletedroot"
helper.delete_partition(existing_root)
root_condition=campaign_service.factory.create('ProductCondition')
root_condition.Operand='All'
root_condition.Attribute=None
root=helper.add_subdivision(
None,
root_condition,
"root"
)
#The direct children of any node must have the same Operand.
#For this example we will use CategoryL1 nodes as children of the root.
#For a list of valid CategoryL1 through CategoryL5 values, see the Bing Category Taxonomy:
#https://go.microsoft.com/fwlink?LinkId=507666
animals_condition=campaign_service.factory.create('ProductCondition')
animals_condition.Operand='CategoryL1'
animals_condition.Attribute='Animals & Pet Supplies'
animals_subdivision=helper.add_subdivision(
root,
animals_condition,
"animals_subdivision"
)
#If you use a CategoryL2 node, it must be a descendant (child or later) of a CategoryL1 node.
#In other words you cannot have a CategoryL2 node as parent of a CategoryL1 node.
        #For this example we will use a CategoryL2 node as a child of the CategoryL1 Animals & Pet Supplies node.
pet_supplies_condition=campaign_service.factory.create('ProductCondition')
pet_supplies_condition.Operand='CategoryL2'
pet_supplies_condition.Attribute='Pet Supplies'
pet_supplies_subdivision=helper.add_subdivision(
animals_subdivision,
pet_supplies_condition,
"pet_supplies_subdivision"
)
brand_a_condition=campaign_service.factory.create('ProductCondition')
brand_a_condition.Operand='Brand'
brand_a_condition.Attribute='Brand A'
brand_a=helper.add_unit(
pet_supplies_subdivision,
brand_a_condition,
0.35,
False,
"brand_a"
)
        #If you won't bid on Brand B, set the helper method's bid_amount to 0 and is_negative to True.
#The helper method will create a NegativeAdGroupCriterion and apply the condition.
brand_b_condition=campaign_service.factory.create('ProductCondition')
brand_b_condition.Operand='Brand'
brand_b_condition.Attribute='Brand B'
brand_b=helper.add_unit(
pet_supplies_subdivision,
brand_b_condition,
0,
True,
"brand_b"
)
other_brands_condition=campaign_service.factory.create('ProductCondition')
other_brands_condition.Operand='Brand'
other_brands_condition.Attribute=None
other_brands=helper.add_unit(
pet_supplies_subdivision,
other_brands_condition,
0.35,
False,
"other_brands"
)
other_pet_supplies_condition=campaign_service.factory.create('ProductCondition')
other_pet_supplies_condition.Operand='CategoryL2'
other_pet_supplies_condition.Attribute=None
other_pet_supplies=helper.add_unit(
animals_subdivision,
other_pet_supplies_condition,
0.35,
False,
"other_pet_supplies"
)
electronics_condition=campaign_service.factory.create('ProductCondition')
electronics_condition.Operand='CategoryL1'
electronics_condition.Attribute='Electronics'
electronics=helper.add_unit(
root,
electronics_condition,
0.35,
False,
"electronics"
)
other_categoryL1_condition=campaign_service.factory.create('ProductCondition')
other_categoryL1_condition.Operand='CategoryL1'
other_categoryL1_condition.Attribute=None
other_categoryL1=helper.add_unit(
root,
other_categoryL1_condition,
0.35,
False,
"other_categoryL1"
)
output_status_message("-----\nApplying product partitions to the ad group...")
apply_bulk_product_partition_actions_results=apply_bulk_product_partition_actions(helper.partition_actions)
product_partitions=get_bulk_ad_group_product_partition_tree(ad_group_id)
#The product partition group tree now has 9 nodes.
#All other (Root Node)
#|
#+-- Animals & Pet Supplies (CategoryL1)
#| |
#| +-- Pet Supplies (CategoryL2)
#| | |
#| | +-- Brand A
#| | |
#| | +-- Brand B
#| | |
#| | +-- All other (Brand)
#| |
#| +-- All other (CategoryL2)
#|
#+-- Electronics (CategoryL1)
#|
#+-- All other (CategoryL1)
output_status_message("The product partition group tree now has 9 nodes:")
output_bulk_product_partitions(product_partitions)
#Let's replace the Electronics (CategoryL1) node created above with an Electronics (CategoryL1) node that
#has children i.e. Brand C (Brand), Brand D (Brand), and All other (Brand) as follows:
#Electronics (CategoryL1)
#|
#+-- Brand C (Brand)
#|
#+-- Brand D (Brand)
#|
#+-- All other (Brand)
helper=ProductPartitionHelper(ad_group_id)
#To replace a node we must know its Id and its ParentCriterionId. In this case the parent of the node
#we are replacing is All other (Root Node), and was created at Index 1 of the previous ApplyProductPartitionActions call.
#The node that we are replacing is Electronics (CategoryL1), and was created at Index 8.
root_id=get_node_by_client_id(apply_bulk_product_partition_actions_results, "root").ad_group_criterion.Id
electronics.ad_group_criterion.Id=get_node_by_client_id(apply_bulk_product_partition_actions_results, "electronics").ad_group_criterion.Id
helper.delete_partition(electronics)
parent=BulkAdGroupProductPartition()
parent.ad_group_criterion=set_elements_to_none(campaign_service.factory.create('BiddableAdGroupCriterion'))
parent.ad_group_criterion.Id=root_id
electronics_subdivision_condition=campaign_service.factory.create('ProductCondition')
electronics_subdivision_condition.Operand='CategoryL1'
electronics_subdivision_condition.Attribute='Electronics'
electronics_subdivision=helper.add_subdivision(
parent,
electronics_subdivision_condition,
"electronics_subdivision"
)
brand_c_condition=campaign_service.factory.create('ProductCondition')
brand_c_condition.Operand='Brand'
brand_c_condition.Attribute='Brand C'
brand_c=helper.add_unit(
electronics_subdivision,
brand_c_condition,
0.35,
False,
"brand_c"
)
brand_d_condition=campaign_service.factory.create('ProductCondition')
brand_d_condition.Operand='Brand'
brand_d_condition.Attribute='Brand D'
brand_d=helper.add_unit(
electronics_subdivision,
brand_d_condition,
0.35,
False,
"brand_d"
)
other_electronics_brands_condition=campaign_service.factory.create('ProductCondition')
other_electronics_brands_condition.Operand='Brand'
other_electronics_brands_condition.Attribute=None
other_electronics_brands=helper.add_unit(
electronics_subdivision,
other_electronics_brands_condition,
0.35,
False,
"other_electronics_brands"
)
output_status_message(
"-----\nUpdating the product partition group to refine Electronics (CategoryL1) with 3 child nodes..."
)
apply_bulk_product_partition_actions_results=apply_bulk_product_partition_actions(helper.partition_actions)
product_partitions=get_bulk_ad_group_product_partition_tree(ad_group_id)
#The product partition group tree now has 12 nodes, including the children of Electronics (CategoryL1):
#All other (Root Node)
#|
#+-- Animals & Pet Supplies (CategoryL1)
#| |
#| +-- Pet Supplies (CategoryL2)
#| | |
#| | +-- Brand A
#| | |
#| | +-- Brand B
#| | |
#| | +-- All other (Brand)
#| |
#| +-- All other (CategoryL2)
#|
#+-- Electronics (CategoryL1)
#| |
#| +-- Brand C (Brand)
#| |
#| +-- Brand D (Brand)
#| |
#| +-- All other (Brand)
#|
#+-- All other (CategoryL1)
output_status_message(
"The product partition group tree now has 12 nodes, including the children of Electronics (CategoryL1):"
)
output_bulk_product_partitions(product_partitions)
# Delete the campaign and everything it contains e.g., ad groups and ads.
upload_entities=[]
for campaign_result in campaign_results:
campaign_result.campaign.Status='Deleted'
upload_entities.append(campaign_result)
output_status_message("-----\nDeleting the campaign and everything it contains e.g., ad groups and ads...")
download_entities=write_entities_and_upload_file(
bulk_service_manager=bulk_service_manager,
upload_entities=upload_entities)
output_status_message("Upload results:")
for entity in download_entities:
if isinstance(entity, BulkCampaign):
output_bulk_campaigns([entity])
except WebFault as ex:
output_webfault_errors(ex)
except Exception as ex:
output_status_message(ex)
def apply_bulk_product_partition_actions(upload_entities):
download_entities=write_entities_and_upload_file(
bulk_service_manager=bulk_service_manager,
upload_entities=upload_entities)
output_status_message("Upload results:")
bulk_ad_group_product_partitions=[]
for entity in download_entities:
if isinstance(entity, BulkAdGroupProductPartition):
bulk_ad_group_product_partitions.append(entity)
output_bulk_ad_group_product_partitions([entity])
return bulk_ad_group_product_partitions
def get_bulk_ad_group_product_partition_tree(ad_group_id):
download_parameters=DownloadParameters(
download_entities=[
'AdGroupProductPartitions'
],
result_file_directory=FILE_DIRECTORY,
result_file_name=DOWNLOAD_FILE_NAME,
overwrite_result_file=True,
last_sync_time_in_utc=None
)
download_entities=download_file(
bulk_service_manager=bulk_service_manager,
download_parameters=download_parameters)
bulk_ad_group_product_partitions=[]
for entity in download_entities:
if isinstance(entity, BulkAdGroupProductPartition) and entity.ad_group_criterion is not None and entity.ad_group_criterion.AdGroupId == ad_group_id:
bulk_ad_group_product_partitions.append(entity)
return bulk_ad_group_product_partitions
def get_node_by_client_id(product_partitions, client_id=None):
"""
Returns the root node of a tree. This operation assumes that a complete
product partition tree is provided for one ad group. The node that has
null ParentCriterionId is the root node.
:param product_partitions: The list of BulkAdGroupProductPartition that make up the product partition tree.
:type product_partitions: BulkAdGroupProductPartition[]
:return: The BulkAdGroupProductPartition corresponding to the specified Client Id.
:rtype: BulkAdGroupProductPartition
"""
client_node=None
for product_partition in product_partitions:
if product_partition.client_id == client_id:
client_node=product_partition
break
return client_node
class ProductPartitionHelper:
"""
Helper class used to maintain a list of product partition actions for an ad group.
The list of partition actions can be uploaded to the Bulk service.
"""
def __init__(self,
ad_group_id):
"""
Initialize an instance of this class.
:param ad_group_id: Each criterion is associated with the same ad group.
:type ad_group_id: long
"""
self._ad_group_id=ad_group_id
self._reference_id=-1
self._partition_actions=[]
@property
def partition_actions(self):
"""
The list of BulkAdGroupProductPartition that can be uploaded to the Bulk service
:rtype: BulkAdGroupProductPartition[]
"""
return self._partition_actions
def add_subdivision(self, parent, condition, client_id=None):
"""
Sets the Add action for a new BiddableAdGroupCriterion corresponding to the specified ProductCondition,
and adds it to the helper's list of BulkAdGroupProductPartition.
:param parent: The parent of the product partition subdivision that you want to add.
:type parent: BulkAdGroupProductPartition
:param condition: The condition or product filter for the new product partition.
:type condition: ProductCondition
:param client_id: The Client Id in the bulk upload file corresponding to the product partition.
:type client_id: string
:return: The BulkAdGroupProductPartition that was added to the list of partition_actions.
:rtype: BulkAdGroupProductPartition
"""
biddable_ad_group_criterion=set_elements_to_none(campaign_service.factory.create('BiddableAdGroupCriterion'))
product_partition=set_elements_to_none(campaign_service.factory.create('ProductPartition'))
# If the root node is a unit, it would not have a parent
product_partition.ParentCriterionId=parent.ad_group_criterion.Id if parent is not None and parent.ad_group_criterion is not None else None
product_partition.Condition=condition
product_partition.PartitionType='Subdivision'
biddable_ad_group_criterion.Criterion=product_partition
biddable_ad_group_criterion.CriterionBid=None
biddable_ad_group_criterion.AdGroupId=self._ad_group_id
biddable_ad_group_criterion.Status=None
if hasattr(biddable_ad_group_criterion, 'EditorialStatus'):
biddable_ad_group_criterion.EditorialStatus=None
        biddable_ad_group_criterion.Id=self._reference_id
        self._reference_id-=1
partition_action=BulkAdGroupProductPartition()
partition_action.client_id=client_id
partition_action.ad_group_criterion=biddable_ad_group_criterion
self._partition_actions.append(partition_action)
return partition_action
def add_unit(self, parent, condition, bid_amount, is_negative=False, client_id=None):
"""
Sets the Add action for a new AdGroupCriterion corresponding to the specified ProductCondition,
and adds it to the helper's list of BulkAdGroupProductPartition.
:param parent: The parent of the product partition unit that you want to add.
:type parent: BulkAdGroupProductPartition
:param condition: The condition or product filter for the new product partition.
:type condition: ProductCondition
:param bid_amount: The bid amount for the new product partition.
:type bid_amount: double
:param is_negative: (Optional) Indicates whether or not to add a NegativeAdGroupCriterion.
The default value is False, in which case a BiddableAdGroupCriterion will be added.
:type is_negative: bool
:param client_id: The Client Id in the bulk upload file corresponding to the product partition.
:type client_id: string
:return: The BulkAdGroupProductPartition that was added to the list of partition_actions.
:rtype: BulkAdGroupProductPartition
"""
ad_group_criterion=None
if is_negative:
ad_group_criterion=set_elements_to_none(campaign_service.factory.create('NegativeAdGroupCriterion'))
else:
ad_group_criterion=set_elements_to_none(campaign_service.factory.create('BiddableAdGroupCriterion'))
fixed_bid=set_elements_to_none(campaign_service.factory.create('FixedBid'))
fixed_bid.Amount=bid_amount
ad_group_criterion.CriterionBid=fixed_bid
ad_group_criterion.AdGroupId=self._ad_group_id
if hasattr(ad_group_criterion, 'EditorialStatus'):
ad_group_criterion.EditorialStatus=None
ad_group_criterion.Status=None
product_partition=set_elements_to_none(campaign_service.factory.create('ProductPartition'))
# If the root node is a unit, it would not have a parent
product_partition.ParentCriterionId=parent.ad_group_criterion.Id if parent is not None and parent.ad_group_criterion is not None else None
product_partition.Condition=condition
product_partition.PartitionType='Unit'
ad_group_criterion.Criterion=product_partition
partition_action=BulkAdGroupProductPartition()
partition_action.client_id=client_id
partition_action.ad_group_criterion=ad_group_criterion
self._partition_actions.append(partition_action)
return partition_action
def delete_partition(self, bulk_ad_group_product_partition):
"""
Sets the Delete action for the specified AdGroupCriterion,
and adds it to the helper's list of BulkAdGroupProductPartition.
:param bulk_ad_group_product_partition: The BulkAdGroupProductPartition whose product partition you want to delete.
:type bulk_ad_group_product_partition: BulkAdGroupProductPartition
"""
if bulk_ad_group_product_partition is not None and bulk_ad_group_product_partition.ad_group_criterion is not None:
bulk_ad_group_product_partition.ad_group_criterion.AdGroupId=self._ad_group_id
bulk_ad_group_product_partition.ad_group_criterion.Status='Deleted'
if hasattr(bulk_ad_group_product_partition.ad_group_criterion, 'EditorialStatus'):
bulk_ad_group_product_partition.ad_group_criterion.EditorialStatus=None
self._partition_actions.append(bulk_ad_group_product_partition)
def update_partition(self, bulk_ad_group_product_partition):
"""
Sets the Update action for the specified BiddableAdGroupCriterion,
and adds it to the helper's list of BulkAdGroupProductPartition.
You can only update the CriterionBid and DestinationUrl elements
of the BiddableAdGroupCriterion.
When working with product partitions, you cannot update the Criterion (ProductPartition).
To update a ProductPartition, you must delete the existing node (delete_partition) and
add a new one (add_unit or add_subdivision) during the same upload.
:param bulk_ad_group_product_partition: The BulkAdGroupProductPartition to update.
:type bulk_ad_group_product_partition: BulkAdGroupProductPartition
"""
if bulk_ad_group_product_partition is not None and bulk_ad_group_product_partition.ad_group_criterion is not None:
bulk_ad_group_product_partition.ad_group_criterion.AdGroupId=self._ad_group_id
bulk_ad_group_product_partition.ad_group_criterion.Status=None
if hasattr(bulk_ad_group_product_partition.ad_group_criterion, 'EditorialStatus'):
bulk_ad_group_product_partition.ad_group_criterion.EditorialStatus=None
self._partition_actions.append(bulk_ad_group_product_partition)
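# Illustrative sketch only, not part of the sample above: the helper methods assign
# decreasing negative Ids so that child partitions can reference parent partitions that
# do not yet exist on the server; the service resolves the negative reference Ids during
# the bulk upload. The minimal class below shows just that bookkeeping with plain dicts;
# the class name and dict keys are hypothetical and exist only for this example.
class _NegativeReferenceIds(object):
    def __init__(self):
        self._reference_id = -1
    def new_partition(self, parent=None):
        # Hand out the next negative id and point the child at its parent's id.
        partition = {
            'Id': self._reference_id,
            'ParentCriterionId': parent['Id'] if parent is not None else None,
        }
        self._reference_id -= 1
        return partition
# _ids = _NegativeReferenceIds()
# root = _ids.new_partition()        # {'Id': -1, 'ParentCriterionId': None}
# child = _ids.new_partition(root)   # {'Id': -2, 'ParentCriterionId': -1}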
# Main execution
if __name__ == '__main__':
print("Loading the web service client proxies...")
authorization_data=AuthorizationData(
account_id=None,
customer_id=None,
developer_token=DEVELOPER_TOKEN,
authentication=None,
)
bulk_service_manager=BulkServiceManager(
authorization_data=authorization_data,
poll_interval_in_milliseconds=5000,
environment=ENVIRONMENT,
)
campaign_service=ServiceClient(
service='CampaignManagementService',
version=13,
authorization_data=authorization_data,
environment=ENVIRONMENT,
)
authenticate(authorization_data)
main(authorization_data)
|
|
import itertools
import os
import re
from docutils import nodes
from docutils.parsers.rst import Directive
from docutils.statemachine import ViewList
from sphinx import addnodes
from sphinx.directives import ObjectDescription, Directive
from sphinx.domains import Domain, ObjType
from sphinx.domains.python import PyObject
from sphinx.locale import l_, _
from sphinx.roles import XRefRole
from sphinx.util.nodes import make_refnode
from sphinx.util.nodes import nested_parse_with_titles
from sphinx.util.nodes import set_source_info
from sphinx.domains import python as python_domain
import salt
class Event(PyObject):
'''
Document Salt events
'''
domain = 'salt'
class LiterateCoding(Directive):
'''
Auto-doc SLS files using literate-style comment/code separation
'''
has_content = False
required_arguments = 1
optional_arguments = 0
final_argument_whitespace = False
def parse_file(self, fpath):
'''
Read a file on the file system (relative to salt's base project dir)
:returns: A list of lines read from the file.
:raises IOError: If the file cannot be found or read.
'''
sdir = os.path.abspath(os.path.join(os.path.dirname(salt.__file__),
os.pardir))
with open(os.path.join(sdir, fpath), 'rb') as f:
return f.readlines()
def parse_lit(self, lines):
'''
Parse a string line-by-line delineating comments and code
:returns: A list of (is_comment, lines) tuples. True designates a
comment; False designates code.
'''
comment_char = '#' # TODO: move this into a directive option
comment = re.compile(r'^\s*{0}[ \n]'.format(comment_char))
section_test = lambda val: bool(comment.match(val))
sections = []
for is_doc, group in itertools.groupby(lines, section_test):
if is_doc:
text = [comment.sub('', i).rstrip('\r\n') for i in group]
else:
text = [i.rstrip('\r\n') for i in group]
sections.append((is_doc, text))
return sections
def run(self):
try:
lines = self.parse_lit(self.parse_file(self.arguments[0]))
except IOError as exc:
document = self.state.document
return [document.reporter.warning(str(exc), line=self.lineno)]
node = nodes.container()
node['classes'] = ['lit-container']
node.document = self.state.document
enum = nodes.enumerated_list()
enum['classes'] = ['lit-docs']
node.append(enum)
# make first list item
list_item = nodes.list_item()
list_item['classes'] = ['lit-item']
for is_doc, line in lines:
if is_doc and line == ['']:
continue
section = nodes.section()
if is_doc:
section['classes'] = ['lit-annotation']
nested_parse_with_titles(self.state, ViewList(line), section)
else:
section['classes'] = ['lit-content']
code = '\n'.join(line)
literal = nodes.literal_block(code, code)
literal['language'] = 'yaml'
set_source_info(self, literal)
section.append(literal)
list_item.append(section)
# If we have a pair of annotation/content items, append the list
# item and create a new list item
if len(list_item.children) == 2:
enum.append(list_item)
list_item = nodes.list_item()
list_item['classes'] = ['lit-item']
# Non-semantic div for styling
bg = nodes.container()
bg['classes'] = ['lit-background']
node.append(bg)
return [node]
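# Illustrative sketch only, not used by the directives above: what the itertools.groupby
# logic in LiterateCoding.parse_lit produces for a small, made-up SLS snippet. The helper
# repeats the parsing inline so the example is self-contained; _demo_parse_lit is
# hypothetical and exists only here.
def _demo_parse_lit(lines, comment_char='#'):
    comment = re.compile(r'^\s*{0}[ \n]'.format(comment_char))
    sections = []
    for is_doc, group in itertools.groupby(lines, lambda val: bool(comment.match(val))):
        strip = (lambda i: comment.sub('', i)) if is_doc else (lambda i: i)
        sections.append((is_doc, [strip(i).rstrip('\r\n') for i in group]))
    return sections
# _demo_parse_lit(['# Install apache\n', 'apache:\n', '  pkg.installed: []\n'])
# -> [(True, ['Install apache']), (False, ['apache:', '  pkg.installed: []'])]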
class LiterateFormula(LiterateCoding):
'''
Customizations to handle finding and parsing SLS files
'''
def parse_file(self, sls_path):
'''
Given a typical Salt SLS path (e.g.: apache.vhosts.standard), find the
file on the file system and parse it
'''
config = self.state.document.settings.env.config
formulas_dirs = config.formulas_dirs
fpath = sls_path.replace('.', '/')
name_options = (
'{0}.sls'.format(fpath),
os.path.join(fpath, 'init.sls')
)
paths = [os.path.join(fdir, fname)
for fname in name_options
for fdir in formulas_dirs]
for i in paths:
try:
with open(i, 'rb') as f:
return f.readlines()
except IOError:
pass
raise IOError("Could not find sls file '{0}'".format(sls_path))
class CurrentFormula(Directive):
domain = 'salt'
has_content = False
required_arguments = 1
optional_arguments = 0
final_argument_whitespace = False
option_spec = {}
def run(self):
env = self.state.document.settings.env
modname = self.arguments[0].strip()
if modname == 'None':
env.temp_data['salt:formula'] = None
else:
env.temp_data['salt:formula'] = modname
return []
class Formula(Directive):
domain = 'salt'
has_content = True
required_arguments = 1
def run(self):
env = self.state.document.settings.env
formname = self.arguments[0].strip()
env.temp_data['salt:formula'] = formname
if 'noindex' in self.options:
return []
env.domaindata['salt']['formulas'][formname] = (
env.docname,
self.options.get('synopsis', ''),
self.options.get('platform', ''),
'deprecated' in self.options)
targetnode = nodes.target('', '', ids=['module-' + formname],
ismod=True)
self.state.document.note_explicit_target(targetnode)
indextext = u'{0}-formula'.format(formname)
inode = addnodes.index(entries=[('single', indextext,
'module-' + formname, '')])
return [targetnode, inode]
class State(Directive):
domain = 'salt'
has_content = True
required_arguments = 1
def run(self):
env = self.state.document.settings.env
statename = self.arguments[0].strip()
if 'noindex' in self.options:
return []
targetnode = nodes.target('', '', ids=['module-' + statename],
ismod=True)
self.state.document.note_explicit_target(targetnode)
formula = env.temp_data.get('salt:formula')
indextext = u'{1} ({0}-formula)'.format(formula, statename)
inode = addnodes.index(entries=[
('single', indextext, 'module-{0}'.format(statename), ''),
])
return [targetnode, inode]
class SLSXRefRole(XRefRole):
pass
class SaltModuleIndex(python_domain.PythonModuleIndex):
name = 'modindex'
localname = l_('Salt Module Index')
shortname = l_('all salt modules')
class SaltDomain(python_domain.PythonDomain):
name = 'salt'
label = 'Salt'
data_version = 2
object_types = python_domain.PythonDomain.object_types
object_types.update({
'state': ObjType(l_('state'), 'state'),
})
directives = python_domain.PythonDomain.directives
directives.update({
'event': Event,
'state': State,
'formula': LiterateFormula,
'currentformula': CurrentFormula,
'saltconfig': LiterateCoding,
})
roles = python_domain.PythonDomain.roles
roles.update({
'formula': SLSXRefRole(),
})
initial_data = python_domain.PythonDomain.initial_data
initial_data.update({
'formulas': {},
})
indices = [
SaltModuleIndex,
]
def resolve_xref(self, env, fromdocname, builder, type, target, node,
contnode):
if type == 'formula' and target in self.data['formulas']:
doc, _, _, _ = self.data['formulas'].get(target, (None, None, None, None))
if doc:
return make_refnode(builder, fromdocname, doc, target,
contnode, target)
else:
return super(SaltDomain, self).resolve_xref(env, fromdocname, builder,
type, target, node, contnode)
# Monkey-patch the Python domain to remove the Python module index
python_domain.PythonDomain.indices = []
def setup(app):
app.add_domain(SaltDomain)
formulas_path = 'templates/formulas'
formulas_dir = os.path.join(os.path.abspath(os.path.dirname(salt.__file__)),
formulas_path)
app.add_config_value('formulas_dirs', [formulas_dir], 'env')
app.add_crossref_type(directivename="conf_master", rolename="conf_master",
indextemplate="pair: %s; conf/master")
app.add_crossref_type(directivename="conf_minion", rolename="conf_minion",
indextemplate="pair: %s; conf/minion")
app.add_crossref_type(directivename="conf_log", rolename="conf_log",
indextemplate="pair: %s; conf/logging")
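# Illustrative sketch only: how a Sphinx project might enable this extension from its
# conf.py. The importable module name ('saltdomain') is an assumption made for the
# example; setup() above registers the 'salt' domain, the three crossref types and the
# 'formulas_dirs' config value.
#
#     extensions = ['saltdomain']
#     # Optional: override the default formula search path registered in setup().
#     formulas_dirs = ['/srv/formulas']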
|
|
#
#
# Copyright (C) 2012 Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
# TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Script to prepare a node for joining a cluster.
"""
import os
import os.path
import optparse
import sys
import logging
from ganeti import cli
from ganeti import constants
from ganeti import errors
from ganeti import pathutils
from ganeti import utils
from ganeti import ht
from ganeti import ssh
from ganeti.tools import common
_SSH_KEY_LIST_ITEM = \
ht.TAnd(ht.TIsLength(3),
ht.TItems([
ht.TElemOf(constants.SSHK_ALL),
ht.Comment("public")(ht.TNonEmptyString),
ht.Comment("private")(ht.TNonEmptyString),
]))
_SSH_KEY_LIST = ht.TListOf(_SSH_KEY_LIST_ITEM)
_DATA_CHECK = ht.TStrictDict(False, True, {
constants.SSHS_CLUSTER_NAME: ht.TNonEmptyString,
constants.SSHS_NODE_DAEMON_CERTIFICATE: ht.TNonEmptyString,
constants.SSHS_SSH_HOST_KEY: _SSH_KEY_LIST,
constants.SSHS_SSH_ROOT_KEY: _SSH_KEY_LIST,
constants.SSHS_SSH_AUTHORIZED_KEYS:
ht.TDictOf(ht.TNonEmptyString, ht.TListOf(ht.TNonEmptyString)),
})
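# Illustrative sketch only: the approximate shape of the JSON document this tool reads
# from stdin, written against the _DATA_CHECK schema above. All key and certificate
# material below is a fake placeholder, "rsa" is assumed to be a member of
# constants.SSHK_ALL, and the authorized-keys mapping key is just a non-empty string
# as far as the schema is concerned.
_EXAMPLE_INPUT_DATA = {
  constants.SSHS_CLUSTER_NAME: "cluster.example.com",
  constants.SSHS_NODE_DAEMON_CERTIFICATE: "-----BEGIN CERTIFICATE-----...",
  constants.SSHS_SSH_HOST_KEY: [
    ("rsa", "ssh-rsa AAAA... host key (public)", "-----BEGIN RSA PRIVATE KEY-----..."),
    ],
  constants.SSHS_SSH_ROOT_KEY: [
    ("rsa", "ssh-rsa AAAA... root key (public)", "-----BEGIN RSA PRIVATE KEY-----..."),
    ],
  constants.SSHS_SSH_AUTHORIZED_KEYS: {
    "node1.example.com": ["ssh-rsa AAAA... admin@workstation"],
    },
  }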
class JoinError(errors.GenericError):
"""Local class for reporting errors.
"""
def ParseOptions():
"""Parses the options passed to the program.
@return: Options and arguments
"""
program = os.path.basename(sys.argv[0])
parser = optparse.OptionParser(usage="%prog [--dry-run]",
prog=program)
parser.add_option(cli.DEBUG_OPT)
parser.add_option(cli.VERBOSE_OPT)
parser.add_option(cli.DRY_RUN_OPT)
(opts, args) = parser.parse_args()
return common.VerifyOptions(parser, opts, args)
def _UpdateKeyFiles(keys, dry_run, keyfiles):
"""Updates SSH key files.
@type keys: sequence of tuple; (string, string, string)
@param keys: Keys to write, tuples consist of key type
(L{constants.SSHK_ALL}), public and private key
@type dry_run: boolean
@param dry_run: Whether to perform a dry run
@type keyfiles: dict; (string as key, tuple with (string, string) as values)
@param keyfiles: Mapping from key types (L{constants.SSHK_ALL}) to file
names; value tuples consist of private key filename and public key filename
"""
assert set(keyfiles) == constants.SSHK_ALL
for (kind, private_key, public_key) in keys:
(private_file, public_file) = keyfiles[kind]
logging.debug("Writing %s ...", private_file)
utils.WriteFile(private_file, data=private_key, mode=0600,
backup=True, dry_run=dry_run)
logging.debug("Writing %s ...", public_file)
utils.WriteFile(public_file, data=public_key, mode=0644,
backup=True, dry_run=dry_run)
def UpdateSshDaemon(data, dry_run, _runcmd_fn=utils.RunCmd,
_keyfiles=None):
"""Updates SSH daemon's keys.
Unless C{dry_run} is set, the daemon is restarted at the end.
@type data: dict
@param data: Input data
@type dry_run: boolean
@param dry_run: Whether to perform a dry run
"""
keys = data.get(constants.SSHS_SSH_HOST_KEY)
if not keys:
return
if _keyfiles is None:
_keyfiles = constants.SSH_DAEMON_KEYFILES
logging.info("Updating SSH daemon key files")
_UpdateKeyFiles(keys, dry_run, _keyfiles)
if dry_run:
logging.info("This is a dry run, not restarting SSH daemon")
else:
result = _runcmd_fn([pathutils.DAEMON_UTIL, "reload-ssh-keys"],
interactive=True)
if result.failed:
raise JoinError("Could not reload SSH keys, command '%s'"
" had exitcode %s and error %s" %
(result.cmd, result.exit_code, result.output))
def UpdateSshRoot(data, dry_run, _homedir_fn=None):
"""Updates root's SSH keys.
Root's C{authorized_keys} file is also updated with new public keys.
@type data: dict
@param data: Input data
@type dry_run: boolean
@param dry_run: Whether to perform a dry run
"""
authorized_keys = data.get(constants.SSHS_SSH_AUTHORIZED_KEYS)
(auth_keys_file, _) = \
ssh.GetAllUserFiles(constants.SSH_LOGIN_USER, mkdir=True,
_homedir_fn=_homedir_fn)
if dry_run:
logging.info("This is a dry run, not replacing the SSH keys.")
else:
common.GenerateRootSshKeys(error_fn=JoinError, _homedir_fn=_homedir_fn)
if authorized_keys:
if dry_run:
logging.info("This is a dry run, not modifying %s", auth_keys_file)
else:
all_authorized_keys = []
for keys in authorized_keys.values():
all_authorized_keys += keys
ssh.AddAuthorizedKeys(auth_keys_file, all_authorized_keys)
def Main():
"""Main routine.
"""
opts = ParseOptions()
utils.SetupToolLogging(opts.debug, opts.verbose)
try:
data = common.LoadData(sys.stdin.read(), _DATA_CHECK)
# Check if input data is correct
common.VerifyClusterName(data, JoinError)
common.VerifyCertificateSoft(data, JoinError)
# Update SSH files
UpdateSshDaemon(data, opts.dry_run)
UpdateSshRoot(data, opts.dry_run)
logging.info("Setup finished successfully")
except Exception, err: # pylint: disable=W0703
logging.debug("Caught unhandled exception", exc_info=True)
(retcode, message) = cli.FormatError(err)
logging.error(message)
return retcode
else:
return constants.EXIT_SUCCESS
|
|
# -*- encoding: utf-8 -*-
#
# Copyright (c) 2014, OVH
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
# Except as contained in this notice, the name of OVH and or its trademarks
# (and among others RunAbove) shall not be used in advertising or otherwise to
# promote the sale, use or other dealings in this Software without prior
# written authorization from OVH.
import unittest
import mock
import json
import runabove
class TestInstance(unittest.TestCase):
instance_id = '8c687d5d-a1c7-4670-aca8-65acfb23ab44'
answer_list = '''[
{
"instanceId": "8c687d5d-a1c7-4670-aca8-65acfb23ab44",
"name": "Test1",
"ip": "192.168.0.1",
"flavorId": "ab35df0e-4632-48b2-b6a5-c1f1d922bd43",
"imageId": "82a56d09-882d-48cc-82ce-eef59820879f",
"keyName": "",
"status": "ACTIVE",
"created": "2014-06-01T09:13:15Z",
"region": "BHS-1"
},
{
"instanceId": "6736e98e-d40c-408d-8198-8a20d21124f3",
"name": "Test2",
"ip": "192.168.0.1",
"flavorId": "ab35df0e-4632-48b2-b6a5-c1f1d922bd43",
"imageId": "6915107b-e40d-4fd7-95f5-5e2bd5c106d3",
"keyName": "MyTestKey",
"status": "ACTIVE",
"created": "2014-06-20T10:10:38Z",
"region": "BHS-1"
}
]'''
answer_one = '''{
"instanceId": "8c687d5d-a1c7-4670-aca8-65acfb23ab44",
"name": "Test",
"ipv4": "192.168.0.3",
"created": "2014-06-01T09:13:15Z",
"status": "ACTIVE",
"flavor": {
"id": "ab35df0e-4632-48b2-b6a5-c1f1d922bd43",
"disk": 240,
"name": "pci2.d.c1",
"ram": 16384,
"vcpus": 6,
"region": "BHS-1",
"type": "ra.sb"
},
"image": {
"id": "82a56d09-882d-48cc-82ce-eef59820879f",
"name": "Debian 7",
"region": "BHS-1",
"visibility": "public"
},
"sshKey": null,
"region": "BHS-1"
}'''
answer_one_without_flavor_nor_image_nor_key = '''{
"instanceId": "8c687d5d-a1c7-4670-aca8-65acfb23ab44",
"name": "Test",
"ipv4": "192.168.0.3",
"created": "2014-06-01T09:13:15Z",
"status": "ACTIVE",
"flavor": null,
"image": null,
"sshKey": null,
"region": "BHS-1"
}'''
answer_create_with_key = '''{
"instanceId": "8c687d5d-a1c7-4670-aca8-65acfb23ab44",
"name": "Test",
"ipv4": "",
"created": "2014-07-02T14:02:39Z",
"status": "BUILD",
"flavor": {
"id": "4245b91e-d9cf-4c9d-a109-f6a32da8a5cc",
"disk": 240,
"name": "pci2.d.r1",
"ram": 28672,
"vcpus": 4,
"region": "BHS-1"
},
"image": {
"id": "82a56d09-882d-48cc-82ce-eef59820879f",
"name": "Debian 7",
"region": "BHS-1"
},
"sshKey": {
"publicKey": "ssh-rsa very-strong-key key-comment",
"name": "MyTestKey",
"fingerPrint": "aa:aa:aa:aa:aa:aa:aa:aa:aa:aa:aa:aa",
"region": "BHS-1"
},
"region": "BHS-1"
}'''
answer_create_without_key = '''{
"instanceId": "8c687d5d-a1c7-4670-aca8-65acfb23ab44",
"name": "Test",
"ipv4": "",
"created": "2014-07-02T14:02:39Z",
"status": "BUILD",
"flavor": {
"id": "4245b91e-d9cf-4c9d-a109-f6a32da8a5cc",
"disk": 240,
"name": "pci2.d.r1",
"ram": 28672,
"vcpus": 4,
"region": "BHS-1"
},
"image": {
"id": "82a56d09-882d-48cc-82ce-eef59820879f",
"name": "Debian 7",
"region": "BHS-1"
},
"sshKey": null,
"region": "BHS-1"
}'''
@mock.patch('runabove.wrapper_api')
@mock.patch('runabove.client')
def setUp(self, mock_wrapper, mock_client):
self.mock_wrapper = mock_wrapper
self.instances = runabove.instance.InstanceManager(mock_wrapper,
mock_client)
def test_base_path(self):
self.assertEqual(self.instances.basepath, '/instance')
def test_list(self):
self.mock_wrapper.get.return_value = json.loads(self.answer_list)
instance_list = self.instances.list()
self.mock_wrapper.get.assert_called_once_with(
self.instances.basepath
)
self.assertIsInstance(instance_list, list)
self.assertTrue(len(instance_list) > 0)
def test_get_by_id(self):
self.mock_wrapper.encode_for_api.return_value = self.instance_id
self.mock_wrapper.get.return_value = json.loads(self.answer_one)
instance = self.instances.get_by_id(self.instance_id)
self.mock_wrapper.get.assert_called_once_with(
self.instances.basepath + '/' + self.instance_id
)
self.assertIsInstance(instance, runabove.instance.Instance)
def test_get_by_id_without_flavor_nor_image_nor_key(self):
answer = self.answer_one_without_flavor_nor_image_nor_key
self.mock_wrapper.encode_for_api.return_value = self.instance_id
self.mock_wrapper.get.return_value = json.loads(answer)
instance = self.instances.get_by_id(self.instance_id)
self.mock_wrapper.get.assert_called_once_with(
self.instances.basepath + '/' + self.instance_id
)
self.assertIsInstance(instance, runabove.instance.Instance)
def test_create_with_key(self):
name = "Test"
image_id = "82a56d09-882d-48cc-82ce-eef59820879f"
flavor_id = "4245b91e-d9cf-4c9d-a109-f6a32da8a5cc"
region_name = "BHS-1"
public_key = "ssh-rsa very-strong-key key-comment"
content = {
'flavorId': flavor_id,
'imageId': image_id,
'name': name,
'region': region_name,
'sshKeyName': public_key
}
self.mock_wrapper.post.return_value = json.loads(
self.answer_create_with_key
)
self.mock_wrapper.get.return_value = json.loads(
self.answer_create_with_key
)
self.mock_wrapper.encode_for_api.return_value = self.instance_id
instance = self.instances.create(
region_name,
name,
flavor_id,
image_id,
public_key
)
self.mock_wrapper.post.assert_called_once_with(
self.instances.basepath,
content
)
self.mock_wrapper.get.assert_called_once_with(
self.instances.basepath + '/' + self.instance_id
)
def test_create_without_key(self):
name = "Test"
image_id = "82a56d09-882d-48cc-82ce-eef59820879f"
flavor_id = "4245b91e-d9cf-4c9d-a109-f6a32da8a5cc"
region_name = "BHS-1"
content = {
'flavorId': flavor_id,
'imageId': image_id,
'name': name,
'region': region_name
}
self.mock_wrapper.post.return_value = json.loads(
self.answer_create_without_key
)
self.mock_wrapper.get.return_value = json.loads(
self.answer_create_without_key
)
self.mock_wrapper.encode_for_api.return_value = self.instance_id
self.instances.create(
region_name,
name,
flavor_id,
image_id
)
self.mock_wrapper.post.assert_called_once_with(
self.instances.basepath,
content
)
self.mock_wrapper.get.assert_called_once_with(
self.instances.basepath + '/' + self.instance_id
)
def test_rename_vm(self):
name = 'MyTestInstanceWithNewName'
self.mock_wrapper.encode_for_api.return_value = self.instance_id
content = {"name": name}
self.instances.rename(self.instance_id, name)
self.mock_wrapper.put.assert_called_once_with(
self.instances.basepath + '/' + self.instance_id,
content
)
def test_delete(self):
self.mock_wrapper.encode_for_api.return_value = self.instance_id
self.instances.delete(self.instance_id)
self.mock_wrapper.delete.assert_called_once_with(
self.instances.basepath + '/' + self.instance_id
)
def test_load_vnc(self):
url = "https://vnc-url"
self.mock_wrapper.get.return_value = json.loads('''{
"type": "novnc",
"url": "%s"
}''' % url)
vnc = self.instances._load_vnc(self.instance_id)
self.mock_wrapper.get.assert_called_once_with(
self.instances.basepath + '/' + self.instance_id + '/vnc'
)
self.assertEqual(vnc, url)
class TestInstanceObject(unittest.TestCase):
@mock.patch('runabove.instance.InstanceManager')
def setUp(self, mock_instances):
self.mock_instances = mock_instances
self.instance = runabove.instance.Instance(
self.mock_instances,
'9c687d5d-a1c7-4670-aca8-65acfb23ab44',
'MyTestInstance',
'192.168.0.1',
'BHS-1',
'fc4c428d-c88b-4027-b35d-2ca176a8bd1a',
'b37437ea-e8de-474b-9628-54f563a3fd1e',
'MyTestKey',
'ACTIVE',
'2014-07-01T09:13:15Z'
)
def test_delete_object(self):
self.instance.delete()
self.mock_instances.delete.assert_called_once_with(self.instance)
def test_rename_object(self):
name = 'MyTestInstanceWithNewName'
self.instance.rename(name)
self.mock_instances.rename.assert_called_once_with(self.instance, name)
def test_get_vnc_link(self):
self.instance.vnc
self.mock_instances.vnc.assert_called_once()
def test_get_flavor_not_in_cache(self):
self.instance.flavor
self.mock_instances._handler.flavors.get_by_id.assert_called_once_with(
self.instance._flavor_id
)
def test_get_flavor_none(self):
self.instance._flavor = False
res = self.instance.flavor
self.assertEquals(res, None)
def test_get_flavor_404(self):
self.mock_instances._handler.flavors.get_by_id.side_effect=\
runabove.exception.ResourceNotFoundError
res = self.instance.flavor
self.mock_instances._handler.flavors.get_by_id.assert_called_once_with(
self.instance._flavor_id
)
self.assertEquals(res, None)
def test_get_image_not_in_cache(self):
self.instance.image
self.mock_instances._handler.images.get_by_id.assert_called_once_with(
self.instance._image_id
)
def test_get_image_none(self):
self.instance._image = False
res = self.instance.image
self.assertEquals(res, None)
def test_get_image_404(self):
self.mock_instances._handler.images.get_by_id.side_effect=\
runabove.exception.ResourceNotFoundError
res = self.instance.image
self.mock_instances._handler.images.get_by_id.assert_called_once_with(
self.instance._image_id
)
self.assertEquals(res, None)
def test_get_ssh_key(self):
self.instance.ssh_key
self.mock_instances._handler.ssh_keys.get_by_name.\
assert_called_once_with(
self.instance.region,
self.instance._ssh_key_name
)
def test_get_ssh_key_empty(self):
self.instance._ssh_key_name = None
self.assertEqual(self.instance.ssh_key, None)
def test_get_ips(self):
self.instance.ips
self.mock_instances.get_by_id.assert_called_once_with(
self.instance.id
)
if __name__ == '__main__':
unittest.main()
|
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import sys
import logging
from saml2.mdstore import MetadataStore, name
from saml2 import BINDING_HTTP_REDIRECT, BINDING_SOAP, BINDING_HTTP_POST
from saml2.config import SPConfig, IdPConfig, Config
from py.test import raises
from saml2 import root_logger
from pathutils import dotname, full_path
sp1 = {
"entityid": "urn:mace:umu.se:saml:roland:sp",
"service": {
"sp": {
"endpoints": {
"assertion_consumer_service": [
"http://lingon.catalogix.se:8087/"],
},
"name": "test",
"idp": {
"urn:mace:example.com:saml:roland:idp": {
'single_sign_on_service':
{'urn:oasis:names:tc:SAML:2.0:bindings:HTTP-Redirect':
'http://localhost:8088/sso/'}},
}
}
},
"key_file": full_path("test.key"),
"cert_file": full_path("test.pem"),
"metadata": {
"local": [full_path("metadata.xml"),
full_path("urn-mace-swami.se-swamid-test-1.0-metadata.xml")],
},
"virtual_organization": {
"coip": {
"nameid_format": "urn:oasis:names:tc:SAML:2.0:nameid-format:transient",
"common_identifier": "eduPersonPrincipalName",
"attribute_auth": [
"https://coip-test.sunet.se/idp/shibboleth",
]
}
},
"attribute_map_dir": full_path("attributemaps"),
"only_use_keys_in_metadata": True,
}
sp2 = {
"entityid": "urn:mace:umu.se:saml:roland:sp",
"name": "Rolands SP",
"service": {
"sp": {
"endpoints": {
"assertion_consumer_service": [
"http://lingon.catalogix.se:8087/"],
},
"required_attributes": ["surName", "givenName", "mail"],
"optional_attributes": ["title"],
"idp": {
"": "https://example.com/saml2/idp/SSOService.php",
}
}
},
#"xmlsec_binary" : "/opt/local/bin/xmlsec1",
}
IDP1 = {
"entityid": "urn:mace:umu.se:saml:roland:idp",
"name": "Rolands IdP",
"service": {
"idp": {
"endpoints": {
"single_sign_on_service": ["http://localhost:8088/"],
},
"policy": {
"default": {
"attribute_restrictions": {
"givenName": None,
"surName": None,
"eduPersonAffiliation": ["(member|staff)"],
"mail": [".*@example.com"],
}
},
"urn:mace:umu.se:saml:roland:sp": None
},
}
},
#"xmlsec_binary" : "/usr/local/bin/xmlsec1",
}
IDP2 = {
"entityid": "urn:mace:umu.se:saml:roland:idp",
"name": "Rolands IdP",
"service": {
"idp": {
"endpoints": {
"single_sign_on_service": ["http://localhost:8088/"],
"single_logout_service": [
("http://localhost:8088/", BINDING_HTTP_REDIRECT)],
},
"policy": {
"default": {
"attribute_restrictions": {
"givenName": None,
"surName": None,
"eduPersonAffiliation": ["(member|staff)"],
"mail": [".*@example.com"],
}
},
"urn:mace:umu.se:saml:roland:sp": None
},
}
},
#"xmlsec_binary" : "/usr/local/bin/xmlsec1",
}
PDP = {
"entityid": "http://example.org/pysaml2/pdp",
"name": "Rolands PdP",
"service": {
"pdp": {
"endpoints": {
"authz_service": [("http://example.org/pysaml2/pdp/authz",
BINDING_SOAP)],
},
}
},
"key_file": full_path("test.key"),
"cert_file": full_path("test.pem"),
"organization": {
"name": "Exempel AB",
"display_name": [("Exempel AB", "se"), ("Example Co.", "en")],
"url": "http://www.example.com/roland",
},
"contact_person": [{
"given_name": "John",
"sur_name": "Smith",
"email_address": ["[email protected]"],
"contact_type": "technical",
},
],
}
ECP_SP = {
"entityid": "urn:mace:umu.se:saml:roland:ecpsp",
"name": "Rolands ECP_SP",
"service": {
"sp": {
"endpoints": {
"assertion_consumer_service": [
"http://lingon.catalogix.se:8087/"],
},
"ecp": {
"130.239.": "http://example.com/idp",
}
}
},
#"xmlsec_binary" : "/opt/local/bin/xmlsec1",
}
def _eq(l1, l2):
return set(l1) == set(l2)
def test_1():
c = SPConfig().load(sp1)
c.context = "sp"
print c
assert c._sp_endpoints
assert c._sp_name
assert c._sp_idp
md = c.metadata
assert isinstance(md, MetadataStore)
assert len(c._sp_idp) == 1
assert c._sp_idp.keys() == ["urn:mace:example.com:saml:roland:idp"]
assert c._sp_idp.values() == [{'single_sign_on_service':
{
'urn:oasis:names:tc:SAML:2.0:bindings:HTTP-Redirect':
'http://localhost:8088/sso/'}}]
assert c.only_use_keys_in_metadata
def test_2():
c = SPConfig().load(sp2)
c.context = "sp"
print c
assert c._sp_endpoints
assert c.getattr("endpoints", "sp")
assert c._sp_idp
assert c._sp_optional_attributes
assert c.name
assert c._sp_required_attributes
assert len(c._sp_idp) == 1
assert c._sp_idp.keys() == [""]
assert c._sp_idp.values() == [
"https://example.com/saml2/idp/SSOService.php"]
assert c.only_use_keys_in_metadata is True
def test_minimum():
minimum = {
"entityid": "urn:mace:example.com:saml:roland:sp",
"service": {
"sp": {
"endpoints": {
"assertion_consumer_service": ["http://sp.example.org/"],
},
"name": "test",
"idp": {
"": "https://example.com/idp/SSOService.php",
},
}
},
#"xmlsec_binary" : "/usr/local/bin/xmlsec1",
}
c = SPConfig().load(minimum)
c.context = "sp"
assert c is not None
def test_idp_1():
c = IdPConfig().load(IDP1)
c.context = "idp"
print c
assert c.endpoint("single_sign_on_service")[0] == 'http://localhost:8088/'
attribute_restrictions = c.getattr("policy",
"idp").get_attribute_restriction("")
assert attribute_restrictions["edupersonaffiliation"][0].match("staff")
def test_idp_2():
c = IdPConfig().load(IDP2)
c.context = "idp"
print c
assert c.endpoint("single_logout_service",
BINDING_SOAP) == []
assert c.endpoint("single_logout_service",
BINDING_HTTP_REDIRECT) == ["http://localhost:8088/"]
attribute_restrictions = c.getattr("policy",
"idp").get_attribute_restriction("")
assert attribute_restrictions["edupersonaffiliation"][0].match("staff")
def test_wayf():
c = SPConfig().load_file("server_conf")
c.context = "sp"
idps = c.metadata.with_descriptor("idpsso")
ent = idps.values()[0]
assert name(ent) == 'Example Co.'
assert name(ent, "se") == 'Exempel AB'
c.setup_logger()
assert root_logger.level != logging.NOTSET
assert root_logger.level == logging.INFO
assert len(root_logger.handlers) == 1
assert isinstance(root_logger.handlers[0],
logging.handlers.RotatingFileHandler)
handler = root_logger.handlers[0]
assert handler.backupCount == 5
try:
assert handler.maxBytes == 100000
except AssertionError:
assert handler.maxBytes == 500000
assert handler.mode == "a"
assert root_logger.name == "saml2"
assert root_logger.level == 20
def test_conf_syslog():
c = SPConfig().load_file("server_conf_syslog")
c.context = "sp"
# otherwise the logger setting is not changed
root_logger.level = logging.NOTSET
root_logger.handlers = []
print c.logger
c.setup_logger()
assert root_logger.level != logging.NOTSET
assert root_logger.level == logging.INFO
assert len(root_logger.handlers) == 1
assert isinstance(root_logger.handlers[0],
logging.handlers.SysLogHandler)
handler = root_logger.handlers[0]
print handler.__dict__
assert handler.facility == "local3"
assert handler.address == ('localhost', 514)
if sys.version_info >= (2, 7):
assert handler.socktype == 2
else:
pass
assert root_logger.name == "saml2"
assert root_logger.level == 20
#noinspection PyUnresolvedReferences
def test_3():
cnf = Config()
cnf.load_file(dotname("sp_1_conf"))
assert cnf.entityid == "urn:mace:example.com:saml:roland:sp"
assert cnf.debug == 1
assert cnf.key_file == full_path("test.key")
assert cnf.cert_file == full_path("test.pem")
#assert cnf.xmlsec_binary == "/usr/local/bin/xmlsec1"
assert cnf.accepted_time_diff == 60
assert cnf.secret == "0123456789"
assert cnf.metadata is not None
assert cnf.attribute_converters is not None
def test_sp():
cnf = SPConfig()
cnf.load_file(dotname("sp_1_conf"))
assert cnf.endpoint("assertion_consumer_service") == \
["http://lingon.catalogix.se:8087/"]
def test_dual():
cnf = Config().load_file(dotname("idp_sp_conf"))
spe = cnf.getattr("endpoints", "sp")
idpe = cnf.getattr("endpoints", "idp")
assert spe
assert idpe
assert spe != idpe
def test_ecp():
cnf = SPConfig()
cnf.load(ECP_SP)
assert cnf.endpoint("assertion_consumer_service") == \
["http://lingon.catalogix.se:8087/"]
eid = cnf.ecp_endpoint("130.239.16.3")
assert eid == "http://example.com/idp"
eid = cnf.ecp_endpoint("130.238.20.20")
assert eid is None
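# Illustrative sketch only, not the pysaml2 implementation: the behaviour exercised by
# the ecp_endpoint() assertions above amounts to an IP-prefix lookup in the "ecp"
# mapping of the SP configuration. _demo_ecp_lookup is hypothetical and local to this
# example.
def _demo_ecp_lookup(ipaddress, prefix_map):
    for prefix, idp in prefix_map.items():
        if ipaddress.startswith(prefix):
            return idp
    return None
# _demo_ecp_lookup("130.239.16.3", {"130.239.": "http://example.com/idp"})
# -> "http://example.com/idp"
# _demo_ecp_lookup("130.238.20.20", {"130.239.": "http://example.com/idp"})
# -> None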
def test_assertion_consumer_service():
c = IdPConfig()
c.load_file(dotname("idp_conf"))
c.context = "idp"
c.metadata.load("local", full_path("InCommon-metadata.xml"))
entity_id = "https://www.zimride.com/shibboleth"
acs = c.metadata.assertion_consumer_service(entity_id)
assert len(acs) == 1
assert acs[0][
"location"] == 'https://www.zimride.com/Shibboleth.sso/SAML2/POST'
if __name__ == "__main__":
test_idp_1()
|
|
"""
Serializers for Stone data types.
Currently, only JSON is officially supported, but there's an experimental
msgpack integration. If possible, serializers should be kept separate from the
RPC format.
This module should be dropped into a project that requires the use of Stone. In
the future, this could be imported from a pre-installed Python package, rather
than being added to a project.
"""
from __future__ import absolute_import, unicode_literals
import base64
import collections
import datetime
import functools
import json
import re
import six
import time
try:
from . import stone_base as bb # noqa: F401 # pylint: disable=unused-import
from . import stone_validators as bv
except (ImportError, SystemError, ValueError):
# Catch errors raised when importing a relative module when not in a package.
# This makes testing this file directly (outside of a package) easier.
import stone_base as bb  # type: ignore # noqa: F401 # pylint: disable=unused-import
import stone_validators as bv # type: ignore
_MYPY = False
if _MYPY:
import typing # noqa: F401 # pylint: disable=import-error,unused-import,useless-suppression
# ------------------------------------------------------------------------
class CallerPermissionsInterface(object):
@property
def permissions(self):
"""
Returns the list of permissions this caller has access to.
"""
raise NotImplementedError
class CallerPermissionsDefault(CallerPermissionsInterface):
@property
def permissions(self):
return []
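# Illustrative sketch only: a concrete caller-permissions object granting extra
# permissions, implementing the interface defined above. The permission name
# 'full_access' is hypothetical; this class is not used elsewhere in the module.
class _ExampleCallerPermissions(CallerPermissionsInterface):
    def __init__(self, extra_permissions):
        self._extra_permissions = list(extra_permissions)
    @property
    def permissions(self):
        return self._extra_permissions
# _ExampleCallerPermissions(['full_access']).permissions -> ['full_access']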
# ------------------------------------------------------------------------
class StoneEncoderInterface(object):
"""
Interface defining a stone object encoder.
"""
def encode(self, validator, value):
# type: (bv.Validator, typing.Any) -> typing.Any
"""
Validate ``value`` using ``validator`` and return the encoding.
Args:
validator: the ``stone_validators.Validator`` used to validate
``value``
value: the object to encode
Returns:
The encoded object. This is implementation-defined.
Raises:
stone_validators.ValidationError: Raised if ``value`` (or one
of its sub-values) fails validation.
"""
raise NotImplementedError
# ------------------------------------------------------------------------
class StoneSerializerBase(StoneEncoderInterface):
def __init__(self, caller_permissions, alias_validators):
# type: (CallerPermissionsInterface, typing.Mapping[bv.Validator, typing.Callable[[typing.Any], None]]) -> None # noqa: E501
"""
Constructor, `obviously
<http://www.geekalerts.com/ew-hand-sanitizer/>`.
Args:
caller_permissions (list): The list of raw-string caller permissions with which
to serialize.
alias_validators (``typing.Mapping``, optional): A mapping of
custom validation callables in the format
``{stone_validators.Validator:
typing.Callable[[typing.Any], None], ...}``. These callables must
raise a ``stone_validators.ValidationError`` on failure.
Defaults to ``None``.
"""
self.caller_permissions = (caller_permissions if
caller_permissions else CallerPermissionsDefault())
self._alias_validators = {} # type: typing.Dict[bv.Validator, typing.Callable[[typing.Any], None]] # noqa: E501
if alias_validators is not None:
self._alias_validators.update(alias_validators)
@property
def alias_validators(self):
"""
A ``typing.Mapping`` of custom validation callables in the format
``{stone_validators.Validator: typing.Callable[typing.Any],
...}``.
"""
return self._alias_validators
def encode(self, validator, value):
return self.encode_sub(validator, value)
def encode_sub(self, validator, value):
# type: (bv.Validator, typing.Any) -> typing.Any
"""
Callback intended to be called by other ``encode`` methods to
delegate encoding of sub-values. Arguments have the same semantics
as with the ``encode`` method.
"""
if isinstance(validator, bv.List):
# Because Lists are mutable, we always validate them during
# serialization
validate_f = validator.validate
encode_f = self.encode_list
elif isinstance(validator, bv.Map):
# Also validate maps during serialization because they are also mutable
validate_f = validator.validate
encode_f = self.encode_map
elif isinstance(validator, bv.Nullable):
validate_f = validator.validate
encode_f = self.encode_nullable
elif isinstance(validator, bv.Primitive):
validate_f = validator.validate
encode_f = self.encode_primitive
elif isinstance(validator, bv.Struct):
if isinstance(validator, bv.StructTree):
if self.caller_permissions.permissions:
def validate_with_permissions(val):
validator.validate_with_permissions(val, self.caller_permissions)
validate_f = validate_with_permissions
else:
validate_f = validator.validate
encode_f = self.encode_struct_tree
else:
# Fields are already validated on assignment
if self.caller_permissions.permissions:
def validate_with_permissions(val):
validator.validate_with_permissions(val, self.caller_permissions)
validate_f = validate_with_permissions
else:
validate_f = validator.validate_type_only
encode_f = self.encode_struct
elif isinstance(validator, bv.Union):
# Fields are already validated on assignment
validate_f = validator.validate_type_only
encode_f = self.encode_union
else:
raise bv.ValidationError('Unsupported data type {}'.format(type(validator).__name__))
validate_f(value)
return encode_f(validator, value)
def encode_list(self, validator, value):
# type: (bv.List, typing.Any) -> typing.Any
"""
Callback for serializing a ``stone_validators.List``. Arguments
have the same semantics as with the ``encode`` method.
"""
raise NotImplementedError
def encode_map(self, validator, value):
# type: (bv.Map, typing.Any) -> typing.Any
"""
Callback for serializing a ``stone_validators.Map``. Arguments
have the same semantics as with the ``encode`` method.
"""
raise NotImplementedError
def encode_nullable(self, validator, value):
# type: (bv.Nullable, typing.Any) -> typing.Any
"""
Callback for serializing a ``stone_validators.Nullable``.
Arguments have the same semantics as with the ``encode`` method.
"""
raise NotImplementedError
def encode_primitive(self, validator, value):
# type: (bv.Primitive, typing.Any) -> typing.Any
"""
Callback for serializing a ``stone_validators.Primitive``.
Arguments have the same semantics as with the ``encode`` method.
"""
raise NotImplementedError
def encode_struct(self, validator, value):
# type: (bv.Struct, typing.Any) -> typing.Any
"""
Callback for serializing a ``stone_validators.Struct``. Arguments
have the same semantics as with the ``encode`` method.
"""
raise NotImplementedError
def encode_struct_tree(self, validator, value):
# type: (bv.StructTree, typing.Any) -> typing.Any
"""
Callback for serializing a ``stone_validators.StructTree``.
Arguments have the same semantics as with the ``encode`` method.
"""
raise NotImplementedError
def encode_union(self, validator, value):
# type: (bv.Union, bb.Union) -> typing.Any
"""
Callback for serializing a ``stone_validators.Union``. Arguments
have the same semantics as with the ``encode`` method.
"""
raise NotImplementedError
# ------------------------------------------------------------------------
class StoneToPythonPrimitiveSerializer(StoneSerializerBase):
def __init__(self, caller_permissions, alias_validators, for_msgpack, old_style, should_redact):
# type: (CallerPermissionsInterface, typing.Mapping[bv.Validator, typing.Callable[[typing.Any], None]], bool, bool, bool) -> None # noqa: E501
"""
Args:
alias_validators (``typing.Mapping``, optional): Passed
to ``StoneSerializer.__init__``. Defaults to ``None``.
for_msgpack (bool, optional): See the like-named property.
Defaults to ``False``.
old_style (bool, optional): See the like-named property.
Defaults to ``False``.
should_redact (bool, optional): Whether to perform redaction on
marked fields. Defaults to ``False``.
"""
super(StoneToPythonPrimitiveSerializer, self).__init__(
caller_permissions, alias_validators=alias_validators)
self._for_msgpack = for_msgpack
self._old_style = old_style
self.should_redact = should_redact
@property
def for_msgpack(self):
"""
EXPERIMENTAL: A flag associated with the serializer indicating
whether objects produced by the ``encode`` method should be
encoded for msgpack.
"""
return self._for_msgpack
@property
def old_style(self):
"""
A flag associated with the serializer indicating whether objects
produced by the ``encode`` method should be encoded according to
Dropbox's old or new API styles.
"""
return self._old_style
def encode_sub(self, validator, value):
if self.should_redact and hasattr(validator, '_redact'):
if isinstance(value, list):
return [validator._redact.apply(v) for v in value]
elif isinstance(value, dict):
return {k: validator._redact.apply(v) for k, v in value.items()}
else:
return validator._redact.apply(value)
# Encode value normally
return super(StoneToPythonPrimitiveSerializer, self).encode_sub(validator, value)
def encode_list(self, validator, value):
validated_value = validator.validate(value)
return [self.encode_sub(validator.item_validator, value_item) for value_item in
validated_value]
def encode_map(self, validator, value):
validated_value = validator.validate(value)
return {
self.encode_sub(validator.key_validator, key):
self.encode_sub(validator.value_validator, value) for
key, value in validated_value.items()
}
def encode_nullable(self, validator, value):
if value is None:
return None
return self.encode_sub(validator.validator, value)
def encode_primitive(self, validator, value):
if validator in self.alias_validators:
self.alias_validators[validator](value)
if isinstance(validator, bv.Void):
return None
elif isinstance(validator, bv.Timestamp):
return _strftime(value, validator.format)
elif isinstance(validator, bv.Bytes):
if self.for_msgpack:
return value
else:
return base64.b64encode(value).decode('ascii')
elif isinstance(validator, bv.Integer) \
and isinstance(value, bool):
# bool is sub-class of int so it passes Integer validation,
# but we want the bool to be encoded as ``0`` or ``1``, rather
# than ``False`` or ``True``, respectively
return int(value)
else:
return value
def encode_struct(self, validator, value):
# Skip validation of fields with primitive data types because
# they've already been validated on assignment
d = collections.OrderedDict() # type: typing.Dict[str, typing.Any]
all_fields = validator.definition._all_fields_
for extra_permission in self.caller_permissions.permissions:
all_fields_name = '_all_{}_fields_'.format(extra_permission)
all_fields = all_fields + getattr(validator.definition, all_fields_name, [])
for field_name, field_validator in all_fields:
try:
field_value = getattr(value, field_name)
except AttributeError as exc:
raise bv.ValidationError(exc.args[0])
presence_key = '_%s_present' % field_name
if field_value is not None \
and getattr(value, presence_key):
# Only serialize struct fields that have been explicitly
# set, even if there is a default
try:
d[field_name] = self.encode_sub(field_validator, field_value)
except bv.ValidationError as exc:
exc.add_parent(field_name)
raise
return d
def encode_struct_tree(self, validator, value):
assert type(value) in validator.definition._pytype_to_tag_and_subtype_, \
'%r is not a serializable subtype of %r.' % (type(value), validator.definition)
tags, subtype = validator.definition._pytype_to_tag_and_subtype_[type(value)]
assert len(tags) == 1, tags
assert not isinstance(subtype, bv.StructTree), \
'Cannot serialize type %r because it enumerates subtypes.' % subtype.definition
if self.old_style:
d = {
tags[0]: self.encode_struct(subtype, value),
}
else:
d = collections.OrderedDict()
d['.tag'] = tags[0]
d.update(self.encode_struct(subtype, value))
return d
def encode_union(self, validator, value):
if value._tag is None:
raise bv.ValidationError('no tag set')
if not validator.definition._is_tag_present(value._tag, self.caller_permissions):
raise bv.ValidationError(
"caller does not have access to '{}' tag".format(value._tag))
field_validator = validator.definition._get_val_data_type(value._tag,
self.caller_permissions)
is_none = isinstance(field_validator, bv.Void) \
or (isinstance(field_validator, bv.Nullable)
and value._value is None)
def encode_sub(sub_validator, sub_value, parent_tag):
try:
encoded_val = self.encode_sub(sub_validator, sub_value)
except bv.ValidationError as exc:
exc.add_parent(parent_tag)
raise
else:
return encoded_val
if self.old_style:
if field_validator is None:
return value._tag
elif is_none:
return value._tag
else:
encoded_val = encode_sub(field_validator, value._value, value._tag)
return {value._tag: encoded_val}
elif is_none:
return {'.tag': value._tag}
else:
encoded_val = encode_sub(field_validator, value._value, value._tag)
if isinstance(field_validator, bv.Nullable):
# We've already checked for the null case above,
# so now we're only interested in what the
# wrapped validator is
field_validator = field_validator.validator
if isinstance(field_validator, bv.Struct) \
and not isinstance(field_validator, bv.StructTree):
d = collections.OrderedDict() # type: typing.Dict[str, typing.Any]
d['.tag'] = value._tag
d.update(encoded_val)
return d
else:
return collections.OrderedDict((
('.tag', value._tag),
(value._tag, encoded_val),
))
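# Illustrative sketch only, derived from the branches of encode_union above: the two
# wire shapes produced for a hypothetical union tag 'update' whose value is a struct
# with fields 'path' and 'rev', under old_style versus the '.tag' style.
_EXAMPLE_UNION_OLD_STYLE = {'update': {'path': 'a/b/c', 'rev': '1234'}}
_EXAMPLE_UNION_NEW_STYLE = {'.tag': 'update', 'path': 'a/b/c', 'rev': '1234'}
# A void member such as 'add' is encoded as the bare string 'add' in the old style and
# as {'.tag': 'add'} in the new style.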
# ------------------------------------------------------------------------
class StoneToJsonSerializer(StoneToPythonPrimitiveSerializer):
def encode(self, validator, value):
return json.dumps(super(StoneToJsonSerializer, self).encode(validator, value))
# --------------------------------------------------------------
# JSON Encoder
#
# These interfaces are preserved for backward compatibility and symmetry with deserialization
# functions.
def json_encode(data_type, obj, caller_permissions=None, alias_validators=None, old_style=False,
should_redact=False):
"""Encodes an object into JSON based on its type.
Args:
data_type (Validator): Validator for obj.
obj (object): Object to be serialized.
caller_permissions (list): The list of raw-string caller permissions with which
to serialize.
alias_validators (Optional[Mapping[bv.Validator, Callable[[], None]]]):
Custom validation functions. These must raise bv.ValidationError on
failure.
Returns:
str: JSON-encoded object.
This function will also do additional validation that wasn't done by the
objects themselves:
1. The passed in obj may not have been validated with data_type yet.
2. If an object that should be a Struct was assigned to a field, its
type has been validated, but the presence of all required fields
hasn't been.
3. If an object that should be a Union was assigned to a field, whether
or not a tag has been set has not been validated.
4. A list may have passed validation initially, but been mutated since.
Example of serializing a struct to JSON:
struct FileRef
path String
rev String
> fr = FileRef()
> fr.path = 'a/b/c'
> fr.rev = '1234'
> JsonEncoder.encode(fr)
"{'path': 'a/b/c', 'rev': '1234'}"
Example of serializing a union to JSON:
union UploadMode
add
overwrite
update FileRef
> um = UploadMode()
> um.set_add()
> JsonEncoder.encode(um)
'"add"'
> um.update = fr
> JsonEncoder.encode(um)
"{'update': {'path': 'a/b/c', 'rev': '1234'}}"
"""
for_msgpack = False
serializer = StoneToJsonSerializer(
caller_permissions, alias_validators, for_msgpack, old_style, should_redact)
return serializer.encode(data_type, obj)
def json_compat_obj_encode(data_type, obj, caller_permissions=None, alias_validators=None,
old_style=False, for_msgpack=False, should_redact=False):
"""Encodes an object into a JSON-compatible dict based on its type.
Args:
data_type (Validator): Validator for obj.
obj (object): Object to be serialized.
caller_permissions (list): The list of raw-string caller permissions
with which to serialize.
Returns:
An object that when passed to json.dumps() will produce a string
giving the JSON-encoded object.
See json_encode() for additional information about validation.
"""
serializer = StoneToPythonPrimitiveSerializer(
caller_permissions, alias_validators, for_msgpack, old_style, should_redact)
return serializer.encode(data_type, obj)
# --------------------------------------------------------------
# JSON Decoder
class PythonPrimitiveToStoneDecoder(object):
def __init__(self, caller_permissions, alias_validators, for_msgpack, old_style, strict):
self.caller_permissions = (caller_permissions if
caller_permissions else CallerPermissionsDefault())
self.alias_validators = alias_validators
self.strict = strict
self._old_style = old_style
self._for_msgpack = for_msgpack
@property
def for_msgpack(self):
"""
EXPERIMENTAL: A flag associated with the serializer indicating
whether objects produced by the ``encode`` method should be
encoded for msgpack.
"""
return self._for_msgpack
@property
def old_style(self):
"""
A flag associated with the serializer indicating whether objects
produced by the ``encode`` method should be encoded according to
Dropbox's old or new API styles.
"""
return self._old_style
def json_compat_obj_decode_helper(self, data_type, obj):
"""
See json_compat_obj_decode() for argument descriptions.
"""
if isinstance(data_type, bv.StructTree):
return self.decode_struct_tree(data_type, obj)
elif isinstance(data_type, bv.Struct):
return self.decode_struct(data_type, obj)
elif isinstance(data_type, bv.Union):
if self.old_style:
return self.decode_union_old(data_type, obj)
else:
return self.decode_union(data_type, obj)
elif isinstance(data_type, bv.List):
return self.decode_list(
data_type, obj)
elif isinstance(data_type, bv.Map):
return self.decode_map(
data_type, obj)
elif isinstance(data_type, bv.Nullable):
return self.decode_nullable(
data_type, obj)
elif isinstance(data_type, bv.Primitive):
# Set validate to false because validation will be done by the
# containing struct or union when the field is assigned.
return self.make_stone_friendly(data_type, obj, False)
else:
raise AssertionError('Cannot handle type %r.' % data_type)
def decode_struct(self, data_type, obj):
"""
The data_type argument must be a Struct.
See json_compat_obj_decode() for argument descriptions.
"""
if obj is None and data_type.has_default():
return data_type.get_default()
elif not isinstance(obj, dict):
raise bv.ValidationError('expected object, got %s' %
bv.generic_type_name(obj))
all_fields = data_type.definition._all_fields_
for extra_permission in self.caller_permissions.permissions:
all_extra_fields = '_all_{}_fields_'.format(extra_permission)
all_fields = all_fields + getattr(data_type.definition, all_extra_fields, [])
if self.strict:
all_field_names = data_type.definition._all_field_names_
for extra_permission in self.caller_permissions.permissions:
all_extra_field_names = '_all_{}_field_names_'.format(extra_permission)
all_field_names = all_field_names.union(
getattr(data_type.definition, all_extra_field_names, {}))
for key in obj:
if (key not in all_field_names and
not key.startswith('.tag')):
raise bv.ValidationError("unknown field '%s'" % key)
ins = data_type.definition()
self.decode_struct_fields(ins, all_fields, obj)
# Check that all required fields have been set.
data_type.validate_fields_only_with_permissions(ins, self.caller_permissions)
return ins
def decode_struct_fields(self, ins, fields, obj):
"""
Args:
ins: An instance of the class representing the data type being decoded.
The object will have its fields set.
fields: A tuple of (field_name: str, field_validator: Validator)
obj (dict): JSON-compatible dict that is being decoded.
strict (bool): See :func:`json_compat_obj_decode`.
Returns:
None: `ins` has its fields set based on the contents of `obj`.
"""
for name, field_data_type in fields:
if name in obj:
try:
v = self.json_compat_obj_decode_helper(field_data_type, obj[name])
setattr(ins, name, v)
except bv.ValidationError as e:
e.add_parent(name)
raise
elif field_data_type.has_default():
setattr(ins, name, field_data_type.get_default())
def decode_union(self, data_type, obj):
"""
The data_type argument must be a Union.
See json_compat_obj_decode() for argument descriptions.
"""
val = None
if isinstance(obj, six.string_types):
# Handles the shorthand format where the union is serialized as only
# the string of the tag.
tag = obj
if data_type.definition._is_tag_present(tag, self.caller_permissions):
val_data_type = data_type.definition._get_val_data_type(
tag, self.caller_permissions)
if not isinstance(val_data_type, (bv.Void, bv.Nullable)):
raise bv.ValidationError(
"expected object for '%s', got symbol" % tag)
if tag == data_type.definition._catch_all:
raise bv.ValidationError(
"unexpected use of the catch-all tag '%s'" % tag)
elif not self.strict and data_type.definition._catch_all:
tag = data_type.definition._catch_all
else:
raise bv.ValidationError("unknown tag '%s'" % tag)
elif isinstance(obj, dict):
tag, val = self.decode_union_dict(
data_type, obj)
else:
raise bv.ValidationError("expected string or object, got %s" %
bv.generic_type_name(obj))
return data_type.definition(tag, val)
def decode_union_dict(self, data_type, obj):
if '.tag' not in obj:
raise bv.ValidationError("missing '.tag' key")
tag = obj['.tag']
if not isinstance(tag, six.string_types):
raise bv.ValidationError(
'tag must be string, got %s' % bv.generic_type_name(tag))
if not data_type.definition._is_tag_present(tag, self.caller_permissions):
if not self.strict and data_type.definition._catch_all:
return data_type.definition._catch_all, None
else:
raise bv.ValidationError("unknown tag '%s'" % tag)
if tag == data_type.definition._catch_all:
raise bv.ValidationError(
"unexpected use of the catch-all tag '%s'" % tag)
val_data_type = data_type.definition._get_val_data_type(tag, self.caller_permissions)
if isinstance(val_data_type, bv.Nullable):
val_data_type = val_data_type.validator
nullable = True
else:
nullable = False
if isinstance(val_data_type, bv.Void):
if self.strict:
# In strict mode, ensure there are no extraneous keys set. In
# non-strict mode, we accept that other keys may be set due to a
# change of the void type to another.
if tag in obj:
if obj[tag] is not None:
raise bv.ValidationError('expected null, got %s' %
bv.generic_type_name(obj[tag]))
for key in obj:
if key != tag and key != '.tag':
raise bv.ValidationError("unexpected key '%s'" % key)
val = None
elif isinstance(val_data_type,
(bv.Primitive, bv.List, bv.StructTree, bv.Union, bv.Map)):
if tag in obj:
raw_val = obj[tag]
try:
val = self.json_compat_obj_decode_helper(val_data_type, raw_val)
except bv.ValidationError as e:
e.add_parent(tag)
raise
else:
# Check no other keys
if nullable:
val = None
else:
raise bv.ValidationError("missing '%s' key" % tag)
for key in obj:
if key != tag and key != '.tag':
raise bv.ValidationError("unexpected key '%s'" % key)
elif isinstance(val_data_type, bv.Struct):
if nullable and len(obj) == 1: # only has a .tag key
val = None
else:
# assume it's not null
raw_val = obj
try:
val = self.json_compat_obj_decode_helper(val_data_type, raw_val)
except bv.ValidationError as e:
e.add_parent(tag)
raise
else:
assert False, type(val_data_type)
return tag, val
def decode_union_old(self, data_type, obj):
"""
The data_type argument must be a Union.
See json_compat_obj_decode() for argument descriptions.
"""
val = None
if isinstance(obj, six.string_types):
# Union member has no associated value
tag = obj
if data_type.definition._is_tag_present(tag, self.caller_permissions):
val_data_type = data_type.definition._get_val_data_type(tag,
self.caller_permissions)
if not isinstance(val_data_type, (bv.Void, bv.Nullable)):
raise bv.ValidationError(
"expected object for '%s', got symbol" % tag)
else:
if not self.strict and data_type.definition._catch_all:
tag = data_type.definition._catch_all
else:
raise bv.ValidationError("unknown tag '%s'" % tag)
elif isinstance(obj, dict):
# Union member has value
if len(obj) != 1:
raise bv.ValidationError('expected 1 key, got %s' % len(obj))
tag = list(obj)[0]
raw_val = obj[tag]
if data_type.definition._is_tag_present(tag, self.caller_permissions):
val_data_type = data_type.definition._get_val_data_type(tag,
self.caller_permissions)
if isinstance(val_data_type, bv.Nullable) and raw_val is None:
val = None
elif isinstance(val_data_type, bv.Void):
if raw_val is None or not self.strict:
# If raw_val is None, then this is the more verbose
# representation of a void union member. If raw_val isn't
# None, then maybe the spec has changed, so check if we're
# in strict mode.
val = None
else:
raise bv.ValidationError('expected null, got %s' %
bv.generic_type_name(raw_val))
else:
try:
val = self.json_compat_obj_decode_helper(val_data_type, raw_val)
except bv.ValidationError as e:
e.add_parent(tag)
raise
else:
if not self.strict and data_type.definition._catch_all:
tag = data_type.definition._catch_all
else:
raise bv.ValidationError("unknown tag '%s'" % tag)
else:
raise bv.ValidationError("expected string or object, got %s" %
bv.generic_type_name(obj))
return data_type.definition(tag, val)
def decode_struct_tree(self, data_type, obj):
"""
The data_type argument must be a StructTree.
See json_compat_obj_decode() for argument descriptions.
"""
subtype = self.determine_struct_tree_subtype(data_type, obj)
return self.decode_struct(subtype, obj)
def determine_struct_tree_subtype(self, data_type, obj):
"""
Searches through the JSON-object-compatible dict using the data type
definition to determine which of the enumerated subtypes `obj` is.
"""
if '.tag' not in obj:
raise bv.ValidationError("missing '.tag' key")
if not isinstance(obj['.tag'], six.string_types):
raise bv.ValidationError('expected string, got %s' %
bv.generic_type_name(obj['.tag']),
parent='.tag')
# Find the subtype the tags refer to
full_tags_tuple = (obj['.tag'],)
if full_tags_tuple in data_type.definition._tag_to_subtype_:
subtype = data_type.definition._tag_to_subtype_[full_tags_tuple]
if isinstance(subtype, bv.StructTree):
raise bv.ValidationError("tag '%s' refers to non-leaf subtype" %
('.'.join(full_tags_tuple)))
return subtype
else:
if self.strict:
# In strict mode, the entirety of the tag hierarchy should
# point to a known subtype.
raise bv.ValidationError("unknown subtype '%s'" %
'.'.join(full_tags_tuple))
else:
# If subtype was not found, use the base.
if data_type.definition._is_catch_all_:
return data_type
else:
raise bv.ValidationError(
"unknown subtype '%s' and '%s' is not a catch-all" %
('.'.join(full_tags_tuple), data_type.definition.__name__))
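    # Illustrative note (added; tags are hypothetical): for a struct tree whose
    # leaf subtypes are registered under 'file' and 'folder', an input such as
    #
    #     {".tag": "folder", "name": "docs"}
    #
    # resolves to the 'folder' subtype. In non-strict mode an unknown tag falls
    # back to the base type only if the base is marked as a catch-all.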
def decode_list(self, data_type, obj):
"""
The data_type argument must be a List.
See json_compat_obj_decode() for argument descriptions.
"""
if not isinstance(obj, list):
raise bv.ValidationError(
'expected list, got %s' % bv.generic_type_name(obj))
return [
self.json_compat_obj_decode_helper(data_type.item_validator, item)
for item in obj]
def decode_map(self, data_type, obj):
"""
The data_type argument must be a Map.
See json_compat_obj_decode() for argument descriptions.
"""
if not isinstance(obj, dict):
raise bv.ValidationError(
'expected dict, got %s' % bv.generic_type_name(obj))
return {
self.json_compat_obj_decode_helper(data_type.key_validator, key):
self.json_compat_obj_decode_helper(data_type.value_validator, value)
for key, value in obj.items()
}
def decode_nullable(self, data_type, obj):
"""
The data_type argument must be a Nullable.
See json_compat_obj_decode() for argument descriptions.
"""
if obj is not None:
return self.json_compat_obj_decode_helper(data_type.validator, obj)
else:
return None
def make_stone_friendly(self, data_type, val, validate):
"""
Convert a Python object to a type that will pass validation by its
validator.
Validation by ``alias_validators`` is performed even if ``validate`` is
false.
"""
if isinstance(data_type, bv.Timestamp):
try:
ret = datetime.datetime.strptime(val, data_type.format)
except (TypeError, ValueError) as e:
raise bv.ValidationError(e.args[0])
elif isinstance(data_type, bv.Bytes):
if self.for_msgpack:
if isinstance(val, six.text_type):
ret = val.encode('utf-8')
else:
ret = val
else:
try:
ret = base64.b64decode(val)
except TypeError:
raise bv.ValidationError('invalid base64-encoded bytes')
elif isinstance(data_type, bv.Void):
if self.strict and val is not None:
raise bv.ValidationError("expected null, got value")
return None
else:
if validate:
if self.caller_permissions.permissions:
data_type.validate_with_permissions(val, self.caller_permissions)
else:
data_type.validate(val)
ret = val
if self.alias_validators is not None and data_type in self.alias_validators:
self.alias_validators[data_type](ret)
return ret
def json_decode(data_type, serialized_obj, caller_permissions=None,
alias_validators=None, strict=True, old_style=False):
"""Performs the reverse operation of json_encode.
Args:
data_type (Validator): Validator for serialized_obj.
serialized_obj (str): The JSON string to deserialize.
caller_permissions (list): The list of raw-string caller permissions
with which to serialize.
alias_validators (Optional[Mapping[bv.Validator, Callable[[], None]]]):
Custom validation functions. These must raise bv.ValidationError on
failure.
strict (bool): If strict, then unknown struct fields will raise an
error, and unknown union variants will raise an error even if a
catch all field is specified. strict should only be used by a
recipient of serialized JSON if it's guaranteed that its Stone
specs are at least as recent as the senders it receives messages
from.
Returns:
The returned object depends on the input data_type.
- Boolean -> bool
- Bytes -> bytes
- Float -> float
- Integer -> long
- List -> list
- Map -> dict
- Nullable -> None or its wrapped type.
- String -> unicode (PY2) or str (PY3)
- Struct -> An instance of its definition attribute.
- Timestamp -> datetime.datetime
- Union -> An instance of its definition attribute.
"""
try:
deserialized_obj = json.loads(serialized_obj)
except ValueError:
raise bv.ValidationError('could not decode input as JSON')
else:
return json_compat_obj_decode(
data_type, deserialized_obj, caller_permissions=caller_permissions,
alias_validators=alias_validators, strict=strict, old_style=old_style)
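# Usage sketch (added; assumes `bv` refers to the stone_validators module used
# throughout this file):
#
#     name = json_decode(bv.String(), '"Alice"')            # -> u'Alice'
#     nums = json_decode(bv.List(bv.Int32()), '[1, 2, 3]')  # -> [1, 2, 3]
#
# Malformed JSON raises bv.ValidationError('could not decode input as JSON').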
def json_compat_obj_decode(data_type, obj, caller_permissions=None,
alias_validators=None, strict=True,
old_style=False, for_msgpack=False):
"""
Decodes a JSON-compatible object based on its data type into a
representative Python object.
Args:
data_type (Validator): Validator for serialized_obj.
obj: The JSON-compatible object to decode based on data_type.
caller_permissions (list): The list of raw-string caller permissions
with which to serialize.
strict (bool): If strict, then unknown struct fields will raise an
error, and unknown union variants will raise an error even if a
catch all field is specified. See json_decode() for more.
Returns:
See json_decode().
"""
decoder = PythonPrimitiveToStoneDecoder(caller_permissions,
alias_validators, for_msgpack, old_style, strict)
if isinstance(data_type, bv.Primitive):
return decoder.make_stone_friendly(
data_type, obj, True)
else:
return decoder.json_compat_obj_decode_helper(
data_type, obj)
# Adapted from:
# http://code.activestate.com/recipes/306860-proleptic-gregorian-dates-and-strftime-before-1900/
# Remove the unsupported "%s" command. But don't do it if there's an odd
# number of %s before the s because those are all escaped. Can't simply
# remove the s because the result of %sY should be %Y if %s isn't
# supported, not the 4 digit year.
_ILLEGAL_S = re.compile(r'((^|[^%])(%%)*%s)')
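# Illustrative note (added): the pattern flags an unescaped '%s' directive,
# e.g.
#
#     _ILLEGAL_S.search('%s %Y')   # matches  -> TypeError raised below
#     _ILLEGAL_S.search('100%%s')  # no match -> this '%s' is escaped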
def _findall(text, substr):
# Also finds overlaps
sites = []
i = 0
while 1:
j = text.find(substr, i)
if j == -1:
break
sites.append(j)
i = j + 1
return sites
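# Illustrative note (added): overlapping matches are included, e.g.
# _findall('aaaa', 'aa') -> [0, 1, 2].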
# Every 28 years the calendar repeats, except through century leap years
# where it's 6 years. But only if you're using the Gregorian calendar. ;)
def _strftime(dt, fmt):
try:
return dt.strftime(fmt)
except ValueError:
if not six.PY2 or dt.year > 1900:
raise
if _ILLEGAL_S.search(fmt):
raise TypeError("This strftime implementation does not handle %s")
year = dt.year
# For every non-leap year century, advance by 6 years to get into the
# 28-year repeat cycle
delta = 2000 - year
off = 6 * (delta // 100 + delta // 400)
year = year + off
# Move to around the year 2000
year = year + ((2000 - year) // 28) * 28
timetuple = dt.timetuple()
s1 = time.strftime(fmt, (year,) + timetuple[1:])
sites1 = _findall(s1, str(year))
s2 = time.strftime(fmt, (year + 28,) + timetuple[1:])
sites2 = _findall(s2, str(year + 28))
sites = []
for site in sites1:
if site in sites2:
sites.append(site)
s = s1
syear = '%4d' % (dt.year,)
for site in sites:
s = s[:site] + syear + s[site + 4:]
return s
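# Illustrative note (added): on Python 2, a pre-1900 timestamp such as
# datetime.datetime(1899, 12, 31) makes dt.strftime() raise ValueError;
# _strftime() instead formats an equivalent year inside the 28-year calendar
# cycle and splices the real year back into the result, so
#
#     _strftime(datetime.datetime(1899, 12, 31), '%Y-%m-%d')  # -> '1899-12-31'
#
# On Python 3 (or for years above 1900) it simply delegates to dt.strftime().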
try:
import msgpack
except ImportError:
pass
else:
msgpack_compat_obj_encode = functools.partial(json_compat_obj_encode,
for_msgpack=True)
def msgpack_encode(data_type, obj):
return msgpack.dumps(
msgpack_compat_obj_encode(data_type, obj), encoding='utf-8')
msgpack_compat_obj_decode = functools.partial(json_compat_obj_decode,
for_msgpack=True)
def msgpack_decode(
data_type, serialized_obj, alias_validators=None, strict=True):
# We decode everything as utf-8 because we want all object keys to be
# unicode. Otherwise, we need to do a lot more refactoring to make
# json/msgpack share the same code. We expect byte arrays to fail
# decoding, but when they don't, we have to convert them to bytes.
deserialized_obj = msgpack.loads(
serialized_obj, encoding='utf-8', unicode_errors='ignore')
return msgpack_compat_obj_decode(
data_type, deserialized_obj, alias_validators, strict)
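    # Usage sketch (added; assumes the optional msgpack dependency is installed
    # and `bv` is the stone_validators module used throughout this file):
    #
    #     packed = msgpack_encode(bv.String(), u'hello')
    #     assert msgpack_decode(bv.String(), packed) == u'hello'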
|
|
# Write the benchmarking functions here.
# See "Writing benchmarks" in the asv docs for more information.
# sequence based similarity measures
from py_stringmatching.similarity_measure.affine import Affine
from py_stringmatching.similarity_measure.bag_distance import BagDistance
from py_stringmatching.similarity_measure.editex import Editex
from py_stringmatching.similarity_measure.hamming_distance import HammingDistance
from py_stringmatching.similarity_measure.jaro import Jaro
from py_stringmatching.similarity_measure.jaro_winkler import JaroWinkler
from py_stringmatching.similarity_measure.levenshtein import Levenshtein
from py_stringmatching.similarity_measure.needleman_wunsch import NeedlemanWunsch
from py_stringmatching.similarity_measure.smith_waterman import SmithWaterman
# token based similarity measures
from py_stringmatching.similarity_measure.cosine import Cosine
from py_stringmatching.similarity_measure.dice import Dice
from py_stringmatching.similarity_measure.jaccard import Jaccard
from py_stringmatching.similarity_measure.overlap_coefficient import OverlapCoefficient
from py_stringmatching.similarity_measure.soft_tfidf import SoftTfIdf
from py_stringmatching.similarity_measure.tfidf import TfIdf
from py_stringmatching.similarity_measure.tversky_index import TverskyIndex
# hybrid similarity measures
from py_stringmatching.similarity_measure.generalized_jaccard import GeneralizedJaccard
from py_stringmatching.similarity_measure.monge_elkan import MongeElkan
# phonetic similarity measures
from py_stringmatching.similarity_measure.soundex import Soundex
from . import _short_string_1, _long_string_1, _medium_string_1, _short_string_2, _long_string_2, _medium_string_2
from . import _small_num_tokens_wi_rep, _small_num_tokens_wo_rep, _med_num_tokens_wi_rep, _med_num_tokens_wo_rep, \
_large_num_tokens_wi_rep, _large_num_tokens_wo_rep, _long_hamm_string1, _long_hamm_string2
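# Note on conventions (added for orientation; see the asv documentation): each
# class below groups benchmarks for one similarity measure, setup() builds the
# measure object before timing, and every method whose name starts with
# `time_` is timed by asv against the string/token fixtures imported above.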
class TimeAffine:
def setup(self):
self.affine = Affine()
def time_short_short(self):
self.affine.get_raw_score(_short_string_1, _short_string_2)
def time_medium_medium(self):
self.affine.get_raw_score(_medium_string_1, _medium_string_2)
def time_long_long(self):
self.affine.get_raw_score(_long_string_1, _long_string_2)
def time_short_medium(self):
self.affine.get_raw_score(_short_string_1, _medium_string_1)
def time_short_long(self):
self.affine.get_raw_score(_short_string_1, _long_string_1)
def time_medium_long(self):
self.affine.get_raw_score(_medium_string_1, _long_string_1)
class TimeJaro:
def setup(self):
self.jaro = Jaro()
def time_short_short(self):
self.jaro.get_raw_score(_short_string_1, _short_string_2)
def time_medium_medium(self):
self.jaro.get_raw_score(_medium_string_1, _medium_string_2)
def time_long_long(self):
self.jaro.get_raw_score(_long_string_1, _long_string_2)
def time_short_medium(self):
self.jaro.get_raw_score(_short_string_1, _medium_string_1)
def time_short_long(self):
self.jaro.get_raw_score(_short_string_1, _long_string_1)
def time_medium_long(self):
self.jaro.get_raw_score(_medium_string_1, _long_string_1)
class TimeJaroWinkler:
def setup(self):
self.jaro_winkler = JaroWinkler()
def time_short_short(self):
self.jaro_winkler.get_raw_score(_short_string_1, _short_string_2)
def time_medium_medium(self):
self.jaro_winkler.get_raw_score(_medium_string_1, _medium_string_2)
def time_long_long(self):
self.jaro_winkler.get_raw_score(_long_string_1, _long_string_2)
def time_short_medium(self):
self.jaro_winkler.get_raw_score(_short_string_1, _medium_string_1)
def time_short_long(self):
self.jaro_winkler.get_raw_score(_short_string_1, _long_string_1)
def time_medium_long(self):
self.jaro_winkler.get_raw_score(_medium_string_1, _long_string_1)
class TimeHammingDistance:
def setup(self):
self.hamming_distance = HammingDistance()
def time_short_short(self):
self.hamming_distance.get_raw_score(_short_string_1, _short_string_1)
def time_medium_medium(self):
self.hamming_distance.get_raw_score(_medium_string_1, _medium_string_1)
def time_long_long(self):
self.hamming_distance.get_raw_score(_long_hamm_string1, _long_hamm_string2)
# def time_short_medium(self):
# self.hamming_distance.get_raw_score(_short_string_1, _medium_string_1)
#
# def time_short_long(self):
# self.hamming_distance.get_raw_score(_short_string_1, _long_string_1)
#
# def time_medium_long(self):
# self.hamming_distance.get_raw_score(_medium_string_1, _long_string_1)
class TimeEditex:
def setup(self):
self.editex = Editex()
def time_short_short(self):
self.editex.get_raw_score(_short_string_1, _short_string_2)
def time_medium_medium(self):
self.editex.get_raw_score(_medium_string_1, _medium_string_2)
def time_long_long(self):
self.editex.get_raw_score(_long_string_1, _long_string_2)
def time_short_medium(self):
self.editex.get_raw_score(_short_string_1, _medium_string_1)
def time_short_long(self):
self.editex.get_raw_score(_short_string_1, _long_string_1)
def time_medium_long(self):
self.editex.get_raw_score(_medium_string_1, _long_string_1)
class TimeLevenshtein:
def setup(self):
self.levenshtein = Levenshtein()
def time_short_short(self):
self.levenshtein.get_raw_score(_short_string_1, _short_string_2)
def time_medium_medium(self):
self.levenshtein.get_raw_score(_medium_string_1, _medium_string_2)
def time_long_long(self):
self.levenshtein.get_raw_score(_long_string_1, _long_string_2)
def time_short_medium(self):
self.levenshtein.get_raw_score(_short_string_1, _medium_string_1)
def time_short_long(self):
self.levenshtein.get_raw_score(_short_string_1, _long_string_1)
def time_medium_long(self):
self.levenshtein.get_raw_score(_medium_string_1, _long_string_1)
class TimeBagDistance:
def setup(self):
self.bag_distance = BagDistance()
def time_short_short(self):
self.bag_distance.get_raw_score(_short_string_1, _short_string_2)
def time_medium_medium(self):
self.bag_distance.get_raw_score(_medium_string_1, _medium_string_2)
def time_long_long(self):
self.bag_distance.get_raw_score(_long_string_1, _long_string_2)
def time_short_medium(self):
self.bag_distance.get_raw_score(_short_string_1, _medium_string_1)
def time_short_long(self):
self.bag_distance.get_raw_score(_short_string_1, _long_string_1)
def time_medium_long(self):
self.bag_distance.get_raw_score(_medium_string_1, _long_string_1)
class TimeNeedlemanWunsch:
def setup(self):
self.needleman_wunsch = NeedlemanWunsch()
def time_short_short(self):
self.needleman_wunsch.get_raw_score(_short_string_1, _short_string_2)
def time_medium_medium(self):
self.needleman_wunsch.get_raw_score(_medium_string_1, _medium_string_2)
def time_long_long(self):
self.needleman_wunsch.get_raw_score(_long_string_1, _long_string_2)
def time_short_medium(self):
self.needleman_wunsch.get_raw_score(_short_string_1, _medium_string_1)
def time_short_long(self):
self.needleman_wunsch.get_raw_score(_short_string_1, _long_string_1)
def time_medium_long(self):
self.needleman_wunsch.get_raw_score(_medium_string_1, _long_string_1)
class TimeSmithWaterman:
def setup(self):
self.smith_waterman = SmithWaterman()
def time_short_short(self):
self.smith_waterman.get_raw_score(_short_string_1, _short_string_2)
def time_medium_medium(self):
self.smith_waterman.get_raw_score(_medium_string_1, _medium_string_2)
def time_long_long(self):
self.smith_waterman.get_raw_score(_long_string_1, _long_string_2)
def time_short_medium(self):
self.smith_waterman.get_raw_score(_short_string_1, _medium_string_1)
def time_short_long(self):
self.smith_waterman.get_raw_score(_short_string_1, _long_string_1)
def time_medium_long(self):
self.smith_waterman.get_raw_score(_medium_string_1, _long_string_1)
class TimeSoundex:
def setup(self):
self.soundex = Soundex()
def time_short_short(self):
self.soundex.get_raw_score(_short_string_1, _short_string_2)
def time_medium_medium(self):
self.soundex.get_raw_score(_medium_string_1, _medium_string_2)
def time_long_long(self):
self.soundex.get_raw_score(_long_string_1, _long_string_2)
def time_short_medium(self):
self.soundex.get_raw_score(_short_string_1, _medium_string_1)
def time_short_long(self):
self.soundex.get_raw_score(_short_string_1, _long_string_1)
def time_medium_long(self):
self.soundex.get_raw_score(_medium_string_1, _long_string_1)
class TimeCosine:
def setup(self):
self.cosine = Cosine()
def time_small_small_wo_rep(self):
self.cosine.get_raw_score(_small_num_tokens_wo_rep, _small_num_tokens_wo_rep)
def time_small_small_wi_rep(self):
self.cosine.get_raw_score(_small_num_tokens_wi_rep, _small_num_tokens_wi_rep)
def time_medium_medium_wo_rep(self):
self.cosine.get_raw_score(_med_num_tokens_wo_rep, _med_num_tokens_wo_rep)
def time_medium_medium_wi_rep(self):
self.cosine.get_raw_score(_med_num_tokens_wi_rep, _med_num_tokens_wi_rep)
def time_large_large_wo_rep(self):
self.cosine.get_raw_score(_large_num_tokens_wo_rep, _large_num_tokens_wo_rep)
def time_large_large_wi_rep(self):
self.cosine.get_raw_score(_large_num_tokens_wi_rep, _large_num_tokens_wi_rep)
def time_small_medium_wo_rep(self):
self.cosine.get_raw_score(_small_num_tokens_wo_rep, _med_num_tokens_wo_rep)
def time_small_medium_wi_rep(self):
self.cosine.get_raw_score(_small_num_tokens_wi_rep, _med_num_tokens_wi_rep)
def time_small_large_wo_rep(self):
self.cosine.get_raw_score(_small_num_tokens_wo_rep, _large_num_tokens_wo_rep)
def time_small_large_wi_rep(self):
self.cosine.get_raw_score(_small_num_tokens_wi_rep, _large_num_tokens_wi_rep)
def time_medium_large_wo_rep(self):
self.cosine.get_raw_score(_med_num_tokens_wo_rep, _large_num_tokens_wo_rep)
def time_medium_large_wi_rep(self):
        self.cosine.get_raw_score(_med_num_tokens_wi_rep, _large_num_tokens_wi_rep)
class TimeDice:
def setup(self):
self.dice = Dice()
def time_small_small_wo_rep(self):
self.dice.get_raw_score(_small_num_tokens_wo_rep, _small_num_tokens_wo_rep)
def time_small_small_wi_rep(self):
self.dice.get_raw_score(_small_num_tokens_wi_rep, _small_num_tokens_wi_rep)
def time_medium_medium_wo_rep(self):
self.dice.get_raw_score(_med_num_tokens_wo_rep, _med_num_tokens_wo_rep)
def time_medium_medium_wi_rep(self):
self.dice.get_raw_score(_med_num_tokens_wi_rep, _med_num_tokens_wi_rep)
def time_large_large_wo_rep(self):
self.dice.get_raw_score(_large_num_tokens_wo_rep, _large_num_tokens_wo_rep)
def time_large_large_wi_rep(self):
self.dice.get_raw_score(_large_num_tokens_wi_rep, _large_num_tokens_wi_rep)
def time_small_medium_wo_rep(self):
self.dice.get_raw_score(_small_num_tokens_wo_rep, _med_num_tokens_wo_rep)
def time_small_medium_wi_rep(self):
self.dice.get_raw_score(_small_num_tokens_wi_rep, _med_num_tokens_wi_rep)
def time_small_large_wo_rep(self):
self.dice.get_raw_score(_small_num_tokens_wo_rep, _large_num_tokens_wo_rep)
def time_small_large_wi_rep(self):
self.dice.get_raw_score(_small_num_tokens_wi_rep, _large_num_tokens_wi_rep)
def time_medium_large_wo_rep(self):
self.dice.get_raw_score(_med_num_tokens_wo_rep, _large_num_tokens_wo_rep)
def time_medium_large_wi_rep(self):
        self.dice.get_raw_score(_med_num_tokens_wi_rep, _large_num_tokens_wi_rep)
class TimeJaccard:
def setup(self):
self.jaccard = Jaccard()
def time_small_small_wo_rep(self):
self.jaccard.get_raw_score(_small_num_tokens_wo_rep, _small_num_tokens_wo_rep)
def time_small_small_wi_rep(self):
self.jaccard.get_raw_score(_small_num_tokens_wi_rep, _small_num_tokens_wi_rep)
def time_medium_medium_wo_rep(self):
self.jaccard.get_raw_score(_med_num_tokens_wo_rep, _med_num_tokens_wo_rep)
def time_medium_medium_wi_rep(self):
self.jaccard.get_raw_score(_med_num_tokens_wi_rep, _med_num_tokens_wi_rep)
def time_large_large_wo_rep(self):
self.jaccard.get_raw_score(_large_num_tokens_wo_rep, _large_num_tokens_wo_rep)
def time_large_large_wi_rep(self):
self.jaccard.get_raw_score(_large_num_tokens_wi_rep, _large_num_tokens_wi_rep)
def time_small_medium_wo_rep(self):
self.jaccard.get_raw_score(_small_num_tokens_wo_rep, _med_num_tokens_wo_rep)
def time_small_medium_wi_rep(self):
self.jaccard.get_raw_score(_small_num_tokens_wi_rep, _med_num_tokens_wi_rep)
def time_small_large_wo_rep(self):
self.jaccard.get_raw_score(_small_num_tokens_wo_rep, _large_num_tokens_wo_rep)
def time_small_large_wi_rep(self):
self.jaccard.get_raw_score(_small_num_tokens_wi_rep, _large_num_tokens_wi_rep)
def time_medium_large_wo_rep(self):
self.jaccard.get_raw_score(_med_num_tokens_wo_rep, _large_num_tokens_wo_rep)
def time_medium_large_wi_rep(self):
        self.jaccard.get_raw_score(_med_num_tokens_wi_rep, _large_num_tokens_wi_rep)
class TimeGeneralizedJaccard:
def setup(self):
self.generalized_jaccard = GeneralizedJaccard()
def time_small_small_wo_rep(self):
self.generalized_jaccard.get_raw_score(_small_num_tokens_wo_rep, _small_num_tokens_wo_rep)
def time_small_small_wi_rep(self):
self.generalized_jaccard.get_raw_score(_small_num_tokens_wi_rep, _small_num_tokens_wi_rep)
def time_medium_medium_wo_rep(self):
self.generalized_jaccard.get_raw_score(_med_num_tokens_wo_rep, _med_num_tokens_wo_rep)
def time_medium_medium_wi_rep(self):
self.generalized_jaccard.get_raw_score(_med_num_tokens_wi_rep, _med_num_tokens_wi_rep)
def time_large_large_wo_rep(self):
self.generalized_jaccard.get_raw_score(_large_num_tokens_wo_rep, _large_num_tokens_wo_rep)
def time_large_large_wi_rep(self):
self.generalized_jaccard.get_raw_score(_large_num_tokens_wi_rep, _large_num_tokens_wi_rep)
def time_small_medium_wo_rep(self):
self.generalized_jaccard.get_raw_score(_small_num_tokens_wo_rep, _med_num_tokens_wo_rep)
def time_small_medium_wi_rep(self):
self.generalized_jaccard.get_raw_score(_small_num_tokens_wi_rep, _med_num_tokens_wi_rep)
def time_small_large_wo_rep(self):
self.generalized_jaccard.get_raw_score(_small_num_tokens_wo_rep, _large_num_tokens_wo_rep)
def time_small_large_wi_rep(self):
self.generalized_jaccard.get_raw_score(_small_num_tokens_wi_rep, _large_num_tokens_wi_rep)
def time_medium_large_wo_rep(self):
self.generalized_jaccard.get_raw_score(_med_num_tokens_wo_rep, _large_num_tokens_wo_rep)
def time_medium_large_wi_rep(self):
        self.generalized_jaccard.get_raw_score(_med_num_tokens_wi_rep, _large_num_tokens_wi_rep)
class TimeOverlap:
def setup(self):
self.ov_coeff = OverlapCoefficient()
def time_small_small_wo_rep(self):
self.ov_coeff.get_raw_score(_small_num_tokens_wo_rep, _small_num_tokens_wo_rep)
def time_small_small_wi_rep(self):
self.ov_coeff.get_raw_score(_small_num_tokens_wi_rep, _small_num_tokens_wi_rep)
def time_medium_medium_wo_rep(self):
self.ov_coeff.get_raw_score(_med_num_tokens_wo_rep, _med_num_tokens_wo_rep)
def time_medium_medium_wi_rep(self):
self.ov_coeff.get_raw_score(_med_num_tokens_wi_rep, _med_num_tokens_wi_rep)
def time_large_large_wo_rep(self):
self.ov_coeff.get_raw_score(_large_num_tokens_wo_rep, _large_num_tokens_wo_rep)
def time_large_large_wi_rep(self):
self.ov_coeff.get_raw_score(_large_num_tokens_wi_rep, _large_num_tokens_wi_rep)
def time_small_medium_wo_rep(self):
self.ov_coeff.get_raw_score(_small_num_tokens_wo_rep, _med_num_tokens_wo_rep)
def time_small_medium_wi_rep(self):
self.ov_coeff.get_raw_score(_small_num_tokens_wi_rep, _med_num_tokens_wi_rep)
def time_small_large_wo_rep(self):
self.ov_coeff.get_raw_score(_small_num_tokens_wo_rep, _large_num_tokens_wo_rep)
def time_small_large_wi_rep(self):
self.ov_coeff.get_raw_score(_small_num_tokens_wi_rep, _large_num_tokens_wi_rep)
def time_medium_large_wo_rep(self):
self.ov_coeff.get_raw_score(_med_num_tokens_wo_rep, _large_num_tokens_wo_rep)
def time_medium_large_wi_rep(self):
        self.ov_coeff.get_raw_score(_med_num_tokens_wi_rep, _large_num_tokens_wi_rep)
class TimeMongeElkan:
def setup(self):
self.monge_elkan = MongeElkan()
def time_small_small_wo_rep(self):
self.monge_elkan.get_raw_score(_small_num_tokens_wo_rep, _small_num_tokens_wo_rep)
def time_small_small_wi_rep(self):
self.monge_elkan.get_raw_score(_small_num_tokens_wi_rep, _small_num_tokens_wi_rep)
def time_medium_medium_wo_rep(self):
self.monge_elkan.get_raw_score(_med_num_tokens_wo_rep, _med_num_tokens_wo_rep)
def time_medium_medium_wi_rep(self):
self.monge_elkan.get_raw_score(_med_num_tokens_wi_rep, _med_num_tokens_wi_rep)
def time_large_large_wo_rep(self):
self.monge_elkan.get_raw_score(_large_num_tokens_wo_rep, _large_num_tokens_wo_rep)
def time_large_large_wi_rep(self):
self.monge_elkan.get_raw_score(_large_num_tokens_wi_rep, _large_num_tokens_wi_rep)
def time_small_medium_wo_rep(self):
self.monge_elkan.get_raw_score(_small_num_tokens_wo_rep, _med_num_tokens_wo_rep)
def time_small_medium_wi_rep(self):
self.monge_elkan.get_raw_score(_small_num_tokens_wi_rep, _med_num_tokens_wi_rep)
class TimeTfIdf:
def setup(self):
self.tfidf = TfIdf()
self.tfidf_with_dampen = TfIdf(dampen=True)
corpus_list = [_small_num_tokens_wo_rep, _small_num_tokens_wi_rep,
_med_num_tokens_wi_rep, _med_num_tokens_wo_rep,
_large_num_tokens_wo_rep, _large_num_tokens_wi_rep]
self.tfidf_with_corpus = TfIdf(corpus_list)
self.tfidf_with_corpus_dampen = TfIdf(corpus_list, dampen=True)
def time_small_small_wo_rep_no_corpus_no_dampen(self):
self.tfidf.get_raw_score(_small_num_tokens_wo_rep, _small_num_tokens_wo_rep)
def time_small_small_wi_rep_no_corpus_no_dampen(self):
self.tfidf.get_raw_score(_small_num_tokens_wi_rep, _small_num_tokens_wi_rep)
def time_medium_medium_wo_rep_no_corpus_no_dampen(self):
self.tfidf.get_raw_score(_med_num_tokens_wo_rep, _med_num_tokens_wo_rep)
def time_medium_medium_wi_rep_no_corpus_no_dampen(self):
self.tfidf.get_raw_score(_med_num_tokens_wi_rep, _med_num_tokens_wi_rep)
def time_large_large_wo_rep_no_corpus_no_dampen(self):
self.tfidf.get_raw_score(_large_num_tokens_wo_rep, _large_num_tokens_wo_rep)
def time_large_large_wi_rep_no_corpus_no_dampen(self):
self.tfidf.get_raw_score(_large_num_tokens_wi_rep, _large_num_tokens_wi_rep)
def time_small_medium_wo_rep_no_corpus_no_dampen(self):
self.tfidf.get_raw_score(_small_num_tokens_wo_rep, _med_num_tokens_wo_rep)
def time_small_medium_wi_rep_no_corpus_no_dampen(self):
self.tfidf.get_raw_score(_small_num_tokens_wi_rep, _med_num_tokens_wi_rep)
def time_small_large_wo_rep_no_corpus_no_dampen(self):
self.tfidf.get_raw_score(_small_num_tokens_wo_rep, _large_num_tokens_wo_rep)
def time_small_large_wi_rep_no_corpus_no_dampen(self):
self.tfidf.get_raw_score(_small_num_tokens_wi_rep, _large_num_tokens_wi_rep)
def time_medium_large_wo_rep_no_corpus_no_dampen(self):
self.tfidf.get_raw_score(_med_num_tokens_wo_rep, _large_num_tokens_wo_rep)
def time_medium_large_wi_rep_no_corpus_no_dampen(self):
        self.tfidf.get_raw_score(_med_num_tokens_wi_rep, _large_num_tokens_wi_rep)
# dampen - true
def time_small_small_wo_rep_no_corpus(self):
self.tfidf_with_dampen.get_raw_score(_small_num_tokens_wo_rep, _small_num_tokens_wo_rep)
def time_small_small_wi_rep_no_corpus(self):
self.tfidf_with_dampen.get_raw_score(_small_num_tokens_wi_rep, _small_num_tokens_wi_rep)
def time_medium_medium_wo_rep_no_corpus(self):
self.tfidf_with_dampen.get_raw_score(_med_num_tokens_wo_rep, _med_num_tokens_wo_rep)
def time_medium_medium_wi_rep_no_corpus(self):
self.tfidf_with_dampen.get_raw_score(_med_num_tokens_wi_rep, _med_num_tokens_wi_rep)
def time_large_large_wo_rep_no_corpus(self):
self.tfidf_with_dampen.get_raw_score(_large_num_tokens_wo_rep, _large_num_tokens_wo_rep)
def time_large_large_wi_rep_no_corpus(self):
self.tfidf_with_dampen.get_raw_score(_large_num_tokens_wi_rep, _large_num_tokens_wi_rep)
def time_small_medium_wo_rep_no_corpus(self):
self.tfidf_with_dampen.get_raw_score(_small_num_tokens_wo_rep, _med_num_tokens_wo_rep)
def time_small_medium_wi_rep_no_corpus(self):
self.tfidf_with_dampen.get_raw_score(_small_num_tokens_wi_rep, _med_num_tokens_wi_rep)
def time_small_large_wo_rep_no_corpus(self):
self.tfidf_with_dampen.get_raw_score(_small_num_tokens_wo_rep, _large_num_tokens_wo_rep)
def time_small_large_wi_rep_no_corpus(self):
self.tfidf_with_dampen.get_raw_score(_small_num_tokens_wi_rep, _large_num_tokens_wi_rep)
def time_medium_large_wo_rep_no_corpus(self):
self.tfidf_with_dampen.get_raw_score(_med_num_tokens_wo_rep, _large_num_tokens_wo_rep)
def time_medium_large_wi_rep_no_corpus(self):
        self.tfidf_with_dampen.get_raw_score(_med_num_tokens_wi_rep, _large_num_tokens_wi_rep)
# corpus list - true
def time_small_small_wo_rep_no_dampen(self):
self.tfidf_with_corpus.get_raw_score(_small_num_tokens_wo_rep, _small_num_tokens_wo_rep)
def time_small_small_wi_rep_no_dampen(self):
self.tfidf_with_corpus.get_raw_score(_small_num_tokens_wi_rep, _small_num_tokens_wi_rep)
def time_medium_medium_wo_rep_no_dampen(self):
self.tfidf_with_corpus.get_raw_score(_med_num_tokens_wo_rep, _med_num_tokens_wo_rep)
def time_medium_medium_wi_rep_no_dampen(self):
self.tfidf_with_corpus.get_raw_score(_med_num_tokens_wi_rep, _med_num_tokens_wi_rep)
def time_large_large_wo_rep_no_dampen(self):
self.tfidf_with_corpus.get_raw_score(_large_num_tokens_wo_rep, _large_num_tokens_wo_rep)
def time_large_large_wi_rep_no_dampen(self):
self.tfidf_with_corpus.get_raw_score(_large_num_tokens_wi_rep, _large_num_tokens_wi_rep)
def time_small_medium_wo_rep_no_dampen(self):
self.tfidf_with_corpus.get_raw_score(_small_num_tokens_wo_rep, _med_num_tokens_wo_rep)
def time_small_medium_wi_rep_no_dampen(self):
self.tfidf_with_corpus.get_raw_score(_small_num_tokens_wi_rep, _med_num_tokens_wi_rep)
def time_small_large_wo_rep_no_dampen(self):
self.tfidf_with_corpus.get_raw_score(_small_num_tokens_wo_rep, _large_num_tokens_wo_rep)
def time_small_large_wi_rep_no_dampen(self):
self.tfidf_with_corpus.get_raw_score(_small_num_tokens_wi_rep, _large_num_tokens_wi_rep)
def time_medium_large_wo_rep_no_dampen(self):
self.tfidf_with_corpus.get_raw_score(_med_num_tokens_wo_rep, _large_num_tokens_wo_rep)
def time_medium_large_wi_rep_no_dampen(self):
        self.tfidf_with_corpus.get_raw_score(_med_num_tokens_wi_rep, _large_num_tokens_wi_rep)
# corpus list - true, dampen_true
def time_small_small_wo_rep(self):
self.tfidf_with_corpus_dampen.get_raw_score(_small_num_tokens_wo_rep, _small_num_tokens_wo_rep)
def time_small_small_wi_rep(self):
self.tfidf_with_corpus_dampen.get_raw_score(_small_num_tokens_wi_rep, _small_num_tokens_wi_rep)
def time_medium_medium_wo_rep(self):
self.tfidf_with_corpus_dampen.get_raw_score(_med_num_tokens_wo_rep, _med_num_tokens_wo_rep)
def time_medium_medium_wi_rep(self):
self.tfidf_with_corpus_dampen.get_raw_score(_med_num_tokens_wi_rep, _med_num_tokens_wi_rep)
def time_large_large_wo_rep(self):
self.tfidf_with_corpus_dampen.get_raw_score(_large_num_tokens_wo_rep, _large_num_tokens_wo_rep)
def time_large_large_wi_rep(self):
self.tfidf_with_corpus_dampen.get_raw_score(_large_num_tokens_wi_rep, _large_num_tokens_wi_rep)
def time_small_medium_wo_rep(self):
self.tfidf_with_corpus_dampen.get_raw_score(_small_num_tokens_wo_rep, _med_num_tokens_wo_rep)
def time_small_medium_wi_rep(self):
self.tfidf_with_corpus_dampen.get_raw_score(_small_num_tokens_wi_rep, _med_num_tokens_wi_rep)
def time_small_large_wo_rep(self):
self.tfidf_with_corpus_dampen.get_raw_score(_small_num_tokens_wo_rep, _large_num_tokens_wo_rep)
def time_small_large_wi_rep(self):
self.tfidf_with_corpus_dampen.get_raw_score(_small_num_tokens_wi_rep, _large_num_tokens_wi_rep)
def time_medium_large_wo_rep(self):
self.tfidf_with_corpus_dampen.get_raw_score(_med_num_tokens_wo_rep, _large_num_tokens_wo_rep)
def time_medium_large_wi_rep(self):
        self.tfidf_with_corpus_dampen.get_raw_score(_med_num_tokens_wi_rep, _large_num_tokens_wi_rep)
class TimeSoftTfIdf:
def setup(self):
self.soft_tfidf = SoftTfIdf()
corpus_list = [_small_num_tokens_wo_rep, _small_num_tokens_wi_rep,
_med_num_tokens_wi_rep, _med_num_tokens_wo_rep,
_large_num_tokens_wo_rep, _large_num_tokens_wi_rep]
self.soft_tfidf_with_corpus = SoftTfIdf(corpus_list)
# no corpus list
def time_small_small_wo_rep_no_corpus(self):
self.soft_tfidf.get_raw_score(_small_num_tokens_wo_rep, _small_num_tokens_wo_rep)
def time_small_small_wi_rep_no_corpus(self):
self.soft_tfidf.get_raw_score(_small_num_tokens_wi_rep, _small_num_tokens_wi_rep)
def time_medium_medium_wo_rep_no_corpus(self):
self.soft_tfidf.get_raw_score(_med_num_tokens_wo_rep, _med_num_tokens_wo_rep)
def time_medium_medium_wi_rep_no_corpus(self):
self.soft_tfidf.get_raw_score(_med_num_tokens_wi_rep, _med_num_tokens_wi_rep)
def time_large_large_wo_rep_no_corpus(self):
self.soft_tfidf.get_raw_score(_large_num_tokens_wo_rep, _large_num_tokens_wo_rep)
def time_large_large_wi_rep_no_corpus(self):
self.soft_tfidf.get_raw_score(_large_num_tokens_wi_rep, _large_num_tokens_wi_rep)
def time_small_medium_wo_rep_no_corpus(self):
self.soft_tfidf.get_raw_score(_small_num_tokens_wo_rep, _med_num_tokens_wo_rep)
def time_small_medium_wi_rep_no_corpus(self):
self.soft_tfidf.get_raw_score(_small_num_tokens_wi_rep, _med_num_tokens_wi_rep)
def time_small_large_wo_rep_no_corpus(self):
self.soft_tfidf.get_raw_score(_small_num_tokens_wo_rep, _large_num_tokens_wo_rep)
def time_small_large_wi_rep_no_corpus(self):
self.soft_tfidf.get_raw_score(_small_num_tokens_wi_rep, _large_num_tokens_wi_rep)
def time_medium_large_wo_rep(self):
self.soft_tfidf_with_corpus.get_raw_score(_med_num_tokens_wo_rep, _large_num_tokens_wo_rep)
def time_medium_large_wi_rep(self):
        self.soft_tfidf_with_corpus.get_raw_score(_med_num_tokens_wi_rep, _large_num_tokens_wi_rep)
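# TverskyIndex is imported above but has no benchmark class; a minimal sketch
# (added) following the same pattern as the other token-based measures,
# assuming the default TverskyIndex parameters and the fixtures imported above.
class TimeTverskyIndex:
    def setup(self):
        self.tversky_index = TverskyIndex()
    def time_small_small_wo_rep(self):
        self.tversky_index.get_raw_score(_small_num_tokens_wo_rep, _small_num_tokens_wo_rep)
    def time_medium_medium_wo_rep(self):
        self.tversky_index.get_raw_score(_med_num_tokens_wo_rep, _med_num_tokens_wo_rep)
    def time_large_large_wo_rep(self):
        self.tversky_index.get_raw_score(_large_num_tokens_wo_rep, _large_num_tokens_wo_rep)
    def time_small_large_wo_rep(self):
        self.tversky_index.get_raw_score(_small_num_tokens_wo_rep, _large_num_tokens_wo_rep)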
|
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for `tf.data.experimental.group_by_window()`."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.data.experimental.ops import grouping
from tensorflow.python.data.kernel_tests import test_base
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import string_ops
from tensorflow.python.platform import test
# NOTE(mrry): These tests are based on the tests in bucket_ops_test.py.
# Currently, they use a constant batch size, though should be made to use a
# different batch size per key.
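# Minimal usage sketch (added for orientation; uses only the modules imported
# above): group_by_window buckets elements by a key and reduces each bucket
# once it reaches window_size elements, e.g.
#
#     dataset = dataset_ops.Dataset.range(10).apply(
#         grouping.group_by_window(
#             key_func=lambda x: x % 2,               # even/odd buckets
#             reduce_func=lambda _, xs: xs.batch(5),  # batch each bucket
#             window_size=5))
#
# yields the batches [0, 2, 4, 6, 8] and [1, 3, 5, 7, 9].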
@test_util.run_all_in_graph_and_eager_modes
class GroupByWindowTest(test_base.DatasetTestBase):
def _dynamicPad(self, bucket, window, window_size):
# TODO(mrry): To match `tf.contrib.training.bucket()`, implement a
# generic form of padded_batch that pads every component
# dynamically and does not rely on static shape information about
# the arguments.
return dataset_ops.Dataset.zip(
(dataset_ops.Dataset.from_tensors(bucket),
window.padded_batch(
32, (tensor_shape.TensorShape([]), tensor_shape.TensorShape(
[None]), tensor_shape.TensorShape([3])))))
def testSingleBucket(self):
def _map_fn(v):
return (v, array_ops.fill([v], v),
array_ops.fill([3], string_ops.as_string(v)))
input_dataset = dataset_ops.Dataset.from_tensor_slices(
math_ops.range(32)).map(_map_fn)
bucketed_dataset = input_dataset.apply(
grouping.group_by_window(
lambda x, y, z: 0,
lambda k, bucket: self._dynamicPad(k, bucket, 32), 32))
get_next = self.getNext(bucketed_dataset)
which_bucket, bucketed_values = self.evaluate(get_next())
self.assertEqual(0, which_bucket)
expected_scalar_int = np.arange(32, dtype=np.int64)
expected_unk_int64 = np.zeros((32, 31)).astype(np.int64)
for i in range(32):
expected_unk_int64[i, :i] = i
expected_vec3_str = np.vstack(3 * [np.arange(32).astype(bytes)]).T
self.assertAllEqual(expected_scalar_int, bucketed_values[0])
self.assertAllEqual(expected_unk_int64, bucketed_values[1])
self.assertAllEqual(expected_vec3_str, bucketed_values[2])
def testEvenOddBuckets(self):
def _map_fn(v):
return (v, array_ops.fill([v], v),
array_ops.fill([3], string_ops.as_string(v)))
input_dataset = dataset_ops.Dataset.from_tensor_slices(
math_ops.range(64)).map(_map_fn)
bucketed_dataset = input_dataset.apply(
grouping.group_by_window(
lambda x, y, z: math_ops.cast(x % 2, dtypes.int64),
lambda k, bucket: self._dynamicPad(k, bucket, 32), 32))
get_next = self.getNext(bucketed_dataset)
# Get two minibatches (one containing even values, one containing odds)
which_bucket_even, bucketed_values_even = self.evaluate(get_next())
which_bucket_odd, bucketed_values_odd = self.evaluate(get_next())
# Count number of bucket_tensors.
self.assertEqual(3, len(bucketed_values_even))
self.assertEqual(3, len(bucketed_values_odd))
# Ensure bucket 0 was used for all minibatch entries.
self.assertAllEqual(0, which_bucket_even)
self.assertAllEqual(1, which_bucket_odd)
    # Test the first bucket outputted, the evens starting at 0
expected_scalar_int = np.arange(0, 32 * 2, 2, dtype=np.int64)
expected_unk_int64 = np.zeros((32, 31 * 2)).astype(np.int64)
for i in range(0, 32):
expected_unk_int64[i, :2 * i] = 2 * i
expected_vec3_str = np.vstack(
3 * [np.arange(0, 32 * 2, 2).astype(bytes)]).T
self.assertAllEqual(expected_scalar_int, bucketed_values_even[0])
self.assertAllEqual(expected_unk_int64, bucketed_values_even[1])
self.assertAllEqual(expected_vec3_str, bucketed_values_even[2])
# Test the second bucket outputted, the odds starting at 1
expected_scalar_int = np.arange(1, 32 * 2 + 1, 2, dtype=np.int64)
expected_unk_int64 = np.zeros((32, 31 * 2 + 1)).astype(np.int64)
for i in range(0, 32):
expected_unk_int64[i, :2 * i + 1] = 2 * i + 1
expected_vec3_str = np.vstack(
3 * [np.arange(1, 32 * 2 + 1, 2).astype(bytes)]).T
self.assertAllEqual(expected_scalar_int, bucketed_values_odd[0])
self.assertAllEqual(expected_unk_int64, bucketed_values_odd[1])
self.assertAllEqual(expected_vec3_str, bucketed_values_odd[2])
def testEvenOddBucketsFilterOutAllOdd(self):
def _map_fn(v):
return {
"x": v,
"y": array_ops.fill([v], v),
"z": array_ops.fill([3], string_ops.as_string(v))
}
def _dynamic_pad_fn(bucket, window, _):
return dataset_ops.Dataset.zip(
(dataset_ops.Dataset.from_tensors(bucket),
window.padded_batch(
32, {
"x": tensor_shape.TensorShape([]),
"y": tensor_shape.TensorShape([None]),
"z": tensor_shape.TensorShape([3])
})))
input_dataset = dataset_ops.Dataset.from_tensor_slices(math_ops.range(
128)).map(_map_fn).filter(lambda d: math_ops.equal(d["x"] % 2, 0))
bucketed_dataset = input_dataset.apply(
grouping.group_by_window(
lambda d: math_ops.cast(d["x"] % 2, dtypes.int64),
lambda k, bucket: _dynamic_pad_fn(k, bucket, 32), 32))
get_next = self.getNext(bucketed_dataset)
# Get two minibatches ([0, 2, ...] and [64, 66, ...])
which_bucket0, bucketed_values_even0 = self.evaluate(get_next())
which_bucket1, bucketed_values_even1 = self.evaluate(get_next())
# Ensure that bucket 1 was completely filtered out
self.assertAllEqual(0, which_bucket0)
self.assertAllEqual(0, which_bucket1)
self.assertAllEqual(
np.arange(0, 64, 2, dtype=np.int64), bucketed_values_even0["x"])
self.assertAllEqual(
np.arange(64, 128, 2, dtype=np.int64), bucketed_values_even1["x"])
def testDynamicWindowSize(self):
components = np.arange(100).astype(np.int64)
# Key fn: even/odd
# Reduce fn: batches of 5
# Window size fn: even=5, odd=10
def window_size_func(key):
window_sizes = constant_op.constant([5, 10], dtype=dtypes.int64)
return window_sizes[key]
dataset = dataset_ops.Dataset.from_tensor_slices(components).apply(
grouping.group_by_window(lambda x: x % 2, lambda _, xs: xs.batch(20),
None, window_size_func))
get_next = self.getNext(dataset)
with self.assertRaises(errors.OutOfRangeError):
batches = 0
while True:
result = self.evaluate(get_next())
is_even = all(x % 2 == 0 for x in result)
is_odd = all(x % 2 == 1 for x in result)
self.assertTrue(is_even or is_odd)
expected_batch_size = 5 if is_even else 10
self.assertEqual(expected_batch_size, result.shape[0])
batches += 1
self.assertEqual(batches, 15)
def testSimple(self):
components = np.random.randint(100, size=(200,)).astype(np.int64)
dataset = dataset_ops.Dataset.from_tensor_slices(
components).map(lambda x: x * x).apply(
grouping.group_by_window(lambda x: x % 2, lambda _, xs: xs.batch(4),
4))
get_next = self.getNext(dataset)
counts = []
with self.assertRaises(errors.OutOfRangeError):
while True:
result = self.evaluate(get_next())
        self.assertTrue(
            all(x % 2 == 0 for x in result) or all(x % 2 == 1 for x in result))
counts.append(result.shape[0])
self.assertEqual(len(components), sum(counts))
num_full_batches = len([c for c in counts if c == 4])
self.assertGreaterEqual(num_full_batches, 24)
self.assertTrue(all(c == 4 for c in counts[:num_full_batches]))
def testImmediateOutput(self):
components = np.array(
[0, 0, 0, 0, 1, 1, 1, 1, 2, 2, 0, 0, 2, 2, 0, 0], dtype=np.int64)
dataset = dataset_ops.Dataset.from_tensor_slices(components).repeat(
-1).apply(
grouping.group_by_window(lambda x: x % 3, lambda _, xs: xs.batch(4),
4))
get_next = self.getNext(dataset)
# The input is infinite, so this test demonstrates that:
# 1. We produce output without having to consume the entire input,
# 2. Different buckets can produce output at different rates, and
# 3. For deterministic input, the output is deterministic.
for _ in range(3):
self.assertAllEqual([0, 0, 0, 0], self.evaluate(get_next()))
self.assertAllEqual([1, 1, 1, 1], self.evaluate(get_next()))
self.assertAllEqual([2, 2, 2, 2], self.evaluate(get_next()))
self.assertAllEqual([0, 0, 0, 0], self.evaluate(get_next()))
def testSmallGroups(self):
components = np.array([0, 0, 0, 0, 1, 1, 1, 1, 1, 0, 0, 0], dtype=np.int64)
dataset = dataset_ops.Dataset.from_tensor_slices(components).apply(
grouping.group_by_window(lambda x: x % 2, lambda _, xs: xs.batch(4), 4))
get_next = self.getNext(dataset)
self.assertAllEqual([0, 0, 0, 0], self.evaluate(get_next()))
self.assertAllEqual([1, 1, 1, 1], self.evaluate(get_next()))
# The small outputs at the end are deterministically produced in key
# order.
self.assertAllEqual([0, 0, 0], self.evaluate(get_next()))
self.assertAllEqual([1], self.evaluate(get_next()))
def testEmpty(self):
dataset = dataset_ops.Dataset.range(4).apply(
grouping.group_by_window(lambda _: 0, lambda _, xs: xs, 0))
get_next = self.getNext(dataset)
with self.assertRaisesRegexp(
errors.InvalidArgumentError,
"Window size must be greater than zero, but got 0."):
print(self.evaluate(get_next()))
def testReduceFuncError(self):
components = np.random.randint(100, size=(200,)).astype(np.int64)
def reduce_func(_, xs):
# Introduce an incorrect padded shape that cannot (currently) be
# detected at graph construction time.
return xs.padded_batch(
4,
padded_shapes=(tensor_shape.TensorShape([]),
constant_op.constant([5], dtype=dtypes.int64) * -1))
dataset = dataset_ops.Dataset.from_tensor_slices(
components).map(lambda x: (x, ops.convert_to_tensor([x * x]))).apply(
grouping.group_by_window(lambda x, _: x % 2, reduce_func, 32))
get_next = self.getNext(dataset)
with self.assertRaises(errors.InvalidArgumentError):
self.evaluate(get_next())
def testConsumeWindowDatasetMoreThanOnce(self):
components = np.random.randint(50, size=(200,)).astype(np.int64)
def reduce_func(key, window):
# Apply two different kinds of padding to the input: tight
# padding, and quantized (to a multiple of 10) padding.
return dataset_ops.Dataset.zip((
window.padded_batch(
4, padded_shapes=tensor_shape.TensorShape([None])),
window.padded_batch(
4, padded_shapes=ops.convert_to_tensor([(key + 1) * 10])),
))
dataset = dataset_ops.Dataset.from_tensor_slices(
components
).map(lambda x: array_ops.fill([math_ops.cast(x, dtypes.int32)], x)).apply(
grouping.group_by_window(
lambda x: math_ops.cast(array_ops.shape(x)[0] // 10, dtypes.int64),
reduce_func, 4))
get_next = self.getNext(dataset)
counts = []
with self.assertRaises(errors.OutOfRangeError):
while True:
tight_result, multiple_of_10_result = self.evaluate(get_next())
self.assertEqual(0, multiple_of_10_result.shape[1] % 10)
self.assertAllEqual(tight_result,
multiple_of_10_result[:, :tight_result.shape[1]])
counts.append(tight_result.shape[0])
self.assertEqual(len(components), sum(counts))
if __name__ == "__main__":
test.main()
|
|
#!/usr/bin/python3
#
# Average bandwidth monitoring script. Run periodically via NM db.sync to
# enforce a soft limit on daily bandwidth usage for each slice. If a
# slice is found to have transmitted 80% of its daily byte limit,
# its instantaneous rate will be capped at the bytes remaining in the limit
# over the time remaining in the recording period.
#
# Two separate limits are enforced, one for destinations exempt from
# the node bandwidth cap (i.e. Internet2), and the other for all other destinations.
#
# Mark Huang <[email protected]>
# Andy Bavier <[email protected]>
# Faiyaz Ahmed <[email protected]>
# Copyright (C) 2004-2008 The Trustees of Princeton University
#
import os
import sys
import time
import pickle
import socket
import copy
import threading
import plnode.bwlimit as bwlimit
import logger
import tools
import database
from config import Config
priority = 20
# Defaults
# Set DEBUG to True if you don't want to send emails
DEBUG = False
# Set ENABLE to False to setup buckets, but not limit.
ENABLE = True
DB_FILE = "/var/lib/nodemanager/bwmon.pickle"
# Constants
seconds_per_day = 24 * 60 * 60
bits_per_byte = 8
dev_default = tools.get_default_if()
# Burst to line rate (or node cap). Set by NM, in kbit/s.
default_MaxRate = int(bwlimit.get_bwcap(dev_default) / 1000)
default_Maxi2Rate = int(bwlimit.bwmax / 1000)
# 5.4 Gbyte per day (5.4 * 1024 * 1024 KByte).
# 5.4 Gbyte per day max allowed transferred per recording period.
# 5.4 Gbytes per day is approx 512k/s for 24hrs (approx because the original
# math was wrong, but it's better to keep a higher byte total and keep people
# happy than to correct the problem and piss people off).
# default_MaxKByte = 5662310
# -- 6/1/09
# llp wants to double these, so we use the following
# 1mbit * 24hrs * 60mins * 60secs = bits/day
# 1000000 * 24 * 60 * 60 / (1024 * 8)
default_MaxKByte = 10546875
# 16.4 Gbyte per day max allowed transferred per recording period to I2
# default_Maxi2KByte = 17196646
# -- 6/1/09
# 3Mb/s for 24hrs a day (30.17 gigs)
default_Maxi2KByte = 31640625
# Default share quanta
default_Share = 1
# Average over 1 day
period = 1 * seconds_per_day
# Message template
template = \
"""
The slice %(slice)s has transmitted more than %(bytes)s from
%(hostname)s to %(class)s destinations
since %(since)s.
Its maximum %(class)s burst rate will be capped at %(new_maxrate)s/s
until %(until)s.
Please reduce the average %(class)s transmission rate
of the slice to %(limit)s per %(period)s.
""".lstrip()
footer = \
"""
%(date)s %(hostname)s bwcap %(slice)s
""".lstrip()
def format_bytes(bytes, si = True):
"""
Formats bytes into a string
"""
if si:
kilo = 1000.
else:
# Officially, a kibibyte
kilo = 1024.
if bytes >= (kilo * kilo * kilo):
return "%.1f GB" % (bytes / (kilo * kilo * kilo))
elif bytes >= 1000000:
return "%.1f MB" % (bytes / (kilo * kilo))
elif bytes >= 1000:
return "%.1f KB" % (bytes / kilo)
else:
return "%.0f bytes" % bytes
def format_period(seconds):
"""
Formats a period in seconds into a string
"""
if seconds == (24 * 60 * 60):
return "day"
elif seconds == (60 * 60):
return "hour"
elif seconds > (24 * 60 * 60):
return "%.1f days" % (seconds / 24. / 60. / 60.)
elif seconds > (60 * 60):
return "%.1f hours" % (seconds / 60. / 60.)
elif seconds > (60):
return "%.1f minutes" % (seconds / 60.)
else:
return "%.0f seconds" % seconds
def slicemail(slice, subject, body):
'''
Front end to sendmail. Sends email to slice alias with given subject and body.
'''
config = Config()
sendmail = os.popen("/usr/sbin/sendmail -N never -t -f%s" % config.PLC_MAIL_SUPPORT_ADDRESS, "w")
# Parsed from MyPLC config
to = [config.PLC_MAIL_MOM_LIST_ADDRESS]
if slice is not None and slice != "root":
to.append(config.PLC_MAIL_SLICE_ADDRESS.replace("SLICE", slice))
header = {'from': "%s Support <%s>" % (config.PLC_NAME,
config.PLC_MAIL_SUPPORT_ADDRESS),
'to': ", ".join(to),
'version': sys.version.split(" ")[0],
'subject': subject}
# Write headers
sendmail.write(
"""
Content-type: text/plain
From: %(from)s
Reply-To: %(from)s
To: %(to)s
X-Mailer: Python/%(version)s
Subject: %(subject)s
""".lstrip() % header)
# Write body
sendmail.write(body)
# Done
sendmail.close()
class Slice:
"""
Stores the last recorded bandwidth parameters of a slice.
xid - slice context/VServer ID
name - slice name
time - beginning of recording period in UNIX seconds
bytes - low bandwidth bytes transmitted at the beginning of the recording period
i2bytes - high bandwidth bytes transmitted at the beginning of the recording period (for I2 -F)
MaxKByte - total volume of data allowed
    ThreshKByte - after the threshold is hit, cap the slice to (MaxKByte - bytes)/(time left in period)
    Maxi2KByte - same as MaxKByte, but for i2
    Threshi2KByte - same as ThreshKByte, but for i2
MaxRate - max_rate slice attribute.
Maxi2Rate - max_exempt_rate slice attribute.
Share - Used by Sirius to loan min rates
Sharei2 - Used by Sirius to loan min rates for i2
self.emailed - did slice recv email during this recording period
"""
def __init__(self, xid, name, rspec):
self.xid = xid
self.name = name
self.time = 0
self.bytes = 0
self.i2bytes = 0
self.MaxRate = default_MaxRate
self.MinRate = bwlimit.bwmin // 1000
self.Maxi2Rate = default_Maxi2Rate
self.Mini2Rate = bwlimit.bwmin // 1000
self.MaxKByte = default_MaxKByte
self.ThreshKByte = int(.8 * self.MaxKByte)
self.Maxi2KByte = default_Maxi2KByte
self.Threshi2KByte = int(.8 * self.Maxi2KByte)
self.Share = default_Share
self.Sharei2 = default_Share
self.emailed = False
self.capped = False
self.updateSliceTags(rspec)
bwlimit.set(
xid=self.xid, dev=dev_default,
minrate=self.MinRate * 1000,
maxrate=self.MaxRate * 1000,
maxexemptrate=self.Maxi2Rate * 1000,
minexemptrate=self.Mini2Rate * 1000,
share=self.Share)
def __repr__(self):
return self.name
def updateSliceTags(self, rspec):
'''
        Use rspecs from PLC's GetSlivers to populate the slice object. Also
        do some sanity checking.
'''
# Sanity check plus policy decision for MinRate:
        # MinRate can't be greater than 25% of MaxRate or NodeCap.
MinRate = int(rspec.get("net_min_rate", bwlimit.bwmin // 1000))
if MinRate > int(.25 * default_MaxRate):
MinRate = int(.25 * default_MaxRate)
if MinRate != self.MinRate:
self.MinRate = MinRate
logger.log("bwmon: Updating %s: Min Rate = %s" %(self.name, self.MinRate))
MaxRate = int(rspec.get('net_max_rate', default_MaxRate))
if MaxRate != self.MaxRate:
self.MaxRate = MaxRate
logger.log("bwmon: Updating %s: Max Rate = %s" %(self.name, self.MaxRate))
Mini2Rate = int(rspec.get('net_i2_min_rate', bwlimit.bwmin // 1000))
if Mini2Rate != self.Mini2Rate:
self.Mini2Rate = Mini2Rate
logger.log("bwmon: Updating %s: Min i2 Rate = %s" %(self.name, self.Mini2Rate))
Maxi2Rate = int(rspec.get('net_i2_max_rate', default_Maxi2Rate))
if Maxi2Rate != self.Maxi2Rate:
self.Maxi2Rate = Maxi2Rate
logger.log("bwmon: Updating %s: Max i2 Rate = %s" %(self.name, self.Maxi2Rate))
MaxKByte = int(rspec.get('net_max_kbyte', default_MaxKByte))
if MaxKByte != self.MaxKByte:
self.MaxKByte = MaxKByte
logger.log("bwmon: Updating %s: Max KByte lim = %s" %(self.name, self.MaxKByte))
Maxi2KByte = int(rspec.get('net_i2_max_kbyte', default_Maxi2KByte))
if Maxi2KByte != self.Maxi2KByte:
self.Maxi2KByte = Maxi2KByte
logger.log("bwmon: Updating %s: Max i2 KByte = %s" %(self.name, self.Maxi2KByte))
ThreshKByte = int(rspec.get('net_thresh_kbyte', (MaxKByte * .8)))
if ThreshKByte != self.ThreshKByte:
self.ThreshKByte = ThreshKByte
logger.log("bwmon: Updating %s: Thresh KByte = %s" %(self.name, self.ThreshKByte))
Threshi2KByte = int(rspec.get('net_i2_thresh_kbyte', (Maxi2KByte * .8)))
if Threshi2KByte != self.Threshi2KByte:
self.Threshi2KByte = Threshi2KByte
logger.log("bwmon: Updating %s: i2 Thresh KByte = %s" %(self.name, self.Threshi2KByte))
Share = int(rspec.get('net_share', default_Share))
if Share != self.Share:
self.Share = Share
logger.log("bwmon: Updating %s: Net Share = %s" %(self.name, self.Share))
Sharei2 = int(rspec.get('net_i2_share', default_Share))
if Sharei2 != self.Sharei2:
self.Sharei2 = Sharei2
logger.log("bwmon: Updating %s: Net i2 Share = %s" %(self.name, self.i2Share))
def reset(self, runningrates, rspec):
"""
Begin a new recording period. Remove caps by restoring limits
to their default values.
"""
# Cache share for later comparison
self.Share = runningrates.get('share', 1)
# Query Node Manager for max rate overrides
self.updateSliceTags(rspec)
# Reset baseline time
self.time = time.time()
        # Reset baseline byte counts
self.bytes = runningrates.get('usedbytes', 0)
self.i2bytes = runningrates.get('usedi2bytes', 0)
# Reset email
self.emailed = False
# Reset flag
self.capped = False
# Reset rates.
maxrate = self.MaxRate * 1000
minrate = self.MinRate * 1000
maxi2rate = self.Maxi2Rate * 1000
mini2rate = self.Mini2Rate * 1000
if (maxrate != runningrates.get('maxrate', 0)) or \
            (minrate != runningrates.get('minrate', 0)) or \
(maxi2rate != runningrates.get('maxexemptrate', 0)) or \
(mini2rate != runningrates.get('minexemptrate', 0)) or \
(self.Share != runningrates.get('share', 0)):
logger.log("bwmon: %s reset to %s/%s" % \
(self.name,
bwlimit.format_tc_rate(maxrate),
bwlimit.format_tc_rate(maxi2rate)))
bwlimit.set(xid = self.xid, dev = dev_default,
minrate = self.MinRate * 1000,
maxrate = self.MaxRate * 1000,
maxexemptrate = self.Maxi2Rate * 1000,
minexemptrate = self.Mini2Rate * 1000,
share = self.Share)
def notify(self, new_maxrate, new_maxexemptrate, usedbytes, usedi2bytes):
"""
Notify the slice it's being capped.
"""
# Prepare message parameters from the template
message = ""
params = {'slice': self.name, 'hostname': socket.gethostname(),
'since': time.asctime(time.gmtime(self.time)) + " GMT",
'until': time.asctime(time.gmtime(self.time + period)) + " GMT",
'date': time.asctime(time.gmtime()) + " GMT",
'period': format_period(period)}
if new_maxrate != (self.MaxRate * 1000):
# Format template parameters for low bandwidth message
params['class'] = "low bandwidth"
params['bytes'] = format_bytes(usedbytes - self.bytes)
params['limit'] = format_bytes(self.MaxKByte * 1024)
params['new_maxrate'] = bwlimit.format_tc_rate(new_maxrate)
# Cap low bandwidth burst rate
message += template % params
logger.log("bwmon: ** %(slice)s %(class)s capped at %(new_maxrate)s/s " % params)
if new_maxexemptrate != (self.Maxi2Rate * 1000):
# Format template parameters for high bandwidth message
params['class'] = "high bandwidth"
params['bytes'] = format_bytes(usedi2bytes - self.i2bytes)
params['limit'] = format_bytes(self.Maxi2KByte * 1024)
params['new_maxrate'] = bwlimit.format_tc_rate(new_maxexemptrate)
message += template % params
logger.log("bwmon: ** %(slice)s %(class)s capped at %(new_maxrate)s/s " % params)
# Notify slice
if self.emailed == False:
subject = "pl_mom capped bandwidth of slice %(slice)s on %(hostname)s" % params
if DEBUG:
logger.log("bwmon: "+ subject)
logger.log("bwmon: "+ message + (footer % params))
else:
self.emailed = True
logger.log("bwmon: Emailing %s" % self.name)
slicemail(self.name, subject, message + (footer % params))
def update(self, runningrates, rspec):
"""
Update byte counts and check if byte thresholds have been
exceeded. If exceeded, cap to remaining bytes in limit over remaining time in period.
Recalculate every time module runs.
"""
# cache share for later comparison
runningrates['share'] = self.Share
# Query Node Manager for max rate overrides
self.updateSliceTags(rspec)
usedbytes = runningrates['usedbytes']
usedi2bytes = runningrates['usedi2bytes']
# Check limits.
if usedbytes >= (self.bytes + (self.ThreshKByte * 1024)):
sum = self.bytes + (self.ThreshKByte * 1024)
maxbyte = self.MaxKByte * 1024
bytesused = usedbytes - self.bytes
timeused = int(time.time() - self.time)
# Calculate new rate in bit/s
new_maxrate = int(((maxbyte - bytesused) * 8)
/ (period - timeused))
# Never go under MinRate
if new_maxrate < (self.MinRate * 1000):
new_maxrate = self.MinRate * 1000
# State information. I'm capped.
self.capped += True
else:
# Sanity Check
new_maxrate = self.MaxRate * 1000
self.capped += False
if usedi2bytes >= (self.i2bytes + (self.Threshi2KByte * 1024)):
maxi2byte = self.Maxi2KByte * 1024
i2bytesused = usedi2bytes - self.i2bytes
timeused = int(time.time() - self.time)
# Calculate new rate in bit/s
new_maxi2rate = int(((maxi2byte - i2bytesused) * 8)
/(period - timeused))
# Never go under MinRate
if new_maxi2rate < (self.Mini2Rate * 1000):
new_maxi2rate = self.Mini2Rate * 1000
# State information. I'm capped.
self.capped += True
else:
# Sanity
new_maxi2rate = self.Maxi2Rate * 1000
self.capped += False
# Check running values against newly calculated values so as not to run tc
# unnecessarily
if (runningrates['maxrate'] != new_maxrate) or \
(runningrates['minrate'] != self.MinRate * 1000) or \
(runningrates['maxexemptrate'] != new_maxi2rate) or \
('minexemptrate' in runningrates and runningrates['minexemptrate'] != self.Mini2Rate * 1000) or \
(runningrates['share'] != self.Share):
# Apply parameters
bwlimit.set(xid = self.xid, dev = dev_default,
minrate = self.MinRate * 1000,
maxrate = new_maxrate,
minexemptrate = self.Mini2Rate * 1000,
maxexemptrate = new_maxi2rate,
share = self.Share)
# Notify slice
if self.capped:
self.notify(new_maxrate, new_maxi2rate, usedbytes, usedi2bytes)
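# Worked example (illustrative only, with made-up numbers) of how the cap in
# update() is derived. Suppose MaxKByte = 10,000,000 (about 10 GB per day),
# 5 GB have been used 12 hours into a 24-hour period (period = 86400 s):
#
#   maxbyte   = 10000000 * 1024          # 10,240,000,000 bytes allowed
#   bytesused = 5 * 1000 ** 3            #  5,000,000,000 bytes consumed
#   timeused  = 12 * 3600                # 43,200 seconds elapsed
#   new_maxrate = ((maxbyte - bytesused) * 8) / (86400 - timeused)
#   # ~970,370 bit/s: the remaining byte budget spread evenly over the
#   # remaining 12 hours, never allowed to drop below MinRate * 1000.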
def gethtbs(root_xid, default_xid):
"""
Return dict {xid: {*rates}} of running htbs as reported by tc that have names.
Turn off HTBs without names.
"""
livehtbs = {}
for params in bwlimit.get(dev = dev_default):
(xid, share,
minrate, maxrate,
minexemptrate, maxexemptrate,
usedbytes, usedi2bytes) = params
name = bwlimit.get_slice(xid)
if (name is None) \
and (xid != root_xid) \
and (xid != default_xid):
# Orphaned (not associated with a slice) class
name = "%d?" % xid
logger.log("bwmon: Found orphaned HTB %s. Removing." %name)
bwlimit.off(xid, dev = dev_default)
livehtbs[xid] = {'share': share,
'minrate': minrate,
'maxrate': maxrate,
'maxexemptrate': maxexemptrate,
'minexemptrate': minexemptrate,
'usedbytes': usedbytes,
'name': name,
'usedi2bytes': usedi2bytes}
return livehtbs
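# Shape of the value returned by gethtbs() (illustrative, with made-up xids and
# counter values); callers such as sync() index it by xid and read the tc counters:
#
#   {
#     1234: {'share': 1, 'minrate': 8000, 'maxrate': 10000000,
#            'minexemptrate': 8000, 'maxexemptrate': 10000000,
#            'usedbytes': 123456, 'usedi2bytes': 0, 'name': 'pl_example'},
#     ...
#   }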
def sync(nmdbcopy):
"""
Syncs tc, db, and bwmon.pickle.
Then, starts new slices, kills old ones, and updates byte accounts for each running slice.
Sends emails and caps those that went over their limit.
"""
# Defaults
global DB_FILE, \
period, \
default_MaxRate, \
default_Maxi2Rate, \
default_MaxKByte, \
default_Maxi2KByte, \
default_Share, \
dev_default
# All slices
names = []
# In case the limits have changed.
default_MaxRate = int(bwlimit.get_bwcap(dev_default) / 1000)
default_Maxi2Rate = int(bwlimit.bwmax / 1000)
# In case the default isn't set yet.
if default_MaxRate == -1:
default_MaxRate = 1000000
# xxx $Id$
# with svn we used to have a trick to detect upgrades of this file
# this has gone with the move to git, without any noticeable effect on operations though
try:
f = open(DB_FILE, "r+")
logger.verbose("bwmon: Loading %s" % DB_FILE)
(version, slices, deaddb) = pickle.load(f)
f.close()
# Check version of data file
if version != "$Id$":
logger.log("bwmon: Not using old version '%s' data file %s" % (version, DB_FILE))
raise Exception
except Exception:
version = "$Id$"
slices = {}
deaddb = {}
# Get/set special slice IDs
root_xid = bwlimit.get_xid("root")
default_xid = bwlimit.get_xid("default")
# Since root is required for sanity, it's not in the API/plc database, so pass {}
# to use defaults.
if root_xid not in list(slices.keys()):
slices[root_xid] = Slice(root_xid, "root", {})
slices[root_xid].reset({}, {})
# Used by bwlimit. pass {} since there is no rspec (like above).
if default_xid not in list(slices.keys()):
slices[default_xid] = Slice(default_xid, "default", {})
slices[default_xid].reset({}, {})
live = {}
# Get running slivers that should be on this node (from plc). {xid: name}
# db keys on name, bwmon keys on xid. db doesn't have xid either.
for plcSliver in list(nmdbcopy.keys()):
live[bwlimit.get_xid(plcSliver)] = nmdbcopy[plcSliver]
logger.verbose("bwmon: Found %s instantiated slices" % list(live.keys()).__len__())
logger.verbose("bwmon: Found %s slices in dat file" % list(slices.values()).__len__())
# Get actual running values from tc.
# Update slice totals and bandwidth. {xid: {values}}
kernelhtbs = gethtbs(root_xid, default_xid)
logger.verbose("bwmon: Found %s running HTBs" % list(kernelhtbs.keys()).__len__())
# The dat file has HTBs for slices, but the HTBs aren't running
nohtbslices = set(slices.keys()) - set(kernelhtbs.keys())
logger.verbose( "bwmon: Found %s slices in dat but not running." % nohtbslices.__len__())
# Reset tc counts.
for nohtbslice in nohtbslices:
if nohtbslice in live:
slices[nohtbslice].reset( {}, live[nohtbslice]['_rspec'] )
else:
logger.log("bwmon: Removing abondoned slice %s from dat." % nohtbslice)
del slices[nohtbslice]
# The dat file doesn't have an HTB for the slice, but the kernel does
slicesnodat = set(kernelhtbs.keys()) - set(slices.keys())
logger.verbose( "bwmon: Found %s slices with HTBs but not in dat" % slicesnodat.__len__())
for slicenodat in slicesnodat:
# But slice is running
if slicenodat in live:
# Init the slice, which means start accounting over, since the kernel
# HTB was already there.
slices[slicenodat] = Slice(slicenodat,
live[slicenodat]['name'],
live[slicenodat]['_rspec'])
# Get new slices.
# Slices in GetSlivers but not running HTBs
newslicesxids = set(live.keys()) - set(kernelhtbs.keys())
logger.verbose("bwmon: Found %s new slices" % newslicesxids.__len__())
# Setup new slices
for newslice in newslicesxids:
# Delegated slices don't have xids (which are uids) since they haven't been
# instantiated yet.
if newslice is not None and '_rspec' in live[newslice]:
# Check to see if we recently deleted this slice.
if live[newslice]['name'] not in list(deaddb.keys()):
logger.log( "bwmon: new slice %s" % live[newslice]['name'] )
# _rspec is the computed rspec: NM retrieved data from PLC, computed loans
# and made a dict of computed values.
slices[newslice] = Slice(newslice, live[newslice]['name'], live[newslice]['_rspec'])
slices[newslice].reset( {}, live[newslice]['_rspec'] )
# Double check time for dead slice in deaddb is within 24hr recording period.
elif (time.time() <= (deaddb[live[newslice]['name']]['slice'].time + period)):
deadslice = deaddb[live[newslice]['name']]
logger.log("bwmon: Reinstantiating deleted slice %s" % live[newslice]['name'])
slices[newslice] = deadslice['slice']
slices[newslice].xid = newslice
# Start the HTB
newvals = {"maxrate": deadslice['slice'].MaxRate * 1000,
"minrate": deadslice['slice'].MinRate * 1000,
"maxexemptrate": deadslice['slice'].Maxi2Rate * 1000,
"usedbytes": deadslice['htb']['usedbytes'] * 1000,
"usedi2bytes": deadslice['htb']['usedi2bytes'],
"share":deadslice['htb']['share']}
slices[newslice].reset(newvals, live[newslice]['_rspec'])
# Bring up to date
slices[newslice].update(newvals, live[newslice]['_rspec'])
# Since the slice has been reinitialized, remove it from the dead database.
del deaddb[deadslice['slice'].name]
del newvals
else:
logger.log("bwmon: Slice %s doesn't have xid. Skipping." % live[newslice]['name'])
# Move dead slices that exist in the pickle file, but
# aren't instantiated by PLC into the dead dict until
# recording period is over. This is to avoid the case where a slice is dynamically created
# and destroyed then recreated to get around byte limits.
deadxids = set(slices.keys()) - set(live.keys())
logger.verbose("bwmon: Found %s dead slices" % (deadxids.__len__() - 2))
for deadxid in deadxids:
if deadxid == root_xid or deadxid == default_xid:
continue
logger.log("bwmon: removing dead slice %s " % deadxid)
if deadxid in slices and deadxid in kernelhtbs:
# add slice (by name) to deaddb
logger.log("bwmon: Saving bandwidth totals for %s." % slices[deadxid].name)
deaddb[slices[deadxid].name] = {'slice': slices[deadxid], 'htb': kernelhtbs[deadxid]}
del slices[deadxid]
if deadxid in kernelhtbs:
logger.verbose("bwmon: Removing HTB for %s." % deadxid)
bwlimit.off(deadxid, dev = dev_default)
# Clean up deaddb
for deadslice in list(deaddb.keys()):
if (time.time() >= (deaddb[deadslice]['slice'].time + period)):
logger.log("bwmon: Removing dead slice %s from dat." \
% deaddb[deadslice]['slice'].name)
del deaddb[deadslice]
# Get actual running values from tc since we've added and removed buckets.
# Update slice totals and bandwidth. {xid: {values}}
kernelhtbs = gethtbs(root_xid, default_xid)
logger.verbose("bwmon: now %s running HTBs" % list(kernelhtbs.keys()).__len__())
# Update all byte limits on all slices
for (xid, slice) in slices.items():
# Monitor only the specified slices
if xid == root_xid or xid == default_xid: continue
if names and slice.name not in names:
continue
if (time.time() >= (slice.time + period)) or \
(kernelhtbs[xid]['usedbytes'] < slice.bytes) or \
(kernelhtbs[xid]['usedi2bytes'] < slice.i2bytes):
# Reset to defaults every 24 hours or if it appears
# that the byte counters have overflowed (or, more
# likely, the node was restarted or the HTB buckets
# were re-initialized).
slice.reset(kernelhtbs[xid], live[xid]['_rspec'])
elif ENABLE:
logger.verbose("bwmon: Updating slice %s" % slice.name)
# Update byte counts
slice.update(kernelhtbs[xid], live[xid]['_rspec'])
logger.verbose("bwmon: Saving %s slices in %s" % (list(slices.keys()).__len__(), DB_FILE))
f = open(DB_FILE, "w")
pickle.dump((version, slices, deaddb), f)
f.close()
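# Illustrative sketch (not executed here) of the reconciliation logic above:
# sync() compares three keyspaces with plain set arithmetic.
#
#   dat  = set(slices.keys())       # slices recorded in bwmon's pickle file
#   kern = set(kernelhtbs.keys())   # HTBs currently running under tc
#   plc  = set(live.keys())         # slivers PLC says belong on this node
#
#   dat - kern    # recorded but no running HTB   -> reset (if live) or drop
#   kern - dat    # running HTB with no record    -> start accounting over (if live)
#   plc - kern    # expected but no HTB yet       -> new, or resurrected from deaddb
#   dat - plc     # recorded but gone from PLC    -> park in deaddb for one period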
# Doesn't use the generic default interface because this runs as its own thread;
# changing the config variable will not have an effect since GetSlivers is a no-op.
def getDefaults(nmdbcopy):
'''
Get defaults from default slice's slice attributes.
'''
status = True
# default slice
dfltslice = nmdbcopy.get(Config().PLC_SLICE_PREFIX+"_default")
if dfltslice:
if dfltslice['rspec']['net_max_rate'] == -1:
allOff()
status = False
return status
def allOff():
"""
Turn off all slice HTBs
"""
# Get/set special slice IDs
root_xid = bwlimit.get_xid("root")
default_xid = bwlimit.get_xid("default")
kernelhtbs = gethtbs(root_xid, default_xid)
if len(kernelhtbs):
logger.log("bwmon: Disabling all running HTBs.")
for htb in list(kernelhtbs.keys()): bwlimit.off(htb, dev = dev_default)
lock = threading.Event()
def run():
"""
When run as a thread, wait for event, lock db, deep copy it, release it,
run bwmon.GetSlivers(), then go back to waiting.
"""
logger.verbose("bwmon: Thread started")
while True:
lock.wait()
logger.verbose("bwmon: Event received. Running.")
database.db_lock.acquire()
nmdbcopy = copy.deepcopy(database.db)
database.db_lock.release()
try:
if getDefaults(nmdbcopy) and len(bwlimit.tc("class show dev %s" % dev_default)) > 0:
# class show to check if net:InitNodeLimit:bwlimit.init has run.
sync(nmdbcopy)
else: logger.log("bwmon: BW limits DISABLED.")
except: logger.log_exc("bwmon failed")
lock.clear()
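# Minimal sketch of how another thread would trigger a bwmon pass (assumes this
# module is imported as 'bwmon'; illustrative only, not part of the module):
#
#   import bwmon
#   bwmon.start()       # spawns run() as a daemon thread; it blocks on lock.wait()
#   bwmon.lock.set()    # wake the thread; it deep-copies the NM database and runs sync()
#   # run() calls lock.clear() when it finishes and goes back to waiting.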
def start(*args):
tools.as_daemon_thread(run)
def GetSlivers(*args):
logger.verbose ("bwmon: triggering dummy GetSlivers")
pass
|
|
###############################################################################
##
## Copyright 2011-2013 Tavendo GmbH
##
## Licensed under the Apache License, Version 2.0 (the "License");
## you may not use this file except in compliance with the License.
## You may obtain a copy of the License at
##
## http://www.apache.org/licenses/LICENSE-2.0
##
## Unless required by applicable law or agreed to in writing, software
## distributed under the License is distributed on an "AS IS" BASIS,
## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
## See the License for the specific language governing permissions and
## limitations under the License.
##
###############################################################################
import os, json, binascii, time, textwrap
from twisted.python import log
from twisted.internet import reactor
# for versions
import autobahn
import autobahntestsuite
from autobahn.websocket import WebSocketProtocol, \
WebSocketServerFactory, \
WebSocketServerProtocol, \
WebSocketClientFactory, \
WebSocketClientProtocol, \
connectWS
from case import Case, \
Cases, \
CaseCategories, \
CaseSubCategories, \
CaseBasename
from caseset import CaseSet
from autobahn.util import utcnow
from report import CSS_COMMON, \
CSS_DETAIL_REPORT, \
CSS_MASTER_REPORT, \
JS_MASTER_REPORT
def binLogData(data, maxlen = 64):
ellipses = " ..."
if len(data) > maxlen - len(ellipses):
dd = binascii.b2a_hex(data[:maxlen]) + ellipses
else:
dd = binascii.b2a_hex(data)
return dd
def asciiLogData(data, maxlen = 64, replace = False):
ellipses = " ..."
try:
if len(data) > maxlen - len(ellipses):
dd = data[:maxlen] + ellipses
else:
dd = data
return dd.decode('utf8', errors = 'replace' if replace else 'strict')
except:
return '0x' + binLogData(data, maxlen)
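# Illustrative examples (not executed) of the two log helpers above:
#
#   binLogData("\x01\x02\x03")            # -> '010203'
#   binLogData("x" * 100, maxlen = 8)     # -> '7878787878787878 ...' (hex of first 8 octets, then ellipsis)
#   asciiLogData("hello")                 # -> u'hello'
#   asciiLogData("\xff\xfe")              # -> '0xfffe' (falls back to hex when not valid UTF-8)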
class FuzzingProtocol:
"""
Common mixin-base class for fuzzing server and client protocols.
"""
MAX_WIRE_LOG_DATA = 256
def connectionMade(self):
attrs = ['case', 'runCase', 'caseAgent', 'caseStarted']
for attr in attrs:
if not hasattr(self, attr):
setattr(self, attr, None)
#self.case = None
#self.runCase = None
#self.caseAgent = None
#self.caseStarted = None
self.caseStart = 0
self.caseEnd = 0
## wire log
##
self.createWirelog = True
self.wirelog = []
## stats for octets and frames
##
self.createStats = True
self.rxOctetStats = {}
self.rxFrameStats = {}
self.txOctetStats = {}
self.txFrameStats = {}
def connectionLost(self, reason):
if self.runCase:
self.runCase.onConnectionLost(self.failedByMe)
self.caseEnd = time.time()
caseResult = {"case": self.case,
"id": self.factory.CaseSet.caseClasstoId(self.Case),
"description": self.Case.DESCRIPTION,
"expectation": self.Case.EXPECTATION,
"agent": self.caseAgent,
"started": self.caseStarted,
"duration": int(round(1000. * (self.caseEnd - self.caseStart))), # case execution time in ms
"reportTime": self.runCase.reportTime, # True/False switch to control report output of duration
"reportCompressionRatio": self.runCase.reportCompressionRatio,
"behavior": self.runCase.behavior,
"behaviorClose": self.runCase.behaviorClose,
"expected": self.runCase.expected,
"expectedClose": self.runCase.expectedClose,
"received": self.runCase.received,
"result": self.runCase.result,
"resultClose": self.runCase.resultClose,
"wirelog": self.wirelog,
"createWirelog": self.createWirelog,
"closedByMe": self.closedByMe,
"failedByMe": self.failedByMe,
"droppedByMe": self.droppedByMe,
"wasClean": self.wasClean,
"wasNotCleanReason": self.wasNotCleanReason,
"wasServerConnectionDropTimeout": self.wasServerConnectionDropTimeout,
"wasOpenHandshakeTimeout": self.wasOpenHandshakeTimeout,
"wasCloseHandshakeTimeout": self.wasCloseHandshakeTimeout,
"localCloseCode": self.localCloseCode,
"localCloseReason": self.localCloseReason,
"remoteCloseCode": self.remoteCloseCode,
"remoteCloseReason": self.remoteCloseReason,
"isServer": self.factory.isServer,
"createStats": self.createStats,
"rxOctetStats": self.rxOctetStats,
"rxFrameStats": self.rxFrameStats,
"txOctetStats": self.txOctetStats,
"txFrameStats": self.txFrameStats,
"httpRequest": self.http_request_data,
"httpResponse": self.http_response_data,
"trafficStats": self.runCase.trafficStats.__json__() if self.runCase.trafficStats else None}
def cleanBin(e_old):
e_new = []
for t in e_old:
if t[0] == 'message':
e_new.append((t[0], asciiLogData(t[1]), t[2]))
elif t[0] in ['ping', 'pong']:
e_new.append((t[0], asciiLogData(t[1])))
elif t[0] == 'timeout':
e_new.append(t)
else:
print t
raise Exception("unknown part type %s" % t[0])
return e_new
for k in caseResult['expected']:
e_old = caseResult['expected'][k]
caseResult['expected'][k] = cleanBin(e_old)
caseResult['received'] = cleanBin(caseResult['received'])
## now log the case results
##
self.factory.logCase(caseResult)
# parent's connectionLost does useful things
WebSocketProtocol.connectionLost(self,reason)
def enableWirelog(self, enable):
if enable != self.createWirelog:
self.createWirelog = enable
self.wirelog.append(("WLM", enable))
def logRxOctets(self, data):
if self.createStats:
l = len(data)
self.rxOctetStats[l] = self.rxOctetStats.get(l, 0) + 1
if self.createWirelog:
self.wirelog.append(("RO", (len(data), binLogData(data))))
def logTxOctets(self, data, sync):
if self.createStats:
l = len(data)
self.txOctetStats[l] = self.txOctetStats.get(l, 0) + 1
if self.createWirelog:
self.wirelog.append(("TO", (len(data), binLogData(data)), sync))
def logRxFrame(self, frameHeader, payload):
if self.createStats:
self.rxFrameStats[frameHeader.opcode] = self.rxFrameStats.get(frameHeader.opcode, 0) + 1
if self.createWirelog:
p = ''.join(payload)
self.wirelog.append(("RF",
(len(p), asciiLogData(p)),
frameHeader.opcode,
frameHeader.fin,
frameHeader.rsv,
frameHeader.mask is not None,
binascii.b2a_hex(frameHeader.mask) if frameHeader.mask else None))
def logTxFrame(self, frameHeader, payload, repeatLength, chopsize, sync):
if self.createStats:
self.txFrameStats[frameHeader.opcode] = self.txFrameStats.get(frameHeader.opcode, 0) + 1
if self.createWirelog:
self.wirelog.append(("TF",
(len(payload), asciiLogData(payload)),
frameHeader.opcode,
frameHeader.fin,
frameHeader.rsv,
binascii.b2a_hex(frameHeader.mask) if frameHeader.mask else None,
repeatLength,
chopsize,
sync))
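# Summary of the wirelog entry tuples appended by the log* methods above and
# consumed later by createAgentCaseReportHTML() (informational comment only):
#
#   ("RO", (length, hexdata))                                   received octets
#   ("TO", (length, hexdata), sync)                             transmitted octets
#   ("RF", (length, ascii), opcode, fin, rsv, masked, mask)     received frame
#   ("TF", (length, ascii), opcode, fin, rsv, mask,
#          repeatLength, chopsize, sync)                        transmitted frame
#   ("CT"/"CTE", ...), ("KL"/"KLE", ...), ("TI"/"TIE", ...), ("WLM", flag)
#                                delay, kill, close and wirelog-mode markers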
def executeContinueLater(self, fun, tag):
if self.state != WebSocketProtocol.STATE_CLOSED:
self.wirelog.append(("CTE", tag))
fun()
else:
pass # connection already gone
def continueLater(self, delay, fun, tag = None):
self.wirelog.append(("CT", delay, tag))
reactor.callLater(delay, self.executeContinueLater, fun, tag)
def executeKillAfter(self):
if self.state != WebSocketProtocol.STATE_CLOSED:
self.wirelog.append(("KLE", ))
self.failConnection()
else:
pass # connection already gone
def killAfter(self, delay):
self.wirelog.append(("KL", delay))
reactor.callLater(delay, self.executeKillAfter)
def executeCloseAfter(self):
if self.state != WebSocketProtocol.STATE_CLOSED:
self.wirelog.append(("TIE", ))
self.sendClose()
else:
pass # connection already gone
def closeAfter(self, delay):
self.wirelog.append(("TI", delay))
reactor.callLater(delay, self.executeCloseAfter)
def onOpen(self):
if self.runCase:
cc_id = self.factory.CaseSet.caseClasstoId(self.runCase.__class__)
if self.factory.CaseSet.checkAgentCaseExclude(self.factory.specExcludeAgentCases, self.caseAgent, cc_id):
print "Skipping test case %s for agent %s by test configuration!" % (cc_id, self.caseAgent)
self.runCase = None
self.sendClose()
return
else:
self.caseStart = time.time()
self.runCase.onOpen()
elif self.path == "/updateReports":
self.factory.createReports()
self.sendClose()
elif self.path == "/getCaseCount":
self.sendMessage(json.dumps(len(self.factory.specCases)))
self.sendClose()
elif self.path == "/getCaseStatus":
def sendResults(results):
self.sendMessage(json.dumps({
'behavior':results['behavior']
}))
self.sendClose()
self.factory.addResultListener(self.caseAgent, self.factory.CaseSet.caseClasstoId(self.Case), sendResults)
elif self.path == "/getCaseInfo":
self.sendMessage(json.dumps({
'id': self.factory.CaseSet.caseClasstoId(self.Case),
'description': self.factory.CaseSet.caseClassToPrettyDescription(self.Case),
}))
self.sendClose()
else:
pass
def onPong(self, payload):
if self.runCase:
self.runCase.onPong(payload)
else:
if self.debug:
log.msg("Pong received: " + payload)
def onClose(self, wasClean, code, reason):
if self.runCase:
self.runCase.onClose(wasClean, code, reason)
else:
if self.debug:
log.msg("Close received: %s - %s" % (code, reason))
def onMessage(self, msg, binary):
if self.runCase:
self.runCase.onMessage(msg, binary)
else:
if binary:
raise Exception("binary command message")
else:
try:
obj = json.loads(msg)
except:
raise Exception("could not parse command")
## send one frame as specified
##
if obj[0] == "sendframe":
pl = obj[1].get("payload", "")
self.sendFrame(opcode = obj[1]["opcode"],
payload = pl.encode("UTF-8"),
fin = obj[1].get("fin", True),
rsv = obj[1].get("rsv", 0),
mask = obj[1].get("mask", None),
payload_len = obj[1].get("payload_len", None),
chopsize = obj[1].get("chopsize", None),
sync = obj[1].get("sync", False))
## send multiple frames as specified
##
elif obj[0] == "sendframes":
frames = obj[1]
for frame in frames:
pl = frame.get("payload", "")
self.sendFrame(opcode = frame["opcode"],
payload = pl.encode("UTF-8"),
fin = frame.get("fin", True),
rsv = frame.get("rsv", 0),
mask = frame.get("mask", None),
payload_len = frame.get("payload_len", None),
chopsize = frame.get("chopsize", None),
sync = frame.get("sync", False))
## send close
##
elif obj[0] == "close":
spec = obj[1]
self.sendClose(spec.get("code", None), spec.get("reason", None))
## echo argument
##
elif obj[0] == "echo":
spec = obj[1]
self.sendFrame(opcode = 1, payload = spec.get("payload", ""), payload_len = spec.get("payload_len", None))
else:
raise Exception("fuzzing peer received unknown command" % obj[0])
class FuzzingFactory:
"""
Common mixin-base class for fuzzing server and client protocol factory.
"""
MAX_CASE_PICKLE_LEN = 1000
def __init__(self, outdir):
self.repeatAgentRowPerSubcategory = True
self.outdir = outdir
self.agents = {}
self.cases = {}
self.resultListeners = {}
def logCase(self, caseResults):
"""
Called from FuzzingProtocol instances when case has been finished to store case results.
"""
agent = caseResults["agent"]
case = caseResults["id"]
## index by agent->case
##
if not self.agents.has_key(agent):
self.agents[agent] = {}
self.agents[agent][case] = caseResults
## index by case->agent
##
if not self.cases.has_key(case):
self.cases[case] = {}
self.cases[case][agent] = caseResults
if (agent, case) in self.resultListeners:
callback = self.resultListeners.pop((agent, case))
callback(caseResults)
def addResultListener(self, agent, caseId, resultsCallback):
if agent in self.agents and caseId in self.agents[agent]:
resultsCallback(self.agents[agent][caseId])
else:
self.resultListeners[(agent,caseId)] = resultsCallback
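# Sketch (illustrative only) of the result-listener hand-off implemented by
# logCase()/addResultListener() above: a "/getCaseStatus" connection registers a
# callback that either fires immediately, if the case has already finished, or
# is parked under the (agent, caseId) key and invoked by logCase() later.
# 'factory', the agent name and the case id below are hypothetical.
#
#   def on_result(caseResults):
#       print caseResults["behavior"]
#   factory.addResultListener("MyClient", "1.1.1", on_result)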
def createReports(self, produceHtml = True, produceJson = True):
"""
Create reports from all data stored for test cases which have been executed.
"""
## create output directory when non-existent
##
if not os.path.exists(self.outdir):
os.makedirs(self.outdir)
## create master report
##
if produceHtml:
self.createMasterReportHTML(self.outdir)
if produceJson:
self.createMasterReportJSON(self.outdir)
## create case detail reports
##
for agentId in self.agents:
for caseId in self.agents[agentId]:
if produceHtml:
self.createAgentCaseReportHTML(agentId, caseId, self.outdir)
if produceJson:
self.createAgentCaseReportJSON(agentId, caseId, self.outdir)
def cleanForFilename(self, str):
"""
Clean a string for use as filename.
"""
s0 = ''.join([c if c in "abcdefghjiklmnopqrstuvwxyz0123456789" else " " for c in str.strip().lower()])
s1 = s0.strip()
s2 = s1.replace(' ', '_')
return s2
def makeAgentCaseReportFilename(self, agentId, caseId, ext):
"""
Create filename for case detail report from agent and case.
"""
c = caseId.replace('.', '_')
return self.cleanForFilename(agentId) + "_case_" + c + "." + ext
def limitString(self, s, limit, indicator = " ..."):
ss = str(s)
if len(ss) > limit - len(indicator):
return ss[:limit - len(indicator)] + indicator
else:
return ss
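# Illustrative example (not executed) of the report-filename helpers above, for
# a hypothetical agent string "Chrome/16.0.912" and case id "1.1.1":
#
#   makeAgentCaseReportFilename("Chrome/16.0.912", "1.1.1", ext = 'html')
#   # cleanForFilename lowercases, maps disallowed characters to spaces and
#   # then replaces spaces with underscores, so this yields
#   # 'chrome_16_0_912_case_1_1_1.html'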
def createMasterReportJSON(self, outdir):
"""
Create report master JSON file.
:param outdir: Directory where to create file.
:type outdir: str
:returns: str -- Name of created file.
"""
res = {}
for agentId in self.agents:
if not res.has_key(agentId):
res[agentId] = {}
for caseId in self.agents[agentId]:
case = self.agents[agentId][caseId]
c = {}
report_filename = self.makeAgentCaseReportFilename(agentId, caseId, ext = 'json')
c["behavior"] = case["behavior"]
c["behaviorClose"] = case["behaviorClose"]
c["remoteCloseCode"] = case["remoteCloseCode"]
c["duration"] = case["duration"]
c["reportfile"] = report_filename
res[agentId][caseId] = c
report_filename = "index.json"
f = open(os.path.join(outdir, report_filename), 'w')
f.write(json.dumps(res, sort_keys = True, indent = 3, separators = (',', ': ')))
f.close()
def createMasterReportHTML(self, outdir):
"""
Create report master HTML file.
:param outdir: Directory where to create file.
:type outdir: str
:returns: str -- Name of created file.
"""
## open report file in create / write-truncate mode
##
report_filename = "index.html"
f = open(os.path.join(outdir, report_filename), 'w')
## write HTML
##
f.write('<!DOCTYPE html>\n')
f.write('<html>\n')
f.write(' <head>\n')
f.write(' <meta charset="utf-8" />\n')
f.write(' <style lang="css">%s</style>\n' % CSS_COMMON)
f.write(' <style lang="css">%s</style>\n' % CSS_MASTER_REPORT)
f.write(' <script language="javascript">%s</script>\n' % JS_MASTER_REPORT % {"agents_cnt": len(self.agents.keys())})
f.write(' </head>\n')
f.write(' <body>\n')
f.write(' <a href="#"><div id="toggle_button" class="unselectable" onclick="toggleClose();">Toggle Details</div></a>\n')
f.write(' <a name="top"></a>\n')
f.write(' <br/>\n')
## top logos
f.write(' <center><a href="http://autobahn.ws/testsuite" title="Autobahn WebSockets Testsuite"><img src="http://autobahn.ws/static/img/ws_protocol_test_report.png" border="0" width="820" height="46" alt="Autobahn WebSockets Testsuite Report"></img></a></center>\n')
f.write(' <center><a href="http://autobahn.ws" title="Autobahn WebSockets"> <img src="http://autobahn.ws/static/img/ws_protocol_test_report_autobahn.png" border="0" width="300" height="68" alt="Autobahn WebSockets"> </img></a></center>\n')
## write report header
##
f.write(' <div id="master_report_header" class="block">\n')
f.write(' <p id="intro">Summary report generated on %s (UTC) by <a href="%s">Autobahn WebSockets Testsuite</a> v%s/v%s.</p>\n' % (utcnow(), "http://autobahn.ws/testsuite", autobahntestsuite.version, autobahn.version))
f.write("""
<table id="case_outcome_desc">
<tr>
<td class="case_ok">Pass</td>
<td class="outcome_desc">Test case was executed and passed successfully.</td>
</tr>
<tr>
<td class="case_non_strict">Non-Strict</td>
<td class="outcome_desc">Test case was executed and passed non-strictly.
A non-strict behavior is one that does not adhere to a SHOULD-behavior as described in the protocol specification or
a well-defined, canonical behavior that appears to be desirable but left open in the protocol specification.
An implementation with non-strict behavior is still conformant to the protocol specification.</td>
</tr>
<tr>
<td class="case_failed">Fail</td>
<td class="outcome_desc">Test case was executed and failed. An implementation which fails a test case - other
than a performance/limits related one - is non-conforming to a MUST-behavior as described in the protocol specification.</td>
</tr>
<tr>
<td class="case_info">Info</td>
<td class="outcome_desc">Informational test case which detects certain implementation behavior left unspecified by the spec
but nevertheless potentially interesting to implementors.</td>
</tr>
<tr>
<td class="case_missing">Missing</td>
<td class="outcome_desc">Test case is missing, either because it was skipped via the test suite configuration
or deactivated, i.e. because the implementation does not implement the tested feature or breaks while running
the test case.</td>
</tr>
</table>
""")
f.write(' </div>\n')
## write big agent/case report table
##
f.write(' <table id="agent_case_results">\n')
## sorted list of agents for which test cases were run
##
agentList = sorted(self.agents.keys())
## create ordered list of case Ids
##
cl = []
for c in Cases:
t = self.CaseSet.caseClasstoIdTuple(c)
cl.append((t, self.CaseSet.caseIdTupletoId(t)))
cl = sorted(cl)
caseList = []
for c in cl:
caseList.append(c[1])
lastCaseCategory = None
lastCaseSubCategory = None
for caseId in caseList:
caseCategoryIndex = caseId.split('.')[0]
caseCategory = CaseCategories.get(caseCategoryIndex, "Misc")
caseSubCategoryIndex = '.'.join(caseId.split('.')[:2])
caseSubCategory = CaseSubCategories.get(caseSubCategoryIndex, None)
## Category/Agents row
##
if caseCategory != lastCaseCategory or (self.repeatAgentRowPerSubcategory and caseSubCategory != lastCaseSubCategory):
f.write(' <tr class="case_category_row">\n')
f.write(' <td class="case_category">%s %s</td>\n' % (caseCategoryIndex, caseCategory))
for agentId in agentList:
f.write(' <td class="agent close_flex" colspan="2">%s</td>\n' % agentId)
f.write(' </tr>\n')
lastCaseCategory = caseCategory
lastCaseSubCategory = None
## Subcategory row
##
if caseSubCategory != lastCaseSubCategory:
f.write(' <tr class="case_subcategory_row">\n')
f.write(' <td class="case_subcategory" colspan="%d">%s %s</td>\n' % (len(agentList) * 2 + 1, caseSubCategoryIndex, caseSubCategory))
f.write(' </tr>\n')
lastCaseSubCategory = caseSubCategory
## Cases row
##
f.write(' <tr class="agent_case_result_row">\n')
f.write(' <td class="case"><a href="#case_desc_%s">Case %s</a></td>\n' % (caseId.replace('.', '_'), caseId))
## Case results
##
for agentId in agentList:
if self.agents[agentId].has_key(caseId):
case = self.agents[agentId][caseId]
if case["behavior"] != Case.UNIMPLEMENTED:
agent_case_report_file = self.makeAgentCaseReportFilename(agentId, caseId, ext = 'html')
if case["behavior"] == Case.OK:
td_text = "Pass"
td_class = "case_ok"
elif case["behavior"] == Case.NON_STRICT:
td_text = "Non-Strict"
td_class = "case_non_strict"
elif case["behavior"] == Case.NO_CLOSE:
td_text = "No Close"
td_class = "case_no_close"
elif case["behavior"] == Case.INFORMATIONAL:
td_text = "Info"
td_class = "case_info"
else:
td_text = "Fail"
td_class = "case_failed"
if case["behaviorClose"] == Case.OK:
ctd_text = "%s" % str(case["remoteCloseCode"])
ctd_class = "case_ok"
elif case["behaviorClose"] == Case.FAILED_BY_CLIENT:
ctd_text = "%s" % str(case["remoteCloseCode"])
ctd_class = "case_almost"
elif case["behaviorClose"] == Case.WRONG_CODE:
ctd_text = "%s" % str(case["remoteCloseCode"])
ctd_class = "case_non_strict"
elif case["behaviorClose"] == Case.UNCLEAN:
ctd_text = "Unclean"
ctd_class = "case_failed"
elif case["behaviorClose"] == Case.INFORMATIONAL:
ctd_text = "%s" % str(case["remoteCloseCode"])
ctd_class = "case_info"
else:
ctd_text = "Fail"
ctd_class = "case_failed"
detail = ""
if case["reportTime"]:
detail += "%d ms" % case["duration"]
if case["reportCompressionRatio"] and case["trafficStats"] is not None:
crIn = case["trafficStats"]["incomingCompressionRatio"]
crOut = case["trafficStats"]["outgoingCompressionRatio"]
detail += " [%s/%s]" % ("%.3f" % crIn if crIn is not None else "-", "%.3f" % crOut if crOut is not None else "-")
if detail != "":
f.write(' <td class="%s"><a href="%s">%s</a><br/><span class="case_duration">%s</span></td><td class="close close_hide %s"><span class="close_code">%s</span></td>\n' % (td_class, agent_case_report_file, td_text, detail, ctd_class, ctd_text))
else:
f.write(' <td class="%s"><a href="%s">%s</a></td><td class="close close_hide %s"><span class="close_code">%s</span></td>\n' % (td_class, agent_case_report_file, td_text, ctd_class, ctd_text))
else:
f.write(' <td class="case_unimplemented close_flex" colspan="2">Unimplemented</td>\n')
else:
f.write(' <td class="case_missing close_flex" colspan="2">Missing</td>\n')
f.write(" </tr>\n")
f.write(" </table>\n")
f.write(" <br/><hr/>\n")
## Case descriptions
##
f.write(' <div id="test_case_descriptions">\n')
for caseId in caseList:
CCase = self.CaseSet.CasesById[caseId]
f.write(' <br/>\n')
f.write(' <a name="case_desc_%s"></a>\n' % caseId.replace('.', '_'))
f.write(' <h2>Case %s</h2>\n' % caseId)
f.write(' <a class="up" href="#top">Up</a>\n')
f.write(' <p class="case_text_block case_desc"><b>Case Description</b><br/><br/>%s</p>\n' % CCase.DESCRIPTION)
f.write(' <p class="case_text_block case_expect"><b>Case Expectation</b><br/><br/>%s</p>\n' % CCase.EXPECTATION)
f.write(' </div>\n')
f.write(" <br/><hr/>\n")
## end of HTML
##
f.write(" </body>\n")
f.write("</html>\n")
## close created HTML file and return filename
##
f.close()
return report_filename
def createAgentCaseReportJSON(self, agentId, caseId, outdir):
"""
Create case detail report JSON file.
:param agentId: ID of agent for which to generate report.
:type agentId: str
:param caseId: ID of case for which to generate report.
:type caseId: str
:param outdir: Directory where to create file.
:type outdir: str
:returns: str -- Name of created file.
"""
if not self.agents.has_key(agentId):
raise Exception("no test data stored for agent %s" % agentId)
if not self.agents[agentId].has_key(caseId):
raise Exception("no test data stored for case %s with agent %s" % (caseId, agentId))
## get case to generate report for
##
case = self.agents[agentId][caseId]
## open report file in create / write-truncate mode
##
report_filename = self.makeAgentCaseReportFilename(agentId, caseId, ext = 'json')
f = open(os.path.join(outdir, report_filename), 'w')
f.write(json.dumps(case, sort_keys = True, indent = 3, separators = (',', ': ')))
f.close()
def createAgentCaseReportHTML(self, agentId, caseId, outdir):
"""
Create case detail report HTML file.
:param agentId: ID of agent for which to generate report.
:type agentId: str
:param caseId: ID of case for which to generate report.
:type caseId: str
:param outdir: Directory where to create file.
:type outdir: str
:returns: str -- Name of created file.
"""
if not self.agents.has_key(agentId):
raise Exception("no test data stored for agent %s" % agentId)
if not self.agents[agentId].has_key(caseId):
raise Exception("no test data stored for case %s with agent %s" % (caseId, agentId))
## get case to generate report for
##
case = self.agents[agentId][caseId]
## open report file in create / write-truncate mode
##
report_filename = self.makeAgentCaseReportFilename(agentId, caseId, ext = 'html')
f = open(os.path.join(outdir, report_filename), 'w')
## write HTML
##
f.write('<!DOCTYPE html>\n')
f.write('<html>\n')
f.write(' <head>\n')
f.write(' <meta charset="utf-8" />\n')
f.write(' <style lang="css">%s</style>\n' % CSS_COMMON)
f.write(' <style lang="css">%s</style>\n' % CSS_DETAIL_REPORT)
f.write(' </head>\n')
f.write(' <body>\n')
f.write(' <a name="top"></a>\n')
f.write(' <br/>\n')
## top logos
f.write(' <center><a href="http://autobahn.ws/testsuite" title="Autobahn WebSockets Testsuite"><img src="http://autobahn.ws/static/img/ws_protocol_test_report.png" border="0" width="820" height="46" alt="Autobahn WebSockets Testsuite Report"></img></a></center>\n')
f.write(' <center><a href="http://autobahn.ws" title="Autobahn WebSockets"> <img src="http://autobahn.ws/static/img/ws_protocol_test_report_autobahn.png" border="0" width="300" height="68" alt="Autobahn WebSockets"> </img></a></center>\n')
f.write(' <br/>\n')
## Case Summary
##
if case["behavior"] == Case.OK:
style = "case_ok"
text = "Pass"
elif case["behavior"] == Case.NON_STRICT:
style = "case_non_strict"
text = "Non-Strict"
elif case["behavior"] == Case.INFORMATIONAL:
style = "case_info"
text = "Informational"
else:
style = "case_failed"
text = "Fail"
f.write(' <p class="case %s">%s - <span style="font-size: 1.3em;"><b>Case %s</b></span> : %s - <span style="font-size: 0.9em;"><b>%d</b> ms @ %s</a></p>\n' % (style, case["agent"], caseId, text, case["duration"], case["started"]))
## Case Description, Expectation, Outcome, Case Closing Behavior
##
f.write(' <p class="case_text_block case_desc"><b>Case Description</b><br/><br/>%s</p>\n' % case["description"])
f.write(' <p class="case_text_block case_expect"><b>Case Expectation</b><br/><br/>%s</p>\n' % case["expectation"])
f.write("""
<p class="case_text_block case_outcome">
<b>Case Outcome</b><br/><br/>%s<br/><br/>
<i>Expected:</i><br/><span class="case_pickle">%s</span><br/><br/>
<i>Observed:</i><br><span class="case_pickle">%s</span>
</p>\n""" % (case.get("result", ""), self.limitString(case.get("expected", ""), FuzzingFactory.MAX_CASE_PICKLE_LEN), self.limitString(case.get("received", ""), FuzzingFactory.MAX_CASE_PICKLE_LEN)))
f.write(' <p class="case_text_block case_closing_beh"><b>Case Closing Behavior</b><br/><br/>%s (%s)</p>\n' % (case.get("resultClose", ""), case.get("behaviorClose", "")))
f.write(" <br/><hr/>\n")
## Opening Handshake
##
f.write(' <h2>Opening Handshake</h2>\n')
f.write(' <pre class="http_dump">%s</pre>\n' % case["httpRequest"].strip())
f.write(' <pre class="http_dump">%s</pre>\n' % case["httpResponse"].strip())
f.write(" <br/><hr/>\n")
## Closing Behavior
##
cbv = [("isServer", "True, iff I (the fuzzer) am a server, and the peer is a client."),
("closedByMe", "True, iff I have initiated closing handshake (that is, did send close first)."),
("failedByMe", "True, iff I have failed the WS connection (i.e. due to protocol error). Failing can be either by initiating closing handshake or brutal drop TCP."),
("droppedByMe", "True, iff I dropped the TCP connection."),
("wasClean", "True, iff full WebSockets closing handshake was performed (close frame sent and received) _and_ the server dropped the TCP (which is its responsibility)."),
("wasNotCleanReason", "When wasClean == False, the reason what happened."),
("wasServerConnectionDropTimeout", "When we are a client, and we expected the server to drop the TCP, but that didn't happen in time, this gets True."),
("wasOpenHandshakeTimeout", "When performing the opening handshake, but the peer did not finish in time, this gets True."),
("wasCloseHandshakeTimeout", "When we initiated a closing handshake, but the peer did not respond in time, this gets True."),
("localCloseCode", "The close code I sent in close frame (if any)."),
("localCloseReason", "The close reason I sent in close frame (if any)."),
("remoteCloseCode", "The close code the peer sent me in close frame (if any)."),
("remoteCloseReason", "The close reason the peer sent me in close frame (if any).")
]
f.write(' <h2>Closing Behavior</h2>\n')
f.write(' <table>\n')
f.write(' <tr class="stats_header"><td>Key</td><td class="left">Value</td><td class="left">Description</td></tr>\n')
for c in cbv:
f.write(' <tr class="stats_row"><td>%s</td><td class="left">%s</td><td class="left">%s</td></tr>\n' % (c[0], case[c[0]], c[1]))
f.write(' </table>')
f.write(" <br/><hr/>\n")
## Wire Statistics
##
f.write(' <h2>Wire Statistics</h2>\n')
if not case["createStats"]:
f.write(' <p style="margin-left: 40px; color: #f00;"><i>Statistics for octets/frames disabled!</i></p>\n')
else:
## octet stats
##
for statdef in [("Received", case["rxOctetStats"]), ("Transmitted", case["txOctetStats"])]:
f.write(' <h3>Octets %s by Chop Size</h3>\n' % statdef[0])
f.write(' <table>\n')
stats = statdef[1]
total_cnt = 0
total_octets = 0
f.write(' <tr class="stats_header"><td>Chop Size</td><td>Count</td><td>Octets</td></tr>\n')
for s in sorted(stats.keys()):
f.write(' <tr class="stats_row"><td>%d</td><td>%d</td><td>%d</td></tr>\n' % (s, stats[s], s * stats[s]))
total_cnt += stats[s]
total_octets += s * stats[s]
f.write(' <tr class="stats_total"><td>Total</td><td>%d</td><td>%d</td></tr>\n' % (total_cnt, total_octets))
f.write(' </table>\n')
## frame stats
##
for statdef in [("Received", case["rxFrameStats"]), ("Transmitted", case["txFrameStats"])]:
f.write(' <h3>Frames %s by Opcode</h3>\n' % statdef[0])
f.write(' <table>\n')
stats = statdef[1]
total_cnt = 0
f.write(' <tr class="stats_header"><td>Opcode</td><td>Count</td></tr>\n')
for s in sorted(stats.keys()):
f.write(' <tr class="stats_row"><td>%d</td><td>%d</td></tr>\n' % (s, stats[s]))
total_cnt += stats[s]
f.write(' <tr class="stats_total"><td>Total</td><td>%d</td></tr>\n' % (total_cnt))
f.write(' </table>\n')
f.write(" <br/><hr/>\n")
## Wire Log
##
f.write(' <h2>Wire Log</h2>\n')
if not case["createWirelog"]:
f.write(' <p style="margin-left: 40px; color: #f00;"><i>Wire log after handshake disabled!</i></p>\n')
f.write(' <div id="wirelog">\n')
wl = case["wirelog"]
i = 0
for t in wl:
if t[0] == "RO":
prefix = "RX OCTETS"
css_class = "wirelog_rx_octets"
elif t[0] == "TO":
prefix = "TX OCTETS"
if t[2]:
css_class = "wirelog_tx_octets_sync"
else:
css_class = "wirelog_tx_octets"
elif t[0] == "RF":
prefix = "RX FRAME "
css_class = "wirelog_rx_frame"
elif t[0] == "TF":
prefix = "TX FRAME "
if t[8] or t[7] is not None:
css_class = "wirelog_tx_frame_sync"
else:
css_class = "wirelog_tx_frame"
elif t[0] in ["CT", "CTE", "KL", "KLE", "TI", "TIE", "WLM"]:
pass
else:
raise Exception("logic error (unrecognized wire log row type %s - row %s)" % (t[0], str(t)))
if t[0] in ["RO", "TO", "RF", "TF"]:
payloadLen = t[1][0]
lines = textwrap.wrap(t[1][1], 100)
if t[0] in ["RO", "TO"]:
if len(lines) > 0:
f.write(' <pre class="%s">%03d %s: %s</pre>\n' % (css_class, i, prefix, lines[0]))
for ll in lines[1:]:
f.write(' <pre class="%s">%s%s</pre>\n' % (css_class, (2+4+len(prefix))*" ", ll))
else:
if t[0] == "RF":
if t[6]:
mmask = binascii.b2a_hex(t[6])
else:
mmask = str(t[6])
f.write(' <pre class="%s">%03d %s: OPCODE=%s, FIN=%s, RSV=%s, PAYLOAD-LEN=%s, MASKED=%s, MASK=%s</pre>\n' % (css_class, i, prefix, str(t[2]), str(t[3]), str(t[4]), payloadLen, str(t[5]), mmask))
elif t[0] == "TF":
f.write(' <pre class="%s">%03d %s: OPCODE=%s, FIN=%s, RSV=%s, PAYLOAD-LEN=%s, MASK=%s, PAYLOAD-REPEAT-LEN=%s, CHOPSIZE=%s, SYNC=%s</pre>\n' % (css_class, i, prefix, str(t[2]), str(t[3]), str(t[4]), payloadLen, str(t[5]), str(t[6]), str(t[7]), str(t[8])))
else:
raise Exception("logic error")
for ll in lines:
f.write(' <pre class="%s">%s%s</pre>\n' % (css_class, (2+4+len(prefix))*" ", ll.encode('utf8')))
elif t[0] == "WLM":
if t[1]:
f.write(' <pre class="wirelog_delay">%03d WIRELOG ENABLED</pre>\n' % (i))
else:
f.write(' <pre class="wirelog_delay">%03d WIRELOG DISABLED</pre>\n' % (i))
elif t[0] == "CT":
f.write(' <pre class="wirelog_delay">%03d DELAY %f sec for TAG %s</pre>\n' % (i, t[1], t[2]))
elif t[0] == "CTE":
f.write(' <pre class="wirelog_delay">%03d DELAY TIMEOUT on TAG %s</pre>\n' % (i, t[1]))
elif t[0] == "KL":
f.write(' <pre class="wirelog_kill_after">%03d FAIL CONNECTION AFTER %f sec</pre>\n' % (i, t[1]))
elif t[0] == "KLE":
f.write(' <pre class="wirelog_kill_after">%03d FAILING CONNECTION</pre>\n' % (i))
elif t[0] == "TI":
f.write(' <pre class="wirelog_kill_after">%03d CLOSE CONNECTION AFTER %f sec</pre>\n' % (i, t[1]))
elif t[0] == "TIE":
f.write(' <pre class="wirelog_kill_after">%03d CLOSING CONNECTION</pre>\n' % (i))
else:
raise Exception("logic error (unrecognized wire log row type %s - row %s)" % (t[0], str(t)))
i += 1
if case["droppedByMe"]:
f.write(' <pre class="wirelog_tcp_closed_by_me">%03d TCP DROPPED BY ME</pre>\n' % i)
else:
f.write(' <pre class="wirelog_tcp_closed_by_peer">%03d TCP DROPPED BY PEER</pre>\n' % i)
f.write(' </div>\n')
f.write(" <br/><hr/>\n")
## end of HTML
##
f.write(" </body>\n")
f.write("</html>\n")
## close created HTML file and return filename
##
f.close()
return report_filename
class FuzzingServerProtocol(FuzzingProtocol, WebSocketServerProtocol):
def connectionMade(self):
WebSocketServerProtocol.connectionMade(self)
FuzzingProtocol.connectionMade(self)
def connectionLost(self, reason):
WebSocketServerProtocol.connectionLost(self, reason)
FuzzingProtocol.connectionLost(self, reason)
def onConnect(self, connectionRequest):
if self.debug:
log.msg("connection received from %s speaking WebSockets protocol %d - upgrade request for host '%s', path '%s', params %s, origin '%s', protocols %s, headers %s" % (connectionRequest.peerstr, connectionRequest.version, connectionRequest.host, connectionRequest.path, str(connectionRequest.params), connectionRequest.origin, str(connectionRequest.protocols), str(connectionRequest.headers)))
if connectionRequest.params.has_key("agent"):
if len(connectionRequest.params["agent"]) > 1:
raise Exception("multiple agents specified")
self.caseAgent = connectionRequest.params["agent"][0]
else:
#raise Exception("no agent specified")
self.caseAgent = None
if connectionRequest.params.has_key("case"):
if len(connectionRequest.params["case"]) > 1:
raise Exception("multiple test cases specified")
try:
self.case = int(connectionRequest.params["case"][0])
except:
raise Exception("invalid test case ID %s" % connectionRequest.params["case"][0])
if self.case:
if self.case >= 1 and self.case <= len(self.factory.specCases):
self.Case = self.factory.CaseSet.CasesById[self.factory.specCases[self.case - 1]]
if connectionRequest.path == "/runCase":
self.runCase = self.Case(self)
else:
raise Exception("case %s not found" % self.case)
if connectionRequest.path == "/runCase":
if not self.runCase:
raise Exception("need case to run")
if not self.caseAgent:
raise Exception("need agent to run case")
self.caseStarted = utcnow()
print "Running test case ID %s for agent %s from peer %s" % (self.factory.CaseSet.caseClasstoId(self.Case), self.caseAgent, connectionRequest.peerstr)
elif connectionRequest.path == "/updateReports":
if not self.caseAgent:
raise Exception("need agent to update reports for")
print "Updating reports, requested by peer %s" % connectionRequest.peerstr
elif connectionRequest.path == "/getCaseInfo":
if not self.Case:
raise Exception("need case to get info")
elif connectionRequest.path == "/getCaseStatus":
if not self.Case:
raise Exception("need case to get status")
if not self.caseAgent:
raise Exception("need agent to get status")
elif connectionRequest.path == "/getCaseCount":
pass
else:
print "Entering direct command mode for peer %s" % connectionRequest.peerstr
self.path = connectionRequest.path
return None
class FuzzingServerFactory(FuzzingFactory, WebSocketServerFactory):
protocol = FuzzingServerProtocol
def __init__(self, spec, debug = False):
WebSocketServerFactory.__init__(self, debug = debug, debugCodePaths = debug)
FuzzingFactory.__init__(self, spec.get("outdir", "./reports/clients/"))
# needed for wire log / stats
self.logOctets = True
self.logFrames = True
## WebSocket session parameters
##
self.setSessionParameters(url = spec["url"],
protocols = spec.get("protocols", []),
server = "AutobahnTestSuite/%s-%s" % (autobahntestsuite.version, autobahn.version))
## WebSocket protocol options
##
self.setProtocolOptions(**spec.get("options", {}))
self.spec = spec
self.CaseSet = CaseSet(CaseBasename, Cases, CaseCategories, CaseSubCategories)
self.specCases = self.CaseSet.parseSpecCases(self.spec)
self.specExcludeAgentCases = self.CaseSet.parseExcludeAgentCases(self.spec)
print "Autobahn WebSockets %s/%s Fuzzing Server (Port %d%s)" % (autobahntestsuite.version, autobahn.version, self.port, ' TLS' if self.isSecure else '')
print "Ok, will run %d test cases for any clients connecting" % len(self.specCases)
print "Cases = %s" % str(self.specCases)
class FuzzingClientProtocol(FuzzingProtocol, WebSocketClientProtocol):
def connectionMade(self):
FuzzingProtocol.connectionMade(self)
WebSocketClientProtocol.connectionMade(self)
self.caseStarted = utcnow()
print "Running test case ID %s for agent %s from peer %s" % (self.factory.CaseSet.caseClasstoId(self.Case), self.caseAgent, self.peerstr)
def connectionLost(self, reason):
WebSocketClientProtocol.connectionLost(self, reason)
FuzzingProtocol.connectionLost(self, reason)
class FuzzingClientFactory(FuzzingFactory, WebSocketClientFactory):
protocol = FuzzingClientProtocol
def __init__(self, spec, debug = False):
WebSocketClientFactory.__init__(self, debug = debug, debugCodePaths = debug)
FuzzingFactory.__init__(self, spec.get("outdir", "./reports/servers/"))
# needed for wire log / stats
self.logOctets = True
self.logFrames = True
self.spec = spec
self.CaseSet = CaseSet(CaseBasename, Cases, CaseCategories, CaseSubCategories)
self.specCases = self.CaseSet.parseSpecCases(self.spec)
self.specExcludeAgentCases = self.CaseSet.parseExcludeAgentCases(self.spec)
print "Autobahn Fuzzing WebSocket Client (Autobahn Version %s / Autobahn Testsuite Version %s)" % (autobahntestsuite.version, autobahn.version)
print "Ok, will run %d test cases against %d servers" % (len(self.specCases), len(spec["servers"]))
print "Cases = %s" % str(self.specCases)
print "Servers = %s" % str([x["url"] + "@" + x["agent"] for x in spec["servers"]])
self.currServer = -1
if self.nextServer():
if self.nextCase():
connectWS(self)
def buildProtocol(self, addr):
proto = FuzzingClientProtocol()
proto.factory = self
proto.caseAgent = self.agent
proto.case = self.currentCaseIndex
proto.Case = Cases[self.currentCaseIndex - 1]
proto.runCase = proto.Case(proto)
return proto
def nextServer(self):
self.currSpecCase = -1
self.currServer += 1
if self.currServer < len(self.spec["servers"]):
## run tests for next server
##
server = self.spec["servers"][self.currServer]
## agent (=server) string for reports
##
self.agent = server.get("agent", "UnknownServer")
## WebSocket session parameters
##
self.setSessionParameters(url = server["url"],
origin = server.get("origin", None),
protocols = server.get("protocols", []),
useragent = "AutobahnTestSuite/%s-%s" % (autobahntestsuite.version, autobahn.version))
## WebSocket protocol options
##
self.resetProtocolOptions() # reset to defaults
self.setProtocolOptions(**self.spec.get("options", {})) # set spec global options
self.setProtocolOptions(**server.get("options", {})) # set server specific options
return True
else:
return False
def nextCase(self):
self.currSpecCase += 1
if self.currSpecCase < len(self.specCases):
self.currentCaseId = self.specCases[self.currSpecCase]
self.currentCaseIndex = self.CaseSet.CasesIndices[self.currentCaseId]
return True
else:
return False
def clientConnectionLost(self, connector, reason):
if self.nextCase():
connector.connect()
else:
if self.nextServer():
if self.nextCase():
connectWS(self)
else:
self.createReports()
reactor.stop()
def clientConnectionFailed(self, connector, reason):
print "Connection to %s failed (%s)" % (self.spec["servers"][self.currServer]["url"], reason.getErrorMessage())
if self.nextServer():
if self.nextCase():
connectWS(self)
else:
self.createReports()
reactor.stop()
|
|
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from pyspark import keyword_only
from pyspark.sql import DataFrame
from pyspark.ml.util import *
from pyspark.ml.wrapper import JavaEstimator, JavaModel, JavaParams
from pyspark.ml.param.shared import *
__all__ = ["FPGrowth", "FPGrowthModel", "PrefixSpan"]
class _FPGrowthParams(HasPredictionCol):
"""
Params for :py:class:`FPGrowth` and :py:class:`FPGrowthModel`.
.. versionadded:: 3.0.0
"""
itemsCol = Param(Params._dummy(), "itemsCol",
"items column name", typeConverter=TypeConverters.toString)
minSupport = Param(
Params._dummy(),
"minSupport",
"Minimal support level of the frequent pattern. [0.0, 1.0]. " +
"Any pattern that appears more than (minSupport * size-of-the-dataset) " +
"times will be output in the frequent itemsets.",
typeConverter=TypeConverters.toFloat)
numPartitions = Param(
Params._dummy(),
"numPartitions",
"Number of partitions (at least 1) used by parallel FP-growth. " +
"By default the param is not set, " +
"and partition number of the input dataset is used.",
typeConverter=TypeConverters.toInt)
minConfidence = Param(
Params._dummy(),
"minConfidence",
"Minimal confidence for generating Association Rule. [0.0, 1.0]. " +
"minConfidence will not affect the mining for frequent itemsets, " +
"but will affect the association rules generation.",
typeConverter=TypeConverters.toFloat)
def __init__(self, *args):
super(_FPGrowthParams, self).__init__(*args)
self._setDefault(minSupport=0.3, minConfidence=0.8,
itemsCol="items", predictionCol="prediction")
def getItemsCol(self):
"""
Gets the value of itemsCol or its default value.
"""
return self.getOrDefault(self.itemsCol)
def getMinSupport(self):
"""
Gets the value of minSupport or its default value.
"""
return self.getOrDefault(self.minSupport)
def getNumPartitions(self):
"""
Gets the value of :py:attr:`numPartitions` or its default value.
"""
return self.getOrDefault(self.numPartitions)
def getMinConfidence(self):
"""
Gets the value of minConfidence or its default value.
"""
return self.getOrDefault(self.minConfidence)
class FPGrowthModel(JavaModel, _FPGrowthParams, JavaMLWritable, JavaMLReadable):
"""
Model fitted by FPGrowth.
.. versionadded:: 2.2.0
"""
@since("3.0.0")
def setItemsCol(self, value):
"""
Sets the value of :py:attr:`itemsCol`.
"""
return self._set(itemsCol=value)
@since("3.0.0")
def setMinConfidence(self, value):
"""
Sets the value of :py:attr:`minConfidence`.
"""
return self._set(minConfidence=value)
@since("3.0.0")
def setPredictionCol(self, value):
"""
Sets the value of :py:attr:`predictionCol`.
"""
return self._set(predictionCol=value)
@property
@since("2.2.0")
def freqItemsets(self):
"""
DataFrame with two columns:
* `items` - Itemset of the same type as the input column.
* `freq` - Frequency of the itemset (`LongType`).
"""
return self._call_java("freqItemsets")
@property
@since("2.2.0")
def associationRules(self):
"""
DataFrame with four columns:
* `antecedent` - Array of the same type as the input column.
* `consequent` - Array of the same type as the input column.
* `confidence` - Confidence for the rule (`DoubleType`).
* `lift` - Lift for the rule (`DoubleType`).
"""
return self._call_java("associationRules")
class FPGrowth(JavaEstimator, _FPGrowthParams, JavaMLWritable, JavaMLReadable):
r"""
A parallel FP-growth algorithm to mine frequent itemsets. The algorithm is described in
Li et al., PFP: Parallel FP-Growth for Query Recommendation [LI2008]_.
PFP distributes computation in such a way that each worker executes an
independent group of mining tasks. The FP-Growth algorithm is described in
Han et al., Mining frequent patterns without candidate generation [HAN2000]_
.. [LI2008] https://doi.org/10.1145/1454008.1454027
.. [HAN2000] https://doi.org/10.1145/335191.335372
.. note:: null values in the feature column are ignored during fit().
.. note:: Internally `transform` `collects` and `broadcasts` association rules.
>>> from pyspark.sql.functions import split
>>> data = (spark.read
... .text("data/mllib/sample_fpgrowth.txt")
... .select(split("value", "\s+").alias("items")))
>>> data.show(truncate=False)
+------------------------+
|items |
+------------------------+
|[r, z, h, k, p] |
|[z, y, x, w, v, u, t, s]|
|[s, x, o, n, r] |
|[x, z, y, m, t, s, q, e]|
|[z] |
|[x, z, y, r, q, t, p] |
+------------------------+
...
>>> fp = FPGrowth(minSupport=0.2, minConfidence=0.7)
>>> fpm = fp.fit(data)
>>> fpm.setPredictionCol("newPrediction")
FPGrowthModel...
>>> fpm.freqItemsets.show(5)
+---------+----+
| items|freq|
+---------+----+
| [s]| 3|
| [s, x]| 3|
|[s, x, z]| 2|
| [s, z]| 2|
| [r]| 3|
+---------+----+
only showing top 5 rows
...
>>> fpm.associationRules.show(5)
+----------+----------+----------+----+------------------+
|antecedent|consequent|confidence|lift| support|
+----------+----------+----------+----+------------------+
| [t, s]| [y]| 1.0| 2.0|0.3333333333333333|
| [t, s]| [x]| 1.0| 1.5|0.3333333333333333|
| [t, s]| [z]| 1.0| 1.2|0.3333333333333333|
| [p]| [r]| 1.0| 2.0|0.3333333333333333|
| [p]| [z]| 1.0| 1.2|0.3333333333333333|
+----------+----------+----------+----+------------------+
only showing top 5 rows
...
>>> new_data = spark.createDataFrame([(["t", "s"], )], ["items"])
>>> sorted(fpm.transform(new_data).first().newPrediction)
['x', 'y', 'z']
>>> model_path = temp_path + "/fpm_model"
>>> fpm.save(model_path)
>>> model2 = FPGrowthModel.load(model_path)
>>> fpm.transform(data).take(1) == model2.transform(data).take(1)
True
.. versionadded:: 2.2.0
"""
@keyword_only
def __init__(self, minSupport=0.3, minConfidence=0.8, itemsCol="items",
predictionCol="prediction", numPartitions=None):
"""
__init__(self, minSupport=0.3, minConfidence=0.8, itemsCol="items", \
predictionCol="prediction", numPartitions=None)
"""
super(FPGrowth, self).__init__()
self._java_obj = self._new_java_obj("org.apache.spark.ml.fpm.FPGrowth", self.uid)
kwargs = self._input_kwargs
self.setParams(**kwargs)
@keyword_only
@since("2.2.0")
def setParams(self, minSupport=0.3, minConfidence=0.8, itemsCol="items",
predictionCol="prediction", numPartitions=None):
"""
setParams(self, minSupport=0.3, minConfidence=0.8, itemsCol="items", \
predictionCol="prediction", numPartitions=None)
"""
kwargs = self._input_kwargs
return self._set(**kwargs)
def setItemsCol(self, value):
"""
Sets the value of :py:attr:`itemsCol`.
"""
return self._set(itemsCol=value)
def setMinSupport(self, value):
"""
Sets the value of :py:attr:`minSupport`.
"""
return self._set(minSupport=value)
def setNumPartitions(self, value):
"""
Sets the value of :py:attr:`numPartitions`.
"""
return self._set(numPartitions=value)
def setMinConfidence(self, value):
"""
Sets the value of :py:attr:`minConfidence`.
"""
return self._set(minConfidence=value)
def setPredictionCol(self, value):
"""
Sets the value of :py:attr:`predictionCol`.
"""
return self._set(predictionCol=value)
def _create_model(self, java_model):
return FPGrowthModel(java_model)
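# A minimal pure-Python sketch (not part of the Spark API; the helper names are
# illustrative only) of two relationships visible in the FPGrowth doctest above,
# using its numbers: 6 transactions, rule [t, s] => [y] with confidence 1.0,
# lift 2.0 and support 0.333.
def _illustrate_lift(confidence, consequent_support):
    """lift = confidence / support(consequent), e.g. 1.0 / (3 / 6.0) == 2.0."""
    return confidence / consequent_support


def _illustrate_prediction(items, rules):
    """Collect consequents of rules whose antecedent is contained in `items`,
    dropping items already present -- this mirrors the ['x', 'y', 'z'] prediction
    produced for ['t', 's'] in the doctest above."""
    predicted = set()
    for antecedent, consequent in rules:
        if set(antecedent) <= set(items):
            predicted.update(consequent)
    return sorted(predicted - set(items))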
class PrefixSpan(JavaParams):
"""
A parallel PrefixSpan algorithm to mine frequent sequential patterns.
The PrefixSpan algorithm is described in J. Pei, et al., PrefixSpan: Mining Sequential Patterns
Efficiently by Prefix-Projected Pattern Growth
(see <a href="https://doi.org/10.1109/ICDE.2001.914830">here</a>).
This class is not yet an Estimator/Transformer; use the
:py:func:`findFrequentSequentialPatterns` method to run the PrefixSpan algorithm.
@see <a href="https://en.wikipedia.org/wiki/Sequential_Pattern_Mining">Sequential Pattern Mining
(Wikipedia)</a>
>>> from pyspark.ml.fpm import PrefixSpan
>>> from pyspark.sql import Row
>>> df = sc.parallelize([Row(sequence=[[1, 2], [3]]),
... Row(sequence=[[1], [3, 2], [1, 2]]),
... Row(sequence=[[1, 2], [5]]),
... Row(sequence=[[6]])]).toDF()
>>> prefixSpan = PrefixSpan()
>>> prefixSpan.getMaxLocalProjDBSize()
32000000
>>> prefixSpan.getSequenceCol()
'sequence'
>>> prefixSpan.setMinSupport(0.5)
PrefixSpan...
>>> prefixSpan.setMaxPatternLength(5)
PrefixSpan...
>>> prefixSpan.findFrequentSequentialPatterns(df).sort("sequence").show(truncate=False)
+----------+----+
|sequence |freq|
+----------+----+
|[[1]] |3 |
|[[1], [3]]|2 |
|[[2]] |3 |
|[[2, 1]] |3 |
|[[3]] |2 |
+----------+----+
...
.. versionadded:: 2.4.0
"""
minSupport = Param(Params._dummy(), "minSupport", "The minimal support level of the " +
"sequential pattern. Sequential pattern that appears more than " +
"(minSupport * size-of-the-dataset) times will be output. Must be >= 0.",
typeConverter=TypeConverters.toFloat)
maxPatternLength = Param(Params._dummy(), "maxPatternLength",
"The maximal length of the sequential pattern. Must be > 0.",
typeConverter=TypeConverters.toInt)
maxLocalProjDBSize = Param(Params._dummy(), "maxLocalProjDBSize",
"The maximum number of items (including delimiters used in the " +
"internal storage format) allowed in a projected database before " +
"local processing. If a projected database exceeds this size, " +
"another iteration of distributed prefix growth is run. " +
"Must be > 0.",
typeConverter=TypeConverters.toInt)
sequenceCol = Param(Params._dummy(), "sequenceCol", "The name of the sequence column in " +
"dataset, rows with nulls in this column are ignored.",
typeConverter=TypeConverters.toString)
@keyword_only
def __init__(self, minSupport=0.1, maxPatternLength=10, maxLocalProjDBSize=32000000,
sequenceCol="sequence"):
"""
__init__(self, minSupport=0.1, maxPatternLength=10, maxLocalProjDBSize=32000000, \
sequenceCol="sequence")
"""
super(PrefixSpan, self).__init__()
self._java_obj = self._new_java_obj("org.apache.spark.ml.fpm.PrefixSpan", self.uid)
self._setDefault(minSupport=0.1, maxPatternLength=10, maxLocalProjDBSize=32000000,
sequenceCol="sequence")
kwargs = self._input_kwargs
self.setParams(**kwargs)
@keyword_only
@since("2.4.0")
def setParams(self, minSupport=0.1, maxPatternLength=10, maxLocalProjDBSize=32000000,
sequenceCol="sequence"):
"""
setParams(self, minSupport=0.1, maxPatternLength=10, maxLocalProjDBSize=32000000, \
sequenceCol="sequence")
"""
kwargs = self._input_kwargs
return self._set(**kwargs)
@since("3.0.0")
def setMinSupport(self, value):
"""
Sets the value of :py:attr:`minSupport`.
"""
return self._set(minSupport=value)
@since("3.0.0")
def getMinSupport(self):
"""
Gets the value of minSupport or its default value.
"""
return self.getOrDefault(self.minSupport)
@since("3.0.0")
def setMaxPatternLength(self, value):
"""
Sets the value of :py:attr:`maxPatternLength`.
"""
return self._set(maxPatternLength=value)
@since("3.0.0")
def getMaxPatternLength(self):
"""
Gets the value of maxPatternLength or its default value.
"""
return self.getOrDefault(self.maxPatternLength)
@since("3.0.0")
def setMaxLocalProjDBSize(self, value):
"""
Sets the value of :py:attr:`maxLocalProjDBSize`.
"""
return self._set(maxLocalProjDBSize=value)
@since("3.0.0")
def getMaxLocalProjDBSize(self):
"""
Gets the value of maxLocalProjDBSize or its default value.
"""
return self.getOrDefault(self.maxLocalProjDBSize)
@since("3.0.0")
def setSequenceCol(self, value):
"""
Sets the value of :py:attr:`sequenceCol`.
"""
return self._set(sequenceCol=value)
@since("3.0.0")
def getSequenceCol(self):
"""
Gets the value of sequenceCol or its default value.
"""
return self.getOrDefault(self.sequenceCol)
@since("2.4.0")
def findFrequentSequentialPatterns(self, dataset):
"""
Finds the complete set of frequent sequential patterns in the input sequences of itemsets.
:param dataset: A dataframe containing a sequence column which is
`ArrayType(ArrayType(T))` type, where T is the item type for the input dataset.
:return: A `DataFrame` that contains columns of sequence and corresponding frequency.
The schema of it will be:
- `sequence: ArrayType(ArrayType(T))` (T is the item type)
- `freq: Long`
.. versionadded:: 2.4.0
"""
self._transfer_params_to_java()
jdf = self._java_obj.findFrequentSequentialPatterns(dataset._jdf)
return DataFrame(jdf, dataset.sql_ctx)
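# Illustrative helper (not part of the Spark API): the absolute frequency threshold
# implied by minSupport, assuming a pattern is kept once its frequency reaches
# ceil(minSupport * number-of-sequences). This matches the doctest above, where
# minSupport=0.5 over 4 sequences keeps patterns with freq >= 2.
def _min_sequence_count(min_support, num_sequences):
    """For example, _min_sequence_count(0.5, 4) returns 2."""
    import math
    return int(math.ceil(min_support * num_sequences))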
if __name__ == "__main__":
import doctest
import pyspark.ml.fpm
from pyspark.sql import SparkSession
globs = pyspark.ml.fpm.__dict__.copy()
# The small batch size here ensures that we see multiple batches,
# even in these small test examples:
spark = SparkSession.builder\
.master("local[2]")\
.appName("ml.fpm tests")\
.getOrCreate()
sc = spark.sparkContext
globs['sc'] = sc
globs['spark'] = spark
import tempfile
temp_path = tempfile.mkdtemp()
globs['temp_path'] = temp_path
try:
(failure_count, test_count) = doctest.testmod(globs=globs, optionflags=doctest.ELLIPSIS)
spark.stop()
finally:
from shutil import rmtree
try:
rmtree(temp_path)
except OSError:
pass
if failure_count:
sys.exit(-1)
|
|
# Block Breaker Game
# Chapter 9
import sys, time, random, math, pygame
from pygame.locals import *
from MyLibrary import *
levels = (
(1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1),
(2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
2, 0, 0, 2, 2, 2, 2, 2, 2, 0, 0, 2,
2, 0, 0, 2, 2, 2, 2, 2, 2, 0, 0, 2,
2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
2, 0, 0, 2, 2, 2, 2, 2, 2, 0, 0, 2,
2, 0, 0, 2, 2, 2, 2, 2, 2, 0, 0, 2,
2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2),
(3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
3, 3, 0, 0, 0, 3, 3, 0, 0, 0, 3, 3,
3, 3, 0, 0, 0, 3, 3, 0, 0, 0, 3, 3,
3, 3, 0, 0, 0, 3, 3, 0, 0, 0, 3, 3,
3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
3, 3, 0, 0, 0, 3, 3, 0, 0, 0, 3, 3,
3, 3, 0, 0, 0, 3, 3, 0, 0, 0, 3, 3,
3, 3, 0, 0, 0, 3, 3, 0, 0, 0, 3, 3,
3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3),
)
# this function increments the level
def goto_next_level():
global level, levels
level += 1
if level > len(levels) - 1: level = 0
load_level()
# this function updates the blocks in play
def update_blocks():
global block_group, waiting
if len(block_group) == 0: # all blocks gone?
goto_next_level()
waiting = True
block_group.update(ticks, 50)
# this function sets up the blocks for the level
def load_level():
global level, block, block_image, block_group, levels
block_image = pygame.image.load("blocks.png").convert_alpha()
block_group.empty() # reset block group
for bx in range(0, 12):
for by in range(0, 10):
block = MySprite()
block.set_image(block_image, 58, 28, 4)
x = 40 + bx * (block.frame_width + 1)
y = 60 + by * (block.frame_height + 1)
block.position = x, y
# read blocks from level data
num = levels[level][by * 12 + bx]
block.first_frame = num - 1
block.last_frame = num - 1
if num > 0: # 0 is blank
block_group.add(block)
print(len(block_group))
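# Standalone sketch (not used by the game itself): the flat-index arithmetic in
# load_level() above. Each 12 x 10 level is a one-dimensional tuple stored in
# row-major order, so grid cell (bx, by) lives at index by * 12 + bx.
def flat_index(bx, by, columns=12):
    """Return the 1-D tuple index of grid cell (bx, by), e.g. flat_index(5, 4) == 53."""
    return by * columns + bx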
# this function initializes the game
def game_init():
global screen, font, timer
global paddle_group, block_group, ball_group
global paddle, block_image, block, ball
pygame.init()
screen = pygame.display.set_mode((800, 600))
pygame.display.set_caption("Block Breaker Game")
font = pygame.font.Font(None, 36)
pygame.mouse.set_visible(False)
timer = pygame.time.Clock()
# create sprite groups
paddle_group = pygame.sprite.Group()
block_group = pygame.sprite.Group()
ball_group = pygame.sprite.Group()
# create the paddle sprite
paddle = MySprite()
paddle.load("paddle.png")
paddle.position = 400, 540
paddle_group.add(paddle)
# create ball sprite
ball = MySprite()
ball.load("ball.png")
ball.position = 400, 300
ball_group.add(ball)
# this function moves the paddle
def move_paddle():
global movex, movey, keys, waiting
paddle_group.update(ticks, 50)
if keys[K_SPACE]:
if waiting:
waiting = False
reset_ball()
elif keys[K_LEFT]:
paddle.velocity.x = -10.0
elif keys[K_RIGHT]:
paddle.velocity.x = 10.0
else:
if movex < -2:
paddle.velocity.x = movex
elif movex > 2:
paddle.velocity.x = movex
else:
paddle.velocity.x = 0
paddle.X += paddle.velocity.x
if paddle.X < 0:
paddle.X = 0
elif paddle.X > 710:
paddle.X = 710
# this function resets the ball's velocity
def reset_ball():
ball.velocity = Point(4.5, -7.0)
# this function moves the ball
def move_ball():
global waiting, ball, game_over, lives
# move the ball
ball_group.update(ticks, 50)
if waiting:
ball.X = paddle.X + 40
ball.Y = paddle.Y - 20
ball.X += ball.velocity.x
ball.Y += ball.velocity.y
if ball.X < 0:
ball.X = 0
ball.velocity.x *= -1
elif ball.X > 780:
ball.X = 780
ball.velocity.x *= -1
if ball.Y < 0:
ball.Y = 0
ball.velocity.y *= -1
elif ball.Y > 580: # missed paddle
waiting = True
lives -= 1
if lives < 1: game_over = True
# this function tests for collision between ball and paddle
def collision_ball_paddle():
if pygame.sprite.collide_rect(ball, paddle):
ball.velocity.y = -abs(ball.velocity.y)
bx = ball.X + 8
by = ball.Y + 8
px = paddle.X + paddle.frame_width / 2
py = paddle.Y + paddle.frame_height / 2
if bx < px: # left side of paddle?
ball.velocity.x = -abs(ball.velocity.x)
else: # right side of paddle?
ball.velocity.x = abs(ball.velocity.x)
# this function tests for collision between ball and blocks
def collision_ball_blocks():
global score, block_group, ball
hit_block = pygame.sprite.spritecollideany(ball, block_group)
if hit_block is not None:
score += 10
block_group.remove(hit_block)
bx = ball.X + 8
by = ball.Y + 8
# hit middle of block from above or below?
if hit_block.X + 5 < bx < hit_block.X + hit_block.frame_width - 5:
if by < hit_block.Y + hit_block.frame_height / 2: # above?
ball.velocity.y = -abs(ball.velocity.y)
else: # below?
ball.velocity.y = abs(ball.velocity.y)
# hit left side of block?
elif bx < hit_block.X + 5:
ball.velocity.x = -abs(ball.velocity.x)
# hit right side of block?
elif bx > hit_block.X + hit_block.frame_width - 5:
ball.velocity.x = abs(ball.velocity.x)
# handle any other situation
else:
ball.velocity.y *= -1
# main program begins
game_init()
game_over = False
waiting = True
score = 0
lives = 3
level = 0
movex = movey = 0 # mouse motion deltas, initialized so move_paddle() can run before any MOUSEMOTION event
load_level()
# repeating loop
while True:
timer.tick(30)
ticks = pygame.time.get_ticks()
# handle events
for event in pygame.event.get():
if event.type == QUIT:
sys.exit()
elif event.type == MOUSEMOTION:
movex, movey = event.rel
elif event.type == MOUSEBUTTONUP:
if waiting:
waiting = False
reset_ball()
elif event.type == KEYUP:
if event.key == K_RETURN: goto_next_level()
# handle key presses
keys = pygame.key.get_pressed()
if keys[K_ESCAPE]: sys.exit()
# do updates
if not game_over:
update_blocks()
move_paddle()
move_ball()
collision_ball_paddle()
collision_ball_blocks()
# do drawing
screen.fill((50, 50, 100))
block_group.draw(screen)
ball_group.draw(screen)
paddle_group.draw(screen)
print_text(font, 0, 0, "SCORE " + str(score))
print_text(font, 200, 0, "LEVEL " + str(level + 1))
print_text(font, 400, 0, "BLOCKS " + str(len(block_group)))
print_text(font, 670, 0, "BALLS " + str(lives))
if game_over:
print_text(font, 300, 380, "G A M E O V E R")
pygame.display.update()
|
|
#!/usr/bin/python
'''
The MIT License (MIT)
Copyright (c) 2016 Charles Lin
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
'''
#Main run script for processing of THMYCN data
#folder is changed to prevent any horrific genome clashing
#See README for additional information on downloading and installing dependencies
#==========================================================================
#=============================DEPENDENCIES=================================
#==========================================================================
import sys, os
# Get the script's full local path
whereAmI = os.path.dirname(os.path.realpath(__file__))
pipeline_dir = '/storage/cylin/home/cl6/pipeline/'
sys.path.append(whereAmI)
sys.path.append(pipeline_dir)
import pipeline_dfci
import utils
import string
import numpy
import re
from collections import defaultdict
import subprocess
#==========================================================================
#============================PARAMETERS====================================
#==========================================================================
projectName = 'mycn'
genome ='mm9'
annotFile = '%s/annotation/%s_refseq.ucsc' % (pipeline_dir,genome)
#project folders
projectFolder = '/storage/cylin/grail/projects/mycn_resub/%s/thmycn/' % (projectName) #PATH TO YOUR PROJECT FOLDER
hg19_projectFolder ='/storage/cylin/grail/projects/mycn_resub/%s/' % (projectName) #PATH TO YOUR PROJECT FOLDER
projectFolder = utils.formatFolder(projectFolder,True)
#standard folder names
gffFolder ='%sgff_mm9/' % (projectFolder)
macsFolder = '%smacsFolder_mm9/' % (projectFolder)
macsEnrichedFolder = '%smacsEnriched_mm9/' % (projectFolder)
mappedEnrichedFolder = '%smappedEnriched_mm9/' % (projectFolder)
mappedFolder = '%smappedFolder_mm9/' % (projectFolder)
wiggleFolder = '%swiggles_mm9/' % (projectFolder)
metaFolder = '%smeta_mm9/' % (projectFolder)
metaRoseFolder = '%smeta_rose_mm9/' % (projectFolder)
roseFolder = '%srose_mm9/' % (projectFolder)
fastaFolder = '%sfasta_mm9/' % (projectFolder)
figuresFolder = '%sfigures_mm9/' % (projectFolder)
geneListFolder = '%sgeneListFolder_mm9/' % (projectFolder)
bedFolder = '%sbeds_mm9/' % (projectFolder)
signalFolder = '%ssignalTables_mm9/' % (projectFolder)
tableFolder = '%stables_mm9/' % (projectFolder)
genePlotFolder = '%sgene_plot_mm9/' % (projectFolder)
#mask Files
#genomeDirectory
genomeDirectory = '/grail/genomes/Mus_musculus/UCSC/mm9/Sequence/Chromosomes/'
#making folders
folderList = [gffFolder,macsFolder,macsEnrichedFolder,mappedEnrichedFolder,mappedFolder,wiggleFolder,metaFolder,metaRoseFolder,roseFolder,fastaFolder,figuresFolder,geneListFolder,bedFolder,signalFolder,tableFolder,genePlotFolder]
for folder in folderList:
pipeline_dfci.formatFolder(folder,True)
#==========================================================================
#============================LIST OF DATAFILES=============================
#==========================================================================
#this project will utilize multiple datatables
#data tables are organized largely by type/system
#some data tables overlap for ease of analysis
#ChIP-Seq
mouse_dataFile = '%sdata_tables_mm9/THMYCN_TABLE.txt' % (projectFolder)
#==========================================================================
#===========================MAIN METHOD====================================
#==========================================================================
def main():
print('main analysis for MYCN project')
print('changing directory to project folder')
os.chdir(projectFolder)
print('\n\n')
print('#======================================================================')
print('#======================I. LOADING DATA ANNOTATION======================')
print('#======================================================================')
print('\n\n')
#This section sanity checks each data table and makes sure both bam and .bai files are accessible
#for ChIP-Seq
pipeline_dfci.summary(mouse_dataFile)
print('\n\n')
print('#======================================================================')
print('#============II. MAKING A BED OUT OF HG19 FIGURE REGIONS===============')
print('#======================================================================')
print('\n\n')
hg19_gff_path = '%sgff/HG19_NB_FIGURE_GENES.gff' % (hg19_projectFolder)
hg19_gff = utils.parseTable(hg19_gff_path,'\t')
print(hg19_gff)
hg19_bed = utils.gffToBed(hg19_gff)
print(hg19_bed)
hg19_bed_path = '%sbeds/HG19_NB_FIGURE_GENES.bed' % (hg19_projectFolder)
utils.unParseTable(hg19_bed,hg19_bed_path,'\t')
#need to manually lift this over to mm9
#https://genome.ucsc.edu/cgi-bin/hgLiftOver
mm9_bed_path = '%sMM9_NB_FIGURE_GENES_LIFTOVER.bed' % (bedFolder)
mm9_gff_path = '%sMM9_NB_FIGURE_GENES_LIFTOVER.gff' % (gffFolder)
mm9_gff = utils.bedToGFF(mm9_bed_path)
#now add some additional manual regions
added_gff_regions = [
['chr12','TWIST1_ENHANCER','TWIST1_ENHANCER',34639818,34656263,'','-','','TWIST1_ENHANCER'],
['chr11','NPM1_PROMOTER_2','NPM1_PROMOTER_2',33049820,33065883,'','+','','NPM1_PROMOTER_2'],
['chr6','GATA2_ENHANCER','GATA2_ENHANCER',88135802,88159867,'','+','','GATA2_ENHANCER'],
['chr7','PHOX2A','PHOX2A',108964211,108974610,'','+','','PHOX2A'],
['chr15','LET7B','LET7B',85497440,85538754,'','+','','LET7B',],
['chr10','LIN28B','LIN28B',45161233,45217227,'','-','','LIN28B'],
]
mm9_gff_full = mm9_gff+added_gff_regions
utils.unParseTable(mm9_gff_full,mm9_gff_path,'\t')
print('\n\n')
print('#======================================================================')
print('#=======================III. PLOTTING DATA IN MOUSE====================')
print('#======================================================================')
print('\n\n')
#plot mouse regions
plot_mouse_genes(mouse_dataFile,mm9_gff_path)
#==========================================================================
#===================SPECIFIC FUNCTIONS FOR ANALYSIS========================
#==========================================================================
#specific functions written for this analysis
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#~~~~~~~~~~~~~~~~~~~~~~~~PLOTTING FOR SHEP ON SYSTEM~~~~~~~~~~~~~~~~~~~~~~~
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
def plot_mouse_genes(mouse_dataFile,mouse_figure_gff_path):
'''
plots all varieties and iterations of tracks @ lifted over mouse regions
'''
#first establish the plot folder
plotFolder = utils.formatFolder('%sTHMYCN/' % (genePlotFolder),True)
plot_prefix = 'MM9_NB_FIGURE_GENES_LIFTOVER'
#we also have to set the extension properly between datasets
#go by data file
dataDict = pipeline_dfci.loadDataTable(mouse_dataFile)
names_list = dataDict.keys()
#initial check for consistency of read lengths
# for name in names_list:
# bam = utils.Bam(dataDict[name]['bam'])
# read_length = bam.getReadLengths()[0]
# bam_extension = 200-read_length
# print('For dataset %s in %s using an extension of %s' % (name,mouse_dataFile,bam_extension))
# sys.exit()
bam = utils.Bam(dataDict[names_list[0]]['bam'])
read_length = bam.getReadLengths()[0]
bam_extension = 200-read_length
print('For datasets in %s using an extension of %s' % (mouse_dataFile,bam_extension))
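# For example (illustration only), 36 bp reads would give an extension of
# 200 - 36 = 164 bp, so each read is treated as a ~200 bp fragment; the computed
# value is simply passed to pipeline_dfci.callBatchPlot below as extension=bam_extension.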
#first do individuals
for plot_group in ['_MYCN','H3K27AC']:
plotList = [name for name in dataDict.keys() if name.upper().count(plot_group) > 0]
print(plotList)
if plot_group == '_MYCN':
plotName = '%s_THMYCN%s' % (plot_prefix,plot_group)
else:
plotName = '%s_THMYCN_%s' % (plot_prefix,plot_group)
print(plotName)
pipeline_dfci.callBatchPlot(mouse_dataFile,mouse_figure_gff_path,plotName,plotFolder,plotList,uniform=True,bed ='',plotType= 'MULTIPLE',extension=bam_extension,multiPage = False,debug=False,nameString = '',rpm=True,rxGenome = '')
#now as metas
#we only have 3 good k27ac and 3 good mycn datasets
plotList = ['CG_H3K27Ac',
'SCG_H3K27Ac',
'THMYCN1_H3K27Ac',
'THMYCN_139423_H3K27Ac',
'THMYCN_139076_H3K27Ac',
'THMYCN2_MYCN',
'THMYCN_139076_MYCN',
'THMYCN_139423_MYCN',
]
groupString = 'CG_,SCG,H3K27AC,H3K27AC,H3K27AC,MYCN,MYCN,MYCN'
plotName = '%s_THMYCN_META_RELATIVE' % (plot_prefix)
pipeline_dfci.callBatchPlot(mouse_dataFile,mouse_figure_gff_path,plotName,plotFolder,plotList,uniform=False,bed ='',plotType= 'MERGE',extension=bam_extension,multiPage = False,debug=False,nameString = groupString,rpm=True,rxGenome = '')
plotName = '%s_THMYCN_META_UNIFORM' % (plot_prefix)
pipeline_dfci.callBatchPlot(mouse_dataFile,mouse_figure_gff_path,plotName,plotFolder,plotList,uniform=True,bed ='',plotType= 'MERGE',extension=bam_extension,multiPage = False,debug=False,nameString = groupString,rpm=True,rxGenome = '')
#==========================================================================
#==================================THE END=================================
#==========================================================================
if __name__=="__main__":
main()
|
|
# The StringAccessor class defined below is an adaptation of the
# pandas string methods source code (see pd.core.strings)
# For reference, here is a copy of the pandas copyright notice:
# (c) 2011-2012, Lambda Foundry, Inc. and PyData Development Team
# All rights reserved.
# Copyright (c) 2008-2011 AQR Capital Management, LLC
# All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# * Neither the name of the copyright holder nor the names of any
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import codecs
import re
import textwrap
import numpy as np
from .computation import apply_ufunc
_cpython_optimized_encoders = (
"utf-8",
"utf8",
"latin-1",
"latin1",
"iso-8859-1",
"mbcs",
"ascii",
)
_cpython_optimized_decoders = _cpython_optimized_encoders + ("utf-16", "utf-32")
def _is_str_like(x):
return isinstance(x, str) or isinstance(x, bytes)
class StringAccessor:
"""Vectorized string functions for string-like arrays.
Similar to pandas, fields can be accessed through the `.str` attribute
for applicable DataArrays.
>>> da = xr.DataArray(["some", "text", "in", "an", "array"])
>>> da.str.len()
<xarray.DataArray (dim_0: 5)>
array([4, 4, 2, 2, 5])
Dimensions without coordinates: dim_0
"""
__slots__ = ("_obj",)
def __init__(self, obj):
self._obj = obj
def _apply(self, f, dtype=None):
# TODO handling of na values ?
if dtype is None:
dtype = self._obj.dtype
g = np.vectorize(f, otypes=[dtype])
return apply_ufunc(g, self._obj, dask="parallelized", output_dtypes=[dtype])
def len(self):
"""
Compute the length of each element in the array.
Returns
-------
lengths array : array of int
"""
return self._apply(len, dtype=int)
def __getitem__(self, key):
if isinstance(key, slice):
return self.slice(start=key.start, stop=key.stop, step=key.step)
else:
return self.get(key)
def get(self, i):
"""
Extract element from indexable in each element in the array.
Parameters
----------
i : int
Position of element to extract. An out-of-range position yields an
empty string rather than raising an error.
Returns
-------
items : array of objects
"""
obj = slice(-1, None) if i == -1 else slice(i, i + 1)
return self._apply(lambda x: x[obj])
def slice(self, start=None, stop=None, step=None):
"""
Slice substrings from each element in the array.
Parameters
----------
start : int, optional
Start position for slice operation.
stop : int, optional
Stop position for slice operation.
step : int, optional
Step size for slice operation.
Returns
-------
sliced strings : same type as values
"""
s = slice(start, stop, step)
f = lambda x: x[s]
return self._apply(f)
def slice_replace(self, start=None, stop=None, repl=""):
"""
Replace a positional slice of a string with another value.
Parameters
----------
start : int, optional
Left index position to use for the slice. If not specified (None),
the slice is unbounded on the left, i.e. slice from the start
of the string.
stop : int, optional
Right index position to use for the slice. If not specified (None),
the slice is unbounded on the right, i.e. slice until the
end of the string.
repl : str, optional
String for replacement. If not specified, the sliced region
is replaced with an empty string.
Returns
-------
replaced : same type as values
"""
repl = self._obj.dtype.type(repl)
def f(x):
if len(x[start:stop]) == 0:
local_stop = start
else:
local_stop = stop
y = self._obj.dtype.type("")
if start is not None:
y += x[:start]
y += repl
if stop is not None:
y += x[local_stop:]
return y
return self._apply(f)
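# Worked example (illustrative only): for the element "abcdef",
# slice_replace(start=1, stop=3, repl="X") builds "a" + "X" + "def" == "aXdef";
# when the selected slice is empty (e.g. start=3, stop=5 on "ab") the replacement
# is appended instead, giving "abX".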
def capitalize(self):
"""
Convert strings in the array to be capitalized.
Returns
-------
capitalized : same type as values
"""
return self._apply(lambda x: x.capitalize())
def lower(self):
"""
Convert strings in the array to lowercase.
Returns
-------
lowered : same type as values
"""
return self._apply(lambda x: x.lower())
def swapcase(self):
"""
Convert strings in the array to be swapcased.
Returns
-------
swapcased : same type as values
"""
return self._apply(lambda x: x.swapcase())
def title(self):
"""
Convert strings in the array to titlecase.
Returns
-------
titled : same type as values
"""
return self._apply(lambda x: x.title())
def upper(self):
"""
Convert strings in the array to uppercase.
Returns
-------
uppered : same type as values
"""
return self._apply(lambda x: x.upper())
def isalnum(self):
"""
Check whether all characters in each string are alphanumeric.
Returns
-------
isalnum : array of bool
Array of boolean values with the same shape as the original array.
"""
return self._apply(lambda x: x.isalnum(), dtype=bool)
def isalpha(self):
"""
Check whether all characters in each string are alphabetic.
Returns
-------
isalpha : array of bool
Array of boolean values with the same shape as the original array.
"""
return self._apply(lambda x: x.isalpha(), dtype=bool)
def isdecimal(self):
"""
Check whether all characters in each string are decimal.
Returns
-------
isdecimal : array of bool
Array of boolean values with the same shape as the original array.
"""
return self._apply(lambda x: x.isdecimal(), dtype=bool)
def isdigit(self):
"""
Check whether all characters in each string are digits.
Returns
-------
isdigit : array of bool
Array of boolean values with the same shape as the original array.
"""
return self._apply(lambda x: x.isdigit(), dtype=bool)
def islower(self):
"""
Check whether all characters in each string are lowercase.
Returns
-------
islower : array of bool
Array of boolean values with the same shape as the original array.
"""
return self._apply(lambda x: x.islower(), dtype=bool)
def isnumeric(self):
"""
Check whether all characters in each string are numeric.
Returns
-------
isnumeric : array of bool
Array of boolean values with the same shape as the original array.
"""
return self._apply(lambda x: x.isnumeric(), dtype=bool)
def isspace(self):
"""
Check whether all characters in each string are spaces.
Returns
-------
isspace : array of bool
Array of boolean values with the same shape as the original array.
"""
return self._apply(lambda x: x.isspace(), dtype=bool)
def istitle(self):
"""
Check whether all characters in each string are titlecase.
Returns
-------
istitle : array of bool
Array of boolean values with the same shape as the original array.
"""
return self._apply(lambda x: x.istitle(), dtype=bool)
def isupper(self):
"""
Check whether all characters in each string are uppercase.
Returns
-------
isupper : array of bool
Array of boolean values with the same shape as the original array.
"""
return self._apply(lambda x: x.isupper(), dtype=bool)
def count(self, pat, flags=0):
"""
Count occurrences of pattern in each string of the array.
This function is used to count the number of times a particular regex
pattern is repeated in each of the string elements of the
:class:`~xarray.DataArray`.
Parameters
----------
pat : str
Valid regular expression.
flags : int, default 0, meaning no flags
Flags for the `re` module. For a complete list, `see here
<https://docs.python.org/3/howto/regex.html#compilation-flags>`_.
Returns
-------
counts : array of int
"""
pat = self._obj.dtype.type(pat)
regex = re.compile(pat, flags=flags)
f = lambda x: len(regex.findall(x))
return self._apply(f, dtype=int)
def startswith(self, pat):
"""
Test if the start of each string element matches a pattern.
Parameters
----------
pat : str
Character sequence. Regular expressions are not accepted.
Returns
-------
startswith : array of bool
An array of booleans indicating whether the given pattern matches
the start of each string element.
"""
pat = self._obj.dtype.type(pat)
f = lambda x: x.startswith(pat)
return self._apply(f, dtype=bool)
def endswith(self, pat):
"""
Test if the end of each string element matches a pattern.
Parameters
----------
pat : str
Character sequence. Regular expressions are not accepted.
Returns
-------
endswith : array of bool
An array of booleans indicating whether the given pattern matches
the end of each string element.
"""
pat = self._obj.dtype.type(pat)
f = lambda x: x.endswith(pat)
return self._apply(f, dtype=bool)
def pad(self, width, side="left", fillchar=" "):
"""
Pad strings in the array up to width.
Parameters
----------
width : int
Minimum width of resulting string; additional characters will be
filled with character defined in `fillchar`.
side : {'left', 'right', 'both'}, default 'left'
Side from which to fill resulting string.
fillchar : str, default ' '
Additional character for filling, default is whitespace.
Returns
-------
filled : same type as values
Array with a minimum number of char in each element.
"""
width = int(width)
fillchar = self._obj.dtype.type(fillchar)
if len(fillchar) != 1:
raise TypeError("fillchar must be a character, not str")
if side == "left":
f = lambda s: s.rjust(width, fillchar)
elif side == "right":
f = lambda s: s.ljust(width, fillchar)
elif side == "both":
f = lambda s: s.center(width, fillchar)
else: # pragma: no cover
raise ValueError("Invalid side")
return self._apply(f)
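# Illustrative example: with width=5 and fillchar="0", an element "42" becomes
# "00042" for side="left" (rjust), "42000" for side="right" (ljust) and
# "04200" for side="both" (center).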
def center(self, width, fillchar=" "):
"""
Filling left and right side of strings in the array with an
additional character.
Parameters
----------
width : int
Minimum width of resulting string; additional characters will be
filled with ``fillchar``
fillchar : str
Additional character for filling, default is whitespace
Returns
-------
filled : same type as values
"""
return self.pad(width, side="both", fillchar=fillchar)
def ljust(self, width, fillchar=" "):
"""
Filling right side of strings in the array with an additional
character.
Parameters
----------
width : int
Minimum width of resulting string; additional characters will be
filled with ``fillchar``
fillchar : str
Additional character for filling, default is whitespace
Returns
-------
filled : same type as values
"""
return self.pad(width, side="right", fillchar=fillchar)
def rjust(self, width, fillchar=" "):
"""
Filling left side of strings in the array with an additional character.
Parameters
----------
width : int
Minimum width of resulting string; additional characters will be
filled with ``fillchar``
fillchar : str
Additional character for filling, default is whitespace
Returns
-------
filled : same type as values
"""
return self.pad(width, side="left", fillchar=fillchar)
def zfill(self, width):
"""
Pad strings in the array by prepending '0' characters.
Strings in the array are padded with '0' characters on the
left of the string to reach a total string length `width`. Strings
in the array with length greater or equal to `width` are unchanged.
Parameters
----------
width : int
Minimum length of resulting string; strings with length less
than `width` will be prepended with '0' characters.
Returns
-------
filled : same type as values
"""
return self.pad(width, side="left", fillchar="0")
def contains(self, pat, case=True, flags=0, regex=True):
"""
Test if pattern or regex is contained within a string of the array.
Return boolean array based on whether a given pattern or regex is
contained within a string of the array.
Parameters
----------
pat : str
Character sequence or regular expression.
case : bool, default True
If True, case sensitive.
flags : int, default 0 (no flags)
Flags to pass through to the re module, e.g. re.IGNORECASE.
regex : bool, default True
If True, assumes the pat is a regular expression.
If False, treats the pat as a literal string.
Returns
-------
contains : array of bool
An array of boolean values indicating whether the
given pattern is contained within the string of each element
of the array.
"""
pat = self._obj.dtype.type(pat)
if regex:
if not case:
flags |= re.IGNORECASE
regex = re.compile(pat, flags=flags)
if regex.groups > 0: # pragma: no cover
raise ValueError("This pattern has match groups.")
f = lambda x: bool(regex.search(x))
else:
if case:
f = lambda x: pat in x
else:
uppered = self._obj.str.upper()
return uppered.str.contains(pat.upper(), regex=False)
return self._apply(f, dtype=bool)
def match(self, pat, case=True, flags=0):
"""
Determine if each string matches a regular expression.
Parameters
----------
pat : string
Character sequence or regular expression
case : boolean, default True
If True, case sensitive
flags : int, default 0 (no flags)
re module flags, e.g. re.IGNORECASE
Returns
-------
matched : array of bool
"""
if not case:
flags |= re.IGNORECASE
pat = self._obj.dtype.type(pat)
regex = re.compile(pat, flags=flags)
f = lambda x: bool(regex.match(x))
return self._apply(f, dtype=bool)
def strip(self, to_strip=None, side="both"):
"""
Remove leading and trailing characters.
Strip whitespaces (including newlines) or a set of specified characters
from each string in the array from left and/or right sides.
Parameters
----------
to_strip : str or None, default None
Specifying the set of characters to be removed.
All combinations of this set of characters will be stripped.
If None then whitespaces are removed.
side : {'left', 'right', 'both'}, default 'both'
Side from which to strip.
Returns
-------
stripped : same type as values
"""
if to_strip is not None:
to_strip = self._obj.dtype.type(to_strip)
if side == "both":
f = lambda x: x.strip(to_strip)
elif side == "left":
f = lambda x: x.lstrip(to_strip)
elif side == "right":
f = lambda x: x.rstrip(to_strip)
else: # pragma: no cover
raise ValueError("Invalid side")
return self._apply(f)
def lstrip(self, to_strip=None):
"""
Remove leading and trailing characters.
Strip whitespaces (including newlines) or a set of specified characters
from each string in the array from the left side.
Parameters
----------
to_strip : str or None, default None
Specifying the set of characters to be removed.
All combinations of this set of characters will be stripped.
If None then whitespaces are removed.
Returns
-------
stripped : same type as values
"""
return self.strip(to_strip, side="left")
def rstrip(self, to_strip=None):
"""
Remove leading and trailing characters.
Strip whitespaces (including newlines) or a set of specified characters
from each string in the array from the right side.
Parameters
----------
to_strip : str or None, default None
Specifying the set of characters to be removed.
All combinations of this set of characters will be stripped.
If None then whitespaces are removed.
Returns
-------
stripped : same type as values
"""
return self.strip(to_strip, side="right")
def wrap(self, width, **kwargs):
"""
Wrap long strings in the array to be formatted in paragraphs with
length less than a given width.
This method has the same keyword parameters and defaults as
:class:`textwrap.TextWrapper`.
Parameters
----------
width : int
Maximum line-width
expand_tabs : bool, optional
If true, tab characters will be expanded to spaces (default: True)
replace_whitespace : bool, optional
If true, each whitespace character (as defined by
string.whitespace) remaining after tab expansion will be replaced
by a single space (default: True)
drop_whitespace : bool, optional
If true, whitespace that, after wrapping, happens to end up at the
beginning or end of a line is dropped (default: True)
break_long_words : bool, optional
If true, then words longer than width will be broken in order to
ensure that no lines are longer than width. If it is false, long
words will not be broken, and some lines may be longer than width.
(default: True)
break_on_hyphens : bool, optional
If true, wrapping will occur preferably on whitespace and right
after hyphens in compound words, as it is customary in English. If
false, only whitespaces will be considered as potentially good
places for line breaks, but you need to set break_long_words to
false if you want truly insecable words. (default: True)
Returns
-------
wrapped : same type as values
"""
tw = textwrap.TextWrapper(width=width, **kwargs)
f = lambda x: "\n".join(tw.wrap(x))
return self._apply(f)
def translate(self, table):
"""
Map all characters in the string through the given mapping table.
Parameters
----------
table : dict
A mapping of Unicode ordinals to Unicode ordinals, strings,
or None. Unmapped characters are left untouched. Characters mapped
to None are deleted. :meth:`str.maketrans` is a helper function for
making translation tables.
Returns
-------
translated : same type as values
"""
f = lambda x: x.translate(table)
return self._apply(f)
def repeat(self, repeats):
"""
Duplicate each string in the array.
Parameters
----------
repeats : int
Number of repetitions.
Returns
-------
repeated : same type as values
Array of repeated string objects.
"""
f = lambda x: repeats * x
return self._apply(f)
def find(self, sub, start=0, end=None, side="left"):
"""
Return lowest or highest indexes in each string in the array
where the substring is fully contained between [start:end].
Return -1 on failure.
Parameters
----------
sub : str
Substring being searched
start : int
Left edge index
end : int
Right edge index
side : {'left', 'right'}, default 'left'
Starting side for search.
Returns
-------
found : array of integer values
"""
sub = self._obj.dtype.type(sub)
if side == "left":
method = "find"
elif side == "right":
method = "rfind"
else: # pragma: no cover
raise ValueError("Invalid side")
if end is None:
f = lambda x: getattr(x, method)(sub, start)
else:
f = lambda x: getattr(x, method)(sub, start, end)
return self._apply(f, dtype=int)
def rfind(self, sub, start=0, end=None):
"""
Return highest indexes in each string in the array
where the substring is fully contained between [start:end].
Return -1 on failure.
Parameters
----------
sub : str
Substring being searched
start : int
Left edge index
end : int
Right edge index
Returns
-------
found : array of integer values
"""
return self.find(sub, start=start, end=end, side="right")
def index(self, sub, start=0, end=None, side="left"):
"""
Return lowest or highest indexes in each string where the substring is
fully contained between [start:end]. This is the same as
``str.find`` except instead of returning -1, it raises a ValueError
when the substring is not found.
Parameters
----------
sub : str
Substring being searched
start : int
Left edge index
end : int
Right edge index
side : {'left', 'right'}, default 'left'
Starting side for search.
Returns
-------
found : array of integer values
"""
sub = self._obj.dtype.type(sub)
if side == "left":
method = "index"
elif side == "right":
method = "rindex"
else: # pragma: no cover
raise ValueError("Invalid side")
if end is None:
f = lambda x: getattr(x, method)(sub, start)
else:
f = lambda x: getattr(x, method)(sub, start, end)
return self._apply(f, dtype=int)
def rindex(self, sub, start=0, end=None):
"""
Return highest indexes in each string where the substring is
fully contained between [start:end]. This is the same as
``str.rfind`` except instead of returning -1, it raises a ValueError
when the substring is not found.
Parameters
----------
sub : str
Substring being searched
start : int
Left edge index
end : int
Right edge index
Returns
-------
found : array of integer values
"""
return self.index(sub, start=start, end=end, side="right")
def replace(self, pat, repl, n=-1, case=None, flags=0, regex=True):
"""
Replace occurrences of pattern/regex in the array with some string.
Parameters
----------
pat : string or compiled regex
String can be a character sequence or regular expression.
repl : string or callable
Replacement string or a callable. The callable is passed the regex
match object and must return a replacement string to be used.
See :func:`re.sub`.
n : int, default -1 (all)
Number of replacements to make from start
case : boolean, default None
- If True, case sensitive (the default if `pat` is a string)
- Set to False for case insensitive
- Cannot be set if `pat` is a compiled regex
flags : int, default 0 (no flags)
- re module flags, e.g. re.IGNORECASE
- Cannot be set if `pat` is a compiled regex
regex : boolean, default True
- If True, assumes the passed-in pattern is a regular expression.
- If False, treats the pattern as a literal string
- Cannot be set to False if `pat` is a compiled regex or `repl` is
a callable.
Returns
-------
replaced : same type as values
A copy of the object with all matching occurrences of `pat`
replaced by `repl`.
"""
if not (_is_str_like(repl) or callable(repl)): # pragma: no cover
raise TypeError("repl must be a string or callable")
if _is_str_like(pat):
pat = self._obj.dtype.type(pat)
if _is_str_like(repl):
repl = self._obj.dtype.type(repl)
is_compiled_re = isinstance(pat, type(re.compile("")))
if regex:
if is_compiled_re:
if (case is not None) or (flags != 0):
raise ValueError(
"case and flags cannot be set" " when pat is a compiled regex"
)
else:
# not a compiled regex
# set default case
if case is None:
case = True
# add case flag, if provided
if case is False:
flags |= re.IGNORECASE
if is_compiled_re or len(pat) > 1 or flags or callable(repl):
n = n if n >= 0 else 0
compiled = re.compile(pat, flags=flags)
f = lambda x: compiled.sub(repl=repl, string=x, count=n)
else:
f = lambda x: x.replace(pat, repl, n)
else:
if is_compiled_re:
raise ValueError(
"Cannot use a compiled regex as replacement "
"pattern with regex=False"
)
if callable(repl):
raise ValueError(
"Cannot use a callable replacement when " "regex=False"
)
f = lambda x: x.replace(pat, repl, n)
return self._apply(f)
def decode(self, encoding, errors="strict"):
"""
Decode character string in the array using indicated encoding.
Parameters
----------
encoding : str
errors : str, optional
Returns
-------
decoded : same type as values
"""
if encoding in _cpython_optimized_decoders:
f = lambda x: x.decode(encoding, errors)
else:
decoder = codecs.getdecoder(encoding)
f = lambda x: decoder(x, errors)[0]
return self._apply(f, dtype=np.str_)
def encode(self, encoding, errors="strict"):
"""
Encode character string in the array using indicated encoding.
Parameters
----------
encoding : str
errors : str, optional
Returns
-------
encoded : same type as values
"""
if encoding in _cpython_optimized_encoders:
f = lambda x: x.encode(encoding, errors)
else:
encoder = codecs.getencoder(encoding)
f = lambda x: encoder(x, errors)[0]
return self._apply(f, dtype=np.bytes_)
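# A minimal standalone sketch (not part of the accessor) of the np.vectorize
# pattern used by StringAccessor._apply above: an elementwise function is wrapped
# with an explicit output dtype so the result array has a predictable type.
def _demo_vectorized_len():
    """Mirror the class docstring example: element lengths of a small string array."""
    values = np.array(["some", "text", "in", "an", "array"])
    return np.vectorize(len, otypes=[int])(values)  # array([4, 4, 2, 2, 5])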
|
|
#
# Copyright 2012 New Dream Network, LLC (DreamHost)
# Copyright 2013 IBM Corp.
# Copyright 2013 eNovance <[email protected]>
# Copyright Ericsson AB 2013. All rights reserved
# Copyright 2014 Hewlett-Packard Company
# Copyright 2015 Huawei Technologies Co., Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import json
import jsonschema
from oslo_log import log
from oslo_utils import timeutils
import pecan
from pecan import rest
from wsme import types as wtypes
import wsmeext.pecan as wsme_pecan
from ceilometer.alarm.storage import models as alarm_models
from ceilometer.api.controllers.v2 import alarms
from ceilometer.api.controllers.v2 import base
from ceilometer.api.controllers.v2 import samples
from ceilometer.api.controllers.v2 import utils as v2_utils
from ceilometer.api import rbac
from ceilometer.i18n import _
from ceilometer import storage
from ceilometer import utils
LOG = log.getLogger(__name__)
class ComplexQuery(base.Base):
"""Holds a sample query encoded in json."""
filter = wtypes.text
"The filter expression encoded in json."
orderby = wtypes.text
"List of single-element dicts for specifying the ordering of the results."
limit = int
"The maximum number of results to be returned."
@classmethod
def sample(cls):
return cls(filter='{"and": [{"and": [{"=": ' +
'{"counter_name": "cpu_util"}}, ' +
'{">": {"counter_volume": 0.23}}, ' +
'{"<": {"counter_volume": 0.26}}]}, ' +
'{"or": [{"and": [{">": ' +
'{"timestamp": "2013-12-01T18:00:00"}}, ' +
'{"<": ' +
'{"timestamp": "2013-12-01T18:15:00"}}]}, ' +
'{"and": [{">": ' +
'{"timestamp": "2013-12-01T18:30:00"}}, ' +
'{"<": ' +
'{"timestamp": "2013-12-01T18:45:00"}}]}]}]}',
orderby='[{"counter_volume": "ASC"}, ' +
'{"timestamp": "DESC"}]',
limit=42
)
def _list_to_regexp(items, regexp_prefix=""):
regexp = ["^%s$" % item for item in items]
regexp = regexp_prefix + "|".join(regexp)
return regexp
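# For example (illustration only), _list_to_regexp(["and", "or"], "(?i)") returns
# "(?i)^and$|^or$": a case-insensitive pattern matching "and" or "or" only as whole
# strings, which is how the operator and field names below are validated.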
class ValidatedComplexQuery(object):
complex_operators = ["and", "or"]
order_directions = ["asc", "desc"]
simple_ops = ["=", "!=", "<", ">", "<=", "=<", ">=", "=>", "=~"]
regexp_prefix = "(?i)"
complex_ops = _list_to_regexp(complex_operators, regexp_prefix)
simple_ops = _list_to_regexp(simple_ops, regexp_prefix)
order_directions = _list_to_regexp(order_directions, regexp_prefix)
timestamp_fields = ["timestamp", "state_timestamp"]
def __init__(self, query, db_model, additional_name_mapping=None,
metadata_allowed=False):
additional_name_mapping = additional_name_mapping or {}
self.name_mapping = {"user": "user_id",
"project": "project_id"}
self.name_mapping.update(additional_name_mapping)
valid_keys = db_model.get_field_names()
valid_keys = list(valid_keys) + list(self.name_mapping.keys())
valid_fields = _list_to_regexp(valid_keys)
if metadata_allowed:
valid_filter_fields = valid_fields + r"|^metadata\.[\S]+$"
else:
valid_filter_fields = valid_fields
schema_value = {
"oneOf": [{"type": "string"},
{"type": "number"},
{"type": "boolean"}],
"minProperties": 1,
"maxProperties": 1}
schema_value_in = {
"type": "array",
"items": {"oneOf": [{"type": "string"},
{"type": "number"}]},
"minItems": 1}
schema_field = {
"type": "object",
"patternProperties": {valid_filter_fields: schema_value},
"additionalProperties": False,
"minProperties": 1,
"maxProperties": 1}
schema_field_in = {
"type": "object",
"patternProperties": {valid_filter_fields: schema_value_in},
"additionalProperties": False,
"minProperties": 1,
"maxProperties": 1}
schema_leaf_in = {
"type": "object",
"patternProperties": {"(?i)^in$": schema_field_in},
"additionalProperties": False,
"minProperties": 1,
"maxProperties": 1}
schema_leaf_simple_ops = {
"type": "object",
"patternProperties": {self.simple_ops: schema_field},
"additionalProperties": False,
"minProperties": 1,
"maxProperties": 1}
schema_and_or_array = {
"type": "array",
"items": {"$ref": "#"},
"minItems": 2}
schema_and_or = {
"type": "object",
"patternProperties": {self.complex_ops: schema_and_or_array},
"additionalProperties": False,
"minProperties": 1,
"maxProperties": 1}
schema_not = {
"type": "object",
"patternProperties": {"(?i)^not$": {"$ref": "#"}},
"additionalProperties": False,
"minProperties": 1,
"maxProperties": 1}
self.schema = {
"oneOf": [{"$ref": "#/definitions/leaf_simple_ops"},
{"$ref": "#/definitions/leaf_in"},
{"$ref": "#/definitions/and_or"},
{"$ref": "#/definitions/not"}],
"minProperties": 1,
"maxProperties": 1,
"definitions": {"leaf_simple_ops": schema_leaf_simple_ops,
"leaf_in": schema_leaf_in,
"and_or": schema_and_or,
"not": schema_not}}
self.orderby_schema = {
"type": "array",
"items": {
"type": "object",
"patternProperties":
{valid_fields:
{"type": "string",
"pattern": self.order_directions}},
"additionalProperties": False,
"minProperties": 1,
"maxProperties": 1}}
self.original_query = query
def validate(self, visibility_field):
"""Validates the query content and does the necessary conversions."""
if self.original_query.filter is wtypes.Unset:
self.filter_expr = None
else:
try:
self.filter_expr = json.loads(self.original_query.filter)
self._validate_filter(self.filter_expr)
except (ValueError, jsonschema.exceptions.ValidationError) as e:
raise base.ClientSideError(
_("Filter expression not valid: %s") % e)
self._replace_isotime_with_datetime(self.filter_expr)
self._convert_operator_to_lower_case(self.filter_expr)
self._normalize_field_names_for_db_model(self.filter_expr)
self._force_visibility(visibility_field)
if self.original_query.orderby is wtypes.Unset:
self.orderby = None
else:
try:
self.orderby = json.loads(self.original_query.orderby)
self._validate_orderby(self.orderby)
except (ValueError, jsonschema.exceptions.ValidationError) as e:
raise base.ClientSideError(
_("Order-by expression not valid: %s") % e)
self._convert_orderby_to_lower_case(self.orderby)
self._normalize_field_names_in_orderby(self.orderby)
self.limit = (None if self.original_query.limit is wtypes.Unset
else self.original_query.limit)
self.limit = v2_utils.enforce_limit(self.limit)
@staticmethod
def _convert_orderby_to_lower_case(orderby):
for orderby_field in orderby:
utils.lowercase_values(orderby_field)
def _normalize_field_names_in_orderby(self, orderby):
for orderby_field in orderby:
self._replace_field_names(orderby_field)
def _traverse_postorder(self, tree, visitor):
op = list(tree.keys())[0]
if op.lower() in self.complex_operators:
for i, operand in enumerate(tree[op]):
self._traverse_postorder(operand, visitor)
if op.lower() == "not":
self._traverse_postorder(tree[op], visitor)
visitor(tree)
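# Illustration: for a filter {"and": [A, B]} the visitor is called on A, then on B,
# and finally on the enclosing "and" node itself, so rewrites such as field renaming
# are applied bottom-up.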
def _check_cross_project_references(self, own_project_id,
visibility_field):
"""Do not allow other than own_project_id."""
def check_project_id(subfilter):
op, value = list(subfilter.items())[0]
if (op.lower() not in self.complex_operators
and list(value.keys())[0] == visibility_field
and value[visibility_field] != own_project_id):
raise base.ProjectNotAuthorized(value[visibility_field])
self._traverse_postorder(self.filter_expr, check_project_id)
def _force_visibility(self, visibility_field):
"""Force visibility field.
If the tenant is not admin, insert an extra
"and <visibility_field>=<tenant's project_id>" clause to the query.
"""
authorized_project = rbac.get_limited_to_project(pecan.request.headers)
is_admin = authorized_project is None
if not is_admin:
self._restrict_to_project(authorized_project, visibility_field)
self._check_cross_project_references(authorized_project,
visibility_field)
def _restrict_to_project(self, project_id, visibility_field):
restriction = {"=": {visibility_field: project_id}}
if self.filter_expr is None:
self.filter_expr = restriction
else:
self.filter_expr = {"and": [restriction, self.filter_expr]}
def _replace_isotime_with_datetime(self, filter_expr):
def replace_isotime(subfilter):
op, value = list(subfilter.items())[0]
if op.lower() not in self.complex_operators:
field = list(value.keys())[0]
if field in self.timestamp_fields:
date_time = self._convert_to_datetime(subfilter[op][field])
subfilter[op][field] = date_time
self._traverse_postorder(filter_expr, replace_isotime)
def _normalize_field_names_for_db_model(self, filter_expr):
def _normalize_field_names(subfilter):
op, value = list(subfilter.items())[0]
if op.lower() not in self.complex_operators:
self._replace_field_names(value)
self._traverse_postorder(filter_expr,
_normalize_field_names)
def _replace_field_names(self, subfilter):
field, value = list(subfilter.items())[0]
if field in self.name_mapping:
del subfilter[field]
subfilter[self.name_mapping[field]] = value
if field.startswith("metadata."):
del subfilter[field]
subfilter["resource_" + field] = value
def _convert_operator_to_lower_case(self, filter_expr):
self._traverse_postorder(filter_expr, utils.lowercase_keys)
@staticmethod
def _convert_to_datetime(isotime):
try:
date_time = timeutils.parse_isotime(isotime)
date_time = date_time.replace(tzinfo=None)
return date_time
except ValueError:
LOG.exception(_("String %s is not a valid isotime") % isotime)
msg = _('Failed to parse the timestamp value %s') % isotime
raise base.ClientSideError(msg)
def _validate_filter(self, filter_expr):
jsonschema.validate(filter_expr, self.schema)
def _validate_orderby(self, orderby_expr):
jsonschema.validate(orderby_expr, self.orderby_schema)
class QuerySamplesController(rest.RestController):
"""Provides complex query possibilities for samples."""
@wsme_pecan.wsexpose([samples.Sample], body=ComplexQuery)
def post(self, body):
"""Define query for retrieving Sample data.
:param body: Query rules for the samples to be returned.
"""
rbac.enforce('query_sample', pecan.request)
sample_name_mapping = {"resource": "resource_id",
"meter": "counter_name",
"type": "counter_type",
"unit": "counter_unit",
"volume": "counter_volume"}
query = ValidatedComplexQuery(body,
storage.models.Sample,
sample_name_mapping,
metadata_allowed=True)
query.validate(visibility_field="project_id")
conn = pecan.request.storage_conn
return [samples.Sample.from_db_model(s)
for s in conn.query_samples(query.filter_expr,
query.orderby,
query.limit)]
class QueryAlarmHistoryController(rest.RestController):
"""Provides complex query possibilities for alarm history."""
@wsme_pecan.wsexpose([alarms.AlarmChange], body=ComplexQuery)
def post(self, body):
"""Define query for retrieving AlarmChange data.
:param body: Query rules for the alarm history to be returned.
"""
rbac.enforce('query_alarm_history', pecan.request)
query = ValidatedComplexQuery(body,
alarm_models.AlarmChange)
query.validate(visibility_field="on_behalf_of")
conn = pecan.request.alarm_storage_conn
return [alarms.AlarmChange.from_db_model(s)
for s in conn.query_alarm_history(query.filter_expr,
query.orderby,
query.limit)]
class QueryAlarmsController(rest.RestController):
"""Provides complex query possibilities for alarms."""
history = QueryAlarmHistoryController()
@wsme_pecan.wsexpose([alarms.Alarm], body=ComplexQuery)
def post(self, body):
"""Define query for retrieving Alarm data.
:param body: Query rules for the alarms to be returned.
"""
rbac.enforce('query_alarm', pecan.request)
query = ValidatedComplexQuery(body,
alarm_models.Alarm)
query.validate(visibility_field="project_id")
conn = pecan.request.alarm_storage_conn
return [alarms.Alarm.from_db_model(s)
for s in conn.query_alarms(query.filter_expr,
query.orderby,
query.limit)]
|
|
"""CherryPy is a pythonic, object-oriented HTTP framework.
CherryPy consists of not one, but four separate API layers.
The APPLICATION LAYER is the simplest. CherryPy applications are written as
a tree of classes and methods, where each branch in the tree corresponds to
a branch in the URL path. Each method is a 'page handler', which receives
GET and POST params as keyword arguments, and returns or yields the (HTML)
body of the response. The special method name 'index' is used for paths
that end in a slash, and the special method name 'default' is used to
handle multiple paths via a single handler. This layer also includes:
* the 'exposed' attribute (and cherrypy.expose)
* cherrypy.quickstart()
* _cp_config attributes
* cherrypy.tools (including cherrypy.session)
* cherrypy.url()
The ENVIRONMENT LAYER is used by developers at all levels. It provides
information about the current request and response, plus the application
and server environment, via a (default) set of top-level objects:
* cherrypy.request
* cherrypy.response
* cherrypy.engine
* cherrypy.server
* cherrypy.tree
* cherrypy.config
* cherrypy.thread_data
* cherrypy.log
* cherrypy.HTTPError, NotFound, and HTTPRedirect
* cherrypy.lib
The EXTENSION LAYER allows advanced users to construct and share their own
plugins. It consists of:
* Hook API
* Tool API
* Toolbox API
* Dispatch API
* Config Namespace API
Finally, there is the CORE LAYER, which uses the core API's to construct
the default components which are available at higher layers. You can think
of the default components as the 'reference implementation' for CherryPy.
Megaframeworks (and advanced users) may replace the default components
with customized or extended components. The core API's are:
* Application API
* Engine API
* Request API
* Server API
* WSGI API
These API's are described in the `CherryPy specification <https://bitbucket.org/cherrypy/cherrypy/wiki/CherryPySpec>`_.
"""
__version__ = "3.5.0"
from cherrypy._cpcompat import urljoin as _urljoin, urlencode as _urlencode
from cherrypy._cpcompat import basestring, unicodestr, set
from cherrypy._cperror import HTTPError, HTTPRedirect, InternalRedirect
from cherrypy._cperror import NotFound, CherryPyException, TimeoutError
from cherrypy import _cpdispatch as dispatch
from cherrypy import _cptools
tools = _cptools.default_toolbox
Tool = _cptools.Tool
from cherrypy import _cprequest
from cherrypy.lib import httputil as _httputil
from cherrypy import _cptree
tree = _cptree.Tree()
from cherrypy._cptree import Application
from cherrypy import _cpwsgi as wsgi
from cherrypy import process
try:
from cherrypy.process import win32
engine = win32.Win32Bus()
engine.console_control_handler = win32.ConsoleCtrlHandler(engine)
del win32
except ImportError:
engine = process.bus
# Timeout monitor. We add two channels to the engine
# to which cherrypy.Application will publish.
engine.listeners['before_request'] = set()
engine.listeners['after_request'] = set()
class _TimeoutMonitor(process.plugins.Monitor):
def __init__(self, bus):
self.servings = []
process.plugins.Monitor.__init__(self, bus, self.run)
def before_request(self):
self.servings.append((serving.request, serving.response))
def after_request(self):
try:
self.servings.remove((serving.request, serving.response))
except ValueError:
pass
def run(self):
"""Check timeout on all responses. (Internal)"""
for req, resp in self.servings:
resp.check_timeout()
engine.timeout_monitor = _TimeoutMonitor(engine)
engine.timeout_monitor.subscribe()
engine.autoreload = process.plugins.Autoreloader(engine)
engine.autoreload.subscribe()
engine.thread_manager = process.plugins.ThreadManager(engine)
engine.thread_manager.subscribe()
engine.signal_handler = process.plugins.SignalHandler(engine)
class _HandleSignalsPlugin(object):
"""Handle signals from other processes based on the configured
platform handlers above."""
def __init__(self, bus):
self.bus = bus
def subscribe(self):
"""Add the handlers based on the platform"""
if hasattr(self.bus, "signal_handler"):
self.bus.signal_handler.subscribe()
if hasattr(self.bus, "console_control_handler"):
self.bus.console_control_handler.subscribe()
engine.signals = _HandleSignalsPlugin(engine)
from cherrypy import _cpserver
server = _cpserver.Server()
server.subscribe()
def quickstart(root=None, script_name="", config=None):
"""Mount the given root, start the builtin server (and engine), then block.
root: an instance of a "controller class" (a collection of page handler
methods) which represents the root of the application.
script_name: a string containing the "mount point" of the application.
This should start with a slash, and be the path portion of the URL
at which to mount the given root. For example, if root.index() will
handle requests to "http://www.example.com:8080/dept/app1/", then
the script_name argument would be "/dept/app1".
It MUST NOT end in a slash. If the script_name refers to the root
of the URI, it MUST be an empty string (not "/").
config: a file or dict containing application config. If this contains
a [global] section, those entries will be used in the global
(site-wide) config.
"""
if config:
_global_conf_alias.update(config)
tree.mount(root, script_name, config)
engine.signals.subscribe()
engine.start()
engine.block()
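# Minimal usage sketch for quickstart() (the HelloWorld controller, the port
# and the config values are illustrative, not part of this module):
#
#   import cherrypy
#
#   class HelloWorld(object):
#       @cherrypy.expose
#       def index(self):
#           return "Hello world!"
#
#   cherrypy.quickstart(HelloWorld(), "",
#                       {"global": {"server.socket_port": 8080}})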
from cherrypy._cpcompat import threadlocal as _local
class _Serving(_local):
"""An interface for registering request and response objects.
Rather than have a separate "thread local" object for the request and
the response, this class works as a single threadlocal container for
both objects (and any others which developers wish to define). In this
way, we can easily dump those objects when we stop/start a new HTTP
conversation, yet still refer to them as module-level globals in a
thread-safe way.
"""
request = _cprequest.Request(_httputil.Host("127.0.0.1", 80),
_httputil.Host("127.0.0.1", 1111))
"""
The request object for the current thread. In the main thread,
and any threads which are not receiving HTTP requests, this is None."""
response = _cprequest.Response()
"""
The response object for the current thread. In the main thread,
and any threads which are not receiving HTTP requests, this is None."""
def load(self, request, response):
self.request = request
self.response = response
def clear(self):
"""Remove all attributes of self."""
self.__dict__.clear()
serving = _Serving()
class _ThreadLocalProxy(object):
__slots__ = ['__attrname__', '__dict__']
def __init__(self, attrname):
self.__attrname__ = attrname
def __getattr__(self, name):
child = getattr(serving, self.__attrname__)
return getattr(child, name)
def __setattr__(self, name, value):
if name in ("__attrname__", ):
object.__setattr__(self, name, value)
else:
child = getattr(serving, self.__attrname__)
setattr(child, name, value)
def __delattr__(self, name):
child = getattr(serving, self.__attrname__)
delattr(child, name)
def _get_dict(self):
child = getattr(serving, self.__attrname__)
d = child.__class__.__dict__.copy()
d.update(child.__dict__)
return d
__dict__ = property(_get_dict)
def __getitem__(self, key):
child = getattr(serving, self.__attrname__)
return child[key]
def __setitem__(self, key, value):
child = getattr(serving, self.__attrname__)
child[key] = value
def __delitem__(self, key):
child = getattr(serving, self.__attrname__)
del child[key]
def __contains__(self, key):
child = getattr(serving, self.__attrname__)
return key in child
def __len__(self):
child = getattr(serving, self.__attrname__)
return len(child)
def __nonzero__(self):
child = getattr(serving, self.__attrname__)
return bool(child)
# Python 3
__bool__ = __nonzero__
# Create request and response object (the same objects will be used
# throughout the entire life of the webserver, but will redirect
# to the "serving" object)
request = _ThreadLocalProxy('request')
response = _ThreadLocalProxy('response')
# Create thread_data object as a thread-specific all-purpose storage
class _ThreadData(_local):
"""A container for thread-specific data."""
thread_data = _ThreadData()
# Monkeypatch pydoc to allow help() to go through the threadlocal proxy.
# Jan 2007: no Googleable examples of anyone else replacing pydoc.resolve.
# The only other way would be to change what is returned from type(request)
# and that's not possible in pure Python (you'd have to fake ob_type).
def _cherrypy_pydoc_resolve(thing, forceload=0):
"""Given an object or a path to an object, get the object and its name."""
if isinstance(thing, _ThreadLocalProxy):
thing = getattr(serving, thing.__attrname__)
return _pydoc._builtin_resolve(thing, forceload)
try:
import pydoc as _pydoc
_pydoc._builtin_resolve = _pydoc.resolve
_pydoc.resolve = _cherrypy_pydoc_resolve
except ImportError:
pass
from cherrypy import _cplogging
class _GlobalLogManager(_cplogging.LogManager):
"""A site-wide LogManager; routes to app.log or global log as appropriate.
This :class:`LogManager<cherrypy._cplogging.LogManager>` implements
cherrypy.log() and cherrypy.log.access(). If either
function is called during a request, the message will be sent to the
logger for the current Application. If they are called outside of a
request, the message will be sent to the site-wide logger.
"""
def __call__(self, *args, **kwargs):
"""Log the given message to the app.log or global log as appropriate.
"""
# Do NOT use try/except here. See
# https://bitbucket.org/cherrypy/cherrypy/issue/945
if hasattr(request, 'app') and hasattr(request.app, 'log'):
log = request.app.log
else:
log = self
return log.error(*args, **kwargs)
def access(self):
"""Log an access message to the app.log or global log as appropriate.
"""
try:
return request.app.log.access()
except AttributeError:
return _cplogging.LogManager.access(self)
log = _GlobalLogManager()
# Set a default screen handler on the global log.
log.screen = True
log.error_file = ''
# Using an access file makes CP about 10% slower. Leave off by default.
log.access_file = ''
def _buslog(msg, level):
log.error(msg, 'ENGINE', severity=level)
engine.subscribe('log', _buslog)
# Helper functions for CP apps #
def expose(func=None, alias=None):
"""Expose the function, optionally providing an alias or set of aliases."""
def expose_(func):
func.exposed = True
if alias is not None:
if isinstance(alias, basestring):
parents[alias.replace(".", "_")] = func
else:
for a in alias:
parents[a.replace(".", "_")] = func
return func
import sys
import types
if isinstance(func, (types.FunctionType, types.MethodType)):
if alias is None:
# @expose
func.exposed = True
return func
else:
# func = expose(func, alias)
parents = sys._getframe(1).f_locals
return expose_(func)
elif func is None:
if alias is None:
# @expose()
parents = sys._getframe(1).f_locals
return expose_
else:
# @expose(alias="alias") or
# @expose(alias=["alias1", "alias2"])
parents = sys._getframe(1).f_locals
return expose_
else:
# @expose("alias") or
# @expose(["alias1", "alias2"])
parents = sys._getframe(1).f_locals
alias = func
return expose_
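# Brief sketch of the call forms handled above (handler names are hypothetical):
#
#   @cherrypy.expose                        # plain decorator
#   def index(self): ...
#
#   @cherrypy.expose("page_html")           # positional alias (or a list)
#   def page(self): ...
#
#   @cherrypy.expose(alias="page_html")     # keyword alias
#   def page(self): ...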
def popargs(*args, **kwargs):
"""A decorator for _cp_dispatch
(cherrypy.dispatch.Dispatcher.dispatch_method_name).
Optional keyword argument: handler=(Object or Function)
Provides a _cp_dispatch function that pops off path segments into
cherrypy.request.params under the names specified. The dispatch
is then forwarded on to the next vpath element.
Note that any existing (and exposed) member function of the class that
popargs is applied to will override that value of the argument. For
instance, if you have a method named "list" on the class decorated with
popargs, then accessing "/list" will call that function instead of popping
it off as the requested parameter. This restriction applies to all
_cp_dispatch functions. The only way around this restriction is to create
a "blank class" whose only function is to provide _cp_dispatch.
If there are path elements after the arguments, or more arguments
are requested than are available in the vpath, then the 'handler'
keyword argument specifies the next object to handle the parameterized
request. If handler is not specified or is None, then self is used.
If handler is a function rather than an instance, then that function
will be called with the args specified and the return value from that
function used as the next object INSTEAD of adding the parameters to
cherrypy.request.args.
This decorator may be used in one of two ways:
As a class decorator:
@cherrypy.popargs('year', 'month', 'day')
class Blog:
def index(self, year=None, month=None, day=None):
#Process the parameters here; any url like
#/, /2009, /2009/12, or /2009/12/31
#will fill in the appropriate parameters.
def create(self):
#This link will still be available at /create. Defined functions
#take precedence over arguments.
Or as a member of a class:
class Blog:
_cp_dispatch = cherrypy.popargs('year', 'month', 'day')
#...
The handler argument may be used to mix arguments with built in functions.
For instance, the following setup allows different activities at the
day, month, and year level:
class DayHandler:
def index(self, year, month, day):
#Do something with this day; probably list entries
def delete(self, year, month, day):
#Delete all entries for this day
@cherrypy.popargs('day', handler=DayHandler())
class MonthHandler:
def index(self, year, month):
#Do something with this month; probably list entries
def delete(self, year, month):
#Delete all entries for this month
@cherrypy.popargs('month', handler=MonthHandler())
class YearHandler:
def index(self, year):
#Do something with this year
#...
@cherrypy.popargs('year', handler=YearHandler())
class Root:
def index(self):
#...
"""
# Since the keyword arg comes after *args, we have to process it ourselves
# for older versions of Python (which lack keyword-only arguments).
handler = None
handler_call = False
for k, v in kwargs.items():
if k == 'handler':
handler = v
else:
raise TypeError(
"cherrypy.popargs() got an unexpected keyword argument '{0}'"
.format(k)
)
import inspect
if handler is not None \
and (hasattr(handler, '__call__') or inspect.isclass(handler)):
handler_call = True
def decorated(cls_or_self=None, vpath=None):
if inspect.isclass(cls_or_self):
# cherrypy.popargs is a class decorator
cls = cls_or_self
setattr(cls, dispatch.Dispatcher.dispatch_method_name, decorated)
return cls
# We're in the actual function
self = cls_or_self
parms = {}
for arg in args:
if not vpath:
break
parms[arg] = vpath.pop(0)
if handler is not None:
if handler_call:
return handler(**parms)
else:
request.params.update(parms)
return handler
request.params.update(parms)
# If we are the ultimate handler, then to prevent our _cp_dispatch
# from being called again, we will resolve remaining elements through
# getattr() directly.
if vpath:
return getattr(self, vpath.pop(0), None)
else:
return self
return decorated
def url(path="", qs="", script_name=None, base=None, relative=None):
"""Create an absolute URL for the given path.
If 'path' starts with a slash ('/'), this will return
(base + script_name + path + qs).
If it does not start with a slash, this returns
(base + script_name [+ request.path_info] + path + qs).
If script_name is None, cherrypy.request will be used
to find a script_name, if available.
If base is None, cherrypy.request.base will be used (if available).
Note that you can use cherrypy.tools.proxy to change this.
Finally, note that this function can be used to obtain an absolute URL
for the current request path (minus the querystring) by passing no args.
If you call url(qs=cherrypy.request.query_string), you should get the
original browser URL (assuming no internal redirections).
If relative is None or not provided, request.app.relative_urls will
be used (if available, else False). If False, the output will be an
absolute URL (including the scheme, host, vhost, and script_name).
If True, the output will instead be a URL that is relative to the
current request path, perhaps including '..' atoms. If relative is
the string 'server', the output will instead be a URL that is
relative to the server root; i.e., it will start with a slash.
"""
if isinstance(qs, (tuple, list, dict)):
qs = _urlencode(qs)
if qs:
qs = '?' + qs
if request.app:
if not path.startswith("/"):
# Append/remove trailing slash from path_info as needed
# (this is to support mistyped URL's without redirecting;
# if you want to redirect, use tools.trailing_slash).
pi = request.path_info
if request.is_index is True:
if not pi.endswith('/'):
pi = pi + '/'
elif request.is_index is False:
if pi.endswith('/') and pi != '/':
pi = pi[:-1]
if path == "":
path = pi
else:
path = _urljoin(pi, path)
if script_name is None:
script_name = request.script_name
if base is None:
base = request.base
newurl = base + script_name + path + qs
else:
# No request.app (we're being called outside a request).
# We'll have to guess the base from server.* attributes.
# This will produce very different results from the above
# if you're using vhosts or tools.proxy.
if base is None:
base = server.base()
path = (script_name or "") + path
newurl = base + path + qs
if './' in newurl:
# Normalize the URL by removing ./ and ../
atoms = []
for atom in newurl.split('/'):
if atom == '.':
pass
elif atom == '..':
atoms.pop()
else:
atoms.append(atom)
newurl = '/'.join(atoms)
# At this point, we should have a fully-qualified absolute URL.
if relative is None:
relative = getattr(request.app, "relative_urls", False)
# See http://www.ietf.org/rfc/rfc2396.txt
if relative == 'server':
# "A relative reference beginning with a single slash character is
# termed an absolute-path reference, as defined by <abs_path>..."
# This is also sometimes called "server-relative".
newurl = '/' + '/'.join(newurl.split('/', 3)[3:])
elif relative:
# "A relative reference that does not begin with a scheme name
# or a slash character is termed a relative-path reference."
old = url(relative=False).split('/')[:-1]
new = newurl.split('/')
while old and new:
a, b = old[0], new[0]
if a != b:
break
old.pop(0)
new.pop(0)
new = (['..'] * len(old)) + new
newurl = '/'.join(new)
return newurl
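# Illustrative outcomes (hypothetical request with base "http://example.com"
# and script_name "/app"):
#   url("/page")                    -> "http://example.com/app/page"
#   url("/page", qs={"a": 1})       -> "http://example.com/app/page?a=1"
#   url("/page", relative="server") -> "/app/page"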
# import _cpconfig last so it can reference other top-level objects
from cherrypy import _cpconfig
# Use _global_conf_alias so quickstart can use 'config' as an arg
# without shadowing cherrypy.config.
config = _global_conf_alias = _cpconfig.Config()
config.defaults = {
'tools.log_tracebacks.on': True,
'tools.log_headers.on': True,
'tools.trailing_slash.on': True,
'tools.encode.on': True
}
config.namespaces["log"] = lambda k, v: setattr(log, k, v)
config.namespaces["checker"] = lambda k, v: setattr(checker, k, v)
# Must reset to get our defaults applied.
config.reset()
from cherrypy import _cpchecker
checker = _cpchecker.Checker()
engine.subscribe('start', checker)
|
|
import inspect as _inspect
import sys as _sys
import unittest as _unittest
try:
import wrapt as _wrapt
except ImportError as e:
_wrapt = None
_wrapt_error = e
import bes.trace as _bes_trace
def skip_if_missing_wrapt(obj):
if _wrapt is None:
return _unittest.skip(str(_wrapt_error))(obj)
return obj
def skip_if_python_version_less_than(version):
skip = _sys.version_info < version
def decorator(obj):
if skip:
return _unittest.skip('requires Python >= {0}'.format(version))(obj)
return obj
return decorator
class Logger (object):
def __init__(self):
self.messages = []
def __call__(self, *args, **kwargs):
self.messages.append({'args': args, 'kwargs': kwargs})
class TraceTestCase (_unittest.TestCase):
def test_trace(self):
logger = Logger()
@_bes_trace.trace(logger=logger)
def foo(*args, **kwargs):
logger('running')
foo(1, 'a', b=3)
self.assertEqual(
logger.messages,
[
{
'args': (),
'kwargs': {
'action': 'start',
'type': 'foo',
},
},
{
'args': (
'running',
),
'kwargs': {},
},
{
'args': (),
'kwargs': {
'action': 'complete',
'type': 'foo',
},
},
])
def test_with_arguments(self):
logger = Logger()
@_bes_trace.trace(logger=logger, a=1)
def foo(*args, **kwargs):
logger('running')
foo(1, 'a', b=3)
self.assertEqual(
logger.messages,
[
{
'args': (),
'kwargs': {
'a': 1,
'action': 'start',
'type': 'foo',
},
},
{
'args': (
'running',
),
'kwargs': {},
},
{
'args': (),
'kwargs': {
'a': 1,
'action': 'complete',
'type': 'foo',
},
},
])
def test_with_explicit_type(self):
logger = Logger()
@_bes_trace.trace(type='my-type', logger=logger, a=1)
def foo(*args, **kwargs):
logger('running')
foo(1, 'a', b=3)
self.assertEqual(
logger.messages,
[
{
'args': (),
'kwargs': {
'a': 1,
'action': 'start',
'type': 'my-type',
},
},
{
'args': (
'running',
),
'kwargs': {},
},
{
'args': (),
'kwargs': {
'a': 1,
'action': 'complete',
'type': 'my-type',
},
},
])
def test_with_error(self):
logger = Logger()
@_bes_trace.trace(logger=logger, a=1)
def foo(*args, **kwargs):
raise ValueError('dying')
self.assertRaises(ValueError, foo, 1, 'a', b=3)
self.assertEqual(
logger.messages,
[
{
'args': (),
'kwargs': {
'a': 1,
'action': 'start',
'type': 'foo',
},
},
{
'args': (),
'kwargs': {
'a': 1,
'action': 'error',
'error': 'dying',
'type': 'foo',
},
},
])
def test_name(self):
@_bes_trace.trace()
def foo(*args, **kwargs):
return 1
self.assertEqual(foo.__name__, 'foo')
def test_doc(self):
@_bes_trace.trace()
def foo(*args, **kwargs):
'A test method'
return 1
self.assertEqual(foo.__doc__, 'A test method')
@skip_if_python_version_less_than(version=(3, 3))
def test_signature(self):
@_bes_trace.trace()
def foo(a, b=3, *args, **kwargs):
'A test method'
return 1
if hasattr(_inspect, 'signature'): # Python >= 3.3
signature = _inspect.signature(foo)
self.assertEqual(
str(signature), '(a, b=3, *args, **kwargs)')
else:
if hasattr(_inspect, 'getfullargspec'): # Python 3
argspec = _inspect.getfullargspec(foo)
self.assertEqual(argspec.varkw, 'kwargs')
self.assertEqual(argspec.kwonlyargs, [])
self.assertEqual(argspec.kwonlydefaults, None)
self.assertEqual(argspec.annotations, {})
else: # Python 2
argspec = _inspect.getargspec(foo)
self.assertEqual(argspec.keywords, 'kwargs')
self.assertEqual(argspec.args, ['a', 'b'])
self.assertEqual(argspec.varargs, 'args')
self.assertEqual(argspec.defaults, (3,))
@skip_if_missing_wrapt
def test_code(self):
@_bes_trace.trace()
def foo(a, b=3, *args, **kwargs):
"""A test method"""
return 1 * 2 + 3
source = _inspect.getsource(foo)
self.assertEqual(
source,
'\n'.join([
' @_bes_trace.trace()',
' def foo(a, b=3, *args, **kwargs):',
' """A test method"""',
' return 1 * 2 + 3',
'',
]))
|
|
import collections
import datetime
import re
import click
import pygal
import app.filter
import app.input
class Burndown():
def __init__(self, sprint, commitment, report, issues, customfield):
self.sprint = sprint
self.commitment = commitment
self.report = report
self.issues = issues
self.timeline = collections.OrderedDict()
self.customfield = customfield
self.faulty = {}
self.start = datetime.datetime.strptime(
sprint['startDate'], '%d/%b/%y %I:%M %p'
)
self.end = datetime.datetime.strptime(
sprint['endDate'], '%d/%b/%y %I:%M %p'
)
start = self.start.replace(hour=0, minute=0, second=0, microsecond=0)
day = datetime.timedelta(days=1)
while start <= self.end:
self.timeline[start.strftime('%Y-%m-%d')] = {
'date': start,
'ideal': commitment,
'completed': 0,
'unplanned': 0
}
start += day
self.calculate()
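# Rough shape of self.timeline after construction (dates and the commitment
# of 20 are hypothetical):
#   {"2020-01-01": {"date": datetime(2020, 1, 1), "ideal": 20,
#                   "completed": 0, "unplanned": 0},
#    "2020-01-02": {...},
#    ...}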
def _calculate_completed(self):
changes = {}
completed = self.commitment
# closed issues
for key in self.report.completed:
if 'Story' != self.issues[key].fields.issuetype.name:
continue
resolution = self._get_resolution_date(
self.issues[key].changelog.histories
)
if not self.start <= resolution <= self.end:
continue
estimation = getattr(self.issues[key].fields, self.customfield)
if estimation is None:
self.faulty[key] = 'Estimation missing'
change = resolution.strftime('%Y-%m-%d')
if change not in changes:
changes[change] = 0
changes[change] += int(estimation or 0)
# decreased story points
for key in self.report.all:
if 'Story' != self.issues[key].fields.issuetype.name:
continue
status = self.issues[key].fields.status.name
for history in self.issues[key].changelog.histories:
for item in history.items:
created = datetime.datetime.strptime(
re.sub(r'\..*$', '', history.created),
'%Y-%m-%dT%H:%M:%S'
)
if not (self.start <= created <= self.end):
continue
if 'status' == item.field:
status = item.toString
if 'Open' == status:
continue
if ('Story Points' == item.field and
item.fromString is not None):
estimationFrom = int(item.fromString or 0)
estimationTo = int(item.toString or 0)
diff = estimationTo - estimationFrom
if diff > 0:
continue
change = created.strftime('%Y-%m-%d')
if change not in changes:
changes[change] = 0
changes[change] += abs(diff)
for date, entry in self.timeline.items():
if date in changes:
completed -= changes[date]
entry['completed'] = completed
def _calculate_ideal(self):
items = self.timeline.items()
ideal = self.commitment
step = ideal / (len(items) - 1)
for date, entry in self.timeline.items():
entry['ideal'] = ideal
ideal -= step
entry['ideal'] = 0
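# Worked example of the ideal line (hypothetical numbers): for a five-day
# timeline and a commitment of 20, step is 5 and the entries receive
# 20, 15, 10, 5, 0; the final assignment above pins the last day to exactly 0
# even when the division is inexact.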
def _calculate_unplanned(self):
changes = {}
unplanned = self.commitment
# added
for key in self.report.added:
if 'Story' != self.issues[key].fields.issuetype.name:
continue
for date in self._get_added_dates(
self.issues[key].changelog.histories):
estimation = self._get_estimation_from_date(
date, self.issues[key].changelog.histories
)
if estimation is None:
estimation = getattr(
self.issues[key].fields, self.customfield
)
if estimation is None:
self.faulty[key] = 'Estimation missing'
change = date.strftime('%Y-%m-%d')
if change not in changes:
changes[change] = 0
changes[change] += int(estimation or 0)
# punted
for key in self.report.punted:
if 'Story' != self.issues[key].fields.issuetype.name:
continue
for date in self._get_punted_dates(
self.issues[key].changelog.histories):
resolution = self._get_resolution_date(
self.issues[key].changelog.histories
)
if resolution and not self.start <= resolution <= self.end:
continue
estimation = self._get_estimation_from_date(
date, self.issues[key].changelog.histories
)
if estimation is None:
estimation = getattr(
self.issues[key].fields, self.customfield
)
if estimation is None:
self.faulty[key] = 'Estimation missing'
change = date.strftime('%Y-%m-%d')
if change not in changes:
changes[change] = 0
changes[change] -= int(estimation or 0)
# decreased/increased story points
for key in self.report.all:
if 'Story' != self.issues[key].fields.issuetype.name:
continue
status = self.issues[key].fields.status.name
for history in self.issues[key].changelog.histories:
for item in history.items:
created = datetime.datetime.strptime(
re.sub(r'\..*$', '', history.created),
'%Y-%m-%dT%H:%M:%S'
)
if not (self.start <= created <= self.end):
continue
if 'status' == item.field:
status = item.toString
if ('Story Points' == item.field and
item.fromString is not None):
estimationFrom = int(item.fromString or 0)
estimationTo = int(item.toString or 0)
diff = estimationTo - estimationFrom
if 'Open' != status and diff < 0:
continue
change = created.strftime('%Y-%m-%d')
if change not in changes:
changes[change] = 0
changes[change] += diff
for date, entry in self.timeline.items():
if date in changes:
unplanned += changes[date]
entry['unplanned'] = unplanned
def _get_added_dates(self, histories):
dates = []
for history in histories:
for item in history.items:
created = datetime.datetime.strptime(
re.sub(r'\..*$', '', history.created), '%Y-%m-%dT%H:%M:%S'
)
if not (self.start <= created <= self.end):
continue
if ('Sprint' == item.field and
str(self.sprint['id']) in
str(item.to).replace(' ', '').split(',')):
dates.append(created)
return dates
def _get_estimation_from_date(self, date, histories):
before = None
after = None
for history in histories:
for item in history.items:
created = datetime.datetime.strptime(
re.sub(r'\..*$', '', history.created), '%Y-%m-%dT%H:%M:%S'
)
if 'Story Points' == item.field and item.toString:
if created <= date:
before = int(item.toString)
if created > date:
after = int(item.toString)
if after:
return after
return before
def _get_punted_dates(self, histories):
dates = []
for history in histories:
for item in history.items:
created = datetime.datetime.strptime(
re.sub(r'\..*$', '', history.created), '%Y-%m-%dT%H:%M:%S'
)
if not (self.start <= created <= self.end):
continue
if 'Sprint' != item.field:
continue
current_sprint = str(self.sprint['id'])
from_sprint = str(getattr(item, 'from'))
to_sprint = str(getattr(item, 'to'))
if from_sprint and current_sprint not in from_sprint:
continue
if current_sprint not in to_sprint:
dates.append(created)
return dates
def _get_resolution_date(self, histories):
for history in reversed(histories):
for item in history.items:
if 'resolution' == item.field and 'Done' == item.toString:
return datetime.datetime.strptime(
re.sub(r'\..*$', '', history.created),
'%Y-%m-%dT%H:%M:%S'
)
def calculate(self):
self._calculate_ideal()
self._calculate_completed()
self._calculate_unplanned()
def get_timeline(self):
dates = []
ideal = []
completed = []
unplanned = []
now = datetime.datetime.now()
for date, entry in self.timeline.items():
dates.append(entry['date'].strftime('%Y-%m-%d'))
ideal.append(entry['ideal'])
if entry['date'] <= now:
completed.append(entry['completed'])
unplanned.append(entry['unplanned'])
return {
'dates': dates,
'ideal': ideal,
'completed': completed,
'unplanned': unplanned
}
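# Example of the returned structure (values are illustrative): completed and
# unplanned stop at "today", so their lists may be shorter than dates/ideal:
#   {"dates": ["2020-01-01", ...], "ideal": [20, 15, ...],
#    "completed": [20, 17], "unplanned": [20, 22]}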
@click.command()
@click.argument('output', type=click.File('wb'))
@click.pass_obj
def burndown(obj, output=None):
session = obj
jira, customfield = (session.jira, session.customfield)
try:
board, sprints = app.input.for_board(jira)
id = app.input.for_sprint(jira, board, sprints)
except Exception as e:
click.secho(str(e), fg='red')
exit(1)
click.echo('Fetching sprint report: ', nl=False)
sprint = jira.sprint_info(board, id)
report = jira.sprint_report(board, id)
if not sprint or not report or not report.all:
click.secho('Nothing found for sprint ID {0}'.format(id), fg='red')
exit(1)
click.secho('OK', fg='green')
labels = app.input.for_labels()
issues, report = app.filter.for_labels(jira, report, labels)
commitment = click.prompt('Enter commitment', type=click.INT)
burndown = Burndown(
sprint, commitment, report, issues, customfield
)
timeline = burndown.get_timeline()
velocity = commitment - timeline['completed'][-1]
for key, message in burndown.faulty.items():
click.echo('{0} is faulty: {1}'.format(key, message))
if 'CLOSED' == sprint['state']:
click.echo('Velocity: {0}'.format(velocity))
click.echo('Writing SVG to {0}'.format(output.name))
style = pygal.style.Style(
background='transparent',
colors=('#b4b4b4', '#00b400', '#b40000'),
foreground='#000000',
foreground_strong='#000000',
foreground_subtle='#000000',
plot_background='transparent',
)
chart = pygal.Line(
interpolate='hermite',
style=style,
x_label_rotation=90
)
chart.title = 'Burndown, {0}'.format(sprint['name'])
chart.x_title = 'Dates'
chart.x_labels = timeline['dates']
chart.y_title = 'Story Points'
chart.add('Ideal', timeline['ideal'])
chart.add('Completed', timeline['completed'])
chart.add('Unplanned', timeline['unplanned'])
chart.value_formatter = lambda x: "%d" % x
output.write(chart.render())
click.echo('Done!')
|
|
"""Sensor for checking the status of London air."""
from datetime import timedelta
import logging
import requests
import voluptuous as vol
from homeassistant.components.sensor import PLATFORM_SCHEMA
from homeassistant.const import HTTP_OK
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.entity import Entity
from homeassistant.util import Throttle
_LOGGER = logging.getLogger(__name__)
CONF_LOCATIONS = "locations"
SCAN_INTERVAL = timedelta(minutes=30)
AUTHORITIES = [
"Barking and Dagenham",
"Bexley",
"Brent",
"Camden",
"City of London",
"Croydon",
"Ealing",
"Enfield",
"Greenwich",
"Hackney",
"Haringey",
"Harrow",
"Havering",
"Hillingdon",
"Islington",
"Kensington and Chelsea",
"Kingston",
"Lambeth",
"Lewisham",
"Merton",
"Redbridge",
"Richmond",
"Southwark",
"Sutton",
"Tower Hamlets",
"Wandsworth",
"Westminster",
]
URL = (
"http://api.erg.kcl.ac.uk/AirQuality/Hourly/"
"MonitoringIndex/GroupName=London/Json"
)
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
{
vol.Optional(CONF_LOCATIONS, default=AUTHORITIES): vol.All(
cv.ensure_list, [vol.In(AUTHORITIES)]
)
}
)
def setup_platform(hass, config, add_entities, discovery_info=None):
"""Set up the London Air sensor."""
data = APIData()
data.update()
sensors = []
for name in config.get(CONF_LOCATIONS):
sensors.append(AirSensor(name, data))
add_entities(sensors, True)
class APIData:
"""Get the latest data for all authorities."""
def __init__(self):
"""Initialize the AirData object."""
self.data = None
# Update only once per scan interval.
@Throttle(SCAN_INTERVAL)
def update(self):
"""Get the latest data from TFL."""
response = requests.get(URL, timeout=10)
if response.status_code != HTTP_OK:
_LOGGER.warning("Invalid response from API")
else:
self.data = parse_api_response(response.json())
class AirSensor(Entity):
"""Single authority air sensor."""
ICON = "mdi:cloud-outline"
def __init__(self, name, APIdata):
"""Initialize the sensor."""
self._name = name
self._api_data = APIdata
self._site_data = None
self._state = None
self._updated = None
@property
def name(self):
"""Return the name of the sensor."""
return self._name
@property
def state(self):
"""Return the state of the sensor."""
return self._state
@property
def site_data(self):
"""Return the dict of sites data."""
return self._site_data
@property
def icon(self):
"""Icon to use in the frontend, if any."""
return self.ICON
@property
def device_state_attributes(self):
"""Return other details about the sensor state."""
attrs = {}
attrs["updated"] = self._updated
attrs["sites"] = len(self._site_data)
attrs["data"] = self._site_data
return attrs
def update(self):
"""Update the sensor."""
self._api_data.update()
self._site_data = self._api_data.data[self._name]
self._updated = self._site_data[0]["updated"]
sites_status = []
for site in self._site_data:
if site["pollutants_status"] != "no_species_data":
sites_status.append(site["pollutants_status"])
if sites_status:
self._state = max(set(sites_status), key=sites_status.count)
else:
self._state = None
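# The state reduces to the most common per-site status, e.g. (illustrative)
# ["Low", "Low", "Moderate"] -> "Low"; sites reporting "no_species_data"
# are ignored above.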
def parse_species(species_data):
"""Iterate over list of species at each site."""
parsed_species_data = []
quality_list = []
for species in species_data:
if species["@AirQualityBand"] != "No data":
species_dict = {}
species_dict["description"] = species["@SpeciesDescription"]
species_dict["code"] = species["@SpeciesCode"]
species_dict["quality"] = species["@AirQualityBand"]
species_dict["index"] = species["@AirQualityIndex"]
species_dict[
"summary"
] = f"{species_dict['code']} is {species_dict['quality']}"
parsed_species_data.append(species_dict)
quality_list.append(species_dict["quality"])
return parsed_species_data, quality_list
def parse_site(entry_sites_data):
"""Iterate over all sites at an authority."""
authority_data = []
for site in entry_sites_data:
site_data = {}
species_data = []
site_data["updated"] = site["@BulletinDate"]
site_data["latitude"] = site["@Latitude"]
site_data["longitude"] = site["@Longitude"]
site_data["site_code"] = site["@SiteCode"]
site_data["site_name"] = site["@SiteName"].split("-")[-1].lstrip()
site_data["site_type"] = site["@SiteType"]
if isinstance(site["Species"], dict):
species_data = [site["Species"]]
else:
species_data = site["Species"]
parsed_species_data, quality_list = parse_species(species_data)
if not parsed_species_data:
parsed_species_data.append("no_species_data")
site_data["pollutants"] = parsed_species_data
if quality_list:
site_data["pollutants_status"] = max(
set(quality_list), key=quality_list.count
)
site_data["number_of_pollutants"] = len(quality_list)
else:
site_data["pollutants_status"] = "no_species_data"
site_data["number_of_pollutants"] = 0
authority_data.append(site_data)
return authority_data
def parse_api_response(response):
"""Parse return dict or list of data from API."""
data = dict.fromkeys(AUTHORITIES)
for authority in AUTHORITIES:
for entry in response["HourlyAirQualityIndex"]["LocalAuthority"]:
if entry["@LocalAuthorityName"] == authority:
if isinstance(entry["Site"], dict):
entry_sites_data = [entry["Site"]]
else:
entry_sites_data = entry["Site"]
data[authority] = parse_site(entry_sites_data)
return data
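# Rough shape of the parsed result (keys and values are illustrative; an
# authority missing from the API response keeps its None placeholder):
#   {"Camden": [{"updated": "...", "site_code": "CD1",
#                "pollutants": [...], "pollutants_status": "Low",
#                "number_of_pollutants": 2}, ...],
#    "Bexley": None,
#    ...}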
|
|
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
Tests for model evaluation.
Compare the results of some models with other programs.
"""
import pickle
import pytest
import numpy as np
from numpy.testing import utils
from .example_models import models_1D, models_2D
from .. import fitting, models
from ..core import FittableModel
from ..polynomial import PolynomialBase
from ... import units as u
from ...utils import minversion
from ...tests.helper import assert_quantity_allclose
from ...utils import NumpyRNGContext
try:
import scipy
from scipy import optimize # pylint: disable=W0611
HAS_SCIPY = True
except ImportError:
HAS_SCIPY = False
HAS_SCIPY_14 = HAS_SCIPY and minversion(scipy, "0.14")
@pytest.mark.skipif('not HAS_SCIPY')
def test_custom_model(amplitude=4, frequency=1):
def sine_model(x, amplitude=4, frequency=1):
"""
Model function
"""
return amplitude * np.sin(2 * np.pi * frequency * x)
def sine_deriv(x, amplitude=4, frequency=1):
"""
Jacobian of model function, e.g. derivative of the function with
respect to the *parameters*
"""
da = np.sin(2 * np.pi * frequency * x)
df = 2 * np.pi * x * amplitude * np.cos(2 * np.pi * frequency * x)
return np.vstack((da, df))
SineModel = models.custom_model(sine_model, fit_deriv=sine_deriv)
x = np.linspace(0, 4, 50)
sin_model = SineModel()
y = sin_model.evaluate(x, 5., 2.)
y_prime = sin_model.fit_deriv(x, 5., 2.)
np.random.seed(0)
data = sin_model(x) + np.random.rand(len(x)) - 0.5
fitter = fitting.LevMarLSQFitter()
model = fitter(sin_model, x, data)
assert np.all((np.array([model.amplitude.value, model.frequency.value]) -
np.array([amplitude, frequency])) < 0.001)
def test_custom_model_init():
@models.custom_model
def SineModel(x, amplitude=4, frequency=1):
"""Model function"""
return amplitude * np.sin(2 * np.pi * frequency * x)
sin_model = SineModel(amplitude=2., frequency=0.5)
assert sin_model.amplitude == 2.
assert sin_model.frequency == 0.5
def test_custom_model_defaults():
@models.custom_model
def SineModel(x, amplitude=4, frequency=1):
"""Model function"""
return amplitude * np.sin(2 * np.pi * frequency * x)
sin_model = SineModel()
assert SineModel.amplitude.default == 4
assert SineModel.frequency.default == 1
assert sin_model.amplitude == 4
assert sin_model.frequency == 1
def test_custom_model_bounding_box():
"""Test bounding box evaluation for a 3D model"""
def ellipsoid(x, y, z, x0=13, y0=10, z0=8, a=4, b=3, c=2, amp=1):
rsq = ((x - x0) / a) ** 2 + ((y - y0) / b) ** 2 + ((z - z0) / c) ** 2
val = (rsq < 1) * amp
return val
class Ellipsoid3D(models.custom_model(ellipsoid)):
@property
def bounding_box(self):
return ((self.z0 - self.c, self.z0 + self.c),
(self.y0 - self.b, self.y0 + self.b),
(self.x0 - self.a, self.x0 + self.a))
model = Ellipsoid3D()
bbox = model.bounding_box
zlim, ylim, xlim = bbox
dz, dy, dx = np.diff(bbox) / 2
z1, y1, x1 = np.mgrid[slice(zlim[0], zlim[1] + 1),
slice(ylim[0], ylim[1] + 1),
slice(xlim[0], xlim[1] + 1)]
z2, y2, x2 = np.mgrid[slice(zlim[0] - dz, zlim[1] + dz + 1),
slice(ylim[0] - dy, ylim[1] + dy + 1),
slice(xlim[0] - dx, xlim[1] + dx + 1)]
arr = model(x2, y2, z2)
sub_arr = model(x1, y1, z1)
# check for flux agreement
assert abs(arr.sum() - sub_arr.sum()) < arr.sum() * 1e-7
class Fittable2DModelTester:
"""
Test class for all two dimensional parametric models.
Test values have to be defined in example_models.py. It currently tests the
model with different input types, evaluates the model at different
positions and ensures that it gives the correct values. It also tests
whether the model works with non-linear fitters.
This can be used as a base class for user defined model testing.
"""
def setup_class(self):
self.N = 100
self.M = 100
self.eval_error = 0.0001
self.fit_error = 0.1
self.x = 5.3
self.y = 6.7
self.x1 = np.arange(1, 10, .1)
self.y1 = np.arange(1, 10, .1)
self.y2, self.x2 = np.mgrid[:10, :8]
def test_input2D(self, model_class, test_parameters):
"""Test model with different input types."""
model = create_model(model_class, test_parameters)
model(self.x, self.y)
model(self.x1, self.y1)
model(self.x2, self.y2)
def test_eval2D(self, model_class, test_parameters):
"""Test model values add certain given points"""
model = create_model(model_class, test_parameters)
x = test_parameters['x_values']
y = test_parameters['y_values']
z = test_parameters['z_values']
assert np.all((np.abs(model(x, y) - z) < self.eval_error))
def test_bounding_box2D(self, model_class, test_parameters):
"""Test bounding box evaluation"""
model = create_model(model_class, test_parameters)
# testing setter
model.bounding_box = ((-5, 5), (-5, 5))
assert model.bounding_box == ((-5, 5), (-5, 5))
model.bounding_box = None
with pytest.raises(NotImplementedError):
model.bounding_box
# test the exception if dimensions don't match
with pytest.raises(ValueError):
model.bounding_box = (-5, 5)
del model.bounding_box
try:
bbox = model.bounding_box
except NotImplementedError:
pytest.skip("Bounding_box is not defined for model.")
ylim, xlim = bbox
dy, dx = np.diff(bbox)/2
y1, x1 = np.mgrid[slice(ylim[0], ylim[1] + 1),
slice(xlim[0], xlim[1] + 1)]
y2, x2 = np.mgrid[slice(ylim[0] - dy, ylim[1] + dy + 1),
slice(xlim[0] - dx, xlim[1] + dx + 1)]
arr = model(x2, y2)
sub_arr = model(x1, y1)
# check for flux agreement
assert abs(arr.sum() - sub_arr.sum()) < arr.sum() * 1e-7
@pytest.mark.skipif('not HAS_SCIPY')
def test_fitter2D(self, model_class, test_parameters):
"""Test if the parametric model works with the fitter."""
x_lim = test_parameters['x_lim']
y_lim = test_parameters['y_lim']
parameters = test_parameters['parameters']
model = create_model(model_class, test_parameters)
if isinstance(parameters, dict):
parameters = [parameters[name] for name in model.param_names]
if "log_fit" in test_parameters:
if test_parameters['log_fit']:
x = np.logspace(x_lim[0], x_lim[1], self.N)
y = np.logspace(y_lim[0], y_lim[1], self.N)
else:
x = np.linspace(x_lim[0], x_lim[1], self.N)
y = np.linspace(y_lim[0], y_lim[1], self.N)
xv, yv = np.meshgrid(x, y)
np.random.seed(0)
# add 10% noise to the amplitude
noise = np.random.rand(self.N, self.N) - 0.5
data = model(xv, yv) + 0.1 * parameters[0] * noise
fitter = fitting.LevMarLSQFitter()
new_model = fitter(model, xv, yv, data)
params = [getattr(new_model, name) for name in new_model.param_names]
fixed = [param.fixed for param in params]
expected = np.array([val for val, fixed in zip(parameters, fixed)
if not fixed])
fitted = np.array([param.value for param in params
if not param.fixed])
utils.assert_allclose(fitted, expected,
atol=self.fit_error)
@pytest.mark.skipif('not HAS_SCIPY')
def test_deriv_2D(self, model_class, test_parameters):
"""
Test the derivative of a model by fitting with an estimated and
analytical derivative.
"""
x_lim = test_parameters['x_lim']
y_lim = test_parameters['y_lim']
if model_class.fit_deriv is None:
pytest.skip("Derivative function is not defined for model.")
if issubclass(model_class, PolynomialBase):
pytest.skip("Skip testing derivative of polynomials.")
if "log_fit" in test_parameters:
if test_parameters['log_fit']:
x = np.logspace(x_lim[0], x_lim[1], self.N)
y = np.logspace(y_lim[0], y_lim[1], self.M)
else:
x = np.linspace(x_lim[0], x_lim[1], self.N)
y = np.linspace(y_lim[0], y_lim[1], self.M)
xv, yv = np.meshgrid(x, y)
try:
model_with_deriv = create_model(model_class, test_parameters,
use_constraints=False,
parameter_key='deriv_initial')
model_no_deriv = create_model(model_class, test_parameters,
use_constraints=False,
parameter_key='deriv_initial')
model = create_model(model_class, test_parameters,
use_constraints=False,
parameter_key='deriv_initial')
except KeyError:
model_with_deriv = create_model(model_class, test_parameters,
use_constraints=False)
model_no_deriv = create_model(model_class, test_parameters,
use_constraints=False)
model = create_model(model_class, test_parameters,
use_constraints=False)
# add 10% noise to the amplitude
rsn = np.random.RandomState(1234567890)
amplitude = test_parameters['parameters'][0]
n = 0.1 * amplitude * (rsn.rand(self.M, self.N) - 0.5)
data = model(xv, yv) + n
fitter_with_deriv = fitting.LevMarLSQFitter()
new_model_with_deriv = fitter_with_deriv(model_with_deriv, xv, yv,
data)
fitter_no_deriv = fitting.LevMarLSQFitter()
new_model_no_deriv = fitter_no_deriv(model_no_deriv, xv, yv, data,
estimate_jacobian=True)
utils.assert_allclose(new_model_with_deriv.parameters,
new_model_no_deriv.parameters,
rtol=0.1)
class Fittable1DModelTester:
"""
Test class for all one dimensional parametric models.
Test values have to be defined in example_models.py. It currently tests the
model with different input types, evaluates the model at different
positions and ensures that it gives the correct values. It also tests
whether the model works with non-linear fitters.
This can be used as a base class for user defined model testing.
"""
def setup_class(self):
self.N = 100
self.M = 100
self.eval_error = 0.0001
self.fit_error = 0.1
self.x = 5.3
self.y = 6.7
self.x1 = np.arange(1, 10, .1)
self.y1 = np.arange(1, 10, .1)
self.y2, self.x2 = np.mgrid[:10, :8]
def test_input1D(self, model_class, test_parameters):
"""Test model with different input types."""
model = create_model(model_class, test_parameters)
model(self.x)
model(self.x1)
model(self.x2)
def test_eval1D(self, model_class, test_parameters):
"""
Test model values at certain given points
"""
model = create_model(model_class, test_parameters)
x = test_parameters['x_values']
y = test_parameters['y_values']
utils.assert_allclose(model(x), y, atol=self.eval_error)
def test_bounding_box1D(self, model_class, test_parameters):
"""Test bounding box evaluation"""
model = create_model(model_class, test_parameters)
# testing setter
model.bounding_box = (-5, 5)
model.bounding_box = None
with pytest.raises(NotImplementedError):
model.bounding_box
del model.bounding_box
# test exception if dimensions don't match
with pytest.raises(ValueError):
model.bounding_box = 5
try:
bbox = model.bounding_box
except NotImplementedError:
pytest.skip("Bounding_box is not defined for model.")
if isinstance(model, models.Lorentz1D):
rtol = 0.01 # 1% agreement is enough due to very extended wings
ddx = 0.1 # Finer sampling to "integrate" flux for narrow peak
else:
rtol = 1e-7
ddx = 1
dx = np.diff(bbox) / 2
x1 = np.mgrid[slice(bbox[0], bbox[1] + 1, ddx)]
x2 = np.mgrid[slice(bbox[0] - dx, bbox[1] + dx + 1, ddx)]
arr = model(x2)
sub_arr = model(x1)
# check for flux agreement
assert abs(arr.sum() - sub_arr.sum()) < arr.sum() * rtol
@pytest.mark.skipif('not HAS_SCIPY')
def test_fitter1D(self, model_class, test_parameters):
"""
Test if the parametric model works with the fitter.
"""
x_lim = test_parameters['x_lim']
parameters = test_parameters['parameters']
model = create_model(model_class, test_parameters)
if isinstance(parameters, dict):
parameters = [parameters[name] for name in model.param_names]
if "log_fit" in test_parameters:
if test_parameters['log_fit']:
x = np.logspace(x_lim[0], x_lim[1], self.N)
else:
x = np.linspace(x_lim[0], x_lim[1], self.N)
np.random.seed(0)
# add 10% noise to the amplitude
relative_noise_amplitude = 0.01
data = ((1 + relative_noise_amplitude * np.random.randn(len(x))) *
model(x))
fitter = fitting.LevMarLSQFitter()
new_model = fitter(model, x, data)
# Only check parameters that were free in the fit
params = [getattr(new_model, name) for name in new_model.param_names]
fixed = [param.fixed for param in params]
expected = np.array([val for val, fixed in zip(parameters, fixed)
if not fixed])
fitted = np.array([param.value for param in params
if not param.fixed])
utils.assert_allclose(fitted, expected, atol=self.fit_error)
@pytest.mark.skipif('not HAS_SCIPY')
def test_deriv_1D(self, model_class, test_parameters):
"""
Test the derivative of a model by comparing results with an estimated
derivative.
"""
x_lim = test_parameters['x_lim']
if model_class.fit_deriv is None:
pytest.skip("Derivative function is not defined for model.")
if issubclass(model_class, PolynomialBase):
pytest.skip("Skip testing derivative of polynomials.")
if "log_fit" in test_parameters:
if test_parameters['log_fit']:
x = np.logspace(x_lim[0], x_lim[1], self.N)
else:
x = np.linspace(x_lim[0], x_lim[1], self.N)
parameters = test_parameters['parameters']
model_with_deriv = create_model(model_class, test_parameters,
use_constraints=False)
model_no_deriv = create_model(model_class, test_parameters,
use_constraints=False)
# add 10% noise to the amplitude
rsn = np.random.RandomState(1234567890)
n = 0.1 * parameters[0] * (rsn.rand(self.N) - 0.5)
data = model_with_deriv(x) + n
fitter_with_deriv = fitting.LevMarLSQFitter()
new_model_with_deriv = fitter_with_deriv(model_with_deriv, x, data)
fitter_no_deriv = fitting.LevMarLSQFitter()
new_model_no_deriv = fitter_no_deriv(model_no_deriv, x, data,
estimate_jacobian=True)
utils.assert_allclose(new_model_with_deriv.parameters,
new_model_no_deriv.parameters, atol=0.15)
def create_model(model_class, test_parameters, use_constraints=True,
parameter_key='parameters'):
"""Create instance of model class."""
constraints = {}
if issubclass(model_class, PolynomialBase):
return model_class(**test_parameters[parameter_key])
elif issubclass(model_class, FittableModel):
if "requires_scipy" in test_parameters and not HAS_SCIPY:
pytest.skip("SciPy not found")
if use_constraints:
if 'constraints' in test_parameters:
constraints = test_parameters['constraints']
return model_class(*test_parameters[parameter_key], **constraints)
@pytest.mark.parametrize(('model_class', 'test_parameters'),
sorted(models_1D.items(), key=lambda x: str(x[0])))
class TestFittable1DModels(Fittable1DModelTester):
pass
@pytest.mark.parametrize(('model_class', 'test_parameters'),
sorted(models_2D.items(), key=lambda x: str(x[0])))
class TestFittable2DModels(Fittable2DModelTester):
pass
def test_ShiftModel():
# Shift by a scalar
m = models.Shift(42)
assert m(0) == 42
utils.assert_equal(m([1, 2]), [43, 44])
# Shift by a list
m = models.Shift([42, 43], n_models=2)
utils.assert_equal(m(0), [42, 43])
utils.assert_equal(m([1, 2], model_set_axis=False),
[[43, 44], [44, 45]])
def test_ScaleModel():
# Scale by a scalar
m = models.Scale(42)
assert m(0) == 0
utils.assert_equal(m([1, 2]), [42, 84])
# Scale by a list
m = models.Scale([42, 43], n_models=2)
utils.assert_equal(m(0), [0, 0])
utils.assert_equal(m([1, 2], model_set_axis=False),
[[42, 84], [43, 86]])
def test_voigt_model():
"""
Currently just tests that the model peaks at its origin.
Regression test for https://github.com/astropy/astropy/issues/3942
"""
m = models.Voigt1D(x_0=5, amplitude_L=10, fwhm_L=0.5, fwhm_G=0.9)
x = np.arange(0, 10, 0.01)
y = m(x)
assert y[500] == y.max() # y[500] is right at the center
def test_model_instance_repr():
m = models.Gaussian1D(1.5, 2.5, 3.5)
assert repr(m) == '<Gaussian1D(amplitude=1.5, mean=2.5, stddev=3.5)>'
@pytest.mark.skipif("not HAS_SCIPY_14")
def test_tabular_interp_1d():
"""
Test Tabular1D model.
"""
points = np.arange(0, 5)
values = [1., 10, 2, 45, -3]
LookupTable = models.tabular_model(1)
model = LookupTable(points=points, lookup_table=values)
xnew = [0., .7, 1.4, 2.1, 3.9]
ans1 = [1., 7.3, 6.8, 6.3, 1.8]
utils.assert_allclose(model(xnew), ans1)
# Test evaluate without passing `points`.
model = LookupTable(lookup_table=values)
utils.assert_allclose(model(xnew), ans1)
# Test bounds error.
xextrap = [0., .7, 1.4, 2.1, 3.9, 4.1]
with pytest.raises(ValueError):
model(xextrap)
# test extrapolation and fill value
model = LookupTable(lookup_table=values, bounds_error=False,
fill_value=None)
utils.assert_allclose(model(xextrap),
[1., 7.3, 6.8, 6.3, 1.8, -7.8])
# Test unit support
xnew = xnew * u.nm
ans1 = ans1 * u.nJy
model = LookupTable(points=points*u.nm, lookup_table=values*u.nJy)
assert_quantity_allclose(model(xnew), ans1)
assert_quantity_allclose(model(xnew.to(u.nm)), ans1)
assert model.bounding_box == (0 * u.nm, 4 * u.nm)
# Test fill value unit conversion and unitless input on table with unit
model = LookupTable([1, 2, 3], [10, 20, 30] * u.nJy, bounds_error=False,
fill_value=1e-33*(u.W / (u.m * u.m * u.Hz)))
assert_quantity_allclose(model(np.arange(5)),
[100, 10, 20, 30, 100] * u.nJy)
@pytest.mark.skipif("not HAS_SCIPY_14")
def test_tabular_interp_2d():
table = np.array([
[-0.04614432, -0.02512547, -0.00619557, 0.0144165, 0.0297525],
[-0.04510594, -0.03183369, -0.01118008, 0.01201388, 0.02496205],
[-0.05464094, -0.02804499, -0.00960086, 0.01134333, 0.02284104],
[-0.04879338, -0.02539565, -0.00440462, 0.01795145, 0.02122417],
[-0.03637372, -0.01630025, -0.00157902, 0.01649774, 0.01952131]])
points = np.arange(0, 5)
points = (points, points)
xnew = np.array([0., .7, 1.4, 2.1, 3.9])
LookupTable = models.tabular_model(2)
model = LookupTable(points, table)
znew = model(xnew, xnew)
result = np.array(
[-0.04614432, -0.03450009, -0.02241028, -0.0069727, 0.01938675])
utils.assert_allclose(znew, result, atol=1e-7)
# test 2D arrays as input
a = np.arange(12).reshape((3, 4))
y, x = np.mgrid[:3, :4]
t = models.Tabular2D(lookup_table=a)
r = t(y, x)
utils.assert_allclose(a, r)
with pytest.raises(ValueError):
model = LookupTable(points=([1.2, 2.3], [1.2, 6.7], [3, 4]))
with pytest.raises(ValueError):
model = LookupTable(lookup_table=[1, 2, 3])
with pytest.raises(NotImplementedError):
model = LookupTable(n_models=2)
with pytest.raises(ValueError):
model = LookupTable(([1, 2], [3, 4]), [5, 6])
with pytest.raises(ValueError):
model = LookupTable(([1, 2] * u.m, [3, 4]), [[5, 6], [7, 8]])
with pytest.raises(ValueError):
model = LookupTable(points, table, bounds_error=False,
fill_value=1*u.Jy)
# Test unit support
points = points[0] * u.nm
points = (points, points)
xnew = xnew * u.nm
model = LookupTable(points, table * u.nJy)
result = result * u.nJy
assert_quantity_allclose(model(xnew, xnew), result, atol=1e-7*u.nJy)
xnew = xnew.to(u.m)
assert_quantity_allclose(model(xnew, xnew), result, atol=1e-7*u.nJy)
bbox = (0 * u.nm, 4 * u.nm)
bbox = (bbox, bbox)
assert model.bounding_box == bbox
@pytest.mark.skipif("not HAS_SCIPY_14")
def test_tabular_nd():
a = np.arange(24).reshape((2, 3, 4))
x, y, z = np.mgrid[:2, :3, :4]
tab = models.tabular_model(3)
t = tab(lookup_table=a)
result = t(x, y, z)
utils.assert_allclose(a, result)
with pytest.raises(ValueError):
models.tabular_model(0)
def test_with_bounding_box():
"""
Test the option to evaluate a model respecting
its bounding_box.
"""
p = models.Polynomial2D(2) & models.Polynomial2D(2)
m = models.Mapping((0, 1, 0, 1)) | p
with NumpyRNGContext(1234567):
m.parameters = np.random.rand(12)
m.bounding_box = ((3, 9), (1, 8))
x, y = np.mgrid[:10, :10]
a, b = m(x, y)
aw, bw = m(x, y, with_bounding_box=True)
ind = (~np.isnan(aw)).nonzero()
utils.assert_allclose(a[ind], aw[ind])
utils.assert_allclose(b[ind], bw[ind])
aw, bw = m(x, y, with_bounding_box=True, fill_value=1000)
ind = (aw != 1000).nonzero()
utils.assert_allclose(a[ind], aw[ind])
utils.assert_allclose(b[ind], bw[ind])
# test the order of bbox is not reversed for 1D models
p = models.Polynomial1D(1, c0=12, c1=2.3)
p.bounding_box = (0, 5)
assert(p(1) == p(1, with_bounding_box=True))
|
|
from django.db.models.sql import compiler
from datetime import datetime
import re
from django.db.models.base import Model
REV_ODIR = {
'ASC': 'DESC',
'DESC': 'ASC'
}
SQL_SERVER_8_LIMIT_QUERY = \
"""SELECT *
FROM (
SELECT TOP %(limit)s *
FROM (
%(orig_sql)s
ORDER BY %(ord)s
) AS %(table)s
ORDER BY %(rev_ord)s
) AS %(table)s
ORDER BY %(ord)s"""
SQL_SERVER_8_NO_LIMIT_QUERY = \
"""SELECT *
FROM %(table)s
WHERE %(key)s NOT IN (
%(orig_sql)s
ORDER BY %(ord)s
)"""
# Strategies for handling limit+offset emulation:
USE_ROW_NUMBER = 0 # For SQL Server >= 2005
USE_TOP_HMARK = 1 # For SQL Server 2000 when both limit and offset are provided
USE_TOP_LMARK = 2 # For SQL Server 2000 when offset but no limit is provided
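# Illustrative substitution into SQL_SERVER_8_LIMIT_QUERY (values are
# hypothetical): with limit=10, table="[myapp_entry]", ord="[id] ASC" and
# rev_ord="[id] DESC", the inner query is ordered by [id] ASC, the middle
# SELECT keeps the TOP 10 rows of the *reversed* ordering (the tail of the
# inner result), and the outer SELECT restores [id] ASC -- the usual
# TOP-based trick for emulating LIMIT/OFFSET on SQL Server 2000.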
class SQLCompiler(compiler.SQLCompiler):
def resolve_columns(self, row, fields=()):
index_start = len(self.query.extra_select.keys())
values = [self.query.convert_values(v, None, connection=self.connection) for v in row[:index_start]]
for value, field in map(None, row[index_start:], fields):
values.append(self.query.convert_values(value, field, connection=self.connection))
return tuple(values)
"""
use django as_sql with editing limit
"""
def as_sql(self, with_limits=True, with_col_aliases=False):
"""
Creates the SQL for this query. Returns the SQL string and list of
parameters.
If 'with_limits' is False, any limit/offset information is not included
in the query.
"""
do_offset = with_limits and (self.query.high_mark is not None or self.query.low_mark != 0)
if not do_offset:
return super(SQLCompiler, self).as_sql(with_limits=False,
with_col_aliases=with_col_aliases)
        select_re = re.compile(r'^SELECT[ ]+(DISTINCT\s)?')
query, params = super(SQLCompiler, self).as_sql(with_limits=False,
with_col_aliases=with_col_aliases)
m = select_re.match(query)
if with_limits and m != None:
num = None
insert = None
if self.query.high_mark is not None:
num = self.query.high_mark - self.query.low_mark
if num <= 0:
return None, None
insert = 'TOP %d' % num
if insert is not None:
if m.groups()[0] != None:
query = select_re.sub('SELECT DISTINCT %s ' % insert, query)
else:
query = select_re.sub('SELECT %s ' % insert, query)
return query, params
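# Illustrative sketch of the TOP rewrite performed by as_sql() above (table and
# column names are hypothetical): for a queryset sliced as qs[5:15],
# high_mark=15 and low_mark=5, so num=10 and
#   'SELECT [id], [name] FROM [myapp_item] ...'
# is rewritten to
#   'SELECT TOP 10 [id], [name] FROM [myapp_item] ...'
# (or 'SELECT DISTINCT TOP 10 ...' when the original query is DISTINCT).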
class SQLInsertCompiler(compiler.SQLInsertCompiler, SQLCompiler):
def as_sql_legacy(self):
# We don't need quote_name_unless_alias() here, since these are all
# going to be column names (so we can avoid the extra overhead).
qn = self.connection.ops.quote_name
opts = self.query.model._meta
returns_id = bool(self.return_id and
self.connection.features.can_return_id_from_insert)
if returns_id:
result = ['SET NOCOUNT ON']
else:
result = []
result.append('INSERT INTO %s' % qn(opts.db_table))
result.append('(%s)' % ', '.join([qn(c) for c in self.query.columns]))
values = [self.placeholder(*v) for v in self.query.values]
result.append('VALUES (%s)' % ', '.join(values))
if returns_id:
result.append(';\nSELECT SCOPE_IDENTITY()')
params = self.query.params
sql = ' '.join(result)
meta = self.query.get_meta()
if meta.has_auto_field:
# db_column is None if not explicitly specified by model field
auto_field_column = meta.auto_field.db_column or meta.auto_field.column
if auto_field_column in self.query.columns:
quoted_table = self.connection.ops.quote_name(meta.db_table)
if returns_id:
sql = "SET NOCOUNT ON"
else:
sql = ""
if len(self.query.columns) == 1 and not params:
sql += "INSERT INTO %s DEFAULT VALUES" % quoted_table
else:
sql += "SET IDENTITY_INSERT %s ON;\n%s;\nSET IDENTITY_INSERT %s OFF" % \
(quoted_table, sql, quoted_table)
if returns_id:
sql += '\n;SELECT SCOPE_IDENTITY()'
return sql, params
def as_sql(self):
from django.db.models.fields import DateTimeField
from django.db.models.fields import DateField
"""
using django as_sql()
with exclude Datetime field with None value
which is nullable
"""
# return super(SQLInsertCompiler, self).as_sql()
qn = self.connection.ops.quote_name
opts = self.query.model._meta
result = ['INSERT INTO %s' % qn(opts.db_table)]
has_fields = bool(self.query.fields)
preset_fields = self.query.fields if has_fields else [opts.pk]
fields = []
if len(self.query.objs) == 1:
            # Only filter out None-valued fields for single-row inserts;
            # multi-row inserts are passed through unchanged below, so they may
            # still fail if they rely on this filtering.
for field in preset_fields:
# if not isinstance(field, (DateField, DateTimeField)):
# fields.append(field)
if field.get_db_prep_save(
getattr(self.query.objs[0], field.attname) if self.query.raw else field.pre_save(self.query.objs[0], True), connection=self.connection) is not None:
fields.append(field)
elif field.blank is not True:
fields.append(field)
else:
fields = preset_fields
result.append('(%s)' % ', '.join([qn(f.column) for f in fields]))
if has_fields:
params = values = [
[
f.get_db_prep_save(getattr(obj, f.attname) if self.query.raw else f.pre_save(obj, True), connection=self.connection)
for f in fields
]
for obj in self.query.objs
]
else:
values = [[self.connection.ops.pk_default_value()] for obj in self.query.objs]
params = [[]]
fields = [None]
can_bulk = (not any(hasattr(field, "get_placeholder") for field in fields) and
not self.return_id and self.connection.features.has_bulk_insert)
if can_bulk:
placeholders = [["%s"] * len(fields)]
else:
placeholders = [
[self.placeholder(field, v) for field, v in zip(fields, val)]
for val in values
]
# Oracle Spatial needs to remove some values due to #10888
params = self.connection.ops.modify_insert_params(placeholders, params)
if self.return_id and self.connection.features.can_return_id_from_insert:
params = params[0]
col = "%s.%s" % (qn(opts.db_table), qn(opts.pk.column))
result.append("VALUES (%s)" % ", ".join(placeholders[0]))
r_fmt, r_params = self.connection.ops.return_insert_id()
# Skip empty r_fmt to allow subclasses to customize behaviour for
# 3rd party backends. Refs #19096.
if r_fmt:
result.append(r_fmt % col)
params += r_params
return [(" ".join(result), tuple(params))]
if can_bulk:
result.append(self.connection.ops.bulk_insert_sql(fields, len(values)))
return [(" ".join(result), tuple([v for val in values for v in val]))]
else:
return [
(" ".join(result + ["VALUES (%s)" % ", ".join(p)]), vals)
for p, vals in zip(placeholders, params)
]
class SQLDeleteCompiler(compiler.SQLDeleteCompiler, SQLCompiler):
pass
class SQLUpdateCompiler(compiler.SQLUpdateCompiler, SQLCompiler):
def as_sql(self):
"""
Copy of django UpdateCommpiler as_sql
need cheack datetime field
"""
self.pre_sql_setup()
if not self.query.values:
return '', ()
table = self.query.tables[0]
qn = self.quote_name_unless_alias
result = ['UPDATE %s' % qn(table)]
result.append('SET')
values, update_params = [], []
for field, model, val in self.query.values:
if hasattr(val, 'prepare_database_save'):
val = val.prepare_database_save(field)
else:
val = field.get_db_prep_save(val, connection=self.connection)
# Getting the placeholder for the field.
if hasattr(field, 'get_placeholder'):
placeholder = field.get_placeholder(val, self.connection)
else:
placeholder = '%s'
if hasattr(val, 'evaluate'):
val = SQLEvaluator(val, self.query, allow_joins=False)
name = field.column
if hasattr(val, 'as_sql'):
sql, params = val.as_sql(qn, self.connection)
values.append('%s = %s' % (qn(name), sql))
update_params.extend(params)
elif val is not None:
values.append('%s = %s' % (qn(name), placeholder))
update_params.append(val)
else:
values.append('%s = NULL' % qn(name))
if not values:
return '', ()
result.append(', '.join(values))
where, params = self.query.where.as_sql(qn=qn, connection=self.connection)
if where:
result.append('WHERE %s' % where)
return ' '.join(result), tuple(update_params + params)
class SQLAggregateCompiler(compiler.SQLAggregateCompiler, SQLCompiler):
pass
class SQLDateCompiler(compiler.SQLDateCompiler, SQLCompiler):
pass
|
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import gym
import numpy as np
import pickle
import unittest
import ray
from ray.rllib.agents.ppo import PPOTrainer
from ray.rllib.policy.rnn_sequencing import chop_into_sequences, \
add_time_dimension
from ray.rllib.models import ModelCatalog
from ray.rllib.models.tf.misc import linear, normc_initializer
from ray.rllib.models.model import Model
from ray.tune.registry import register_env
from ray.rllib.utils import try_import_tf
tf = try_import_tf()
class LSTMUtilsTest(unittest.TestCase):
def testBasic(self):
eps_ids = [1, 1, 1, 5, 5, 5, 5, 5]
agent_ids = [1, 1, 1, 1, 1, 1, 1, 1]
f = [[101, 102, 103, 201, 202, 203, 204, 205],
[[101], [102], [103], [201], [202], [203], [204], [205]]]
s = [[209, 208, 207, 109, 108, 107, 106, 105]]
f_pad, s_init, seq_lens = chop_into_sequences(eps_ids,
np.ones_like(eps_ids),
agent_ids, f, s, 4)
self.assertEqual([f.tolist() for f in f_pad], [
[101, 102, 103, 0, 201, 202, 203, 204, 205, 0, 0, 0],
[[101], [102], [103], [0], [201], [202], [203], [204], [205], [0],
[0], [0]],
])
self.assertEqual([s.tolist() for s in s_init], [[209, 109, 105]])
self.assertEqual(seq_lens.tolist(), [3, 4, 1])
def testBatchId(self):
eps_ids = [1, 1, 1, 5, 5, 5, 5, 5]
batch_ids = [1, 1, 2, 2, 3, 3, 4, 4]
agent_ids = [1, 1, 1, 1, 1, 1, 1, 1]
f = [[101, 102, 103, 201, 202, 203, 204, 205],
[[101], [102], [103], [201], [202], [203], [204], [205]]]
s = [[209, 208, 207, 109, 108, 107, 106, 105]]
_, _, seq_lens = chop_into_sequences(eps_ids, batch_ids, agent_ids, f,
s, 4)
self.assertEqual(seq_lens.tolist(), [2, 1, 1, 2, 2])
def testMultiAgent(self):
eps_ids = [1, 1, 1, 5, 5, 5, 5, 5]
agent_ids = [1, 1, 2, 1, 1, 2, 2, 3]
f = [[101, 102, 103, 201, 202, 203, 204, 205],
[[101], [102], [103], [201], [202], [203], [204], [205]]]
s = [[209, 208, 207, 109, 108, 107, 106, 105]]
f_pad, s_init, seq_lens = chop_into_sequences(
eps_ids,
np.ones_like(eps_ids),
agent_ids,
f,
s,
4,
dynamic_max=False)
self.assertEqual(seq_lens.tolist(), [2, 1, 2, 2, 1])
self.assertEqual(len(f_pad[0]), 20)
self.assertEqual(len(s_init[0]), 5)
def testDynamicMaxLen(self):
eps_ids = [5, 2, 2]
agent_ids = [2, 2, 2]
f = [[1, 1, 1]]
s = [[1, 1, 1]]
f_pad, s_init, seq_lens = chop_into_sequences(eps_ids,
np.ones_like(eps_ids),
agent_ids, f, s, 4)
self.assertEqual([f.tolist() for f in f_pad], [[1, 0, 1, 1]])
self.assertEqual([s.tolist() for s in s_init], [[1, 1]])
self.assertEqual(seq_lens.tolist(), [1, 2])
class RNNSpyModel(Model):
capture_index = 0
def _build_layers_v2(self, input_dict, num_outputs, options):
# Previously, a new class object was created during
# deserialization and this `capture_index`
# variable would be refreshed between class instantiations.
# This behavior is no longer the case, so we manually refresh
# the variable.
RNNSpyModel.capture_index = 0
def spy(sequences, state_in, state_out, seq_lens):
if len(sequences) == 1:
return 0 # don't capture inference inputs
# TF runs this function in an isolated context, so we have to use
# redis to communicate back to our suite
ray.experimental.internal_kv._internal_kv_put(
"rnn_spy_in_{}".format(RNNSpyModel.capture_index),
pickle.dumps({
"sequences": sequences,
"state_in": state_in,
"state_out": state_out,
"seq_lens": seq_lens
}),
overwrite=True)
RNNSpyModel.capture_index += 1
return 0
features = input_dict["obs"]
cell_size = 3
last_layer = add_time_dimension(features, self.seq_lens)
# Setup the LSTM cell
lstm = tf.nn.rnn_cell.BasicLSTMCell(cell_size, state_is_tuple=True)
self.state_init = [
np.zeros(lstm.state_size.c, np.float32),
np.zeros(lstm.state_size.h, np.float32)
]
# Setup LSTM inputs
if self.state_in:
c_in, h_in = self.state_in
else:
c_in = tf.placeholder(
tf.float32, [None, lstm.state_size.c], name="c")
h_in = tf.placeholder(
tf.float32, [None, lstm.state_size.h], name="h")
self.state_in = [c_in, h_in]
# Setup LSTM outputs
state_in = tf.nn.rnn_cell.LSTMStateTuple(c_in, h_in)
lstm_out, lstm_state = tf.nn.dynamic_rnn(
lstm,
last_layer,
initial_state=state_in,
sequence_length=self.seq_lens,
time_major=False,
dtype=tf.float32)
self.state_out = list(lstm_state)
spy_fn = tf.py_func(
spy, [
last_layer,
self.state_in,
self.state_out,
self.seq_lens,
],
tf.int64,
stateful=True)
# Compute outputs
with tf.control_dependencies([spy_fn]):
last_layer = tf.reshape(lstm_out, [-1, cell_size])
logits = linear(last_layer, num_outputs, "action",
normc_initializer(0.01))
return logits, last_layer
class DebugCounterEnv(gym.Env):
def __init__(self):
self.action_space = gym.spaces.Discrete(2)
self.observation_space = gym.spaces.Box(0, 100, (1, ))
self.i = 0
def reset(self):
self.i = 0
return [self.i]
def step(self, action):
self.i += 1
return [self.i], self.i % 3, self.i >= 15, {}
class RNNSequencing(unittest.TestCase):
def testSimpleOptimizerSequencing(self):
ModelCatalog.register_custom_model("rnn", RNNSpyModel)
register_env("counter", lambda _: DebugCounterEnv())
ppo = PPOTrainer(
env="counter",
config={
"num_workers": 0,
"sample_batch_size": 10,
"train_batch_size": 10,
"sgd_minibatch_size": 10,
"vf_share_layers": True,
"simple_optimizer": True,
"num_sgd_iter": 1,
"model": {
"custom_model": "rnn",
"max_seq_len": 4,
"state_shape": [3, 3],
},
})
ppo.train()
ppo.train()
batch0 = pickle.loads(
ray.experimental.internal_kv._internal_kv_get("rnn_spy_in_0"))
self.assertEqual(
batch0["sequences"].tolist(),
[[[0], [1], [2], [3]], [[4], [5], [6], [7]], [[8], [9], [0], [0]]])
self.assertEqual(batch0["seq_lens"].tolist(), [4, 4, 2])
self.assertEqual(batch0["state_in"][0][0].tolist(), [0, 0, 0])
self.assertEqual(batch0["state_in"][1][0].tolist(), [0, 0, 0])
self.assertGreater(abs(np.sum(batch0["state_in"][0][1])), 0)
self.assertGreater(abs(np.sum(batch0["state_in"][1][1])), 0)
self.assertTrue(
np.allclose(batch0["state_in"][0].tolist()[1:],
batch0["state_out"][0].tolist()[:-1]))
self.assertTrue(
np.allclose(batch0["state_in"][1].tolist()[1:],
batch0["state_out"][1].tolist()[:-1]))
batch1 = pickle.loads(
ray.experimental.internal_kv._internal_kv_get("rnn_spy_in_1"))
self.assertEqual(batch1["sequences"].tolist(), [
[[10], [11], [12], [13]],
[[14], [0], [0], [0]],
[[0], [1], [2], [3]],
[[4], [0], [0], [0]],
])
self.assertEqual(batch1["seq_lens"].tolist(), [4, 1, 4, 1])
self.assertEqual(batch1["state_in"][0][2].tolist(), [0, 0, 0])
self.assertEqual(batch1["state_in"][1][2].tolist(), [0, 0, 0])
self.assertGreater(abs(np.sum(batch1["state_in"][0][0])), 0)
self.assertGreater(abs(np.sum(batch1["state_in"][1][0])), 0)
self.assertGreater(abs(np.sum(batch1["state_in"][0][1])), 0)
self.assertGreater(abs(np.sum(batch1["state_in"][1][1])), 0)
self.assertGreater(abs(np.sum(batch1["state_in"][0][3])), 0)
self.assertGreater(abs(np.sum(batch1["state_in"][1][3])), 0)
def testMinibatchSequencing(self):
ModelCatalog.register_custom_model("rnn", RNNSpyModel)
register_env("counter", lambda _: DebugCounterEnv())
ppo = PPOTrainer(
env="counter",
config={
"shuffle_sequences": False, # for deterministic testing
"num_workers": 0,
"sample_batch_size": 20,
"train_batch_size": 20,
"sgd_minibatch_size": 10,
"vf_share_layers": True,
"simple_optimizer": False,
"num_sgd_iter": 1,
"model": {
"custom_model": "rnn",
"max_seq_len": 4,
"state_shape": [3, 3],
},
})
ppo.train()
ppo.train()
# first epoch: 20 observations get split into 2 minibatches of 8
# four observations are discarded
batch0 = pickle.loads(
ray.experimental.internal_kv._internal_kv_get("rnn_spy_in_0"))
batch1 = pickle.loads(
ray.experimental.internal_kv._internal_kv_get("rnn_spy_in_1"))
if batch0["sequences"][0][0][0] > batch1["sequences"][0][0][0]:
batch0, batch1 = batch1, batch0 # sort minibatches
self.assertEqual(batch0["seq_lens"].tolist(), [4, 4])
self.assertEqual(batch1["seq_lens"].tolist(), [4, 3])
self.assertEqual(batch0["sequences"].tolist(), [
[[0], [1], [2], [3]],
[[4], [5], [6], [7]],
])
self.assertEqual(batch1["sequences"].tolist(), [
[[8], [9], [10], [11]],
[[12], [13], [14], [0]],
])
# second epoch: 20 observations get split into 2 minibatches of 8
# four observations are discarded
batch2 = pickle.loads(
ray.experimental.internal_kv._internal_kv_get("rnn_spy_in_2"))
batch3 = pickle.loads(
ray.experimental.internal_kv._internal_kv_get("rnn_spy_in_3"))
if batch2["sequences"][0][0][0] > batch3["sequences"][0][0][0]:
batch2, batch3 = batch3, batch2
self.assertEqual(batch2["seq_lens"].tolist(), [4, 4])
self.assertEqual(batch3["seq_lens"].tolist(), [2, 4])
self.assertEqual(batch2["sequences"].tolist(), [
[[5], [6], [7], [8]],
[[9], [10], [11], [12]],
])
self.assertEqual(batch3["sequences"].tolist(), [
[[13], [14], [0], [0]],
[[0], [1], [2], [3]],
])
if __name__ == "__main__":
ray.init(num_cpus=4)
unittest.main(verbosity=2)
|
|
#!/usr/bin/env python
r"""Training a sentiment analysis binary classifier on the IMDB dataset,
with or without pretrained GloVe word embeddings.
Downloading and extracting the various models and datasets involved is done
in parallel, along with running various make files and scripts.
"""
from __future__ import print_function
from builtins import zip
try:
    from collections.abc import MutableSequence, Sequence
except ImportError:  # Python 2
    from collections import MutableSequence, Sequence
import fnmatch
import functools
from inspect import isgenerator
from multiprocessing import Pool
import os
from os import getcwd, listdir, mkdir, path
try:
from subprocess import DEVNULL
except ImportError:
DEVNULL = open(os.devnull, 'wb')
import shlex
import six
import subprocess
import sys
import tarfile
try:
from unittest.mock import MagicMock
except ImportError:
from mock import MagicMock
import zipfile
from keras import layers, models
from keras.preprocessing.sequence import pad_sequences
from keras.preprocessing.text import Tokenizer
import numpy as np
import wget
__author__ = 'Gregory Giecold'
__copyright__ = 'Copyright 2017-2022 Gregory Giecold and contributors'
__credit__ = 'Gregory Giecold'
__status__ = 'beta'
__version__ = '0.1.0'
__all__ = ['build_model', 'download_extract',
'get_imdb_data', 'tokenize_data',
'track_train']
def track_train(func):
@functools.wraps(func)
def wrapper(*args, **kwargs):
train_flag = kwargs.pop('train_flag', None)
if train_flag is not None:
wrapper.has_been_trained = train_flag
return func(*args, **kwargs)
wrapper.has_been_trained = False
return wrapper
def download_extract(url, odir):
assert isinstance(url, six.string_types)
if url.endswith('.gz'):
fname = wget.download(url, out=path.dirname(odir), bar=None)
with tarfile.open(fname, 'r') as th:
th.extractall(path.dirname(odir))
subprocess.check_call(['rm', '{}'.format(path.split(url)[-1])],
stdout=DEVNULL, stderr=subprocess.PIPE,
cwd=path.dirname(odir))
elif url.endswith('GloVe-1.2.zip'):
fname = wget.download(url, out=path.dirname(odir),
bar=wget.bar_thermometer)
with zipfile.ZipFile(fname, 'r', zipfile.ZIP_DEFLATED) as zh:
for file in zh.filelist:
name = file.filename
                permissions = 0o777
file.external_attr = permissions
ofile = path.join(path.dirname(odir), name)
if name.endswith('/'):
mkdir(ofile, permissions)
else:
fh = os.open(ofile, os.O_CREAT | os.O_WRONLY, permissions)
os.write(fh, zh.read(name))
os.close(fh)
commands = ('rm {}'.format(path.split(url)[-1]), 'make', './demo.sh')
directories = (path.dirname(odir), odir, odir)
shell_flags = (False, False, True)
for cmd, cdir, flag in zip(commands, directories, shell_flags):
subprocess.Popen(shlex.split(cmd), stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
cwd=cdir, shell=flag)
elif url.endswith('.zip'):
fname = wget.download(url, out=path.dirname(odir))
with zipfile.ZipFile(fname, 'r') as zh:
zh.extractall(odir)
subprocess.check_call(['rm', '{}'.format(path.split(url)[-1])],
stdout=DEVNULL, stderr=subprocess.PIPE,
cwd=path.dirname(odir))
def download_extract_unpack(args):
return download_extract(*args)
def get_imdb_data(odir, train_flag=True):
assert path.isdir(odir)
assert isinstance(train_flag, bool)
labels, texts = list(), list()
for category in ('neg', 'pos'):
subdir = path.join(odir, 'train' if train_flag else 'test', category)
for fname in fnmatch.filter(listdir(subdir), '*.txt'):
labels.append(0 if category == 'neg' else 1)
with open(path.join(subdir, fname), 'r') as fh:
texts.append(fh.read())
return labels, texts
def tokenize_data(tokenizer, odir, num_words, num_training_samples=20000,
num_validation_samples=10000, max_words_per_text=100):
@track_train
def helper(train_flag=True):
labels, texts = get_imdb_data(odir, train_flag)
labels = np.asarray(labels)
try:
if isinstance(texts, (MutableSequence, Sequence)):
texts = list(texts)
else:
assert isgenerator(texts)
except Exception:
raise
if train_flag:
tokenizer.fit_on_texts(texts)
data = tokenizer.texts_to_sequences(texts)
data = pad_sequences(data, maxlen=max_words_per_text)
return labels, data
labels, data = helper()
idx = np.arange(data.shape[0])
np.random.shuffle(idx)
data = data[idx]
labels = labels[idx]
x_train = data[:num_training_samples]
y_train = labels[:num_training_samples]
x_val = data[
num_training_samples:num_training_samples + num_validation_samples
]
y_val = labels[
num_training_samples:num_training_samples + num_validation_samples
]
y_test, x_test = helper(False)
return (x_train, y_train), (x_val, y_val), (x_test, y_test)
def build_model(pretrained_embedding=True, odir=None, tokenizer=None,
num_words=10000, embedding_dimension=100,
max_words_per_text=100):
if pretrained_embedding:
assert embedding_dimension in (50, 100, 200, 300)
model = models.Sequential()
model.add(layers.Embedding(
num_words, embedding_dimension,
input_length=max_words_per_text)
)
model.add(layers.Flatten())
model.add(layers.Dense(32, activation='relu'))
model.add(layers.Dense(1, activation='sigmoid'))
model.summary()
if pretrained_embedding:
assert (odir is not None) and path.isdir(odir)
assert (tokenizer is not None) and isinstance(tokenizer, Tokenizer)
embedding_dict = dict()
fname = path.join(
odir, 'glove.6B.{embedding_dimension}d.txt'.format(**locals())
)
with open(fname, 'r') as fh:
for line in fh:
tmp = line.split()
k, v = tmp[0], tmp[1:]
embedding_dict[k] = np.asarray(v, dtype='float32')
embedding_matrix = np.zeros((num_words, embedding_dimension))
        for k, v in six.iteritems(tokenizer.word_index):
word_embedding = embedding_dict.get(k)
if v < num_words and word_embedding is not None:
embedding_matrix[v] = word_embedding
model.layers[0].set_weights([embedding_matrix])
model.layers[0].trainable = False
model.compile(optimizer='rmsprop', loss='binary_crossentropy',
metrics=['accuracy'])
return model
def main():
odir = path.join(path.dirname(getcwd()), 'data')
imdb_dir = path.join(odir, 'aclImdb')
glove_code_dir = path.join(odir, 'GloVe-1.2')
pretrained_glove_embedding_dir = path.join(odir, 'glove.6B')
dirs, urls = list(), list()
if not path.isdir(glove_code_dir):
dirs.append(glove_code_dir)
urls.append('https://nlp.stanford.edu/software/GloVe-1.2.zip')
if not path.isdir(pretrained_glove_embedding_dir):
dirs.append(pretrained_glove_embedding_dir)
urls.append('http://nlp.stanford.edu/data/glove.6B.zip')
if not path.isdir(imdb_dir):
dirs.append(imdb_dir)
urls.append(
'https://ai.stanford.edu/~amaas/data/sentiment/aclImdb_v1.tar.gz'
)
pool = Pool()
if sys.version_info.major == 3:
pool.starmap(download_extract, zip(urls, dirs))
else:
pool.map(download_extract_unpack, zip(urls, dirs))
num_words = 10000
tokenizer = Tokenizer(num_words=num_words)
(x_train, y_train), (x_val, y_val), (x_test, y_test) = tokenize_data(
tokenizer, imdb_dir, num_words)
for pretrained in (True, False):
model = build_model(pretrained, pretrained_glove_embedding_dir,
tokenizer)
history = model.fit(x_train, y_train, epochs=10, batch_size=32,
validation_data=(x_val, y_val), verbose=0)
scores = model.evaluate(x_test, y_test, verbose=0)
print("\nTest results for the model "
"{} pretrained GloVe word embeddings: ".format(
'with' if pretrained else 'without'))
print("loss={scores[0]}, accuracy={scores[1]}\n".format(**locals()))
# The model with pretrained embedding vectors will display a lower
# test accuracy, due to having overfitted the training samples.
model.save_weights('{}glove_model.hy'.format(
'pretrained_' if pretrained else ''))
if __name__ == '__main__':
main()
|
|
#!/usr/bin/env python
# Copyright 2016 DIANA-HEP
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# "Public" methods; what we want to attach to the Histogram as a mix-in.
from __future__ import absolute_import
import math
# python 2/3 compatibility fixes
# from histogrammar.util import *
class HistogramMethods(object):
def plotbokeh(self, glyphType="line", glyphSize=1, fillColor="red",
lineColor="black", lineAlpha=1, fillAlpha=0.1, lineDash='solid'):
# glyphs
from bokeh.models.glyphs import Rect, Line
from bokeh.models.renderers import GlyphRenderer
from bokeh.models.markers import (Circle, Cross,
Diamond, Square,
Triangle)
# data
from bokeh.models import ColumnDataSource
# Parameters of the histogram
lo = self.low
hi = self.high
num = self.num
bin_width = (hi-lo)/num
x = list()
center = lo
for _ in range(num):
x.append(center+bin_width/2)
center += bin_width
y = self.numericalValues
ci = [2.*v for v in self.confidenceIntervalValues()]
source = ColumnDataSource(data=dict(x=x, y=y, ci=ci))
glyph = None
if glyphType == "square":
glyph = Square(
x='x',
y='y',
line_color=lineColor,
fill_color=fillColor,
line_alpha=lineAlpha,
size=glyphSize,
line_dash=lineDash)
elif glyphType == "diamond":
glyph = Diamond(
x='x',
y='y',
line_color=lineColor,
fill_color=fillColor,
line_alpha=lineAlpha,
size=glyphSize,
line_dash=lineDash)
elif glyphType == "cross":
glyph = Cross(
x='x',
y='y',
line_color=lineColor,
fill_color=fillColor,
line_alpha=lineAlpha,
size=glyphSize,
line_dash=lineDash)
elif glyphType == "triangle":
glyph = Triangle(
x='x',
y='y',
line_color=lineColor,
fill_color=fillColor,
line_alpha=lineAlpha,
size=glyphSize,
line_dash=lineDash)
elif glyphType == "circle":
glyph = Circle(
x='x',
y='y',
line_color=lineColor,
fill_color=fillColor,
line_alpha=lineAlpha,
size=glyphSize,
line_dash=lineDash)
elif glyphType == "rect":
glyph = Rect(
x='x',
y='y',
width=bin_width,
height=0.1,
fill_alpha=fillAlpha,
line_color=lineColor,
fill_color=fillColor)
elif glyphType == "errors":
glyph = Rect(
x='x',
y='y',
width=bin_width,
height='ci',
fill_alpha=fillAlpha,
line_color=lineColor,
fill_color=fillColor)
elif glyphType == "histogram":
h = y
y = [yy/2 for yy in y]
source = ColumnDataSource(dict(x=x, y=y, h=h))
glyph = Rect(
x='x',
y='y',
width=bin_width,
height='h',
fill_alpha=fillAlpha,
line_color=lineColor,
fill_color=fillColor)
else:
glyph = Line(
x='x',
y='y',
line_color=lineColor,
line_alpha=lineAlpha,
line_width=glyphSize,
line_dash=lineDash)
return GlyphRenderer(glyph=glyph, data_source=source)
class SparselyHistogramMethods(object):
def plotbokeh(self, glyphType="line", glyphSize=1, fillColor="red",
lineColor="black", lineAlpha=1, fillAlpha=0.1, lineDash='solid'):
# glyphs
from bokeh.models.glyphs import Rect, Line
from bokeh.models.renderers import GlyphRenderer
from bokeh.models.markers import (Circle, Cross,
Diamond, Square,
Triangle)
# data
from bokeh.models import ColumnDataSource
# Parameters of the histogram
lo = self.low
hi = self.high
num = self.numFilled
bin_width = (hi-lo)/num
x = list()
center = lo
for _ in range(num):
x.append(center+bin_width/2)
center += bin_width
y = [v.entries for _, v in sorted(self.bins.items())]
source = ColumnDataSource(data=dict(x=x, y=y))
glyph = None
if glyphType == "square":
glyph = Square(
x='x',
y='y',
line_color=lineColor,
fill_color=fillColor,
line_alpha=lineAlpha,
size=glyphSize,
line_dash=lineDash)
elif glyphType == "diamond":
glyph = Diamond(
x='x',
y='y',
line_color=lineColor,
fill_color=fillColor,
line_alpha=lineAlpha,
size=glyphSize,
line_dash=lineDash)
elif glyphType == "cross":
glyph = Cross(
x='x',
y='y',
line_color=lineColor,
fill_color=fillColor,
line_alpha=lineAlpha,
size=glyphSize,
line_dash=lineDash)
elif glyphType == "triangle":
glyph = Triangle(
x='x',
y='y',
line_color=lineColor,
fill_color=fillColor,
line_alpha=lineAlpha,
size=glyphSize,
line_dash=lineDash)
elif glyphType == "circle":
glyph = Circle(
x='x',
y='y',
line_color=lineColor,
fill_color=fillColor,
line_alpha=lineAlpha,
size=glyphSize,
line_dash=lineDash)
elif glyphType == "rect":
glyph = Rect(
x='x',
y='y',
width=bin_width,
height=0.1,
fill_alpha=fillAlpha,
line_color=lineColor,
fill_color=fillColor)
elif glyphType == "errors":
ci = [2.*v for v in self.confidenceIntervalValues()]
source = ColumnDataSource(data=dict(x=x, y=y, ci=ci))
glyph = Rect(
x='x',
y='y',
width=bin_width,
height='ci',
fill_alpha=fillAlpha,
line_color=lineColor,
fill_color=fillColor)
elif glyphType == "histogram":
h = y
y = [yy/2 for yy in y]
source = ColumnDataSource(dict(x=x, y=y, h=h))
glyph = Rect(
x='x',
y='y',
width=bin_width,
height='h',
fill_alpha=fillAlpha,
line_color=lineColor,
fill_color=fillColor)
else:
glyph = Line(
x='x',
y='y',
line_color=lineColor,
line_alpha=lineAlpha,
line_width=glyphSize,
line_dash=lineDash)
return GlyphRenderer(glyph=glyph, data_source=source)
class CategorizeHistogramMethods(object):
pass
class IrregularlyHistogramMethods(object):
pass
class CentrallyHistogramMethods(object):
pass
class ProfileMethods(object):
def plotbokeh(self, glyphType="line", glyphSize=1, fillColor="red",
lineColor="black", lineAlpha=1, fillAlpha=0.1, lineDash='solid'):
# glyphs
from bokeh.models.glyphs import Rect, Line
from bokeh.models.renderers import GlyphRenderer
from bokeh.models.markers import (Circle, Cross,
Diamond, Square,
Triangle)
# data
from bokeh.models import ColumnDataSource
# Parameters of the histogram
lo = self.low
hi = self.high
num = self.num
bin_width = (hi-lo)/num
x = list()
y = list()
center = lo
for v in self.values:
if not math.isnan(v.mean):
y.append(v.mean)
x.append(center+bin_width/2)
center += bin_width
source = ColumnDataSource(data=dict(x=x, y=y))
glyph = None
if glyphType == "square":
glyph = Square(
x='x',
y='y',
line_color=lineColor,
fill_color=fillColor,
line_alpha=lineAlpha,
size=glyphSize,
line_dash=lineDash)
elif glyphType == "diamond":
glyph = Diamond(
x='x',
y='y',
line_color=lineColor,
fill_color=fillColor,
line_alpha=lineAlpha,
size=glyphSize,
line_dash=lineDash)
elif glyphType == "cross":
glyph = Cross(
x='x',
y='y',
line_color=lineColor,
fill_color=fillColor,
line_alpha=lineAlpha,
size=glyphSize,
line_dash=lineDash)
elif glyphType == "triangle":
glyph = Triangle(
x='x',
y='y',
line_color=lineColor,
fill_color=fillColor,
line_alpha=lineAlpha,
size=glyphSize,
line_dash=lineDash)
elif glyphType == "circle":
glyph = Circle(
x='x',
y='y',
line_color=lineColor,
fill_color=fillColor,
line_alpha=lineAlpha,
size=glyphSize,
line_dash=lineDash)
elif glyphType == "histogram":
w = [bin_width for _ in x]
h = y
y = [yy/2 for yy in y]
source = ColumnDataSource(dict(x=x, y=y, w=w, h=h))
glyph = Rect(
x='x',
y='y',
width='w',
height='h',
fill_alpha=fillAlpha,
line_color=lineColor,
fill_color=fillColor)
else:
glyph = Line(
x='x',
y='y',
line_color=lineColor,
line_alpha=lineAlpha,
line_width=glyphSize,
line_dash=lineDash)
return GlyphRenderer(glyph=glyph, data_source=source)
class SparselyProfileMethods(object):
pass
class ProfileErrMethods(object):
def plotbokeh(self, glyphType="line", glyphSize=1, fillColor="red",
lineColor="black", lineAlpha=1, fillAlpha=0.1, lineDash='solid'):
# glyphs
from bokeh.models.glyphs import Rect, Line
from bokeh.models.renderers import GlyphRenderer
from bokeh.models.markers import (Circle, Cross,
Diamond, Square,
Triangle)
# data
from bokeh.models import ColumnDataSource
from math import sqrt
# Parameters of the histogram
lo = self.low
hi = self.high
num = self.num
bin_width = (hi-lo)/num
x = list()
y = list()
center = lo
for v in self.values:
if not math.isnan(v.mean):
y.append(v.mean)
x.append(center+bin_width/2)
center += bin_width
source = ColumnDataSource(data=dict(x=x, y=y))
glyph = None
if glyphType == "square":
glyph = Square(
x='x',
y='y',
line_color=lineColor,
fill_color=fillColor,
line_alpha=lineAlpha,
size=glyphSize,
line_dash=lineDash)
elif glyphType == "diamond":
glyph = Diamond(
x='x',
y='y',
line_color=lineColor,
fill_color=fillColor,
line_alpha=lineAlpha,
size=glyphSize,
line_dash=lineDash)
elif glyphType == "cross":
glyph = Cross(
x='x',
y='y',
line_color=lineColor,
fill_color=fillColor,
line_alpha=lineAlpha,
size=glyphSize,
line_dash=lineDash)
elif glyphType == "triangle":
glyph = Triangle(
x='x',
y='y',
line_color=lineColor,
fill_color=fillColor,
line_alpha=lineAlpha,
size=glyphSize,
line_dash=lineDash)
elif glyphType == "circle":
glyph = Circle(
x='x',
y='y',
line_color=lineColor,
fill_color=fillColor,
line_alpha=lineAlpha,
size=glyphSize,
line_dash=lineDash)
elif glyphType == "errors":
w = [bin_width for _ in x]
h = [sqrt(v.variance/v.entries) if v.entries > 0 else 0.0 for v in self.values]
source = ColumnDataSource(dict(x=x, y=y, w=w, h=h))
glyph = Rect(
x='x',
y='y',
width='w',
height='h',
fill_alpha=fillAlpha,
line_color=lineColor,
fill_color=fillColor)
elif glyphType == "histogram":
w = [bin_width for _ in x]
h = y
y = [yy/2 for yy in y]
source = ColumnDataSource(dict(x=x, y=y, w=w, h=h))
glyph = Rect(
x='x',
y='y',
width='w',
height='h',
fill_alpha=fillAlpha,
line_color=lineColor,
fill_color=fillColor)
else:
glyph = Line(
x='x',
y='y',
line_color=lineColor,
line_alpha=lineAlpha,
line_width=glyphSize,
line_dash=lineDash)
return GlyphRenderer(glyph=glyph, data_source=source)
class SparselyProfileErrMethods(object):
pass
class StackedHistogramMethods(object):
nMaxStacked = 10
glyphTypeDefaults = ["circle"]*nMaxStacked
glyphSizeDefaults = [1]*nMaxStacked
fillColorDefaults = ["red"]*nMaxStacked
lineColorDefaults = ["red"]*nMaxStacked
lineAlphaDefaults = [1]*nMaxStacked
fillAlphaDefaults = [0.1]*nMaxStacked
lineDashDefaults = ["solid"]*nMaxStacked
def plotbokeh(self, glyphTypes=glyphTypeDefaults, glyphSizes=glyphSizeDefaults, fillColors=fillColorDefaults,
lineColors=lineColorDefaults, lineAlphas=lineAlphaDefaults, fillAlphas=fillAlphaDefaults,
lineDashes=lineDashDefaults):
nChildren = len(self.children)-1
assert len(glyphSizes) >= nChildren
assert len(glyphTypes) >= nChildren
assert len(fillColors) >= nChildren
assert len(lineColors) >= nChildren
assert len(lineAlphas) >= nChildren
assert len(fillAlphas) >= nChildren
assert len(lineDashes) >= nChildren
stackedGlyphs = list()
# for ichild, p in enumerate(self.children,start=1):
for ichild in range(nChildren):
stackedGlyphs.append(self.children[ichild+1].plotbokeh(glyphTypes[ichild],
glyphSizes[ichild],
fillColors[ichild],
lineColors[ichild],
lineAlphas[ichild],
fillAlphas[ichild],
lineDashes[ichild]))
return stackedGlyphs
class PartitionedHistogramMethods(object):
pass
class FractionedHistogramMethods(object):
pass
class TwoDimensionallyHistogramMethods(object):
pass
class SparselyTwoDimensionallyHistogramMethods(object):
pass
class IrregularlyTwoDimensionallyHistogramMethods(object):
pass
class CentrallyTwoDimensionallyHistogramMethods(object):
pass
def plot(xLabel='x', yLabel='y', *args):
from bokeh.models import DataRange1d, Plot, LinearAxis
from bokeh.models import PanTool, WheelZoomTool
xdr = DataRange1d()
ydr = DataRange1d()
plot = Plot(x_range=xdr, y_range=ydr, min_border=80)
extra = list()
if not isinstance(xLabel, str) and not isinstance(yLabel, str):
extra.append(xLabel)
extra.append(yLabel)
xLabel = 'x'
yLabel = 'y'
elif not isinstance(xLabel, str):
extra.append(xLabel)
xLabel = 'x'
elif not isinstance(yLabel, str):
extra.append(yLabel)
yLabel = 'y'
args = extra+list(args)
for renderer in args:
if not isinstance(renderer, list):
plot.renderers.append(renderer)
else:
plot.renderers.extend(renderer)
# axes
xaxis = LinearAxis(axis_label=xLabel)
plot.add_layout(xaxis, 'below')
yaxis = LinearAxis(axis_label=yLabel)
plot.add_layout(yaxis, 'left')
# add grid to the plot
# plot.add_layout(Grid(dimension=0, ticker=xaxis.ticker))
# plot.add_layout(Grid(dimension=1, ticker=yaxis.ticker))
# interactive tools
plot.add_tools(PanTool(), WheelZoomTool()) # , SaveTool())
return plot
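# Illustrative usage sketch (assumes `h` is a filled histogrammar Histogram with
# this mix-in; the file name is made up):
#
#   renderer = h.plotbokeh(glyphType="histogram", fillColor="blue")
#   p = plot('x', 'entries', renderer)
#   save(p, 'histogram.html')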
def save(plot, fname):
# SaveTool https://github.com/bokeh/bokeh/blob/118b6a765ee79232b1fef0e82ed968a9dbb0e17f/examples/models/line.py
from bokeh.io import save, output_file
output_file(fname)
save(plot)
def view(plot, show=False):
from bokeh.plotting import curdoc
from bokeh.client import push_session
if show:
session = push_session(curdoc())
session.show(plot)
else:
curdoc().add_root(plot)
|
|
"""
Wrap the internal caffe C++ module (_caffe.so) with a clean, Pythonic
interface.
"""
from collections import OrderedDict
try:
from itertools import izip_longest
except ImportError:
from itertools import zip_longest as izip_longest
import numpy as np
from ._caffe import Net, SGDSolver, NesterovSolver, AdaGradSolver, \
RMSPropSolver, AdaDeltaSolver, AdamSolver
import caffe.io
import six
# We directly update methods from Net here (rather than using composition or
# inheritance) so that nets created by caffe (e.g., by SGDSolver) will
# automatically have the improved interface.
@property
def _Net_blobs(self):
"""
An OrderedDict (bottom to top, i.e., input to output) of network
blobs indexed by name
"""
return OrderedDict(zip(self._blob_names, self._blobs))
@property
def _Net_blob_loss_weights(self):
"""
An OrderedDict (bottom to top, i.e., input to output) of network
blob loss weights indexed by name
"""
return OrderedDict(zip(self._blob_names, self._blob_loss_weights))
@property
def _Net_params(self):
"""
An OrderedDict (bottom to top, i.e., input to output) of network
parameters indexed by name; each is a list of multiple blobs (e.g.,
weights and biases)
"""
return OrderedDict([(name, lr.blobs)
for name, lr in zip(self._layer_names, self.layers)
if len(lr.blobs) > 0])
@property
def _Net_inputs(self):
return [list(self.blobs.keys())[i] for i in self._inputs]
@property
def _Net_outputs(self):
return [list(self.blobs.keys())[i] for i in self._outputs]
def _Net_forward(self, blobs=None, start=None, end=None, **kwargs):
"""
Forward pass: prepare inputs and run the net forward.
Parameters
----------
blobs : list of blobs to return in addition to output blobs.
kwargs : Keys are input blob names and values are blob ndarrays.
For formatting inputs for Caffe, see Net.preprocess().
If None, input is taken from data layers.
start : optional name of layer at which to begin the forward pass
end : optional name of layer at which to finish the forward pass
(inclusive)
Returns
-------
outs : {blob name: blob ndarray} dict.
"""
if blobs is None:
blobs = []
if start is not None:
start_ind = list(self._layer_names).index(start)
else:
start_ind = 0
if end is not None:
end_ind = list(self._layer_names).index(end)
outputs = set([end] + blobs)
else:
end_ind = len(self.layers) - 1
outputs = set(self.outputs + blobs)
if kwargs:
if set(kwargs.keys()) != set(self.inputs):
raise Exception('Input blob arguments do not match net inputs.')
# Set input according to defined shapes and make arrays single and
# C-contiguous as Caffe expects.
for in_, blob in six.iteritems(kwargs):
if blob.shape[0] != self.blobs[in_].shape[0]:
raise Exception('Input is not batch sized')
self.blobs[in_].data[...] = blob
self._forward(start_ind, end_ind)
# Unpack blobs to extract
return {out: self.blobs[out].data for out in outputs}
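# Illustrative sketch of the forward() call documented above (the net and the
# blob names 'data', 'conv1', 'fc7' are hypothetical):
#
#   out = net.forward(blobs=['conv1'], data=batch)  # full pass over one batch
#   out['conv1']                                    # extra blob requested above
#   part = net.forward(start='conv1', end='fc7')    # partial pass; returns {'fc7': ...}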
def _Net_backward(self, diffs=None, start=None, end=None, **kwargs):
"""
Backward pass: prepare diffs and run the net backward.
Parameters
----------
diffs : list of diffs to return in addition to bottom diffs.
kwargs : Keys are output blob names and values are diff ndarrays.
If None, top diffs are taken from forward loss.
start : optional name of layer at which to begin the backward pass
end : optional name of layer at which to finish the backward pass
(inclusive)
Returns
-------
outs: {blob name: diff ndarray} dict.
"""
if diffs is None:
diffs = []
if start is not None:
start_ind = list(self._layer_names).index(start)
else:
start_ind = len(self.layers) - 1
if end is not None:
end_ind = list(self._layer_names).index(end)
outputs = set([end] + diffs)
else:
end_ind = 0
outputs = set(self.inputs + diffs)
if kwargs:
if set(kwargs.keys()) != set(self.outputs):
raise Exception('Top diff arguments do not match net outputs.')
# Set top diffs according to defined shapes and make arrays single and
# C-contiguous as Caffe expects.
for top, diff in six.iteritems(kwargs):
if diff.shape[0] != self.blobs[top].shape[0]:
raise Exception('Diff is not batch sized')
self.blobs[top].diff[...] = diff
self._backward(start_ind, end_ind)
# Unpack diffs to extract
return {out: self.blobs[out].diff for out in outputs}
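# Illustrative sketch of backward(), mirroring forward() above ('prob' is
# assumed to be an output blob of the net and 'data' an input blob):
#
#   grads = net.backward(diffs=['conv1'], prob=prob_diff)
#   grads['data']   # diff w.r.t. the input blob
#   grads['conv1']  # extra diff requested above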
def _Net_forward_all(self, blobs=None, **kwargs):
"""
Run net forward in batches.
Parameters
----------
blobs : list of blobs to extract as in forward()
kwargs : Keys are input blob names and values are blob ndarrays.
Refer to forward().
Returns
-------
all_outs : {blob name: list of blobs} dict.
"""
# Collect outputs from batches
all_outs = {out: [] for out in set(self.outputs + (blobs or []))}
for batch in self._batch(kwargs):
outs = self.forward(blobs=blobs, **batch)
for out, out_blob in six.iteritems(outs):
all_outs[out].extend(out_blob.copy())
# Package in ndarray.
for out in all_outs:
all_outs[out] = np.asarray(all_outs[out])
# Discard padding.
pad = len(six.next(six.itervalues(all_outs))) - len(six.next(six.itervalues(kwargs)))
if pad:
for out in all_outs:
all_outs[out] = all_outs[out][:-pad]
return all_outs
def _Net_forward_backward_all(self, blobs=None, diffs=None, **kwargs):
"""
Run net forward + backward in batches.
Parameters
----------
blobs: list of blobs to extract as in forward()
diffs: list of diffs to extract as in backward()
kwargs: Keys are input (for forward) and output (for backward) blob names
and values are ndarrays. Refer to forward() and backward().
Prefilled variants are called for lack of input or output blobs.
Returns
-------
all_blobs: {blob name: blob ndarray} dict.
all_diffs: {blob name: diff ndarray} dict.
"""
# Batch blobs and diffs.
all_outs = {out: [] for out in set(self.outputs + (blobs or []))}
all_diffs = {diff: [] for diff in set(self.inputs + (diffs or []))}
forward_batches = self._batch({in_: kwargs[in_]
for in_ in self.inputs if in_ in kwargs})
backward_batches = self._batch({out: kwargs[out]
for out in self.outputs if out in kwargs})
# Collect outputs from batches (and heed lack of forward/backward batches).
for fb, bb in izip_longest(forward_batches, backward_batches, fillvalue={}):
batch_blobs = self.forward(blobs=blobs, **fb)
batch_diffs = self.backward(diffs=diffs, **bb)
for out, out_blobs in six.iteritems(batch_blobs):
all_outs[out].extend(out_blobs.copy())
for diff, out_diffs in six.iteritems(batch_diffs):
all_diffs[diff].extend(out_diffs.copy())
# Package in ndarray.
for out, diff in zip(all_outs, all_diffs):
all_outs[out] = np.asarray(all_outs[out])
all_diffs[diff] = np.asarray(all_diffs[diff])
# Discard padding at the end and package in ndarray.
pad = len(six.next(six.itervalues(all_outs))) - len(six.next(six.itervalues(kwargs)))
if pad:
for out, diff in zip(all_outs, all_diffs):
all_outs[out] = all_outs[out][:-pad]
all_diffs[diff] = all_diffs[diff][:-pad]
return all_outs, all_diffs
def _Net_set_input_arrays(self, data, labels):
"""
Set input arrays of the in-memory MemoryDataLayer.
(Note: this is only for networks declared with the memory data layer.)
"""
if labels.ndim == 1:
labels = np.ascontiguousarray(labels[:, np.newaxis, np.newaxis,
np.newaxis])
return self._set_input_arrays(data, labels)
def _Net_batch(self, blobs):
"""
Batch blob lists according to net's batch size.
Parameters
----------
    blobs: Keys are blob names and values are lists of blobs (of any length).
Naturally, all the lists should have the same length.
Yields
------
batch: {blob name: list of blobs} dict for a single batch.
"""
num = len(six.next(six.itervalues(blobs)))
batch_size = six.next(six.itervalues(self.blobs)).shape[0]
remainder = num % batch_size
num_batches = num // batch_size
# Yield full batches.
for b in range(num_batches):
i = b * batch_size
yield {name: blobs[name][i:i + batch_size] for name in blobs}
# Yield last padded batch, if any.
if remainder > 0:
padded_batch = {}
for name in blobs:
padding = np.zeros((batch_size - remainder,)
+ blobs[name].shape[1:])
padded_batch[name] = np.concatenate([blobs[name][-remainder:],
padding])
yield padded_batch
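# Worked example of the batching above (hypothetical sizes): with a net batch
# size of 8 and 10 input blobs, _batch yields one full batch of 8 followed by a
# padded batch containing the 2 remaining blobs plus 6 all-zero padding rows;
# forward_all() / forward_backward_all() strip that padding from their outputs.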
class _Net_IdNameWrapper:
"""
    A simple wrapper that allows the ids property to be accessed as a dict
    indexed by names. Used for top and bottom names.
"""
def __init__(self, net, func):
self.net, self.func = net, func
def __getitem__(self, name):
# Map the layer name to id
ids = self.func(self.net, list(self.net._layer_names).index(name))
# Map the blob id to name
id_to_name = list(self.net.blobs)
return [id_to_name[i] for i in ids]
# Attach methods to Net.
Net.blobs = _Net_blobs
Net.blob_loss_weights = _Net_blob_loss_weights
Net.params = _Net_params
Net.forward = _Net_forward
Net.backward = _Net_backward
Net.forward_all = _Net_forward_all
Net.forward_backward_all = _Net_forward_backward_all
Net.set_input_arrays = _Net_set_input_arrays
Net._batch = _Net_batch
Net.inputs = _Net_inputs
Net.outputs = _Net_outputs
Net.top_names = property(lambda n: _Net_IdNameWrapper(n, Net._top_ids))
Net.bottom_names = property(lambda n: _Net_IdNameWrapper(n, Net._bottom_ids))
|
|
#!/usr/bin/env python
# Copyright 2014-2018 The PySCF Developers. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Author: Timothy Berkelbach <[email protected]>
#
'''PP with numeric integration. See also pyscf/pbc/gto/pseudo/pp_int.py
For GTH/HGH PPs, see:
Goedecker, Teter, Hutter, PRB 54, 1703 (1996)
Hartwigsen, Goedecker, and Hutter, PRB 58, 3641 (1998)
'''
import numpy as np
import scipy.linalg
import scipy.special
from pyscf import lib
from pyscf.gto import mole
from pyscf.pbc.gto.pseudo import pp_int
def get_alphas(cell):
'''alpha parameters from the non-divergent Hartree+Vloc G=0 term.
See ewald.pdf
Returns:
alphas : (natm,) ndarray
'''
return get_alphas_gth(cell)
def get_alphas_gth(cell):
'''alpha parameters for the local GTH pseudopotential.'''
G0 = np.zeros((1,3))
return -get_gth_vlocG(cell, G0)
def get_vlocG(cell, Gv=None):
'''Local PP kernel in G space: Vloc(G)
Returns:
(natm, ngrids) ndarray
'''
if Gv is None: Gv = cell.Gv
vlocG = get_gth_vlocG(cell, Gv)
return vlocG
def get_gth_vlocG(cell, Gv):
'''Local part of the GTH pseudopotential.
See MH (4.79).
Args:
Gv : (ngrids,3) ndarray
Returns:
(natm, ngrids) ndarray
'''
vlocG = pp_int.get_gth_vlocG_part1(cell, Gv)
# Add the C1, C2, C3, C4 contributions
G2 = np.einsum('ix,ix->i', Gv, Gv)
for ia in range(cell.natm):
symb = cell.atom_symbol(ia)
if symb not in cell._pseudo:
continue
pp = cell._pseudo[symb]
rloc, nexp, cexp = pp[1:3+1]
G2_red = G2 * rloc**2
cfacs = 0
if nexp >= 1:
cfacs += cexp[0]
if nexp >= 2:
cfacs += cexp[1] * (3 - G2_red)
if nexp >= 3:
cfacs += cexp[2] * (15 - 10*G2_red + G2_red**2)
if nexp >= 4:
cfacs += cexp[3] * (105 - 105*G2_red + 21*G2_red**2 - G2_red**3)
vlocG[ia,:] -= (2*np.pi)**(3/2.)*rloc**3*np.exp(-0.5*G2_red) * cfacs
return vlocG
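# For reference, the closed form evaluated above is the HGH local
# pseudopotential in reciprocal space (Hartwigsen, Goedecker, Hutter 1998;
# MH (4.79)), up to the sign/normalization conventions of this module and the
# 1/cell-volume factor applied elsewhere:
#
#   Vloc(G) = -4*pi*Zion/G**2 * exp(-(G*rloc)**2/2)
#             + sqrt(8*pi**3) * rloc**3 * exp(-(G*rloc)**2/2)
#               * ( C1
#                   + C2*(3 - (G*rloc)**2)
#                   + C3*(15 - 10*(G*rloc)**2 + (G*rloc)**4)
#                   + C4*(105 - 105*(G*rloc)**2 + 21*(G*rloc)**4 - (G*rloc)**6) )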
def get_projG(cell, kpt=np.zeros(3)):
'''PP weight and projector for the nonlocal PP in G space.
Returns:
hs : list( list( np.array( , ) ) )
- hs[atm][l][i,j]
projs : list( list( list( list( np.array(ngrids) ) ) ) )
- projs[atm][l][m][i][ngrids]
'''
return get_gth_projG(cell, kpt+cell.Gv)
def get_gth_projG(cell, Gvs):
r'''G space projectors from the FT of the real-space projectors.
\int e^{iGr} p_j^l(r) Y_{lm}^*(theta,phi)
= i^l p_j^l(G) Y_{lm}^*(thetaG, phiG)
See MH Eq.(4.80)
'''
Gs,thetas,phis = cart2polar(Gvs)
hs = []
projs = []
for ia in range(cell.natm):
symb = cell.atom_symbol(ia)
pp = cell._pseudo[symb]
nproj_types = pp[4]
h_ia = []
proj_ia = []
for l,proj in enumerate(pp[5:]):
rl, nl, hl = proj
h_ia.append( np.array(hl) )
proj_ia_l = []
for m in range(-l,l+1):
projG_ang = Ylm(l,m,thetas,phis).conj()
proj_ia_lm = []
for i in range(nl):
projG_radial = projG_li(Gs,l,i,rl)
proj_ia_lm.append( (1j)**l * projG_radial*projG_ang )
proj_ia_l.append(proj_ia_lm)
proj_ia.append(proj_ia_l)
hs.append(h_ia)
projs.append(proj_ia)
return hs, projs
def projG_li(G, l, i, rl):
G = np.array(G)
G_red = G*rl
# MH Eq. (4.81)
return ( _qli(G_red,l,i) * np.pi**(5/4.) * G**l * np.sqrt(rl**(2*l+3))
/ np.exp(0.5*G_red**2) )
def _qli(x,l,i):
# MH Eqs. (4.82)-(4.93) :: beware typos!
# Mathematica formulas:
# p[l_, i_, r_] = Sqrt[2] r^(l + 2 (i - 1)) Exp[-r^2/(2 R^2)]/(R^(l + (4 i - 1)/2) Sqrt[Gamma[l + (4 i - 1)/2]])
# pG[l_, i_, G_] = Integrate[p[l, i, r] 4 Pi r^2 SphericalBesselJ[l, G r], {r, 0, Infinity}]
# qG[l_, i_, G_] := pG[l, i, G]/(Pi^(5/4) G^l Sqrt[R^(2 l + 3)]/Exp[(G R)^2/2])
# FullSimplify[qG[4, 3, G], R > 0 && G > 0]
sqrt = np.sqrt
if l==0 and i==0:
return 4*sqrt(2.)
elif l==0 and i==1:
return 8*sqrt(2/15.)*(3-x**2) # MH & GTH (right)
#return sqrt(8*2/15.)*(3-x**2) # HGH (wrong)
elif l==0 and i==2:
#return 16/3.*sqrt(2/105.)*(15-20*x**2+4*x**4) # MH (wrong)
return 16/3.*sqrt(2/105.)*(15-10*x**2+x**4) # HGH (right)
elif l==1 and i==0:
return 8*sqrt(1/3.)
elif l==1 and i==1:
return 16*sqrt(1/105.)*(5-x**2)
elif l==1 and i==2:
#return 32/3.*sqrt(1/1155.)*(35-28*x**2+4*x**4) # MH (wrong)
return 32/3.*sqrt(1/1155.)*(35-14*x**2+x**4) # HGH (right)
elif l==2 and i==0:
return 8*sqrt(2/15.)
elif l==2 and i==1:
return 16/3.*sqrt(2/105.)*(7-x**2)
elif l==2 and i==2:
#return 32/3.*sqrt(2/15015.)*(63-36*x**2+4*x**4) # MH (wrong I think)
return 32/3.*sqrt(2/15015.)*(63-18*x**2+x**4) # TCB
elif l==3 and i==0:
return 16*sqrt(1/105.)
elif l==3 and i==1:
return 32/3.*sqrt(1/1155.)*(9-x**2)
elif l==3 and i==2:
return 64/45.*sqrt(1/1001.)*(99-22*x**2+x**4)
elif l==4 and i==0:
return 16/3.*sqrt(2/105.)
elif l==4 and i==1:
return 32/3.*sqrt(2/15015.)*(11-x**2)
elif l==4 and i==2:
return 64/45.*sqrt(2/17017.)*(143-26*x**2+x**4)
else:
print("*** WARNING *** l =", l, ", i =", i, "not yet implemented for NL PP!")
return 0.
def Ylm_real(l,m,theta,phi):
'''Real spherical harmonics, if desired.'''
Ylabsm = Ylm(l,np.abs(m),theta,phi)
if m < 0:
return np.sqrt(2.) * Ylabsm.imag
elif m > 0:
return np.sqrt(2.) * Ylabsm.real
else: # m == 0
return Ylabsm.real
def Ylm(l,m,theta,phi):
'''
Spherical harmonics; returns a complex number
Note the "convention" for theta and phi:
http://docs.scipy.org/doc/scipy-0.14.0/reference/generated/scipy.special.sph_harm.html
'''
#return scipy.special.sph_harm(m=m,n=l,theta=phi,phi=theta)
return scipy.special.sph_harm(m,l,phi,theta)
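# Sanity check of the convention above (standard result, for illustration only):
# Ylm(1, 0, theta, phi) equals sqrt(3/(4*pi)) * cos(theta), e.g.
#   abs(Ylm(1, 0, 0.3, 0.0) - np.sqrt(3/(4*np.pi)) * np.cos(0.3)) < 1e-12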
def cart2polar(rvec):
# The rows of rvec are the 3-component vectors
# i.e. rvec is N x 3
x,y,z = rvec.T
r = lib.norm(rvec, axis=1)
# theta is the polar angle, 0 < theta < pi
# catch possible 0/0
theta = np.arccos(z/(r+1e-200))
# phi is the azimuthal angle, 0 < phi < 2pi (or -pi < phi < pi)
phi = np.arctan2(y,x)
return r, theta, phi
def get_pp(cell, kpt=np.zeros(3)):
    '''Get the periodic pseudopotential nuc-el AO matrix
'''
from pyscf.pbc import tools
coords = cell.get_uniform_grids()
aoR = cell.pbc_eval_gto('GTOval', coords, kpt=kpt)
nao = cell.nao_nr()
SI = cell.get_SI()
vlocG = get_vlocG(cell)
vpplocG = -np.sum(SI * vlocG, axis=0)
vpplocG[0] = np.sum(get_alphas(cell)) # from get_jvloc_G0 function
# vpploc evaluated in real-space
vpplocR = tools.ifft(vpplocG, cell.mesh).real
vpploc = np.dot(aoR.T.conj(), vpplocR.reshape(-1,1)*aoR)
# vppnonloc evaluated in reciprocal space
aokG = tools.fftk(np.asarray(aoR.T, order='C'),
cell.mesh, np.exp(-1j*np.dot(coords, kpt))).T
ngrids = len(aokG)
fakemol = mole.Mole()
fakemol._atm = np.zeros((1,mole.ATM_SLOTS), dtype=np.int32)
fakemol._bas = np.zeros((1,mole.BAS_SLOTS), dtype=np.int32)
ptr = mole.PTR_ENV_START
fakemol._env = np.zeros(ptr+10)
fakemol._bas[0,mole.NPRIM_OF ] = 1
fakemol._bas[0,mole.NCTR_OF ] = 1
fakemol._bas[0,mole.PTR_EXP ] = ptr+3
fakemol._bas[0,mole.PTR_COEFF] = ptr+4
Gv = np.asarray(cell.Gv+kpt)
G_rad = lib.norm(Gv, axis=1)
vppnl = np.zeros((nao,nao), dtype=np.complex128)
for ia in range(cell.natm):
symb = cell.atom_symbol(ia)
if symb not in cell._pseudo:
continue
pp = cell._pseudo[symb]
for l, proj in enumerate(pp[5:]):
rl, nl, hl = proj
if nl > 0:
hl = np.asarray(hl)
fakemol._bas[0,mole.ANG_OF] = l
fakemol._env[ptr+3] = .5*rl**2
fakemol._env[ptr+4] = rl**(l+1.5)*np.pi**1.25
pYlm_part = fakemol.eval_gto('GTOval', Gv)
pYlm = np.empty((nl,l*2+1,ngrids))
for k in range(nl):
qkl = _qli(G_rad*rl, l, k)
pYlm[k] = pYlm_part.T * qkl
# pYlm is real
SPG_lmi = np.einsum('g,nmg->nmg', SI[ia].conj(), pYlm)
SPG_lm_aoG = np.einsum('nmg,gp->nmp', SPG_lmi, aokG)
tmp = np.einsum('ij,jmp->imp', hl, SPG_lm_aoG)
vppnl += np.einsum('imp,imq->pq', SPG_lm_aoG.conj(), tmp)
vppnl *= (1./ngrids**2)
if aoR.dtype == np.double:
return vpploc.real + vppnl.real
else:
return vpploc + vppnl
def get_jvloc_G0(cell, kpt=np.zeros(3)):
'''Get the (separately divergent) Hartree + Vloc G=0 contribution.
'''
ovlp = cell.pbc_intor('int1e_ovlp', hermi=1, kpts=kpt)
return 1./cell.vol * np.sum(get_alphas(cell)) * ovlp
|
|
import os
import yaml
import copy
import types
import ciw.dists
from .network import *
def get_distribution(dist):
"""
For use when parameters are read in from a .yml file.
Returns instances of the distribution classes that
correspond to the indicator string in the .yml file.
"""
if dist[0] == 'Uniform':
return ciw.dists.Uniform(dist[1], dist[2])
if dist[0] == 'Deterministic':
return ciw.dists.Deterministic(dist[1])
if dist[0] == 'Triangular':
return ciw.dists.Triangular(dist[1], dist[2], dist[3])
if dist[0] == 'Exponential':
return ciw.dists.Exponential(dist[1])
if dist[0] == 'Gamma':
return ciw.dists.Gamma(dist[1], dist[2])
if dist[0] == 'Normal':
return ciw.dists.Normal(dist[1], dist[2])
if dist[0] == 'Lognormal':
return ciw.dists.Lognormal(dist[1], dist[2])
if dist[0] == 'Weibull':
return ciw.dists.Weibull(dist[1], dist[2])
if dist[0] == 'Pmf':
return ciw.dists.Pmf(dist[1], dist[2])
if dist[0] == 'NoArrivals':
return ciw.dists.NoArrivals()
return ciw.dists.Distribution()
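# Examples of the indicator-list form described above, as it appears after the
# .yml file is parsed (parameter values are illustrative):
#
#   get_distribution(['Exponential', 0.5])   # -> ciw.dists.Exponential(0.5)
#   get_distribution(['Uniform', 1.0, 3.0])  # -> ciw.dists.Uniform(1.0, 3.0)
#   get_distribution(['NoArrivals'])         # -> ciw.dists.NoArrivals()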
def create_network(arrival_distributions=None,
baulking_functions=None,
class_change_matrices=None,
number_of_servers=None,
priority_classes=None,
queue_capacities=None,
service_distributions=None,
routing=None,
batching_distributions=None,
ps_thresholds=None,
server_priority_functions=None):
"""
    Takes in keyword arguments, builds the parameters dictionary, and
    creates a Network object from it.
"""
if arrival_distributions == None or number_of_servers == None or service_distributions == None:
raise ValueError('arrival_distributions, service_distributions and number_of_servers are required arguments.')
params = {
'arrival_distributions': arrival_distributions,
'number_of_servers': number_of_servers,
'service_distributions': service_distributions
}
if baulking_functions != None:
params['baulking_functions'] = baulking_functions
if class_change_matrices != None:
params['class_change_matrices'] = class_change_matrices
if priority_classes != None:
params['priority_classes'] = priority_classes
if queue_capacities != None:
params['queue_capacities'] = queue_capacities
if routing != None:
params['routing'] = routing
if batching_distributions != None:
params['batching_distributions'] = batching_distributions
if ps_thresholds != None:
params['ps_thresholds'] = ps_thresholds
if server_priority_functions != None:
params['server_priority_functions'] = server_priority_functions
return create_network_from_dictionary(params)
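# Minimal usage sketch of create_network above (an M/M/1 queue; the rates are
# illustrative):
#
#   N = create_network(
#       arrival_distributions=[ciw.dists.Exponential(1.0)],
#       service_distributions=[ciw.dists.Exponential(2.0)],
#       number_of_servers=[1],
#   )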
def load_parameters(directory_name):
"""
Loads the .yml file parameters to a dictionary.
"""
root = os.getcwd()
directory = os.path.join(root, directory_name)
parameter_file_name = directory
parameter_file = open(parameter_file_name, 'r')
parameters = yaml.load(parameter_file, Loader=yaml.FullLoader)
parameter_file.close()
return parameters
def create_network_from_yml(directory_name):
"""
    Creates a Network object from a yaml file.
"""
params_input = load_parameters(directory_name)
params = fill_out_dictionary(params_input)
for clss in params['arrival_distributions']:
dists = []
for dist in params['arrival_distributions'][clss]:
dists.append(get_distribution(dist))
params['arrival_distributions'][clss] = dists
for clss in params['service_distributions']:
dists = []
for dist in params['service_distributions'][clss]:
dists.append(get_distribution(dist))
params['service_distributions'][clss] = dists
validify_dictionary(params)
return create_network_from_dictionary(params)
def create_network_from_dictionary(params_input):
"""
Creates a Network object from a parameters dictionary.
"""
params = fill_out_dictionary(params_input)
validify_dictionary(params)
# Then make the Network object
arrivals = [params['arrival_distributions']['Class ' + str(clss)]
for clss in range(len(params['arrival_distributions']))]
services = [params['service_distributions']['Class ' + str(clss)]
for clss in range(len(params['service_distributions']))]
if all(isinstance(f, types.FunctionType) for f in params['routing']):
routing = params['routing']
else:
routing = [params['routing']['Class ' + str(clss)]
for clss in range(len(params['routing']))]
priorities = [params['priority_classes']['Class ' + str(clss)]
for clss in range(len(params['priority_classes']))]
baulking_functions = [params['baulking_functions']['Class ' + str(clss)]
for clss in range(len(params['baulking_functions']))]
batches = [params['batching_distributions']['Class ' + str(clss)]
for clss in range(len(params['batching_distributions']))]
number_of_classes = params['number_of_classes']
number_of_nodes = params['number_of_nodes']
queueing_capacities = [float(i) if i == "Inf" else i for i in params['queue_capacities']]
class_change_matrices = params.get('class_change_matrices',
{'Node ' + str(nd + 1): None for nd in range(number_of_nodes)})
number_of_servers, schedules, nodes, classes, preempts = [], [], [], [], []
    for c in params['number_of_servers']:
        if isinstance(c, (tuple, list)):
            # A server schedule: either a (schedule, preempt) tuple or a bare schedule list.
            if isinstance(c, tuple):
                s = c[0]
                p = c[1]
            else:
                s = c
                p = False
            number_of_servers.append('schedule')
            schedules.append(s)
            preempts.append(p)
        elif c == 'Inf':
            # An unlimited number of servers at this node.
            number_of_servers.append(float(c))
            schedules.append(None)
            preempts.append(False)
        else:
            # A fixed number of servers at this node.
            number_of_servers.append(c)
            schedules.append(None)
            preempts.append(False)
for nd in range(number_of_nodes):
nodes.append(ServiceCentre(
number_of_servers[nd],
queueing_capacities[nd],
class_change_matrices['Node ' + str(nd + 1)],
schedules[nd],
preempts[nd],
params['ps_thresholds'][nd],
params['server_priority_functions'][nd]))
for clss in range(number_of_classes):
if all(isinstance(f, types.FunctionType) for f in params['routing']):
classes.append(CustomerClass(
arrivals[clss],
services[clss],
routing,
priorities[clss],
baulking_functions[clss],
batches[clss]))
else:
classes.append(CustomerClass(
arrivals[clss],
services[clss],
routing[clss],
priorities[clss],
baulking_functions[clss],
batches[clss]))
n = Network(nodes, classes)
if all(isinstance(f, types.FunctionType) for f in params['routing']):
n.process_based = True
else:
n.process_based = False
return n
def fill_out_dictionary(params_input):
"""
Fills out the parameters dictionary with the
default values of the optional arguments.
"""
params = copy.deepcopy(params_input)
if isinstance(params['arrival_distributions'], list):
arr_dists = params['arrival_distributions']
params['arrival_distributions'] = {'Class 0': arr_dists}
if isinstance(params['service_distributions'], list):
srv_dists = params['service_distributions']
params['service_distributions'] = {'Class 0': srv_dists}
if 'routing' in params:
if all(isinstance(f, list) for f in params['routing']):
rtng_mat = params['routing']
params['routing'] = {'Class 0': rtng_mat}
if 'baulking_functions' in params:
if isinstance(params['baulking_functions'], list):
blk_fncs = params['baulking_functions']
params['baulking_functions'] = {'Class 0': blk_fncs}
if 'batching_distributions' in params:
if isinstance(params['batching_distributions'], list):
btch_dists = params['batching_distributions']
params['batching_distributions'] = {'Class 0': btch_dists}
default_dict = {
'name': 'Simulation',
'routing': {'Class ' + str(i): [[0.0]]
for i in range(len(params['arrival_distributions']))},
'number_of_nodes': len(params['number_of_servers']),
'number_of_classes': len(params['arrival_distributions']),
'queue_capacities': [float('inf') for _ in range(len(
params['number_of_servers']))],
'priority_classes': {'Class ' + str(i): 0
for i in range(len(params['arrival_distributions']))},
'baulking_functions': {'Class ' + str(i): [
None for _ in range(len(params['number_of_servers']))]
for i in range(len(params['arrival_distributions']))},
'batching_distributions': {'Class ' + str(i): [
ciw.dists.Deterministic(1) for _ in range(
len(params['number_of_servers']))] for i in range(
len(params['arrival_distributions']))},
'ps_thresholds': [1 for _ in range(len(
params['number_of_servers']))],
'server_priority_functions' : [
None for _ in range(len(params['number_of_servers']))
]
}
for a in default_dict:
params[a] = params.get(a, default_dict[a])
return params
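# A small sketch (hypothetical values and helper name) of what the defaults
# above produce: list arguments are wrapped into a 'Class 0' dictionary and the
# optional fields are filled in.
def _example_fill_out_dictionary():
    """Illustrative only: fill out a minimal parameters dictionary."""
    params = fill_out_dictionary({
        'arrival_distributions': [ciw.dists.Exponential(1.0)],
        'service_distributions': [ciw.dists.Exponential(2.0)],
        'number_of_servers': [1]})
    # Now, for example:
    #   params['arrival_distributions'] == {'Class 0': [...]}
    #   params['number_of_nodes'] == 1 and params['number_of_classes'] == 1
    #   params['routing'] == {'Class 0': [[0.0]]}
    #   params['queue_capacities'] == [float('inf')]
    return params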
def validify_dictionary(params):
"""
    Raises a ValueError if the parameters dictionary is
    inconsistent or otherwise invalid.
"""
    if all(isinstance(f, types.FunctionType) for f in params['routing']):
        consistent_num_classes = (
            params['number_of_classes'] ==
            len(params['arrival_distributions']) ==
            len(params['service_distributions']) ==
            len(params['batching_distributions']))
    else:
        consistent_num_classes = (
            params['number_of_classes'] ==
            len(params['arrival_distributions']) ==
            len(params['service_distributions']) ==
            len(params['routing']) ==
            len(params['batching_distributions']))
    if not consistent_num_classes:
        raise ValueError('Ensure a consistent number of classes is used throughout.')
    if all(isinstance(f, types.FunctionType) for f in params['routing']):
        consistent_class_names = (
            set(params['arrival_distributions']) ==
            set(params['service_distributions']) ==
            set(params['batching_distributions']) ==
            set(['Class ' + str(i) for i in range(params['number_of_classes'])]))
    else:
        consistent_class_names = (
            set(params['arrival_distributions']) ==
            set(params['service_distributions']) ==
            set(params['routing']) ==
            set(params['batching_distributions']) ==
            set(['Class ' + str(i) for i in range(params['number_of_classes'])]))
    if not consistent_class_names:
        raise ValueError('Ensure correct names are used for customer classes.')
if all(isinstance(f, types.FunctionType) for f in params['routing']):
num_nodes_count = [
params['number_of_nodes']] + [
len(obs) for obs in params['arrival_distributions'].values()] + [
len(obs) for obs in params['service_distributions'].values()] + [
len(obs) for obs in params['batching_distributions'].values()] + [
len(params['routing'])] + [
len(params['number_of_servers'])] + [
len(params['queue_capacities'])]
else:
num_nodes_count = [
params['number_of_nodes']] + [
len(obs) for obs in params['arrival_distributions'].values()] + [
len(obs) for obs in params['service_distributions'].values()] + [
len(obs) for obs in params['routing'].values()] + [
len(obs) for obs in params['batching_distributions'].values()] + [
len(row) for row in [obs for obs in params['routing'].values()][0]] + [
len(params['number_of_servers'])] + [
len(params['queue_capacities'])]
if len(set(num_nodes_count)) != 1:
        raise ValueError('Ensure a consistent number of nodes is used throughout.')
if not all(isinstance(f, types.FunctionType) for f in params['routing']):
for clss in params['routing'].values():
for row in clss:
if sum(row) > 1.0 or min(row) < 0.0 or max(row) > 1.0:
raise ValueError('Ensure that routing matrix is valid.')
neg_numservers = any([(isinstance(obs, int) and obs < 0) for obs in params['number_of_servers']])
valid_capacities = all([((isinstance(obs, int) and obs >= 0) or obs==float('inf') or obs=='Inf') for obs in params['queue_capacities']])
    if neg_numservers:
        raise ValueError('Number of servers must be non-negative integers.')
    if not valid_capacities:
        raise ValueError('Queue capacities must be non-negative integers or infinite.')
if 'class_change_matrices' in params:
num_nodes = len(params['class_change_matrices']) == params['number_of_nodes']
node_names = set(params['class_change_matrices']) == set(['Node ' + str(i+1) for i in range(params['number_of_nodes'])])
if not (num_nodes and node_names):
raise ValueError('Ensure correct nodes used in class_change_matrices.')
for nd in params['class_change_matrices'].values():
for row in nd:
if sum(row) > 1.0 or min(row) < 0.0 or max(row) > 1.0:
raise ValueError('Ensure that class change matrix is valid.')
for n in params['number_of_servers']:
if isinstance(n, str) and n != 'Inf':
if n not in params:
raise ValueError('No schedule ' + str(n) + ' defined.')
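# A hedged sketch (hypothetical values and helper name) of the kind of
# inconsistency rejected above: one node's worth of distributions but two
# entries in number_of_servers.
def _example_validify_dictionary():
    """Illustrative only: expects a ValueError about the number of nodes."""
    bad_params = fill_out_dictionary({
        'arrival_distributions': [ciw.dists.Exponential(1.0)],
        'service_distributions': [ciw.dists.Exponential(2.0)],
        'number_of_servers': [1, 1]})
    try:
        validify_dictionary(bad_params)
    except ValueError as error:
        return error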